target-i386/cpu.c
1 /*
2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include <stdlib.h>
20 #include <stdio.h>
21 #include <string.h>
22 #include <inttypes.h>
24 #include "cpu.h"
25 #include "sysemu/kvm.h"
26 #include "sysemu/cpus.h"
27 #include "kvm_i386.h"
29 #include "qemu/error-report.h"
30 #include "qemu/option.h"
31 #include "qemu/config-file.h"
32 #include "qapi/qmp/qerror.h"
34 #include "qapi-types.h"
35 #include "qapi-visit.h"
36 #include "qapi/visitor.h"
37 #include "sysemu/arch_init.h"
39 #include "hw/hw.h"
40 #if defined(CONFIG_KVM)
41 #include <linux/kvm_para.h>
42 #endif
44 #include "sysemu/sysemu.h"
45 #include "hw/qdev-properties.h"
46 #include "hw/cpu/icc_bus.h"
47 #ifndef CONFIG_USER_ONLY
48 #include "exec/address-spaces.h"
49 #include "hw/xen/xen.h"
50 #include "hw/i386/apic_internal.h"
51 #endif
54 /* Cache topology CPUID constants: */
56 /* CPUID Leaf 2 Descriptors */
58 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
59 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
60 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
63 /* CPUID Leaf 4 constants: */
65 /* EAX: */
66 #define CPUID_4_TYPE_DCACHE 1
67 #define CPUID_4_TYPE_ICACHE 2
68 #define CPUID_4_TYPE_UNIFIED 3
70 #define CPUID_4_LEVEL(l) ((l) << 5)
72 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
73 #define CPUID_4_FULLY_ASSOC (1 << 9)
75 /* EDX: */
76 #define CPUID_4_NO_INVD_SHARING (1 << 0)
77 #define CPUID_4_INCLUSIVE (1 << 1)
78 #define CPUID_4_COMPLEX_IDX (1 << 2)
80 #define ASSOC_FULL 0xFF
82 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
83 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
84 a == 2 ? 0x2 : \
85 a == 4 ? 0x4 : \
86 a == 8 ? 0x6 : \
87 a == 16 ? 0x8 : \
88 a == 32 ? 0xA : \
89 a == 48 ? 0xB : \
90 a == 64 ? 0xC : \
91 a == 96 ? 0xD : \
92 a == 128 ? 0xE : \
93 a == ASSOC_FULL ? 0xF : \
94 0 /* invalid value */)
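/* For illustration: the encoding above maps way counts onto the 4-bit
 * associativity fields of CPUID leaf 0x80000006, e.g. AMD_ENC_ASSOC(8) == 0x6,
 * AMD_ENC_ASSOC(16) == 0x8 and AMD_ENC_ASSOC(ASSOC_FULL) == 0xF
 * (fully associative). */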
97 /* Definitions of the hardcoded cache entries we expose: */
99 /* L1 data cache: */
100 #define L1D_LINE_SIZE 64
101 #define L1D_ASSOCIATIVITY 8
102 #define L1D_SETS 64
103 #define L1D_PARTITIONS 1
104 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
105 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
106 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
107 #define L1D_LINES_PER_TAG 1
108 #define L1D_SIZE_KB_AMD 64
109 #define L1D_ASSOCIATIVITY_AMD 2
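/* Worked example of the size formula above: 64 B/line * 8 ways * 64 sets *
 * 1 partition = 32 KiB, matching the leaf-2 descriptor
 * CPUID_2_L1D_32KB_8WAY_64B. The AMD leaf 0x80000005 values instead describe
 * a 64 KiB, 2-way cache, which is the inconsistency the FIXME refers to. */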
111 /* L1 instruction cache: */
112 #define L1I_LINE_SIZE 64
113 #define L1I_ASSOCIATIVITY 8
114 #define L1I_SETS 64
115 #define L1I_PARTITIONS 1
116 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
117 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
118 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
119 #define L1I_LINES_PER_TAG 1
120 #define L1I_SIZE_KB_AMD 64
121 #define L1I_ASSOCIATIVITY_AMD 2
123 /* Level 2 unified cache: */
124 #define L2_LINE_SIZE 64
125 #define L2_ASSOCIATIVITY 16
126 #define L2_SETS 4096
127 #define L2_PARTITIONS 1
128 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
129 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
130 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
131 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
132 #define L2_LINES_PER_TAG 1
133 #define L2_SIZE_KB_AMD 512
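/* Leaf-4 view of the L2 above: 64 B/line * 16 ways * 4096 sets = 4 MiB,
 * while the leaf-2 descriptor advertises 2 MB/8-way and AMD leaf 0x80000006
 * reports 512 KB, as noted in the FIXMEs. */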
135 /* No L3 cache: */
136 #define L3_SIZE_KB 0 /* disabled */
137 #define L3_ASSOCIATIVITY 0 /* disabled */
138 #define L3_LINES_PER_TAG 0 /* disabled */
139 #define L3_LINE_SIZE 0 /* disabled */
141 /* TLB definitions: */
143 #define L1_DTLB_2M_ASSOC 1
144 #define L1_DTLB_2M_ENTRIES 255
145 #define L1_DTLB_4K_ASSOC 1
146 #define L1_DTLB_4K_ENTRIES 255
148 #define L1_ITLB_2M_ASSOC 1
149 #define L1_ITLB_2M_ENTRIES 255
150 #define L1_ITLB_4K_ASSOC 1
151 #define L1_ITLB_4K_ENTRIES 255
153 #define L2_DTLB_2M_ASSOC 0 /* disabled */
154 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
155 #define L2_DTLB_4K_ASSOC 4
156 #define L2_DTLB_4K_ENTRIES 512
158 #define L2_ITLB_2M_ASSOC 0 /* disabled */
159 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
160 #define L2_ITLB_4K_ASSOC 4
161 #define L2_ITLB_4K_ENTRIES 512
165 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
166 uint32_t vendor2, uint32_t vendor3)
168 int i;
169 for (i = 0; i < 4; i++) {
170 dst[i] = vendor1 >> (8 * i);
171 dst[i + 4] = vendor2 >> (8 * i);
172 dst[i + 8] = vendor3 >> (8 * i);
174 dst[CPUID_VENDOR_SZ] = '\0';
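/* Each 32-bit register is unpacked least-significant byte first, matching the
 * CPUID.0 convention, e.g. EBX/EDX/ECX == "Genu"/"ineI"/"ntel" produces the
 * vendor string "GenuineIntel". */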
177 /* feature flags taken from "Intel Processor Identification and the CPUID
178 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
179 * between feature naming conventions, aliases may be added.
181 static const char *feature_name[] = {
182 "fpu", "vme", "de", "pse",
183 "tsc", "msr", "pae", "mce",
184 "cx8", "apic", NULL, "sep",
185 "mtrr", "pge", "mca", "cmov",
186 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
187 NULL, "ds" /* Intel dts */, "acpi", "mmx",
188 "fxsr", "sse", "sse2", "ss",
189 "ht" /* Intel htt */, "tm", "ia64", "pbe",
191 static const char *ext_feature_name[] = {
192 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
193 "ds_cpl", "vmx", "smx", "est",
194 "tm2", "ssse3", "cid", NULL,
195 "fma", "cx16", "xtpr", "pdcm",
196 NULL, "pcid", "dca", "sse4.1|sse4_1",
197 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
198 "tsc-deadline", "aes", "xsave", "osxsave",
199 "avx", "f16c", "rdrand", "hypervisor",
201 /* Feature names that are already defined in feature_name[] and are also set
202  * in CPUID[8000_0001].EDX on AMD CPUs are not repeated in
203  * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
204  * if and only if the CPU vendor is AMD.
206 static const char *ext2_feature_name[] = {
207 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
208 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
209 NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
210 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
211 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
212 "nx|xd", NULL, "mmxext", NULL /* mmx */,
213 NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
214 NULL, "lm|i64", "3dnowext", "3dnow",
216 static const char *ext3_feature_name[] = {
217 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
218 "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
219 "3dnowprefetch", "osvw", "ibs", "xop",
220 "skinit", "wdt", NULL, "lwp",
221 "fma4", "tce", NULL, "nodeid_msr",
222 NULL, "tbm", "topoext", "perfctr_core",
223 "perfctr_nb", NULL, NULL, NULL,
224 NULL, NULL, NULL, NULL,
227 static const char *ext4_feature_name[] = {
228 NULL, NULL, "xstore", "xstore-en",
229 NULL, NULL, "xcrypt", "xcrypt-en",
230 "ace2", "ace2-en", "phe", "phe-en",
231 "pmm", "pmm-en", NULL, NULL,
232 NULL, NULL, NULL, NULL,
233 NULL, NULL, NULL, NULL,
234 NULL, NULL, NULL, NULL,
235 NULL, NULL, NULL, NULL,
238 static const char *kvm_feature_name[] = {
239 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
240 "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
241 NULL, NULL, NULL, NULL,
242 NULL, NULL, NULL, NULL,
243 NULL, NULL, NULL, NULL,
244 NULL, NULL, NULL, NULL,
245 "kvmclock-stable-bit", NULL, NULL, NULL,
246 NULL, NULL, NULL, NULL,
249 static const char *svm_feature_name[] = {
250 "npt", "lbrv", "svm_lock", "nrip_save",
251 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
252 NULL, NULL, "pause_filter", NULL,
253 "pfthreshold", NULL, NULL, NULL,
254 NULL, NULL, NULL, NULL,
255 NULL, NULL, NULL, NULL,
256 NULL, NULL, NULL, NULL,
257 NULL, NULL, NULL, NULL,
260 static const char *cpuid_7_0_ebx_feature_name[] = {
261 "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
262 "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
263 "avx512f", NULL, "rdseed", "adx", "smap", NULL, NULL, NULL,
264 NULL, NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
267 static const char *cpuid_apm_edx_feature_name[] = {
268 NULL, NULL, NULL, NULL,
269 NULL, NULL, NULL, NULL,
270 "invtsc", NULL, NULL, NULL,
271 NULL, NULL, NULL, NULL,
272 NULL, NULL, NULL, NULL,
273 NULL, NULL, NULL, NULL,
274 NULL, NULL, NULL, NULL,
275 NULL, NULL, NULL, NULL,
278 static const char *cpuid_xsave_feature_name[] = {
279 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
280 NULL, NULL, NULL, NULL,
281 NULL, NULL, NULL, NULL,
282 NULL, NULL, NULL, NULL,
283 NULL, NULL, NULL, NULL,
284 NULL, NULL, NULL, NULL,
285 NULL, NULL, NULL, NULL,
286 NULL, NULL, NULL, NULL,
289 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
290 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
291 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
292 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
293 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
294 CPUID_PSE36 | CPUID_FXSR)
295 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
296 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
297 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
298 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
299 CPUID_PAE | CPUID_SEP | CPUID_APIC)
301 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
302 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
303 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
304 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
305 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS)
306 /* partly implemented:
307 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
308 /* missing:
309 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
310 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
311 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
312 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
313 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
314 /* missing:
315 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
316 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
317 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
318 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_XSAVE,
319 CPUID_EXT_OSXSAVE, CPUID_EXT_AVX, CPUID_EXT_F16C,
320 CPUID_EXT_RDRAND */
322 #ifdef TARGET_X86_64
323 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
324 #else
325 #define TCG_EXT2_X86_64_FEATURES 0
326 #endif
328 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
329 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
330 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
331 TCG_EXT2_X86_64_FEATURES)
332 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
333 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
334 #define TCG_EXT4_FEATURES 0
335 #define TCG_SVM_FEATURES 0
336 #define TCG_KVM_FEATURES 0
337 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
338 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX)
339 /* missing:
340 CPUID_7_0_EBX_FSGSBASE, CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
341 CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
342 CPUID_7_0_EBX_RDSEED */
343 #define TCG_APM_FEATURES 0
346 typedef struct FeatureWordInfo {
347 const char **feat_names;
348 uint32_t cpuid_eax; /* Input EAX for CPUID */
349 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
350 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
351 int cpuid_reg; /* output register (R_* constant) */
352 uint32_t tcg_features; /* Feature flags supported by TCG */
353 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
354 } FeatureWordInfo;
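/* Example: the FEAT_7_0_EBX entry below describes CPUID.(EAX=07H,ECX=0):EBX;
 * bit i of that register corresponds to feat_names[i]. */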
356 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
357 [FEAT_1_EDX] = {
358 .feat_names = feature_name,
359 .cpuid_eax = 1, .cpuid_reg = R_EDX,
360 .tcg_features = TCG_FEATURES,
362 [FEAT_1_ECX] = {
363 .feat_names = ext_feature_name,
364 .cpuid_eax = 1, .cpuid_reg = R_ECX,
365 .tcg_features = TCG_EXT_FEATURES,
367 [FEAT_8000_0001_EDX] = {
368 .feat_names = ext2_feature_name,
369 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
370 .tcg_features = TCG_EXT2_FEATURES,
372 [FEAT_8000_0001_ECX] = {
373 .feat_names = ext3_feature_name,
374 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
375 .tcg_features = TCG_EXT3_FEATURES,
377 [FEAT_C000_0001_EDX] = {
378 .feat_names = ext4_feature_name,
379 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
380 .tcg_features = TCG_EXT4_FEATURES,
382 [FEAT_KVM] = {
383 .feat_names = kvm_feature_name,
384 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
385 .tcg_features = TCG_KVM_FEATURES,
387 [FEAT_SVM] = {
388 .feat_names = svm_feature_name,
389 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
390 .tcg_features = TCG_SVM_FEATURES,
392 [FEAT_7_0_EBX] = {
393 .feat_names = cpuid_7_0_ebx_feature_name,
394 .cpuid_eax = 7,
395 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
396 .cpuid_reg = R_EBX,
397 .tcg_features = TCG_7_0_EBX_FEATURES,
399 [FEAT_8000_0007_EDX] = {
400 .feat_names = cpuid_apm_edx_feature_name,
401 .cpuid_eax = 0x80000007,
402 .cpuid_reg = R_EDX,
403 .tcg_features = TCG_APM_FEATURES,
404 .unmigratable_flags = CPUID_APM_INVTSC,
406 [FEAT_XSAVE] = {
407 .feat_names = cpuid_xsave_feature_name,
408 .cpuid_eax = 0xd,
409 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
410 .cpuid_reg = R_EAX,
411 .tcg_features = 0,
415 typedef struct X86RegisterInfo32 {
416 /* Name of register */
417 const char *name;
418 /* QAPI enum value register */
419 X86CPURegister32 qapi_enum;
420 } X86RegisterInfo32;
422 #define REGISTER(reg) \
423 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
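/* REGISTER(EAX), for instance, expands to
 * [R_EAX] = { .name = "EAX", .qapi_enum = X86_CPU_REGISTER32_EAX }. */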
424 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
425 REGISTER(EAX),
426 REGISTER(ECX),
427 REGISTER(EDX),
428 REGISTER(EBX),
429 REGISTER(ESP),
430 REGISTER(EBP),
431 REGISTER(ESI),
432 REGISTER(EDI),
434 #undef REGISTER
436 typedef struct ExtSaveArea {
437 uint32_t feature, bits;
438 uint32_t offset, size;
439 } ExtSaveArea;
441 static const ExtSaveArea ext_save_areas[] = {
442 [2] = { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
443 .offset = 0x240, .size = 0x100 },
444 [3] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
445 .offset = 0x3c0, .size = 0x40 },
446 [4] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
447 .offset = 0x400, .size = 0x40 },
448 [5] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
449 .offset = 0x440, .size = 0x40 },
450 [6] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
451 .offset = 0x480, .size = 0x200 },
452 [7] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
453 .offset = 0x680, .size = 0x400 },
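/* The offsets above follow the standard (non-compacted) XSAVE layout reported
 * by CPUID.0DH: a 512-byte legacy area plus a 64-byte header, so the first
 * extended component (index 2, AVX) starts at 0x240. The array indices are
 * the XCR0 bit numbers of the state components. */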
456 const char *get_register_name_32(unsigned int reg)
458 if (reg >= CPU_NB_REGS32) {
459 return NULL;
461 return x86_reg_info_32[reg].name;
464 /* KVM-specific features that are automatically added to all CPU models
465 * when KVM is enabled.
467 static uint32_t kvm_default_features[FEATURE_WORDS] = {
468 [FEAT_KVM] = (1 << KVM_FEATURE_CLOCKSOURCE) |
469 (1 << KVM_FEATURE_NOP_IO_DELAY) |
470 (1 << KVM_FEATURE_CLOCKSOURCE2) |
471 (1 << KVM_FEATURE_ASYNC_PF) |
472 (1 << KVM_FEATURE_STEAL_TIME) |
473 (1 << KVM_FEATURE_PV_EOI) |
474 (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT),
475 [FEAT_1_ECX] = CPUID_EXT_X2APIC,
478 /* Features that are not added by default to any CPU model when KVM is enabled.
480 static uint32_t kvm_default_unset_features[FEATURE_WORDS] = {
481 [FEAT_1_EDX] = CPUID_ACPI,
482 [FEAT_1_ECX] = CPUID_EXT_MONITOR,
483 [FEAT_8000_0001_ECX] = CPUID_EXT3_SVM,
486 void x86_cpu_compat_kvm_no_autoenable(FeatureWord w, uint32_t features)
488 kvm_default_features[w] &= ~features;
491 void x86_cpu_compat_kvm_no_autodisable(FeatureWord w, uint32_t features)
493 kvm_default_unset_features[w] &= ~features;
497 * Returns the set of feature flags that are supported and migratable by
498 * QEMU, for a given FeatureWord.
500 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
502 FeatureWordInfo *wi = &feature_word_info[w];
503 uint32_t r = 0;
504 int i;
506 for (i = 0; i < 32; i++) {
507 uint32_t f = 1U << i;
508 /* If the feature name is unknown, it is not supported by QEMU yet */
509 if (!wi->feat_names[i]) {
510 continue;
512 /* Skip features known to QEMU, but explicitly marked as unmigratable */
513 if (wi->unmigratable_flags & f) {
514 continue;
516 r |= f;
518 return r;
521 void host_cpuid(uint32_t function, uint32_t count,
522 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
524 uint32_t vec[4];
526 #ifdef __x86_64__
527 asm volatile("cpuid"
528 : "=a"(vec[0]), "=b"(vec[1]),
529 "=c"(vec[2]), "=d"(vec[3])
530 : "0"(function), "c"(count) : "cc");
531 #elif defined(__i386__)
532 asm volatile("pusha \n\t"
533 "cpuid \n\t"
534 "mov %%eax, 0(%2) \n\t"
535 "mov %%ebx, 4(%2) \n\t"
536 "mov %%ecx, 8(%2) \n\t"
537 "mov %%edx, 12(%2) \n\t"
538 "popa"
539 : : "a"(function), "c"(count), "S"(vec)
540 : "memory", "cc");
541 #else
542 abort();
543 #endif
545 if (eax)
546 *eax = vec[0];
547 if (ebx)
548 *ebx = vec[1];
549 if (ecx)
550 *ecx = vec[2];
551 if (edx)
552 *edx = vec[3];
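/* Typical use: host_cpuid(0, 0, &eax, &ebx, &ecx, &edx) returns the highest
 * basic leaf in EAX and the host vendor string in EBX/EDX/ECX, which is how
 * host_x86_cpu_class_init() below fills host_cpudef. */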
555 #define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
557 /* general substring compare of *[s1..e1) and *[s2..e2). sx is the start of
558  * a substring; ex, if not NULL, points to the first char after the substring,
559  * otherwise the string is assumed to be sized by a terminating nul.
560  * Return the lexical ordering of *s1:*s2.
562 static int sstrcmp(const char *s1, const char *e1,
563 const char *s2, const char *e2)
565 for (;;) {
566 if (!*s1 || !*s2 || *s1 != *s2)
567 return (*s1 - *s2);
568 ++s1, ++s2;
569 if (s1 == e1 && s2 == e2)
570 return (0);
571 else if (s1 == e1)
572 return (*s2);
573 else if (s2 == e2)
574 return (*s1);
578 /* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
579  * '|'-delimited (possibly empty) strings, in which case the search for a match
580  * among the alternatives proceeds left to right. Return 0 for success,
581  * non-zero otherwise.
583 static int altcmp(const char *s, const char *e, const char *altstr)
585 const char *p, *q;
587 for (q = p = altstr; ; ) {
588 while (*p && *p != '|')
589 ++p;
590 if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
591 return (0);
592 if (!*p)
593 return (1);
594 else
595 q = ++p;
599 /* search featureset for the flag *[s..e); if found, set the corresponding bit
600  * in *pval and return true, otherwise return false
602 static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
603 const char **featureset)
605 uint32_t mask;
606 const char **ppc;
607 bool found = false;
609 for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
610 if (*ppc && !altcmp(s, e, *ppc)) {
611 *pval |= mask;
612 found = true;
615 return found;
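/* Example: lookup_feature(&val, "sse4_1", NULL, ext_feature_name) walks the
 * table, altcmp() accepts either spelling of the "sse4.1|sse4_1" alternatives,
 * and bit 19 (CPUID_EXT_SSE41) is set in val. */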
618 static void add_flagname_to_bitmaps(const char *flagname,
619 FeatureWordArray words,
620 Error **errp)
622 FeatureWord w;
623 for (w = 0; w < FEATURE_WORDS; w++) {
624 FeatureWordInfo *wi = &feature_word_info[w];
625 if (wi->feat_names &&
626 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
627 break;
630 if (w == FEATURE_WORDS) {
631 error_setg(errp, "CPU feature %s not found", flagname);
635 /* CPU class name definitions: */
637 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
638 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
640 /* Return type name for a given CPU model name
641 * Caller is responsible for freeing the returned string.
643 static char *x86_cpu_type_name(const char *model_name)
645 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
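/* E.g. x86_cpu_type_name("qemu64") returns "qemu64-" TYPE_X86_CPU, which on a
 * 64-bit target is the QOM type name "qemu64-x86_64-cpu". */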
648 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
650 ObjectClass *oc;
651 char *typename;
653 if (cpu_model == NULL) {
654 return NULL;
657 typename = x86_cpu_type_name(cpu_model);
658 oc = object_class_by_name(typename);
659 g_free(typename);
660 return oc;
663 struct X86CPUDefinition {
664 const char *name;
665 uint32_t level;
666 uint32_t xlevel;
667 uint32_t xlevel2;
668 /* vendor is zero-terminated, 12 character ASCII string */
669 char vendor[CPUID_VENDOR_SZ + 1];
670 int family;
671 int model;
672 int stepping;
673 FeatureWordArray features;
674 char model_id[48];
675 bool cache_info_passthrough;
678 static X86CPUDefinition builtin_x86_defs[] = {
680 .name = "qemu64",
681 .level = 4,
682 .vendor = CPUID_VENDOR_AMD,
683 .family = 6,
684 .model = 6,
685 .stepping = 3,
686 .features[FEAT_1_EDX] =
687 PPRO_FEATURES |
688 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
689 CPUID_PSE36,
690 .features[FEAT_1_ECX] =
691 CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT,
692 .features[FEAT_8000_0001_EDX] =
693 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
694 .features[FEAT_8000_0001_ECX] =
695 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
696 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
697 .xlevel = 0x8000000A,
700 .name = "phenom",
701 .level = 5,
702 .vendor = CPUID_VENDOR_AMD,
703 .family = 16,
704 .model = 2,
705 .stepping = 3,
706 /* Missing: CPUID_HT */
707 .features[FEAT_1_EDX] =
708 PPRO_FEATURES |
709 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
710 CPUID_PSE36 | CPUID_VME,
711 .features[FEAT_1_ECX] =
712 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
713 CPUID_EXT_POPCNT,
714 .features[FEAT_8000_0001_EDX] =
715 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
716 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
717 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
718 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
719 CPUID_EXT3_CR8LEG,
720 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
721 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
722 .features[FEAT_8000_0001_ECX] =
723 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
724 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
725 /* Missing: CPUID_SVM_LBRV */
726 .features[FEAT_SVM] =
727 CPUID_SVM_NPT,
728 .xlevel = 0x8000001A,
729 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
732 .name = "core2duo",
733 .level = 10,
734 .vendor = CPUID_VENDOR_INTEL,
735 .family = 6,
736 .model = 15,
737 .stepping = 11,
738 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
739 .features[FEAT_1_EDX] =
740 PPRO_FEATURES |
741 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
742 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
743 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
744 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
745 .features[FEAT_1_ECX] =
746 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
747 CPUID_EXT_CX16,
748 .features[FEAT_8000_0001_EDX] =
749 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
750 .features[FEAT_8000_0001_ECX] =
751 CPUID_EXT3_LAHF_LM,
752 .xlevel = 0x80000008,
753 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
756 .name = "kvm64",
757 .level = 5,
758 .vendor = CPUID_VENDOR_INTEL,
759 .family = 15,
760 .model = 6,
761 .stepping = 1,
762 /* Missing: CPUID_HT */
763 .features[FEAT_1_EDX] =
764 PPRO_FEATURES | CPUID_VME |
765 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
766 CPUID_PSE36,
767 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
768 .features[FEAT_1_ECX] =
769 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
770 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
771 .features[FEAT_8000_0001_EDX] =
772 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
773 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
774 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
775 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
776 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
777 .features[FEAT_8000_0001_ECX] =
779 .xlevel = 0x80000008,
780 .model_id = "Common KVM processor"
783 .name = "qemu32",
784 .level = 4,
785 .vendor = CPUID_VENDOR_INTEL,
786 .family = 6,
787 .model = 6,
788 .stepping = 3,
789 .features[FEAT_1_EDX] =
790 PPRO_FEATURES,
791 .features[FEAT_1_ECX] =
792 CPUID_EXT_SSE3 | CPUID_EXT_POPCNT,
793 .xlevel = 0x80000004,
796 .name = "kvm32",
797 .level = 5,
798 .vendor = CPUID_VENDOR_INTEL,
799 .family = 15,
800 .model = 6,
801 .stepping = 1,
802 .features[FEAT_1_EDX] =
803 PPRO_FEATURES | CPUID_VME |
804 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
805 .features[FEAT_1_ECX] =
806 CPUID_EXT_SSE3,
807 .features[FEAT_8000_0001_ECX] =
809 .xlevel = 0x80000008,
810 .model_id = "Common 32-bit KVM processor"
813 .name = "coreduo",
814 .level = 10,
815 .vendor = CPUID_VENDOR_INTEL,
816 .family = 6,
817 .model = 14,
818 .stepping = 8,
819 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
820 .features[FEAT_1_EDX] =
821 PPRO_FEATURES | CPUID_VME |
822 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
823 CPUID_SS,
824 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
825 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
826 .features[FEAT_1_ECX] =
827 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
828 .features[FEAT_8000_0001_EDX] =
829 CPUID_EXT2_NX,
830 .xlevel = 0x80000008,
831 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
834 .name = "486",
835 .level = 1,
836 .vendor = CPUID_VENDOR_INTEL,
837 .family = 4,
838 .model = 8,
839 .stepping = 0,
840 .features[FEAT_1_EDX] =
841 I486_FEATURES,
842 .xlevel = 0,
845 .name = "pentium",
846 .level = 1,
847 .vendor = CPUID_VENDOR_INTEL,
848 .family = 5,
849 .model = 4,
850 .stepping = 3,
851 .features[FEAT_1_EDX] =
852 PENTIUM_FEATURES,
853 .xlevel = 0,
856 .name = "pentium2",
857 .level = 2,
858 .vendor = CPUID_VENDOR_INTEL,
859 .family = 6,
860 .model = 5,
861 .stepping = 2,
862 .features[FEAT_1_EDX] =
863 PENTIUM2_FEATURES,
864 .xlevel = 0,
867 .name = "pentium3",
868 .level = 2,
869 .vendor = CPUID_VENDOR_INTEL,
870 .family = 6,
871 .model = 7,
872 .stepping = 3,
873 .features[FEAT_1_EDX] =
874 PENTIUM3_FEATURES,
875 .xlevel = 0,
878 .name = "athlon",
879 .level = 2,
880 .vendor = CPUID_VENDOR_AMD,
881 .family = 6,
882 .model = 2,
883 .stepping = 3,
884 .features[FEAT_1_EDX] =
885 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
886 CPUID_MCA,
887 .features[FEAT_8000_0001_EDX] =
888 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
889 .xlevel = 0x80000008,
892 .name = "n270",
893 /* the original CPU reports CPUID level 10 */
894 .level = 5,
895 .vendor = CPUID_VENDOR_INTEL,
896 .family = 6,
897 .model = 28,
898 .stepping = 2,
899 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
900 .features[FEAT_1_EDX] =
901 PPRO_FEATURES |
902 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
903 CPUID_ACPI | CPUID_SS,
904 /* Some CPUs lack CPUID_SEP */
905 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
906 * CPUID_EXT_XTPR */
907 .features[FEAT_1_ECX] =
908 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
909 CPUID_EXT_MOVBE,
910 .features[FEAT_8000_0001_EDX] =
911 CPUID_EXT2_NX,
912 .features[FEAT_8000_0001_ECX] =
913 CPUID_EXT3_LAHF_LM,
914 .xlevel = 0x8000000A,
915 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
918 .name = "Conroe",
919 .level = 4,
920 .vendor = CPUID_VENDOR_INTEL,
921 .family = 6,
922 .model = 15,
923 .stepping = 3,
924 .features[FEAT_1_EDX] =
925 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
926 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
927 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
928 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
929 CPUID_DE | CPUID_FP87,
930 .features[FEAT_1_ECX] =
931 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
932 .features[FEAT_8000_0001_EDX] =
933 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
934 .features[FEAT_8000_0001_ECX] =
935 CPUID_EXT3_LAHF_LM,
936 .xlevel = 0x8000000A,
937 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
940 .name = "Penryn",
941 .level = 4,
942 .vendor = CPUID_VENDOR_INTEL,
943 .family = 6,
944 .model = 23,
945 .stepping = 3,
946 .features[FEAT_1_EDX] =
947 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
948 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
949 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
950 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
951 CPUID_DE | CPUID_FP87,
952 .features[FEAT_1_ECX] =
953 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
954 CPUID_EXT_SSE3,
955 .features[FEAT_8000_0001_EDX] =
956 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
957 .features[FEAT_8000_0001_ECX] =
958 CPUID_EXT3_LAHF_LM,
959 .xlevel = 0x8000000A,
960 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
963 .name = "Nehalem",
964 .level = 4,
965 .vendor = CPUID_VENDOR_INTEL,
966 .family = 6,
967 .model = 26,
968 .stepping = 3,
969 .features[FEAT_1_EDX] =
970 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
971 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
972 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
973 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
974 CPUID_DE | CPUID_FP87,
975 .features[FEAT_1_ECX] =
976 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
977 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
978 .features[FEAT_8000_0001_EDX] =
979 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
980 .features[FEAT_8000_0001_ECX] =
981 CPUID_EXT3_LAHF_LM,
982 .xlevel = 0x8000000A,
983 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
986 .name = "Westmere",
987 .level = 11,
988 .vendor = CPUID_VENDOR_INTEL,
989 .family = 6,
990 .model = 44,
991 .stepping = 1,
992 .features[FEAT_1_EDX] =
993 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
994 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
995 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
996 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
997 CPUID_DE | CPUID_FP87,
998 .features[FEAT_1_ECX] =
999 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1000 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1001 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1002 .features[FEAT_8000_0001_EDX] =
1003 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1004 .features[FEAT_8000_0001_ECX] =
1005 CPUID_EXT3_LAHF_LM,
1006 .xlevel = 0x8000000A,
1007 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1010 .name = "SandyBridge",
1011 .level = 0xd,
1012 .vendor = CPUID_VENDOR_INTEL,
1013 .family = 6,
1014 .model = 42,
1015 .stepping = 1,
1016 .features[FEAT_1_EDX] =
1017 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1018 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1019 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1020 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1021 CPUID_DE | CPUID_FP87,
1022 .features[FEAT_1_ECX] =
1023 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1024 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1025 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1026 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1027 CPUID_EXT_SSE3,
1028 .features[FEAT_8000_0001_EDX] =
1029 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1030 CPUID_EXT2_SYSCALL,
1031 .features[FEAT_8000_0001_ECX] =
1032 CPUID_EXT3_LAHF_LM,
1033 .features[FEAT_XSAVE] =
1034 CPUID_XSAVE_XSAVEOPT,
1035 .xlevel = 0x8000000A,
1036 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1039 .name = "IvyBridge",
1040 .level = 0xd,
1041 .vendor = CPUID_VENDOR_INTEL,
1042 .family = 6,
1043 .model = 58,
1044 .stepping = 9,
1045 .features[FEAT_1_EDX] =
1046 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1047 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1048 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1049 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1050 CPUID_DE | CPUID_FP87,
1051 .features[FEAT_1_ECX] =
1052 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1053 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1054 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1055 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1056 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1057 .features[FEAT_7_0_EBX] =
1058 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1059 CPUID_7_0_EBX_ERMS,
1060 .features[FEAT_8000_0001_EDX] =
1061 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1062 CPUID_EXT2_SYSCALL,
1063 .features[FEAT_8000_0001_ECX] =
1064 CPUID_EXT3_LAHF_LM,
1065 .features[FEAT_XSAVE] =
1066 CPUID_XSAVE_XSAVEOPT,
1067 .xlevel = 0x8000000A,
1068 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1071 .name = "Haswell-noTSX",
1072 .level = 0xd,
1073 .vendor = CPUID_VENDOR_INTEL,
1074 .family = 6,
1075 .model = 60,
1076 .stepping = 1,
1077 .features[FEAT_1_EDX] =
1078 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1079 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1080 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1081 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1082 CPUID_DE | CPUID_FP87,
1083 .features[FEAT_1_ECX] =
1084 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1085 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1086 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1087 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1088 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1089 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1090 .features[FEAT_8000_0001_EDX] =
1091 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1092 CPUID_EXT2_SYSCALL,
1093 .features[FEAT_8000_0001_ECX] =
1094 CPUID_EXT3_LAHF_LM,
1095 .features[FEAT_7_0_EBX] =
1096 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1097 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1098 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1099 .features[FEAT_XSAVE] =
1100 CPUID_XSAVE_XSAVEOPT,
1101 .xlevel = 0x8000000A,
1102 .model_id = "Intel Core Processor (Haswell, no TSX)",
1103 }, {
1104 .name = "Haswell",
1105 .level = 0xd,
1106 .vendor = CPUID_VENDOR_INTEL,
1107 .family = 6,
1108 .model = 60,
1109 .stepping = 1,
1110 .features[FEAT_1_EDX] =
1111 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1112 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1113 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1114 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1115 CPUID_DE | CPUID_FP87,
1116 .features[FEAT_1_ECX] =
1117 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1118 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1119 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1120 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1121 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1122 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1123 .features[FEAT_8000_0001_EDX] =
1124 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1125 CPUID_EXT2_SYSCALL,
1126 .features[FEAT_8000_0001_ECX] =
1127 CPUID_EXT3_LAHF_LM,
1128 .features[FEAT_7_0_EBX] =
1129 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1130 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1131 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1132 CPUID_7_0_EBX_RTM,
1133 .features[FEAT_XSAVE] =
1134 CPUID_XSAVE_XSAVEOPT,
1135 .xlevel = 0x8000000A,
1136 .model_id = "Intel Core Processor (Haswell)",
1139 .name = "Broadwell-noTSX",
1140 .level = 0xd,
1141 .vendor = CPUID_VENDOR_INTEL,
1142 .family = 6,
1143 .model = 61,
1144 .stepping = 2,
1145 .features[FEAT_1_EDX] =
1146 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1147 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1148 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1149 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1150 CPUID_DE | CPUID_FP87,
1151 .features[FEAT_1_ECX] =
1152 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1153 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1154 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1155 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1156 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1157 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1158 .features[FEAT_8000_0001_EDX] =
1159 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1160 CPUID_EXT2_SYSCALL,
1161 .features[FEAT_8000_0001_ECX] =
1162 CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1163 .features[FEAT_7_0_EBX] =
1164 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1165 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1166 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1167 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1168 CPUID_7_0_EBX_SMAP,
1169 .features[FEAT_XSAVE] =
1170 CPUID_XSAVE_XSAVEOPT,
1171 .xlevel = 0x8000000A,
1172 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1175 .name = "Broadwell",
1176 .level = 0xd,
1177 .vendor = CPUID_VENDOR_INTEL,
1178 .family = 6,
1179 .model = 61,
1180 .stepping = 2,
1181 .features[FEAT_1_EDX] =
1182 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1183 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1184 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1185 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1186 CPUID_DE | CPUID_FP87,
1187 .features[FEAT_1_ECX] =
1188 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1189 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1190 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1191 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1192 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1193 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1194 .features[FEAT_8000_0001_EDX] =
1195 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1196 CPUID_EXT2_SYSCALL,
1197 .features[FEAT_8000_0001_ECX] =
1198 CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1199 .features[FEAT_7_0_EBX] =
1200 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1201 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1202 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1203 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1204 CPUID_7_0_EBX_SMAP,
1205 .features[FEAT_XSAVE] =
1206 CPUID_XSAVE_XSAVEOPT,
1207 .xlevel = 0x8000000A,
1208 .model_id = "Intel Core Processor (Broadwell)",
1211 .name = "Opteron_G1",
1212 .level = 5,
1213 .vendor = CPUID_VENDOR_AMD,
1214 .family = 15,
1215 .model = 6,
1216 .stepping = 1,
1217 .features[FEAT_1_EDX] =
1218 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1219 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1220 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1221 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1222 CPUID_DE | CPUID_FP87,
1223 .features[FEAT_1_ECX] =
1224 CPUID_EXT_SSE3,
1225 .features[FEAT_8000_0001_EDX] =
1226 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1227 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1228 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1229 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1230 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1231 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1232 .xlevel = 0x80000008,
1233 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1236 .name = "Opteron_G2",
1237 .level = 5,
1238 .vendor = CPUID_VENDOR_AMD,
1239 .family = 15,
1240 .model = 6,
1241 .stepping = 1,
1242 .features[FEAT_1_EDX] =
1243 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1244 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1245 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1246 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1247 CPUID_DE | CPUID_FP87,
1248 .features[FEAT_1_ECX] =
1249 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1250 .features[FEAT_8000_0001_EDX] =
1251 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1252 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1253 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1254 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1255 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1256 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1257 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1258 .features[FEAT_8000_0001_ECX] =
1259 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1260 .xlevel = 0x80000008,
1261 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1264 .name = "Opteron_G3",
1265 .level = 5,
1266 .vendor = CPUID_VENDOR_AMD,
1267 .family = 15,
1268 .model = 6,
1269 .stepping = 1,
1270 .features[FEAT_1_EDX] =
1271 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1272 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1273 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1274 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1275 CPUID_DE | CPUID_FP87,
1276 .features[FEAT_1_ECX] =
1277 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1278 CPUID_EXT_SSE3,
1279 .features[FEAT_8000_0001_EDX] =
1280 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1281 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1282 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1283 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1284 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1285 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1286 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1287 .features[FEAT_8000_0001_ECX] =
1288 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1289 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1290 .xlevel = 0x80000008,
1291 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1294 .name = "Opteron_G4",
1295 .level = 0xd,
1296 .vendor = CPUID_VENDOR_AMD,
1297 .family = 21,
1298 .model = 1,
1299 .stepping = 2,
1300 .features[FEAT_1_EDX] =
1301 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1302 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1303 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1304 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1305 CPUID_DE | CPUID_FP87,
1306 .features[FEAT_1_ECX] =
1307 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1308 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1309 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1310 CPUID_EXT_SSE3,
1311 .features[FEAT_8000_0001_EDX] =
1312 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1313 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1314 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1315 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1316 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1317 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1318 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1319 .features[FEAT_8000_0001_ECX] =
1320 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1321 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1322 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1323 CPUID_EXT3_LAHF_LM,
1324 /* no xsaveopt! */
1325 .xlevel = 0x8000001A,
1326 .model_id = "AMD Opteron 62xx class CPU",
1329 .name = "Opteron_G5",
1330 .level = 0xd,
1331 .vendor = CPUID_VENDOR_AMD,
1332 .family = 21,
1333 .model = 2,
1334 .stepping = 0,
1335 .features[FEAT_1_EDX] =
1336 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1337 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1338 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1339 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1340 CPUID_DE | CPUID_FP87,
1341 .features[FEAT_1_ECX] =
1342 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1343 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1344 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1345 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1346 .features[FEAT_8000_0001_EDX] =
1347 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1348 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1349 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1350 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1351 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1352 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1353 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1354 .features[FEAT_8000_0001_ECX] =
1355 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1356 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1357 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1358 CPUID_EXT3_LAHF_LM,
1359 /* no xsaveopt! */
1360 .xlevel = 0x8000001A,
1361 .model_id = "AMD Opteron 63xx class CPU",
1366 * x86_cpu_compat_set_features:
1367 * @cpu_model: CPU model name to be changed. If NULL, all CPU models are changed
1368 * @w: Identifies the feature word to be changed.
1369 * @feat_add: Feature bits to be added to feature word
1370 * @feat_remove: Feature bits to be removed from feature word
1372 * Change CPU model feature bits for compatibility.
1374 * This function may be used by machine-type compatibility functions
1375 * to enable or disable feature bits on specific CPU models.
1377 void x86_cpu_compat_set_features(const char *cpu_model, FeatureWord w,
1378 uint32_t feat_add, uint32_t feat_remove)
1380 X86CPUDefinition *def;
1381 int i;
1382 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1383 def = &builtin_x86_defs[i];
1384 if (!cpu_model || !strcmp(cpu_model, def->name)) {
1385 def->features[w] |= feat_add;
1386 def->features[w] &= ~feat_remove;
1391 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1392 bool migratable_only);
1394 #ifdef CONFIG_KVM
1396 static int cpu_x86_fill_model_id(char *str)
1398 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1399 int i;
1401 for (i = 0; i < 3; i++) {
1402 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
1403 memcpy(str + i * 16 + 0, &eax, 4);
1404 memcpy(str + i * 16 + 4, &ebx, 4);
1405 memcpy(str + i * 16 + 8, &ecx, 4);
1406 memcpy(str + i * 16 + 12, &edx, 4);
1408 return 0;
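/* Leaves 0x80000002..0x80000004 each return 16 bytes of the brand string in
 * EAX..EDX, so the three iterations above fill the 48-byte model ID. */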
1411 static X86CPUDefinition host_cpudef;
1413 static Property host_x86_cpu_properties[] = {
1414 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1415 DEFINE_PROP_END_OF_LIST()
1418 /* class_init for the "host" CPU model
1420 * This function may be called before KVM is initialized.
1422 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1424 DeviceClass *dc = DEVICE_CLASS(oc);
1425 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1426 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1428 xcc->kvm_required = true;
1430 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1431 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1433 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1434 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1435 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1436 host_cpudef.stepping = eax & 0x0F;
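/* Worked example of the decoding above: EAX = 0x000306A9 yields stepping 9,
 * model 0xA | 0x30 = 58 and family 6, i.e. the same family/model/stepping
 * used by the IvyBridge definition in builtin_x86_defs. */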
1438 cpu_x86_fill_model_id(host_cpudef.model_id);
1440 xcc->cpu_def = &host_cpudef;
1441 host_cpudef.cache_info_passthrough = true;
1443 /* level, xlevel, xlevel2, and the feature words are initialized in
1444 * instance_init, because they require KVM to be initialized.
1447 dc->props = host_x86_cpu_properties;
1450 static void host_x86_cpu_initfn(Object *obj)
1452 X86CPU *cpu = X86_CPU(obj);
1453 CPUX86State *env = &cpu->env;
1454 KVMState *s = kvm_state;
1456 assert(kvm_enabled());
1458 /* We can't fill the features array here because we don't know yet if
1459 * "migratable" is true or false.
1461 cpu->host_features = true;
1463 env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1464 env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1465 env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1467 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1470 static const TypeInfo host_x86_cpu_type_info = {
1471 .name = X86_CPU_TYPE_NAME("host"),
1472 .parent = TYPE_X86_CPU,
1473 .instance_init = host_x86_cpu_initfn,
1474 .class_init = host_x86_cpu_class_init,
1477 #endif
1479 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1481 FeatureWordInfo *f = &feature_word_info[w];
1482 int i;
1484 for (i = 0; i < 32; ++i) {
1485 if (1 << i & mask) {
1486 const char *reg = get_register_name_32(f->cpuid_reg);
1487 assert(reg);
1488 fprintf(stderr, "warning: %s doesn't support requested feature: "
1489 "CPUID.%02XH:%s%s%s [bit %d]\n",
1490 kvm_enabled() ? "host" : "TCG",
1491 f->cpuid_eax, reg,
1492 f->feat_names[i] ? "." : "",
1493 f->feat_names[i] ? f->feat_names[i] : "", i);
1498 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, void *opaque,
1499 const char *name, Error **errp)
1501 X86CPU *cpu = X86_CPU(obj);
1502 CPUX86State *env = &cpu->env;
1503 int64_t value;
1505 value = (env->cpuid_version >> 8) & 0xf;
1506 if (value == 0xf) {
1507 value += (env->cpuid_version >> 20) & 0xff;
1509 visit_type_int(v, &value, name, errp);
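/* The "family" property combines the base field (CPUID[1].EAX bits 8-11) with
 * the extended field (bits 20-27) when the base reads 0xF; e.g. family 21
 * (0x15, Opteron_G4) is stored as 0xF plus an extended value of 6. */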
1512 static void x86_cpuid_version_set_family(Object *obj, Visitor *v, void *opaque,
1513 const char *name, Error **errp)
1515 X86CPU *cpu = X86_CPU(obj);
1516 CPUX86State *env = &cpu->env;
1517 const int64_t min = 0;
1518 const int64_t max = 0xff + 0xf;
1519 Error *local_err = NULL;
1520 int64_t value;
1522 visit_type_int(v, &value, name, &local_err);
1523 if (local_err) {
1524 error_propagate(errp, local_err);
1525 return;
1527 if (value < min || value > max) {
1528 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1529 name ? name : "null", value, min, max);
1530 return;
1533 env->cpuid_version &= ~0xff00f00;
1534 if (value > 0x0f) {
1535 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1536 } else {
1537 env->cpuid_version |= value << 8;
1541 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, void *opaque,
1542 const char *name, Error **errp)
1544 X86CPU *cpu = X86_CPU(obj);
1545 CPUX86State *env = &cpu->env;
1546 int64_t value;
1548 value = (env->cpuid_version >> 4) & 0xf;
1549 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1550 visit_type_int(v, &value, name, errp);
1553 static void x86_cpuid_version_set_model(Object *obj, Visitor *v, void *opaque,
1554 const char *name, Error **errp)
1556 X86CPU *cpu = X86_CPU(obj);
1557 CPUX86State *env = &cpu->env;
1558 const int64_t min = 0;
1559 const int64_t max = 0xff;
1560 Error *local_err = NULL;
1561 int64_t value;
1563 visit_type_int(v, &value, name, &local_err);
1564 if (local_err) {
1565 error_propagate(errp, local_err);
1566 return;
1568 if (value < min || value > max) {
1569 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1570 name ? name : "null", value, min, max);
1571 return;
1574 env->cpuid_version &= ~0xf00f0;
1575 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
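/* E.g. model 58 (0x3A) is stored as 0xA in bits 4-7 and 0x3 in the extended
 * model field at bits 16-19, mirroring the getter above. */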
1578 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1579 void *opaque, const char *name,
1580 Error **errp)
1582 X86CPU *cpu = X86_CPU(obj);
1583 CPUX86State *env = &cpu->env;
1584 int64_t value;
1586 value = env->cpuid_version & 0xf;
1587 visit_type_int(v, &value, name, errp);
1590 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1591 void *opaque, const char *name,
1592 Error **errp)
1594 X86CPU *cpu = X86_CPU(obj);
1595 CPUX86State *env = &cpu->env;
1596 const int64_t min = 0;
1597 const int64_t max = 0xf;
1598 Error *local_err = NULL;
1599 int64_t value;
1601 visit_type_int(v, &value, name, &local_err);
1602 if (local_err) {
1603 error_propagate(errp, local_err);
1604 return;
1606 if (value < min || value > max) {
1607 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1608 name ? name : "null", value, min, max);
1609 return;
1612 env->cpuid_version &= ~0xf;
1613 env->cpuid_version |= value & 0xf;
1616 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1618 X86CPU *cpu = X86_CPU(obj);
1619 CPUX86State *env = &cpu->env;
1620 char *value;
1622 value = g_malloc(CPUID_VENDOR_SZ + 1);
1623 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1624 env->cpuid_vendor3);
1625 return value;
1628 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1629 Error **errp)
1631 X86CPU *cpu = X86_CPU(obj);
1632 CPUX86State *env = &cpu->env;
1633 int i;
1635 if (strlen(value) != CPUID_VENDOR_SZ) {
1636 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1637 return;
1640 env->cpuid_vendor1 = 0;
1641 env->cpuid_vendor2 = 0;
1642 env->cpuid_vendor3 = 0;
1643 for (i = 0; i < 4; i++) {
1644 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1645 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1646 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1650 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1652 X86CPU *cpu = X86_CPU(obj);
1653 CPUX86State *env = &cpu->env;
1654 char *value;
1655 int i;
1657 value = g_malloc(48 + 1);
1658 for (i = 0; i < 48; i++) {
1659 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1661 value[48] = '\0';
1662 return value;
1665 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1666 Error **errp)
1668 X86CPU *cpu = X86_CPU(obj);
1669 CPUX86State *env = &cpu->env;
1670 int c, len, i;
1672 if (model_id == NULL) {
1673 model_id = "";
1675 len = strlen(model_id);
1676 memset(env->cpuid_model, 0, 48);
1677 for (i = 0; i < 48; i++) {
1678 if (i >= len) {
1679 c = '\0';
1680 } else {
1681 c = (uint8_t)model_id[i];
1683 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1687 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, void *opaque,
1688 const char *name, Error **errp)
1690 X86CPU *cpu = X86_CPU(obj);
1691 int64_t value;
1693 value = cpu->env.tsc_khz * 1000;
1694 visit_type_int(v, &value, name, errp);
1697 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, void *opaque,
1698 const char *name, Error **errp)
1700 X86CPU *cpu = X86_CPU(obj);
1701 const int64_t min = 0;
1702 const int64_t max = INT64_MAX;
1703 Error *local_err = NULL;
1704 int64_t value;
1706 visit_type_int(v, &value, name, &local_err);
1707 if (local_err) {
1708 error_propagate(errp, local_err);
1709 return;
1711 if (value < min || value > max) {
1712 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1713 name ? name : "null", value, min, max);
1714 return;
1717 cpu->env.tsc_khz = value / 1000;
1720 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, void *opaque,
1721 const char *name, Error **errp)
1723 X86CPU *cpu = X86_CPU(obj);
1724 int64_t value = cpu->apic_id;
1726 visit_type_int(v, &value, name, errp);
1729 static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, void *opaque,
1730 const char *name, Error **errp)
1732 X86CPU *cpu = X86_CPU(obj);
1733 DeviceState *dev = DEVICE(obj);
1734 const int64_t min = 0;
1735 const int64_t max = UINT32_MAX;
1736 Error *error = NULL;
1737 int64_t value;
1739 if (dev->realized) {
1740 error_setg(errp, "Attempt to set property '%s' on '%s' after "
1741 "it was realized", name, object_get_typename(obj));
1742 return;
1745 visit_type_int(v, &value, name, &error);
1746 if (error) {
1747 error_propagate(errp, error);
1748 return;
1750 if (value < min || value > max) {
1751 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1752 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1753 object_get_typename(obj), name, value, min, max);
1754 return;
1757 if ((value != cpu->apic_id) && cpu_exists(value)) {
1758 error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
1759 return;
1761 cpu->apic_id = value;
1764 /* Generic getter for "feature-words" and "filtered-features" properties */
1765 static void x86_cpu_get_feature_words(Object *obj, Visitor *v, void *opaque,
1766 const char *name, Error **errp)
1768 uint32_t *array = (uint32_t *)opaque;
1769 FeatureWord w;
1770 Error *err = NULL;
1771 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1772 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1773 X86CPUFeatureWordInfoList *list = NULL;
1775 for (w = 0; w < FEATURE_WORDS; w++) {
1776 FeatureWordInfo *wi = &feature_word_info[w];
1777 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1778 qwi->cpuid_input_eax = wi->cpuid_eax;
1779 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1780 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1781 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1782 qwi->features = array[w];
1784 /* List will be in reverse order, but order shouldn't matter */
1785 list_entries[w].next = list;
1786 list_entries[w].value = &word_infos[w];
1787 list = &list_entries[w];
1790 visit_type_X86CPUFeatureWordInfoList(v, &list, "feature-words", &err);
1791 error_propagate(errp, err);
1794 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1795 const char *name, Error **errp)
1797 X86CPU *cpu = X86_CPU(obj);
1798 int64_t value = cpu->hyperv_spinlock_attempts;
1800 visit_type_int(v, &value, name, errp);
1803 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1804 const char *name, Error **errp)
1806 const int64_t min = 0xFFF;
1807 const int64_t max = UINT_MAX;
1808 X86CPU *cpu = X86_CPU(obj);
1809 Error *err = NULL;
1810 int64_t value;
1812 visit_type_int(v, &value, name, &err);
1813 if (err) {
1814 error_propagate(errp, err);
1815 return;
1818 if (value < min || value > max) {
1819 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1820 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1821 object_get_typename(obj), name ? name : "null",
1822 value, min, max);
1823 return;
1825 cpu->hyperv_spinlock_attempts = value;
1828 static PropertyInfo qdev_prop_spinlocks = {
1829 .name = "int",
1830 .get = x86_get_hv_spinlocks,
1831 .set = x86_set_hv_spinlocks,
1834 /* Convert all '_' in a feature string option name to '-', to make the
1835 * name conform to the QOM property naming rule, which uses '-' instead of '_'.
1836 */
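/* E.g. feat2prop() turns "sse4_2" into "sse4-2" and "tsc_freq" into
 * "tsc-freq", so the option name can be looked up as a QOM property. */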
1837 static inline void feat2prop(char *s)
1839 while ((s = strchr(s, '_'))) {
1840 *s = '-';
1844 /* Parse "+feature,-feature,feature=foo" CPU feature string
1845 */
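/* E.g. "-cpu qemu64,+avx,-sse4.2,xlevel=0x8000000A,hv-spinlocks=0x1fff"
 * enables AVX, disables SSE4.2 and sets the xlevel and hv-spinlocks
 * properties; a bare "feature" name is parsed as "feature=on". */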
1846 static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
1847 Error **errp)
1849 X86CPU *cpu = X86_CPU(cs);
1850 char *featurestr; /* Single "key=value" string being parsed */
1851 FeatureWord w;
1852 /* Features to be added */
1853 FeatureWordArray plus_features = { 0 };
1854 /* Features to be removed */
1855 FeatureWordArray minus_features = { 0 };
1856 uint32_t numvalue;
1857 CPUX86State *env = &cpu->env;
1858 Error *local_err = NULL;
1860 featurestr = features ? strtok(features, ",") : NULL;
1862 while (featurestr) {
1863 char *val;
1864 if (featurestr[0] == '+') {
1865 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
1866 } else if (featurestr[0] == '-') {
1867 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
1868 } else if ((val = strchr(featurestr, '='))) {
1869 *val = 0; val++;
1870 feat2prop(featurestr);
1871 if (!strcmp(featurestr, "xlevel")) {
1872 char *err;
1873 char num[32];
1875 numvalue = strtoul(val, &err, 0);
1876 if (!*val || *err) {
1877 error_setg(errp, "bad numerical value %s", val);
1878 return;
1880 if (numvalue < 0x80000000) {
1881 error_report("xlevel value shall always be >= 0x80000000"
1882 ", fixup will be removed in future versions");
1883 numvalue += 0x80000000;
1885 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1886 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1887 } else if (!strcmp(featurestr, "tsc-freq")) {
1888 int64_t tsc_freq;
1889 char *err;
1890 char num[32];
1892 tsc_freq = strtosz_suffix_unit(val, &err,
1893 STRTOSZ_DEFSUFFIX_B, 1000);
1894 if (tsc_freq < 0 || *err) {
1895 error_setg(errp, "bad numerical value %s", val);
1896 return;
1898 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
1899 object_property_parse(OBJECT(cpu), num, "tsc-frequency",
1900 &local_err);
1901 } else if (!strcmp(featurestr, "hv-spinlocks")) {
1902 char *err;
1903 const int min = 0xFFF;
1904 char num[32];
1905 numvalue = strtoul(val, &err, 0);
1906 if (!*val || *err) {
1907 error_setg(errp, "bad numerical value %s", val);
1908 return;
1910 if (numvalue < min) {
1911 error_report("hv-spinlocks value shall always be >= 0x%x"
1912 ", fixup will be removed in future versions",
1913 min);
1914 numvalue = min;
1916 snprintf(num, sizeof(num), "%" PRId32, numvalue);
1917 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1918 } else {
1919 object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
1921 } else {
1922 feat2prop(featurestr);
1923 object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
1925 if (local_err) {
1926 error_propagate(errp, local_err);
1927 return;
1929 featurestr = strtok(NULL, ",");
1932 if (cpu->host_features) {
1933 for (w = 0; w < FEATURE_WORDS; w++) {
1934 env->features[w] =
1935 x86_cpu_get_supported_feature_word(w, cpu->migratable);
1939 for (w = 0; w < FEATURE_WORDS; w++) {
1940 env->features[w] |= plus_features[w];
1941 env->features[w] &= ~minus_features[w];
1945 /* Print all cpuid feature names in featureset
1946 */
1947 static void listflags(FILE *f, fprintf_function print, const char **featureset)
1949 int bit;
1950 bool first = true;
1952 for (bit = 0; bit < 32; bit++) {
1953 if (featureset[bit]) {
1954 print(f, "%s%s", first ? "" : " ", featureset[bit]);
1955 first = false;
1960 /* generate CPU information. */
1961 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
1963 X86CPUDefinition *def;
1964 char buf[256];
1965 int i;
1967 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1968 def = &builtin_x86_defs[i];
1969 snprintf(buf, sizeof(buf), "%s", def->name);
1970 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
1972 #ifdef CONFIG_KVM
1973 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
1974 "KVM processor with all supported host features "
1975 "(only available in KVM mode)");
1976 #endif
1978 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
1979 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
1980 FeatureWordInfo *fw = &feature_word_info[i];
1982 (*cpu_fprintf)(f, " ");
1983 listflags(f, cpu_fprintf, fw->feat_names);
1984 (*cpu_fprintf)(f, "\n");
1988 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
1990 CpuDefinitionInfoList *cpu_list = NULL;
1991 X86CPUDefinition *def;
1992 int i;
1994 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1995 CpuDefinitionInfoList *entry;
1996 CpuDefinitionInfo *info;
1998 def = &builtin_x86_defs[i];
1999 info = g_malloc0(sizeof(*info));
2000 info->name = g_strdup(def->name);
2002 entry = g_malloc0(sizeof(*entry));
2003 entry->value = info;
2004 entry->next = cpu_list;
2005 cpu_list = entry;
2008 return cpu_list;
2011 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2012 bool migratable_only)
2014 FeatureWordInfo *wi = &feature_word_info[w];
2015 uint32_t r;
2017 if (kvm_enabled()) {
2018 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2019 wi->cpuid_ecx,
2020 wi->cpuid_reg);
2021 } else if (tcg_enabled()) {
2022 r = wi->tcg_features;
2023 } else {
2024 return ~0;
2026 if (migratable_only) {
2027 r &= x86_cpu_get_migratable_flags(w);
2029 return r;
2032 /*
2033 * Filters CPU feature words based on host availability of each feature.
2034 *
2035 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
2036 */
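/* For example, if the guest model requests a feature (say AVX) that the
 * host or TCG cannot provide, the bit is cleared from env->features[w] and
 * recorded in cpu->filtered_features[w]; "check" reports it, and "enforce"
 * makes realization fail. */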
2037 static int x86_cpu_filter_features(X86CPU *cpu)
2039 CPUX86State *env = &cpu->env;
2040 FeatureWord w;
2041 int rv = 0;
2043 for (w = 0; w < FEATURE_WORDS; w++) {
2044 uint32_t host_feat =
2045 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2046 uint32_t requested_features = env->features[w];
2047 env->features[w] &= host_feat;
2048 cpu->filtered_features[w] = requested_features & ~env->features[w];
2049 if (cpu->filtered_features[w]) {
2050 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2051 report_unavailable_features(w, cpu->filtered_features[w]);
2053 rv = 1;
2057 return rv;
2060 /* Load data from X86CPUDefinition
2061 */
2062 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2064 CPUX86State *env = &cpu->env;
2065 const char *vendor;
2066 char host_vendor[CPUID_VENDOR_SZ + 1];
2067 FeatureWord w;
2069 object_property_set_int(OBJECT(cpu), def->level, "level", errp);
2070 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2071 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2072 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2073 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
2074 object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
2075 cpu->cache_info_passthrough = def->cache_info_passthrough;
2076 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2077 for (w = 0; w < FEATURE_WORDS; w++) {
2078 env->features[w] = def->features[w];
2081 /* Special cases not set in the X86CPUDefinition structs: */
2082 if (kvm_enabled()) {
2083 FeatureWord w;
2084 for (w = 0; w < FEATURE_WORDS; w++) {
2085 env->features[w] |= kvm_default_features[w];
2086 env->features[w] &= ~kvm_default_unset_features[w];
2090 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2092 /* sysenter isn't supported in compatibility mode on AMD,
2093 * syscall isn't supported in compatibility mode on Intel.
2094 * Normally we advertise the actual CPU vendor, but you can
2095 * override this using the 'vendor' property if you want to use
2096 * KVM's sysenter/syscall emulation in compatibility mode and
2097 * when doing cross vendor migration
2098 */
2099 vendor = def->vendor;
2100 if (kvm_enabled()) {
2101 uint32_t ebx = 0, ecx = 0, edx = 0;
2102 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2103 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2104 vendor = host_vendor;
2107 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2111 X86CPU *cpu_x86_create(const char *cpu_model, Error **errp)
2113 X86CPU *cpu = NULL;
2114 X86CPUClass *xcc;
2115 ObjectClass *oc;
2116 gchar **model_pieces;
2117 char *name, *features;
2118 Error *error = NULL;
2120 model_pieces = g_strsplit(cpu_model, ",", 2);
2121 if (!model_pieces[0]) {
2122 error_setg(&error, "Invalid/empty CPU model name");
2123 goto out;
2125 name = model_pieces[0];
2126 features = model_pieces[1];
2128 oc = x86_cpu_class_by_name(name);
2129 if (oc == NULL) {
2130 error_setg(&error, "Unable to find CPU definition: %s", name);
2131 goto out;
2133 xcc = X86_CPU_CLASS(oc);
2135 if (xcc->kvm_required && !kvm_enabled()) {
2136 error_setg(&error, "CPU model '%s' requires KVM", name);
2137 goto out;
2140 cpu = X86_CPU(object_new(object_class_get_name(oc)));
2142 x86_cpu_parse_featurestr(CPU(cpu), features, &error);
2143 if (error) {
2144 goto out;
2147 out:
2148 if (error != NULL) {
2149 error_propagate(errp, error);
2150 if (cpu) {
2151 object_unref(OBJECT(cpu));
2152 cpu = NULL;
2155 g_strfreev(model_pieces);
2156 return cpu;
2159 X86CPU *cpu_x86_init(const char *cpu_model)
2161 Error *error = NULL;
2162 X86CPU *cpu;
2164 cpu = cpu_x86_create(cpu_model, &error);
2165 if (error) {
2166 goto out;
2169 object_property_set_bool(OBJECT(cpu), true, "realized", &error);
2171 out:
2172 if (error) {
2173 error_report_err(error);
2174 if (cpu != NULL) {
2175 object_unref(OBJECT(cpu));
2176 cpu = NULL;
2179 return cpu;
2182 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2184 X86CPUDefinition *cpudef = data;
2185 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2187 xcc->cpu_def = cpudef;
2190 static void x86_register_cpudef_type(X86CPUDefinition *def)
2192 char *typename = x86_cpu_type_name(def->name);
2193 TypeInfo ti = {
2194 .name = typename,
2195 .parent = TYPE_X86_CPU,
2196 .class_init = x86_cpu_cpudef_class_init,
2197 .class_data = def,
2200 type_register(&ti);
2201 g_free(typename);
2204 #if !defined(CONFIG_USER_ONLY)
2206 void cpu_clear_apic_feature(CPUX86State *env)
2208 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2211 #endif /* !CONFIG_USER_ONLY */
2213 /* Initialize list of CPU models, filling some non-static fields if necessary
2214 */
2215 void x86_cpudef_setup(void)
2217 int i, j;
2218 static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
2220 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
2221 X86CPUDefinition *def = &builtin_x86_defs[i];
2223 /* Look for specific "cpudef" models that have the QEMU version
2224 * in .model_id */
2225 for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
2226 if (strcmp(model_with_versions[j], def->name) == 0) {
2227 pstrcpy(def->model_id, sizeof(def->model_id),
2228 "QEMU Virtual CPU version ");
2229 pstrcat(def->model_id, sizeof(def->model_id),
2230 qemu_get_version());
2231 break;
2237 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2238 uint32_t *eax, uint32_t *ebx,
2239 uint32_t *ecx, uint32_t *edx)
2241 X86CPU *cpu = x86_env_get_cpu(env);
2242 CPUState *cs = CPU(cpu);
2244 /* test if maximum index reached */
2245 if (index & 0x80000000) {
2246 if (index > env->cpuid_xlevel) {
2247 if (env->cpuid_xlevel2 > 0) {
2248 /* Handle the Centaur's CPUID instruction. */
2249 if (index > env->cpuid_xlevel2) {
2250 index = env->cpuid_xlevel2;
2251 } else if (index < 0xC0000000) {
2252 index = env->cpuid_xlevel;
2254 } else {
2255 /* Intel documentation states that invalid EAX input will
2256 * return the same information as EAX=cpuid_level
2257 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2258 */
2259 index = env->cpuid_level;
2262 } else {
2263 if (index > env->cpuid_level)
2264 index = env->cpuid_level;
2267 switch(index) {
2268 case 0:
2269 *eax = env->cpuid_level;
2270 *ebx = env->cpuid_vendor1;
2271 *edx = env->cpuid_vendor2;
2272 *ecx = env->cpuid_vendor3;
2273 break;
2274 case 1:
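        /* EBX layout for leaf 1: bits 31..24 initial APIC ID, bits 23..16
         * logical processor count (meaningful only when the HTT bit is set),
         * bits 15..8 CLFLUSH line size in 8-byte units.  E.g. apic_id 2 with
         * 4 logical processors yields EBX = 0x02040800. */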
2275 *eax = env->cpuid_version;
2276 *ebx = (cpu->apic_id << 24) |
2277 8 << 8; /* CLFLUSH line size in quadwords (8 => 64 bytes); Linux reads this. */
2278 *ecx = env->features[FEAT_1_ECX];
2279 *edx = env->features[FEAT_1_EDX];
2280 if (cs->nr_cores * cs->nr_threads > 1) {
2281 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2282 *edx |= 1 << 28; /* HTT bit */
2284 break;
2285 case 2:
2286 /* cache info: needed for Pentium Pro compatibility */
2287 if (cpu->cache_info_passthrough) {
2288 host_cpuid(index, 0, eax, ebx, ecx, edx);
2289 break;
2291 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2292 *ebx = 0;
2293 *ecx = 0;
2294 *edx = (L1D_DESCRIPTOR << 16) | \
2295 (L1I_DESCRIPTOR << 8) | \
2296 (L2_DESCRIPTOR);
2297 break;
2298 case 4:
2299 /* cache info: needed for Core compatibility */
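        /* For each sub-leaf, EBX packs (line size - 1) in bits 11..0,
         * (partitions - 1) in bits 21..12 and (ways - 1) in bits 31..22,
         * while ECX holds (sets - 1).  E.g. the 32 KiB, 8-way, 64-byte-line
         * L1D defined above encodes EBX = 63 | (0 << 12) | (7 << 22)
         * = 0x01C0003F and ECX = 63. */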
2300 if (cpu->cache_info_passthrough) {
2301 host_cpuid(index, count, eax, ebx, ecx, edx);
2302 *eax &= ~0xFC000000;
2303 } else {
2304 *eax = 0;
2305 switch (count) {
2306 case 0: /* L1 dcache info */
2307 *eax |= CPUID_4_TYPE_DCACHE | \
2308 CPUID_4_LEVEL(1) | \
2309 CPUID_4_SELF_INIT_LEVEL;
2310 *ebx = (L1D_LINE_SIZE - 1) | \
2311 ((L1D_PARTITIONS - 1) << 12) | \
2312 ((L1D_ASSOCIATIVITY - 1) << 22);
2313 *ecx = L1D_SETS - 1;
2314 *edx = CPUID_4_NO_INVD_SHARING;
2315 break;
2316 case 1: /* L1 icache info */
2317 *eax |= CPUID_4_TYPE_ICACHE | \
2318 CPUID_4_LEVEL(1) | \
2319 CPUID_4_SELF_INIT_LEVEL;
2320 *ebx = (L1I_LINE_SIZE - 1) | \
2321 ((L1I_PARTITIONS - 1) << 12) | \
2322 ((L1I_ASSOCIATIVITY - 1) << 22);
2323 *ecx = L1I_SETS - 1;
2324 *edx = CPUID_4_NO_INVD_SHARING;
2325 break;
2326 case 2: /* L2 cache info */
2327 *eax |= CPUID_4_TYPE_UNIFIED | \
2328 CPUID_4_LEVEL(2) | \
2329 CPUID_4_SELF_INIT_LEVEL;
2330 if (cs->nr_threads > 1) {
2331 *eax |= (cs->nr_threads - 1) << 14;
2333 *ebx = (L2_LINE_SIZE - 1) | \
2334 ((L2_PARTITIONS - 1) << 12) | \
2335 ((L2_ASSOCIATIVITY - 1) << 22);
2336 *ecx = L2_SETS - 1;
2337 *edx = CPUID_4_NO_INVD_SHARING;
2338 break;
2339 default: /* end of info */
2340 *eax = 0;
2341 *ebx = 0;
2342 *ecx = 0;
2343 *edx = 0;
2344 break;
2348 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2349 if ((*eax & 31) && cs->nr_cores > 1) {
2350 *eax |= (cs->nr_cores - 1) << 26;
2352 break;
2353 case 5:
2354 /* mwait info: needed for Core compatibility */
2355 *eax = 0; /* Smallest monitor-line size in bytes */
2356 *ebx = 0; /* Largest monitor-line size in bytes */
2357 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2358 *edx = 0;
2359 break;
2360 case 6:
2361 /* Thermal and Power Leaf */
2362 *eax = 0;
2363 *ebx = 0;
2364 *ecx = 0;
2365 *edx = 0;
2366 break;
2367 case 7:
2368 /* Structured Extended Feature Flags Enumeration Leaf */
2369 if (count == 0) {
2370 *eax = 0; /* Maximum ECX value for sub-leaves */
2371 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2372 *ecx = 0; /* Reserved */
2373 *edx = 0; /* Reserved */
2374 } else {
2375 *eax = 0;
2376 *ebx = 0;
2377 *ecx = 0;
2378 *edx = 0;
2380 break;
2381 case 9:
2382 /* Direct Cache Access Information Leaf */
2383 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2384 *ebx = 0;
2385 *ecx = 0;
2386 *edx = 0;
2387 break;
2388 case 0xA:
2389 /* Architectural Performance Monitoring Leaf */
2390 if (kvm_enabled() && cpu->enable_pmu) {
2391 KVMState *s = cs->kvm_state;
2393 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2394 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2395 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2396 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2397 } else {
2398 *eax = 0;
2399 *ebx = 0;
2400 *ecx = 0;
2401 *edx = 0;
2403 break;
2404 case 0xD: {
2405 KVMState *s = cs->kvm_state;
2406 uint64_t kvm_mask;
2407 int i;
2409 /* Processor Extended State */
2410 *eax = 0;
2411 *ebx = 0;
2412 *ecx = 0;
2413 *edx = 0;
2414 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) || !kvm_enabled()) {
2415 break;
2417 kvm_mask =
2418 kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX) |
2419 ((uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32);
2421 if (count == 0) {
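            /* 0x240 = 576 bytes: the 512-byte legacy FXSAVE region plus the
             * 64-byte XSAVE header that are always present; the loop below
             * grows ECX to cover the end of the highest enabled extended
             * state area. */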
2422 *ecx = 0x240;
2423 for (i = 2; i < ARRAY_SIZE(ext_save_areas); i++) {
2424 const ExtSaveArea *esa = &ext_save_areas[i];
2425 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2426 (kvm_mask & (1 << i)) != 0) {
2427 if (i < 32) {
2428 *eax |= 1 << i;
2429 } else {
2430 *edx |= 1 << (i - 32);
2432 *ecx = MAX(*ecx, esa->offset + esa->size);
2435 *eax |= kvm_mask & (XSTATE_FP | XSTATE_SSE);
2436 *ebx = *ecx;
2437 } else if (count == 1) {
2438 *eax = env->features[FEAT_XSAVE];
2439 } else if (count < ARRAY_SIZE(ext_save_areas)) {
2440 const ExtSaveArea *esa = &ext_save_areas[count];
2441 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2442 (kvm_mask & (1 << count)) != 0) {
2443 *eax = esa->size;
2444 *ebx = esa->offset;
2447 break;
2449 case 0x80000000:
2450 *eax = env->cpuid_xlevel;
2451 *ebx = env->cpuid_vendor1;
2452 *edx = env->cpuid_vendor2;
2453 *ecx = env->cpuid_vendor3;
2454 break;
2455 case 0x80000001:
2456 *eax = env->cpuid_version;
2457 *ebx = 0;
2458 *ecx = env->features[FEAT_8000_0001_ECX];
2459 *edx = env->features[FEAT_8000_0001_EDX];
2461 /* The Linux kernel checks for the CMPLegacy bit and
2462 * discards multiple thread information if it is set.
2463 * So don't set it here for Intel to make Linux guests happy.
2464 */
2465 if (cs->nr_cores * cs->nr_threads > 1) {
2466 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2467 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2468 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2469 *ecx |= 1 << 1; /* CmpLegacy bit */
2472 break;
2473 case 0x80000002:
2474 case 0x80000003:
2475 case 0x80000004:
2476 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2477 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2478 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2479 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2480 break;
2481 case 0x80000005:
2482 /* cache info (L1 cache) */
2483 if (cpu->cache_info_passthrough) {
2484 host_cpuid(index, 0, eax, ebx, ecx, edx);
2485 break;
2487 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2488 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2489 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2490 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2491 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2492 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2493 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2494 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2495 break;
2496 case 0x80000006:
2497 /* cache info (L2 cache) */
2498 if (cpu->cache_info_passthrough) {
2499 host_cpuid(index, 0, eax, ebx, ecx, edx);
2500 break;
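        /* Leaf 0x80000006 uses AMD's 4-bit associativity encoding (see
         * AMD_ENC_ASSOC above), so e.g. a 16-way cache is reported as 0x8
         * and a fully associative one as 0xF. */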
2502 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2503 (L2_DTLB_2M_ENTRIES << 16) | \
2504 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2505 (L2_ITLB_2M_ENTRIES);
2506 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2507 (L2_DTLB_4K_ENTRIES << 16) | \
2508 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2509 (L2_ITLB_4K_ENTRIES);
2510 *ecx = (L2_SIZE_KB_AMD << 16) | \
2511 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2512 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2513 *edx = ((L3_SIZE_KB/512) << 18) | \
2514 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2515 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2516 break;
2517 case 0x80000007:
2518 *eax = 0;
2519 *ebx = 0;
2520 *ecx = 0;
2521 *edx = env->features[FEAT_8000_0007_EDX];
2522 break;
2523 case 0x80000008:
2524 /* virtual & phys address size in low 2 bytes. */
2525 /* XXX: This value must match the one used in the MMU code. */
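        /* EAX bits 7..0 report the physical address width and bits 15..8 the
         * linear (virtual) address width, so 0x3028 below means 0x30 = 48
         * virtual bits and 0x28 = 40 physical bits. */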
2526 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2527 /* 64 bit processor */
2528 /* XXX: The physical address space is limited to 42 bits in exec.c. */
2529 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
2530 } else {
2531 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
2532 *eax = 0x00000024; /* 36 bits physical */
2533 } else {
2534 *eax = 0x00000020; /* 32 bits physical */
2537 *ebx = 0;
2538 *ecx = 0;
2539 *edx = 0;
2540 if (cs->nr_cores * cs->nr_threads > 1) {
2541 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2543 break;
2544 case 0x8000000A:
2545 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2546 *eax = 0x00000001; /* SVM Revision */
2547 *ebx = 0x00000010; /* nr of ASIDs */
2548 *ecx = 0;
2549 *edx = env->features[FEAT_SVM]; /* optional features */
2550 } else {
2551 *eax = 0;
2552 *ebx = 0;
2553 *ecx = 0;
2554 *edx = 0;
2556 break;
2557 case 0xC0000000:
2558 *eax = env->cpuid_xlevel2;
2559 *ebx = 0;
2560 *ecx = 0;
2561 *edx = 0;
2562 break;
2563 case 0xC0000001:
2564 /* Support for VIA CPU's CPUID instruction */
2565 *eax = env->cpuid_version;
2566 *ebx = 0;
2567 *ecx = 0;
2568 *edx = env->features[FEAT_C000_0001_EDX];
2569 break;
2570 case 0xC0000002:
2571 case 0xC0000003:
2572 case 0xC0000004:
2573 /* Reserved for future use; currently filled with zero */
2574 *eax = 0;
2575 *ebx = 0;
2576 *ecx = 0;
2577 *edx = 0;
2578 break;
2579 default:
2580 /* reserved values: zero */
2581 *eax = 0;
2582 *ebx = 0;
2583 *ecx = 0;
2584 *edx = 0;
2585 break;
2589 /* CPUClass::reset() */
2590 static void x86_cpu_reset(CPUState *s)
2592 X86CPU *cpu = X86_CPU(s);
2593 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2594 CPUX86State *env = &cpu->env;
2595 int i;
2597 xcc->parent_reset(s);
2599 memset(env, 0, offsetof(CPUX86State, cpuid_level));
2601 tlb_flush(s, 1);
2603 env->old_exception = -1;
2605 /* init to reset state */
2607 #ifdef CONFIG_SOFTMMU
2608 env->hflags |= HF_SOFTMMU_MASK;
2609 #endif
2610 env->hflags2 |= HF2_GIF_MASK;
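    /* 0x60000010 = CD | NW | ET, the architecturally defined CR0 value
     * after RESET. */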
2612 cpu_x86_update_cr0(env, 0x60000010);
2613 env->a20_mask = ~0x0;
2614 env->smbase = 0x30000;
2616 env->idt.limit = 0xffff;
2617 env->gdt.limit = 0xffff;
2618 env->ldt.limit = 0xffff;
2619 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2620 env->tr.limit = 0xffff;
2621 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
2623 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2624 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2625 DESC_R_MASK | DESC_A_MASK);
2626 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2627 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2628 DESC_A_MASK);
2629 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2630 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2631 DESC_A_MASK);
2632 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2633 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2634 DESC_A_MASK);
2635 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2636 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2637 DESC_A_MASK);
2638 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2639 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2640 DESC_A_MASK);
2642 env->eip = 0xfff0;
2643 env->regs[R_EDX] = env->cpuid_version;
2645 env->eflags = 0x2;
2647 /* FPU init */
2648 for (i = 0; i < 8; i++) {
2649 env->fptags[i] = 1;
2651 cpu_set_fpuc(env, 0x37f);
2653 env->mxcsr = 0x1f80;
2654 env->xstate_bv = XSTATE_FP | XSTATE_SSE;
2656 env->pat = 0x0007040600070406ULL;
2657 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2659 memset(env->dr, 0, sizeof(env->dr));
2660 env->dr[6] = DR6_FIXED_1;
2661 env->dr[7] = DR7_FIXED_1;
2662 cpu_breakpoint_remove_all(s, BP_CPU);
2663 cpu_watchpoint_remove_all(s, BP_CPU);
2665 env->xcr0 = 1;
2667 /*
2668 * SDM 11.11.5 requires:
2669 * - IA32_MTRR_DEF_TYPE MSR.E = 0
2670 * - IA32_MTRR_PHYSMASKn.V = 0
2671 * All other bits are undefined. For simplification, zero it all.
2672 */
2673 env->mtrr_deftype = 0;
2674 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
2675 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
2677 #if !defined(CONFIG_USER_ONLY)
2678 /* We hard-wire the BSP to the first CPU. */
2679 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
2681 s->halted = !cpu_is_bsp(cpu);
2683 if (kvm_enabled()) {
2684 kvm_arch_reset_vcpu(cpu);
2686 #endif
2689 #ifndef CONFIG_USER_ONLY
2690 bool cpu_is_bsp(X86CPU *cpu)
2692 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2695 /* TODO: remove me when reset over the QOM tree is implemented */
2696 static void x86_cpu_machine_reset_cb(void *opaque)
2698 X86CPU *cpu = opaque;
2699 cpu_reset(CPU(cpu));
2701 #endif
2703 static void mce_init(X86CPU *cpu)
2705 CPUX86State *cenv = &cpu->env;
2706 unsigned int bank;
2708 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2709 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2710 (CPUID_MCE | CPUID_MCA)) {
2711 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2712 cenv->mcg_ctl = ~(uint64_t)0;
2713 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2714 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2719 #ifndef CONFIG_USER_ONLY
2720 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2722 DeviceState *dev = DEVICE(cpu);
2723 APICCommonState *apic;
2724 const char *apic_type = "apic";
2726 if (kvm_irqchip_in_kernel()) {
2727 apic_type = "kvm-apic";
2728 } else if (xen_enabled()) {
2729 apic_type = "xen-apic";
2732 cpu->apic_state = qdev_try_create(qdev_get_parent_bus(dev), apic_type);
2733 if (cpu->apic_state == NULL) {
2734 error_setg(errp, "APIC device '%s' could not be created", apic_type);
2735 return;
2738 object_property_add_child(OBJECT(cpu), "apic",
2739 OBJECT(cpu->apic_state), NULL);
2740 qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
2741 /* TODO: convert to link<> */
2742 apic = APIC_COMMON(cpu->apic_state);
2743 apic->cpu = cpu;
2746 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2748 if (cpu->apic_state == NULL) {
2749 return;
2751 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
2752 errp);
2755 static void x86_cpu_machine_done(Notifier *n, void *unused)
2757 X86CPU *cpu = container_of(n, X86CPU, machine_done);
2758 MemoryRegion *smram =
2759 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
2761 if (smram) {
2762 cpu->smram = g_new(MemoryRegion, 1);
2763 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
2764 smram, 0, 1ull << 32);
2765 memory_region_set_enabled(cpu->smram, false);
2766 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
2769 #else
2770 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2773 #endif
2776 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
2777 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
2778 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
2779 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
2780 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
2781 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
2782 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
2784 CPUState *cs = CPU(dev);
2785 X86CPU *cpu = X86_CPU(dev);
2786 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
2787 CPUX86State *env = &cpu->env;
2788 Error *local_err = NULL;
2789 static bool ht_warned;
2791 if (cpu->apic_id < 0) {
2792 error_setg(errp, "apic-id property was not initialized properly");
2793 return;
2796 if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
2797 env->cpuid_level = 7;
2800 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
2801 * CPUID[1].EDX.
2803 if (IS_AMD_CPU(env)) {
2804 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
2805 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
2806 & CPUID_EXT2_AMD_ALIASES);
2810 if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
2811 error_setg(&local_err,
2812 kvm_enabled() ?
2813 "Host doesn't support requested features" :
2814 "TCG doesn't support requested features");
2815 goto out;
2818 #ifndef CONFIG_USER_ONLY
2819 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
2821 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
2822 x86_cpu_apic_create(cpu, &local_err);
2823 if (local_err != NULL) {
2824 goto out;
2827 #endif
2829 mce_init(cpu);
2831 #ifndef CONFIG_USER_ONLY
2832 if (tcg_enabled()) {
2833 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
2834 cpu->cpu_as_root = g_new(MemoryRegion, 1);
2835 cs->as = g_new(AddressSpace, 1);
2837 /* Outer container... */
2838 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
2839 memory_region_set_enabled(cpu->cpu_as_root, true);
2841 /* ... with two regions inside: normal system memory with low
2842 * priority, and...
2843 */
2844 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
2845 get_system_memory(), 0, ~0ull);
2846 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
2847 memory_region_set_enabled(cpu->cpu_as_mem, true);
2848 address_space_init(cs->as, cpu->cpu_as_root, "CPU");
2850 /* ... SMRAM with higher priority, linked from /machine/smram. */
2851 cpu->machine_done.notify = x86_cpu_machine_done;
2852 qemu_add_machine_init_done_notifier(&cpu->machine_done);
2854 #endif
2856 qemu_init_vcpu(cs);
2857 /*
2858 * Only Intel CPUs support hyperthreading. Even though QEMU fixes this
2859 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
2860 * based on inputs (sockets, cores, threads), it is still better to give
2861 * users a warning.
2862 *
2863 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
2864 * cs->nr_threads hasn't been populated yet and the check is incorrect.
2865 */
2866 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
2867 error_report("AMD CPU doesn't support hyperthreading. Please configure"
2868 " -smp options properly.");
2869 ht_warned = true;
2872 x86_cpu_apic_realize(cpu, &local_err);
2873 if (local_err != NULL) {
2874 goto out;
2876 cpu_reset(cs);
2878 xcc->parent_realize(dev, &local_err);
2880 out:
2881 if (local_err != NULL) {
2882 error_propagate(errp, local_err);
2883 return;
2887 typedef struct BitProperty {
2888 uint32_t *ptr;
2889 uint32_t mask;
2890 } BitProperty;
2892 static void x86_cpu_get_bit_prop(Object *obj,
2893 struct Visitor *v,
2894 void *opaque,
2895 const char *name,
2896 Error **errp)
2898 BitProperty *fp = opaque;
2899 bool value = (*fp->ptr & fp->mask) == fp->mask;
2900 visit_type_bool(v, &value, name, errp);
2903 static void x86_cpu_set_bit_prop(Object *obj,
2904 struct Visitor *v,
2905 void *opaque,
2906 const char *name,
2907 Error **errp)
2909 DeviceState *dev = DEVICE(obj);
2910 BitProperty *fp = opaque;
2911 Error *local_err = NULL;
2912 bool value;
2914 if (dev->realized) {
2915 qdev_prop_set_after_realize(dev, name, errp);
2916 return;
2919 visit_type_bool(v, &value, name, &local_err);
2920 if (local_err) {
2921 error_propagate(errp, local_err);
2922 return;
2925 if (value) {
2926 *fp->ptr |= fp->mask;
2927 } else {
2928 *fp->ptr &= ~fp->mask;
2932 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
2933 void *opaque)
2935 BitProperty *prop = opaque;
2936 g_free(prop);
2939 /* Register a boolean property to get/set a single bit in a uint32_t field.
2941 * The same property name can be registered multiple times to make it affect
2942 * multiple bits in the same FeatureWord. In that case, the getter will return
2943 * true only if all bits are set.
2944 */
2945 static void x86_cpu_register_bit_prop(X86CPU *cpu,
2946 const char *prop_name,
2947 uint32_t *field,
2948 int bitnr)
2950 BitProperty *fp;
2951 ObjectProperty *op;
2952 uint32_t mask = (1UL << bitnr);
2954 op = object_property_find(OBJECT(cpu), prop_name, NULL);
2955 if (op) {
2956 fp = op->opaque;
2957 assert(fp->ptr == field);
2958 fp->mask |= mask;
2959 } else {
2960 fp = g_new0(BitProperty, 1);
2961 fp->ptr = field;
2962 fp->mask = mask;
2963 object_property_add(OBJECT(cpu), prop_name, "bool",
2964 x86_cpu_get_bit_prop,
2965 x86_cpu_set_bit_prop,
2966 x86_cpu_release_bit_prop, fp, &error_abort);
2970 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
2971 FeatureWord w,
2972 int bitnr)
2974 Object *obj = OBJECT(cpu);
2975 int i;
2976 char **names;
2977 FeatureWordInfo *fi = &feature_word_info[w];
2979 if (!fi->feat_names) {
2980 return;
2982 if (!fi->feat_names[bitnr]) {
2983 return;
2986 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
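    /* A feat_names entry may list several spellings separated by '|', e.g.
     * an entry like "sse4.2|sse4_2": the first becomes the canonical
     * property and the rest are added as aliases (after feat2prop()
     * normalization). */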
2988 feat2prop(names[0]);
2989 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
2991 for (i = 1; names[i]; i++) {
2992 feat2prop(names[i]);
2993 object_property_add_alias(obj, names[i], obj, g_strdup(names[0]),
2994 &error_abort);
2997 g_strfreev(names);
3000 static void x86_cpu_initfn(Object *obj)
3002 CPUState *cs = CPU(obj);
3003 X86CPU *cpu = X86_CPU(obj);
3004 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
3005 CPUX86State *env = &cpu->env;
3006 FeatureWord w;
3007 static int inited;
3009 cs->env_ptr = env;
3010 cpu_exec_init(env);
3012 object_property_add(obj, "family", "int",
3013 x86_cpuid_version_get_family,
3014 x86_cpuid_version_set_family, NULL, NULL, NULL);
3015 object_property_add(obj, "model", "int",
3016 x86_cpuid_version_get_model,
3017 x86_cpuid_version_set_model, NULL, NULL, NULL);
3018 object_property_add(obj, "stepping", "int",
3019 x86_cpuid_version_get_stepping,
3020 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
3021 object_property_add_str(obj, "vendor",
3022 x86_cpuid_get_vendor,
3023 x86_cpuid_set_vendor, NULL);
3024 object_property_add_str(obj, "model-id",
3025 x86_cpuid_get_model_id,
3026 x86_cpuid_set_model_id, NULL);
3027 object_property_add(obj, "tsc-frequency", "int",
3028 x86_cpuid_get_tsc_freq,
3029 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
3030 object_property_add(obj, "apic-id", "int",
3031 x86_cpuid_get_apic_id,
3032 x86_cpuid_set_apic_id, NULL, NULL, NULL);
3033 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
3034 x86_cpu_get_feature_words,
3035 NULL, NULL, (void *)env->features, NULL);
3036 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
3037 x86_cpu_get_feature_words,
3038 NULL, NULL, (void *)cpu->filtered_features, NULL);
3040 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
3042 #ifndef CONFIG_USER_ONLY
3043 /* Any code creating new X86CPU objects has to set apic-id explicitly */
3044 cpu->apic_id = -1;
3045 #endif
3047 for (w = 0; w < FEATURE_WORDS; w++) {
3048 int bitnr;
3050 for (bitnr = 0; bitnr < 32; bitnr++) {
3051 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
3055 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
3057 /* init various static tables used in TCG mode */
3058 if (tcg_enabled() && !inited) {
3059 inited = 1;
3060 optimize_flags_init();
3064 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3066 X86CPU *cpu = X86_CPU(cs);
3068 return cpu->apic_id;
3071 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3073 X86CPU *cpu = X86_CPU(cs);
3075 return cpu->env.cr[0] & CR0_PG_MASK;
3078 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3080 X86CPU *cpu = X86_CPU(cs);
3082 cpu->env.eip = value;
3085 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3087 X86CPU *cpu = X86_CPU(cs);
3089 cpu->env.eip = tb->pc - tb->cs_base;
3092 static bool x86_cpu_has_work(CPUState *cs)
3094 X86CPU *cpu = X86_CPU(cs);
3095 CPUX86State *env = &cpu->env;
3097 #if !defined(CONFIG_USER_ONLY)
3098 if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
3099 apic_poll_irq(cpu->apic_state);
3100 cpu_reset_interrupt(cs, CPU_INTERRUPT_POLL);
3102 #endif
3104 return ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
3105 (env->eflags & IF_MASK)) ||
3106 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3107 CPU_INTERRUPT_INIT |
3108 CPU_INTERRUPT_SIPI |
3109 CPU_INTERRUPT_MCE)) ||
3110 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3111 !(env->hflags & HF_SMM_MASK));
3114 static Property x86_cpu_properties[] = {
3115 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
3116 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
3117 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
3118 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
3119 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
3120 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, false),
3121 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
3122 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
3123 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
3124 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
3125 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
3126 DEFINE_PROP_END_OF_LIST()
3129 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
3131 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3132 CPUClass *cc = CPU_CLASS(oc);
3133 DeviceClass *dc = DEVICE_CLASS(oc);
3135 xcc->parent_realize = dc->realize;
3136 dc->realize = x86_cpu_realizefn;
3137 dc->bus_type = TYPE_ICC_BUS;
3138 dc->props = x86_cpu_properties;
3140 xcc->parent_reset = cc->reset;
3141 cc->reset = x86_cpu_reset;
3142 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
3144 cc->class_by_name = x86_cpu_class_by_name;
3145 cc->parse_features = x86_cpu_parse_featurestr;
3146 cc->has_work = x86_cpu_has_work;
3147 cc->do_interrupt = x86_cpu_do_interrupt;
3148 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
3149 cc->dump_state = x86_cpu_dump_state;
3150 cc->set_pc = x86_cpu_set_pc;
3151 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
3152 cc->gdb_read_register = x86_cpu_gdb_read_register;
3153 cc->gdb_write_register = x86_cpu_gdb_write_register;
3154 cc->get_arch_id = x86_cpu_get_arch_id;
3155 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
3156 #ifdef CONFIG_USER_ONLY
3157 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
3158 #else
3159 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
3160 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
3161 cc->write_elf64_note = x86_cpu_write_elf64_note;
3162 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
3163 cc->write_elf32_note = x86_cpu_write_elf32_note;
3164 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
3165 cc->vmsd = &vmstate_x86_cpu;
3166 #endif
3167 cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
3168 #ifndef CONFIG_USER_ONLY
3169 cc->debug_excp_handler = breakpoint_handler;
3170 #endif
3171 cc->cpu_exec_enter = x86_cpu_exec_enter;
3172 cc->cpu_exec_exit = x86_cpu_exec_exit;
3175 static const TypeInfo x86_cpu_type_info = {
3176 .name = TYPE_X86_CPU,
3177 .parent = TYPE_CPU,
3178 .instance_size = sizeof(X86CPU),
3179 .instance_init = x86_cpu_initfn,
3180 .abstract = true,
3181 .class_size = sizeof(X86CPUClass),
3182 .class_init = x86_cpu_common_class_init,
3185 static void x86_cpu_register_types(void)
3187 int i;
3189 type_register_static(&x86_cpu_type_info);
3190 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3191 x86_register_cpudef_type(&builtin_x86_defs[i]);
3193 #ifdef CONFIG_KVM
3194 type_register_static(&host_x86_cpu_type_info);
3195 #endif
3198 type_init(x86_cpu_register_types)