/*
 * i386 CPUID helper functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "sysemu/kvm.h"
#include "sysemu/cpus.h"
#include "kvm_i386.h"

#include "qemu/option.h"
#include "qemu/config-file.h"
#include "qapi/qmp/qerror.h"

#include "qapi-types.h"
#include "qapi-visit.h"
#include "qapi/visitor.h"
#include "sysemu/arch_init.h"

#include "hw/hw.h"
#if defined(CONFIG_KVM)
#include <linux/kvm_para.h>
#endif

#include "sysemu/sysemu.h"
#include "hw/qdev-properties.h"
#include "hw/cpu/icc_bus.h"
#ifndef CONFIG_USER_ONLY
#include "hw/xen/xen.h"
#include "hw/i386/apic_internal.h"
#endif
/* Cache topology CPUID constants: */

/* CPUID Leaf 2 Descriptors */

#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
#define CPUID_2_L1I_32KB_8WAY_64B 0x30
#define CPUID_2_L2_2MB_8WAY_64B   0x7d


/* CPUID Leaf 4 constants: */

/* EAX: */
#define CPUID_4_TYPE_DCACHE  1
#define CPUID_4_TYPE_ICACHE  2
#define CPUID_4_TYPE_UNIFIED 3

#define CPUID_4_LEVEL(l)          ((l) << 5)

#define CPUID_4_SELF_INIT_LEVEL (1 << 8)
#define CPUID_4_FULLY_ASSOC     (1 << 9)

/* EDX: */
#define CPUID_4_NO_INVD_SHARING (1 << 0)
#define CPUID_4_INCLUSIVE       (1 << 1)
#define CPUID_4_COMPLEX_IDX     (1 << 2)

#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006: */
#define AMD_ENC_ASSOC(a) (a <=   1 ? a   : \
                          a ==   2 ? 0x2 : \
                          a ==   4 ? 0x4 : \
                          a ==   8 ? 0x6 : \
                          a ==  16 ? 0x8 : \
                          a ==  32 ? 0xA : \
                          a ==  48 ? 0xB : \
                          a ==  64 ? 0xC : \
                          a ==  96 ? 0xD : \
                          a == 128 ? 0xE : \
                          a == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
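/* Worked example (illustrative, not part of the original source): for the L2
 * values defined below, AMD_ENC_ASSOC(16) evaluates to 0x8, the 4-bit
 * associativity code CPUID leaf 0x80000006 expects for a 16-way cache;
 * AMD_ENC_ASSOC(ASSOC_FULL) yields 0xF (fully associative), and any way count
 * not listed above (e.g. 3) falls through to 0, which that leaf treats as
 * "disabled".
 */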
/* Definitions of the hardcoded cache entries we expose: */

/* L1 data cache: */
#define L1D_LINE_SIZE         64
#define L1D_ASSOCIATIVITY      8
#define L1D_SETS              64
#define L1D_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1D_LINES_PER_TAG      1
#define L1D_SIZE_KB_AMD       64
#define L1D_ASSOCIATIVITY_AMD  2

/* L1 instruction cache: */
#define L1I_LINE_SIZE         64
#define L1I_ASSOCIATIVITY      8
#define L1I_SETS              64
#define L1I_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1I_LINES_PER_TAG      1
#define L1I_SIZE_KB_AMD       64
#define L1I_ASSOCIATIVITY_AMD  2

/* Level 2 unified cache: */
#define L2_LINE_SIZE          64
#define L2_ASSOCIATIVITY      16
#define L2_SETS             4096
#define L2_PARTITIONS          1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
#define L2_LINES_PER_TAG       1
#define L2_SIZE_KB_AMD       512

/* No L3 cache: */
#define L3_SIZE_KB             0 /* disabled */
#define L3_ASSOCIATIVITY       0 /* disabled */
#define L3_LINES_PER_TAG       0 /* disabled */
#define L3_LINE_SIZE           0 /* disabled */
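/* Worked check (illustrative, not part of the original source): plugging the
 * values above into the "Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS"
 * comments gives:
 *   L1D/L1I: 64 * 8  * 64   * 1 = 32768 bytes   = 32 KiB
 *   L2:      64 * 16 * 4096 * 1 = 4194304 bytes = 4 MiB
 * The AMD-specific L1D_SIZE_KB_AMD/L1I_SIZE_KB_AMD/L2_SIZE_KB_AMD values
 * intentionally differ from these; see the FIXME notes about the
 * inconsistency between CPUID leaves 2/4 and 0x80000005/0x80000006.
 */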
/* TLB definitions: */

#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512
static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
                                     uint32_t vendor2, uint32_t vendor3)
{
    int i;
    for (i = 0; i < 4; i++) {
        dst[i] = vendor1 >> (8 * i);
        dst[i + 4] = vendor2 >> (8 * i);
        dst[i + 8] = vendor3 >> (8 * i);
    }
    dst[CPUID_VENDOR_SZ] = '\0';
}
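/* Byte layout note (illustrative, not part of the original source): vendor1,
 * vendor2 and vendor3 are unpacked least-significant byte first into
 * dst[0..3], dst[4..7] and dst[8..11], matching how CPUID leaf 0 packs the
 * vendor ID string into EBX, EDX and ECX.  Passing (ebx, edx, ecx) from an
 * Intel host therefore reconstructs the 12-character string "GenuineIntel".
 */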
175 /* feature flags taken from "Intel Processor Identification and the CPUID
176 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
177 * between feature naming conventions, aliases may be added.
179 static const char *feature_name[] = {
180 "fpu", "vme", "de", "pse",
181 "tsc", "msr", "pae", "mce",
182 "cx8", "apic", NULL, "sep",
183 "mtrr", "pge", "mca", "cmov",
184 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
185 NULL, "ds" /* Intel dts */, "acpi", "mmx",
186 "fxsr", "sse", "sse2", "ss",
187 "ht" /* Intel htt */, "tm", "ia64", "pbe",
189 static const char *ext_feature_name[] = {
190 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
191 "ds_cpl", "vmx", "smx", "est",
192 "tm2", "ssse3", "cid", NULL,
193 "fma", "cx16", "xtpr", "pdcm",
194 NULL, "pcid", "dca", "sse4.1|sse4_1",
195 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
196 "tsc-deadline", "aes", "xsave", "osxsave",
197 "avx", "f16c", "rdrand", "hypervisor",
199 /* Feature names that are already defined on feature_name[] but are set on
200 * CPUID[8000_0001].EDX on AMD CPUs don't have their names on
201 * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
202 * if and only if CPU vendor is AMD.
204 static const char *ext2_feature_name[] = {
205 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
206 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
207 NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
208 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
209 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
210 "nx|xd", NULL, "mmxext", NULL /* mmx */,
211 NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
212 NULL, "lm|i64", "3dnowext", "3dnow",
214 static const char *ext3_feature_name[] = {
215 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
216 "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
217 "3dnowprefetch", "osvw", "ibs", "xop",
218 "skinit", "wdt", NULL, "lwp",
219 "fma4", "tce", NULL, "nodeid_msr",
220 NULL, "tbm", "topoext", "perfctr_core",
221 "perfctr_nb", NULL, NULL, NULL,
222 NULL, NULL, NULL, NULL,
225 static const char *ext4_feature_name[] = {
226 NULL, NULL, "xstore", "xstore-en",
227 NULL, NULL, "xcrypt", "xcrypt-en",
228 "ace2", "ace2-en", "phe", "phe-en",
229 "pmm", "pmm-en", NULL, NULL,
230 NULL, NULL, NULL, NULL,
231 NULL, NULL, NULL, NULL,
232 NULL, NULL, NULL, NULL,
233 NULL, NULL, NULL, NULL,
236 static const char *kvm_feature_name[] = {
237 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
238 "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
239 NULL, NULL, NULL, NULL,
240 NULL, NULL, NULL, NULL,
241 NULL, NULL, NULL, NULL,
242 NULL, NULL, NULL, NULL,
243 "kvmclock-stable-bit", NULL, NULL, NULL,
244 NULL, NULL, NULL, NULL,
247 static const char *svm_feature_name[] = {
248 "npt", "lbrv", "svm_lock", "nrip_save",
249 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
250 NULL, NULL, "pause_filter", NULL,
251 "pfthreshold", NULL, NULL, NULL,
252 NULL, NULL, NULL, NULL,
253 NULL, NULL, NULL, NULL,
254 NULL, NULL, NULL, NULL,
255 NULL, NULL, NULL, NULL,
258 static const char *cpuid_7_0_ebx_feature_name[] = {
259 "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
260 "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
261 "avx512f", NULL, "rdseed", "adx", "smap", NULL, NULL, NULL,
262 NULL, NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
265 static const char *cpuid_apm_edx_feature_name[] = {
266 NULL, NULL, NULL, NULL,
267 NULL, NULL, NULL, NULL,
268 "invtsc", NULL, NULL, NULL,
269 NULL, NULL, NULL, NULL,
270 NULL, NULL, NULL, NULL,
271 NULL, NULL, NULL, NULL,
272 NULL, NULL, NULL, NULL,
273 NULL, NULL, NULL, NULL,
276 static const char *cpuid_xsave_feature_name[] = {
277 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
278 NULL, NULL, NULL, NULL,
279 NULL, NULL, NULL, NULL,
280 NULL, NULL, NULL, NULL,
281 NULL, NULL, NULL, NULL,
282 NULL, NULL, NULL, NULL,
283 NULL, NULL, NULL, NULL,
284 NULL, NULL, NULL, NULL,
287 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
288 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
289 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
290 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
291 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
292 CPUID_PSE36 | CPUID_FXSR)
293 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
294 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
295 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
296 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
297 CPUID_PAE | CPUID_SEP | CPUID_APIC)
299 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
300 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
301 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
302 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
303 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS)
304 /* partly implemented:
305 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
306 /* missing:
307 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
308 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
309 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
310 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
311 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
312 /* missing:
313 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
314 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
315 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
316 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_XSAVE,
317 CPUID_EXT_OSXSAVE, CPUID_EXT_AVX, CPUID_EXT_F16C,
318 CPUID_EXT_RDRAND */
320 #ifdef TARGET_X86_64
321 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
322 #else
323 #define TCG_EXT2_X86_64_FEATURES 0
324 #endif
326 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
327 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
328 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
329 TCG_EXT2_X86_64_FEATURES)
330 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
331 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
332 #define TCG_EXT4_FEATURES 0
333 #define TCG_SVM_FEATURES 0
334 #define TCG_KVM_FEATURES 0
335 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
336 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX)
337 /* missing:
338 CPUID_7_0_EBX_FSGSBASE, CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
339 CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
340 CPUID_7_0_EBX_RDSEED */
341 #define TCG_APM_FEATURES 0
344 typedef struct FeatureWordInfo {
345 const char **feat_names;
346 uint32_t cpuid_eax; /* Input EAX for CPUID */
347 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
348 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
349 int cpuid_reg; /* output register (R_* constant) */
350 uint32_t tcg_features; /* Feature flags supported by TCG */
351 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
352 } FeatureWordInfo;
354 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
355 [FEAT_1_EDX] = {
356 .feat_names = feature_name,
357 .cpuid_eax = 1, .cpuid_reg = R_EDX,
358 .tcg_features = TCG_FEATURES,
360 [FEAT_1_ECX] = {
361 .feat_names = ext_feature_name,
362 .cpuid_eax = 1, .cpuid_reg = R_ECX,
363 .tcg_features = TCG_EXT_FEATURES,
365 [FEAT_8000_0001_EDX] = {
366 .feat_names = ext2_feature_name,
367 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
368 .tcg_features = TCG_EXT2_FEATURES,
370 [FEAT_8000_0001_ECX] = {
371 .feat_names = ext3_feature_name,
372 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
373 .tcg_features = TCG_EXT3_FEATURES,
375 [FEAT_C000_0001_EDX] = {
376 .feat_names = ext4_feature_name,
377 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
378 .tcg_features = TCG_EXT4_FEATURES,
380 [FEAT_KVM] = {
381 .feat_names = kvm_feature_name,
382 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
383 .tcg_features = TCG_KVM_FEATURES,
385 [FEAT_SVM] = {
386 .feat_names = svm_feature_name,
387 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
388 .tcg_features = TCG_SVM_FEATURES,
390 [FEAT_7_0_EBX] = {
391 .feat_names = cpuid_7_0_ebx_feature_name,
392 .cpuid_eax = 7,
393 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
394 .cpuid_reg = R_EBX,
395 .tcg_features = TCG_7_0_EBX_FEATURES,
397 [FEAT_8000_0007_EDX] = {
398 .feat_names = cpuid_apm_edx_feature_name,
399 .cpuid_eax = 0x80000007,
400 .cpuid_reg = R_EDX,
401 .tcg_features = TCG_APM_FEATURES,
402 .unmigratable_flags = CPUID_APM_INVTSC,
404 [FEAT_XSAVE] = {
405 .feat_names = cpuid_xsave_feature_name,
406 .cpuid_eax = 0xd,
407 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
408 .cpuid_reg = R_EAX,
409 .tcg_features = 0,
413 typedef struct X86RegisterInfo32 {
414 /* Name of register */
415 const char *name;
416 /* QAPI enum value register */
417 X86CPURegister32 qapi_enum;
418 } X86RegisterInfo32;
420 #define REGISTER(reg) \
421 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
422 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
423 REGISTER(EAX),
424 REGISTER(ECX),
425 REGISTER(EDX),
426 REGISTER(EBX),
427 REGISTER(ESP),
428 REGISTER(EBP),
429 REGISTER(ESI),
430 REGISTER(EDI),
432 #undef REGISTER
434 typedef struct ExtSaveArea {
435 uint32_t feature, bits;
436 uint32_t offset, size;
437 } ExtSaveArea;
439 static const ExtSaveArea ext_save_areas[] = {
440 [2] = { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
441 .offset = 0x240, .size = 0x100 },
442 [3] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
443 .offset = 0x3c0, .size = 0x40 },
444 [4] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
445 .offset = 0x400, .size = 0x40 },
446 [5] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
447 .offset = 0x440, .size = 0x40 },
448 [6] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
449 .offset = 0x480, .size = 0x200 },
450 [7] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
451 .offset = 0x680, .size = 0x400 },
454 const char *get_register_name_32(unsigned int reg)
456 if (reg >= CPU_NB_REGS32) {
457 return NULL;
459 return x86_reg_info_32[reg].name;
462 /* KVM-specific features that are automatically added to all CPU models
463 * when KVM is enabled.
465 static uint32_t kvm_default_features[FEATURE_WORDS] = {
466 [FEAT_KVM] = (1 << KVM_FEATURE_CLOCKSOURCE) |
467 (1 << KVM_FEATURE_NOP_IO_DELAY) |
468 (1 << KVM_FEATURE_CLOCKSOURCE2) |
469 (1 << KVM_FEATURE_ASYNC_PF) |
470 (1 << KVM_FEATURE_STEAL_TIME) |
471 (1 << KVM_FEATURE_PV_EOI) |
472 (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT),
473 [FEAT_1_ECX] = CPUID_EXT_X2APIC,
476 /* Features that are not added by default to any CPU model when KVM is enabled.
478 static uint32_t kvm_default_unset_features[FEATURE_WORDS] = {
479 [FEAT_1_EDX] = CPUID_ACPI,
480 [FEAT_1_ECX] = CPUID_EXT_MONITOR,
481 [FEAT_8000_0001_ECX] = CPUID_EXT3_SVM,
484 void x86_cpu_compat_kvm_no_autoenable(FeatureWord w, uint32_t features)
486 kvm_default_features[w] &= ~features;
489 void x86_cpu_compat_kvm_no_autodisable(FeatureWord w, uint32_t features)
491 kvm_default_unset_features[w] &= ~features;
495 * Returns the set of feature flags that are supported and migratable by
496 * QEMU, for a given FeatureWord.
498 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
500 FeatureWordInfo *wi = &feature_word_info[w];
501 uint32_t r = 0;
502 int i;
504 for (i = 0; i < 32; i++) {
505 uint32_t f = 1U << i;
506 /* If the feature name is unknown, it is not supported by QEMU yet */
507 if (!wi->feat_names[i]) {
508 continue;
510 /* Skip features known to QEMU, but explicitly marked as unmigratable */
511 if (wi->unmigratable_flags & f) {
512 continue;
514 r |= f;
516 return r;
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#else
    abort();
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
}
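#if 0
/* Usage sketch (illustrative only, guarded out, not part of the original
 * source): reading the host's vendor string by combining host_cpuid() with
 * x86_cpu_vendor_words2str() above.  CPUID leaf 0 returns the 12 ASCII bytes
 * of the vendor ID in EBX, EDX, ECX (in that order).  The helper name
 * example_print_host_vendor is hypothetical.
 */
static void example_print_host_vendor(void)
{
    uint32_t eax, ebx, ecx, edx;
    char vendor[CPUID_VENDOR_SZ + 1];

    host_cpuid(0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
    printf("host CPU vendor: %s\n", vendor);
}
#endif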
#define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))

/* general substring compare of *[s1..e1) and *[s2..e2).  sx is start of
 * a substring.  ex if !NULL points to the first char after a substring,
 * otherwise the string is assumed to be sized by a terminating nul.
 * Return lexical ordering of *s1:*s2.
 */
static int sstrcmp(const char *s1, const char *e1,
                   const char *s2, const char *e2)
{
    for (;;) {
        if (!*s1 || !*s2 || *s1 != *s2)
            return (*s1 - *s2);
        ++s1, ++s2;
        if (s1 == e1 && s2 == e2)
            return (0);
        else if (s1 == e1)
            return (*s2);
        else if (s2 == e2)
            return (*s1);
    }
}

/* compare *[s..e) to *altstr.  *altstr may be a simple string or multiple
 * '|' delimited (possibly empty) strings in which case search for a match
 * within the alternatives proceeds left to right.  Return 0 for success,
 * non-zero otherwise.
 */
static int altcmp(const char *s, const char *e, const char *altstr)
{
    const char *p, *q;

    for (q = p = altstr; ; ) {
        while (*p && *p != '|')
            ++p;
        if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
            return (0);
        if (!*p)
            return (1);
        else
            q = ++p;
    }
}
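/* Example (illustrative, not part of the original source): the alias entries
 * in the feature name tables rely on this, e.g. with altstr = "sse4.1|sse4_1"
 * both the spelling "sse4.1" and the spelling "sse4_1" match (return 0),
 * while any other flag name returns non-zero.
 */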
/* search featureset for flag *[s..e), if found set corresponding bit in
 * *pval and return true, otherwise return false
 */
static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
                           const char **featureset)
{
    uint32_t mask;
    const char **ppc;
    bool found = false;

    for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
        if (*ppc && !altcmp(s, e, *ppc)) {
            *pval |= mask;
            found = true;
        }
    }
    return found;
}

static void add_flagname_to_bitmaps(const char *flagname,
                                    FeatureWordArray words,
                                    Error **errp)
{
    FeatureWord w;
    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        if (wi->feat_names &&
            lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
            break;
        }
    }
    if (w == FEATURE_WORDS) {
        error_setg(errp, "CPU feature %s not found", flagname);
    }
}
/* CPU class name definitions: */

#define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
#define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)

/* Return type name for a given CPU model name
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}

static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;

    if (cpu_model == NULL) {
        return NULL;
    }

    typename = x86_cpu_type_name(cpu_model);
    oc = object_class_by_name(typename);
    g_free(typename);
    return oc;
}
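/* Example (illustrative, not part of the original source):
 * x86_cpu_type_name("qemu64") returns a heap-allocated string consisting of
 * the model name with X86_CPU_TYPE_SUFFIX appended, e.g. "qemu64-x86_64-cpu"
 * on an x86_64 target build (assuming TYPE_X86_CPU expands to "x86_64-cpu"
 * there).  That string is the QOM class name x86_cpu_class_by_name() looks
 * up, and the caller must g_free() it.
 */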
661 struct X86CPUDefinition {
662 const char *name;
663 uint32_t level;
664 uint32_t xlevel;
665 uint32_t xlevel2;
666 /* vendor is zero-terminated, 12 character ASCII string */
667 char vendor[CPUID_VENDOR_SZ + 1];
668 int family;
669 int model;
670 int stepping;
671 FeatureWordArray features;
672 char model_id[48];
673 bool cache_info_passthrough;
676 static X86CPUDefinition builtin_x86_defs[] = {
678 .name = "qemu64",
679 .level = 4,
680 .vendor = CPUID_VENDOR_AMD,
681 .family = 6,
682 .model = 6,
683 .stepping = 3,
684 .features[FEAT_1_EDX] =
685 PPRO_FEATURES |
686 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
687 CPUID_PSE36,
688 .features[FEAT_1_ECX] =
689 CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT,
690 .features[FEAT_8000_0001_EDX] =
691 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
692 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
693 .features[FEAT_8000_0001_ECX] =
694 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
695 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
696 .xlevel = 0x8000000A,
699 .name = "phenom",
700 .level = 5,
701 .vendor = CPUID_VENDOR_AMD,
702 .family = 16,
703 .model = 2,
704 .stepping = 3,
705 /* Missing: CPUID_HT */
706 .features[FEAT_1_EDX] =
707 PPRO_FEATURES |
708 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
709 CPUID_PSE36 | CPUID_VME,
710 .features[FEAT_1_ECX] =
711 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
712 CPUID_EXT_POPCNT,
713 .features[FEAT_8000_0001_EDX] =
714 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
715 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
716 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
717 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
718 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
719 CPUID_EXT3_CR8LEG,
720 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
721 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
722 .features[FEAT_8000_0001_ECX] =
723 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
724 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
725 /* Missing: CPUID_SVM_LBRV */
726 .features[FEAT_SVM] =
727 CPUID_SVM_NPT,
728 .xlevel = 0x8000001A,
729 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
732 .name = "core2duo",
733 .level = 10,
734 .vendor = CPUID_VENDOR_INTEL,
735 .family = 6,
736 .model = 15,
737 .stepping = 11,
738 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
739 .features[FEAT_1_EDX] =
740 PPRO_FEATURES |
741 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
742 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
743 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
744 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
745 .features[FEAT_1_ECX] =
746 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
747 CPUID_EXT_CX16,
748 .features[FEAT_8000_0001_EDX] =
749 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
750 .features[FEAT_8000_0001_ECX] =
751 CPUID_EXT3_LAHF_LM,
752 .xlevel = 0x80000008,
753 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
756 .name = "kvm64",
757 .level = 5,
758 .vendor = CPUID_VENDOR_INTEL,
759 .family = 15,
760 .model = 6,
761 .stepping = 1,
762 /* Missing: CPUID_HT */
763 .features[FEAT_1_EDX] =
764 PPRO_FEATURES | CPUID_VME |
765 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
766 CPUID_PSE36,
767 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
768 .features[FEAT_1_ECX] =
769 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
770 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
771 .features[FEAT_8000_0001_EDX] =
772 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
773 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
774 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
775 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
776 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
777 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
778 .features[FEAT_8000_0001_ECX] =
780 .xlevel = 0x80000008,
781 .model_id = "Common KVM processor"
784 .name = "qemu32",
785 .level = 4,
786 .vendor = CPUID_VENDOR_INTEL,
787 .family = 6,
788 .model = 6,
789 .stepping = 3,
790 .features[FEAT_1_EDX] =
791 PPRO_FEATURES,
792 .features[FEAT_1_ECX] =
793 CPUID_EXT_SSE3 | CPUID_EXT_POPCNT,
794 .xlevel = 0x80000004,
797 .name = "kvm32",
798 .level = 5,
799 .vendor = CPUID_VENDOR_INTEL,
800 .family = 15,
801 .model = 6,
802 .stepping = 1,
803 .features[FEAT_1_EDX] =
804 PPRO_FEATURES | CPUID_VME |
805 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
806 .features[FEAT_1_ECX] =
807 CPUID_EXT_SSE3,
808 .features[FEAT_8000_0001_EDX] =
809 PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES,
810 .features[FEAT_8000_0001_ECX] =
812 .xlevel = 0x80000008,
813 .model_id = "Common 32-bit KVM processor"
816 .name = "coreduo",
817 .level = 10,
818 .vendor = CPUID_VENDOR_INTEL,
819 .family = 6,
820 .model = 14,
821 .stepping = 8,
822 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
823 .features[FEAT_1_EDX] =
824 PPRO_FEATURES | CPUID_VME |
825 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
826 CPUID_SS,
827 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
828 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
829 .features[FEAT_1_ECX] =
830 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
831 .features[FEAT_8000_0001_EDX] =
832 CPUID_EXT2_NX,
833 .xlevel = 0x80000008,
834 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
837 .name = "486",
838 .level = 1,
839 .vendor = CPUID_VENDOR_INTEL,
840 .family = 4,
841 .model = 8,
842 .stepping = 0,
843 .features[FEAT_1_EDX] =
844 I486_FEATURES,
845 .xlevel = 0,
848 .name = "pentium",
849 .level = 1,
850 .vendor = CPUID_VENDOR_INTEL,
851 .family = 5,
852 .model = 4,
853 .stepping = 3,
854 .features[FEAT_1_EDX] =
855 PENTIUM_FEATURES,
856 .xlevel = 0,
859 .name = "pentium2",
860 .level = 2,
861 .vendor = CPUID_VENDOR_INTEL,
862 .family = 6,
863 .model = 5,
864 .stepping = 2,
865 .features[FEAT_1_EDX] =
866 PENTIUM2_FEATURES,
867 .xlevel = 0,
870 .name = "pentium3",
871 .level = 2,
872 .vendor = CPUID_VENDOR_INTEL,
873 .family = 6,
874 .model = 7,
875 .stepping = 3,
876 .features[FEAT_1_EDX] =
877 PENTIUM3_FEATURES,
878 .xlevel = 0,
881 .name = "athlon",
882 .level = 2,
883 .vendor = CPUID_VENDOR_AMD,
884 .family = 6,
885 .model = 2,
886 .stepping = 3,
887 .features[FEAT_1_EDX] =
888 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
889 CPUID_MCA,
890 .features[FEAT_8000_0001_EDX] =
891 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
892 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
893 .xlevel = 0x80000008,
896 .name = "n270",
897 /* original is on level 10 */
898 .level = 5,
899 .vendor = CPUID_VENDOR_INTEL,
900 .family = 6,
901 .model = 28,
902 .stepping = 2,
903 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
904 .features[FEAT_1_EDX] =
905 PPRO_FEATURES |
906 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
907 CPUID_ACPI | CPUID_SS,
908 /* Some CPUs got no CPUID_SEP */
909 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
910 * CPUID_EXT_XTPR */
911 .features[FEAT_1_ECX] =
912 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
913 CPUID_EXT_MOVBE,
914 .features[FEAT_8000_0001_EDX] =
915 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
916 CPUID_EXT2_NX,
917 .features[FEAT_8000_0001_ECX] =
918 CPUID_EXT3_LAHF_LM,
919 .xlevel = 0x8000000A,
920 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
923 .name = "Conroe",
924 .level = 4,
925 .vendor = CPUID_VENDOR_INTEL,
926 .family = 6,
927 .model = 15,
928 .stepping = 3,
929 .features[FEAT_1_EDX] =
930 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
931 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
932 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
933 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
934 CPUID_DE | CPUID_FP87,
935 .features[FEAT_1_ECX] =
936 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
937 .features[FEAT_8000_0001_EDX] =
938 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
939 .features[FEAT_8000_0001_ECX] =
940 CPUID_EXT3_LAHF_LM,
941 .xlevel = 0x8000000A,
942 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
945 .name = "Penryn",
946 .level = 4,
947 .vendor = CPUID_VENDOR_INTEL,
948 .family = 6,
949 .model = 23,
950 .stepping = 3,
951 .features[FEAT_1_EDX] =
952 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
953 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
954 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
955 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
956 CPUID_DE | CPUID_FP87,
957 .features[FEAT_1_ECX] =
958 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
959 CPUID_EXT_SSE3,
960 .features[FEAT_8000_0001_EDX] =
961 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
962 .features[FEAT_8000_0001_ECX] =
963 CPUID_EXT3_LAHF_LM,
964 .xlevel = 0x8000000A,
965 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
968 .name = "Nehalem",
969 .level = 4,
970 .vendor = CPUID_VENDOR_INTEL,
971 .family = 6,
972 .model = 26,
973 .stepping = 3,
974 .features[FEAT_1_EDX] =
975 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
976 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
977 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
978 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
979 CPUID_DE | CPUID_FP87,
980 .features[FEAT_1_ECX] =
981 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
982 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
983 .features[FEAT_8000_0001_EDX] =
984 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
985 .features[FEAT_8000_0001_ECX] =
986 CPUID_EXT3_LAHF_LM,
987 .xlevel = 0x8000000A,
988 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
991 .name = "Westmere",
992 .level = 11,
993 .vendor = CPUID_VENDOR_INTEL,
994 .family = 6,
995 .model = 44,
996 .stepping = 1,
997 .features[FEAT_1_EDX] =
998 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
999 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1000 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1001 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1002 CPUID_DE | CPUID_FP87,
1003 .features[FEAT_1_ECX] =
1004 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1005 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1006 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1007 .features[FEAT_8000_0001_EDX] =
1008 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1009 .features[FEAT_8000_0001_ECX] =
1010 CPUID_EXT3_LAHF_LM,
1011 .xlevel = 0x8000000A,
1012 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1015 .name = "SandyBridge",
1016 .level = 0xd,
1017 .vendor = CPUID_VENDOR_INTEL,
1018 .family = 6,
1019 .model = 42,
1020 .stepping = 1,
1021 .features[FEAT_1_EDX] =
1022 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1023 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1024 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1025 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1026 CPUID_DE | CPUID_FP87,
1027 .features[FEAT_1_ECX] =
1028 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1029 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1030 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1031 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1032 CPUID_EXT_SSE3,
1033 .features[FEAT_8000_0001_EDX] =
1034 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1035 CPUID_EXT2_SYSCALL,
1036 .features[FEAT_8000_0001_ECX] =
1037 CPUID_EXT3_LAHF_LM,
1038 .features[FEAT_XSAVE] =
1039 CPUID_XSAVE_XSAVEOPT,
1040 .xlevel = 0x8000000A,
1041 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1044 .name = "IvyBridge",
1045 .level = 0xd,
1046 .vendor = CPUID_VENDOR_INTEL,
1047 .family = 6,
1048 .model = 58,
1049 .stepping = 9,
1050 .features[FEAT_1_EDX] =
1051 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1052 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1053 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1054 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1055 CPUID_DE | CPUID_FP87,
1056 .features[FEAT_1_ECX] =
1057 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1058 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1059 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1060 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1061 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1062 .features[FEAT_7_0_EBX] =
1063 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1064 CPUID_7_0_EBX_ERMS,
1065 .features[FEAT_8000_0001_EDX] =
1066 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1067 CPUID_EXT2_SYSCALL,
1068 .features[FEAT_8000_0001_ECX] =
1069 CPUID_EXT3_LAHF_LM,
1070 .features[FEAT_XSAVE] =
1071 CPUID_XSAVE_XSAVEOPT,
1072 .xlevel = 0x8000000A,
1073 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1076 .name = "Haswell-noTSX",
1077 .level = 0xd,
1078 .vendor = CPUID_VENDOR_INTEL,
1079 .family = 6,
1080 .model = 60,
1081 .stepping = 1,
1082 .features[FEAT_1_EDX] =
1083 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1084 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1085 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1086 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1087 CPUID_DE | CPUID_FP87,
1088 .features[FEAT_1_ECX] =
1089 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1090 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1091 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1092 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1093 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1094 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1095 .features[FEAT_8000_0001_EDX] =
1096 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1097 CPUID_EXT2_SYSCALL,
1098 .features[FEAT_8000_0001_ECX] =
1099 CPUID_EXT3_LAHF_LM,
1100 .features[FEAT_7_0_EBX] =
1101 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1102 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1103 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1104 .features[FEAT_XSAVE] =
1105 CPUID_XSAVE_XSAVEOPT,
1106 .xlevel = 0x8000000A,
1107 .model_id = "Intel Core Processor (Haswell, no TSX)",
1108 }, {
1109 .name = "Haswell",
1110 .level = 0xd,
1111 .vendor = CPUID_VENDOR_INTEL,
1112 .family = 6,
1113 .model = 60,
1114 .stepping = 1,
1115 .features[FEAT_1_EDX] =
1116 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1117 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1118 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1119 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1120 CPUID_DE | CPUID_FP87,
1121 .features[FEAT_1_ECX] =
1122 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1123 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1124 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1125 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1126 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1127 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1128 .features[FEAT_8000_0001_EDX] =
1129 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1130 CPUID_EXT2_SYSCALL,
1131 .features[FEAT_8000_0001_ECX] =
1132 CPUID_EXT3_LAHF_LM,
1133 .features[FEAT_7_0_EBX] =
1134 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1135 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1136 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1137 CPUID_7_0_EBX_RTM,
1138 .features[FEAT_XSAVE] =
1139 CPUID_XSAVE_XSAVEOPT,
1140 .xlevel = 0x8000000A,
1141 .model_id = "Intel Core Processor (Haswell)",
1144 .name = "Broadwell-noTSX",
1145 .level = 0xd,
1146 .vendor = CPUID_VENDOR_INTEL,
1147 .family = 6,
1148 .model = 61,
1149 .stepping = 2,
1150 .features[FEAT_1_EDX] =
1151 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1152 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1153 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1154 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1155 CPUID_DE | CPUID_FP87,
1156 .features[FEAT_1_ECX] =
1157 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1158 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1159 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1160 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1161 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1162 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1163 .features[FEAT_8000_0001_EDX] =
1164 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1165 CPUID_EXT2_SYSCALL,
1166 .features[FEAT_8000_0001_ECX] =
1167 CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1168 .features[FEAT_7_0_EBX] =
1169 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1170 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1171 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1172 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1173 CPUID_7_0_EBX_SMAP,
1174 .features[FEAT_XSAVE] =
1175 CPUID_XSAVE_XSAVEOPT,
1176 .xlevel = 0x8000000A,
1177 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1180 .name = "Broadwell",
1181 .level = 0xd,
1182 .vendor = CPUID_VENDOR_INTEL,
1183 .family = 6,
1184 .model = 61,
1185 .stepping = 2,
1186 .features[FEAT_1_EDX] =
1187 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1188 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1189 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1190 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1191 CPUID_DE | CPUID_FP87,
1192 .features[FEAT_1_ECX] =
1193 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1194 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1195 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1196 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1197 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1198 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1199 .features[FEAT_8000_0001_EDX] =
1200 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1201 CPUID_EXT2_SYSCALL,
1202 .features[FEAT_8000_0001_ECX] =
1203 CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1204 .features[FEAT_7_0_EBX] =
1205 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1206 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1207 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1208 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1209 CPUID_7_0_EBX_SMAP,
1210 .features[FEAT_XSAVE] =
1211 CPUID_XSAVE_XSAVEOPT,
1212 .xlevel = 0x8000000A,
1213 .model_id = "Intel Core Processor (Broadwell)",
1216 .name = "Opteron_G1",
1217 .level = 5,
1218 .vendor = CPUID_VENDOR_AMD,
1219 .family = 15,
1220 .model = 6,
1221 .stepping = 1,
1222 .features[FEAT_1_EDX] =
1223 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1224 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1225 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1226 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1227 CPUID_DE | CPUID_FP87,
1228 .features[FEAT_1_ECX] =
1229 CPUID_EXT_SSE3,
1230 .features[FEAT_8000_0001_EDX] =
1231 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1232 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1233 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1234 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1235 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1236 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1237 .xlevel = 0x80000008,
1238 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1241 .name = "Opteron_G2",
1242 .level = 5,
1243 .vendor = CPUID_VENDOR_AMD,
1244 .family = 15,
1245 .model = 6,
1246 .stepping = 1,
1247 .features[FEAT_1_EDX] =
1248 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1249 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1250 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1251 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1252 CPUID_DE | CPUID_FP87,
1253 .features[FEAT_1_ECX] =
1254 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1255 .features[FEAT_8000_0001_EDX] =
1256 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1257 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1258 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1259 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1260 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1261 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1262 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1263 .features[FEAT_8000_0001_ECX] =
1264 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1265 .xlevel = 0x80000008,
1266 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1269 .name = "Opteron_G3",
1270 .level = 5,
1271 .vendor = CPUID_VENDOR_AMD,
1272 .family = 15,
1273 .model = 6,
1274 .stepping = 1,
1275 .features[FEAT_1_EDX] =
1276 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1277 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1278 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1279 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1280 CPUID_DE | CPUID_FP87,
1281 .features[FEAT_1_ECX] =
1282 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1283 CPUID_EXT_SSE3,
1284 .features[FEAT_8000_0001_EDX] =
1285 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1286 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1287 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1288 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1289 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1290 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1291 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1292 .features[FEAT_8000_0001_ECX] =
1293 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1294 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1295 .xlevel = 0x80000008,
1296 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1299 .name = "Opteron_G4",
1300 .level = 0xd,
1301 .vendor = CPUID_VENDOR_AMD,
1302 .family = 21,
1303 .model = 1,
1304 .stepping = 2,
1305 .features[FEAT_1_EDX] =
1306 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1307 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1308 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1309 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1310 CPUID_DE | CPUID_FP87,
1311 .features[FEAT_1_ECX] =
1312 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1313 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1314 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1315 CPUID_EXT_SSE3,
1316 .features[FEAT_8000_0001_EDX] =
1317 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1318 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1319 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1320 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1321 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1322 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1323 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1324 .features[FEAT_8000_0001_ECX] =
1325 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1326 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1327 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1328 CPUID_EXT3_LAHF_LM,
1329 /* no xsaveopt! */
1330 .xlevel = 0x8000001A,
1331 .model_id = "AMD Opteron 62xx class CPU",
1334 .name = "Opteron_G5",
1335 .level = 0xd,
1336 .vendor = CPUID_VENDOR_AMD,
1337 .family = 21,
1338 .model = 2,
1339 .stepping = 0,
1340 .features[FEAT_1_EDX] =
1341 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1342 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1343 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1344 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1345 CPUID_DE | CPUID_FP87,
1346 .features[FEAT_1_ECX] =
1347 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1348 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1349 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1350 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1351 .features[FEAT_8000_0001_EDX] =
1352 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1353 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1354 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1355 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1356 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1357 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1358 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1359 .features[FEAT_8000_0001_ECX] =
1360 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1361 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1362 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1363 CPUID_EXT3_LAHF_LM,
1364 /* no xsaveopt! */
1365 .xlevel = 0x8000001A,
1366 .model_id = "AMD Opteron 63xx class CPU",
1371 * x86_cpu_compat_set_features:
1372 * @cpu_model: CPU model name to be changed. If NULL, all CPU models are changed
1373 * @w: Identifies the feature word to be changed.
1374 * @feat_add: Feature bits to be added to feature word
1375 * @feat_remove: Feature bits to be removed from feature word
1377 * Change CPU model feature bits for compatibility.
1379 * This function may be used by machine-type compatibility functions
1380 * to enable or disable feature bits on specific CPU models.
1382 void x86_cpu_compat_set_features(const char *cpu_model, FeatureWord w,
1383 uint32_t feat_add, uint32_t feat_remove)
1385 X86CPUDefinition *def;
1386 int i;
1387 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1388 def = &builtin_x86_defs[i];
1389 if (!cpu_model || !strcmp(cpu_model, def->name)) {
1390 def->features[w] |= feat_add;
1391 def->features[w] &= ~feat_remove;
1396 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1397 bool migratable_only);
1399 #ifdef CONFIG_KVM
1401 static int cpu_x86_fill_model_id(char *str)
1403 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1404 int i;
1406 for (i = 0; i < 3; i++) {
1407 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
1408 memcpy(str + i * 16 + 0, &eax, 4);
1409 memcpy(str + i * 16 + 4, &ebx, 4);
1410 memcpy(str + i * 16 + 8, &ecx, 4);
1411 memcpy(str + i * 16 + 12, &edx, 4);
1413 return 0;
1416 static X86CPUDefinition host_cpudef;
1418 static Property host_x86_cpu_properties[] = {
1419 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1420 DEFINE_PROP_END_OF_LIST()
/* class_init for the "host" CPU model
 *
 * This function may be called before KVM is initialized.
 */
static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    xcc->kvm_required = true;

    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    host_cpudef.stepping = eax & 0x0F;
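    /* Worked example (illustrative, not part of the original source): a host
     * returning EAX=0x000306A9 for CPUID leaf 1 decodes as family 6 (the
     * extended family bits are zero), model 0x3A = 58 (extended model 3,
     * base model 0xA) and stepping 9, i.e. an Ivy Bridge part.  The extended
     * family field is normally zero unless the base family is 0xF, so adding
     * it unconditionally matches the documented decoding.
     */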

    cpu_x86_fill_model_id(host_cpudef.model_id);

    xcc->cpu_def = &host_cpudef;
    host_cpudef.cache_info_passthrough = true;

    /* level, xlevel, xlevel2, and the feature words are initialized on
     * instance_init, because they require KVM to be initialized.
     */

    dc->props = host_x86_cpu_properties;
}
1455 static void host_x86_cpu_initfn(Object *obj)
1457 X86CPU *cpu = X86_CPU(obj);
1458 CPUX86State *env = &cpu->env;
1459 KVMState *s = kvm_state;
1461 assert(kvm_enabled());
1463 /* We can't fill the features array here because we don't know yet if
1464 * "migratable" is true or false.
1466 cpu->host_features = true;
1468 env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1469 env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1470 env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1472 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1475 static const TypeInfo host_x86_cpu_type_info = {
1476 .name = X86_CPU_TYPE_NAME("host"),
1477 .parent = TYPE_X86_CPU,
1478 .instance_init = host_x86_cpu_initfn,
1479 .class_init = host_x86_cpu_class_init,
1482 #endif
1484 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1486 FeatureWordInfo *f = &feature_word_info[w];
1487 int i;
1489 for (i = 0; i < 32; ++i) {
1490 if (1 << i & mask) {
1491 const char *reg = get_register_name_32(f->cpuid_reg);
1492 assert(reg);
1493 fprintf(stderr, "warning: %s doesn't support requested feature: "
1494 "CPUID.%02XH:%s%s%s [bit %d]\n",
1495 kvm_enabled() ? "host" : "TCG",
1496 f->cpuid_eax, reg,
1497 f->feat_names[i] ? "." : "",
1498 f->feat_names[i] ? f->feat_names[i] : "", i);
1503 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, void *opaque,
1504 const char *name, Error **errp)
1506 X86CPU *cpu = X86_CPU(obj);
1507 CPUX86State *env = &cpu->env;
1508 int64_t value;
1510 value = (env->cpuid_version >> 8) & 0xf;
1511 if (value == 0xf) {
1512 value += (env->cpuid_version >> 20) & 0xff;
1514 visit_type_int(v, &value, name, errp);
1517 static void x86_cpuid_version_set_family(Object *obj, Visitor *v, void *opaque,
1518 const char *name, Error **errp)
1520 X86CPU *cpu = X86_CPU(obj);
1521 CPUX86State *env = &cpu->env;
1522 const int64_t min = 0;
1523 const int64_t max = 0xff + 0xf;
1524 Error *local_err = NULL;
1525 int64_t value;
1527 visit_type_int(v, &value, name, &local_err);
1528 if (local_err) {
1529 error_propagate(errp, local_err);
1530 return;
1532 if (value < min || value > max) {
1533 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1534 name ? name : "null", value, min, max);
1535 return;
1538 env->cpuid_version &= ~0xff00f00;
1539 if (value > 0x0f) {
1540 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1541 } else {
1542 env->cpuid_version |= value << 8;
1546 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, void *opaque,
1547 const char *name, Error **errp)
1549 X86CPU *cpu = X86_CPU(obj);
1550 CPUX86State *env = &cpu->env;
1551 int64_t value;
1553 value = (env->cpuid_version >> 4) & 0xf;
1554 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1555 visit_type_int(v, &value, name, errp);
1558 static void x86_cpuid_version_set_model(Object *obj, Visitor *v, void *opaque,
1559 const char *name, Error **errp)
1561 X86CPU *cpu = X86_CPU(obj);
1562 CPUX86State *env = &cpu->env;
1563 const int64_t min = 0;
1564 const int64_t max = 0xff;
1565 Error *local_err = NULL;
1566 int64_t value;
1568 visit_type_int(v, &value, name, &local_err);
1569 if (local_err) {
1570 error_propagate(errp, local_err);
1571 return;
1573 if (value < min || value > max) {
1574 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1575 name ? name : "null", value, min, max);
1576 return;
1579 env->cpuid_version &= ~0xf00f0;
1580 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1583 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1584 void *opaque, const char *name,
1585 Error **errp)
1587 X86CPU *cpu = X86_CPU(obj);
1588 CPUX86State *env = &cpu->env;
1589 int64_t value;
1591 value = env->cpuid_version & 0xf;
1592 visit_type_int(v, &value, name, errp);
1595 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1596 void *opaque, const char *name,
1597 Error **errp)
1599 X86CPU *cpu = X86_CPU(obj);
1600 CPUX86State *env = &cpu->env;
1601 const int64_t min = 0;
1602 const int64_t max = 0xf;
1603 Error *local_err = NULL;
1604 int64_t value;
1606 visit_type_int(v, &value, name, &local_err);
1607 if (local_err) {
1608 error_propagate(errp, local_err);
1609 return;
1611 if (value < min || value > max) {
1612 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1613 name ? name : "null", value, min, max);
1614 return;
1617 env->cpuid_version &= ~0xf;
1618 env->cpuid_version |= value & 0xf;
1621 static void x86_cpuid_get_level(Object *obj, Visitor *v, void *opaque,
1622 const char *name, Error **errp)
1624 X86CPU *cpu = X86_CPU(obj);
1626 visit_type_uint32(v, &cpu->env.cpuid_level, name, errp);
1629 static void x86_cpuid_set_level(Object *obj, Visitor *v, void *opaque,
1630 const char *name, Error **errp)
1632 X86CPU *cpu = X86_CPU(obj);
1634 visit_type_uint32(v, &cpu->env.cpuid_level, name, errp);
1637 static void x86_cpuid_get_xlevel(Object *obj, Visitor *v, void *opaque,
1638 const char *name, Error **errp)
1640 X86CPU *cpu = X86_CPU(obj);
1642 visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp);
1645 static void x86_cpuid_set_xlevel(Object *obj, Visitor *v, void *opaque,
1646 const char *name, Error **errp)
1648 X86CPU *cpu = X86_CPU(obj);
1650 visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp);
1653 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1655 X86CPU *cpu = X86_CPU(obj);
1656 CPUX86State *env = &cpu->env;
1657 char *value;
1659 value = g_malloc(CPUID_VENDOR_SZ + 1);
1660 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1661 env->cpuid_vendor3);
1662 return value;
1665 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1666 Error **errp)
1668 X86CPU *cpu = X86_CPU(obj);
1669 CPUX86State *env = &cpu->env;
1670 int i;
1672 if (strlen(value) != CPUID_VENDOR_SZ) {
1673 error_set(errp, QERR_PROPERTY_VALUE_BAD, "",
1674 "vendor", value);
1675 return;
1678 env->cpuid_vendor1 = 0;
1679 env->cpuid_vendor2 = 0;
1680 env->cpuid_vendor3 = 0;
1681 for (i = 0; i < 4; i++) {
1682 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1683 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1684 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1688 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1690 X86CPU *cpu = X86_CPU(obj);
1691 CPUX86State *env = &cpu->env;
1692 char *value;
1693 int i;
1695 value = g_malloc(48 + 1);
1696 for (i = 0; i < 48; i++) {
1697 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1699 value[48] = '\0';
1700 return value;
1703 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1704 Error **errp)
1706 X86CPU *cpu = X86_CPU(obj);
1707 CPUX86State *env = &cpu->env;
1708 int c, len, i;
1710 if (model_id == NULL) {
1711 model_id = "";
1713 len = strlen(model_id);
1714 memset(env->cpuid_model, 0, 48);
1715 for (i = 0; i < 48; i++) {
1716 if (i >= len) {
1717 c = '\0';
1718 } else {
1719 c = (uint8_t)model_id[i];
1721 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1725 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, void *opaque,
1726 const char *name, Error **errp)
1728 X86CPU *cpu = X86_CPU(obj);
1729 int64_t value;
1731 value = cpu->env.tsc_khz * 1000;
1732 visit_type_int(v, &value, name, errp);
1735 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, void *opaque,
1736 const char *name, Error **errp)
1738 X86CPU *cpu = X86_CPU(obj);
1739 const int64_t min = 0;
1740 const int64_t max = INT64_MAX;
1741 Error *local_err = NULL;
1742 int64_t value;
1744 visit_type_int(v, &value, name, &local_err);
1745 if (local_err) {
1746 error_propagate(errp, local_err);
1747 return;
1749 if (value < min || value > max) {
1750 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1751 name ? name : "null", value, min, max);
1752 return;
1755 cpu->env.tsc_khz = value / 1000;
1758 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, void *opaque,
1759 const char *name, Error **errp)
1761 X86CPU *cpu = X86_CPU(obj);
1762 int64_t value = cpu->apic_id;
1764 visit_type_int(v, &value, name, errp);
1767 static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, void *opaque,
1768 const char *name, Error **errp)
1770 X86CPU *cpu = X86_CPU(obj);
1771 DeviceState *dev = DEVICE(obj);
1772 const int64_t min = 0;
1773 const int64_t max = UINT32_MAX;
1774 Error *error = NULL;
1775 int64_t value;
1777 if (dev->realized) {
1778 error_setg(errp, "Attempt to set property '%s' on '%s' after "
1779 "it was realized", name, object_get_typename(obj));
1780 return;
1783 visit_type_int(v, &value, name, &error);
1784 if (error) {
1785 error_propagate(errp, error);
1786 return;
1788 if (value < min || value > max) {
1789 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1790 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
1791 object_get_typename(obj), name, value, min, max);
1792 return;
1795 if ((value != cpu->apic_id) && cpu_exists(value)) {
1796 error_setg(errp, "CPU with APIC ID %" PRIi64 " already exists", value);
1797 return;
1799 cpu->apic_id = value;
1802 /* Generic getter for "feature-words" and "filtered-features" properties */
1803 static void x86_cpu_get_feature_words(Object *obj, Visitor *v, void *opaque,
1804 const char *name, Error **errp)
1806 uint32_t *array = (uint32_t *)opaque;
1807 FeatureWord w;
1808 Error *err = NULL;
1809 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1810 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1811 X86CPUFeatureWordInfoList *list = NULL;
1813 for (w = 0; w < FEATURE_WORDS; w++) {
1814 FeatureWordInfo *wi = &feature_word_info[w];
1815 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1816 qwi->cpuid_input_eax = wi->cpuid_eax;
1817 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1818 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1819 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1820 qwi->features = array[w];
1822 /* List will be in reverse order, but order shouldn't matter */
1823 list_entries[w].next = list;
1824 list_entries[w].value = &word_infos[w];
1825 list = &list_entries[w];
1828 visit_type_X86CPUFeatureWordInfoList(v, &list, "feature-words", &err);
1829 error_propagate(errp, err);
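/* Accessors backing the "hv-spinlocks" property; the setter rejects values
 * below 0xFFF or above UINT_MAX. */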
1832 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1833 const char *name, Error **errp)
1835 X86CPU *cpu = X86_CPU(obj);
1836 int64_t value = cpu->hyperv_spinlock_attempts;
1838 visit_type_int(v, &value, name, errp);
1841 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1842 const char *name, Error **errp)
1844 const int64_t min = 0xFFF;
1845 const int64_t max = UINT_MAX;
1846 X86CPU *cpu = X86_CPU(obj);
1847 Error *err = NULL;
1848 int64_t value;
1850 visit_type_int(v, &value, name, &err);
1851 if (err) {
1852 error_propagate(errp, err);
1853 return;
1856 if (value < min || value > max) {
1857 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1858 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1859 object_get_typename(obj), name ? name : "null",
1860 value, min, max);
1861 return;
1863 cpu->hyperv_spinlock_attempts = value;
1866 static PropertyInfo qdev_prop_spinlocks = {
1867 .name = "int",
1868 .get = x86_get_hv_spinlocks,
1869 .set = x86_set_hv_spinlocks,
1872 /* Convert all '_' in a feature string option name to '-', so that the feature
1873 * name conforms to the QOM property naming rule, which uses '-' instead of '_'. */
1875 static inline void feat2prop(char *s)
1877 while ((s = strchr(s, '_'))) {
1878 *s = '-';
1882 /* Parse a "+feature,-feature,feature=foo" CPU feature string. */
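/* Example (illustrative): "+feature,-feature,xlevel=0x80000008,
 * tsc-freq=1000000000,hv-spinlocks=0x1fff" enables/disables individual CPUID
 * flags and sets the corresponding properties handled below. */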
1884 static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
1885 Error **errp)
1887 X86CPU *cpu = X86_CPU(cs);
1888 char *featurestr; /* Single "key=value" string being parsed */
1889 FeatureWord w;
1890 /* Features to be added */
1891 FeatureWordArray plus_features = { 0 };
1892 /* Features to be removed */
1893 FeatureWordArray minus_features = { 0 };
1894 uint32_t numvalue;
1895 CPUX86State *env = &cpu->env;
1896 Error *local_err = NULL;
1898 featurestr = features ? strtok(features, ",") : NULL;
1900 while (featurestr) {
1901 char *val;
1902 if (featurestr[0] == '+') {
1903 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
1904 } else if (featurestr[0] == '-') {
1905 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
1906 } else if ((val = strchr(featurestr, '='))) {
1907 *val = 0; val++;
1908 feat2prop(featurestr);
1909 if (!strcmp(featurestr, "xlevel")) {
1910 char *err;
1911 char num[32];
1913 numvalue = strtoul(val, &err, 0);
1914 if (!*val || *err) {
1915 error_setg(errp, "bad numerical value %s", val);
1916 return;
1918 if (numvalue < 0x80000000) {
1919 error_report("xlevel value shall always be >= 0x80000000"
1920 ", fixup will be removed in future versions");
1921 numvalue += 0x80000000;
1923 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1924 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1925 } else if (!strcmp(featurestr, "tsc-freq")) {
1926 int64_t tsc_freq;
1927 char *err;
1928 char num[32];
1930 tsc_freq = strtosz_suffix_unit(val, &err,
1931 STRTOSZ_DEFSUFFIX_B, 1000);
1932 if (tsc_freq < 0 || *err) {
1933 error_setg(errp, "bad numerical value %s", val);
1934 return;
1936 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
1937 object_property_parse(OBJECT(cpu), num, "tsc-frequency",
1938 &local_err);
1939 } else if (!strcmp(featurestr, "hv-spinlocks")) {
1940 char *err;
1941 const int min = 0xFFF;
1942 char num[32];
1943 numvalue = strtoul(val, &err, 0);
1944 if (!*val || *err) {
1945 error_setg(errp, "bad numerical value %s", val);
1946 return;
1948 if (numvalue < min) {
1949 error_report("hv-spinlocks value shall always be >= 0x%x"
1950 ", fixup will be removed in future versions",
1951 min);
1952 numvalue = min;
1954 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1955 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1956 } else {
1957 object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
1959 } else {
1960 feat2prop(featurestr);
1961 object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
1963 if (local_err) {
1964 error_propagate(errp, local_err);
1965 return;
1967 featurestr = strtok(NULL, ",");
1970 if (cpu->host_features) {
1971 for (w = 0; w < FEATURE_WORDS; w++) {
1972 env->features[w] =
1973 x86_cpu_get_supported_feature_word(w, cpu->migratable);
1977 for (w = 0; w < FEATURE_WORDS; w++) {
1978 env->features[w] |= plus_features[w];
1979 env->features[w] &= ~minus_features[w];
1983 /* Print all cpuid feature names in featureset. */
1985 static void listflags(FILE *f, fprintf_function print, const char **featureset)
1987 int bit;
1988 bool first = true;
1990 for (bit = 0; bit < 32; bit++) {
1991 if (featureset[bit]) {
1992 print(f, "%s%s", first ? "" : " ", featureset[bit]);
1993 first = false;
1998 /* generate CPU information. */
1999 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2001 X86CPUDefinition *def;
2002 char buf[256];
2003 int i;
2005 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2006 def = &builtin_x86_defs[i];
2007 snprintf(buf, sizeof(buf), "%s", def->name);
2008 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
2010 #ifdef CONFIG_KVM
2011 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
2012 "KVM processor with all supported host features "
2013 "(only available in KVM mode)");
2014 #endif
2016 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2017 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2018 FeatureWordInfo *fw = &feature_word_info[i];
2020 (*cpu_fprintf)(f, " ");
2021 listflags(f, cpu_fprintf, fw->feat_names);
2022 (*cpu_fprintf)(f, "\n");
2026 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2028 CpuDefinitionInfoList *cpu_list = NULL;
2029 X86CPUDefinition *def;
2030 int i;
2032 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2033 CpuDefinitionInfoList *entry;
2034 CpuDefinitionInfo *info;
2036 def = &builtin_x86_defs[i];
2037 info = g_malloc0(sizeof(*info));
2038 info->name = g_strdup(def->name);
2040 entry = g_malloc0(sizeof(*entry));
2041 entry->value = info;
2042 entry->next = cpu_list;
2043 cpu_list = entry;
2046 return cpu_list;
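/* Return the feature bits of word 'w' supported by the current accelerator:
 * queried from KVM when it is enabled, taken from the static TCG mask under
 * TCG, and ~0 otherwise.  When migratable_only is set, the KVM/TCG result is
 * further restricted to flags that do not block migration. */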
2049 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2050 bool migratable_only)
2052 FeatureWordInfo *wi = &feature_word_info[w];
2053 uint32_t r;
2055 if (kvm_enabled()) {
2056 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2057 wi->cpuid_ecx,
2058 wi->cpuid_reg);
2059 } else if (tcg_enabled()) {
2060 r = wi->tcg_features;
2061 } else {
2062 return ~0;
2064 if (migratable_only) {
2065 r &= x86_cpu_get_migratable_flags(w);
2067 return r;
2071 /* Filters CPU feature words based on host availability of each feature.
2073 * Returns: 0 if all flags are supported by the host, non-zero otherwise. */
2075 static int x86_cpu_filter_features(X86CPU *cpu)
2077 CPUX86State *env = &cpu->env;
2078 FeatureWord w;
2079 int rv = 0;
2081 for (w = 0; w < FEATURE_WORDS; w++) {
2082 uint32_t host_feat =
2083 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2084 uint32_t requested_features = env->features[w];
2085 env->features[w] &= host_feat;
2086 cpu->filtered_features[w] = requested_features & ~env->features[w];
2087 if (cpu->filtered_features[w]) {
2088 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2089 report_unavailable_features(w, cpu->filtered_features[w]);
2091 rv = 1;
2095 return rv;
2098 /* Load data from X86CPUDefinition. */
2100 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2102 CPUX86State *env = &cpu->env;
2103 const char *vendor;
2104 char host_vendor[CPUID_VENDOR_SZ + 1];
2105 FeatureWord w;
2107 object_property_set_int(OBJECT(cpu), def->level, "level", errp);
2108 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2109 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2110 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2111 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
2112 env->cpuid_xlevel2 = def->xlevel2;
2113 cpu->cache_info_passthrough = def->cache_info_passthrough;
2114 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2115 for (w = 0; w < FEATURE_WORDS; w++) {
2116 env->features[w] = def->features[w];
2119 /* Special cases not set in the X86CPUDefinition structs: */
2120 if (kvm_enabled()) {
2121 FeatureWord w;
2122 for (w = 0; w < FEATURE_WORDS; w++) {
2123 env->features[w] |= kvm_default_features[w];
2124 env->features[w] &= ~kvm_default_unset_features[w];
2128 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2130 /* sysenter isn't supported in compatibility mode on AMD,
2131 * syscall isn't supported in compatibility mode on Intel.
2132 * Normally we advertise the actual CPU vendor, but you can
2133 * override this using the 'vendor' property if you want to use
2134 * KVM's sysenter/syscall emulation in compatibility mode and
2135 * when doing cross vendor migration. */
2137 vendor = def->vendor;
2138 if (kvm_enabled()) {
2139 uint32_t ebx = 0, ecx = 0, edx = 0;
2140 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2141 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2142 vendor = host_vendor;
2145 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
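/* Create (but do not realize) an X86CPU from a "name[,features...]" model
 * string.  Returns NULL and sets errp if the model is unknown, requires KVM
 * without KVM enabled, or the feature string fails to parse. */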
2149 X86CPU *cpu_x86_create(const char *cpu_model, Error **errp)
2151 X86CPU *cpu = NULL;
2152 X86CPUClass *xcc;
2153 ObjectClass *oc;
2154 gchar **model_pieces;
2155 char *name, *features;
2156 Error *error = NULL;
2158 model_pieces = g_strsplit(cpu_model, ",", 2);
2159 if (!model_pieces[0]) {
2160 error_setg(&error, "Invalid/empty CPU model name");
2161 goto out;
2163 name = model_pieces[0];
2164 features = model_pieces[1];
2166 oc = x86_cpu_class_by_name(name);
2167 if (oc == NULL) {
2168 error_setg(&error, "Unable to find CPU definition: %s", name);
2169 goto out;
2171 xcc = X86_CPU_CLASS(oc);
2173 if (xcc->kvm_required && !kvm_enabled()) {
2174 error_setg(&error, "CPU model '%s' requires KVM", name);
2175 goto out;
2178 cpu = X86_CPU(object_new(object_class_get_name(oc)));
2180 x86_cpu_parse_featurestr(CPU(cpu), features, &error);
2181 if (error) {
2182 goto out;
2185 out:
2186 if (error != NULL) {
2187 error_propagate(errp, error);
2188 if (cpu) {
2189 object_unref(OBJECT(cpu));
2190 cpu = NULL;
2193 g_strfreev(model_pieces);
2194 return cpu;
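/* Convenience helper: create an X86CPU from cpu_model and realize it,
 * reporting the error and returning NULL on failure. */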
2197 X86CPU *cpu_x86_init(const char *cpu_model)
2199 Error *error = NULL;
2200 X86CPU *cpu;
2202 cpu = cpu_x86_create(cpu_model, &error);
2203 if (error) {
2204 goto out;
2207 object_property_set_bool(OBJECT(cpu), true, "realized", &error);
2209 out:
2210 if (error) {
2211 error_report_err(error);
2212 if (cpu != NULL) {
2213 object_unref(OBJECT(cpu));
2214 cpu = NULL;
2217 return cpu;
2220 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2222 X86CPUDefinition *cpudef = data;
2223 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2225 xcc->cpu_def = cpudef;
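/* Register one QOM CPU type per built-in definition; the X86CPUDefinition is
 * passed as class_data and consumed by x86_cpu_cpudef_class_init() above. */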
2228 static void x86_register_cpudef_type(X86CPUDefinition *def)
2230 char *typename = x86_cpu_type_name(def->name);
2231 TypeInfo ti = {
2232 .name = typename,
2233 .parent = TYPE_X86_CPU,
2234 .class_init = x86_cpu_cpudef_class_init,
2235 .class_data = def,
2238 type_register(&ti);
2239 g_free(typename);
2242 #if !defined(CONFIG_USER_ONLY)
2244 void cpu_clear_apic_feature(CPUX86State *env)
2246 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2249 #endif /* !CONFIG_USER_ONLY */
2251 /* Initialize the list of CPU models, filling some non-static fields if necessary. */
2253 void x86_cpudef_setup(void)
2255 int i, j;
2256 static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
2258 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
2259 X86CPUDefinition *def = &builtin_x86_defs[i];
2261 /* Look for specific "cpudef" models that
2262 * have the QEMU version in .model_id */
2263 for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
2264 if (strcmp(model_with_versions[j], def->name) == 0) {
2265 pstrcpy(def->model_id, sizeof(def->model_id),
2266 "QEMU Virtual CPU version ");
2267 pstrcat(def->model_id, sizeof(def->model_id),
2268 qemu_get_version());
2269 break;
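/* Main CPUID emulation entry point: computes eax/ebx/ecx/edx for leaf 'index'
 * and sub-leaf 'count'.  Out-of-range leaves are clamped to cpuid_level,
 * cpuid_xlevel or (for Centaur leaves) cpuid_xlevel2 before dispatch. */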
2275 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2276 uint32_t *eax, uint32_t *ebx,
2277 uint32_t *ecx, uint32_t *edx)
2279 X86CPU *cpu = x86_env_get_cpu(env);
2280 CPUState *cs = CPU(cpu);
2282 /* test if maximum index reached */
2283 if (index & 0x80000000) {
2284 if (index > env->cpuid_xlevel) {
2285 if (env->cpuid_xlevel2 > 0) {
2286 /* Handle the Centaur's CPUID instruction. */
2287 if (index > env->cpuid_xlevel2) {
2288 index = env->cpuid_xlevel2;
2289 } else if (index < 0xC0000000) {
2290 index = env->cpuid_xlevel;
2292 } else {
2293 /* Intel documentation states that invalid EAX input will
2294 * return the same information as EAX=cpuid_level
2295 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID) */
2297 index = env->cpuid_level;
2300 } else {
2301 if (index > env->cpuid_level)
2302 index = env->cpuid_level;
2305 switch(index) {
2306 case 0:
2307 *eax = env->cpuid_level;
2308 *ebx = env->cpuid_vendor1;
2309 *edx = env->cpuid_vendor2;
2310 *ecx = env->cpuid_vendor3;
2311 break;
2312 case 1:
2313 *eax = env->cpuid_version;
2314 *ebx = (cpu->apic_id << 24) |
2315 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2316 *ecx = env->features[FEAT_1_ECX];
2317 *edx = env->features[FEAT_1_EDX];
2318 if (cs->nr_cores * cs->nr_threads > 1) {
2319 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2320 *edx |= 1 << 28; /* HTT bit */
2322 break;
2323 case 2:
2324 /* cache info: needed for Pentium Pro compatibility */
2325 if (cpu->cache_info_passthrough) {
2326 host_cpuid(index, 0, eax, ebx, ecx, edx);
2327 break;
2329 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2330 *ebx = 0;
2331 *ecx = 0;
2332 *edx = (L1D_DESCRIPTOR << 16) | \
2333 (L1I_DESCRIPTOR << 8) | \
2334 (L2_DESCRIPTOR);
2335 break;
2336 case 4:
2337 /* cache info: needed for Core compatibility */
2338 if (cpu->cache_info_passthrough) {
2339 host_cpuid(index, count, eax, ebx, ecx, edx);
2340 *eax &= ~0xFC000000;
2341 } else {
2342 *eax = 0;
2343 switch (count) {
2344 case 0: /* L1 dcache info */
2345 *eax |= CPUID_4_TYPE_DCACHE | \
2346 CPUID_4_LEVEL(1) | \
2347 CPUID_4_SELF_INIT_LEVEL;
2348 *ebx = (L1D_LINE_SIZE - 1) | \
2349 ((L1D_PARTITIONS - 1) << 12) | \
2350 ((L1D_ASSOCIATIVITY - 1) << 22);
2351 *ecx = L1D_SETS - 1;
2352 *edx = CPUID_4_NO_INVD_SHARING;
2353 break;
2354 case 1: /* L1 icache info */
2355 *eax |= CPUID_4_TYPE_ICACHE | \
2356 CPUID_4_LEVEL(1) | \
2357 CPUID_4_SELF_INIT_LEVEL;
2358 *ebx = (L1I_LINE_SIZE - 1) | \
2359 ((L1I_PARTITIONS - 1) << 12) | \
2360 ((L1I_ASSOCIATIVITY - 1) << 22);
2361 *ecx = L1I_SETS - 1;
2362 *edx = CPUID_4_NO_INVD_SHARING;
2363 break;
2364 case 2: /* L2 cache info */
2365 *eax |= CPUID_4_TYPE_UNIFIED | \
2366 CPUID_4_LEVEL(2) | \
2367 CPUID_4_SELF_INIT_LEVEL;
2368 if (cs->nr_threads > 1) {
2369 *eax |= (cs->nr_threads - 1) << 14;
2371 *ebx = (L2_LINE_SIZE - 1) | \
2372 ((L2_PARTITIONS - 1) << 12) | \
2373 ((L2_ASSOCIATIVITY - 1) << 22);
2374 *ecx = L2_SETS - 1;
2375 *edx = CPUID_4_NO_INVD_SHARING;
2376 break;
2377 default: /* end of info */
2378 *eax = 0;
2379 *ebx = 0;
2380 *ecx = 0;
2381 *edx = 0;
2382 break;
2386 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2387 if ((*eax & 31) && cs->nr_cores > 1) {
2388 *eax |= (cs->nr_cores - 1) << 26;
2390 break;
2391 case 5:
2392 /* mwait info: needed for Core compatibility */
2393 *eax = 0; /* Smallest monitor-line size in bytes */
2394 *ebx = 0; /* Largest monitor-line size in bytes */
2395 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2396 *edx = 0;
2397 break;
2398 case 6:
2399 /* Thermal and Power Leaf */
2400 *eax = 0;
2401 *ebx = 0;
2402 *ecx = 0;
2403 *edx = 0;
2404 break;
2405 case 7:
2406 /* Structured Extended Feature Flags Enumeration Leaf */
2407 if (count == 0) {
2408 *eax = 0; /* Maximum ECX value for sub-leaves */
2409 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2410 *ecx = 0; /* Reserved */
2411 *edx = 0; /* Reserved */
2412 } else {
2413 *eax = 0;
2414 *ebx = 0;
2415 *ecx = 0;
2416 *edx = 0;
2418 break;
2419 case 9:
2420 /* Direct Cache Access Information Leaf */
2421 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2422 *ebx = 0;
2423 *ecx = 0;
2424 *edx = 0;
2425 break;
2426 case 0xA:
2427 /* Architectural Performance Monitoring Leaf */
2428 if (kvm_enabled() && cpu->enable_pmu) {
2429 KVMState *s = cs->kvm_state;
2431 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2432 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2433 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2434 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2435 } else {
2436 *eax = 0;
2437 *ebx = 0;
2438 *ecx = 0;
2439 *edx = 0;
2441 break;
2442 case 0xD: {
2443 KVMState *s = cs->kvm_state;
2444 uint64_t kvm_mask;
2445 int i;
2447 /* Processor Extended State */
2448 *eax = 0;
2449 *ebx = 0;
2450 *ecx = 0;
2451 *edx = 0;
2452 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) || !kvm_enabled()) {
2453 break;
2455 kvm_mask =
2456 kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX) |
2457 ((uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32);
2459 if (count == 0) {
2460 *ecx = 0x240;
2461 for (i = 2; i < ARRAY_SIZE(ext_save_areas); i++) {
2462 const ExtSaveArea *esa = &ext_save_areas[i];
2463 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2464 (kvm_mask & (1 << i)) != 0) {
2465 if (i < 32) {
2466 *eax |= 1 << i;
2467 } else {
2468 *edx |= 1 << (i - 32);
2470 *ecx = MAX(*ecx, esa->offset + esa->size);
2473 *eax |= kvm_mask & (XSTATE_FP | XSTATE_SSE);
2474 *ebx = *ecx;
2475 } else if (count == 1) {
2476 *eax = env->features[FEAT_XSAVE];
2477 } else if (count < ARRAY_SIZE(ext_save_areas)) {
2478 const ExtSaveArea *esa = &ext_save_areas[count];
2479 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2480 (kvm_mask & (1 << count)) != 0) {
2481 *eax = esa->size;
2482 *ebx = esa->offset;
2485 break;
2487 case 0x80000000:
2488 *eax = env->cpuid_xlevel;
2489 *ebx = env->cpuid_vendor1;
2490 *edx = env->cpuid_vendor2;
2491 *ecx = env->cpuid_vendor3;
2492 break;
2493 case 0x80000001:
2494 *eax = env->cpuid_version;
2495 *ebx = 0;
2496 *ecx = env->features[FEAT_8000_0001_ECX];
2497 *edx = env->features[FEAT_8000_0001_EDX];
2499 /* The Linux kernel checks for the CMPLegacy bit and
2500 * discards multiple thread information if it is set.
2501 * So don't set it here for Intel to make Linux guests happy. */
2503 if (cs->nr_cores * cs->nr_threads > 1) {
2504 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2505 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2506 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2507 *ecx |= 1 << 1; /* CmpLegacy bit */
2510 break;
2511 case 0x80000002:
2512 case 0x80000003:
2513 case 0x80000004:
2514 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2515 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2516 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2517 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2518 break;
2519 case 0x80000005:
2520 /* cache info (L1 cache) */
2521 if (cpu->cache_info_passthrough) {
2522 host_cpuid(index, 0, eax, ebx, ecx, edx);
2523 break;
2525 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2526 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2527 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2528 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2529 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2530 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2531 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2532 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2533 break;
2534 case 0x80000006:
2535 /* cache info (L2 cache) */
2536 if (cpu->cache_info_passthrough) {
2537 host_cpuid(index, 0, eax, ebx, ecx, edx);
2538 break;
2540 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2541 (L2_DTLB_2M_ENTRIES << 16) | \
2542 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2543 (L2_ITLB_2M_ENTRIES);
2544 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2545 (L2_DTLB_4K_ENTRIES << 16) | \
2546 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2547 (L2_ITLB_4K_ENTRIES);
2548 *ecx = (L2_SIZE_KB_AMD << 16) | \
2549 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2550 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2551 *edx = ((L3_SIZE_KB/512) << 18) | \
2552 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2553 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2554 break;
2555 case 0x80000007:
2556 *eax = 0;
2557 *ebx = 0;
2558 *ecx = 0;
2559 *edx = env->features[FEAT_8000_0007_EDX];
2560 break;
2561 case 0x80000008:
2562 /* virtual & phys address size in low 2 bytes. */
2563 /* XXX: This value must match the one used in the MMU code. */
2564 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2565 /* 64 bit processor */
2566 /* XXX: The physical address space is limited to 42 bits in exec.c. */
2567 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
2568 } else {
2569 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
2570 *eax = 0x00000024; /* 36 bits physical */
2571 } else {
2572 *eax = 0x00000020; /* 32 bits physical */
2575 *ebx = 0;
2576 *ecx = 0;
2577 *edx = 0;
2578 if (cs->nr_cores * cs->nr_threads > 1) {
2579 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2581 break;
2582 case 0x8000000A:
2583 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2584 *eax = 0x00000001; /* SVM Revision */
2585 *ebx = 0x00000010; /* nr of ASIDs */
2586 *ecx = 0;
2587 *edx = env->features[FEAT_SVM]; /* optional features */
2588 } else {
2589 *eax = 0;
2590 *ebx = 0;
2591 *ecx = 0;
2592 *edx = 0;
2594 break;
2595 case 0xC0000000:
2596 *eax = env->cpuid_xlevel2;
2597 *ebx = 0;
2598 *ecx = 0;
2599 *edx = 0;
2600 break;
2601 case 0xC0000001:
2602 /* Support for VIA CPU's CPUID instruction */
2603 *eax = env->cpuid_version;
2604 *ebx = 0;
2605 *ecx = 0;
2606 *edx = env->features[FEAT_C000_0001_EDX];
2607 break;
2608 case 0xC0000002:
2609 case 0xC0000003:
2610 case 0xC0000004:
2611 /* Reserved for future use; currently filled with zero */
2612 *eax = 0;
2613 *ebx = 0;
2614 *ecx = 0;
2615 *edx = 0;
2616 break;
2617 default:
2618 /* reserved values: zero */
2619 *eax = 0;
2620 *ebx = 0;
2621 *ecx = 0;
2622 *edx = 0;
2623 break;
2627 /* CPUClass::reset() */
2628 static void x86_cpu_reset(CPUState *s)
2630 X86CPU *cpu = X86_CPU(s);
2631 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2632 CPUX86State *env = &cpu->env;
2633 int i;
2635 xcc->parent_reset(s);
2637 memset(env, 0, offsetof(CPUX86State, cpuid_level));
2639 tlb_flush(s, 1);
2641 env->old_exception = -1;
2643 /* init to reset state */
2645 #ifdef CONFIG_SOFTMMU
2646 env->hflags |= HF_SOFTMMU_MASK;
2647 #endif
2648 env->hflags2 |= HF2_GIF_MASK;
2650 cpu_x86_update_cr0(env, 0x60000010);
2651 env->a20_mask = ~0x0;
2652 env->smbase = 0x30000;
2654 env->idt.limit = 0xffff;
2655 env->gdt.limit = 0xffff;
2656 env->ldt.limit = 0xffff;
2657 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2658 env->tr.limit = 0xffff;
2659 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
2661 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2662 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2663 DESC_R_MASK | DESC_A_MASK);
2664 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2665 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2666 DESC_A_MASK);
2667 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2668 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2669 DESC_A_MASK);
2670 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2671 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2672 DESC_A_MASK);
2673 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2674 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2675 DESC_A_MASK);
2676 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2677 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2678 DESC_A_MASK);
2680 env->eip = 0xfff0;
2681 env->regs[R_EDX] = env->cpuid_version;
2683 env->eflags = 0x2;
2685 /* FPU init */
2686 for (i = 0; i < 8; i++) {
2687 env->fptags[i] = 1;
2689 cpu_set_fpuc(env, 0x37f);
2691 env->mxcsr = 0x1f80;
2692 env->xstate_bv = XSTATE_FP | XSTATE_SSE;
2694 env->pat = 0x0007040600070406ULL;
2695 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2697 memset(env->dr, 0, sizeof(env->dr));
2698 env->dr[6] = DR6_FIXED_1;
2699 env->dr[7] = DR7_FIXED_1;
2700 cpu_breakpoint_remove_all(s, BP_CPU);
2701 cpu_watchpoint_remove_all(s, BP_CPU);
2703 env->xcr0 = 1;
2706 /* SDM 11.11.5 requires:
2707 * - IA32_MTRR_DEF_TYPE MSR.E = 0
2708 * - IA32_MTRR_PHYSMASKn.V = 0
2709 * All other bits are undefined. For simplification, zero it all. */
2711 env->mtrr_deftype = 0;
2712 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
2713 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
2715 #if !defined(CONFIG_USER_ONLY)
2716 /* We hard-wire the BSP to the first CPU. */
2717 if (s->cpu_index == 0) {
2718 apic_designate_bsp(cpu->apic_state);
2721 s->halted = !cpu_is_bsp(cpu);
2723 if (kvm_enabled()) {
2724 kvm_arch_reset_vcpu(cpu);
2726 #endif
2729 #ifndef CONFIG_USER_ONLY
2730 bool cpu_is_bsp(X86CPU *cpu)
2732 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2735 /* TODO: remove me, when reset over QOM tree is implemented */
2736 static void x86_cpu_machine_reset_cb(void *opaque)
2738 X86CPU *cpu = opaque;
2739 cpu_reset(CPU(cpu));
2741 #endif
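/* Set up MCE emulation: for family >= 6 CPUs advertising both CPUID_MCE and
 * CPUID_MCA, enable the default capability/control values and initialize each
 * bank's control register to all ones. */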
2743 static void mce_init(X86CPU *cpu)
2745 CPUX86State *cenv = &cpu->env;
2746 unsigned int bank;
2748 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2749 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2750 (CPUID_MCE | CPUID_MCA)) {
2751 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2752 cenv->mcg_ctl = ~(uint64_t)0;
2753 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2754 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2759 #ifndef CONFIG_USER_ONLY
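/* Create the local APIC for this CPU, picking "kvm-apic" with an in-kernel
 * irqchip, "xen-apic" under Xen, or the emulated "apic" otherwise, and attach
 * it as a child object with the CPU's APIC ID. */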
2760 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2762 DeviceState *dev = DEVICE(cpu);
2763 APICCommonState *apic;
2764 const char *apic_type = "apic";
2766 if (kvm_irqchip_in_kernel()) {
2767 apic_type = "kvm-apic";
2768 } else if (xen_enabled()) {
2769 apic_type = "xen-apic";
2772 cpu->apic_state = qdev_try_create(qdev_get_parent_bus(dev), apic_type);
2773 if (cpu->apic_state == NULL) {
2774 error_setg(errp, "APIC device '%s' could not be created", apic_type);
2775 return;
2778 object_property_add_child(OBJECT(cpu), "apic",
2779 OBJECT(cpu->apic_state), NULL);
2780 qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
2781 /* TODO: convert to link<> */
2782 apic = APIC_COMMON(cpu->apic_state);
2783 apic->cpu = cpu;
2786 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2788 if (cpu->apic_state == NULL) {
2789 return;
2791 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
2792 errp);
2794 #else
2795 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2798 #endif
2801 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
2802 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
2803 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
2804 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
2805 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
2806 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
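/* DeviceClass::realize handler: validates apic-id, applies vendor-specific
 * CPUID fixups, filters the feature set against accelerator support, creates
 * the APIC if needed, initializes MCE and the vcpu, then resets the CPU and
 * chains to the parent realize. */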
2807 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
2809 CPUState *cs = CPU(dev);
2810 X86CPU *cpu = X86_CPU(dev);
2811 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
2812 CPUX86State *env = &cpu->env;
2813 Error *local_err = NULL;
2814 static bool ht_warned;
2816 if (cpu->apic_id < 0) {
2817 error_setg(errp, "apic-id property was not initialized properly");
2818 return;
2821 if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
2822 env->cpuid_level = 7;
2825 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
2826 * CPUID[1].EDX.
2828 if (IS_AMD_CPU(env)) {
2829 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
2830 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
2831 & CPUID_EXT2_AMD_ALIASES);
2835 if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
2836 error_setg(&local_err,
2837 kvm_enabled() ?
2838 "Host doesn't support requested features" :
2839 "TCG doesn't support requested features");
2840 goto out;
2843 #ifndef CONFIG_USER_ONLY
2844 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
2846 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
2847 x86_cpu_apic_create(cpu, &local_err);
2848 if (local_err != NULL) {
2849 goto out;
2852 #endif
2854 mce_init(cpu);
2855 qemu_init_vcpu(cs);
2857 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
2858 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
2859 * based on inputs (sockets, cores, threads), it is still better to give
2860 * users a warning.
2862 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
2863 * cs->nr_threads hasn't been populated yet and the check is incorrect. */
2865 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
2866 error_report("AMD CPU doesn't support hyperthreading. Please configure"
2867 " -smp options properly.");
2868 ht_warned = true;
2871 x86_cpu_apic_realize(cpu, &local_err);
2872 if (local_err != NULL) {
2873 goto out;
2875 cpu_reset(cs);
2877 xcc->parent_realize(dev, &local_err);
2878 out:
2879 if (local_err != NULL) {
2880 error_propagate(errp, local_err);
2881 return;
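/* Instance init: wires up env_ptr, registers the QOM properties exposed by
 * this file ("family", "model", "stepping", "level", "xlevel", "vendor",
 * "model-id", "tsc-frequency", "apic-id", "feature-words",
 * "filtered-features") and loads defaults from the class's X86CPUDefinition. */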
2885 static void x86_cpu_initfn(Object *obj)
2887 CPUState *cs = CPU(obj);
2888 X86CPU *cpu = X86_CPU(obj);
2889 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
2890 CPUX86State *env = &cpu->env;
2891 static int inited;
2893 cs->env_ptr = env;
2894 cpu_exec_init(env);
2896 object_property_add(obj, "family", "int",
2897 x86_cpuid_version_get_family,
2898 x86_cpuid_version_set_family, NULL, NULL, NULL);
2899 object_property_add(obj, "model", "int",
2900 x86_cpuid_version_get_model,
2901 x86_cpuid_version_set_model, NULL, NULL, NULL);
2902 object_property_add(obj, "stepping", "int",
2903 x86_cpuid_version_get_stepping,
2904 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
2905 object_property_add(obj, "level", "int",
2906 x86_cpuid_get_level,
2907 x86_cpuid_set_level, NULL, NULL, NULL);
2908 object_property_add(obj, "xlevel", "int",
2909 x86_cpuid_get_xlevel,
2910 x86_cpuid_set_xlevel, NULL, NULL, NULL);
2911 object_property_add_str(obj, "vendor",
2912 x86_cpuid_get_vendor,
2913 x86_cpuid_set_vendor, NULL);
2914 object_property_add_str(obj, "model-id",
2915 x86_cpuid_get_model_id,
2916 x86_cpuid_set_model_id, NULL);
2917 object_property_add(obj, "tsc-frequency", "int",
2918 x86_cpuid_get_tsc_freq,
2919 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
2920 object_property_add(obj, "apic-id", "int",
2921 x86_cpuid_get_apic_id,
2922 x86_cpuid_set_apic_id, NULL, NULL, NULL);
2923 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
2924 x86_cpu_get_feature_words,
2925 NULL, NULL, (void *)env->features, NULL);
2926 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
2927 x86_cpu_get_feature_words,
2928 NULL, NULL, (void *)cpu->filtered_features, NULL);
2930 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
2932 #ifndef CONFIG_USER_ONLY
2933 /* Any code creating new X86CPU objects has to set apic-id explicitly */
2934 cpu->apic_id = -1;
2935 #endif
2937 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
2939 /* init various static tables used in TCG mode */
2940 if (tcg_enabled() && !inited) {
2941 inited = 1;
2942 optimize_flags_init();
2946 static int64_t x86_cpu_get_arch_id(CPUState *cs)
2948 X86CPU *cpu = X86_CPU(cs);
2950 return cpu->apic_id;
2953 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
2955 X86CPU *cpu = X86_CPU(cs);
2957 return cpu->env.cr[0] & CR0_PG_MASK;
2960 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
2962 X86CPU *cpu = X86_CPU(cs);
2964 cpu->env.eip = value;
2967 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
2969 X86CPU *cpu = X86_CPU(cs);
2971 cpu->env.eip = tb->pc - tb->cs_base;
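/* The CPU has work if a hard interrupt is pending while IF is set, or if any
 * NMI/INIT/SIPI/MCE interrupt is pending regardless of IF.  Pending APIC
 * interrupts are polled first when CPU_INTERRUPT_POLL is set. */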
2974 static bool x86_cpu_has_work(CPUState *cs)
2976 X86CPU *cpu = X86_CPU(cs);
2977 CPUX86State *env = &cpu->env;
2979 #if !defined(CONFIG_USER_ONLY)
2980 if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
2981 apic_poll_irq(cpu->apic_state);
2982 cpu_reset_interrupt(cs, CPU_INTERRUPT_POLL);
2984 #endif
2986 return ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
2987 (env->eflags & IF_MASK)) ||
2988 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
2989 CPU_INTERRUPT_INIT |
2990 CPU_INTERRUPT_SIPI |
2991 CPU_INTERRUPT_MCE));
2994 static Property x86_cpu_properties[] = {
2995 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
2996 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
2997 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
2998 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
2999 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
3000 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, false),
3001 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
3002 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
3003 DEFINE_PROP_END_OF_LIST()
3006 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
3008 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3009 CPUClass *cc = CPU_CLASS(oc);
3010 DeviceClass *dc = DEVICE_CLASS(oc);
3012 xcc->parent_realize = dc->realize;
3013 dc->realize = x86_cpu_realizefn;
3014 dc->bus_type = TYPE_ICC_BUS;
3015 dc->props = x86_cpu_properties;
3017 xcc->parent_reset = cc->reset;
3018 cc->reset = x86_cpu_reset;
3019 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
3021 cc->class_by_name = x86_cpu_class_by_name;
3022 cc->parse_features = x86_cpu_parse_featurestr;
3023 cc->has_work = x86_cpu_has_work;
3024 cc->do_interrupt = x86_cpu_do_interrupt;
3025 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
3026 cc->dump_state = x86_cpu_dump_state;
3027 cc->set_pc = x86_cpu_set_pc;
3028 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
3029 cc->gdb_read_register = x86_cpu_gdb_read_register;
3030 cc->gdb_write_register = x86_cpu_gdb_write_register;
3031 cc->get_arch_id = x86_cpu_get_arch_id;
3032 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
3033 #ifdef CONFIG_USER_ONLY
3034 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
3035 #else
3036 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
3037 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
3038 cc->write_elf64_note = x86_cpu_write_elf64_note;
3039 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
3040 cc->write_elf32_note = x86_cpu_write_elf32_note;
3041 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
3042 cc->vmsd = &vmstate_x86_cpu;
3043 #endif
3044 cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
3045 #ifndef CONFIG_USER_ONLY
3046 cc->debug_excp_handler = breakpoint_handler;
3047 #endif
3048 cc->cpu_exec_enter = x86_cpu_exec_enter;
3049 cc->cpu_exec_exit = x86_cpu_exec_exit;
3052 static const TypeInfo x86_cpu_type_info = {
3053 .name = TYPE_X86_CPU,
3054 .parent = TYPE_CPU,
3055 .instance_size = sizeof(X86CPU),
3056 .instance_init = x86_cpu_initfn,
3057 .abstract = true,
3058 .class_size = sizeof(X86CPUClass),
3059 .class_init = x86_cpu_common_class_init,
3062 static void x86_cpu_register_types(void)
3064 int i;
3066 type_register_static(&x86_cpu_type_info);
3067 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3068 x86_register_cpudef_type(&builtin_x86_defs[i]);
3070 #ifdef CONFIG_KVM
3071 type_register_static(&host_x86_cpu_type_info);
3072 #endif
3075 type_init(x86_cpu_register_types)