target-i386: Remove ABM from qemu64 CPU model
[qemu/ar7.git] / target-i386 / cpu.c
blob90e4529208e5d7c73519107d2f13d318a43c4d24
1 /*
2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include <stdlib.h>
20 #include <stdio.h>
21 #include <string.h>
22 #include <inttypes.h>
24 #include "cpu.h"
25 #include "sysemu/kvm.h"
26 #include "sysemu/cpus.h"
27 #include "kvm_i386.h"
29 #include "qemu/error-report.h"
30 #include "qemu/option.h"
31 #include "qemu/config-file.h"
32 #include "qapi/qmp/qerror.h"
34 #include "qapi-types.h"
35 #include "qapi-visit.h"
36 #include "qapi/visitor.h"
37 #include "sysemu/arch_init.h"
39 #include "hw/hw.h"
40 #if defined(CONFIG_KVM)
41 #include <linux/kvm_para.h>
42 #endif
44 #include "sysemu/sysemu.h"
45 #include "hw/qdev-properties.h"
46 #ifndef CONFIG_USER_ONLY
47 #include "exec/address-spaces.h"
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
50 #endif
/* Cache topology CPUID constants: */

/* CPUID Leaf 2 Descriptors */

#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
#define CPUID_2_L1I_32KB_8WAY_64B 0x30
#define CPUID_2_L2_2MB_8WAY_64B   0x7d


/* CPUID Leaf 4 constants: */

/* EAX: */
#define CPUID_4_TYPE_DCACHE  1
#define CPUID_4_TYPE_ICACHE  2
#define CPUID_4_TYPE_UNIFIED 3

#define CPUID_4_LEVEL(l)          ((l) << 5)

#define CPUID_4_SELF_INIT_LEVEL (1 << 8)
#define CPUID_4_FULLY_ASSOC     (1 << 9)

/* EDX: */
#define CPUID_4_NO_INVD_SHARING (1 << 0)
#define CPUID_4_INCLUSIVE       (1 << 1)
#define CPUID_4_COMPLEX_IDX     (1 << 2)

#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006:
 * The parameter is parenthesized on every use so that expression
 * arguments (e.g. AMD_ENC_ASSOC(x ? a : b)) cannot misparse.
 */
#define AMD_ENC_ASSOC(a) ((a) <=   1 ? (a) : \
                          (a) ==   2 ? 0x2 : \
                          (a) ==   4 ? 0x4 : \
                          (a) ==   8 ? 0x6 : \
                          (a) ==  16 ? 0x8 : \
                          (a) ==  32 ? 0xA : \
                          (a) ==  48 ? 0xB : \
                          (a) ==  64 ? 0xC : \
                          (a) ==  96 ? 0xD : \
                          (a) == 128 ? 0xE : \
                          (a) == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)


/* Definitions of the hardcoded cache entries we expose: */

/* L1 data cache: */
#define L1D_LINE_SIZE         64
#define L1D_ASSOCIATIVITY      8
#define L1D_SETS              64
#define L1D_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1D_LINES_PER_TAG      1
#define L1D_SIZE_KB_AMD       64
#define L1D_ASSOCIATIVITY_AMD  2

/* L1 instruction cache: */
#define L1I_LINE_SIZE         64
#define L1I_ASSOCIATIVITY      8
#define L1I_SETS              64
#define L1I_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1I_LINES_PER_TAG      1
#define L1I_SIZE_KB_AMD       64
#define L1I_ASSOCIATIVITY_AMD  2

/* Level 2 unified cache: */
#define L2_LINE_SIZE          64
#define L2_ASSOCIATIVITY      16
#define L2_SETS             4096
#define L2_PARTITIONS          1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
#define L2_LINES_PER_TAG       1
#define L2_SIZE_KB_AMD       512

/* No L3 cache: */
#define L3_SIZE_KB             0 /* disabled */
#define L3_ASSOCIATIVITY       0 /* disabled */
#define L3_LINES_PER_TAG       0 /* disabled */
#define L3_LINE_SIZE           0 /* disabled */

/* TLB definitions: */

#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512
164 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
165 uint32_t vendor2, uint32_t vendor3)
167 int i;
168 for (i = 0; i < 4; i++) {
169 dst[i] = vendor1 >> (8 * i);
170 dst[i + 4] = vendor2 >> (8 * i);
171 dst[i + 8] = vendor3 >> (8 * i);
173 dst[CPUID_VENDOR_SZ] = '\0';
/* feature flags taken from "Intel Processor Identification and the CPUID
 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
 * between feature naming conventions, aliases may be added.
 */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse",
    "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep",
    "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
    NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss",
    "ht" /* Intel htt */, "tm", "ia64", "pbe",
};

static const char *ext_feature_name[] = {
    "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
    "ds_cpl", "vmx", "smx", "est",
    "tm2", "ssse3", "cid", NULL,
    "fma", "cx16", "xtpr", "pdcm",
    NULL, "pcid", "dca", "sse4.1|sse4_1",
    "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
    "tsc-deadline", "aes", "xsave", "osxsave",
    "avx", "f16c", "rdrand", "hypervisor",
};

/* Feature names that are already defined on feature_name[] but are set on
 * CPUID[8000_0001].EDX on AMD CPUs don't have their names on
 * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
 * if and only if CPU vendor is AMD.
 */
static const char *ext2_feature_name[] = {
    NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
    NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
    NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
    NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
    NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
    "nx|xd", NULL, "mmxext", NULL /* mmx */,
    NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
    NULL, "lm|i64", "3dnowext", "3dnow",
};

static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
    "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", "ibs", "xop",
    "skinit", "wdt", NULL, "lwp",
    "fma4", "tce", NULL, "nodeid_msr",
    NULL, "tbm", "topoext", "perfctr_core",
    "perfctr_nb", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

static const char *ext4_feature_name[] = {
    NULL, NULL, "xstore", "xstore-en",
    NULL, NULL, "xcrypt", "xcrypt-en",
    "ace2", "ace2-en", "phe", "phe-en",
    "pmm", "pmm-en", NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

static const char *kvm_feature_name[] = {
    "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
    "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    "kvmclock-stable-bit", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

static const char *svm_feature_name[] = {
    "npt", "lbrv", "svm_lock", "nrip_save",
    "tsc_scale", "vmcb_clean",  "flushbyasid", "decodeassists",
    NULL, NULL, "pause_filter", NULL,
    "pfthreshold", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

static const char *cpuid_7_0_ebx_feature_name[] = {
    "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
    "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
    "avx512f", NULL, "rdseed", "adx", "smap", NULL, NULL, NULL,
    NULL, NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
};

static const char *cpuid_apm_edx_feature_name[] = {
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    "invtsc", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

static const char *cpuid_xsave_feature_name[] = {
    "xsaveopt", "xsavec", "xgetbv1", "xsaves",
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

static const char *cpuid_6_feature_name[] = {
    NULL, NULL, "arat", NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* Feature sets of the historic CPU models, and of what each accelerator
 * can emulate.  All values are built from the CPUID_* bit constants in cpu.h.
 */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_XSAVE,
          CPUID_EXT_OSXSAVE, CPUID_EXT_AVX, CPUID_EXT_F16C,
          CPUID_EXT_RDRAND */

#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES 0
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX)
          /* missing:
          CPUID_7_0_EBX_FSGSBASE, CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
/* Description of one CPUID feature word: where it comes from (input
 * leaf/subleaf and output register), the bit-name table used to parse
 * "-cpu" flags, and which bits TCG can emulate / cannot migrate.
 */
typedef struct FeatureWordInfo {
    const char **feat_names;     /* per-bit flag names (NULL = unknown bit) */
    uint32_t cpuid_eax;          /* Input EAX for CPUID */
    bool cpuid_needs_ecx;        /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;          /* Input ECX value for CPUID */
    int cpuid_reg;               /* output register (R_* constant) */
    uint32_t tcg_features;       /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
} FeatureWordInfo;
367 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
368 [FEAT_1_EDX] = {
369 .feat_names = feature_name,
370 .cpuid_eax = 1, .cpuid_reg = R_EDX,
371 .tcg_features = TCG_FEATURES,
373 [FEAT_1_ECX] = {
374 .feat_names = ext_feature_name,
375 .cpuid_eax = 1, .cpuid_reg = R_ECX,
376 .tcg_features = TCG_EXT_FEATURES,
378 [FEAT_8000_0001_EDX] = {
379 .feat_names = ext2_feature_name,
380 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
381 .tcg_features = TCG_EXT2_FEATURES,
383 [FEAT_8000_0001_ECX] = {
384 .feat_names = ext3_feature_name,
385 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
386 .tcg_features = TCG_EXT3_FEATURES,
388 [FEAT_C000_0001_EDX] = {
389 .feat_names = ext4_feature_name,
390 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
391 .tcg_features = TCG_EXT4_FEATURES,
393 [FEAT_KVM] = {
394 .feat_names = kvm_feature_name,
395 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
396 .tcg_features = TCG_KVM_FEATURES,
398 [FEAT_SVM] = {
399 .feat_names = svm_feature_name,
400 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
401 .tcg_features = TCG_SVM_FEATURES,
403 [FEAT_7_0_EBX] = {
404 .feat_names = cpuid_7_0_ebx_feature_name,
405 .cpuid_eax = 7,
406 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
407 .cpuid_reg = R_EBX,
408 .tcg_features = TCG_7_0_EBX_FEATURES,
410 [FEAT_8000_0007_EDX] = {
411 .feat_names = cpuid_apm_edx_feature_name,
412 .cpuid_eax = 0x80000007,
413 .cpuid_reg = R_EDX,
414 .tcg_features = TCG_APM_FEATURES,
415 .unmigratable_flags = CPUID_APM_INVTSC,
417 [FEAT_XSAVE] = {
418 .feat_names = cpuid_xsave_feature_name,
419 .cpuid_eax = 0xd,
420 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
421 .cpuid_reg = R_EAX,
422 .tcg_features = 0,
424 [FEAT_6_EAX] = {
425 .feat_names = cpuid_6_feature_name,
426 .cpuid_eax = 6, .cpuid_reg = R_EAX,
427 .tcg_features = TCG_6_EAX_FEATURES,
431 typedef struct X86RegisterInfo32 {
432 /* Name of register */
433 const char *name;
434 /* QAPI enum value register */
435 X86CPURegister32 qapi_enum;
436 } X86RegisterInfo32;
438 #define REGISTER(reg) \
439 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
440 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
441 REGISTER(EAX),
442 REGISTER(ECX),
443 REGISTER(EDX),
444 REGISTER(EBX),
445 REGISTER(ESP),
446 REGISTER(EBP),
447 REGISTER(ESI),
448 REGISTER(EDI),
450 #undef REGISTER
452 typedef struct ExtSaveArea {
453 uint32_t feature, bits;
454 uint32_t offset, size;
455 } ExtSaveArea;
457 static const ExtSaveArea ext_save_areas[] = {
458 [2] = { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
459 .offset = 0x240, .size = 0x100 },
460 [3] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
461 .offset = 0x3c0, .size = 0x40 },
462 [4] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
463 .offset = 0x400, .size = 0x40 },
464 [5] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
465 .offset = 0x440, .size = 0x40 },
466 [6] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
467 .offset = 0x480, .size = 0x200 },
468 [7] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
469 .offset = 0x680, .size = 0x400 },
472 const char *get_register_name_32(unsigned int reg)
474 if (reg >= CPU_NB_REGS32) {
475 return NULL;
477 return x86_reg_info_32[reg].name;
481 * Returns the set of feature flags that are supported and migratable by
482 * QEMU, for a given FeatureWord.
484 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
486 FeatureWordInfo *wi = &feature_word_info[w];
487 uint32_t r = 0;
488 int i;
490 for (i = 0; i < 32; i++) {
491 uint32_t f = 1U << i;
492 /* If the feature name is unknown, it is not supported by QEMU yet */
493 if (!wi->feat_names[i]) {
494 continue;
496 /* Skip features known to QEMU, but explicitly marked as unmigratable */
497 if (wi->unmigratable_flags & f) {
498 continue;
500 r |= f;
502 return r;
505 void host_cpuid(uint32_t function, uint32_t count,
506 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
508 uint32_t vec[4];
510 #ifdef __x86_64__
511 asm volatile("cpuid"
512 : "=a"(vec[0]), "=b"(vec[1]),
513 "=c"(vec[2]), "=d"(vec[3])
514 : "0"(function), "c"(count) : "cc");
515 #elif defined(__i386__)
516 asm volatile("pusha \n\t"
517 "cpuid \n\t"
518 "mov %%eax, 0(%2) \n\t"
519 "mov %%ebx, 4(%2) \n\t"
520 "mov %%ecx, 8(%2) \n\t"
521 "mov %%edx, 12(%2) \n\t"
522 "popa"
523 : : "a"(function), "c"(count), "S"(vec)
524 : "memory", "cc");
525 #else
526 abort();
527 #endif
529 if (eax)
530 *eax = vec[0];
531 if (ebx)
532 *ebx = vec[1];
533 if (ecx)
534 *ecx = vec[2];
535 if (edx)
536 *edx = vec[3];
/* True for NUL-adjacent "whitespace": any non-zero char outside ' '..'~'. */
#define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))

/* general substring compare of *[s1..e1) and *[s2..e2). sx is start of
 * a substring. ex if !NULL points to the first char after a substring,
 * otherwise the string is assumed to sized by a terminating nul.
 * Return lexical ordering of *s1:*s2.
 */
static int sstrcmp(const char *s1, const char *e1,
                   const char *s2, const char *e2)
{
    while (1) {
        if (!*s1 || !*s2 || *s1 != *s2) {
            return *s1 - *s2;
        }
        s1++;
        s2++;
        if (s1 == e1 && s2 == e2) {
            return 0;
        }
        if (s1 == e1) {
            return *s2;
        }
        if (s2 == e2) {
            return *s1;
        }
    }
}

/* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
 * '|' delimited (possibly empty) strings in which case search for a match
 * within the alternatives proceeds left to right. Return 0 for success,
 * non-zero otherwise.
 */
static int altcmp(const char *s, const char *e, const char *altstr)
{
    const char *alt_start = altstr;
    const char *alt_end = altstr;

    for (;;) {
        /* advance alt_end to the end of the current alternative */
        while (*alt_end && *alt_end != '|') {
            alt_end++;
        }
        /* an empty alternative matches only an empty flag name */
        if ((alt_start == alt_end && !*s) ||
            (alt_start != alt_end && !sstrcmp(s, e, alt_start, alt_end))) {
            return 0;
        }
        if (!*alt_end) {
            return 1;
        }
        alt_start = ++alt_end;
    }
}

/* search featureset for flag *[s..e), if found set corresponding bit in
 * *pval and return true, otherwise return false
 */
static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
                           const char **featureset)
{
    const char **name = featureset;
    uint32_t bit;
    bool found = false;

    /* one name per bit; the loop ends when the shift wraps to zero */
    for (bit = 1; bit; bit <<= 1, name++) {
        if (*name && !altcmp(s, e, *name)) {
            *pval |= bit;
            found = true;
        }
    }
    return found;
}
602 static void add_flagname_to_bitmaps(const char *flagname,
603 FeatureWordArray words,
604 Error **errp)
606 FeatureWord w;
607 for (w = 0; w < FEATURE_WORDS; w++) {
608 FeatureWordInfo *wi = &feature_word_info[w];
609 if (wi->feat_names &&
610 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
611 break;
614 if (w == FEATURE_WORDS) {
615 error_setg(errp, "CPU feature %s not found", flagname);
619 /* CPU class name definitions: */
621 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
622 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
624 /* Return type name for a given CPU model name
625 * Caller is responsible for freeing the returned string.
627 static char *x86_cpu_type_name(const char *model_name)
629 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
632 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
634 ObjectClass *oc;
635 char *typename;
637 if (cpu_model == NULL) {
638 return NULL;
641 typename = x86_cpu_type_name(cpu_model);
642 oc = object_class_by_name(typename);
643 g_free(typename);
644 return oc;
647 struct X86CPUDefinition {
648 const char *name;
649 uint32_t level;
650 uint32_t xlevel;
651 uint32_t xlevel2;
652 /* vendor is zero-terminated, 12 character ASCII string */
653 char vendor[CPUID_VENDOR_SZ + 1];
654 int family;
655 int model;
656 int stepping;
657 FeatureWordArray features;
658 char model_id[48];
661 static X86CPUDefinition builtin_x86_defs[] = {
663 .name = "qemu64",
664 .level = 0xd,
665 .vendor = CPUID_VENDOR_AMD,
666 .family = 6,
667 .model = 6,
668 .stepping = 3,
669 .features[FEAT_1_EDX] =
670 PPRO_FEATURES |
671 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
672 CPUID_PSE36,
673 .features[FEAT_1_ECX] =
674 CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT,
675 .features[FEAT_8000_0001_EDX] =
676 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
677 .features[FEAT_8000_0001_ECX] =
678 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
679 .xlevel = 0x8000000A,
682 .name = "phenom",
683 .level = 5,
684 .vendor = CPUID_VENDOR_AMD,
685 .family = 16,
686 .model = 2,
687 .stepping = 3,
688 /* Missing: CPUID_HT */
689 .features[FEAT_1_EDX] =
690 PPRO_FEATURES |
691 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
692 CPUID_PSE36 | CPUID_VME,
693 .features[FEAT_1_ECX] =
694 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
695 CPUID_EXT_POPCNT,
696 .features[FEAT_8000_0001_EDX] =
697 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
698 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
699 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
700 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
701 CPUID_EXT3_CR8LEG,
702 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
703 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
704 .features[FEAT_8000_0001_ECX] =
705 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
706 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
707 /* Missing: CPUID_SVM_LBRV */
708 .features[FEAT_SVM] =
709 CPUID_SVM_NPT,
710 .xlevel = 0x8000001A,
711 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
714 .name = "core2duo",
715 .level = 10,
716 .vendor = CPUID_VENDOR_INTEL,
717 .family = 6,
718 .model = 15,
719 .stepping = 11,
720 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
721 .features[FEAT_1_EDX] =
722 PPRO_FEATURES |
723 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
724 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
725 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
726 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
727 .features[FEAT_1_ECX] =
728 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
729 CPUID_EXT_CX16,
730 .features[FEAT_8000_0001_EDX] =
731 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
732 .features[FEAT_8000_0001_ECX] =
733 CPUID_EXT3_LAHF_LM,
734 .xlevel = 0x80000008,
735 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
738 .name = "kvm64",
739 .level = 0xd,
740 .vendor = CPUID_VENDOR_INTEL,
741 .family = 15,
742 .model = 6,
743 .stepping = 1,
744 /* Missing: CPUID_HT */
745 .features[FEAT_1_EDX] =
746 PPRO_FEATURES | CPUID_VME |
747 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
748 CPUID_PSE36,
749 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
750 .features[FEAT_1_ECX] =
751 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
752 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
753 .features[FEAT_8000_0001_EDX] =
754 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
755 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
756 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
757 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
758 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
759 .features[FEAT_8000_0001_ECX] =
761 .xlevel = 0x80000008,
762 .model_id = "Common KVM processor"
765 .name = "qemu32",
766 .level = 4,
767 .vendor = CPUID_VENDOR_INTEL,
768 .family = 6,
769 .model = 6,
770 .stepping = 3,
771 .features[FEAT_1_EDX] =
772 PPRO_FEATURES,
773 .features[FEAT_1_ECX] =
774 CPUID_EXT_SSE3 | CPUID_EXT_POPCNT,
775 .xlevel = 0x80000004,
778 .name = "kvm32",
779 .level = 5,
780 .vendor = CPUID_VENDOR_INTEL,
781 .family = 15,
782 .model = 6,
783 .stepping = 1,
784 .features[FEAT_1_EDX] =
785 PPRO_FEATURES | CPUID_VME |
786 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
787 .features[FEAT_1_ECX] =
788 CPUID_EXT_SSE3,
789 .features[FEAT_8000_0001_ECX] =
791 .xlevel = 0x80000008,
792 .model_id = "Common 32-bit KVM processor"
795 .name = "coreduo",
796 .level = 10,
797 .vendor = CPUID_VENDOR_INTEL,
798 .family = 6,
799 .model = 14,
800 .stepping = 8,
801 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
802 .features[FEAT_1_EDX] =
803 PPRO_FEATURES | CPUID_VME |
804 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
805 CPUID_SS,
806 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
807 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
808 .features[FEAT_1_ECX] =
809 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
810 .features[FEAT_8000_0001_EDX] =
811 CPUID_EXT2_NX,
812 .xlevel = 0x80000008,
813 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
816 .name = "486",
817 .level = 1,
818 .vendor = CPUID_VENDOR_INTEL,
819 .family = 4,
820 .model = 8,
821 .stepping = 0,
822 .features[FEAT_1_EDX] =
823 I486_FEATURES,
824 .xlevel = 0,
827 .name = "pentium",
828 .level = 1,
829 .vendor = CPUID_VENDOR_INTEL,
830 .family = 5,
831 .model = 4,
832 .stepping = 3,
833 .features[FEAT_1_EDX] =
834 PENTIUM_FEATURES,
835 .xlevel = 0,
838 .name = "pentium2",
839 .level = 2,
840 .vendor = CPUID_VENDOR_INTEL,
841 .family = 6,
842 .model = 5,
843 .stepping = 2,
844 .features[FEAT_1_EDX] =
845 PENTIUM2_FEATURES,
846 .xlevel = 0,
849 .name = "pentium3",
850 .level = 3,
851 .vendor = CPUID_VENDOR_INTEL,
852 .family = 6,
853 .model = 7,
854 .stepping = 3,
855 .features[FEAT_1_EDX] =
856 PENTIUM3_FEATURES,
857 .xlevel = 0,
860 .name = "athlon",
861 .level = 2,
862 .vendor = CPUID_VENDOR_AMD,
863 .family = 6,
864 .model = 2,
865 .stepping = 3,
866 .features[FEAT_1_EDX] =
867 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
868 CPUID_MCA,
869 .features[FEAT_8000_0001_EDX] =
870 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
871 .xlevel = 0x80000008,
874 .name = "n270",
875 .level = 10,
876 .vendor = CPUID_VENDOR_INTEL,
877 .family = 6,
878 .model = 28,
879 .stepping = 2,
880 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
881 .features[FEAT_1_EDX] =
882 PPRO_FEATURES |
883 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
884 CPUID_ACPI | CPUID_SS,
885 /* Some CPUs got no CPUID_SEP */
886 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
887 * CPUID_EXT_XTPR */
888 .features[FEAT_1_ECX] =
889 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
890 CPUID_EXT_MOVBE,
891 .features[FEAT_8000_0001_EDX] =
892 CPUID_EXT2_NX,
893 .features[FEAT_8000_0001_ECX] =
894 CPUID_EXT3_LAHF_LM,
895 .xlevel = 0x80000008,
896 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
899 .name = "Conroe",
900 .level = 10,
901 .vendor = CPUID_VENDOR_INTEL,
902 .family = 6,
903 .model = 15,
904 .stepping = 3,
905 .features[FEAT_1_EDX] =
906 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
907 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
908 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
909 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
910 CPUID_DE | CPUID_FP87,
911 .features[FEAT_1_ECX] =
912 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
913 .features[FEAT_8000_0001_EDX] =
914 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
915 .features[FEAT_8000_0001_ECX] =
916 CPUID_EXT3_LAHF_LM,
917 .xlevel = 0x80000008,
918 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
921 .name = "Penryn",
922 .level = 10,
923 .vendor = CPUID_VENDOR_INTEL,
924 .family = 6,
925 .model = 23,
926 .stepping = 3,
927 .features[FEAT_1_EDX] =
928 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
929 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
930 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
931 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
932 CPUID_DE | CPUID_FP87,
933 .features[FEAT_1_ECX] =
934 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
935 CPUID_EXT_SSE3,
936 .features[FEAT_8000_0001_EDX] =
937 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
938 .features[FEAT_8000_0001_ECX] =
939 CPUID_EXT3_LAHF_LM,
940 .xlevel = 0x80000008,
941 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
944 .name = "Nehalem",
945 .level = 11,
946 .vendor = CPUID_VENDOR_INTEL,
947 .family = 6,
948 .model = 26,
949 .stepping = 3,
950 .features[FEAT_1_EDX] =
951 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
952 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
953 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
954 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
955 CPUID_DE | CPUID_FP87,
956 .features[FEAT_1_ECX] =
957 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
958 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
959 .features[FEAT_8000_0001_EDX] =
960 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
961 .features[FEAT_8000_0001_ECX] =
962 CPUID_EXT3_LAHF_LM,
963 .xlevel = 0x80000008,
964 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
967 .name = "Westmere",
968 .level = 11,
969 .vendor = CPUID_VENDOR_INTEL,
970 .family = 6,
971 .model = 44,
972 .stepping = 1,
973 .features[FEAT_1_EDX] =
974 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
975 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
976 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
977 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
978 CPUID_DE | CPUID_FP87,
979 .features[FEAT_1_ECX] =
980 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
981 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
982 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
983 .features[FEAT_8000_0001_EDX] =
984 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
985 .features[FEAT_8000_0001_ECX] =
986 CPUID_EXT3_LAHF_LM,
987 .features[FEAT_6_EAX] =
988 CPUID_6_EAX_ARAT,
989 .xlevel = 0x80000008,
990 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
993 .name = "SandyBridge",
994 .level = 0xd,
995 .vendor = CPUID_VENDOR_INTEL,
996 .family = 6,
997 .model = 42,
998 .stepping = 1,
999 .features[FEAT_1_EDX] =
1000 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1001 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1002 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1003 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1004 CPUID_DE | CPUID_FP87,
1005 .features[FEAT_1_ECX] =
1006 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1007 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1008 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1009 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1010 CPUID_EXT_SSE3,
1011 .features[FEAT_8000_0001_EDX] =
1012 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1013 CPUID_EXT2_SYSCALL,
1014 .features[FEAT_8000_0001_ECX] =
1015 CPUID_EXT3_LAHF_LM,
1016 .features[FEAT_XSAVE] =
1017 CPUID_XSAVE_XSAVEOPT,
1018 .features[FEAT_6_EAX] =
1019 CPUID_6_EAX_ARAT,
1020 .xlevel = 0x80000008,
1021 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1024 .name = "IvyBridge",
1025 .level = 0xd,
1026 .vendor = CPUID_VENDOR_INTEL,
1027 .family = 6,
1028 .model = 58,
1029 .stepping = 9,
1030 .features[FEAT_1_EDX] =
1031 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1032 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1033 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1034 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1035 CPUID_DE | CPUID_FP87,
1036 .features[FEAT_1_ECX] =
1037 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1038 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1039 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1040 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1041 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1042 .features[FEAT_7_0_EBX] =
1043 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1044 CPUID_7_0_EBX_ERMS,
1045 .features[FEAT_8000_0001_EDX] =
1046 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1047 CPUID_EXT2_SYSCALL,
1048 .features[FEAT_8000_0001_ECX] =
1049 CPUID_EXT3_LAHF_LM,
1050 .features[FEAT_XSAVE] =
1051 CPUID_XSAVE_XSAVEOPT,
1052 .features[FEAT_6_EAX] =
1053 CPUID_6_EAX_ARAT,
1054 .xlevel = 0x80000008,
1055 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1058 .name = "Haswell-noTSX",
1059 .level = 0xd,
1060 .vendor = CPUID_VENDOR_INTEL,
1061 .family = 6,
1062 .model = 60,
1063 .stepping = 1,
1064 .features[FEAT_1_EDX] =
1065 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1066 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1067 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1068 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1069 CPUID_DE | CPUID_FP87,
1070 .features[FEAT_1_ECX] =
1071 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1072 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1073 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1074 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1075 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1076 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1077 .features[FEAT_8000_0001_EDX] =
1078 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1079 CPUID_EXT2_SYSCALL,
1080 .features[FEAT_8000_0001_ECX] =
1081 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1082 .features[FEAT_7_0_EBX] =
1083 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1084 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1085 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1086 .features[FEAT_XSAVE] =
1087 CPUID_XSAVE_XSAVEOPT,
1088 .features[FEAT_6_EAX] =
1089 CPUID_6_EAX_ARAT,
1090 .xlevel = 0x80000008,
1091 .model_id = "Intel Core Processor (Haswell, no TSX)",
1092 }, {
1093 .name = "Haswell",
1094 .level = 0xd,
1095 .vendor = CPUID_VENDOR_INTEL,
1096 .family = 6,
1097 .model = 60,
1098 .stepping = 1,
1099 .features[FEAT_1_EDX] =
1100 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1101 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1102 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1103 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1104 CPUID_DE | CPUID_FP87,
1105 .features[FEAT_1_ECX] =
1106 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1107 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1108 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1109 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1110 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1111 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1112 .features[FEAT_8000_0001_EDX] =
1113 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1114 CPUID_EXT2_SYSCALL,
1115 .features[FEAT_8000_0001_ECX] =
1116 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1117 .features[FEAT_7_0_EBX] =
1118 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1119 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1120 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1121 CPUID_7_0_EBX_RTM,
1122 .features[FEAT_XSAVE] =
1123 CPUID_XSAVE_XSAVEOPT,
1124 .features[FEAT_6_EAX] =
1125 CPUID_6_EAX_ARAT,
1126 .xlevel = 0x80000008,
1127 .model_id = "Intel Core Processor (Haswell)",
1130 .name = "Broadwell-noTSX",
1131 .level = 0xd,
1132 .vendor = CPUID_VENDOR_INTEL,
1133 .family = 6,
1134 .model = 61,
1135 .stepping = 2,
1136 .features[FEAT_1_EDX] =
1137 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1138 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1139 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1140 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1141 CPUID_DE | CPUID_FP87,
1142 .features[FEAT_1_ECX] =
1143 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1144 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1145 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1146 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1147 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1148 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1149 .features[FEAT_8000_0001_EDX] =
1150 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1151 CPUID_EXT2_SYSCALL,
1152 .features[FEAT_8000_0001_ECX] =
1153 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1154 .features[FEAT_7_0_EBX] =
1155 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1156 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1157 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1158 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1159 CPUID_7_0_EBX_SMAP,
1160 .features[FEAT_XSAVE] =
1161 CPUID_XSAVE_XSAVEOPT,
1162 .features[FEAT_6_EAX] =
1163 CPUID_6_EAX_ARAT,
1164 .xlevel = 0x80000008,
1165 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1168 .name = "Broadwell",
1169 .level = 0xd,
1170 .vendor = CPUID_VENDOR_INTEL,
1171 .family = 6,
1172 .model = 61,
1173 .stepping = 2,
1174 .features[FEAT_1_EDX] =
1175 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1176 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1177 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1178 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1179 CPUID_DE | CPUID_FP87,
1180 .features[FEAT_1_ECX] =
1181 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1182 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1183 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1184 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1185 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1186 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1187 .features[FEAT_8000_0001_EDX] =
1188 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1189 CPUID_EXT2_SYSCALL,
1190 .features[FEAT_8000_0001_ECX] =
1191 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1192 .features[FEAT_7_0_EBX] =
1193 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1194 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1195 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1196 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1197 CPUID_7_0_EBX_SMAP,
1198 .features[FEAT_XSAVE] =
1199 CPUID_XSAVE_XSAVEOPT,
1200 .features[FEAT_6_EAX] =
1201 CPUID_6_EAX_ARAT,
1202 .xlevel = 0x80000008,
1203 .model_id = "Intel Core Processor (Broadwell)",
1206 .name = "Opteron_G1",
1207 .level = 5,
1208 .vendor = CPUID_VENDOR_AMD,
1209 .family = 15,
1210 .model = 6,
1211 .stepping = 1,
1212 .features[FEAT_1_EDX] =
1213 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1214 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1215 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1216 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1217 CPUID_DE | CPUID_FP87,
1218 .features[FEAT_1_ECX] =
1219 CPUID_EXT_SSE3,
1220 .features[FEAT_8000_0001_EDX] =
1221 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1222 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1223 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1224 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1225 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1226 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1227 .xlevel = 0x80000008,
1228 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1231 .name = "Opteron_G2",
1232 .level = 5,
1233 .vendor = CPUID_VENDOR_AMD,
1234 .family = 15,
1235 .model = 6,
1236 .stepping = 1,
1237 .features[FEAT_1_EDX] =
1238 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1239 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1240 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1241 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1242 CPUID_DE | CPUID_FP87,
1243 .features[FEAT_1_ECX] =
1244 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1245 .features[FEAT_8000_0001_EDX] =
1246 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1247 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1248 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1249 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1250 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1251 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1252 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1253 .features[FEAT_8000_0001_ECX] =
1254 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1255 .xlevel = 0x80000008,
1256 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1259 .name = "Opteron_G3",
1260 .level = 5,
1261 .vendor = CPUID_VENDOR_AMD,
1262 .family = 15,
1263 .model = 6,
1264 .stepping = 1,
1265 .features[FEAT_1_EDX] =
1266 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1267 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1268 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1269 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1270 CPUID_DE | CPUID_FP87,
1271 .features[FEAT_1_ECX] =
1272 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1273 CPUID_EXT_SSE3,
1274 .features[FEAT_8000_0001_EDX] =
1275 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1276 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1277 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1278 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1279 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1280 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1281 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1282 .features[FEAT_8000_0001_ECX] =
1283 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1284 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1285 .xlevel = 0x80000008,
1286 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1289 .name = "Opteron_G4",
1290 .level = 0xd,
1291 .vendor = CPUID_VENDOR_AMD,
1292 .family = 21,
1293 .model = 1,
1294 .stepping = 2,
1295 .features[FEAT_1_EDX] =
1296 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1297 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1298 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1299 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1300 CPUID_DE | CPUID_FP87,
1301 .features[FEAT_1_ECX] =
1302 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1303 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1304 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1305 CPUID_EXT_SSE3,
1306 .features[FEAT_8000_0001_EDX] =
1307 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1308 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1309 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1310 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1311 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1312 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1313 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1314 .features[FEAT_8000_0001_ECX] =
1315 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1316 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1317 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1318 CPUID_EXT3_LAHF_LM,
1319 /* no xsaveopt! */
1320 .xlevel = 0x8000001A,
1321 .model_id = "AMD Opteron 62xx class CPU",
1324 .name = "Opteron_G5",
1325 .level = 0xd,
1326 .vendor = CPUID_VENDOR_AMD,
1327 .family = 21,
1328 .model = 2,
1329 .stepping = 0,
1330 .features[FEAT_1_EDX] =
1331 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1332 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1333 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1334 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1335 CPUID_DE | CPUID_FP87,
1336 .features[FEAT_1_ECX] =
1337 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1338 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1339 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1340 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1341 .features[FEAT_8000_0001_EDX] =
1342 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1343 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1344 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1345 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1346 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1347 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1348 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1349 .features[FEAT_8000_0001_ECX] =
1350 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1351 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1352 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1353 CPUID_EXT3_LAHF_LM,
1354 /* no xsaveopt! */
1355 .xlevel = 0x8000001A,
1356 .model_id = "AMD Opteron 63xx class CPU",
/* Generic (property name, property value) string pair, used for tables of
 * QOM property overrides (see kvm_default_props / x86_cpu_apply_props). */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;
/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "x2apic", "on" },
    { "acpi", "off" },
    { "monitor", "off" },
    { "svm", "off" },
    { NULL, NULL },  /* end-of-table sentinel; iterated until prop == NULL */
};
1381 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1383 PropValue *pv;
1384 for (pv = kvm_default_props; pv->prop; pv++) {
1385 if (!strcmp(pv->prop, prop)) {
1386 pv->value = value;
1387 break;
1391 /* It is valid to call this function only for properties that
1392 * are already present in the kvm_default_props table.
1394 assert(pv->prop);
1397 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1398 bool migratable_only);
1400 #ifdef CONFIG_KVM
/* Fill 'str' (at least 48 bytes) with the host CPU's brand string.
 *
 * CPUID leaves 0x80000002..0x80000004 each return 16 bytes of the
 * 48-byte model-ID string, in EAX, EBX, ECX, EDX order.  Always
 * returns 0.
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t regs[4];
    int leaf;

    for (leaf = 0; leaf < 3; leaf++) {
        host_cpuid(0x80000002 + leaf, 0,
                   &regs[0], &regs[1], &regs[2], &regs[3]);
        /* The four registers are contiguous, so one copy suffices */
        memcpy(str + leaf * 16, regs, sizeof(regs));
    }
    return 0;
}
1417 static X86CPUDefinition host_cpudef;
static Property host_x86_cpu_properties[] = {
    /* When true, restrict the feature set to flags that can be migrated */
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    /* When true, pass the host cache topology through to the guest */
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};
/* class_init for the "host" CPU model
 *
 * This function may be called before KVM is initialized.
 */
static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    /* "-cpu host" is rejected at realize time unless KVM is in use */
    xcc->kvm_required = true;

    /* CPUID leaf 0: vendor string is packed into EBX, EDX, ECX (in that
     * order) */
    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);

    /* CPUID leaf 1 EAX: decode family/model/stepping, folding in the
     * extended family (bits 20-27) and extended model (bits 16-19) fields */
    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    host_cpudef.stepping = eax & 0x0F;

    cpu_x86_fill_model_id(host_cpudef.model_id);

    xcc->cpu_def = &host_cpudef;

    /* level, xlevel, xlevel2, and the feature words are initialized on
     * instance_init, because they require KVM to be initialized.
     */

    dc->props = host_x86_cpu_properties;
    /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
    dc->cannot_destroy_with_object_finalize_yet = true;
}
static void host_x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    KVMState *s = kvm_state;

    /* "-cpu host" only makes sense with KVM (see kvm_required) */
    assert(kvm_enabled());

    /* We can't fill the features array here because we don't know yet if
     * "migratable" is true or false.
     */
    cpu->host_features = true;

    /* Maximum basic, extended, and Centaur CPUID levels supported by KVM */
    env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
    env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
    env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);

    /* PMU is enabled by default for "-cpu host" */
    object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}
/* QOM type registration for the KVM-only "host" CPU model */
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = TYPE_X86_CPU,
    .instance_init = host_x86_cpu_initfn,
    .class_init = host_x86_cpu_class_init,
};

#endif
1487 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1489 FeatureWordInfo *f = &feature_word_info[w];
1490 int i;
1492 for (i = 0; i < 32; ++i) {
1493 if ((1UL << i) & mask) {
1494 const char *reg = get_register_name_32(f->cpuid_reg);
1495 assert(reg);
1496 fprintf(stderr, "warning: %s doesn't support requested feature: "
1497 "CPUID.%02XH:%s%s%s [bit %d]\n",
1498 kvm_enabled() ? "host" : "TCG",
1499 f->cpuid_eax, reg,
1500 f->feat_names[i] ? "." : "",
1501 f->feat_names[i] ? f->feat_names[i] : "", i);
1506 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, void *opaque,
1507 const char *name, Error **errp)
1509 X86CPU *cpu = X86_CPU(obj);
1510 CPUX86State *env = &cpu->env;
1511 int64_t value;
1513 value = (env->cpuid_version >> 8) & 0xf;
1514 if (value == 0xf) {
1515 value += (env->cpuid_version >> 20) & 0xff;
1517 visit_type_int(v, &value, name, errp);
/* QOM setter for the "family" property, encoded into CPUID version bits */
static void x86_cpuid_version_set_family(Object *obj, Visitor *v, void *opaque,
                                         const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    /* The base family field holds 0..0xf; larger values spill into the
     * 8-bit extended family field, hence the maximum of 0xff + 0xf */
    const int64_t min = 0;
    const int64_t max = 0xff + 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, &value, name, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    /* Clear family (bits 8-11) and extended family (bits 20-27) */
    env->cpuid_version &= ~0xff00f00;
    if (value > 0x0f) {
        /* Encode as family 0xf plus the remainder in extended family */
        env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
    } else {
        env->cpuid_version |= value << 8;
    }
}
1549 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, void *opaque,
1550 const char *name, Error **errp)
1552 X86CPU *cpu = X86_CPU(obj);
1553 CPUX86State *env = &cpu->env;
1554 int64_t value;
1556 value = (env->cpuid_version >> 4) & 0xf;
1557 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1558 visit_type_int(v, &value, name, errp);
/* QOM setter for the "model" property, encoded into CPUID version bits */
static void x86_cpuid_version_set_model(Object *obj, Visitor *v, void *opaque,
                                        const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    /* 4-bit model field plus 4-bit extended model field: 0..0xff */
    const int64_t min = 0;
    const int64_t max = 0xff;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, &value, name, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    /* Low nibble goes to bits 4-7, high nibble to the extended model
     * field at bits 16-19 */
    env->cpuid_version &= ~0xf00f0;
    env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
}
1586 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1587 void *opaque, const char *name,
1588 Error **errp)
1590 X86CPU *cpu = X86_CPU(obj);
1591 CPUX86State *env = &cpu->env;
1592 int64_t value;
1594 value = env->cpuid_version & 0xf;
1595 visit_type_int(v, &value, name, errp);
/* QOM setter for the "stepping" property (single 4-bit field) */
static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
                                           void *opaque, const char *name,
                                           Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, &value, name, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    /* Replace the low nibble of the CPUID version word */
    env->cpuid_version &= ~0xf;
    env->cpuid_version |= value & 0xf;
}
1624 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1626 X86CPU *cpu = X86_CPU(obj);
1627 CPUX86State *env = &cpu->env;
1628 char *value;
1630 value = g_malloc(CPUID_VENDOR_SZ + 1);
1631 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1632 env->cpuid_vendor3);
1633 return value;
1636 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1637 Error **errp)
1639 X86CPU *cpu = X86_CPU(obj);
1640 CPUX86State *env = &cpu->env;
1641 int i;
1643 if (strlen(value) != CPUID_VENDOR_SZ) {
1644 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1645 return;
1648 env->cpuid_vendor1 = 0;
1649 env->cpuid_vendor2 = 0;
1650 env->cpuid_vendor3 = 0;
1651 for (i = 0; i < 4; i++) {
1652 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1653 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1654 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1658 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1660 X86CPU *cpu = X86_CPU(obj);
1661 CPUX86State *env = &cpu->env;
1662 char *value;
1663 int i;
1665 value = g_malloc(48 + 1);
1666 for (i = 0; i < 48; i++) {
1667 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1669 value[48] = '\0';
1670 return value;
1673 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1674 Error **errp)
1676 X86CPU *cpu = X86_CPU(obj);
1677 CPUX86State *env = &cpu->env;
1678 int c, len, i;
1680 if (model_id == NULL) {
1681 model_id = "";
1683 len = strlen(model_id);
1684 memset(env->cpuid_model, 0, 48);
1685 for (i = 0; i < 48; i++) {
1686 if (i >= len) {
1687 c = '\0';
1688 } else {
1689 c = (uint8_t)model_id[i];
1691 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1695 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, void *opaque,
1696 const char *name, Error **errp)
1698 X86CPU *cpu = X86_CPU(obj);
1699 int64_t value;
1701 value = cpu->env.tsc_khz * 1000;
1702 visit_type_int(v, &value, name, errp);
/* QOM setter for "tsc-frequency": accepts Hz, stores kHz. */
static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, void *opaque,
                                   const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    /* max is INT64_MAX, so the upper-bound check can never fire; the
     * range check only rejects negative values */
    const int64_t min = 0;
    const int64_t max = INT64_MAX;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, &value, name, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    /* Stored in kHz; sub-kHz precision is silently truncated */
    cpu->env.tsc_khz = value / 1000;
}
1728 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, void *opaque,
1729 const char *name, Error **errp)
1731 X86CPU *cpu = X86_CPU(obj);
1732 int64_t value = cpu->apic_id;
1734 visit_type_int(v, &value, name, errp);
/* QOM setter for the "apic-id" property.
 *
 * Rejects changes after the device has been realized, values outside
 * 0..UINT32_MAX, and IDs already in use by another CPU.
 */
static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, void *opaque,
                                  const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    DeviceState *dev = DEVICE(obj);
    const int64_t min = 0;
    const int64_t max = UINT32_MAX;
    Error *error = NULL;
    int64_t value;

    /* The APIC ID is fixed once the CPU has been realized */
    if (dev->realized) {
        error_setg(errp, "Attempt to set property '%s' on '%s' after "
                   "it was realized", name, object_get_typename(obj));
        return;
    }

    visit_type_int(v, &value, name, &error);
    if (error) {
        error_propagate(errp, error);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, "Property %s.%s doesn't take value %" PRId64
                   " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
                   object_get_typename(obj), name, value, min, max);
        return;
    }

    /* Reject duplicates, but allow re-setting the current value */
    if ((value != cpu->apic_id) && cpu_exists(value)) {
        error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
        return;
    }
    cpu->apic_id = value;
}
/* Generic getter for "feature-words" and "filtered-features" properties */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v, void *opaque,
                                      const char *name, Error **errp)
{
    uint32_t *array = (uint32_t *)opaque;  /* per-word feature bitmaps */
    FeatureWord w;
    Error *err = NULL;
    /* Stack storage is sufficient: the visitor consumes the whole list
     * before this function returns */
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        qwi->cpuid_input_eax = wi->cpuid_eax;
        qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid_ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v, &list, "feature-words", &err);
    error_propagate(errp, err);
}
1802 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1803 const char *name, Error **errp)
1805 X86CPU *cpu = X86_CPU(obj);
1806 int64_t value = cpu->hyperv_spinlock_attempts;
1808 visit_type_int(v, &value, name, errp);
/* QOM setter for the "hv-spinlocks" property */
static void x86_set_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
                                 const char *name, Error **errp)
{
    /* 0xFFF is the smallest spinlock retry count accepted here; values
     * below it are rejected (contrast the featurestr parser, which clamps) */
    const int64_t min = 0xFFF;
    const int64_t max = UINT_MAX;
    X86CPU *cpu = X86_CPU(obj);
    Error *err = NULL;
    int64_t value;

    visit_type_int(v, &value, name, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    if (value < min || value > max) {
        error_setg(errp, "Property %s.%s doesn't take value %" PRId64
                   " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
                   object_get_typename(obj), name ? name : "null",
                   value, min, max);
        return;
    }
    cpu->hyperv_spinlock_attempts = value;
}
/* qdev property type backing the "hv-spinlocks" property */
static PropertyInfo qdev_prop_spinlocks = {
    .name = "int",
    .get = x86_get_hv_spinlocks,
    .set = x86_set_hv_spinlocks,
};
/* Convert all '_' in a feature string option name to '-', to make feature
 * name conform to QOM property naming rule, which uses '-' instead of '_'.
 */
static inline void feat2prop(char *s)
{
    for (char *p = strchr(s, '_'); p != NULL; p = strchr(p + 1, '_')) {
        *p = '-';
    }
}
/* Parse "+feature,-feature,feature=foo" CPU feature string
 */
static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
                                     Error **errp)
{
    X86CPU *cpu = X86_CPU(cs);
    char *featurestr; /* Single 'key=value" string being parsed */
    FeatureWord w;
    /* Features to be added */
    FeatureWordArray plus_features = { 0 };
    /* Features to be removed */
    FeatureWordArray minus_features = { 0 };
    uint32_t numvalue;
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;

    /* NOTE(review): strtok() keeps static state, so this parser is not
     * reentrant, and 'features' is modified in place. */
    featurestr = features ? strtok(features, ",") : NULL;

    while (featurestr) {
        char *val;
        if (featurestr[0] == '+') {
            /* "+foo": force-enable a feature flag */
            add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
        } else if (featurestr[0] == '-') {
            /* "-foo": force-disable a feature flag */
            add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
        } else if ((val = strchr(featurestr, '='))) {
            *val = 0; val++;
            feat2prop(featurestr);
            if (!strcmp(featurestr, "xlevel")) {
                char *err;
                char num[32];

                numvalue = strtoul(val, &err, 0);
                if (!*val || *err) {
                    error_setg(errp, "bad numerical value %s", val);
                    return;
                }
                if (numvalue < 0x80000000) {
                    /* Legacy fixup: xlevel used to be given without the
                     * 0x80000000 extended-leaf offset */
                    error_report("xlevel value shall always be >= 0x80000000"
                                 ", fixup will be removed in future versions");
                    numvalue += 0x80000000;
                }
                snprintf(num, sizeof(num), "%" PRIu32, numvalue);
                object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
            } else if (!strcmp(featurestr, "tsc-freq")) {
                int64_t tsc_freq;
                char *err;
                char num[32];

                /* Accepts size suffixes; the value is parsed as Hz */
                tsc_freq = qemu_strtosz_suffix_unit(val, &err,
                                               QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
                if (tsc_freq < 0 || *err) {
                    error_setg(errp, "bad numerical value %s", val);
                    return;
                }
                snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
                object_property_parse(OBJECT(cpu), num, "tsc-frequency",
                                      &local_err);
            } else if (!strcmp(featurestr, "hv-spinlocks")) {
                char *err;
                const int min = 0xFFF;
                char num[32];
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err) {
                    error_setg(errp, "bad numerical value %s", val);
                    return;
                }
                if (numvalue < min) {
                    /* Clamp to the minimum retry count (the property
                     * setter itself would reject smaller values) */
                    error_report("hv-spinlocks value shall always be >= 0x%x"
                                 ", fixup will be removed in future versions",
                                 min);
                    numvalue = min;
                }
                snprintf(num, sizeof(num), "%" PRId32, numvalue);
                object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
            } else {
                /* Any other "key=value" pair is a QOM property assignment */
                object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
            }
        } else {
            /* A bare "foo" sets the property of the same name to "on" */
            feat2prop(featurestr);
            object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
        }
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        featurestr = strtok(NULL, ",");
    }

    if (cpu->host_features) {
        /* "-cpu host": start from everything the accelerator supports */
        for (w = 0; w < FEATURE_WORDS; w++) {
            env->features[w] =
                x86_cpu_get_supported_feature_word(w, cpu->migratable);
        }
    }

    /* Apply explicit +/- overrides last, so they take precedence */
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] |= plus_features[w];
        env->features[w] &= ~minus_features[w];
    }
}
1953 /* Print all cpuid feature names in featureset
1955 static void listflags(FILE *f, fprintf_function print, const char **featureset)
1957 int bit;
1958 bool first = true;
1960 for (bit = 0; bit < 32; bit++) {
1961 if (featureset[bit]) {
1962 print(f, "%s%s", first ? "" : " ", featureset[bit]);
1963 first = false;
/* generate CPU information. */
void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    X86CPUDefinition *def;
    char buf[256];
    int i;

    /* One line per built-in CPU model: name plus model-id string */
    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
        def = &builtin_x86_defs[i];
        snprintf(buf, sizeof(buf), "%s", def->name);
        (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
    }
#ifdef CONFIG_KVM
    /* "host" is not in builtin_x86_defs; list it separately */
    (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
                   "KVM processor with all supported host features "
                   "(only available in KVM mode)");
#endif

    (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
    for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
        FeatureWordInfo *fw = &feature_word_info[i];

        (*cpu_fprintf)(f, " ");
        listflags(f, cpu_fprintf, fw->feat_names);
        (*cpu_fprintf)(f, "\n");
    }
}
/* QMP query-cpu-definitions: build a list with the name of every built-in
 * CPU model.  The caller is presumably responsible for freeing the list
 * (allocated here with g_malloc0/g_strdup) -- TODO confirm against callers.
 */
CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    X86CPUDefinition *def;
    int i;

    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
        CpuDefinitionInfoList *entry;
        CpuDefinitionInfo *info;

        def = &builtin_x86_defs[i];
        info = g_malloc0(sizeof(*info));
        info->name = g_strdup(def->name);

        /* Prepend, so the resulting list is in reverse table order */
        entry = g_malloc0(sizeof(*entry));
        entry->value = info;
        entry->next = cpu_list;
        cpu_list = entry;
    }

    return cpu_list;
}
/* Return the bits of feature word 'w' that the current accelerator can
 * provide, optionally restricted to migration-safe flags. */
static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t r;

    if (kvm_enabled()) {
        /* Ask KVM which bits of this CPUID leaf/register it can expose */
        r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
                                                    wi->cpuid_ecx,
                                                    wi->cpuid_reg);
    } else if (tcg_enabled()) {
        r = wi->tcg_features;
    } else {
        /* No known accelerator-imposed limits: report all bits set */
        return ~0;
    }
    if (migratable_only) {
        r &= x86_cpu_get_migratable_flags(w);
    }
    return r;
}
/*
 * Filters CPU feature words based on host availability of each feature.
 *
 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
 */
static int x86_cpu_filter_features(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    int rv = 0;

    for (w = 0; w < FEATURE_WORDS; w++) {
        uint32_t host_feat =
            x86_cpu_get_supported_feature_word(w, cpu->migratable);
        uint32_t requested_features = env->features[w];
        /* Drop unsupported bits, and remember which ones were dropped */
        env->features[w] &= host_feat;
        cpu->filtered_features[w] = requested_features & ~env->features[w];
        if (cpu->filtered_features[w]) {
            if (cpu->check_cpuid || cpu->enforce_cpuid) {
                /* Only warn when the user asked for check/enforce mode */
                report_unavailable_features(w, cpu->filtered_features[w]);
            }
            rv = 1;
        }
    }

    return rv;
}
2068 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2070 PropValue *pv;
2071 for (pv = props; pv->prop; pv++) {
2072 if (!pv->value) {
2073 continue;
2075 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2076 &error_abort);
/* Load data from X86CPUDefinition into an X86CPU instance.
 *
 * Copies the static model definition (CPUID levels, version fields,
 * model-id and feature words) into the CPU object via QOM properties,
 * then applies accelerator-specific tweaks. Property-setter errors
 * accumulate in @errp.
 */
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    object_property_set_int(OBJECT(cpu), def->level, "level", errp);
    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
    object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* Special cases not set in the X86CPUDefinition structs: */
    if (kvm_enabled()) {
        x86_cpu_apply_props(cpu, kvm_default_props);
    }

    /* Always advertise that the guest runs under a hypervisor. */
    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (kvm_enabled()) {
        /* Under KVM, report the host's vendor string by default. */
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
}
/* Create (but do not realize) an X86CPU from a "model[,features...]" string.
 *
 * On failure, reports through @errp and returns NULL; a partially
 * constructed CPU object is unref'd by the common cleanup path.
 */
X86CPU *cpu_x86_create(const char *cpu_model, Error **errp)
{
    X86CPU *cpu = NULL;
    X86CPUClass *xcc;
    ObjectClass *oc;
    gchar **model_pieces;
    char *name, *features;
    Error *error = NULL;

    /* Split into at most two pieces: model name and the feature string. */
    model_pieces = g_strsplit(cpu_model, ",", 2);
    if (!model_pieces[0]) {
        error_setg(&error, "Invalid/empty CPU model name");
        goto out;
    }
    name = model_pieces[0];
    features = model_pieces[1];

    oc = x86_cpu_class_by_name(name);
    if (oc == NULL) {
        error_setg(&error, "Unable to find CPU definition: %s", name);
        goto out;
    }
    xcc = X86_CPU_CLASS(oc);

    if (xcc->kvm_required && !kvm_enabled()) {
        error_setg(&error, "CPU model '%s' requires KVM", name);
        goto out;
    }

    cpu = X86_CPU(object_new(object_class_get_name(oc)));

    x86_cpu_parse_featurestr(CPU(cpu), features, &error);
    if (error) {
        goto out;
    }

out:
    if (error != NULL) {
        error_propagate(errp, error);
        if (cpu) {
            object_unref(OBJECT(cpu));
            cpu = NULL;
        }
    }
    g_strfreev(model_pieces);
    return cpu;
}
2174 X86CPU *cpu_x86_init(const char *cpu_model)
2176 Error *error = NULL;
2177 X86CPU *cpu;
2179 cpu = cpu_x86_create(cpu_model, &error);
2180 if (error) {
2181 goto out;
2184 object_property_set_bool(OBJECT(cpu), true, "realized", &error);
2186 out:
2187 if (error) {
2188 error_report_err(error);
2189 if (cpu != NULL) {
2190 object_unref(OBJECT(cpu));
2191 cpu = NULL;
2194 return cpu;
/* Class init for per-model CPU types: stash the X86CPUDefinition
 * (passed as class_data) into the class for later use by instance init.
 */
static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
{
    X86CPUDefinition *cpudef = data;
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->cpu_def = cpudef;
}
2205 static void x86_register_cpudef_type(X86CPUDefinition *def)
2207 char *typename = x86_cpu_type_name(def->name);
2208 TypeInfo ti = {
2209 .name = typename,
2210 .parent = TYPE_X86_CPU,
2211 .class_init = x86_cpu_cpudef_class_init,
2212 .class_data = def,
2215 type_register(&ti);
2216 g_free(typename);
2219 #if !defined(CONFIG_USER_ONLY)
/* Remove the APIC CPUID feature bit (used when no APIC is present). */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}
2226 #endif /* !CONFIG_USER_ONLY */
2228 /* Initialize list of CPU models, filling some non-static fields if necessary
2230 void x86_cpudef_setup(void)
2232 int i, j;
2233 static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
2235 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
2236 X86CPUDefinition *def = &builtin_x86_defs[i];
2238 /* Look for specific "cpudef" models that */
2239 /* have the QEMU version in .model_id */
2240 for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
2241 if (strcmp(model_with_versions[j], def->name) == 0) {
2242 pstrcpy(def->model_id, sizeof(def->model_id),
2243 "QEMU Virtual CPU version ");
2244 pstrcat(def->model_id, sizeof(def->model_id),
2245 qemu_hw_version());
2246 break;
/* Answer a guest CPUID query: fill @eax/@ebx/@ecx/@edx for leaf @index
 * and sub-leaf @count from the configured feature words, cache-topology
 * constants, and (where enabled) host passthrough values.
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel) {
            if (env->cpuid_xlevel2 > 0) {
                /* Handle the Centaur's CPUID instruction. */
                if (index > env->cpuid_xlevel2) {
                    index = env->cpuid_xlevel2;
                } else if (index < 0xC0000000) {
                    index = env->cpuid_xlevel;
                }
            } else {
                /* Intel documentation states that invalid EAX input will
                 * return the same information as EAX=cpuid_level
                 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
                 */
                index = env->cpuid_level;
            }
        }
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* Vendor string and maximum basic leaf. */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        /* Version, APIC ID, and the two main feature words. */
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= 1 << 28;    /* HTT bit */
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        *ecx = 0;
        *edx = (L1D_DESCRIPTOR << 16) | \
               (L1I_DESCRIPTOR <<  8) | \
               (L2_DESCRIPTOR);
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            *eax &= ~0xFC000000;
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                *eax |= CPUID_4_TYPE_DCACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1D_LINE_SIZE - 1) | \
                       ((L1D_PARTITIONS - 1) << 12) | \
                       ((L1D_ASSOCIATIVITY - 1) << 22);
                *ecx = L1D_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 1: /* L1 icache info */
                *eax |= CPUID_4_TYPE_ICACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1I_LINE_SIZE - 1) | \
                       ((L1I_PARTITIONS - 1) << 12) | \
                       ((L1I_ASSOCIATIVITY - 1) << 22);
                *ecx = L1I_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 2: /* L2 cache info */
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(2) | \
                        CPUID_4_SELF_INIT_LEVEL;
                if (cs->nr_threads > 1) {
                    *eax |= (cs->nr_threads - 1) << 14;
                }
                *ebx = (L2_LINE_SIZE - 1) | \
                       ((L2_PARTITIONS - 1) << 12) | \
                       ((L2_ASSOCIATIVITY - 1) << 22);
                *ecx = L2_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
            }
        }

        /* QEMU gives out its own APIC IDs, never pass down bits 31..26.  */
        if ((*eax & 31) && cs->nr_cores > 1) {
            *eax |= (cs->nr_cores - 1) << 26;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = 0; /* Reserved */
            *edx = 0; /* Reserved */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            /* Pass through the host's PMU capabilities. */
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xD: {
        KVMState *s = cs->kvm_state;
        uint64_t kvm_mask;
        int i;

        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) || !kvm_enabled()) {
            break;
        }
        /* 64-bit mask of XSAVE components KVM supports (EDX:EAX). */
        kvm_mask =
            kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX) |
            ((uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32);

        if (count == 0) {
            *ecx = 0x240;
            for (i = 2; i < ARRAY_SIZE(ext_save_areas); i++) {
                const ExtSaveArea *esa = &ext_save_areas[i];
                /* Advertise a component only if both the guest feature
                 * and KVM support it. */
                if ((env->features[esa->feature] & esa->bits) == esa->bits &&
                    (kvm_mask & (1 << i)) != 0) {
                    if (i < 32) {
                        *eax |= 1 << i;
                    } else {
                        *edx |= 1 << (i - 32);
                    }
                    *ecx = MAX(*ecx, esa->offset + esa->size);
                }
            }
            *eax |= kvm_mask & (XSTATE_FP | XSTATE_SSE);
            *ebx = *ecx;
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(ext_save_areas)) {
            const ExtSaveArea *esa = &ext_save_areas[count];
            if ((env->features[esa->feature] & esa->bits) == esa->bits &&
                (kvm_mask & (1 << count)) != 0) {
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x80000000:
        /* Extended range: vendor string and maximum extended leaf. */
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So dont set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Model-name string, 16 bytes per leaf. */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
        *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
               (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
        *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
               (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        *ecx = (L2_SIZE_KB_AMD << 16) | \
               (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
               (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
        *edx = ((L3_SIZE_KB/512) << 18) | \
               (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
               (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
        break;
    case 0x80000007:
        /* Advanced Power Management Information. */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        /* XXX: This value must match the one used in the MMU code. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor */
            /* XXX: The physical address space is limited to 42 bits in exec.c. */
            *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
        } else {
            if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
                *eax = 0x00000024; /* 36 bits physical */
            } else {
                *eax = 0x00000020; /* 32 bits physical */
            }
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        /* SVM information leaf (AMD virtualization extensions). */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xC0000000:
        /* Maximum Centaur/VIA extended leaf. */
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
/* CPUClass::reset()
 *
 * Bring the CPU back to its architectural power-on state: clear the
 * non-CPUID portion of CPUX86State, then reprogram segments, control
 * registers, FPU/SSE state, debug registers and MTRRs to reset values.
 */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    int i;

    xcc->parent_reset(s);

    /* Zero everything up to (but not including) the CPUID fields,
     * which are configuration and must survive reset. */
    memset(env, 0, offsetof(CPUX86State, cpuid_level));

    tlb_flush(s, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* CS base 0xffff0000 + EIP 0xfff0 below = the x86 reset vector. */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    env->xstate_bv = XSTATE_FP | XSTATE_SSE;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    env->xcr0 = 1;

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    /* Application processors start halted, waiting for INIT-SIPI. */
    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
#endif
}
2704 #ifndef CONFIG_USER_ONLY
/* True if this CPU is the bootstrap processor, per its APIC base MSR. */
bool cpu_is_bsp(X86CPU *cpu)
{
    return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
}
/* TODO: remove me, when reset over QOM tree is implemented
 *
 * Machine-reset callback: forward the system reset to this CPU.
 */
static void x86_cpu_machine_reset_cb(void *opaque)
{
    X86CPU *cpu = opaque;
    cpu_reset(CPU(cpu));
}
2716 #endif
2718 static void mce_init(X86CPU *cpu)
2720 CPUX86State *cenv = &cpu->env;
2721 unsigned int bank;
2723 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2724 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2725 (CPUID_MCE | CPUID_MCA)) {
2726 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2727 cenv->mcg_ctl = ~(uint64_t)0;
2728 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2729 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2734 #ifndef CONFIG_USER_ONLY
/* Create the local APIC device for @cpu, picking the implementation
 * that matches the accelerator (KVM in-kernel irqchip, Xen, or the
 * emulated "apic").
 */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    const char *apic_type = "apic";

    if (kvm_irqchip_in_kernel()) {
        apic_type = "kvm-apic";
    } else if (xen_enabled()) {
        apic_type = "xen-apic";
    }

    cpu->apic_state = DEVICE(object_new(apic_type));

    object_property_add_child(OBJECT(cpu), "apic",
                              OBJECT(cpu->apic_state), NULL);
    qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}
/* Realize the CPU's APIC device (if one was created) and map its MMIO
 * region once globally; the region is shared by all CPUs.
 */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    static bool apic_mmio_map_once;

    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
    }
}
/* Machine-init-done notifier: if the machine exposes /machine/smram,
 * alias it (disabled by default) into this CPU's address space with
 * higher priority than normal memory.
 */
static void x86_cpu_machine_done(Notifier *n, void *unused)
{
    X86CPU *cpu = container_of(n, X86CPU, machine_done);
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    if (smram) {
        cpu->smram = g_new(MemoryRegion, 1);
        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
                                 smram, 0, 1ull << 32);
        memory_region_set_enabled(cpu->smram, false);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
    }
}
2794 #else
/* User-mode emulation stub: there is no APIC device to realize. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
2798 #endif
/* Vendor-string checks against the three CPUID vendor registers. */
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)

/* DeviceClass::realize for X86CPU: validate configuration, filter
 * features against accelerator support, create the APIC and (for TCG)
 * the per-CPU address space, then start the vCPU and reset it.
 */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;

    if (cpu->apic_id < 0) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    /* CPUID leaf 7 features require cpuid_level >= 7 to be visible. */
    if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
        env->cpuid_level = 7;
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
           & CPUID_EXT2_AMD_ALIASES);
    }

    if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
        error_setg(&local_err,
                   kvm_enabled() ?
                       "Host doesn't support requested features" :
                       "TCG doesn't support requested features");
        goto out;
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);
        cs->as = g_new(AddressSpace, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);
        address_space_init(cs->as, cpu->cpu_as_root, "CPU");

        /* ... SMRAM with higher priority, linked from /machine/smram.  */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
     * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to gives
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't be populated yet and the checking is incorrect.
     */
    if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
        error_report("AMD CPU doesn't support hyperthreading. Please configure"
                     " -smp options properly.");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
/* Opaque state for a boolean QOM property that toggles one or more bits
 * inside a uint32_t feature-word field.
 */
typedef struct BitProperty {
    uint32_t *ptr;   /* field the property reads/writes */
    uint32_t mask;   /* bit(s) covered by this property */
} BitProperty;
2917 static void x86_cpu_get_bit_prop(Object *obj,
2918 struct Visitor *v,
2919 void *opaque,
2920 const char *name,
2921 Error **errp)
2923 BitProperty *fp = opaque;
2924 bool value = (*fp->ptr & fp->mask) == fp->mask;
2925 visit_type_bool(v, &value, name, errp);
2928 static void x86_cpu_set_bit_prop(Object *obj,
2929 struct Visitor *v,
2930 void *opaque,
2931 const char *name,
2932 Error **errp)
2934 DeviceState *dev = DEVICE(obj);
2935 BitProperty *fp = opaque;
2936 Error *local_err = NULL;
2937 bool value;
2939 if (dev->realized) {
2940 qdev_prop_set_after_realize(dev, name, errp);
2941 return;
2944 visit_type_bool(v, &value, name, &local_err);
2945 if (local_err) {
2946 error_propagate(errp, local_err);
2947 return;
2950 if (value) {
2951 *fp->ptr |= fp->mask;
2952 } else {
2953 *fp->ptr &= ~fp->mask;
/* Release hook: free the BitProperty allocated in
 * x86_cpu_register_bit_prop() when the property is removed.
 */
static void x86_cpu_release_bit_prop(Object *obj, const char *name,
                                     void *opaque)
{
    BitProperty *prop = opaque;
    g_free(prop);
}
/* Register a boolean property to get/set a single bit in a uint32_t field.
 *
 * The same property name can be registered multiple times to make it affect
 * multiple bits in the same FeatureWord. In that case, the getter will return
 * true only if all bits are set.
 */
static void x86_cpu_register_bit_prop(X86CPU *cpu,
                                      const char *prop_name,
                                      uint32_t *field,
                                      int bitnr)
{
    BitProperty *fp;
    ObjectProperty *op;
    uint32_t mask = (1UL << bitnr);

    op = object_property_find(OBJECT(cpu), prop_name, NULL);
    if (op) {
        /* Property already exists: widen its mask to cover this bit too.
         * It must refer to the same feature-word field. */
        fp = op->opaque;
        assert(fp->ptr == field);
        fp->mask |= mask;
    } else {
        fp = g_new0(BitProperty, 1);
        fp->ptr = field;
        fp->mask = mask;
        object_property_add(OBJECT(cpu), prop_name, "bool",
                            x86_cpu_get_bit_prop,
                            x86_cpu_set_bit_prop,
                            x86_cpu_release_bit_prop, fp, &error_abort);
    }
}
2995 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
2996 FeatureWord w,
2997 int bitnr)
2999 Object *obj = OBJECT(cpu);
3000 int i;
3001 char **names;
3002 FeatureWordInfo *fi = &feature_word_info[w];
3004 if (!fi->feat_names) {
3005 return;
3007 if (!fi->feat_names[bitnr]) {
3008 return;
3011 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
3013 feat2prop(names[0]);
3014 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
3016 for (i = 1; names[i]; i++) {
3017 feat2prop(names[i]);
3018 object_property_add_alias(obj, names[i], obj, names[0],
3019 &error_abort);
3022 g_strfreev(names);
/* QOM instance init for X86CPU: register all per-CPU properties
 * (version fields, vendor, model-id, per-feature-bit booleans), then
 * load the model definition attached to the class.
 */
static void x86_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    static int inited;

    cs->env_ptr = env;
    cpu_exec_init(cs, &error_abort);

    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    object_property_add(obj, "apic-id", "int",
                        x86_cpuid_get_apic_id,
                        x86_cpuid_set_apic_id, NULL, NULL, NULL);
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;

#ifndef CONFIG_USER_ONLY
    /* Any code creating new X86CPU objects have to set apic-id explicitly */
    cpu->apic_id = -1;
#endif

    /* One boolean property per named bit in every feature word. */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);

    /* init various static tables used in TCG mode */
    if (tcg_enabled() && !inited) {
        inited = 1;
        optimize_flags_init();
    }
}
/* CPUClass::get_arch_id: the architectural CPU ID is the APIC ID. */
static int64_t x86_cpu_get_arch_id(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);

    return cpu->apic_id;
}
/* CPUClass::get_paging_enabled: report the CR0.PG bit. */
static bool x86_cpu_get_paging_enabled(const CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);

    return cpu->env.cr[0] & CR0_PG_MASK;
}
/* CPUClass::set_pc: store the program counter into EIP. */
static void x86_cpu_set_pc(CPUState *cs, vaddr value)
{
    X86CPU *cpu = X86_CPU(cs);

    cpu->env.eip = value;
}
/* CPUClass::synchronize_from_tb: recover EIP from the TB's pc, which
 * includes the CS base, so subtract it back out.
 */
static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
{
    X86CPU *cpu = X86_CPU(cs);

    cpu->env.eip = tb->pc - tb->cs_base;
}
3117 static bool x86_cpu_has_work(CPUState *cs)
3119 X86CPU *cpu = X86_CPU(cs);
3120 CPUX86State *env = &cpu->env;
3122 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3123 CPU_INTERRUPT_POLL)) &&
3124 (env->eflags & IF_MASK)) ||
3125 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3126 CPU_INTERRUPT_INIT |
3127 CPU_INTERRUPT_SIPI |
3128 CPU_INTERRUPT_MCE)) ||
3129 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3130 !(env->hflags & HF_SMM_MASK));
/* qdev properties common to all X86CPU subclasses. */
static Property x86_cpu_properties[] = {
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    /* Custom property type: accepts "off" or a retry count. */
    { .name  = "hv-spinlocks", .info  = &qdev_prop_spinlocks },
    /* Hyper-V enlightenments exposed to Windows guests: */
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
    DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
    DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
    DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
    /* Warn ("check") or refuse ("enforce") unavailable features: */
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_END_OF_LIST()
};
/* Class init for the abstract TYPE_X86_CPU: hook up realize/reset and
 * all CPUClass callbacks shared by every x86 CPU model.
 */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    /* Chain up: keep the parent's realize/reset for our own handlers. */
    xcc->parent_realize = dc->realize;
    dc->realize = x86_cpu_realizefn;
    dc->props = x86_cpu_properties;

    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
    cc->dump_state = x86_cpu_dump_state;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
#ifndef CONFIG_USER_ONLY
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;

    /*
     * Reason: x86_cpu_initfn() calls cpu_exec_init(), which saves the
     * object in cpus -> dangling pointer after final object_unref().
     */
    dc->cannot_destroy_with_object_finalize_yet = true;
}
/* Abstract base type; concrete models are registered per-cpudef below. */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};
/* Register the abstract base type, one subtype per built-in model,
 * and (with KVM) the "host" passthrough model.
 */
static void x86_cpu_register_types(void)
{
    int i;

    type_register_static(&x86_cpu_type_info);
    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
        x86_register_cpudef_type(&builtin_x86_defs[i]);
    }
#ifdef CONFIG_KVM
    type_register_static(&host_x86_cpu_type_info);
#endif
}

type_init(x86_cpu_register_types)