target-i386: Add clflushopt/clwb/pcommit to TCG_7_0_EBX_FEATURES
target-i386/cpu.c
1 /*
2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include <stdlib.h>
20 #include <stdio.h>
21 #include <string.h>
22 #include <inttypes.h>
24 #include "cpu.h"
25 #include "sysemu/kvm.h"
26 #include "sysemu/cpus.h"
27 #include "kvm_i386.h"
29 #include "qemu/error-report.h"
30 #include "qemu/option.h"
31 #include "qemu/config-file.h"
32 #include "qapi/qmp/qerror.h"
34 #include "qapi-types.h"
35 #include "qapi-visit.h"
36 #include "qapi/visitor.h"
37 #include "sysemu/arch_init.h"
39 #include "hw/hw.h"
40 #if defined(CONFIG_KVM)
41 #include <linux/kvm_para.h>
42 #endif
44 #include "sysemu/sysemu.h"
45 #include "hw/qdev-properties.h"
46 #ifndef CONFIG_USER_ONLY
47 #include "exec/address-spaces.h"
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
50 #endif
53 /* Cache topology CPUID constants: */
55 /* CPUID Leaf 2 Descriptors */
57 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
58 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
59 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
62 /* CPUID Leaf 4 constants: */
64 /* EAX: */
65 #define CPUID_4_TYPE_DCACHE 1
66 #define CPUID_4_TYPE_ICACHE 2
67 #define CPUID_4_TYPE_UNIFIED 3
69 #define CPUID_4_LEVEL(l) ((l) << 5)
71 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
72 #define CPUID_4_FULLY_ASSOC (1 << 9)
74 /* EDX: */
75 #define CPUID_4_NO_INVD_SHARING (1 << 0)
76 #define CPUID_4_INCLUSIVE (1 << 1)
77 #define CPUID_4_COMPLEX_IDX (1 << 2)
79 #define ASSOC_FULL 0xFF
81 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
82 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
83 a == 2 ? 0x2 : \
84 a == 4 ? 0x4 : \
85 a == 8 ? 0x6 : \
86 a == 16 ? 0x8 : \
87 a == 32 ? 0xA : \
88 a == 48 ? 0xB : \
89 a == 64 ? 0xC : \
90 a == 96 ? 0xD : \
91 a == 128 ? 0xE : \
92 a == ASSOC_FULL ? 0xF : \
93 0 /* invalid value */)
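/* Illustrative note (not part of the original file): AMD_ENC_ASSOC maps a
 * plain associativity count to the encoding CPUID leaf 0x80000006 expects,
 * e.g. AMD_ENC_ASSOC(16) == 0x8 and AMD_ENC_ASSOC(ASSOC_FULL) == 0xF; any
 * count not listed above encodes as 0 (invalid). */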
96 /* Definitions of the hardcoded cache entries we expose: */
98 /* L1 data cache: */
99 #define L1D_LINE_SIZE 64
100 #define L1D_ASSOCIATIVITY 8
101 #define L1D_SETS 64
102 #define L1D_PARTITIONS 1
103 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
104 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
105 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
106 #define L1D_LINES_PER_TAG 1
107 #define L1D_SIZE_KB_AMD 64
108 #define L1D_ASSOCIATIVITY_AMD 2
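/* Illustrative note (not part of the original file): plugging the constants
 * above into the size formula gives 64 B * 8 ways * 64 sets * 1 partition
 * = 32768 B = 32 KiB, matching CPUID_2_L1D_32KB_8WAY_64B, while the AMD
 * leaf 0x80000005 values (64 KiB, 2-way) describe a different cache shape,
 * which is the inconsistency the FIXME above refers to. */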
110 /* L1 instruction cache: */
111 #define L1I_LINE_SIZE 64
112 #define L1I_ASSOCIATIVITY 8
113 #define L1I_SETS 64
114 #define L1I_PARTITIONS 1
115 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
116 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
117 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
118 #define L1I_LINES_PER_TAG 1
119 #define L1I_SIZE_KB_AMD 64
120 #define L1I_ASSOCIATIVITY_AMD 2
122 /* Level 2 unified cache: */
123 #define L2_LINE_SIZE 64
124 #define L2_ASSOCIATIVITY 16
125 #define L2_SETS 4096
126 #define L2_PARTITIONS 1
127 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
128 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
129 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
130 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
131 #define L2_LINES_PER_TAG 1
132 #define L2_SIZE_KB_AMD 512
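/* Illustrative note (not part of the original file): here the leaf-4 style
 * constants give 64 B * 16 ways * 4096 sets * 1 partition = 4 MiB, whereas
 * the leaf 2 descriptor above encodes a 2 MB cache and the AMD leaf
 * 0x80000006 size is 512 KiB -- the inconsistencies flagged by the FIXMEs. */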
134 /* No L3 cache: */
135 #define L3_SIZE_KB 0 /* disabled */
136 #define L3_ASSOCIATIVITY 0 /* disabled */
137 #define L3_LINES_PER_TAG 0 /* disabled */
138 #define L3_LINE_SIZE 0 /* disabled */
140 /* TLB definitions: */
142 #define L1_DTLB_2M_ASSOC 1
143 #define L1_DTLB_2M_ENTRIES 255
144 #define L1_DTLB_4K_ASSOC 1
145 #define L1_DTLB_4K_ENTRIES 255
147 #define L1_ITLB_2M_ASSOC 1
148 #define L1_ITLB_2M_ENTRIES 255
149 #define L1_ITLB_4K_ASSOC 1
150 #define L1_ITLB_4K_ENTRIES 255
152 #define L2_DTLB_2M_ASSOC 0 /* disabled */
153 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
154 #define L2_DTLB_4K_ASSOC 4
155 #define L2_DTLB_4K_ENTRIES 512
157 #define L2_ITLB_2M_ASSOC 0 /* disabled */
158 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
159 #define L2_ITLB_4K_ASSOC 4
160 #define L2_ITLB_4K_ENTRIES 512
164 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
165 uint32_t vendor2, uint32_t vendor3)
167 int i;
168 for (i = 0; i < 4; i++) {
169 dst[i] = vendor1 >> (8 * i);
170 dst[i + 4] = vendor2 >> (8 * i);
171 dst[i + 8] = vendor3 >> (8 * i);
173 dst[CPUID_VENDOR_SZ] = '\0';
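/* Illustrative note (not part of the original file): the three words are the
 * CPUID.0 vendor registers in EBX, EDX, ECX order, so for an Intel host
 * x86_cpu_vendor_words2str() yields "GenuineIntel" ("Genu" + "ineI" + "ntel");
 * see the host_cpuid(0x0, ...) caller further down. */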
176 /* feature flags taken from "Intel Processor Identification and the CPUID
177 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
178 * between feature naming conventions, aliases may be added.
180 static const char *feature_name[] = {
181 "fpu", "vme", "de", "pse",
182 "tsc", "msr", "pae", "mce",
183 "cx8", "apic", NULL, "sep",
184 "mtrr", "pge", "mca", "cmov",
185 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
186 NULL, "ds" /* Intel dts */, "acpi", "mmx",
187 "fxsr", "sse", "sse2", "ss",
188 "ht" /* Intel htt */, "tm", "ia64", "pbe",
190 static const char *ext_feature_name[] = {
191 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
192 "ds_cpl", "vmx", "smx", "est",
193 "tm2", "ssse3", "cid", NULL,
194 "fma", "cx16", "xtpr", "pdcm",
195 NULL, "pcid", "dca", "sse4.1|sse4_1",
196 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
197 "tsc-deadline", "aes", "xsave", "osxsave",
198 "avx", "f16c", "rdrand", "hypervisor",
200 /* Feature names that are already defined on feature_name[] but are set on
201 * CPUID[8000_0001].EDX on AMD CPUs don't have their names on
202 * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
203 * if and only if CPU vendor is AMD.
205 static const char *ext2_feature_name[] = {
206 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
207 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
208 NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
209 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
210 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
211 "nx|xd", NULL, "mmxext", NULL /* mmx */,
212 NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
213 NULL, "lm|i64", "3dnowext", "3dnow",
215 static const char *ext3_feature_name[] = {
216 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
217 "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
218 "3dnowprefetch", "osvw", "ibs", "xop",
219 "skinit", "wdt", NULL, "lwp",
220 "fma4", "tce", NULL, "nodeid_msr",
221 NULL, "tbm", "topoext", "perfctr_core",
222 "perfctr_nb", NULL, NULL, NULL,
223 NULL, NULL, NULL, NULL,
226 static const char *ext4_feature_name[] = {
227 NULL, NULL, "xstore", "xstore-en",
228 NULL, NULL, "xcrypt", "xcrypt-en",
229 "ace2", "ace2-en", "phe", "phe-en",
230 "pmm", "pmm-en", NULL, NULL,
231 NULL, NULL, NULL, NULL,
232 NULL, NULL, NULL, NULL,
233 NULL, NULL, NULL, NULL,
234 NULL, NULL, NULL, NULL,
237 static const char *kvm_feature_name[] = {
238 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
239 "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
240 NULL, NULL, NULL, NULL,
241 NULL, NULL, NULL, NULL,
242 NULL, NULL, NULL, NULL,
243 NULL, NULL, NULL, NULL,
244 "kvmclock-stable-bit", NULL, NULL, NULL,
245 NULL, NULL, NULL, NULL,
248 static const char *svm_feature_name[] = {
249 "npt", "lbrv", "svm_lock", "nrip_save",
250 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
251 NULL, NULL, "pause_filter", NULL,
252 "pfthreshold", NULL, NULL, NULL,
253 NULL, NULL, NULL, NULL,
254 NULL, NULL, NULL, NULL,
255 NULL, NULL, NULL, NULL,
256 NULL, NULL, NULL, NULL,
259 static const char *cpuid_7_0_ebx_feature_name[] = {
260 "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
261 "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
262 "avx512f", NULL, "rdseed", "adx", "smap", NULL, "pcommit", "clflushopt",
263 "clwb", NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
266 static const char *cpuid_apm_edx_feature_name[] = {
267 NULL, NULL, NULL, NULL,
268 NULL, NULL, NULL, NULL,
269 "invtsc", NULL, NULL, NULL,
270 NULL, NULL, NULL, NULL,
271 NULL, NULL, NULL, NULL,
272 NULL, NULL, NULL, NULL,
273 NULL, NULL, NULL, NULL,
274 NULL, NULL, NULL, NULL,
277 static const char *cpuid_xsave_feature_name[] = {
278 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
279 NULL, NULL, NULL, NULL,
280 NULL, NULL, NULL, NULL,
281 NULL, NULL, NULL, NULL,
282 NULL, NULL, NULL, NULL,
283 NULL, NULL, NULL, NULL,
284 NULL, NULL, NULL, NULL,
285 NULL, NULL, NULL, NULL,
288 static const char *cpuid_6_feature_name[] = {
289 NULL, NULL, "arat", NULL,
290 NULL, NULL, NULL, NULL,
291 NULL, NULL, NULL, NULL,
292 NULL, NULL, NULL, NULL,
293 NULL, NULL, NULL, NULL,
294 NULL, NULL, NULL, NULL,
295 NULL, NULL, NULL, NULL,
296 NULL, NULL, NULL, NULL,
299 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
300 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
301 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
302 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
303 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
304 CPUID_PSE36 | CPUID_FXSR)
305 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
306 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
307 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
308 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
309 CPUID_PAE | CPUID_SEP | CPUID_APIC)
311 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
312 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
313 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
314 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
315 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
316 /* partly implemented:
317 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
318 /* missing:
319 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
320 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
321 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
322 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
323 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
324 /* missing:
325 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
326 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
327 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
328 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_XSAVE,
329 CPUID_EXT_OSXSAVE, CPUID_EXT_AVX, CPUID_EXT_F16C,
330 CPUID_EXT_RDRAND */
332 #ifdef TARGET_X86_64
333 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
334 #else
335 #define TCG_EXT2_X86_64_FEATURES 0
336 #endif
338 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
339 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
340 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
341 TCG_EXT2_X86_64_FEATURES)
342 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
343 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
344 #define TCG_EXT4_FEATURES 0
345 #define TCG_SVM_FEATURES 0
346 #define TCG_KVM_FEATURES 0
347 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
348 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
349 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
350 CPUID_7_0_EBX_CLWB)
351 /* missing:
352 CPUID_7_0_EBX_FSGSBASE, CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
353 CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
354 CPUID_7_0_EBX_RDSEED */
355 #define TCG_APM_FEATURES 0
356 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
359 typedef struct FeatureWordInfo {
360 const char **feat_names;
361 uint32_t cpuid_eax; /* Input EAX for CPUID */
362 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
363 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
364 int cpuid_reg; /* output register (R_* constant) */
365 uint32_t tcg_features; /* Feature flags supported by TCG */
366 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
367 } FeatureWordInfo;
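/* Illustrative note (not part of the original file): each entry below tells
 * QEMU how to probe one feature word, e.g. FEAT_7_0_EBX is read by executing
 * CPUID with EAX=7, ECX=0 and taking the EBX output, while FEAT_8000_0001_EDX
 * needs no ECX input. */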
369 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
370 [FEAT_1_EDX] = {
371 .feat_names = feature_name,
372 .cpuid_eax = 1, .cpuid_reg = R_EDX,
373 .tcg_features = TCG_FEATURES,
375 [FEAT_1_ECX] = {
376 .feat_names = ext_feature_name,
377 .cpuid_eax = 1, .cpuid_reg = R_ECX,
378 .tcg_features = TCG_EXT_FEATURES,
380 [FEAT_8000_0001_EDX] = {
381 .feat_names = ext2_feature_name,
382 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
383 .tcg_features = TCG_EXT2_FEATURES,
385 [FEAT_8000_0001_ECX] = {
386 .feat_names = ext3_feature_name,
387 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
388 .tcg_features = TCG_EXT3_FEATURES,
390 [FEAT_C000_0001_EDX] = {
391 .feat_names = ext4_feature_name,
392 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
393 .tcg_features = TCG_EXT4_FEATURES,
395 [FEAT_KVM] = {
396 .feat_names = kvm_feature_name,
397 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
398 .tcg_features = TCG_KVM_FEATURES,
400 [FEAT_SVM] = {
401 .feat_names = svm_feature_name,
402 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
403 .tcg_features = TCG_SVM_FEATURES,
405 [FEAT_7_0_EBX] = {
406 .feat_names = cpuid_7_0_ebx_feature_name,
407 .cpuid_eax = 7,
408 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
409 .cpuid_reg = R_EBX,
410 .tcg_features = TCG_7_0_EBX_FEATURES,
412 [FEAT_8000_0007_EDX] = {
413 .feat_names = cpuid_apm_edx_feature_name,
414 .cpuid_eax = 0x80000007,
415 .cpuid_reg = R_EDX,
416 .tcg_features = TCG_APM_FEATURES,
417 .unmigratable_flags = CPUID_APM_INVTSC,
419 [FEAT_XSAVE] = {
420 .feat_names = cpuid_xsave_feature_name,
421 .cpuid_eax = 0xd,
422 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
423 .cpuid_reg = R_EAX,
424 .tcg_features = 0,
426 [FEAT_6_EAX] = {
427 .feat_names = cpuid_6_feature_name,
428 .cpuid_eax = 6, .cpuid_reg = R_EAX,
429 .tcg_features = TCG_6_EAX_FEATURES,
433 typedef struct X86RegisterInfo32 {
434 /* Name of register */
435 const char *name;
436 /* QAPI enum value register */
437 X86CPURegister32 qapi_enum;
438 } X86RegisterInfo32;
440 #define REGISTER(reg) \
441 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
442 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
443 REGISTER(EAX),
444 REGISTER(ECX),
445 REGISTER(EDX),
446 REGISTER(EBX),
447 REGISTER(ESP),
448 REGISTER(EBP),
449 REGISTER(ESI),
450 REGISTER(EDI),
452 #undef REGISTER
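/* Illustrative note (not part of the original file): each REGISTER(reg) entry
 * above expands to
 *   [R_EAX] = { .name = "EAX", .qapi_enum = X86_CPU_REGISTER32_EAX }
 * and so on, letting x86_reg_info_32[] translate an R_* index into both a
 * printable name and the corresponding QAPI enum value. */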
454 typedef struct ExtSaveArea {
455 uint32_t feature, bits;
456 uint32_t offset, size;
457 } ExtSaveArea;
459 static const ExtSaveArea ext_save_areas[] = {
460 [2] = { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
461 .offset = 0x240, .size = 0x100 },
462 [3] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
463 .offset = 0x3c0, .size = 0x40 },
464 [4] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
465 .offset = 0x400, .size = 0x40 },
466 [5] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
467 .offset = 0x440, .size = 0x40 },
468 [6] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
469 .offset = 0x480, .size = 0x200 },
470 [7] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
471 .offset = 0x680, .size = 0x400 },
474 const char *get_register_name_32(unsigned int reg)
476 if (reg >= CPU_NB_REGS32) {
477 return NULL;
479 return x86_reg_info_32[reg].name;
483 * Returns the set of feature flags that are supported and migratable by
484 * QEMU, for a given FeatureWord.
486 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
488 FeatureWordInfo *wi = &feature_word_info[w];
489 uint32_t r = 0;
490 int i;
492 for (i = 0; i < 32; i++) {
493 uint32_t f = 1U << i;
494 /* If the feature name is unknown, it is not supported by QEMU yet */
495 if (!wi->feat_names[i]) {
496 continue;
498 /* Skip features known to QEMU, but explicitly marked as unmigratable */
499 if (wi->unmigratable_flags & f) {
500 continue;
502 r |= f;
504 return r;
507 void host_cpuid(uint32_t function, uint32_t count,
508 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
510 uint32_t vec[4];
512 #ifdef __x86_64__
513 asm volatile("cpuid"
514 : "=a"(vec[0]), "=b"(vec[1]),
515 "=c"(vec[2]), "=d"(vec[3])
516 : "0"(function), "c"(count) : "cc");
517 #elif defined(__i386__)
518 asm volatile("pusha \n\t"
519 "cpuid \n\t"
520 "mov %%eax, 0(%2) \n\t"
521 "mov %%ebx, 4(%2) \n\t"
522 "mov %%ecx, 8(%2) \n\t"
523 "mov %%edx, 12(%2) \n\t"
524 "popa"
525 : : "a"(function), "c"(count), "S"(vec)
526 : "memory", "cc");
527 #else
528 abort();
529 #endif
531 if (eax)
532 *eax = vec[0];
533 if (ebx)
534 *ebx = vec[1];
535 if (ecx)
536 *ecx = vec[2];
537 if (edx)
538 *edx = vec[3];
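/* Illustrative note (not part of the original file): a typical use is
 * host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx), which returns the host's
 * maximum basic leaf in EAX and the vendor string words in EBX/EDX/ECX,
 * exactly how host_x86_cpu_class_init() below consumes it. */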
541 #define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
543 /* general substring compare of *[s1..e1) and *[s2..e2). sx is start of
544 * a substring. ex if !NULL points to the first char after a substring,
545 * otherwise the string is assumed to sized by a terminating nul.
546 * Return lexical ordering of *s1:*s2.
548 static int sstrcmp(const char *s1, const char *e1,
549 const char *s2, const char *e2)
551 for (;;) {
552 if (!*s1 || !*s2 || *s1 != *s2)
553 return (*s1 - *s2);
554 ++s1, ++s2;
555 if (s1 == e1 && s2 == e2)
556 return (0);
557 else if (s1 == e1)
558 return (*s2);
559 else if (s2 == e2)
560 return (*s1);
564 /* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
565 * '|' delimited (possibly empty) strings in which case search for a match
566 * within the alternatives proceeds left to right. Return 0 for success,
567 * non-zero otherwise.
569 static int altcmp(const char *s, const char *e, const char *altstr)
571 const char *p, *q;
573 for (q = p = altstr; ; ) {
574 while (*p && *p != '|')
575 ++p;
576 if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
577 return (0);
578 if (!*p)
579 return (1);
580 else
581 q = ++p;
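/* Illustrative note (not part of the original file): with the alternative
 * spellings used in the feature tables, e.g. "sse4.2|sse4_2", altcmp()
 * returns 0 for either "sse4.2" or "sse4_2", so both spellings select the
 * same CPUID bit. */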
585 /* search featureset for flag *[s..e), if found set corresponding bit in
586 * *pval and return true, otherwise return false
588 static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
589 const char **featureset)
591 uint32_t mask;
592 const char **ppc;
593 bool found = false;
595 for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
596 if (*ppc && !altcmp(s, e, *ppc)) {
597 *pval |= mask;
598 found = true;
601 return found;
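/* Illustrative note (not part of the original file): looking up "popcnt" in
 * ext_feature_name[] matches index 23, so lookup_feature() ORs (1U << 23)
 * (the CPUID.1:ECX POPCNT bit) into *pval and returns true. */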
604 static void add_flagname_to_bitmaps(const char *flagname,
605 FeatureWordArray words,
606 Error **errp)
608 FeatureWord w;
609 for (w = 0; w < FEATURE_WORDS; w++) {
610 FeatureWordInfo *wi = &feature_word_info[w];
611 if (wi->feat_names &&
612 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
613 break;
616 if (w == FEATURE_WORDS) {
617 error_setg(errp, "CPU feature %s not found", flagname);
621 /* CPU class name definitions: */
623 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
624 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
626 /* Return type name for a given CPU model name
627 * Caller is responsible for freeing the returned string.
629 static char *x86_cpu_type_name(const char *model_name)
631 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
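/* Illustrative note (not part of the original file): the model name is simply
 * glued to the "-" TYPE_X86_CPU suffix, so "qemu64" becomes something like
 * "qemu64-x86_64-cpu" on an x86_64 target. */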
634 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
636 ObjectClass *oc;
637 char *typename;
639 if (cpu_model == NULL) {
640 return NULL;
643 typename = x86_cpu_type_name(cpu_model);
644 oc = object_class_by_name(typename);
645 g_free(typename);
646 return oc;
649 struct X86CPUDefinition {
650 const char *name;
651 uint32_t level;
652 uint32_t xlevel;
653 uint32_t xlevel2;
654 /* vendor is zero-terminated, 12 character ASCII string */
655 char vendor[CPUID_VENDOR_SZ + 1];
656 int family;
657 int model;
658 int stepping;
659 FeatureWordArray features;
660 char model_id[48];
663 static X86CPUDefinition builtin_x86_defs[] = {
665 .name = "qemu64",
666 .level = 0xd,
667 .vendor = CPUID_VENDOR_AMD,
668 .family = 6,
669 .model = 6,
670 .stepping = 3,
671 .features[FEAT_1_EDX] =
672 PPRO_FEATURES |
673 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
674 CPUID_PSE36,
675 .features[FEAT_1_ECX] =
676 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
677 .features[FEAT_8000_0001_EDX] =
678 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
679 .features[FEAT_8000_0001_ECX] =
680 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
681 .xlevel = 0x8000000A,
684 .name = "phenom",
685 .level = 5,
686 .vendor = CPUID_VENDOR_AMD,
687 .family = 16,
688 .model = 2,
689 .stepping = 3,
690 /* Missing: CPUID_HT */
691 .features[FEAT_1_EDX] =
692 PPRO_FEATURES |
693 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
694 CPUID_PSE36 | CPUID_VME,
695 .features[FEAT_1_ECX] =
696 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
697 CPUID_EXT_POPCNT,
698 .features[FEAT_8000_0001_EDX] =
699 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
700 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
701 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
702 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
703 CPUID_EXT3_CR8LEG,
704 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
705 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
706 .features[FEAT_8000_0001_ECX] =
707 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
708 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
709 /* Missing: CPUID_SVM_LBRV */
710 .features[FEAT_SVM] =
711 CPUID_SVM_NPT,
712 .xlevel = 0x8000001A,
713 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
716 .name = "core2duo",
717 .level = 10,
718 .vendor = CPUID_VENDOR_INTEL,
719 .family = 6,
720 .model = 15,
721 .stepping = 11,
722 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
723 .features[FEAT_1_EDX] =
724 PPRO_FEATURES |
725 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
726 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
727 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
728 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
729 .features[FEAT_1_ECX] =
730 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
731 CPUID_EXT_CX16,
732 .features[FEAT_8000_0001_EDX] =
733 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
734 .features[FEAT_8000_0001_ECX] =
735 CPUID_EXT3_LAHF_LM,
736 .xlevel = 0x80000008,
737 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
740 .name = "kvm64",
741 .level = 0xd,
742 .vendor = CPUID_VENDOR_INTEL,
743 .family = 15,
744 .model = 6,
745 .stepping = 1,
746 /* Missing: CPUID_HT */
747 .features[FEAT_1_EDX] =
748 PPRO_FEATURES | CPUID_VME |
749 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
750 CPUID_PSE36,
751 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
752 .features[FEAT_1_ECX] =
753 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
754 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
755 .features[FEAT_8000_0001_EDX] =
756 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
757 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
758 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
759 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
760 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
761 .features[FEAT_8000_0001_ECX] =
762 0,
763 .xlevel = 0x80000008,
764 .model_id = "Common KVM processor"
767 .name = "qemu32",
768 .level = 4,
769 .vendor = CPUID_VENDOR_INTEL,
770 .family = 6,
771 .model = 6,
772 .stepping = 3,
773 .features[FEAT_1_EDX] =
774 PPRO_FEATURES,
775 .features[FEAT_1_ECX] =
776 CPUID_EXT_SSE3,
777 .xlevel = 0x80000004,
780 .name = "kvm32",
781 .level = 5,
782 .vendor = CPUID_VENDOR_INTEL,
783 .family = 15,
784 .model = 6,
785 .stepping = 1,
786 .features[FEAT_1_EDX] =
787 PPRO_FEATURES | CPUID_VME |
788 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
789 .features[FEAT_1_ECX] =
790 CPUID_EXT_SSE3,
791 .features[FEAT_8000_0001_ECX] =
792 0,
793 .xlevel = 0x80000008,
794 .model_id = "Common 32-bit KVM processor"
797 .name = "coreduo",
798 .level = 10,
799 .vendor = CPUID_VENDOR_INTEL,
800 .family = 6,
801 .model = 14,
802 .stepping = 8,
803 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
804 .features[FEAT_1_EDX] =
805 PPRO_FEATURES | CPUID_VME |
806 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
807 CPUID_SS,
808 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
809 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
810 .features[FEAT_1_ECX] =
811 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
812 .features[FEAT_8000_0001_EDX] =
813 CPUID_EXT2_NX,
814 .xlevel = 0x80000008,
815 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
818 .name = "486",
819 .level = 1,
820 .vendor = CPUID_VENDOR_INTEL,
821 .family = 4,
822 .model = 8,
823 .stepping = 0,
824 .features[FEAT_1_EDX] =
825 I486_FEATURES,
826 .xlevel = 0,
829 .name = "pentium",
830 .level = 1,
831 .vendor = CPUID_VENDOR_INTEL,
832 .family = 5,
833 .model = 4,
834 .stepping = 3,
835 .features[FEAT_1_EDX] =
836 PENTIUM_FEATURES,
837 .xlevel = 0,
840 .name = "pentium2",
841 .level = 2,
842 .vendor = CPUID_VENDOR_INTEL,
843 .family = 6,
844 .model = 5,
845 .stepping = 2,
846 .features[FEAT_1_EDX] =
847 PENTIUM2_FEATURES,
848 .xlevel = 0,
851 .name = "pentium3",
852 .level = 3,
853 .vendor = CPUID_VENDOR_INTEL,
854 .family = 6,
855 .model = 7,
856 .stepping = 3,
857 .features[FEAT_1_EDX] =
858 PENTIUM3_FEATURES,
859 .xlevel = 0,
862 .name = "athlon",
863 .level = 2,
864 .vendor = CPUID_VENDOR_AMD,
865 .family = 6,
866 .model = 2,
867 .stepping = 3,
868 .features[FEAT_1_EDX] =
869 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
870 CPUID_MCA,
871 .features[FEAT_8000_0001_EDX] =
872 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
873 .xlevel = 0x80000008,
876 .name = "n270",
877 .level = 10,
878 .vendor = CPUID_VENDOR_INTEL,
879 .family = 6,
880 .model = 28,
881 .stepping = 2,
882 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
883 .features[FEAT_1_EDX] =
884 PPRO_FEATURES |
885 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
886 CPUID_ACPI | CPUID_SS,
887 /* Some CPUs got no CPUID_SEP */
888 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
889 * CPUID_EXT_XTPR */
890 .features[FEAT_1_ECX] =
891 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
892 CPUID_EXT_MOVBE,
893 .features[FEAT_8000_0001_EDX] =
894 CPUID_EXT2_NX,
895 .features[FEAT_8000_0001_ECX] =
896 CPUID_EXT3_LAHF_LM,
897 .xlevel = 0x80000008,
898 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
901 .name = "Conroe",
902 .level = 10,
903 .vendor = CPUID_VENDOR_INTEL,
904 .family = 6,
905 .model = 15,
906 .stepping = 3,
907 .features[FEAT_1_EDX] =
908 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
909 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
910 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
911 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
912 CPUID_DE | CPUID_FP87,
913 .features[FEAT_1_ECX] =
914 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
915 .features[FEAT_8000_0001_EDX] =
916 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
917 .features[FEAT_8000_0001_ECX] =
918 CPUID_EXT3_LAHF_LM,
919 .xlevel = 0x80000008,
920 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
923 .name = "Penryn",
924 .level = 10,
925 .vendor = CPUID_VENDOR_INTEL,
926 .family = 6,
927 .model = 23,
928 .stepping = 3,
929 .features[FEAT_1_EDX] =
930 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
931 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
932 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
933 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
934 CPUID_DE | CPUID_FP87,
935 .features[FEAT_1_ECX] =
936 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
937 CPUID_EXT_SSE3,
938 .features[FEAT_8000_0001_EDX] =
939 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
940 .features[FEAT_8000_0001_ECX] =
941 CPUID_EXT3_LAHF_LM,
942 .xlevel = 0x80000008,
943 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
946 .name = "Nehalem",
947 .level = 11,
948 .vendor = CPUID_VENDOR_INTEL,
949 .family = 6,
950 .model = 26,
951 .stepping = 3,
952 .features[FEAT_1_EDX] =
953 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
954 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
955 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
956 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
957 CPUID_DE | CPUID_FP87,
958 .features[FEAT_1_ECX] =
959 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
960 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
961 .features[FEAT_8000_0001_EDX] =
962 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
963 .features[FEAT_8000_0001_ECX] =
964 CPUID_EXT3_LAHF_LM,
965 .xlevel = 0x80000008,
966 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
969 .name = "Westmere",
970 .level = 11,
971 .vendor = CPUID_VENDOR_INTEL,
972 .family = 6,
973 .model = 44,
974 .stepping = 1,
975 .features[FEAT_1_EDX] =
976 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
977 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
978 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
979 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
980 CPUID_DE | CPUID_FP87,
981 .features[FEAT_1_ECX] =
982 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
983 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
984 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
985 .features[FEAT_8000_0001_EDX] =
986 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
987 .features[FEAT_8000_0001_ECX] =
988 CPUID_EXT3_LAHF_LM,
989 .features[FEAT_6_EAX] =
990 CPUID_6_EAX_ARAT,
991 .xlevel = 0x80000008,
992 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
995 .name = "SandyBridge",
996 .level = 0xd,
997 .vendor = CPUID_VENDOR_INTEL,
998 .family = 6,
999 .model = 42,
1000 .stepping = 1,
1001 .features[FEAT_1_EDX] =
1002 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1003 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1004 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1005 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1006 CPUID_DE | CPUID_FP87,
1007 .features[FEAT_1_ECX] =
1008 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1009 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1010 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1011 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1012 CPUID_EXT_SSE3,
1013 .features[FEAT_8000_0001_EDX] =
1014 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1015 CPUID_EXT2_SYSCALL,
1016 .features[FEAT_8000_0001_ECX] =
1017 CPUID_EXT3_LAHF_LM,
1018 .features[FEAT_XSAVE] =
1019 CPUID_XSAVE_XSAVEOPT,
1020 .features[FEAT_6_EAX] =
1021 CPUID_6_EAX_ARAT,
1022 .xlevel = 0x80000008,
1023 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1026 .name = "IvyBridge",
1027 .level = 0xd,
1028 .vendor = CPUID_VENDOR_INTEL,
1029 .family = 6,
1030 .model = 58,
1031 .stepping = 9,
1032 .features[FEAT_1_EDX] =
1033 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1034 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1035 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1036 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1037 CPUID_DE | CPUID_FP87,
1038 .features[FEAT_1_ECX] =
1039 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1040 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1041 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1042 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1043 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1044 .features[FEAT_7_0_EBX] =
1045 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1046 CPUID_7_0_EBX_ERMS,
1047 .features[FEAT_8000_0001_EDX] =
1048 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1049 CPUID_EXT2_SYSCALL,
1050 .features[FEAT_8000_0001_ECX] =
1051 CPUID_EXT3_LAHF_LM,
1052 .features[FEAT_XSAVE] =
1053 CPUID_XSAVE_XSAVEOPT,
1054 .features[FEAT_6_EAX] =
1055 CPUID_6_EAX_ARAT,
1056 .xlevel = 0x80000008,
1057 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1060 .name = "Haswell-noTSX",
1061 .level = 0xd,
1062 .vendor = CPUID_VENDOR_INTEL,
1063 .family = 6,
1064 .model = 60,
1065 .stepping = 1,
1066 .features[FEAT_1_EDX] =
1067 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1068 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1069 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1070 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1071 CPUID_DE | CPUID_FP87,
1072 .features[FEAT_1_ECX] =
1073 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1074 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1075 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1076 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1077 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1078 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1079 .features[FEAT_8000_0001_EDX] =
1080 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1081 CPUID_EXT2_SYSCALL,
1082 .features[FEAT_8000_0001_ECX] =
1083 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1084 .features[FEAT_7_0_EBX] =
1085 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1086 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1087 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1088 .features[FEAT_XSAVE] =
1089 CPUID_XSAVE_XSAVEOPT,
1090 .features[FEAT_6_EAX] =
1091 CPUID_6_EAX_ARAT,
1092 .xlevel = 0x80000008,
1093 .model_id = "Intel Core Processor (Haswell, no TSX)",
1094 }, {
1095 .name = "Haswell",
1096 .level = 0xd,
1097 .vendor = CPUID_VENDOR_INTEL,
1098 .family = 6,
1099 .model = 60,
1100 .stepping = 1,
1101 .features[FEAT_1_EDX] =
1102 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1103 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1104 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1105 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1106 CPUID_DE | CPUID_FP87,
1107 .features[FEAT_1_ECX] =
1108 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1109 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1110 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1111 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1112 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1113 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1114 .features[FEAT_8000_0001_EDX] =
1115 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1116 CPUID_EXT2_SYSCALL,
1117 .features[FEAT_8000_0001_ECX] =
1118 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1119 .features[FEAT_7_0_EBX] =
1120 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1121 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1122 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1123 CPUID_7_0_EBX_RTM,
1124 .features[FEAT_XSAVE] =
1125 CPUID_XSAVE_XSAVEOPT,
1126 .features[FEAT_6_EAX] =
1127 CPUID_6_EAX_ARAT,
1128 .xlevel = 0x80000008,
1129 .model_id = "Intel Core Processor (Haswell)",
1132 .name = "Broadwell-noTSX",
1133 .level = 0xd,
1134 .vendor = CPUID_VENDOR_INTEL,
1135 .family = 6,
1136 .model = 61,
1137 .stepping = 2,
1138 .features[FEAT_1_EDX] =
1139 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1140 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1141 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1142 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1143 CPUID_DE | CPUID_FP87,
1144 .features[FEAT_1_ECX] =
1145 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1146 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1147 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1148 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1149 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1150 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1151 .features[FEAT_8000_0001_EDX] =
1152 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1153 CPUID_EXT2_SYSCALL,
1154 .features[FEAT_8000_0001_ECX] =
1155 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1156 .features[FEAT_7_0_EBX] =
1157 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1158 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1159 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1160 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1161 CPUID_7_0_EBX_SMAP,
1162 .features[FEAT_XSAVE] =
1163 CPUID_XSAVE_XSAVEOPT,
1164 .features[FEAT_6_EAX] =
1165 CPUID_6_EAX_ARAT,
1166 .xlevel = 0x80000008,
1167 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1170 .name = "Broadwell",
1171 .level = 0xd,
1172 .vendor = CPUID_VENDOR_INTEL,
1173 .family = 6,
1174 .model = 61,
1175 .stepping = 2,
1176 .features[FEAT_1_EDX] =
1177 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1178 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1179 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1180 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1181 CPUID_DE | CPUID_FP87,
1182 .features[FEAT_1_ECX] =
1183 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1184 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1185 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1186 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1187 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1188 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1189 .features[FEAT_8000_0001_EDX] =
1190 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1191 CPUID_EXT2_SYSCALL,
1192 .features[FEAT_8000_0001_ECX] =
1193 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1194 .features[FEAT_7_0_EBX] =
1195 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1196 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1197 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1198 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1199 CPUID_7_0_EBX_SMAP,
1200 .features[FEAT_XSAVE] =
1201 CPUID_XSAVE_XSAVEOPT,
1202 .features[FEAT_6_EAX] =
1203 CPUID_6_EAX_ARAT,
1204 .xlevel = 0x80000008,
1205 .model_id = "Intel Core Processor (Broadwell)",
1208 .name = "Opteron_G1",
1209 .level = 5,
1210 .vendor = CPUID_VENDOR_AMD,
1211 .family = 15,
1212 .model = 6,
1213 .stepping = 1,
1214 .features[FEAT_1_EDX] =
1215 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1216 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1217 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1218 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1219 CPUID_DE | CPUID_FP87,
1220 .features[FEAT_1_ECX] =
1221 CPUID_EXT_SSE3,
1222 .features[FEAT_8000_0001_EDX] =
1223 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1224 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1225 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1226 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1227 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1228 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1229 .xlevel = 0x80000008,
1230 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1233 .name = "Opteron_G2",
1234 .level = 5,
1235 .vendor = CPUID_VENDOR_AMD,
1236 .family = 15,
1237 .model = 6,
1238 .stepping = 1,
1239 .features[FEAT_1_EDX] =
1240 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1241 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1242 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1243 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1244 CPUID_DE | CPUID_FP87,
1245 .features[FEAT_1_ECX] =
1246 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1247 .features[FEAT_8000_0001_EDX] =
1248 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1249 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1250 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1251 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1252 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1253 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1254 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1255 .features[FEAT_8000_0001_ECX] =
1256 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1257 .xlevel = 0x80000008,
1258 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1261 .name = "Opteron_G3",
1262 .level = 5,
1263 .vendor = CPUID_VENDOR_AMD,
1264 .family = 15,
1265 .model = 6,
1266 .stepping = 1,
1267 .features[FEAT_1_EDX] =
1268 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1269 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1270 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1271 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1272 CPUID_DE | CPUID_FP87,
1273 .features[FEAT_1_ECX] =
1274 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1275 CPUID_EXT_SSE3,
1276 .features[FEAT_8000_0001_EDX] =
1277 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1278 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1279 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1280 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1281 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1282 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1283 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1284 .features[FEAT_8000_0001_ECX] =
1285 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1286 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1287 .xlevel = 0x80000008,
1288 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1291 .name = "Opteron_G4",
1292 .level = 0xd,
1293 .vendor = CPUID_VENDOR_AMD,
1294 .family = 21,
1295 .model = 1,
1296 .stepping = 2,
1297 .features[FEAT_1_EDX] =
1298 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1299 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1300 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1301 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1302 CPUID_DE | CPUID_FP87,
1303 .features[FEAT_1_ECX] =
1304 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1305 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1306 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1307 CPUID_EXT_SSE3,
1308 .features[FEAT_8000_0001_EDX] =
1309 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1310 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1311 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1312 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1313 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1314 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1315 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1316 .features[FEAT_8000_0001_ECX] =
1317 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1318 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1319 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1320 CPUID_EXT3_LAHF_LM,
1321 /* no xsaveopt! */
1322 .xlevel = 0x8000001A,
1323 .model_id = "AMD Opteron 62xx class CPU",
1326 .name = "Opteron_G5",
1327 .level = 0xd,
1328 .vendor = CPUID_VENDOR_AMD,
1329 .family = 21,
1330 .model = 2,
1331 .stepping = 0,
1332 .features[FEAT_1_EDX] =
1333 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1334 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1335 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1336 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1337 CPUID_DE | CPUID_FP87,
1338 .features[FEAT_1_ECX] =
1339 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1340 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1341 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1342 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1343 .features[FEAT_8000_0001_EDX] =
1344 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1345 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1346 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1347 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1348 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1349 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1350 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1351 .features[FEAT_8000_0001_ECX] =
1352 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1353 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1354 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1355 CPUID_EXT3_LAHF_LM,
1356 /* no xsaveopt! */
1357 .xlevel = 0x8000001A,
1358 .model_id = "AMD Opteron 63xx class CPU",
1362 typedef struct PropValue {
1363 const char *prop, *value;
1364 } PropValue;
1366 /* KVM-specific features that are automatically added/removed
1367 * from all CPU models when KVM is enabled.
1369 static PropValue kvm_default_props[] = {
1370 { "kvmclock", "on" },
1371 { "kvm-nopiodelay", "on" },
1372 { "kvm-asyncpf", "on" },
1373 { "kvm-steal-time", "on" },
1374 { "kvm-pv-eoi", "on" },
1375 { "kvmclock-stable-bit", "on" },
1376 { "x2apic", "on" },
1377 { "acpi", "off" },
1378 { "monitor", "off" },
1379 { "svm", "off" },
1380 { NULL, NULL },
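/* Illustrative note (not part of the original file): when KVM is in use these
 * defaults are applied to every CPU model, e.g. "x2apic" is switched on and
 * "acpi"/"monitor"/"svm" are switched off, unless a caller overrides an entry
 * via x86_cpu_change_kvm_default() below. */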
1383 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1385 PropValue *pv;
1386 for (pv = kvm_default_props; pv->prop; pv++) {
1387 if (!strcmp(pv->prop, prop)) {
1388 pv->value = value;
1389 break;
1393 /* It is valid to call this function only for properties that
1394 * are already present in the kvm_default_props table.
1396 assert(pv->prop);
1399 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1400 bool migratable_only);
1402 #ifdef CONFIG_KVM
1404 static int cpu_x86_fill_model_id(char *str)
1406 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1407 int i;
1409 for (i = 0; i < 3; i++) {
1410 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
1411 memcpy(str + i * 16 + 0, &eax, 4);
1412 memcpy(str + i * 16 + 4, &ebx, 4);
1413 memcpy(str + i * 16 + 8, &ecx, 4);
1414 memcpy(str + i * 16 + 12, &edx, 4);
1416 return 0;
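/* Illustrative note (not part of the original file): leaves 0x80000002..4
 * each return 16 bytes of brand string in EAX/EBX/ECX/EDX, so the three
 * iterations above fill the full 48-byte model_id buffer. */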
1419 static X86CPUDefinition host_cpudef;
1421 static Property host_x86_cpu_properties[] = {
1422 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1423 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
1424 DEFINE_PROP_END_OF_LIST()
1427 /* class_init for the "host" CPU model
1429 * This function may be called before KVM is initialized.
1431 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1433 DeviceClass *dc = DEVICE_CLASS(oc);
1434 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1435 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1437 xcc->kvm_required = true;
1439 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1440 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1442 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1443 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1444 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1445 host_cpudef.stepping = eax & 0x0F;
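/* Illustrative note (not part of the original file): for a host reporting
 * CPUID.1 EAX = 0x000306C3 the decode above yields family 6 (0x6 plus
 * extended family 0x00), model 60 (0xC | (0x3 << 4)) and stepping 3. */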
1447 cpu_x86_fill_model_id(host_cpudef.model_id);
1449 xcc->cpu_def = &host_cpudef;
1451 /* level, xlevel, xlevel2, and the feature words are initialized on
1452 * instance_init, because they require KVM to be initialized.
1455 dc->props = host_x86_cpu_properties;
1456 /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
1457 dc->cannot_destroy_with_object_finalize_yet = true;
1460 static void host_x86_cpu_initfn(Object *obj)
1462 X86CPU *cpu = X86_CPU(obj);
1463 CPUX86State *env = &cpu->env;
1464 KVMState *s = kvm_state;
1466 assert(kvm_enabled());
1468 /* We can't fill the features array here because we don't know yet if
1469 * "migratable" is true or false.
1471 cpu->host_features = true;
1473 env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1474 env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1475 env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1477 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1480 static const TypeInfo host_x86_cpu_type_info = {
1481 .name = X86_CPU_TYPE_NAME("host"),
1482 .parent = TYPE_X86_CPU,
1483 .instance_init = host_x86_cpu_initfn,
1484 .class_init = host_x86_cpu_class_init,
1487 #endif
1489 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1491 FeatureWordInfo *f = &feature_word_info[w];
1492 int i;
1494 for (i = 0; i < 32; ++i) {
1495 if ((1UL << i) & mask) {
1496 const char *reg = get_register_name_32(f->cpuid_reg);
1497 assert(reg);
1498 fprintf(stderr, "warning: %s doesn't support requested feature: "
1499 "CPUID.%02XH:%s%s%s [bit %d]\n",
1500 kvm_enabled() ? "host" : "TCG",
1501 f->cpuid_eax, reg,
1502 f->feat_names[i] ? "." : "",
1503 f->feat_names[i] ? f->feat_names[i] : "", i);
1508 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, void *opaque,
1509 const char *name, Error **errp)
1511 X86CPU *cpu = X86_CPU(obj);
1512 CPUX86State *env = &cpu->env;
1513 int64_t value;
1515 value = (env->cpuid_version >> 8) & 0xf;
1516 if (value == 0xf) {
1517 value += (env->cpuid_version >> 20) & 0xff;
1519 visit_type_int(v, &value, name, errp);
1522 static void x86_cpuid_version_set_family(Object *obj, Visitor *v, void *opaque,
1523 const char *name, Error **errp)
1525 X86CPU *cpu = X86_CPU(obj);
1526 CPUX86State *env = &cpu->env;
1527 const int64_t min = 0;
1528 const int64_t max = 0xff + 0xf;
1529 Error *local_err = NULL;
1530 int64_t value;
1532 visit_type_int(v, &value, name, &local_err);
1533 if (local_err) {
1534 error_propagate(errp, local_err);
1535 return;
1537 if (value < min || value > max) {
1538 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1539 name ? name : "null", value, min, max);
1540 return;
1543 env->cpuid_version &= ~0xff00f00;
1544 if (value > 0x0f) {
1545 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1546 } else {
1547 env->cpuid_version |= value << 8;
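/* Illustrative note (not part of the original file): requesting family=21
 * (the value used by the Opteron_G4/G5 models above) takes the value > 0x0f
 * branch, storing base family 0xf plus extended family 21 - 15 = 6 in bits
 * 20-27, which the getter above adds back together to return 21. */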
1551 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, void *opaque,
1552 const char *name, Error **errp)
1554 X86CPU *cpu = X86_CPU(obj);
1555 CPUX86State *env = &cpu->env;
1556 int64_t value;
1558 value = (env->cpuid_version >> 4) & 0xf;
1559 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1560 visit_type_int(v, &value, name, errp);
1563 static void x86_cpuid_version_set_model(Object *obj, Visitor *v, void *opaque,
1564 const char *name, Error **errp)
1566 X86CPU *cpu = X86_CPU(obj);
1567 CPUX86State *env = &cpu->env;
1568 const int64_t min = 0;
1569 const int64_t max = 0xff;
1570 Error *local_err = NULL;
1571 int64_t value;
1573 visit_type_int(v, &value, name, &local_err);
1574 if (local_err) {
1575 error_propagate(errp, local_err);
1576 return;
1578 if (value < min || value > max) {
1579 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1580 name ? name : "null", value, min, max);
1581 return;
1584 env->cpuid_version &= ~0xf00f0;
1585 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1588 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1589 void *opaque, const char *name,
1590 Error **errp)
1592 X86CPU *cpu = X86_CPU(obj);
1593 CPUX86State *env = &cpu->env;
1594 int64_t value;
1596 value = env->cpuid_version & 0xf;
1597 visit_type_int(v, &value, name, errp);
1600 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1601 void *opaque, const char *name,
1602 Error **errp)
1604 X86CPU *cpu = X86_CPU(obj);
1605 CPUX86State *env = &cpu->env;
1606 const int64_t min = 0;
1607 const int64_t max = 0xf;
1608 Error *local_err = NULL;
1609 int64_t value;
1611 visit_type_int(v, &value, name, &local_err);
1612 if (local_err) {
1613 error_propagate(errp, local_err);
1614 return;
1616 if (value < min || value > max) {
1617 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1618 name ? name : "null", value, min, max);
1619 return;
1622 env->cpuid_version &= ~0xf;
1623 env->cpuid_version |= value & 0xf;
1626 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1628 X86CPU *cpu = X86_CPU(obj);
1629 CPUX86State *env = &cpu->env;
1630 char *value;
1632 value = g_malloc(CPUID_VENDOR_SZ + 1);
1633 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1634 env->cpuid_vendor3);
1635 return value;
1638 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1639 Error **errp)
1641 X86CPU *cpu = X86_CPU(obj);
1642 CPUX86State *env = &cpu->env;
1643 int i;
1645 if (strlen(value) != CPUID_VENDOR_SZ) {
1646 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1647 return;
1650 env->cpuid_vendor1 = 0;
1651 env->cpuid_vendor2 = 0;
1652 env->cpuid_vendor3 = 0;
1653 for (i = 0; i < 4; i++) {
1654 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1655 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1656 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1660 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1662 X86CPU *cpu = X86_CPU(obj);
1663 CPUX86State *env = &cpu->env;
1664 char *value;
1665 int i;
1667 value = g_malloc(48 + 1);
1668 for (i = 0; i < 48; i++) {
1669 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1671 value[48] = '\0';
1672 return value;
1675 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1676 Error **errp)
1678 X86CPU *cpu = X86_CPU(obj);
1679 CPUX86State *env = &cpu->env;
1680 int c, len, i;
1682 if (model_id == NULL) {
1683 model_id = "";
1685 len = strlen(model_id);
1686 memset(env->cpuid_model, 0, 48);
1687 for (i = 0; i < 48; i++) {
1688 if (i >= len) {
1689 c = '\0';
1690 } else {
1691 c = (uint8_t)model_id[i];
1693 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1697 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, void *opaque,
1698 const char *name, Error **errp)
1700 X86CPU *cpu = X86_CPU(obj);
1701 int64_t value;
1703 value = cpu->env.tsc_khz * 1000;
1704 visit_type_int(v, &value, name, errp);
1707 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, void *opaque,
1708 const char *name, Error **errp)
1710 X86CPU *cpu = X86_CPU(obj);
1711 const int64_t min = 0;
1712 const int64_t max = INT64_MAX;
1713 Error *local_err = NULL;
1714 int64_t value;
1716 visit_type_int(v, &value, name, &local_err);
1717 if (local_err) {
1718 error_propagate(errp, local_err);
1719 return;
1721 if (value < min || value > max) {
1722 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1723 name ? name : "null", value, min, max);
1724 return;
1727 cpu->env.tsc_khz = value / 1000;
1730 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, void *opaque,
1731 const char *name, Error **errp)
1733 X86CPU *cpu = X86_CPU(obj);
1734 int64_t value = cpu->apic_id;
1736 visit_type_int(v, &value, name, errp);
1739 static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, void *opaque,
1740 const char *name, Error **errp)
1742 X86CPU *cpu = X86_CPU(obj);
1743 DeviceState *dev = DEVICE(obj);
1744 const int64_t min = 0;
1745 const int64_t max = UINT32_MAX;
1746 Error *error = NULL;
1747 int64_t value;
1749 if (dev->realized) {
1750 error_setg(errp, "Attempt to set property '%s' on '%s' after "
1751 "it was realized", name, object_get_typename(obj));
1752 return;
1755 visit_type_int(v, &value, name, &error);
1756 if (error) {
1757 error_propagate(errp, error);
1758 return;
1760 if (value < min || value > max) {
1761 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1762 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
1763 object_get_typename(obj), name, value, min, max);
1764 return;
1767 if ((value != cpu->apic_id) && cpu_exists(value)) {
1768 error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
1769 return;
1771 cpu->apic_id = value;
1774 /* Generic getter for "feature-words" and "filtered-features" properties */
1775 static void x86_cpu_get_feature_words(Object *obj, Visitor *v, void *opaque,
1776 const char *name, Error **errp)
1778 uint32_t *array = (uint32_t *)opaque;
1779 FeatureWord w;
1780 Error *err = NULL;
1781 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1782 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1783 X86CPUFeatureWordInfoList *list = NULL;
1785 for (w = 0; w < FEATURE_WORDS; w++) {
1786 FeatureWordInfo *wi = &feature_word_info[w];
1787 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1788 qwi->cpuid_input_eax = wi->cpuid_eax;
1789 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1790 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1791 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1792 qwi->features = array[w];
1794 /* List will be in reverse order, but order shouldn't matter */
1795 list_entries[w].next = list;
1796 list_entries[w].value = &word_infos[w];
1797 list = &list_entries[w];
1800 visit_type_X86CPUFeatureWordInfoList(v, &list, "feature-words", &err);
1801 error_propagate(errp, err);
1804 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1805 const char *name, Error **errp)
1807 X86CPU *cpu = X86_CPU(obj);
1808 int64_t value = cpu->hyperv_spinlock_attempts;
1810 visit_type_int(v, &value, name, errp);
1813 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1814 const char *name, Error **errp)
1816 const int64_t min = 0xFFF;
1817 const int64_t max = UINT_MAX;
1818 X86CPU *cpu = X86_CPU(obj);
1819 Error *err = NULL;
1820 int64_t value;
1822 visit_type_int(v, &value, name, &err);
1823 if (err) {
1824 error_propagate(errp, err);
1825 return;
1828 if (value < min || value > max) {
1829 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1830 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1831 object_get_typename(obj), name ? name : "null",
1832 value, min, max);
1833 return;
1835 cpu->hyperv_spinlock_attempts = value;
1838 static PropertyInfo qdev_prop_spinlocks = {
1839 .name = "int",
1840 .get = x86_get_hv_spinlocks,
1841 .set = x86_set_hv_spinlocks,
1844 /* Convert all '_' in a feature string option name to '-', to make feature
1845 * names conform to the QOM property naming rule, which uses '-' instead of '_'.
1846 */
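/* Illustrative example (editor's note, not part of the original source):
* feat2prop() rewrites a spelling such as "sse4_2" as "sse4-2" and
* "lahf_lm" as "lahf-lm", so underscore-based flag names given on the
* command line match the dash-based QOM property names. */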
1847 static inline void feat2prop(char *s)
1849 while ((s = strchr(s, '_'))) {
1850 *s = '-';
1854 /* Parse "+feature,-feature,feature=foo" CPU feature string
1855 */
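/* Illustrative example (editor's note, not part of the original source):
* given "-cpu qemu64,+avx,-sse3,xlevel=0x8000000A", the loop below puts
* the "avx" flag into plus_features, the "sse3" flag into minus_features,
* and applies "xlevel" through the QOM property of the same name. */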
1856 static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
1857 Error **errp)
1859 X86CPU *cpu = X86_CPU(cs);
1860 char *featurestr; /* Single "key=value" string being parsed */
1861 FeatureWord w;
1862 /* Features to be added */
1863 FeatureWordArray plus_features = { 0 };
1864 /* Features to be removed */
1865 FeatureWordArray minus_features = { 0 };
1866 uint32_t numvalue;
1867 CPUX86State *env = &cpu->env;
1868 Error *local_err = NULL;
1870 featurestr = features ? strtok(features, ",") : NULL;
1872 while (featurestr) {
1873 char *val;
1874 if (featurestr[0] == '+') {
1875 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
1876 } else if (featurestr[0] == '-') {
1877 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
1878 } else if ((val = strchr(featurestr, '='))) {
1879 *val = 0; val++;
1880 feat2prop(featurestr);
1881 if (!strcmp(featurestr, "xlevel")) {
1882 char *err;
1883 char num[32];
1885 numvalue = strtoul(val, &err, 0);
1886 if (!*val || *err) {
1887 error_setg(errp, "bad numerical value %s", val);
1888 return;
1890 if (numvalue < 0x80000000) {
1891 error_report("xlevel value shall always be >= 0x80000000"
1892 ", fixup will be removed in future versions");
1893 numvalue += 0x80000000;
1895 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1896 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1897 } else if (!strcmp(featurestr, "tsc-freq")) {
1898 int64_t tsc_freq;
1899 char *err;
1900 char num[32];
1902 tsc_freq = qemu_strtosz_suffix_unit(val, &err,
1903 QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
1904 if (tsc_freq < 0 || *err) {
1905 error_setg(errp, "bad numerical value %s", val);
1906 return;
1908 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
1909 object_property_parse(OBJECT(cpu), num, "tsc-frequency",
1910 &local_err);
1911 } else if (!strcmp(featurestr, "hv-spinlocks")) {
1912 char *err;
1913 const int min = 0xFFF;
1914 char num[32];
1915 numvalue = strtoul(val, &err, 0);
1916 if (!*val || *err) {
1917 error_setg(errp, "bad numerical value %s", val);
1918 return;
1920 if (numvalue < min) {
1921 error_report("hv-spinlocks value shall always be >= 0x%x"
1922 ", fixup will be removed in future versions",
1923 min);
1924 numvalue = min;
1926 snprintf(num, sizeof(num), "%" PRId32, numvalue);
1927 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1928 } else {
1929 object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
1931 } else {
1932 feat2prop(featurestr);
1933 object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
1935 if (local_err) {
1936 error_propagate(errp, local_err);
1937 return;
1939 featurestr = strtok(NULL, ",");
1942 if (cpu->host_features) {
1943 for (w = 0; w < FEATURE_WORDS; w++) {
1944 env->features[w] =
1945 x86_cpu_get_supported_feature_word(w, cpu->migratable);
1949 for (w = 0; w < FEATURE_WORDS; w++) {
1950 env->features[w] |= plus_features[w];
1951 env->features[w] &= ~minus_features[w];
1955 /* Print all cpuid feature names in featureset
1956 */
1957 static void listflags(FILE *f, fprintf_function print, const char **featureset)
1959 int bit;
1960 bool first = true;
1962 for (bit = 0; bit < 32; bit++) {
1963 if (featureset[bit]) {
1964 print(f, "%s%s", first ? "" : " ", featureset[bit]);
1965 first = false;
1970 /* generate CPU information. */
1971 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
1973 X86CPUDefinition *def;
1974 char buf[256];
1975 int i;
1977 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1978 def = &builtin_x86_defs[i];
1979 snprintf(buf, sizeof(buf), "%s", def->name);
1980 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
1982 #ifdef CONFIG_KVM
1983 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
1984 "KVM processor with all supported host features "
1985 "(only available in KVM mode)");
1986 #endif
1988 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
1989 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
1990 FeatureWordInfo *fw = &feature_word_info[i];
1992 (*cpu_fprintf)(f, " ");
1993 listflags(f, cpu_fprintf, fw->feat_names);
1994 (*cpu_fprintf)(f, "\n");
1998 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2000 CpuDefinitionInfoList *cpu_list = NULL;
2001 X86CPUDefinition *def;
2002 int i;
2004 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2005 CpuDefinitionInfoList *entry;
2006 CpuDefinitionInfo *info;
2008 def = &builtin_x86_defs[i];
2009 info = g_malloc0(sizeof(*info));
2010 info->name = g_strdup(def->name);
2012 entry = g_malloc0(sizeof(*entry));
2013 entry->value = info;
2014 entry->next = cpu_list;
2015 cpu_list = entry;
2018 return cpu_list;
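/* Editor's note (not in the original source): the next helper reports
* which bits of feature word 'w' the current accelerator can provide:
* KVM is queried via kvm_arch_get_supported_cpuid(), TCG uses the static
* tcg_features mask, and any other accelerator is treated as supporting
* everything (~0). */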
2021 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2022 bool migratable_only)
2024 FeatureWordInfo *wi = &feature_word_info[w];
2025 uint32_t r;
2027 if (kvm_enabled()) {
2028 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2029 wi->cpuid_ecx,
2030 wi->cpuid_reg);
2031 } else if (tcg_enabled()) {
2032 r = wi->tcg_features;
2033 } else {
2034 return ~0;
2036 if (migratable_only) {
2037 r &= x86_cpu_get_migratable_flags(w);
2039 return r;
2042 /*
2043 * Filters CPU feature words based on host availability of each feature.
2044 *
2045 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
2046 */
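/* Illustrative example (editor's note, not part of the original source):
* if a CPU model requests a flag the accelerator cannot provide, the bit
* is dropped from env->features and recorded in cpu->filtered_features;
* with "check" it is reported, and with "enforce" realize fails. */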
2047 static int x86_cpu_filter_features(X86CPU *cpu)
2049 CPUX86State *env = &cpu->env;
2050 FeatureWord w;
2051 int rv = 0;
2053 for (w = 0; w < FEATURE_WORDS; w++) {
2054 uint32_t host_feat =
2055 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2056 uint32_t requested_features = env->features[w];
2057 env->features[w] &= host_feat;
2058 cpu->filtered_features[w] = requested_features & ~env->features[w];
2059 if (cpu->filtered_features[w]) {
2060 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2061 report_unavailable_features(w, cpu->filtered_features[w]);
2063 rv = 1;
2067 return rv;
2070 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2072 PropValue *pv;
2073 for (pv = props; pv->prop; pv++) {
2074 if (!pv->value) {
2075 continue;
2077 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2078 &error_abort);
2082 /* Load data from X86CPUDefinition
2083 */
2084 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2086 CPUX86State *env = &cpu->env;
2087 const char *vendor;
2088 char host_vendor[CPUID_VENDOR_SZ + 1];
2089 FeatureWord w;
2091 object_property_set_int(OBJECT(cpu), def->level, "level", errp);
2092 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2093 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2094 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2095 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
2096 object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
2097 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2098 for (w = 0; w < FEATURE_WORDS; w++) {
2099 env->features[w] = def->features[w];
2102 /* Special cases not set in the X86CPUDefinition structs: */
2103 if (kvm_enabled()) {
2104 x86_cpu_apply_props(cpu, kvm_default_props);
2107 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2109 /* sysenter isn't supported in compatibility mode on AMD,
2110 * syscall isn't supported in compatibility mode on Intel.
2111 * Normally we advertise the actual CPU vendor, but you can
2112 * override this using the 'vendor' property if you want to use
2113 * KVM's sysenter/syscall emulation in compatibility mode and
2114 * when doing cross vendor migration
2115 */
2116 vendor = def->vendor;
2117 if (kvm_enabled()) {
2118 uint32_t ebx = 0, ecx = 0, edx = 0;
2119 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2120 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2121 vendor = host_vendor;
2124 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2128 X86CPU *cpu_x86_create(const char *cpu_model, Error **errp)
2130 X86CPU *cpu = NULL;
2131 X86CPUClass *xcc;
2132 ObjectClass *oc;
2133 gchar **model_pieces;
2134 char *name, *features;
2135 Error *error = NULL;
2137 model_pieces = g_strsplit(cpu_model, ",", 2);
2138 if (!model_pieces[0]) {
2139 error_setg(&error, "Invalid/empty CPU model name");
2140 goto out;
2142 name = model_pieces[0];
2143 features = model_pieces[1];
2145 oc = x86_cpu_class_by_name(name);
2146 if (oc == NULL) {
2147 error_setg(&error, "Unable to find CPU definition: %s", name);
2148 goto out;
2150 xcc = X86_CPU_CLASS(oc);
2152 if (xcc->kvm_required && !kvm_enabled()) {
2153 error_setg(&error, "CPU model '%s' requires KVM", name);
2154 goto out;
2157 cpu = X86_CPU(object_new(object_class_get_name(oc)));
2159 x86_cpu_parse_featurestr(CPU(cpu), features, &error);
2160 if (error) {
2161 goto out;
2164 out:
2165 if (error != NULL) {
2166 error_propagate(errp, error);
2167 if (cpu) {
2168 object_unref(OBJECT(cpu));
2169 cpu = NULL;
2172 g_strfreev(model_pieces);
2173 return cpu;
2176 X86CPU *cpu_x86_init(const char *cpu_model)
2178 Error *error = NULL;
2179 X86CPU *cpu;
2181 cpu = cpu_x86_create(cpu_model, &error);
2182 if (error) {
2183 goto out;
2186 object_property_set_bool(OBJECT(cpu), true, "realized", &error);
2188 out:
2189 if (error) {
2190 error_report_err(error);
2191 if (cpu != NULL) {
2192 object_unref(OBJECT(cpu));
2193 cpu = NULL;
2196 return cpu;
2199 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2201 X86CPUDefinition *cpudef = data;
2202 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2204 xcc->cpu_def = cpudef;
2207 static void x86_register_cpudef_type(X86CPUDefinition *def)
2209 char *typename = x86_cpu_type_name(def->name);
2210 TypeInfo ti = {
2211 .name = typename,
2212 .parent = TYPE_X86_CPU,
2213 .class_init = x86_cpu_cpudef_class_init,
2214 .class_data = def,
2217 type_register(&ti);
2218 g_free(typename);
2221 #if !defined(CONFIG_USER_ONLY)
2223 void cpu_clear_apic_feature(CPUX86State *env)
2225 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2228 #endif /* !CONFIG_USER_ONLY */
2230 /* Initialize list of CPU models, filling some non-static fields if necessary
2231 */
2232 void x86_cpudef_setup(void)
2234 int i, j;
2235 static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
2237 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
2238 X86CPUDefinition *def = &builtin_x86_defs[i];
2240 /* Look for specific "cpudef" models that */
2241 /* have the QEMU version in .model_id */
2242 for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
2243 if (strcmp(model_with_versions[j], def->name) == 0) {
2244 pstrcpy(def->model_id, sizeof(def->model_id),
2245 "QEMU Virtual CPU version ");
2246 pstrcat(def->model_id, sizeof(def->model_id),
2247 qemu_hw_version());
2248 break;
2254 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2255 uint32_t *eax, uint32_t *ebx,
2256 uint32_t *ecx, uint32_t *edx)
2258 X86CPU *cpu = x86_env_get_cpu(env);
2259 CPUState *cs = CPU(cpu);
2261 /* test if maximum index reached */
2262 if (index & 0x80000000) {
2263 if (index > env->cpuid_xlevel) {
2264 if (env->cpuid_xlevel2 > 0) {
2265 /* Handle the Centaur's CPUID instruction. */
2266 if (index > env->cpuid_xlevel2) {
2267 index = env->cpuid_xlevel2;
2268 } else if (index < 0xC0000000) {
2269 index = env->cpuid_xlevel;
2271 } else {
2272 /* Intel documentation states that invalid EAX input will
2273 * return the same information as EAX=cpuid_level
2274 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2275 */
2276 index = env->cpuid_level;
2279 } else {
2280 if (index > env->cpuid_level)
2281 index = env->cpuid_level;
2284 switch(index) {
2285 case 0:
2286 *eax = env->cpuid_level;
2287 *ebx = env->cpuid_vendor1;
2288 *edx = env->cpuid_vendor2;
2289 *ecx = env->cpuid_vendor3;
2290 break;
2291 case 1:
2292 *eax = env->cpuid_version;
2293 *ebx = (cpu->apic_id << 24) |
2294 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
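/* Editor's note (not in the original source): in CPUID.01H EBX,
* bits 31..24 carry the initial APIC ID, bits 15..8 the CLFLUSH line
* size in 8-byte units (8 => 64 bytes), and bits 23..16 (set below for
* SMP) the number of logical processors per package. */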
2295 *ecx = env->features[FEAT_1_ECX];
2296 *edx = env->features[FEAT_1_EDX];
2297 if (cs->nr_cores * cs->nr_threads > 1) {
2298 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2299 *edx |= 1 << 28; /* HTT bit */
2301 break;
2302 case 2:
2303 /* cache info: needed for Pentium Pro compatibility */
2304 if (cpu->cache_info_passthrough) {
2305 host_cpuid(index, 0, eax, ebx, ecx, edx);
2306 break;
2308 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2309 *ebx = 0;
2310 *ecx = 0;
2311 *edx = (L1D_DESCRIPTOR << 16) | \
2312 (L1I_DESCRIPTOR << 8) | \
2313 (L2_DESCRIPTOR);
2314 break;
2315 case 4:
2316 /* cache info: needed for Core compatibility */
2317 if (cpu->cache_info_passthrough) {
2318 host_cpuid(index, count, eax, ebx, ecx, edx);
2319 *eax &= ~0xFC000000;
2320 } else {
2321 *eax = 0;
2322 switch (count) {
2323 case 0: /* L1 dcache info */
2324 *eax |= CPUID_4_TYPE_DCACHE | \
2325 CPUID_4_LEVEL(1) | \
2326 CPUID_4_SELF_INIT_LEVEL;
2327 *ebx = (L1D_LINE_SIZE - 1) | \
2328 ((L1D_PARTITIONS - 1) << 12) | \
2329 ((L1D_ASSOCIATIVITY - 1) << 22);
2330 *ecx = L1D_SETS - 1;
2331 *edx = CPUID_4_NO_INVD_SHARING;
2332 break;
2333 case 1: /* L1 icache info */
2334 *eax |= CPUID_4_TYPE_ICACHE | \
2335 CPUID_4_LEVEL(1) | \
2336 CPUID_4_SELF_INIT_LEVEL;
2337 *ebx = (L1I_LINE_SIZE - 1) | \
2338 ((L1I_PARTITIONS - 1) << 12) | \
2339 ((L1I_ASSOCIATIVITY - 1) << 22);
2340 *ecx = L1I_SETS - 1;
2341 *edx = CPUID_4_NO_INVD_SHARING;
2342 break;
2343 case 2: /* L2 cache info */
2344 *eax |= CPUID_4_TYPE_UNIFIED | \
2345 CPUID_4_LEVEL(2) | \
2346 CPUID_4_SELF_INIT_LEVEL;
2347 if (cs->nr_threads > 1) {
2348 *eax |= (cs->nr_threads - 1) << 14;
2350 *ebx = (L2_LINE_SIZE - 1) | \
2351 ((L2_PARTITIONS - 1) << 12) | \
2352 ((L2_ASSOCIATIVITY - 1) << 22);
2353 *ecx = L2_SETS - 1;
2354 *edx = CPUID_4_NO_INVD_SHARING;
2355 break;
2356 default: /* end of info */
2357 *eax = 0;
2358 *ebx = 0;
2359 *ecx = 0;
2360 *edx = 0;
2361 break;
2365 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2366 if ((*eax & 31) && cs->nr_cores > 1) {
2367 *eax |= (cs->nr_cores - 1) << 26;
2369 break;
2370 case 5:
2371 /* mwait info: needed for Core compatibility */
2372 *eax = 0; /* Smallest monitor-line size in bytes */
2373 *ebx = 0; /* Largest monitor-line size in bytes */
2374 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2375 *edx = 0;
2376 break;
2377 case 6:
2378 /* Thermal and Power Leaf */
2379 *eax = env->features[FEAT_6_EAX];
2380 *ebx = 0;
2381 *ecx = 0;
2382 *edx = 0;
2383 break;
2384 case 7:
2385 /* Structured Extended Feature Flags Enumeration Leaf */
2386 if (count == 0) {
2387 *eax = 0; /* Maximum ECX value for sub-leaves */
2388 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2389 *ecx = 0; /* Reserved */
2390 *edx = 0; /* Reserved */
2391 } else {
2392 *eax = 0;
2393 *ebx = 0;
2394 *ecx = 0;
2395 *edx = 0;
2397 break;
2398 case 9:
2399 /* Direct Cache Access Information Leaf */
2400 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2401 *ebx = 0;
2402 *ecx = 0;
2403 *edx = 0;
2404 break;
2405 case 0xA:
2406 /* Architectural Performance Monitoring Leaf */
2407 if (kvm_enabled() && cpu->enable_pmu) {
2408 KVMState *s = cs->kvm_state;
2410 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2411 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2412 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2413 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2414 } else {
2415 *eax = 0;
2416 *ebx = 0;
2417 *ecx = 0;
2418 *edx = 0;
2420 break;
2421 case 0xD: {
2422 KVMState *s = cs->kvm_state;
2423 uint64_t kvm_mask;
2424 int i;
2426 /* Processor Extended State */
2427 *eax = 0;
2428 *ebx = 0;
2429 *ecx = 0;
2430 *edx = 0;
2431 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) || !kvm_enabled()) {
2432 break;
2434 kvm_mask =
2435 kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX) |
2436 ((uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32);
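/* Editor's note (not in the original source): kvm_mask is the 64-bit
* XCR0 feature mask KVM reports for CPUID.0DH sub-leaf 0 (EAX = low
* half, EDX = high half); it gates which extended save areas are
* advertised in the sub-leaves handled below. */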
2438 if (count == 0) {
2439 *ecx = 0x240;
2440 for (i = 2; i < ARRAY_SIZE(ext_save_areas); i++) {
2441 const ExtSaveArea *esa = &ext_save_areas[i];
2442 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2443 (kvm_mask & (1 << i)) != 0) {
2444 if (i < 32) {
2445 *eax |= 1 << i;
2446 } else {
2447 *edx |= 1 << (i - 32);
2449 *ecx = MAX(*ecx, esa->offset + esa->size);
2452 *eax |= kvm_mask & (XSTATE_FP | XSTATE_SSE);
2453 *ebx = *ecx;
2454 } else if (count == 1) {
2455 *eax = env->features[FEAT_XSAVE];
2456 } else if (count < ARRAY_SIZE(ext_save_areas)) {
2457 const ExtSaveArea *esa = &ext_save_areas[count];
2458 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2459 (kvm_mask & (1 << count)) != 0) {
2460 *eax = esa->size;
2461 *ebx = esa->offset;
2464 break;
2466 case 0x80000000:
2467 *eax = env->cpuid_xlevel;
2468 *ebx = env->cpuid_vendor1;
2469 *edx = env->cpuid_vendor2;
2470 *ecx = env->cpuid_vendor3;
2471 break;
2472 case 0x80000001:
2473 *eax = env->cpuid_version;
2474 *ebx = 0;
2475 *ecx = env->features[FEAT_8000_0001_ECX];
2476 *edx = env->features[FEAT_8000_0001_EDX];
2478 /* The Linux kernel checks for the CMPLegacy bit and
2479 * discards multiple thread information if it is set.
2480 * So don't set it here for Intel to make Linux guests happy.
2481 */
2482 if (cs->nr_cores * cs->nr_threads > 1) {
2483 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2484 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2485 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2486 *ecx |= 1 << 1; /* CmpLegacy bit */
2489 break;
2490 case 0x80000002:
2491 case 0x80000003:
2492 case 0x80000004:
2493 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2494 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2495 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2496 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2497 break;
2498 case 0x80000005:
2499 /* cache info (L1 cache) */
2500 if (cpu->cache_info_passthrough) {
2501 host_cpuid(index, 0, eax, ebx, ecx, edx);
2502 break;
2504 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2505 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2506 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2507 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2508 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2509 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2510 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2511 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2512 break;
2513 case 0x80000006:
2514 /* cache info (L2 cache) */
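/* Editor's note (not in the original source): AMD's 0x80000006 layout is
* ECX = L2 size in KB (31..16), associativity in AMD encoding (15..12),
* lines per tag (11..8) and line size (7..0); EDX describes L3 in units
* of 512 KB, which is why L3_SIZE_KB is divided by 512 below. */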
2515 if (cpu->cache_info_passthrough) {
2516 host_cpuid(index, 0, eax, ebx, ecx, edx);
2517 break;
2519 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2520 (L2_DTLB_2M_ENTRIES << 16) | \
2521 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2522 (L2_ITLB_2M_ENTRIES);
2523 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2524 (L2_DTLB_4K_ENTRIES << 16) | \
2525 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2526 (L2_ITLB_4K_ENTRIES);
2527 *ecx = (L2_SIZE_KB_AMD << 16) | \
2528 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2529 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2530 *edx = ((L3_SIZE_KB/512) << 18) | \
2531 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2532 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2533 break;
2534 case 0x80000007:
2535 *eax = 0;
2536 *ebx = 0;
2537 *ecx = 0;
2538 *edx = env->features[FEAT_8000_0007_EDX];
2539 break;
2540 case 0x80000008:
2541 /* virtual & phys address size in low 2 bytes. */
2542 /* XXX: This value must match the one used in the MMU code. */
2543 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2544 /* 64 bit processor */
2545 /* XXX: The physical address space is limited to 42 bits in exec.c. */
2546 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
2547 } else {
2548 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
2549 *eax = 0x00000024; /* 36 bits physical */
2550 } else {
2551 *eax = 0x00000020; /* 32 bits physical */
2554 *ebx = 0;
2555 *ecx = 0;
2556 *edx = 0;
2557 if (cs->nr_cores * cs->nr_threads > 1) {
2558 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2560 break;
2561 case 0x8000000A:
2562 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2563 *eax = 0x00000001; /* SVM Revision */
2564 *ebx = 0x00000010; /* nr of ASIDs */
2565 *ecx = 0;
2566 *edx = env->features[FEAT_SVM]; /* optional features */
2567 } else {
2568 *eax = 0;
2569 *ebx = 0;
2570 *ecx = 0;
2571 *edx = 0;
2573 break;
2574 case 0xC0000000:
2575 *eax = env->cpuid_xlevel2;
2576 *ebx = 0;
2577 *ecx = 0;
2578 *edx = 0;
2579 break;
2580 case 0xC0000001:
2581 /* Support for VIA CPU's CPUID instruction */
2582 *eax = env->cpuid_version;
2583 *ebx = 0;
2584 *ecx = 0;
2585 *edx = env->features[FEAT_C000_0001_EDX];
2586 break;
2587 case 0xC0000002:
2588 case 0xC0000003:
2589 case 0xC0000004:
2590 /* Reserved for the future, and now filled with zero */
2591 *eax = 0;
2592 *ebx = 0;
2593 *ecx = 0;
2594 *edx = 0;
2595 break;
2596 default:
2597 /* reserved values: zero */
2598 *eax = 0;
2599 *ebx = 0;
2600 *ecx = 0;
2601 *edx = 0;
2602 break;
2606 /* CPUClass::reset() */
2607 static void x86_cpu_reset(CPUState *s)
2609 X86CPU *cpu = X86_CPU(s);
2610 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2611 CPUX86State *env = &cpu->env;
2612 int i;
2614 xcc->parent_reset(s);
2616 memset(env, 0, offsetof(CPUX86State, cpuid_level));
2618 tlb_flush(s, 1);
2620 env->old_exception = -1;
2622 /* init to reset state */
2624 #ifdef CONFIG_SOFTMMU
2625 env->hflags |= HF_SOFTMMU_MASK;
2626 #endif
2627 env->hflags2 |= HF2_GIF_MASK;
2629 cpu_x86_update_cr0(env, 0x60000010);
2630 env->a20_mask = ~0x0;
2631 env->smbase = 0x30000;
2633 env->idt.limit = 0xffff;
2634 env->gdt.limit = 0xffff;
2635 env->ldt.limit = 0xffff;
2636 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2637 env->tr.limit = 0xffff;
2638 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
2640 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2641 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2642 DESC_R_MASK | DESC_A_MASK);
2643 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2644 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2645 DESC_A_MASK);
2646 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2647 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2648 DESC_A_MASK);
2649 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2650 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2651 DESC_A_MASK);
2652 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2653 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2654 DESC_A_MASK);
2655 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2656 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2657 DESC_A_MASK);
2659 env->eip = 0xfff0;
2660 env->regs[R_EDX] = env->cpuid_version;
2662 env->eflags = 0x2;
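/* Editor's note (not in the original source): this matches the
* architectural reset state: CS base 0xffff0000 plus EIP 0xfff0 puts the
* first fetch at the reset vector 0xfffffff0, and EDX holds the
* family/model/stepping word from CPUID leaf 1. */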
2664 /* FPU init */
2665 for (i = 0; i < 8; i++) {
2666 env->fptags[i] = 1;
2668 cpu_set_fpuc(env, 0x37f);
2670 env->mxcsr = 0x1f80;
2671 env->xstate_bv = XSTATE_FP | XSTATE_SSE;
2673 env->pat = 0x0007040600070406ULL;
2674 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2676 memset(env->dr, 0, sizeof(env->dr));
2677 env->dr[6] = DR6_FIXED_1;
2678 env->dr[7] = DR7_FIXED_1;
2679 cpu_breakpoint_remove_all(s, BP_CPU);
2680 cpu_watchpoint_remove_all(s, BP_CPU);
2682 env->xcr0 = 1;
2684 /*
2685 * SDM 11.11.5 requires:
2686 * - IA32_MTRR_DEF_TYPE MSR.E = 0
2687 * - IA32_MTRR_PHYSMASKn.V = 0
2688 * All other bits are undefined. For simplification, zero it all.
2689 */
2690 env->mtrr_deftype = 0;
2691 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
2692 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
2694 #if !defined(CONFIG_USER_ONLY)
2695 /* We hard-wire the BSP to the first CPU. */
2696 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
2698 s->halted = !cpu_is_bsp(cpu);
2700 if (kvm_enabled()) {
2701 kvm_arch_reset_vcpu(cpu);
2703 #endif
2706 #ifndef CONFIG_USER_ONLY
2707 bool cpu_is_bsp(X86CPU *cpu)
2709 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2712 /* TODO: remove me, when reset over QOM tree is implemented */
2713 static void x86_cpu_machine_reset_cb(void *opaque)
2715 X86CPU *cpu = opaque;
2716 cpu_reset(CPU(cpu));
2718 #endif
2720 static void mce_init(X86CPU *cpu)
2722 CPUX86State *cenv = &cpu->env;
2723 unsigned int bank;
2725 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2726 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2727 (CPUID_MCE | CPUID_MCA)) {
2728 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2729 cenv->mcg_ctl = ~(uint64_t)0;
2730 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2731 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2736 #ifndef CONFIG_USER_ONLY
2737 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2739 APICCommonState *apic;
2740 const char *apic_type = "apic";
2742 if (kvm_irqchip_in_kernel()) {
2743 apic_type = "kvm-apic";
2744 } else if (xen_enabled()) {
2745 apic_type = "xen-apic";
2748 cpu->apic_state = DEVICE(object_new(apic_type));
2750 object_property_add_child(OBJECT(cpu), "apic",
2751 OBJECT(cpu->apic_state), NULL);
2752 qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
2753 /* TODO: convert to link<> */
2754 apic = APIC_COMMON(cpu->apic_state);
2755 apic->cpu = cpu;
2756 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
2759 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2761 APICCommonState *apic;
2762 static bool apic_mmio_map_once;
2764 if (cpu->apic_state == NULL) {
2765 return;
2767 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
2768 errp);
2770 /* Map APIC MMIO area */
2771 apic = APIC_COMMON(cpu->apic_state);
2772 if (!apic_mmio_map_once) {
2773 memory_region_add_subregion_overlap(get_system_memory(),
2774 apic->apicbase &
2775 MSR_IA32_APICBASE_BASE,
2776 &apic->io_memory,
2777 0x1000);
2778 apic_mmio_map_once = true;
2782 static void x86_cpu_machine_done(Notifier *n, void *unused)
2784 X86CPU *cpu = container_of(n, X86CPU, machine_done);
2785 MemoryRegion *smram =
2786 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
2788 if (smram) {
2789 cpu->smram = g_new(MemoryRegion, 1);
2790 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
2791 smram, 0, 1ull << 32);
2792 memory_region_set_enabled(cpu->smram, false);
2793 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
2796 #else
2797 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2800 #endif
2803 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
2804 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
2805 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
2806 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
2807 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
2808 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
2809 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
2811 CPUState *cs = CPU(dev);
2812 X86CPU *cpu = X86_CPU(dev);
2813 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
2814 CPUX86State *env = &cpu->env;
2815 Error *local_err = NULL;
2816 static bool ht_warned;
2818 if (cpu->apic_id < 0) {
2819 error_setg(errp, "apic-id property was not initialized properly");
2820 return;
2823 if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
2824 env->cpuid_level = 7;
2827 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
2828 * CPUID[1].EDX.
2829 */
2830 if (IS_AMD_CPU(env)) {
2831 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
2832 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
2833 & CPUID_EXT2_AMD_ALIASES);
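/* Editor's note (not in the original source): CPUID_EXT2_AMD_ALIASES
* names the CPUID[1].EDX bits that AMD duplicates into
* CPUID[8000_0001].EDX, so the aliased bits are cleared and re-copied
* from FEAT_1_EDX to keep the two leaves consistent. */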
2837 if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
2838 error_setg(&local_err,
2839 kvm_enabled() ?
2840 "Host doesn't support requested features" :
2841 "TCG doesn't support requested features");
2842 goto out;
2845 #ifndef CONFIG_USER_ONLY
2846 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
2848 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
2849 x86_cpu_apic_create(cpu, &local_err);
2850 if (local_err != NULL) {
2851 goto out;
2854 #endif
2856 mce_init(cpu);
2858 #ifndef CONFIG_USER_ONLY
2859 if (tcg_enabled()) {
2860 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
2861 cpu->cpu_as_root = g_new(MemoryRegion, 1);
2862 cs->as = g_new(AddressSpace, 1);
2864 /* Outer container... */
2865 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
2866 memory_region_set_enabled(cpu->cpu_as_root, true);
2868 /* ... with two regions inside: normal system memory with low
2869 * priority, and...
2870 */
2871 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
2872 get_system_memory(), 0, ~0ull);
2873 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
2874 memory_region_set_enabled(cpu->cpu_as_mem, true);
2875 address_space_init(cs->as, cpu->cpu_as_root, "CPU");
2877 /* ... SMRAM with higher priority, linked from /machine/smram. */
2878 cpu->machine_done.notify = x86_cpu_machine_done;
2879 qemu_add_machine_init_done_notifier(&cpu->machine_done);
2881 #endif
2883 qemu_init_vcpu(cs);
2885 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
2886 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
2887 * based on inputs (sockets,cores,threads), it is still better to give
2888 * users a warning.
2889 *
2890 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
2891 * cs->nr_threads hasn't been populated yet and the check is incorrect.
2892 */
2893 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
2894 error_report("AMD CPU doesn't support hyperthreading. Please configure"
2895 " -smp options properly.");
2896 ht_warned = true;
2899 x86_cpu_apic_realize(cpu, &local_err);
2900 if (local_err != NULL) {
2901 goto out;
2903 cpu_reset(cs);
2905 xcc->parent_realize(dev, &local_err);
2907 out:
2908 if (local_err != NULL) {
2909 error_propagate(errp, local_err);
2910 return;
2914 typedef struct BitProperty {
2915 uint32_t *ptr;
2916 uint32_t mask;
2917 } BitProperty;
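/* Editor's note (not in the original source): 'ptr' points into one of
* the env->features[] words and 'mask' selects the bit (or bits, if the
* same property is registered several times) that the boolean property
* reads and writes. */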
2919 static void x86_cpu_get_bit_prop(Object *obj,
2920 struct Visitor *v,
2921 void *opaque,
2922 const char *name,
2923 Error **errp)
2925 BitProperty *fp = opaque;
2926 bool value = (*fp->ptr & fp->mask) == fp->mask;
2927 visit_type_bool(v, &value, name, errp);
2930 static void x86_cpu_set_bit_prop(Object *obj,
2931 struct Visitor *v,
2932 void *opaque,
2933 const char *name,
2934 Error **errp)
2936 DeviceState *dev = DEVICE(obj);
2937 BitProperty *fp = opaque;
2938 Error *local_err = NULL;
2939 bool value;
2941 if (dev->realized) {
2942 qdev_prop_set_after_realize(dev, name, errp);
2943 return;
2946 visit_type_bool(v, &value, name, &local_err);
2947 if (local_err) {
2948 error_propagate(errp, local_err);
2949 return;
2952 if (value) {
2953 *fp->ptr |= fp->mask;
2954 } else {
2955 *fp->ptr &= ~fp->mask;
2959 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
2960 void *opaque)
2962 BitProperty *prop = opaque;
2963 g_free(prop);
2966 /* Register a boolean property to get/set a single bit in a uint32_t field.
2967 *
2968 * The same property name can be registered multiple times to make it affect
2969 * multiple bits in the same FeatureWord. In that case, the getter will return
2970 * true only if all bits are set.
2971 */
2972 static void x86_cpu_register_bit_prop(X86CPU *cpu,
2973 const char *prop_name,
2974 uint32_t *field,
2975 int bitnr)
2977 BitProperty *fp;
2978 ObjectProperty *op;
2979 uint32_t mask = (1UL << bitnr);
2981 op = object_property_find(OBJECT(cpu), prop_name, NULL);
2982 if (op) {
2983 fp = op->opaque;
2984 assert(fp->ptr == field);
2985 fp->mask |= mask;
2986 } else {
2987 fp = g_new0(BitProperty, 1);
2988 fp->ptr = field;
2989 fp->mask = mask;
2990 object_property_add(OBJECT(cpu), prop_name, "bool",
2991 x86_cpu_get_bit_prop,
2992 x86_cpu_set_bit_prop,
2993 x86_cpu_release_bit_prop, fp, &error_abort);
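/* Editor's note (not in the original source): feat_names entries may list
* alternate spellings separated by '|'; for a hypothetical entry
* "foo|foo_alt", the function below makes "foo" the bit property and adds
* "foo-alt" as an alias pointing at it. */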
2997 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
2998 FeatureWord w,
2999 int bitnr)
3001 Object *obj = OBJECT(cpu);
3002 int i;
3003 char **names;
3004 FeatureWordInfo *fi = &feature_word_info[w];
3006 if (!fi->feat_names) {
3007 return;
3009 if (!fi->feat_names[bitnr]) {
3010 return;
3013 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
3015 feat2prop(names[0]);
3016 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
3018 for (i = 1; names[i]; i++) {
3019 feat2prop(names[i]);
3020 object_property_add_alias(obj, names[i], obj, names[0],
3021 &error_abort);
3024 g_strfreev(names);
3027 static void x86_cpu_initfn(Object *obj)
3029 CPUState *cs = CPU(obj);
3030 X86CPU *cpu = X86_CPU(obj);
3031 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
3032 CPUX86State *env = &cpu->env;
3033 FeatureWord w;
3034 static int inited;
3036 cs->env_ptr = env;
3037 cpu_exec_init(cs, &error_abort);
3039 object_property_add(obj, "family", "int",
3040 x86_cpuid_version_get_family,
3041 x86_cpuid_version_set_family, NULL, NULL, NULL);
3042 object_property_add(obj, "model", "int",
3043 x86_cpuid_version_get_model,
3044 x86_cpuid_version_set_model, NULL, NULL, NULL);
3045 object_property_add(obj, "stepping", "int",
3046 x86_cpuid_version_get_stepping,
3047 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
3048 object_property_add_str(obj, "vendor",
3049 x86_cpuid_get_vendor,
3050 x86_cpuid_set_vendor, NULL);
3051 object_property_add_str(obj, "model-id",
3052 x86_cpuid_get_model_id,
3053 x86_cpuid_set_model_id, NULL);
3054 object_property_add(obj, "tsc-frequency", "int",
3055 x86_cpuid_get_tsc_freq,
3056 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
3057 object_property_add(obj, "apic-id", "int",
3058 x86_cpuid_get_apic_id,
3059 x86_cpuid_set_apic_id, NULL, NULL, NULL);
3060 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
3061 x86_cpu_get_feature_words,
3062 NULL, NULL, (void *)env->features, NULL);
3063 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
3064 x86_cpu_get_feature_words,
3065 NULL, NULL, (void *)cpu->filtered_features, NULL);
3067 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
3069 #ifndef CONFIG_USER_ONLY
3070 /* Any code creating new X86CPU objects has to set apic-id explicitly */
3071 cpu->apic_id = -1;
3072 #endif
3074 for (w = 0; w < FEATURE_WORDS; w++) {
3075 int bitnr;
3077 for (bitnr = 0; bitnr < 32; bitnr++) {
3078 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
3082 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
3084 /* init various static tables used in TCG mode */
3085 if (tcg_enabled() && !inited) {
3086 inited = 1;
3087 optimize_flags_init();
3091 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3093 X86CPU *cpu = X86_CPU(cs);
3095 return cpu->apic_id;
3098 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3100 X86CPU *cpu = X86_CPU(cs);
3102 return cpu->env.cr[0] & CR0_PG_MASK;
3105 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3107 X86CPU *cpu = X86_CPU(cs);
3109 cpu->env.eip = value;
3112 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3114 X86CPU *cpu = X86_CPU(cs);
3116 cpu->env.eip = tb->pc - tb->cs_base;
3119 static bool x86_cpu_has_work(CPUState *cs)
3121 X86CPU *cpu = X86_CPU(cs);
3122 CPUX86State *env = &cpu->env;
3124 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3125 CPU_INTERRUPT_POLL)) &&
3126 (env->eflags & IF_MASK)) ||
3127 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3128 CPU_INTERRUPT_INIT |
3129 CPU_INTERRUPT_SIPI |
3130 CPU_INTERRUPT_MCE)) ||
3131 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3132 !(env->hflags & HF_SMM_MASK));
3135 static Property x86_cpu_properties[] = {
3136 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
3137 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
3138 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
3139 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
3140 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
3141 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
3142 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
3143 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
3144 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
3145 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
3146 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
3147 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
3148 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
3149 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
3150 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
3151 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
3152 DEFINE_PROP_END_OF_LIST()
3155 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
3157 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3158 CPUClass *cc = CPU_CLASS(oc);
3159 DeviceClass *dc = DEVICE_CLASS(oc);
3161 xcc->parent_realize = dc->realize;
3162 dc->realize = x86_cpu_realizefn;
3163 dc->props = x86_cpu_properties;
3165 xcc->parent_reset = cc->reset;
3166 cc->reset = x86_cpu_reset;
3167 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
3169 cc->class_by_name = x86_cpu_class_by_name;
3170 cc->parse_features = x86_cpu_parse_featurestr;
3171 cc->has_work = x86_cpu_has_work;
3172 cc->do_interrupt = x86_cpu_do_interrupt;
3173 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
3174 cc->dump_state = x86_cpu_dump_state;
3175 cc->set_pc = x86_cpu_set_pc;
3176 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
3177 cc->gdb_read_register = x86_cpu_gdb_read_register;
3178 cc->gdb_write_register = x86_cpu_gdb_write_register;
3179 cc->get_arch_id = x86_cpu_get_arch_id;
3180 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
3181 #ifdef CONFIG_USER_ONLY
3182 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
3183 #else
3184 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
3185 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
3186 cc->write_elf64_note = x86_cpu_write_elf64_note;
3187 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
3188 cc->write_elf32_note = x86_cpu_write_elf32_note;
3189 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
3190 cc->vmsd = &vmstate_x86_cpu;
3191 #endif
3192 cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
3193 #ifndef CONFIG_USER_ONLY
3194 cc->debug_excp_handler = breakpoint_handler;
3195 #endif
3196 cc->cpu_exec_enter = x86_cpu_exec_enter;
3197 cc->cpu_exec_exit = x86_cpu_exec_exit;
3199 /*
3200 * Reason: x86_cpu_initfn() calls cpu_exec_init(), which saves the
3201 * object in cpus -> dangling pointer after final object_unref().
3202 */
3203 dc->cannot_destroy_with_object_finalize_yet = true;
3206 static const TypeInfo x86_cpu_type_info = {
3207 .name = TYPE_X86_CPU,
3208 .parent = TYPE_CPU,
3209 .instance_size = sizeof(X86CPU),
3210 .instance_init = x86_cpu_initfn,
3211 .abstract = true,
3212 .class_size = sizeof(X86CPUClass),
3213 .class_init = x86_cpu_common_class_init,
3216 static void x86_cpu_register_types(void)
3218 int i;
3220 type_register_static(&x86_cpu_type_info);
3221 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3222 x86_register_cpudef_type(&builtin_x86_defs[i]);
3224 #ifdef CONFIG_KVM
3225 type_register_static(&host_x86_cpu_type_info);
3226 #endif
3229 type_init(x86_cpu_register_types)