kvm: Allow the Hyper-V vendor ID to be specified
[qemu/ar7.git] / target-i386 / cpu.c
blob5f53af248f874aa41f524b5f252aa2438f23fe90
1 /*
2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include <stdlib.h>
20 #include <stdio.h>
21 #include <string.h>
22 #include <inttypes.h>
24 #include "cpu.h"
25 #include "sysemu/kvm.h"
26 #include "sysemu/cpus.h"
27 #include "kvm_i386.h"
29 #include "qemu/error-report.h"
30 #include "qemu/option.h"
31 #include "qemu/config-file.h"
32 #include "qapi/qmp/qerror.h"
34 #include "qapi-types.h"
35 #include "qapi-visit.h"
36 #include "qapi/visitor.h"
37 #include "sysemu/arch_init.h"
39 #include "hw/hw.h"
40 #if defined(CONFIG_KVM)
41 #include <linux/kvm_para.h>
42 #endif
44 #include "sysemu/sysemu.h"
45 #include "hw/qdev-properties.h"
46 #ifndef CONFIG_USER_ONLY
47 #include "exec/address-spaces.h"
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
50 #endif
/* Cache topology CPUID constants: */

/* CPUID Leaf 2 Descriptors: one-byte cache descriptor values returned
 * by CPUID leaf 2 (the name encodes size/ways/line size). */
#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
#define CPUID_2_L1I_32KB_8WAY_64B 0x30
#define CPUID_2_L2_2MB_8WAY_64B 0x7d

/* CPUID Leaf 4 constants: */

/* EAX: cache type field */
#define CPUID_4_TYPE_DCACHE 1
#define CPUID_4_TYPE_ICACHE 2
#define CPUID_4_TYPE_UNIFIED 3

/* EAX: cache level goes in bits 7:5 */
#define CPUID_4_LEVEL(l) ((l) << 5)

#define CPUID_4_SELF_INIT_LEVEL (1 << 8)
#define CPUID_4_FULLY_ASSOC (1 << 9)

/* EDX: */
#define CPUID_4_NO_INVD_SHARING (1 << 0)
#define CPUID_4_INCLUSIVE (1 << 1)
#define CPUID_4_COMPLEX_IDX (1 << 2)
/* Marker for a fully-associative cache in the raw way count */
#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006:
 * maps a way count to AMD's 4-bit associativity field; unlisted way
 * counts encode as 0 (invalid).
 * NOTE: the argument is evaluated many times — it is now parenthesized
 * at every use so operator-precedence surprises are avoided, but do not
 * pass expressions with side effects.
 */
#define AMD_ENC_ASSOC(a) ((a) <=   1 ? (a) : \
                          (a) ==   2 ? 0x2 : \
                          (a) ==   4 ? 0x4 : \
                          (a) ==   8 ? 0x6 : \
                          (a) ==  16 ? 0x8 : \
                          (a) ==  32 ? 0xA : \
                          (a) ==  48 ? 0xB : \
                          (a) ==  64 ? 0xC : \
                          (a) ==  96 ? 0xD : \
                          (a) == 128 ? 0xE : \
                          (a) == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
/* Definitions of the hardcoded cache entries we expose:
 * (the guest always sees this synthetic cache model; see the FIXME
 * notes below about inconsistencies between the CPUID leaves) */

/* L1 data cache: */
#define L1D_LINE_SIZE 64
#define L1D_ASSOCIATIVITY 8
#define L1D_SETS 64
#define L1D_PARTITIONS 1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1D_LINES_PER_TAG 1
#define L1D_SIZE_KB_AMD 64
#define L1D_ASSOCIATIVITY_AMD 2

/* L1 instruction cache: */
#define L1I_LINE_SIZE 64
#define L1I_ASSOCIATIVITY 8
#define L1I_SETS 64
#define L1I_PARTITIONS 1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1I_LINES_PER_TAG 1
#define L1I_SIZE_KB_AMD 64
#define L1I_ASSOCIATIVITY_AMD 2

/* Level 2 unified cache: */
#define L2_LINE_SIZE 64
#define L2_ASSOCIATIVITY 16
#define L2_SETS 4096
#define L2_PARTITIONS 1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
#define L2_LINES_PER_TAG 1
#define L2_SIZE_KB_AMD 512

/* No L3 cache: */
#define L3_SIZE_KB 0 /* disabled */
#define L3_ASSOCIATIVITY 0 /* disabled */
#define L3_LINES_PER_TAG 0 /* disabled */
#define L3_LINE_SIZE 0 /* disabled */
/* TLB definitions: */

/* L1 TLBs (2M/4M-page and 4K-page variants) */
#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

/* L2 TLBs: only the 4K-page TLBs are exposed */
#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512
164 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
165 uint32_t vendor2, uint32_t vendor3)
167 int i;
168 for (i = 0; i < 4; i++) {
169 dst[i] = vendor1 >> (8 * i);
170 dst[i + 4] = vendor2 >> (8 * i);
171 dst[i + 8] = vendor3 >> (8 * i);
173 dst[CPUID_VENDOR_SZ] = '\0';
/* feature flags taken from "Intel Processor Identification and the CPUID
 * Instruction" and AMD's "CPUID Specification".  In cases of disagreement
 * between feature naming conventions, aliases may be added.
 */
/* Bit names for CPUID[1].EDX; NULL marks bits QEMU has no name for. */
static const char *feature_name[] = {
    /* 0 */  "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    /* 8 */  "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
    /* 16 */ "pat", "pse36", "pn" /* Intel psn */,
             "clflush" /* Intel clfsh */,
             NULL, "ds" /* Intel dts */, "acpi", "mmx",
    /* 24 */ "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */,
             "tm", "ia64", "pbe",
};
/* Bit names for CPUID[1].ECX */
static const char *ext_feature_name[] = {
    /* 0 */  "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq",
             "dtes64", "monitor", "ds_cpl", "vmx", "smx", "est",
    /* 8 */  "tm2", "ssse3", "cid", NULL, "fma", "cx16", "xtpr", "pdcm",
    /* 16 */ NULL, "pcid", "dca", "sse4.1|sse4_1", "sse4.2|sse4_2",
             "x2apic", "movbe", "popcnt",
    /* 24 */ "tsc-deadline", "aes", "xsave", "osxsave", "avx", "f16c",
             "rdrand", "hypervisor",
};
/* Feature names that are already defined on feature_name[] but are set on
 * CPUID[8000_0001].EDX on AMD CPUs don't have their names on
 * ext2_feature_name[].  They are copied automatically to cpuid_ext2_features
 * if and only if CPU vendor is AMD.
 */
static const char *ext2_feature_name[] = {
    /* 0 */  NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
             NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
    /* 8 */  NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */,
             NULL, "syscall",
             NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
    /* 16 */ NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
             "nx|xd", NULL, "mmxext", NULL /* mmx */,
    /* 24 */ NULL /* fxsr */, "fxsr_opt|ffxsr",
             "pdpe1gb" /* AMD Page1GB */, "rdtscp",
             NULL, "lm|i64", "3dnowext", "3dnow",
};
/* Bit names for CPUID[8000_0001].ECX */
static const char *ext3_feature_name[] = {
    /* 0 */  "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm",
             "extapic" /* AMD ExtApicSpace */,
             "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
    /* 8 */  "3dnowprefetch", "osvw", "ibs", "xop",
             "skinit", "wdt", NULL, "lwp",
    /* 16 */ "fma4", "tce", NULL, "nodeid_msr",
             NULL, "tbm", "topoext", "perfctr_core",
    /* 24 */ "perfctr_nb", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
/* Bit names for CPUID[C000_0001].EDX (xstore/xcrypt/... — presumably the
 * VIA PadLock leaf; confirm against the 0xC0000001 entry below). */
static const char *ext4_feature_name[] = {
    /* 0 */  NULL, NULL, "xstore", "xstore-en",
             NULL, NULL, "xcrypt", "xcrypt-en",
    /* 8 */  "ace2", "ace2-en", "phe", "phe-en",
             "pmm", "pmm-en", NULL, NULL,
    /* 16 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* 24 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
/* Bit names for the KVM paravirtual feature leaf (KVM_CPUID_FEATURES).
 * NOTE(review): bits 0 and 3 deliberately share the name "kvmclock" —
 * presumably the two kvmclock MSR interfaces; confirm against
 * linux/kvm_para.h. */
static const char *kvm_feature_name[] = {
    /* 0 */  "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
             "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
    /* 8 */  NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* 16 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* 24 */ "kvmclock-stable-bit", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
/* Bit names for CPUID[8000_000A].EDX (SVM features) */
static const char *svm_feature_name[] = {
    /* 0 */  "npt", "lbrv", "svm_lock", "nrip_save",
             "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
    /* 8 */  NULL, NULL, "pause_filter", NULL,
             "pfthreshold", NULL, NULL, NULL,
    /* 16 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* 24 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
/* Bit names for CPUID[EAX=7,ECX=0].EBX */
static const char *cpuid_7_0_ebx_feature_name[] = {
    /* 0 */  "fsgsbase", "tsc_adjust", NULL, "bmi1",
             "hle", "avx2", NULL, "smep",
    /* 8 */  "bmi2", "erms", "invpcid", "rtm",
             NULL, NULL, "mpx", NULL,
    /* 16 */ "avx512f", NULL, "rdseed", "adx",
             "smap", NULL, NULL, NULL,
    /* 24 */ NULL, NULL, "avx512pf", "avx512er",
             "avx512cd", NULL, NULL, NULL,
};
/* Bit names for CPUID[8000_0007].EDX; only invtsc (bit 8) is named */
static const char *cpuid_apm_edx_feature_name[] = {
    /* 0 */  NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* 8 */  "invtsc", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* 16 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* 24 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
/* Bit names for CPUID[EAX=0Dh,ECX=1].EAX (XSAVE extended capabilities) */
static const char *cpuid_xsave_feature_name[] = {
    /* 0 */  "xsaveopt", "xsavec", "xgetbv1", "xsaves",
             NULL, NULL, NULL, NULL,
    /* 8 */  NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* 16 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* 24 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
/* Bit names for CPUID[6].EAX; only arat (bit 2) is named */
static const char *cpuid_6_feature_name[] = {
    /* 0 */  NULL, NULL, "arat", NULL, NULL, NULL, NULL, NULL,
    /* 8 */  NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* 16 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* 24 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
/* Feature masks used by the builtin CPU model templates: */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

/* TCG_*_FEATURES: per feature word, the flags TCG emulation supports
 * (cf. FeatureWordInfo.tcg_features below). */
#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_XSAVE,
          CPUID_EXT_OSXSAVE, CPUID_EXT_AVX, CPUID_EXT_F16C,
          CPUID_EXT_RDRAND */

/* 64-bit-only bits are dropped from TCG_EXT2_FEATURES on 32-bit targets */
#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES 0
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX)
          /* missing:
          CPUID_7_0_EBX_FSGSBASE, CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
/* Describes one "feature word": which CPUID leaf/register its bits come
 * from, the per-bit name table, and which bits TCG/migration handle. */
typedef struct FeatureWordInfo {
    const char **feat_names; /* 32-entry table: bit index -> name (or NULL) */
    uint32_t cpuid_eax;   /* Input EAX for CPUID */
    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;   /* Input ECX value for CPUID */
    int cpuid_reg;        /* output register (R_* constant) */
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
} FeatureWordInfo;
367 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
368 [FEAT_1_EDX] = {
369 .feat_names = feature_name,
370 .cpuid_eax = 1, .cpuid_reg = R_EDX,
371 .tcg_features = TCG_FEATURES,
373 [FEAT_1_ECX] = {
374 .feat_names = ext_feature_name,
375 .cpuid_eax = 1, .cpuid_reg = R_ECX,
376 .tcg_features = TCG_EXT_FEATURES,
378 [FEAT_8000_0001_EDX] = {
379 .feat_names = ext2_feature_name,
380 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
381 .tcg_features = TCG_EXT2_FEATURES,
383 [FEAT_8000_0001_ECX] = {
384 .feat_names = ext3_feature_name,
385 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
386 .tcg_features = TCG_EXT3_FEATURES,
388 [FEAT_C000_0001_EDX] = {
389 .feat_names = ext4_feature_name,
390 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
391 .tcg_features = TCG_EXT4_FEATURES,
393 [FEAT_KVM] = {
394 .feat_names = kvm_feature_name,
395 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
396 .tcg_features = TCG_KVM_FEATURES,
398 [FEAT_SVM] = {
399 .feat_names = svm_feature_name,
400 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
401 .tcg_features = TCG_SVM_FEATURES,
403 [FEAT_7_0_EBX] = {
404 .feat_names = cpuid_7_0_ebx_feature_name,
405 .cpuid_eax = 7,
406 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
407 .cpuid_reg = R_EBX,
408 .tcg_features = TCG_7_0_EBX_FEATURES,
410 [FEAT_8000_0007_EDX] = {
411 .feat_names = cpuid_apm_edx_feature_name,
412 .cpuid_eax = 0x80000007,
413 .cpuid_reg = R_EDX,
414 .tcg_features = TCG_APM_FEATURES,
415 .unmigratable_flags = CPUID_APM_INVTSC,
417 [FEAT_XSAVE] = {
418 .feat_names = cpuid_xsave_feature_name,
419 .cpuid_eax = 0xd,
420 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
421 .cpuid_reg = R_EAX,
422 .tcg_features = 0,
424 [FEAT_6_EAX] = {
425 .feat_names = cpuid_6_feature_name,
426 .cpuid_eax = 6, .cpuid_reg = R_EAX,
427 .tcg_features = TCG_6_EAX_FEATURES,
/* Maps a 32-bit register index (R_* constant) to its display name and
 * the corresponding QAPI enum value. */
typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;
438 #define REGISTER(reg) \
439 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
440 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
441 REGISTER(EAX),
442 REGISTER(ECX),
443 REGISTER(EDX),
444 REGISTER(EBX),
445 REGISTER(ESP),
446 REGISTER(EBP),
447 REGISTER(ESI),
448 REGISTER(EDI),
450 #undef REGISTER
/* One XSAVE extended-state component: the feature word/bits that enable
 * it, and its offset/size within the XSAVE area. */
typedef struct ExtSaveArea {
    uint32_t feature, bits;
    uint32_t offset, size;
} ExtSaveArea;
457 static const ExtSaveArea ext_save_areas[] = {
458 [2] = { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
459 .offset = 0x240, .size = 0x100 },
460 [3] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
461 .offset = 0x3c0, .size = 0x40 },
462 [4] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
463 .offset = 0x400, .size = 0x40 },
464 [5] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
465 .offset = 0x440, .size = 0x40 },
466 [6] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
467 .offset = 0x480, .size = 0x200 },
468 [7] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
469 .offset = 0x680, .size = 0x400 },
472 const char *get_register_name_32(unsigned int reg)
474 if (reg >= CPU_NB_REGS32) {
475 return NULL;
477 return x86_reg_info_32[reg].name;
481 * Returns the set of feature flags that are supported and migratable by
482 * QEMU, for a given FeatureWord.
484 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
486 FeatureWordInfo *wi = &feature_word_info[w];
487 uint32_t r = 0;
488 int i;
490 for (i = 0; i < 32; i++) {
491 uint32_t f = 1U << i;
492 /* If the feature name is unknown, it is not supported by QEMU yet */
493 if (!wi->feat_names[i]) {
494 continue;
496 /* Skip features known to QEMU, but explicitly marked as unmigratable */
497 if (wi->unmigratable_flags & f) {
498 continue;
500 r |= f;
502 return r;
505 void host_cpuid(uint32_t function, uint32_t count,
506 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
508 uint32_t vec[4];
510 #ifdef __x86_64__
511 asm volatile("cpuid"
512 : "=a"(vec[0]), "=b"(vec[1]),
513 "=c"(vec[2]), "=d"(vec[3])
514 : "0"(function), "c"(count) : "cc");
515 #elif defined(__i386__)
516 asm volatile("pusha \n\t"
517 "cpuid \n\t"
518 "mov %%eax, 0(%2) \n\t"
519 "mov %%ebx, 4(%2) \n\t"
520 "mov %%ecx, 8(%2) \n\t"
521 "mov %%edx, 12(%2) \n\t"
522 "popa"
523 : : "a"(function), "c"(count), "S"(vec)
524 : "memory", "cc");
525 #else
526 abort();
527 #endif
529 if (eax)
530 *eax = vec[0];
531 if (ebx)
532 *ebx = vec[1];
533 if (ecx)
534 *ecx = vec[2];
535 if (edx)
536 *edx = vec[3];
/* Despite the name, true for any non-NUL character OUTSIDE the printable
 * ASCII range: c <= ' ' (space and control chars) or c > '~'. */
#define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
/* general substring compare of *[s1..e1) and *[s2..e2). sx is start of
 * a substring. ex if !NULL points to the first char after a substring,
 * otherwise the string is assumed to sized by a terminating nul.
 * Return lexical ordering of *s1:*s2.
 */
static int sstrcmp(const char *s1, const char *e1,
                   const char *s2, const char *e2)
{
    while (1) {
        /* differing char, or NUL before either end marker, decides */
        if (!*s1 || !*s2 || *s1 != *s2) {
            return *s1 - *s2;
        }
        ++s1;
        ++s2;
        if (s1 == e1 && s2 == e2) {
            return 0;           /* both ranges exhausted together */
        }
        if (s1 == e1) {
            return *s2;         /* s1 ended first: s2's next char decides */
        }
        if (s2 == e2) {
            return *s1;         /* s2 ended first: s1's next char decides */
        }
    }
}
/* compare *[s..e) to *altstr.  *altstr may be a simple string or multiple
 * '|' delimited (possibly empty) strings in which case search for a match
 * within the alternatives proceeds left to right.  Return 0 for success,
 * non-zero otherwise.
 */
static int altcmp(const char *s, const char *e, const char *altstr)
{
    const char *start, *end;

    start = end = altstr;
    for (;;) {
        /* advance end to the '|' or NUL closing this alternative */
        while (*end && *end != '|') {
            ++end;
        }
        /* an empty alternative matches an empty s */
        if ((start == end && !*s) ||
            (start != end && !sstrcmp(s, e, start, end))) {
            return 0;
        }
        if (!*end) {
            return 1;       /* no more alternatives */
        }
        start = ++end;
    }
}
/* search featureset for flag *[s..e), if found set corresponding bit in
 * *pval and return true, otherwise return false.  All 32 entries are
 * scanned, so a name matching several bits sets them all. */
static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
                           const char **featureset)
{
    bool found = false;
    int bit;

    for (bit = 0; bit < 32; bit++) {
        const char *name = featureset[bit];
        if (name && !altcmp(s, e, name)) {
            *pval |= 1U << bit;
            found = true;
        }
    }
    return found;
}
602 static void add_flagname_to_bitmaps(const char *flagname,
603 FeatureWordArray words,
604 Error **errp)
606 FeatureWord w;
607 for (w = 0; w < FEATURE_WORDS; w++) {
608 FeatureWordInfo *wi = &feature_word_info[w];
609 if (wi->feat_names &&
610 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
611 break;
614 if (w == FEATURE_WORDS) {
615 error_setg(errp, "CPU feature %s not found", flagname);
619 /* CPU class name definitions: */
621 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
622 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
624 /* Return type name for a given CPU model name
625 * Caller is responsible for freeing the returned string.
627 static char *x86_cpu_type_name(const char *model_name)
629 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
632 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
634 ObjectClass *oc;
635 char *typename;
637 if (cpu_model == NULL) {
638 return NULL;
641 typename = x86_cpu_type_name(cpu_model);
642 oc = object_class_by_name(typename);
643 g_free(typename);
644 return oc;
647 struct X86CPUDefinition {
648 const char *name;
649 uint32_t level;
650 uint32_t xlevel;
651 uint32_t xlevel2;
652 /* vendor is zero-terminated, 12 character ASCII string */
653 char vendor[CPUID_VENDOR_SZ + 1];
654 int family;
655 int model;
656 int stepping;
657 FeatureWordArray features;
658 char model_id[48];
659 bool cache_info_passthrough;
662 static X86CPUDefinition builtin_x86_defs[] = {
664 .name = "qemu64",
665 .level = 0xd,
666 .vendor = CPUID_VENDOR_AMD,
667 .family = 6,
668 .model = 6,
669 .stepping = 3,
670 .features[FEAT_1_EDX] =
671 PPRO_FEATURES |
672 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
673 CPUID_PSE36,
674 .features[FEAT_1_ECX] =
675 CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT,
676 .features[FEAT_8000_0001_EDX] =
677 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
678 .features[FEAT_8000_0001_ECX] =
679 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
680 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
681 .xlevel = 0x8000000A,
684 .name = "phenom",
685 .level = 5,
686 .vendor = CPUID_VENDOR_AMD,
687 .family = 16,
688 .model = 2,
689 .stepping = 3,
690 /* Missing: CPUID_HT */
691 .features[FEAT_1_EDX] =
692 PPRO_FEATURES |
693 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
694 CPUID_PSE36 | CPUID_VME,
695 .features[FEAT_1_ECX] =
696 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
697 CPUID_EXT_POPCNT,
698 .features[FEAT_8000_0001_EDX] =
699 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
700 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
701 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
702 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
703 CPUID_EXT3_CR8LEG,
704 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
705 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
706 .features[FEAT_8000_0001_ECX] =
707 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
708 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
709 /* Missing: CPUID_SVM_LBRV */
710 .features[FEAT_SVM] =
711 CPUID_SVM_NPT,
712 .xlevel = 0x8000001A,
713 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
716 .name = "core2duo",
717 .level = 10,
718 .vendor = CPUID_VENDOR_INTEL,
719 .family = 6,
720 .model = 15,
721 .stepping = 11,
722 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
723 .features[FEAT_1_EDX] =
724 PPRO_FEATURES |
725 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
726 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
727 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
728 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
729 .features[FEAT_1_ECX] =
730 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
731 CPUID_EXT_CX16,
732 .features[FEAT_8000_0001_EDX] =
733 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
734 .features[FEAT_8000_0001_ECX] =
735 CPUID_EXT3_LAHF_LM,
736 .xlevel = 0x80000008,
737 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
740 .name = "kvm64",
741 .level = 0xd,
742 .vendor = CPUID_VENDOR_INTEL,
743 .family = 15,
744 .model = 6,
745 .stepping = 1,
746 /* Missing: CPUID_HT */
747 .features[FEAT_1_EDX] =
748 PPRO_FEATURES | CPUID_VME |
749 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
750 CPUID_PSE36,
751 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
752 .features[FEAT_1_ECX] =
753 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
754 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
755 .features[FEAT_8000_0001_EDX] =
756 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
757 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
758 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
759 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
760 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
761 .features[FEAT_8000_0001_ECX] =
763 .xlevel = 0x80000008,
764 .model_id = "Common KVM processor"
767 .name = "qemu32",
768 .level = 4,
769 .vendor = CPUID_VENDOR_INTEL,
770 .family = 6,
771 .model = 6,
772 .stepping = 3,
773 .features[FEAT_1_EDX] =
774 PPRO_FEATURES,
775 .features[FEAT_1_ECX] =
776 CPUID_EXT_SSE3 | CPUID_EXT_POPCNT,
777 .xlevel = 0x80000004,
780 .name = "kvm32",
781 .level = 5,
782 .vendor = CPUID_VENDOR_INTEL,
783 .family = 15,
784 .model = 6,
785 .stepping = 1,
786 .features[FEAT_1_EDX] =
787 PPRO_FEATURES | CPUID_VME |
788 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
789 .features[FEAT_1_ECX] =
790 CPUID_EXT_SSE3,
791 .features[FEAT_8000_0001_ECX] =
793 .xlevel = 0x80000008,
794 .model_id = "Common 32-bit KVM processor"
797 .name = "coreduo",
798 .level = 10,
799 .vendor = CPUID_VENDOR_INTEL,
800 .family = 6,
801 .model = 14,
802 .stepping = 8,
803 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
804 .features[FEAT_1_EDX] =
805 PPRO_FEATURES | CPUID_VME |
806 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
807 CPUID_SS,
808 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
809 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
810 .features[FEAT_1_ECX] =
811 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
812 .features[FEAT_8000_0001_EDX] =
813 CPUID_EXT2_NX,
814 .xlevel = 0x80000008,
815 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
818 .name = "486",
819 .level = 1,
820 .vendor = CPUID_VENDOR_INTEL,
821 .family = 4,
822 .model = 8,
823 .stepping = 0,
824 .features[FEAT_1_EDX] =
825 I486_FEATURES,
826 .xlevel = 0,
829 .name = "pentium",
830 .level = 1,
831 .vendor = CPUID_VENDOR_INTEL,
832 .family = 5,
833 .model = 4,
834 .stepping = 3,
835 .features[FEAT_1_EDX] =
836 PENTIUM_FEATURES,
837 .xlevel = 0,
840 .name = "pentium2",
841 .level = 2,
842 .vendor = CPUID_VENDOR_INTEL,
843 .family = 6,
844 .model = 5,
845 .stepping = 2,
846 .features[FEAT_1_EDX] =
847 PENTIUM2_FEATURES,
848 .xlevel = 0,
851 .name = "pentium3",
852 .level = 3,
853 .vendor = CPUID_VENDOR_INTEL,
854 .family = 6,
855 .model = 7,
856 .stepping = 3,
857 .features[FEAT_1_EDX] =
858 PENTIUM3_FEATURES,
859 .xlevel = 0,
862 .name = "athlon",
863 .level = 2,
864 .vendor = CPUID_VENDOR_AMD,
865 .family = 6,
866 .model = 2,
867 .stepping = 3,
868 .features[FEAT_1_EDX] =
869 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
870 CPUID_MCA,
871 .features[FEAT_8000_0001_EDX] =
872 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
873 .xlevel = 0x80000008,
876 .name = "n270",
877 .level = 10,
878 .vendor = CPUID_VENDOR_INTEL,
879 .family = 6,
880 .model = 28,
881 .stepping = 2,
882 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
883 .features[FEAT_1_EDX] =
884 PPRO_FEATURES |
885 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
886 CPUID_ACPI | CPUID_SS,
887 /* Some CPUs got no CPUID_SEP */
888 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
889 * CPUID_EXT_XTPR */
890 .features[FEAT_1_ECX] =
891 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
892 CPUID_EXT_MOVBE,
893 .features[FEAT_8000_0001_EDX] =
894 CPUID_EXT2_NX,
895 .features[FEAT_8000_0001_ECX] =
896 CPUID_EXT3_LAHF_LM,
897 .xlevel = 0x80000008,
898 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
901 .name = "Conroe",
902 .level = 10,
903 .vendor = CPUID_VENDOR_INTEL,
904 .family = 6,
905 .model = 15,
906 .stepping = 3,
907 .features[FEAT_1_EDX] =
908 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
909 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
910 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
911 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
912 CPUID_DE | CPUID_FP87,
913 .features[FEAT_1_ECX] =
914 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
915 .features[FEAT_8000_0001_EDX] =
916 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
917 .features[FEAT_8000_0001_ECX] =
918 CPUID_EXT3_LAHF_LM,
919 .xlevel = 0x80000008,
920 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
923 .name = "Penryn",
924 .level = 10,
925 .vendor = CPUID_VENDOR_INTEL,
926 .family = 6,
927 .model = 23,
928 .stepping = 3,
929 .features[FEAT_1_EDX] =
930 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
931 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
932 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
933 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
934 CPUID_DE | CPUID_FP87,
935 .features[FEAT_1_ECX] =
936 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
937 CPUID_EXT_SSE3,
938 .features[FEAT_8000_0001_EDX] =
939 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
940 .features[FEAT_8000_0001_ECX] =
941 CPUID_EXT3_LAHF_LM,
942 .xlevel = 0x80000008,
943 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
946 .name = "Nehalem",
947 .level = 11,
948 .vendor = CPUID_VENDOR_INTEL,
949 .family = 6,
950 .model = 26,
951 .stepping = 3,
952 .features[FEAT_1_EDX] =
953 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
954 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
955 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
956 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
957 CPUID_DE | CPUID_FP87,
958 .features[FEAT_1_ECX] =
959 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
960 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
961 .features[FEAT_8000_0001_EDX] =
962 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
963 .features[FEAT_8000_0001_ECX] =
964 CPUID_EXT3_LAHF_LM,
965 .xlevel = 0x80000008,
966 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
969 .name = "Westmere",
970 .level = 11,
971 .vendor = CPUID_VENDOR_INTEL,
972 .family = 6,
973 .model = 44,
974 .stepping = 1,
975 .features[FEAT_1_EDX] =
976 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
977 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
978 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
979 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
980 CPUID_DE | CPUID_FP87,
981 .features[FEAT_1_ECX] =
982 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
983 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
984 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
985 .features[FEAT_8000_0001_EDX] =
986 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
987 .features[FEAT_8000_0001_ECX] =
988 CPUID_EXT3_LAHF_LM,
989 .features[FEAT_6_EAX] =
990 CPUID_6_EAX_ARAT,
991 .xlevel = 0x80000008,
992 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
995 .name = "SandyBridge",
996 .level = 0xd,
997 .vendor = CPUID_VENDOR_INTEL,
998 .family = 6,
999 .model = 42,
1000 .stepping = 1,
1001 .features[FEAT_1_EDX] =
1002 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1003 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1004 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1005 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1006 CPUID_DE | CPUID_FP87,
1007 .features[FEAT_1_ECX] =
1008 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1009 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1010 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1011 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1012 CPUID_EXT_SSE3,
1013 .features[FEAT_8000_0001_EDX] =
1014 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1015 CPUID_EXT2_SYSCALL,
1016 .features[FEAT_8000_0001_ECX] =
1017 CPUID_EXT3_LAHF_LM,
1018 .features[FEAT_XSAVE] =
1019 CPUID_XSAVE_XSAVEOPT,
1020 .features[FEAT_6_EAX] =
1021 CPUID_6_EAX_ARAT,
1022 .xlevel = 0x80000008,
1023 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1026 .name = "IvyBridge",
1027 .level = 0xd,
1028 .vendor = CPUID_VENDOR_INTEL,
1029 .family = 6,
1030 .model = 58,
1031 .stepping = 9,
1032 .features[FEAT_1_EDX] =
1033 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1034 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1035 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1036 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1037 CPUID_DE | CPUID_FP87,
1038 .features[FEAT_1_ECX] =
1039 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1040 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1041 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1042 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1043 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1044 .features[FEAT_7_0_EBX] =
1045 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1046 CPUID_7_0_EBX_ERMS,
1047 .features[FEAT_8000_0001_EDX] =
1048 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1049 CPUID_EXT2_SYSCALL,
1050 .features[FEAT_8000_0001_ECX] =
1051 CPUID_EXT3_LAHF_LM,
1052 .features[FEAT_XSAVE] =
1053 CPUID_XSAVE_XSAVEOPT,
1054 .features[FEAT_6_EAX] =
1055 CPUID_6_EAX_ARAT,
1056 .xlevel = 0x80000008,
1057 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1060 .name = "Haswell-noTSX",
1061 .level = 0xd,
1062 .vendor = CPUID_VENDOR_INTEL,
1063 .family = 6,
1064 .model = 60,
1065 .stepping = 1,
1066 .features[FEAT_1_EDX] =
1067 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1068 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1069 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1070 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1071 CPUID_DE | CPUID_FP87,
1072 .features[FEAT_1_ECX] =
1073 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1074 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1075 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1076 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1077 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1078 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1079 .features[FEAT_8000_0001_EDX] =
1080 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1081 CPUID_EXT2_SYSCALL,
1082 .features[FEAT_8000_0001_ECX] =
1083 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1084 .features[FEAT_7_0_EBX] =
1085 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1086 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1087 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1088 .features[FEAT_XSAVE] =
1089 CPUID_XSAVE_XSAVEOPT,
1090 .features[FEAT_6_EAX] =
1091 CPUID_6_EAX_ARAT,
1092 .xlevel = 0x80000008,
1093 .model_id = "Intel Core Processor (Haswell, no TSX)",
1094 }, {
1095 .name = "Haswell",
1096 .level = 0xd,
1097 .vendor = CPUID_VENDOR_INTEL,
1098 .family = 6,
1099 .model = 60,
1100 .stepping = 1,
1101 .features[FEAT_1_EDX] =
1102 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1103 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1104 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1105 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1106 CPUID_DE | CPUID_FP87,
1107 .features[FEAT_1_ECX] =
1108 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1109 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1110 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1111 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1112 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1113 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1114 .features[FEAT_8000_0001_EDX] =
1115 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1116 CPUID_EXT2_SYSCALL,
1117 .features[FEAT_8000_0001_ECX] =
1118 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1119 .features[FEAT_7_0_EBX] =
1120 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1121 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1122 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1123 CPUID_7_0_EBX_RTM,
1124 .features[FEAT_XSAVE] =
1125 CPUID_XSAVE_XSAVEOPT,
1126 .features[FEAT_6_EAX] =
1127 CPUID_6_EAX_ARAT,
1128 .xlevel = 0x80000008,
1129 .model_id = "Intel Core Processor (Haswell)",
1132 .name = "Broadwell-noTSX",
1133 .level = 0xd,
1134 .vendor = CPUID_VENDOR_INTEL,
1135 .family = 6,
1136 .model = 61,
1137 .stepping = 2,
1138 .features[FEAT_1_EDX] =
1139 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1140 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1141 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1142 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1143 CPUID_DE | CPUID_FP87,
1144 .features[FEAT_1_ECX] =
1145 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1146 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1147 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1148 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1149 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1150 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1151 .features[FEAT_8000_0001_EDX] =
1152 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1153 CPUID_EXT2_SYSCALL,
1154 .features[FEAT_8000_0001_ECX] =
1155 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1156 .features[FEAT_7_0_EBX] =
1157 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1158 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1159 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1160 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1161 CPUID_7_0_EBX_SMAP,
1162 .features[FEAT_XSAVE] =
1163 CPUID_XSAVE_XSAVEOPT,
1164 .features[FEAT_6_EAX] =
1165 CPUID_6_EAX_ARAT,
1166 .xlevel = 0x80000008,
1167 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1170 .name = "Broadwell",
1171 .level = 0xd,
1172 .vendor = CPUID_VENDOR_INTEL,
1173 .family = 6,
1174 .model = 61,
1175 .stepping = 2,
1176 .features[FEAT_1_EDX] =
1177 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1178 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1179 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1180 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1181 CPUID_DE | CPUID_FP87,
1182 .features[FEAT_1_ECX] =
1183 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1184 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1185 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1186 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1187 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1188 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1189 .features[FEAT_8000_0001_EDX] =
1190 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1191 CPUID_EXT2_SYSCALL,
1192 .features[FEAT_8000_0001_ECX] =
1193 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1194 .features[FEAT_7_0_EBX] =
1195 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1196 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1197 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1198 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1199 CPUID_7_0_EBX_SMAP,
1200 .features[FEAT_XSAVE] =
1201 CPUID_XSAVE_XSAVEOPT,
1202 .features[FEAT_6_EAX] =
1203 CPUID_6_EAX_ARAT,
1204 .xlevel = 0x80000008,
1205 .model_id = "Intel Core Processor (Broadwell)",
1208 .name = "Opteron_G1",
1209 .level = 5,
1210 .vendor = CPUID_VENDOR_AMD,
1211 .family = 15,
1212 .model = 6,
1213 .stepping = 1,
1214 .features[FEAT_1_EDX] =
1215 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1216 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1217 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1218 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1219 CPUID_DE | CPUID_FP87,
1220 .features[FEAT_1_ECX] =
1221 CPUID_EXT_SSE3,
1222 .features[FEAT_8000_0001_EDX] =
1223 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1224 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1225 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1226 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1227 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1228 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1229 .xlevel = 0x80000008,
1230 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1233 .name = "Opteron_G2",
1234 .level = 5,
1235 .vendor = CPUID_VENDOR_AMD,
1236 .family = 15,
1237 .model = 6,
1238 .stepping = 1,
1239 .features[FEAT_1_EDX] =
1240 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1241 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1242 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1243 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1244 CPUID_DE | CPUID_FP87,
1245 .features[FEAT_1_ECX] =
1246 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1247 .features[FEAT_8000_0001_EDX] =
1248 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1249 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1250 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1251 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1252 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1253 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1254 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1255 .features[FEAT_8000_0001_ECX] =
1256 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1257 .xlevel = 0x80000008,
1258 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1261 .name = "Opteron_G3",
1262 .level = 5,
1263 .vendor = CPUID_VENDOR_AMD,
1264 .family = 15,
1265 .model = 6,
1266 .stepping = 1,
1267 .features[FEAT_1_EDX] =
1268 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1269 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1270 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1271 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1272 CPUID_DE | CPUID_FP87,
1273 .features[FEAT_1_ECX] =
1274 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1275 CPUID_EXT_SSE3,
1276 .features[FEAT_8000_0001_EDX] =
1277 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1278 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1279 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1280 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1281 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1282 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1283 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1284 .features[FEAT_8000_0001_ECX] =
1285 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1286 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1287 .xlevel = 0x80000008,
1288 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1291 .name = "Opteron_G4",
1292 .level = 0xd,
1293 .vendor = CPUID_VENDOR_AMD,
1294 .family = 21,
1295 .model = 1,
1296 .stepping = 2,
1297 .features[FEAT_1_EDX] =
1298 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1299 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1300 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1301 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1302 CPUID_DE | CPUID_FP87,
1303 .features[FEAT_1_ECX] =
1304 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1305 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1306 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1307 CPUID_EXT_SSE3,
1308 .features[FEAT_8000_0001_EDX] =
1309 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1310 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1311 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1312 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1313 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1314 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1315 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1316 .features[FEAT_8000_0001_ECX] =
1317 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1318 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1319 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1320 CPUID_EXT3_LAHF_LM,
1321 /* no xsaveopt! */
1322 .xlevel = 0x8000001A,
1323 .model_id = "AMD Opteron 62xx class CPU",
1326 .name = "Opteron_G5",
1327 .level = 0xd,
1328 .vendor = CPUID_VENDOR_AMD,
1329 .family = 21,
1330 .model = 2,
1331 .stepping = 0,
1332 .features[FEAT_1_EDX] =
1333 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1334 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1335 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1336 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1337 CPUID_DE | CPUID_FP87,
1338 .features[FEAT_1_ECX] =
1339 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1340 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1341 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1342 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1343 .features[FEAT_8000_0001_EDX] =
1344 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1345 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1346 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1347 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1348 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1349 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1350 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1351 .features[FEAT_8000_0001_ECX] =
1352 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1353 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1354 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1355 CPUID_EXT3_LAHF_LM,
1356 /* no xsaveopt! */
1357 .xlevel = 0x8000001A,
1358 .model_id = "AMD Opteron 63xx class CPU",
/* A (property name, property value) pair; used for NULL-terminated tables
 * of property overrides such as kvm_default_props below. */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;
1366 /* KVM-specific features that are automatically added/removed
1367 * from all CPU models when KVM is enabled.
1369 static PropValue kvm_default_props[] = {
1370 { "kvmclock", "on" },
1371 { "kvm-nopiodelay", "on" },
1372 { "kvm-asyncpf", "on" },
1373 { "kvm-steal-time", "on" },
1374 { "kvm-pv-eoi", "on" },
1375 { "kvmclock-stable-bit", "on" },
1376 { "x2apic", "on" },
1377 { "acpi", "off" },
1378 { "monitor", "off" },
1379 { "svm", "off" },
1380 { NULL, NULL },
1383 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1385 PropValue *pv;
1386 for (pv = kvm_default_props; pv->prop; pv++) {
1387 if (!strcmp(pv->prop, prop)) {
1388 pv->value = value;
1389 break;
1393 /* It is valid to call this function only for properties that
1394 * are already present in the kvm_default_props table.
1396 assert(pv->prop);
/* Forward declaration; defined later in this file. */
static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only);
1402 #ifdef CONFIG_KVM
/* Fill @str (at least 48 bytes) with the host CPU brand string read from
 * CPUID leaves 0x80000002..0x80000004.  Always returns 0.
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t regs[4] = { 0, 0, 0, 0 };
    int leaf;

    for (leaf = 0; leaf < 3; leaf++) {
        host_cpuid(0x80000002 + leaf, 0,
                   &regs[0], &regs[1], &regs[2], &regs[3]);
        /* regs[] is contiguous EAX/EBX/ECX/EDX: copy all 16 bytes at once */
        memcpy(str + leaf * 16, regs, sizeof(regs));
    }
    return 0;
}
1419 static X86CPUDefinition host_cpudef;
1421 static Property host_x86_cpu_properties[] = {
1422 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1423 DEFINE_PROP_END_OF_LIST()
1426 /* class_init for the "host" CPU model
1428 * This function may be called before KVM is initialized.
1430 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1432 DeviceClass *dc = DEVICE_CLASS(oc);
1433 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1434 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1436 xcc->kvm_required = true;
1438 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1439 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1441 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1442 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1443 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1444 host_cpudef.stepping = eax & 0x0F;
1446 cpu_x86_fill_model_id(host_cpudef.model_id);
1448 xcc->cpu_def = &host_cpudef;
1449 host_cpudef.cache_info_passthrough = true;
1451 /* level, xlevel, xlevel2, and the feature words are initialized on
1452 * instance_init, because they require KVM to be initialized.
1455 dc->props = host_x86_cpu_properties;
1456 /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
1457 dc->cannot_destroy_with_object_finalize_yet = true;
1460 static void host_x86_cpu_initfn(Object *obj)
1462 X86CPU *cpu = X86_CPU(obj);
1463 CPUX86State *env = &cpu->env;
1464 KVMState *s = kvm_state;
1466 assert(kvm_enabled());
1468 /* We can't fill the features array here because we don't know yet if
1469 * "migratable" is true or false.
1471 cpu->host_features = true;
1473 env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1474 env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1475 env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1477 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1480 static const TypeInfo host_x86_cpu_type_info = {
1481 .name = X86_CPU_TYPE_NAME("host"),
1482 .parent = TYPE_X86_CPU,
1483 .instance_init = host_x86_cpu_initfn,
1484 .class_init = host_x86_cpu_class_init,
1487 #endif
1489 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1491 FeatureWordInfo *f = &feature_word_info[w];
1492 int i;
1494 for (i = 0; i < 32; ++i) {
1495 if (1 << i & mask) {
1496 const char *reg = get_register_name_32(f->cpuid_reg);
1497 assert(reg);
1498 fprintf(stderr, "warning: %s doesn't support requested feature: "
1499 "CPUID.%02XH:%s%s%s [bit %d]\n",
1500 kvm_enabled() ? "host" : "TCG",
1501 f->cpuid_eax, reg,
1502 f->feat_names[i] ? "." : "",
1503 f->feat_names[i] ? f->feat_names[i] : "", i);
1508 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, void *opaque,
1509 const char *name, Error **errp)
1511 X86CPU *cpu = X86_CPU(obj);
1512 CPUX86State *env = &cpu->env;
1513 int64_t value;
1515 value = (env->cpuid_version >> 8) & 0xf;
1516 if (value == 0xf) {
1517 value += (env->cpuid_version >> 20) & 0xff;
1519 visit_type_int(v, &value, name, errp);
1522 static void x86_cpuid_version_set_family(Object *obj, Visitor *v, void *opaque,
1523 const char *name, Error **errp)
1525 X86CPU *cpu = X86_CPU(obj);
1526 CPUX86State *env = &cpu->env;
1527 const int64_t min = 0;
1528 const int64_t max = 0xff + 0xf;
1529 Error *local_err = NULL;
1530 int64_t value;
1532 visit_type_int(v, &value, name, &local_err);
1533 if (local_err) {
1534 error_propagate(errp, local_err);
1535 return;
1537 if (value < min || value > max) {
1538 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1539 name ? name : "null", value, min, max);
1540 return;
1543 env->cpuid_version &= ~0xff00f00;
1544 if (value > 0x0f) {
1545 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1546 } else {
1547 env->cpuid_version |= value << 8;
1551 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, void *opaque,
1552 const char *name, Error **errp)
1554 X86CPU *cpu = X86_CPU(obj);
1555 CPUX86State *env = &cpu->env;
1556 int64_t value;
1558 value = (env->cpuid_version >> 4) & 0xf;
1559 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1560 visit_type_int(v, &value, name, errp);
1563 static void x86_cpuid_version_set_model(Object *obj, Visitor *v, void *opaque,
1564 const char *name, Error **errp)
1566 X86CPU *cpu = X86_CPU(obj);
1567 CPUX86State *env = &cpu->env;
1568 const int64_t min = 0;
1569 const int64_t max = 0xff;
1570 Error *local_err = NULL;
1571 int64_t value;
1573 visit_type_int(v, &value, name, &local_err);
1574 if (local_err) {
1575 error_propagate(errp, local_err);
1576 return;
1578 if (value < min || value > max) {
1579 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1580 name ? name : "null", value, min, max);
1581 return;
1584 env->cpuid_version &= ~0xf00f0;
1585 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1588 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1589 void *opaque, const char *name,
1590 Error **errp)
1592 X86CPU *cpu = X86_CPU(obj);
1593 CPUX86State *env = &cpu->env;
1594 int64_t value;
1596 value = env->cpuid_version & 0xf;
1597 visit_type_int(v, &value, name, errp);
1600 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1601 void *opaque, const char *name,
1602 Error **errp)
1604 X86CPU *cpu = X86_CPU(obj);
1605 CPUX86State *env = &cpu->env;
1606 const int64_t min = 0;
1607 const int64_t max = 0xf;
1608 Error *local_err = NULL;
1609 int64_t value;
1611 visit_type_int(v, &value, name, &local_err);
1612 if (local_err) {
1613 error_propagate(errp, local_err);
1614 return;
1616 if (value < min || value > max) {
1617 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1618 name ? name : "null", value, min, max);
1619 return;
1622 env->cpuid_version &= ~0xf;
1623 env->cpuid_version |= value & 0xf;
1626 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1628 X86CPU *cpu = X86_CPU(obj);
1629 CPUX86State *env = &cpu->env;
1630 char *value;
1632 value = g_malloc(CPUID_VENDOR_SZ + 1);
1633 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1634 env->cpuid_vendor3);
1635 return value;
1638 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1639 Error **errp)
1641 X86CPU *cpu = X86_CPU(obj);
1642 CPUX86State *env = &cpu->env;
1643 int i;
1645 if (strlen(value) != CPUID_VENDOR_SZ) {
1646 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1647 return;
1650 env->cpuid_vendor1 = 0;
1651 env->cpuid_vendor2 = 0;
1652 env->cpuid_vendor3 = 0;
1653 for (i = 0; i < 4; i++) {
1654 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1655 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1656 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1660 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1662 X86CPU *cpu = X86_CPU(obj);
1663 CPUX86State *env = &cpu->env;
1664 char *value;
1665 int i;
1667 value = g_malloc(48 + 1);
1668 for (i = 0; i < 48; i++) {
1669 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1671 value[48] = '\0';
1672 return value;
1675 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1676 Error **errp)
1678 X86CPU *cpu = X86_CPU(obj);
1679 CPUX86State *env = &cpu->env;
1680 int c, len, i;
1682 if (model_id == NULL) {
1683 model_id = "";
1685 len = strlen(model_id);
1686 memset(env->cpuid_model, 0, 48);
1687 for (i = 0; i < 48; i++) {
1688 if (i >= len) {
1689 c = '\0';
1690 } else {
1691 c = (uint8_t)model_id[i];
1693 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1697 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, void *opaque,
1698 const char *name, Error **errp)
1700 X86CPU *cpu = X86_CPU(obj);
1701 int64_t value;
1703 value = cpu->env.tsc_khz * 1000;
1704 visit_type_int(v, &value, name, errp);
1707 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, void *opaque,
1708 const char *name, Error **errp)
1710 X86CPU *cpu = X86_CPU(obj);
1711 const int64_t min = 0;
1712 const int64_t max = INT64_MAX;
1713 Error *local_err = NULL;
1714 int64_t value;
1716 visit_type_int(v, &value, name, &local_err);
1717 if (local_err) {
1718 error_propagate(errp, local_err);
1719 return;
1721 if (value < min || value > max) {
1722 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1723 name ? name : "null", value, min, max);
1724 return;
1727 cpu->env.tsc_khz = value / 1000;
1730 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, void *opaque,
1731 const char *name, Error **errp)
1733 X86CPU *cpu = X86_CPU(obj);
1734 int64_t value = cpu->apic_id;
1736 visit_type_int(v, &value, name, errp);
1739 static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, void *opaque,
1740 const char *name, Error **errp)
1742 X86CPU *cpu = X86_CPU(obj);
1743 DeviceState *dev = DEVICE(obj);
1744 const int64_t min = 0;
1745 const int64_t max = UINT32_MAX;
1746 Error *error = NULL;
1747 int64_t value;
1749 if (dev->realized) {
1750 error_setg(errp, "Attempt to set property '%s' on '%s' after "
1751 "it was realized", name, object_get_typename(obj));
1752 return;
1755 visit_type_int(v, &value, name, &error);
1756 if (error) {
1757 error_propagate(errp, error);
1758 return;
1760 if (value < min || value > max) {
1761 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1762 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
1763 object_get_typename(obj), name, value, min, max);
1764 return;
1767 if ((value != cpu->apic_id) && cpu_exists(value)) {
1768 error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
1769 return;
1771 cpu->apic_id = value;
1774 /* Generic getter for "feature-words" and "filtered-features" properties */
1775 static void x86_cpu_get_feature_words(Object *obj, Visitor *v, void *opaque,
1776 const char *name, Error **errp)
1778 uint32_t *array = (uint32_t *)opaque;
1779 FeatureWord w;
1780 Error *err = NULL;
1781 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1782 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1783 X86CPUFeatureWordInfoList *list = NULL;
1785 for (w = 0; w < FEATURE_WORDS; w++) {
1786 FeatureWordInfo *wi = &feature_word_info[w];
1787 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1788 qwi->cpuid_input_eax = wi->cpuid_eax;
1789 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1790 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1791 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1792 qwi->features = array[w];
1794 /* List will be in reverse order, but order shouldn't matter */
1795 list_entries[w].next = list;
1796 list_entries[w].value = &word_infos[w];
1797 list = &list_entries[w];
1800 visit_type_X86CPUFeatureWordInfoList(v, &list, "feature-words", &err);
1801 error_propagate(errp, err);
1804 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1805 const char *name, Error **errp)
1807 X86CPU *cpu = X86_CPU(obj);
1808 int64_t value = cpu->hyperv_spinlock_attempts;
1810 visit_type_int(v, &value, name, errp);
1813 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1814 const char *name, Error **errp)
1816 const int64_t min = 0xFFF;
1817 const int64_t max = UINT_MAX;
1818 X86CPU *cpu = X86_CPU(obj);
1819 Error *err = NULL;
1820 int64_t value;
1822 visit_type_int(v, &value, name, &err);
1823 if (err) {
1824 error_propagate(errp, err);
1825 return;
1828 if (value < min || value > max) {
1829 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1830 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1831 object_get_typename(obj), name ? name : "null",
1832 value, min, max);
1833 return;
1835 cpu->hyperv_spinlock_attempts = value;
1838 static PropertyInfo qdev_prop_spinlocks = {
1839 .name = "int",
1840 .get = x86_get_hv_spinlocks,
1841 .set = x86_set_hv_spinlocks,
/* Convert all '_' in a feature string option name to '-', to make feature
 * name conform to QOM property naming rule, which uses '-' instead of '_'.
 */
static inline void feat2prop(char *s)
{
    char *p;

    for (p = strchr(s, '_'); p != NULL; p = strchr(p, '_')) {
        *p = '-';
    }
}
1854 /* Parse "+feature,-feature,feature=foo" CPU feature string
1856 static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
1857 Error **errp)
1859 X86CPU *cpu = X86_CPU(cs);
1860 char *featurestr; /* Single 'key=value" string being parsed */
1861 FeatureWord w;
1862 /* Features to be added */
1863 FeatureWordArray plus_features = { 0 };
1864 /* Features to be removed */
1865 FeatureWordArray minus_features = { 0 };
1866 uint32_t numvalue;
1867 CPUX86State *env = &cpu->env;
1868 Error *local_err = NULL;
1870 featurestr = features ? strtok(features, ",") : NULL;
1872 while (featurestr) {
1873 char *val;
1874 if (featurestr[0] == '+') {
1875 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
1876 } else if (featurestr[0] == '-') {
1877 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
1878 } else if ((val = strchr(featurestr, '='))) {
1879 *val = 0; val++;
1880 feat2prop(featurestr);
1881 if (!strcmp(featurestr, "xlevel")) {
1882 char *err;
1883 char num[32];
1885 numvalue = strtoul(val, &err, 0);
1886 if (!*val || *err) {
1887 error_setg(errp, "bad numerical value %s", val);
1888 return;
1890 if (numvalue < 0x80000000) {
1891 error_report("xlevel value shall always be >= 0x80000000"
1892 ", fixup will be removed in future versions");
1893 numvalue += 0x80000000;
1895 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1896 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1897 } else if (!strcmp(featurestr, "tsc-freq")) {
1898 int64_t tsc_freq;
1899 char *err;
1900 char num[32];
1902 tsc_freq = qemu_strtosz_suffix_unit(val, &err,
1903 QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
1904 if (tsc_freq < 0 || *err) {
1905 error_setg(errp, "bad numerical value %s", val);
1906 return;
1908 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
1909 object_property_parse(OBJECT(cpu), num, "tsc-frequency",
1910 &local_err);
1911 } else if (!strcmp(featurestr, "hv-spinlocks")) {
1912 char *err;
1913 const int min = 0xFFF;
1914 char num[32];
1915 numvalue = strtoul(val, &err, 0);
1916 if (!*val || *err) {
1917 error_setg(errp, "bad numerical value %s", val);
1918 return;
1920 if (numvalue < min) {
1921 error_report("hv-spinlocks value shall always be >= 0x%x"
1922 ", fixup will be removed in future versions",
1923 min);
1924 numvalue = min;
1926 snprintf(num, sizeof(num), "%" PRId32, numvalue);
1927 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1928 } else {
1929 object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
1931 } else {
1932 feat2prop(featurestr);
1933 object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
1935 if (local_err) {
1936 error_propagate(errp, local_err);
1937 return;
1939 featurestr = strtok(NULL, ",");
1942 if (cpu->host_features) {
1943 for (w = 0; w < FEATURE_WORDS; w++) {
1944 env->features[w] =
1945 x86_cpu_get_supported_feature_word(w, cpu->migratable);
1949 for (w = 0; w < FEATURE_WORDS; w++) {
1950 env->features[w] |= plus_features[w];
1951 env->features[w] &= ~minus_features[w];
1955 /* Print all cpuid feature names in featureset
1957 static void listflags(FILE *f, fprintf_function print, const char **featureset)
1959 int bit;
1960 bool first = true;
1962 for (bit = 0; bit < 32; bit++) {
1963 if (featureset[bit]) {
1964 print(f, "%s%s", first ? "" : " ", featureset[bit]);
1965 first = false;
1970 /* generate CPU information. */
1971 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
1973 X86CPUDefinition *def;
1974 char buf[256];
1975 int i;
1977 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1978 def = &builtin_x86_defs[i];
1979 snprintf(buf, sizeof(buf), "%s", def->name);
1980 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
1982 #ifdef CONFIG_KVM
1983 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
1984 "KVM processor with all supported host features "
1985 "(only available in KVM mode)");
1986 #endif
1988 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
1989 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
1990 FeatureWordInfo *fw = &feature_word_info[i];
1992 (*cpu_fprintf)(f, " ");
1993 listflags(f, cpu_fprintf, fw->feat_names);
1994 (*cpu_fprintf)(f, "\n");
1998 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2000 CpuDefinitionInfoList *cpu_list = NULL;
2001 X86CPUDefinition *def;
2002 int i;
2004 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2005 CpuDefinitionInfoList *entry;
2006 CpuDefinitionInfo *info;
2008 def = &builtin_x86_defs[i];
2009 info = g_malloc0(sizeof(*info));
2010 info->name = g_strdup(def->name);
2012 entry = g_malloc0(sizeof(*entry));
2013 entry->value = info;
2014 entry->next = cpu_list;
2015 cpu_list = entry;
2018 return cpu_list;
2021 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2022 bool migratable_only)
2024 FeatureWordInfo *wi = &feature_word_info[w];
2025 uint32_t r;
2027 if (kvm_enabled()) {
2028 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2029 wi->cpuid_ecx,
2030 wi->cpuid_reg);
2031 } else if (tcg_enabled()) {
2032 r = wi->tcg_features;
2033 } else {
2034 return ~0;
2036 if (migratable_only) {
2037 r &= x86_cpu_get_migratable_flags(w);
2039 return r;
2043 * Filters CPU feature words based on host availability of each feature.
2045 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
2047 static int x86_cpu_filter_features(X86CPU *cpu)
2049 CPUX86State *env = &cpu->env;
2050 FeatureWord w;
2051 int rv = 0;
2053 for (w = 0; w < FEATURE_WORDS; w++) {
2054 uint32_t host_feat =
2055 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2056 uint32_t requested_features = env->features[w];
2057 env->features[w] &= host_feat;
2058 cpu->filtered_features[w] = requested_features & ~env->features[w];
2059 if (cpu->filtered_features[w]) {
2060 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2061 report_unavailable_features(w, cpu->filtered_features[w]);
2063 rv = 1;
2067 return rv;
2070 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2072 PropValue *pv;
2073 for (pv = props; pv->prop; pv++) {
2074 if (!pv->value) {
2075 continue;
2077 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2078 &error_abort);
/* Load data from X86CPUDefinition into an X86CPU object.
 *
 * Copies the static model definition (CPUID levels, version fields, feature
 * words, model-id) into @cpu via QOM properties, then applies special cases
 * that are not encoded in the struct.  Errors from the property setters are
 * reported through @errp.
 */
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    object_property_set_int(OBJECT(cpu), def->level, "level", errp);
    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
    object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
    cpu->cache_info_passthrough = def->cache_info_passthrough;
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* Special cases not set in the X86CPUDefinition structs: */
    if (kvm_enabled()) {
        /* KVM-specific default property overrides */
        x86_cpu_apply_props(cpu, kvm_default_props);
    }

    /* Always advertise that the guest runs under a hypervisor. */
    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (kvm_enabled()) {
        /* Under KVM, default to the real host vendor string. */
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
}
/* Create (but do not realize) an X86CPU from the "model[,features...]"
 * string @cpu_model.
 *
 * Returns a new X86CPU reference owned by the caller, or NULL on failure
 * with the error propagated through @errp.
 */
X86CPU *cpu_x86_create(const char *cpu_model, Error **errp)
{
    X86CPU *cpu = NULL;
    X86CPUClass *xcc;
    ObjectClass *oc;
    gchar **model_pieces;
    char *name, *features;
    Error *error = NULL;

    /* Split "name,feat1,feat2,..." into model name and feature string. */
    model_pieces = g_strsplit(cpu_model, ",", 2);
    if (!model_pieces[0]) {
        error_setg(&error, "Invalid/empty CPU model name");
        goto out;
    }
    name = model_pieces[0];
    features = model_pieces[1];

    oc = x86_cpu_class_by_name(name);
    if (oc == NULL) {
        error_setg(&error, "Unable to find CPU definition: %s", name);
        goto out;
    }
    xcc = X86_CPU_CLASS(oc);

    /* e.g. the "host" model only works when KVM is active. */
    if (xcc->kvm_required && !kvm_enabled()) {
        error_setg(&error, "CPU model '%s' requires KVM", name);
        goto out;
    }

    cpu = X86_CPU(object_new(object_class_get_name(oc)));

    x86_cpu_parse_featurestr(CPU(cpu), features, &error);
    if (error) {
        goto out;
    }

out:
    /* On failure, drop the partially-constructed CPU reference. */
    if (error != NULL) {
        error_propagate(errp, error);
        if (cpu) {
            object_unref(OBJECT(cpu));
            cpu = NULL;
        }
    }
    g_strfreev(model_pieces);
    return cpu;
}
2177 X86CPU *cpu_x86_init(const char *cpu_model)
2179 Error *error = NULL;
2180 X86CPU *cpu;
2182 cpu = cpu_x86_create(cpu_model, &error);
2183 if (error) {
2184 goto out;
2187 object_property_set_bool(OBJECT(cpu), true, "realized", &error);
2189 out:
2190 if (error) {
2191 error_report_err(error);
2192 if (cpu != NULL) {
2193 object_unref(OBJECT(cpu));
2194 cpu = NULL;
2197 return cpu;
/* Class init for builtin-model subclasses: @data is the X86CPUDefinition
 * that x86_register_cpudef_type() attached as class_data.
 */
static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
{
    X86CPUDefinition *cpudef = data;
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->cpu_def = cpudef;
}
2208 static void x86_register_cpudef_type(X86CPUDefinition *def)
2210 char *typename = x86_cpu_type_name(def->name);
2211 TypeInfo ti = {
2212 .name = typename,
2213 .parent = TYPE_X86_CPU,
2214 .class_init = x86_cpu_cpudef_class_init,
2215 .class_data = def,
2218 type_register(&ti);
2219 g_free(typename);
#if !defined(CONFIG_USER_ONLY)

/* Remove the APIC CPUID feature bit, e.g. for boards without a local APIC. */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}

#endif /* !CONFIG_USER_ONLY */
/* Initialize list of CPU models, filling some non-static fields if necessary.
 *
 * For the generic "qemu32"/"qemu64"/"athlon" models the model-id string
 * embeds the running QEMU version, which is only known at runtime.
 */
void x86_cpudef_setup(void)
{
    int i, j;
    static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };

    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
        X86CPUDefinition *def = &builtin_x86_defs[i];

        /* Look for specific "cpudef" models that */
        /* have the QEMU version in .model_id */
        for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
            if (strcmp(model_with_versions[j], def->name) == 0) {
                /* pstrcpy/pstrcat always NUL-terminate within the buffer. */
                pstrcpy(def->model_id, sizeof(def->model_id),
                        "QEMU Virtual CPU version ");
                pstrcat(def->model_id, sizeof(def->model_id),
                        qemu_get_version());
                break;
            }
        }
    }
}
2255 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2256 uint32_t *eax, uint32_t *ebx,
2257 uint32_t *ecx, uint32_t *edx)
2259 X86CPU *cpu = x86_env_get_cpu(env);
2260 CPUState *cs = CPU(cpu);
2262 /* test if maximum index reached */
2263 if (index & 0x80000000) {
2264 if (index > env->cpuid_xlevel) {
2265 if (env->cpuid_xlevel2 > 0) {
2266 /* Handle the Centaur's CPUID instruction. */
2267 if (index > env->cpuid_xlevel2) {
2268 index = env->cpuid_xlevel2;
2269 } else if (index < 0xC0000000) {
2270 index = env->cpuid_xlevel;
2272 } else {
2273 /* Intel documentation states that invalid EAX input will
2274 * return the same information as EAX=cpuid_level
2275 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2277 index = env->cpuid_level;
2280 } else {
2281 if (index > env->cpuid_level)
2282 index = env->cpuid_level;
2285 switch(index) {
2286 case 0:
2287 *eax = env->cpuid_level;
2288 *ebx = env->cpuid_vendor1;
2289 *edx = env->cpuid_vendor2;
2290 *ecx = env->cpuid_vendor3;
2291 break;
2292 case 1:
2293 *eax = env->cpuid_version;
2294 *ebx = (cpu->apic_id << 24) |
2295 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2296 *ecx = env->features[FEAT_1_ECX];
2297 *edx = env->features[FEAT_1_EDX];
2298 if (cs->nr_cores * cs->nr_threads > 1) {
2299 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2300 *edx |= 1 << 28; /* HTT bit */
2302 break;
2303 case 2:
2304 /* cache info: needed for Pentium Pro compatibility */
2305 if (cpu->cache_info_passthrough) {
2306 host_cpuid(index, 0, eax, ebx, ecx, edx);
2307 break;
2309 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2310 *ebx = 0;
2311 *ecx = 0;
2312 *edx = (L1D_DESCRIPTOR << 16) | \
2313 (L1I_DESCRIPTOR << 8) | \
2314 (L2_DESCRIPTOR);
2315 break;
2316 case 4:
2317 /* cache info: needed for Core compatibility */
2318 if (cpu->cache_info_passthrough) {
2319 host_cpuid(index, count, eax, ebx, ecx, edx);
2320 *eax &= ~0xFC000000;
2321 } else {
2322 *eax = 0;
2323 switch (count) {
2324 case 0: /* L1 dcache info */
2325 *eax |= CPUID_4_TYPE_DCACHE | \
2326 CPUID_4_LEVEL(1) | \
2327 CPUID_4_SELF_INIT_LEVEL;
2328 *ebx = (L1D_LINE_SIZE - 1) | \
2329 ((L1D_PARTITIONS - 1) << 12) | \
2330 ((L1D_ASSOCIATIVITY - 1) << 22);
2331 *ecx = L1D_SETS - 1;
2332 *edx = CPUID_4_NO_INVD_SHARING;
2333 break;
2334 case 1: /* L1 icache info */
2335 *eax |= CPUID_4_TYPE_ICACHE | \
2336 CPUID_4_LEVEL(1) | \
2337 CPUID_4_SELF_INIT_LEVEL;
2338 *ebx = (L1I_LINE_SIZE - 1) | \
2339 ((L1I_PARTITIONS - 1) << 12) | \
2340 ((L1I_ASSOCIATIVITY - 1) << 22);
2341 *ecx = L1I_SETS - 1;
2342 *edx = CPUID_4_NO_INVD_SHARING;
2343 break;
2344 case 2: /* L2 cache info */
2345 *eax |= CPUID_4_TYPE_UNIFIED | \
2346 CPUID_4_LEVEL(2) | \
2347 CPUID_4_SELF_INIT_LEVEL;
2348 if (cs->nr_threads > 1) {
2349 *eax |= (cs->nr_threads - 1) << 14;
2351 *ebx = (L2_LINE_SIZE - 1) | \
2352 ((L2_PARTITIONS - 1) << 12) | \
2353 ((L2_ASSOCIATIVITY - 1) << 22);
2354 *ecx = L2_SETS - 1;
2355 *edx = CPUID_4_NO_INVD_SHARING;
2356 break;
2357 default: /* end of info */
2358 *eax = 0;
2359 *ebx = 0;
2360 *ecx = 0;
2361 *edx = 0;
2362 break;
2366 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2367 if ((*eax & 31) && cs->nr_cores > 1) {
2368 *eax |= (cs->nr_cores - 1) << 26;
2370 break;
2371 case 5:
2372 /* mwait info: needed for Core compatibility */
2373 *eax = 0; /* Smallest monitor-line size in bytes */
2374 *ebx = 0; /* Largest monitor-line size in bytes */
2375 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2376 *edx = 0;
2377 break;
2378 case 6:
2379 /* Thermal and Power Leaf */
2380 *eax = env->features[FEAT_6_EAX];
2381 *ebx = 0;
2382 *ecx = 0;
2383 *edx = 0;
2384 break;
2385 case 7:
2386 /* Structured Extended Feature Flags Enumeration Leaf */
2387 if (count == 0) {
2388 *eax = 0; /* Maximum ECX value for sub-leaves */
2389 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2390 *ecx = 0; /* Reserved */
2391 *edx = 0; /* Reserved */
2392 } else {
2393 *eax = 0;
2394 *ebx = 0;
2395 *ecx = 0;
2396 *edx = 0;
2398 break;
2399 case 9:
2400 /* Direct Cache Access Information Leaf */
2401 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2402 *ebx = 0;
2403 *ecx = 0;
2404 *edx = 0;
2405 break;
2406 case 0xA:
2407 /* Architectural Performance Monitoring Leaf */
2408 if (kvm_enabled() && cpu->enable_pmu) {
2409 KVMState *s = cs->kvm_state;
2411 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2412 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2413 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2414 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2415 } else {
2416 *eax = 0;
2417 *ebx = 0;
2418 *ecx = 0;
2419 *edx = 0;
2421 break;
2422 case 0xD: {
2423 KVMState *s = cs->kvm_state;
2424 uint64_t kvm_mask;
2425 int i;
2427 /* Processor Extended State */
2428 *eax = 0;
2429 *ebx = 0;
2430 *ecx = 0;
2431 *edx = 0;
2432 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) || !kvm_enabled()) {
2433 break;
2435 kvm_mask =
2436 kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX) |
2437 ((uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32);
2439 if (count == 0) {
2440 *ecx = 0x240;
2441 for (i = 2; i < ARRAY_SIZE(ext_save_areas); i++) {
2442 const ExtSaveArea *esa = &ext_save_areas[i];
2443 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2444 (kvm_mask & (1 << i)) != 0) {
2445 if (i < 32) {
2446 *eax |= 1 << i;
2447 } else {
2448 *edx |= 1 << (i - 32);
2450 *ecx = MAX(*ecx, esa->offset + esa->size);
2453 *eax |= kvm_mask & (XSTATE_FP | XSTATE_SSE);
2454 *ebx = *ecx;
2455 } else if (count == 1) {
2456 *eax = env->features[FEAT_XSAVE];
2457 } else if (count < ARRAY_SIZE(ext_save_areas)) {
2458 const ExtSaveArea *esa = &ext_save_areas[count];
2459 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2460 (kvm_mask & (1 << count)) != 0) {
2461 *eax = esa->size;
2462 *ebx = esa->offset;
2465 break;
2467 case 0x80000000:
2468 *eax = env->cpuid_xlevel;
2469 *ebx = env->cpuid_vendor1;
2470 *edx = env->cpuid_vendor2;
2471 *ecx = env->cpuid_vendor3;
2472 break;
2473 case 0x80000001:
2474 *eax = env->cpuid_version;
2475 *ebx = 0;
2476 *ecx = env->features[FEAT_8000_0001_ECX];
2477 *edx = env->features[FEAT_8000_0001_EDX];
2479 /* The Linux kernel checks for the CMPLegacy bit and
2480 * discards multiple thread information if it is set.
2481 * So dont set it here for Intel to make Linux guests happy.
2483 if (cs->nr_cores * cs->nr_threads > 1) {
2484 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2485 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2486 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2487 *ecx |= 1 << 1; /* CmpLegacy bit */
2490 break;
2491 case 0x80000002:
2492 case 0x80000003:
2493 case 0x80000004:
2494 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2495 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2496 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2497 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2498 break;
2499 case 0x80000005:
2500 /* cache info (L1 cache) */
2501 if (cpu->cache_info_passthrough) {
2502 host_cpuid(index, 0, eax, ebx, ecx, edx);
2503 break;
2505 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2506 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2507 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2508 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2509 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2510 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2511 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2512 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2513 break;
2514 case 0x80000006:
2515 /* cache info (L2 cache) */
2516 if (cpu->cache_info_passthrough) {
2517 host_cpuid(index, 0, eax, ebx, ecx, edx);
2518 break;
2520 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2521 (L2_DTLB_2M_ENTRIES << 16) | \
2522 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2523 (L2_ITLB_2M_ENTRIES);
2524 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2525 (L2_DTLB_4K_ENTRIES << 16) | \
2526 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2527 (L2_ITLB_4K_ENTRIES);
2528 *ecx = (L2_SIZE_KB_AMD << 16) | \
2529 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2530 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2531 *edx = ((L3_SIZE_KB/512) << 18) | \
2532 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2533 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2534 break;
2535 case 0x80000007:
2536 *eax = 0;
2537 *ebx = 0;
2538 *ecx = 0;
2539 *edx = env->features[FEAT_8000_0007_EDX];
2540 break;
2541 case 0x80000008:
2542 /* virtual & phys address size in low 2 bytes. */
2543 /* XXX: This value must match the one used in the MMU code. */
2544 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2545 /* 64 bit processor */
2546 /* XXX: The physical address space is limited to 42 bits in exec.c. */
2547 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
2548 } else {
2549 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
2550 *eax = 0x00000024; /* 36 bits physical */
2551 } else {
2552 *eax = 0x00000020; /* 32 bits physical */
2555 *ebx = 0;
2556 *ecx = 0;
2557 *edx = 0;
2558 if (cs->nr_cores * cs->nr_threads > 1) {
2559 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2561 break;
2562 case 0x8000000A:
2563 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2564 *eax = 0x00000001; /* SVM Revision */
2565 *ebx = 0x00000010; /* nr of ASIDs */
2566 *ecx = 0;
2567 *edx = env->features[FEAT_SVM]; /* optional features */
2568 } else {
2569 *eax = 0;
2570 *ebx = 0;
2571 *ecx = 0;
2572 *edx = 0;
2574 break;
2575 case 0xC0000000:
2576 *eax = env->cpuid_xlevel2;
2577 *ebx = 0;
2578 *ecx = 0;
2579 *edx = 0;
2580 break;
2581 case 0xC0000001:
2582 /* Support for VIA CPU's CPUID instruction */
2583 *eax = env->cpuid_version;
2584 *ebx = 0;
2585 *ecx = 0;
2586 *edx = env->features[FEAT_C000_0001_EDX];
2587 break;
2588 case 0xC0000002:
2589 case 0xC0000003:
2590 case 0xC0000004:
2591 /* Reserved for the future, and now filled with zero */
2592 *eax = 0;
2593 *ebx = 0;
2594 *ecx = 0;
2595 *edx = 0;
2596 break;
2597 default:
2598 /* reserved values: zero */
2599 *eax = 0;
2600 *ebx = 0;
2601 *ecx = 0;
2602 *edx = 0;
2603 break;
/* CPUClass::reset()
 *
 * Bring the vCPU to architectural power-on state.  The CPUID configuration
 * (everything from cpuid_level onward in CPUX86State) is preserved; the
 * rest of the state is zeroed and then rebuilt.
 */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    int i;

    xcc->parent_reset(s);

    /* Zero everything up to (not including) the CPUID fields. */
    memset(env, 0, offsetof(CPUX86State, cpuid_level));

    tlb_flush(s, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* Real-mode segment caches; CS base 0xffff0000 so that CS:IP
     * points at the reset vector 0xfffffff0. */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    env->xstate_bv = XSTATE_FP | XSTATE_SSE;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    env->xcr0 = 1;

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    /* Application processors start halted, waiting for INIT/SIPI. */
    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
#endif
}
2707 #ifndef CONFIG_USER_ONLY
2708 bool cpu_is_bsp(X86CPU *cpu)
2710 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
/* Machine-reset hook that resets this CPU through the legacy path.
 * TODO: remove me, when reset over QOM tree is implemented
 */
static void x86_cpu_machine_reset_cb(void *opaque)
{
    X86CPU *cpu = opaque;
    cpu_reset(CPU(cpu));
}
2719 #endif
2721 static void mce_init(X86CPU *cpu)
2723 CPUX86State *cenv = &cpu->env;
2724 unsigned int bank;
2726 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2727 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2728 (CPUID_MCE | CPUID_MCA)) {
2729 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2730 cenv->mcg_ctl = ~(uint64_t)0;
2731 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2732 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2737 #ifndef CONFIG_USER_ONLY
/* Create (but do not realize) the local APIC device for @cpu, picking the
 * implementation that matches the current accelerator: in-kernel KVM APIC,
 * Xen APIC, or the emulated "apic".
 */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    const char *apic_type = "apic";

    if (kvm_irqchip_in_kernel()) {
        apic_type = "kvm-apic";
    } else if (xen_enabled()) {
        apic_type = "xen-apic";
    }

    cpu->apic_state = DEVICE(object_new(apic_type));

    /* Parent the APIC under the CPU in the QOM tree. */
    object_property_add_child(OBJECT(cpu), "apic",
                              OBJECT(cpu->apic_state), NULL);
    qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}
/* Realize the APIC created by x86_cpu_apic_create() and, on first call
 * only, map the APIC MMIO window into the system address space (the
 * region is shared, so one mapping serves all CPUs).
 */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    static bool apic_mmio_map_once;

    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
    }
}
/* machine-init-done notifier: if the board exposes /machine/smram, alias
 * it into this CPU's address space at high priority.  The alias starts
 * disabled and is enabled when the CPU enters SMM.
 */
static void x86_cpu_machine_done(Notifier *n, void *unused)
{
    X86CPU *cpu = container_of(n, X86CPU, machine_done);
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    if (smram) {
        cpu->smram = g_new(MemoryRegion, 1);
        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
                                 smram, 0, 1ull << 32);
        memory_region_set_enabled(cpu->smram, false);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
    }
}
2797 #else
/* User-mode emulation has no APIC device: nothing to realize. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
2801 #endif
/* Vendor-check helpers: compare the three CPUID[0] vendor-string registers
 * against the well-known Intel/AMD signatures.
 */
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
/* DeviceClass::realize() for X86CPU.
 *
 * Validates configuration, filters features against accelerator support,
 * creates the APIC and (TCG-only) per-CPU address space, starts the vCPU
 * thread, and finally resets the CPU.  Errors are propagated via @errp.
 */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;

    /* Board code must have assigned an APIC ID before realize. */
    if (cpu->apic_id < 0) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    /* CPUID[7] features require cpuid_level >= 7 to be visible. */
    if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
        env->cpuid_level = 7;
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
           & CPUID_EXT2_AMD_ALIASES);
    }

    /* With "enforce", any feature the accelerator cannot provide is fatal. */
    if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
        error_setg(&local_err,
                   kvm_enabled() ?
                       "Host doesn't support requested features" :
                       "TCG doesn't support requested features");
        goto out;
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);
        cs->as = g_new(AddressSpace, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);
        address_space_init(cs->as, cpu->cpu_as_root, "CPU");

        /* ... SMRAM with higher priority, linked from /machine/smram. */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
     * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to gives
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't be populated yet and the checking is incorrect.
     */
    if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
        error_report("AMD CPU doesn't support hyperthreading. Please configure"
                     " -smp options properly.");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
/* Opaque state for a single-bit QOM property: @ptr points at the feature
 * word and @mask holds the bit(s) the property controls.
 */
typedef struct BitProperty {
    uint32_t *ptr;
    uint32_t mask;
} BitProperty;
2920 static void x86_cpu_get_bit_prop(Object *obj,
2921 struct Visitor *v,
2922 void *opaque,
2923 const char *name,
2924 Error **errp)
2926 BitProperty *fp = opaque;
2927 bool value = (*fp->ptr & fp->mask) == fp->mask;
2928 visit_type_bool(v, &value, name, errp);
2931 static void x86_cpu_set_bit_prop(Object *obj,
2932 struct Visitor *v,
2933 void *opaque,
2934 const char *name,
2935 Error **errp)
2937 DeviceState *dev = DEVICE(obj);
2938 BitProperty *fp = opaque;
2939 Error *local_err = NULL;
2940 bool value;
2942 if (dev->realized) {
2943 qdev_prop_set_after_realize(dev, name, errp);
2944 return;
2947 visit_type_bool(v, &value, name, &local_err);
2948 if (local_err) {
2949 error_propagate(errp, local_err);
2950 return;
2953 if (value) {
2954 *fp->ptr |= fp->mask;
2955 } else {
2956 *fp->ptr &= ~fp->mask;
2960 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
2961 void *opaque)
2963 BitProperty *prop = opaque;
2964 g_free(prop);
/* Register a boolean property to get/set a single bit in a uint32_t field.
 *
 * The same property name can be registered multiple times to make it affect
 * multiple bits in the same FeatureWord. In that case, the getter will return
 * true only if all bits are set.
 */
static void x86_cpu_register_bit_prop(X86CPU *cpu,
                                      const char *prop_name,
                                      uint32_t *field,
                                      int bitnr)
{
    BitProperty *fp;
    ObjectProperty *op;
    uint32_t mask = (1UL << bitnr);

    op = object_property_find(OBJECT(cpu), prop_name, NULL);
    if (op) {
        /* Name already registered: widen the existing property's mask.
         * All bits behind one name must live in the same feature word. */
        fp = op->opaque;
        assert(fp->ptr == field);
        fp->mask |= mask;
    } else {
        fp = g_new0(BitProperty, 1);
        fp->ptr = field;
        fp->mask = mask;
        object_property_add(OBJECT(cpu), prop_name, "bool",
                            x86_cpu_get_bit_prop,
                            x86_cpu_set_bit_prop,
                            x86_cpu_release_bit_prop, fp, &error_abort);
    }
}
/* Register the QOM bit propert(ies) for bit @bitnr of feature word @w.
 *
 * Feature names may contain "|"-separated aliases; the first name becomes
 * the canonical bit property and the rest are added as property aliases.
 * Bits without a name get no property.
 */
static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
                                               FeatureWord w,
                                               int bitnr)
{
    Object *obj = OBJECT(cpu);
    int i;
    char **names;
    FeatureWordInfo *fi = &feature_word_info[w];

    if (!fi->feat_names) {
        return;
    }
    if (!fi->feat_names[bitnr]) {
        return;
    }

    names = g_strsplit(fi->feat_names[bitnr], "|", 0);

    /* feat2prop() canonicalizes the name (e.g. '_' -> '-') in place. */
    feat2prop(names[0]);
    x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);

    for (i = 1; names[i]; i++) {
        feat2prop(names[i]);
        object_property_add_alias(obj, names[i], obj, names[0],
                                  &error_abort);
    }

    g_strfreev(names);
}
/* QOM instance_init for X86CPU.
 *
 * Registers the version/vendor/feature QOM properties, one bit property
 * per named CPUID feature flag, then loads the class's model definition.
 * TCG static tables are initialized on the first CPU only.
 */
static void x86_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    static int inited;

    cs->env_ptr = env;
    cpu_exec_init(cs, &error_abort);

    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    object_property_add(obj, "apic-id", "int",
                        x86_cpuid_get_apic_id,
                        x86_cpuid_set_apic_id, NULL, NULL, NULL);
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;

#ifndef CONFIG_USER_ONLY
    /* Any code creating new X86CPU objects have to set apic-id explicitly */
    cpu->apic_id = -1;
#endif

    /* One bit property per named CPUID feature flag. */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);

    /* init various static tables used in TCG mode */
    if (tcg_enabled() && !inited) {
        inited = 1;
        optimize_flags_init();
    }
}
3092 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3094 X86CPU *cpu = X86_CPU(cs);
3096 return cpu->apic_id;
3099 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3101 X86CPU *cpu = X86_CPU(cs);
3103 return cpu->env.cr[0] & CR0_PG_MASK;
3106 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3108 X86CPU *cpu = X86_CPU(cs);
3110 cpu->env.eip = value;
3113 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3115 X86CPU *cpu = X86_CPU(cs);
3117 cpu->env.eip = tb->pc - tb->cs_base;
/* CPUClass::has_work(): true if a pending interrupt can be serviced now.
 *
 * HARD/POLL interrupts require EFLAGS.IF; NMI/INIT/SIPI/MCE are always
 * serviceable; SMI is serviceable only when not already in SMM.
 */
static bool x86_cpu_has_work(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
                                      CPU_INTERRUPT_POLL)) &&
            (env->eflags & IF_MASK)) ||
           (cs->interrupt_request & (CPU_INTERRUPT_NMI |
                                     CPU_INTERRUPT_INIT |
                                     CPU_INTERRUPT_SIPI |
                                     CPU_INTERRUPT_MCE)) ||
           ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
            !(env->hflags & HF_SMM_MASK));
}
/* qdev properties common to every X86CPU instance. */
static Property x86_cpu_properties[] = {
    /* PMU passthrough (effective under KVM only). */
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    /* Hyper-V enlightenments for Windows guests: */
    { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
    DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
    DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
    DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
    /* Feature-filtering policy: warn ("check") or fail ("enforce"). */
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, false),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    /* Expose KVM paravirt CPUID leaves to the guest. */
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
    /* Override the Hyper-V vendor ID string advertised to the guest. */
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_END_OF_LIST()
};
/* Class init for the abstract TYPE_X86_CPU: installs the DeviceClass and
 * CPUClass hooks shared by every x86 CPU model.
 */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    /* Chain up: remember the parent realize/reset so ours can call them. */
    xcc->parent_realize = dc->realize;
    dc->realize = x86_cpu_realizefn;
    dc->props = x86_cpu_properties;

    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
    cc->dump_state = x86_cpu_dump_state;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
#ifndef CONFIG_USER_ONLY
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;

    /*
     * Reason: x86_cpu_initfn() calls cpu_exec_init(), which saves the
     * object in cpus -> dangling pointer after final object_unref().
     */
    dc->cannot_destroy_with_object_finalize_yet = true;
}
/* Abstract base type that every concrete x86 CPU model derives from. */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};
/* Register the abstract x86 CPU base type, one subtype per builtin model,
 * and (when built with KVM) the "host" passthrough model.
 */
static void x86_cpu_register_types(void)
{
    int i;

    type_register_static(&x86_cpu_type_info);
    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
        x86_register_cpudef_type(&builtin_x86_defs[i]);
    }
#ifdef CONFIG_KVM
    type_register_static(&host_x86_cpu_type_info);
#endif
}

type_init(x86_cpu_register_types)