exec: Respect as_translate_internal length clamp
[qemu/ar7.git] / target-i386 / cpu.c
blob f01690bfea5e4cdf4625a56129856a9d251f2a2b
1 /*
2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include <stdlib.h>
20 #include <stdio.h>
21 #include <string.h>
22 #include <inttypes.h>
24 #include "cpu.h"
25 #include "sysemu/kvm.h"
26 #include "sysemu/cpus.h"
27 #include "kvm_i386.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
33 #include "qapi-types.h"
34 #include "qapi-visit.h"
35 #include "qapi/visitor.h"
36 #include "sysemu/arch_init.h"
38 #include "hw/hw.h"
39 #if defined(CONFIG_KVM)
40 #include <linux/kvm_para.h>
41 #endif
43 #include "sysemu/sysemu.h"
44 #include "hw/qdev-properties.h"
45 #include "hw/cpu/icc_bus.h"
46 #ifndef CONFIG_USER_ONLY
47 #include "hw/xen/xen.h"
48 #include "hw/i386/apic_internal.h"
49 #endif
52 /* Cache topology CPUID constants: */
54 /* CPUID Leaf 2 Descriptors */
56 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
57 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
58 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
61 /* CPUID Leaf 4 constants: */
63 /* EAX: */
64 #define CPUID_4_TYPE_DCACHE 1
65 #define CPUID_4_TYPE_ICACHE 2
66 #define CPUID_4_TYPE_UNIFIED 3
68 #define CPUID_4_LEVEL(l) ((l) << 5)
70 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
71 #define CPUID_4_FULLY_ASSOC (1 << 9)
73 /* EDX: */
74 #define CPUID_4_NO_INVD_SHARING (1 << 0)
75 #define CPUID_4_INCLUSIVE (1 << 1)
76 #define CPUID_4_COMPLEX_IDX (1 << 2)
78 #define ASSOC_FULL 0xFF
80 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
81 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
82 a == 2 ? 0x2 : \
83 a == 4 ? 0x4 : \
84 a == 8 ? 0x6 : \
85 a == 16 ? 0x8 : \
86 a == 32 ? 0xA : \
87 a == 48 ? 0xB : \
88 a == 64 ? 0xC : \
89 a == 96 ? 0xD : \
90 a == 128 ? 0xE : \
91 a == ASSOC_FULL ? 0xF : \
92 0 /* invalid value */)
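/* Illustrative sketch (not part of the original file): AMD_ENC_ASSOC maps a
 * plain associativity count to the 4-bit encoding expected by CPUID leaf
 * 0x80000006, e.g. 16-way -> 0x8 and ASSOC_FULL (0xFF) -> 0xF.  Later in this
 * file the L2 descriptor returned in ECX is assembled roughly like this
 * (field layout per AMD's CPUID Specification: size in KB, associativity,
 * lines per tag, line size):
 *
 *     uint32_t ecx = (L2_SIZE_KB_AMD << 16) |
 *                    (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) |
 *                    (L2_LINES_PER_TAG << 8) | L2_LINE_SIZE;
 */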
95 /* Definitions of the hardcoded cache entries we expose: */
97 /* L1 data cache: */
98 #define L1D_LINE_SIZE 64
99 #define L1D_ASSOCIATIVITY 8
100 #define L1D_SETS 64
101 #define L1D_PARTITIONS 1
102 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
103 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
104 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
105 #define L1D_LINES_PER_TAG 1
106 #define L1D_SIZE_KB_AMD 64
107 #define L1D_ASSOCIATIVITY_AMD 2
109 /* L1 instruction cache: */
110 #define L1I_LINE_SIZE 64
111 #define L1I_ASSOCIATIVITY 8
112 #define L1I_SETS 64
113 #define L1I_PARTITIONS 1
114 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
115 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
116 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
117 #define L1I_LINES_PER_TAG 1
118 #define L1I_SIZE_KB_AMD 64
119 #define L1I_ASSOCIATIVITY_AMD 2
121 /* Level 2 unified cache: */
122 #define L2_LINE_SIZE 64
123 #define L2_ASSOCIATIVITY 16
124 #define L2_SETS 4096
125 #define L2_PARTITIONS 1
126 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
127 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
128 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
129 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
130 #define L2_LINES_PER_TAG 1
131 #define L2_SIZE_KB_AMD 512
133 /* No L3 cache: */
134 #define L3_SIZE_KB 0 /* disabled */
135 #define L3_ASSOCIATIVITY 0 /* disabled */
136 #define L3_LINES_PER_TAG 0 /* disabled */
137 #define L3_LINE_SIZE 0 /* disabled */
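/* Illustrative check (not part of the original file): the "Size =" comments
 * above are just LINE_SIZE * ASSOCIATIVITY * SETS * PARTITIONS, e.g. for the
 * L1 data cache 64 * 8 * 64 * 1 = 32768 bytes = 32 KiB, and for the unified
 * L2 cache 64 * 16 * 4096 * 1 = 4 MiB.  A hypothetical compile-time guard
 * could assert this with QEMU's build-time check macro:
 *
 *     QEMU_BUILD_BUG_ON(L1D_LINE_SIZE * L1D_ASSOCIATIVITY * L1D_SETS *
 *                       L1D_PARTITIONS != 32 * 1024);
 *     QEMU_BUILD_BUG_ON(L2_LINE_SIZE * L2_ASSOCIATIVITY * L2_SETS *
 *                       L2_PARTITIONS != 4 * 1024 * 1024);
 */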
139 /* TLB definitions: */
141 #define L1_DTLB_2M_ASSOC 1
142 #define L1_DTLB_2M_ENTRIES 255
143 #define L1_DTLB_4K_ASSOC 1
144 #define L1_DTLB_4K_ENTRIES 255
146 #define L1_ITLB_2M_ASSOC 1
147 #define L1_ITLB_2M_ENTRIES 255
148 #define L1_ITLB_4K_ASSOC 1
149 #define L1_ITLB_4K_ENTRIES 255
151 #define L2_DTLB_2M_ASSOC 0 /* disabled */
152 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
153 #define L2_DTLB_4K_ASSOC 4
154 #define L2_DTLB_4K_ENTRIES 512
156 #define L2_ITLB_2M_ASSOC 0 /* disabled */
157 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
158 #define L2_ITLB_4K_ASSOC 4
159 #define L2_ITLB_4K_ENTRIES 512
163 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
164 uint32_t vendor2, uint32_t vendor3)
166 int i;
167 for (i = 0; i < 4; i++) {
168 dst[i] = vendor1 >> (8 * i);
169 dst[i + 4] = vendor2 >> (8 * i);
170 dst[i + 8] = vendor3 >> (8 * i);
172 dst[CPUID_VENDOR_SZ] = '\0';
175 /* feature flags taken from "Intel Processor Identification and the CPUID
176 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
177 * between feature naming conventions, aliases may be added.
179 static const char *feature_name[] = {
180 "fpu", "vme", "de", "pse",
181 "tsc", "msr", "pae", "mce",
182 "cx8", "apic", NULL, "sep",
183 "mtrr", "pge", "mca", "cmov",
184 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
185 NULL, "ds" /* Intel dts */, "acpi", "mmx",
186 "fxsr", "sse", "sse2", "ss",
187 "ht" /* Intel htt */, "tm", "ia64", "pbe",
189 static const char *ext_feature_name[] = {
190 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
191 "ds_cpl", "vmx", "smx", "est",
192 "tm2", "ssse3", "cid", NULL,
193 "fma", "cx16", "xtpr", "pdcm",
194 NULL, "pcid", "dca", "sse4.1|sse4_1",
195 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
196 "tsc-deadline", "aes", "xsave", "osxsave",
197 "avx", "f16c", "rdrand", "hypervisor",
199 /* Feature names that are already defined on feature_name[] but are set on
200 * CPUID[8000_0001].EDX on AMD CPUs don't have their names on
201 * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
202 * if and only if CPU vendor is AMD.
204 static const char *ext2_feature_name[] = {
205 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
206 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
207 NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
208 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
209 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
210 "nx|xd", NULL, "mmxext", NULL /* mmx */,
211 NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
212 NULL, "lm|i64", "3dnowext", "3dnow",
214 static const char *ext3_feature_name[] = {
215 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
216 "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
217 "3dnowprefetch", "osvw", "ibs", "xop",
218 "skinit", "wdt", NULL, "lwp",
219 "fma4", "tce", NULL, "nodeid_msr",
220 NULL, "tbm", "topoext", "perfctr_core",
221 "perfctr_nb", NULL, NULL, NULL,
222 NULL, NULL, NULL, NULL,
225 static const char *ext4_feature_name[] = {
226 NULL, NULL, "xstore", "xstore-en",
227 NULL, NULL, "xcrypt", "xcrypt-en",
228 "ace2", "ace2-en", "phe", "phe-en",
229 "pmm", "pmm-en", NULL, NULL,
230 NULL, NULL, NULL, NULL,
231 NULL, NULL, NULL, NULL,
232 NULL, NULL, NULL, NULL,
233 NULL, NULL, NULL, NULL,
236 static const char *kvm_feature_name[] = {
237 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
238 "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
239 NULL, NULL, NULL, NULL,
240 NULL, NULL, NULL, NULL,
241 NULL, NULL, NULL, NULL,
242 NULL, NULL, NULL, NULL,
243 "kvmclock-stable-bit", NULL, NULL, NULL,
244 NULL, NULL, NULL, NULL,
247 static const char *svm_feature_name[] = {
248 "npt", "lbrv", "svm_lock", "nrip_save",
249 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
250 NULL, NULL, "pause_filter", NULL,
251 "pfthreshold", NULL, NULL, NULL,
252 NULL, NULL, NULL, NULL,
253 NULL, NULL, NULL, NULL,
254 NULL, NULL, NULL, NULL,
255 NULL, NULL, NULL, NULL,
258 static const char *cpuid_7_0_ebx_feature_name[] = {
259 "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
260 "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
261 "avx512f", NULL, "rdseed", "adx", "smap", NULL, NULL, NULL,
262 NULL, NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
265 static const char *cpuid_apm_edx_feature_name[] = {
266 NULL, NULL, NULL, NULL,
267 NULL, NULL, NULL, NULL,
268 "invtsc", NULL, NULL, NULL,
269 NULL, NULL, NULL, NULL,
270 NULL, NULL, NULL, NULL,
271 NULL, NULL, NULL, NULL,
272 NULL, NULL, NULL, NULL,
273 NULL, NULL, NULL, NULL,
276 static const char *cpuid_xsave_feature_name[] = {
277 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
278 NULL, NULL, NULL, NULL,
279 NULL, NULL, NULL, NULL,
280 NULL, NULL, NULL, NULL,
281 NULL, NULL, NULL, NULL,
282 NULL, NULL, NULL, NULL,
283 NULL, NULL, NULL, NULL,
284 NULL, NULL, NULL, NULL,
287 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
288 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
289 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
290 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
291 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
292 CPUID_PSE36 | CPUID_FXSR)
293 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
294 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
295 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
296 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
297 CPUID_PAE | CPUID_SEP | CPUID_APIC)
299 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
300 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
301 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
302 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
303 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS)
304 /* partly implemented:
305 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
306 /* missing:
307 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
308 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
309 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
310 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
311 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
312 /* missing:
313 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
314 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
315 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
316 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_XSAVE,
317 CPUID_EXT_OSXSAVE, CPUID_EXT_AVX, CPUID_EXT_F16C,
318 CPUID_EXT_RDRAND */
320 #ifdef TARGET_X86_64
321 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
322 #else
323 #define TCG_EXT2_X86_64_FEATURES 0
324 #endif
326 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
327 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
328 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
329 TCG_EXT2_X86_64_FEATURES)
330 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
331 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
332 #define TCG_EXT4_FEATURES 0
333 #define TCG_SVM_FEATURES 0
334 #define TCG_KVM_FEATURES 0
335 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
336 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX)
337 /* missing:
338 CPUID_7_0_EBX_FSGSBASE, CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
339 CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
340 CPUID_7_0_EBX_RDSEED */
341 #define TCG_APM_FEATURES 0
344 typedef struct FeatureWordInfo {
345 const char **feat_names;
346 uint32_t cpuid_eax; /* Input EAX for CPUID */
347 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
348 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
349 int cpuid_reg; /* output register (R_* constant) */
350 uint32_t tcg_features; /* Feature flags supported by TCG */
351 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
352 } FeatureWordInfo;
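/* Illustrative sketch (not part of the original file): each FeatureWordInfo
 * entry describes exactly where a feature word lives in CPUID, so reading it
 * from the host reduces to a single host_cpuid() call.  A hypothetical helper
 * using only the fields defined above could look like:
 *
 *     static uint32_t host_feature_word(FeatureWord w)
 *     {
 *         FeatureWordInfo *wi = &feature_word_info[w];
 *         uint32_t regs[4];
 *         host_cpuid(wi->cpuid_eax, wi->cpuid_needs_ecx ? wi->cpuid_ecx : 0,
 *                    &regs[R_EAX], &regs[R_EBX], &regs[R_ECX], &regs[R_EDX]);
 *         return regs[wi->cpuid_reg];
 *     }
 *
 * (KVM builds query kvm_arch_get_supported_cpuid() instead of raw host
 * CPUID; see x86_cpu_get_supported_feature_word(), declared further down.)
 */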
354 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
355 [FEAT_1_EDX] = {
356 .feat_names = feature_name,
357 .cpuid_eax = 1, .cpuid_reg = R_EDX,
358 .tcg_features = TCG_FEATURES,
360 [FEAT_1_ECX] = {
361 .feat_names = ext_feature_name,
362 .cpuid_eax = 1, .cpuid_reg = R_ECX,
363 .tcg_features = TCG_EXT_FEATURES,
365 [FEAT_8000_0001_EDX] = {
366 .feat_names = ext2_feature_name,
367 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
368 .tcg_features = TCG_EXT2_FEATURES,
370 [FEAT_8000_0001_ECX] = {
371 .feat_names = ext3_feature_name,
372 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
373 .tcg_features = TCG_EXT3_FEATURES,
375 [FEAT_C000_0001_EDX] = {
376 .feat_names = ext4_feature_name,
377 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
378 .tcg_features = TCG_EXT4_FEATURES,
380 [FEAT_KVM] = {
381 .feat_names = kvm_feature_name,
382 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
383 .tcg_features = TCG_KVM_FEATURES,
385 [FEAT_SVM] = {
386 .feat_names = svm_feature_name,
387 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
388 .tcg_features = TCG_SVM_FEATURES,
390 [FEAT_7_0_EBX] = {
391 .feat_names = cpuid_7_0_ebx_feature_name,
392 .cpuid_eax = 7,
393 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
394 .cpuid_reg = R_EBX,
395 .tcg_features = TCG_7_0_EBX_FEATURES,
397 [FEAT_8000_0007_EDX] = {
398 .feat_names = cpuid_apm_edx_feature_name,
399 .cpuid_eax = 0x80000007,
400 .cpuid_reg = R_EDX,
401 .tcg_features = TCG_APM_FEATURES,
402 .unmigratable_flags = CPUID_APM_INVTSC,
404 [FEAT_XSAVE] = {
405 .feat_names = cpuid_xsave_feature_name,
406 .cpuid_eax = 0xd,
407 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
408 .cpuid_reg = R_EAX,
409 .tcg_features = 0,
413 typedef struct X86RegisterInfo32 {
414 /* Name of register */
415 const char *name;
416 /* QAPI enum value register */
417 X86CPURegister32 qapi_enum;
418 } X86RegisterInfo32;
420 #define REGISTER(reg) \
421 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
422 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
423 REGISTER(EAX),
424 REGISTER(ECX),
425 REGISTER(EDX),
426 REGISTER(EBX),
427 REGISTER(ESP),
428 REGISTER(EBP),
429 REGISTER(ESI),
430 REGISTER(EDI),
432 #undef REGISTER
434 typedef struct ExtSaveArea {
435 uint32_t feature, bits;
436 uint32_t offset, size;
437 } ExtSaveArea;
439 static const ExtSaveArea ext_save_areas[] = {
440 [2] = { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
441 .offset = 0x240, .size = 0x100 },
442 [3] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
443 .offset = 0x3c0, .size = 0x40 },
444 [4] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
445 .offset = 0x400, .size = 0x40 },
446 [5] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
447 .offset = 0x440, .size = 0x40 },
448 [6] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
449 .offset = 0x480, .size = 0x200 },
450 [7] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
451 .offset = 0x680, .size = 0x400 },
454 const char *get_register_name_32(unsigned int reg)
456 if (reg >= CPU_NB_REGS32) {
457 return NULL;
459 return x86_reg_info_32[reg].name;
462 /* KVM-specific features that are automatically added to all CPU models
463 * when KVM is enabled.
465 static uint32_t kvm_default_features[FEATURE_WORDS] = {
466 [FEAT_KVM] = (1 << KVM_FEATURE_CLOCKSOURCE) |
467 (1 << KVM_FEATURE_NOP_IO_DELAY) |
468 (1 << KVM_FEATURE_CLOCKSOURCE2) |
469 (1 << KVM_FEATURE_ASYNC_PF) |
470 (1 << KVM_FEATURE_STEAL_TIME) |
471 (1 << KVM_FEATURE_PV_EOI) |
472 (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT),
473 [FEAT_1_ECX] = CPUID_EXT_X2APIC,
476 /* Features that are not added by default to any CPU model when KVM is enabled.
478 static uint32_t kvm_default_unset_features[FEATURE_WORDS] = {
479 [FEAT_1_EDX] = CPUID_ACPI,
480 [FEAT_1_ECX] = CPUID_EXT_MONITOR,
481 [FEAT_8000_0001_ECX] = CPUID_EXT3_SVM,
484 void x86_cpu_compat_kvm_no_autoenable(FeatureWord w, uint32_t features)
486 kvm_default_features[w] &= ~features;
489 void x86_cpu_compat_kvm_no_autodisable(FeatureWord w, uint32_t features)
491 kvm_default_unset_features[w] &= ~features;
495 * Returns the set of feature flags that are supported and migratable by
496 * QEMU, for a given FeatureWord.
498 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
500 FeatureWordInfo *wi = &feature_word_info[w];
501 uint32_t r = 0;
502 int i;
504 for (i = 0; i < 32; i++) {
505 uint32_t f = 1U << i;
506 /* If the feature name is unknown, it is not supported by QEMU yet */
507 if (!wi->feat_names[i]) {
508 continue;
510 /* Skip features known to QEMU, but explicitly marked as unmigratable */
511 if (wi->unmigratable_flags & f) {
512 continue;
514 r |= f;
516 return r;
519 void host_cpuid(uint32_t function, uint32_t count,
520 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
522 uint32_t vec[4];
524 #ifdef __x86_64__
525 asm volatile("cpuid"
526 : "=a"(vec[0]), "=b"(vec[1]),
527 "=c"(vec[2]), "=d"(vec[3])
528 : "0"(function), "c"(count) : "cc");
529 #elif defined(__i386__)
530 asm volatile("pusha \n\t"
531 "cpuid \n\t"
532 "mov %%eax, 0(%2) \n\t"
533 "mov %%ebx, 4(%2) \n\t"
534 "mov %%ecx, 8(%2) \n\t"
535 "mov %%edx, 12(%2) \n\t"
536 "popa"
537 : : "a"(function), "c"(count), "S"(vec)
538 : "memory", "cc");
539 #else
540 abort();
541 #endif
543 if (eax)
544 *eax = vec[0];
545 if (ebx)
546 *ebx = vec[1];
547 if (ecx)
548 *ecx = vec[2];
549 if (edx)
550 *edx = vec[3];
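/* Illustrative usage (not part of the original file): combining host_cpuid()
 * with x86_cpu_vendor_words2str() above recovers the host vendor string, the
 * same way host_x86_cpu_class_init() does further down in this file.  On an
 * Intel host the three registers spell out "GenuineIntel" (EBX="Genu",
 * EDX="ineI", ECX="ntel"):
 *
 *     uint32_t eax, ebx, ecx, edx;
 *     char vendor[CPUID_VENDOR_SZ + 1];
 *     host_cpuid(0, 0, &eax, &ebx, &ecx, &edx);
 *     x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
 */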
553 #define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
555 /* general substring compare of *[s1..e1) and *[s2..e2). sx is start of
556 * a substring. ex if !NULL points to the first char after a substring,
557  * otherwise the string is assumed to be sized by a terminating nul.
558 * Return lexical ordering of *s1:*s2.
560 static int sstrcmp(const char *s1, const char *e1,
561 const char *s2, const char *e2)
563 for (;;) {
564 if (!*s1 || !*s2 || *s1 != *s2)
565 return (*s1 - *s2);
566 ++s1, ++s2;
567 if (s1 == e1 && s2 == e2)
568 return (0);
569 else if (s1 == e1)
570 return (*s2);
571 else if (s2 == e2)
572 return (*s1);
576 /* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
577 * '|' delimited (possibly empty) strings in which case search for a match
578 * within the alternatives proceeds left to right. Return 0 for success,
579 * non-zero otherwise.
581 static int altcmp(const char *s, const char *e, const char *altstr)
583 const char *p, *q;
585 for (q = p = altstr; ; ) {
586 while (*p && *p != '|')
587 ++p;
588 if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
589 return (0);
590 if (!*p)
591 return (1);
592 else
593 q = ++p;
597 /* search featureset for flag *[s..e), if found set corresponding bit in
598 * *pval and return true, otherwise return false
600 static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
601 const char **featureset)
603 uint32_t mask;
604 const char **ppc;
605 bool found = false;
607 for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
608 if (*ppc && !altcmp(s, e, *ppc)) {
609 *pval |= mask;
610 found = true;
613 return found;
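/* Illustrative usage (not part of the original file): altcmp() lets one table
 * entry carry '|'-separated aliases, so either spelling of a flag matches the
 * same bit.  With ext_feature_name[] above, whose entry 19 is
 * "sse4.1|sse4_1":
 *
 *     uint32_t val = 0;
 *     lookup_feature(&val, "sse4_1", NULL, ext_feature_name);
 *     // val now has bit 19 set, i.e. CPUID_EXT_SSE41
 */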
616 static void add_flagname_to_bitmaps(const char *flagname,
617 FeatureWordArray words,
618 Error **errp)
620 FeatureWord w;
621 for (w = 0; w < FEATURE_WORDS; w++) {
622 FeatureWordInfo *wi = &feature_word_info[w];
623 if (wi->feat_names &&
624 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
625 break;
628 if (w == FEATURE_WORDS) {
629 error_setg(errp, "CPU feature %s not found", flagname);
633 /* CPU class name definitions: */
635 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
636 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
638 /* Return type name for a given CPU model name
639 * Caller is responsible for freeing the returned string.
641 static char *x86_cpu_type_name(const char *model_name)
643 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
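/* Illustrative note (not part of the original file): the suffix comes from
 * TYPE_X86_CPU, so on a 64-bit target x86_cpu_type_name("qemu64") is expected
 * to yield "qemu64-x86_64-cpu" (and "qemu64-i386-cpu" on 32-bit targets).
 * x86_cpu_class_by_name() below then resolves that QOM type name:
 *
 *     char *typename = x86_cpu_type_name("qemu64");
 *     ObjectClass *oc = object_class_by_name(typename);
 *     g_free(typename);
 */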
646 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
648 ObjectClass *oc;
649 char *typename;
651 if (cpu_model == NULL) {
652 return NULL;
655 typename = x86_cpu_type_name(cpu_model);
656 oc = object_class_by_name(typename);
657 g_free(typename);
658 return oc;
661 struct X86CPUDefinition {
662 const char *name;
663 uint32_t level;
664 uint32_t xlevel;
665 uint32_t xlevel2;
666 /* vendor is zero-terminated, 12 character ASCII string */
667 char vendor[CPUID_VENDOR_SZ + 1];
668 int family;
669 int model;
670 int stepping;
671 FeatureWordArray features;
672 char model_id[48];
673 bool cache_info_passthrough;
676 static X86CPUDefinition builtin_x86_defs[] = {
678 .name = "qemu64",
679 .level = 4,
680 .vendor = CPUID_VENDOR_AMD,
681 .family = 6,
682 .model = 6,
683 .stepping = 3,
684 .features[FEAT_1_EDX] =
685 PPRO_FEATURES |
686 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
687 CPUID_PSE36,
688 .features[FEAT_1_ECX] =
689 CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT,
690 .features[FEAT_8000_0001_EDX] =
691 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
692 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
693 .features[FEAT_8000_0001_ECX] =
694 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
695 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
696 .xlevel = 0x8000000A,
699 .name = "phenom",
700 .level = 5,
701 .vendor = CPUID_VENDOR_AMD,
702 .family = 16,
703 .model = 2,
704 .stepping = 3,
705 /* Missing: CPUID_HT */
706 .features[FEAT_1_EDX] =
707 PPRO_FEATURES |
708 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
709 CPUID_PSE36 | CPUID_VME,
710 .features[FEAT_1_ECX] =
711 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
712 CPUID_EXT_POPCNT,
713 .features[FEAT_8000_0001_EDX] =
714 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
715 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
716 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
717 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
718 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
719 CPUID_EXT3_CR8LEG,
720 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
721 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
722 .features[FEAT_8000_0001_ECX] =
723 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
724 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
725 /* Missing: CPUID_SVM_LBRV */
726 .features[FEAT_SVM] =
727 CPUID_SVM_NPT,
728 .xlevel = 0x8000001A,
729 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
732 .name = "core2duo",
733 .level = 10,
734 .vendor = CPUID_VENDOR_INTEL,
735 .family = 6,
736 .model = 15,
737 .stepping = 11,
738 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
739 .features[FEAT_1_EDX] =
740 PPRO_FEATURES |
741 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
742 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
743 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
744 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
745 .features[FEAT_1_ECX] =
746 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
747 CPUID_EXT_CX16,
748 .features[FEAT_8000_0001_EDX] =
749 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
750 .features[FEAT_8000_0001_ECX] =
751 CPUID_EXT3_LAHF_LM,
752 .xlevel = 0x80000008,
753 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
756 .name = "kvm64",
757 .level = 5,
758 .vendor = CPUID_VENDOR_INTEL,
759 .family = 15,
760 .model = 6,
761 .stepping = 1,
762 /* Missing: CPUID_HT */
763 .features[FEAT_1_EDX] =
764 PPRO_FEATURES | CPUID_VME |
765 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
766 CPUID_PSE36,
767 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
768 .features[FEAT_1_ECX] =
769 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
770 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
771 .features[FEAT_8000_0001_EDX] =
772 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
773 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
774 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
775 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
776 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
777 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
778 .features[FEAT_8000_0001_ECX] =
780 .xlevel = 0x80000008,
781 .model_id = "Common KVM processor"
784 .name = "qemu32",
785 .level = 4,
786 .vendor = CPUID_VENDOR_INTEL,
787 .family = 6,
788 .model = 6,
789 .stepping = 3,
790 .features[FEAT_1_EDX] =
791 PPRO_FEATURES,
792 .features[FEAT_1_ECX] =
793 CPUID_EXT_SSE3 | CPUID_EXT_POPCNT,
794 .xlevel = 0x80000004,
797 .name = "kvm32",
798 .level = 5,
799 .vendor = CPUID_VENDOR_INTEL,
800 .family = 15,
801 .model = 6,
802 .stepping = 1,
803 .features[FEAT_1_EDX] =
804 PPRO_FEATURES | CPUID_VME |
805 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
806 .features[FEAT_1_ECX] =
807 CPUID_EXT_SSE3,
808 .features[FEAT_8000_0001_EDX] =
809 PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES,
810 .features[FEAT_8000_0001_ECX] =
812 .xlevel = 0x80000008,
813 .model_id = "Common 32-bit KVM processor"
816 .name = "coreduo",
817 .level = 10,
818 .vendor = CPUID_VENDOR_INTEL,
819 .family = 6,
820 .model = 14,
821 .stepping = 8,
822 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
823 .features[FEAT_1_EDX] =
824 PPRO_FEATURES | CPUID_VME |
825 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
826 CPUID_SS,
827 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
828 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
829 .features[FEAT_1_ECX] =
830 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
831 .features[FEAT_8000_0001_EDX] =
832 CPUID_EXT2_NX,
833 .xlevel = 0x80000008,
834 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
837 .name = "486",
838 .level = 1,
839 .vendor = CPUID_VENDOR_INTEL,
840 .family = 4,
841 .model = 8,
842 .stepping = 0,
843 .features[FEAT_1_EDX] =
844 I486_FEATURES,
845 .xlevel = 0,
848 .name = "pentium",
849 .level = 1,
850 .vendor = CPUID_VENDOR_INTEL,
851 .family = 5,
852 .model = 4,
853 .stepping = 3,
854 .features[FEAT_1_EDX] =
855 PENTIUM_FEATURES,
856 .xlevel = 0,
859 .name = "pentium2",
860 .level = 2,
861 .vendor = CPUID_VENDOR_INTEL,
862 .family = 6,
863 .model = 5,
864 .stepping = 2,
865 .features[FEAT_1_EDX] =
866 PENTIUM2_FEATURES,
867 .xlevel = 0,
870 .name = "pentium3",
871 .level = 2,
872 .vendor = CPUID_VENDOR_INTEL,
873 .family = 6,
874 .model = 7,
875 .stepping = 3,
876 .features[FEAT_1_EDX] =
877 PENTIUM3_FEATURES,
878 .xlevel = 0,
881 .name = "athlon",
882 .level = 2,
883 .vendor = CPUID_VENDOR_AMD,
884 .family = 6,
885 .model = 2,
886 .stepping = 3,
887 .features[FEAT_1_EDX] =
888 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
889 CPUID_MCA,
890 .features[FEAT_8000_0001_EDX] =
891 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
892 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
893 .xlevel = 0x80000008,
896 .name = "n270",
897 /* original is on level 10 */
898 .level = 5,
899 .vendor = CPUID_VENDOR_INTEL,
900 .family = 6,
901 .model = 28,
902 .stepping = 2,
903 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
904 .features[FEAT_1_EDX] =
905 PPRO_FEATURES |
906 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
907 CPUID_ACPI | CPUID_SS,
908         /* Some CPUs have no CPUID_SEP */
909 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
910 * CPUID_EXT_XTPR */
911 .features[FEAT_1_ECX] =
912 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
913 CPUID_EXT_MOVBE,
914 .features[FEAT_8000_0001_EDX] =
915 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
916 CPUID_EXT2_NX,
917 .features[FEAT_8000_0001_ECX] =
918 CPUID_EXT3_LAHF_LM,
919 .xlevel = 0x8000000A,
920 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
923 .name = "Conroe",
924 .level = 4,
925 .vendor = CPUID_VENDOR_INTEL,
926 .family = 6,
927 .model = 15,
928 .stepping = 3,
929 .features[FEAT_1_EDX] =
930 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
931 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
932 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
933 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
934 CPUID_DE | CPUID_FP87,
935 .features[FEAT_1_ECX] =
936 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
937 .features[FEAT_8000_0001_EDX] =
938 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
939 .features[FEAT_8000_0001_ECX] =
940 CPUID_EXT3_LAHF_LM,
941 .xlevel = 0x8000000A,
942 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
945 .name = "Penryn",
946 .level = 4,
947 .vendor = CPUID_VENDOR_INTEL,
948 .family = 6,
949 .model = 23,
950 .stepping = 3,
951 .features[FEAT_1_EDX] =
952 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
953 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
954 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
955 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
956 CPUID_DE | CPUID_FP87,
957 .features[FEAT_1_ECX] =
958 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
959 CPUID_EXT_SSE3,
960 .features[FEAT_8000_0001_EDX] =
961 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
962 .features[FEAT_8000_0001_ECX] =
963 CPUID_EXT3_LAHF_LM,
964 .xlevel = 0x8000000A,
965 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
968 .name = "Nehalem",
969 .level = 4,
970 .vendor = CPUID_VENDOR_INTEL,
971 .family = 6,
972 .model = 26,
973 .stepping = 3,
974 .features[FEAT_1_EDX] =
975 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
976 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
977 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
978 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
979 CPUID_DE | CPUID_FP87,
980 .features[FEAT_1_ECX] =
981 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
982 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
983 .features[FEAT_8000_0001_EDX] =
984 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
985 .features[FEAT_8000_0001_ECX] =
986 CPUID_EXT3_LAHF_LM,
987 .xlevel = 0x8000000A,
988 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
991 .name = "Westmere",
992 .level = 11,
993 .vendor = CPUID_VENDOR_INTEL,
994 .family = 6,
995 .model = 44,
996 .stepping = 1,
997 .features[FEAT_1_EDX] =
998 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
999 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1000 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1001 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1002 CPUID_DE | CPUID_FP87,
1003 .features[FEAT_1_ECX] =
1004 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1005 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1006 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1007 .features[FEAT_8000_0001_EDX] =
1008 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1009 .features[FEAT_8000_0001_ECX] =
1010 CPUID_EXT3_LAHF_LM,
1011 .xlevel = 0x8000000A,
1012 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1015 .name = "SandyBridge",
1016 .level = 0xd,
1017 .vendor = CPUID_VENDOR_INTEL,
1018 .family = 6,
1019 .model = 42,
1020 .stepping = 1,
1021 .features[FEAT_1_EDX] =
1022 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1023 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1024 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1025 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1026 CPUID_DE | CPUID_FP87,
1027 .features[FEAT_1_ECX] =
1028 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1029 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1030 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1031 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1032 CPUID_EXT_SSE3,
1033 .features[FEAT_8000_0001_EDX] =
1034 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1035 CPUID_EXT2_SYSCALL,
1036 .features[FEAT_8000_0001_ECX] =
1037 CPUID_EXT3_LAHF_LM,
1038 .features[FEAT_XSAVE] =
1039 CPUID_XSAVE_XSAVEOPT,
1040 .xlevel = 0x8000000A,
1041 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1044 .name = "IvyBridge",
1045 .level = 0xd,
1046 .vendor = CPUID_VENDOR_INTEL,
1047 .family = 6,
1048 .model = 58,
1049 .stepping = 9,
1050 .features[FEAT_1_EDX] =
1051 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1052 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1053 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1054 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1055 CPUID_DE | CPUID_FP87,
1056 .features[FEAT_1_ECX] =
1057 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1058 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1059 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1060 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1061 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1062 .features[FEAT_7_0_EBX] =
1063 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1064 CPUID_7_0_EBX_ERMS,
1065 .features[FEAT_8000_0001_EDX] =
1066 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1067 CPUID_EXT2_SYSCALL,
1068 .features[FEAT_8000_0001_ECX] =
1069 CPUID_EXT3_LAHF_LM,
1070 .features[FEAT_XSAVE] =
1071 CPUID_XSAVE_XSAVEOPT,
1072 .xlevel = 0x8000000A,
1073 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1076 .name = "Haswell",
1077 .level = 0xd,
1078 .vendor = CPUID_VENDOR_INTEL,
1079 .family = 6,
1080 .model = 60,
1081 .stepping = 1,
1082 .features[FEAT_1_EDX] =
1083 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1084 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1085 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1086 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1087 CPUID_DE | CPUID_FP87,
1088 .features[FEAT_1_ECX] =
1089 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1090 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1091 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1092 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1093 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1094 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1095 .features[FEAT_8000_0001_EDX] =
1096 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1097 CPUID_EXT2_SYSCALL,
1098 .features[FEAT_8000_0001_ECX] =
1099 CPUID_EXT3_LAHF_LM,
1100 .features[FEAT_7_0_EBX] =
1101 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1102 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1103 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1104 .features[FEAT_XSAVE] =
1105 CPUID_XSAVE_XSAVEOPT,
1106 .xlevel = 0x8000000A,
1107 .model_id = "Intel Core Processor (Haswell)",
1110 .name = "Broadwell",
1111 .level = 0xd,
1112 .vendor = CPUID_VENDOR_INTEL,
1113 .family = 6,
1114 .model = 61,
1115 .stepping = 2,
1116 .features[FEAT_1_EDX] =
1117 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1118 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1119 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1120 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1121 CPUID_DE | CPUID_FP87,
1122 .features[FEAT_1_ECX] =
1123 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1124 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1125 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1126 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1127 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1128 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1129 .features[FEAT_8000_0001_EDX] =
1130 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1131 CPUID_EXT2_SYSCALL,
1132 .features[FEAT_8000_0001_ECX] =
1133 CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1134 .features[FEAT_7_0_EBX] =
1135 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1136 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1137 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1138 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1139 CPUID_7_0_EBX_SMAP,
1140 .features[FEAT_XSAVE] =
1141 CPUID_XSAVE_XSAVEOPT,
1142 .xlevel = 0x8000000A,
1143 .model_id = "Intel Core Processor (Broadwell)",
1146 .name = "Opteron_G1",
1147 .level = 5,
1148 .vendor = CPUID_VENDOR_AMD,
1149 .family = 15,
1150 .model = 6,
1151 .stepping = 1,
1152 .features[FEAT_1_EDX] =
1153 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1154 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1155 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1156 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1157 CPUID_DE | CPUID_FP87,
1158 .features[FEAT_1_ECX] =
1159 CPUID_EXT_SSE3,
1160 .features[FEAT_8000_0001_EDX] =
1161 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1162 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1163 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1164 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1165 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1166 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1167 .xlevel = 0x80000008,
1168 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1171 .name = "Opteron_G2",
1172 .level = 5,
1173 .vendor = CPUID_VENDOR_AMD,
1174 .family = 15,
1175 .model = 6,
1176 .stepping = 1,
1177 .features[FEAT_1_EDX] =
1178 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1179 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1180 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1181 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1182 CPUID_DE | CPUID_FP87,
1183 .features[FEAT_1_ECX] =
1184 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1185 .features[FEAT_8000_0001_EDX] =
1186 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1187 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1188 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1189 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1190 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1191 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1192 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1193 .features[FEAT_8000_0001_ECX] =
1194 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1195 .xlevel = 0x80000008,
1196 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1199 .name = "Opteron_G3",
1200 .level = 5,
1201 .vendor = CPUID_VENDOR_AMD,
1202 .family = 15,
1203 .model = 6,
1204 .stepping = 1,
1205 .features[FEAT_1_EDX] =
1206 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1207 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1208 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1209 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1210 CPUID_DE | CPUID_FP87,
1211 .features[FEAT_1_ECX] =
1212 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1213 CPUID_EXT_SSE3,
1214 .features[FEAT_8000_0001_EDX] =
1215 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1216 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1217 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1218 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1219 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1220 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1221 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1222 .features[FEAT_8000_0001_ECX] =
1223 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1224 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1225 .xlevel = 0x80000008,
1226 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1229 .name = "Opteron_G4",
1230 .level = 0xd,
1231 .vendor = CPUID_VENDOR_AMD,
1232 .family = 21,
1233 .model = 1,
1234 .stepping = 2,
1235 .features[FEAT_1_EDX] =
1236 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1237 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1238 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1239 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1240 CPUID_DE | CPUID_FP87,
1241 .features[FEAT_1_ECX] =
1242 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1243 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1244 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1245 CPUID_EXT_SSE3,
1246 .features[FEAT_8000_0001_EDX] =
1247 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1248 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1249 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1250 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1251 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1252 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1253 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1254 .features[FEAT_8000_0001_ECX] =
1255 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1256 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1257 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1258 CPUID_EXT3_LAHF_LM,
1259 /* no xsaveopt! */
1260 .xlevel = 0x8000001A,
1261 .model_id = "AMD Opteron 62xx class CPU",
1264 .name = "Opteron_G5",
1265 .level = 0xd,
1266 .vendor = CPUID_VENDOR_AMD,
1267 .family = 21,
1268 .model = 2,
1269 .stepping = 0,
1270 .features[FEAT_1_EDX] =
1271 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1272 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1273 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1274 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1275 CPUID_DE | CPUID_FP87,
1276 .features[FEAT_1_ECX] =
1277 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1278 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1279 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1280 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1281 .features[FEAT_8000_0001_EDX] =
1282 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1283 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1284 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1285 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1286 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1287 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1288 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1289 .features[FEAT_8000_0001_ECX] =
1290 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1291 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1292 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1293 CPUID_EXT3_LAHF_LM,
1294 /* no xsaveopt! */
1295 .xlevel = 0x8000001A,
1296 .model_id = "AMD Opteron 63xx class CPU",
1301 * x86_cpu_compat_set_features:
1302 * @cpu_model: CPU model name to be changed. If NULL, all CPU models are changed
1303 * @w: Identifies the feature word to be changed.
1304 * @feat_add: Feature bits to be added to feature word
1305 * @feat_remove: Feature bits to be removed from feature word
1307 * Change CPU model feature bits for compatibility.
1309 * This function may be used by machine-type compatibility functions
1310 * to enable or disable feature bits on specific CPU models.
1312 void x86_cpu_compat_set_features(const char *cpu_model, FeatureWord w,
1313 uint32_t feat_add, uint32_t feat_remove)
1315 X86CPUDefinition *def;
1316 int i;
1317 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1318 def = &builtin_x86_defs[i];
1319 if (!cpu_model || !strcmp(cpu_model, def->name)) {
1320 def->features[w] |= feat_add;
1321 def->features[w] &= ~feat_remove;
1326 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1327 bool migratable_only);
1329 #ifdef CONFIG_KVM
1331 static int cpu_x86_fill_model_id(char *str)
1333 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1334 int i;
1336 for (i = 0; i < 3; i++) {
1337 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
1338 memcpy(str + i * 16 + 0, &eax, 4);
1339 memcpy(str + i * 16 + 4, &ebx, 4);
1340 memcpy(str + i * 16 + 8, &ecx, 4);
1341 memcpy(str + i * 16 + 12, &edx, 4);
1343 return 0;
1346 static X86CPUDefinition host_cpudef;
1348 static Property host_x86_cpu_properties[] = {
1349 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1350 DEFINE_PROP_END_OF_LIST()
1353 /* class_init for the "host" CPU model
1355 * This function may be called before KVM is initialized.
1357 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1359 DeviceClass *dc = DEVICE_CLASS(oc);
1360 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1361 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1363 xcc->kvm_required = true;
1365 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1366 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1368 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1369 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1370 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1371 host_cpudef.stepping = eax & 0x0F;
1373 cpu_x86_fill_model_id(host_cpudef.model_id);
1375 xcc->cpu_def = &host_cpudef;
1376 host_cpudef.cache_info_passthrough = true;
1378 /* level, xlevel, xlevel2, and the feature words are initialized on
1379 * instance_init, because they require KVM to be initialized.
1382 dc->props = host_x86_cpu_properties;
1385 static void host_x86_cpu_initfn(Object *obj)
1387 X86CPU *cpu = X86_CPU(obj);
1388 CPUX86State *env = &cpu->env;
1389 KVMState *s = kvm_state;
1391 assert(kvm_enabled());
1393 /* We can't fill the features array here because we don't know yet if
1394 * "migratable" is true or false.
1396 cpu->host_features = true;
1398 env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1399 env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1400 env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1402 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1405 static const TypeInfo host_x86_cpu_type_info = {
1406 .name = X86_CPU_TYPE_NAME("host"),
1407 .parent = TYPE_X86_CPU,
1408 .instance_init = host_x86_cpu_initfn,
1409 .class_init = host_x86_cpu_class_init,
1412 #endif
1414 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1416 FeatureWordInfo *f = &feature_word_info[w];
1417 int i;
1419 for (i = 0; i < 32; ++i) {
1420 if (1 << i & mask) {
1421 const char *reg = get_register_name_32(f->cpuid_reg);
1422 assert(reg);
1423 fprintf(stderr, "warning: %s doesn't support requested feature: "
1424 "CPUID.%02XH:%s%s%s [bit %d]\n",
1425 kvm_enabled() ? "host" : "TCG",
1426 f->cpuid_eax, reg,
1427 f->feat_names[i] ? "." : "",
1428 f->feat_names[i] ? f->feat_names[i] : "", i);
1433 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, void *opaque,
1434 const char *name, Error **errp)
1436 X86CPU *cpu = X86_CPU(obj);
1437 CPUX86State *env = &cpu->env;
1438 int64_t value;
1440 value = (env->cpuid_version >> 8) & 0xf;
1441 if (value == 0xf) {
1442 value += (env->cpuid_version >> 20) & 0xff;
1444 visit_type_int(v, &value, name, errp);
1447 static void x86_cpuid_version_set_family(Object *obj, Visitor *v, void *opaque,
1448 const char *name, Error **errp)
1450 X86CPU *cpu = X86_CPU(obj);
1451 CPUX86State *env = &cpu->env;
1452 const int64_t min = 0;
1453 const int64_t max = 0xff + 0xf;
1454 Error *local_err = NULL;
1455 int64_t value;
1457 visit_type_int(v, &value, name, &local_err);
1458 if (local_err) {
1459 error_propagate(errp, local_err);
1460 return;
1462 if (value < min || value > max) {
1463 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1464 name ? name : "null", value, min, max);
1465 return;
1468 env->cpuid_version &= ~0xff00f00;
1469 if (value > 0x0f) {
1470 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1471 } else {
1472 env->cpuid_version |= value << 8;
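/* Worked example (not part of the original file): the property stores the
 * family in CPUID[1].EAX format - bits 11:8 hold the base family and bits
 * 27:20 the extended family, which only counts when the base family is 0xF.
 * Setting family=21 (0x15, as the Opteron_G4/G5 definitions above do) stores
 * 0xF in bits 11:8 and 21 - 15 = 6 in bits 27:20, i.e.
 * cpuid_version |= 0xf00 | (0x6 << 20).  The getter above reverses this:
 * 0xF + 0x6 = 21.
 */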
1476 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, void *opaque,
1477 const char *name, Error **errp)
1479 X86CPU *cpu = X86_CPU(obj);
1480 CPUX86State *env = &cpu->env;
1481 int64_t value;
1483 value = (env->cpuid_version >> 4) & 0xf;
1484 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1485 visit_type_int(v, &value, name, errp);
1488 static void x86_cpuid_version_set_model(Object *obj, Visitor *v, void *opaque,
1489 const char *name, Error **errp)
1491 X86CPU *cpu = X86_CPU(obj);
1492 CPUX86State *env = &cpu->env;
1493 const int64_t min = 0;
1494 const int64_t max = 0xff;
1495 Error *local_err = NULL;
1496 int64_t value;
1498 visit_type_int(v, &value, name, &local_err);
1499 if (local_err) {
1500 error_propagate(errp, local_err);
1501 return;
1503 if (value < min || value > max) {
1504 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1505 name ? name : "null", value, min, max);
1506 return;
1509 env->cpuid_version &= ~0xf00f0;
1510 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1513 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1514 void *opaque, const char *name,
1515 Error **errp)
1517 X86CPU *cpu = X86_CPU(obj);
1518 CPUX86State *env = &cpu->env;
1519 int64_t value;
1521 value = env->cpuid_version & 0xf;
1522 visit_type_int(v, &value, name, errp);
1525 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1526 void *opaque, const char *name,
1527 Error **errp)
1529 X86CPU *cpu = X86_CPU(obj);
1530 CPUX86State *env = &cpu->env;
1531 const int64_t min = 0;
1532 const int64_t max = 0xf;
1533 Error *local_err = NULL;
1534 int64_t value;
1536 visit_type_int(v, &value, name, &local_err);
1537 if (local_err) {
1538 error_propagate(errp, local_err);
1539 return;
1541 if (value < min || value > max) {
1542 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1543 name ? name : "null", value, min, max);
1544 return;
1547 env->cpuid_version &= ~0xf;
1548 env->cpuid_version |= value & 0xf;
1551 static void x86_cpuid_get_level(Object *obj, Visitor *v, void *opaque,
1552 const char *name, Error **errp)
1554 X86CPU *cpu = X86_CPU(obj);
1556 visit_type_uint32(v, &cpu->env.cpuid_level, name, errp);
1559 static void x86_cpuid_set_level(Object *obj, Visitor *v, void *opaque,
1560 const char *name, Error **errp)
1562 X86CPU *cpu = X86_CPU(obj);
1564 visit_type_uint32(v, &cpu->env.cpuid_level, name, errp);
1567 static void x86_cpuid_get_xlevel(Object *obj, Visitor *v, void *opaque,
1568 const char *name, Error **errp)
1570 X86CPU *cpu = X86_CPU(obj);
1572 visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp);
1575 static void x86_cpuid_set_xlevel(Object *obj, Visitor *v, void *opaque,
1576 const char *name, Error **errp)
1578 X86CPU *cpu = X86_CPU(obj);
1580 visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp);
1583 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1585 X86CPU *cpu = X86_CPU(obj);
1586 CPUX86State *env = &cpu->env;
1587 char *value;
1589 value = g_malloc(CPUID_VENDOR_SZ + 1);
1590 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1591 env->cpuid_vendor3);
1592 return value;
1595 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1596 Error **errp)
1598 X86CPU *cpu = X86_CPU(obj);
1599 CPUX86State *env = &cpu->env;
1600 int i;
1602 if (strlen(value) != CPUID_VENDOR_SZ) {
1603 error_set(errp, QERR_PROPERTY_VALUE_BAD, "",
1604 "vendor", value);
1605 return;
1608 env->cpuid_vendor1 = 0;
1609 env->cpuid_vendor2 = 0;
1610 env->cpuid_vendor3 = 0;
1611 for (i = 0; i < 4; i++) {
1612 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1613 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1614 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1618 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1620 X86CPU *cpu = X86_CPU(obj);
1621 CPUX86State *env = &cpu->env;
1622 char *value;
1623 int i;
1625 value = g_malloc(48 + 1);
1626 for (i = 0; i < 48; i++) {
1627 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1629 value[48] = '\0';
1630 return value;
1633 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1634 Error **errp)
1636 X86CPU *cpu = X86_CPU(obj);
1637 CPUX86State *env = &cpu->env;
1638 int c, len, i;
1640 if (model_id == NULL) {
1641 model_id = "";
1643 len = strlen(model_id);
1644 memset(env->cpuid_model, 0, 48);
1645 for (i = 0; i < 48; i++) {
1646 if (i >= len) {
1647 c = '\0';
1648 } else {
1649 c = (uint8_t)model_id[i];
1651 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1655 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, void *opaque,
1656 const char *name, Error **errp)
1658 X86CPU *cpu = X86_CPU(obj);
1659 int64_t value;
1661 value = cpu->env.tsc_khz * 1000;
1662 visit_type_int(v, &value, name, errp);
1665 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, void *opaque,
1666 const char *name, Error **errp)
1668 X86CPU *cpu = X86_CPU(obj);
1669 const int64_t min = 0;
1670 const int64_t max = INT64_MAX;
1671 Error *local_err = NULL;
1672 int64_t value;
1674 visit_type_int(v, &value, name, &local_err);
1675 if (local_err) {
1676 error_propagate(errp, local_err);
1677 return;
1679 if (value < min || value > max) {
1680 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1681 name ? name : "null", value, min, max);
1682 return;
1685 cpu->env.tsc_khz = value / 1000;
1688 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, void *opaque,
1689 const char *name, Error **errp)
1691 X86CPU *cpu = X86_CPU(obj);
1692 int64_t value = cpu->apic_id;
1694 visit_type_int(v, &value, name, errp);
1697 static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, void *opaque,
1698 const char *name, Error **errp)
1700 X86CPU *cpu = X86_CPU(obj);
1701 DeviceState *dev = DEVICE(obj);
1702 const int64_t min = 0;
1703 const int64_t max = UINT32_MAX;
1704 Error *error = NULL;
1705 int64_t value;
1707 if (dev->realized) {
1708 error_setg(errp, "Attempt to set property '%s' on '%s' after "
1709 "it was realized", name, object_get_typename(obj));
1710 return;
1713 visit_type_int(v, &value, name, &error);
1714 if (error) {
1715 error_propagate(errp, error);
1716 return;
1718 if (value < min || value > max) {
1719 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1720 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
1721 object_get_typename(obj), name, value, min, max);
1722 return;
1725 if ((value != cpu->apic_id) && cpu_exists(value)) {
1726 error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
1727 return;
1729 cpu->apic_id = value;
1732 /* Generic getter for "feature-words" and "filtered-features" properties */
1733 static void x86_cpu_get_feature_words(Object *obj, Visitor *v, void *opaque,
1734 const char *name, Error **errp)
1736 uint32_t *array = (uint32_t *)opaque;
1737 FeatureWord w;
1738 Error *err = NULL;
1739 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1740 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1741 X86CPUFeatureWordInfoList *list = NULL;
1743 for (w = 0; w < FEATURE_WORDS; w++) {
1744 FeatureWordInfo *wi = &feature_word_info[w];
1745 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1746 qwi->cpuid_input_eax = wi->cpuid_eax;
1747 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1748 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1749 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1750 qwi->features = array[w];
1752 /* List will be in reverse order, but order shouldn't matter */
1753 list_entries[w].next = list;
1754 list_entries[w].value = &word_infos[w];
1755 list = &list_entries[w];
1758 visit_type_X86CPUFeatureWordInfoList(v, &list, "feature-words", &err);
1759 error_propagate(errp, err);
1762 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1763 const char *name, Error **errp)
1765 X86CPU *cpu = X86_CPU(obj);
1766 int64_t value = cpu->hyperv_spinlock_attempts;
1768 visit_type_int(v, &value, name, errp);
1771 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1772 const char *name, Error **errp)
1774 const int64_t min = 0xFFF;
1775 const int64_t max = UINT_MAX;
1776 X86CPU *cpu = X86_CPU(obj);
1777 Error *err = NULL;
1778 int64_t value;
1780 visit_type_int(v, &value, name, &err);
1781 if (err) {
1782 error_propagate(errp, err);
1783 return;
1786 if (value < min || value > max) {
1787 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1788 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1789 object_get_typename(obj), name ? name : "null",
1790 value, min, max);
1791 return;
1793 cpu->hyperv_spinlock_attempts = value;
1796 static PropertyInfo qdev_prop_spinlocks = {
1797 .name = "int",
1798 .get = x86_get_hv_spinlocks,
1799 .set = x86_set_hv_spinlocks,
1802 /* Convert all '_' in a feature string option name to '-', to make feature
1803 * name conform to QOM property naming rule, which uses '-' instead of '_'.
1805 static inline void feat2prop(char *s)
1807 while ((s = strchr(s, '_'))) {
1808 *s = '-';
1812 /* Parse "+feature,-feature,feature=foo" CPU feature string
1814 static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
1815 Error **errp)
1817 X86CPU *cpu = X86_CPU(cs);
1818 char *featurestr; /* Single 'key=value" string being parsed */
1819 FeatureWord w;
1820 /* Features to be added */
1821 FeatureWordArray plus_features = { 0 };
1822 /* Features to be removed */
1823 FeatureWordArray minus_features = { 0 };
1824 uint32_t numvalue;
1825 CPUX86State *env = &cpu->env;
1826 Error *local_err = NULL;
1828 featurestr = features ? strtok(features, ",") : NULL;
1830 while (featurestr) {
1831 char *val;
1832 if (featurestr[0] == '+') {
1833 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
1834 } else if (featurestr[0] == '-') {
1835 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
1836 } else if ((val = strchr(featurestr, '='))) {
1837 *val = 0; val++;
1838 feat2prop(featurestr);
1839 if (!strcmp(featurestr, "xlevel")) {
1840 char *err;
1841 char num[32];
1843 numvalue = strtoul(val, &err, 0);
1844 if (!*val || *err) {
1845 error_setg(errp, "bad numerical value %s", val);
1846 return;
1848 if (numvalue < 0x80000000) {
1849 error_report("xlevel value shall always be >= 0x80000000"
1850 ", fixup will be removed in future versions");
1851 numvalue += 0x80000000;
1853 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1854 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1855 } else if (!strcmp(featurestr, "tsc-freq")) {
1856 int64_t tsc_freq;
1857 char *err;
1858 char num[32];
1860 tsc_freq = strtosz_suffix_unit(val, &err,
1861 STRTOSZ_DEFSUFFIX_B, 1000);
1862 if (tsc_freq < 0 || *err) {
1863 error_setg(errp, "bad numerical value %s", val);
1864 return;
1866 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
1867 object_property_parse(OBJECT(cpu), num, "tsc-frequency",
1868 &local_err);
1869 } else if (!strcmp(featurestr, "hv-spinlocks")) {
1870 char *err;
1871 const int min = 0xFFF;
1872 char num[32];
1873 numvalue = strtoul(val, &err, 0);
1874 if (!*val || *err) {
1875 error_setg(errp, "bad numerical value %s", val);
1876 return;
1878 if (numvalue < min) {
1879 error_report("hv-spinlocks value shall always be >= 0x%x"
1880 ", fixup will be removed in future versions",
1881 min);
1882 numvalue = min;
1884 snprintf(num, sizeof(num), "%" PRId32, numvalue);
1885 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1886 } else {
1887 object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
1889 } else {
1890 feat2prop(featurestr);
1891 object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
1893 if (local_err) {
1894 error_propagate(errp, local_err);
1895 return;
1897 featurestr = strtok(NULL, ",");
1900 if (cpu->host_features) {
1901 for (w = 0; w < FEATURE_WORDS; w++) {
1902 env->features[w] =
1903 x86_cpu_get_supported_feature_word(w, cpu->migratable);
1907 for (w = 0; w < FEATURE_WORDS; w++) {
1908 env->features[w] |= plus_features[w];
1909 env->features[w] &= ~minus_features[w];
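/* Illustrative feature string handled by the parser above:
 *   -cpu qemu64,+avx,-mmx,xlevel=0x80000008,tsc_freq=1000000000
 * "+avx"/"-mmx" go through the plus/minus bitmaps, while the key=value
 * entries are forwarded to the corresponding QOM properties. */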
1913 /* Print all cpuid feature names in featureset */
1915 static void listflags(FILE *f, fprintf_function print, const char **featureset)
1917 int bit;
1918 bool first = true;
1920 for (bit = 0; bit < 32; bit++) {
1921 if (featureset[bit]) {
1922 print(f, "%s%s", first ? "" : " ", featureset[bit]);
1923 first = false;
1928 /* generate CPU information. */
1929 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
1931 X86CPUDefinition *def;
1932 char buf[256];
1933 int i;
1935 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1936 def = &builtin_x86_defs[i];
1937 snprintf(buf, sizeof(buf), "%s", def->name);
1938 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
1940 #ifdef CONFIG_KVM
1941 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
1942 "KVM processor with all supported host features "
1943 "(only available in KVM mode)");
1944 #endif
1946 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
1947 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
1948 FeatureWordInfo *fw = &feature_word_info[i];
1950 (*cpu_fprintf)(f, " ");
1951 listflags(f, cpu_fprintf, fw->feat_names);
1952 (*cpu_fprintf)(f, "\n");
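/* Reached via "-cpu help"; the output is roughly (illustrative):
 *   x86           qemu64  QEMU Virtual CPU version <version>
 *   ...
 *   Recognized CPUID flags:
 *     fpu vme de pse tsc msr pae mce ...
 */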
1956 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
1958 CpuDefinitionInfoList *cpu_list = NULL;
1959 X86CPUDefinition *def;
1960 int i;
1962 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1963 CpuDefinitionInfoList *entry;
1964 CpuDefinitionInfo *info;
1966 def = &builtin_x86_defs[i];
1967 info = g_malloc0(sizeof(*info));
1968 info->name = g_strdup(def->name);
1970 entry = g_malloc0(sizeof(*entry));
1971 entry->value = info;
1972 entry->next = cpu_list;
1973 cpu_list = entry;
1976 return cpu_list;
1979 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1980 bool migratable_only)
1982 FeatureWordInfo *wi = &feature_word_info[w];
1983 uint32_t r;
1985 if (kvm_enabled()) {
1986 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
1987 wi->cpuid_ecx,
1988 wi->cpuid_reg);
1989 } else if (tcg_enabled()) {
1990 r = wi->tcg_features;
1991 } else {
1992 return ~0;
1994 if (migratable_only) {
1995 r &= x86_cpu_get_migratable_flags(w);
1997 return r;
2001 /* Filters CPU feature words based on host availability of each feature.
2003  * Returns: 0 if all flags are supported by the host, non-zero otherwise. */
2005 static int x86_cpu_filter_features(X86CPU *cpu)
2007 CPUX86State *env = &cpu->env;
2008 FeatureWord w;
2009 int rv = 0;
2011 for (w = 0; w < FEATURE_WORDS; w++) {
2012 uint32_t host_feat =
2013 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2014 uint32_t requested_features = env->features[w];
2015 env->features[w] &= host_feat;
2016 cpu->filtered_features[w] = requested_features & ~env->features[w];
2017 if (cpu->filtered_features[w]) {
2018 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2019 report_unavailable_features(w, cpu->filtered_features[w]);
2021 rv = 1;
2025 return rv;
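/* With "-cpu <model>,check" (or "enforce") the filtered bits are reported;
 * e.g. requesting a model whose AVX bit the host cannot provide produces a
 * "doesn't support requested feature" warning (wording illustrative), and
 * with "enforce" realization fails in x86_cpu_realizefn(). */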
2028 /* Load data from X86CPUDefinition */
2030 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2032 CPUX86State *env = &cpu->env;
2033 const char *vendor;
2034 char host_vendor[CPUID_VENDOR_SZ + 1];
2035 FeatureWord w;
2037 object_property_set_int(OBJECT(cpu), def->level, "level", errp);
2038 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2039 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2040 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2041 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
2042 env->cpuid_xlevel2 = def->xlevel2;
2043 cpu->cache_info_passthrough = def->cache_info_passthrough;
2044 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2045 for (w = 0; w < FEATURE_WORDS; w++) {
2046 env->features[w] = def->features[w];
2049 /* Special cases not set in the X86CPUDefinition structs: */
2050 if (kvm_enabled()) {
2051 FeatureWord w;
2052 for (w = 0; w < FEATURE_WORDS; w++) {
2053 env->features[w] |= kvm_default_features[w];
2054 env->features[w] &= ~kvm_default_unset_features[w];
2058 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2060 /* sysenter isn't supported in compatibility mode on AMD,
2061 * syscall isn't supported in compatibility mode on Intel.
2062 * Normally we advertise the actual CPU vendor, but you can
2063 * override this using the 'vendor' property if you want to use
2064 * KVM's sysenter/syscall emulation in compatibility mode and
2065 * when doing cross-vendor migration. */
2067 vendor = def->vendor;
2068 if (kvm_enabled()) {
2069 uint32_t ebx = 0, ecx = 0, edx = 0;
2070 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2071 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2072 vendor = host_vendor;
2075 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
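/* Illustrative override: "-cpu Opteron_G1,vendor=GenuineIntel" forces the
 * 12-character vendor string regardless of the definition or host value. */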
2079 X86CPU *cpu_x86_create(const char *cpu_model, Error **errp)
2081 X86CPU *cpu = NULL;
2082 X86CPUClass *xcc;
2083 ObjectClass *oc;
2084 gchar **model_pieces;
2085 char *name, *features;
2086 Error *error = NULL;
2088 model_pieces = g_strsplit(cpu_model, ",", 2);
2089 if (!model_pieces[0]) {
2090 error_setg(&error, "Invalid/empty CPU model name");
2091 goto out;
2093 name = model_pieces[0];
2094 features = model_pieces[1];
2096 oc = x86_cpu_class_by_name(name);
2097 if (oc == NULL) {
2098 error_setg(&error, "Unable to find CPU definition: %s", name);
2099 goto out;
2101 xcc = X86_CPU_CLASS(oc);
2103 if (xcc->kvm_required && !kvm_enabled()) {
2104 error_setg(&error, "CPU model '%s' requires KVM", name);
2105 goto out;
2108 cpu = X86_CPU(object_new(object_class_get_name(oc)));
2110 x86_cpu_parse_featurestr(CPU(cpu), features, &error);
2111 if (error) {
2112 goto out;
2115 out:
2116 if (error != NULL) {
2117 error_propagate(errp, error);
2118 if (cpu) {
2119 object_unref(OBJECT(cpu));
2120 cpu = NULL;
2123 g_strfreev(model_pieces);
2124 return cpu;
2127 X86CPU *cpu_x86_init(const char *cpu_model)
2129 Error *error = NULL;
2130 X86CPU *cpu;
2132 cpu = cpu_x86_create(cpu_model, &error);
2133 if (error) {
2134 goto out;
2137 object_property_set_bool(OBJECT(cpu), true, "realized", &error);
2139 out:
2140 if (error) {
2141 error_report_err(error);
2142 if (cpu != NULL) {
2143 object_unref(OBJECT(cpu));
2144 cpu = NULL;
2147 return cpu;
2150 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2152 X86CPUDefinition *cpudef = data;
2153 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2155 xcc->cpu_def = cpudef;
2158 static void x86_register_cpudef_type(X86CPUDefinition *def)
2160 char *typename = x86_cpu_type_name(def->name);
2161 TypeInfo ti = {
2162 .name = typename,
2163 .parent = TYPE_X86_CPU,
2164 .class_init = x86_cpu_cpudef_class_init,
2165 .class_data = def,
2168 type_register(&ti);
2169 g_free(typename);
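/* Each static definition becomes its own QOM type; for example "qemu64" is
 * registered as "qemu64-x86_64-cpu" on the x86_64 target (type name shown
 * for illustration). */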
2172 #if !defined(CONFIG_USER_ONLY)
2174 void cpu_clear_apic_feature(CPUX86State *env)
2176 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2179 #endif /* !CONFIG_USER_ONLY */
2181 /* Initialize list of CPU models, filling some non-static fields if necessary */
2183 void x86_cpudef_setup(void)
2185 int i, j;
2186 static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
2188 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
2189 X86CPUDefinition *def = &builtin_x86_defs[i];
2191 /* Look for specific "cpudef" models that */
2192 /* have the QEMU version in .model_id */
2193 for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
2194 if (strcmp(model_with_versions[j], def->name) == 0) {
2195 pstrcpy(def->model_id, sizeof(def->model_id),
2196 "QEMU Virtual CPU version ");
2197 pstrcat(def->model_id, sizeof(def->model_id),
2198 qemu_get_version());
2199 break;
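/* e.g. the "qemu64" model_id ends up as "QEMU Virtual CPU version 2.1.x"
 * (the actual version string depends on the QEMU build). */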
2205 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2206 uint32_t *eax, uint32_t *ebx,
2207 uint32_t *ecx, uint32_t *edx)
2209 X86CPU *cpu = x86_env_get_cpu(env);
2210 CPUState *cs = CPU(cpu);
2212 /* test if maximum index reached */
2213 if (index & 0x80000000) {
2214 if (index > env->cpuid_xlevel) {
2215 if (env->cpuid_xlevel2 > 0) {
2216 /* Handle the Centaur's CPUID instruction. */
2217 if (index > env->cpuid_xlevel2) {
2218 index = env->cpuid_xlevel2;
2219 } else if (index < 0xC0000000) {
2220 index = env->cpuid_xlevel;
2222 } else {
2223 /* Intel documentation states that invalid EAX input will
2224 * return the same information as EAX=cpuid_level
2225 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID) */
2227 index = env->cpuid_level;
2230 } else {
2231 if (index > env->cpuid_level)
2232 index = env->cpuid_level;
2235 switch(index) {
2236 case 0:
2237 *eax = env->cpuid_level;
2238 *ebx = env->cpuid_vendor1;
2239 *edx = env->cpuid_vendor2;
2240 *ecx = env->cpuid_vendor3;
2241 break;
2242 case 1:
2243 *eax = env->cpuid_version;
2244 *ebx = (cpu->apic_id << 24) |
2245 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2246 *ecx = env->features[FEAT_1_ECX];
2247 *edx = env->features[FEAT_1_EDX];
2248 if (cs->nr_cores * cs->nr_threads > 1) {
2249 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2250 *edx |= 1 << 28; /* HTT bit */
2252 break;
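/* CPUID.01H:EBX layout used above: bits 31..24 initial APIC ID,
 * 23..16 logical processor count (valid when the HTT bit is set),
 * 15..8 CLFLUSH line size in 8-byte units (8 => 64 bytes),
 * 7..0 brand index. */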
2253 case 2:
2254 /* cache info: needed for Pentium Pro compatibility */
2255 if (cpu->cache_info_passthrough) {
2256 host_cpuid(index, 0, eax, ebx, ecx, edx);
2257 break;
2259 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2260 *ebx = 0;
2261 *ecx = 0;
2262 *edx = (L1D_DESCRIPTOR << 16) | \
2263 (L1I_DESCRIPTOR << 8) | \
2264 (L2_DESCRIPTOR);
2265 break;
2266 case 4:
2267 /* cache info: needed for Core compatibility */
2268 if (cpu->cache_info_passthrough) {
2269 host_cpuid(index, count, eax, ebx, ecx, edx);
2270 *eax &= ~0xFC000000;
2271 } else {
2272 *eax = 0;
2273 switch (count) {
2274 case 0: /* L1 dcache info */
2275 *eax |= CPUID_4_TYPE_DCACHE | \
2276 CPUID_4_LEVEL(1) | \
2277 CPUID_4_SELF_INIT_LEVEL;
2278 *ebx = (L1D_LINE_SIZE - 1) | \
2279 ((L1D_PARTITIONS - 1) << 12) | \
2280 ((L1D_ASSOCIATIVITY - 1) << 22);
2281 *ecx = L1D_SETS - 1;
2282 *edx = CPUID_4_NO_INVD_SHARING;
2283 break;
2284 case 1: /* L1 icache info */
2285 *eax |= CPUID_4_TYPE_ICACHE | \
2286 CPUID_4_LEVEL(1) | \
2287 CPUID_4_SELF_INIT_LEVEL;
2288 *ebx = (L1I_LINE_SIZE - 1) | \
2289 ((L1I_PARTITIONS - 1) << 12) | \
2290 ((L1I_ASSOCIATIVITY - 1) << 22);
2291 *ecx = L1I_SETS - 1;
2292 *edx = CPUID_4_NO_INVD_SHARING;
2293 break;
2294 case 2: /* L2 cache info */
2295 *eax |= CPUID_4_TYPE_UNIFIED | \
2296 CPUID_4_LEVEL(2) | \
2297 CPUID_4_SELF_INIT_LEVEL;
2298 if (cs->nr_threads > 1) {
2299 *eax |= (cs->nr_threads - 1) << 14;
2301 *ebx = (L2_LINE_SIZE - 1) | \
2302 ((L2_PARTITIONS - 1) << 12) | \
2303 ((L2_ASSOCIATIVITY - 1) << 22);
2304 *ecx = L2_SETS - 1;
2305 *edx = CPUID_4_NO_INVD_SHARING;
2306 break;
2307 default: /* end of info */
2308 *eax = 0;
2309 *ebx = 0;
2310 *ecx = 0;
2311 *edx = 0;
2312 break;
2316 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2317 if ((*eax & 31) && cs->nr_cores > 1) {
2318 *eax |= (cs->nr_cores - 1) << 26;
2320 break;
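/* CPUID.04H encoding recap: EBX[31:22] = ways-1, EBX[21:12] = partitions-1,
 * EBX[11:0] = line size-1, ECX = sets-1, so cache size = ways * partitions *
 * line size * sets (e.g. 8 * 1 * 64 * 64 = 32 KiB for the L1D above). */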
2321 case 5:
2322 /* mwait info: needed for Core compatibility */
2323 *eax = 0; /* Smallest monitor-line size in bytes */
2324 *ebx = 0; /* Largest monitor-line size in bytes */
2325 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2326 *edx = 0;
2327 break;
2328 case 6:
2329 /* Thermal and Power Leaf */
2330 *eax = 0;
2331 *ebx = 0;
2332 *ecx = 0;
2333 *edx = 0;
2334 break;
2335 case 7:
2336 /* Structured Extended Feature Flags Enumeration Leaf */
2337 if (count == 0) {
2338 *eax = 0; /* Maximum ECX value for sub-leaves */
2339 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2340 *ecx = 0; /* Reserved */
2341 *edx = 0; /* Reserved */
2342 } else {
2343 *eax = 0;
2344 *ebx = 0;
2345 *ecx = 0;
2346 *edx = 0;
2348 break;
2349 case 9:
2350 /* Direct Cache Access Information Leaf */
2351 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2352 *ebx = 0;
2353 *ecx = 0;
2354 *edx = 0;
2355 break;
2356 case 0xA:
2357 /* Architectural Performance Monitoring Leaf */
2358 if (kvm_enabled() && cpu->enable_pmu) {
2359 KVMState *s = cs->kvm_state;
2361 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2362 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2363 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2364 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2365 } else {
2366 *eax = 0;
2367 *ebx = 0;
2368 *ecx = 0;
2369 *edx = 0;
2371 break;
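/* The PMU leaf is only populated from the host when running under KVM with
 * the "pmu" property enabled, e.g. (illustrative) "-cpu host,pmu=on";
 * otherwise the guest sees no architectural PMU. */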
2372 case 0xD: {
2373 KVMState *s = cs->kvm_state;
2374 uint64_t kvm_mask;
2375 int i;
2377 /* Processor Extended State */
2378 *eax = 0;
2379 *ebx = 0;
2380 *ecx = 0;
2381 *edx = 0;
2382 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) || !kvm_enabled()) {
2383 break;
2385 kvm_mask =
2386 kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX) |
2387 ((uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32);
2389 if (count == 0) {
2390 *ecx = 0x240;
2391 for (i = 2; i < ARRAY_SIZE(ext_save_areas); i++) {
2392 const ExtSaveArea *esa = &ext_save_areas[i];
2393 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2394 (kvm_mask & (1 << i)) != 0) {
2395 if (i < 32) {
2396 *eax |= 1 << i;
2397 } else {
2398 *edx |= 1 << (i - 32);
2400 *ecx = MAX(*ecx, esa->offset + esa->size);
2403 *eax |= kvm_mask & (XSTATE_FP | XSTATE_SSE);
2404 *ebx = *ecx;
2405 } else if (count == 1) {
2406 *eax = env->features[FEAT_XSAVE];
2407 } else if (count < ARRAY_SIZE(ext_save_areas)) {
2408 const ExtSaveArea *esa = &ext_save_areas[count];
2409 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2410 (kvm_mask & (1 << count)) != 0) {
2411 *eax = esa->size;
2412 *ebx = esa->offset;
2415 break;
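/* For sub-leaf 0, ECX starts at 0x240 (512-byte legacy FXSAVE region plus
 * 64-byte XSAVE header) and grows to cover the highest enabled extended
 * save area; EAX/EDX report the supported XCR0 bits. */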
2417 case 0x80000000:
2418 *eax = env->cpuid_xlevel;
2419 *ebx = env->cpuid_vendor1;
2420 *edx = env->cpuid_vendor2;
2421 *ecx = env->cpuid_vendor3;
2422 break;
2423 case 0x80000001:
2424 *eax = env->cpuid_version;
2425 *ebx = 0;
2426 *ecx = env->features[FEAT_8000_0001_ECX];
2427 *edx = env->features[FEAT_8000_0001_EDX];
2429 /* The Linux kernel checks for the CMPLegacy bit and
2430 * discards multiple thread information if it is set.
2431 * So don't set it here for Intel to make Linux guests happy. */
2433 if (cs->nr_cores * cs->nr_threads > 1) {
2434 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2435 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2436 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2437 *ecx |= 1 << 1; /* CmpLegacy bit */
2440 break;
2441 case 0x80000002:
2442 case 0x80000003:
2443 case 0x80000004:
2444 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2445 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2446 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2447 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2448 break;
2449 case 0x80000005:
2450 /* cache info (L1 cache) */
2451 if (cpu->cache_info_passthrough) {
2452 host_cpuid(index, 0, eax, ebx, ecx, edx);
2453 break;
2455 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2456 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2457 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2458 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2459 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2460 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2461 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2462 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2463 break;
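/* AMD L1 cache descriptor format used above: bits 31..24 size in KB,
 * 23..16 associativity, 15..8 lines per tag, 7..0 line size, with ECX
 * describing the data cache and EDX the instruction cache. */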
2464 case 0x80000006:
2465 /* cache info (L2 cache) */
2466 if (cpu->cache_info_passthrough) {
2467 host_cpuid(index, 0, eax, ebx, ecx, edx);
2468 break;
2470 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2471 (L2_DTLB_2M_ENTRIES << 16) | \
2472 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2473 (L2_ITLB_2M_ENTRIES);
2474 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2475 (L2_DTLB_4K_ENTRIES << 16) | \
2476 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2477 (L2_ITLB_4K_ENTRIES);
2478 *ecx = (L2_SIZE_KB_AMD << 16) | \
2479 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2480 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2481 *edx = ((L3_SIZE_KB/512) << 18) | \
2482 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2483 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2484 break;
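/* L2/L3 descriptor format: ECX[31:16] is the L2 size in KB and EDX[31:18]
 * the L3 size in 512 KB units; the 4-bit associativity fields use AMD's
 * encoded values rather than the raw way count. */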
2485 case 0x80000007:
2486 *eax = 0;
2487 *ebx = 0;
2488 *ecx = 0;
2489 *edx = env->features[FEAT_8000_0007_EDX];
2490 break;
2491 case 0x80000008:
2492 /* virtual & phys address size in low 2 bytes. */
2493 /* XXX: This value must match the one used in the MMU code. */
2494 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2495 /* 64 bit processor */
2496 /* XXX: The physical address space is limited to 42 bits in exec.c. */
2497 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
2498 } else {
2499 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
2500 *eax = 0x00000024; /* 36 bits physical */
2501 } else {
2502 *eax = 0x00000020; /* 32 bits physical */
2505 *ebx = 0;
2506 *ecx = 0;
2507 *edx = 0;
2508 if (cs->nr_cores * cs->nr_threads > 1) {
2509 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2511 break;
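/* EAX[7:0] is the physical and EAX[15:8] the linear address width, so
 * 0x3028 above advertises 48-bit virtual / 40-bit physical addressing. */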
2512 case 0x8000000A:
2513 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2514 *eax = 0x00000001; /* SVM Revision */
2515 *ebx = 0x00000010; /* nr of ASIDs */
2516 *ecx = 0;
2517 *edx = env->features[FEAT_SVM]; /* optional features */
2518 } else {
2519 *eax = 0;
2520 *ebx = 0;
2521 *ecx = 0;
2522 *edx = 0;
2524 break;
2525 case 0xC0000000:
2526 *eax = env->cpuid_xlevel2;
2527 *ebx = 0;
2528 *ecx = 0;
2529 *edx = 0;
2530 break;
2531 case 0xC0000001:
2532 /* Support for VIA CPU's CPUID instruction */
2533 *eax = env->cpuid_version;
2534 *ebx = 0;
2535 *ecx = 0;
2536 *edx = env->features[FEAT_C000_0001_EDX];
2537 break;
2538 case 0xC0000002:
2539 case 0xC0000003:
2540 case 0xC0000004:
2541 /* Reserved for the future, and now filled with zero */
2542 *eax = 0;
2543 *ebx = 0;
2544 *ecx = 0;
2545 *edx = 0;
2546 break;
2547 default:
2548 /* reserved values: zero */
2549 *eax = 0;
2550 *ebx = 0;
2551 *ecx = 0;
2552 *edx = 0;
2553 break;
2557 /* CPUClass::reset() */
2558 static void x86_cpu_reset(CPUState *s)
2560 X86CPU *cpu = X86_CPU(s);
2561 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2562 CPUX86State *env = &cpu->env;
2563 int i;
2565 xcc->parent_reset(s);
2567 memset(env, 0, offsetof(CPUX86State, cpuid_level));
2569 tlb_flush(s, 1);
2571 env->old_exception = -1;
2573 /* init to reset state */
2575 #ifdef CONFIG_SOFTMMU
2576 env->hflags |= HF_SOFTMMU_MASK;
2577 #endif
2578 env->hflags2 |= HF2_GIF_MASK;
2580 cpu_x86_update_cr0(env, 0x60000010);
2581 env->a20_mask = ~0x0;
2582 env->smbase = 0x30000;
2584 env->idt.limit = 0xffff;
2585 env->gdt.limit = 0xffff;
2586 env->ldt.limit = 0xffff;
2587 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2588 env->tr.limit = 0xffff;
2589 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
2591 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2592 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2593 DESC_R_MASK | DESC_A_MASK);
2594 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2595 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2596 DESC_A_MASK);
2597 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2598 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2599 DESC_A_MASK);
2600 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2601 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2602 DESC_A_MASK);
2603 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2604 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2605 DESC_A_MASK);
2606 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2607 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2608 DESC_A_MASK);
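/* CS was loaded above with selector 0xf000 and base 0xffff0000; together
 * with EIP = 0xfff0 set below, the first instruction fetch lands at the
 * architectural reset vector, physical address 0xfffffff0. */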
2610 env->eip = 0xfff0;
2611 env->regs[R_EDX] = env->cpuid_version;
2613 env->eflags = 0x2;
2615 /* FPU init */
2616 for (i = 0; i < 8; i++) {
2617 env->fptags[i] = 1;
2619 cpu_set_fpuc(env, 0x37f);
2621 env->mxcsr = 0x1f80;
2622 env->xstate_bv = XSTATE_FP | XSTATE_SSE;
2624 env->pat = 0x0007040600070406ULL;
2625 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2627 memset(env->dr, 0, sizeof(env->dr));
2628 env->dr[6] = DR6_FIXED_1;
2629 env->dr[7] = DR7_FIXED_1;
2630 cpu_breakpoint_remove_all(s, BP_CPU);
2631 cpu_watchpoint_remove_all(s, BP_CPU);
2633 env->xcr0 = 1;
2636 /* SDM 11.11.5 requires:
2637  * - IA32_MTRR_DEF_TYPE MSR.E = 0
2638  * - IA32_MTRR_PHYSMASKn.V = 0
2639  * All other bits are undefined.  For simplicity, zero them all. */
2641 env->mtrr_deftype = 0;
2642 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
2643 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
2645 #if !defined(CONFIG_USER_ONLY)
2646 /* We hard-wire the BSP to the first CPU. */
2647 if (s->cpu_index == 0) {
2648 apic_designate_bsp(cpu->apic_state);
2651 s->halted = !cpu_is_bsp(cpu);
2653 if (kvm_enabled()) {
2654 kvm_arch_reset_vcpu(cpu);
2656 #endif
2659 #ifndef CONFIG_USER_ONLY
2660 bool cpu_is_bsp(X86CPU *cpu)
2662 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2665 /* TODO: remove me, when reset over QOM tree is implemented */
2666 static void x86_cpu_machine_reset_cb(void *opaque)
2668 X86CPU *cpu = opaque;
2669 cpu_reset(CPU(cpu));
2671 #endif
2673 static void mce_init(X86CPU *cpu)
2675 CPUX86State *cenv = &cpu->env;
2676 unsigned int bank;
2678 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2679 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2680 (CPUID_MCE | CPUID_MCA)) {
2681 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2682 cenv->mcg_ctl = ~(uint64_t)0;
2683 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2684 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2689 #ifndef CONFIG_USER_ONLY
2690 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2692 DeviceState *dev = DEVICE(cpu);
2693 APICCommonState *apic;
2694 const char *apic_type = "apic";
2696 if (kvm_irqchip_in_kernel()) {
2697 apic_type = "kvm-apic";
2698 } else if (xen_enabled()) {
2699 apic_type = "xen-apic";
2702 cpu->apic_state = qdev_try_create(qdev_get_parent_bus(dev), apic_type);
2703 if (cpu->apic_state == NULL) {
2704 error_setg(errp, "APIC device '%s' could not be created", apic_type);
2705 return;
2708 object_property_add_child(OBJECT(cpu), "apic",
2709 OBJECT(cpu->apic_state), NULL);
2710 qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
2711 /* TODO: convert to link<> */
2712 apic = APIC_COMMON(cpu->apic_state);
2713 apic->cpu = cpu;
2716 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2718 if (cpu->apic_state == NULL) {
2719 return;
2721 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
2722 errp);
2724 #else
2725 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2728 #endif
2731 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
2732 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
2733 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
2734 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
2735 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
2736 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
2737 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
2739 CPUState *cs = CPU(dev);
2740 X86CPU *cpu = X86_CPU(dev);
2741 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
2742 CPUX86State *env = &cpu->env;
2743 Error *local_err = NULL;
2744 static bool ht_warned;
2746 if (cpu->apic_id < 0) {
2747 error_setg(errp, "apic-id property was not initialized properly");
2748 return;
2751 if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
2752 env->cpuid_level = 7;
2755 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
2756 * CPUID[1].EDX. */
2758 if (IS_AMD_CPU(env)) {
2759 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
2760 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
2761 & CPUID_EXT2_AMD_ALIASES);
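/* For instance, bits such as MMX and FXSR are mirrored from CPUID[1].EDX
 * into CPUID[8000_0001].EDX so AMD guests see consistent values in both
 * leaves (the exact alias set is defined by CPUID_EXT2_AMD_ALIASES). */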
2765 if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
2766 error_setg(&local_err,
2767 kvm_enabled() ?
2768 "Host doesn't support requested features" :
2769 "TCG doesn't support requested features");
2770 goto out;
2773 #ifndef CONFIG_USER_ONLY
2774 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
2776 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
2777 x86_cpu_apic_create(cpu, &local_err);
2778 if (local_err != NULL) {
2779 goto out;
2782 #endif
2784 mce_init(cpu);
2785 qemu_init_vcpu(cs);
2787 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
2788 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
2789 * based on inputs (sockets, cores, threads), it is still better to give
2790 * users a warning.
2792 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
2793 * cs->nr_threads hasn't been populated yet and the check is incorrect. */
2795 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
2796 error_report("AMD CPU doesn't support hyperthreading. Please configure"
2797 " -smp options properly.");
2798 ht_warned = true;
2801 x86_cpu_apic_realize(cpu, &local_err);
2802 if (local_err != NULL) {
2803 goto out;
2805 cpu_reset(cs);
2807 xcc->parent_realize(dev, &local_err);
2808 out:
2809 if (local_err != NULL) {
2810 error_propagate(errp, local_err);
2811 return;
2815 static void x86_cpu_initfn(Object *obj)
2817 CPUState *cs = CPU(obj);
2818 X86CPU *cpu = X86_CPU(obj);
2819 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
2820 CPUX86State *env = &cpu->env;
2821 static int inited;
2823 cs->env_ptr = env;
2824 cpu_exec_init(env);
2826 object_property_add(obj, "family", "int",
2827 x86_cpuid_version_get_family,
2828 x86_cpuid_version_set_family, NULL, NULL, NULL);
2829 object_property_add(obj, "model", "int",
2830 x86_cpuid_version_get_model,
2831 x86_cpuid_version_set_model, NULL, NULL, NULL);
2832 object_property_add(obj, "stepping", "int",
2833 x86_cpuid_version_get_stepping,
2834 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
2835 object_property_add(obj, "level", "int",
2836 x86_cpuid_get_level,
2837 x86_cpuid_set_level, NULL, NULL, NULL);
2838 object_property_add(obj, "xlevel", "int",
2839 x86_cpuid_get_xlevel,
2840 x86_cpuid_set_xlevel, NULL, NULL, NULL);
2841 object_property_add_str(obj, "vendor",
2842 x86_cpuid_get_vendor,
2843 x86_cpuid_set_vendor, NULL);
2844 object_property_add_str(obj, "model-id",
2845 x86_cpuid_get_model_id,
2846 x86_cpuid_set_model_id, NULL);
2847 object_property_add(obj, "tsc-frequency", "int",
2848 x86_cpuid_get_tsc_freq,
2849 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
2850 object_property_add(obj, "apic-id", "int",
2851 x86_cpuid_get_apic_id,
2852 x86_cpuid_set_apic_id, NULL, NULL, NULL);
2853 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
2854 x86_cpu_get_feature_words,
2855 NULL, NULL, (void *)env->features, NULL);
2856 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
2857 x86_cpu_get_feature_words,
2858 NULL, NULL, (void *)cpu->filtered_features, NULL);
2860 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
2862 #ifndef CONFIG_USER_ONLY
2863 /* Any code creating new X86CPU objects has to set apic-id explicitly */
2864 cpu->apic_id = -1;
2865 #endif
2867 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
2869 /* init various static tables used in TCG mode */
2870 if (tcg_enabled() && !inited) {
2871 inited = 1;
2872 optimize_flags_init();
2876 static int64_t x86_cpu_get_arch_id(CPUState *cs)
2878 X86CPU *cpu = X86_CPU(cs);
2880 return cpu->apic_id;
2883 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
2885 X86CPU *cpu = X86_CPU(cs);
2887 return cpu->env.cr[0] & CR0_PG_MASK;
2890 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
2892 X86CPU *cpu = X86_CPU(cs);
2894 cpu->env.eip = value;
2897 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
2899 X86CPU *cpu = X86_CPU(cs);
2901 cpu->env.eip = tb->pc - tb->cs_base;
2904 static bool x86_cpu_has_work(CPUState *cs)
2906 X86CPU *cpu = X86_CPU(cs);
2907 CPUX86State *env = &cpu->env;
2909 #if !defined(CONFIG_USER_ONLY)
2910 if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
2911 apic_poll_irq(cpu->apic_state);
2912 cpu_reset_interrupt(cs, CPU_INTERRUPT_POLL);
2914 #endif
2916 return ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
2917 (env->eflags & IF_MASK)) ||
2918 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
2919 CPU_INTERRUPT_INIT |
2920 CPU_INTERRUPT_SIPI |
2921 CPU_INTERRUPT_MCE));
2924 static Property x86_cpu_properties[] = {
2925 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
2926 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
2927 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
2928 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
2929 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
2930 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, false),
2931 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
2932 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
2933 DEFINE_PROP_END_OF_LIST()
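/* These map directly to "-cpu" options; an illustrative invocation enabling
 * strict feature checking and two Hyper-V enlightenments would be:
 *   -cpu SandyBridge,enforce,hv-relaxed,hv-vapic */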
2936 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
2938 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2939 CPUClass *cc = CPU_CLASS(oc);
2940 DeviceClass *dc = DEVICE_CLASS(oc);
2942 xcc->parent_realize = dc->realize;
2943 dc->realize = x86_cpu_realizefn;
2944 dc->bus_type = TYPE_ICC_BUS;
2945 dc->props = x86_cpu_properties;
2947 xcc->parent_reset = cc->reset;
2948 cc->reset = x86_cpu_reset;
2949 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
2951 cc->class_by_name = x86_cpu_class_by_name;
2952 cc->parse_features = x86_cpu_parse_featurestr;
2953 cc->has_work = x86_cpu_has_work;
2954 cc->do_interrupt = x86_cpu_do_interrupt;
2955 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
2956 cc->dump_state = x86_cpu_dump_state;
2957 cc->set_pc = x86_cpu_set_pc;
2958 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
2959 cc->gdb_read_register = x86_cpu_gdb_read_register;
2960 cc->gdb_write_register = x86_cpu_gdb_write_register;
2961 cc->get_arch_id = x86_cpu_get_arch_id;
2962 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
2963 #ifdef CONFIG_USER_ONLY
2964 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
2965 #else
2966 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
2967 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
2968 cc->write_elf64_note = x86_cpu_write_elf64_note;
2969 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
2970 cc->write_elf32_note = x86_cpu_write_elf32_note;
2971 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
2972 cc->vmsd = &vmstate_x86_cpu;
2973 #endif
2974 cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
2975 #ifndef CONFIG_USER_ONLY
2976 cc->debug_excp_handler = breakpoint_handler;
2977 #endif
2978 cc->cpu_exec_enter = x86_cpu_exec_enter;
2979 cc->cpu_exec_exit = x86_cpu_exec_exit;
2982 static const TypeInfo x86_cpu_type_info = {
2983 .name = TYPE_X86_CPU,
2984 .parent = TYPE_CPU,
2985 .instance_size = sizeof(X86CPU),
2986 .instance_init = x86_cpu_initfn,
2987 .abstract = true,
2988 .class_size = sizeof(X86CPUClass),
2989 .class_init = x86_cpu_common_class_init,
2992 static void x86_cpu_register_types(void)
2994 int i;
2996 type_register_static(&x86_cpu_type_info);
2997 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2998 x86_register_cpudef_type(&builtin_x86_defs[i]);
3000 #ifdef CONFIG_KVM
3001 type_register_static(&host_x86_cpu_type_info);
3002 #endif
3005 type_init(x86_cpu_register_types)