target-i386: add Intel AVX-512 support
[qemu/ar7.git] / target-i386 / cpu.c
blob e1946016ad3ec876c75eda09af274aa8d9d5993e
1 /*
2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include <stdlib.h>
20 #include <stdio.h>
21 #include <string.h>
22 #include <inttypes.h>
24 #include "cpu.h"
25 #include "sysemu/kvm.h"
26 #include "sysemu/cpus.h"
27 #include "kvm_i386.h"
28 #include "topology.h"
30 #include "qemu/option.h"
31 #include "qemu/config-file.h"
32 #include "qapi/qmp/qerror.h"
34 #include "qapi-types.h"
35 #include "qapi-visit.h"
36 #include "qapi/visitor.h"
37 #include "sysemu/arch_init.h"
39 #include "hw/hw.h"
40 #if defined(CONFIG_KVM)
41 #include <linux/kvm_para.h>
42 #endif
44 #include "sysemu/sysemu.h"
45 #include "hw/qdev-properties.h"
46 #include "hw/cpu/icc_bus.h"
47 #ifndef CONFIG_USER_ONLY
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
50 #endif
53 /* Cache topology CPUID constants: */
55 /* CPUID Leaf 2 Descriptors */
57 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
58 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
59 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
62 /* CPUID Leaf 4 constants: */
64 /* EAX: */
65 #define CPUID_4_TYPE_DCACHE 1
66 #define CPUID_4_TYPE_ICACHE 2
67 #define CPUID_4_TYPE_UNIFIED 3
69 #define CPUID_4_LEVEL(l) ((l) << 5)
71 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
72 #define CPUID_4_FULLY_ASSOC (1 << 9)
74 /* EDX: */
75 #define CPUID_4_NO_INVD_SHARING (1 << 0)
76 #define CPUID_4_INCLUSIVE (1 << 1)
77 #define CPUID_4_COMPLEX_IDX (1 << 2)
79 #define ASSOC_FULL 0xFF
81 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
82 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
83 a == 2 ? 0x2 : \
84 a == 4 ? 0x4 : \
85 a == 8 ? 0x6 : \
86 a == 16 ? 0x8 : \
87 a == 32 ? 0xA : \
88 a == 48 ? 0xB : \
89 a == 64 ? 0xC : \
90 a == 96 ? 0xD : \
91 a == 128 ? 0xE : \
92 a == ASSOC_FULL ? 0xF : \
93 0 /* invalid value */)
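/* For example, the hardcoded L2 cache below (16-way) encodes as
 * AMD_ENC_ASSOC(16) == 0x8, a fully associative cache as
 * AMD_ENC_ASSOC(ASSOC_FULL) == 0xF, and 0/1 pass through unchanged
 * (disabled / direct mapped).  Any associativity not listed in the
 * table falls through to 0, which CPUID leaf 0x80000006 treats as
 * an invalid/reserved encoding.
 */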
96 /* Definitions of the hardcoded cache entries we expose: */
98 /* L1 data cache: */
99 #define L1D_LINE_SIZE 64
100 #define L1D_ASSOCIATIVITY 8
101 #define L1D_SETS 64
102 #define L1D_PARTITIONS 1
103 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
104 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
105 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
106 #define L1D_LINES_PER_TAG 1
107 #define L1D_SIZE_KB_AMD 64
108 #define L1D_ASSOCIATIVITY_AMD 2
110 /* L1 instruction cache: */
111 #define L1I_LINE_SIZE 64
112 #define L1I_ASSOCIATIVITY 8
113 #define L1I_SETS 64
114 #define L1I_PARTITIONS 1
115 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
116 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
117 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
118 #define L1I_LINES_PER_TAG 1
119 #define L1I_SIZE_KB_AMD 64
120 #define L1I_ASSOCIATIVITY_AMD 2
122 /* Level 2 unified cache: */
123 #define L2_LINE_SIZE 64
124 #define L2_ASSOCIATIVITY 16
125 #define L2_SETS 4096
126 #define L2_PARTITIONS 1
127 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
128 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
129 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
130 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
131 #define L2_LINES_PER_TAG 1
132 #define L2_SIZE_KB_AMD 512
134 /* No L3 cache: */
135 #define L3_SIZE_KB 0 /* disabled */
136 #define L3_ASSOCIATIVITY 0 /* disabled */
137 #define L3_LINES_PER_TAG 0 /* disabled */
138 #define L3_LINE_SIZE 0 /* disabled */
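/* Sanity check on the sizes defined above:
 *   L1D/L1I: 64 B/line * 8 ways * 64 sets * 1 partition = 32 KiB,
 *            matching the 0x2c/0x30 leaf-2 descriptors.
 *   L2:      64 B/line * 16 ways * 4096 sets * 1 partition = 4 MiB,
 *            while the leaf-2 descriptor advertises 2 MB and the AMD
 *            leaf 0x80000006 value 512 KB -- hence the FIXMEs above.
 */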
140 /* TLB definitions: */
142 #define L1_DTLB_2M_ASSOC 1
143 #define L1_DTLB_2M_ENTRIES 255
144 #define L1_DTLB_4K_ASSOC 1
145 #define L1_DTLB_4K_ENTRIES 255
147 #define L1_ITLB_2M_ASSOC 1
148 #define L1_ITLB_2M_ENTRIES 255
149 #define L1_ITLB_4K_ASSOC 1
150 #define L1_ITLB_4K_ENTRIES 255
152 #define L2_DTLB_2M_ASSOC 0 /* disabled */
153 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
154 #define L2_DTLB_4K_ASSOC 4
155 #define L2_DTLB_4K_ENTRIES 512
157 #define L2_ITLB_2M_ASSOC 0 /* disabled */
158 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
159 #define L2_ITLB_4K_ASSOC 4
160 #define L2_ITLB_4K_ENTRIES 512
164 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
165 uint32_t vendor2, uint32_t vendor3)
167 int i;
168 for (i = 0; i < 4; i++) {
169 dst[i] = vendor1 >> (8 * i);
170 dst[i + 4] = vendor2 >> (8 * i);
171 dst[i + 8] = vendor3 >> (8 * i);
173 dst[CPUID_VENDOR_SZ] = '\0';
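/* Each 32-bit register contributes four little-endian bytes, so the
 * CPUID.0 triple (EBX, EDX, ECX) = ("Genu", "ineI", "ntel") yields
 * dst = "GenuineIntel".  CPUID_VENDOR_SZ is 12, so dst must hold 13
 * bytes including the terminator; callers pass the registers in
 * EBX/EDX/ECX order (see host_x86_cpu_class_init() below).
 */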
176 /* feature flags taken from "Intel Processor Identification and the CPUID
177 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
178 * between feature naming conventions, aliases may be added.
180 static const char *feature_name[] = {
181 "fpu", "vme", "de", "pse",
182 "tsc", "msr", "pae", "mce",
183 "cx8", "apic", NULL, "sep",
184 "mtrr", "pge", "mca", "cmov",
185 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
186 NULL, "ds" /* Intel dts */, "acpi", "mmx",
187 "fxsr", "sse", "sse2", "ss",
188 "ht" /* Intel htt */, "tm", "ia64", "pbe",
190 static const char *ext_feature_name[] = {
191 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
192 "ds_cpl", "vmx", "smx", "est",
193 "tm2", "ssse3", "cid", NULL,
194 "fma", "cx16", "xtpr", "pdcm",
195 NULL, "pcid", "dca", "sse4.1|sse4_1",
196 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
197 "tsc-deadline", "aes", "xsave", "osxsave",
198 "avx", "f16c", "rdrand", "hypervisor",
200 /* Feature names that are already defined on feature_name[] but are set on
201 * CPUID[8000_0001].EDX on AMD CPUs don't have their names on
202 * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
203 * if and only if CPU vendor is AMD.
205 static const char *ext2_feature_name[] = {
206 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
207 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
208 NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
209 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
210 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
211 "nx|xd", NULL, "mmxext", NULL /* mmx */,
212 NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
213 NULL, "lm|i64", "3dnowext", "3dnow",
215 static const char *ext3_feature_name[] = {
216 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
217 "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
218 "3dnowprefetch", "osvw", "ibs", "xop",
219 "skinit", "wdt", NULL, "lwp",
220 "fma4", "tce", NULL, "nodeid_msr",
221 NULL, "tbm", "topoext", "perfctr_core",
222 "perfctr_nb", NULL, NULL, NULL,
223 NULL, NULL, NULL, NULL,
226 static const char *ext4_feature_name[] = {
227 NULL, NULL, "xstore", "xstore-en",
228 NULL, NULL, "xcrypt", "xcrypt-en",
229 "ace2", "ace2-en", "phe", "phe-en",
230 "pmm", "pmm-en", NULL, NULL,
231 NULL, NULL, NULL, NULL,
232 NULL, NULL, NULL, NULL,
233 NULL, NULL, NULL, NULL,
234 NULL, NULL, NULL, NULL,
237 static const char *kvm_feature_name[] = {
238 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
239 "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
240 NULL, NULL, NULL, NULL,
241 NULL, NULL, NULL, NULL,
242 NULL, NULL, NULL, NULL,
243 NULL, NULL, NULL, NULL,
244 "kvmclock-stable-bit", NULL, NULL, NULL,
245 NULL, NULL, NULL, NULL,
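/* "kvmclock" intentionally appears twice: bit 0 (KVM_FEATURE_CLOCKSOURCE)
 * and bit 3 (KVM_FEATURE_CLOCKSOURCE2) cover the old and new MSR
 * interfaces, and lookup_feature() sets every bit whose name matches,
 * so "+kvmclock"/"-kvmclock" toggles both at once.
 */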
248 static const char *svm_feature_name[] = {
249 "npt", "lbrv", "svm_lock", "nrip_save",
250 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
251 NULL, NULL, "pause_filter", NULL,
252 "pfthreshold", NULL, NULL, NULL,
253 NULL, NULL, NULL, NULL,
254 NULL, NULL, NULL, NULL,
255 NULL, NULL, NULL, NULL,
256 NULL, NULL, NULL, NULL,
259 static const char *cpuid_7_0_ebx_feature_name[] = {
260 "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
261 "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
262 "avx512f", NULL, "rdseed", "adx", "smap", NULL, NULL, NULL,
263 NULL, NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
266 static const char *cpuid_apm_edx_feature_name[] = {
267 NULL, NULL, NULL, NULL,
268 NULL, NULL, NULL, NULL,
269 "invtsc", NULL, NULL, NULL,
270 NULL, NULL, NULL, NULL,
271 NULL, NULL, NULL, NULL,
272 NULL, NULL, NULL, NULL,
273 NULL, NULL, NULL, NULL,
274 NULL, NULL, NULL, NULL,
277 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
278 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
279 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
280 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
281 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
282 CPUID_PSE36 | CPUID_FXSR)
283 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
284 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
285 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
286 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
287 CPUID_PAE | CPUID_SEP | CPUID_APIC)
289 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
290 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
291 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
292 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
293 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS)
294 /* partly implemented:
295 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
296 /* missing:
297 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
298 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
299 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
300 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
301 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
302 /* missing:
303 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
304 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
305 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
306 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_XSAVE,
307 CPUID_EXT_OSXSAVE, CPUID_EXT_AVX, CPUID_EXT_F16C,
308 CPUID_EXT_RDRAND */
310 #ifdef TARGET_X86_64
311 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
312 #else
313 #define TCG_EXT2_X86_64_FEATURES 0
314 #endif
316 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
317 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
318 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
319 TCG_EXT2_X86_64_FEATURES)
320 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
321 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
322 #define TCG_EXT4_FEATURES 0
323 #define TCG_SVM_FEATURES 0
324 #define TCG_KVM_FEATURES 0
325 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
326 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX)
327 /* missing:
328 CPUID_7_0_EBX_FSGSBASE, CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
329 CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
330 CPUID_7_0_EBX_RDSEED */
331 #define TCG_APM_FEATURES 0
334 typedef struct FeatureWordInfo {
335 const char **feat_names;
336 uint32_t cpuid_eax; /* Input EAX for CPUID */
337 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
338 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
339 int cpuid_reg; /* output register (R_* constant) */
340 uint32_t tcg_features; /* Feature flags supported by TCG */
341 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
342 } FeatureWordInfo;
344 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
345 [FEAT_1_EDX] = {
346 .feat_names = feature_name,
347 .cpuid_eax = 1, .cpuid_reg = R_EDX,
348 .tcg_features = TCG_FEATURES,
350 [FEAT_1_ECX] = {
351 .feat_names = ext_feature_name,
352 .cpuid_eax = 1, .cpuid_reg = R_ECX,
353 .tcg_features = TCG_EXT_FEATURES,
355 [FEAT_8000_0001_EDX] = {
356 .feat_names = ext2_feature_name,
357 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
358 .tcg_features = TCG_EXT2_FEATURES,
360 [FEAT_8000_0001_ECX] = {
361 .feat_names = ext3_feature_name,
362 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
363 .tcg_features = TCG_EXT3_FEATURES,
365 [FEAT_C000_0001_EDX] = {
366 .feat_names = ext4_feature_name,
367 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
368 .tcg_features = TCG_EXT4_FEATURES,
370 [FEAT_KVM] = {
371 .feat_names = kvm_feature_name,
372 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
373 .tcg_features = TCG_KVM_FEATURES,
375 [FEAT_SVM] = {
376 .feat_names = svm_feature_name,
377 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
378 .tcg_features = TCG_SVM_FEATURES,
380 [FEAT_7_0_EBX] = {
381 .feat_names = cpuid_7_0_ebx_feature_name,
382 .cpuid_eax = 7,
383 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
384 .cpuid_reg = R_EBX,
385 .tcg_features = TCG_7_0_EBX_FEATURES,
387 [FEAT_8000_0007_EDX] = {
388 .feat_names = cpuid_apm_edx_feature_name,
389 .cpuid_eax = 0x80000007,
390 .cpuid_reg = R_EDX,
391 .tcg_features = TCG_APM_FEATURES,
392 .unmigratable_flags = CPUID_APM_INVTSC,
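/* Each FeatureWord above ties one CPUID leaf/register pair to a 32-bit
 * entry of env->features[].  FEAT_7_0_EBX, for instance, is
 * CPUID.(EAX=7,ECX=0):EBX, which is where the AVX-512 flags added by
 * this patch live (avx512f bit 16, avx512pf bit 26, avx512er bit 27,
 * avx512cd bit 28, per cpuid_7_0_ebx_feature_name above).
 */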
396 typedef struct X86RegisterInfo32 {
397 /* Name of register */
398 const char *name;
399 /* QAPI enum value register */
400 X86CPURegister32 qapi_enum;
401 } X86RegisterInfo32;
403 #define REGISTER(reg) \
404 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
405 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
406 REGISTER(EAX),
407 REGISTER(ECX),
408 REGISTER(EDX),
409 REGISTER(EBX),
410 REGISTER(ESP),
411 REGISTER(EBP),
412 REGISTER(ESI),
413 REGISTER(EDI),
415 #undef REGISTER
417 typedef struct ExtSaveArea {
418 uint32_t feature, bits;
419 uint32_t offset, size;
420 } ExtSaveArea;
422 static const ExtSaveArea ext_save_areas[] = {
423 [2] = { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
424 .offset = 0x240, .size = 0x100 },
425 [3] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
426 .offset = 0x3c0, .size = 0x40 },
427 [4] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
428 .offset = 0x400, .size = 0x40 },
429 [5] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
430 .offset = 0x440, .size = 0x40 },
431 [6] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
432 .offset = 0x480, .size = 0x200 },
433 [7] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
434 .offset = 0x680, .size = 0x400 },
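/* The indexes above are XSAVE state-component numbers and the offsets
 * follow the SDM's standard-format layout: component 2 is the AVX
 * YMM_Hi128 state (16 regs x 16 bytes = 0x100), 3/4 are the MPX bound
 * registers and config (0x40 each), and the three AVX-512 components
 * are the opmask registers k0-k7 (8 x 8 = 0x40 at 0x440), ZMM_Hi256
 * for ZMM0-ZMM15 (16 x 32 = 0x200 at 0x480) and Hi16_ZMM for
 * ZMM16-ZMM31 (16 x 64 = 0x400 at 0x680).  Each area is only exposed
 * when the guarding feature bit is present.
 */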
437 const char *get_register_name_32(unsigned int reg)
439 if (reg >= CPU_NB_REGS32) {
440 return NULL;
442 return x86_reg_info_32[reg].name;
445 /* collects per-function cpuid data */
447 typedef struct model_features_t {
448 uint32_t *guest_feat;
449 uint32_t *host_feat;
450 FeatureWord feat_word;
451 } model_features_t;
453 /* KVM-specific features that are automatically added to all CPU models
454 * when KVM is enabled.
456 static uint32_t kvm_default_features[FEATURE_WORDS] = {
457 [FEAT_KVM] = (1 << KVM_FEATURE_CLOCKSOURCE) |
458 (1 << KVM_FEATURE_NOP_IO_DELAY) |
459 (1 << KVM_FEATURE_CLOCKSOURCE2) |
460 (1 << KVM_FEATURE_ASYNC_PF) |
461 (1 << KVM_FEATURE_STEAL_TIME) |
462 (1 << KVM_FEATURE_PV_EOI) |
463 (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT),
464 [FEAT_1_ECX] = CPUID_EXT_X2APIC,
467 /* Features that are not added by default to any CPU model when KVM is enabled.
469 static uint32_t kvm_default_unset_features[FEATURE_WORDS] = {
470 [FEAT_1_ECX] = CPUID_EXT_MONITOR,
473 void x86_cpu_compat_disable_kvm_features(FeatureWord w, uint32_t features)
475 kvm_default_features[w] &= ~features;
479 * Returns the set of feature flags that are supported and migratable by
480 * QEMU, for a given FeatureWord.
482 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
484 FeatureWordInfo *wi = &feature_word_info[w];
485 uint32_t r = 0;
486 int i;
488 for (i = 0; i < 32; i++) {
489 uint32_t f = 1U << i;
490 /* If the feature name is unknown, it is not supported by QEMU yet */
491 if (!wi->feat_names[i]) {
492 continue;
494 /* Skip features known to QEMU, but explicitly marked as unmigratable */
495 if (wi->unmigratable_flags & f) {
496 continue;
498 r |= f;
500 return r;
503 void host_cpuid(uint32_t function, uint32_t count,
504 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
506 uint32_t vec[4];
508 #ifdef __x86_64__
509 asm volatile("cpuid"
510 : "=a"(vec[0]), "=b"(vec[1]),
511 "=c"(vec[2]), "=d"(vec[3])
512 : "0"(function), "c"(count) : "cc");
513 #elif defined(__i386__)
514 asm volatile("pusha \n\t"
515 "cpuid \n\t"
516 "mov %%eax, 0(%2) \n\t"
517 "mov %%ebx, 4(%2) \n\t"
518 "mov %%ecx, 8(%2) \n\t"
519 "mov %%edx, 12(%2) \n\t"
520 "popa"
521 : : "a"(function), "c"(count), "S"(vec)
522 : "memory", "cc");
523 #else
524 abort();
525 #endif
527 if (eax)
528 *eax = vec[0];
529 if (ebx)
530 *ebx = vec[1];
531 if (ecx)
532 *ecx = vec[2];
533 if (edx)
534 *edx = vec[3];
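/* Unwanted outputs may be passed as NULL.  A minimal sketch of probing
 * the host for AVX-512 foundation support (CPUID.(EAX=7,ECX=0):EBX[16],
 * i.e. CPUID_7_0_EBX_AVX512F):
 *
 *     uint32_t ebx;
 *     host_cpuid(7, 0, NULL, &ebx, NULL, NULL);
 *     bool has_avx512f = (ebx >> 16) & 1;
 */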
537 #define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
539 /* general substring compare of *[s1..e1) and *[s2..e2). sx is start of
540 * a substring. ex if !NULL points to the first char after a substring,
541 * otherwise the string is assumed to be sized by a terminating nul.
542 * Return lexical ordering of *s1:*s2.
544 static int sstrcmp(const char *s1, const char *e1, const char *s2,
545 const char *e2)
547 for (;;) {
548 if (!*s1 || !*s2 || *s1 != *s2)
549 return (*s1 - *s2);
550 ++s1, ++s2;
551 if (s1 == e1 && s2 == e2)
552 return (0);
553 else if (s1 == e1)
554 return (*s2);
555 else if (s2 == e2)
556 return (*s1);
560 /* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
561 * '|' delimited (possibly empty) strings in which case search for a match
562 * within the alternatives proceeds left to right. Return 0 for success,
563 * non-zero otherwise.
565 static int altcmp(const char *s, const char *e, const char *altstr)
567 const char *p, *q;
569 for (q = p = altstr; ; ) {
570 while (*p && *p != '|')
571 ++p;
572 if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
573 return (0);
574 if (!*p)
575 return (1);
576 else
577 q = ++p;
581 /* search featureset for flag *[s..e), if found set corresponding bit in
582 * *pval and return true, otherwise return false
584 static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
585 const char **featureset)
587 uint32_t mask;
588 const char **ppc;
589 bool found = false;
591 for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
592 if (*ppc && !altcmp(s, e, *ppc)) {
593 *pval |= mask;
594 found = true;
597 return found;
600 static void add_flagname_to_bitmaps(const char *flagname,
601 FeatureWordArray words,
602 Error **errp)
604 FeatureWord w;
605 for (w = 0; w < FEATURE_WORDS; w++) {
606 FeatureWordInfo *wi = &feature_word_info[w];
607 if (wi->feat_names &&
608 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
609 break;
612 if (w == FEATURE_WORDS) {
613 error_setg(errp, "CPU feature %s not found", flagname);
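/* This is the path a "+flag"/"-flag" option takes: for "+avx512f",
 * lookup_feature() matches the name (altcmp() also accepts "a|b"
 * aliases such as "sse4.1|sse4_1") against each word's name table and
 * sets bit 16 of plus_features[FEAT_7_0_EBX]; a name found in no
 * table is reported as "CPU feature ... not found".
 */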
617 /* CPU class name definitions: */
619 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
620 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
622 /* Return type name for a given CPU model name
623 * Caller is responsible for freeing the returned string.
625 static char *x86_cpu_type_name(const char *model_name)
627 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
630 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
632 ObjectClass *oc;
633 char *typename;
635 if (cpu_model == NULL) {
636 return NULL;
639 typename = x86_cpu_type_name(cpu_model);
640 oc = object_class_by_name(typename);
641 g_free(typename);
642 return oc;
645 struct X86CPUDefinition {
646 const char *name;
647 uint32_t level;
648 uint32_t xlevel;
649 uint32_t xlevel2;
650 /* vendor is zero-terminated, 12 character ASCII string */
651 char vendor[CPUID_VENDOR_SZ + 1];
652 int family;
653 int model;
654 int stepping;
655 FeatureWordArray features;
656 char model_id[48];
657 bool cache_info_passthrough;
660 static X86CPUDefinition builtin_x86_defs[] = {
662 .name = "qemu64",
663 .level = 4,
664 .vendor = CPUID_VENDOR_AMD,
665 .family = 6,
666 .model = 6,
667 .stepping = 3,
668 .features[FEAT_1_EDX] =
669 PPRO_FEATURES |
670 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
671 CPUID_PSE36,
672 .features[FEAT_1_ECX] =
673 CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT,
674 .features[FEAT_8000_0001_EDX] =
675 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
676 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
677 .features[FEAT_8000_0001_ECX] =
678 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
679 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
680 .xlevel = 0x8000000A,
683 .name = "phenom",
684 .level = 5,
685 .vendor = CPUID_VENDOR_AMD,
686 .family = 16,
687 .model = 2,
688 .stepping = 3,
689 .features[FEAT_1_EDX] =
690 PPRO_FEATURES |
691 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
692 CPUID_PSE36 | CPUID_VME | CPUID_HT,
693 .features[FEAT_1_ECX] =
694 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
695 CPUID_EXT_POPCNT,
696 .features[FEAT_8000_0001_EDX] =
697 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
698 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
699 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
700 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
701 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
702 CPUID_EXT3_CR8LEG,
703 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
704 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
705 .features[FEAT_8000_0001_ECX] =
706 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
707 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
708 .features[FEAT_SVM] =
709 CPUID_SVM_NPT | CPUID_SVM_LBRV,
710 .xlevel = 0x8000001A,
711 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
714 .name = "core2duo",
715 .level = 10,
716 .vendor = CPUID_VENDOR_INTEL,
717 .family = 6,
718 .model = 15,
719 .stepping = 11,
720 .features[FEAT_1_EDX] =
721 PPRO_FEATURES |
722 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
723 CPUID_PSE36 | CPUID_VME | CPUID_DTS | CPUID_ACPI | CPUID_SS |
724 CPUID_HT | CPUID_TM | CPUID_PBE,
725 .features[FEAT_1_ECX] =
726 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
727 CPUID_EXT_DTES64 | CPUID_EXT_DSCPL | CPUID_EXT_VMX | CPUID_EXT_EST |
728 CPUID_EXT_TM2 | CPUID_EXT_CX16 | CPUID_EXT_XTPR | CPUID_EXT_PDCM,
729 .features[FEAT_8000_0001_EDX] =
730 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
731 .features[FEAT_8000_0001_ECX] =
732 CPUID_EXT3_LAHF_LM,
733 .xlevel = 0x80000008,
734 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
737 .name = "kvm64",
738 .level = 5,
739 .vendor = CPUID_VENDOR_INTEL,
740 .family = 15,
741 .model = 6,
742 .stepping = 1,
743 /* Missing: CPUID_VME, CPUID_HT */
744 .features[FEAT_1_EDX] =
745 PPRO_FEATURES |
746 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
747 CPUID_PSE36,
748 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
749 .features[FEAT_1_ECX] =
750 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
751 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
752 .features[FEAT_8000_0001_EDX] =
753 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
754 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
755 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
756 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
757 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
758 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
759 .features[FEAT_8000_0001_ECX] =
761 .xlevel = 0x80000008,
762 .model_id = "Common KVM processor"
765 .name = "qemu32",
766 .level = 4,
767 .vendor = CPUID_VENDOR_INTEL,
768 .family = 6,
769 .model = 6,
770 .stepping = 3,
771 .features[FEAT_1_EDX] =
772 PPRO_FEATURES,
773 .features[FEAT_1_ECX] =
774 CPUID_EXT_SSE3 | CPUID_EXT_POPCNT,
775 .xlevel = 0x80000004,
778 .name = "kvm32",
779 .level = 5,
780 .vendor = CPUID_VENDOR_INTEL,
781 .family = 15,
782 .model = 6,
783 .stepping = 1,
784 .features[FEAT_1_EDX] =
785 PPRO_FEATURES |
786 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
787 .features[FEAT_1_ECX] =
788 CPUID_EXT_SSE3,
789 .features[FEAT_8000_0001_EDX] =
790 PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES,
791 .features[FEAT_8000_0001_ECX] =
793 .xlevel = 0x80000008,
794 .model_id = "Common 32-bit KVM processor"
797 .name = "coreduo",
798 .level = 10,
799 .vendor = CPUID_VENDOR_INTEL,
800 .family = 6,
801 .model = 14,
802 .stepping = 8,
803 .features[FEAT_1_EDX] =
804 PPRO_FEATURES | CPUID_VME |
805 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_DTS | CPUID_ACPI |
806 CPUID_SS | CPUID_HT | CPUID_TM | CPUID_PBE,
807 .features[FEAT_1_ECX] =
808 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_VMX |
809 CPUID_EXT_EST | CPUID_EXT_TM2 | CPUID_EXT_XTPR | CPUID_EXT_PDCM,
810 .features[FEAT_8000_0001_EDX] =
811 CPUID_EXT2_NX,
812 .xlevel = 0x80000008,
813 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
816 .name = "486",
817 .level = 1,
818 .vendor = CPUID_VENDOR_INTEL,
819 .family = 4,
820 .model = 8,
821 .stepping = 0,
822 .features[FEAT_1_EDX] =
823 I486_FEATURES,
824 .xlevel = 0,
827 .name = "pentium",
828 .level = 1,
829 .vendor = CPUID_VENDOR_INTEL,
830 .family = 5,
831 .model = 4,
832 .stepping = 3,
833 .features[FEAT_1_EDX] =
834 PENTIUM_FEATURES,
835 .xlevel = 0,
838 .name = "pentium2",
839 .level = 2,
840 .vendor = CPUID_VENDOR_INTEL,
841 .family = 6,
842 .model = 5,
843 .stepping = 2,
844 .features[FEAT_1_EDX] =
845 PENTIUM2_FEATURES,
846 .xlevel = 0,
849 .name = "pentium3",
850 .level = 2,
851 .vendor = CPUID_VENDOR_INTEL,
852 .family = 6,
853 .model = 7,
854 .stepping = 3,
855 .features[FEAT_1_EDX] =
856 PENTIUM3_FEATURES,
857 .xlevel = 0,
860 .name = "athlon",
861 .level = 2,
862 .vendor = CPUID_VENDOR_AMD,
863 .family = 6,
864 .model = 2,
865 .stepping = 3,
866 .features[FEAT_1_EDX] =
867 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
868 CPUID_MCA,
869 .features[FEAT_8000_0001_EDX] =
870 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
871 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
872 .xlevel = 0x80000008,
875 .name = "n270",
876 /* original is on level 10 */
877 .level = 5,
878 .vendor = CPUID_VENDOR_INTEL,
879 .family = 6,
880 .model = 28,
881 .stepping = 2,
882 .features[FEAT_1_EDX] =
883 PPRO_FEATURES |
884 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME | CPUID_DTS |
885 CPUID_ACPI | CPUID_SS | CPUID_HT | CPUID_TM | CPUID_PBE,
886 /* Some CPUs have no CPUID_SEP */
887 .features[FEAT_1_ECX] =
888 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
889 CPUID_EXT_DSCPL | CPUID_EXT_EST | CPUID_EXT_TM2 | CPUID_EXT_XTPR |
890 CPUID_EXT_MOVBE,
891 .features[FEAT_8000_0001_EDX] =
892 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
893 CPUID_EXT2_NX,
894 .features[FEAT_8000_0001_ECX] =
895 CPUID_EXT3_LAHF_LM,
896 .xlevel = 0x8000000A,
897 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
900 .name = "Conroe",
901 .level = 4,
902 .vendor = CPUID_VENDOR_INTEL,
903 .family = 6,
904 .model = 15,
905 .stepping = 3,
906 .features[FEAT_1_EDX] =
907 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
908 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
909 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
910 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
911 CPUID_DE | CPUID_FP87,
912 .features[FEAT_1_ECX] =
913 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
914 .features[FEAT_8000_0001_EDX] =
915 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
916 .features[FEAT_8000_0001_ECX] =
917 CPUID_EXT3_LAHF_LM,
918 .xlevel = 0x8000000A,
919 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
922 .name = "Penryn",
923 .level = 4,
924 .vendor = CPUID_VENDOR_INTEL,
925 .family = 6,
926 .model = 23,
927 .stepping = 3,
928 .features[FEAT_1_EDX] =
929 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
930 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
931 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
932 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
933 CPUID_DE | CPUID_FP87,
934 .features[FEAT_1_ECX] =
935 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
936 CPUID_EXT_SSE3,
937 .features[FEAT_8000_0001_EDX] =
938 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
939 .features[FEAT_8000_0001_ECX] =
940 CPUID_EXT3_LAHF_LM,
941 .xlevel = 0x8000000A,
942 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
945 .name = "Nehalem",
946 .level = 4,
947 .vendor = CPUID_VENDOR_INTEL,
948 .family = 6,
949 .model = 26,
950 .stepping = 3,
951 .features[FEAT_1_EDX] =
952 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
953 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
954 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
955 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
956 CPUID_DE | CPUID_FP87,
957 .features[FEAT_1_ECX] =
958 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
959 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
960 .features[FEAT_8000_0001_EDX] =
961 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
962 .features[FEAT_8000_0001_ECX] =
963 CPUID_EXT3_LAHF_LM,
964 .xlevel = 0x8000000A,
965 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
968 .name = "Westmere",
969 .level = 11,
970 .vendor = CPUID_VENDOR_INTEL,
971 .family = 6,
972 .model = 44,
973 .stepping = 1,
974 .features[FEAT_1_EDX] =
975 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
976 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
977 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
978 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
979 CPUID_DE | CPUID_FP87,
980 .features[FEAT_1_ECX] =
981 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
982 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
983 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
984 .features[FEAT_8000_0001_EDX] =
985 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
986 .features[FEAT_8000_0001_ECX] =
987 CPUID_EXT3_LAHF_LM,
988 .xlevel = 0x8000000A,
989 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
992 .name = "SandyBridge",
993 .level = 0xd,
994 .vendor = CPUID_VENDOR_INTEL,
995 .family = 6,
996 .model = 42,
997 .stepping = 1,
998 .features[FEAT_1_EDX] =
999 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1000 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1001 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1002 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1003 CPUID_DE | CPUID_FP87,
1004 .features[FEAT_1_ECX] =
1005 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1006 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1007 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1008 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1009 CPUID_EXT_SSE3,
1010 .features[FEAT_8000_0001_EDX] =
1011 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1012 CPUID_EXT2_SYSCALL,
1013 .features[FEAT_8000_0001_ECX] =
1014 CPUID_EXT3_LAHF_LM,
1015 .xlevel = 0x8000000A,
1016 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1019 .name = "Haswell",
1020 .level = 0xd,
1021 .vendor = CPUID_VENDOR_INTEL,
1022 .family = 6,
1023 .model = 60,
1024 .stepping = 1,
1025 .features[FEAT_1_EDX] =
1026 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1027 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1028 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1029 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1030 CPUID_DE | CPUID_FP87,
1031 .features[FEAT_1_ECX] =
1032 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1033 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1034 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1035 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1036 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1037 CPUID_EXT_PCID,
1038 .features[FEAT_8000_0001_EDX] =
1039 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1040 CPUID_EXT2_SYSCALL,
1041 .features[FEAT_8000_0001_ECX] =
1042 CPUID_EXT3_LAHF_LM,
1043 .features[FEAT_7_0_EBX] =
1044 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1045 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1046 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1047 CPUID_7_0_EBX_RTM,
1048 .xlevel = 0x8000000A,
1049 .model_id = "Intel Core Processor (Haswell)",
1052 .name = "Broadwell",
1053 .level = 0xd,
1054 .vendor = CPUID_VENDOR_INTEL,
1055 .family = 6,
1056 .model = 61,
1057 .stepping = 2,
1058 .features[FEAT_1_EDX] =
1059 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1060 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1061 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1062 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1063 CPUID_DE | CPUID_FP87,
1064 .features[FEAT_1_ECX] =
1065 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1066 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1067 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1068 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1069 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1070 CPUID_EXT_PCID,
1071 .features[FEAT_8000_0001_EDX] =
1072 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1073 CPUID_EXT2_SYSCALL,
1074 .features[FEAT_8000_0001_ECX] =
1075 CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1076 .features[FEAT_7_0_EBX] =
1077 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1078 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1079 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1080 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1081 CPUID_7_0_EBX_SMAP,
1082 .xlevel = 0x8000000A,
1083 .model_id = "Intel Core Processor (Broadwell)",
1086 .name = "Opteron_G1",
1087 .level = 5,
1088 .vendor = CPUID_VENDOR_AMD,
1089 .family = 15,
1090 .model = 6,
1091 .stepping = 1,
1092 .features[FEAT_1_EDX] =
1093 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1094 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1095 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1096 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1097 CPUID_DE | CPUID_FP87,
1098 .features[FEAT_1_ECX] =
1099 CPUID_EXT_SSE3,
1100 .features[FEAT_8000_0001_EDX] =
1101 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1102 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1103 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1104 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1105 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1106 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1107 .xlevel = 0x80000008,
1108 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1111 .name = "Opteron_G2",
1112 .level = 5,
1113 .vendor = CPUID_VENDOR_AMD,
1114 .family = 15,
1115 .model = 6,
1116 .stepping = 1,
1117 .features[FEAT_1_EDX] =
1118 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1119 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1120 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1121 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1122 CPUID_DE | CPUID_FP87,
1123 .features[FEAT_1_ECX] =
1124 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1125 .features[FEAT_8000_0001_EDX] =
1126 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1127 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1128 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1129 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1130 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1131 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1132 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1133 .features[FEAT_8000_0001_ECX] =
1134 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1135 .xlevel = 0x80000008,
1136 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1139 .name = "Opteron_G3",
1140 .level = 5,
1141 .vendor = CPUID_VENDOR_AMD,
1142 .family = 15,
1143 .model = 6,
1144 .stepping = 1,
1145 .features[FEAT_1_EDX] =
1146 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1147 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1148 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1149 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1150 CPUID_DE | CPUID_FP87,
1151 .features[FEAT_1_ECX] =
1152 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1153 CPUID_EXT_SSE3,
1154 .features[FEAT_8000_0001_EDX] =
1155 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1156 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1157 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1158 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1159 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1160 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1161 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1162 .features[FEAT_8000_0001_ECX] =
1163 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1164 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1165 .xlevel = 0x80000008,
1166 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1169 .name = "Opteron_G4",
1170 .level = 0xd,
1171 .vendor = CPUID_VENDOR_AMD,
1172 .family = 21,
1173 .model = 1,
1174 .stepping = 2,
1175 .features[FEAT_1_EDX] =
1176 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1177 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1178 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1179 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1180 CPUID_DE | CPUID_FP87,
1181 .features[FEAT_1_ECX] =
1182 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1183 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1184 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1185 CPUID_EXT_SSE3,
1186 .features[FEAT_8000_0001_EDX] =
1187 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1188 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1189 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1190 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1191 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1192 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1193 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1194 .features[FEAT_8000_0001_ECX] =
1195 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1196 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1197 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1198 CPUID_EXT3_LAHF_LM,
1199 .xlevel = 0x8000001A,
1200 .model_id = "AMD Opteron 62xx class CPU",
1203 .name = "Opteron_G5",
1204 .level = 0xd,
1205 .vendor = CPUID_VENDOR_AMD,
1206 .family = 21,
1207 .model = 2,
1208 .stepping = 0,
1209 .features[FEAT_1_EDX] =
1210 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1211 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1212 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1213 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1214 CPUID_DE | CPUID_FP87,
1215 .features[FEAT_1_ECX] =
1216 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1217 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1218 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1219 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1220 .features[FEAT_8000_0001_EDX] =
1221 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1222 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1223 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1224 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1225 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1226 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1227 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1228 .features[FEAT_8000_0001_ECX] =
1229 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1230 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1231 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1232 CPUID_EXT3_LAHF_LM,
1233 .xlevel = 0x8000001A,
1234 .model_id = "AMD Opteron 63xx class CPU",
1239 * x86_cpu_compat_set_features:
1240 * @cpu_model: CPU model name to be changed. If NULL, all CPU models are changed
1241 * @w: Identifies the feature word to be changed.
1242 * @feat_add: Feature bits to be added to feature word
1243 * @feat_remove: Feature bits to be removed from feature word
1245 * Change CPU model feature bits for compatibility.
1247 * This function may be used by machine-type compatibility functions
1248 * to enable or disable feature bits on specific CPU models.
1250 void x86_cpu_compat_set_features(const char *cpu_model, FeatureWord w,
1251 uint32_t feat_add, uint32_t feat_remove)
1253 X86CPUDefinition *def;
1254 int i;
1255 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1256 def = &builtin_x86_defs[i];
1257 if (!cpu_model || !strcmp(cpu_model, def->name)) {
1258 def->features[w] |= feat_add;
1259 def->features[w] &= ~feat_remove;
1264 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1265 bool migratable_only);
1267 #ifdef CONFIG_KVM
1269 static int cpu_x86_fill_model_id(char *str)
1271 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1272 int i;
1274 for (i = 0; i < 3; i++) {
1275 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
1276 memcpy(str + i * 16 + 0, &eax, 4);
1277 memcpy(str + i * 16 + 4, &ebx, 4);
1278 memcpy(str + i * 16 + 8, &ecx, 4);
1279 memcpy(str + i * 16 + 12, &edx, 4);
1281 return 0;
1284 static X86CPUDefinition host_cpudef;
1286 static Property host_x86_cpu_properties[] = {
1287 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1288 DEFINE_PROP_END_OF_LIST()
1291 /* class_init for the "host" CPU model
1293 * This function may be called before KVM is initialized.
1295 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1297 DeviceClass *dc = DEVICE_CLASS(oc);
1298 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1299 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1301 xcc->kvm_required = true;
1303 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1304 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1306 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1307 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1308 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1309 host_cpudef.stepping = eax & 0x0F;
1311 cpu_x86_fill_model_id(host_cpudef.model_id);
1313 xcc->cpu_def = &host_cpudef;
1314 host_cpudef.cache_info_passthrough = true;
1316 /* level, xlevel, xlevel2, and the feature words are initialized on
1317 * instance_init, because they require KVM to be initialized.
1320 dc->props = host_x86_cpu_properties;
1323 static void host_x86_cpu_initfn(Object *obj)
1325 X86CPU *cpu = X86_CPU(obj);
1326 CPUX86State *env = &cpu->env;
1327 KVMState *s = kvm_state;
1329 assert(kvm_enabled());
1331 /* We can't fill the features array here because we don't know yet if
1332 * "migratable" is true or false.
1334 cpu->host_features = true;
1336 env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1337 env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1338 env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1340 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1343 static const TypeInfo host_x86_cpu_type_info = {
1344 .name = X86_CPU_TYPE_NAME("host"),
1345 .parent = TYPE_X86_CPU,
1346 .instance_init = host_x86_cpu_initfn,
1347 .class_init = host_x86_cpu_class_init,
1350 #endif
1352 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1354 FeatureWordInfo *f = &feature_word_info[w];
1355 int i;
1357 for (i = 0; i < 32; ++i) {
1358 if (1 << i & mask) {
1359 const char *reg = get_register_name_32(f->cpuid_reg);
1360 assert(reg);
1361 fprintf(stderr, "warning: %s doesn't support requested feature: "
1362 "CPUID.%02XH:%s%s%s [bit %d]\n",
1363 kvm_enabled() ? "host" : "TCG",
1364 f->cpuid_eax, reg,
1365 f->feat_names[i] ? "." : "",
1366 f->feat_names[i] ? f->feat_names[i] : "", i);
1371 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, void *opaque,
1372 const char *name, Error **errp)
1374 X86CPU *cpu = X86_CPU(obj);
1375 CPUX86State *env = &cpu->env;
1376 int64_t value;
1378 value = (env->cpuid_version >> 8) & 0xf;
1379 if (value == 0xf) {
1380 value += (env->cpuid_version >> 20) & 0xff;
1382 visit_type_int(v, &value, name, errp);
1385 static void x86_cpuid_version_set_family(Object *obj, Visitor *v, void *opaque,
1386 const char *name, Error **errp)
1388 X86CPU *cpu = X86_CPU(obj);
1389 CPUX86State *env = &cpu->env;
1390 const int64_t min = 0;
1391 const int64_t max = 0xff + 0xf;
1392 Error *local_err = NULL;
1393 int64_t value;
1395 visit_type_int(v, &value, name, &local_err);
1396 if (local_err) {
1397 error_propagate(errp, local_err);
1398 return;
1400 if (value < min || value > max) {
1401 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1402 name ? name : "null", value, min, max);
1403 return;
1406 env->cpuid_version &= ~0xff00f00;
1407 if (value > 0x0f) {
1408 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1409 } else {
1410 env->cpuid_version |= value << 8;
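/* Families above 0x0f use the extended-family field: e.g. "family=21"
 * (the Opteron_G4/G5 models above) stores base family 0xF in EAX[11:8]
 * and 21 - 15 = 6 in EAX[27:20]; the getter above re-adds the two
 * fields.  Families <= 0x0f go directly into EAX[11:8].
 */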
1414 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, void *opaque,
1415 const char *name, Error **errp)
1417 X86CPU *cpu = X86_CPU(obj);
1418 CPUX86State *env = &cpu->env;
1419 int64_t value;
1421 value = (env->cpuid_version >> 4) & 0xf;
1422 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1423 visit_type_int(v, &value, name, errp);
1426 static void x86_cpuid_version_set_model(Object *obj, Visitor *v, void *opaque,
1427 const char *name, Error **errp)
1429 X86CPU *cpu = X86_CPU(obj);
1430 CPUX86State *env = &cpu->env;
1431 const int64_t min = 0;
1432 const int64_t max = 0xff;
1433 Error *local_err = NULL;
1434 int64_t value;
1436 visit_type_int(v, &value, name, &local_err);
1437 if (local_err) {
1438 error_propagate(errp, local_err);
1439 return;
1441 if (value < min || value > max) {
1442 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1443 name ? name : "null", value, min, max);
1444 return;
1447 env->cpuid_version &= ~0xf00f0;
1448 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
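/* The model number is split the same way: the low nibble goes to
 * EAX[7:4] and the high nibble to the extended-model field EAX[19:16],
 * so "model=60" (0x3c, the Haswell definition above) is stored as
 * 0xc/0x3 and reassembled by the getter.
 */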
1451 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1452 void *opaque, const char *name,
1453 Error **errp)
1455 X86CPU *cpu = X86_CPU(obj);
1456 CPUX86State *env = &cpu->env;
1457 int64_t value;
1459 value = env->cpuid_version & 0xf;
1460 visit_type_int(v, &value, name, errp);
1463 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1464 void *opaque, const char *name,
1465 Error **errp)
1467 X86CPU *cpu = X86_CPU(obj);
1468 CPUX86State *env = &cpu->env;
1469 const int64_t min = 0;
1470 const int64_t max = 0xf;
1471 Error *local_err = NULL;
1472 int64_t value;
1474 visit_type_int(v, &value, name, &local_err);
1475 if (local_err) {
1476 error_propagate(errp, local_err);
1477 return;
1479 if (value < min || value > max) {
1480 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1481 name ? name : "null", value, min, max);
1482 return;
1485 env->cpuid_version &= ~0xf;
1486 env->cpuid_version |= value & 0xf;
1489 static void x86_cpuid_get_level(Object *obj, Visitor *v, void *opaque,
1490 const char *name, Error **errp)
1492 X86CPU *cpu = X86_CPU(obj);
1494 visit_type_uint32(v, &cpu->env.cpuid_level, name, errp);
1497 static void x86_cpuid_set_level(Object *obj, Visitor *v, void *opaque,
1498 const char *name, Error **errp)
1500 X86CPU *cpu = X86_CPU(obj);
1502 visit_type_uint32(v, &cpu->env.cpuid_level, name, errp);
1505 static void x86_cpuid_get_xlevel(Object *obj, Visitor *v, void *opaque,
1506 const char *name, Error **errp)
1508 X86CPU *cpu = X86_CPU(obj);
1510 visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp);
1513 static void x86_cpuid_set_xlevel(Object *obj, Visitor *v, void *opaque,
1514 const char *name, Error **errp)
1516 X86CPU *cpu = X86_CPU(obj);
1518 visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp);
1521 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1523 X86CPU *cpu = X86_CPU(obj);
1524 CPUX86State *env = &cpu->env;
1525 char *value;
1527 value = (char *)g_malloc(CPUID_VENDOR_SZ + 1);
1528 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1529 env->cpuid_vendor3);
1530 return value;
1533 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1534 Error **errp)
1536 X86CPU *cpu = X86_CPU(obj);
1537 CPUX86State *env = &cpu->env;
1538 int i;
1540 if (strlen(value) != CPUID_VENDOR_SZ) {
1541 error_set(errp, QERR_PROPERTY_VALUE_BAD, "",
1542 "vendor", value);
1543 return;
1546 env->cpuid_vendor1 = 0;
1547 env->cpuid_vendor2 = 0;
1548 env->cpuid_vendor3 = 0;
1549 for (i = 0; i < 4; i++) {
1550 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1551 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1552 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1556 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1558 X86CPU *cpu = X86_CPU(obj);
1559 CPUX86State *env = &cpu->env;
1560 char *value;
1561 int i;
1563 value = g_malloc(48 + 1);
1564 for (i = 0; i < 48; i++) {
1565 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1567 value[48] = '\0';
1568 return value;
1571 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1572 Error **errp)
1574 X86CPU *cpu = X86_CPU(obj);
1575 CPUX86State *env = &cpu->env;
1576 int c, len, i;
1578 if (model_id == NULL) {
1579 model_id = "";
1581 len = strlen(model_id);
1582 memset(env->cpuid_model, 0, 48);
1583 for (i = 0; i < 48; i++) {
1584 if (i >= len) {
1585 c = '\0';
1586 } else {
1587 c = (uint8_t)model_id[i];
1589 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
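/* The 48-character brand string is packed four bytes per uint32_t,
 * little-endian, into cpuid_model[12]; shorter strings are NUL padded.
 * This is the same layout cpu_x86_fill_model_id() reads from the host
 * and that CPUID leaves 0x80000002..0x80000004 return to the guest.
 */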
1593 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, void *opaque,
1594 const char *name, Error **errp)
1596 X86CPU *cpu = X86_CPU(obj);
1597 int64_t value;
1599 value = cpu->env.tsc_khz * 1000;
1600 visit_type_int(v, &value, name, errp);
1603 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, void *opaque,
1604 const char *name, Error **errp)
1606 X86CPU *cpu = X86_CPU(obj);
1607 const int64_t min = 0;
1608 const int64_t max = INT64_MAX;
1609 Error *local_err = NULL;
1610 int64_t value;
1612 visit_type_int(v, &value, name, &local_err);
1613 if (local_err) {
1614 error_propagate(errp, local_err);
1615 return;
1617 if (value < min || value > max) {
1618 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1619 name ? name : "null", value, min, max);
1620 return;
1623 cpu->env.tsc_khz = value / 1000;
1626 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, void *opaque,
1627 const char *name, Error **errp)
1629 X86CPU *cpu = X86_CPU(obj);
1630 int64_t value = cpu->env.cpuid_apic_id;
1632 visit_type_int(v, &value, name, errp);
1635 static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, void *opaque,
1636 const char *name, Error **errp)
1638 X86CPU *cpu = X86_CPU(obj);
1639 DeviceState *dev = DEVICE(obj);
1640 const int64_t min = 0;
1641 const int64_t max = UINT32_MAX;
1642 Error *error = NULL;
1643 int64_t value;
1645 if (dev->realized) {
1646 error_setg(errp, "Attempt to set property '%s' on '%s' after "
1647 "it was realized", name, object_get_typename(obj));
1648 return;
1651 visit_type_int(v, &value, name, &error);
1652 if (error) {
1653 error_propagate(errp, error);
1654 return;
1656 if (value < min || value > max) {
1657 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1658 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
1659 object_get_typename(obj), name, value, min, max);
1660 return;
1663 if ((value != cpu->env.cpuid_apic_id) && cpu_exists(value)) {
1664 error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
1665 return;
1667 cpu->env.cpuid_apic_id = value;
1670 /* Generic getter for "feature-words" and "filtered-features" properties */
1671 static void x86_cpu_get_feature_words(Object *obj, Visitor *v, void *opaque,
1672 const char *name, Error **errp)
1674 uint32_t *array = (uint32_t *)opaque;
1675 FeatureWord w;
1676 Error *err = NULL;
1677 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1678 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1679 X86CPUFeatureWordInfoList *list = NULL;
1681 for (w = 0; w < FEATURE_WORDS; w++) {
1682 FeatureWordInfo *wi = &feature_word_info[w];
1683 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1684 qwi->cpuid_input_eax = wi->cpuid_eax;
1685 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1686 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1687 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1688 qwi->features = array[w];
1690 /* List will be in reverse order, but order shouldn't matter */
1691 list_entries[w].next = list;
1692 list_entries[w].value = &word_infos[w];
1693 list = &list_entries[w];
1696 visit_type_X86CPUFeatureWordInfoList(v, &list, "feature-words", &err);
1697 error_propagate(errp, err);
1700 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1701 const char *name, Error **errp)
1703 X86CPU *cpu = X86_CPU(obj);
1704 int64_t value = cpu->hyperv_spinlock_attempts;
1706 visit_type_int(v, &value, name, errp);
1709 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1710 const char *name, Error **errp)
1712 const int64_t min = 0xFFF;
1713 const int64_t max = UINT_MAX;
1714 X86CPU *cpu = X86_CPU(obj);
1715 Error *err = NULL;
1716 int64_t value;
1718 visit_type_int(v, &value, name, &err);
1719 if (err) {
1720 error_propagate(errp, err);
1721 return;
1724 if (value < min || value > max) {
1725 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1726 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1727 object_get_typename(obj), name ? name : "null",
1728 value, min, max);
1729 return;
1731 cpu->hyperv_spinlock_attempts = value;
1734 static PropertyInfo qdev_prop_spinlocks = {
1735 .name = "int",
1736 .get = x86_get_hv_spinlocks,
1737 .set = x86_set_hv_spinlocks,
1740 /* Convert all '_' in a feature string option name to '-', to make feature
1741 * name conform to QOM property naming rule, which uses '-' instead of '_'.
1743 static inline void feat2prop(char *s)
1745 while ((s = strchr(s, '_'))) {
1746 *s = '-';
1750 /* Parse "+feature,-feature,feature=foo" CPU feature string
1752 static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
1753 Error **errp)
1755 X86CPU *cpu = X86_CPU(cs);
1756 char *featurestr; /* Single 'key=value" string being parsed */
1757 FeatureWord w;
1758 /* Features to be added */
1759 FeatureWordArray plus_features = { 0 };
1760 /* Features to be removed */
1761 FeatureWordArray minus_features = { 0 };
1762 uint32_t numvalue;
1763 CPUX86State *env = &cpu->env;
1764 Error *local_err = NULL;
1766 featurestr = features ? strtok(features, ",") : NULL;
1768 while (featurestr) {
1769 char *val;
1770 if (featurestr[0] == '+') {
1771 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
1772 } else if (featurestr[0] == '-') {
1773 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
1774 } else if ((val = strchr(featurestr, '='))) {
1775 *val = 0; val++;
1776 feat2prop(featurestr);
1777 if (!strcmp(featurestr, "xlevel")) {
1778 char *err;
1779 char num[32];
1781 numvalue = strtoul(val, &err, 0);
1782 if (!*val || *err) {
1783 error_setg(errp, "bad numerical value %s", val);
1784 return;
1786 if (numvalue < 0x80000000) {
1787 error_report("xlevel value shall always be >= 0x80000000"
1788 ", fixup will be removed in future versions");
1789 numvalue += 0x80000000;
1791 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1792 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1793 } else if (!strcmp(featurestr, "tsc-freq")) {
1794 int64_t tsc_freq;
1795 char *err;
1796 char num[32];
1798 tsc_freq = strtosz_suffix_unit(val, &err,
1799 STRTOSZ_DEFSUFFIX_B, 1000);
1800 if (tsc_freq < 0 || *err) {
1801 error_setg(errp, "bad numerical value %s", val);
1802 return;
1804 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
1805 object_property_parse(OBJECT(cpu), num, "tsc-frequency",
1806 &local_err);
1807 } else if (!strcmp(featurestr, "hv-spinlocks")) {
1808 char *err;
1809 const int min = 0xFFF;
1810 char num[32];
1811 numvalue = strtoul(val, &err, 0);
1812 if (!*val || *err) {
1813 error_setg(errp, "bad numerical value %s", val);
1814 return;
1816 if (numvalue < min) {
1817 error_report("hv-spinlocks value shall always be >= 0x%x"
1818 ", fixup will be removed in future versions",
1819 min);
1820 numvalue = min;
1822 snprintf(num, sizeof(num), "%" PRId32, numvalue);
1823 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1824 } else {
1825 object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
1827 } else {
1828 feat2prop(featurestr);
1829 object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
1831 if (local_err) {
1832 error_propagate(errp, local_err);
1833 return;
1835 featurestr = strtok(NULL, ",");
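    /* When cpu->host_features is set (e.g. for the "host" CPU model), start
     * from everything the accelerator supports; the explicit '+'/'-' flags
     * collected above are then applied on top, so they always win. */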
1838 if (cpu->host_features) {
1839 for (w = 0; w < FEATURE_WORDS; w++) {
1840 env->features[w] =
1841 x86_cpu_get_supported_feature_word(w, cpu->migratable);
1845 for (w = 0; w < FEATURE_WORDS; w++) {
1846 env->features[w] |= plus_features[w];
1847 env->features[w] &= ~minus_features[w];
1851 /* Generate a composite string into buf of all CPUID feature names in
1852  * featureset selected by fbits.  Indicate truncation at bufsize in the event
1853  * of overflow.  If flags is non-zero, suppress names undefined in featureset.
1855 static void listflags(char *buf, int bufsize, uint32_t fbits,
1856 const char **featureset, uint32_t flags)
1858 const char **p = &featureset[31];
1859 char *q, *b, bit;
1860 int nc;
1862 b = 4 <= bufsize ? buf + (bufsize -= 3) - 1 : NULL;
1863 *buf = '\0';
1864 for (q = buf, bit = 31; fbits && bufsize; --p, fbits &= ~(1 << bit), --bit)
1865 if (fbits & 1 << bit && (*p || !flags)) {
1866 if (*p)
1867 nc = snprintf(q, bufsize, "%s%s", q == buf ? "" : " ", *p);
1868 else
1869 nc = snprintf(q, bufsize, "%s[%d]", q == buf ? "" : " ", bit);
1870 if (bufsize <= nc) {
1871 if (b) {
1872 memcpy(b, "...", sizeof("..."));
1874 return;
1876 q += nc;
1877 bufsize -= nc;
1881 /* Print the list of available CPU models and the recognized CPUID flag names. */
1882 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
1884 X86CPUDefinition *def;
1885 char buf[256];
1886 int i;
1888 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1889 def = &builtin_x86_defs[i];
1890 snprintf(buf, sizeof(buf), "%s", def->name);
1891 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
1893 #ifdef CONFIG_KVM
1894 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
1895 "KVM processor with all supported host features "
1896 "(only available in KVM mode)");
1897 #endif
1899 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
1900 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
1901 FeatureWordInfo *fw = &feature_word_info[i];
1903 listflags(buf, sizeof(buf), (uint32_t)~0, fw->feat_names, 1);
1904 (*cpu_fprintf)(f, " %s\n", buf);
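/* Build the CpuDefinitionInfoList returned for the QMP "query-cpu-definitions"
 * command: one entry per built-in CPU model. */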
1908 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
1910 CpuDefinitionInfoList *cpu_list = NULL;
1911 X86CPUDefinition *def;
1912 int i;
1914 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1915 CpuDefinitionInfoList *entry;
1916 CpuDefinitionInfo *info;
1918 def = &builtin_x86_defs[i];
1919 info = g_malloc0(sizeof(*info));
1920 info->name = g_strdup(def->name);
1922 entry = g_malloc0(sizeof(*entry));
1923 entry->value = info;
1924 entry->next = cpu_list;
1925 cpu_list = entry;
1928 return cpu_list;
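/* Return the bits of feature word 'w' that the current accelerator can
 * provide: KVM's supported CPUID bits under KVM, the static TCG mask under
 * TCG, and ~0 otherwise; with migratable_only, restrict the result to
 * migration-safe flags. */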
1931 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1932 bool migratable_only)
1934 FeatureWordInfo *wi = &feature_word_info[w];
1935 uint32_t r;
1937 if (kvm_enabled()) {
1938 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
1939 wi->cpuid_ecx,
1940 wi->cpuid_reg);
1941 } else if (tcg_enabled()) {
1942 r = wi->tcg_features;
1943 } else {
1944 return ~0;
1946 if (migratable_only) {
1947 r &= x86_cpu_get_migratable_flags(w);
1949 return r;
1953 * Filters CPU feature words based on host availability of each feature.
1955 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
1957 static int x86_cpu_filter_features(X86CPU *cpu)
1959 CPUX86State *env = &cpu->env;
1960 FeatureWord w;
1961 int rv = 0;
1963 for (w = 0; w < FEATURE_WORDS; w++) {
1964 uint32_t host_feat =
1965 x86_cpu_get_supported_feature_word(w, cpu->migratable);
1966 uint32_t requested_features = env->features[w];
1967 env->features[w] &= host_feat;
1968 cpu->filtered_features[w] = requested_features & ~env->features[w];
1969 if (cpu->filtered_features[w]) {
1970 if (cpu->check_cpuid || cpu->enforce_cpuid) {
1971 report_unavailable_features(w, cpu->filtered_features[w]);
1973 rv = 1;
1977 return rv;
1980 /* Load data from X86CPUDefinition
1982 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
1984 CPUX86State *env = &cpu->env;
1985 const char *vendor;
1986 char host_vendor[CPUID_VENDOR_SZ + 1];
1987 FeatureWord w;
1989 object_property_set_int(OBJECT(cpu), def->level, "level", errp);
1990 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
1991 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
1992 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
1993 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
1994 env->cpuid_xlevel2 = def->xlevel2;
1995 cpu->cache_info_passthrough = def->cache_info_passthrough;
1996 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
1997 for (w = 0; w < FEATURE_WORDS; w++) {
1998 env->features[w] = def->features[w];
2001 /* Special cases not set in the X86CPUDefinition structs: */
2002 if (kvm_enabled()) {
2003 FeatureWord w;
2004 for (w = 0; w < FEATURE_WORDS; w++) {
2005 env->features[w] |= kvm_default_features[w];
2006 env->features[w] &= ~kvm_default_unset_features[w];
2010 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2012 /* sysenter isn't supported in compatibility mode on AMD,
2013 * syscall isn't supported in compatibility mode on Intel.
2014 * Normally we advertise the actual CPU vendor, but you can
2015 * override this using the 'vendor' property if you want to use
2016 * KVM's sysenter/syscall emulation in compatibility mode and
2017 * when doing cross vendor migration
2019 vendor = def->vendor;
2020 if (kvm_enabled()) {
2021 uint32_t ebx = 0, ecx = 0, edx = 0;
2022 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2023 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2024 vendor = host_vendor;
2027 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
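/* Create (but do not realize) an X86CPU from a "model[,features...]" style
 * string; returns NULL and sets errp on failure. */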
2031 X86CPU *cpu_x86_create(const char *cpu_model, DeviceState *icc_bridge,
2032 Error **errp)
2034 X86CPU *cpu = NULL;
2035 X86CPUClass *xcc;
2036 ObjectClass *oc;
2037 gchar **model_pieces;
2038 char *name, *features;
2039 Error *error = NULL;
2041 model_pieces = g_strsplit(cpu_model, ",", 2);
2042 if (!model_pieces[0]) {
2043 error_setg(&error, "Invalid/empty CPU model name");
2044 goto out;
2046 name = model_pieces[0];
2047 features = model_pieces[1];
2049 oc = x86_cpu_class_by_name(name);
2050 if (oc == NULL) {
2051 error_setg(&error, "Unable to find CPU definition: %s", name);
2052 goto out;
2054 xcc = X86_CPU_CLASS(oc);
2056 if (xcc->kvm_required && !kvm_enabled()) {
2057 error_setg(&error, "CPU model '%s' requires KVM", name);
2058 goto out;
2061 cpu = X86_CPU(object_new(object_class_get_name(oc)));
2063 #ifndef CONFIG_USER_ONLY
2064 if (icc_bridge == NULL) {
2065 error_setg(&error, "Invalid icc-bridge value");
2066 goto out;
2068 qdev_set_parent_bus(DEVICE(cpu), qdev_get_child_bus(icc_bridge, "icc"));
2069 object_unref(OBJECT(cpu));
2070 #endif
2072 x86_cpu_parse_featurestr(CPU(cpu), features, &error);
2073 if (error) {
2074 goto out;
2077 out:
2078 if (error != NULL) {
2079 error_propagate(errp, error);
2080 if (cpu) {
2081 object_unref(OBJECT(cpu));
2082 cpu = NULL;
2085 g_strfreev(model_pieces);
2086 return cpu;
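/* Convenience helper: create a CPU from 'cpu_model' and immediately realize
 * it, reporting any error that occurs. */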
2089 X86CPU *cpu_x86_init(const char *cpu_model)
2091 Error *error = NULL;
2092 X86CPU *cpu;
2094 cpu = cpu_x86_create(cpu_model, NULL, &error);
2095 if (error) {
2096 goto out;
2099 object_property_set_bool(OBJECT(cpu), true, "realized", &error);
2101 out:
2102 if (error) {
2103 error_report("%s", error_get_pretty(error));
2104 error_free(error);
2105 if (cpu != NULL) {
2106 object_unref(OBJECT(cpu));
2107 cpu = NULL;
2110 return cpu;
2113 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2115 X86CPUDefinition *cpudef = data;
2116 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2118 xcc->cpu_def = cpudef;
2121 static void x86_register_cpudef_type(X86CPUDefinition *def)
2123 char *typename = x86_cpu_type_name(def->name);
2124 TypeInfo ti = {
2125 .name = typename,
2126 .parent = TYPE_X86_CPU,
2127 .class_init = x86_cpu_cpudef_class_init,
2128 .class_data = def,
2131 type_register(&ti);
2132 g_free(typename);
2135 #if !defined(CONFIG_USER_ONLY)
2137 void cpu_clear_apic_feature(CPUX86State *env)
2139 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2142 #endif /* !CONFIG_USER_ONLY */
2144 /* Initialize list of CPU models, filling some non-static fields if necessary
2146 void x86_cpudef_setup(void)
2148 int i, j;
2149 static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
2151 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
2152 X86CPUDefinition *def = &builtin_x86_defs[i];
2154 /* Look for specific "cpudef" models that */
2155 /* have the QEMU version in .model_id */
2156 for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
2157 if (strcmp(model_with_versions[j], def->name) == 0) {
2158 pstrcpy(def->model_id, sizeof(def->model_id),
2159 "QEMU Virtual CPU version ");
2160 pstrcat(def->model_id, sizeof(def->model_id),
2161 qemu_get_version());
2162 break;
2168 static void get_cpuid_vendor(CPUX86State *env, uint32_t *ebx,
2169 uint32_t *ecx, uint32_t *edx)
2171 *ebx = env->cpuid_vendor1;
2172 *edx = env->cpuid_vendor2;
2173 *ecx = env->cpuid_vendor3;
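/* Core CPUID emulation: fill eax/ebx/ecx/edx for leaf 'index' and sub-leaf
 * 'count' from the configured feature words and the hardcoded cache and
 * topology constants (L1D_*, L1I_*, L2_*) defined earlier in this file. */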
2176 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2177 uint32_t *eax, uint32_t *ebx,
2178 uint32_t *ecx, uint32_t *edx)
2180 X86CPU *cpu = x86_env_get_cpu(env);
2181 CPUState *cs = CPU(cpu);
2183 /* test if maximum index reached */
2184 if (index & 0x80000000) {
2185 if (index > env->cpuid_xlevel) {
2186 if (env->cpuid_xlevel2 > 0) {
2187 /* Handle the Centaur's CPUID instruction. */
2188 if (index > env->cpuid_xlevel2) {
2189 index = env->cpuid_xlevel2;
2190 } else if (index < 0xC0000000) {
2191 index = env->cpuid_xlevel;
2193 } else {
2194 /* Intel documentation states that invalid EAX input will
2195 * return the same information as EAX=cpuid_level
2196 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2198 index = env->cpuid_level;
2201 } else {
2202 if (index > env->cpuid_level)
2203 index = env->cpuid_level;
2206 switch(index) {
2207 case 0:
2208 *eax = env->cpuid_level;
2209 get_cpuid_vendor(env, ebx, ecx, edx);
2210 break;
2211 case 1:
2212 *eax = env->cpuid_version;
2213         *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH line size in quadwords (64 bytes); Linux relies on it. */
2214 *ecx = env->features[FEAT_1_ECX];
2215 *edx = env->features[FEAT_1_EDX];
2216 if (cs->nr_cores * cs->nr_threads > 1) {
2217 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2218 *edx |= 1 << 28; /* HTT bit */
2220 break;
2221 case 2:
2222 /* cache info: needed for Pentium Pro compatibility */
2223 if (cpu->cache_info_passthrough) {
2224 host_cpuid(index, 0, eax, ebx, ecx, edx);
2225 break;
2227 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2228 *ebx = 0;
2229 *ecx = 0;
2230 *edx = (L1D_DESCRIPTOR << 16) | \
2231 (L1I_DESCRIPTOR << 8) | \
2232 (L2_DESCRIPTOR);
2233 break;
2234 case 4:
2235 /* cache info: needed for Core compatibility */
2236 if (cpu->cache_info_passthrough) {
2237 host_cpuid(index, count, eax, ebx, ecx, edx);
2238 *eax &= ~0xFC000000;
2239 } else {
2240 *eax = 0;
2241 switch (count) {
2242 case 0: /* L1 dcache info */
2243 *eax |= CPUID_4_TYPE_DCACHE | \
2244 CPUID_4_LEVEL(1) | \
2245 CPUID_4_SELF_INIT_LEVEL;
2246 *ebx = (L1D_LINE_SIZE - 1) | \
2247 ((L1D_PARTITIONS - 1) << 12) | \
2248 ((L1D_ASSOCIATIVITY - 1) << 22);
2249 *ecx = L1D_SETS - 1;
2250 *edx = CPUID_4_NO_INVD_SHARING;
2251 break;
2252 case 1: /* L1 icache info */
2253 *eax |= CPUID_4_TYPE_ICACHE | \
2254 CPUID_4_LEVEL(1) | \
2255 CPUID_4_SELF_INIT_LEVEL;
2256 *ebx = (L1I_LINE_SIZE - 1) | \
2257 ((L1I_PARTITIONS - 1) << 12) | \
2258 ((L1I_ASSOCIATIVITY - 1) << 22);
2259 *ecx = L1I_SETS - 1;
2260 *edx = CPUID_4_NO_INVD_SHARING;
2261 break;
2262 case 2: /* L2 cache info */
2263 *eax |= CPUID_4_TYPE_UNIFIED | \
2264 CPUID_4_LEVEL(2) | \
2265 CPUID_4_SELF_INIT_LEVEL;
2266 if (cs->nr_threads > 1) {
2267 *eax |= (cs->nr_threads - 1) << 14;
2269 *ebx = (L2_LINE_SIZE - 1) | \
2270 ((L2_PARTITIONS - 1) << 12) | \
2271 ((L2_ASSOCIATIVITY - 1) << 22);
2272 *ecx = L2_SETS - 1;
2273 *edx = CPUID_4_NO_INVD_SHARING;
2274 break;
2275 default: /* end of info */
2276 *eax = 0;
2277 *ebx = 0;
2278 *ecx = 0;
2279 *edx = 0;
2280 break;
2284 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2285 if ((*eax & 31) && cs->nr_cores > 1) {
2286 *eax |= (cs->nr_cores - 1) << 26;
2288 break;
2289 case 5:
2290 /* mwait info: needed for Core compatibility */
2291 *eax = 0; /* Smallest monitor-line size in bytes */
2292 *ebx = 0; /* Largest monitor-line size in bytes */
2293 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2294 *edx = 0;
2295 break;
2296 case 6:
2297 /* Thermal and Power Leaf */
2298 *eax = 0;
2299 *ebx = 0;
2300 *ecx = 0;
2301 *edx = 0;
2302 break;
2303 case 7:
2304 /* Structured Extended Feature Flags Enumeration Leaf */
2305 if (count == 0) {
2306 *eax = 0; /* Maximum ECX value for sub-leaves */
2307 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2308 *ecx = 0; /* Reserved */
2309 *edx = 0; /* Reserved */
2310 } else {
2311 *eax = 0;
2312 *ebx = 0;
2313 *ecx = 0;
2314 *edx = 0;
2316 break;
2317 case 9:
2318 /* Direct Cache Access Information Leaf */
2319 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2320 *ebx = 0;
2321 *ecx = 0;
2322 *edx = 0;
2323 break;
2324 case 0xA:
2325 /* Architectural Performance Monitoring Leaf */
2326 if (kvm_enabled() && cpu->enable_pmu) {
2327 KVMState *s = cs->kvm_state;
2329 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2330 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2331 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2332 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2333 } else {
2334 *eax = 0;
2335 *ebx = 0;
2336 *ecx = 0;
2337 *edx = 0;
2339 break;
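    /* Leaf 0xD enumerates XSAVE state components.  Sub-leaf 0 reports the
     * supported xstate mask and save-area sizes, sub-leaf 1 the XSAVE
     * extensions reported by KVM, and sub-leaves >= 2 the size and offset of
     * each component listed in ext_save_areas (such as AVX and the AVX-512
     * state added by this patch). */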
2340 case 0xD: {
2341 KVMState *s = cs->kvm_state;
2342 uint64_t kvm_mask;
2343 int i;
2345 /* Processor Extended State */
2346 *eax = 0;
2347 *ebx = 0;
2348 *ecx = 0;
2349 *edx = 0;
2350 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) || !kvm_enabled()) {
2351 break;
2353 kvm_mask =
2354 kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX) |
2355 ((uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32);
2357 if (count == 0) {
2358 *ecx = 0x240;
2359 for (i = 2; i < ARRAY_SIZE(ext_save_areas); i++) {
2360 const ExtSaveArea *esa = &ext_save_areas[i];
2361 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2362 (kvm_mask & (1 << i)) != 0) {
2363 if (i < 32) {
2364 *eax |= 1 << i;
2365 } else {
2366 *edx |= 1 << (i - 32);
2368 *ecx = MAX(*ecx, esa->offset + esa->size);
2371 *eax |= kvm_mask & (XSTATE_FP | XSTATE_SSE);
2372 *ebx = *ecx;
2373 } else if (count == 1) {
2374 *eax = kvm_arch_get_supported_cpuid(s, 0xd, 1, R_EAX);
2375 } else if (count < ARRAY_SIZE(ext_save_areas)) {
2376 const ExtSaveArea *esa = &ext_save_areas[count];
2377 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2378 (kvm_mask & (1 << count)) != 0) {
2379 *eax = esa->size;
2380 *ebx = esa->offset;
2383 break;
2385 case 0x80000000:
2386 *eax = env->cpuid_xlevel;
2387 *ebx = env->cpuid_vendor1;
2388 *edx = env->cpuid_vendor2;
2389 *ecx = env->cpuid_vendor3;
2390 break;
2391 case 0x80000001:
2392 *eax = env->cpuid_version;
2393 *ebx = 0;
2394 *ecx = env->features[FEAT_8000_0001_ECX];
2395 *edx = env->features[FEAT_8000_0001_EDX];
2397 /* The Linux kernel checks for the CMPLegacy bit and
2398 * discards multiple thread information if it is set.
2399      * So don't set it here for Intel to make Linux guests happy.
2401 if (cs->nr_cores * cs->nr_threads > 1) {
2402 uint32_t tebx, tecx, tedx;
2403 get_cpuid_vendor(env, &tebx, &tecx, &tedx);
2404 if (tebx != CPUID_VENDOR_INTEL_1 ||
2405 tedx != CPUID_VENDOR_INTEL_2 ||
2406 tecx != CPUID_VENDOR_INTEL_3) {
2407 *ecx |= 1 << 1; /* CmpLegacy bit */
2410 break;
2411 case 0x80000002:
2412 case 0x80000003:
2413 case 0x80000004:
2414 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2415 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2416 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2417 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2418 break;
2419 case 0x80000005:
2420 /* cache info (L1 cache) */
2421 if (cpu->cache_info_passthrough) {
2422 host_cpuid(index, 0, eax, ebx, ecx, edx);
2423 break;
2425 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2426 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2427 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2428 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2429 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2430 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2431 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2432 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2433 break;
2434 case 0x80000006:
2435 /* cache info (L2 cache) */
2436 if (cpu->cache_info_passthrough) {
2437 host_cpuid(index, 0, eax, ebx, ecx, edx);
2438 break;
2440 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2441 (L2_DTLB_2M_ENTRIES << 16) | \
2442 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2443 (L2_ITLB_2M_ENTRIES);
2444 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2445 (L2_DTLB_4K_ENTRIES << 16) | \
2446 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2447 (L2_ITLB_4K_ENTRIES);
2448 *ecx = (L2_SIZE_KB_AMD << 16) | \
2449 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2450 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2451 *edx = ((L3_SIZE_KB/512) << 18) | \
2452 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2453 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2454 break;
2455 case 0x80000007:
2456 *eax = 0;
2457 *ebx = 0;
2458 *ecx = 0;
2459 *edx = env->features[FEAT_8000_0007_EDX];
2460 break;
2461 case 0x80000008:
2462 /* virtual & phys address size in low 2 bytes. */
2463 /* XXX: This value must match the one used in the MMU code. */
2464 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2465 /* 64 bit processor */
2466 /* XXX: The physical address space is limited to 42 bits in exec.c. */
2467 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
2468 } else {
2469 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
2470 *eax = 0x00000024; /* 36 bits physical */
2471 } else {
2472 *eax = 0x00000020; /* 32 bits physical */
2475 *ebx = 0;
2476 *ecx = 0;
2477 *edx = 0;
2478 if (cs->nr_cores * cs->nr_threads > 1) {
2479 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2481 break;
2482 case 0x8000000A:
2483 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2484 *eax = 0x00000001; /* SVM Revision */
2485 *ebx = 0x00000010; /* nr of ASIDs */
2486 *ecx = 0;
2487 *edx = env->features[FEAT_SVM]; /* optional features */
2488 } else {
2489 *eax = 0;
2490 *ebx = 0;
2491 *ecx = 0;
2492 *edx = 0;
2494 break;
2495 case 0xC0000000:
2496 *eax = env->cpuid_xlevel2;
2497 *ebx = 0;
2498 *ecx = 0;
2499 *edx = 0;
2500 break;
2501 case 0xC0000001:
2502 /* Support for VIA CPU's CPUID instruction */
2503 *eax = env->cpuid_version;
2504 *ebx = 0;
2505 *ecx = 0;
2506 *edx = env->features[FEAT_C000_0001_EDX];
2507 break;
2508 case 0xC0000002:
2509 case 0xC0000003:
2510 case 0xC0000004:
2511         /* Reserved for future use; currently filled with zero */
2512 *eax = 0;
2513 *ebx = 0;
2514 *ecx = 0;
2515 *edx = 0;
2516 break;
2517 default:
2518 /* reserved values: zero */
2519 *eax = 0;
2520 *ebx = 0;
2521 *ecx = 0;
2522 *edx = 0;
2523 break;
2527 /* CPUClass::reset() */
2528 static void x86_cpu_reset(CPUState *s)
2530 X86CPU *cpu = X86_CPU(s);
2531 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2532 CPUX86State *env = &cpu->env;
2533 int i;
2535 xcc->parent_reset(s);
2537 memset(env, 0, offsetof(CPUX86State, cpuid_level));
2539 tlb_flush(s, 1);
2541 env->old_exception = -1;
2543 /* init to reset state */
2545 #ifdef CONFIG_SOFTMMU
2546 env->hflags |= HF_SOFTMMU_MASK;
2547 #endif
2548 env->hflags2 |= HF2_GIF_MASK;
2550 cpu_x86_update_cr0(env, 0x60000010);
2551 env->a20_mask = ~0x0;
2552 env->smbase = 0x30000;
2554 env->idt.limit = 0xffff;
2555 env->gdt.limit = 0xffff;
2556 env->ldt.limit = 0xffff;
2557 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2558 env->tr.limit = 0xffff;
2559 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
2561 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2562 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2563 DESC_R_MASK | DESC_A_MASK);
2564 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2565 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2566 DESC_A_MASK);
2567 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2568 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2569 DESC_A_MASK);
2570 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2571 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2572 DESC_A_MASK);
2573 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2574 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2575 DESC_A_MASK);
2576 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2577 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2578 DESC_A_MASK);
2580 env->eip = 0xfff0;
2581 env->regs[R_EDX] = env->cpuid_version;
2583 env->eflags = 0x2;
2585 /* FPU init */
2586 for (i = 0; i < 8; i++) {
2587 env->fptags[i] = 1;
2589 cpu_set_fpuc(env, 0x37f);
2591 env->mxcsr = 0x1f80;
2592 env->xstate_bv = XSTATE_FP | XSTATE_SSE;
2594 env->pat = 0x0007040600070406ULL;
2595 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2597 memset(env->dr, 0, sizeof(env->dr));
2598 env->dr[6] = DR6_FIXED_1;
2599 env->dr[7] = DR7_FIXED_1;
2600 cpu_breakpoint_remove_all(s, BP_CPU);
2601 cpu_watchpoint_remove_all(s, BP_CPU);
2603 env->xcr0 = 1;
2606 * SDM 11.11.5 requires:
2607 * - IA32_MTRR_DEF_TYPE MSR.E = 0
2608 * - IA32_MTRR_PHYSMASKn.V = 0
2609 * All other bits are undefined. For simplification, zero it all.
2611 env->mtrr_deftype = 0;
2612 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
2613 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
2615 #if !defined(CONFIG_USER_ONLY)
2616 /* We hard-wire the BSP to the first CPU. */
2617 if (s->cpu_index == 0) {
2618 apic_designate_bsp(cpu->apic_state);
2621 s->halted = !cpu_is_bsp(cpu);
2623 if (kvm_enabled()) {
2624 kvm_arch_reset_vcpu(cpu);
2626 #endif
2629 #ifndef CONFIG_USER_ONLY
2630 bool cpu_is_bsp(X86CPU *cpu)
2632 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2635 /* TODO: remove me when reset over QOM tree is implemented */
2636 static void x86_cpu_machine_reset_cb(void *opaque)
2638 X86CPU *cpu = opaque;
2639 cpu_reset(CPU(cpu));
2641 #endif
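/* Enable the machine-check architecture when the CPU reports family >= 6 and
 * both MCE and MCA in CPUID[1].EDX: advertise the default bank count in
 * MCG_CAP, enable all of MCG_CTL, and set every bank control MSR to all-ones. */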
2643 static void mce_init(X86CPU *cpu)
2645 CPUX86State *cenv = &cpu->env;
2646 unsigned int bank;
2648 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2649 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2650 (CPUID_MCE | CPUID_MCA)) {
2651 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2652 cenv->mcg_ctl = ~(uint64_t)0;
2653 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2654 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2659 #ifndef CONFIG_USER_ONLY
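/* Create the local APIC device for this CPU, preferring the "kvm-apic" or
 * "xen-apic" variant when the corresponding accelerator is in use. */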
2660 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2662 CPUX86State *env = &cpu->env;
2663 DeviceState *dev = DEVICE(cpu);
2664 APICCommonState *apic;
2665 const char *apic_type = "apic";
2667 if (kvm_irqchip_in_kernel()) {
2668 apic_type = "kvm-apic";
2669 } else if (xen_enabled()) {
2670 apic_type = "xen-apic";
2673 cpu->apic_state = qdev_try_create(qdev_get_parent_bus(dev), apic_type);
2674 if (cpu->apic_state == NULL) {
2675 error_setg(errp, "APIC device '%s' could not be created", apic_type);
2676 return;
2679 object_property_add_child(OBJECT(cpu), "apic",
2680 OBJECT(cpu->apic_state), NULL);
2681 qdev_prop_set_uint8(cpu->apic_state, "id", env->cpuid_apic_id);
2682 /* TODO: convert to link<> */
2683 apic = APIC_COMMON(cpu->apic_state);
2684 apic->cpu = cpu;
2687 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2689 if (cpu->apic_state == NULL) {
2690 return;
2693 if (qdev_init(cpu->apic_state)) {
2694 error_setg(errp, "APIC device '%s' could not be initialized",
2695 object_get_typename(OBJECT(cpu->apic_state)));
2696 return;
2699 #else
2700 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2703 #endif
2706 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
2707 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
2708 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
2709 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
2710 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
2711 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
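/* DeviceClass::realize for x86 CPUs: filter the requested features against
 * what the accelerator supports, create the APIC, set up MCE, create the
 * vCPU and finally reset it. */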
2712 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
2714 CPUState *cs = CPU(dev);
2715 X86CPU *cpu = X86_CPU(dev);
2716 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
2717 CPUX86State *env = &cpu->env;
2718 Error *local_err = NULL;
2719 static bool ht_warned;
2721 if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
2722 env->cpuid_level = 7;
2725 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
2726 * CPUID[1].EDX.
2728 if (IS_AMD_CPU(env)) {
2729 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
2730 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
2731 & CPUID_EXT2_AMD_ALIASES);
2735 if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
2736 error_setg(&local_err,
2737 kvm_enabled() ?
2738 "Host doesn't support requested features" :
2739 "TCG doesn't support requested features");
2740 goto out;
2743 #ifndef CONFIG_USER_ONLY
2744 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
2746 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
2747 x86_cpu_apic_create(cpu, &local_err);
2748 if (local_err != NULL) {
2749 goto out;
2752 #endif
2754 mce_init(cpu);
2755 qemu_init_vcpu(cs);
2757 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
2758 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
2759      * based on inputs (sockets, cores, threads), it is still better to give
2760 * users a warning.
2762 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
2763      * cs->nr_threads hasn't been populated yet and the check is incorrect.
2765 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
2766 error_report("AMD CPU doesn't support hyperthreading. Please configure"
2767 " -smp options properly.");
2768 ht_warned = true;
2771 x86_cpu_apic_realize(cpu, &local_err);
2772 if (local_err != NULL) {
2773 goto out;
2775 cpu_reset(cs);
2777 xcc->parent_realize(dev, &local_err);
2778 out:
2779 if (local_err != NULL) {
2780 error_propagate(errp, local_err);
2781 return;
2785 /* Enables contiguous-apic-ID mode, for compatibility */
2786 static bool compat_apic_id_mode;
2788 void enable_compat_apic_id_mode(void)
2790 compat_apic_id_mode = true;
2793 /* Calculates initial APIC ID for a specific CPU index
2795 * Currently we need to be able to calculate the APIC ID from the CPU index
2796  * alone (without requiring a CPU object), as the QEMU<->SeaBIOS interfaces have
2797 * no concept of "CPU index", and the NUMA tables on fw_cfg need the APIC ID of
2798 * all CPUs up to max_cpus.
2800 uint32_t x86_cpu_apic_id_from_index(unsigned int cpu_index)
2802 uint32_t correct_id;
2803 static bool warned;
2805 correct_id = x86_apicid_from_cpu_idx(smp_cores, smp_threads, cpu_index);
2806 if (compat_apic_id_mode) {
2807 if (cpu_index != correct_id && !warned) {
2808 error_report("APIC IDs set in compatibility mode, "
2809 "CPU topology won't match the configuration");
2810 warned = true;
2812 return cpu_index;
2813 } else {
2814 return correct_id;
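/* QOM instance_init: register the per-CPU properties (family, model,
 * stepping, vendor, tsc-frequency, feature-words, ...) and load the class's
 * CPU model definition. */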
2818 static void x86_cpu_initfn(Object *obj)
2820 CPUState *cs = CPU(obj);
2821 X86CPU *cpu = X86_CPU(obj);
2822 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
2823 CPUX86State *env = &cpu->env;
2824 static int inited;
2826 cs->env_ptr = env;
2827 cpu_exec_init(env);
2829 object_property_add(obj, "family", "int",
2830 x86_cpuid_version_get_family,
2831 x86_cpuid_version_set_family, NULL, NULL, NULL);
2832 object_property_add(obj, "model", "int",
2833 x86_cpuid_version_get_model,
2834 x86_cpuid_version_set_model, NULL, NULL, NULL);
2835 object_property_add(obj, "stepping", "int",
2836 x86_cpuid_version_get_stepping,
2837 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
2838 object_property_add(obj, "level", "int",
2839 x86_cpuid_get_level,
2840 x86_cpuid_set_level, NULL, NULL, NULL);
2841 object_property_add(obj, "xlevel", "int",
2842 x86_cpuid_get_xlevel,
2843 x86_cpuid_set_xlevel, NULL, NULL, NULL);
2844 object_property_add_str(obj, "vendor",
2845 x86_cpuid_get_vendor,
2846 x86_cpuid_set_vendor, NULL);
2847 object_property_add_str(obj, "model-id",
2848 x86_cpuid_get_model_id,
2849 x86_cpuid_set_model_id, NULL);
2850 object_property_add(obj, "tsc-frequency", "int",
2851 x86_cpuid_get_tsc_freq,
2852 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
2853 object_property_add(obj, "apic-id", "int",
2854 x86_cpuid_get_apic_id,
2855 x86_cpuid_set_apic_id, NULL, NULL, NULL);
2856 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
2857 x86_cpu_get_feature_words,
2858 NULL, NULL, (void *)env->features, NULL);
2859 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
2860 x86_cpu_get_feature_words,
2861 NULL, NULL, (void *)cpu->filtered_features, NULL);
2863 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
2864 env->cpuid_apic_id = x86_cpu_apic_id_from_index(cs->cpu_index);
2866 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
2868 /* init various static tables used in TCG mode */
2869 if (tcg_enabled() && !inited) {
2870 inited = 1;
2871 optimize_flags_init();
2875 static int64_t x86_cpu_get_arch_id(CPUState *cs)
2877 X86CPU *cpu = X86_CPU(cs);
2878 CPUX86State *env = &cpu->env;
2880 return env->cpuid_apic_id;
2883 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
2885 X86CPU *cpu = X86_CPU(cs);
2887 return cpu->env.cr[0] & CR0_PG_MASK;
2890 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
2892 X86CPU *cpu = X86_CPU(cs);
2894 cpu->env.eip = value;
2897 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
2899 X86CPU *cpu = X86_CPU(cs);
2901 cpu->env.eip = tb->pc - tb->cs_base;
2904 static bool x86_cpu_has_work(CPUState *cs)
2906 X86CPU *cpu = X86_CPU(cs);
2907 CPUX86State *env = &cpu->env;
2909 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
2910 CPU_INTERRUPT_POLL)) &&
2911 (env->eflags & IF_MASK)) ||
2912 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
2913 CPU_INTERRUPT_INIT |
2914 CPU_INTERRUPT_SIPI |
2915 CPU_INTERRUPT_MCE));
2918 static Property x86_cpu_properties[] = {
2919 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
2920 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
2921 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
2922 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
2923 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
2924 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, false),
2925 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
2926 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
2927 DEFINE_PROP_END_OF_LIST()
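/* Class init for the abstract TYPE_X86_CPU: install the realize/reset
 * overrides and the CPUClass callbacks shared by every x86 CPU model. */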
2930 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
2932 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2933 CPUClass *cc = CPU_CLASS(oc);
2934 DeviceClass *dc = DEVICE_CLASS(oc);
2936 xcc->parent_realize = dc->realize;
2937 dc->realize = x86_cpu_realizefn;
2938 dc->bus_type = TYPE_ICC_BUS;
2939 dc->props = x86_cpu_properties;
2941 xcc->parent_reset = cc->reset;
2942 cc->reset = x86_cpu_reset;
2943 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
2945 cc->class_by_name = x86_cpu_class_by_name;
2946 cc->parse_features = x86_cpu_parse_featurestr;
2947 cc->has_work = x86_cpu_has_work;
2948 cc->do_interrupt = x86_cpu_do_interrupt;
2949 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
2950 cc->dump_state = x86_cpu_dump_state;
2951 cc->set_pc = x86_cpu_set_pc;
2952 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
2953 cc->gdb_read_register = x86_cpu_gdb_read_register;
2954 cc->gdb_write_register = x86_cpu_gdb_write_register;
2955 cc->get_arch_id = x86_cpu_get_arch_id;
2956 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
2957 #ifdef CONFIG_USER_ONLY
2958 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
2959 #else
2960 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
2961 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
2962 cc->write_elf64_note = x86_cpu_write_elf64_note;
2963 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
2964 cc->write_elf32_note = x86_cpu_write_elf32_note;
2965 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
2966 cc->vmsd = &vmstate_x86_cpu;
2967 #endif
2968 cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
2969 #ifndef CONFIG_USER_ONLY
2970 cc->debug_excp_handler = breakpoint_handler;
2971 #endif
2972 cc->cpu_exec_enter = x86_cpu_exec_enter;
2973 cc->cpu_exec_exit = x86_cpu_exec_exit;
2976 static const TypeInfo x86_cpu_type_info = {
2977 .name = TYPE_X86_CPU,
2978 .parent = TYPE_CPU,
2979 .instance_size = sizeof(X86CPU),
2980 .instance_init = x86_cpu_initfn,
2981 .abstract = true,
2982 .class_size = sizeof(X86CPUClass),
2983 .class_init = x86_cpu_common_class_init,
2986 static void x86_cpu_register_types(void)
2988 int i;
2990 type_register_static(&x86_cpu_type_info);
2991 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2992 x86_register_cpudef_type(&builtin_x86_defs[i]);
2994 #ifdef CONFIG_KVM
2995 type_register_static(&host_x86_cpu_type_info);
2996 #endif
2999 type_init(x86_cpu_register_types)