target-i386: Remove unused model_features_t struct
[qemu/cris-port.git] / target-i386 / cpu.c
blob e4ccee133af153707b573fcd8719a40a692ffe8d
1 /*
2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include <stdlib.h>
20 #include <stdio.h>
21 #include <string.h>
22 #include <inttypes.h>
24 #include "cpu.h"
25 #include "sysemu/kvm.h"
26 #include "sysemu/cpus.h"
27 #include "kvm_i386.h"
28 #include "topology.h"
30 #include "qemu/option.h"
31 #include "qemu/config-file.h"
32 #include "qapi/qmp/qerror.h"
34 #include "qapi-types.h"
35 #include "qapi-visit.h"
36 #include "qapi/visitor.h"
37 #include "sysemu/arch_init.h"
39 #include "hw/hw.h"
40 #if defined(CONFIG_KVM)
41 #include <linux/kvm_para.h>
42 #endif
44 #include "sysemu/sysemu.h"
45 #include "hw/qdev-properties.h"
46 #include "hw/cpu/icc_bus.h"
47 #ifndef CONFIG_USER_ONLY
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
50 #endif
53 /* Cache topology CPUID constants: */
55 /* CPUID Leaf 2 Descriptors */
57 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
58 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
59 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
62 /* CPUID Leaf 4 constants: */
64 /* EAX: */
65 #define CPUID_4_TYPE_DCACHE 1
66 #define CPUID_4_TYPE_ICACHE 2
67 #define CPUID_4_TYPE_UNIFIED 3
69 #define CPUID_4_LEVEL(l) ((l) << 5)
71 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
72 #define CPUID_4_FULLY_ASSOC (1 << 9)
74 /* EDX: */
75 #define CPUID_4_NO_INVD_SHARING (1 << 0)
76 #define CPUID_4_INCLUSIVE (1 << 1)
77 #define CPUID_4_COMPLEX_IDX (1 << 2)
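/*
 * Illustrative sketch (not part of the upstream file): the constants above are
 * combined into the EAX value of a CPUID leaf 4 cache entry.  A self-
 * initializing level-1 data cache would be described roughly as
 *
 *     eax = CPUID_4_TYPE_DCACHE | CPUID_4_LEVEL(1) | CPUID_4_SELF_INIT_LEVEL;
 *
 * with EBX/ECX carrying the line size, partitions, ways and set count.
 */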
79 #define ASSOC_FULL 0xFF
81 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
82 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
83 a == 2 ? 0x2 : \
84 a == 4 ? 0x4 : \
85 a == 8 ? 0x6 : \
86 a == 16 ? 0x8 : \
87 a == 32 ? 0xA : \
88 a == 48 ? 0xB : \
89 a == 64 ? 0xC : \
90 a == 96 ? 0xD : \
91 a == 128 ? 0xE : \
92 a == ASSOC_FULL ? 0xF : \
93 0 /* invalid value */)
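/*
 * Usage sketch (illustrative only): AMD_ENC_ASSOC maps a ways-of-associativity
 * count to the 4-bit encoding expected in CPUID leaf 0x80000006, e.g.
 *
 *     AMD_ENC_ASSOC(16)         == 0x8
 *     AMD_ENC_ASSOC(ASSOC_FULL) == 0xF
 *     AMD_ENC_ASSOC(3)          == 0    (not representable)
 */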
96 /* Definitions of the hardcoded cache entries we expose: */
98 /* L1 data cache: */
99 #define L1D_LINE_SIZE 64
100 #define L1D_ASSOCIATIVITY 8
101 #define L1D_SETS 64
102 #define L1D_PARTITIONS 1
103 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
104 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
105 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
106 #define L1D_LINES_PER_TAG 1
107 #define L1D_SIZE_KB_AMD 64
108 #define L1D_ASSOCIATIVITY_AMD 2
110 /* L1 instruction cache: */
111 #define L1I_LINE_SIZE 64
112 #define L1I_ASSOCIATIVITY 8
113 #define L1I_SETS 64
114 #define L1I_PARTITIONS 1
115 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
116 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
117 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
118 #define L1I_LINES_PER_TAG 1
119 #define L1I_SIZE_KB_AMD 64
120 #define L1I_ASSOCIATIVITY_AMD 2
122 /* Level 2 unified cache: */
123 #define L2_LINE_SIZE 64
124 #define L2_ASSOCIATIVITY 16
125 #define L2_SETS 4096
126 #define L2_PARTITIONS 1
127 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
128 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
129 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
130 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
131 #define L2_LINES_PER_TAG 1
132 #define L2_SIZE_KB_AMD 512
134 /* No L3 cache: */
135 #define L3_SIZE_KB 0 /* disabled */
136 #define L3_ASSOCIATIVITY 0 /* disabled */
137 #define L3_LINES_PER_TAG 0 /* disabled */
138 #define L3_LINE_SIZE 0 /* disabled */
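/*
 * Sanity check of the sizes above (informational, using the formula from the
 * comments): L1D/L1I = 64 B * 8 ways * 64 sets * 1 partition = 32 KiB, and
 * L2 = 64 B * 16 ways * 4096 sets * 1 partition = 4 MiB, which is why the
 * 2 MiB leaf 2 descriptor chosen for L2 is flagged as inconsistent above.
 */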
140 /* TLB definitions: */
142 #define L1_DTLB_2M_ASSOC 1
143 #define L1_DTLB_2M_ENTRIES 255
144 #define L1_DTLB_4K_ASSOC 1
145 #define L1_DTLB_4K_ENTRIES 255
147 #define L1_ITLB_2M_ASSOC 1
148 #define L1_ITLB_2M_ENTRIES 255
149 #define L1_ITLB_4K_ASSOC 1
150 #define L1_ITLB_4K_ENTRIES 255
152 #define L2_DTLB_2M_ASSOC 0 /* disabled */
153 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
154 #define L2_DTLB_4K_ASSOC 4
155 #define L2_DTLB_4K_ENTRIES 512
157 #define L2_ITLB_2M_ASSOC 0 /* disabled */
158 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
159 #define L2_ITLB_4K_ASSOC 4
160 #define L2_ITLB_4K_ENTRIES 512
164 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
165 uint32_t vendor2, uint32_t vendor3)
167 int i;
168 for (i = 0; i < 4; i++) {
169 dst[i] = vendor1 >> (8 * i);
170 dst[i + 4] = vendor2 >> (8 * i);
171 dst[i + 8] = vendor3 >> (8 * i);
173 dst[CPUID_VENDOR_SZ] = '\0';
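/*
 * Usage sketch (illustrative, not from the upstream file): the three CPUID
 * vendor registers are decoded in EBX/EDX/ECX order, so on a GenuineIntel
 * host something like
 *
 *     char vendor[CPUID_VENDOR_SZ + 1];
 *     x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
 *
 * yields the 12-character string "GenuineIntel" plus the terminating NUL.
 */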
176 /* feature flags taken from "Intel Processor Identification and the CPUID
177 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
178 * between feature naming conventions, aliases may be added.
180 static const char *feature_name[] = {
181 "fpu", "vme", "de", "pse",
182 "tsc", "msr", "pae", "mce",
183 "cx8", "apic", NULL, "sep",
184 "mtrr", "pge", "mca", "cmov",
185 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
186 NULL, "ds" /* Intel dts */, "acpi", "mmx",
187 "fxsr", "sse", "sse2", "ss",
188 "ht" /* Intel htt */, "tm", "ia64", "pbe",
190 static const char *ext_feature_name[] = {
191 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
192 "ds_cpl", "vmx", "smx", "est",
193 "tm2", "ssse3", "cid", NULL,
194 "fma", "cx16", "xtpr", "pdcm",
195 NULL, "pcid", "dca", "sse4.1|sse4_1",
196 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
197 "tsc-deadline", "aes", "xsave", "osxsave",
198 "avx", "f16c", "rdrand", "hypervisor",
200 /* Feature names that are already defined on feature_name[] but are set on
201 * CPUID[8000_0001].EDX on AMD CPUs don't have their names on
202 * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
203 * if and only if CPU vendor is AMD.
205 static const char *ext2_feature_name[] = {
206 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
207 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
208 NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
209 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
210 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
211 "nx|xd", NULL, "mmxext", NULL /* mmx */,
212 NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
213 NULL, "lm|i64", "3dnowext", "3dnow",
215 static const char *ext3_feature_name[] = {
216 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
217 "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
218 "3dnowprefetch", "osvw", "ibs", "xop",
219 "skinit", "wdt", NULL, "lwp",
220 "fma4", "tce", NULL, "nodeid_msr",
221 NULL, "tbm", "topoext", "perfctr_core",
222 "perfctr_nb", NULL, NULL, NULL,
223 NULL, NULL, NULL, NULL,
226 static const char *ext4_feature_name[] = {
227 NULL, NULL, "xstore", "xstore-en",
228 NULL, NULL, "xcrypt", "xcrypt-en",
229 "ace2", "ace2-en", "phe", "phe-en",
230 "pmm", "pmm-en", NULL, NULL,
231 NULL, NULL, NULL, NULL,
232 NULL, NULL, NULL, NULL,
233 NULL, NULL, NULL, NULL,
234 NULL, NULL, NULL, NULL,
237 static const char *kvm_feature_name[] = {
238 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
239 "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
240 NULL, NULL, NULL, NULL,
241 NULL, NULL, NULL, NULL,
242 NULL, NULL, NULL, NULL,
243 NULL, NULL, NULL, NULL,
244 "kvmclock-stable-bit", NULL, NULL, NULL,
245 NULL, NULL, NULL, NULL,
248 static const char *svm_feature_name[] = {
249 "npt", "lbrv", "svm_lock", "nrip_save",
250 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
251 NULL, NULL, "pause_filter", NULL,
252 "pfthreshold", NULL, NULL, NULL,
253 NULL, NULL, NULL, NULL,
254 NULL, NULL, NULL, NULL,
255 NULL, NULL, NULL, NULL,
256 NULL, NULL, NULL, NULL,
259 static const char *cpuid_7_0_ebx_feature_name[] = {
260 "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
261 "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
262 "avx512f", NULL, "rdseed", "adx", "smap", NULL, NULL, NULL,
263 NULL, NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
266 static const char *cpuid_apm_edx_feature_name[] = {
267 NULL, NULL, NULL, NULL,
268 NULL, NULL, NULL, NULL,
269 "invtsc", NULL, NULL, NULL,
270 NULL, NULL, NULL, NULL,
271 NULL, NULL, NULL, NULL,
272 NULL, NULL, NULL, NULL,
273 NULL, NULL, NULL, NULL,
274 NULL, NULL, NULL, NULL,
277 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
278 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
279 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
280 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
281 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
282 CPUID_PSE36 | CPUID_FXSR)
283 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
284 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
285 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
286 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
287 CPUID_PAE | CPUID_SEP | CPUID_APIC)
289 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
290 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
291 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
292 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
293 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS)
294 /* partly implemented:
295 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
296 /* missing:
297 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
298 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
299 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
300 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
301 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
302 /* missing:
303 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
304 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
305 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
306 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_XSAVE,
307 CPUID_EXT_OSXSAVE, CPUID_EXT_AVX, CPUID_EXT_F16C,
308 CPUID_EXT_RDRAND */
310 #ifdef TARGET_X86_64
311 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
312 #else
313 #define TCG_EXT2_X86_64_FEATURES 0
314 #endif
316 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
317 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
318 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
319 TCG_EXT2_X86_64_FEATURES)
320 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
321 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
322 #define TCG_EXT4_FEATURES 0
323 #define TCG_SVM_FEATURES 0
324 #define TCG_KVM_FEATURES 0
325 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
326 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX)
327 /* missing:
328 CPUID_7_0_EBX_FSGSBASE, CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
329 CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
330 CPUID_7_0_EBX_RDSEED */
331 #define TCG_APM_FEATURES 0
334 typedef struct FeatureWordInfo {
335 const char **feat_names;
336 uint32_t cpuid_eax; /* Input EAX for CPUID */
337 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
338 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
339 int cpuid_reg; /* output register (R_* constant) */
340 uint32_t tcg_features; /* Feature flags supported by TCG */
341 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
342 } FeatureWordInfo;
344 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
345 [FEAT_1_EDX] = {
346 .feat_names = feature_name,
347 .cpuid_eax = 1, .cpuid_reg = R_EDX,
348 .tcg_features = TCG_FEATURES,
350 [FEAT_1_ECX] = {
351 .feat_names = ext_feature_name,
352 .cpuid_eax = 1, .cpuid_reg = R_ECX,
353 .tcg_features = TCG_EXT_FEATURES,
355 [FEAT_8000_0001_EDX] = {
356 .feat_names = ext2_feature_name,
357 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
358 .tcg_features = TCG_EXT2_FEATURES,
360 [FEAT_8000_0001_ECX] = {
361 .feat_names = ext3_feature_name,
362 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
363 .tcg_features = TCG_EXT3_FEATURES,
365 [FEAT_C000_0001_EDX] = {
366 .feat_names = ext4_feature_name,
367 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
368 .tcg_features = TCG_EXT4_FEATURES,
370 [FEAT_KVM] = {
371 .feat_names = kvm_feature_name,
372 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
373 .tcg_features = TCG_KVM_FEATURES,
375 [FEAT_SVM] = {
376 .feat_names = svm_feature_name,
377 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
378 .tcg_features = TCG_SVM_FEATURES,
380 [FEAT_7_0_EBX] = {
381 .feat_names = cpuid_7_0_ebx_feature_name,
382 .cpuid_eax = 7,
383 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
384 .cpuid_reg = R_EBX,
385 .tcg_features = TCG_7_0_EBX_FEATURES,
387 [FEAT_8000_0007_EDX] = {
388 .feat_names = cpuid_apm_edx_feature_name,
389 .cpuid_eax = 0x80000007,
390 .cpuid_reg = R_EDX,
391 .tcg_features = TCG_APM_FEATURES,
392 .unmigratable_flags = CPUID_APM_INVTSC,
396 typedef struct X86RegisterInfo32 {
397 /* Name of register */
398 const char *name;
399 /* QAPI enum value register */
400 X86CPURegister32 qapi_enum;
401 } X86RegisterInfo32;
403 #define REGISTER(reg) \
404 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
405 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
406 REGISTER(EAX),
407 REGISTER(ECX),
408 REGISTER(EDX),
409 REGISTER(EBX),
410 REGISTER(ESP),
411 REGISTER(EBP),
412 REGISTER(ESI),
413 REGISTER(EDI),
415 #undef REGISTER
417 typedef struct ExtSaveArea {
418 uint32_t feature, bits;
419 uint32_t offset, size;
420 } ExtSaveArea;
422 static const ExtSaveArea ext_save_areas[] = {
423 [2] = { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
424 .offset = 0x240, .size = 0x100 },
425 [3] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
426 .offset = 0x3c0, .size = 0x40 },
427 [4] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
428 .offset = 0x400, .size = 0x40 },
429 [5] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
430 .offset = 0x440, .size = 0x40 },
431 [6] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
432 .offset = 0x480, .size = 0x200 },
433 [7] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
434 .offset = 0x680, .size = 0x400 },
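/*
 * Note (informational): the array indices above are XSAVE state-component
 * numbers; components 2-7 correspond to the AVX YMM state, the two MPX
 * bound-register/config areas, and the three AVX-512 areas, which is why each
 * entry is gated on the matching CPUID feature bit.
 */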
437 const char *get_register_name_32(unsigned int reg)
439 if (reg >= CPU_NB_REGS32) {
440 return NULL;
442 return x86_reg_info_32[reg].name;
445 /* KVM-specific features that are automatically added to all CPU models
446 * when KVM is enabled.
448 static uint32_t kvm_default_features[FEATURE_WORDS] = {
449 [FEAT_KVM] = (1 << KVM_FEATURE_CLOCKSOURCE) |
450 (1 << KVM_FEATURE_NOP_IO_DELAY) |
451 (1 << KVM_FEATURE_CLOCKSOURCE2) |
452 (1 << KVM_FEATURE_ASYNC_PF) |
453 (1 << KVM_FEATURE_STEAL_TIME) |
454 (1 << KVM_FEATURE_PV_EOI) |
455 (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT),
456 [FEAT_1_ECX] = CPUID_EXT_X2APIC,
459 /* Features that are not added by default to any CPU model when KVM is enabled.
461 static uint32_t kvm_default_unset_features[FEATURE_WORDS] = {
462 [FEAT_1_ECX] = CPUID_EXT_MONITOR,
465 void x86_cpu_compat_disable_kvm_features(FeatureWord w, uint32_t features)
467 kvm_default_features[w] &= ~features;
471 * Returns the set of feature flags that are supported and migratable by
472 * QEMU, for a given FeatureWord.
474 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
476 FeatureWordInfo *wi = &feature_word_info[w];
477 uint32_t r = 0;
478 int i;
480 for (i = 0; i < 32; i++) {
481 uint32_t f = 1U << i;
482 /* If the feature name is unknown, it is not supported by QEMU yet */
483 if (!wi->feat_names[i]) {
484 continue;
486 /* Skip features known to QEMU, but explicitly marked as unmigratable */
487 if (wi->unmigratable_flags & f) {
488 continue;
490 r |= f;
492 return r;
495 void host_cpuid(uint32_t function, uint32_t count,
496 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
498 uint32_t vec[4];
500 #ifdef __x86_64__
501 asm volatile("cpuid"
502 : "=a"(vec[0]), "=b"(vec[1]),
503 "=c"(vec[2]), "=d"(vec[3])
504 : "0"(function), "c"(count) : "cc");
505 #elif defined(__i386__)
506 asm volatile("pusha \n\t"
507 "cpuid \n\t"
508 "mov %%eax, 0(%2) \n\t"
509 "mov %%ebx, 4(%2) \n\t"
510 "mov %%ecx, 8(%2) \n\t"
511 "mov %%edx, 12(%2) \n\t"
512 "popa"
513 : : "a"(function), "c"(count), "S"(vec)
514 : "memory", "cc");
515 #else
516 abort();
517 #endif
519 if (eax)
520 *eax = vec[0];
521 if (ebx)
522 *ebx = vec[1];
523 if (ecx)
524 *ecx = vec[2];
525 if (edx)
526 *edx = vec[3];
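/*
 * Usage sketch (illustrative only): callers that need only some of the
 * outputs can pass NULL for the rest, e.g. reading the maximum basic leaf:
 *
 *     uint32_t max_basic_leaf;
 *     host_cpuid(0, 0, &max_basic_leaf, NULL, NULL, NULL);
 */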
529 #define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
531 /* general substring compare of *[s1..e1) and *[s2..e2). sx is start of
532 * a substring. ex if !NULL points to the first char after a substring,
533 * otherwise the string is assumed to be sized by a terminating nul.
534 * Return lexical ordering of *s1:*s2.
536 static int sstrcmp(const char *s1, const char *e1, const char *s2,
537 const char *e2)
539 for (;;) {
540 if (!*s1 || !*s2 || *s1 != *s2)
541 return (*s1 - *s2);
542 ++s1, ++s2;
543 if (s1 == e1 && s2 == e2)
544 return (0);
545 else if (s1 == e1)
546 return (*s2);
547 else if (s2 == e2)
548 return (*s1);
552 /* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
553 * '|' delimited (possibly empty) strings in which case search for a match
554 * within the alternatives proceeds left to right. Return 0 for success,
555 * non-zero otherwise.
557 static int altcmp(const char *s, const char *e, const char *altstr)
559 const char *p, *q;
561 for (q = p = altstr; ; ) {
562 while (*p && *p != '|')
563 ++p;
564 if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
565 return (0);
566 if (!*p)
567 return (1);
568 else
569 q = ++p;
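/*
 * Example (illustrative): with the alias strings used in the feature name
 * tables above, altcmp() accepts either spelling, e.g. both
 * altcmp("sse4_1", NULL, "sse4.1|sse4_1") and
 * altcmp("sse4.1", NULL, "sse4.1|sse4_1") return 0 (match).
 */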
573 /* search featureset for flag *[s..e), if found set corresponding bit in
574 * *pval and return true, otherwise return false
576 static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
577 const char **featureset)
579 uint32_t mask;
580 const char **ppc;
581 bool found = false;
583 for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
584 if (*ppc && !altcmp(s, e, *ppc)) {
585 *pval |= mask;
586 found = true;
589 return found;
592 static void add_flagname_to_bitmaps(const char *flagname,
593 FeatureWordArray words,
594 Error **errp)
596 FeatureWord w;
597 for (w = 0; w < FEATURE_WORDS; w++) {
598 FeatureWordInfo *wi = &feature_word_info[w];
599 if (wi->feat_names &&
600 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
601 break;
604 if (w == FEATURE_WORDS) {
605 error_setg(errp, "CPU feature %s not found", flagname);
609 /* CPU class name definitions: */
611 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
612 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
614 /* Return type name for a given CPU model name
615 * Caller is responsible for freeing the returned string.
617 static char *x86_cpu_type_name(const char *model_name)
619 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
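/*
 * Example (illustrative): with X86_CPU_TYPE_SUFFIX expanding to
 * "-" TYPE_X86_CPU, a model name such as "qemu64" maps to a QOM type name of
 * the form "qemu64-<x86 CPU type>"; the caller must g_free() the result.
 */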
622 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
624 ObjectClass *oc;
625 char *typename;
627 if (cpu_model == NULL) {
628 return NULL;
631 typename = x86_cpu_type_name(cpu_model);
632 oc = object_class_by_name(typename);
633 g_free(typename);
634 return oc;
637 struct X86CPUDefinition {
638 const char *name;
639 uint32_t level;
640 uint32_t xlevel;
641 uint32_t xlevel2;
642 /* vendor is zero-terminated, 12 character ASCII string */
643 char vendor[CPUID_VENDOR_SZ + 1];
644 int family;
645 int model;
646 int stepping;
647 FeatureWordArray features;
648 char model_id[48];
649 bool cache_info_passthrough;
652 static X86CPUDefinition builtin_x86_defs[] = {
654 .name = "qemu64",
655 .level = 4,
656 .vendor = CPUID_VENDOR_AMD,
657 .family = 6,
658 .model = 6,
659 .stepping = 3,
660 .features[FEAT_1_EDX] =
661 PPRO_FEATURES |
662 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
663 CPUID_PSE36,
664 .features[FEAT_1_ECX] =
665 CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT,
666 .features[FEAT_8000_0001_EDX] =
667 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
668 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
669 .features[FEAT_8000_0001_ECX] =
670 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
671 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
672 .xlevel = 0x8000000A,
675 .name = "phenom",
676 .level = 5,
677 .vendor = CPUID_VENDOR_AMD,
678 .family = 16,
679 .model = 2,
680 .stepping = 3,
681 .features[FEAT_1_EDX] =
682 PPRO_FEATURES |
683 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
684 CPUID_PSE36 | CPUID_VME | CPUID_HT,
685 .features[FEAT_1_ECX] =
686 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
687 CPUID_EXT_POPCNT,
688 .features[FEAT_8000_0001_EDX] =
689 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
690 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
691 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
692 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
693 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
694 CPUID_EXT3_CR8LEG,
695 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
696 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
697 .features[FEAT_8000_0001_ECX] =
698 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
699 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
700 .features[FEAT_SVM] =
701 CPUID_SVM_NPT | CPUID_SVM_LBRV,
702 .xlevel = 0x8000001A,
703 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
706 .name = "core2duo",
707 .level = 10,
708 .vendor = CPUID_VENDOR_INTEL,
709 .family = 6,
710 .model = 15,
711 .stepping = 11,
712 .features[FEAT_1_EDX] =
713 PPRO_FEATURES |
714 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
715 CPUID_PSE36 | CPUID_VME | CPUID_DTS | CPUID_ACPI | CPUID_SS |
716 CPUID_HT | CPUID_TM | CPUID_PBE,
717 .features[FEAT_1_ECX] =
718 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
719 CPUID_EXT_DTES64 | CPUID_EXT_DSCPL | CPUID_EXT_VMX | CPUID_EXT_EST |
720 CPUID_EXT_TM2 | CPUID_EXT_CX16 | CPUID_EXT_XTPR | CPUID_EXT_PDCM,
721 .features[FEAT_8000_0001_EDX] =
722 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
723 .features[FEAT_8000_0001_ECX] =
724 CPUID_EXT3_LAHF_LM,
725 .xlevel = 0x80000008,
726 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
729 .name = "kvm64",
730 .level = 5,
731 .vendor = CPUID_VENDOR_INTEL,
732 .family = 15,
733 .model = 6,
734 .stepping = 1,
735 /* Missing: CPUID_VME, CPUID_HT */
736 .features[FEAT_1_EDX] =
737 PPRO_FEATURES |
738 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
739 CPUID_PSE36,
740 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
741 .features[FEAT_1_ECX] =
742 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
743 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
744 .features[FEAT_8000_0001_EDX] =
745 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
746 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
747 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
748 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
749 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
750 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
751 .features[FEAT_8000_0001_ECX] =
753 .xlevel = 0x80000008,
754 .model_id = "Common KVM processor"
757 .name = "qemu32",
758 .level = 4,
759 .vendor = CPUID_VENDOR_INTEL,
760 .family = 6,
761 .model = 6,
762 .stepping = 3,
763 .features[FEAT_1_EDX] =
764 PPRO_FEATURES,
765 .features[FEAT_1_ECX] =
766 CPUID_EXT_SSE3 | CPUID_EXT_POPCNT,
767 .xlevel = 0x80000004,
770 .name = "kvm32",
771 .level = 5,
772 .vendor = CPUID_VENDOR_INTEL,
773 .family = 15,
774 .model = 6,
775 .stepping = 1,
776 .features[FEAT_1_EDX] =
777 PPRO_FEATURES |
778 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
779 .features[FEAT_1_ECX] =
780 CPUID_EXT_SSE3,
781 .features[FEAT_8000_0001_EDX] =
782 PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES,
783 .features[FEAT_8000_0001_ECX] =
785 .xlevel = 0x80000008,
786 .model_id = "Common 32-bit KVM processor"
789 .name = "coreduo",
790 .level = 10,
791 .vendor = CPUID_VENDOR_INTEL,
792 .family = 6,
793 .model = 14,
794 .stepping = 8,
795 .features[FEAT_1_EDX] =
796 PPRO_FEATURES | CPUID_VME |
797 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_DTS | CPUID_ACPI |
798 CPUID_SS | CPUID_HT | CPUID_TM | CPUID_PBE,
799 .features[FEAT_1_ECX] =
800 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_VMX |
801 CPUID_EXT_EST | CPUID_EXT_TM2 | CPUID_EXT_XTPR | CPUID_EXT_PDCM,
802 .features[FEAT_8000_0001_EDX] =
803 CPUID_EXT2_NX,
804 .xlevel = 0x80000008,
805 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
808 .name = "486",
809 .level = 1,
810 .vendor = CPUID_VENDOR_INTEL,
811 .family = 4,
812 .model = 8,
813 .stepping = 0,
814 .features[FEAT_1_EDX] =
815 I486_FEATURES,
816 .xlevel = 0,
819 .name = "pentium",
820 .level = 1,
821 .vendor = CPUID_VENDOR_INTEL,
822 .family = 5,
823 .model = 4,
824 .stepping = 3,
825 .features[FEAT_1_EDX] =
826 PENTIUM_FEATURES,
827 .xlevel = 0,
830 .name = "pentium2",
831 .level = 2,
832 .vendor = CPUID_VENDOR_INTEL,
833 .family = 6,
834 .model = 5,
835 .stepping = 2,
836 .features[FEAT_1_EDX] =
837 PENTIUM2_FEATURES,
838 .xlevel = 0,
841 .name = "pentium3",
842 .level = 2,
843 .vendor = CPUID_VENDOR_INTEL,
844 .family = 6,
845 .model = 7,
846 .stepping = 3,
847 .features[FEAT_1_EDX] =
848 PENTIUM3_FEATURES,
849 .xlevel = 0,
852 .name = "athlon",
853 .level = 2,
854 .vendor = CPUID_VENDOR_AMD,
855 .family = 6,
856 .model = 2,
857 .stepping = 3,
858 .features[FEAT_1_EDX] =
859 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
860 CPUID_MCA,
861 .features[FEAT_8000_0001_EDX] =
862 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
863 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
864 .xlevel = 0x80000008,
867 .name = "n270",
868 /* the original CPU reports CPUID level 10 */
869 .level = 5,
870 .vendor = CPUID_VENDOR_INTEL,
871 .family = 6,
872 .model = 28,
873 .stepping = 2,
874 .features[FEAT_1_EDX] =
875 PPRO_FEATURES |
876 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME | CPUID_DTS |
877 CPUID_ACPI | CPUID_SS | CPUID_HT | CPUID_TM | CPUID_PBE,
878 /* Some CPUs have no CPUID_SEP */
879 .features[FEAT_1_ECX] =
880 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
881 CPUID_EXT_DSCPL | CPUID_EXT_EST | CPUID_EXT_TM2 | CPUID_EXT_XTPR |
882 CPUID_EXT_MOVBE,
883 .features[FEAT_8000_0001_EDX] =
884 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
885 CPUID_EXT2_NX,
886 .features[FEAT_8000_0001_ECX] =
887 CPUID_EXT3_LAHF_LM,
888 .xlevel = 0x8000000A,
889 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
892 .name = "Conroe",
893 .level = 4,
894 .vendor = CPUID_VENDOR_INTEL,
895 .family = 6,
896 .model = 15,
897 .stepping = 3,
898 .features[FEAT_1_EDX] =
899 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
900 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
901 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
902 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
903 CPUID_DE | CPUID_FP87,
904 .features[FEAT_1_ECX] =
905 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
906 .features[FEAT_8000_0001_EDX] =
907 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
908 .features[FEAT_8000_0001_ECX] =
909 CPUID_EXT3_LAHF_LM,
910 .xlevel = 0x8000000A,
911 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
914 .name = "Penryn",
915 .level = 4,
916 .vendor = CPUID_VENDOR_INTEL,
917 .family = 6,
918 .model = 23,
919 .stepping = 3,
920 .features[FEAT_1_EDX] =
921 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
922 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
923 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
924 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
925 CPUID_DE | CPUID_FP87,
926 .features[FEAT_1_ECX] =
927 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
928 CPUID_EXT_SSE3,
929 .features[FEAT_8000_0001_EDX] =
930 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
931 .features[FEAT_8000_0001_ECX] =
932 CPUID_EXT3_LAHF_LM,
933 .xlevel = 0x8000000A,
934 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
937 .name = "Nehalem",
938 .level = 4,
939 .vendor = CPUID_VENDOR_INTEL,
940 .family = 6,
941 .model = 26,
942 .stepping = 3,
943 .features[FEAT_1_EDX] =
944 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
945 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
946 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
947 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
948 CPUID_DE | CPUID_FP87,
949 .features[FEAT_1_ECX] =
950 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
951 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
952 .features[FEAT_8000_0001_EDX] =
953 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
954 .features[FEAT_8000_0001_ECX] =
955 CPUID_EXT3_LAHF_LM,
956 .xlevel = 0x8000000A,
957 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
960 .name = "Westmere",
961 .level = 11,
962 .vendor = CPUID_VENDOR_INTEL,
963 .family = 6,
964 .model = 44,
965 .stepping = 1,
966 .features[FEAT_1_EDX] =
967 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
968 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
969 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
970 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
971 CPUID_DE | CPUID_FP87,
972 .features[FEAT_1_ECX] =
973 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
974 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
975 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
976 .features[FEAT_8000_0001_EDX] =
977 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
978 .features[FEAT_8000_0001_ECX] =
979 CPUID_EXT3_LAHF_LM,
980 .xlevel = 0x8000000A,
981 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
984 .name = "SandyBridge",
985 .level = 0xd,
986 .vendor = CPUID_VENDOR_INTEL,
987 .family = 6,
988 .model = 42,
989 .stepping = 1,
990 .features[FEAT_1_EDX] =
991 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
992 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
993 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
994 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
995 CPUID_DE | CPUID_FP87,
996 .features[FEAT_1_ECX] =
997 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
998 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
999 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1000 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1001 CPUID_EXT_SSE3,
1002 .features[FEAT_8000_0001_EDX] =
1003 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1004 CPUID_EXT2_SYSCALL,
1005 .features[FEAT_8000_0001_ECX] =
1006 CPUID_EXT3_LAHF_LM,
1007 .xlevel = 0x8000000A,
1008 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1011 .name = "Haswell",
1012 .level = 0xd,
1013 .vendor = CPUID_VENDOR_INTEL,
1014 .family = 6,
1015 .model = 60,
1016 .stepping = 1,
1017 .features[FEAT_1_EDX] =
1018 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1019 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1020 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1021 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1022 CPUID_DE | CPUID_FP87,
1023 .features[FEAT_1_ECX] =
1024 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1025 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1026 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1027 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1028 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1029 CPUID_EXT_PCID,
1030 .features[FEAT_8000_0001_EDX] =
1031 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1032 CPUID_EXT2_SYSCALL,
1033 .features[FEAT_8000_0001_ECX] =
1034 CPUID_EXT3_LAHF_LM,
1035 .features[FEAT_7_0_EBX] =
1036 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1037 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1038 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1039 CPUID_7_0_EBX_RTM,
1040 .xlevel = 0x8000000A,
1041 .model_id = "Intel Core Processor (Haswell)",
1044 .name = "Broadwell",
1045 .level = 0xd,
1046 .vendor = CPUID_VENDOR_INTEL,
1047 .family = 6,
1048 .model = 61,
1049 .stepping = 2,
1050 .features[FEAT_1_EDX] =
1051 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1052 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1053 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1054 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1055 CPUID_DE | CPUID_FP87,
1056 .features[FEAT_1_ECX] =
1057 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1058 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1059 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1060 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1061 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1062 CPUID_EXT_PCID,
1063 .features[FEAT_8000_0001_EDX] =
1064 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1065 CPUID_EXT2_SYSCALL,
1066 .features[FEAT_8000_0001_ECX] =
1067 CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1068 .features[FEAT_7_0_EBX] =
1069 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1070 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1071 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1072 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1073 CPUID_7_0_EBX_SMAP,
1074 .xlevel = 0x8000000A,
1075 .model_id = "Intel Core Processor (Broadwell)",
1078 .name = "Opteron_G1",
1079 .level = 5,
1080 .vendor = CPUID_VENDOR_AMD,
1081 .family = 15,
1082 .model = 6,
1083 .stepping = 1,
1084 .features[FEAT_1_EDX] =
1085 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1086 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1087 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1088 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1089 CPUID_DE | CPUID_FP87,
1090 .features[FEAT_1_ECX] =
1091 CPUID_EXT_SSE3,
1092 .features[FEAT_8000_0001_EDX] =
1093 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1094 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1095 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1096 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1097 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1098 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1099 .xlevel = 0x80000008,
1100 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1103 .name = "Opteron_G2",
1104 .level = 5,
1105 .vendor = CPUID_VENDOR_AMD,
1106 .family = 15,
1107 .model = 6,
1108 .stepping = 1,
1109 .features[FEAT_1_EDX] =
1110 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1111 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1112 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1113 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1114 CPUID_DE | CPUID_FP87,
1115 .features[FEAT_1_ECX] =
1116 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1117 .features[FEAT_8000_0001_EDX] =
1118 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1119 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1120 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1121 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1122 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1123 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1124 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1125 .features[FEAT_8000_0001_ECX] =
1126 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1127 .xlevel = 0x80000008,
1128 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1131 .name = "Opteron_G3",
1132 .level = 5,
1133 .vendor = CPUID_VENDOR_AMD,
1134 .family = 15,
1135 .model = 6,
1136 .stepping = 1,
1137 .features[FEAT_1_EDX] =
1138 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1139 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1140 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1141 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1142 CPUID_DE | CPUID_FP87,
1143 .features[FEAT_1_ECX] =
1144 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1145 CPUID_EXT_SSE3,
1146 .features[FEAT_8000_0001_EDX] =
1147 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1148 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1149 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1150 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1151 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1152 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1153 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1154 .features[FEAT_8000_0001_ECX] =
1155 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1156 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1157 .xlevel = 0x80000008,
1158 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1161 .name = "Opteron_G4",
1162 .level = 0xd,
1163 .vendor = CPUID_VENDOR_AMD,
1164 .family = 21,
1165 .model = 1,
1166 .stepping = 2,
1167 .features[FEAT_1_EDX] =
1168 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1169 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1170 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1171 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1172 CPUID_DE | CPUID_FP87,
1173 .features[FEAT_1_ECX] =
1174 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1175 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1176 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1177 CPUID_EXT_SSE3,
1178 .features[FEAT_8000_0001_EDX] =
1179 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1180 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1181 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1182 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1183 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1184 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1185 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1186 .features[FEAT_8000_0001_ECX] =
1187 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1188 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1189 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1190 CPUID_EXT3_LAHF_LM,
1191 .xlevel = 0x8000001A,
1192 .model_id = "AMD Opteron 62xx class CPU",
1195 .name = "Opteron_G5",
1196 .level = 0xd,
1197 .vendor = CPUID_VENDOR_AMD,
1198 .family = 21,
1199 .model = 2,
1200 .stepping = 0,
1201 .features[FEAT_1_EDX] =
1202 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1203 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1204 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1205 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1206 CPUID_DE | CPUID_FP87,
1207 .features[FEAT_1_ECX] =
1208 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1209 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1210 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1211 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1212 .features[FEAT_8000_0001_EDX] =
1213 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1214 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1215 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1216 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1217 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1218 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1219 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1220 .features[FEAT_8000_0001_ECX] =
1221 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1222 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1223 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1224 CPUID_EXT3_LAHF_LM,
1225 .xlevel = 0x8000001A,
1226 .model_id = "AMD Opteron 63xx class CPU",
1231 * x86_cpu_compat_set_features:
1232 * @cpu_model: CPU model name to be changed. If NULL, all CPU models are changed
1233 * @w: Identifies the feature word to be changed.
1234 * @feat_add: Feature bits to be added to feature word
1235 * @feat_remove: Feature bits to be removed from feature word
1237 * Change CPU model feature bits for compatibility.
1239 * This function may be used by machine-type compatibility functions
1240 * to enable or disable feature bits on specific CPU models.
1242 void x86_cpu_compat_set_features(const char *cpu_model, FeatureWord w,
1243 uint32_t feat_add, uint32_t feat_remove)
1245 X86CPUDefinition *def;
1246 int i;
1247 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1248 def = &builtin_x86_defs[i];
1249 if (!cpu_model || !strcmp(cpu_model, def->name)) {
1250 def->features[w] |= feat_add;
1251 def->features[w] &= ~feat_remove;
1256 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1257 bool migratable_only);
1259 #ifdef CONFIG_KVM
1261 static int cpu_x86_fill_model_id(char *str)
1263 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1264 int i;
1266 for (i = 0; i < 3; i++) {
1267 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
1268 memcpy(str + i * 16 + 0, &eax, 4);
1269 memcpy(str + i * 16 + 4, &ebx, 4);
1270 memcpy(str + i * 16 + 8, &ecx, 4);
1271 memcpy(str + i * 16 + 12, &edx, 4);
1273 return 0;
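/*
 * Note (illustrative): CPUID leaves 0x80000002..0x80000004 each return 16
 * bytes of the processor brand string in EAX/EBX/ECX/EDX, so the loop above
 * fills a 48-byte buffer; callers must provide at least that much space
 * (model_id[] in X86CPUDefinition is 48 bytes).
 */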
1276 static X86CPUDefinition host_cpudef;
1278 static Property host_x86_cpu_properties[] = {
1279 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1280 DEFINE_PROP_END_OF_LIST()
1283 /* class_init for the "host" CPU model
1285 * This function may be called before KVM is initialized.
1287 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1289 DeviceClass *dc = DEVICE_CLASS(oc);
1290 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1291 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1293 xcc->kvm_required = true;
1295 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1296 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1298 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1299 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1300 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1301 host_cpudef.stepping = eax & 0x0F;
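/*
 * Worked example (illustrative): a host returning EAX = 0x000306C3 from
 * CPUID leaf 1 (an Intel Haswell part) decodes above as family 6
 * (base 0x6 + extended 0x00), model 0x3C (base 0xC | extended 0x3 << 4)
 * and stepping 3.
 */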
1303 cpu_x86_fill_model_id(host_cpudef.model_id);
1305 xcc->cpu_def = &host_cpudef;
1306 host_cpudef.cache_info_passthrough = true;
1308 /* level, xlevel, xlevel2, and the feature words are initialized on
1309 * instance_init, because they require KVM to be initialized.
1312 dc->props = host_x86_cpu_properties;
1315 static void host_x86_cpu_initfn(Object *obj)
1317 X86CPU *cpu = X86_CPU(obj);
1318 CPUX86State *env = &cpu->env;
1319 KVMState *s = kvm_state;
1321 assert(kvm_enabled());
1323 /* We can't fill the features array here because we don't know yet if
1324 * "migratable" is true or false.
1326 cpu->host_features = true;
1328 env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1329 env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1330 env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1332 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1335 static const TypeInfo host_x86_cpu_type_info = {
1336 .name = X86_CPU_TYPE_NAME("host"),
1337 .parent = TYPE_X86_CPU,
1338 .instance_init = host_x86_cpu_initfn,
1339 .class_init = host_x86_cpu_class_init,
1342 #endif
1344 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1346 FeatureWordInfo *f = &feature_word_info[w];
1347 int i;
1349 for (i = 0; i < 32; ++i) {
1350 if (1 << i & mask) {
1351 const char *reg = get_register_name_32(f->cpuid_reg);
1352 assert(reg);
1353 fprintf(stderr, "warning: %s doesn't support requested feature: "
1354 "CPUID.%02XH:%s%s%s [bit %d]\n",
1355 kvm_enabled() ? "host" : "TCG",
1356 f->cpuid_eax, reg,
1357 f->feat_names[i] ? "." : "",
1358 f->feat_names[i] ? f->feat_names[i] : "", i);
1363 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, void *opaque,
1364 const char *name, Error **errp)
1366 X86CPU *cpu = X86_CPU(obj);
1367 CPUX86State *env = &cpu->env;
1368 int64_t value;
1370 value = (env->cpuid_version >> 8) & 0xf;
1371 if (value == 0xf) {
1372 value += (env->cpuid_version >> 20) & 0xff;
1374 visit_type_int(v, &value, name, errp);
1377 static void x86_cpuid_version_set_family(Object *obj, Visitor *v, void *opaque,
1378 const char *name, Error **errp)
1380 X86CPU *cpu = X86_CPU(obj);
1381 CPUX86State *env = &cpu->env;
1382 const int64_t min = 0;
1383 const int64_t max = 0xff + 0xf;
1384 Error *local_err = NULL;
1385 int64_t value;
1387 visit_type_int(v, &value, name, &local_err);
1388 if (local_err) {
1389 error_propagate(errp, local_err);
1390 return;
1392 if (value < min || value > max) {
1393 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1394 name ? name : "null", value, min, max);
1395 return;
1398 env->cpuid_version &= ~0xff00f00;
1399 if (value > 0x0f) {
1400 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1401 } else {
1402 env->cpuid_version |= value << 8;
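/*
 * Example (illustrative): setting "family" to 21 (0x15, as used by the
 * Opteron_G4/G5 models below) stores base family 0xF plus extended family
 * 21 - 15 = 6, i.e. cpuid_version gets 0xf00 | (6 << 20); values up to 0x0f
 * are stored directly in bits 11:8.
 */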
1406 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, void *opaque,
1407 const char *name, Error **errp)
1409 X86CPU *cpu = X86_CPU(obj);
1410 CPUX86State *env = &cpu->env;
1411 int64_t value;
1413 value = (env->cpuid_version >> 4) & 0xf;
1414 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1415 visit_type_int(v, &value, name, errp);
1418 static void x86_cpuid_version_set_model(Object *obj, Visitor *v, void *opaque,
1419 const char *name, Error **errp)
1421 X86CPU *cpu = X86_CPU(obj);
1422 CPUX86State *env = &cpu->env;
1423 const int64_t min = 0;
1424 const int64_t max = 0xff;
1425 Error *local_err = NULL;
1426 int64_t value;
1428 visit_type_int(v, &value, name, &local_err);
1429 if (local_err) {
1430 error_propagate(errp, local_err);
1431 return;
1433 if (value < min || value > max) {
1434 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1435 name ? name : "null", value, min, max);
1436 return;
1439 env->cpuid_version &= ~0xf00f0;
1440 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1443 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1444 void *opaque, const char *name,
1445 Error **errp)
1447 X86CPU *cpu = X86_CPU(obj);
1448 CPUX86State *env = &cpu->env;
1449 int64_t value;
1451 value = env->cpuid_version & 0xf;
1452 visit_type_int(v, &value, name, errp);
1455 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1456 void *opaque, const char *name,
1457 Error **errp)
1459 X86CPU *cpu = X86_CPU(obj);
1460 CPUX86State *env = &cpu->env;
1461 const int64_t min = 0;
1462 const int64_t max = 0xf;
1463 Error *local_err = NULL;
1464 int64_t value;
1466 visit_type_int(v, &value, name, &local_err);
1467 if (local_err) {
1468 error_propagate(errp, local_err);
1469 return;
1471 if (value < min || value > max) {
1472 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1473 name ? name : "null", value, min, max);
1474 return;
1477 env->cpuid_version &= ~0xf;
1478 env->cpuid_version |= value & 0xf;
1481 static void x86_cpuid_get_level(Object *obj, Visitor *v, void *opaque,
1482 const char *name, Error **errp)
1484 X86CPU *cpu = X86_CPU(obj);
1486 visit_type_uint32(v, &cpu->env.cpuid_level, name, errp);
1489 static void x86_cpuid_set_level(Object *obj, Visitor *v, void *opaque,
1490 const char *name, Error **errp)
1492 X86CPU *cpu = X86_CPU(obj);
1494 visit_type_uint32(v, &cpu->env.cpuid_level, name, errp);
1497 static void x86_cpuid_get_xlevel(Object *obj, Visitor *v, void *opaque,
1498 const char *name, Error **errp)
1500 X86CPU *cpu = X86_CPU(obj);
1502 visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp);
1505 static void x86_cpuid_set_xlevel(Object *obj, Visitor *v, void *opaque,
1506 const char *name, Error **errp)
1508 X86CPU *cpu = X86_CPU(obj);
1510 visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp);
1513 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1515 X86CPU *cpu = X86_CPU(obj);
1516 CPUX86State *env = &cpu->env;
1517 char *value;
1519 value = (char *)g_malloc(CPUID_VENDOR_SZ + 1);
1520 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1521 env->cpuid_vendor3);
1522 return value;
1525 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1526 Error **errp)
1528 X86CPU *cpu = X86_CPU(obj);
1529 CPUX86State *env = &cpu->env;
1530 int i;
1532 if (strlen(value) != CPUID_VENDOR_SZ) {
1533 error_set(errp, QERR_PROPERTY_VALUE_BAD, "",
1534 "vendor", value);
1535 return;
1538 env->cpuid_vendor1 = 0;
1539 env->cpuid_vendor2 = 0;
1540 env->cpuid_vendor3 = 0;
1541 for (i = 0; i < 4; i++) {
1542 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1543 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1544 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1548 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1550 X86CPU *cpu = X86_CPU(obj);
1551 CPUX86State *env = &cpu->env;
1552 char *value;
1553 int i;
1555 value = g_malloc(48 + 1);
1556 for (i = 0; i < 48; i++) {
1557 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1559 value[48] = '\0';
1560 return value;
1563 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1564 Error **errp)
1566 X86CPU *cpu = X86_CPU(obj);
1567 CPUX86State *env = &cpu->env;
1568 int c, len, i;
1570 if (model_id == NULL) {
1571 model_id = "";
1573 len = strlen(model_id);
1574 memset(env->cpuid_model, 0, 48);
1575 for (i = 0; i < 48; i++) {
1576 if (i >= len) {
1577 c = '\0';
1578 } else {
1579 c = (uint8_t)model_id[i];
1581 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1585 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, void *opaque,
1586 const char *name, Error **errp)
1588 X86CPU *cpu = X86_CPU(obj);
1589 int64_t value;
1591 value = cpu->env.tsc_khz * 1000;
1592 visit_type_int(v, &value, name, errp);
1595 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, void *opaque,
1596 const char *name, Error **errp)
1598 X86CPU *cpu = X86_CPU(obj);
1599 const int64_t min = 0;
1600 const int64_t max = INT64_MAX;
1601 Error *local_err = NULL;
1602 int64_t value;
1604 visit_type_int(v, &value, name, &local_err);
1605 if (local_err) {
1606 error_propagate(errp, local_err);
1607 return;
1609 if (value < min || value > max) {
1610 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1611 name ? name : "null", value, min, max);
1612 return;
1615 cpu->env.tsc_khz = value / 1000;
1618 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, void *opaque,
1619 const char *name, Error **errp)
1621 X86CPU *cpu = X86_CPU(obj);
1622 int64_t value = cpu->env.cpuid_apic_id;
1624 visit_type_int(v, &value, name, errp);
1627 static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, void *opaque,
1628 const char *name, Error **errp)
1630 X86CPU *cpu = X86_CPU(obj);
1631 DeviceState *dev = DEVICE(obj);
1632 const int64_t min = 0;
1633 const int64_t max = UINT32_MAX;
1634 Error *error = NULL;
1635 int64_t value;
1637 if (dev->realized) {
1638 error_setg(errp, "Attempt to set property '%s' on '%s' after "
1639 "it was realized", name, object_get_typename(obj));
1640 return;
1643 visit_type_int(v, &value, name, &error);
1644 if (error) {
1645 error_propagate(errp, error);
1646 return;
1648 if (value < min || value > max) {
1649 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1650 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
1651 object_get_typename(obj), name, value, min, max);
1652 return;
1655 if ((value != cpu->env.cpuid_apic_id) && cpu_exists(value)) {
1656 error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
1657 return;
1659 cpu->env.cpuid_apic_id = value;
1662 /* Generic getter for "feature-words" and "filtered-features" properties */
1663 static void x86_cpu_get_feature_words(Object *obj, Visitor *v, void *opaque,
1664 const char *name, Error **errp)
1666 uint32_t *array = (uint32_t *)opaque;
1667 FeatureWord w;
1668 Error *err = NULL;
1669 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1670 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1671 X86CPUFeatureWordInfoList *list = NULL;
1673 for (w = 0; w < FEATURE_WORDS; w++) {
1674 FeatureWordInfo *wi = &feature_word_info[w];
1675 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1676 qwi->cpuid_input_eax = wi->cpuid_eax;
1677 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1678 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1679 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1680 qwi->features = array[w];
1682 /* List will be in reverse order, but order shouldn't matter */
1683 list_entries[w].next = list;
1684 list_entries[w].value = &word_infos[w];
1685 list = &list_entries[w];
1688 visit_type_X86CPUFeatureWordInfoList(v, &list, "feature-words", &err);
1689 error_propagate(errp, err);
1692 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1693 const char *name, Error **errp)
1695 X86CPU *cpu = X86_CPU(obj);
1696 int64_t value = cpu->hyperv_spinlock_attempts;
1698 visit_type_int(v, &value, name, errp);
1701 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1702 const char *name, Error **errp)
1704 const int64_t min = 0xFFF;
1705 const int64_t max = UINT_MAX;
1706 X86CPU *cpu = X86_CPU(obj);
1707 Error *err = NULL;
1708 int64_t value;
1710 visit_type_int(v, &value, name, &err);
1711 if (err) {
1712 error_propagate(errp, err);
1713 return;
1716 if (value < min || value > max) {
1717 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1718 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1719 object_get_typename(obj), name ? name : "null",
1720 value, min, max);
1721 return;
1723 cpu->hyperv_spinlock_attempts = value;
1726 static PropertyInfo qdev_prop_spinlocks = {
1727 .name = "int",
1728 .get = x86_get_hv_spinlocks,
1729 .set = x86_set_hv_spinlocks,
1732 /* Convert all '_' in a feature string option name to '-', to make feature
1733 * name conform to QOM property naming rule, which uses '-' instead of '_'.
1735 static inline void feat2prop(char *s)
1737 while ((s = strchr(s, '_'))) {
1738 *s = '-';
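/*
 * Example (illustrative): feat2prop() turns "tsc_freq" into "tsc-freq" and
 * "hv_spinlocks" into "hv-spinlocks", so both spellings reach the same
 * option checks in x86_cpu_parse_featurestr() below.
 */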
1742 /* Parse "+feature,-feature,feature=foo" CPU feature string
1744 static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
1745 Error **errp)
1747 X86CPU *cpu = X86_CPU(cs);
1748 char *featurestr; /* Single 'key=value" string being parsed */
1749 FeatureWord w;
1750 /* Features to be added */
1751 FeatureWordArray plus_features = { 0 };
1752 /* Features to be removed */
1753 FeatureWordArray minus_features = { 0 };
1754 uint32_t numvalue;
1755 CPUX86State *env = &cpu->env;
1756 Error *local_err = NULL;
1758 featurestr = features ? strtok(features, ",") : NULL;
1760 while (featurestr) {
1761 char *val;
1762 if (featurestr[0] == '+') {
1763 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
1764 } else if (featurestr[0] == '-') {
1765 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
1766 } else if ((val = strchr(featurestr, '='))) {
1767 *val = 0; val++;
1768 feat2prop(featurestr);
1769 if (!strcmp(featurestr, "xlevel")) {
1770 char *err;
1771 char num[32];
1773 numvalue = strtoul(val, &err, 0);
1774 if (!*val || *err) {
1775 error_setg(errp, "bad numerical value %s", val);
1776 return;
1778 if (numvalue < 0x80000000) {
1779 error_report("xlevel value shall always be >= 0x80000000"
1780 ", fixup will be removed in future versions");
1781 numvalue += 0x80000000;
1783 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1784 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1785 } else if (!strcmp(featurestr, "tsc-freq")) {
1786 int64_t tsc_freq;
1787 char *err;
1788 char num[32];
1790 tsc_freq = strtosz_suffix_unit(val, &err,
1791 STRTOSZ_DEFSUFFIX_B, 1000);
1792 if (tsc_freq < 0 || *err) {
1793 error_setg(errp, "bad numerical value %s", val);
1794 return;
1796 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
1797 object_property_parse(OBJECT(cpu), num, "tsc-frequency",
1798 &local_err);
1799 } else if (!strcmp(featurestr, "hv-spinlocks")) {
1800 char *err;
1801 const int min = 0xFFF;
1802 char num[32];
1803 numvalue = strtoul(val, &err, 0);
1804 if (!*val || *err) {
1805 error_setg(errp, "bad numerical value %s", val);
1806 return;
1808 if (numvalue < min) {
1809 error_report("hv-spinlocks value shall always be >= 0x%x"
1810 ", fixup will be removed in future versions",
1811 min);
1812 numvalue = min;
1814 snprintf(num, sizeof(num), "%" PRId32, numvalue);
1815 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1816 } else {
1817 object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
1819 } else {
1820 feat2prop(featurestr);
1821 object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
1823 if (local_err) {
1824 error_propagate(errp, local_err);
1825 return;
1827 featurestr = strtok(NULL, ",");
1830 if (cpu->host_features) {
1831 for (w = 0; w < FEATURE_WORDS; w++) {
1832 env->features[w] =
1833 x86_cpu_get_supported_feature_word(w, cpu->migratable);
1837 for (w = 0; w < FEATURE_WORDS; w++) {
1838 env->features[w] |= plus_features[w];
1839 env->features[w] &= ~minus_features[w];
1843 /* Generate a composite string into buf of all cpuid names in featureset
1844  * selected by fbits.  Indicate truncation at bufsize in the event of overflow.
1845  * If flags is non-zero, suppress bit names that are undefined in featureset.
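 * e.g. with the FEAT_1_EDX name table, fbits=0x1 yields "fpu"; a set bit whose
 * name is NULL is printed as "[<bit>]" when flags is zero and skipped
 * otherwise (illustrative).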
1847 static void listflags(char *buf, int bufsize, uint32_t fbits,
1848 const char **featureset, uint32_t flags)
1850 const char **p = &featureset[31];
1851 char *q, *b, bit;
1852 int nc;
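/* If buf can hold at least 4 bytes, reserve its tail for a "..." truncation
 * marker and shrink the working size accordingly. */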
1854 b = 4 <= bufsize ? buf + (bufsize -= 3) - 1 : NULL;
1855 *buf = '\0';
1856 for (q = buf, bit = 31; fbits && bufsize; --p, fbits &= ~(1 << bit), --bit)
1857 if (fbits & 1 << bit && (*p || !flags)) {
1858 if (*p)
1859 nc = snprintf(q, bufsize, "%s%s", q == buf ? "" : " ", *p);
1860 else
1861 nc = snprintf(q, bufsize, "%s[%d]", q == buf ? "" : " ", bit);
1862 if (bufsize <= nc) {
1863 if (b) {
1864 memcpy(b, "...", sizeof("..."));
1866 return;
1868 q += nc;
1869 bufsize -= nc;
1873 /* Print the list of CPU models and recognized CPUID flag names. */
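/* Illustrative output shape (model list abridged; version text depends on the
 * build):
 *   x86           qemu64  QEMU Virtual CPU version ...
 *   x86             host  KVM processor with all supported host features ...
 *
 *   Recognized CPUID flags:
 *     fpu vme de pse tsc msr pae mce ...
 */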
1874 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
1876 X86CPUDefinition *def;
1877 char buf[256];
1878 int i;
1880 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1881 def = &builtin_x86_defs[i];
1882 snprintf(buf, sizeof(buf), "%s", def->name);
1883 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
1885 #ifdef CONFIG_KVM
1886 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
1887 "KVM processor with all supported host features "
1888 "(only available in KVM mode)");
1889 #endif
1891 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
1892 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
1893 FeatureWordInfo *fw = &feature_word_info[i];
1895 listflags(buf, sizeof(buf), (uint32_t)~0, fw->feat_names, 1);
1896 (*cpu_fprintf)(f, " %s\n", buf);
1900 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
1902 CpuDefinitionInfoList *cpu_list = NULL;
1903 X86CPUDefinition *def;
1904 int i;
1906 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1907 CpuDefinitionInfoList *entry;
1908 CpuDefinitionInfo *info;
1910 def = &builtin_x86_defs[i];
1911 info = g_malloc0(sizeof(*info));
1912 info->name = g_strdup(def->name);
1914 entry = g_malloc0(sizeof(*entry));
1915 entry->value = info;
1916 entry->next = cpu_list;
1917 cpu_list = entry;
1920 return cpu_list;
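/* Return the bits of feature word 'w' that the current accelerator can expose
 * to the guest: the KVM-reported CPUID bits under KVM, the static tcg_features
 * mask under TCG, and ~0 otherwise.  If migratable_only is set, the result is
 * further restricted to migration-safe bits.
 */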
1923 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1924 bool migratable_only)
1926 FeatureWordInfo *wi = &feature_word_info[w];
1927 uint32_t r;
1929 if (kvm_enabled()) {
1930 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
1931 wi->cpuid_ecx,
1932 wi->cpuid_reg);
1933 } else if (tcg_enabled()) {
1934 r = wi->tcg_features;
1935 } else {
1936 return ~0;
1938 if (migratable_only) {
1939 r &= x86_cpu_get_migratable_flags(w);
1941 return r;
1945 * Filters CPU feature words based on host availability of each feature.
1947 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
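 * Illustrative (hypothetical values): if a feature word requests bits 0x0c but
 * the accelerator only supports 0x04, env->features keeps 0x04 and the missing
 * 0x08 is recorded in cpu->filtered_features for later reporting.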
1949 static int x86_cpu_filter_features(X86CPU *cpu)
1951 CPUX86State *env = &cpu->env;
1952 FeatureWord w;
1953 int rv = 0;
1955 for (w = 0; w < FEATURE_WORDS; w++) {
1956 uint32_t host_feat =
1957 x86_cpu_get_supported_feature_word(w, cpu->migratable);
1958 uint32_t requested_features = env->features[w];
1959 env->features[w] &= host_feat;
1960 cpu->filtered_features[w] = requested_features & ~env->features[w];
1961 if (cpu->filtered_features[w]) {
1962 if (cpu->check_cpuid || cpu->enforce_cpuid) {
1963 report_unavailable_features(w, cpu->filtered_features[w]);
1965 rv = 1;
1969 return rv;
1972 /* Load data from X86CPUDefinition
1974 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
1976 CPUX86State *env = &cpu->env;
1977 const char *vendor;
1978 char host_vendor[CPUID_VENDOR_SZ + 1];
1979 FeatureWord w;
1981 object_property_set_int(OBJECT(cpu), def->level, "level", errp);
1982 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
1983 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
1984 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
1985 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
1986 env->cpuid_xlevel2 = def->xlevel2;
1987 cpu->cache_info_passthrough = def->cache_info_passthrough;
1988 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
1989 for (w = 0; w < FEATURE_WORDS; w++) {
1990 env->features[w] = def->features[w];
1993 /* Special cases not set in the X86CPUDefinition structs: */
1994 if (kvm_enabled()) {
1995 FeatureWord w;
1996 for (w = 0; w < FEATURE_WORDS; w++) {
1997 env->features[w] |= kvm_default_features[w];
1998 env->features[w] &= ~kvm_default_unset_features[w];
2002 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2004 /* sysenter isn't supported in compatibility mode on AMD,
2005 * syscall isn't supported in compatibility mode on Intel.
2006 * Normally we advertise the actual CPU vendor, but you can
2007 * override this using the 'vendor' property if you want to use
2008 * KVM's sysenter/syscall emulation in compatibility mode and
2009 * when doing cross-vendor migration.
2011 vendor = def->vendor;
2012 if (kvm_enabled()) {
2013 uint32_t ebx = 0, ecx = 0, edx = 0;
2014 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2015 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2016 vendor = host_vendor;
2019 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
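/* Build (but do not realize) an X86CPU from a "-cpu"-style string, e.g.
 * cpu_x86_create("qemu64,+avx,-sse3", icc_bridge, &err) -- the model name and
 * the optional feature list are split on the first ','.  (The example string
 * is illustrative.)
 */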
2023 X86CPU *cpu_x86_create(const char *cpu_model, DeviceState *icc_bridge,
2024 Error **errp)
2026 X86CPU *cpu = NULL;
2027 X86CPUClass *xcc;
2028 ObjectClass *oc;
2029 gchar **model_pieces;
2030 char *name, *features;
2031 Error *error = NULL;
2033 model_pieces = g_strsplit(cpu_model, ",", 2);
2034 if (!model_pieces[0]) {
2035 error_setg(&error, "Invalid/empty CPU model name");
2036 goto out;
2038 name = model_pieces[0];
2039 features = model_pieces[1];
2041 oc = x86_cpu_class_by_name(name);
2042 if (oc == NULL) {
2043 error_setg(&error, "Unable to find CPU definition: %s", name);
2044 goto out;
2046 xcc = X86_CPU_CLASS(oc);
2048 if (xcc->kvm_required && !kvm_enabled()) {
2049 error_setg(&error, "CPU model '%s' requires KVM", name);
2050 goto out;
2053 cpu = X86_CPU(object_new(object_class_get_name(oc)));
2055 #ifndef CONFIG_USER_ONLY
2056 if (icc_bridge == NULL) {
2057 error_setg(&error, "Invalid icc-bridge value");
2058 goto out;
2060 qdev_set_parent_bus(DEVICE(cpu), qdev_get_child_bus(icc_bridge, "icc"));
2061 object_unref(OBJECT(cpu));
2062 #endif
2064 x86_cpu_parse_featurestr(CPU(cpu), features, &error);
2065 if (error) {
2066 goto out;
2069 out:
2070 if (error != NULL) {
2071 error_propagate(errp, error);
2072 if (cpu) {
2073 object_unref(OBJECT(cpu));
2074 cpu = NULL;
2077 g_strfreev(model_pieces);
2078 return cpu;
2081 X86CPU *cpu_x86_init(const char *cpu_model)
2083 Error *error = NULL;
2084 X86CPU *cpu;
2086 cpu = cpu_x86_create(cpu_model, NULL, &error);
2087 if (error) {
2088 goto out;
2091 object_property_set_bool(OBJECT(cpu), true, "realized", &error);
2093 out:
2094 if (error) {
2095 error_report("%s", error_get_pretty(error));
2096 error_free(error);
2097 if (cpu != NULL) {
2098 object_unref(OBJECT(cpu));
2099 cpu = NULL;
2102 return cpu;
2105 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2107 X86CPUDefinition *cpudef = data;
2108 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2110 xcc->cpu_def = cpudef;
2113 static void x86_register_cpudef_type(X86CPUDefinition *def)
2115 char *typename = x86_cpu_type_name(def->name);
2116 TypeInfo ti = {
2117 .name = typename,
2118 .parent = TYPE_X86_CPU,
2119 .class_init = x86_cpu_cpudef_class_init,
2120 .class_data = def,
2123 type_register(&ti);
2124 g_free(typename);
2127 #if !defined(CONFIG_USER_ONLY)
2129 void cpu_clear_apic_feature(CPUX86State *env)
2131 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2134 #endif /* !CONFIG_USER_ONLY */
2136 /* Initialize list of CPU models, filling some non-static fields if necessary
2138 void x86_cpudef_setup(void)
2140 int i, j;
2141 static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
2143 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
2144 X86CPUDefinition *def = &builtin_x86_defs[i];
2146 /* Look for specific "cpudef" models that have
2147  * the QEMU version in .model_id */
2148 for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
2149 if (strcmp(model_with_versions[j], def->name) == 0) {
2150 pstrcpy(def->model_id, sizeof(def->model_id),
2151 "QEMU Virtual CPU version ");
2152 pstrcat(def->model_id, sizeof(def->model_id),
2153 qemu_get_version());
2154 break;
2160 static void get_cpuid_vendor(CPUX86State *env, uint32_t *ebx,
2161 uint32_t *ecx, uint32_t *edx)
2163 *ebx = env->cpuid_vendor1;
2164 *edx = env->cpuid_vendor2;
2165 *ecx = env->cpuid_vendor3;
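/* Compute the guest-visible CPUID values for leaf 'index', sub-leaf 'count'
 * into *eax..*edx.  Out-of-range leaves are clamped to cpuid_level,
 * cpuid_xlevel or cpuid_xlevel2 as appropriate (see below).
 */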
2168 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2169 uint32_t *eax, uint32_t *ebx,
2170 uint32_t *ecx, uint32_t *edx)
2172 X86CPU *cpu = x86_env_get_cpu(env);
2173 CPUState *cs = CPU(cpu);
2175 /* test if maximum index reached */
2176 if (index & 0x80000000) {
2177 if (index > env->cpuid_xlevel) {
2178 if (env->cpuid_xlevel2 > 0) {
2179 /* Handle the Centaur's CPUID instruction. */
2180 if (index > env->cpuid_xlevel2) {
2181 index = env->cpuid_xlevel2;
2182 } else if (index < 0xC0000000) {
2183 index = env->cpuid_xlevel;
2185 } else {
2186 /* Intel documentation states that invalid EAX input will
2187 * return the same information as EAX=cpuid_level
2188 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2190 index = env->cpuid_level;
2193 } else {
2194 if (index > env->cpuid_level)
2195 index = env->cpuid_level;
2198 switch(index) {
2199 case 0:
2200 *eax = env->cpuid_level;
2201 get_cpuid_vendor(env, ebx, ecx, edx);
2202 break;
2203 case 1:
2204 *eax = env->cpuid_version;
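/* EBX layout for CPUID.01H: bits 31..24 initial APIC ID, 23..16 addressable
 * logical CPUs (filled in below for SMP), 15..8 CLFLUSH line size in 8-byte
 * units (8 => 64 bytes), 7..0 brand index (left as 0 here). */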
2205 *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2206 *ecx = env->features[FEAT_1_ECX];
2207 *edx = env->features[FEAT_1_EDX];
2208 if (cs->nr_cores * cs->nr_threads > 1) {
2209 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2210 *edx |= 1 << 28; /* HTT bit */
2212 break;
2213 case 2:
2214 /* cache info: needed for Pentium Pro compatibility */
2215 if (cpu->cache_info_passthrough) {
2216 host_cpuid(index, 0, eax, ebx, ecx, edx);
2217 break;
2219 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2220 *ebx = 0;
2221 *ecx = 0;
2222 *edx = (L1D_DESCRIPTOR << 16) | \
2223 (L1I_DESCRIPTOR << 8) | \
2224 (L2_DESCRIPTOR);
2225 break;
2226 case 4:
2227 /* cache info: needed for Core compatibility */
2228 if (cpu->cache_info_passthrough) {
2229 host_cpuid(index, count, eax, ebx, ecx, edx);
2230 *eax &= ~0xFC000000;
2231 } else {
2232 *eax = 0;
2233 switch (count) {
2234 case 0: /* L1 dcache info */
2235 *eax |= CPUID_4_TYPE_DCACHE | \
2236 CPUID_4_LEVEL(1) | \
2237 CPUID_4_SELF_INIT_LEVEL;
2238 *ebx = (L1D_LINE_SIZE - 1) | \
2239 ((L1D_PARTITIONS - 1) << 12) | \
2240 ((L1D_ASSOCIATIVITY - 1) << 22);
2241 *ecx = L1D_SETS - 1;
2242 *edx = CPUID_4_NO_INVD_SHARING;
2243 break;
2244 case 1: /* L1 icache info */
2245 *eax |= CPUID_4_TYPE_ICACHE | \
2246 CPUID_4_LEVEL(1) | \
2247 CPUID_4_SELF_INIT_LEVEL;
2248 *ebx = (L1I_LINE_SIZE - 1) | \
2249 ((L1I_PARTITIONS - 1) << 12) | \
2250 ((L1I_ASSOCIATIVITY - 1) << 22);
2251 *ecx = L1I_SETS - 1;
2252 *edx = CPUID_4_NO_INVD_SHARING;
2253 break;
2254 case 2: /* L2 cache info */
2255 *eax |= CPUID_4_TYPE_UNIFIED | \
2256 CPUID_4_LEVEL(2) | \
2257 CPUID_4_SELF_INIT_LEVEL;
2258 if (cs->nr_threads > 1) {
2259 *eax |= (cs->nr_threads - 1) << 14;
2261 *ebx = (L2_LINE_SIZE - 1) | \
2262 ((L2_PARTITIONS - 1) << 12) | \
2263 ((L2_ASSOCIATIVITY - 1) << 22);
2264 *ecx = L2_SETS - 1;
2265 *edx = CPUID_4_NO_INVD_SHARING;
2266 break;
2267 default: /* end of info */
2268 *eax = 0;
2269 *ebx = 0;
2270 *ecx = 0;
2271 *edx = 0;
2272 break;
2276 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2277 if ((*eax & 31) && cs->nr_cores > 1) {
2278 *eax |= (cs->nr_cores - 1) << 26;
2280 break;
2281 case 5:
2282 /* mwait info: needed for Core compatibility */
2283 *eax = 0; /* Smallest monitor-line size in bytes */
2284 *ebx = 0; /* Largest monitor-line size in bytes */
2285 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2286 *edx = 0;
2287 break;
2288 case 6:
2289 /* Thermal and Power Leaf */
2290 *eax = 0;
2291 *ebx = 0;
2292 *ecx = 0;
2293 *edx = 0;
2294 break;
2295 case 7:
2296 /* Structured Extended Feature Flags Enumeration Leaf */
2297 if (count == 0) {
2298 *eax = 0; /* Maximum ECX value for sub-leaves */
2299 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2300 *ecx = 0; /* Reserved */
2301 *edx = 0; /* Reserved */
2302 } else {
2303 *eax = 0;
2304 *ebx = 0;
2305 *ecx = 0;
2306 *edx = 0;
2308 break;
2309 case 9:
2310 /* Direct Cache Access Information Leaf */
2311 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2312 *ebx = 0;
2313 *ecx = 0;
2314 *edx = 0;
2315 break;
2316 case 0xA:
2317 /* Architectural Performance Monitoring Leaf */
2318 if (kvm_enabled() && cpu->enable_pmu) {
2319 KVMState *s = cs->kvm_state;
2321 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2322 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2323 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2324 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2325 } else {
2326 *eax = 0;
2327 *ebx = 0;
2328 *ecx = 0;
2329 *edx = 0;
2331 break;
2332 case 0xD: {
2333 KVMState *s = cs->kvm_state;
2334 uint64_t kvm_mask;
2335 int i;
2337 /* Processor Extended State */
2338 *eax = 0;
2339 *ebx = 0;
2340 *ecx = 0;
2341 *edx = 0;
2342 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) || !kvm_enabled()) {
2343 break;
2345 kvm_mask =
2346 kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX) |
2347 ((uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32);
2349 if (count == 0) {
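/* 0x240 = 512-byte legacy FXSAVE region + 64-byte XSAVE header */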
2350 *ecx = 0x240;
2351 for (i = 2; i < ARRAY_SIZE(ext_save_areas); i++) {
2352 const ExtSaveArea *esa = &ext_save_areas[i];
2353 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2354 (kvm_mask & (1ULL << i)) != 0) {
2355 if (i < 32) {
2356 *eax |= 1 << i;
2357 } else {
2358 *edx |= 1 << (i - 32);
2360 *ecx = MAX(*ecx, esa->offset + esa->size);
2363 *eax |= kvm_mask & (XSTATE_FP | XSTATE_SSE);
2364 *ebx = *ecx;
2365 } else if (count == 1) {
2366 *eax = kvm_arch_get_supported_cpuid(s, 0xd, 1, R_EAX);
2367 } else if (count < ARRAY_SIZE(ext_save_areas)) {
2368 const ExtSaveArea *esa = &ext_save_areas[count];
2369 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2370 (kvm_mask & (1ULL << count)) != 0) {
2371 *eax = esa->size;
2372 *ebx = esa->offset;
2375 break;
2377 case 0x80000000:
2378 *eax = env->cpuid_xlevel;
2379 *ebx = env->cpuid_vendor1;
2380 *edx = env->cpuid_vendor2;
2381 *ecx = env->cpuid_vendor3;
2382 break;
2383 case 0x80000001:
2384 *eax = env->cpuid_version;
2385 *ebx = 0;
2386 *ecx = env->features[FEAT_8000_0001_ECX];
2387 *edx = env->features[FEAT_8000_0001_EDX];
2389 /* The Linux kernel checks for the CMPLegacy bit and
2390 * discards multiple thread information if it is set.
2391 * So don't set it here for Intel to make Linux guests happy.
2393 if (cs->nr_cores * cs->nr_threads > 1) {
2394 uint32_t tebx, tecx, tedx;
2395 get_cpuid_vendor(env, &tebx, &tecx, &tedx);
2396 if (tebx != CPUID_VENDOR_INTEL_1 ||
2397 tedx != CPUID_VENDOR_INTEL_2 ||
2398 tecx != CPUID_VENDOR_INTEL_3) {
2399 *ecx |= 1 << 1; /* CmpLegacy bit */
2402 break;
2403 case 0x80000002:
2404 case 0x80000003:
2405 case 0x80000004:
2406 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2407 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2408 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2409 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2410 break;
2411 case 0x80000005:
2412 /* cache info (L1 cache) */
2413 if (cpu->cache_info_passthrough) {
2414 host_cpuid(index, 0, eax, ebx, ecx, edx);
2415 break;
2417 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2418 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2419 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2420 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2421 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2422 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2423 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2424 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2425 break;
2426 case 0x80000006:
2427 /* cache info (L2 cache) */
2428 if (cpu->cache_info_passthrough) {
2429 host_cpuid(index, 0, eax, ebx, ecx, edx);
2430 break;
2432 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2433 (L2_DTLB_2M_ENTRIES << 16) | \
2434 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2435 (L2_ITLB_2M_ENTRIES);
2436 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2437 (L2_DTLB_4K_ENTRIES << 16) | \
2438 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2439 (L2_ITLB_4K_ENTRIES);
2440 *ecx = (L2_SIZE_KB_AMD << 16) | \
2441 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2442 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2443 *edx = ((L3_SIZE_KB/512) << 18) | \
2444 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2445 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2446 break;
2447 case 0x80000007:
2448 *eax = 0;
2449 *ebx = 0;
2450 *ecx = 0;
2451 *edx = env->features[FEAT_8000_0007_EDX];
2452 break;
2453 case 0x80000008:
2454 /* virtual & phys address size in low 2 bytes. */
2455 /* XXX: This value must match the one used in the MMU code. */
2456 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2457 /* 64 bit processor */
2458 /* XXX: The physical address space is limited to 42 bits in exec.c. */
2459 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
2460 } else {
2461 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
2462 *eax = 0x00000024; /* 36 bits physical */
2463 } else {
2464 *eax = 0x00000020; /* 32 bits physical */
2467 *ebx = 0;
2468 *ecx = 0;
2469 *edx = 0;
2470 if (cs->nr_cores * cs->nr_threads > 1) {
2471 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2473 break;
2474 case 0x8000000A:
2475 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2476 *eax = 0x00000001; /* SVM Revision */
2477 *ebx = 0x00000010; /* nr of ASIDs */
2478 *ecx = 0;
2479 *edx = env->features[FEAT_SVM]; /* optional features */
2480 } else {
2481 *eax = 0;
2482 *ebx = 0;
2483 *ecx = 0;
2484 *edx = 0;
2486 break;
2487 case 0xC0000000:
2488 *eax = env->cpuid_xlevel2;
2489 *ebx = 0;
2490 *ecx = 0;
2491 *edx = 0;
2492 break;
2493 case 0xC0000001:
2494 /* Support for VIA CPU's CPUID instruction */
2495 *eax = env->cpuid_version;
2496 *ebx = 0;
2497 *ecx = 0;
2498 *edx = env->features[FEAT_C000_0001_EDX];
2499 break;
2500 case 0xC0000002:
2501 case 0xC0000003:
2502 case 0xC0000004:
2503 /* Reserved for future use; currently filled with zero */
2504 *eax = 0;
2505 *ebx = 0;
2506 *ecx = 0;
2507 *edx = 0;
2508 break;
2509 default:
2510 /* reserved values: zero */
2511 *eax = 0;
2512 *ebx = 0;
2513 *ecx = 0;
2514 *edx = 0;
2515 break;
2519 /* CPUClass::reset() */
2520 static void x86_cpu_reset(CPUState *s)
2522 X86CPU *cpu = X86_CPU(s);
2523 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2524 CPUX86State *env = &cpu->env;
2525 int i;
2527 xcc->parent_reset(s);
2529 memset(env, 0, offsetof(CPUX86State, cpuid_level));
2531 tlb_flush(s, 1);
2533 env->old_exception = -1;
2535 /* init to reset state */
2537 #ifdef CONFIG_SOFTMMU
2538 env->hflags |= HF_SOFTMMU_MASK;
2539 #endif
2540 env->hflags2 |= HF2_GIF_MASK;
2542 cpu_x86_update_cr0(env, 0x60000010);
2543 env->a20_mask = ~0x0;
2544 env->smbase = 0x30000;
2546 env->idt.limit = 0xffff;
2547 env->gdt.limit = 0xffff;
2548 env->ldt.limit = 0xffff;
2549 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2550 env->tr.limit = 0xffff;
2551 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
2553 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2554 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2555 DESC_R_MASK | DESC_A_MASK);
2556 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2557 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2558 DESC_A_MASK);
2559 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2560 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2561 DESC_A_MASK);
2562 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2563 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2564 DESC_A_MASK);
2565 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2566 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2567 DESC_A_MASK);
2568 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2569 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2570 DESC_A_MASK);
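/* Execution resumes at the architectural reset vector:
 * CS.base 0xffff0000 + EIP 0xfff0 = 0xfffffff0 */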
2572 env->eip = 0xfff0;
2573 env->regs[R_EDX] = env->cpuid_version;
2575 env->eflags = 0x2;
2577 /* FPU init */
2578 for (i = 0; i < 8; i++) {
2579 env->fptags[i] = 1;
2581 cpu_set_fpuc(env, 0x37f);
2583 env->mxcsr = 0x1f80;
2584 env->xstate_bv = XSTATE_FP | XSTATE_SSE;
2586 env->pat = 0x0007040600070406ULL;
2587 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2589 memset(env->dr, 0, sizeof(env->dr));
2590 env->dr[6] = DR6_FIXED_1;
2591 env->dr[7] = DR7_FIXED_1;
2592 cpu_breakpoint_remove_all(s, BP_CPU);
2593 cpu_watchpoint_remove_all(s, BP_CPU);
2595 env->xcr0 = 1;
2598 * SDM 11.11.5 requires:
2599 * - IA32_MTRR_DEF_TYPE MSR.E = 0
2600 * - IA32_MTRR_PHYSMASKn.V = 0
2601 * All other bits are undefined. For simplification, zero it all.
2603 env->mtrr_deftype = 0;
2604 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
2605 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
2607 #if !defined(CONFIG_USER_ONLY)
2608 /* We hard-wire the BSP to the first CPU. */
2609 if (s->cpu_index == 0) {
2610 apic_designate_bsp(cpu->apic_state);
2613 s->halted = !cpu_is_bsp(cpu);
2615 if (kvm_enabled()) {
2616 kvm_arch_reset_vcpu(cpu);
2618 #endif
2621 #ifndef CONFIG_USER_ONLY
2622 bool cpu_is_bsp(X86CPU *cpu)
2624 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2627 /* TODO: remove me when reset over QOM tree is implemented */
2628 static void x86_cpu_machine_reset_cb(void *opaque)
2630 X86CPU *cpu = opaque;
2631 cpu_reset(CPU(cpu));
2633 #endif
2635 static void mce_init(X86CPU *cpu)
2637 CPUX86State *cenv = &cpu->env;
2638 unsigned int bank;
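/* Enable MCE/MCA emulation only for family >= 6 models that advertise both
 * CPUID_MCE and CPUID_MCA */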
2640 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2641 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2642 (CPUID_MCE | CPUID_MCA)) {
2643 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2644 cenv->mcg_ctl = ~(uint64_t)0;
2645 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2646 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2651 #ifndef CONFIG_USER_ONLY
2652 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2654 CPUX86State *env = &cpu->env;
2655 DeviceState *dev = DEVICE(cpu);
2656 APICCommonState *apic;
2657 const char *apic_type = "apic";
2659 if (kvm_irqchip_in_kernel()) {
2660 apic_type = "kvm-apic";
2661 } else if (xen_enabled()) {
2662 apic_type = "xen-apic";
2665 cpu->apic_state = qdev_try_create(qdev_get_parent_bus(dev), apic_type);
2666 if (cpu->apic_state == NULL) {
2667 error_setg(errp, "APIC device '%s' could not be created", apic_type);
2668 return;
2671 object_property_add_child(OBJECT(cpu), "apic",
2672 OBJECT(cpu->apic_state), NULL);
2673 qdev_prop_set_uint8(cpu->apic_state, "id", env->cpuid_apic_id);
2674 /* TODO: convert to link<> */
2675 apic = APIC_COMMON(cpu->apic_state);
2676 apic->cpu = cpu;
2679 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2681 if (cpu->apic_state == NULL) {
2682 return;
2685 if (qdev_init(cpu->apic_state)) {
2686 error_setg(errp, "APIC device '%s' could not be initialized",
2687 object_get_typename(OBJECT(cpu->apic_state)));
2688 return;
2691 #else
2692 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2695 #endif
2698 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
2699 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
2700 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
2701 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
2702 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
2703 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
2704 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
2706 CPUState *cs = CPU(dev);
2707 X86CPU *cpu = X86_CPU(dev);
2708 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
2709 CPUX86State *env = &cpu->env;
2710 Error *local_err = NULL;
2711 static bool ht_warned;
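/* CPUID[EAX=7] is only visible when cpuid_level >= 7, so raise the level if
 * any leaf-7 feature bit was requested */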
2713 if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
2714 env->cpuid_level = 7;
2717 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
2718 * CPUID[1].EDX.
2720 if (IS_AMD_CPU(env)) {
2721 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
2722 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
2723 & CPUID_EXT2_AMD_ALIASES);
2727 if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
2728 error_setg(&local_err,
2729 kvm_enabled() ?
2730 "Host doesn't support requested features" :
2731 "TCG doesn't support requested features");
2732 goto out;
2735 #ifndef CONFIG_USER_ONLY
2736 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
2738 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
2739 x86_cpu_apic_create(cpu, &local_err);
2740 if (local_err != NULL) {
2741 goto out;
2744 #endif
2746 mce_init(cpu);
2747 qemu_init_vcpu(cs);
2749 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
2750 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
2751 * based on inputs (sockets, cores, threads), it is still better to give
2752 * users a warning.
2754 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
2755 * cs->nr_threads hasn't been populated yet and the check is incorrect.
2757 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
2758 error_report("AMD CPU doesn't support hyperthreading. Please configure"
2759 " -smp options properly.");
2760 ht_warned = true;
2763 x86_cpu_apic_realize(cpu, &local_err);
2764 if (local_err != NULL) {
2765 goto out;
2767 cpu_reset(cs);
2769 xcc->parent_realize(dev, &local_err);
2770 out:
2771 if (local_err != NULL) {
2772 error_propagate(errp, local_err);
2773 return;
2777 /* Enables contiguous-apic-ID mode, for compatibility */
2778 static bool compat_apic_id_mode;
2780 void enable_compat_apic_id_mode(void)
2782 compat_apic_id_mode = true;
2785 /* Calculates initial APIC ID for a specific CPU index
2787 * Currently we need to be able to calculate the APIC ID from the CPU index
2788 * alone (without requiring a CPU object), as the QEMU<->SeaBIOS interfaces have
2789 * no concept of "CPU index", and the NUMA tables on fw_cfg need the APIC ID of
2790 * all CPUs up to max_cpus.
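 * Example (computed with the topology.h packing; treat as illustrative): with
 * smp_cores=3 and smp_threads=1 the core field needs 2 bits, so cpu_index=3
 * maps to package 1 / core 0 and APIC ID 4, while the compat (contiguous)
 * mode would return 3.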
2792 uint32_t x86_cpu_apic_id_from_index(unsigned int cpu_index)
2794 uint32_t correct_id;
2795 static bool warned;
2797 correct_id = x86_apicid_from_cpu_idx(smp_cores, smp_threads, cpu_index);
2798 if (compat_apic_id_mode) {
2799 if (cpu_index != correct_id && !warned) {
2800 error_report("APIC IDs set in compatibility mode, "
2801 "CPU topology won't match the configuration");
2802 warned = true;
2804 return cpu_index;
2805 } else {
2806 return correct_id;
2810 static void x86_cpu_initfn(Object *obj)
2812 CPUState *cs = CPU(obj);
2813 X86CPU *cpu = X86_CPU(obj);
2814 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
2815 CPUX86State *env = &cpu->env;
2816 static int inited;
2818 cs->env_ptr = env;
2819 cpu_exec_init(env);
2821 object_property_add(obj, "family", "int",
2822 x86_cpuid_version_get_family,
2823 x86_cpuid_version_set_family, NULL, NULL, NULL);
2824 object_property_add(obj, "model", "int",
2825 x86_cpuid_version_get_model,
2826 x86_cpuid_version_set_model, NULL, NULL, NULL);
2827 object_property_add(obj, "stepping", "int",
2828 x86_cpuid_version_get_stepping,
2829 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
2830 object_property_add(obj, "level", "int",
2831 x86_cpuid_get_level,
2832 x86_cpuid_set_level, NULL, NULL, NULL);
2833 object_property_add(obj, "xlevel", "int",
2834 x86_cpuid_get_xlevel,
2835 x86_cpuid_set_xlevel, NULL, NULL, NULL);
2836 object_property_add_str(obj, "vendor",
2837 x86_cpuid_get_vendor,
2838 x86_cpuid_set_vendor, NULL);
2839 object_property_add_str(obj, "model-id",
2840 x86_cpuid_get_model_id,
2841 x86_cpuid_set_model_id, NULL);
2842 object_property_add(obj, "tsc-frequency", "int",
2843 x86_cpuid_get_tsc_freq,
2844 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
2845 object_property_add(obj, "apic-id", "int",
2846 x86_cpuid_get_apic_id,
2847 x86_cpuid_set_apic_id, NULL, NULL, NULL);
2848 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
2849 x86_cpu_get_feature_words,
2850 NULL, NULL, (void *)env->features, NULL);
2851 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
2852 x86_cpu_get_feature_words,
2853 NULL, NULL, (void *)cpu->filtered_features, NULL);
2855 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
2856 env->cpuid_apic_id = x86_cpu_apic_id_from_index(cs->cpu_index);
2858 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
2860 /* init various static tables used in TCG mode */
2861 if (tcg_enabled() && !inited) {
2862 inited = 1;
2863 optimize_flags_init();
2867 static int64_t x86_cpu_get_arch_id(CPUState *cs)
2869 X86CPU *cpu = X86_CPU(cs);
2870 CPUX86State *env = &cpu->env;
2872 return env->cpuid_apic_id;
2875 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
2877 X86CPU *cpu = X86_CPU(cs);
2879 return cpu->env.cr[0] & CR0_PG_MASK;
2882 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
2884 X86CPU *cpu = X86_CPU(cs);
2886 cpu->env.eip = value;
2889 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
2891 X86CPU *cpu = X86_CPU(cs);
2893 cpu->env.eip = tb->pc - tb->cs_base;
2896 static bool x86_cpu_has_work(CPUState *cs)
2898 X86CPU *cpu = X86_CPU(cs);
2899 CPUX86State *env = &cpu->env;
2901 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
2902 CPU_INTERRUPT_POLL)) &&
2903 (env->eflags & IF_MASK)) ||
2904 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
2905 CPU_INTERRUPT_INIT |
2906 CPU_INTERRUPT_SIPI |
2907 CPU_INTERRUPT_MCE));
2910 static Property x86_cpu_properties[] = {
2911 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
2912 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
2913 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
2914 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
2915 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
2916 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, false),
2917 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
2918 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
2919 DEFINE_PROP_END_OF_LIST()
2922 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
2924 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2925 CPUClass *cc = CPU_CLASS(oc);
2926 DeviceClass *dc = DEVICE_CLASS(oc);
2928 xcc->parent_realize = dc->realize;
2929 dc->realize = x86_cpu_realizefn;
2930 dc->bus_type = TYPE_ICC_BUS;
2931 dc->props = x86_cpu_properties;
2933 xcc->parent_reset = cc->reset;
2934 cc->reset = x86_cpu_reset;
2935 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
2937 cc->class_by_name = x86_cpu_class_by_name;
2938 cc->parse_features = x86_cpu_parse_featurestr;
2939 cc->has_work = x86_cpu_has_work;
2940 cc->do_interrupt = x86_cpu_do_interrupt;
2941 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
2942 cc->dump_state = x86_cpu_dump_state;
2943 cc->set_pc = x86_cpu_set_pc;
2944 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
2945 cc->gdb_read_register = x86_cpu_gdb_read_register;
2946 cc->gdb_write_register = x86_cpu_gdb_write_register;
2947 cc->get_arch_id = x86_cpu_get_arch_id;
2948 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
2949 #ifdef CONFIG_USER_ONLY
2950 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
2951 #else
2952 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
2953 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
2954 cc->write_elf64_note = x86_cpu_write_elf64_note;
2955 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
2956 cc->write_elf32_note = x86_cpu_write_elf32_note;
2957 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
2958 cc->vmsd = &vmstate_x86_cpu;
2959 #endif
2960 cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
2961 #ifndef CONFIG_USER_ONLY
2962 cc->debug_excp_handler = breakpoint_handler;
2963 #endif
2964 cc->cpu_exec_enter = x86_cpu_exec_enter;
2965 cc->cpu_exec_exit = x86_cpu_exec_exit;
2968 static const TypeInfo x86_cpu_type_info = {
2969 .name = TYPE_X86_CPU,
2970 .parent = TYPE_CPU,
2971 .instance_size = sizeof(X86CPU),
2972 .instance_init = x86_cpu_initfn,
2973 .abstract = true,
2974 .class_size = sizeof(X86CPUClass),
2975 .class_init = x86_cpu_common_class_init,
2978 static void x86_cpu_register_types(void)
2980 int i;
2982 type_register_static(&x86_cpu_type_info);
2983 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2984 x86_register_cpudef_type(&builtin_x86_defs[i]);
2986 #ifdef CONFIG_KVM
2987 type_register_static(&host_x86_cpu_type_info);
2988 #endif
2991 type_init(x86_cpu_register_types)