/*
 * i386 CPUID helper functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "sysemu/kvm.h"
#include "sysemu/cpus.h"

#include "qemu/option.h"
#include "qemu/config-file.h"
#include "qapi/qmp/qerror.h"

#include "qapi-types.h"
#include "qapi-visit.h"
#include "qapi/visitor.h"
#include "sysemu/arch_init.h"

#if defined(CONFIG_KVM)
#include <linux/kvm_para.h>
#endif

#include "sysemu/sysemu.h"
#include "hw/qdev-properties.h"
#include "hw/cpu/icc_bus.h"
#ifndef CONFIG_USER_ONLY
#include "hw/xen/xen.h"
#include "hw/i386/apic_internal.h"
#endif
/* Cache topology CPUID constants: */

/* CPUID Leaf 2 Descriptors */

#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
#define CPUID_2_L1I_32KB_8WAY_64B 0x30
#define CPUID_2_L2_2MB_8WAY_64B   0x7d


/* CPUID Leaf 4 constants: */

#define CPUID_4_TYPE_DCACHE  1
#define CPUID_4_TYPE_ICACHE  2
#define CPUID_4_TYPE_UNIFIED 3

#define CPUID_4_LEVEL(l)          ((l) << 5)

#define CPUID_4_SELF_INIT_LEVEL (1 << 8)
#define CPUID_4_FULLY_ASSOC     (1 << 9)

#define CPUID_4_NO_INVD_SHARING (1 << 0)
#define CPUID_4_INCLUSIVE       (1 << 1)
#define CPUID_4_COMPLEX_IDX     (1 << 2)

#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006: */
#define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
                          a == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)


/* Definitions of the hardcoded cache entries we expose: */

#define L1D_LINE_SIZE         64
#define L1D_ASSOCIATIVITY      8
#define L1D_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1D_LINES_PER_TAG      1
#define L1D_SIZE_KB_AMD       64
#define L1D_ASSOCIATIVITY_AMD  2

/* L1 instruction cache: */
#define L1I_LINE_SIZE         64
#define L1I_ASSOCIATIVITY      8
#define L1I_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1I_LINES_PER_TAG      1
#define L1I_SIZE_KB_AMD       64
#define L1I_ASSOCIATIVITY_AMD  2

/* Level 2 unified cache: */
#define L2_LINE_SIZE          64
#define L2_ASSOCIATIVITY      16
#define L2_PARTITIONS          1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
#define L2_LINES_PER_TAG       1
#define L2_SIZE_KB_AMD       512

#define L3_SIZE_KB             0 /* disabled */
#define L3_ASSOCIATIVITY       0 /* disabled */
#define L3_LINES_PER_TAG       0 /* disabled */
#define L3_LINE_SIZE           0 /* disabled */

/* TLB definitions: */

#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512
static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
                                     uint32_t vendor2, uint32_t vendor3)
{
    int i;

    for (i = 0; i < 4; i++) {
        dst[i] = vendor1 >> (8 * i);
        dst[i + 4] = vendor2 >> (8 * i);
        dst[i + 8] = vendor3 >> (8 * i);
    }
    dst[CPUID_VENDOR_SZ] = '\0';
}
/* feature flags taken from "Intel Processor Identification and the CPUID
 * Instruction" and AMD's "CPUID Specification".  In cases of disagreement
 * between feature naming conventions, aliases may be added.
 */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse",
    "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep",
    "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
    NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss",
    "ht" /* Intel htt */, "tm", "ia64", "pbe",
};

static const char *ext_feature_name[] = {
    "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
    "ds_cpl", "vmx", "smx", "est",
    "tm2", "ssse3", "cid", NULL,
    "fma", "cx16", "xtpr", "pdcm",
    NULL, "pcid", "dca", "sse4.1|sse4_1",
    "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
    "tsc-deadline", "aes", "xsave", "osxsave",
    "avx", "f16c", "rdrand", "hypervisor",
};
/* Feature names that are already defined on feature_name[] but are set on
 * CPUID[8000_0001].EDX on AMD CPUs don't have their names on
 * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
 * if and only if CPU vendor is AMD.
 */
static const char *ext2_feature_name[] = {
    NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
    NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
    NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
    NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
    NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
    "nx|xd", NULL, "mmxext", NULL /* mmx */,
    NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
    NULL, "lm|i64", "3dnowext", "3dnow",
};

static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
    "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", "ibs", "xop",
    "skinit", "wdt", NULL, "lwp",
    "fma4", "tce", NULL, "nodeid_msr",
    NULL, "tbm", "topoext", "perfctr_core",
    "perfctr_nb", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
static const char *ext4_feature_name[] = {
    NULL, NULL, "xstore", "xstore-en",
    NULL, NULL, "xcrypt", "xcrypt-en",
    "ace2", "ace2-en", "phe", "phe-en",
    "pmm", "pmm-en", NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

static const char *kvm_feature_name[] = {
    "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
    "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    "kvmclock-stable-bit", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

static const char *svm_feature_name[] = {
    "npt", "lbrv", "svm_lock", "nrip_save",
    "tsc_scale", "vmcb_clean",  "flushbyasid", "decodeassists",
    NULL, NULL, "pause_filter", NULL,
    "pfthreshold", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
static const char *cpuid_7_0_ebx_feature_name[] = {
    "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
    "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
    "avx512f", NULL, "rdseed", "adx", "smap", NULL, NULL, NULL,
    NULL, NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
};

static const char *cpuid_apm_edx_feature_name[] = {
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    "invtsc", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

static const char *cpuid_xsave_feature_name[] = {
    "xsaveopt", "xsavec", "xgetbv1", "xsaves",
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_XSAVE,
          CPUID_EXT_OSXSAVE, CPUID_EXT_AVX, CPUID_EXT_F16C */

#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES 0
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX)
          /* missing:
          CPUID_7_0_EBX_FSGSBASE, CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_APM_FEATURES 0
typedef struct FeatureWordInfo {
    const char **feat_names;
    uint32_t cpuid_eax;   /* Input EAX for CPUID */
    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;   /* Input ECX value for CPUID */
    int cpuid_reg;        /* output register (R_* constant) */
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
} FeatureWordInfo;

static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
    [FEAT_1_EDX] = {
        .feat_names = feature_name,
        .cpuid_eax = 1, .cpuid_reg = R_EDX,
        .tcg_features = TCG_FEATURES,
    },
    [FEAT_1_ECX] = {
        .feat_names = ext_feature_name,
        .cpuid_eax = 1, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT_FEATURES,
    },
    [FEAT_8000_0001_EDX] = {
        .feat_names = ext2_feature_name,
        .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT2_FEATURES,
    },
    [FEAT_8000_0001_ECX] = {
        .feat_names = ext3_feature_name,
        .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT3_FEATURES,
    },
    [FEAT_C000_0001_EDX] = {
        .feat_names = ext4_feature_name,
        .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT4_FEATURES,
    },
    [FEAT_KVM] = {
        .feat_names = kvm_feature_name,
        .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
        .tcg_features = TCG_KVM_FEATURES,
    },
    [FEAT_SVM] = {
        .feat_names = svm_feature_name,
        .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
        .tcg_features = TCG_SVM_FEATURES,
    },
    [FEAT_7_0_EBX] = {
        .feat_names = cpuid_7_0_ebx_feature_name,
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EBX,
        .tcg_features = TCG_7_0_EBX_FEATURES,
    },
    [FEAT_8000_0007_EDX] = {
        .feat_names = cpuid_apm_edx_feature_name,
        .cpuid_eax = 0x80000007,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_APM_FEATURES,
        .unmigratable_flags = CPUID_APM_INVTSC,
    },
    [FEAT_XSAVE] = {
        .feat_names = cpuid_xsave_feature_name,
        .cpuid_eax = 0xd,
        .cpuid_needs_ecx = true, .cpuid_ecx = 1,
        .cpuid_reg = R_EAX,
    },
};
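/*
 * Illustrative sketch (not part of the original file): how an entry of the
 * feature_word_info[] table above can drive a raw CPUID query against the
 * host.  probe_host_feature_word() is a hypothetical helper name;
 * host_cpuid() is the real helper defined later in this file.
 *
 *     static uint32_t probe_host_feature_word(FeatureWord w)
 *     {
 *         FeatureWordInfo *wi = &feature_word_info[w];
 *         uint32_t regs[4] = { 0, 0, 0, 0 };
 *
 *         host_cpuid(wi->cpuid_eax, wi->cpuid_needs_ecx ? wi->cpuid_ecx : 0,
 *                    &regs[R_EAX], &regs[R_EBX], &regs[R_ECX], &regs[R_EDX]);
 *         return regs[wi->cpuid_reg];
 *     }
 */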
typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;

#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER
typedef struct ExtSaveArea {
    uint32_t feature, bits;
    uint32_t offset, size;
} ExtSaveArea;

static const ExtSaveArea ext_save_areas[] = {
    [2] = { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = 0x240, .size = 0x100 },
    [3] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = 0x3c0, .size = 0x40  },
    [4] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = 0x400, .size = 0x40  },
    [5] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = 0x440, .size = 0x40  },
    [6] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = 0x480, .size = 0x200 },
    [7] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = 0x680, .size = 0x400 },
};
const char *get_register_name_32(unsigned int reg)
{
    if (reg >= CPU_NB_REGS32) {
        return NULL;
    }
    return x86_reg_info_32[reg].name;
}
/* KVM-specific features that are automatically added to all CPU models
 * when KVM is enabled.
 */
static uint32_t kvm_default_features[FEATURE_WORDS] = {
    [FEAT_KVM] = (1 << KVM_FEATURE_CLOCKSOURCE) |
        (1 << KVM_FEATURE_NOP_IO_DELAY) |
        (1 << KVM_FEATURE_CLOCKSOURCE2) |
        (1 << KVM_FEATURE_ASYNC_PF) |
        (1 << KVM_FEATURE_STEAL_TIME) |
        (1 << KVM_FEATURE_PV_EOI) |
        (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT),
    [FEAT_1_ECX] = CPUID_EXT_X2APIC,
};

/* Features that are not added by default to any CPU model when KVM is enabled.
 */
static uint32_t kvm_default_unset_features[FEATURE_WORDS] = {
    [FEAT_1_EDX] = CPUID_ACPI,
    [FEAT_1_ECX] = CPUID_EXT_MONITOR,
    [FEAT_8000_0001_ECX] = CPUID_EXT3_SVM,
};

void x86_cpu_compat_kvm_no_autoenable(FeatureWord w, uint32_t features)
{
    kvm_default_features[w] &= ~features;
}

void x86_cpu_compat_kvm_no_autodisable(FeatureWord w, uint32_t features)
{
    kvm_default_unset_features[w] &= ~features;
}
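/*
 * Illustrative sketch (not from the original file): a machine-type compat
 * hook could use the helpers above to keep an older machine type from
 * picking up newly auto-enabled KVM defaults.  pc_compat_example() is a
 * hypothetical name; the feature-word and bit constants are the real ones
 * used in the tables above.
 *
 *     static void pc_compat_example(MachineState *machine)
 *     {
 *         x86_cpu_compat_kvm_no_autoenable(FEAT_KVM,
 *                                          1 << KVM_FEATURE_PV_EOI);
 *         x86_cpu_compat_kvm_no_autoenable(FEAT_1_ECX, CPUID_EXT_X2APIC);
 *     }
 */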
/*
 * Returns the set of feature flags that are supported and migratable by
 * QEMU, for a given FeatureWord.
 */
static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t r = 0;
    int i;

    for (i = 0; i < 32; i++) {
        uint32_t f = 1U << i;
        /* If the feature name is unknown, it is not supported by QEMU yet */
        if (!wi->feat_names[i]) {
            continue;
        }
        /* Skip features known to QEMU, but explicitly marked as unmigratable */
        if (wi->unmigratable_flags & f) {
            continue;
        }
        r |= f;
    }
    return r;
}
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#else
    abort();
#endif

    if (eax) {
        *eax = vec[0];
    }
    if (ebx) {
        *ebx = vec[1];
    }
    if (ecx) {
        *ecx = vec[2];
    }
    if (edx) {
        *edx = vec[3];
    }
}
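/*
 * Illustrative example (not part of the original file): reading the host
 * vendor string with host_cpuid().  CPUID leaf 0 returns the vendor id in
 * EBX:EDX:ECX, which is the argument order x86_cpu_vendor_words2str()
 * expects.
 *
 *     char vendor[CPUID_VENDOR_SZ + 1];
 *     uint32_t eax, ebx, ecx, edx;
 *
 *     host_cpuid(0, 0, &eax, &ebx, &ecx, &edx);
 *     x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
 *     // vendor now holds e.g. "GenuineIntel" or "AuthenticAMD"
 */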
#define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))

/* general substring compare of *[s1..e1) and *[s2..e2).  sx is start of
 * a substring. ex if !NULL points to the first char after a substring,
 * otherwise the string is assumed to sized by a terminating nul.
 * Return lexical ordering of *s1:*s2.
 */
static int sstrcmp(const char *s1, const char *e1,
                   const char *s2, const char *e2)
{
    for (;;) {
        if (!*s1 || !*s2 || *s1 != *s2) {
            return (*s1 - *s2);
        }
        ++s1, ++s2;
        if (s1 == e1 && s2 == e2) {
            return (0);
        } else if (s1 == e1) {
            return (*s2);
        } else if (s2 == e2) {
            return (*s1);
        }
    }
}
/* compare *[s..e) to *altstr.  *altstr may be a simple string or multiple
 * '|' delimited (possibly empty) strings in which case search for a match
 * within the alternatives proceeds left to right.  Return 0 for success,
 * non-zero otherwise.
 */
static int altcmp(const char *s, const char *e, const char *altstr)
{
    const char *p, *q;

    for (q = p = altstr; ; ) {
        while (*p && *p != '|') {
            p++;
        }
        if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p))) {
            return 0;
        }
        if (!*p) {
            return 1;
        } else {
            q = ++p;
        }
    }
}
/* search featureset for flag *[s..e), if found set corresponding bit in
 * *pval and return true, otherwise return false
 */
static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
                           const char **featureset)
{
    uint32_t mask;
    const char **ppc;
    bool found = false;

    for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
        if (*ppc && !altcmp(s, e, *ppc)) {
            *pval |= mask;
            found = true;
        }
    }
    return found;
}
static void add_flagname_to_bitmaps(const char *flagname,
                                    FeatureWordArray words,
                                    Error **errp)
{
    FeatureWord w;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        if (wi->feat_names &&
            lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
            break;
        }
    }
    if (w == FEATURE_WORDS) {
        error_setg(errp, "CPU feature %s not found", flagname);
    }
}
/* CPU class name definitions: */

#define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
#define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)

/* Return type name for a given CPU model name
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}
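/*
 * Example (assuming TYPE_X86_CPU expands to "x86_64-cpu" on a 64-bit
 * target): x86_cpu_type_name("SandyBridge") returns the heap-allocated
 * string "SandyBridge-x86_64-cpu", which the caller must g_free().
 */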
static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;

    if (cpu_model == NULL) {
        return NULL;
    }

    typename = x86_cpu_type_name(cpu_model);
    oc = object_class_by_name(typename);
    g_free(typename);
    return oc;
}
struct X86CPUDefinition {
    const char *name;
    uint32_t level;
    uint32_t xlevel;
    uint32_t xlevel2;
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features;
    char model_id[48];
    bool cache_info_passthrough;
};
static X86CPUDefinition builtin_x86_defs[] = {
    {
        .vendor = CPUID_VENDOR_AMD,
        .features[FEAT_1_EDX] =
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT,
        .features[FEAT_8000_0001_EDX] =
            (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
            CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
        .xlevel = 0x8000000A,
    },
    {
        .vendor = CPUID_VENDOR_AMD,
        /* Missing: CPUID_HT */
        .features[FEAT_1_EDX] =
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36 | CPUID_VME,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16,
        .features[FEAT_8000_0001_EDX] =
            (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
            CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
        /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
            CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
        /* Missing: CPUID_SVM_LBRV */
        .features[FEAT_SVM] =
            CPUID_SVM_NPT,
        .xlevel = 0x8000001A,
        .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
        .features[FEAT_1_EDX] =
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
        /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
         * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        /* Missing: CPUID_HT */
        .features[FEAT_1_EDX] =
            PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
        /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_CX16,
        /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
        .features[FEAT_8000_0001_EDX] =
            (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
        .features[FEAT_8000_0001_ECX] =
            0,
        .xlevel = 0x80000008,
        .model_id = "Common KVM processor"
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            PPRO_FEATURES,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_POPCNT,
        .xlevel = 0x80000004,
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES,
        .features[FEAT_8000_0001_ECX] =
            0,
        .xlevel = 0x80000008,
        .model_id = "Common 32-bit KVM processor"
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
        .features[FEAT_1_EDX] =
            PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI,
        /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
         * CPUID_EXT_PDCM, CPUID_EXT_VMX */
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_NX,
        .xlevel = 0x80000008,
        .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            I486_FEATURES,
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            PENTIUM_FEATURES,
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            PENTIUM2_FEATURES,
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            PENTIUM3_FEATURES,
    },
    {
        .vendor = CPUID_VENDOR_AMD,
        .features[FEAT_1_EDX] =
            PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR,
        .features[FEAT_8000_0001_EDX] =
            (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
            CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .xlevel = 0x80000008,
    },
    /* original is on level 10 */
    {
        .vendor = CPUID_VENDOR_INTEL,
        /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
        .features[FEAT_1_EDX] =
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
            CPUID_ACPI | CPUID_SS,
            /* Some CPUs got no CPUID_SEP */
        /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2 */
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
        .features[FEAT_8000_0001_EDX] =
            (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES),
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .xlevel = 0x8000000A,
        .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .xlevel = 0x8000000A,
        .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .xlevel = 0x8000000A,
        .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .xlevel = 0x8000000A,
        .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .xlevel = 0x8000000A,
        .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
    },
    {
        .name = "SandyBridge",
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
            CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .xlevel = 0x8000000A,
        .model_id = "Intel Xeon E312xx (Sandy Bridge)",
    },
    {
        .name = "IvyBridge",
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
            CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
            CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .xlevel = 0x8000000A,
        .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .xlevel = 0x8000000A,
        .model_id = "Intel Core Processor (Haswell)",
    },
    {
        .name = "Broadwell",
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .xlevel = 0x8000000A,
        .model_id = "Intel Core Processor (Broadwell)",
    },
    {
        .name = "Opteron_G1",
        .vendor = CPUID_VENDOR_AMD,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
            CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
            CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
            CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
            CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
            CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
        .xlevel = 0x80000008,
        .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
    },
    {
        .name = "Opteron_G2",
        .vendor = CPUID_VENDOR_AMD,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_CX16 | CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
            CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
            CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
            CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
            CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
            CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
            CPUID_EXT2_DE | CPUID_EXT2_FPU,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
        .xlevel = 0x80000008,
        .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
    },
    {
        .name = "Opteron_G3",
        .vendor = CPUID_VENDOR_AMD,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
            CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
            CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
            CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
            CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
            CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
            CPUID_EXT2_DE | CPUID_EXT2_FPU,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
            CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
        .xlevel = 0x80000008,
        .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
    },
    {
        .name = "Opteron_G4",
        .vendor = CPUID_VENDOR_AMD,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
            CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
            CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
            CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
            CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
            CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
            CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
            CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
            CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM,
        .xlevel = 0x8000001A,
        .model_id = "AMD Opteron 62xx class CPU",
    },
    {
        .name = "Opteron_G5",
        .vendor = CPUID_VENDOR_AMD,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
            CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
            CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
            CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
            CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
            CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
            CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
            CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
            CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
            CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
            CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM,
        .xlevel = 0x8000001A,
        .model_id = "AMD Opteron 63xx class CPU",
    },
};
/**
 * x86_cpu_compat_set_features:
 * @cpu_model: CPU model name to be changed. If NULL, all CPU models are changed
 * @w: Identifies the feature word to be changed.
 * @feat_add: Feature bits to be added to feature word
 * @feat_remove: Feature bits to be removed from feature word
 *
 * Change CPU model feature bits for compatibility.
 *
 * This function may be used by machine-type compatibility functions
 * to enable or disable feature bits on specific CPU models.
 */
void x86_cpu_compat_set_features(const char *cpu_model, FeatureWord w,
                                 uint32_t feat_add, uint32_t feat_remove)
{
    X86CPUDefinition *def;
    int i;

    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
        def = &builtin_x86_defs[i];
        if (!cpu_model || !strcmp(cpu_model, def->name)) {
            def->features[w] |= feat_add;
            def->features[w] &= ~feat_remove;
        }
    }
}
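/*
 * Illustrative sketch (not from the original file): a machine-type compat
 * function could strip a flag from every built-in model like this.  The
 * function name pc_compat_example_cpus() is hypothetical.
 *
 *     static void pc_compat_example_cpus(void)
 *     {
 *         // Remove x2apic from all models, add nothing:
 *         x86_cpu_compat_set_features(NULL, FEAT_1_ECX,
 *                                     0, CPUID_EXT_X2APIC);
 *     }
 */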
static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only);
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
    int i;

    for (i = 0; i < 3; i++) {
        host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
        memcpy(str + i * 16 +  0, &eax, 4);
        memcpy(str + i * 16 +  4, &ebx, 4);
        memcpy(str + i * 16 +  8, &ecx, 4);
        memcpy(str + i * 16 + 12, &edx, 4);
    }
    return 0;
}
static X86CPUDefinition host_cpudef;

static Property host_x86_cpu_properties[] = {
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    DEFINE_PROP_END_OF_LIST()
};
/* class_init for the "host" CPU model
 *
 * This function may be called before KVM is initialized.
 */
static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    xcc->kvm_required = true;

    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    host_cpudef.stepping = eax & 0x0F;

    cpu_x86_fill_model_id(host_cpudef.model_id);

    xcc->cpu_def = &host_cpudef;
    host_cpudef.cache_info_passthrough = true;

    /* level, xlevel, xlevel2, and the feature words are initialized on
     * instance_init, because they require KVM to be initialized.
     */

    dc->props = host_x86_cpu_properties;
}
static void host_x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    KVMState *s = kvm_state;

    assert(kvm_enabled());

    /* We can't fill the features array here because we don't know yet if
     * "migratable" is true or false.
     */
    cpu->host_features = true;

    env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
    env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
    env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);

    object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = TYPE_X86_CPU,
    .instance_init = host_x86_cpu_initfn,
    .class_init = host_x86_cpu_class_init,
};
static void report_unavailable_features(FeatureWord w, uint32_t mask)
{
    FeatureWordInfo *f = &feature_word_info[w];
    int i;

    for (i = 0; i < 32; ++i) {
        if (1 << i & mask) {
            const char *reg = get_register_name_32(f->cpuid_reg);
            fprintf(stderr, "warning: %s doesn't support requested feature: "
                    "CPUID.%02XH:%s%s%s [bit %d]\n",
                    kvm_enabled() ? "host" : "TCG",
                    f->cpuid_eax, reg,
                    f->feat_names[i] ? "." : "",
                    f->feat_names[i] ? f->feat_names[i] : "", i);
        }
    }
}
static void x86_cpuid_version_get_family(Object *obj, Visitor *v, void *opaque,
                                         const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = (env->cpuid_version >> 8) & 0xf;
    if (value == 0xf) {
        value += (env->cpuid_version >> 20) & 0xff;
    }
    visit_type_int(v, &value, name, errp);
}

static void x86_cpuid_version_set_family(Object *obj, Visitor *v, void *opaque,
                                         const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff + 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, &value, name, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                  name ? name : "null", value, min, max);
        return;
    }

    env->cpuid_version &= ~0xff00f00;
    if (value > 0x0f) {
        env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
    } else {
        env->cpuid_version |= value << 8;
    }
}
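/*
 * Worked example for the encoding above: setting "family" to 0x15 stores
 * family 0xF in CPUID[1].EAX[11:8] and extended family 0x15 - 0xF = 0x06 in
 * EAX[27:20]; a value of 0xF or below is stored directly in EAX[11:8].
 */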
static void x86_cpuid_version_get_model(Object *obj, Visitor *v, void *opaque,
                                        const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = (env->cpuid_version >> 4) & 0xf;
    value |= ((env->cpuid_version >> 16) & 0xf) << 4;
    visit_type_int(v, &value, name, errp);
}

static void x86_cpuid_version_set_model(Object *obj, Visitor *v, void *opaque,
                                        const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, &value, name, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                  name ? name : "null", value, min, max);
        return;
    }

    env->cpuid_version &= ~0xf00f0;
    env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
}
static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
                                           void *opaque, const char *name,
                                           Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = env->cpuid_version & 0xf;
    visit_type_int(v, &value, name, errp);
}

static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
                                           void *opaque, const char *name,
                                           Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, &value, name, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                  name ? name : "null", value, min, max);
        return;
    }

    env->cpuid_version &= ~0xf;
    env->cpuid_version |= value & 0xf;
}
static void x86_cpuid_get_level(Object *obj, Visitor *v, void *opaque,
                                const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);

    visit_type_uint32(v, &cpu->env.cpuid_level, name, errp);
}

static void x86_cpuid_set_level(Object *obj, Visitor *v, void *opaque,
                                const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);

    visit_type_uint32(v, &cpu->env.cpuid_level, name, errp);
}

static void x86_cpuid_get_xlevel(Object *obj, Visitor *v, void *opaque,
                                 const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);

    visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp);
}

static void x86_cpuid_set_xlevel(Object *obj, Visitor *v, void *opaque,
                                 const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);

    visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp);
}
static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    char *value;

    value = g_malloc(CPUID_VENDOR_SZ + 1);
    x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
                             env->cpuid_vendor3);
    return value;
}
static void x86_cpuid_set_vendor(Object *obj, const char *value,
                                 Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int i;

    if (strlen(value) != CPUID_VENDOR_SZ) {
        error_set(errp, QERR_PROPERTY_VALUE_BAD, "",
                  "vendor", value);
        return;
    }

    env->cpuid_vendor1 = 0;
    env->cpuid_vendor2 = 0;
    env->cpuid_vendor3 = 0;
    for (i = 0; i < 4; i++) {
        env->cpuid_vendor1 |= ((uint8_t)value[i]) << (8 * i);
        env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
        env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
    }
}
static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    char *value;
    int i;

    value = g_malloc(48 + 1);
    for (i = 0; i < 48; i++) {
        value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
    }
    value[48] = '\0';
    return value;
}
static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
                                   Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int c, len, i;

    if (model_id == NULL) {
        model_id = "";
    }
    len = strlen(model_id);
    memset(env->cpuid_model, 0, 48);
    for (i = 0; i < 48; i++) {
        if (i >= len) {
            c = '\0';
        } else {
            c = (uint8_t)model_id[i];
        }
        env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
    }
}
static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, void *opaque,
                                   const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    int64_t value;

    value = cpu->env.tsc_khz * 1000;
    visit_type_int(v, &value, name, errp);
}
static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, void *opaque,
                                   const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    const int64_t min = 0;
    const int64_t max = INT64_MAX;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, &value, name, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                  name ? name : "null", value, min, max);
        return;
    }

    cpu->env.tsc_khz = value / 1000;
}
static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, void *opaque,
                                  const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    int64_t value = cpu->env.cpuid_apic_id;

    visit_type_int(v, &value, name, errp);
}
static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, void *opaque,
                                  const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    DeviceState *dev = DEVICE(obj);
    const int64_t min = 0;
    const int64_t max = UINT32_MAX;
    Error *error = NULL;
    int64_t value;

    if (dev->realized) {
        error_setg(errp, "Attempt to set property '%s' on '%s' after "
                   "it was realized", name, object_get_typename(obj));
        return;
    }

    visit_type_int(v, &value, name, &error);
    if (error) {
        error_propagate(errp, error);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, "Property %s.%s doesn't take value %" PRId64
                   " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
                   object_get_typename(obj), name, value, min, max);
        return;
    }

    if ((value != cpu->env.cpuid_apic_id) && cpu_exists(value)) {
        error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
        return;
    }
    cpu->env.cpuid_apic_id = value;
}
/* Generic getter for "feature-words" and "filtered-features" properties */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v, void *opaque,
                                      const char *name, Error **errp)
{
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    Error *err = NULL;
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        qwi->cpuid_input_eax = wi->cpuid_eax;
        qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid_ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v, &list, "feature-words", &err);
    error_propagate(errp, err);
}
static void x86_get_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
                                 const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    int64_t value = cpu->hyperv_spinlock_attempts;

    visit_type_int(v, &value, name, errp);
}
static void x86_set_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
                                 const char *name, Error **errp)
{
    const int64_t min = 0xFFF;
    const int64_t max = UINT_MAX;
    X86CPU *cpu = X86_CPU(obj);
    Error *err = NULL;
    int64_t value;

    visit_type_int(v, &value, name, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    if (value < min || value > max) {
        error_setg(errp, "Property %s.%s doesn't take value %" PRId64
                   " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
                   object_get_typename(obj), name ? name : "null",
                   value, min, max);
        return;
    }
    cpu->hyperv_spinlock_attempts = value;
}
static PropertyInfo qdev_prop_spinlocks = {
    .get = x86_get_hv_spinlocks,
    .set = x86_set_hv_spinlocks,
};
/* Convert all '_' in a feature string option name to '-', to make feature
 * name conform to QOM property naming rule, which uses '-' instead of '_'.
 */
static inline void feat2prop(char *s)
{
    while ((s = strchr(s, '_'))) {
        *s = '-';
    }
}
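/*
 * Example: feat2prop() rewrites a user-supplied key such as "tsc_freq" in
 * place to "tsc-freq", so the legacy '_' spelling still matches the
 * comparisons done in the feature-string parser below.
 */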
/* Parse "+feature,-feature,feature=foo" CPU feature string
 */
static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
                                     Error **errp)
{
    X86CPU *cpu = X86_CPU(cs);
    char *featurestr; /* Single 'key=value" string being parsed */
    FeatureWord w;
    /* Features to be added */
    FeatureWordArray plus_features = { 0 };
    /* Features to be removed */
    FeatureWordArray minus_features = { 0 };
    uint32_t numvalue;
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;

    featurestr = features ? strtok(features, ",") : NULL;

    while (featurestr) {
        char *val;
        if (featurestr[0] == '+') {
            add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
        } else if (featurestr[0] == '-') {
            add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
        } else if ((val = strchr(featurestr, '='))) {
            *val = 0;
            val++;
            feat2prop(featurestr);
            if (!strcmp(featurestr, "xlevel")) {
                char *err;
                char num[32];

                numvalue = strtoul(val, &err, 0);
                if (!*val || *err) {
                    error_setg(errp, "bad numerical value %s", val);
                    return;
                }
                if (numvalue < 0x80000000) {
                    error_report("xlevel value shall always be >= 0x80000000"
                                 ", fixup will be removed in future versions");
                    numvalue += 0x80000000;
                }
                snprintf(num, sizeof(num), "%" PRIu32, numvalue);
                object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
            } else if (!strcmp(featurestr, "tsc-freq")) {
                int64_t tsc_freq;
                char *err;
                char num[32];

                tsc_freq = strtosz_suffix_unit(val, &err,
                                               STRTOSZ_DEFSUFFIX_B, 1000);
                if (tsc_freq < 0 || *err) {
                    error_setg(errp, "bad numerical value %s", val);
                    return;
                }
                snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
                object_property_parse(OBJECT(cpu), num, "tsc-frequency",
                                      &local_err);
            } else if (!strcmp(featurestr, "hv-spinlocks")) {
                char *err;
                char num[32];
                const int min = 0xFFF;

                numvalue = strtoul(val, &err, 0);
                if (!*val || *err) {
                    error_setg(errp, "bad numerical value %s", val);
                    return;
                }
                if (numvalue < min) {
                    error_report("hv-spinlocks value shall always be >= 0x%x"
                                 ", fixup will be removed in future versions",
                                 min);
                    numvalue = min;
                }
                snprintf(num, sizeof(num), "%" PRId32, numvalue);
                object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
            } else {
                object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
            }
        } else {
            feat2prop(featurestr);
            object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
        }
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        featurestr = strtok(NULL, ",");
    }

    if (cpu->host_features) {
        for (w = 0; w < FEATURE_WORDS; w++) {
            env->features[w] =
                x86_cpu_get_supported_feature_word(w, cpu->migratable);
        }
    }

    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] |= plus_features[w];
        env->features[w] &= ~minus_features[w];
    }
}
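/*
 * Illustrative example (not from the original file) of a command line that
 * exercises the parser above, e.g.
 *
 *     -cpu SandyBridge,+avx2,-avx,xlevel=0x8000000A,hv-spinlocks=0x1fff
 *
 * The part after the model name is what reaches x86_cpu_parse_featurestr():
 * "+avx2"/"-avx" go through add_flagname_to_bitmaps(), while "xlevel=..."
 * and "hv-spinlocks=..." take the key=value paths handled explicitly above.
 */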
/* generate a composite string into buf of all cpuid names in featureset
 * selected by fbits.  indicate truncation at bufsize in the event of overflow.
 * if flags, suppress names undefined in featureset.
 */
static void listflags(char *buf, int bufsize, uint32_t fbits,
                      const char **featureset, uint32_t flags)
{
    const char **p = &featureset[31];
    char *q, *b, bit;
    int nc;

    b = 4 <= bufsize ? buf + (bufsize -= 3) - 1 : NULL;
    *buf = '\0';
    for (q = buf, bit = 31; fbits && bufsize; --p, fbits &= ~(1 << bit), --bit)
        if (fbits & 1 << bit && (*p || !flags)) {
            if (*p) {
                nc = snprintf(q, bufsize, "%s%s", q == buf ? "" : " ", *p);
            } else {
                nc = snprintf(q, bufsize, "%s[%d]", q == buf ? "" : " ", bit);
            }
            if (bufsize <= nc) {
                if (b) {
                    memcpy(b, "...", sizeof("..."));
                }
                return;
            }
            q += nc;
            bufsize -= nc;
        }
}
/* generate CPU information. */
void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    X86CPUDefinition *def;
    char buf[256];
    int i;

    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
        def = &builtin_x86_defs[i];
        snprintf(buf, sizeof(buf), "%s", def->name);
        (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
    }
    if (kvm_enabled()) {
        (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
                       "KVM processor with all supported host features "
                       "(only available in KVM mode)");
    }

    (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
    for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
        FeatureWordInfo *fw = &feature_word_info[i];

        listflags(buf, sizeof(buf), (uint32_t)~0, fw->feat_names, 1);
        (*cpu_fprintf)(f, " %s\n", buf);
    }
}
*arch_query_cpu_definitions(Error
**errp
)
1973 CpuDefinitionInfoList
*cpu_list
= NULL
;
1974 X86CPUDefinition
*def
;
1977 for (i
= 0; i
< ARRAY_SIZE(builtin_x86_defs
); i
++) {
1978 CpuDefinitionInfoList
*entry
;
1979 CpuDefinitionInfo
*info
;
1981 def
= &builtin_x86_defs
[i
];
1982 info
= g_malloc0(sizeof(*info
));
1983 info
->name
= g_strdup(def
->name
);
1985 entry
= g_malloc0(sizeof(*entry
));
1986 entry
->value
= info
;
1987 entry
->next
= cpu_list
;
static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t r;

    if (kvm_enabled()) {
        r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
                                         wi->cpuid_ecx,
                                         wi->cpuid_reg);
    } else if (tcg_enabled()) {
        r = wi->tcg_features;
    } else {
        return ~0;
    }
    if (migratable_only) {
        r &= x86_cpu_get_migratable_flags(w);
    }
    return r;
}
/*
 * Filters CPU feature words based on host availability of each feature.
 *
 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
 */
static int x86_cpu_filter_features(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    int rv = 0;

    for (w = 0; w < FEATURE_WORDS; w++) {
        uint32_t host_feat =
            x86_cpu_get_supported_feature_word(w, cpu->migratable);
        uint32_t requested_features = env->features[w];
        env->features[w] &= host_feat;
        cpu->filtered_features[w] = requested_features & ~env->features[w];
        if (cpu->filtered_features[w]) {
            if (cpu->check_cpuid || cpu->enforce_cpuid) {
                report_unavailable_features(w, cpu->filtered_features[w]);
            }
            rv = 1;
        }
    }

    return rv;
}
/* Load data from X86CPUDefinition
 */
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    object_property_set_int(OBJECT(cpu), def->level, "level", errp);
    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
    env->cpuid_xlevel2 = def->xlevel2;
    cpu->cache_info_passthrough = def->cache_info_passthrough;
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* Special cases not set in the X86CPUDefinition structs: */
    if (kvm_enabled()) {
        for (w = 0; w < FEATURE_WORDS; w++) {
            env->features[w] |= kvm_default_features[w];
            env->features[w] &= ~kvm_default_unset_features[w];
        }
    }

    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (kvm_enabled()) {
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
}
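/* Usage note (assumption, not part of the original source): because "vendor"
 * is an ordinary QOM property, the vendor string advertised to the guest can
 * also be overridden from the command line, e.g. something along the lines
 * of "-cpu SomeModel,vendor=GenuineIntel", independently of the host vendor
 * detected above when KVM is enabled.
 */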
X86CPU *cpu_x86_create(const char *cpu_model, DeviceState *icc_bridge,
                       Error **errp)
{
    X86CPU *cpu = NULL;
    X86CPUClass *xcc;
    ObjectClass *oc;
    gchar **model_pieces;
    char *name, *features;
    Error *error = NULL;

    model_pieces = g_strsplit(cpu_model, ",", 2);
    if (!model_pieces[0]) {
        error_setg(&error, "Invalid/empty CPU model name");
        goto out;
    }
    name = model_pieces[0];
    features = model_pieces[1];

    oc = x86_cpu_class_by_name(name);
    if (oc == NULL) {
        error_setg(&error, "Unable to find CPU definition: %s", name);
        goto out;
    }
    xcc = X86_CPU_CLASS(oc);

    if (xcc->kvm_required && !kvm_enabled()) {
        error_setg(&error, "CPU model '%s' requires KVM", name);
        goto out;
    }

    cpu = X86_CPU(object_new(object_class_get_name(oc)));

#ifndef CONFIG_USER_ONLY
    if (icc_bridge == NULL) {
        error_setg(&error, "Invalid icc-bridge value");
        goto out;
    }
    qdev_set_parent_bus(DEVICE(cpu), qdev_get_child_bus(icc_bridge, "icc"));
    object_unref(OBJECT(cpu));
#endif

    x86_cpu_parse_featurestr(CPU(cpu), features, &error);
    if (error) {
        goto out;
    }

out:
    if (error != NULL) {
        error_propagate(errp, error);
        if (cpu != NULL) {
            object_unref(OBJECT(cpu));
            cpu = NULL;
        }
    }
    g_strfreev(model_pieces);
    return cpu;
}
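/* Usage sketch (assumption, not taken from the original source): a caller
 * would typically pass a model string such as "qemu64,+ssse3,-nx" here; the
 * part before the first comma selects the X86CPUClass and the remainder is
 * handed to x86_cpu_parse_featurestr() for the usual +flag/-flag handling.
 */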
X86CPU *cpu_x86_init(const char *cpu_model)
{
    Error *error = NULL;
    X86CPU *cpu;

    cpu = cpu_x86_create(cpu_model, NULL, &error);
    if (error) {
        goto out;
    }

    object_property_set_bool(OBJECT(cpu), true, "realized", &error);

out:
    if (error) {
        error_report_err(error);
        if (cpu != NULL) {
            object_unref(OBJECT(cpu));
            cpu = NULL;
        }
    }
    return cpu;
}
static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
{
    X86CPUDefinition *cpudef = data;
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->cpu_def = cpudef;
}

static void x86_register_cpudef_type(X86CPUDefinition *def)
{
    char *typename = x86_cpu_type_name(def->name);
    TypeInfo ti = {
        .name = typename,
        .parent = TYPE_X86_CPU,
        .class_init = x86_cpu_cpudef_class_init,
        .class_data = def,
    };

    type_register(&ti);
    g_free(typename);
}
#if !defined(CONFIG_USER_ONLY)

void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}

#endif /* !CONFIG_USER_ONLY */
/* Initialize list of CPU models, filling some non-static fields if necessary
 */
void x86_cpudef_setup(void)
{
    int i, j;
    static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };

    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
        X86CPUDefinition *def = &builtin_x86_defs[i];

        /* Look for specific "cpudef" models that */
        /* have the QEMU version in .model_id */
        for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
            if (strcmp(model_with_versions[j], def->name) == 0) {
                pstrcpy(def->model_id, sizeof(def->model_id),
                        "QEMU Virtual CPU version ");
                pstrcat(def->model_id, sizeof(def->model_id),
                        qemu_get_version());
                break;
            }
        }
    }
}
static void get_cpuid_vendor(CPUX86State *env, uint32_t *ebx,
                             uint32_t *ecx, uint32_t *edx)
{
    *ebx = env->cpuid_vendor1;
    *edx = env->cpuid_vendor2;
    *ecx = env->cpuid_vendor3;
}
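/* Background note (added, not in the original source): CPUID leaf 0 returns
 * the 12-byte vendor string split across EBX, EDX, ECX in that order, so
 * "GenuineIntel" is EBX = "Genu", EDX = "ineI", ECX = "ntel"; the
 * cpuid_vendor1/2/3 fields hold those three 32-bit words.
 */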
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel) {
            if (env->cpuid_xlevel2 > 0) {
                /* Handle the Centaur's CPUID instruction. */
                if (index > env->cpuid_xlevel2) {
                    index = env->cpuid_xlevel2;
                } else if (index < 0xC0000000) {
                    index = env->cpuid_xlevel;
                }
            } else {
                /* Intel documentation states that invalid EAX input will
                 * return the same information as EAX=cpuid_level
                 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
                 */
                index = env->cpuid_level;
            }
        }
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        *eax = env->cpuid_level;
        get_cpuid_vendor(env, ebx, ecx, edx);
        break;
    case 1:
        *eax = env->cpuid_version;
        *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= 1 << 28;    /* HTT bit */
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        *ecx = 0;
        *edx = (L1D_DESCRIPTOR << 16) | \
               (L1I_DESCRIPTOR <<  8) | \
               (L2_DESCRIPTOR);
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            *eax &= ~0xFC000000;
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                *eax |= CPUID_4_TYPE_DCACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1D_LINE_SIZE - 1) | \
                       ((L1D_PARTITIONS - 1) << 12) | \
                       ((L1D_ASSOCIATIVITY - 1) << 22);
                *ecx = L1D_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 1: /* L1 icache info */
                *eax |= CPUID_4_TYPE_ICACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1I_LINE_SIZE - 1) | \
                       ((L1I_PARTITIONS - 1) << 12) | \
                       ((L1I_ASSOCIATIVITY - 1) << 22);
                *ecx = L1I_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 2: /* L2 cache info */
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(2) | \
                        CPUID_4_SELF_INIT_LEVEL;
                if (cs->nr_threads > 1) {
                    *eax |= (cs->nr_threads - 1) << 14;
                }
                *ebx = (L2_LINE_SIZE - 1) | \
                       ((L2_PARTITIONS - 1) << 12) | \
                       ((L2_ASSOCIATIVITY - 1) << 22);
                *ecx = L2_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
            }
        }

        /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
        if ((*eax & 31) && cs->nr_cores > 1) {
            *eax |= (cs->nr_cores - 1) << 26;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = 0; /* Reserved */
            *edx = 0; /* Reserved */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xD: {
        KVMState *s = cs->kvm_state;
        uint64_t kvm_mask;
        int i;

        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) || !kvm_enabled()) {
            break;
        }
        kvm_mask =
            kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX) |
            ((uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32);

        if (count == 0) {
            *ecx = 0x240;
            for (i = 2; i < ARRAY_SIZE(ext_save_areas); i++) {
                const ExtSaveArea *esa = &ext_save_areas[i];
                if ((env->features[esa->feature] & esa->bits) == esa->bits &&
                    (kvm_mask & (1 << i)) != 0) {
                    if (i < 32) {
                        *eax |= 1 << i;
                    } else {
                        *edx |= 1 << (i - 32);
                    }
                    *ecx = MAX(*ecx, esa->offset + esa->size);
                }
            }
            *eax |= kvm_mask & (XSTATE_FP | XSTATE_SSE);
            *ebx = *ecx;
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(ext_save_areas)) {
            const ExtSaveArea *esa = &ext_save_areas[count];
            if ((env->features[esa->feature] & esa->bits) == esa->bits &&
                (kvm_mask & (1 << count)) != 0) {
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x80000000:
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            uint32_t tebx, tecx, tedx;
            get_cpuid_vendor(env, &tebx, &tecx, &tedx);
            if (tebx != CPUID_VENDOR_INTEL_1 ||
                tedx != CPUID_VENDOR_INTEL_2 ||
                tecx != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
        *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
               (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
        *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
               (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
        break;
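    /* Added note (not in the original source): in CPUID 0x80000005 the L1
     * cache registers are packed one field per byte: bits 31..24 cache size
     * in KiB, 23..16 associativity, 15..8 lines per tag and 7..0 line size
     * in bytes, which is what the shifts above encode.
     */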
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        *ecx = (L2_SIZE_KB_AMD << 16) | \
               (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
               (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
        *edx = ((L3_SIZE_KB / 512) << 18) | \
               (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
               (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
        break;
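    /* Added note (not in the original source): unlike leaf 0x80000005, this
     * leaf uses the narrower AMD associativity codes (hence AMD_ENC_ASSOC),
     * ECX packs the L2 size in KiB into bits 31..16, and EDX reports the L3
     * size in 512 KiB units in bits 31..18, so the disabled L3 encodes as 0.
     */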
    case 0x80000007:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        /* XXX: This value must match the one used in the MMU code. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor */
            /* XXX: The physical address space is limited to 42 bits in exec.c. */
            *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
        } else {
            if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
                *eax = 0x00000024; /* 36 bits physical */
            } else {
                *eax = 0x00000020; /* 32 bits physical */
            }
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
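    /* Added note (not in the original source): the low byte of EAX is the
     * physical address width and the next byte the linear address width, so
     * 0x00003028 decodes as 0x28 = 40 physical bits and 0x30 = 48 virtual
     * bits, while 0x24 and 0x20 give 36-bit and 32-bit physical spaces.
     */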
    case 0x8000000A:
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xC0000000:
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
/* CPUClass::reset() */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    int i;

    xcc->parent_reset(s);

    memset(env, 0, offsetof(CPUX86State, cpuid_level));

    tlb_flush(s, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;
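    /* Added note (not in the original source): with CS.base = 0xffff0000 and
     * EIP = 0xfff0 set above, execution resumes at the architectural reset
     * vector 0xfffffff0, and EDX holds the CPUID signature
     * (family/model/stepping) as real hardware does after RESET.
     */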
    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    env->xstate_bv = XSTATE_FP | XSTATE_SSE;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    if (s->cpu_index == 0) {
        apic_designate_bsp(cpu->apic_state);
    }

    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
#endif
}
#ifndef CONFIG_USER_ONLY
bool cpu_is_bsp(X86CPU *cpu)
{
    return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
}

/* TODO: remove me, when reset over QOM tree is implemented */
static void x86_cpu_machine_reset_cb(void *opaque)
{
    X86CPU *cpu = opaque;
    cpu_reset(CPU(cpu));
}
#endif
static void mce_init(X86CPU *cpu)
{
    CPUX86State *cenv = &cpu->env;
    unsigned int bank;

    if (((cenv->cpuid_version >> 8) & 0xf) >= 6
        && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
            (CPUID_MCE | CPUID_MCA)) {
        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
        cenv->mcg_ctl = ~(uint64_t)0;
        for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
            cenv->mce_banks[bank * 4] = ~(uint64_t)0;
        }
    }
}
#ifndef CONFIG_USER_ONLY
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    CPUX86State *env = &cpu->env;
    DeviceState *dev = DEVICE(cpu);
    APICCommonState *apic;
    const char *apic_type = "apic";

    if (kvm_irqchip_in_kernel()) {
        apic_type = "kvm-apic";
    } else if (xen_enabled()) {
        apic_type = "xen-apic";
    }

    cpu->apic_state = qdev_try_create(qdev_get_parent_bus(dev), apic_type);
    if (cpu->apic_state == NULL) {
        error_setg(errp, "APIC device '%s' could not be created", apic_type);
        return;
    }

    object_property_add_child(OBJECT(cpu), "apic",
                              OBJECT(cpu->apic_state), NULL);
    qdev_prop_set_uint8(cpu->apic_state, "id", env->cpuid_apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
}
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    if (cpu->apic_state == NULL) {
        return;
    }

    if (qdev_init(cpu->apic_state)) {
        error_setg(errp, "APIC device '%s' could not be initialized",
                   object_get_typename(OBJECT(cpu->apic_state)));
        return;
    }
}
#else
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
#endif /* !CONFIG_USER_ONLY */


#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;

    if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
        env->cpuid_level = 7;
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
            & CPUID_EXT2_AMD_ALIASES);
    }

    if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
        error_setg(&local_err,
                   kvm_enabled() ?
                       "Host doesn't support requested features" :
                       "TCG doesn't support requested features");
        goto out;
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);
    qemu_init_vcpu(cs);

    /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
     * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to give
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't been populated yet and the checking is incorrect.
     */
    if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
        error_report("AMD CPU doesn't support hyperthreading. Please configure"
                     " -smp options properly.");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);
out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
/* Enables contiguous-apic-ID mode, for compatibility */
static bool compat_apic_id_mode;

void enable_compat_apic_id_mode(void)
{
    compat_apic_id_mode = true;
}
/* Calculates initial APIC ID for a specific CPU index
 *
 * Currently we need to be able to calculate the APIC ID from the CPU index
 * alone (without requiring a CPU object), as the QEMU<->Seabios interfaces have
 * no concept of "CPU index", and the NUMA tables on fw_cfg need the APIC ID of
 * all CPUs up to max_cpus.
 */
uint32_t x86_cpu_apic_id_from_index(unsigned int cpu_index)
{
    uint32_t correct_id;
    static bool warned;

    correct_id = x86_apicid_from_cpu_idx(smp_cores, smp_threads, cpu_index);
    if (compat_apic_id_mode) {
        if (cpu_index != correct_id && !warned) {
            error_report("APIC IDs set in compatibility mode, "
                         "CPU topology won't match the configuration");
            warned = true;
        }
        return cpu_index;
    } else {
        return correct_id;
    }
}
static void x86_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    static int inited;

    cs->env_ptr = env;
    cpu_exec_init(env);

    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add(obj, "level", "int",
                        x86_cpuid_get_level,
                        x86_cpuid_set_level, NULL, NULL, NULL);
    object_property_add(obj, "xlevel", "int",
                        x86_cpuid_get_xlevel,
                        x86_cpuid_set_xlevel, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    object_property_add(obj, "apic-id", "int",
                        x86_cpuid_get_apic_id,
                        x86_cpuid_set_apic_id, NULL, NULL, NULL);
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
    env->cpuid_apic_id = x86_cpu_apic_id_from_index(cs->cpu_index);

    x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);

    /* init various static tables used in TCG mode */
    if (tcg_enabled() && !inited) {
        inited = 1;
        optimize_flags_init();
    }
}
static int64_t x86_cpu_get_arch_id(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    return env->cpuid_apic_id;
}

static bool x86_cpu_get_paging_enabled(const CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);

    return cpu->env.cr[0] & CR0_PG_MASK;
}

static void x86_cpu_set_pc(CPUState *cs, vaddr value)
{
    X86CPU *cpu = X86_CPU(cs);

    cpu->env.eip = value;
}

static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
{
    X86CPU *cpu = X86_CPU(cs);

    cpu->env.eip = tb->pc - tb->cs_base;
}

static bool x86_cpu_has_work(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if !defined(CONFIG_USER_ONLY)
    if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
        apic_poll_irq(cpu->apic_state);
        cpu_reset_interrupt(cs, CPU_INTERRUPT_POLL);
    }
#endif

    return ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env->eflags & IF_MASK)) ||
           (cs->interrupt_request & (CPU_INTERRUPT_NMI |
                                     CPU_INTERRUPT_INIT |
                                     CPU_INTERRUPT_SIPI |
                                     CPU_INTERRUPT_MCE));
}
static Property x86_cpu_properties[] = {
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, false),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_END_OF_LIST()
};
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    xcc->parent_realize = dc->realize;
    dc->realize = x86_cpu_realizefn;
    dc->bus_type = TYPE_ICC_BUS;
    dc->props = x86_cpu_properties;

    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
    cc->dump_state = x86_cpu_dump_state;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
#ifndef CONFIG_USER_ONLY
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;
}
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};

static void x86_cpu_register_types(void)
{
    int i;

    type_register_static(&x86_cpu_type_info);
    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
        x86_register_cpudef_type(&builtin_x86_defs[i]);
    }
#ifdef CONFIG_KVM
    type_register_static(&host_x86_cpu_type_info);
#endif
}

type_init(x86_cpu_register_types)