target-i386/cpu.c
1 /*
2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/cutils.h"
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/kvm.h"
25 #include "sysemu/cpus.h"
26 #include "kvm_i386.h"
28 #include "qemu/error-report.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
33 #include "qapi-types.h"
34 #include "qapi-visit.h"
35 #include "qapi/visitor.h"
36 #include "sysemu/arch_init.h"
38 #if defined(CONFIG_KVM)
39 #include <linux/kvm_para.h>
40 #endif
42 #include "sysemu/sysemu.h"
43 #include "hw/qdev-properties.h"
44 #include "hw/i386/topology.h"
45 #ifndef CONFIG_USER_ONLY
46 #include "exec/address-spaces.h"
47 #include "hw/hw.h"
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
50 #endif
53 /* Cache topology CPUID constants: */
55 /* CPUID Leaf 2 Descriptors */
57 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
58 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
59 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
62 /* CPUID Leaf 4 constants: */
64 /* EAX: */
65 #define CPUID_4_TYPE_DCACHE 1
66 #define CPUID_4_TYPE_ICACHE 2
67 #define CPUID_4_TYPE_UNIFIED 3
69 #define CPUID_4_LEVEL(l) ((l) << 5)
71 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
72 #define CPUID_4_FULLY_ASSOC (1 << 9)
74 /* EDX: */
75 #define CPUID_4_NO_INVD_SHARING (1 << 0)
76 #define CPUID_4_INCLUSIVE (1 << 1)
77 #define CPUID_4_COMPLEX_IDX (1 << 2)
79 #define ASSOC_FULL 0xFF
81 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
82 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
83 a == 2 ? 0x2 : \
84 a == 4 ? 0x4 : \
85 a == 8 ? 0x6 : \
86 a == 16 ? 0x8 : \
87 a == 32 ? 0xA : \
88 a == 48 ? 0xB : \
89 a == 64 ? 0xC : \
90 a == 96 ? 0xD : \
91 a == 128 ? 0xE : \
92 a == ASSOC_FULL ? 0xF : \
93 0 /* invalid value */)
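/*
 * Illustrative sketch, not part of the original file: how the
 * AMD_ENC_ASSOC() macro above maps a ways-of-associativity count to the
 * 4-bit encoding reported in CPUID leaf 0x80000006.  The helper name and
 * the fprintf() output are made up for the example; it is not called
 * anywhere.
 */
static void example_print_amd_assoc_encodings(void)
{
    static const unsigned ways[] = { 1, 2, 4, 8, 16, ASSOC_FULL };
    size_t i;

    for (i = 0; i < ARRAY_SIZE(ways); i++) {
        /* e.g. 16 ways encodes as 0x8, ASSOC_FULL (0xFF) as 0xF */
        fprintf(stderr, "%u-way -> 0x%X\n",
                ways[i], (unsigned)AMD_ENC_ASSOC(ways[i]));
    }
}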
96 /* Definitions of the hardcoded cache entries we expose: */
98 /* L1 data cache: */
99 #define L1D_LINE_SIZE 64
100 #define L1D_ASSOCIATIVITY 8
101 #define L1D_SETS 64
102 #define L1D_PARTITIONS 1
103 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
104 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
105 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
106 #define L1D_LINES_PER_TAG 1
107 #define L1D_SIZE_KB_AMD 64
108 #define L1D_ASSOCIATIVITY_AMD 2
110 /* L1 instruction cache: */
111 #define L1I_LINE_SIZE 64
112 #define L1I_ASSOCIATIVITY 8
113 #define L1I_SETS 64
114 #define L1I_PARTITIONS 1
115 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
116 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
117 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
118 #define L1I_LINES_PER_TAG 1
119 #define L1I_SIZE_KB_AMD 64
120 #define L1I_ASSOCIATIVITY_AMD 2
122 /* Level 2 unified cache: */
123 #define L2_LINE_SIZE 64
124 #define L2_ASSOCIATIVITY 16
125 #define L2_SETS 4096
126 #define L2_PARTITIONS 1
127 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
128 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
129 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
130 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
131 #define L2_LINES_PER_TAG 1
132 #define L2_SIZE_KB_AMD 512
134 /* No L3 cache: */
135 #define L3_SIZE_KB 0 /* disabled */
136 #define L3_ASSOCIATIVITY 0 /* disabled */
137 #define L3_LINES_PER_TAG 0 /* disabled */
138 #define L3_LINE_SIZE 0 /* disabled */
140 /* TLB definitions: */
142 #define L1_DTLB_2M_ASSOC 1
143 #define L1_DTLB_2M_ENTRIES 255
144 #define L1_DTLB_4K_ASSOC 1
145 #define L1_DTLB_4K_ENTRIES 255
147 #define L1_ITLB_2M_ASSOC 1
148 #define L1_ITLB_2M_ENTRIES 255
149 #define L1_ITLB_4K_ASSOC 1
150 #define L1_ITLB_4K_ENTRIES 255
152 #define L2_DTLB_2M_ASSOC 0 /* disabled */
153 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
154 #define L2_DTLB_4K_ASSOC 4
155 #define L2_DTLB_4K_ENTRIES 512
157 #define L2_ITLB_2M_ASSOC 0 /* disabled */
158 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
159 #define L2_ITLB_4K_ASSOC 4
160 #define L2_ITLB_4K_ENTRIES 512
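/*
 * Illustrative sketch, not part of the original file: the cache sizes
 * implied by the constants above follow the formula stated in the
 * comments, size = line size * associativity * sets * partitions
 * (32 KiB for L1D/L1I, 4 MiB for L2).  The helper name is made up and
 * the function is not called anywhere.
 */
static void example_check_cache_sizes(void)
{
    unsigned l1d_bytes = L1D_LINE_SIZE * L1D_ASSOCIATIVITY *
                         L1D_SETS * L1D_PARTITIONS;        /* 32768 */
    unsigned l2_bytes = L2_LINE_SIZE * L2_ASSOCIATIVITY *
                        L2_SETS * L2_PARTITIONS;           /* 4194304 */

    fprintf(stderr, "L1D: %u KiB, L2: %u KiB\n",
            l1d_bytes / 1024, l2_bytes / 1024);
}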
164 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
165 uint32_t vendor2, uint32_t vendor3)
166 {
167 int i;
168 for (i = 0; i < 4; i++) {
169 dst[i] = vendor1 >> (8 * i);
170 dst[i + 4] = vendor2 >> (8 * i);
171 dst[i + 8] = vendor3 >> (8 * i);
172 }
173 dst[CPUID_VENDOR_SZ] = '\0';
174 }
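/*
 * Illustrative sketch, not part of the original file: CPUID leaf 0
 * returns the vendor string as three little-endian words in EBX, EDX and
 * ECX (the order used by x86_cpu_vendor_words2str() callers below).
 * This round-trips a 12-character vendor string through the helper
 * above.  The function name is made up and it is not called anywhere.
 */
static void example_vendor_round_trip(void)
{
    const char *vendor = "GenuineIntel";
    uint32_t ebx = 0, edx = 0, ecx = 0;
    char buf[CPUID_VENDOR_SZ + 1];
    int i;

    for (i = 0; i < 4; i++) {
        ebx |= (uint8_t)vendor[i] << (8 * i);
        edx |= (uint8_t)vendor[i + 4] << (8 * i);
        ecx |= (uint8_t)vendor[i + 8] << (8 * i);
    }
    x86_cpu_vendor_words2str(buf, ebx, edx, ecx);
    fprintf(stderr, "%s\n", buf);   /* prints "GenuineIntel" */
}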
176 /* feature flags taken from "Intel Processor Identification and the CPUID
177 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
178 * between feature naming conventions, aliases may be added.
179 */
180 static const char *feature_name[] = {
181 "fpu", "vme", "de", "pse",
182 "tsc", "msr", "pae", "mce",
183 "cx8", "apic", NULL, "sep",
184 "mtrr", "pge", "mca", "cmov",
185 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
186 NULL, "ds" /* Intel dts */, "acpi", "mmx",
187 "fxsr", "sse", "sse2", "ss",
188 "ht" /* Intel htt */, "tm", "ia64", "pbe",
190 static const char *ext_feature_name[] = {
191 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
192 "ds_cpl", "vmx", "smx", "est",
193 "tm2", "ssse3", "cid", NULL,
194 "fma", "cx16", "xtpr", "pdcm",
195 NULL, "pcid", "dca", "sse4.1|sse4_1",
196 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
197 "tsc-deadline", "aes", "xsave", "osxsave",
198 "avx", "f16c", "rdrand", "hypervisor",
200 /* Feature names that are already defined on feature_name[] but are set on
201 * CPUID[8000_0001].EDX on AMD CPUs don't have their names on
202 * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
203 * if and only if CPU vendor is AMD.
204 */
205 static const char *ext2_feature_name[] = {
206 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
207 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
208 NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
209 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
210 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
211 "nx|xd", NULL, "mmxext", NULL /* mmx */,
212 NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
213 NULL, "lm|i64", "3dnowext", "3dnow",
215 static const char *ext3_feature_name[] = {
216 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
217 "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
218 "3dnowprefetch", "osvw", "ibs", "xop",
219 "skinit", "wdt", NULL, "lwp",
220 "fma4", "tce", NULL, "nodeid_msr",
221 NULL, "tbm", "topoext", "perfctr_core",
222 "perfctr_nb", NULL, NULL, NULL,
223 NULL, NULL, NULL, NULL,
226 static const char *ext4_feature_name[] = {
227 NULL, NULL, "xstore", "xstore-en",
228 NULL, NULL, "xcrypt", "xcrypt-en",
229 "ace2", "ace2-en", "phe", "phe-en",
230 "pmm", "pmm-en", NULL, NULL,
231 NULL, NULL, NULL, NULL,
232 NULL, NULL, NULL, NULL,
233 NULL, NULL, NULL, NULL,
234 NULL, NULL, NULL, NULL,
237 static const char *kvm_feature_name[] = {
238 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
239 "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
240 NULL, NULL, NULL, NULL,
241 NULL, NULL, NULL, NULL,
242 NULL, NULL, NULL, NULL,
243 NULL, NULL, NULL, NULL,
244 "kvmclock-stable-bit", NULL, NULL, NULL,
245 NULL, NULL, NULL, NULL,
248 static const char *hyperv_priv_feature_name[] = {
249 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
250 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
251 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
252 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
253 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
254 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
255 NULL, NULL, NULL, NULL,
256 NULL, NULL, NULL, NULL,
257 NULL, NULL, NULL, NULL,
258 NULL, NULL, NULL, NULL,
259 NULL, NULL, NULL, NULL,
262 static const char *hyperv_ident_feature_name[] = {
263 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
264 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
265 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
266 NULL /* hv_create_port */, NULL /* hv_connect_port */,
267 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
268 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
269 NULL, NULL,
270 NULL, NULL, NULL, NULL,
271 NULL, NULL, NULL, NULL,
272 NULL, NULL, NULL, NULL,
273 NULL, NULL, NULL, NULL,
276 static const char *hyperv_misc_feature_name[] = {
277 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
278 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
279 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
280 NULL, NULL,
281 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
282 NULL, NULL, NULL, NULL,
283 NULL, NULL, NULL, NULL,
284 NULL, NULL, NULL, NULL,
285 NULL, NULL, NULL, NULL,
286 NULL, NULL, NULL, NULL,
289 static const char *svm_feature_name[] = {
290 "npt", "lbrv", "svm_lock", "nrip_save",
291 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
292 NULL, NULL, "pause_filter", NULL,
293 "pfthreshold", NULL, NULL, NULL,
294 NULL, NULL, NULL, NULL,
295 NULL, NULL, NULL, NULL,
296 NULL, NULL, NULL, NULL,
297 NULL, NULL, NULL, NULL,
300 static const char *cpuid_7_0_ebx_feature_name[] = {
301 "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
302 "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
303 "avx512f", NULL, "rdseed", "adx", "smap", NULL, "pcommit", "clflushopt",
304 "clwb", NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
307 static const char *cpuid_7_0_ecx_feature_name[] = {
308 NULL, NULL, NULL, "pku",
309 "ospke", NULL, NULL, NULL,
310 NULL, NULL, NULL, NULL,
311 NULL, NULL, NULL, NULL,
312 NULL, NULL, NULL, NULL,
313 NULL, NULL, NULL, NULL,
314 NULL, NULL, NULL, NULL,
315 NULL, NULL, NULL, NULL,
318 static const char *cpuid_apm_edx_feature_name[] = {
319 NULL, NULL, NULL, NULL,
320 NULL, NULL, NULL, NULL,
321 "invtsc", NULL, NULL, NULL,
322 NULL, NULL, NULL, NULL,
323 NULL, NULL, NULL, NULL,
324 NULL, NULL, NULL, NULL,
325 NULL, NULL, NULL, NULL,
326 NULL, NULL, NULL, NULL,
329 static const char *cpuid_xsave_feature_name[] = {
330 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
331 NULL, NULL, NULL, NULL,
332 NULL, NULL, NULL, NULL,
333 NULL, NULL, NULL, NULL,
334 NULL, NULL, NULL, NULL,
335 NULL, NULL, NULL, NULL,
336 NULL, NULL, NULL, NULL,
337 NULL, NULL, NULL, NULL,
340 static const char *cpuid_6_feature_name[] = {
341 NULL, NULL, "arat", NULL,
342 NULL, NULL, NULL, NULL,
343 NULL, NULL, NULL, NULL,
344 NULL, NULL, NULL, NULL,
345 NULL, NULL, NULL, NULL,
346 NULL, NULL, NULL, NULL,
347 NULL, NULL, NULL, NULL,
348 NULL, NULL, NULL, NULL,
351 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
352 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
353 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
354 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
355 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
356 CPUID_PSE36 | CPUID_FXSR)
357 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
358 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
359 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
360 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
361 CPUID_PAE | CPUID_SEP | CPUID_APIC)
363 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
364 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
365 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
366 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
367 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
368 /* partly implemented:
369 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
370 /* missing:
371 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
372 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
373 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
374 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
375 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
376 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
377 /* missing:
378 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
379 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
380 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
381 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
382 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
384 #ifdef TARGET_X86_64
385 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
386 #else
387 #define TCG_EXT2_X86_64_FEATURES 0
388 #endif
390 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
391 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
392 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
393 TCG_EXT2_X86_64_FEATURES)
394 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
395 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
396 #define TCG_EXT4_FEATURES 0
397 #define TCG_SVM_FEATURES 0
398 #define TCG_KVM_FEATURES 0
399 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
400 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
401 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
402 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
403 CPUID_7_0_EBX_ERMS)
404 /* missing:
405 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
406 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
407 CPUID_7_0_EBX_RDSEED */
408 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE)
409 #define TCG_APM_FEATURES 0
410 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
411 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
412 /* missing:
413 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
415 typedef struct FeatureWordInfo {
416 const char **feat_names;
417 uint32_t cpuid_eax; /* Input EAX for CPUID */
418 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
419 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
420 int cpuid_reg; /* output register (R_* constant) */
421 uint32_t tcg_features; /* Feature flags supported by TCG */
422 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
423 } FeatureWordInfo;
425 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
426 [FEAT_1_EDX] = {
427 .feat_names = feature_name,
428 .cpuid_eax = 1, .cpuid_reg = R_EDX,
429 .tcg_features = TCG_FEATURES,
431 [FEAT_1_ECX] = {
432 .feat_names = ext_feature_name,
433 .cpuid_eax = 1, .cpuid_reg = R_ECX,
434 .tcg_features = TCG_EXT_FEATURES,
436 [FEAT_8000_0001_EDX] = {
437 .feat_names = ext2_feature_name,
438 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
439 .tcg_features = TCG_EXT2_FEATURES,
441 [FEAT_8000_0001_ECX] = {
442 .feat_names = ext3_feature_name,
443 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
444 .tcg_features = TCG_EXT3_FEATURES,
446 [FEAT_C000_0001_EDX] = {
447 .feat_names = ext4_feature_name,
448 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
449 .tcg_features = TCG_EXT4_FEATURES,
451 [FEAT_KVM] = {
452 .feat_names = kvm_feature_name,
453 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
454 .tcg_features = TCG_KVM_FEATURES,
456 [FEAT_HYPERV_EAX] = {
457 .feat_names = hyperv_priv_feature_name,
458 .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
460 [FEAT_HYPERV_EBX] = {
461 .feat_names = hyperv_ident_feature_name,
462 .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
464 [FEAT_HYPERV_EDX] = {
465 .feat_names = hyperv_misc_feature_name,
466 .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
468 [FEAT_SVM] = {
469 .feat_names = svm_feature_name,
470 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
471 .tcg_features = TCG_SVM_FEATURES,
473 [FEAT_7_0_EBX] = {
474 .feat_names = cpuid_7_0_ebx_feature_name,
475 .cpuid_eax = 7,
476 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
477 .cpuid_reg = R_EBX,
478 .tcg_features = TCG_7_0_EBX_FEATURES,
480 [FEAT_7_0_ECX] = {
481 .feat_names = cpuid_7_0_ecx_feature_name,
482 .cpuid_eax = 7,
483 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
484 .cpuid_reg = R_ECX,
485 .tcg_features = TCG_7_0_ECX_FEATURES,
487 [FEAT_8000_0007_EDX] = {
488 .feat_names = cpuid_apm_edx_feature_name,
489 .cpuid_eax = 0x80000007,
490 .cpuid_reg = R_EDX,
491 .tcg_features = TCG_APM_FEATURES,
492 .unmigratable_flags = CPUID_APM_INVTSC,
494 [FEAT_XSAVE] = {
495 .feat_names = cpuid_xsave_feature_name,
496 .cpuid_eax = 0xd,
497 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
498 .cpuid_reg = R_EAX,
499 .tcg_features = TCG_XSAVE_FEATURES,
501 [FEAT_6_EAX] = {
502 .feat_names = cpuid_6_feature_name,
503 .cpuid_eax = 6, .cpuid_reg = R_EAX,
504 .tcg_features = TCG_6_EAX_FEATURES,
505 },
506 };
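/*
 * Illustrative sketch, not part of the original file: each
 * FeatureWordInfo entry above records where its 32 feature bits come
 * from (CPUID input EAX, optional input ECX, output register) and which
 * bit names QEMU knows about.  This walk only counts the named bits; the
 * function name is made up and it is not called anywhere.
 */
static void example_dump_feature_words(void)
{
    FeatureWord w;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        int i, named = 0;

        for (i = 0; i < 32; i++) {
            if (wi->feat_names && wi->feat_names[i]) {
                named++;
            }
        }
        fprintf(stderr, "word %d: CPUID.%02XH%s reg %d, %d named bits\n",
                (int)w, wi->cpuid_eax,
                wi->cpuid_needs_ecx ? " (ECX-indexed)" : "",
                wi->cpuid_reg, named);
    }
}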
508 typedef struct X86RegisterInfo32 {
509 /* Name of register */
510 const char *name;
511 /* QAPI enum value register */
512 X86CPURegister32 qapi_enum;
513 } X86RegisterInfo32;
515 #define REGISTER(reg) \
516 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
517 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
518 REGISTER(EAX),
519 REGISTER(ECX),
520 REGISTER(EDX),
521 REGISTER(EBX),
522 REGISTER(ESP),
523 REGISTER(EBP),
524 REGISTER(ESI),
525 REGISTER(EDI),
527 #undef REGISTER
529 const ExtSaveArea x86_ext_save_areas[] = {
530 [XSTATE_YMM_BIT] =
531 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
532 .offset = offsetof(X86XSaveArea, avx_state),
533 .size = sizeof(XSaveAVX) },
534 [XSTATE_BNDREGS_BIT] =
535 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
536 .offset = offsetof(X86XSaveArea, bndreg_state),
537 .size = sizeof(XSaveBNDREG) },
538 [XSTATE_BNDCSR_BIT] =
539 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
540 .offset = offsetof(X86XSaveArea, bndcsr_state),
541 .size = sizeof(XSaveBNDCSR) },
542 [XSTATE_OPMASK_BIT] =
543 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
544 .offset = offsetof(X86XSaveArea, opmask_state),
545 .size = sizeof(XSaveOpmask) },
546 [XSTATE_ZMM_Hi256_BIT] =
547 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
548 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
549 .size = sizeof(XSaveZMM_Hi256) },
550 [XSTATE_Hi16_ZMM_BIT] =
551 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
552 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
553 .size = sizeof(XSaveHi16_ZMM) },
554 [XSTATE_PKRU_BIT] =
555 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
556 .offset = offsetof(X86XSaveArea, pkru_state),
557 .size = sizeof(XSavePKRU) },
558 };
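/*
 * Illustrative sketch, not part of the original file: given a filled-in
 * feature word array, the x86_ext_save_areas[] table above determines
 * which XSAVE state components a CPU can expose.  This is only an
 * approximation of the logic QEMU applies elsewhere; the function name
 * and the "features" parameter are made up for the example.
 */
static uint64_t example_supported_xstate_mask(const FeatureWordArray features)
{
    uint64_t mask = 0;
    size_t i;

    for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];

        if (esa->bits && (features[esa->feature] & esa->bits) == esa->bits) {
            mask |= 1ULL << i;      /* component i, e.g. XSTATE_YMM_BIT */
        }
    }
    return mask;
}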
560 const char *get_register_name_32(unsigned int reg)
561 {
562 if (reg >= CPU_NB_REGS32) {
563 return NULL;
564 }
565 return x86_reg_info_32[reg].name;
566 }
568 /*
569 * Returns the set of feature flags that are supported and migratable by
570 * QEMU, for a given FeatureWord.
571 */
572 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
574 FeatureWordInfo *wi = &feature_word_info[w];
575 uint32_t r = 0;
576 int i;
578 for (i = 0; i < 32; i++) {
579 uint32_t f = 1U << i;
580 /* If the feature name is unknown, it is not supported by QEMU yet */
581 if (!wi->feat_names[i]) {
582 continue;
584 /* Skip features known to QEMU, but explicitly marked as unmigratable */
585 if (wi->unmigratable_flags & f) {
586 continue;
588 r |= f;
590 return r;
593 void host_cpuid(uint32_t function, uint32_t count,
594 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
595 {
596 uint32_t vec[4];
598 #ifdef __x86_64__
599 asm volatile("cpuid"
600 : "=a"(vec[0]), "=b"(vec[1]),
601 "=c"(vec[2]), "=d"(vec[3])
602 : "0"(function), "c"(count) : "cc");
603 #elif defined(__i386__)
604 asm volatile("pusha \n\t"
605 "cpuid \n\t"
606 "mov %%eax, 0(%2) \n\t"
607 "mov %%ebx, 4(%2) \n\t"
608 "mov %%ecx, 8(%2) \n\t"
609 "mov %%edx, 12(%2) \n\t"
610 "popa"
611 : : "a"(function), "c"(count), "S"(vec)
612 : "memory", "cc");
613 #else
614 abort();
615 #endif
617 if (eax)
618 *eax = vec[0];
619 if (ebx)
620 *ebx = vec[1];
621 if (ecx)
622 *ecx = vec[2];
623 if (edx)
624 *edx = vec[3];
625 }
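/*
 * Illustrative sketch, not part of the original file: typical use of
 * host_cpuid() above, mirroring what the KVM "host" CPU model setup
 * later in this file does.  Only meaningful on an x86 host (host_cpuid()
 * aborts otherwise); the function name is made up and it is not called
 * anywhere.
 */
static void example_query_host(void)
{
    uint32_t eax, ebx, ecx, edx;
    char vendor[CPUID_VENDOR_SZ + 1];

    /* Leaf 0: max standard leaf in EAX, vendor string in EBX/EDX/ECX */
    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
    fprintf(stderr, "vendor %s, max standard leaf 0x%x\n", vendor, eax);

    /* Leaf 0x80000000: max extended leaf ("xlevel") in EAX */
    host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
    fprintf(stderr, "max extended leaf 0x%x\n", eax);
}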
627 #define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
629 /* general substring compare of *[s1..e1) and *[s2..e2). sx is the start of
630 * a substring; ex, if not NULL, points to the first char after the substring,
631 * otherwise the string is assumed to be sized by a terminating NUL.
632 * Return the lexical ordering of *s1:*s2.
633 */
634 static int sstrcmp(const char *s1, const char *e1,
635 const char *s2, const char *e2)
636 {
637 for (;;) {
638 if (!*s1 || !*s2 || *s1 != *s2)
639 return (*s1 - *s2);
640 ++s1, ++s2;
641 if (s1 == e1 && s2 == e2)
642 return (0);
643 else if (s1 == e1)
644 return (*s2);
645 else if (s2 == e2)
646 return (*s1);
647 }
648 }
650 /* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
651 * '|' delimited (possibly empty) strings in which case search for a match
652 * within the alternatives proceeds left to right. Return 0 for success,
653 * non-zero otherwise.
654 */
655 static int altcmp(const char *s, const char *e, const char *altstr)
656 {
657 const char *p, *q;
659 for (q = p = altstr; ; ) {
660 while (*p && *p != '|')
661 ++p;
662 if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
663 return (0);
664 if (!*p)
665 return (1);
666 else
667 q = ++p;
668 }
669 }
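/*
 * Illustrative sketch, not part of the original file: altcmp() above
 * returns 0 when the flag name matches any of the '|'-separated
 * alternatives, which is how aliases such as "sse4.1|sse4_1" in
 * ext_feature_name[] accept either spelling.  Passing NULL for 'e' means
 * the flag name is NUL-terminated.  The function name is made up and it
 * is not called anywhere.
 */
static void example_alias_match(void)
{
    const char *spec = "sse4.1|sse4_1";

    fprintf(stderr, "sse4.1: %d, sse4_1: %d, sse41: %d\n",
            altcmp("sse4.1", NULL, spec),   /* 0 (match) */
            altcmp("sse4_1", NULL, spec),   /* 0 (match) */
            altcmp("sse41", NULL, spec));   /* non-zero (no match) */
}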
671 /* search featureset for flag *[s..e), if found set corresponding bit in
672 * *pval and return true, otherwise return false
673 */
674 static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
675 const char **featureset)
676 {
677 uint32_t mask;
678 const char **ppc;
679 bool found = false;
681 for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
682 if (*ppc && !altcmp(s, e, *ppc)) {
683 *pval |= mask;
684 found = true;
685 }
686 }
687 return found;
688 }
690 static void add_flagname_to_bitmaps(const char *flagname,
691 FeatureWordArray words,
692 Error **errp)
693 {
694 FeatureWord w;
695 for (w = 0; w < FEATURE_WORDS; w++) {
696 FeatureWordInfo *wi = &feature_word_info[w];
697 if (wi->feat_names &&
698 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
699 break;
700 }
701 }
702 if (w == FEATURE_WORDS) {
703 error_setg(errp, "CPU feature %s not found", flagname);
704 }
705 }
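/*
 * Illustrative sketch, not part of the original file: resolving a single
 * flag name into a (feature word, bit mask) pair via
 * add_flagname_to_bitmaps() above.  The function name is made up and it
 * is not called anywhere.
 */
static void example_parse_flag(const char *flagname)
{
    FeatureWordArray plus_features = { 0 };
    Error *err = NULL;
    FeatureWord w;

    add_flagname_to_bitmaps(flagname, plus_features, &err);
    if (err) {
        error_report_err(err);
        return;
    }
    for (w = 0; w < FEATURE_WORDS; w++) {
        if (plus_features[w]) {
            fprintf(stderr, "%s -> feature word %d, bits 0x%08x\n",
                    flagname, (int)w, plus_features[w]);
        }
    }
}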
707 /* CPU class name definitions: */
709 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
710 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
712 /* Return type name for a given CPU model name
713 * Caller is responsible for freeing the returned string.
715 static char *x86_cpu_type_name(const char *model_name)
717 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
720 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
722 ObjectClass *oc;
723 char *typename;
725 if (cpu_model == NULL) {
726 return NULL;
729 typename = x86_cpu_type_name(cpu_model);
730 oc = object_class_by_name(typename);
731 g_free(typename);
732 return oc;
735 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
737 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
738 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
739 return g_strndup(class_name,
740 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
743 struct X86CPUDefinition {
744 const char *name;
745 uint32_t level;
746 uint32_t xlevel;
747 uint32_t xlevel2;
748 /* vendor is zero-terminated, 12 character ASCII string */
749 char vendor[CPUID_VENDOR_SZ + 1];
750 int family;
751 int model;
752 int stepping;
753 FeatureWordArray features;
754 char model_id[48];
757 static X86CPUDefinition builtin_x86_defs[] = {
759 .name = "qemu64",
760 .level = 0xd,
761 .vendor = CPUID_VENDOR_AMD,
762 .family = 6,
763 .model = 6,
764 .stepping = 3,
765 .features[FEAT_1_EDX] =
766 PPRO_FEATURES |
767 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
768 CPUID_PSE36,
769 .features[FEAT_1_ECX] =
770 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
771 .features[FEAT_8000_0001_EDX] =
772 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
773 .features[FEAT_8000_0001_ECX] =
774 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
775 .xlevel = 0x8000000A,
776 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
779 .name = "phenom",
780 .level = 5,
781 .vendor = CPUID_VENDOR_AMD,
782 .family = 16,
783 .model = 2,
784 .stepping = 3,
785 /* Missing: CPUID_HT */
786 .features[FEAT_1_EDX] =
787 PPRO_FEATURES |
788 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
789 CPUID_PSE36 | CPUID_VME,
790 .features[FEAT_1_ECX] =
791 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
792 CPUID_EXT_POPCNT,
793 .features[FEAT_8000_0001_EDX] =
794 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
795 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
796 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
797 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
798 CPUID_EXT3_CR8LEG,
799 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
800 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
801 .features[FEAT_8000_0001_ECX] =
802 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
803 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
804 /* Missing: CPUID_SVM_LBRV */
805 .features[FEAT_SVM] =
806 CPUID_SVM_NPT,
807 .xlevel = 0x8000001A,
808 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
811 .name = "core2duo",
812 .level = 10,
813 .vendor = CPUID_VENDOR_INTEL,
814 .family = 6,
815 .model = 15,
816 .stepping = 11,
817 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
818 .features[FEAT_1_EDX] =
819 PPRO_FEATURES |
820 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
821 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
822 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
823 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
824 .features[FEAT_1_ECX] =
825 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
826 CPUID_EXT_CX16,
827 .features[FEAT_8000_0001_EDX] =
828 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
829 .features[FEAT_8000_0001_ECX] =
830 CPUID_EXT3_LAHF_LM,
831 .xlevel = 0x80000008,
832 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
835 .name = "kvm64",
836 .level = 0xd,
837 .vendor = CPUID_VENDOR_INTEL,
838 .family = 15,
839 .model = 6,
840 .stepping = 1,
841 /* Missing: CPUID_HT */
842 .features[FEAT_1_EDX] =
843 PPRO_FEATURES | CPUID_VME |
844 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
845 CPUID_PSE36,
846 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
847 .features[FEAT_1_ECX] =
848 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
849 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
850 .features[FEAT_8000_0001_EDX] =
851 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
852 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
853 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
854 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
855 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
856 .features[FEAT_8000_0001_ECX] =
858 .xlevel = 0x80000008,
859 .model_id = "Common KVM processor"
862 .name = "qemu32",
863 .level = 4,
864 .vendor = CPUID_VENDOR_INTEL,
865 .family = 6,
866 .model = 6,
867 .stepping = 3,
868 .features[FEAT_1_EDX] =
869 PPRO_FEATURES,
870 .features[FEAT_1_ECX] =
871 CPUID_EXT_SSE3,
872 .xlevel = 0x80000004,
873 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
876 .name = "kvm32",
877 .level = 5,
878 .vendor = CPUID_VENDOR_INTEL,
879 .family = 15,
880 .model = 6,
881 .stepping = 1,
882 .features[FEAT_1_EDX] =
883 PPRO_FEATURES | CPUID_VME |
884 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
885 .features[FEAT_1_ECX] =
886 CPUID_EXT_SSE3,
887 .features[FEAT_8000_0001_ECX] =
889 .xlevel = 0x80000008,
890 .model_id = "Common 32-bit KVM processor"
893 .name = "coreduo",
894 .level = 10,
895 .vendor = CPUID_VENDOR_INTEL,
896 .family = 6,
897 .model = 14,
898 .stepping = 8,
899 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
900 .features[FEAT_1_EDX] =
901 PPRO_FEATURES | CPUID_VME |
902 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
903 CPUID_SS,
904 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
905 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
906 .features[FEAT_1_ECX] =
907 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
908 .features[FEAT_8000_0001_EDX] =
909 CPUID_EXT2_NX,
910 .xlevel = 0x80000008,
911 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
914 .name = "486",
915 .level = 1,
916 .vendor = CPUID_VENDOR_INTEL,
917 .family = 4,
918 .model = 8,
919 .stepping = 0,
920 .features[FEAT_1_EDX] =
921 I486_FEATURES,
922 .xlevel = 0,
925 .name = "pentium",
926 .level = 1,
927 .vendor = CPUID_VENDOR_INTEL,
928 .family = 5,
929 .model = 4,
930 .stepping = 3,
931 .features[FEAT_1_EDX] =
932 PENTIUM_FEATURES,
933 .xlevel = 0,
936 .name = "pentium2",
937 .level = 2,
938 .vendor = CPUID_VENDOR_INTEL,
939 .family = 6,
940 .model = 5,
941 .stepping = 2,
942 .features[FEAT_1_EDX] =
943 PENTIUM2_FEATURES,
944 .xlevel = 0,
947 .name = "pentium3",
948 .level = 3,
949 .vendor = CPUID_VENDOR_INTEL,
950 .family = 6,
951 .model = 7,
952 .stepping = 3,
953 .features[FEAT_1_EDX] =
954 PENTIUM3_FEATURES,
955 .xlevel = 0,
958 .name = "athlon",
959 .level = 2,
960 .vendor = CPUID_VENDOR_AMD,
961 .family = 6,
962 .model = 2,
963 .stepping = 3,
964 .features[FEAT_1_EDX] =
965 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
966 CPUID_MCA,
967 .features[FEAT_8000_0001_EDX] =
968 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
969 .xlevel = 0x80000008,
970 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
973 .name = "n270",
974 .level = 10,
975 .vendor = CPUID_VENDOR_INTEL,
976 .family = 6,
977 .model = 28,
978 .stepping = 2,
979 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
980 .features[FEAT_1_EDX] =
981 PPRO_FEATURES |
982 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
983 CPUID_ACPI | CPUID_SS,
984 /* Some CPUs have no CPUID_SEP */
985 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
986 * CPUID_EXT_XTPR */
987 .features[FEAT_1_ECX] =
988 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
989 CPUID_EXT_MOVBE,
990 .features[FEAT_8000_0001_EDX] =
991 CPUID_EXT2_NX,
992 .features[FEAT_8000_0001_ECX] =
993 CPUID_EXT3_LAHF_LM,
994 .xlevel = 0x80000008,
995 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
998 .name = "Conroe",
999 .level = 10,
1000 .vendor = CPUID_VENDOR_INTEL,
1001 .family = 6,
1002 .model = 15,
1003 .stepping = 3,
1004 .features[FEAT_1_EDX] =
1005 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1006 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1007 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1008 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1009 CPUID_DE | CPUID_FP87,
1010 .features[FEAT_1_ECX] =
1011 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1012 .features[FEAT_8000_0001_EDX] =
1013 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1014 .features[FEAT_8000_0001_ECX] =
1015 CPUID_EXT3_LAHF_LM,
1016 .xlevel = 0x80000008,
1017 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1020 .name = "Penryn",
1021 .level = 10,
1022 .vendor = CPUID_VENDOR_INTEL,
1023 .family = 6,
1024 .model = 23,
1025 .stepping = 3,
1026 .features[FEAT_1_EDX] =
1027 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1028 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1029 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1030 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1031 CPUID_DE | CPUID_FP87,
1032 .features[FEAT_1_ECX] =
1033 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1034 CPUID_EXT_SSE3,
1035 .features[FEAT_8000_0001_EDX] =
1036 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1037 .features[FEAT_8000_0001_ECX] =
1038 CPUID_EXT3_LAHF_LM,
1039 .xlevel = 0x80000008,
1040 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1043 .name = "Nehalem",
1044 .level = 11,
1045 .vendor = CPUID_VENDOR_INTEL,
1046 .family = 6,
1047 .model = 26,
1048 .stepping = 3,
1049 .features[FEAT_1_EDX] =
1050 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1051 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1052 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1053 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1054 CPUID_DE | CPUID_FP87,
1055 .features[FEAT_1_ECX] =
1056 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1057 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1058 .features[FEAT_8000_0001_EDX] =
1059 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1060 .features[FEAT_8000_0001_ECX] =
1061 CPUID_EXT3_LAHF_LM,
1062 .xlevel = 0x80000008,
1063 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1066 .name = "Westmere",
1067 .level = 11,
1068 .vendor = CPUID_VENDOR_INTEL,
1069 .family = 6,
1070 .model = 44,
1071 .stepping = 1,
1072 .features[FEAT_1_EDX] =
1073 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1074 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1075 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1076 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1077 CPUID_DE | CPUID_FP87,
1078 .features[FEAT_1_ECX] =
1079 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1080 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1081 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1082 .features[FEAT_8000_0001_EDX] =
1083 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1084 .features[FEAT_8000_0001_ECX] =
1085 CPUID_EXT3_LAHF_LM,
1086 .features[FEAT_6_EAX] =
1087 CPUID_6_EAX_ARAT,
1088 .xlevel = 0x80000008,
1089 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1092 .name = "SandyBridge",
1093 .level = 0xd,
1094 .vendor = CPUID_VENDOR_INTEL,
1095 .family = 6,
1096 .model = 42,
1097 .stepping = 1,
1098 .features[FEAT_1_EDX] =
1099 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1100 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1101 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1102 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1103 CPUID_DE | CPUID_FP87,
1104 .features[FEAT_1_ECX] =
1105 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1106 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1107 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1108 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1109 CPUID_EXT_SSE3,
1110 .features[FEAT_8000_0001_EDX] =
1111 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1112 CPUID_EXT2_SYSCALL,
1113 .features[FEAT_8000_0001_ECX] =
1114 CPUID_EXT3_LAHF_LM,
1115 .features[FEAT_XSAVE] =
1116 CPUID_XSAVE_XSAVEOPT,
1117 .features[FEAT_6_EAX] =
1118 CPUID_6_EAX_ARAT,
1119 .xlevel = 0x80000008,
1120 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1123 .name = "IvyBridge",
1124 .level = 0xd,
1125 .vendor = CPUID_VENDOR_INTEL,
1126 .family = 6,
1127 .model = 58,
1128 .stepping = 9,
1129 .features[FEAT_1_EDX] =
1130 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1131 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1132 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1133 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1134 CPUID_DE | CPUID_FP87,
1135 .features[FEAT_1_ECX] =
1136 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1137 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1138 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1139 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1140 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1141 .features[FEAT_7_0_EBX] =
1142 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1143 CPUID_7_0_EBX_ERMS,
1144 .features[FEAT_8000_0001_EDX] =
1145 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1146 CPUID_EXT2_SYSCALL,
1147 .features[FEAT_8000_0001_ECX] =
1148 CPUID_EXT3_LAHF_LM,
1149 .features[FEAT_XSAVE] =
1150 CPUID_XSAVE_XSAVEOPT,
1151 .features[FEAT_6_EAX] =
1152 CPUID_6_EAX_ARAT,
1153 .xlevel = 0x80000008,
1154 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1157 .name = "Haswell-noTSX",
1158 .level = 0xd,
1159 .vendor = CPUID_VENDOR_INTEL,
1160 .family = 6,
1161 .model = 60,
1162 .stepping = 1,
1163 .features[FEAT_1_EDX] =
1164 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1165 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1166 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1167 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1168 CPUID_DE | CPUID_FP87,
1169 .features[FEAT_1_ECX] =
1170 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1171 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1172 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1173 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1174 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1175 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1176 .features[FEAT_8000_0001_EDX] =
1177 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1178 CPUID_EXT2_SYSCALL,
1179 .features[FEAT_8000_0001_ECX] =
1180 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1181 .features[FEAT_7_0_EBX] =
1182 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1183 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1184 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1185 .features[FEAT_XSAVE] =
1186 CPUID_XSAVE_XSAVEOPT,
1187 .features[FEAT_6_EAX] =
1188 CPUID_6_EAX_ARAT,
1189 .xlevel = 0x80000008,
1190 .model_id = "Intel Core Processor (Haswell, no TSX)",
1191 }, {
1192 .name = "Haswell",
1193 .level = 0xd,
1194 .vendor = CPUID_VENDOR_INTEL,
1195 .family = 6,
1196 .model = 60,
1197 .stepping = 1,
1198 .features[FEAT_1_EDX] =
1199 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1200 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1201 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1202 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1203 CPUID_DE | CPUID_FP87,
1204 .features[FEAT_1_ECX] =
1205 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1206 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1207 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1208 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1209 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1210 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1211 .features[FEAT_8000_0001_EDX] =
1212 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1213 CPUID_EXT2_SYSCALL,
1214 .features[FEAT_8000_0001_ECX] =
1215 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1216 .features[FEAT_7_0_EBX] =
1217 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1218 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1219 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1220 CPUID_7_0_EBX_RTM,
1221 .features[FEAT_XSAVE] =
1222 CPUID_XSAVE_XSAVEOPT,
1223 .features[FEAT_6_EAX] =
1224 CPUID_6_EAX_ARAT,
1225 .xlevel = 0x80000008,
1226 .model_id = "Intel Core Processor (Haswell)",
1229 .name = "Broadwell-noTSX",
1230 .level = 0xd,
1231 .vendor = CPUID_VENDOR_INTEL,
1232 .family = 6,
1233 .model = 61,
1234 .stepping = 2,
1235 .features[FEAT_1_EDX] =
1236 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1237 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1238 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1239 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1240 CPUID_DE | CPUID_FP87,
1241 .features[FEAT_1_ECX] =
1242 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1243 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1244 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1245 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1246 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1247 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1248 .features[FEAT_8000_0001_EDX] =
1249 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1250 CPUID_EXT2_SYSCALL,
1251 .features[FEAT_8000_0001_ECX] =
1252 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1253 .features[FEAT_7_0_EBX] =
1254 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1255 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1256 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1257 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1258 CPUID_7_0_EBX_SMAP,
1259 .features[FEAT_XSAVE] =
1260 CPUID_XSAVE_XSAVEOPT,
1261 .features[FEAT_6_EAX] =
1262 CPUID_6_EAX_ARAT,
1263 .xlevel = 0x80000008,
1264 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1267 .name = "Broadwell",
1268 .level = 0xd,
1269 .vendor = CPUID_VENDOR_INTEL,
1270 .family = 6,
1271 .model = 61,
1272 .stepping = 2,
1273 .features[FEAT_1_EDX] =
1274 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1275 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1276 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1277 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1278 CPUID_DE | CPUID_FP87,
1279 .features[FEAT_1_ECX] =
1280 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1281 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1282 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1283 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1284 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1285 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1286 .features[FEAT_8000_0001_EDX] =
1287 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1288 CPUID_EXT2_SYSCALL,
1289 .features[FEAT_8000_0001_ECX] =
1290 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1291 .features[FEAT_7_0_EBX] =
1292 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1293 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1294 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1295 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1296 CPUID_7_0_EBX_SMAP,
1297 .features[FEAT_XSAVE] =
1298 CPUID_XSAVE_XSAVEOPT,
1299 .features[FEAT_6_EAX] =
1300 CPUID_6_EAX_ARAT,
1301 .xlevel = 0x80000008,
1302 .model_id = "Intel Core Processor (Broadwell)",
1305 .name = "Skylake-Client",
1306 .level = 0xd,
1307 .vendor = CPUID_VENDOR_INTEL,
1308 .family = 6,
1309 .model = 94,
1310 .stepping = 3,
1311 .features[FEAT_1_EDX] =
1312 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1313 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1314 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1315 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1316 CPUID_DE | CPUID_FP87,
1317 .features[FEAT_1_ECX] =
1318 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1319 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1320 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1321 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1322 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1323 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1324 .features[FEAT_8000_0001_EDX] =
1325 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1326 CPUID_EXT2_SYSCALL,
1327 .features[FEAT_8000_0001_ECX] =
1328 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1329 .features[FEAT_7_0_EBX] =
1330 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1331 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1332 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1333 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1334 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1335 /* Missing: XSAVES (not supported by some Linux versions,
1336 * including v4.1 to v4.6).
1337 * KVM doesn't yet expose any XSAVES state save component,
1338 * and the only one defined in Skylake (processor tracing)
1339 * probably will block migration anyway.
1340 */
1341 .features[FEAT_XSAVE] =
1342 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1343 CPUID_XSAVE_XGETBV1,
1344 .features[FEAT_6_EAX] =
1345 CPUID_6_EAX_ARAT,
1346 .xlevel = 0x80000008,
1347 .model_id = "Intel Core Processor (Skylake)",
1350 .name = "Opteron_G1",
1351 .level = 5,
1352 .vendor = CPUID_VENDOR_AMD,
1353 .family = 15,
1354 .model = 6,
1355 .stepping = 1,
1356 .features[FEAT_1_EDX] =
1357 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1358 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1359 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1360 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1361 CPUID_DE | CPUID_FP87,
1362 .features[FEAT_1_ECX] =
1363 CPUID_EXT_SSE3,
1364 .features[FEAT_8000_0001_EDX] =
1365 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1366 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1367 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1368 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1369 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1370 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1371 .xlevel = 0x80000008,
1372 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1375 .name = "Opteron_G2",
1376 .level = 5,
1377 .vendor = CPUID_VENDOR_AMD,
1378 .family = 15,
1379 .model = 6,
1380 .stepping = 1,
1381 .features[FEAT_1_EDX] =
1382 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1383 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1384 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1385 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1386 CPUID_DE | CPUID_FP87,
1387 .features[FEAT_1_ECX] =
1388 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1389 /* Missing: CPUID_EXT2_RDTSCP */
1390 .features[FEAT_8000_0001_EDX] =
1391 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1392 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1393 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1394 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1395 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1396 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1397 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1398 .features[FEAT_8000_0001_ECX] =
1399 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1400 .xlevel = 0x80000008,
1401 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1404 .name = "Opteron_G3",
1405 .level = 5,
1406 .vendor = CPUID_VENDOR_AMD,
1407 .family = 15,
1408 .model = 6,
1409 .stepping = 1,
1410 .features[FEAT_1_EDX] =
1411 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1412 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1413 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1414 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1415 CPUID_DE | CPUID_FP87,
1416 .features[FEAT_1_ECX] =
1417 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1418 CPUID_EXT_SSE3,
1419 /* Missing: CPUID_EXT2_RDTSCP */
1420 .features[FEAT_8000_0001_EDX] =
1421 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1422 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1423 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1424 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1425 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1426 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1427 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1428 .features[FEAT_8000_0001_ECX] =
1429 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1430 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1431 .xlevel = 0x80000008,
1432 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1435 .name = "Opteron_G4",
1436 .level = 0xd,
1437 .vendor = CPUID_VENDOR_AMD,
1438 .family = 21,
1439 .model = 1,
1440 .stepping = 2,
1441 .features[FEAT_1_EDX] =
1442 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1443 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1444 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1445 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1446 CPUID_DE | CPUID_FP87,
1447 .features[FEAT_1_ECX] =
1448 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1449 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1450 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1451 CPUID_EXT_SSE3,
1452 /* Missing: CPUID_EXT2_RDTSCP */
1453 .features[FEAT_8000_0001_EDX] =
1454 CPUID_EXT2_LM |
1455 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1456 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1457 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1458 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1459 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1460 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1461 .features[FEAT_8000_0001_ECX] =
1462 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1463 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1464 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1465 CPUID_EXT3_LAHF_LM,
1466 /* no xsaveopt! */
1467 .xlevel = 0x8000001A,
1468 .model_id = "AMD Opteron 62xx class CPU",
1471 .name = "Opteron_G5",
1472 .level = 0xd,
1473 .vendor = CPUID_VENDOR_AMD,
1474 .family = 21,
1475 .model = 2,
1476 .stepping = 0,
1477 .features[FEAT_1_EDX] =
1478 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1479 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1480 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1481 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1482 CPUID_DE | CPUID_FP87,
1483 .features[FEAT_1_ECX] =
1484 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1485 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1486 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1487 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1488 /* Missing: CPUID_EXT2_RDTSCP */
1489 .features[FEAT_8000_0001_EDX] =
1490 CPUID_EXT2_LM |
1491 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1492 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1493 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1494 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1495 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1496 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1497 .features[FEAT_8000_0001_ECX] =
1498 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1499 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1500 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1501 CPUID_EXT3_LAHF_LM,
1502 /* no xsaveopt! */
1503 .xlevel = 0x8000001A,
1504 .model_id = "AMD Opteron 63xx class CPU",
1505 },
1506 };
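/*
 * Illustrative sketch, not part of the original file: the table above is
 * keyed by model name, so a lookup is a plain linear scan.  QEMU itself
 * resolves model names through QOM class names (see
 * x86_cpu_class_by_name() earlier in this file); this helper and its
 * name exist only for illustration and it is not called anywhere.
 */
static X86CPUDefinition *example_find_cpu_def(const char *name)
{
    size_t i;

    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
        if (strcmp(builtin_x86_defs[i].name, name) == 0) {
            return &builtin_x86_defs[i];
        }
    }
    return NULL;
}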
1508 typedef struct PropValue {
1509 const char *prop, *value;
1510 } PropValue;
1512 /* KVM-specific features that are automatically added/removed
1513 * from all CPU models when KVM is enabled.
1514 */
1515 static PropValue kvm_default_props[] = {
1516 { "kvmclock", "on" },
1517 { "kvm-nopiodelay", "on" },
1518 { "kvm-asyncpf", "on" },
1519 { "kvm-steal-time", "on" },
1520 { "kvm-pv-eoi", "on" },
1521 { "kvmclock-stable-bit", "on" },
1522 { "x2apic", "on" },
1523 { "acpi", "off" },
1524 { "monitor", "off" },
1525 { "svm", "off" },
1526 { NULL, NULL },
1529 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1530 {
1531 PropValue *pv;
1532 for (pv = kvm_default_props; pv->prop; pv++) {
1533 if (!strcmp(pv->prop, prop)) {
1534 pv->value = value;
1535 break;
1536 }
1537 }
1539 /* It is valid to call this function only for properties that
1540 * are already present in the kvm_default_props table.
1541 */
1542 assert(pv->prop);
1543 }
1545 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1546 bool migratable_only);
1548 #ifdef CONFIG_KVM
1550 static bool lmce_supported(void)
1552 uint64_t mce_cap;
1554 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
1555 return false;
1558 return !!(mce_cap & MCG_LMCE_P);
1561 static int cpu_x86_fill_model_id(char *str)
1562 {
1563 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1564 int i;
1566 for (i = 0; i < 3; i++) {
1567 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
1568 memcpy(str + i * 16 + 0, &eax, 4);
1569 memcpy(str + i * 16 + 4, &ebx, 4);
1570 memcpy(str + i * 16 + 8, &ecx, 4);
1571 memcpy(str + i * 16 + 12, &edx, 4);
1572 }
1573 return 0;
1574 }
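/*
 * Illustrative sketch, not part of the original file: CPUID leaves
 * 0x80000002..0x80000004 each return 16 bytes of the brand string, so
 * cpu_x86_fill_model_id() above fills exactly 48 bytes and the caller
 * must NUL-terminate before printing.  The function and variable names
 * are made up; it is not called anywhere.
 */
static void example_print_host_brand_string(void)
{
    char brand[48 + 1];

    cpu_x86_fill_model_id(brand);
    brand[48] = '\0';
    fprintf(stderr, "host brand string: %s\n", brand);
}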
1576 static X86CPUDefinition host_cpudef;
1578 static Property host_x86_cpu_properties[] = {
1579 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1580 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
1581 DEFINE_PROP_END_OF_LIST()
1584 /* class_init for the "host" CPU model
1586 * This function may be called before KVM is initialized.
1587 */
1588 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1590 DeviceClass *dc = DEVICE_CLASS(oc);
1591 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1592 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1594 xcc->kvm_required = true;
1596 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1597 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1599 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1600 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1601 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1602 host_cpudef.stepping = eax & 0x0F;
1604 cpu_x86_fill_model_id(host_cpudef.model_id);
1606 xcc->cpu_def = &host_cpudef;
1608 /* level, xlevel, xlevel2, and the feature words are initialized on
1609 * instance_init, because they require KVM to be initialized.
1612 dc->props = host_x86_cpu_properties;
1613 /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
1614 dc->cannot_destroy_with_object_finalize_yet = true;
1617 static void host_x86_cpu_initfn(Object *obj)
1619 X86CPU *cpu = X86_CPU(obj);
1620 CPUX86State *env = &cpu->env;
1621 KVMState *s = kvm_state;
1623 /* We can't fill the features array here because we don't know yet if
1624 * "migratable" is true or false.
1626 cpu->host_features = true;
1628 /* If KVM is disabled, x86_cpu_realizefn() will report an error later */
1629 if (kvm_enabled()) {
1630 env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1631 env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1632 env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1634 if (lmce_supported()) {
1635 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
1639 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1642 static const TypeInfo host_x86_cpu_type_info = {
1643 .name = X86_CPU_TYPE_NAME("host"),
1644 .parent = TYPE_X86_CPU,
1645 .instance_init = host_x86_cpu_initfn,
1646 .class_init = host_x86_cpu_class_init,
1649 #endif
1651 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1653 FeatureWordInfo *f = &feature_word_info[w];
1654 int i;
1656 for (i = 0; i < 32; ++i) {
1657 if ((1UL << i) & mask) {
1658 const char *reg = get_register_name_32(f->cpuid_reg);
1659 assert(reg);
1660 fprintf(stderr, "warning: %s doesn't support requested feature: "
1661 "CPUID.%02XH:%s%s%s [bit %d]\n",
1662 kvm_enabled() ? "host" : "TCG",
1663 f->cpuid_eax, reg,
1664 f->feat_names[i] ? "." : "",
1665 f->feat_names[i] ? f->feat_names[i] : "", i);
1670 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1671 const char *name, void *opaque,
1672 Error **errp)
1674 X86CPU *cpu = X86_CPU(obj);
1675 CPUX86State *env = &cpu->env;
1676 int64_t value;
1678 value = (env->cpuid_version >> 8) & 0xf;
1679 if (value == 0xf) {
1680 value += (env->cpuid_version >> 20) & 0xff;
1682 visit_type_int(v, name, &value, errp);
1685 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1686 const char *name, void *opaque,
1687 Error **errp)
1689 X86CPU *cpu = X86_CPU(obj);
1690 CPUX86State *env = &cpu->env;
1691 const int64_t min = 0;
1692 const int64_t max = 0xff + 0xf;
1693 Error *local_err = NULL;
1694 int64_t value;
1696 visit_type_int(v, name, &value, &local_err);
1697 if (local_err) {
1698 error_propagate(errp, local_err);
1699 return;
1701 if (value < min || value > max) {
1702 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1703 name ? name : "null", value, min, max);
1704 return;
1707 env->cpuid_version &= ~0xff00f00;
1708 if (value > 0x0f) {
1709 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1710 } else {
1711 env->cpuid_version |= value << 8;
1715 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1716 const char *name, void *opaque,
1717 Error **errp)
1719 X86CPU *cpu = X86_CPU(obj);
1720 CPUX86State *env = &cpu->env;
1721 int64_t value;
1723 value = (env->cpuid_version >> 4) & 0xf;
1724 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1725 visit_type_int(v, name, &value, errp);
1728 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1729 const char *name, void *opaque,
1730 Error **errp)
1732 X86CPU *cpu = X86_CPU(obj);
1733 CPUX86State *env = &cpu->env;
1734 const int64_t min = 0;
1735 const int64_t max = 0xff;
1736 Error *local_err = NULL;
1737 int64_t value;
1739 visit_type_int(v, name, &value, &local_err);
1740 if (local_err) {
1741 error_propagate(errp, local_err);
1742 return;
1744 if (value < min || value > max) {
1745 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1746 name ? name : "null", value, min, max);
1747 return;
1750 env->cpuid_version &= ~0xf00f0;
1751 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
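/* Worked example: the low nibble of "model" lands in bits [7:4] and the high
 * nibble in the extended-model field (bits [19:16]), so model=0x3C is stored
 * as bits [7:4] = 0xC and bits [19:16] = 0x3.
 */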
1754 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1755 const char *name, void *opaque,
1756 Error **errp)
1758 X86CPU *cpu = X86_CPU(obj);
1759 CPUX86State *env = &cpu->env;
1760 int64_t value;
1762 value = env->cpuid_version & 0xf;
1763 visit_type_int(v, name, &value, errp);
1766 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1767 const char *name, void *opaque,
1768 Error **errp)
1770 X86CPU *cpu = X86_CPU(obj);
1771 CPUX86State *env = &cpu->env;
1772 const int64_t min = 0;
1773 const int64_t max = 0xf;
1774 Error *local_err = NULL;
1775 int64_t value;
1777 visit_type_int(v, name, &value, &local_err);
1778 if (local_err) {
1779 error_propagate(errp, local_err);
1780 return;
1782 if (value < min || value > max) {
1783 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1784 name ? name : "null", value, min, max);
1785 return;
1788 env->cpuid_version &= ~0xf;
1789 env->cpuid_version |= value & 0xf;
1792 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1794 X86CPU *cpu = X86_CPU(obj);
1795 CPUX86State *env = &cpu->env;
1796 char *value;
1798 value = g_malloc(CPUID_VENDOR_SZ + 1);
1799 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1800 env->cpuid_vendor3);
1801 return value;
1804 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1805 Error **errp)
1807 X86CPU *cpu = X86_CPU(obj);
1808 CPUX86State *env = &cpu->env;
1809 int i;
1811 if (strlen(value) != CPUID_VENDOR_SZ) {
1812 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1813 return;
1816 env->cpuid_vendor1 = 0;
1817 env->cpuid_vendor2 = 0;
1818 env->cpuid_vendor3 = 0;
1819 for (i = 0; i < 4; i++) {
1820 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1821 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1822 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
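/* The three registers are filled little-endian, four characters each, e.g.
 * "GenuineIntel" yields cpuid_vendor1 = "Genu" (EBX), cpuid_vendor2 = "ineI"
 * (EDX) and cpuid_vendor3 = "ntel" (ECX), matching the CPUID leaf 0 layout.
 */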
1826 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1828 X86CPU *cpu = X86_CPU(obj);
1829 CPUX86State *env = &cpu->env;
1830 char *value;
1831 int i;
1833 value = g_malloc(48 + 1);
1834 for (i = 0; i < 48; i++) {
1835 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1837 value[48] = '\0';
1838 return value;
1841 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1842 Error **errp)
1844 X86CPU *cpu = X86_CPU(obj);
1845 CPUX86State *env = &cpu->env;
1846 int c, len, i;
1848 if (model_id == NULL) {
1849 model_id = "";
1851 len = strlen(model_id);
1852 memset(env->cpuid_model, 0, 48);
1853 for (i = 0; i < 48; i++) {
1854 if (i >= len) {
1855 c = '\0';
1856 } else {
1857 c = (uint8_t)model_id[i];
1859 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1863 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1864 void *opaque, Error **errp)
1866 X86CPU *cpu = X86_CPU(obj);
1867 int64_t value;
1869 value = cpu->env.tsc_khz * 1000;
1870 visit_type_int(v, name, &value, errp);
1873 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1874 void *opaque, Error **errp)
1876 X86CPU *cpu = X86_CPU(obj);
1877 const int64_t min = 0;
1878 const int64_t max = INT64_MAX;
1879 Error *local_err = NULL;
1880 int64_t value;
1882 visit_type_int(v, name, &value, &local_err);
1883 if (local_err) {
1884 error_propagate(errp, local_err);
1885 return;
1887 if (value < min || value > max) {
1888 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1889 name ? name : "null", value, min, max);
1890 return;
1893 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
1896 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, const char *name,
1897 void *opaque, Error **errp)
1899 X86CPU *cpu = X86_CPU(obj);
1900 int64_t value = cpu->apic_id;
1902 visit_type_int(v, name, &value, errp);
1905 static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, const char *name,
1906 void *opaque, Error **errp)
1908 X86CPU *cpu = X86_CPU(obj);
1909 DeviceState *dev = DEVICE(obj);
1910 const int64_t min = 0;
1911 const int64_t max = UINT32_MAX;
1912 Error *error = NULL;
1913 int64_t value;
1915 if (dev->realized) {
1916 error_setg(errp, "Attempt to set property '%s' on '%s' after "
1917 "it was realized", name, object_get_typename(obj));
1918 return;
1921 visit_type_int(v, name, &value, &error);
1922 if (error) {
1923 error_propagate(errp, error);
1924 return;
1926 if (value < min || value > max) {
1927 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1928 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
1929 object_get_typename(obj), name, value, min, max);
1930 return;
1933 if ((value != cpu->apic_id) && cpu_exists(value)) {
1934 error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
1935 return;
1937 cpu->apic_id = value;
1940 /* Generic getter for "feature-words" and "filtered-features" properties */
1941 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
1942 const char *name, void *opaque,
1943 Error **errp)
1945 uint32_t *array = (uint32_t *)opaque;
1946 FeatureWord w;
1947 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1948 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1949 X86CPUFeatureWordInfoList *list = NULL;
1951 for (w = 0; w < FEATURE_WORDS; w++) {
1952 FeatureWordInfo *wi = &feature_word_info[w];
1953 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1954 qwi->cpuid_input_eax = wi->cpuid_eax;
1955 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1956 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1957 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1958 qwi->features = array[w];
1960 /* List will be in reverse order, but order shouldn't matter */
1961 list_entries[w].next = list;
1962 list_entries[w].value = &word_infos[w];
1963 list = &list_entries[w];
1966 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
1969 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1970 void *opaque, Error **errp)
1972 X86CPU *cpu = X86_CPU(obj);
1973 int64_t value = cpu->hyperv_spinlock_attempts;
1975 visit_type_int(v, name, &value, errp);
1978 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1979 void *opaque, Error **errp)
1981 const int64_t min = 0xFFF;
1982 const int64_t max = UINT_MAX;
1983 X86CPU *cpu = X86_CPU(obj);
1984 Error *err = NULL;
1985 int64_t value;
1987 visit_type_int(v, name, &value, &err);
1988 if (err) {
1989 error_propagate(errp, err);
1990 return;
1993 if (value < min || value > max) {
1994 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1995 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1996 object_get_typename(obj), name ? name : "null",
1997 value, min, max);
1998 return;
2000 cpu->hyperv_spinlock_attempts = value;
2003 static PropertyInfo qdev_prop_spinlocks = {
2004 .name = "int",
2005 .get = x86_get_hv_spinlocks,
2006 .set = x86_set_hv_spinlocks,
2009 /* Convert all '_' in a feature string option name to '-', so that the
2010  * feature name conforms to the QOM property naming rule, which uses '-' instead of '_'.
2012 static inline void feat2prop(char *s)
2014 while ((s = strchr(s, '_'))) {
2015 *s = '-';
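/* e.g. "lahf_lm" becomes the QOM property name "lahf-lm"; names without an
 * underscore are left unchanged.
 */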
2019 /* Compatibility hack to maintain the legacy +feat/-feat semantics,
2020  * where +feat/-feat overrides any feature set by
2021  * feat=on|off even if the latter is parsed after +feat/-feat
2022  * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
2024 static FeatureWordArray plus_features = { 0 };
2025 static FeatureWordArray minus_features = { 0 };
2027 /* Parse "+feature,-feature,feature=foo" CPU feature string
2029 static void x86_cpu_parse_featurestr(const char *typename, char *features,
2030 Error **errp)
2032 char *featurestr; /* Single "key=value" string being parsed */
2033 Error *local_err = NULL;
2034 static bool cpu_globals_initialized;
2036 if (cpu_globals_initialized) {
2037 return;
2039 cpu_globals_initialized = true;
2041 if (!features) {
2042 return;
2045 for (featurestr = strtok(features, ",");
2046 featurestr && !local_err;
2047 featurestr = strtok(NULL, ",")) {
2048 const char *name;
2049 const char *val = NULL;
2050 char *eq = NULL;
2051 char num[32];
2052 GlobalProperty *prop;
2054 /* Compatibility syntax: */
2055 if (featurestr[0] == '+') {
2056 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
2057 continue;
2058 } else if (featurestr[0] == '-') {
2059 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
2060 continue;
2063 eq = strchr(featurestr, '=');
2064 if (eq) {
2065 *eq++ = 0;
2066 val = eq;
2067 } else {
2068 val = "on";
2071 feat2prop(featurestr);
2072 name = featurestr;
2074 /* Special case: */
2075 if (!strcmp(name, "tsc-freq")) {
2076 int64_t tsc_freq;
2077 char *err;
2079 tsc_freq = qemu_strtosz_suffix_unit(val, &err,
2080 QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
2081 if (tsc_freq < 0 || *err) {
2082 error_setg(errp, "bad numerical value %s", val);
2083 return;
2085 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
2086 val = num;
2087 name = "tsc-frequency";
2090 prop = g_new0(typeof(*prop), 1);
2091 prop->driver = typename;
2092 prop->property = g_strdup(name);
2093 prop->value = g_strdup(val);
2094 prop->errp = &error_fatal;
2095 qdev_prop_register_global(prop);
2098 if (local_err) {
2099 error_propagate(errp, local_err);
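/* Example (illustrative) of a string handled above:
 *   -cpu Haswell,+avx2,-x2apic,vme=off,tsc-freq=2.5G
 * "+avx2"/"-x2apic" go through the legacy plus/minus bitmaps, "vme=off" is
 * registered as a global property on the CPU type, and "tsc-freq=2.5G" is
 * converted to "tsc-frequency=2500000000" before registration.
 */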
2103 /* Print all CPUID feature names in featureset */
2105 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2107 int bit;
2108 bool first = true;
2110 for (bit = 0; bit < 32; bit++) {
2111 if (featureset[bit]) {
2112 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2113 first = false;
2118 /* Print the supported CPU models and recognized CPUID flags (used by "-cpu help") */
2119 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2121 X86CPUDefinition *def;
2122 char buf[256];
2123 int i;
2125 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2126 def = &builtin_x86_defs[i];
2127 snprintf(buf, sizeof(buf), "%s", def->name);
2128 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
2130 #ifdef CONFIG_KVM
2131 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
2132 "KVM processor with all supported host features "
2133 "(only available in KVM mode)");
2134 #endif
2136 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2137 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2138 FeatureWordInfo *fw = &feature_word_info[i];
2140 (*cpu_fprintf)(f, " ");
2141 listflags(f, cpu_fprintf, fw->feat_names);
2142 (*cpu_fprintf)(f, "\n");
2146 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2148 CpuDefinitionInfoList *cpu_list = NULL;
2149 X86CPUDefinition *def;
2150 int i;
2152 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2153 CpuDefinitionInfoList *entry;
2154 CpuDefinitionInfo *info;
2156 def = &builtin_x86_defs[i];
2157 info = g_malloc0(sizeof(*info));
2158 info->name = g_strdup(def->name);
2160 entry = g_malloc0(sizeof(*entry));
2161 entry->value = info;
2162 entry->next = cpu_list;
2163 cpu_list = entry;
2166 return cpu_list;
2169 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2170 bool migratable_only)
2172 FeatureWordInfo *wi = &feature_word_info[w];
2173 uint32_t r;
2175 if (kvm_enabled()) {
2176 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2177 wi->cpuid_ecx,
2178 wi->cpuid_reg);
2179 } else if (tcg_enabled()) {
2180 r = wi->tcg_features;
2181 } else {
2182 return ~0;
2184 if (migratable_only) {
2185 r &= x86_cpu_get_migratable_flags(w);
2187 return r;
2191 * Filters CPU feature words based on host availability of each feature.
2193 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
2195 static int x86_cpu_filter_features(X86CPU *cpu)
2197 CPUX86State *env = &cpu->env;
2198 FeatureWord w;
2199 int rv = 0;
2201 for (w = 0; w < FEATURE_WORDS; w++) {
2202 uint32_t host_feat =
2203 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2204 uint32_t requested_features = env->features[w];
2205 env->features[w] &= host_feat;
2206 cpu->filtered_features[w] = requested_features & ~env->features[w];
2207 if (cpu->filtered_features[w]) {
2208 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2209 report_unavailable_features(w, cpu->filtered_features[w]);
2211 rv = 1;
2215 return rv;
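/* The warnings above are only printed when the "check" or "enforce" property
 * is set; x86_cpu_realizefn() additionally fails realization when "enforce"
 * is set and any requested feature had to be filtered out.
 */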
2218 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2220 PropValue *pv;
2221 for (pv = props; pv->prop; pv++) {
2222 if (!pv->value) {
2223 continue;
2225 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2226 &error_abort);
2230 /* Load data from X86CPUDefinition
2232 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2234 CPUX86State *env = &cpu->env;
2235 const char *vendor;
2236 char host_vendor[CPUID_VENDOR_SZ + 1];
2237 FeatureWord w;
2239 object_property_set_int(OBJECT(cpu), def->level, "level", errp);
2240 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2241 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2242 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2243 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
2244 object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
2245 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2246 for (w = 0; w < FEATURE_WORDS; w++) {
2247 env->features[w] = def->features[w];
2250 /* Special cases not set in the X86CPUDefinition structs: */
2251 if (kvm_enabled()) {
2252 if (!kvm_irqchip_in_kernel()) {
2253 x86_cpu_change_kvm_default("x2apic", "off");
2256 x86_cpu_apply_props(cpu, kvm_default_props);
2259 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2261 /* sysenter isn't supported in compatibility mode on AMD,
2262 * syscall isn't supported in compatibility mode on Intel.
2263 * Normally we advertise the actual CPU vendor, but you can
2264 * override this using the 'vendor' property if you want to use
2265 * KVM's sysenter/syscall emulation in compatibility mode and
2266 * when doing cross vendor migration
2268 vendor = def->vendor;
2269 if (kvm_enabled()) {
2270 uint32_t ebx = 0, ecx = 0, edx = 0;
2271 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2272 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2273 vendor = host_vendor;
2276 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2280 X86CPU *cpu_x86_init(const char *cpu_model)
2282 return X86_CPU(cpu_generic_init(TYPE_X86_CPU, cpu_model));
2285 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2287 X86CPUDefinition *cpudef = data;
2288 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2290 xcc->cpu_def = cpudef;
2293 static void x86_register_cpudef_type(X86CPUDefinition *def)
2295 char *typename = x86_cpu_type_name(def->name);
2296 TypeInfo ti = {
2297 .name = typename,
2298 .parent = TYPE_X86_CPU,
2299 .class_init = x86_cpu_cpudef_class_init,
2300 .class_data = def,
2303 type_register(&ti);
2304 g_free(typename);
2307 #if !defined(CONFIG_USER_ONLY)
2309 void cpu_clear_apic_feature(CPUX86State *env)
2311 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2314 #endif /* !CONFIG_USER_ONLY */
2316 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2317 uint32_t *eax, uint32_t *ebx,
2318 uint32_t *ecx, uint32_t *edx)
2320 X86CPU *cpu = x86_env_get_cpu(env);
2321 CPUState *cs = CPU(cpu);
2323 /* test if maximum index reached */
2324 if (index & 0x80000000) {
2325 if (index > env->cpuid_xlevel) {
2326 if (env->cpuid_xlevel2 > 0) {
2327 /* Handle the Centaur's CPUID instruction. */
2328 if (index > env->cpuid_xlevel2) {
2329 index = env->cpuid_xlevel2;
2330 } else if (index < 0xC0000000) {
2331 index = env->cpuid_xlevel;
2333 } else {
2334 /* Intel documentation states that invalid EAX input will
2335 * return the same information as EAX=cpuid_level
2336 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2338 index = env->cpuid_level;
2341 } else {
2342 if (index > env->cpuid_level)
2343 index = env->cpuid_level;
2346 switch(index) {
2347 case 0:
2348 *eax = env->cpuid_level;
2349 *ebx = env->cpuid_vendor1;
2350 *edx = env->cpuid_vendor2;
2351 *ecx = env->cpuid_vendor3;
2352 break;
2353 case 1:
2354 *eax = env->cpuid_version;
2355 *ebx = (cpu->apic_id << 24) |
2356 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2357 *ecx = env->features[FEAT_1_ECX];
2358 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
2359 *ecx |= CPUID_EXT_OSXSAVE;
2361 *edx = env->features[FEAT_1_EDX];
2362 if (cs->nr_cores * cs->nr_threads > 1) {
2363 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2364 *edx |= CPUID_HT;
2366 break;
2367 case 2:
2368 /* cache info: needed for Pentium Pro compatibility */
2369 if (cpu->cache_info_passthrough) {
2370 host_cpuid(index, 0, eax, ebx, ecx, edx);
2371 break;
2373 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2374 *ebx = 0;
2375 *ecx = 0;
2376 *edx = (L1D_DESCRIPTOR << 16) | \
2377 (L1I_DESCRIPTOR << 8) | \
2378 (L2_DESCRIPTOR);
2379 break;
2380 case 4:
2381 /* cache info: needed for Core compatibility */
2382 if (cpu->cache_info_passthrough) {
2383 host_cpuid(index, count, eax, ebx, ecx, edx);
2384 *eax &= ~0xFC000000;
2385 } else {
2386 *eax = 0;
2387 switch (count) {
2388 case 0: /* L1 dcache info */
2389 *eax |= CPUID_4_TYPE_DCACHE | \
2390 CPUID_4_LEVEL(1) | \
2391 CPUID_4_SELF_INIT_LEVEL;
2392 *ebx = (L1D_LINE_SIZE - 1) | \
2393 ((L1D_PARTITIONS - 1) << 12) | \
2394 ((L1D_ASSOCIATIVITY - 1) << 22);
2395 *ecx = L1D_SETS - 1;
2396 *edx = CPUID_4_NO_INVD_SHARING;
2397 break;
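/* With the hardcoded L1D geometry (64-byte lines, 1 partition, 8-way,
 * 64 sets) this yields EBX = 0x01C0003F and ECX = 63.
 */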
2398 case 1: /* L1 icache info */
2399 *eax |= CPUID_4_TYPE_ICACHE | \
2400 CPUID_4_LEVEL(1) | \
2401 CPUID_4_SELF_INIT_LEVEL;
2402 *ebx = (L1I_LINE_SIZE - 1) | \
2403 ((L1I_PARTITIONS - 1) << 12) | \
2404 ((L1I_ASSOCIATIVITY - 1) << 22);
2405 *ecx = L1I_SETS - 1;
2406 *edx = CPUID_4_NO_INVD_SHARING;
2407 break;
2408 case 2: /* L2 cache info */
2409 *eax |= CPUID_4_TYPE_UNIFIED | \
2410 CPUID_4_LEVEL(2) | \
2411 CPUID_4_SELF_INIT_LEVEL;
2412 if (cs->nr_threads > 1) {
2413 *eax |= (cs->nr_threads - 1) << 14;
2415 *ebx = (L2_LINE_SIZE - 1) | \
2416 ((L2_PARTITIONS - 1) << 12) | \
2417 ((L2_ASSOCIATIVITY - 1) << 22);
2418 *ecx = L2_SETS - 1;
2419 *edx = CPUID_4_NO_INVD_SHARING;
2420 break;
2421 default: /* end of info */
2422 *eax = 0;
2423 *ebx = 0;
2424 *ecx = 0;
2425 *edx = 0;
2426 break;
2430 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2431 if ((*eax & 31) && cs->nr_cores > 1) {
2432 *eax |= (cs->nr_cores - 1) << 26;
2434 break;
2435 case 5:
2436 /* mwait info: needed for Core compatibility */
2437 *eax = 0; /* Smallest monitor-line size in bytes */
2438 *ebx = 0; /* Largest monitor-line size in bytes */
2439 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2440 *edx = 0;
2441 break;
2442 case 6:
2443 /* Thermal and Power Leaf */
2444 *eax = env->features[FEAT_6_EAX];
2445 *ebx = 0;
2446 *ecx = 0;
2447 *edx = 0;
2448 break;
2449 case 7:
2450 /* Structured Extended Feature Flags Enumeration Leaf */
2451 if (count == 0) {
2452 *eax = 0; /* Maximum ECX value for sub-leaves */
2453 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2454 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
2455 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
2456 *ecx |= CPUID_7_0_ECX_OSPKE;
2458 *edx = 0; /* Reserved */
2459 } else {
2460 *eax = 0;
2461 *ebx = 0;
2462 *ecx = 0;
2463 *edx = 0;
2465 break;
2466 case 9:
2467 /* Direct Cache Access Information Leaf */
2468 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2469 *ebx = 0;
2470 *ecx = 0;
2471 *edx = 0;
2472 break;
2473 case 0xA:
2474 /* Architectural Performance Monitoring Leaf */
2475 if (kvm_enabled() && cpu->enable_pmu) {
2476 KVMState *s = cs->kvm_state;
2478 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2479 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2480 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2481 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2482 } else {
2483 *eax = 0;
2484 *ebx = 0;
2485 *ecx = 0;
2486 *edx = 0;
2488 break;
2489 case 0xB:
2490 /* Extended Topology Enumeration Leaf */
2491 if (!cpu->enable_cpuid_0xb) {
2492 *eax = *ebx = *ecx = *edx = 0;
2493 break;
2496 *ecx = count & 0xff;
2497 *edx = cpu->apic_id;
2499 switch (count) {
2500 case 0:
2501 *eax = apicid_core_offset(smp_cores, smp_threads);
2502 *ebx = smp_threads;
2503 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
2504 break;
2505 case 1:
2506 *eax = apicid_pkg_offset(smp_cores, smp_threads);
2507 *ebx = smp_cores * smp_threads;
2508 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
2509 break;
2510 default:
2511 *eax = 0;
2512 *ebx = 0;
2513 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
2516 assert(!(*eax & ~0x1f));
2517 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
2518 break;
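/* Example (illustrative): with -smp sockets=1,cores=2,threads=2 this reports
 * sub-leaf 0: EAX = 1 (SMT shift), EBX = 2; sub-leaf 1: EAX = 2 (package
 * shift), EBX = 4.
 */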
2519 case 0xD: {
2520 KVMState *s = cs->kvm_state;
2521 uint64_t ena_mask;
2522 int i;
2524 /* Processor Extended State */
2525 *eax = 0;
2526 *ebx = 0;
2527 *ecx = 0;
2528 *edx = 0;
2529 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
2530 break;
2532 if (kvm_enabled()) {
2533 ena_mask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX);
2534 ena_mask <<= 32;
2535 ena_mask |= kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
2536 } else {
2537 ena_mask = -1;
2540 if (count == 0) {
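/* The 0x240 below is the size of the legacy FXSAVE region (512 bytes) plus
 * the 64-byte XSAVE header; the loop then grows ECX to cover any enabled
 * extended save areas.
 */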
2541 *ecx = 0x240;
2542 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
2543 const ExtSaveArea *esa = &x86_ext_save_areas[i];
2544 if ((env->features[esa->feature] & esa->bits) == esa->bits
2545 && ((ena_mask >> i) & 1) != 0) {
2546 if (i < 32) {
2547 *eax |= 1u << i;
2548 } else {
2549 *edx |= 1u << (i - 32);
2551 *ecx = MAX(*ecx, esa->offset + esa->size);
2554 *eax |= ena_mask & (XSTATE_FP_MASK | XSTATE_SSE_MASK);
2555 *ebx = *ecx;
2556 } else if (count == 1) {
2557 *eax = env->features[FEAT_XSAVE];
2558 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
2559 const ExtSaveArea *esa = &x86_ext_save_areas[count];
2560 if ((env->features[esa->feature] & esa->bits) == esa->bits
2561 && ((ena_mask >> count) & 1) != 0) {
2562 *eax = esa->size;
2563 *ebx = esa->offset;
2566 break;
2568 case 0x80000000:
2569 *eax = env->cpuid_xlevel;
2570 *ebx = env->cpuid_vendor1;
2571 *edx = env->cpuid_vendor2;
2572 *ecx = env->cpuid_vendor3;
2573 break;
2574 case 0x80000001:
2575 *eax = env->cpuid_version;
2576 *ebx = 0;
2577 *ecx = env->features[FEAT_8000_0001_ECX];
2578 *edx = env->features[FEAT_8000_0001_EDX];
2580 /* The Linux kernel checks for the CMPLegacy bit and
2581 * discards multiple thread information if it is set.
2582 * So don't set it here for Intel to make Linux guests happy.
2584 if (cs->nr_cores * cs->nr_threads > 1) {
2585 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2586 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2587 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2588 *ecx |= 1 << 1; /* CmpLegacy bit */
2591 break;
2592 case 0x80000002:
2593 case 0x80000003:
2594 case 0x80000004:
2595 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2596 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2597 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2598 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2599 break;
2600 case 0x80000005:
2601 /* cache info (L1 cache) */
2602 if (cpu->cache_info_passthrough) {
2603 host_cpuid(index, 0, eax, ebx, ecx, edx);
2604 break;
2606 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2607 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2608 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2609 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2610 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2611 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2612 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2613 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2614 break;
2615 case 0x80000006:
2616 /* cache info (L2 cache) */
2617 if (cpu->cache_info_passthrough) {
2618 host_cpuid(index, 0, eax, ebx, ecx, edx);
2619 break;
2621 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2622 (L2_DTLB_2M_ENTRIES << 16) | \
2623 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2624 (L2_ITLB_2M_ENTRIES);
2625 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2626 (L2_DTLB_4K_ENTRIES << 16) | \
2627 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2628 (L2_ITLB_4K_ENTRIES);
2629 *ecx = (L2_SIZE_KB_AMD << 16) | \
2630 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2631 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2632 *edx = ((L3_SIZE_KB/512) << 18) | \
2633 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2634 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2635 break;
2636 case 0x80000007:
2637 *eax = 0;
2638 *ebx = 0;
2639 *ecx = 0;
2640 *edx = env->features[FEAT_8000_0007_EDX];
2641 break;
2642 case 0x80000008:
2643 /* virtual & phys address size in low 2 bytes. */
2644 /* XXX: This value must match the one used in the MMU code. */
2645 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2646 /* 64 bit processor */
2647 /* XXX: The physical address space is limited to 42 bits in exec.c. */
2648 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
2649 } else {
2650 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
2651 *eax = 0x00000024; /* 36 bits physical */
2652 } else {
2653 *eax = 0x00000020; /* 32 bits physical */
2656 *ebx = 0;
2657 *ecx = 0;
2658 *edx = 0;
2659 if (cs->nr_cores * cs->nr_threads > 1) {
2660 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2662 break;
2663 case 0x8000000A:
2664 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2665 *eax = 0x00000001; /* SVM Revision */
2666 *ebx = 0x00000010; /* nr of ASIDs */
2667 *ecx = 0;
2668 *edx = env->features[FEAT_SVM]; /* optional features */
2669 } else {
2670 *eax = 0;
2671 *ebx = 0;
2672 *ecx = 0;
2673 *edx = 0;
2675 break;
2676 case 0xC0000000:
2677 *eax = env->cpuid_xlevel2;
2678 *ebx = 0;
2679 *ecx = 0;
2680 *edx = 0;
2681 break;
2682 case 0xC0000001:
2683 /* Support for VIA CPU's CPUID instruction */
2684 *eax = env->cpuid_version;
2685 *ebx = 0;
2686 *ecx = 0;
2687 *edx = env->features[FEAT_C000_0001_EDX];
2688 break;
2689 case 0xC0000002:
2690 case 0xC0000003:
2691 case 0xC0000004:
2692 /* Reserved for future use; currently filled with zero */
2693 *eax = 0;
2694 *ebx = 0;
2695 *ecx = 0;
2696 *edx = 0;
2697 break;
2698 default:
2699 /* reserved values: zero */
2700 *eax = 0;
2701 *ebx = 0;
2702 *ecx = 0;
2703 *edx = 0;
2704 break;
2708 /* CPUClass::reset() */
2709 static void x86_cpu_reset(CPUState *s)
2711 X86CPU *cpu = X86_CPU(s);
2712 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2713 CPUX86State *env = &cpu->env;
2714 target_ulong cr4;
2715 uint64_t xcr0;
2716 int i;
2718 xcc->parent_reset(s);
2720 memset(env, 0, offsetof(CPUX86State, cpuid_level));
2722 tlb_flush(s, 1);
2724 env->old_exception = -1;
2726 /* init to reset state */
2728 #ifdef CONFIG_SOFTMMU
2729 env->hflags |= HF_SOFTMMU_MASK;
2730 #endif
2731 env->hflags2 |= HF2_GIF_MASK;
2733 cpu_x86_update_cr0(env, 0x60000010);
2734 env->a20_mask = ~0x0;
2735 env->smbase = 0x30000;
2737 env->idt.limit = 0xffff;
2738 env->gdt.limit = 0xffff;
2739 env->ldt.limit = 0xffff;
2740 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2741 env->tr.limit = 0xffff;
2742 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
2744 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2745 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2746 DESC_R_MASK | DESC_A_MASK);
2747 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2748 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2749 DESC_A_MASK);
2750 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2751 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2752 DESC_A_MASK);
2753 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2754 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2755 DESC_A_MASK);
2756 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2757 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2758 DESC_A_MASK);
2759 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2760 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2761 DESC_A_MASK);
2763 env->eip = 0xfff0;
2764 env->regs[R_EDX] = env->cpuid_version;
2766 env->eflags = 0x2;
2768 /* FPU init */
2769 for (i = 0; i < 8; i++) {
2770 env->fptags[i] = 1;
2772 cpu_set_fpuc(env, 0x37f);
2774 env->mxcsr = 0x1f80;
2775 /* All units are in INIT state. */
2776 env->xstate_bv = 0;
2778 env->pat = 0x0007040600070406ULL;
2779 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2781 memset(env->dr, 0, sizeof(env->dr));
2782 env->dr[6] = DR6_FIXED_1;
2783 env->dr[7] = DR7_FIXED_1;
2784 cpu_breakpoint_remove_all(s, BP_CPU);
2785 cpu_watchpoint_remove_all(s, BP_CPU);
2787 cr4 = 0;
2788 xcr0 = XSTATE_FP_MASK;
2790 #ifdef CONFIG_USER_ONLY
2791 /* Enable all the features for user-mode. */
2792 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
2793 xcr0 |= XSTATE_SSE_MASK;
2795 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
2796 const ExtSaveArea *esa = &x86_ext_save_areas[i];
2797 if ((env->features[esa->feature] & esa->bits) == esa->bits) {
2798 xcr0 |= 1ull << i;
2802 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
2803 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
2805 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
2806 cr4 |= CR4_FSGSBASE_MASK;
2808 #endif
2810 env->xcr0 = xcr0;
2811 cpu_x86_update_cr4(env, cr4);
2814 * SDM 11.11.5 requires:
2815 * - IA32_MTRR_DEF_TYPE MSR.E = 0
2816 * - IA32_MTRR_PHYSMASKn.V = 0
2817 * All other bits are undefined. For simplification, zero it all.
2819 env->mtrr_deftype = 0;
2820 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
2821 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
2823 #if !defined(CONFIG_USER_ONLY)
2824 /* We hard-wire the BSP to the first CPU. */
2825 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
2827 s->halted = !cpu_is_bsp(cpu);
2829 if (kvm_enabled()) {
2830 kvm_arch_reset_vcpu(cpu);
2832 #endif
2835 #ifndef CONFIG_USER_ONLY
2836 bool cpu_is_bsp(X86CPU *cpu)
2838 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2841 /* TODO: remove me when reset over the QOM tree is implemented */
2842 static void x86_cpu_machine_reset_cb(void *opaque)
2844 X86CPU *cpu = opaque;
2845 cpu_reset(CPU(cpu));
2847 #endif
2849 static void mce_init(X86CPU *cpu)
2851 CPUX86State *cenv = &cpu->env;
2852 unsigned int bank;
2854 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2855 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2856 (CPUID_MCE | CPUID_MCA)) {
2857 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
2858 (cpu->enable_lmce ? MCG_LMCE_P : 0);
2859 cenv->mcg_ctl = ~(uint64_t)0;
2860 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2861 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2866 #ifndef CONFIG_USER_ONLY
2867 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2869 APICCommonState *apic;
2870 const char *apic_type = "apic";
2872 if (kvm_apic_in_kernel()) {
2873 apic_type = "kvm-apic";
2874 } else if (xen_enabled()) {
2875 apic_type = "xen-apic";
2878 cpu->apic_state = DEVICE(object_new(apic_type));
2880 object_property_add_child(OBJECT(cpu), "apic",
2881 OBJECT(cpu->apic_state), NULL);
2882 qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
2883 /* TODO: convert to link<> */
2884 apic = APIC_COMMON(cpu->apic_state);
2885 apic->cpu = cpu;
2886 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
2889 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2891 APICCommonState *apic;
2892 static bool apic_mmio_map_once;
2894 if (cpu->apic_state == NULL) {
2895 return;
2897 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
2898 errp);
2900 /* Map APIC MMIO area */
2901 apic = APIC_COMMON(cpu->apic_state);
2902 if (!apic_mmio_map_once) {
2903 memory_region_add_subregion_overlap(get_system_memory(),
2904 apic->apicbase &
2905 MSR_IA32_APICBASE_BASE,
2906 &apic->io_memory,
2907 0x1000);
2908 apic_mmio_map_once = true;
2912 static void x86_cpu_machine_done(Notifier *n, void *unused)
2914 X86CPU *cpu = container_of(n, X86CPU, machine_done);
2915 MemoryRegion *smram =
2916 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
2918 if (smram) {
2919 cpu->smram = g_new(MemoryRegion, 1);
2920 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
2921 smram, 0, 1ull << 32);
2922 memory_region_set_enabled(cpu->smram, false);
2923 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
2926 #else
2927 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2930 #endif
2933 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
2934 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
2935 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
2936 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
2937 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
2938 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
2939 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
2941 CPUState *cs = CPU(dev);
2942 X86CPU *cpu = X86_CPU(dev);
2943 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
2944 CPUX86State *env = &cpu->env;
2945 Error *local_err = NULL;
2946 static bool ht_warned;
2947 FeatureWord w;
2949 if (xcc->kvm_required && !kvm_enabled()) {
2950 char *name = x86_cpu_class_get_model_name(xcc);
2951 error_setg(&local_err, "CPU model '%s' requires KVM", name);
2952 g_free(name);
2953 goto out;
2956 if (cpu->apic_id < 0) {
2957 error_setg(errp, "apic-id property was not initialized properly");
2958 return;
2961 /*TODO: cpu->host_features incorrectly overwrites features
2962 * set using "feat=on|off". Once we fix this, we can convert
2963 * plus_features & minus_features to global properties
2964 * inside x86_cpu_parse_featurestr() too.
2966 if (cpu->host_features) {
2967 for (w = 0; w < FEATURE_WORDS; w++) {
2968 env->features[w] =
2969 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2973 for (w = 0; w < FEATURE_WORDS; w++) {
2974 cpu->env.features[w] |= plus_features[w];
2975 cpu->env.features[w] &= ~minus_features[w];
2978 if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
2979 env->cpuid_level = 7;
2982 if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
2983 error_setg(&local_err,
2984 kvm_enabled() ?
2985 "Host doesn't support requested features" :
2986 "TCG doesn't support requested features");
2987 goto out;
2990 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
2991 * CPUID[1].EDX.
2993 if (IS_AMD_CPU(env)) {
2994 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
2995 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
2996 & CPUID_EXT2_AMD_ALIASES);
3000 cpu_exec_init(cs, &error_abort);
3002 if (tcg_enabled()) {
3003 tcg_x86_init();
3006 #ifndef CONFIG_USER_ONLY
3007 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
3009 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
3010 x86_cpu_apic_create(cpu, &local_err);
3011 if (local_err != NULL) {
3012 goto out;
3015 #endif
3017 mce_init(cpu);
3019 #ifndef CONFIG_USER_ONLY
3020 if (tcg_enabled()) {
3021 AddressSpace *newas = g_new(AddressSpace, 1);
3023 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
3024 cpu->cpu_as_root = g_new(MemoryRegion, 1);
3026 /* Outer container... */
3027 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
3028 memory_region_set_enabled(cpu->cpu_as_root, true);
3030 /* ... with two regions inside: normal system memory with low
3031 * priority, and...
3033 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
3034 get_system_memory(), 0, ~0ull);
3035 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
3036 memory_region_set_enabled(cpu->cpu_as_mem, true);
3037 address_space_init(newas, cpu->cpu_as_root, "CPU");
3038 cs->num_ases = 1;
3039 cpu_address_space_init(cs, newas, 0);
3041 /* ... SMRAM with higher priority, linked from /machine/smram. */
3042 cpu->machine_done.notify = x86_cpu_machine_done;
3043 qemu_add_machine_init_done_notifier(&cpu->machine_done);
3045 #endif
3047 qemu_init_vcpu(cs);
3049 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
3050  * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
3051  * based on inputs (sockets,cores,threads), it is still better to give
3052  * users a warning.
3054  * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
3055  * cs->nr_threads hasn't been populated yet and the check would be incorrect.
3057 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
3058 error_report("AMD CPU doesn't support hyperthreading. Please configure"
3059 " -smp options properly.");
3060 ht_warned = true;
3063 x86_cpu_apic_realize(cpu, &local_err);
3064 if (local_err != NULL) {
3065 goto out;
3067 cpu_reset(cs);
3069 xcc->parent_realize(dev, &local_err);
3071 out:
3072 if (local_err != NULL) {
3073 error_propagate(errp, local_err);
3074 return;
3078 typedef struct BitProperty {
3079 uint32_t *ptr;
3080 uint32_t mask;
3081 } BitProperty;
3083 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
3084 void *opaque, Error **errp)
3086 BitProperty *fp = opaque;
3087 bool value = (*fp->ptr & fp->mask) == fp->mask;
3088 visit_type_bool(v, name, &value, errp);
3091 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
3092 void *opaque, Error **errp)
3094 DeviceState *dev = DEVICE(obj);
3095 BitProperty *fp = opaque;
3096 Error *local_err = NULL;
3097 bool value;
3099 if (dev->realized) {
3100 qdev_prop_set_after_realize(dev, name, errp);
3101 return;
3104 visit_type_bool(v, name, &value, &local_err);
3105 if (local_err) {
3106 error_propagate(errp, local_err);
3107 return;
3110 if (value) {
3111 *fp->ptr |= fp->mask;
3112 } else {
3113 *fp->ptr &= ~fp->mask;
3117 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3118 void *opaque)
3120 BitProperty *prop = opaque;
3121 g_free(prop);
3124 /* Register a boolean property to get/set a single bit in a uint32_t field.
3126 * The same property name can be registered multiple times to make it affect
3127 * multiple bits in the same FeatureWord. In that case, the getter will return
3128 * true only if all bits are set.
3130 static void x86_cpu_register_bit_prop(X86CPU *cpu,
3131 const char *prop_name,
3132 uint32_t *field,
3133 int bitnr)
3135 BitProperty *fp;
3136 ObjectProperty *op;
3137 uint32_t mask = (1UL << bitnr);
3139 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3140 if (op) {
3141 fp = op->opaque;
3142 assert(fp->ptr == field);
3143 fp->mask |= mask;
3144 } else {
3145 fp = g_new0(BitProperty, 1);
3146 fp->ptr = field;
3147 fp->mask = mask;
3148 object_property_add(OBJECT(cpu), prop_name, "bool",
3149 x86_cpu_get_bit_prop,
3150 x86_cpu_set_bit_prop,
3151 x86_cpu_release_bit_prop, fp, &error_abort);
3155 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3156 FeatureWord w,
3157 int bitnr)
3159 Object *obj = OBJECT(cpu);
3160 int i;
3161 char **names;
3162 FeatureWordInfo *fi = &feature_word_info[w];
3164 if (!fi->feat_names) {
3165 return;
3167 if (!fi->feat_names[bitnr]) {
3168 return;
3171 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
3173 feat2prop(names[0]);
3174 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
3176 for (i = 1; names[i]; i++) {
3177 feat2prop(names[i]);
3178 object_property_add_alias(obj, names[i], obj, names[0],
3179 &error_abort);
3182 g_strfreev(names);
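/* For example, a feat_names entry of the form "pni|sse3" registers "pni" as
 * the bit property and adds "sse3" as an alias for it; underscores in names
 * are converted to '-' by feat2prop() first.
 */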
3185 static void x86_cpu_initfn(Object *obj)
3187 CPUState *cs = CPU(obj);
3188 X86CPU *cpu = X86_CPU(obj);
3189 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
3190 CPUX86State *env = &cpu->env;
3191 FeatureWord w;
3193 cs->env_ptr = env;
3195 object_property_add(obj, "family", "int",
3196 x86_cpuid_version_get_family,
3197 x86_cpuid_version_set_family, NULL, NULL, NULL);
3198 object_property_add(obj, "model", "int",
3199 x86_cpuid_version_get_model,
3200 x86_cpuid_version_set_model, NULL, NULL, NULL);
3201 object_property_add(obj, "stepping", "int",
3202 x86_cpuid_version_get_stepping,
3203 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
3204 object_property_add_str(obj, "vendor",
3205 x86_cpuid_get_vendor,
3206 x86_cpuid_set_vendor, NULL);
3207 object_property_add_str(obj, "model-id",
3208 x86_cpuid_get_model_id,
3209 x86_cpuid_set_model_id, NULL);
3210 object_property_add(obj, "tsc-frequency", "int",
3211 x86_cpuid_get_tsc_freq,
3212 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
3213 object_property_add(obj, "apic-id", "int",
3214 x86_cpuid_get_apic_id,
3215 x86_cpuid_set_apic_id, NULL, NULL, NULL);
3216 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
3217 x86_cpu_get_feature_words,
3218 NULL, NULL, (void *)env->features, NULL);
3219 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
3220 x86_cpu_get_feature_words,
3221 NULL, NULL, (void *)cpu->filtered_features, NULL);
3223 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
3225 #ifndef CONFIG_USER_ONLY
3226 /* Any code creating new X86CPU objects has to set apic-id explicitly */
3227 cpu->apic_id = -1;
3228 #endif
3230 for (w = 0; w < FEATURE_WORDS; w++) {
3231 int bitnr;
3233 for (bitnr = 0; bitnr < 32; bitnr++) {
3234 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
3238 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
3241 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3243 X86CPU *cpu = X86_CPU(cs);
3245 return cpu->apic_id;
3248 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3250 X86CPU *cpu = X86_CPU(cs);
3252 return cpu->env.cr[0] & CR0_PG_MASK;
3255 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3257 X86CPU *cpu = X86_CPU(cs);
3259 cpu->env.eip = value;
3262 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3264 X86CPU *cpu = X86_CPU(cs);
3266 cpu->env.eip = tb->pc - tb->cs_base;
3269 static bool x86_cpu_has_work(CPUState *cs)
3271 X86CPU *cpu = X86_CPU(cs);
3272 CPUX86State *env = &cpu->env;
3274 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3275 CPU_INTERRUPT_POLL)) &&
3276 (env->eflags & IF_MASK)) ||
3277 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3278 CPU_INTERRUPT_INIT |
3279 CPU_INTERRUPT_SIPI |
3280 CPU_INTERRUPT_MCE)) ||
3281 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3282 !(env->hflags & HF_SMM_MASK));
3285 static Property x86_cpu_properties[] = {
3286 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
3287 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
3288 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
3289 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
3290 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
3291 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
3292 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
3293 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
3294 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
3295 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
3296 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
3297 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
3298 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
3299 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
3300 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
3301 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
3302 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
3303 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
3304 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
3305 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
3306 DEFINE_PROP_END_OF_LIST()
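/* These properties are normally set through the -cpu option, e.g.
 * (illustrative):
 *   -cpu Haswell,enforce,pmu=on,hv-relaxed,hv-spinlocks=8191
 * Bare flags such as "enforce" are treated as "<flag>=on" by
 * x86_cpu_parse_featurestr().
 */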
3309 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
3311 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3312 CPUClass *cc = CPU_CLASS(oc);
3313 DeviceClass *dc = DEVICE_CLASS(oc);
3315 xcc->parent_realize = dc->realize;
3316 dc->realize = x86_cpu_realizefn;
3317 dc->props = x86_cpu_properties;
3319 xcc->parent_reset = cc->reset;
3320 cc->reset = x86_cpu_reset;
3321 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
3323 cc->class_by_name = x86_cpu_class_by_name;
3324 cc->parse_features = x86_cpu_parse_featurestr;
3325 cc->has_work = x86_cpu_has_work;
3326 cc->do_interrupt = x86_cpu_do_interrupt;
3327 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
3328 cc->dump_state = x86_cpu_dump_state;
3329 cc->set_pc = x86_cpu_set_pc;
3330 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
3331 cc->gdb_read_register = x86_cpu_gdb_read_register;
3332 cc->gdb_write_register = x86_cpu_gdb_write_register;
3333 cc->get_arch_id = x86_cpu_get_arch_id;
3334 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
3335 #ifdef CONFIG_USER_ONLY
3336 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
3337 #else
3338 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
3339 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
3340 cc->write_elf64_note = x86_cpu_write_elf64_note;
3341 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
3342 cc->write_elf32_note = x86_cpu_write_elf32_note;
3343 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
3344 cc->vmsd = &vmstate_x86_cpu;
3345 #endif
3346 cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
3347 #ifndef CONFIG_USER_ONLY
3348 cc->debug_excp_handler = breakpoint_handler;
3349 #endif
3350 cc->cpu_exec_enter = x86_cpu_exec_enter;
3351 cc->cpu_exec_exit = x86_cpu_exec_exit;
3354 * Reason: x86_cpu_initfn() calls cpu_exec_init(), which saves the
3355 * object in cpus -> dangling pointer after final object_unref().
3357 dc->cannot_destroy_with_object_finalize_yet = true;
3360 static const TypeInfo x86_cpu_type_info = {
3361 .name = TYPE_X86_CPU,
3362 .parent = TYPE_CPU,
3363 .instance_size = sizeof(X86CPU),
3364 .instance_init = x86_cpu_initfn,
3365 .abstract = true,
3366 .class_size = sizeof(X86CPUClass),
3367 .class_init = x86_cpu_common_class_init,
3370 static void x86_cpu_register_types(void)
3372 int i;
3374 type_register_static(&x86_cpu_type_info);
3375 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3376 x86_register_cpudef_type(&builtin_x86_defs[i]);
3378 #ifdef CONFIG_KVM
3379 type_register_static(&host_x86_cpu_type_info);
3380 #endif
3383 type_init(x86_cpu_register_types)