/*
 *  i386 CPUID helper functions
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
19 #include "qemu/osdep.h"
20 #include "qemu/cutils.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/kvm.h"
25 #include "sysemu/cpus.h"
28 #include "qemu/error-report.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
33 #include "qapi-types.h"
34 #include "qapi-visit.h"
35 #include "qapi/visitor.h"
36 #include "sysemu/arch_init.h"
38 #if defined(CONFIG_KVM)
39 #include <linux/kvm_para.h>
42 #include "sysemu/sysemu.h"
43 #include "hw/qdev-properties.h"
44 #include "hw/i386/topology.h"
45 #ifndef CONFIG_USER_ONLY
46 #include "exec/address-spaces.h"
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
/* Cache topology CPUID constants: */

/* CPUID Leaf 2 Descriptors */

#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
#define CPUID_2_L1I_32KB_8WAY_64B 0x30
#define CPUID_2_L2_2MB_8WAY_64B   0x7d
#define CPUID_2_L3_16MB_16WAY_64B 0x4d

/* CPUID Leaf 4 constants: */

#define CPUID_4_TYPE_DCACHE  1
#define CPUID_4_TYPE_ICACHE  2
#define CPUID_4_TYPE_UNIFIED 3

#define CPUID_4_LEVEL(l)        ((l) << 5)

#define CPUID_4_SELF_INIT_LEVEL (1 << 8)
#define CPUID_4_FULLY_ASSOC     (1 << 9)

#define CPUID_4_NO_INVD_SHARING (1 << 0)
#define CPUID_4_INCLUSIVE       (1 << 1)
#define CPUID_4_COMPLEX_IDX     (1 << 2)

#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006: */
#define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
                          a == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
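
/* Illustrative note (this assumes AMD's published 4-bit associativity
 * encoding rather than anything spelled out above): a disabled cache
 * encodes as 0, a direct-mapped one as 1, and a fully associative one,
 * AMD_ENC_ASSOC(ASSOC_FULL), as 0xF in CPUID leaf 0x80000006.
 */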
/* Definitions of the hardcoded cache entries we expose: */

#define L1D_LINE_SIZE         64
#define L1D_ASSOCIATIVITY      8
#define L1D_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1D_LINES_PER_TAG      1
#define L1D_SIZE_KB_AMD       64
#define L1D_ASSOCIATIVITY_AMD  2

/* L1 instruction cache: */
#define L1I_LINE_SIZE         64
#define L1I_ASSOCIATIVITY      8
#define L1I_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1I_LINES_PER_TAG      1
#define L1I_SIZE_KB_AMD       64
#define L1I_ASSOCIATIVITY_AMD  2

/* Level 2 unified cache: */
#define L2_LINE_SIZE          64
#define L2_ASSOCIATIVITY      16
#define L2_PARTITIONS          1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
#define L2_LINES_PER_TAG       1
#define L2_SIZE_KB_AMD       512

/* Level 3 unified cache: */
#define L3_SIZE_KB             0 /* disabled */
#define L3_ASSOCIATIVITY       0 /* disabled */
#define L3_LINES_PER_TAG       0 /* disabled */
#define L3_LINE_SIZE           0 /* disabled */
#define L3_N_LINE_SIZE        64
#define L3_N_ASSOCIATIVITY    16
#define L3_N_SETS          16384
#define L3_N_PARTITIONS        1
#define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
#define L3_N_LINES_PER_TAG     1
#define L3_N_SIZE_KB_AMD   16384

/* TLB definitions: */

#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512
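
/* Worked example for the sizes quoted above (the per-level set count is
 * implied rather than listed here): a 32KiB L1D needs
 * 32KiB / (L1D_LINE_SIZE * L1D_ASSOCIATIVITY * L1D_PARTITIONS)
 *   = 32768 / (64 * 8 * 1) = 64 sets,
 * while the L3_N numbers multiply back out to
 * 64 * 16 * 16384 * 1 = 16MiB, matching L3_N_SIZE_KB_AMD.
 */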
static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
                                     uint32_t vendor2, uint32_t vendor3)
{
    int i;
    for (i = 0; i < 4; i++) {
        dst[i] = vendor1 >> (8 * i);
        dst[i + 4] = vendor2 >> (8 * i);
        dst[i + 8] = vendor3 >> (8 * i);
    }
    dst[CPUID_VENDOR_SZ] = '\0';
}
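
/* Example: CPUID leaf 0 on an Intel part returns EBX="Genu", EDX="ineI",
 * ECX="ntel" (least-significant byte first in each register), so
 * x86_cpu_vendor_words2str(dst, ebx, edx, ecx) produces "GenuineIntel".
 */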
/* feature flags taken from "Intel Processor Identification and the CPUID
 * Instruction" and AMD's "CPUID Specification".  In cases of disagreement
 * between feature naming conventions, aliases may be added.
 */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse",
    "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep",
    "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
    NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss",
    "ht" /* Intel htt */, "tm", "ia64", "pbe",
};

static const char *ext_feature_name[] = {
    "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
    "ds_cpl", "vmx", "smx", "est",
    "tm2", "ssse3", "cid", NULL,
    "fma", "cx16", "xtpr", "pdcm",
    NULL, "pcid", "dca", "sse4.1|sse4_1",
    "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
    "tsc-deadline", "aes", "xsave", "osxsave",
    "avx", "f16c", "rdrand", "hypervisor",
};

/* Feature names that are already defined on feature_name[] but are set on
 * CPUID[8000_0001].EDX on AMD CPUs don't have their names on
 * ext2_feature_name[].  They are copied automatically to cpuid_ext2_features
 * if and only if CPU vendor is AMD.
 */
static const char *ext2_feature_name[] = {
    NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
    NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
    NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
    NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
    NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
    "nx|xd", NULL, "mmxext", NULL /* mmx */,
    NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
    NULL, "lm|i64", "3dnowext", "3dnow",
};

static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
    "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", "ibs", "xop",
    "skinit", "wdt", NULL, "lwp",
    "fma4", "tce", NULL, "nodeid_msr",
    NULL, "tbm", "topoext", "perfctr_core",
    "perfctr_nb", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

static const char *ext4_feature_name[] = {
    NULL, NULL, "xstore", "xstore-en",
    NULL, NULL, "xcrypt", "xcrypt-en",
    "ace2", "ace2-en", "phe", "phe-en",
    "pmm", "pmm-en", NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

static const char *kvm_feature_name[] = {
    "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
    "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    "kvmclock-stable-bit", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

static const char *hyperv_priv_feature_name[] = {
    NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
    NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
    NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
    NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
    NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
    NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

static const char *hyperv_ident_feature_name[] = {
    NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
    NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
    NULL /* hv_post_messages */, NULL /* hv_signal_events */,
    NULL /* hv_create_port */, NULL /* hv_connect_port */,
    NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
    NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
    NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

static const char *hyperv_misc_feature_name[] = {
    NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
    NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
    NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
    NULL, NULL,
    NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

static const char *svm_feature_name[] = {
    "npt", "lbrv", "svm_lock", "nrip_save",
    "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
    NULL, NULL, "pause_filter", NULL,
    "pfthreshold", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

static const char *cpuid_7_0_ebx_feature_name[] = {
    "fsgsbase", "tsc_adjust", NULL, "bmi1",
    "hle", "avx2", NULL, "smep",
    "bmi2", "erms", "invpcid", "rtm",
    NULL, NULL, "mpx", NULL,
    "avx512f", "avx512dq", "rdseed", "adx",
    "smap", "avx512ifma", "pcommit", "clflushopt",
    "clwb", NULL, "avx512pf", "avx512er",
    "avx512cd", NULL, "avx512bw", "avx512vl",
};

static const char *cpuid_7_0_ecx_feature_name[] = {
    NULL, "avx512vbmi", "umip", "pku",
    "ospke", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, "rdpid", NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

static const char *cpuid_apm_edx_feature_name[] = {
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    "invtsc", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

static const char *cpuid_xsave_feature_name[] = {
    "xsaveopt", "xsavec", "xgetbv1", "xsaves",
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

static const char *cpuid_6_feature_name[] = {
    NULL, NULL, "arat", NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */   \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
          CPUID_EXT_F16C, CPUID_EXT_RDRAND */

#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES 0
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
          CPUID_7_0_EBX_ERMS)
          /* missing:
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE)
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
typedef struct FeatureWordInfo {
    const char **feat_names;
    uint32_t cpuid_eax;   /* Input EAX for CPUID */
    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;   /* Input ECX value for CPUID */
    int cpuid_reg;        /* output register (R_* constant) */
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
} FeatureWordInfo;

static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
    [FEAT_1_EDX] = {
        .feat_names = feature_name,
        .cpuid_eax = 1, .cpuid_reg = R_EDX,
        .tcg_features = TCG_FEATURES,
    },
    [FEAT_1_ECX] = {
        .feat_names = ext_feature_name,
        .cpuid_eax = 1, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT_FEATURES,
    },
    [FEAT_8000_0001_EDX] = {
        .feat_names = ext2_feature_name,
        .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT2_FEATURES,
    },
    [FEAT_8000_0001_ECX] = {
        .feat_names = ext3_feature_name,
        .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT3_FEATURES,
    },
    [FEAT_C000_0001_EDX] = {
        .feat_names = ext4_feature_name,
        .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT4_FEATURES,
    },
    [FEAT_KVM] = {
        .feat_names = kvm_feature_name,
        .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
        .tcg_features = TCG_KVM_FEATURES,
    },
    [FEAT_HYPERV_EAX] = {
        .feat_names = hyperv_priv_feature_name,
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
    },
    [FEAT_HYPERV_EBX] = {
        .feat_names = hyperv_ident_feature_name,
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
    },
    [FEAT_HYPERV_EDX] = {
        .feat_names = hyperv_misc_feature_name,
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
    },
    [FEAT_SVM] = {
        .feat_names = svm_feature_name,
        .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
        .tcg_features = TCG_SVM_FEATURES,
    },
    [FEAT_7_0_EBX] = {
        .feat_names = cpuid_7_0_ebx_feature_name,
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EBX,
        .tcg_features = TCG_7_0_EBX_FEATURES,
    },
    [FEAT_7_0_ECX] = {
        .feat_names = cpuid_7_0_ecx_feature_name,
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_ECX,
        .tcg_features = TCG_7_0_ECX_FEATURES,
    },
    [FEAT_8000_0007_EDX] = {
        .feat_names = cpuid_apm_edx_feature_name,
        .cpuid_eax = 0x80000007,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_APM_FEATURES,
        .unmigratable_flags = CPUID_APM_INVTSC,
    },
    [FEAT_XSAVE] = {
        .feat_names = cpuid_xsave_feature_name,
        .cpuid_eax = 0xd,
        .cpuid_needs_ecx = true, .cpuid_ecx = 1,
        .cpuid_reg = R_EAX,
        .tcg_features = TCG_XSAVE_FEATURES,
    },
    [FEAT_6_EAX] = {
        .feat_names = cpuid_6_feature_name,
        .cpuid_eax = 6, .cpuid_reg = R_EAX,
        .tcg_features = TCG_6_EAX_FEATURES,
    },
};
typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;

#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER
const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};
const char *get_register_name_32(unsigned int reg)
{
    if (reg >= CPU_NB_REGS32) {
        return NULL;
    }
    return x86_reg_info_32[reg].name;
}

/*
 * Returns the set of feature flags that are supported and migratable by
 * QEMU, for a given FeatureWord.
 */
static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t r = 0;
    int i;

    for (i = 0; i < 32; i++) {
        uint32_t f = 1U << i;
        /* If the feature name is unknown, it is not supported by QEMU yet */
        if (!wi->feat_names[i]) {
            continue;
        }
        /* Skip features known to QEMU, but explicitly marked as unmigratable */
        if (wi->unmigratable_flags & f) {
            continue;
        }
        r |= f;
    }
    return r;
}
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#else
    abort();
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
}
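
/* Example use (mirroring host_x86_cpu_class_init() below):
 *
 *     uint32_t eax, ebx, ecx, edx;
 *     host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
 *     // EAX now holds the host's family/model/stepping signature.
 */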
#define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))

/* general substring compare of *[s1..e1) and *[s2..e2).  sx is start of
 * a substring.  ex if !NULL points to the first char after a substring,
 * otherwise the string is assumed to sized by a terminating nul.
 * Return lexical ordering of *s1:*s2.
 */
static int sstrcmp(const char *s1, const char *e1,
                   const char *s2, const char *e2)
{
    for (;;) {
        if (!*s1 || !*s2 || *s1 != *s2)
            return *s1 - *s2;
        ++s1, ++s2;
        if (s1 == e1 && s2 == e2)
            return 0;
    }
}

/* compare *[s..e) to *altstr.  *altstr may be a simple string or multiple
 * '|' delimited (possibly empty) strings in which case search for a match
 * within the alternatives proceeds left to right.  Return 0 for success,
 * non-zero otherwise.
 */
static int altcmp(const char *s, const char *e, const char *altstr)
{
    const char *p, *q;

    for (q = p = altstr; ; ) {
        while (*p && *p != '|')
            ++p;
        if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
            return 0;
        if (!*p)
            return 1;
        else
            q = ++p;
    }
}
/* search featureset for flag *[s..e), if found set corresponding bit in
 * *pval and return true, otherwise return false
 */
static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
                           const char **featureset)
{
    uint32_t mask;
    const char **ppc;
    bool found = false;

    for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
        if (*ppc && !altcmp(s, e, *ppc)) {
            *pval |= mask;
            found = true;
        }
    }
    return found;
}

static void add_flagname_to_bitmaps(const char *flagname,
                                    FeatureWordArray words,
                                    Error **errp)
{
    FeatureWord w;
    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        if (wi->feat_names &&
            lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
            break;
        }
    }
    if (w == FEATURE_WORDS) {
        error_setg(errp, "CPU feature %s not found", flagname);
    }
}
/* CPU class name definitions: */

#define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
#define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)

/* Return type name for a given CPU model name
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}
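
/* Example: on an x86_64 target, where TYPE_X86_CPU is "x86_64-cpu",
 * x86_cpu_type_name("qemu64") returns the newly allocated string
 * "qemu64-x86_64-cpu".
 */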
static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;

    if (cpu_model == NULL) {
        return NULL;
    }

    typename = x86_cpu_type_name(cpu_model);
    oc = object_class_by_name(typename);
    g_free(typename);
    return oc;
}

static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
{
    const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
    assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
    return g_strndup(class_name,
                     strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
}

struct X86CPUDefinition {
    const char *name;
    uint32_t level;
    uint32_t xlevel;
    uint32_t xlevel2;
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features;
    char model_id[48];
};
static X86CPUDefinition builtin_x86_defs[] = {
    {
        .vendor = CPUID_VENDOR_AMD,
        .features[FEAT_1_EDX] =
            PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_CX16,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
        .xlevel = 0x8000000A,
        .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
    },
    {
        .vendor = CPUID_VENDOR_AMD,
        /* Missing: CPUID_HT */
        .features[FEAT_1_EDX] =
            PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36 | CPUID_VME,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
            CPUID_EXT_POPCNT,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
            CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
        /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_CR8LEG,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
            CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
        /* Missing: CPUID_SVM_LBRV */
        .features[FEAT_SVM] =
            CPUID_SVM_NPT,
        .xlevel = 0x8000001A,
        .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
        .features[FEAT_1_EDX] =
            PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
        /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
         * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
            CPUID_EXT_CX16,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        /* Missing: CPUID_HT */
        .features[FEAT_1_EDX] =
            PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_CX16,
        /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
        .features[FEAT_8000_0001_ECX] =
            0,
        .xlevel = 0x80000008,
        .model_id = "Common KVM processor"
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            PPRO_FEATURES,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3,
        .xlevel = 0x80000004,
        .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_ECX] =
            0,
        .xlevel = 0x80000008,
        .model_id = "Common 32-bit KVM processor"
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
        .features[FEAT_1_EDX] =
            PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
            CPUID_SS,
        /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
         * CPUID_EXT_PDCM, CPUID_EXT_VMX */
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_NX,
        .xlevel = 0x80000008,
        .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            I486_FEATURES,
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            PENTIUM_FEATURES,
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            PENTIUM2_FEATURES,
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            PENTIUM3_FEATURES,
    },
    {
        .vendor = CPUID_VENDOR_AMD,
        .features[FEAT_1_EDX] =
            PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
            CPUID_MCA,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .xlevel = 0x80000008,
        .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
        .features[FEAT_1_EDX] =
            PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
            CPUID_ACPI | CPUID_SS,
        /* Some CPUs got no CPUID_SEP */
        /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
         * CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
            CPUID_EXT_MOVBE,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .xlevel = 0x80000008,
        .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .xlevel = 0x80000008,
        .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .xlevel = 0x80000008,
        .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
    },
    {
        .name = "SandyBridge",
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
            CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
            CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon E312xx (Sandy Bridge)",
    },
    {
        .name = "IvyBridge",
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
            CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
            CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_ERMS,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
    },
    {
        .name = "Haswell-noTSX",
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Haswell, no TSX)",
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Haswell)",
    },
    {
        .name = "Broadwell-noTSX",
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Broadwell, no TSX)",
    },
    {
        .name = "Broadwell",
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Broadwell)",
    },
    {
        .name = "Skylake-Client",
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
        /* Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.6).
         * KVM doesn't yet expose any XSAVES state save component,
         * and the only one defined in Skylake (processor tracing)
         * probably will block migration anyway.
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Skylake)",
    },
    {
        .name = "Opteron_G1",
        .vendor = CPUID_VENDOR_AMD,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
            CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
            CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
            CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
            CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
            CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
        .xlevel = 0x80000008,
        .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
    },
    {
        .name = "Opteron_G2",
        .vendor = CPUID_VENDOR_AMD,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_CX16 | CPUID_EXT_SSE3,
        /* Missing: CPUID_EXT2_RDTSCP */
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_FXSR |
            CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
            CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
            CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
            CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
            CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
            CPUID_EXT2_DE | CPUID_EXT2_FPU,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
        .xlevel = 0x80000008,
        .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
    },
    {
        .name = "Opteron_G3",
        .vendor = CPUID_VENDOR_AMD,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
            CPUID_EXT_SSE3,
        /* Missing: CPUID_EXT2_RDTSCP */
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_FXSR |
            CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
            CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
            CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
            CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
            CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
            CPUID_EXT2_DE | CPUID_EXT2_FPU,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
            CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
        .xlevel = 0x80000008,
        .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
    },
    {
        .name = "Opteron_G4",
        .vendor = CPUID_VENDOR_AMD,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
            CPUID_EXT_SSE3,
        /* Missing: CPUID_EXT2_RDTSCP */
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM |
            CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
            CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
            CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
            CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
            CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
            CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
            CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
            CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
            CPUID_EXT3_LAHF_LM,
        .xlevel = 0x8000001A,
        .model_id = "AMD Opteron 62xx class CPU",
    },
    {
        .name = "Opteron_G5",
        .vendor = CPUID_VENDOR_AMD,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
            CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
            CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
        /* Missing: CPUID_EXT2_RDTSCP */
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM |
            CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
            CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
            CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
            CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
            CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
            CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
            CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
            CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
            CPUID_EXT3_LAHF_LM,
        .xlevel = 0x8000001A,
        .model_id = "AMD Opteron 63xx class CPU",
    },
};
typedef struct PropValue {
    const char *prop, *value;
} PropValue;

/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "monitor", "off" },
    { NULL, NULL },
};

void x86_cpu_change_kvm_default(const char *prop, const char *value)
{
    PropValue *pv;
    for (pv = kvm_default_props; pv->prop; pv++) {
        if (!strcmp(pv->prop, prop)) {
            pv->value = value;
            break;
        }
    }

    /* It is valid to call this function only for properties that
     * are already present in the kvm_default_props table.
     */
    assert(pv->prop);
}
static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only);

static bool lmce_supported(void)
{
    uint64_t mce_cap;

    if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
        return false;
    }

    return !!(mce_cap & MCG_LMCE_P);
}

static int cpu_x86_fill_model_id(char *str)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
    int i;

    for (i = 0; i < 3; i++) {
        host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
        memcpy(str + i * 16 +  0, &eax, 4);
        memcpy(str + i * 16 +  4, &ebx, 4);
        memcpy(str + i * 16 +  8, &ecx, 4);
        memcpy(str + i * 16 + 12, &edx, 4);
    }
    return 0;
}
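
/* Example: leaves 0x80000002..0x80000004 each return 16 bytes of the brand
 * string in EAX/EBX/ECX/EDX, so the loop above assembles the host's full
 * 48-byte model id, e.g. something like
 * "Intel(R) Xeon(R) CPU E5-2680 v3 @ 2.50GHz" (the exact text is whatever
 * the host CPU reports).
 */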
static X86CPUDefinition host_cpudef;

static Property host_x86_cpu_properties[] = {
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};
/* class_init for the "host" CPU model
 *
 * This function may be called before KVM is initialized.
 */
static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    xcc->kvm_required = true;

    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    host_cpudef.stepping = eax & 0x0F;

    cpu_x86_fill_model_id(host_cpudef.model_id);

    xcc->cpu_def = &host_cpudef;

    /* level, xlevel, xlevel2, and the feature words are initialized on
     * instance_init, because they require KVM to be initialized.
     */

    dc->props = host_x86_cpu_properties;
    /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
    dc->cannot_destroy_with_object_finalize_yet = true;
}
static void host_x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    KVMState *s = kvm_state;

    /* We can't fill the features array here because we don't know yet if
     * "migratable" is true or false.
     */
    cpu->host_features = true;

    /* If KVM is disabled, x86_cpu_realizefn() will report an error later */
    if (kvm_enabled()) {
        env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
        env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
        env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);

        if (lmce_supported()) {
            object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
        }
    }

    object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = TYPE_X86_CPU,
    .instance_init = host_x86_cpu_initfn,
    .class_init = host_x86_cpu_class_init,
};
static void report_unavailable_features(FeatureWord w, uint32_t mask)
{
    FeatureWordInfo *f = &feature_word_info[w];
    int i;

    for (i = 0; i < 32; ++i) {
        if ((1UL << i) & mask) {
            const char *reg = get_register_name_32(f->cpuid_reg);
            fprintf(stderr, "warning: %s doesn't support requested feature: "
                    "CPUID.%02XH:%s%s%s [bit %d]\n",
                    kvm_enabled() ? "host" : "TCG",
                    f->cpuid_eax, reg,
                    f->feat_names[i] ? "." : "",
                    f->feat_names[i] ? f->feat_names[i] : "", i);
        }
    }
}
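
/* Example of the resulting diagnostic (the exact leaf and register depend
 * on the feature word involved):
 *
 *   warning: TCG doesn't support requested feature: CPUID.01H:ECX.vmx [bit 5]
 */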
static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
                                         const char *name, void *opaque,
                                         Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = (env->cpuid_version >> 8) & 0xf;
    if (value == 0xf) {
        value += (env->cpuid_version >> 20) & 0xff;
    }
    visit_type_int(v, name, &value, errp);
}

static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
                                         const char *name, void *opaque,
                                         Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff + 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    env->cpuid_version &= ~0xff00f00;
    if (value > 0x0f) {
        env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
    } else {
        env->cpuid_version |= value << 8;
    }
}
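
/* Worked example: setting family=21 (0x15) exceeds 0x0f, so bits 11:8 are
 * set to 0xf and the extended family field (bits 27:20) gets 21 - 15 = 6;
 * the getter above then reads back 0xf + 0x6 = 21.
 */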
static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = (env->cpuid_version >> 4) & 0xf;
    value |= ((env->cpuid_version >> 16) & 0xf) << 4;
    visit_type_int(v, name, &value, errp);
}

static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    env->cpuid_version &= ~0xf00f0;
    env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
}

static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
                                           const char *name, void *opaque,
                                           Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = env->cpuid_version & 0xf;
    visit_type_int(v, name, &value, errp);
}

static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
                                           const char *name, void *opaque,
                                           Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    env->cpuid_version &= ~0xf;
    env->cpuid_version |= value & 0xf;
}
static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    char *value;

    value = g_malloc(CPUID_VENDOR_SZ + 1);
    x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
                             env->cpuid_vendor3);
    return value;
}

static void x86_cpuid_set_vendor(Object *obj, const char *value,
                                 Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int i;

    if (strlen(value) != CPUID_VENDOR_SZ) {
        error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
        return;
    }

    env->cpuid_vendor1 = 0;
    env->cpuid_vendor2 = 0;
    env->cpuid_vendor3 = 0;
    for (i = 0; i < 4; i++) {
        env->cpuid_vendor1 |= ((uint8_t)value[i    ]) << (8 * i);
        env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
        env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
    }
}

static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    char *value;
    int i;

    value = g_malloc(48 + 1);
    for (i = 0; i < 48; i++) {
        value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
    }
    value[48] = '\0';
    return value;
}

static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
                                   Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int c, len, i;

    if (model_id == NULL) {
        model_id = "";
    }
    len = strlen(model_id);
    memset(env->cpuid_model, 0, 48);
    for (i = 0; i < 48; i++) {
        if (i >= len) {
            c = '\0';
        } else {
            c = (uint8_t)model_id[i];
        }
        env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
    }
}

static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    int64_t value;

    value = cpu->env.tsc_khz * 1000;
    visit_type_int(v, name, &value, errp);
}

static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    const int64_t min = 0;
    const int64_t max = INT64_MAX;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
}
/* Generic getter for "feature-words" and "filtered-features" properties */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        qwi->cpuid_input_eax = wi->cpuid_eax;
        qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid_ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
}
static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    int64_t value = cpu->hyperv_spinlock_attempts;

    visit_type_int(v, name, &value, errp);
}

static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const int64_t min = 0xFFF;
    const int64_t max = UINT_MAX;
    X86CPU *cpu = X86_CPU(obj);
    Error *err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    if (value < min || value > max) {
        error_setg(errp, "Property %s.%s doesn't take value %" PRId64
                   " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
                   object_get_typename(obj), name ? name : "null",
                   value, min, max);
        return;
    }
    cpu->hyperv_spinlock_attempts = value;
}

static PropertyInfo qdev_prop_spinlocks = {
    .name  = "int",
    .get   = x86_get_hv_spinlocks,
    .set   = x86_set_hv_spinlocks,
};
/* Convert all '_' in a feature string option name to '-', to make feature
 * name conform to QOM property naming rule, which uses '-' instead of '_'.
 */
static inline void feat2prop(char *s)
{
    while ((s = strchr(s, '_'))) {
        *s = '-';
    }
}

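/* For example, feat2prop() turns a legacy spelling such as "tsc_adjust" into
 * the QOM property name "tsc-adjust" (illustrative; the actual names come
 * from the feature_word_info tables). */
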
/* Compatibility hack to maintain legacy +-feat semantic,
 * where +-feat overwrites any feature set by
 * feat=on|feat even if the latter is parsed after +-feat
 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
 */
static FeatureWordArray plus_features = { 0 };
static FeatureWordArray minus_features = { 0 };

/* Parse "+feature,-feature,feature=foo" CPU feature string
 */
static void x86_cpu_parse_featurestr(const char *typename, char *features,
                                     Error **errp)
{
    char *featurestr; /* Single "key=value" string being parsed */
    Error *local_err = NULL;
    static bool cpu_globals_initialized;

    if (cpu_globals_initialized) {
        return;
    }
    cpu_globals_initialized = true;

    if (!features) {
        return;
    }

    for (featurestr = strtok(features, ",");
         featurestr && !local_err;
         featurestr = strtok(NULL, ",")) {
        const char *name;
        const char *val = NULL;
        char *eq = NULL;
        char num[32];
        GlobalProperty *prop;

        /* Compatibility syntax: */
        if (featurestr[0] == '+') {
            add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
            continue;
        } else if (featurestr[0] == '-') {
            add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
            continue;
        }

        eq = strchr(featurestr, '=');
        if (eq) {
            *eq++ = 0;
            val = eq;
        } else {
            val = "on";
        }

        feat2prop(featurestr);
        name = featurestr;

        if (!strcmp(name, "tsc-freq")) {
            int64_t tsc_freq;
            char *err;

            tsc_freq = qemu_strtosz_suffix_unit(val, &err,
                                                QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
            if (tsc_freq < 0 || *err) {
                error_setg(errp, "bad numerical value %s", val);
                return;
            }
            snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
            val = num;
            name = "tsc-frequency";
        }

        prop = g_new0(typeof(*prop), 1);
        prop->driver = typename;
        prop->property = g_strdup(name);
        prop->value = g_strdup(val);
        prop->errp = &error_fatal;
        qdev_prop_register_global(prop);
    }

    error_propagate(errp, local_err);
}

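/*
 * Illustrative examples of the accepted syntax (not an exhaustive list):
 *
 *   -cpu Haswell,+avx2            legacy form, sets the bit in plus_features
 *   -cpu Haswell,-x2apic          legacy form, sets the bit in minus_features
 *   -cpu Haswell,vme=off          registered as a global "vme=off" property
 *                                 on the CPU type, applied at creation time
 *   -cpu Haswell,tsc-freq=2.5G    size-style suffixes are accepted and the
 *                                 value is rewritten into the
 *                                 "tsc-frequency" property, in Hz
 */
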
/* Print all cpuid feature names in featureset
 */
static void listflags(FILE *f, fprintf_function print, const char **featureset)
{
    int bit;
    bool first = true;

    for (bit = 0; bit < 32; bit++) {
        if (featureset[bit]) {
            print(f, "%s%s", first ? "" : " ", featureset[bit]);
            first = false;
        }
    }
}

/* generate CPU information. */
void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    X86CPUDefinition *def;
    char buf[256];
    int i;

    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
        def = &builtin_x86_defs[i];
        snprintf(buf, sizeof(buf), "%s", def->name);
        (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
    }
#ifdef CONFIG_KVM
    (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
                   "KVM processor with all supported host features "
                   "(only available in KVM mode)");
#endif

    (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
    for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
        FeatureWordInfo *fw = &feature_word_info[i];

        (*cpu_fprintf)(f, " ");
        listflags(f, cpu_fprintf, fw->feat_names);
        (*cpu_fprintf)(f, "\n");
    }
}

CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    X86CPUDefinition *def;
    int i;

    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
        CpuDefinitionInfoList *entry;
        CpuDefinitionInfo *info;

        def = &builtin_x86_defs[i];
        info = g_malloc0(sizeof(*info));
        info->name = g_strdup(def->name);

        entry = g_malloc0(sizeof(*entry));
        entry->value = info;
        entry->next = cpu_list;
        cpu_list = entry;
    }

    return cpu_list;
}

static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t r;

    if (kvm_enabled()) {
        r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
                                         wi->cpuid_ecx,
                                         wi->cpuid_reg);
    } else if (tcg_enabled()) {
        r = wi->tcg_features;
    } else {
        return ~0;
    }
    if (migratable_only) {
        r &= x86_cpu_get_migratable_flags(w);
    }
    return r;
}

/*
 * Filters CPU feature words based on host availability of each feature.
 *
 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
 */
static int x86_cpu_filter_features(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    int rv = 0;

    for (w = 0; w < FEATURE_WORDS; w++) {
        uint32_t host_feat =
            x86_cpu_get_supported_feature_word(w, cpu->migratable);
        uint32_t requested_features = env->features[w];
        env->features[w] &= host_feat;
        cpu->filtered_features[w] = requested_features & ~env->features[w];
        if (cpu->filtered_features[w]) {
            if (cpu->check_cpuid || cpu->enforce_cpuid) {
                report_unavailable_features(w, cpu->filtered_features[w]);
            }
            rv = 1;
        }
    }

    return rv;
}

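/*
 * Illustrative note: with "-cpu SomeModel,check" unavailable flags are only
 * warned about (via report_unavailable_features() above), while with
 * "-cpu SomeModel,enforce" the non-zero return value makes
 * x86_cpu_realizefn() fail instead of silently dropping the flags.
 */
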
static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
{
    PropValue *pv;

    for (pv = props; pv->prop; pv++) {
        if (!pv->value) {
            continue;
        }
        object_property_parse(OBJECT(cpu), pv->value, pv->prop,
                              &error_abort);
    }
}

/* Load data from X86CPUDefinition
 */
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    object_property_set_int(OBJECT(cpu), def->level, "level", errp);
    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
    object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* Special cases not set in the X86CPUDefinition structs: */
    if (kvm_enabled()) {
        if (!kvm_irqchip_in_kernel()) {
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    }

    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (kvm_enabled()) {
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
}

X86CPU *cpu_x86_init(const char *cpu_model)
{
    return X86_CPU(cpu_generic_init(TYPE_X86_CPU, cpu_model));
}

static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
{
    X86CPUDefinition *cpudef = data;
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->cpu_def = cpudef;
}

static void x86_register_cpudef_type(X86CPUDefinition *def)
{
    char *typename = x86_cpu_type_name(def->name);
    TypeInfo ti = {
        .name = typename,
        .parent = TYPE_X86_CPU,
        .class_init = x86_cpu_cpudef_class_init,
        .class_data = def,
    };

    type_register(&ti);
    g_free(typename);
}

#if !defined(CONFIG_USER_ONLY)

void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}

#endif /* !CONFIG_USER_ONLY */

void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint32_t pkg_offset;

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel) {
            if (env->cpuid_xlevel2 > 0) {
                /* Handle the Centaur's CPUID instruction. */
                if (index > env->cpuid_xlevel2) {
                    index = env->cpuid_xlevel2;
                } else if (index < 0xC0000000) {
                    index = env->cpuid_xlevel;
                }
            } else {
                /* Intel documentation states that invalid EAX input will
                 * return the same information as EAX=cpuid_level
                 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
                 */
                index = env->cpuid_level;
            }
        }
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
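    /*
     * Illustrative note on the leaf 1 encoding below: EBX[31:24] is the
     * initial APIC ID, EBX[23:16] the number of addressable logical
     * processors, and EBX[15:8] the CLFLUSH line size in 8-byte quantities
     * (the constant 8 therefore advertises a 64-byte cache line).
     */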
    case 1:
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
            *ecx |= CPUID_EXT_OSXSAVE;
        }
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= CPUID_HT;
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        if (!cpu->enable_l3_cache) {
            *ecx = 0;
        } else {
            *ecx = L3_N_DESCRIPTOR;
        }
        *edx = (L1D_DESCRIPTOR << 16) | \
               (L1I_DESCRIPTOR <<  8) | \
               (L2_DESCRIPTOR);
        break;
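    /*
     * Illustrative note for the leaf 4 entries below: EAX[4:0] is the cache
     * type, EAX[7:5] the cache level (hence the CPUID_4_LEVEL() shift of 5),
     * and EAX[25:14] holds "maximum addressable IDs for logical processors
     * sharing this cache" minus one, which is why the thread/package counts
     * are ORed in below.  EBX packs (line size - 1), (partitions - 1) and
     * (ways - 1); ECX holds (sets - 1).
     */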
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            *eax &= ~0xFC000000;
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                *eax |= CPUID_4_TYPE_DCACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1D_LINE_SIZE - 1) | \
                       ((L1D_PARTITIONS - 1) << 12) | \
                       ((L1D_ASSOCIATIVITY - 1) << 22);
                *ecx = L1D_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 1: /* L1 icache info */
                *eax |= CPUID_4_TYPE_ICACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1I_LINE_SIZE - 1) | \
                       ((L1I_PARTITIONS - 1) << 12) | \
                       ((L1I_ASSOCIATIVITY - 1) << 22);
                *ecx = L1I_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 2: /* L2 cache info */
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(2) | \
                        CPUID_4_SELF_INIT_LEVEL;
                if (cs->nr_threads > 1) {
                    *eax |= (cs->nr_threads - 1) << 14;
                }
                *ebx = (L2_LINE_SIZE - 1) | \
                       ((L2_PARTITIONS - 1) << 12) | \
                       ((L2_ASSOCIATIVITY - 1) << 22);
                *ecx = L2_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 3: /* L3 cache info */
                if (!cpu->enable_l3_cache) {
                    *eax = 0;
                    *ebx = 0;
                    *ecx = 0;
                    *edx = 0;
                    break;
                }
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(3) | \
                        CPUID_4_SELF_INIT_LEVEL;
                pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
                *eax |= ((1 << pkg_offset) - 1) << 14;
                *ebx = (L3_N_LINE_SIZE - 1) | \
                       ((L3_N_PARTITIONS - 1) << 12) | \
                       ((L3_N_ASSOCIATIVITY - 1) << 22);
                *ecx = L3_N_SETS - 1;
                *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
            }
        }

        /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
        if ((*eax & 31) && cs->nr_cores > 1) {
            *eax |= (cs->nr_cores - 1) << 26;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = 0; /* Reserved */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
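    /*
     * Illustrative note for leaf 0xB below: for each sub-leaf, EAX[4:0] is
     * the number of bits to shift the x2APIC ID right to reach the next
     * topology level, EBX is the number of logical processors at this level,
     * ECX[15:8] reports the level type (SMT, core, or invalid), and EDX is
     * the CPU's x2APIC ID.
     */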
    case 0xB:
        /* Extended Topology Enumeration Leaf */
        if (!cpu->enable_cpuid_0xb) {
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;

        switch (count) {
        case 0:
            *eax = apicid_core_offset(smp_cores, smp_threads);
            *ebx = smp_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_pkg_offset(smp_cores, smp_threads);
            *ebx = smp_cores * smp_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }

        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
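    /*
     * Illustrative note for leaf 0xD below: sub-leaf 0 reports the supported
     * XSAVE state-component bitmap in EDX:EAX and the save-area size needed
     * for those components in ECX/EBX; sub-leaf 1 reports XSAVE extension
     * flags (FEAT_XSAVE); sub-leaves i >= 2 report the size and offset of
     * state component i.
     */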
    case 0xD: {
        KVMState *s = cs->kvm_state;
        uint64_t ena_mask;
        int i;

        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }
        if (kvm_enabled()) {
            ena_mask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX);
            ena_mask <<= 32;
            ena_mask |= kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
        } else {
            ena_mask = -1;
        }

        if (count == 0) {
            *ecx = 0x240;
            for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
                const ExtSaveArea *esa = &x86_ext_save_areas[i];
                if ((env->features[esa->feature] & esa->bits) == esa->bits
                    && ((ena_mask >> i) & 1) != 0) {
                    if (i < 32) {
                        *eax |= 1u << i;
                    } else {
                        *edx |= 1u << (i - 32);
                    }
                    *ecx = MAX(*ecx, esa->offset + esa->size);
                }
            }
            *eax |= ena_mask & (XSTATE_FP_MASK | XSTATE_SSE_MASK);
            *ebx = *ecx;
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            const ExtSaveArea *esa = &x86_ext_save_areas[count];
            if ((env->features[esa->feature] & esa->bits) == esa->bits
                && ((ena_mask >> count) & 1) != 0) {
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x80000000:
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1; /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
        *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
               (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
        *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
               (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        *ecx = (L2_SIZE_KB_AMD << 16) | \
               (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
               (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
        if (!cpu->enable_l3_cache) {
            *edx = ((L3_SIZE_KB / 512) << 18) | \
                   (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
                   (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
        } else {
            *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
                   (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
                   (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
        }
        break;
    case 0x80000007:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor, 48 bits virtual, configurable
             * physical bits.
             */
            *eax = 0x00003000 + cpu->phys_bits;
        } else {
            *eax = cpu->phys_bits;
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xC0000000:
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}

/* CPUClass::reset() */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    memset(env, 0, offsetof(CPUX86State, cpuid_level));

    env->old_exception = -1;

    /* init to reset state */

    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
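    /*
     * Illustrative note: combined with EIP = 0xfff0 below, the CS base of
     * 0xffff0000 places the first fetched instruction at the architectural
     * reset vector 0xfffffff0.
     */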
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state. */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode. */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if ((env->features[esa->feature] & esa->bits) == esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
#endif
}

#ifndef CONFIG_USER_ONLY
bool cpu_is_bsp(X86CPU *cpu)
{
    return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
}

/* TODO: remove me, when reset over QOM tree is implemented */
static void x86_cpu_machine_reset_cb(void *opaque)
{
    X86CPU *cpu = opaque;
    cpu_reset(CPU(cpu));
}
#endif

static void mce_init(X86CPU *cpu)
{
    CPUX86State *cenv = &cpu->env;
    unsigned int bank;

    if (((cenv->cpuid_version >> 8) & 0xf) >= 6
        && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
            (CPUID_MCE | CPUID_MCA)) {
        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
                        (cpu->enable_lmce ? MCG_LMCE_P : 0);
        cenv->mcg_ctl = ~(uint64_t)0;
        for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
            cenv->mce_banks[bank * 4] = ~(uint64_t)0;
        }
    }
}

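/*
 * Illustrative note: env->mce_banks stores four MSRs per bank
 * (MCi_CTL, MCi_STATUS, MCi_ADDR, MCi_MISC), so the "bank * 4" index above
 * initializes each bank's MCi_CTL to all ones, i.e. all error reporting
 * enabled.
 */
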
#ifndef CONFIG_USER_ONLY
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    const char *apic_type = "apic";

    if (kvm_apic_in_kernel()) {
        apic_type = "kvm-apic";
    } else if (xen_enabled()) {
        apic_type = "xen-apic";
    }

    cpu->apic_state = DEVICE(object_new(apic_type));

    object_property_add_child(OBJECT(cpu), "lapic",
                              OBJECT(cpu->apic_state), &error_abort);
    object_unref(OBJECT(cpu->apic_state));

    qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}

static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    static bool apic_mmio_map_once;

    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
    }
}

static void x86_cpu_machine_done(Notifier *n, void *unused)
{
    X86CPU *cpu = container_of(n, X86CPU, machine_done);
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    if (smram) {
        cpu->smram = g_new(MemoryRegion, 1);
        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
                                 smram, 0, 1ull << 32);
        memory_region_set_enabled(cpu->smram, false);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
    }
}

#else
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
#endif

/* Note: Only safe for use on x86(-64) hosts */
static uint32_t x86_host_phys_bits(void)
{
    uint32_t eax;
    uint32_t host_phys_bits;

    host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
    if (eax >= 0x80000008) {
        host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
        /* Note: According to AMD doc 25481 rev 2.34 they have a field
         * at 23:16 that can specify a maximum physical address bits for
         * the guest that can override this value; but I've not seen
         * anything with that set.
         */
        host_phys_bits = eax & 0xff;
    } else {
        /* It's an odd 64 bit machine that doesn't have the leaf for
         * physical address bits; fall back to 36 that's most older
         * Intel.
         */
        host_phys_bits = 36;
    }

    return host_phys_bits;
}

#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)

static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;
    FeatureWord w;

    if (xcc->kvm_required && !kvm_enabled()) {
        char *name = x86_cpu_class_get_model_name(xcc);
        error_setg(&local_err, "CPU model '%s' requires KVM", name);
        g_free(name);
        goto out;
    }

    if (cpu->apic_id == UNASSIGNED_APIC_ID) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    /*TODO: cpu->host_features incorrectly overwrites features
     * set using "feat=on|off". Once we fix this, we can convert
     * plus_features & minus_features to global properties
     * inside x86_cpu_parse_featurestr() too.
     */
    if (cpu->host_features) {
        for (w = 0; w < FEATURE_WORDS; w++) {
            env->features[w] =
                x86_cpu_get_supported_feature_word(w, cpu->migratable);
        }
    }

    for (w = 0; w < FEATURE_WORDS; w++) {
        cpu->env.features[w] |= plus_features[w];
        cpu->env.features[w] &= ~minus_features[w];
    }

    if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
        env->cpuid_level = 7;
    }

    if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
        error_setg(&local_err,
                   kvm_enabled() ?
                       "Host doesn't support requested features" :
                       "TCG doesn't support requested features");
        goto out;
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
            & CPUID_EXT2_AMD_ALIASES);
    }

    /* For 64bit systems think about the number of physical bits to present.
     * ideally this should be the same as the host; anything other than matching
     * the host can cause incorrect guest behaviour.
     * QEMU used to pick the magic value of 40 bits that corresponds to
     * consumer AMD devices but nothing else.
     */
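    /*
     * Illustrative examples of how the properties below interact: with KVM,
     * "-cpu Haswell,host-phys-bits=on" copies the host's physical address
     * width into phys-bits, while an explicit "-cpu Haswell,phys-bits=39"
     * is accepted but triggers the mismatch warning if the host differs.
     */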
    if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
        if (kvm_enabled()) {
            uint32_t host_phys_bits = x86_host_phys_bits();
            static bool warned;

            if (cpu->host_phys_bits) {
                /* The user asked for us to use the host physical bits */
                cpu->phys_bits = host_phys_bits;
            }

            /* Print a warning if the user set it to a value that's not the
             * host value.
             */
            if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
                !warned) {
                error_report("Warning: Host physical bits (%u)"
                             " does not match phys-bits property (%u)",
                             host_phys_bits, cpu->phys_bits);
                warned = true;
            }

            if (cpu->phys_bits &&
                (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
                 cpu->phys_bits < 32)) {
                error_setg(errp, "phys-bits should be between 32 and %u "
                                 " (but is %u)",
                                 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
                return;
            }
        } else {
            if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
                error_setg(errp, "TCG only supports phys-bits=%u",
                           TCG_PHYS_ADDR_BITS);
                return;
            }
        }
        /* 0 means it was not explicitly set by the user (or by machine
         * compat_props or by the host code above). In this case, the default
         * is the value used by TCG (40).
         */
        if (cpu->phys_bits == 0) {
            cpu->phys_bits = TCG_PHYS_ADDR_BITS;
        }
    } else {
        /* For 32 bit systems don't use the user set value, but keep
         * phys_bits consistent with what we tell the guest.
         */
        if (cpu->phys_bits != 0) {
            error_setg(errp, "phys-bits is not user-configurable in 32 bit");
            return;
        }

        if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
            cpu->phys_bits = 36;
        } else {
            cpu->phys_bits = 32;
        }
    }
    cpu_exec_init(cs, &error_abort);

    if (tcg_enabled()) {
        tcg_x86_init();
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        AddressSpace *newas = g_new(AddressSpace, 1);

        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);
        address_space_init(newas, cpu->cpu_as_root, "CPU");
        cs->num_ases = 1;
        cpu_address_space_init(cs, newas, 0);

        /* ... SMRAM with higher priority, linked from /machine/smram. */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
     * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to give
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't been populated yet and the checking is incorrect.
     */
    if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
        error_report("AMD CPU doesn't support hyperthreading. Please configure"
                     " -smp options properly.");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
}

static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
{
    X86CPU *cpu = X86_CPU(dev);

#ifndef CONFIG_USER_ONLY
    cpu_remove_sync(CPU(dev));
    qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
#endif

    if (cpu->apic_state) {
        object_unparent(OBJECT(cpu->apic_state));
        cpu->apic_state = NULL;
    }
}

typedef struct BitProperty {
    uint32_t *ptr;
    uint32_t mask;
} BitProperty;

static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    BitProperty *fp = opaque;
    bool value = (*fp->ptr & fp->mask) == fp->mask;
    visit_type_bool(v, name, &value, errp);
}

static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    DeviceState *dev = DEVICE(obj);
    BitProperty *fp = opaque;
    Error *local_err = NULL;
    bool value;

    if (dev->realized) {
        qdev_prop_set_after_realize(dev, name, errp);
        return;
    }

    visit_type_bool(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (value) {
        *fp->ptr |= fp->mask;
    } else {
        *fp->ptr &= ~fp->mask;
    }
}

static void x86_cpu_release_bit_prop(Object *obj, const char *name,
                                     void *opaque)
{
    BitProperty *prop = opaque;
    g_free(prop);
}

/* Register a boolean property to get/set a single bit in a uint32_t field.
 *
 * The same property name can be registered multiple times to make it affect
 * multiple bits in the same FeatureWord. In that case, the getter will return
 * true only if all bits are set.
 */
static void x86_cpu_register_bit_prop(X86CPU *cpu,
                                      const char *prop_name,
                                      uint32_t *field,
                                      int bitnr)
{
    BitProperty *fp;
    ObjectProperty *op;
    uint32_t mask = (1UL << bitnr);

    op = object_property_find(OBJECT(cpu), prop_name, NULL);
    if (op) {
        fp = op->opaque;
        assert(fp->ptr == field);
        fp->mask |= mask;
    } else {
        fp = g_new0(BitProperty, 1);
        fp->ptr = field;
        fp->mask = mask;
        object_property_add(OBJECT(cpu), prop_name, "bool",
                            x86_cpu_get_bit_prop,
                            x86_cpu_set_bit_prop,
                            x86_cpu_release_bit_prop, fp, &error_abort);
    }
}

static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
                                               FeatureWord w,
                                               int bitnr)
{
    Object *obj = OBJECT(cpu);
    int i;
    char **names;
    FeatureWordInfo *fi = &feature_word_info[w];

    if (!fi->feat_names) {
        return;
    }
    if (!fi->feat_names[bitnr]) {
        return;
    }

    names = g_strsplit(fi->feat_names[bitnr], "|", 0);

    feat2prop(names[0]);
    x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);

    for (i = 1; names[i]; i++) {
        feat2prop(names[i]);
        object_property_add_alias(obj, names[i], obj, names[0],
                                  &error_abort);
    }

    g_strfreev(names);
}

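/*
 * Illustrative note: a feat_names entry may list several spellings separated
 * by '|'; the first one becomes the canonical bit property and the rest are
 * registered as QOM aliases of it, so either spelling can be used on the
 * command line.
 */
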
static void x86_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    cs->env_ptr = env;

    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;

    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
}

static int64_t x86_cpu_get_arch_id(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);

    return cpu->apic_id;
}

static bool x86_cpu_get_paging_enabled(const CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);

    return cpu->env.cr[0] & CR0_PG_MASK;
}

static void x86_cpu_set_pc(CPUState *cs, vaddr value)
{
    X86CPU *cpu = X86_CPU(cs);

    cpu->env.eip = value;
}

static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
{
    X86CPU *cpu = X86_CPU(cs);

    cpu->env.eip = tb->pc - tb->cs_base;
}

static bool x86_cpu_has_work(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
                                      CPU_INTERRUPT_POLL)) &&
            (env->eflags & IF_MASK)) ||
           (cs->interrupt_request & (CPU_INTERRUPT_NMI |
                                     CPU_INTERRUPT_INIT |
                                     CPU_INTERRUPT_SIPI |
                                     CPU_INTERRUPT_MCE)) ||
           ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
            !(env->hflags & HF_SMM_MASK));
}

static Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
#else
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
    DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
    DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
    DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
    DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
    DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
    DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
    DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
    DEFINE_PROP_END_OF_LIST()
};

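/*
 * Illustrative usage of the properties above, e.g. on the command line:
 *   -cpu qemu64,pmu=on,hv-spinlocks=0x1fff,l3-cache=off,phys-bits=40
 * Each option name maps directly to one of the qdev properties in this
 * table (or to a feature bit property registered in x86_cpu_initfn()).
 */
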
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    xcc->parent_realize = dc->realize;
    dc->realize = x86_cpu_realizefn;
    dc->unrealize = x86_cpu_unrealizefn;
    dc->props = x86_cpu_properties;

    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
    cc->dump_state = x86_cpu_dump_state;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
#ifndef CONFIG_USER_ONLY
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;

    dc->cannot_instantiate_with_device_add_yet = false;
    /*
     * Reason: x86_cpu_initfn() calls cpu_exec_init(), which saves the
     * object in cpus -> dangling pointer after final object_unref().
     */
    dc->cannot_destroy_with_object_finalize_yet = true;
}

static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};

static void x86_cpu_register_types(void)
{
    int i;

    type_register_static(&x86_cpu_type_info);
    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
        x86_register_cpudef_type(&builtin_x86_defs[i]);
    }
#ifdef CONFIG_KVM
    type_register_static(&host_x86_cpu_type_info);
#endif
}

type_init(x86_cpu_register_types)