/*
 * i386 CPUID helper functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/cutils.h"

#include "exec/exec-all.h"
#include "sysemu/kvm.h"
#include "sysemu/cpus.h"

#include "qemu/error-report.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "qapi/qmp/qerror.h"
#include "qapi/qmp/types.h"

#include "qapi-types.h"
#include "qapi-visit.h"
#include "qapi/visitor.h"
#include "qom/qom-qobject.h"
#include "sysemu/arch_init.h"

#if defined(CONFIG_KVM)
#include <linux/kvm_para.h>
#endif

#include "sysemu/sysemu.h"
#include "hw/qdev-properties.h"
#include "hw/i386/topology.h"
#ifndef CONFIG_USER_ONLY
#include "exec/address-spaces.h"
#include "hw/xen/xen.h"
#include "hw/i386/apic_internal.h"
#endif
/* Cache topology CPUID constants: */

/* CPUID Leaf 2 Descriptors */

#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
#define CPUID_2_L1I_32KB_8WAY_64B 0x30
#define CPUID_2_L2_2MB_8WAY_64B   0x7d
#define CPUID_2_L3_16MB_16WAY_64B 0x4d

/* CPUID Leaf 4 constants: */

#define CPUID_4_TYPE_DCACHE  1
#define CPUID_4_TYPE_ICACHE  2
#define CPUID_4_TYPE_UNIFIED 3

#define CPUID_4_LEVEL(l)        ((l) << 5)

#define CPUID_4_SELF_INIT_LEVEL (1 << 8)
#define CPUID_4_FULLY_ASSOC     (1 << 9)

#define CPUID_4_NO_INVD_SHARING (1 << 0)
#define CPUID_4_INCLUSIVE       (1 << 1)
#define CPUID_4_COMPLEX_IDX     (1 << 2)

#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006: */
#define AMD_ENC_ASSOC(a) (a <=   1 ? a   : \
                          a ==   2 ? 0x2 : \
                          a ==   4 ? 0x4 : \
                          a ==   8 ? 0x6 : \
                          a ==  16 ? 0x8 : \
                          a ==  32 ? 0xA : \
                          a ==  48 ? 0xB : \
                          a ==  64 ? 0xC : \
                          a ==  96 ? 0xD : \
                          a == 128 ? 0xE : \
                          a == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
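
/*
 * Illustrative only (not used by the build; values follow AMD's documented
 * 4-bit associativity encoding for leaf 0x80000006): with the constants
 * defined below, AMD_ENC_ASSOC(L2_ASSOCIATIVITY) evaluates to 0x8 (16-way)
 * and AMD_ENC_ASSOC(ASSOC_FULL) evaluates to 0xF (fully associative).
 */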
/* Definitions of the hardcoded cache entries we expose: */

/* L1 data cache: */
#define L1D_LINE_SIZE         64
#define L1D_ASSOCIATIVITY      8
#define L1D_SETS              64
#define L1D_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1D_LINES_PER_TAG      1
#define L1D_SIZE_KB_AMD       64
#define L1D_ASSOCIATIVITY_AMD  2

/* L1 instruction cache: */
#define L1I_LINE_SIZE         64
#define L1I_ASSOCIATIVITY      8
#define L1I_SETS              64
#define L1I_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1I_LINES_PER_TAG      1
#define L1I_SIZE_KB_AMD       64
#define L1I_ASSOCIATIVITY_AMD  2

/* Level 2 unified cache: */
#define L2_LINE_SIZE          64
#define L2_ASSOCIATIVITY      16
#define L2_SETS             4096
#define L2_PARTITIONS          1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
#define L2_LINES_PER_TAG       1
#define L2_SIZE_KB_AMD       512

/* Level 3 unified cache: */
#define L3_SIZE_KB             0 /* disabled */
#define L3_ASSOCIATIVITY       0 /* disabled */
#define L3_LINES_PER_TAG       0 /* disabled */
#define L3_LINE_SIZE           0 /* disabled */
#define L3_N_LINE_SIZE        64
#define L3_N_ASSOCIATIVITY    16
#define L3_N_SETS          16384
#define L3_N_PARTITIONS        1
#define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
#define L3_N_LINES_PER_TAG     1
#define L3_N_SIZE_KB_AMD   16384
/* TLB definitions: */

#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512
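
/*
 * Editor's sketch (assumption: this mirrors how the CPUID emulation code
 * outside this excerpt consumes these constants): the L1 TLB values are
 * typically packed into byte fields of CPUID leaf 0x80000005, e.g. for EAX:
 *   (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) |
 *   (L1_ITLB_2M_ASSOC <<  8) |  L1_ITLB_2M_ENTRIES
 */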
static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
                                     uint32_t vendor2, uint32_t vendor3)
{
    int i;
    for (i = 0; i < 4; i++) {
        dst[i] = vendor1 >> (8 * i);
        dst[i + 4] = vendor2 >> (8 * i);
        dst[i + 8] = vendor3 >> (8 * i);
    }
    dst[CPUID_VENDOR_SZ] = '\0';
}
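
/*
 * Example (illustrative): on an Intel host, CPUID.0 returns
 * EBX/EDX/ECX = 0x756e6547 / 0x49656e69 / 0x6c65746e; passing them as
 * vendor1/vendor2/vendor3 produces the 12-character string "GenuineIntel".
 */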
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */   \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
          CPUID_EXT_F16C, CPUID_EXT_RDRAND */

#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES 0
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT |            \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
          CPUID_7_0_EBX_ERMS)
          /* missing:
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE | \
          CPUID_7_0_ECX_LA57)
#define TCG_7_0_EDX_FEATURES 0
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
typedef struct FeatureWordInfo {
    /* feature flags names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[32];
    uint32_t cpuid_eax;   /* Input EAX for CPUID */
    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;   /* Input ECX value for CPUID */
    int cpuid_reg;        /* output register (R_* constant) */
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
    uint32_t migratable_flags; /* Feature flags known to be migratable */
} FeatureWordInfo;
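
/*
 * Example (illustrative): the FEAT_7_0_EBX entry below describes
 * CPUID.(EAX=7,ECX=0):EBX, i.e. cpuid_eax = 7, cpuid_needs_ecx = true,
 * cpuid_ecx = 0 and cpuid_reg = R_EBX; bit 9 of that word corresponds to
 * feat_names[9] == "erms".
 */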
static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
    [FEAT_1_EDX] = {
        .feat_names = {
            "fpu", "vme", "de", "pse",
            "tsc", "msr", "pae", "mce",
            "cx8", "apic", NULL, "sep",
            "mtrr", "pge", "mca", "cmov",
            "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
            NULL, "ds" /* Intel dts */, "acpi", "mmx",
            "fxsr", "sse", "sse2", "ss",
            "ht" /* Intel htt */, "tm", "ia64", "pbe",
        },
        .cpuid_eax = 1, .cpuid_reg = R_EDX,
        .tcg_features = TCG_FEATURES,
    },
    [FEAT_1_ECX] = {
        .feat_names = {
            "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
            "ds-cpl", "vmx", "smx", "est",
            "tm2", "ssse3", "cid", NULL,
            "fma", "cx16", "xtpr", "pdcm",
            NULL, "pcid", "dca", "sse4.1",
            "sse4.2", "x2apic", "movbe", "popcnt",
            "tsc-deadline", "aes", "xsave", "osxsave",
            "avx", "f16c", "rdrand", "hypervisor",
        },
        .cpuid_eax = 1, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT_FEATURES,
    },
    /* Feature names that are already defined on feature_name[] but
     * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
     * names on feat_names below. They are copied automatically
     * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
     */
    [FEAT_8000_0001_EDX] = {
        .feat_names = {
            NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
            NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
            NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
            NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
            NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
            "nx", NULL, "mmxext", NULL /* mmx */,
            NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
            NULL, "lm", "3dnowext", "3dnow",
        },
        .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT2_FEATURES,
    },
    [FEAT_8000_0001_ECX] = {
        .feat_names = {
            "lahf-lm", "cmp-legacy", "svm", "extapic",
            "cr8legacy", "abm", "sse4a", "misalignsse",
            "3dnowprefetch", "osvw", "ibs", "xop",
            "skinit", "wdt", NULL, "lwp",
            "fma4", "tce", NULL, "nodeid-msr",
            NULL, "tbm", "topoext", "perfctr-core",
            "perfctr-nb", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT3_FEATURES,
    },
    [FEAT_C000_0001_EDX] = {
        .feat_names = {
            NULL, NULL, "xstore", "xstore-en",
            NULL, NULL, "xcrypt", "xcrypt-en",
            "ace2", "ace2-en", "phe", "phe-en",
            "pmm", "pmm-en", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT4_FEATURES,
    },
    [FEAT_KVM] = {
        .feat_names = {
            "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
            "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "kvmclock-stable-bit", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
        .tcg_features = TCG_KVM_FEATURES,
    },
    [FEAT_HYPERV_EAX] = {
        .feat_names = {
            NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
            NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
            NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
            NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
            NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
            NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
    },
    [FEAT_HYPERV_EBX] = {
        .feat_names = {
            NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
            NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
            NULL /* hv_post_messages */, NULL /* hv_signal_events */,
            NULL /* hv_create_port */, NULL /* hv_connect_port */,
            NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
            NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
            NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
    },
    [FEAT_HYPERV_EDX] = {
        .feat_names = {
            NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
            NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
            NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
            NULL, NULL,
            NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
    },
    [FEAT_SVM] = {
        .feat_names = {
            "npt", "lbrv", "svm-lock", "nrip-save",
            "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
            NULL, NULL, "pause-filter", NULL,
            "pfthreshold", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
        .tcg_features = TCG_SVM_FEATURES,
    },
    [FEAT_7_0_EBX] = {
        .feat_names = {
            "fsgsbase", "tsc-adjust", NULL, "bmi1",
            "hle", "avx2", NULL, "smep",
            "bmi2", "erms", "invpcid", "rtm",
            NULL, NULL, "mpx", NULL,
            "avx512f", "avx512dq", "rdseed", "adx",
            "smap", "avx512ifma", "pcommit", "clflushopt",
            "clwb", NULL, "avx512pf", "avx512er",
            "avx512cd", "sha-ni", "avx512bw", "avx512vl",
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EBX,
        .tcg_features = TCG_7_0_EBX_FEATURES,
    },
    [FEAT_7_0_ECX] = {
        .feat_names = {
            NULL, "avx512vbmi", "umip", "pku",
            "ospke", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, "avx512-vpopcntdq", NULL,
            "la57", NULL, NULL, NULL,
            NULL, NULL, "rdpid", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_ECX,
        .tcg_features = TCG_7_0_ECX_FEATURES,
    },
    [FEAT_7_0_EDX] = {
        .feat_names = {
            NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_7_0_EDX_FEATURES,
    },
    [FEAT_8000_0007_EDX] = {
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "invtsc", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000007,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_APM_FEATURES,
        .unmigratable_flags = CPUID_APM_INVTSC,
    },
    [FEAT_XSAVE] = {
        .feat_names = {
            "xsaveopt", "xsavec", "xgetbv1", "xsaves",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0xd,
        .cpuid_needs_ecx = true, .cpuid_ecx = 1,
        .cpuid_reg = R_EAX,
        .tcg_features = TCG_XSAVE_FEATURES,
    },
    [FEAT_6_EAX] = {
        .feat_names = {
            NULL, NULL, "arat", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 6, .cpuid_reg = R_EAX,
        .tcg_features = TCG_6_EAX_FEATURES,
    },
    [FEAT_XSAVE_COMP_LO] = {
        .cpuid_eax = 0xD,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EAX,
        .tcg_features = ~0U,
        .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
            XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
            XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
            XSTATE_PKRU_MASK,
    },
    [FEAT_XSAVE_COMP_HI] = {
        .cpuid_eax = 0xD,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EDX,
        .tcg_features = ~0U,
    },
};
typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;

#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER
typedef struct ExtSaveArea {
    uint32_t feature, bits;
    uint32_t offset, size;
} ExtSaveArea;

static const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_FP_BIT] = {
        /* x87 FP state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* x87 state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_SSE_BIT] = {
        /* SSE state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* SSE state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};
static uint32_t xsave_area_size(uint64_t mask)
{
    int i;
    uint64_t ret = 0;

    for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if ((mask >> i) & 1) {
            ret = MAX(ret, esa->offset + esa->size);
        }
    }
    return ret;
}
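
/*
 * Example (illustrative; assumes the structures above mirror the
 * architectural 512-byte XSAVE legacy area and 64-byte header):
 * for mask == XSTATE_FP_MASK | XSTATE_SSE_MASK both components have
 * .offset == 0, so the result is sizeof(X86LegacyXSaveArea) +
 * sizeof(X86XSaveHeader) == 512 + 64 == 576 bytes.
 */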
static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
{
    return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
           cpu->env.features[FEAT_XSAVE_COMP_LO];
}
const char *get_register_name_32(unsigned int reg)
{
    if (reg >= CPU_NB_REGS32) {
        return NULL;
    }
    return x86_reg_info_32[reg].name;
}
/*
 * Returns the set of feature flags that are supported and migratable by
 * QEMU, for a given FeatureWord.
 */
static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t r = 0;
    int i;

    for (i = 0; i < 32; i++) {
        uint32_t f = 1U << i;

        /* If the feature name is known, it is implicitly considered migratable,
         * unless it is explicitly set in unmigratable_flags */
        if ((wi->migratable_flags & f) ||
            (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
            r |= f;
        }
    }
    return r;
}
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#else
    abort();
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
}
void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
{
    uint32_t eax, ebx, ecx, edx;

    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    if (family) {
        *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    }
    if (model) {
        *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    }
    if (stepping) {
        *stepping = eax & 0x0F;
    }
}
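
/*
 * Worked example (illustrative): with CPUID.1 EAX == 0x000306C3 the code
 * above yields family = 0x6 + 0x00 = 6, model = 0xC | (0x3 << 4) = 0x3C
 * and stepping = 3.
 */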
/* CPU class name definitions: */

/* Return type name for a given CPU model name
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}
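
/*
 * Example (illustrative; assumes X86_CPU_TYPE_NAME() appends the usual
 * "-<target>-cpu" suffix): x86_cpu_type_name("Haswell") would yield a QOM
 * type name such as "Haswell-x86_64-cpu" on an x86_64 target.
 */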
static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;

    if (cpu_model == NULL) {
        return NULL;
    }

    typename = x86_cpu_type_name(cpu_model);
    oc = object_class_by_name(typename);
    g_free(typename);
    return oc;
}

static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
{
    const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
    assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
    return g_strndup(class_name,
                     strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
}
struct X86CPUDefinition {
    const char *name;
    uint32_t level;
    uint32_t xlevel;
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features;
    const char *model_id;
};
static X86CPUDefinition builtin_x86_defs[] = {
    {
        .vendor = CPUID_VENDOR_AMD,
        .features[FEAT_1_EDX] =
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_CX16,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
        .xlevel = 0x8000000A,
        .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
    },
    {
        .vendor = CPUID_VENDOR_AMD,
        /* Missing: CPUID_HT */
        .features[FEAT_1_EDX] =
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36 | CPUID_VME,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
            CPUID_EXT_POPCNT,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
            CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
        /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
            CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
        /* Missing: CPUID_SVM_LBRV */
        .features[FEAT_SVM] =
            CPUID_SVM_NPT,
        .xlevel = 0x8000001A,
        .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
        .features[FEAT_1_EDX] =
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
        /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
         * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
            CPUID_EXT_CX16,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        /* Missing: CPUID_HT */
        .features[FEAT_1_EDX] =
            PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_CX16,
        /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
           CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
           CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
           CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
        .features[FEAT_8000_0001_ECX] =
            0,
        .xlevel = 0x80000008,
        .model_id = "Common KVM processor"
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            PPRO_FEATURES,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3,
        .xlevel = 0x80000004,
        .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_ECX] =
            0,
        .xlevel = 0x80000008,
        .model_id = "Common 32-bit KVM processor"
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
        .features[FEAT_1_EDX] =
            PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
            CPUID_SS,
        /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
         * CPUID_EXT_PDCM, CPUID_EXT_VMX */
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_NX,
        .xlevel = 0x80000008,
        .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            I486_FEATURES,
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            PENTIUM_FEATURES,
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            PENTIUM2_FEATURES,
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            PENTIUM3_FEATURES,
    },
    {
        .vendor = CPUID_VENDOR_AMD,
        .features[FEAT_1_EDX] =
            PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
            CPUID_MCA,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .xlevel = 0x80000008,
        .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
        .features[FEAT_1_EDX] =
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
            CPUID_ACPI | CPUID_SS,
            /* Some CPUs got no CPUID_SEP */
        /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
         * CPUID_EXT_XTPR */
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
            CPUID_EXT_MOVBE,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .xlevel = 0x80000008,
        .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .xlevel = 0x80000008,
        .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .xlevel = 0x80000008,
        .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
    },
    {
        .name = "SandyBridge",
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
            CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
            CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon E312xx (Sandy Bridge)",
    },
    {
        .name = "IvyBridge",
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
            CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
            CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_ERMS,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
    },
    {
        .name = "Haswell-noTSX",
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Haswell, no TSX)",
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Haswell)",
    },
    {
        .name = "Broadwell-noTSX",
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Broadwell, no TSX)",
    },
    {
        .name = "Broadwell",
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Broadwell)",
    },
    {
        .name = "Skylake-Client",
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
        /* Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component,
         * and the only one defined in Skylake (processor tracing)
         * probably will block migration anyway.
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Skylake)",
    },
    {
        .name = "Skylake-Server",
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
            CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
            CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
            CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
            CPUID_7_0_EBX_AVX512VL,
        /* Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component,
         * and the only one defined in Skylake (processor tracing)
         * probably will block migration anyway.
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon Processor (Skylake)",
    },
    {
        .name = "Opteron_G1",
        .vendor = CPUID_VENDOR_AMD,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .xlevel = 0x80000008,
        .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
    },
    {
        .name = "Opteron_G2",
        .vendor = CPUID_VENDOR_AMD,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_CX16 | CPUID_EXT_SSE3,
        /* Missing: CPUID_EXT2_RDTSCP */
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
        .xlevel = 0x80000008,
        .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
    },
    {
        .name = "Opteron_G3",
        .vendor = CPUID_VENDOR_AMD,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
            CPUID_EXT_SSE3,
        /* Missing: CPUID_EXT2_RDTSCP */
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
            CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
        .xlevel = 0x80000008,
        .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
    },
    {
        .name = "Opteron_G4",
        .vendor = CPUID_VENDOR_AMD,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
            CPUID_EXT_SSE3,
        /* Missing: CPUID_EXT2_RDTSCP */
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
            CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
            CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
            CPUID_EXT3_LAHF_LM,
        .xlevel = 0x8000001A,
        .model_id = "AMD Opteron 62xx class CPU",
    },
    {
        .name = "Opteron_G5",
        .vendor = CPUID_VENDOR_AMD,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
            CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
            CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
        /* Missing: CPUID_EXT2_RDTSCP */
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
            CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
            CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
            CPUID_EXT3_LAHF_LM,
        .xlevel = 0x8000001A,
        .model_id = "AMD Opteron 63xx class CPU",
    },
    {
        .vendor = CPUID_VENDOR_AMD,
        .features[FEAT_1_EDX] =
            CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
            CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
            CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
            CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
            CPUID_VME | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
            CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
            CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
            CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
            CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
            CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
            CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
            CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
            CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
            CPUID_7_0_EBX_SHA_NI,
        /* Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component.
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x8000000A,
        .model_id = "AMD EPYC Processor",
    },
};
typedef struct PropValue {
    const char *prop, *value;
} PropValue;

/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "monitor", "off" },
    { NULL, NULL },
};

/* TCG-specific defaults that override all CPU models when using TCG
 */
static PropValue tcg_default_props[] = {
    { NULL, NULL },
};
void x86_cpu_change_kvm_default(const char *prop, const char *value)
{
    PropValue *pv;
    for (pv = kvm_default_props; pv->prop; pv++) {
        if (!strcmp(pv->prop, prop)) {
            pv->value = value;
            break;
        }
    }

    /* It is valid to call this function only for properties that
     * are already present in the kvm_default_props table.
     */
    assert(pv->prop);
}
static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only);
static bool lmce_supported(void)
{
    uint64_t mce_cap = 0;

#ifdef CONFIG_KVM
    if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
        return false;
    }
#endif

    return !!(mce_cap & MCG_LMCE_P);
}
#define CPUID_MODEL_ID_SZ 48

/**
 * cpu_x86_fill_model_id:
 * Get CPUID model ID string from host CPU.
 *
 * @str should have at least CPUID_MODEL_ID_SZ bytes
 *
 * The function does NOT add a null terminator to the string
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
    int i;

    for (i = 0; i < 3; i++) {
        host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
        memcpy(str + i * 16 +  0, &eax, 4);
        memcpy(str + i * 16 +  4, &ebx, 4);
        memcpy(str + i * 16 +  8, &ecx, 4);
        memcpy(str + i * 16 + 12, &edx, 4);
    }
    return 0;
}
static Property max_x86_cpu_properties[] = {
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};
static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->model_description =
        "Enables all features supported by the accelerator in the current host";

    dc->props = max_x86_cpu_properties;
}
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
static void max_x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    KVMState *s = kvm_state;

    /* We can't fill the features array here because we don't know yet if
     * "migratable" is true or false.
     */
    cpu->max_features = true;

    if (kvm_enabled()) {
        char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
        char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
        int family, model, stepping;

        host_vendor_fms(vendor, &family, &model, &stepping);

        cpu_x86_fill_model_id(model_id);

        object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
        object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
        object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
        object_property_set_int(OBJECT(cpu), stepping, "stepping",
                                &error_abort);
        object_property_set_str(OBJECT(cpu), model_id, "model-id",
                                &error_abort);

        env->cpuid_min_level =
            kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
        env->cpuid_min_xlevel =
            kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
        env->cpuid_min_xlevel2 =
            kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);

        if (lmce_supported()) {
            object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
        }
    } else {
        object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
                                "vendor", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
        object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
        object_property_set_str(OBJECT(cpu),
                                "QEMU TCG CPU version " QEMU_HW_VERSION,
                                "model-id", &error_abort);
    }

    object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}
static const TypeInfo max_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("max"),
    .parent = TYPE_X86_CPU,
    .instance_init = max_x86_cpu_initfn,
    .class_init = max_x86_cpu_class_init,
};
static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->kvm_required = true;

    xcc->model_description =
        "KVM processor with all supported host features "
        "(only available in KVM mode)";
}

static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = X86_CPU_TYPE_NAME("max"),
    .class_init = host_x86_cpu_class_init,
};
static void report_unavailable_features(FeatureWord w, uint32_t mask)
{
    FeatureWordInfo *f = &feature_word_info[w];
    int i;

    for (i = 0; i < 32; ++i) {
        if ((1UL << i) & mask) {
            const char *reg = get_register_name_32(f->cpuid_reg);
            warn_report("%s doesn't support requested feature: "
                        "CPUID.%02XH:%s%s%s [bit %d]",
                        kvm_enabled() ? "host" : "TCG",
                        f->cpuid_eax, reg,
                        f->feat_names[i] ? "." : "",
                        f->feat_names[i] ? f->feat_names[i] : "", i);
        }
    }
}
static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
                                         const char *name, void *opaque,
                                         Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = (env->cpuid_version >> 8) & 0xf;
    if (value == 0xf) {
        value += (env->cpuid_version >> 20) & 0xff;
    }
    visit_type_int(v, name, &value, errp);
}
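
/*
 * Example (illustrative): a CPU that encodes base family 0xF and extended
 * family 0x06 in cpuid_version is read back here as 0xf + 0x6 = 0x15,
 * i.e. AMD family 21.
 */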
static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
                                         const char *name, void *opaque,
                                         Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff + 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    env->cpuid_version &= ~0xff00f00;
    if (value > 0x0f) {
        env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
    } else {
        env->cpuid_version |= value << 8;
    }
}
static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = (env->cpuid_version >> 4) & 0xf;
    value |= ((env->cpuid_version >> 16) & 0xf) << 4;
    visit_type_int(v, name, &value, errp);
}
static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    env->cpuid_version &= ~0xf00f0;
    env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
}
static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
                                           const char *name, void *opaque,
                                           Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = env->cpuid_version & 0xf;
    visit_type_int(v, name, &value, errp);
}
static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
                                           const char *name, void *opaque,
                                           Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    env->cpuid_version &= ~0xf;
    env->cpuid_version |= value & 0xf;
}
static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    char *value;

    value = g_malloc(CPUID_VENDOR_SZ + 1);
    x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
                             env->cpuid_vendor3);
    return value;
}
static void x86_cpuid_set_vendor(Object *obj, const char *value,
                                 Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int i;

    if (strlen(value) != CPUID_VENDOR_SZ) {
        error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
        return;
    }

    env->cpuid_vendor1 = 0;
    env->cpuid_vendor2 = 0;
    env->cpuid_vendor3 = 0;
    for (i = 0; i < 4; i++) {
        env->cpuid_vendor1 |= ((uint8_t)value[i    ]) << (8 * i);
        env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
        env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
    }
}
static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    char *value;
    int i;

    value = g_malloc(48 + 1);
    for (i = 0; i < 48; i++) {
        value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
    }
    value[48] = '\0';
    return value;
}
static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
                                   Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int c, len, i;

    if (model_id == NULL) {
        model_id = "";
    }
    len = strlen(model_id);
    memset(env->cpuid_model, 0, 48);
    for (i = 0; i < 48; i++) {
        if (i >= len) {
            c = '\0';
        } else {
            c = (uint8_t)model_id[i];
        }
        env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
    }
}
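
/*
 * Example (illustrative): a model_id beginning with "QEMU" stores 'Q' in
 * the least significant byte of cpuid_model[0], 'E' in the next byte, and
 * so on, matching how CPUID leaves 0x80000002..0x80000004 return the brand
 * string four characters per register.
 */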
static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    int64_t value;

    value = cpu->env.tsc_khz * 1000;
    visit_type_int(v, name, &value, errp);
}
static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    const int64_t min = 0;
    const int64_t max = INT64_MAX;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
}
/* Generic getter for "feature-words" and "filtered-features" properties */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        qwi->cpuid_input_eax = wi->cpuid_eax;
        qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid_ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
}

static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    int64_t value = cpu->hyperv_spinlock_attempts;

    visit_type_int(v, name, &value, errp);
}

static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const int64_t min = 0xFFF;
    const int64_t max = UINT_MAX;
    X86CPU *cpu = X86_CPU(obj);
    Error *err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    if (value < min || value > max) {
        error_setg(errp, "Property %s.%s doesn't take value %" PRId64
                   " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
                   object_get_typename(obj), name ? name : "null",
                   value, min, max);
        return;
    }
    cpu->hyperv_spinlock_attempts = value;
}

static const PropertyInfo qdev_prop_spinlocks = {
    .name  = "int",
    .get   = x86_get_hv_spinlocks,
    .set   = x86_set_hv_spinlocks,
};
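/* Illustrative note added while editing (not in the original source): with
 * this property registered, the Hyper-V spinlock retry count can be set on
 * the command line, e.g. "-cpu <model>,hv-spinlocks=0x1fff"; values below
 * the 0xFFF minimum are rejected by x86_set_hv_spinlocks() above.
 */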
/* Convert all '_' in a feature string option name to '-', to make feature
 * name conform to QOM property naming rule, which uses '-' instead of '_'.
 */
static inline void feat2prop(char *s)
{
    while ((s = strchr(s, '_'))) {
        *s = '-';
    }
}
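/* Example (comment added while editing): feat2prop() rewrites a user-supplied
 * feature name such as "sse4_2" in place to "sse4-2", so it matches the QOM
 * property name registered for that feature bit.
 */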
/* Return the feature property name for a feature flag bit */
static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
{
    /* XSAVE components are automatically enabled by other features,
     * so return the original feature name instead
     */
    if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
        int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;

        if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
            x86_ext_save_areas[comp].bits) {
            w = x86_ext_save_areas[comp].feature;
            bitnr = ctz32(x86_ext_save_areas[comp].bits);
        }
    }

    assert(w < FEATURE_WORDS);
    return feature_word_info[w].feat_names[bitnr];
}
/* Compatibility hack to maintain legacy +-feat semantic,
 * where +-feat overwrites any feature set by
 * feat=on|feat even if the latter is parsed after +-feat
 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
 */
static GList *plus_features, *minus_features;

static gint compare_string(gconstpointer a, gconstpointer b)
{
    return g_strcmp0(a, b);
}

/* Parse "+feature,-feature,feature=foo" CPU feature string
 */
static void x86_cpu_parse_featurestr(const char *typename, char *features,
                                     Error **errp)
{
    char *featurestr; /* Single 'key=value" string being parsed */
    static bool cpu_globals_initialized;
    bool ambiguous = false;

    if (cpu_globals_initialized) {
        return;
    }
    cpu_globals_initialized = true;

    if (!features) {
        return;
    }

    for (featurestr = strtok(features, ",");
         featurestr;
         featurestr = strtok(NULL, ",")) {
        const char *name;
        const char *val = NULL;
        char *eq = NULL;
        char num[32];
        GlobalProperty *prop;

        /* Compatibility syntax: */
        if (featurestr[0] == '+') {
            plus_features = g_list_append(plus_features,
                                          g_strdup(featurestr + 1));
            continue;
        } else if (featurestr[0] == '-') {
            minus_features = g_list_append(minus_features,
                                           g_strdup(featurestr + 1));
            continue;
        }

        eq = strchr(featurestr, '=');
        if (eq) {
            *eq++ = 0;
            val = eq;
        } else {
            val = "on";
        }

        feat2prop(featurestr);
        name = featurestr;

        if (g_list_find_custom(plus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"+%s\" and \"%s=%s\"",
                        name, name, val);
            ambiguous = true;
        }
        if (g_list_find_custom(minus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"-%s\" and \"%s=%s\"",
                        name, name, val);
            ambiguous = true;
        }

        /* Special case: */
        if (!strcmp(name, "tsc-freq")) {
            int ret;
            uint64_t tsc_freq;

            ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
            if (ret < 0 || tsc_freq > INT64_MAX) {
                error_setg(errp, "bad numerical value %s", val);
                return;
            }
            snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
            val = num;
            name = "tsc-frequency";
        }

        prop = g_new0(typeof(*prop), 1);
        prop->driver = typename;
        prop->property = g_strdup(name);
        prop->value = g_strdup(val);
        prop->errp = &error_fatal;
        qdev_prop_register_global(prop);
    }

    if (ambiguous) {
        warn_report("Compatibility of ambiguous CPU model "
                    "strings won't be kept on future QEMU versions");
    }
}
static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
static int x86_cpu_filter_features(X86CPU *cpu);

/* Check for missing features that may prevent the CPU class from
 * running using the current machine and accelerator.
 */
static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
                                                 strList **missing_feats)
{
    X86CPU *xc;
    FeatureWord w;
    Error *err = NULL;
    strList **next = missing_feats;

    if (xcc->kvm_required && !kvm_enabled()) {
        strList *new = g_new0(strList, 1);
        new->value = g_strdup("kvm");
        *missing_feats = new;
        return;
    }

    xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));

    x86_cpu_expand_features(xc, &err);
    if (err) {
        /* Errors at x86_cpu_expand_features should never happen,
         * but in case it does, just report the model as not
         * runnable at all using the "type" property.
         */
        strList *new = g_new0(strList, 1);
        new->value = g_strdup("type");
        *next = new;
        next = &new->next;
    }

    x86_cpu_filter_features(xc);

    for (w = 0; w < FEATURE_WORDS; w++) {
        uint32_t filtered = xc->filtered_features[w];
        int i;
        for (i = 0; i < 32; i++) {
            if (filtered & (1UL << i)) {
                strList *new = g_new0(strList, 1);
                new->value = g_strdup(x86_cpu_feature_name(w, i));
                *next = new;
                next = &new->next;
            }
        }
    }

    object_unref(OBJECT(xc));
}
/* Print all cpuid feature names in featureset
 */
static void listflags(FILE *f, fprintf_function print, const char **featureset)
{
    int bit;
    bool first = true;

    for (bit = 0; bit < 32; bit++) {
        if (featureset[bit]) {
            print(f, "%s%s", first ? "" : " ", featureset[bit]);
            first = false;
        }
    }
}

/* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
    X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
    const char *name_a, *name_b;

    if (cc_a->ordering != cc_b->ordering) {
        return cc_a->ordering - cc_b->ordering;
    } else {
        name_a = object_class_get_name(class_a);
        name_b = object_class_get_name(class_b);
        return strcmp(name_a, name_b);
    }
}

static GSList *get_sorted_cpu_model_list(void)
{
    GSList *list = object_class_get_list(TYPE_X86_CPU, false);
    list = g_slist_sort(list, x86_cpu_list_compare);
    return list;
}

static void x86_cpu_list_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    X86CPUClass *cc = X86_CPU_CLASS(oc);
    CPUListState *s = user_data;
    char *name = x86_cpu_class_get_model_name(cc);
    const char *desc = cc->model_description;
    if (!desc && cc->cpu_def) {
        desc = cc->cpu_def->model_id;
    }

    (*s->cpu_fprintf)(s->file, "x86 %16s  %-48s\n",
                      name, desc);
    g_free(name);
}

/* list available CPU models and flags */
void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    int i;
    CPUListState s = {
        .file = f,
        .cpu_fprintf = cpu_fprintf,
    };
    GSList *list;

    (*cpu_fprintf)(f, "Available CPUs:\n");
    list = get_sorted_cpu_model_list();
    g_slist_foreach(list, x86_cpu_list_entry, &s);
    g_slist_free(list);

    (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
    for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
        FeatureWordInfo *fw = &feature_word_info[i];

        (*cpu_fprintf)(f, "  ");
        listflags(f, cpu_fprintf, fw->feat_names);
        (*cpu_fprintf)(f, "\n");
    }
}

static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    X86CPUClass *cc = X86_CPU_CLASS(oc);
    CpuDefinitionInfoList **cpu_list = user_data;
    CpuDefinitionInfoList *entry;
    CpuDefinitionInfo *info;

    info = g_malloc0(sizeof(*info));
    info->name = x86_cpu_class_get_model_name(cc);
    x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
    info->has_unavailable_features = true;
    info->q_typename = g_strdup(object_class_get_name(oc));
    info->migration_safe = cc->migration_safe;
    info->has_migration_safe = true;
    info->q_static = cc->static_model;

    entry = g_malloc0(sizeof(*entry));
    entry->value = info;
    entry->next = *cpu_list;
    *cpu_list = entry;
}

CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    GSList *list = get_sorted_cpu_model_list();
    g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
    g_slist_free(list);
    return cpu_list;
}
static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t r;

    if (kvm_enabled()) {
        r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
                                         wi->cpuid_ecx,
                                         wi->cpuid_reg);
    } else if (tcg_enabled()) {
        r = wi->tcg_features;
    } else {
        return ~0;
    }
    if (migratable_only) {
        r &= x86_cpu_get_migratable_flags(w);
    }
    return r;
}

static void x86_cpu_report_filtered_features(X86CPU *cpu)
{
    FeatureWord w;

    for (w = 0; w < FEATURE_WORDS; w++) {
        report_unavailable_features(w, cpu->filtered_features[w]);
    }
}

static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
{
    PropValue *pv;
    for (pv = props; pv->prop; pv++) {
        if (!pv->value) {
            continue;
        }
        object_property_parse(OBJECT(cpu), pv->value, pv->prop,
                              &error_abort);
    }
}
/* Load data from X86CPUDefinition into a X86CPU object
 */
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    /*NOTE: any property set by this function should be returned by
     * x86_cpu_static_props(), so static expansion of
     * query-cpu-model-expansion is always complete.
     */

    /* CPU models only set _minimum_ values for level/xlevel: */
    object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
    object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);

    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* Special cases not set in the X86CPUDefinition structs: */
    if (kvm_enabled()) {
        if (!kvm_irqchip_in_kernel()) {
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    } else if (tcg_enabled()) {
        x86_cpu_apply_props(cpu, tcg_default_props);
    }

    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (kvm_enabled()) {
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
}
/* Return a QDict containing keys for all properties that can be included
 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
 * must be included in the dictionary.
 */
static QDict *x86_cpu_static_props(void)
{
    FeatureWord w;
    int i;
    static const char *props[] = {
        "min-level",
        "min-xlevel",
        "family",
        "model",
        "stepping",
        "model-id",
        "vendor",
        "lmce",
        NULL,
    };
    static QDict *d;

    if (d) {
        return d;
    }

    d = qdict_new();
    for (i = 0; props[i]; i++) {
        qdict_put_null(d, props[i]);
    }

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *fi = &feature_word_info[w];
        int bit;
        for (bit = 0; bit < 32; bit++) {
            if (!fi->feat_names[bit]) {
                continue;
            }
            qdict_put_null(d, fi->feat_names[bit]);
        }
    }

    return d;
}
/* Add an entry to @props dict, with the value for property. */
static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
{
    QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
                                                 &error_abort);

    qdict_put_obj(props, prop, value);
}

/* Convert CPU model data from X86CPU object to a property dictionary
 * that can recreate exactly the same CPU model.
 */
static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
{
    QDict *sprops = x86_cpu_static_props();
    const QDictEntry *e;

    for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
        const char *prop = qdict_entry_key(e);
        x86_cpu_expand_prop(cpu, props, prop);
    }
}

/* Convert CPU model data from X86CPU object to a property dictionary
 * that can recreate exactly the same CPU model, including every
 * writeable QOM property.
 */
static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
{
    ObjectPropertyIterator iter;
    ObjectProperty *prop;

    object_property_iter_init(&iter, OBJECT(cpu));
    while ((prop = object_property_iter_next(&iter))) {
        /* skip read-only or write-only properties */
        if (!prop->get || !prop->set) {
            continue;
        }

        /* "hotplugged" is the only property that is configurable
         * on the command-line but will be set differently on CPUs
         * created using "-cpu ... -smp ..." and by CPUs created
         * on the fly by x86_cpu_from_model() for querying. Skip it.
         */
        if (!strcmp(prop->name, "hotplugged")) {
            continue;
        }
        x86_cpu_expand_prop(cpu, props, prop->name);
    }
}

static void object_apply_props(Object *obj, QDict *props, Error **errp)
{
    const QDictEntry *prop;
    Error *err = NULL;

    for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
        object_property_set_qobject(obj, qdict_entry_value(prop),
                                    qdict_entry_key(prop), &err);
        if (err) {
            break;
        }
    }

    error_propagate(errp, err);
}

/* Create X86CPU object according to model+props specification */
static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
{
    X86CPU *xc = NULL;
    X86CPUClass *xcc;
    Error *err = NULL;

    xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
    if (xcc == NULL) {
        error_setg(&err, "CPU model '%s' not found", model);
        goto out;
    }

    xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
    if (props) {
        object_apply_props(OBJECT(xc), props, &err);
        if (err) {
            goto out;
        }
    }

    x86_cpu_expand_features(xc, &err);
    if (err) {
        goto out;
    }

out:
    if (err) {
        error_propagate(errp, err);
        object_unref(OBJECT(xc));
        xc = NULL;
    }
    return xc;
}
CpuModelExpansionInfo *
arch_query_cpu_model_expansion(CpuModelExpansionType type,
                               CpuModelInfo *model,
                               Error **errp)
{
    X86CPU *xc = NULL;
    Error *err = NULL;
    CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
    QDict *props = NULL;
    const char *base_name;

    xc = x86_cpu_from_model(model->name,
                            model->has_props ?
                                qobject_to_qdict(model->props) :
                                NULL, &err);
    if (err) {
        goto out;
    }

    props = qdict_new();

    switch (type) {
    case CPU_MODEL_EXPANSION_TYPE_STATIC:
        /* Static expansion will be based on "base" only */
        base_name = "base";
        x86_cpu_to_dict(xc, props);
        break;
    case CPU_MODEL_EXPANSION_TYPE_FULL:
        /* As we don't return every single property, full expansion needs
         * to keep the original model name+props, and add extra
         * properties on top of that.
         */
        base_name = model->name;
        x86_cpu_to_dict_full(xc, props);
        break;
    default:
        error_setg(&err, "Unsupported expansion type");
        goto out;
    }

    if (!props) {
        props = qdict_new();
    }
    x86_cpu_to_dict(xc, props);

    ret->model = g_new0(CpuModelInfo, 1);
    ret->model->name = g_strdup(base_name);
    ret->model->props = QOBJECT(props);
    ret->model->has_props = true;

out:
    object_unref(OBJECT(xc));
    if (err) {
        error_propagate(errp, err);
        qapi_free_CpuModelExpansionInfo(ret);
        ret = NULL;
    }
    return ret;
}
static gchar *x86_gdb_arch_name(CPUState *cs)
{
#ifdef TARGET_X86_64
    return g_strdup("i386:x86-64");
#else
    return g_strdup("i386");
#endif
}

static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
{
    X86CPUDefinition *cpudef = data;
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->cpu_def = cpudef;
    xcc->migration_safe = true;
}

static void x86_register_cpudef_type(X86CPUDefinition *def)
{
    char *typename = x86_cpu_type_name(def->name);
    TypeInfo ti = {
        .name = typename,
        .parent = TYPE_X86_CPU,
        .class_init = x86_cpu_cpudef_class_init,
        .class_data = def,
    };

    /* AMD aliases are handled at runtime based on CPUID vendor, so
     * they shouldn't be set on the CPU model table.
     */
    assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));

    type_register(&ti);
    g_free(typename);
}

#if !defined(CONFIG_USER_ONLY)

void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}

#endif /* !CONFIG_USER_ONLY */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint32_t pkg_offset;
    uint32_t limit;
    uint32_t signature[3];

    /* Calculate & apply limits for different index ranges */
    if (index >= 0xC0000000) {
        limit = env->cpuid_xlevel2;
    } else if (index >= 0x80000000) {
        limit = env->cpuid_xlevel;
    } else if (index >= 0x40000000) {
        limit = 0x40000001;
    } else {
        limit = env->cpuid_level;
    }

    if (index > limit) {
        /* Intel documentation states that invalid EAX input will
         * return the same information as EAX=cpuid_level
         * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
         */
        index = env->cpuid_level;
    }

    switch (index) {
    case 0:
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
            *ecx |= CPUID_EXT_OSXSAVE;
        }
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= CPUID_HT;
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        if (!cpu->enable_l3_cache) {
            *ecx = 0;
        } else {
            *ecx = L3_N_DESCRIPTOR;
        }
        *edx = (L1D_DESCRIPTOR << 16) | \
               (L1I_DESCRIPTOR <<  8) | \
               (L2_DESCRIPTOR);
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            *eax &= ~0xFC000000;
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                *eax |= CPUID_4_TYPE_DCACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1D_LINE_SIZE - 1) | \
                       ((L1D_PARTITIONS - 1) << 12) | \
                       ((L1D_ASSOCIATIVITY - 1) << 22);
                *ecx = L1D_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 1: /* L1 icache info */
                *eax |= CPUID_4_TYPE_ICACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1I_LINE_SIZE - 1) | \
                       ((L1I_PARTITIONS - 1) << 12) | \
                       ((L1I_ASSOCIATIVITY - 1) << 22);
                *ecx = L1I_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 2: /* L2 cache info */
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(2) | \
                        CPUID_4_SELF_INIT_LEVEL;
                if (cs->nr_threads > 1) {
                    *eax |= (cs->nr_threads - 1) << 14;
                }
                *ebx = (L2_LINE_SIZE - 1) | \
                       ((L2_PARTITIONS - 1) << 12) | \
                       ((L2_ASSOCIATIVITY - 1) << 22);
                *ecx = L2_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 3: /* L3 cache info */
                if (!cpu->enable_l3_cache) {
                    *eax = *ebx = *ecx = *edx = 0;
                    break;
                }
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(3) | \
                        CPUID_4_SELF_INIT_LEVEL;
                pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
                *eax |= ((1 << pkg_offset) - 1) << 14;
                *ebx = (L3_N_LINE_SIZE - 1) | \
                       ((L3_N_PARTITIONS - 1) << 12) | \
                       ((L3_N_ASSOCIATIVITY - 1) << 22);
                *ecx = L3_N_SETS - 1;
                *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
                break;
            default: /* end of info */
                *eax = *ebx = *ecx = *edx = 0;
                break;
            }
        }

        /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
        if ((*eax & 31) && cs->nr_cores > 1) {
            *eax |= (cs->nr_cores - 1) << 26;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
        } else {
            *eax = *ebx = *ecx = *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else {
            *eax = *ebx = *ecx = *edx = 0;
        }
        break;
    case 0xB:
        /* Extended Topology Enumeration Leaf */
        if (!cpu->enable_cpuid_0xb) {
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;

        switch (count) {
        case 0:
            *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }

        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0xD: {
        /* Processor Extended State */
        *eax = *ebx = *ecx = *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }

        if (count == 0) {
            *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
            *eax = env->features[FEAT_XSAVE_COMP_LO];
            *edx = env->features[FEAT_XSAVE_COMP_HI];
            *ebx = *ecx;
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
                const ExtSaveArea *esa = &x86_ext_save_areas[count];
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x40000000:
        /*
         * CPUID code in kvm_arch_init_vcpu() ignores stuff
         * set here, but we restrict to TCG none the less.
         */
        if (tcg_enabled() && cpu->expose_tcg) {
            memcpy(signature, "TCGTCGTCGTCG", 12);
            *eax = 0x40000001;
            *ebx = signature[0];
            *ecx = signature[1];
            *edx = signature[2];
        } else {
            *eax = *ebx = *ecx = *edx = 0;
        }
        break;
    case 0x40000001:
        *eax = *ebx = *ecx = *edx = 0;
        break;
    case 0x80000000:
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
        *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
               (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
        *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
               (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        *ecx = (L2_SIZE_KB_AMD << 16) | \
               (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
               (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
        if (!cpu->enable_l3_cache) {
            *edx = ((L3_SIZE_KB / 512) << 18) | \
                   (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
                   (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
        } else {
            *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
                   (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
                   (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
        }
        break;
    case 0x80000007:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor */
            *eax = cpu->phys_bits; /* configurable physical bits */
            if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
                *eax |= 0x00003900; /* 57 bits virtual */
            } else {
                *eax |= 0x00003000; /* 48 bits virtual */
            }
        } else {
            *eax = cpu->phys_bits;
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = *ebx = *ecx = *edx = 0;
        }
        break;
    case 0xC0000000:
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = *ebx = *ecx = *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = *ebx = *ecx = *edx = 0;
        break;
    }
}
3144 static void x86_cpu_reset(CPUState
*s
)
3146 X86CPU
*cpu
= X86_CPU(s
);
3147 X86CPUClass
*xcc
= X86_CPU_GET_CLASS(cpu
);
3148 CPUX86State
*env
= &cpu
->env
;
3153 xcc
->parent_reset(s
);
3155 memset(env
, 0, offsetof(CPUX86State
, end_reset_fields
));
3157 env
->old_exception
= -1;
3159 /* init to reset state */
3161 env
->hflags2
|= HF2_GIF_MASK
;
3163 cpu_x86_update_cr0(env
, 0x60000010);
3164 env
->a20_mask
= ~0x0;
3165 env
->smbase
= 0x30000;
3167 env
->idt
.limit
= 0xffff;
3168 env
->gdt
.limit
= 0xffff;
3169 env
->ldt
.limit
= 0xffff;
3170 env
->ldt
.flags
= DESC_P_MASK
| (2 << DESC_TYPE_SHIFT
);
3171 env
->tr
.limit
= 0xffff;
3172 env
->tr
.flags
= DESC_P_MASK
| (11 << DESC_TYPE_SHIFT
);
3174 cpu_x86_load_seg_cache(env
, R_CS
, 0xf000, 0xffff0000, 0xffff,
3175 DESC_P_MASK
| DESC_S_MASK
| DESC_CS_MASK
|
3176 DESC_R_MASK
| DESC_A_MASK
);
3177 cpu_x86_load_seg_cache(env
, R_DS
, 0, 0, 0xffff,
3178 DESC_P_MASK
| DESC_S_MASK
| DESC_W_MASK
|
3180 cpu_x86_load_seg_cache(env
, R_ES
, 0, 0, 0xffff,
3181 DESC_P_MASK
| DESC_S_MASK
| DESC_W_MASK
|
3183 cpu_x86_load_seg_cache(env
, R_SS
, 0, 0, 0xffff,
3184 DESC_P_MASK
| DESC_S_MASK
| DESC_W_MASK
|
3186 cpu_x86_load_seg_cache(env
, R_FS
, 0, 0, 0xffff,
3187 DESC_P_MASK
| DESC_S_MASK
| DESC_W_MASK
|
3189 cpu_x86_load_seg_cache(env
, R_GS
, 0, 0, 0xffff,
3190 DESC_P_MASK
| DESC_S_MASK
| DESC_W_MASK
|
3194 env
->regs
[R_EDX
] = env
->cpuid_version
;
3199 for (i
= 0; i
< 8; i
++) {
3202 cpu_set_fpuc(env
, 0x37f);
3204 env
->mxcsr
= 0x1f80;
3205 /* All units are in INIT state. */
3208 env
->pat
= 0x0007040600070406ULL
;
3209 env
->msr_ia32_misc_enable
= MSR_IA32_MISC_ENABLE_DEFAULT
;
3211 memset(env
->dr
, 0, sizeof(env
->dr
));
3212 env
->dr
[6] = DR6_FIXED_1
;
3213 env
->dr
[7] = DR7_FIXED_1
;
3214 cpu_breakpoint_remove_all(s
, BP_CPU
);
3215 cpu_watchpoint_remove_all(s
, BP_CPU
);
3218 xcr0
= XSTATE_FP_MASK
;
3220 #ifdef CONFIG_USER_ONLY
3221 /* Enable all the features for user-mode. */
3222 if (env
->features
[FEAT_1_EDX
] & CPUID_SSE
) {
3223 xcr0
|= XSTATE_SSE_MASK
;
3225 for (i
= 2; i
< ARRAY_SIZE(x86_ext_save_areas
); i
++) {
3226 const ExtSaveArea
*esa
= &x86_ext_save_areas
[i
];
3227 if (env
->features
[esa
->feature
] & esa
->bits
) {
3232 if (env
->features
[FEAT_1_ECX
] & CPUID_EXT_XSAVE
) {
3233 cr4
|= CR4_OSFXSR_MASK
| CR4_OSXSAVE_MASK
;
3235 if (env
->features
[FEAT_7_0_EBX
] & CPUID_7_0_EBX_FSGSBASE
) {
3236 cr4
|= CR4_FSGSBASE_MASK
;
3241 cpu_x86_update_cr4(env
, cr4
);
3244 * SDM 11.11.5 requires:
3245 * - IA32_MTRR_DEF_TYPE MSR.E = 0
3246 * - IA32_MTRR_PHYSMASKn.V = 0
3247 * All other bits are undefined. For simplification, zero it all.
3249 env
->mtrr_deftype
= 0;
3250 memset(env
->mtrr_var
, 0, sizeof(env
->mtrr_var
));
3251 memset(env
->mtrr_fixed
, 0, sizeof(env
->mtrr_fixed
));
3253 #if !defined(CONFIG_USER_ONLY)
3254 /* We hard-wire the BSP to the first CPU. */
3255 apic_designate_bsp(cpu
->apic_state
, s
->cpu_index
== 0);
3257 s
->halted
= !cpu_is_bsp(cpu
);
3259 if (kvm_enabled()) {
3260 kvm_arch_reset_vcpu(cpu
);
#ifndef CONFIG_USER_ONLY
bool cpu_is_bsp(X86CPU *cpu)
{
    return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
}

/* TODO: remove me, when reset over QOM tree is implemented */
static void x86_cpu_machine_reset_cb(void *opaque)
{
    X86CPU *cpu = opaque;
    cpu_reset(CPU(cpu));
}
#endif

static void mce_init(X86CPU *cpu)
{
    CPUX86State *cenv = &cpu->env;
    unsigned int bank;

    if (((cenv->cpuid_version >> 8) & 0xf) >= 6
        && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
            (CPUID_MCE | CPUID_MCA)) {
        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
                        (cpu->enable_lmce ? MCG_LMCE_P : 0);
        cenv->mcg_ctl = ~(uint64_t)0;
        for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
            cenv->mce_banks[bank * 4] = ~(uint64_t)0;
        }
    }
}
#ifndef CONFIG_USER_ONLY
APICCommonClass *apic_get_class(void)
{
    const char *apic_type = "apic";

    if (kvm_apic_in_kernel()) {
        apic_type = "kvm-apic";
    } else if (xen_enabled()) {
        apic_type = "xen-apic";
    }

    return APIC_COMMON_CLASS(object_class_by_name(apic_type));
}

static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());

    cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));

    object_property_add_child(OBJECT(cpu), "lapic",
                              OBJECT(cpu->apic_state), &error_abort);
    object_unref(OBJECT(cpu->apic_state));

    qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}

static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    static bool apic_mmio_map_once;

    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
    }
}

static void x86_cpu_machine_done(Notifier *n, void *unused)
{
    X86CPU *cpu = container_of(n, X86CPU, machine_done);
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    if (smram) {
        cpu->smram = g_new(MemoryRegion, 1);
        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
                                 smram, 0, 1ull << 32);
        memory_region_set_enabled(cpu->smram, true);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
    }
}
#else
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
#endif
/* Note: Only safe for use on x86(-64) hosts */
static uint32_t x86_host_phys_bits(void)
{
    uint32_t eax;
    uint32_t host_phys_bits;

    host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
    if (eax >= 0x80000008) {
        host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
        /* Note: According to AMD doc 25481 rev 2.34 they have a field
         * at 23:16 that can specify a maximum physical address bits for
         * the guest that can override this value; but I've not seen
         * anything with that set.
         */
        host_phys_bits = eax & 0xff;
    } else {
        /* It's an odd 64 bit machine that doesn't have the leaf for
         * physical address bits; fall back to 36 that's most older
         * Intel.
         */
        host_phys_bits = 36;
    }

    return host_phys_bits;
}
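/* Illustrative example added while editing: on a typical modern host,
 * CPUID.80000008H:EAX[7:0] reports something like 39, 46 or 48 physical
 * address bits, and that value is what ends up in host_phys_bits here.
 */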
static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
{
    if (*min < value) {
        *min = value;
    }
}

/* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
{
    CPUX86State *env = &cpu->env;
    FeatureWordInfo *fi = &feature_word_info[w];
    uint32_t eax = fi->cpuid_eax;
    uint32_t region = eax & 0xF0000000;

    if (!env->features[w]) {
        return;
    }

    switch (region) {
    case 0x00000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
        break;
    case 0x80000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
        break;
    case 0xC0000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
        break;
    }
}

/* Calculate XSAVE components based on the configured CPU feature flags */
static void x86_cpu_enable_xsave_components(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int i;
    uint64_t mask;

    if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
        return;
    }

    mask = 0;
    for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            mask |= (1ULL << i);
        }
    }

    env->features[FEAT_XSAVE_COMP_LO] = mask;
    env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
}
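/* Illustrative example added while editing: with plain FPU+SSE+AVX
 * configured, the loop above sets bits 0 (x87), 1 (SSE) and 2 (AVX) in mask,
 * so FEAT_XSAVE_COMP_LO ends up as 0x7 and FEAT_XSAVE_COMP_HI as 0.
 */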
/***** Steps involved on loading and filtering CPUID data
 *
 * When initializing and realizing a CPU object, the steps
 * involved in setting up CPUID data are:
 *
 * 1) Loading CPU model definition (X86CPUDefinition). This is
 *    implemented by x86_cpu_load_def() and should be completely
 *    transparent, as it is done automatically by instance_init.
 *    No code should need to look at X86CPUDefinition structs
 *    outside instance_init.
 *
 * 2) CPU expansion. This is done by realize before CPUID
 *    filtering, and will make sure host/accelerator data is
 *    loaded for CPU models that depend on host capabilities
 *    (e.g. "host"). Done by x86_cpu_expand_features().
 *
 * 3) CPUID filtering. This initializes extra data related to
 *    CPUID, and checks if the host supports all capabilities
 *    required by the CPU. Runnability of a CPU model is
 *    determined at this step. Done by x86_cpu_filter_features().
 *
 * Some operations don't require all steps to be performed.
 * More precisely:
 *
 * - CPU instance creation (instance_init) will run only CPU
 *   model loading. CPU expansion can't run at instance_init-time
 *   because host/accelerator data may be not available yet.
 * - CPU realization will perform both CPU model expansion and CPUID
 *   filtering, and return an error in case one of them fails.
 * - query-cpu-definitions needs to run all 3 steps. It needs
 *   to run CPUID filtering, as the 'unavailable-features'
 *   field is set based on the filtering results.
 * - The query-cpu-model-expansion QMP command only needs to run
 *   CPU model loading and CPU expansion. It should not filter
 *   any CPUID data based on host capabilities.
 */

/* Expand CPU configuration data, based on configured features
 * and host/accelerator capabilities when appropriate.
 */
static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    GList *l;
    Error *local_err = NULL;

    /*TODO: Now cpu->max_features doesn't overwrite features
     * set using QOM properties, and we can convert
     * plus_features & minus_features to global properties
     * inside x86_cpu_parse_featurestr() too.
     */
    if (cpu->max_features) {
        for (w = 0; w < FEATURE_WORDS; w++) {
            /* Override only features that weren't set explicitly
             * by the user.
             */
            env->features[w] |=
                x86_cpu_get_supported_feature_word(w, cpu->migratable) &
                ~env->user_features[w];
        }
    }

    for (l = plus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    for (l = minus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    if (!kvm_enabled() || !cpu->expose_kvm) {
        env->features[FEAT_KVM] = 0;
    }

    x86_cpu_enable_xsave_components(cpu);

    /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
    x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
    if (cpu->full_cpuid_auto_level) {
        x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
        x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
        x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
        /* SVM requires CPUID[0x8000000A] */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
        }
    }

    /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
    if (env->cpuid_level == UINT32_MAX) {
        env->cpuid_level = env->cpuid_min_level;
    }
    if (env->cpuid_xlevel == UINT32_MAX) {
        env->cpuid_xlevel = env->cpuid_min_xlevel;
    }
    if (env->cpuid_xlevel2 == UINT32_MAX) {
        env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
    }

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
}
/*
 * Finishes initialization of CPUID data, filters CPU feature
 * words based on host availability of each feature.
 *
 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
 */
static int x86_cpu_filter_features(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    int rv = 0;

    for (w = 0; w < FEATURE_WORDS; w++) {
        uint32_t host_feat =
            x86_cpu_get_supported_feature_word(w, false);
        uint32_t requested_features = env->features[w];
        env->features[w] &= host_feat;
        cpu->filtered_features[w] = requested_features & ~env->features[w];
        if (cpu->filtered_features[w]) {
            rv = 1;
        }
    }

    return rv;
}
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
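/* Illustrative usage note added while editing: IS_AMD_CPU(env) is true when
 * the three vendor words spell "AuthenticAMD"; the realize code below uses it
 * to decide whether to mirror the shared CPUID[1].EDX bits into
 * CPUID[8000_0001].EDX.
 */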
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;

    if (xcc->kvm_required && !kvm_enabled()) {
        char *name = x86_cpu_class_get_model_name(xcc);
        error_setg(&local_err, "CPU model '%s' requires KVM", name);
        g_free(name);
        goto out;
    }

    if (cpu->apic_id == UNASSIGNED_APIC_ID) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    x86_cpu_expand_features(cpu, &local_err);
    if (local_err) {
        goto out;
    }

    if (x86_cpu_filter_features(cpu) &&
        (cpu->check_cpuid || cpu->enforce_cpuid)) {
        x86_cpu_report_filtered_features(cpu);
        if (cpu->enforce_cpuid) {
            error_setg(&local_err,
                       kvm_enabled() ?
                           "Host doesn't support requested features" :
                           "TCG doesn't support requested features");
            goto out;
        }
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
           & CPUID_EXT2_AMD_ALIASES);
    }

    /* For 64bit systems think about the number of physical bits to present.
     * ideally this should be the same as the host; anything other than matching
     * the host can cause incorrect guest behaviour.
     * QEMU used to pick the magic value of 40 bits that corresponds to
     * consumer AMD devices but nothing else.
     */
    if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
        if (kvm_enabled()) {
            uint32_t host_phys_bits = x86_host_phys_bits();
            static bool warned;

            if (cpu->host_phys_bits) {
                /* The user asked for us to use the host physical bits */
                cpu->phys_bits = host_phys_bits;
            }

            /* Print a warning if the user set it to a value that's not the
             * host value.
             */
            if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
                !warned) {
                warn_report("Host physical bits (%u)"
                            " does not match phys-bits property (%u)",
                            host_phys_bits, cpu->phys_bits);
                warned = true;
            }

            if (cpu->phys_bits &&
                (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
                cpu->phys_bits < 32)) {
                error_setg(errp, "phys-bits should be between 32 and %u "
                                 " (but is %u)",
                                 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
                return;
            }
        } else {
            if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
                error_setg(errp, "TCG only supports phys-bits=%u",
                                  TCG_PHYS_ADDR_BITS);
                return;
            }
        }
        /* 0 means it was not explicitly set by the user (or by machine
         * compat_props or by the host code above). In this case, the default
         * is the value used by TCG (40).
         */
        if (cpu->phys_bits == 0) {
            cpu->phys_bits = TCG_PHYS_ADDR_BITS;
        }
    } else {
        /* For 32 bit systems don't use the user set value, but keep
         * phys_bits consistent with what we tell the guest.
         */
        if (cpu->phys_bits != 0) {
            error_setg(errp, "phys-bits is not user-configurable in 32 bit");
            return;
        }

        if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
            cpu->phys_bits = 36;
        } else {
            cpu->phys_bits = 32;
        }
    }
    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        AddressSpace *as_normal = g_new0(AddressSpace, 1);
        AddressSpace *as_smm = g_new(AddressSpace, 1);

        address_space_init(as_normal, cs->memory, "cpu-memory");

        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);
        address_space_init(as_smm, cpu->cpu_as_root, "CPU");

        cpu_address_space_init(cs, as_normal, 0);
        cpu_address_space_init(cs, as_smm, 1);

        /* ... SMRAM with higher priority, linked from /machine/smram. */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
     * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to give
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't been populated yet and the checking is incorrect.
     */
    if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
        error_report("AMD CPU doesn't support hyperthreading. Please configure"
                     " -smp options properly.");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
{
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu_remove_sync(CPU(dev));
    qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
#endif

    if (cpu->apic_state) {
        object_unparent(OBJECT(cpu->apic_state));
        cpu->apic_state = NULL;
    }

    xcc->parent_unrealize(dev, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
typedef struct BitProperty {
    FeatureWord w;
    uint32_t mask;
} BitProperty;

static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    BitProperty *fp = opaque;
    uint32_t f = cpu->env.features[fp->w];
    bool value = (f & fp->mask) == fp->mask;
    visit_type_bool(v, name, &value, errp);
}
static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    DeviceState *dev = DEVICE(obj);
    X86CPU *cpu = X86_CPU(obj);
    BitProperty *fp = opaque;
    Error *local_err = NULL;
    bool value;

    if (dev->realized) {
        qdev_prop_set_after_realize(dev, name, errp);
        return;
    }

    visit_type_bool(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (value) {
        cpu->env.features[fp->w] |= fp->mask;
    } else {
        cpu->env.features[fp->w] &= ~fp->mask;
    }
    cpu->env.user_features[fp->w] |= fp->mask;
}

static void x86_cpu_release_bit_prop(Object *obj, const char *name,
                                     void *opaque)
{
    BitProperty *prop = opaque;
    g_free(prop);
}
/* Register a boolean property to get/set a single bit in a uint32_t field.
 *
 * The same property name can be registered multiple times to make it affect
 * multiple bits in the same FeatureWord. In that case, the getter will return
 * true only if all bits are set.
 */
static void x86_cpu_register_bit_prop(X86CPU *cpu,
                                      const char *prop_name,
                                      FeatureWord w,
                                      int bitnr)
{
    BitProperty *fp;
    ObjectProperty *op;
    uint32_t mask = (1UL << bitnr);

    op = object_property_find(OBJECT(cpu), prop_name, NULL);
    if (op) {
        fp = op->opaque;
        assert(fp->w == w);
        fp->mask |= mask;
    } else {
        fp = g_new0(BitProperty, 1);
        fp->w = w;
        fp->mask = mask;
        object_property_add(OBJECT(cpu), prop_name, "bool",
                            x86_cpu_get_bit_prop,
                            x86_cpu_set_bit_prop,
                            x86_cpu_release_bit_prop, fp, &error_abort);
    }
}
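/* Illustrative example added while editing: registering the "avx" property
 * ties it to FEAT_1_ECX bit 28, so "-cpu <model>,avx=off" simply clears that
 * mask bit in env->features[FEAT_1_ECX] via x86_cpu_set_bit_prop().
 */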
static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
                                               FeatureWord w,
                                               int bitnr)
{
    FeatureWordInfo *fi = &feature_word_info[w];
    const char *name = fi->feat_names[bitnr];

    if (!name) {
        return;
    }

    /* Property names should use "-" instead of "_".
     * Old names containing underscores are registered as aliases
     * using object_property_add_alias()
     */
    assert(!strchr(name, '_'));
    /* aliases don't use "|" delimiters anymore, they are registered
     * manually using object_property_add_alias() */
    assert(!strchr(name, '|'));
    x86_cpu_register_bit_prop(cpu, name, w, bitnr);
}
static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    GuestPanicInformation *panic_info = NULL;

    if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
        panic_info = g_malloc0(sizeof(GuestPanicInformation));

        panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;

        assert(HV_CRASH_PARAMS >= 5);
        panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
        panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
        panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
        panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
        panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
    }

    return panic_info;
}

static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    CPUState *cs = CPU(obj);
    GuestPanicInformation *panic_info;

    if (!cs->crash_occurred) {
        error_setg(errp, "No crash occurred");
        return;
    }

    panic_info = x86_cpu_get_crash_info(cs);
    if (panic_info == NULL) {
        error_setg(errp, "No crash information");
        return;
    }

    visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
                                     errp);
    qapi_free_GuestPanicInformation(panic_info);
}
static void x86_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    cs->env_ptr = env;

    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);

    object_property_add(obj, "crash-information", "GuestPanicInformation",
                        x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;

    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
    object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
    object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
    object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
    object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "i64", obj, "lm", &error_abort);

    object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
    object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
    object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
    object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
    object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
    object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
    object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
    object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
    object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
    object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
    object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
    object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
    object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
    object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
    object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
    object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
    object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
    object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
    object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);

    if (xcc->cpu_def) {
        x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
    }
}
static int64_t x86_cpu_get_arch_id(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);

    return cpu->apic_id;
}

static bool x86_cpu_get_paging_enabled(const CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);

    return cpu->env.cr[0] & CR0_PG_MASK;
}

static void x86_cpu_set_pc(CPUState *cs, vaddr value)
{
    X86CPU *cpu = X86_CPU(cs);

    cpu->env.eip = value;
}

static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
{
    X86CPU *cpu = X86_CPU(cs);

    cpu->env.eip = tb->pc - tb->cs_base;
}

static bool x86_cpu_has_work(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
                                      CPU_INTERRUPT_POLL)) &&
            (env->eflags & IF_MASK)) ||
           (cs->interrupt_request & (CPU_INTERRUPT_NMI |
                                     CPU_INTERRUPT_INIT |
                                     CPU_INTERRUPT_SIPI |
                                     CPU_INTERRUPT_MCE)) ||
           ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
            !(env->hflags & HF_SMM_MASK));
}
static Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
#else
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
    DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
    DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
    DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
    DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
    DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
    DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
    DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
    DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
    DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
    DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
    DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
    DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
                     false),
    DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
    DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),

    /*
     * From "Requirements for Implementing the Microsoft
     * Hypervisor Interface":
     * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
     *
     * "Starting with Windows Server 2012 and Windows 8, if
     * CPUID.40000005.EAX contains a value of -1, Windows assumes that
     * the hypervisor imposes no specific limit to the number of VPs.
     * In this case, Windows Server 2012 guest VMs may use more than
     * 64 VPs, up to the maximum supported number of processors applicable
     * to the specific Windows version being used."
     */
    DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
    DEFINE_PROP_END_OF_LIST()
};
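/*
 * Class init for TYPE_X86_CPU: hooks the generic DeviceClass/CPUClass
 * callbacks up to their x86 implementations and installs the property
 * list defined above.
 */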
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    xcc->parent_realize = dc->realize;
    xcc->parent_unrealize = dc->unrealize;
    dc->realize = x86_cpu_realizefn;
    dc->unrealize = x86_cpu_unrealizefn;
    dc->props = x86_cpu_properties;

    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
#ifdef CONFIG_TCG
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
#endif
    cc->dump_state = x86_cpu_dump_state;
    cc->get_crash_info = x86_cpu_get_crash_info;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
    cc->asidx_from_attrs = x86_asidx_from_attrs;
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_arch_name = x86_gdb_arch_name;
#ifdef TARGET_X86_64
    cc->gdb_core_xml_file = "i386-64bit.xml";
    cc->gdb_num_core_regs = 57;
#else
    cc->gdb_core_xml_file = "i386-32bit.xml";
    cc->gdb_num_core_regs = 41;
#endif
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;
    cc->tcg_initialize = tcg_x86_init;

    dc->user_creatable = true;
}
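/*
 * TYPE_X86_CPU itself is abstract; the concrete CPU models ("base",
 * "max", "host" and the builtin named models) are registered as
 * subclasses in x86_cpu_register_types() below.
 */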
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};
/* "base" CPU model, used by query-cpu-model-expansion */
static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->static_model = true;
    xcc->migration_safe = true;
    xcc->model_description = "base CPU model type with no features enabled";
}
static const TypeInfo x86_base_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("base"),
    .parent = TYPE_X86_CPU,
    .class_init = x86_cpu_base_class_init,
};
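/*
 * Registered from type_init() at startup, which is what makes these
 * models visible to "-cpu help" and the query-cpu-definitions QMP
 * command.
 */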
static void x86_cpu_register_types(void)
{
    int i;

    type_register_static(&x86_cpu_type_info);
    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
        x86_register_cpudef_type(&builtin_x86_defs[i]);
    }
    type_register_static(&max_x86_cpu_type_info);
    type_register_static(&x86_base_cpu_type_info);
#ifdef CONFIG_KVM
    type_register_static(&host_x86_cpu_type_info);
#endif
}

type_init(x86_cpu_register_types)