[qemu/kevin.git] target/i386/cpu.c (blob 98732cd65f2502d975a863585a22ac009a5d5f2c)
1 /*
2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
20 #include "qemu/cutils.h"
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/kvm.h"
25 #include "sysemu/cpus.h"
26 #include "kvm_i386.h"
28 #include "qemu/error-report.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
32 #include "qapi/qmp/types.h"
34 #include "qapi-types.h"
35 #include "qapi-visit.h"
36 #include "qapi/visitor.h"
37 #include "qom/qom-qobject.h"
38 #include "sysemu/arch_init.h"
40 #if defined(CONFIG_KVM)
41 #include <linux/kvm_para.h>
42 #endif
44 #include "sysemu/sysemu.h"
45 #include "hw/qdev-properties.h"
46 #include "hw/i386/topology.h"
47 #ifndef CONFIG_USER_ONLY
48 #include "exec/address-spaces.h"
49 #include "hw/hw.h"
50 #include "hw/xen/xen.h"
51 #include "hw/i386/apic_internal.h"
52 #endif
55 /* Cache topology CPUID constants: */
57 /* CPUID Leaf 2 Descriptors */
59 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
60 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
61 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
62 #define CPUID_2_L3_16MB_16WAY_64B 0x4d
65 /* CPUID Leaf 4 constants: */
67 /* EAX: */
68 #define CPUID_4_TYPE_DCACHE 1
69 #define CPUID_4_TYPE_ICACHE 2
70 #define CPUID_4_TYPE_UNIFIED 3
72 #define CPUID_4_LEVEL(l) ((l) << 5)
74 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
75 #define CPUID_4_FULLY_ASSOC (1 << 9)
77 /* EDX: */
78 #define CPUID_4_NO_INVD_SHARING (1 << 0)
79 #define CPUID_4_INCLUSIVE (1 << 1)
80 #define CPUID_4_COMPLEX_IDX (1 << 2)
82 #define ASSOC_FULL 0xFF
84 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
85 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
86 a == 2 ? 0x2 : \
87 a == 4 ? 0x4 : \
88 a == 8 ? 0x6 : \
89 a == 16 ? 0x8 : \
90 a == 32 ? 0xA : \
91 a == 48 ? 0xB : \
92 a == 64 ? 0xC : \
93 a == 96 ? 0xD : \
94 a == 128 ? 0xE : \
95 a == ASSOC_FULL ? 0xF : \
96 0 /* invalid value */)
99 /* Definitions of the hardcoded cache entries we expose: */
101 /* L1 data cache: */
102 #define L1D_LINE_SIZE 64
103 #define L1D_ASSOCIATIVITY 8
104 #define L1D_SETS 64
105 #define L1D_PARTITIONS 1
106 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
107 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
108 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
109 #define L1D_LINES_PER_TAG 1
110 #define L1D_SIZE_KB_AMD 64
111 #define L1D_ASSOCIATIVITY_AMD 2
113 /* L1 instruction cache: */
114 #define L1I_LINE_SIZE 64
115 #define L1I_ASSOCIATIVITY 8
116 #define L1I_SETS 64
117 #define L1I_PARTITIONS 1
118 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
119 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
120 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
121 #define L1I_LINES_PER_TAG 1
122 #define L1I_SIZE_KB_AMD 64
123 #define L1I_ASSOCIATIVITY_AMD 2
125 /* Level 2 unified cache: */
126 #define L2_LINE_SIZE 64
127 #define L2_ASSOCIATIVITY 16
128 #define L2_SETS 4096
129 #define L2_PARTITIONS 1
130 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
131 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
132 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
133 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
134 #define L2_LINES_PER_TAG 1
135 #define L2_SIZE_KB_AMD 512
137 /* Level 3 unified cache: */
138 #define L3_SIZE_KB 0 /* disabled */
139 #define L3_ASSOCIATIVITY 0 /* disabled */
140 #define L3_LINES_PER_TAG 0 /* disabled */
141 #define L3_LINE_SIZE 0 /* disabled */
142 #define L3_N_LINE_SIZE 64
143 #define L3_N_ASSOCIATIVITY 16
144 #define L3_N_SETS 16384
145 #define L3_N_PARTITIONS 1
146 #define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
147 #define L3_N_LINES_PER_TAG 1
148 #define L3_N_SIZE_KB_AMD 16384
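/*
 * Illustrative sketch (not part of the original file): the
 * "Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS" comments above can be
 * checked at build time. This assumes QEMU_BUILD_BUG_ON() from
 * "qemu/compiler.h" is visible here via "qemu/osdep.h".
 */
QEMU_BUILD_BUG_ON(L1D_LINE_SIZE * L1D_ASSOCIATIVITY * L1D_SETS *
                  L1D_PARTITIONS != 32 * 1024);
QEMU_BUILD_BUG_ON(L1I_LINE_SIZE * L1I_ASSOCIATIVITY * L1I_SETS *
                  L1I_PARTITIONS != 32 * 1024);
QEMU_BUILD_BUG_ON(L2_LINE_SIZE * L2_ASSOCIATIVITY * L2_SETS *
                  L2_PARTITIONS != 4 * 1024 * 1024);
QEMU_BUILD_BUG_ON(L3_N_LINE_SIZE * L3_N_ASSOCIATIVITY * L3_N_SETS *
                  L3_N_PARTITIONS != 16 * 1024 * 1024);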
150 /* TLB definitions: */
152 #define L1_DTLB_2M_ASSOC 1
153 #define L1_DTLB_2M_ENTRIES 255
154 #define L1_DTLB_4K_ASSOC 1
155 #define L1_DTLB_4K_ENTRIES 255
157 #define L1_ITLB_2M_ASSOC 1
158 #define L1_ITLB_2M_ENTRIES 255
159 #define L1_ITLB_4K_ASSOC 1
160 #define L1_ITLB_4K_ENTRIES 255
162 #define L2_DTLB_2M_ASSOC 0 /* disabled */
163 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
164 #define L2_DTLB_4K_ASSOC 4
165 #define L2_DTLB_4K_ENTRIES 512
167 #define L2_ITLB_2M_ASSOC 0 /* disabled */
168 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
169 #define L2_ITLB_4K_ASSOC 4
170 #define L2_ITLB_4K_ENTRIES 512
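/*
 * Illustrative sketch of how the AMD_ENC_ASSOC() macro and the L2 cache
 * constants above combine when building CPUID leaf 0x80000006 ECX (L2 size
 * in KB, associativity code, lines per tag, line size). The helper name is
 * hypothetical and only demonstrates the field layout.
 */
static inline uint32_t example_amd_l2_cache_ecx(void)
{
    return (L2_SIZE_KB_AMD << 16) |
           (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) |
           (L2_LINES_PER_TAG << 8) |
           L2_LINE_SIZE;
}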
174 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
175 uint32_t vendor2, uint32_t vendor3)
177 int i;
178 for (i = 0; i < 4; i++) {
179 dst[i] = vendor1 >> (8 * i);
180 dst[i + 4] = vendor2 >> (8 * i);
181 dst[i + 8] = vendor3 >> (8 * i);
183 dst[CPUID_VENDOR_SZ] = '\0';
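/*
 * Illustrative sketch: for the "GenuineIntel" vendor string, CPUID.0 returns
 * EBX=0x756e6547 ("Genu"), EDX=0x49656e69 ("ineI") and ECX=0x6c65746e
 * ("ntel"); the helper above reassembles them in EBX/EDX/ECX order. The
 * wrapper below is hypothetical and only exists to show the call.
 */
static inline void example_vendor_words2str(char buf[CPUID_VENDOR_SZ + 1])
{
    x86_cpu_vendor_words2str(buf, 0x756e6547, 0x49656e69, 0x6c65746e);
    /* buf now holds "GenuineIntel" */
}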
186 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
187 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
188 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
189 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
190 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
191 CPUID_PSE36 | CPUID_FXSR)
192 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
193 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
194 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
195 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
196 CPUID_PAE | CPUID_SEP | CPUID_APIC)
198 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
199 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
200 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
201 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
202 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
203 /* partly implemented:
204 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
205 /* missing:
206 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
207 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
208 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
209 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
210 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
211 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
212 /* missing:
213 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
214 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
215 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
216 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
217 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
219 #ifdef TARGET_X86_64
220 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
221 #else
222 #define TCG_EXT2_X86_64_FEATURES 0
223 #endif
225 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
226 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
227 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
228 TCG_EXT2_X86_64_FEATURES)
229 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
230 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
231 #define TCG_EXT4_FEATURES 0
232 #define TCG_SVM_FEATURES 0
233 #define TCG_KVM_FEATURES 0
234 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
235 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
236 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
237 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
238 CPUID_7_0_EBX_ERMS)
239 /* missing:
240 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
241 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
242 CPUID_7_0_EBX_RDSEED */
243 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE | \
244 CPUID_7_0_ECX_LA57)
245 #define TCG_7_0_EDX_FEATURES 0
246 #define TCG_APM_FEATURES 0
247 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
248 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
249 /* missing:
250 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
252 typedef struct FeatureWordInfo {
253 /* feature flags names are taken from "Intel Processor Identification and
254 * the CPUID Instruction" and AMD's "CPUID Specification".
255 * In cases of disagreement between feature naming conventions,
256 * aliases may be added.
258 const char *feat_names[32];
259 uint32_t cpuid_eax; /* Input EAX for CPUID */
260 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
261 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
262 int cpuid_reg; /* output register (R_* constant) */
263 uint32_t tcg_features; /* Feature flags supported by TCG */
264 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
265 uint32_t migratable_flags; /* Feature flags known to be migratable */
266 } FeatureWordInfo;
268 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
269 [FEAT_1_EDX] = {
270 .feat_names = {
271 "fpu", "vme", "de", "pse",
272 "tsc", "msr", "pae", "mce",
273 "cx8", "apic", NULL, "sep",
274 "mtrr", "pge", "mca", "cmov",
275 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
276 NULL, "ds" /* Intel dts */, "acpi", "mmx",
277 "fxsr", "sse", "sse2", "ss",
278 "ht" /* Intel htt */, "tm", "ia64", "pbe",
280 .cpuid_eax = 1, .cpuid_reg = R_EDX,
281 .tcg_features = TCG_FEATURES,
283 [FEAT_1_ECX] = {
284 .feat_names = {
285 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
286 "ds-cpl", "vmx", "smx", "est",
287 "tm2", "ssse3", "cid", NULL,
288 "fma", "cx16", "xtpr", "pdcm",
289 NULL, "pcid", "dca", "sse4.1",
290 "sse4.2", "x2apic", "movbe", "popcnt",
291 "tsc-deadline", "aes", "xsave", "osxsave",
292 "avx", "f16c", "rdrand", "hypervisor",
294 .cpuid_eax = 1, .cpuid_reg = R_ECX,
295 .tcg_features = TCG_EXT_FEATURES,
297     /* Feature names that are already listed under FEAT_1_EDX above but are
298      * also set in CPUID[8000_0001].EDX on AMD CPUs are left as NULL in
299      * feat_names below. Those bits are copied automatically from
300      * features[FEAT_1_EDX] to features[FEAT_8000_0001_EDX] if and only if the CPU vendor is AMD.
302 [FEAT_8000_0001_EDX] = {
303 .feat_names = {
304 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
305 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
306 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
307 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
308 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
309 "nx", NULL, "mmxext", NULL /* mmx */,
310 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
311 NULL, "lm", "3dnowext", "3dnow",
313 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
314 .tcg_features = TCG_EXT2_FEATURES,
316 [FEAT_8000_0001_ECX] = {
317 .feat_names = {
318 "lahf-lm", "cmp-legacy", "svm", "extapic",
319 "cr8legacy", "abm", "sse4a", "misalignsse",
320 "3dnowprefetch", "osvw", "ibs", "xop",
321 "skinit", "wdt", NULL, "lwp",
322 "fma4", "tce", NULL, "nodeid-msr",
323 NULL, "tbm", "topoext", "perfctr-core",
324 "perfctr-nb", NULL, NULL, NULL,
325 NULL, NULL, NULL, NULL,
327 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
328 .tcg_features = TCG_EXT3_FEATURES,
330 [FEAT_C000_0001_EDX] = {
331 .feat_names = {
332 NULL, NULL, "xstore", "xstore-en",
333 NULL, NULL, "xcrypt", "xcrypt-en",
334 "ace2", "ace2-en", "phe", "phe-en",
335 "pmm", "pmm-en", NULL, NULL,
336 NULL, NULL, NULL, NULL,
337 NULL, NULL, NULL, NULL,
338 NULL, NULL, NULL, NULL,
339 NULL, NULL, NULL, NULL,
341 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
342 .tcg_features = TCG_EXT4_FEATURES,
344 [FEAT_KVM] = {
345 .feat_names = {
346 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
347 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
348 NULL, NULL, NULL, NULL,
349 NULL, NULL, NULL, NULL,
350 NULL, NULL, NULL, NULL,
351 NULL, NULL, NULL, NULL,
352 "kvmclock-stable-bit", NULL, NULL, NULL,
353 NULL, NULL, NULL, NULL,
355 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
356 .tcg_features = TCG_KVM_FEATURES,
358 [FEAT_HYPERV_EAX] = {
359 .feat_names = {
360 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
361 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
362 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
363 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
364 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
365 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
366 NULL, NULL, NULL, NULL,
367 NULL, NULL, NULL, NULL,
368 NULL, NULL, NULL, NULL,
369 NULL, NULL, NULL, NULL,
370 NULL, NULL, NULL, NULL,
372 .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
374 [FEAT_HYPERV_EBX] = {
375 .feat_names = {
376 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
377 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
378 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
379 NULL /* hv_create_port */, NULL /* hv_connect_port */,
380 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
381 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
382 NULL, NULL,
383 NULL, NULL, NULL, NULL,
384 NULL, NULL, NULL, NULL,
385 NULL, NULL, NULL, NULL,
386 NULL, NULL, NULL, NULL,
388 .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
390 [FEAT_HYPERV_EDX] = {
391 .feat_names = {
392 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
393 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
394 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
395 NULL, NULL,
396 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
397 NULL, NULL, NULL, NULL,
398 NULL, NULL, NULL, NULL,
399 NULL, NULL, NULL, NULL,
400 NULL, NULL, NULL, NULL,
401 NULL, NULL, NULL, NULL,
403 .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
405 [FEAT_SVM] = {
406 .feat_names = {
407 "npt", "lbrv", "svm-lock", "nrip-save",
408 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
409 NULL, NULL, "pause-filter", NULL,
410 "pfthreshold", NULL, NULL, NULL,
411 NULL, NULL, NULL, NULL,
412 NULL, NULL, NULL, NULL,
413 NULL, NULL, NULL, NULL,
414 NULL, NULL, NULL, NULL,
416 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
417 .tcg_features = TCG_SVM_FEATURES,
419 [FEAT_7_0_EBX] = {
420 .feat_names = {
421 "fsgsbase", "tsc-adjust", NULL, "bmi1",
422 "hle", "avx2", NULL, "smep",
423 "bmi2", "erms", "invpcid", "rtm",
424 NULL, NULL, "mpx", NULL,
425 "avx512f", "avx512dq", "rdseed", "adx",
426 "smap", "avx512ifma", "pcommit", "clflushopt",
427 "clwb", NULL, "avx512pf", "avx512er",
428 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
430 .cpuid_eax = 7,
431 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
432 .cpuid_reg = R_EBX,
433 .tcg_features = TCG_7_0_EBX_FEATURES,
435 [FEAT_7_0_ECX] = {
436 .feat_names = {
437 NULL, "avx512vbmi", "umip", "pku",
438 "ospke", NULL, NULL, NULL,
439 NULL, NULL, NULL, NULL,
440 NULL, NULL, "avx512-vpopcntdq", NULL,
441 "la57", NULL, NULL, NULL,
442 NULL, NULL, "rdpid", NULL,
443 NULL, NULL, NULL, NULL,
444 NULL, NULL, NULL, NULL,
446 .cpuid_eax = 7,
447 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
448 .cpuid_reg = R_ECX,
449 .tcg_features = TCG_7_0_ECX_FEATURES,
451 [FEAT_7_0_EDX] = {
452 .feat_names = {
453 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
454 NULL, NULL, NULL, NULL,
455 NULL, NULL, NULL, NULL,
456 NULL, NULL, NULL, NULL,
457 NULL, NULL, NULL, NULL,
458 NULL, NULL, NULL, NULL,
459 NULL, NULL, NULL, NULL,
460 NULL, NULL, NULL, NULL,
462 .cpuid_eax = 7,
463 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
464 .cpuid_reg = R_EDX,
465 .tcg_features = TCG_7_0_EDX_FEATURES,
467 [FEAT_8000_0007_EDX] = {
468 .feat_names = {
469 NULL, NULL, NULL, NULL,
470 NULL, NULL, NULL, NULL,
471 "invtsc", NULL, NULL, NULL,
472 NULL, NULL, NULL, NULL,
473 NULL, NULL, NULL, NULL,
474 NULL, NULL, NULL, NULL,
475 NULL, NULL, NULL, NULL,
476 NULL, NULL, NULL, NULL,
478 .cpuid_eax = 0x80000007,
479 .cpuid_reg = R_EDX,
480 .tcg_features = TCG_APM_FEATURES,
481 .unmigratable_flags = CPUID_APM_INVTSC,
483 [FEAT_XSAVE] = {
484 .feat_names = {
485 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
486 NULL, NULL, NULL, NULL,
487 NULL, NULL, NULL, NULL,
488 NULL, NULL, NULL, NULL,
489 NULL, NULL, NULL, NULL,
490 NULL, NULL, NULL, NULL,
491 NULL, NULL, NULL, NULL,
492 NULL, NULL, NULL, NULL,
494 .cpuid_eax = 0xd,
495 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
496 .cpuid_reg = R_EAX,
497 .tcg_features = TCG_XSAVE_FEATURES,
499 [FEAT_6_EAX] = {
500 .feat_names = {
501 NULL, NULL, "arat", NULL,
502 NULL, NULL, NULL, NULL,
503 NULL, NULL, NULL, NULL,
504 NULL, NULL, NULL, NULL,
505 NULL, NULL, NULL, NULL,
506 NULL, NULL, NULL, NULL,
507 NULL, NULL, NULL, NULL,
508 NULL, NULL, NULL, NULL,
510 .cpuid_eax = 6, .cpuid_reg = R_EAX,
511 .tcg_features = TCG_6_EAX_FEATURES,
513 [FEAT_XSAVE_COMP_LO] = {
514 .cpuid_eax = 0xD,
515 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
516 .cpuid_reg = R_EAX,
517 .tcg_features = ~0U,
518 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
519 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
520 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
521 XSTATE_PKRU_MASK,
523 [FEAT_XSAVE_COMP_HI] = {
524 .cpuid_eax = 0xD,
525 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
526 .cpuid_reg = R_EDX,
527 .tcg_features = ~0U,
531 typedef struct X86RegisterInfo32 {
532 /* Name of register */
533 const char *name;
534 /* QAPI enum value register */
535 X86CPURegister32 qapi_enum;
536 } X86RegisterInfo32;
538 #define REGISTER(reg) \
539 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
540 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
541 REGISTER(EAX),
542 REGISTER(ECX),
543 REGISTER(EDX),
544 REGISTER(EBX),
545 REGISTER(ESP),
546 REGISTER(EBP),
547 REGISTER(ESI),
548 REGISTER(EDI),
550 #undef REGISTER
552 typedef struct ExtSaveArea {
553 uint32_t feature, bits;
554 uint32_t offset, size;
555 } ExtSaveArea;
557 static const ExtSaveArea x86_ext_save_areas[] = {
558 [XSTATE_FP_BIT] = {
559 /* x87 FP state component is always enabled if XSAVE is supported */
560 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
561 /* x87 state is in the legacy region of the XSAVE area */
562 .offset = 0,
563 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
565 [XSTATE_SSE_BIT] = {
566 /* SSE state component is always enabled if XSAVE is supported */
567 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
568 /* SSE state is in the legacy region of the XSAVE area */
569 .offset = 0,
570 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
572 [XSTATE_YMM_BIT] =
573 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
574 .offset = offsetof(X86XSaveArea, avx_state),
575 .size = sizeof(XSaveAVX) },
576 [XSTATE_BNDREGS_BIT] =
577 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
578 .offset = offsetof(X86XSaveArea, bndreg_state),
579 .size = sizeof(XSaveBNDREG) },
580 [XSTATE_BNDCSR_BIT] =
581 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
582 .offset = offsetof(X86XSaveArea, bndcsr_state),
583 .size = sizeof(XSaveBNDCSR) },
584 [XSTATE_OPMASK_BIT] =
585 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
586 .offset = offsetof(X86XSaveArea, opmask_state),
587 .size = sizeof(XSaveOpmask) },
588 [XSTATE_ZMM_Hi256_BIT] =
589 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
590 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
591 .size = sizeof(XSaveZMM_Hi256) },
592 [XSTATE_Hi16_ZMM_BIT] =
593 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
594 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
595 .size = sizeof(XSaveHi16_ZMM) },
596 [XSTATE_PKRU_BIT] =
597 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
598 .offset = offsetof(X86XSaveArea, pkru_state),
599 .size = sizeof(XSavePKRU) },
602 static uint32_t xsave_area_size(uint64_t mask)
604 int i;
605 uint64_t ret = 0;
607 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
608 const ExtSaveArea *esa = &x86_ext_save_areas[i];
609 if ((mask >> i) & 1) {
610 ret = MAX(ret, esa->offset + esa->size);
613 return ret;
616 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
618 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
619 cpu->env.features[FEAT_XSAVE_COMP_LO];
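/*
 * Illustrative sketch: combining the two helpers above yields the size of
 * the XSAVE area needed for the components currently enabled on a CPU,
 * which is what CPUID leaf 0xD reports. The wrapper name is hypothetical.
 */
static inline uint32_t example_enabled_xsave_area_size(X86CPU *cpu)
{
    return xsave_area_size(x86_cpu_xsave_components(cpu));
}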
622 const char *get_register_name_32(unsigned int reg)
624 if (reg >= CPU_NB_REGS32) {
625 return NULL;
627 return x86_reg_info_32[reg].name;
631 * Returns the set of feature flags that are supported and migratable by
632 * QEMU, for a given FeatureWord.
634 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
636 FeatureWordInfo *wi = &feature_word_info[w];
637 uint32_t r = 0;
638 int i;
640 for (i = 0; i < 32; i++) {
641 uint32_t f = 1U << i;
643 /* If the feature name is known, it is implicitly considered migratable,
644 * unless it is explicitly set in unmigratable_flags */
645 if ((wi->migratable_flags & f) ||
646 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
647 r |= f;
650 return r;
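/*
 * Illustrative sketch: a caller that wants only live-migration-safe bits
 * masks a feature word with the helper above, e.g. when building the flag
 * set for "-cpu host,migratable=on". The wrapper name is hypothetical.
 */
static inline uint32_t example_migratable_subset(FeatureWord w,
                                                 uint32_t host_bits)
{
    return host_bits & x86_cpu_get_migratable_flags(w);
}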
653 void host_cpuid(uint32_t function, uint32_t count,
654 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
656 uint32_t vec[4];
658 #ifdef __x86_64__
659 asm volatile("cpuid"
660 : "=a"(vec[0]), "=b"(vec[1]),
661 "=c"(vec[2]), "=d"(vec[3])
662 : "0"(function), "c"(count) : "cc");
663 #elif defined(__i386__)
664 asm volatile("pusha \n\t"
665 "cpuid \n\t"
666 "mov %%eax, 0(%2) \n\t"
667 "mov %%ebx, 4(%2) \n\t"
668 "mov %%ecx, 8(%2) \n\t"
669 "mov %%edx, 12(%2) \n\t"
670 "popa"
671 : : "a"(function), "c"(count), "S"(vec)
672 : "memory", "cc");
673 #else
674 abort();
675 #endif
677 if (eax)
678 *eax = vec[0];
679 if (ebx)
680 *ebx = vec[1];
681 if (ecx)
682 *ecx = vec[2];
683 if (edx)
684 *edx = vec[3];
687 void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
689 uint32_t eax, ebx, ecx, edx;
691 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
692 x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
694 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
695 if (family) {
696 *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
698 if (model) {
699 *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
701 if (stepping) {
702 *stepping = eax & 0x0F;
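/*
 * Worked example (illustrative): an Ivy Bridge host returns
 * CPUID.1 EAX = 0x000306A9, which the code above decodes as family 6,
 * model 58 (0x3A) and stepping 9, matching the "IvyBridge" model
 * definition later in this file. The helper name is hypothetical.
 */
static inline void example_decode_fms(void)
{
    uint32_t eax = 0x000306A9;
    int family   = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);    /* 6  */
    int model    = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12); /* 58 */
    int stepping = eax & 0x0F;                                    /* 9  */
    (void)family; (void)model; (void)stepping;
}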
706 /* CPU class name definitions: */
708 /* Return type name for a given CPU model name
709 * Caller is responsible for freeing the returned string.
711 static char *x86_cpu_type_name(const char *model_name)
713 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
716 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
718 ObjectClass *oc;
719 char *typename;
721 if (cpu_model == NULL) {
722 return NULL;
725 typename = x86_cpu_type_name(cpu_model);
726 oc = object_class_by_name(typename);
727 g_free(typename);
728 return oc;
731 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
733 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
734 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
735 return g_strndup(class_name,
736 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
739 struct X86CPUDefinition {
740 const char *name;
741 uint32_t level;
742 uint32_t xlevel;
743 /* vendor is zero-terminated, 12 character ASCII string */
744 char vendor[CPUID_VENDOR_SZ + 1];
745 int family;
746 int model;
747 int stepping;
748 FeatureWordArray features;
749 char model_id[48];
752 static X86CPUDefinition builtin_x86_defs[] = {
754 .name = "qemu64",
755 .level = 0xd,
756 .vendor = CPUID_VENDOR_AMD,
757 .family = 6,
758 .model = 6,
759 .stepping = 3,
760 .features[FEAT_1_EDX] =
761 PPRO_FEATURES |
762 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
763 CPUID_PSE36,
764 .features[FEAT_1_ECX] =
765 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
766 .features[FEAT_8000_0001_EDX] =
767 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
768 .features[FEAT_8000_0001_ECX] =
769 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
770 .xlevel = 0x8000000A,
771 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
774 .name = "phenom",
775 .level = 5,
776 .vendor = CPUID_VENDOR_AMD,
777 .family = 16,
778 .model = 2,
779 .stepping = 3,
780 /* Missing: CPUID_HT */
781 .features[FEAT_1_EDX] =
782 PPRO_FEATURES |
783 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
784 CPUID_PSE36 | CPUID_VME,
785 .features[FEAT_1_ECX] =
786 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
787 CPUID_EXT_POPCNT,
788 .features[FEAT_8000_0001_EDX] =
789 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
790 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
791 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
792 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
793 CPUID_EXT3_CR8LEG,
794 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
795 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
796 .features[FEAT_8000_0001_ECX] =
797 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
798 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
799 /* Missing: CPUID_SVM_LBRV */
800 .features[FEAT_SVM] =
801 CPUID_SVM_NPT,
802 .xlevel = 0x8000001A,
803 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
806 .name = "core2duo",
807 .level = 10,
808 .vendor = CPUID_VENDOR_INTEL,
809 .family = 6,
810 .model = 15,
811 .stepping = 11,
812 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
813 .features[FEAT_1_EDX] =
814 PPRO_FEATURES |
815 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
816 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
817 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
818 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
819 .features[FEAT_1_ECX] =
820 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
821 CPUID_EXT_CX16,
822 .features[FEAT_8000_0001_EDX] =
823 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
824 .features[FEAT_8000_0001_ECX] =
825 CPUID_EXT3_LAHF_LM,
826 .xlevel = 0x80000008,
827 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
830 .name = "kvm64",
831 .level = 0xd,
832 .vendor = CPUID_VENDOR_INTEL,
833 .family = 15,
834 .model = 6,
835 .stepping = 1,
836 /* Missing: CPUID_HT */
837 .features[FEAT_1_EDX] =
838 PPRO_FEATURES | CPUID_VME |
839 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
840 CPUID_PSE36,
841 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
842 .features[FEAT_1_ECX] =
843 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
844 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
845 .features[FEAT_8000_0001_EDX] =
846 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
847 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
848 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
849 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
850 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
851 .features[FEAT_8000_0001_ECX] =
853 .xlevel = 0x80000008,
854 .model_id = "Common KVM processor"
857 .name = "qemu32",
858 .level = 4,
859 .vendor = CPUID_VENDOR_INTEL,
860 .family = 6,
861 .model = 6,
862 .stepping = 3,
863 .features[FEAT_1_EDX] =
864 PPRO_FEATURES,
865 .features[FEAT_1_ECX] =
866 CPUID_EXT_SSE3,
867 .xlevel = 0x80000004,
868 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
871 .name = "kvm32",
872 .level = 5,
873 .vendor = CPUID_VENDOR_INTEL,
874 .family = 15,
875 .model = 6,
876 .stepping = 1,
877 .features[FEAT_1_EDX] =
878 PPRO_FEATURES | CPUID_VME |
879 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
880 .features[FEAT_1_ECX] =
881 CPUID_EXT_SSE3,
882 .features[FEAT_8000_0001_ECX] =
884 .xlevel = 0x80000008,
885 .model_id = "Common 32-bit KVM processor"
888 .name = "coreduo",
889 .level = 10,
890 .vendor = CPUID_VENDOR_INTEL,
891 .family = 6,
892 .model = 14,
893 .stepping = 8,
894 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
895 .features[FEAT_1_EDX] =
896 PPRO_FEATURES | CPUID_VME |
897 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
898 CPUID_SS,
899 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
900 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
901 .features[FEAT_1_ECX] =
902 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
903 .features[FEAT_8000_0001_EDX] =
904 CPUID_EXT2_NX,
905 .xlevel = 0x80000008,
906 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
909 .name = "486",
910 .level = 1,
911 .vendor = CPUID_VENDOR_INTEL,
912 .family = 4,
913 .model = 8,
914 .stepping = 0,
915 .features[FEAT_1_EDX] =
916 I486_FEATURES,
917 .xlevel = 0,
920 .name = "pentium",
921 .level = 1,
922 .vendor = CPUID_VENDOR_INTEL,
923 .family = 5,
924 .model = 4,
925 .stepping = 3,
926 .features[FEAT_1_EDX] =
927 PENTIUM_FEATURES,
928 .xlevel = 0,
931 .name = "pentium2",
932 .level = 2,
933 .vendor = CPUID_VENDOR_INTEL,
934 .family = 6,
935 .model = 5,
936 .stepping = 2,
937 .features[FEAT_1_EDX] =
938 PENTIUM2_FEATURES,
939 .xlevel = 0,
942 .name = "pentium3",
943 .level = 3,
944 .vendor = CPUID_VENDOR_INTEL,
945 .family = 6,
946 .model = 7,
947 .stepping = 3,
948 .features[FEAT_1_EDX] =
949 PENTIUM3_FEATURES,
950 .xlevel = 0,
953 .name = "athlon",
954 .level = 2,
955 .vendor = CPUID_VENDOR_AMD,
956 .family = 6,
957 .model = 2,
958 .stepping = 3,
959 .features[FEAT_1_EDX] =
960 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
961 CPUID_MCA,
962 .features[FEAT_8000_0001_EDX] =
963 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
964 .xlevel = 0x80000008,
965 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
968 .name = "n270",
969 .level = 10,
970 .vendor = CPUID_VENDOR_INTEL,
971 .family = 6,
972 .model = 28,
973 .stepping = 2,
974 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
975 .features[FEAT_1_EDX] =
976 PPRO_FEATURES |
977 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
978 CPUID_ACPI | CPUID_SS,
 979         /* Some CPUs have no CPUID_SEP */
980 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
981 * CPUID_EXT_XTPR */
982 .features[FEAT_1_ECX] =
983 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
984 CPUID_EXT_MOVBE,
985 .features[FEAT_8000_0001_EDX] =
986 CPUID_EXT2_NX,
987 .features[FEAT_8000_0001_ECX] =
988 CPUID_EXT3_LAHF_LM,
989 .xlevel = 0x80000008,
990 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
993 .name = "Conroe",
994 .level = 10,
995 .vendor = CPUID_VENDOR_INTEL,
996 .family = 6,
997 .model = 15,
998 .stepping = 3,
999 .features[FEAT_1_EDX] =
1000 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1001 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1002 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1003 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1004 CPUID_DE | CPUID_FP87,
1005 .features[FEAT_1_ECX] =
1006 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1007 .features[FEAT_8000_0001_EDX] =
1008 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1009 .features[FEAT_8000_0001_ECX] =
1010 CPUID_EXT3_LAHF_LM,
1011 .xlevel = 0x80000008,
1012 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1015 .name = "Penryn",
1016 .level = 10,
1017 .vendor = CPUID_VENDOR_INTEL,
1018 .family = 6,
1019 .model = 23,
1020 .stepping = 3,
1021 .features[FEAT_1_EDX] =
1022 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1023 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1024 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1025 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1026 CPUID_DE | CPUID_FP87,
1027 .features[FEAT_1_ECX] =
1028 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1029 CPUID_EXT_SSE3,
1030 .features[FEAT_8000_0001_EDX] =
1031 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1032 .features[FEAT_8000_0001_ECX] =
1033 CPUID_EXT3_LAHF_LM,
1034 .xlevel = 0x80000008,
1035 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1038 .name = "Nehalem",
1039 .level = 11,
1040 .vendor = CPUID_VENDOR_INTEL,
1041 .family = 6,
1042 .model = 26,
1043 .stepping = 3,
1044 .features[FEAT_1_EDX] =
1045 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1046 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1047 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1048 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1049 CPUID_DE | CPUID_FP87,
1050 .features[FEAT_1_ECX] =
1051 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1052 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1053 .features[FEAT_8000_0001_EDX] =
1054 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1055 .features[FEAT_8000_0001_ECX] =
1056 CPUID_EXT3_LAHF_LM,
1057 .xlevel = 0x80000008,
1058 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1061 .name = "Westmere",
1062 .level = 11,
1063 .vendor = CPUID_VENDOR_INTEL,
1064 .family = 6,
1065 .model = 44,
1066 .stepping = 1,
1067 .features[FEAT_1_EDX] =
1068 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1069 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1070 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1071 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1072 CPUID_DE | CPUID_FP87,
1073 .features[FEAT_1_ECX] =
1074 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1075 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1076 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1077 .features[FEAT_8000_0001_EDX] =
1078 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1079 .features[FEAT_8000_0001_ECX] =
1080 CPUID_EXT3_LAHF_LM,
1081 .features[FEAT_6_EAX] =
1082 CPUID_6_EAX_ARAT,
1083 .xlevel = 0x80000008,
1084 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1087 .name = "SandyBridge",
1088 .level = 0xd,
1089 .vendor = CPUID_VENDOR_INTEL,
1090 .family = 6,
1091 .model = 42,
1092 .stepping = 1,
1093 .features[FEAT_1_EDX] =
1094 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1095 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1096 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1097 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1098 CPUID_DE | CPUID_FP87,
1099 .features[FEAT_1_ECX] =
1100 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1101 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1102 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1103 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1104 CPUID_EXT_SSE3,
1105 .features[FEAT_8000_0001_EDX] =
1106 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1107 CPUID_EXT2_SYSCALL,
1108 .features[FEAT_8000_0001_ECX] =
1109 CPUID_EXT3_LAHF_LM,
1110 .features[FEAT_XSAVE] =
1111 CPUID_XSAVE_XSAVEOPT,
1112 .features[FEAT_6_EAX] =
1113 CPUID_6_EAX_ARAT,
1114 .xlevel = 0x80000008,
1115 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1118 .name = "IvyBridge",
1119 .level = 0xd,
1120 .vendor = CPUID_VENDOR_INTEL,
1121 .family = 6,
1122 .model = 58,
1123 .stepping = 9,
1124 .features[FEAT_1_EDX] =
1125 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1126 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1127 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1128 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1129 CPUID_DE | CPUID_FP87,
1130 .features[FEAT_1_ECX] =
1131 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1132 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1133 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1134 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1135 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1136 .features[FEAT_7_0_EBX] =
1137 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1138 CPUID_7_0_EBX_ERMS,
1139 .features[FEAT_8000_0001_EDX] =
1140 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1141 CPUID_EXT2_SYSCALL,
1142 .features[FEAT_8000_0001_ECX] =
1143 CPUID_EXT3_LAHF_LM,
1144 .features[FEAT_XSAVE] =
1145 CPUID_XSAVE_XSAVEOPT,
1146 .features[FEAT_6_EAX] =
1147 CPUID_6_EAX_ARAT,
1148 .xlevel = 0x80000008,
1149 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1152 .name = "Haswell-noTSX",
1153 .level = 0xd,
1154 .vendor = CPUID_VENDOR_INTEL,
1155 .family = 6,
1156 .model = 60,
1157 .stepping = 1,
1158 .features[FEAT_1_EDX] =
1159 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1160 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1161 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1162 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1163 CPUID_DE | CPUID_FP87,
1164 .features[FEAT_1_ECX] =
1165 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1166 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1167 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1168 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1169 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1170 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1171 .features[FEAT_8000_0001_EDX] =
1172 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1173 CPUID_EXT2_SYSCALL,
1174 .features[FEAT_8000_0001_ECX] =
1175 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1176 .features[FEAT_7_0_EBX] =
1177 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1178 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1179 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1180 .features[FEAT_XSAVE] =
1181 CPUID_XSAVE_XSAVEOPT,
1182 .features[FEAT_6_EAX] =
1183 CPUID_6_EAX_ARAT,
1184 .xlevel = 0x80000008,
1185 .model_id = "Intel Core Processor (Haswell, no TSX)",
1186 }, {
1187 .name = "Haswell",
1188 .level = 0xd,
1189 .vendor = CPUID_VENDOR_INTEL,
1190 .family = 6,
1191 .model = 60,
1192 .stepping = 4,
1193 .features[FEAT_1_EDX] =
1194 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1195 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1196 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1197 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1198 CPUID_DE | CPUID_FP87,
1199 .features[FEAT_1_ECX] =
1200 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1201 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1202 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1203 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1204 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1205 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1206 .features[FEAT_8000_0001_EDX] =
1207 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1208 CPUID_EXT2_SYSCALL,
1209 .features[FEAT_8000_0001_ECX] =
1210 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1211 .features[FEAT_7_0_EBX] =
1212 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1213 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1214 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1215 CPUID_7_0_EBX_RTM,
1216 .features[FEAT_XSAVE] =
1217 CPUID_XSAVE_XSAVEOPT,
1218 .features[FEAT_6_EAX] =
1219 CPUID_6_EAX_ARAT,
1220 .xlevel = 0x80000008,
1221 .model_id = "Intel Core Processor (Haswell)",
1224 .name = "Broadwell-noTSX",
1225 .level = 0xd,
1226 .vendor = CPUID_VENDOR_INTEL,
1227 .family = 6,
1228 .model = 61,
1229 .stepping = 2,
1230 .features[FEAT_1_EDX] =
1231 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1232 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1233 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1234 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1235 CPUID_DE | CPUID_FP87,
1236 .features[FEAT_1_ECX] =
1237 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1238 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1239 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1240 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1241 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1242 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1243 .features[FEAT_8000_0001_EDX] =
1244 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1245 CPUID_EXT2_SYSCALL,
1246 .features[FEAT_8000_0001_ECX] =
1247 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1248 .features[FEAT_7_0_EBX] =
1249 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1250 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1251 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1252 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1253 CPUID_7_0_EBX_SMAP,
1254 .features[FEAT_XSAVE] =
1255 CPUID_XSAVE_XSAVEOPT,
1256 .features[FEAT_6_EAX] =
1257 CPUID_6_EAX_ARAT,
1258 .xlevel = 0x80000008,
1259 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1262 .name = "Broadwell",
1263 .level = 0xd,
1264 .vendor = CPUID_VENDOR_INTEL,
1265 .family = 6,
1266 .model = 61,
1267 .stepping = 2,
1268 .features[FEAT_1_EDX] =
1269 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1270 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1271 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1272 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1273 CPUID_DE | CPUID_FP87,
1274 .features[FEAT_1_ECX] =
1275 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1276 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1277 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1278 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1279 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1280 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1281 .features[FEAT_8000_0001_EDX] =
1282 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1283 CPUID_EXT2_SYSCALL,
1284 .features[FEAT_8000_0001_ECX] =
1285 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1286 .features[FEAT_7_0_EBX] =
1287 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1288 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1289 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1290 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1291 CPUID_7_0_EBX_SMAP,
1292 .features[FEAT_XSAVE] =
1293 CPUID_XSAVE_XSAVEOPT,
1294 .features[FEAT_6_EAX] =
1295 CPUID_6_EAX_ARAT,
1296 .xlevel = 0x80000008,
1297 .model_id = "Intel Core Processor (Broadwell)",
1300 .name = "Skylake-Client",
1301 .level = 0xd,
1302 .vendor = CPUID_VENDOR_INTEL,
1303 .family = 6,
1304 .model = 94,
1305 .stepping = 3,
1306 .features[FEAT_1_EDX] =
1307 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1308 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1309 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1310 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1311 CPUID_DE | CPUID_FP87,
1312 .features[FEAT_1_ECX] =
1313 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1314 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1315 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1316 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1317 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1318 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1319 .features[FEAT_8000_0001_EDX] =
1320 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1321 CPUID_EXT2_SYSCALL,
1322 .features[FEAT_8000_0001_ECX] =
1323 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1324 .features[FEAT_7_0_EBX] =
1325 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1326 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1327 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1328 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1329 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1330 /* Missing: XSAVES (not supported by some Linux versions,
1331 * including v4.1 to v4.12).
1332 * KVM doesn't yet expose any XSAVES state save component,
1333 * and the only one defined in Skylake (processor tracing)
1334 * probably will block migration anyway.
1336 .features[FEAT_XSAVE] =
1337 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1338 CPUID_XSAVE_XGETBV1,
1339 .features[FEAT_6_EAX] =
1340 CPUID_6_EAX_ARAT,
1341 .xlevel = 0x80000008,
1342 .model_id = "Intel Core Processor (Skylake)",
1345 .name = "Skylake-Server",
1346 .level = 0xd,
1347 .vendor = CPUID_VENDOR_INTEL,
1348 .family = 6,
1349 .model = 85,
1350 .stepping = 4,
1351 .features[FEAT_1_EDX] =
1352 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1353 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1354 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1355 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1356 CPUID_DE | CPUID_FP87,
1357 .features[FEAT_1_ECX] =
1358 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1359 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1360 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1361 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1362 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1363 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1364 .features[FEAT_8000_0001_EDX] =
1365 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
1366 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1367 .features[FEAT_8000_0001_ECX] =
1368 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1369 .features[FEAT_7_0_EBX] =
1370 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1371 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1372 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1373 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1374 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
1375 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
1376 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
1377 CPUID_7_0_EBX_AVX512VL,
1378 /* Missing: XSAVES (not supported by some Linux versions,
1379 * including v4.1 to v4.12).
1380 * KVM doesn't yet expose any XSAVES state save component,
1381 * and the only one defined in Skylake (processor tracing)
1382 * probably will block migration anyway.
1384 .features[FEAT_XSAVE] =
1385 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1386 CPUID_XSAVE_XGETBV1,
1387 .features[FEAT_6_EAX] =
1388 CPUID_6_EAX_ARAT,
1389 .xlevel = 0x80000008,
1390 .model_id = "Intel Xeon Processor (Skylake)",
1393 .name = "Opteron_G1",
1394 .level = 5,
1395 .vendor = CPUID_VENDOR_AMD,
1396 .family = 15,
1397 .model = 6,
1398 .stepping = 1,
1399 .features[FEAT_1_EDX] =
1400 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1401 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1402 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1403 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1404 CPUID_DE | CPUID_FP87,
1405 .features[FEAT_1_ECX] =
1406 CPUID_EXT_SSE3,
1407 .features[FEAT_8000_0001_EDX] =
1408 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1409 .xlevel = 0x80000008,
1410 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1413 .name = "Opteron_G2",
1414 .level = 5,
1415 .vendor = CPUID_VENDOR_AMD,
1416 .family = 15,
1417 .model = 6,
1418 .stepping = 1,
1419 .features[FEAT_1_EDX] =
1420 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1421 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1422 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1423 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1424 CPUID_DE | CPUID_FP87,
1425 .features[FEAT_1_ECX] =
1426 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1427 /* Missing: CPUID_EXT2_RDTSCP */
1428 .features[FEAT_8000_0001_EDX] =
1429 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1430 .features[FEAT_8000_0001_ECX] =
1431 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1432 .xlevel = 0x80000008,
1433 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1436 .name = "Opteron_G3",
1437 .level = 5,
1438 .vendor = CPUID_VENDOR_AMD,
1439 .family = 16,
1440 .model = 2,
1441 .stepping = 3,
1442 .features[FEAT_1_EDX] =
1443 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1444 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1445 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1446 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1447 CPUID_DE | CPUID_FP87,
1448 .features[FEAT_1_ECX] =
1449 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1450 CPUID_EXT_SSE3,
1451 /* Missing: CPUID_EXT2_RDTSCP */
1452 .features[FEAT_8000_0001_EDX] =
1453 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1454 .features[FEAT_8000_0001_ECX] =
1455 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1456 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1457 .xlevel = 0x80000008,
1458 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1461 .name = "Opteron_G4",
1462 .level = 0xd,
1463 .vendor = CPUID_VENDOR_AMD,
1464 .family = 21,
1465 .model = 1,
1466 .stepping = 2,
1467 .features[FEAT_1_EDX] =
1468 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1469 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1470 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1471 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1472 CPUID_DE | CPUID_FP87,
1473 .features[FEAT_1_ECX] =
1474 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1475 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1476 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1477 CPUID_EXT_SSE3,
1478 /* Missing: CPUID_EXT2_RDTSCP */
1479 .features[FEAT_8000_0001_EDX] =
1480 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
1481 CPUID_EXT2_SYSCALL,
1482 .features[FEAT_8000_0001_ECX] =
1483 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1484 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1485 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1486 CPUID_EXT3_LAHF_LM,
1487 /* no xsaveopt! */
1488 .xlevel = 0x8000001A,
1489 .model_id = "AMD Opteron 62xx class CPU",
1492 .name = "Opteron_G5",
1493 .level = 0xd,
1494 .vendor = CPUID_VENDOR_AMD,
1495 .family = 21,
1496 .model = 2,
1497 .stepping = 0,
1498 .features[FEAT_1_EDX] =
1499 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1500 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1501 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1502 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1503 CPUID_DE | CPUID_FP87,
1504 .features[FEAT_1_ECX] =
1505 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1506 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1507 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1508 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1509 /* Missing: CPUID_EXT2_RDTSCP */
1510 .features[FEAT_8000_0001_EDX] =
1511 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
1512 CPUID_EXT2_SYSCALL,
1513 .features[FEAT_8000_0001_ECX] =
1514 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1515 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1516 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1517 CPUID_EXT3_LAHF_LM,
1518 /* no xsaveopt! */
1519 .xlevel = 0x8000001A,
1520 .model_id = "AMD Opteron 63xx class CPU",
1523 .name = "EPYC",
1524 .level = 0xd,
1525 .vendor = CPUID_VENDOR_AMD,
1526 .family = 23,
1527 .model = 1,
1528 .stepping = 2,
1529 .features[FEAT_1_EDX] =
1530 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
1531 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
1532 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
1533 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
1534 CPUID_VME | CPUID_FP87,
1535 .features[FEAT_1_ECX] =
1536 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
1537 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
1538 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1539 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
1540 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1541 .features[FEAT_8000_0001_EDX] =
1542 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
1543 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
1544 CPUID_EXT2_SYSCALL,
1545 .features[FEAT_8000_0001_ECX] =
1546 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
1547 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
1548 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1549 .features[FEAT_7_0_EBX] =
1550 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
1551 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
1552 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
1553 CPUID_7_0_EBX_SHA_NI,
1554 /* Missing: XSAVES (not supported by some Linux versions,
1555 * including v4.1 to v4.12).
1556 * KVM doesn't yet expose any XSAVES state save component.
1558 .features[FEAT_XSAVE] =
1559 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1560 CPUID_XSAVE_XGETBV1,
1561 .features[FEAT_6_EAX] =
1562 CPUID_6_EAX_ARAT,
1563 .xlevel = 0x8000000A,
1564 .model_id = "AMD EPYC Processor",
1568 typedef struct PropValue {
1569 const char *prop, *value;
1570 } PropValue;
1572 /* KVM-specific features that are automatically added/removed
1573 * from all CPU models when KVM is enabled.
1575 static PropValue kvm_default_props[] = {
1576 { "kvmclock", "on" },
1577 { "kvm-nopiodelay", "on" },
1578 { "kvm-asyncpf", "on" },
1579 { "kvm-steal-time", "on" },
1580 { "kvm-pv-eoi", "on" },
1581 { "kvmclock-stable-bit", "on" },
1582 { "x2apic", "on" },
1583 { "acpi", "off" },
1584 { "monitor", "off" },
1585 { "svm", "off" },
1586 { NULL, NULL },
1589 /* TCG-specific defaults that override all CPU models when using TCG
1591 static PropValue tcg_default_props[] = {
1592 { "vme", "off" },
1593 { NULL, NULL },
1597 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1599 PropValue *pv;
1600 for (pv = kvm_default_props; pv->prop; pv++) {
1601 if (!strcmp(pv->prop, prop)) {
1602 pv->value = value;
1603 break;
1607 /* It is valid to call this function only for properties that
1608 * are already present in the kvm_default_props table.
1610 assert(pv->prop);
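/*
 * Illustrative usage sketch: a machine-type compat hook can flip one of the
 * defaults listed in kvm_default_props above, e.g. turning x2apic back off.
 * The property must already be present in the table, as the assert above
 * enforces; the wrapper below is hypothetical.
 */
static inline void example_compat_disable_x2apic(void)
{
    x86_cpu_change_kvm_default("x2apic", "off");
}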
1613 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1614 bool migratable_only);
1616 static bool lmce_supported(void)
1618 uint64_t mce_cap = 0;
1620 #ifdef CONFIG_KVM
1621 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
1622 return false;
1624 #endif
1626 return !!(mce_cap & MCG_LMCE_P);
1629 #define CPUID_MODEL_ID_SZ 48
1632 * cpu_x86_fill_model_id:
1633 * Get CPUID model ID string from host CPU.
1635 * @str should have at least CPUID_MODEL_ID_SZ bytes
1637 * The function does NOT add a null terminator to the string
1638 * automatically.
1640 static int cpu_x86_fill_model_id(char *str)
1642 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1643 int i;
1645 for (i = 0; i < 3; i++) {
1646 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
1647 memcpy(str + i * 16 + 0, &eax, 4);
1648 memcpy(str + i * 16 + 4, &ebx, 4);
1649 memcpy(str + i * 16 + 8, &ecx, 4);
1650 memcpy(str + i * 16 + 12, &edx, 4);
1652 return 0;
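/*
 * Illustrative sketch: because cpu_x86_fill_model_id() writes exactly 48
 * bytes and no terminator, callers zero-initialize a buffer of
 * CPUID_MODEL_ID_SZ + 1 bytes, as max_x86_cpu_initfn() does below. The
 * wrapper name is hypothetical.
 */
static inline void example_host_model_id(char out[CPUID_MODEL_ID_SZ + 1])
{
    memset(out, 0, CPUID_MODEL_ID_SZ + 1);
    cpu_x86_fill_model_id(out); /* fills 48 bytes, '\0' already in place */
}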
1655 static Property max_x86_cpu_properties[] = {
1656 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1657 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
1658 DEFINE_PROP_END_OF_LIST()
1661 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
1663 DeviceClass *dc = DEVICE_CLASS(oc);
1664 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1666 xcc->ordering = 9;
1668 xcc->model_description =
1669 "Enables all features supported by the accelerator in the current host";
1671 dc->props = max_x86_cpu_properties;
1674 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
1676 static void max_x86_cpu_initfn(Object *obj)
1678 X86CPU *cpu = X86_CPU(obj);
1679 CPUX86State *env = &cpu->env;
1680 KVMState *s = kvm_state;
1682 /* We can't fill the features array here because we don't know yet if
1683 * "migratable" is true or false.
1685 cpu->max_features = true;
1687 if (kvm_enabled()) {
1688 char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
1689 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
1690 int family, model, stepping;
1692 host_vendor_fms(vendor, &family, &model, &stepping);
1694 cpu_x86_fill_model_id(model_id);
1696 object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
1697 object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
1698 object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
1699 object_property_set_int(OBJECT(cpu), stepping, "stepping",
1700 &error_abort);
1701 object_property_set_str(OBJECT(cpu), model_id, "model-id",
1702 &error_abort);
1704 env->cpuid_min_level =
1705 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1706 env->cpuid_min_xlevel =
1707 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1708 env->cpuid_min_xlevel2 =
1709 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1711 if (lmce_supported()) {
1712 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
1714 } else {
1715 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
1716 "vendor", &error_abort);
1717 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
1718 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
1719 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
1720 object_property_set_str(OBJECT(cpu),
1721 "QEMU TCG CPU version " QEMU_HW_VERSION,
1722 "model-id", &error_abort);
1725 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1728 static const TypeInfo max_x86_cpu_type_info = {
1729 .name = X86_CPU_TYPE_NAME("max"),
1730 .parent = TYPE_X86_CPU,
1731 .instance_init = max_x86_cpu_initfn,
1732 .class_init = max_x86_cpu_class_init,
1735 #ifdef CONFIG_KVM
1737 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1739 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1741 xcc->kvm_required = true;
1742 xcc->ordering = 8;
1744 xcc->model_description =
1745 "KVM processor with all supported host features "
1746 "(only available in KVM mode)";
1749 static const TypeInfo host_x86_cpu_type_info = {
1750 .name = X86_CPU_TYPE_NAME("host"),
1751 .parent = X86_CPU_TYPE_NAME("max"),
1752 .class_init = host_x86_cpu_class_init,
1755 #endif
1757 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1759 FeatureWordInfo *f = &feature_word_info[w];
1760 int i;
1762 for (i = 0; i < 32; ++i) {
1763 if ((1UL << i) & mask) {
1764 const char *reg = get_register_name_32(f->cpuid_reg);
1765 assert(reg);
1766 warn_report("%s doesn't support requested feature: "
1767 "CPUID.%02XH:%s%s%s [bit %d]",
1768 kvm_enabled() ? "host" : "TCG",
1769 f->cpuid_eax, reg,
1770 f->feat_names[i] ? "." : "",
1771 f->feat_names[i] ? f->feat_names[i] : "", i);
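/*
 * For example, requesting "hle" on an accelerator that lacks it would
 * produce a warning along the lines of
 *
 *     host doesn't support requested feature: CPUID.07H:EBX.hle [bit 4]
 *
 * (the exact word, register and bit depend on which feature was filtered).
 */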
1776 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1777 const char *name, void *opaque,
1778 Error **errp)
1780 X86CPU *cpu = X86_CPU(obj);
1781 CPUX86State *env = &cpu->env;
1782 int64_t value;
1784 value = (env->cpuid_version >> 8) & 0xf;
1785 if (value == 0xf) {
1786 value += (env->cpuid_version >> 20) & 0xff;
1788 visit_type_int(v, name, &value, errp);
1791 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1792 const char *name, void *opaque,
1793 Error **errp)
1795 X86CPU *cpu = X86_CPU(obj);
1796 CPUX86State *env = &cpu->env;
1797 const int64_t min = 0;
1798 const int64_t max = 0xff + 0xf;
1799 Error *local_err = NULL;
1800 int64_t value;
1802 visit_type_int(v, name, &value, &local_err);
1803 if (local_err) {
1804 error_propagate(errp, local_err);
1805 return;
1807 if (value < min || value > max) {
1808 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1809 name ? name : "null", value, min, max);
1810 return;
1813 env->cpuid_version &= ~0xff00f00;
1814 if (value > 0x0f) {
1815 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1816 } else {
1817 env->cpuid_version |= value << 8;
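/*
 * Worked example of the encoding above: "family=6" simply lands in
 * cpuid_version bits 11:8 (0x600), while "family=21" (0x15) overflows the
 * 4-bit base field and is stored as base family 0xF plus extended family
 * 0x15 - 0xF = 0x6 in bits 27:20, i.e. cpuid_version |= 0xF00 | (0x6 << 20).
 * The getter above reverses this: 0xF + 0x6 = 0x15.
 */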
1821 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1822 const char *name, void *opaque,
1823 Error **errp)
1825 X86CPU *cpu = X86_CPU(obj);
1826 CPUX86State *env = &cpu->env;
1827 int64_t value;
1829 value = (env->cpuid_version >> 4) & 0xf;
1830 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1831 visit_type_int(v, name, &value, errp);
1834 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1835 const char *name, void *opaque,
1836 Error **errp)
1838 X86CPU *cpu = X86_CPU(obj);
1839 CPUX86State *env = &cpu->env;
1840 const int64_t min = 0;
1841 const int64_t max = 0xff;
1842 Error *local_err = NULL;
1843 int64_t value;
1845 visit_type_int(v, name, &value, &local_err);
1846 if (local_err) {
1847 error_propagate(errp, local_err);
1848 return;
1850 if (value < min || value > max) {
1851 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1852 name ? name : "null", value, min, max);
1853 return;
1856 env->cpuid_version &= ~0xf00f0;
1857 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
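/*
 * Worked example: "model=0x3A" is split into the base model nibble 0xA in
 * cpuid_version bits 7:4 and the extended model nibble 0x3 in bits 19:16,
 * i.e. ((0x3A & 0xf) << 4) | ((0x3A >> 4) << 16); the getter above
 * reassembles 0xA | (0x3 << 4) = 0x3A.
 */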
1860 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1861 const char *name, void *opaque,
1862 Error **errp)
1864 X86CPU *cpu = X86_CPU(obj);
1865 CPUX86State *env = &cpu->env;
1866 int64_t value;
1868 value = env->cpuid_version & 0xf;
1869 visit_type_int(v, name, &value, errp);
1872 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1873 const char *name, void *opaque,
1874 Error **errp)
1876 X86CPU *cpu = X86_CPU(obj);
1877 CPUX86State *env = &cpu->env;
1878 const int64_t min = 0;
1879 const int64_t max = 0xf;
1880 Error *local_err = NULL;
1881 int64_t value;
1883 visit_type_int(v, name, &value, &local_err);
1884 if (local_err) {
1885 error_propagate(errp, local_err);
1886 return;
1888 if (value < min || value > max) {
1889 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1890 name ? name : "null", value, min, max);
1891 return;
1894 env->cpuid_version &= ~0xf;
1895 env->cpuid_version |= value & 0xf;
1898 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1900 X86CPU *cpu = X86_CPU(obj);
1901 CPUX86State *env = &cpu->env;
1902 char *value;
1904 value = g_malloc(CPUID_VENDOR_SZ + 1);
1905 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1906 env->cpuid_vendor3);
1907 return value;
1910 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1911 Error **errp)
1913 X86CPU *cpu = X86_CPU(obj);
1914 CPUX86State *env = &cpu->env;
1915 int i;
1917 if (strlen(value) != CPUID_VENDOR_SZ) {
1918 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1919 return;
1922 env->cpuid_vendor1 = 0;
1923 env->cpuid_vendor2 = 0;
1924 env->cpuid_vendor3 = 0;
1925 for (i = 0; i < 4; i++) {
1926 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1927 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1928 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
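/*
 * Worked example: the 12-character string "GenuineIntel" is packed
 * little-endian, four characters per register:
 *
 *     cpuid_vendor1 = 'G' | 'e' << 8 | 'n' << 16 | 'u' << 24 = 0x756e6547
 *     cpuid_vendor2 = 'i' | 'n' << 8 | 'e' << 16 | 'I' << 24 = 0x49656e69
 *     cpuid_vendor3 = 'n' | 't' << 8 | 'e' << 16 | 'l' << 24 = 0x6c65746e
 *
 * matching the EBX/EDX/ECX layout returned for CPUID leaf 0 below.
 */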
1932 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1934 X86CPU *cpu = X86_CPU(obj);
1935 CPUX86State *env = &cpu->env;
1936 char *value;
1937 int i;
1939 value = g_malloc(48 + 1);
1940 for (i = 0; i < 48; i++) {
1941 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1943 value[48] = '\0';
1944 return value;
1947 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1948 Error **errp)
1950 X86CPU *cpu = X86_CPU(obj);
1951 CPUX86State *env = &cpu->env;
1952 int c, len, i;
1954 if (model_id == NULL) {
1955 model_id = "";
1957 len = strlen(model_id);
1958 memset(env->cpuid_model, 0, 48);
1959 for (i = 0; i < 48; i++) {
1960 if (i >= len) {
1961 c = '\0';
1962 } else {
1963 c = (uint8_t)model_id[i];
1965 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
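/*
 * Worked example of the packing above: character i of the model-id lands in
 * byte (i & 3) of the 32-bit word cpuid_model[i >> 2], so a string starting
 * with "QEMU" yields
 *
 *     cpuid_model[0] = 'Q' | 'E' << 8 | 'M' << 16 | 'U' << 24 = 0x554d4551
 *
 * and x86_cpuid_get_model_id() above unpacks it the same way.
 */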
1969 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1970 void *opaque, Error **errp)
1972 X86CPU *cpu = X86_CPU(obj);
1973 int64_t value;
1975 value = cpu->env.tsc_khz * 1000;
1976 visit_type_int(v, name, &value, errp);
1979 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1980 void *opaque, Error **errp)
1982 X86CPU *cpu = X86_CPU(obj);
1983 const int64_t min = 0;
1984 const int64_t max = INT64_MAX;
1985 Error *local_err = NULL;
1986 int64_t value;
1988 visit_type_int(v, name, &value, &local_err);
1989 if (local_err) {
1990 error_propagate(errp, local_err);
1991 return;
1993 if (value < min || value > max) {
1994 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1995 name ? name : "null", value, min, max);
1996 return;
1999 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
2002 /* Generic getter for "feature-words" and "filtered-features" properties */
2003 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
2004 const char *name, void *opaque,
2005 Error **errp)
2007 uint32_t *array = (uint32_t *)opaque;
2008 FeatureWord w;
2009 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
2010 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
2011 X86CPUFeatureWordInfoList *list = NULL;
2013 for (w = 0; w < FEATURE_WORDS; w++) {
2014 FeatureWordInfo *wi = &feature_word_info[w];
2015 X86CPUFeatureWordInfo *qwi = &word_infos[w];
2016 qwi->cpuid_input_eax = wi->cpuid_eax;
2017 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
2018 qwi->cpuid_input_ecx = wi->cpuid_ecx;
2019 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
2020 qwi->features = array[w];
2022 /* List will be in reverse order, but order shouldn't matter */
2023 list_entries[w].next = list;
2024 list_entries[w].value = &word_infos[w];
2025 list = &list_entries[w];
2028 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
2031 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
2032 void *opaque, Error **errp)
2034 X86CPU *cpu = X86_CPU(obj);
2035 int64_t value = cpu->hyperv_spinlock_attempts;
2037 visit_type_int(v, name, &value, errp);
2040 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
2041 void *opaque, Error **errp)
2043 const int64_t min = 0xFFF;
2044 const int64_t max = UINT_MAX;
2045 X86CPU *cpu = X86_CPU(obj);
2046 Error *err = NULL;
2047 int64_t value;
2049 visit_type_int(v, name, &value, &err);
2050 if (err) {
2051 error_propagate(errp, err);
2052 return;
2055 if (value < min || value > max) {
2056 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
2057 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
2058 object_get_typename(obj), name ? name : "null",
2059 value, min, max);
2060 return;
2062 cpu->hyperv_spinlock_attempts = value;
2065 static const PropertyInfo qdev_prop_spinlocks = {
2066 .name = "int",
2067 .get = x86_get_hv_spinlocks,
2068 .set = x86_set_hv_spinlocks,
2071 /* Convert all '_' in a feature string option name to '-', to make feature
2072 * name conform to QOM property naming rule, which uses '-' instead of '_'.
2074 static inline void feat2prop(char *s)
2076 while ((s = strchr(s, '_'))) {
2077 *s = '-';
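/* Example: feat2prop() rewrites "sse4_2" to "sse4-2" in place. */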
2081 /* Return the feature property name for a feature flag bit */
2082 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
2084 /* XSAVE components are automatically enabled by other features,
2085 * so return the original feature name instead
2087 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
2088 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
2090 if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
2091 x86_ext_save_areas[comp].bits) {
2092 w = x86_ext_save_areas[comp].feature;
2093 bitnr = ctz32(x86_ext_save_areas[comp].bits);
2097 assert(bitnr < 32);
2098 assert(w < FEATURE_WORDS);
2099 return feature_word_info[w].feat_names[bitnr];
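/*
 * Example (assuming the usual component layout, where index 2 of
 * x86_ext_save_areas is the AVX/YMM state): asking for the name of bit 2 of
 * FEAT_XSAVE_COMP_LO does not return an XSAVE component name; the lookup is
 * redirected to CPUID_EXT_AVX in FEAT_1_ECX and the function returns "avx".
 */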
2102 /* Compatibility hack to maintain the legacy +-feat semantics,
2103  * where +-feat overwrites any feature set by
2104  * feat=on|feat even if the latter is parsed after +-feat
2105 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
2107 static GList *plus_features, *minus_features;
2109 static gint compare_string(gconstpointer a, gconstpointer b)
2111 return g_strcmp0(a, b);
2114 /* Parse "+feature,-feature,feature=foo" CPU feature string
2116 static void x86_cpu_parse_featurestr(const char *typename, char *features,
2117 Error **errp)
2119 char *featurestr; /* Single "key=value" string being parsed */
2120 static bool cpu_globals_initialized;
2121 bool ambiguous = false;
2123 if (cpu_globals_initialized) {
2124 return;
2126 cpu_globals_initialized = true;
2128 if (!features) {
2129 return;
2132 for (featurestr = strtok(features, ",");
2133 featurestr;
2134 featurestr = strtok(NULL, ",")) {
2135 const char *name;
2136 const char *val = NULL;
2137 char *eq = NULL;
2138 char num[32];
2139 GlobalProperty *prop;
2141 /* Compatibility syntax: */
2142 if (featurestr[0] == '+') {
2143 plus_features = g_list_append(plus_features,
2144 g_strdup(featurestr + 1));
2145 continue;
2146 } else if (featurestr[0] == '-') {
2147 minus_features = g_list_append(minus_features,
2148 g_strdup(featurestr + 1));
2149 continue;
2152 eq = strchr(featurestr, '=');
2153 if (eq) {
2154 *eq++ = 0;
2155 val = eq;
2156 } else {
2157 val = "on";
2160 feat2prop(featurestr);
2161 name = featurestr;
2163 if (g_list_find_custom(plus_features, name, compare_string)) {
2164 warn_report("Ambiguous CPU model string. "
2165 "Don't mix both \"+%s\" and \"%s=%s\"",
2166 name, name, val);
2167 ambiguous = true;
2169 if (g_list_find_custom(minus_features, name, compare_string)) {
2170 warn_report("Ambiguous CPU model string. "
2171 "Don't mix both \"-%s\" and \"%s=%s\"",
2172 name, name, val);
2173 ambiguous = true;
2176 /* Special case: */
2177 if (!strcmp(name, "tsc-freq")) {
2178 int ret;
2179 uint64_t tsc_freq;
2181 ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
2182 if (ret < 0 || tsc_freq > INT64_MAX) {
2183 error_setg(errp, "bad numerical value %s", val);
2184 return;
2186 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
2187 val = num;
2188 name = "tsc-frequency";
2191 prop = g_new0(typeof(*prop), 1);
2192 prop->driver = typename;
2193 prop->property = g_strdup(name);
2194 prop->value = g_strdup(val);
2195 prop->errp = &error_fatal;
2196 qdev_prop_register_global(prop);
2199 if (ambiguous) {
2200 warn_report("Compatibility of ambiguous CPU model "
2201 "strings won't be kept on future QEMU versions");
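/*
 * Illustrative example (feature string assumed): parsing
 * "+avx2,-x2apic,tsc_freq=2.5G" appends "avx2" to plus_features and
 * "x2apic" to minus_features, while the last item is rewritten by
 * feat2prop() to "tsc-freq", hits the special case above and is registered
 * as the global property tsc-frequency=2500000000 (metric suffix expanded
 * to Hz).
 */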
2205 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
2206 static int x86_cpu_filter_features(X86CPU *cpu);
2208 /* Check for missing features that may prevent the CPU class from
2209 * running using the current machine and accelerator.
2211 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
2212 strList **missing_feats)
2214 X86CPU *xc;
2215 FeatureWord w;
2216 Error *err = NULL;
2217 strList **next = missing_feats;
2219 if (xcc->kvm_required && !kvm_enabled()) {
2220 strList *new = g_new0(strList, 1);
2221 new->value = g_strdup("kvm");
2222 *missing_feats = new;
2223 return;
2226 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
2228 x86_cpu_expand_features(xc, &err);
2229 if (err) {
2230 /* Errors at x86_cpu_expand_features should never happen,
2231 * but in case it does, just report the model as not
2232 * runnable at all using the "type" property.
2234 strList *new = g_new0(strList, 1);
2235 new->value = g_strdup("type");
2236 *next = new;
2237 next = &new->next;
2240 x86_cpu_filter_features(xc);
2242 for (w = 0; w < FEATURE_WORDS; w++) {
2243 uint32_t filtered = xc->filtered_features[w];
2244 int i;
2245 for (i = 0; i < 32; i++) {
2246 if (filtered & (1UL << i)) {
2247 strList *new = g_new0(strList, 1);
2248 new->value = g_strdup(x86_cpu_feature_name(w, i));
2249 *next = new;
2250 next = &new->next;
2255 object_unref(OBJECT(xc));
2258 /* Print all cpuid feature names in featureset
2260 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2262 int bit;
2263 bool first = true;
2265 for (bit = 0; bit < 32; bit++) {
2266 if (featureset[bit]) {
2267 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2268 first = false;
2273 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
2274 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
2276 ObjectClass *class_a = (ObjectClass *)a;
2277 ObjectClass *class_b = (ObjectClass *)b;
2278 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
2279 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
2280 const char *name_a, *name_b;
2282 if (cc_a->ordering != cc_b->ordering) {
2283 return cc_a->ordering - cc_b->ordering;
2284 } else {
2285 name_a = object_class_get_name(class_a);
2286 name_b = object_class_get_name(class_b);
2287 return strcmp(name_a, name_b);
2291 static GSList *get_sorted_cpu_model_list(void)
2293 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
2294 list = g_slist_sort(list, x86_cpu_list_compare);
2295 return list;
2298 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
2300 ObjectClass *oc = data;
2301 X86CPUClass *cc = X86_CPU_CLASS(oc);
2302 CPUListState *s = user_data;
2303 char *name = x86_cpu_class_get_model_name(cc);
2304 const char *desc = cc->model_description;
2305 if (!desc && cc->cpu_def) {
2306 desc = cc->cpu_def->model_id;
2309 (*s->cpu_fprintf)(s->file, "x86 %16s %-48s\n",
2310 name, desc);
2311 g_free(name);
2314 /* list available CPU models and flags */
2315 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2317 int i;
2318 CPUListState s = {
2319 .file = f,
2320 .cpu_fprintf = cpu_fprintf,
2322 GSList *list;
2324 (*cpu_fprintf)(f, "Available CPUs:\n");
2325 list = get_sorted_cpu_model_list();
2326 g_slist_foreach(list, x86_cpu_list_entry, &s);
2327 g_slist_free(list);
2329 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2330 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2331 FeatureWordInfo *fw = &feature_word_info[i];
2333 (*cpu_fprintf)(f, " ");
2334 listflags(f, cpu_fprintf, fw->feat_names);
2335 (*cpu_fprintf)(f, "\n");
2339 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
2341 ObjectClass *oc = data;
2342 X86CPUClass *cc = X86_CPU_CLASS(oc);
2343 CpuDefinitionInfoList **cpu_list = user_data;
2344 CpuDefinitionInfoList *entry;
2345 CpuDefinitionInfo *info;
2347 info = g_malloc0(sizeof(*info));
2348 info->name = x86_cpu_class_get_model_name(cc);
2349 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
2350 info->has_unavailable_features = true;
2351 info->q_typename = g_strdup(object_class_get_name(oc));
2352 info->migration_safe = cc->migration_safe;
2353 info->has_migration_safe = true;
2354 info->q_static = cc->static_model;
2356 entry = g_malloc0(sizeof(*entry));
2357 entry->value = info;
2358 entry->next = *cpu_list;
2359 *cpu_list = entry;
2362 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2364 CpuDefinitionInfoList *cpu_list = NULL;
2365 GSList *list = get_sorted_cpu_model_list();
2366 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
2367 g_slist_free(list);
2368 return cpu_list;
2371 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2372 bool migratable_only)
2374 FeatureWordInfo *wi = &feature_word_info[w];
2375 uint32_t r;
2377 if (kvm_enabled()) {
2378 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2379 wi->cpuid_ecx,
2380 wi->cpuid_reg);
2381 } else if (tcg_enabled()) {
2382 r = wi->tcg_features;
2383 } else {
2384 return ~0;
2386 if (migratable_only) {
2387 r &= x86_cpu_get_migratable_flags(w);
2389 return r;
2392 static void x86_cpu_report_filtered_features(X86CPU *cpu)
2394 FeatureWord w;
2396 for (w = 0; w < FEATURE_WORDS; w++) {
2397 report_unavailable_features(w, cpu->filtered_features[w]);
2401 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2403 PropValue *pv;
2404 for (pv = props; pv->prop; pv++) {
2405 if (!pv->value) {
2406 continue;
2408 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2409 &error_abort);
2413 /* Load data from X86CPUDefinition into a X86CPU object
2415 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2417 CPUX86State *env = &cpu->env;
2418 const char *vendor;
2419 char host_vendor[CPUID_VENDOR_SZ + 1];
2420 FeatureWord w;
2422 /*NOTE: any property set by this function should be returned by
2423 * x86_cpu_static_props(), so static expansion of
2424 * query-cpu-model-expansion is always complete.
2427 /* CPU models only set _minimum_ values for level/xlevel: */
2428 object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
2429 object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
2431 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2432 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2433 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2434 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2435 for (w = 0; w < FEATURE_WORDS; w++) {
2436 env->features[w] = def->features[w];
2439 /* Special cases not set in the X86CPUDefinition structs: */
2440 if (kvm_enabled()) {
2441 if (!kvm_irqchip_in_kernel()) {
2442 x86_cpu_change_kvm_default("x2apic", "off");
2445 x86_cpu_apply_props(cpu, kvm_default_props);
2446 } else if (tcg_enabled()) {
2447 x86_cpu_apply_props(cpu, tcg_default_props);
2450 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2452 /* sysenter isn't supported in compatibility mode on AMD,
2453 * syscall isn't supported in compatibility mode on Intel.
2454 * Normally we advertise the actual CPU vendor, but you can
2455 * override this using the 'vendor' property if you want to use
2456 * KVM's sysenter/syscall emulation in compatibility mode and
2457 * when doing cross vendor migration
2459 vendor = def->vendor;
2460 if (kvm_enabled()) {
2461 uint32_t ebx = 0, ecx = 0, edx = 0;
2462 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2463 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2464 vendor = host_vendor;
2467 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2471 /* Return a QDict containing keys for all properties that can be included
2472 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
2473 * must be included in the dictionary.
2475 static QDict *x86_cpu_static_props(void)
2477 FeatureWord w;
2478 int i;
2479 static const char *props[] = {
2480 "min-level",
2481 "min-xlevel",
2482 "family",
2483 "model",
2484 "stepping",
2485 "model-id",
2486 "vendor",
2487 "lmce",
2488 NULL,
2490 static QDict *d;
2492 if (d) {
2493 return d;
2496 d = qdict_new();
2497 for (i = 0; props[i]; i++) {
2498 qdict_put_null(d, props[i]);
2501 for (w = 0; w < FEATURE_WORDS; w++) {
2502 FeatureWordInfo *fi = &feature_word_info[w];
2503 int bit;
2504 for (bit = 0; bit < 32; bit++) {
2505 if (!fi->feat_names[bit]) {
2506 continue;
2508 qdict_put_null(d, fi->feat_names[bit]);
2512 return d;
2515 /* Add an entry to @props dict, with the value of the given property. */
2516 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
2518 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
2519 &error_abort);
2521 qdict_put_obj(props, prop, value);
2524 /* Convert CPU model data from X86CPU object to a property dictionary
2525 * that can recreate exactly the same CPU model.
2527 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
2529 QDict *sprops = x86_cpu_static_props();
2530 const QDictEntry *e;
2532 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
2533 const char *prop = qdict_entry_key(e);
2534 x86_cpu_expand_prop(cpu, props, prop);
2538 /* Convert CPU model data from X86CPU object to a property dictionary
2539 * that can recreate exactly the same CPU model, including every
2540 * writeable QOM property.
2542 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
2544 ObjectPropertyIterator iter;
2545 ObjectProperty *prop;
2547 object_property_iter_init(&iter, OBJECT(cpu));
2548 while ((prop = object_property_iter_next(&iter))) {
2549 /* skip read-only or write-only properties */
2550 if (!prop->get || !prop->set) {
2551 continue;
2554 /* "hotplugged" is the only property that is configurable
2555 * on the command-line but will be set differently on CPUs
2556 * created using "-cpu ... -smp ..." and by CPUs created
2557 * on the fly by x86_cpu_from_model() for querying. Skip it.
2559 if (!strcmp(prop->name, "hotplugged")) {
2560 continue;
2562 x86_cpu_expand_prop(cpu, props, prop->name);
2566 static void object_apply_props(Object *obj, QDict *props, Error **errp)
2568 const QDictEntry *prop;
2569 Error *err = NULL;
2571 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
2572 object_property_set_qobject(obj, qdict_entry_value(prop),
2573 qdict_entry_key(prop), &err);
2574 if (err) {
2575 break;
2579 error_propagate(errp, err);
2582 /* Create X86CPU object according to model+props specification */
2583 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
2585 X86CPU *xc = NULL;
2586 X86CPUClass *xcc;
2587 Error *err = NULL;
2589 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
2590 if (xcc == NULL) {
2591 error_setg(&err, "CPU model '%s' not found", model);
2592 goto out;
2595 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
2596 if (props) {
2597 object_apply_props(OBJECT(xc), props, &err);
2598 if (err) {
2599 goto out;
2603 x86_cpu_expand_features(xc, &err);
2604 if (err) {
2605 goto out;
2608 out:
2609 if (err) {
2610 error_propagate(errp, err);
2611 object_unref(OBJECT(xc));
2612 xc = NULL;
2614 return xc;
2617 CpuModelExpansionInfo *
2618 arch_query_cpu_model_expansion(CpuModelExpansionType type,
2619 CpuModelInfo *model,
2620 Error **errp)
2622 X86CPU *xc = NULL;
2623 Error *err = NULL;
2624 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
2625 QDict *props = NULL;
2626 const char *base_name;
2628 xc = x86_cpu_from_model(model->name,
2629 model->has_props ?
2630 qobject_to_qdict(model->props) :
2631 NULL, &err);
2632 if (err) {
2633 goto out;
2636 props = qdict_new();
2638 switch (type) {
2639 case CPU_MODEL_EXPANSION_TYPE_STATIC:
2640 /* Static expansion will be based on "base" only */
2641 base_name = "base";
2642 x86_cpu_to_dict(xc, props);
2643 break;
2644 case CPU_MODEL_EXPANSION_TYPE_FULL:
2645 /* As we don't return every single property, full expansion needs
2646 * to keep the original model name+props, and add extra
2647 * properties on top of that.
2649 base_name = model->name;
2650 x86_cpu_to_dict_full(xc, props);
2651 break;
2652 default:
2653 error_setg(&err, "Unsupported expansion type");
2654 goto out;
2657 if (!props) {
2658 props = qdict_new();
2660 x86_cpu_to_dict(xc, props);
2662 ret->model = g_new0(CpuModelInfo, 1);
2663 ret->model->name = g_strdup(base_name);
2664 ret->model->props = QOBJECT(props);
2665 ret->model->has_props = true;
2667 out:
2668 object_unref(OBJECT(xc));
2669 if (err) {
2670 error_propagate(errp, err);
2671 qapi_free_CpuModelExpansionInfo(ret);
2672 ret = NULL;
2674 return ret;
2677 static gchar *x86_gdb_arch_name(CPUState *cs)
2679 #ifdef TARGET_X86_64
2680 return g_strdup("i386:x86-64");
2681 #else
2682 return g_strdup("i386");
2683 #endif
2686 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2688 X86CPUDefinition *cpudef = data;
2689 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2691 xcc->cpu_def = cpudef;
2692 xcc->migration_safe = true;
2695 static void x86_register_cpudef_type(X86CPUDefinition *def)
2697 char *typename = x86_cpu_type_name(def->name);
2698 TypeInfo ti = {
2699 .name = typename,
2700 .parent = TYPE_X86_CPU,
2701 .class_init = x86_cpu_cpudef_class_init,
2702 .class_data = def,
2705 /* AMD aliases are handled at runtime based on CPUID vendor, so
2706 * they shouldn't be set on the CPU model table.
2708 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
2710 type_register(&ti);
2711 g_free(typename);
2714 #if !defined(CONFIG_USER_ONLY)
2716 void cpu_clear_apic_feature(CPUX86State *env)
2718 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2721 #endif /* !CONFIG_USER_ONLY */
2723 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2724 uint32_t *eax, uint32_t *ebx,
2725 uint32_t *ecx, uint32_t *edx)
2727 X86CPU *cpu = x86_env_get_cpu(env);
2728 CPUState *cs = CPU(cpu);
2729 uint32_t pkg_offset;
2730 uint32_t limit;
2731 uint32_t signature[3];
2733 /* Calculate & apply limits for different index ranges */
2734 if (index >= 0xC0000000) {
2735 limit = env->cpuid_xlevel2;
2736 } else if (index >= 0x80000000) {
2737 limit = env->cpuid_xlevel;
2738 } else if (index >= 0x40000000) {
2739 limit = 0x40000001;
2740 } else {
2741 limit = env->cpuid_level;
2744 if (index > limit) {
2745 /* Intel documentation states that invalid EAX input will
2746 * return the same information as EAX=cpuid_level
2747 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2749 index = env->cpuid_level;
2752 switch(index) {
2753 case 0:
2754 *eax = env->cpuid_level;
2755 *ebx = env->cpuid_vendor1;
2756 *edx = env->cpuid_vendor2;
2757 *ecx = env->cpuid_vendor3;
2758 break;
2759 case 1:
2760 *eax = env->cpuid_version;
2761 *ebx = (cpu->apic_id << 24) |
2762 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2763 *ecx = env->features[FEAT_1_ECX];
2764 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
2765 *ecx |= CPUID_EXT_OSXSAVE;
2767 *edx = env->features[FEAT_1_EDX];
2768 if (cs->nr_cores * cs->nr_threads > 1) {
2769 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2770 *edx |= CPUID_HT;
2772 break;
2773 case 2:
2774 /* cache info: needed for Pentium Pro compatibility */
2775 if (cpu->cache_info_passthrough) {
2776 host_cpuid(index, 0, eax, ebx, ecx, edx);
2777 break;
2779 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2780 *ebx = 0;
2781 if (!cpu->enable_l3_cache) {
2782 *ecx = 0;
2783 } else {
2784 *ecx = L3_N_DESCRIPTOR;
2786 *edx = (L1D_DESCRIPTOR << 16) | \
2787 (L1I_DESCRIPTOR << 8) | \
2788 (L2_DESCRIPTOR);
2789 break;
2790 case 4:
2791 /* cache info: needed for Core compatibility */
2792 if (cpu->cache_info_passthrough) {
2793 host_cpuid(index, count, eax, ebx, ecx, edx);
2794 *eax &= ~0xFC000000;
2795 } else {
2796 *eax = 0;
2797 switch (count) {
2798 case 0: /* L1 dcache info */
2799 *eax |= CPUID_4_TYPE_DCACHE | \
2800 CPUID_4_LEVEL(1) | \
2801 CPUID_4_SELF_INIT_LEVEL;
2802 *ebx = (L1D_LINE_SIZE - 1) | \
2803 ((L1D_PARTITIONS - 1) << 12) | \
2804 ((L1D_ASSOCIATIVITY - 1) << 22);
2805 *ecx = L1D_SETS - 1;
2806 *edx = CPUID_4_NO_INVD_SHARING;
2807 break;
2808 case 1: /* L1 icache info */
2809 *eax |= CPUID_4_TYPE_ICACHE | \
2810 CPUID_4_LEVEL(1) | \
2811 CPUID_4_SELF_INIT_LEVEL;
2812 *ebx = (L1I_LINE_SIZE - 1) | \
2813 ((L1I_PARTITIONS - 1) << 12) | \
2814 ((L1I_ASSOCIATIVITY - 1) << 22);
2815 *ecx = L1I_SETS - 1;
2816 *edx = CPUID_4_NO_INVD_SHARING;
2817 break;
2818 case 2: /* L2 cache info */
2819 *eax |= CPUID_4_TYPE_UNIFIED | \
2820 CPUID_4_LEVEL(2) | \
2821 CPUID_4_SELF_INIT_LEVEL;
2822 if (cs->nr_threads > 1) {
2823 *eax |= (cs->nr_threads - 1) << 14;
2825 *ebx = (L2_LINE_SIZE - 1) | \
2826 ((L2_PARTITIONS - 1) << 12) | \
2827 ((L2_ASSOCIATIVITY - 1) << 22);
2828 *ecx = L2_SETS - 1;
2829 *edx = CPUID_4_NO_INVD_SHARING;
2830 break;
2831 case 3: /* L3 cache info */
2832 if (!cpu->enable_l3_cache) {
2833 *eax = 0;
2834 *ebx = 0;
2835 *ecx = 0;
2836 *edx = 0;
2837 break;
2839 *eax |= CPUID_4_TYPE_UNIFIED | \
2840 CPUID_4_LEVEL(3) | \
2841 CPUID_4_SELF_INIT_LEVEL;
2842 pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
2843 *eax |= ((1 << pkg_offset) - 1) << 14;
2844 *ebx = (L3_N_LINE_SIZE - 1) | \
2845 ((L3_N_PARTITIONS - 1) << 12) | \
2846 ((L3_N_ASSOCIATIVITY - 1) << 22);
2847 *ecx = L3_N_SETS - 1;
2848 *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
2849 break;
2850 default: /* end of info */
2851 *eax = 0;
2852 *ebx = 0;
2853 *ecx = 0;
2854 *edx = 0;
2855 break;
2859 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2860 if ((*eax & 31) && cs->nr_cores > 1) {
2861 *eax |= (cs->nr_cores - 1) << 26;
2863 break;
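/*
 * Worked example of the leaf-4 encoding built above: EBX packs
 * (line size - 1) in bits 11:0, (partitions - 1) in bits 21:12 and
 * (associativity - 1) in bits 31:22, while ECX holds (sets - 1); the cache
 * size is associativity * partitions * line size * sets.  With the L1D
 * values configured earlier in this file (64-byte lines, 1 partition,
 * 8 ways, 64 sets) that gives EBX = 0x01C0003F, ECX = 63, i.e. 32 KiB.
 */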
2864 case 5:
2865 /* mwait info: needed for Core compatibility */
2866 *eax = 0; /* Smallest monitor-line size in bytes */
2867 *ebx = 0; /* Largest monitor-line size in bytes */
2868 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2869 *edx = 0;
2870 break;
2871 case 6:
2872 /* Thermal and Power Leaf */
2873 *eax = env->features[FEAT_6_EAX];
2874 *ebx = 0;
2875 *ecx = 0;
2876 *edx = 0;
2877 break;
2878 case 7:
2879 /* Structured Extended Feature Flags Enumeration Leaf */
2880 if (count == 0) {
2881 *eax = 0; /* Maximum ECX value for sub-leaves */
2882 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2883 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
2884 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
2885 *ecx |= CPUID_7_0_ECX_OSPKE;
2887 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
2888 } else {
2889 *eax = 0;
2890 *ebx = 0;
2891 *ecx = 0;
2892 *edx = 0;
2894 break;
2895 case 9:
2896 /* Direct Cache Access Information Leaf */
2897 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2898 *ebx = 0;
2899 *ecx = 0;
2900 *edx = 0;
2901 break;
2902 case 0xA:
2903 /* Architectural Performance Monitoring Leaf */
2904 if (kvm_enabled() && cpu->enable_pmu) {
2905 KVMState *s = cs->kvm_state;
2907 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2908 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2909 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2910 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2911 } else {
2912 *eax = 0;
2913 *ebx = 0;
2914 *ecx = 0;
2915 *edx = 0;
2917 break;
2918 case 0xB:
2919 /* Extended Topology Enumeration Leaf */
2920 if (!cpu->enable_cpuid_0xb) {
2921 *eax = *ebx = *ecx = *edx = 0;
2922 break;
2925 *ecx = count & 0xff;
2926 *edx = cpu->apic_id;
2928 switch (count) {
2929 case 0:
2930 *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
2931 *ebx = cs->nr_threads;
2932 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
2933 break;
2934 case 1:
2935 *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
2936 *ebx = cs->nr_cores * cs->nr_threads;
2937 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
2938 break;
2939 default:
2940 *eax = 0;
2941 *ebx = 0;
2942 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
2945 assert(!(*eax & ~0x1f));
2946 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
2947 break;
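/*
 * Illustrative example (assuming the usual topology encoding from
 * hw/i386/topology.h): with cores=4,threads=2 the SMT sub-leaf reports
 * EAX = 1 (one APIC-ID bit per thread) and EBX = 2, and the core sub-leaf
 * reports EAX = 3 and EBX = 8 logical processors, so shifting the APIC ID
 * in EDX right by EAX identifies the thread's core and package respectively.
 */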
2948 case 0xD: {
2949 /* Processor Extended State */
2950 *eax = 0;
2951 *ebx = 0;
2952 *ecx = 0;
2953 *edx = 0;
2954 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
2955 break;
2958 if (count == 0) {
2959 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
2960 *eax = env->features[FEAT_XSAVE_COMP_LO];
2961 *edx = env->features[FEAT_XSAVE_COMP_HI];
2962 *ebx = *ecx;
2963 } else if (count == 1) {
2964 *eax = env->features[FEAT_XSAVE];
2965 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
2966 if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
2967 const ExtSaveArea *esa = &x86_ext_save_areas[count];
2968 *eax = esa->size;
2969 *ebx = esa->offset;
2972 break;
2974 case 0x40000000:
2976 * CPUID code in kvm_arch_init_vcpu() ignores stuff
2977 * set here, but we restrict to TCG none the less.
2979 if (tcg_enabled() && cpu->expose_tcg) {
2980 memcpy(signature, "TCGTCGTCGTCG", 12);
2981 *eax = 0x40000001;
2982 *ebx = signature[0];
2983 *ecx = signature[1];
2984 *edx = signature[2];
2985 } else {
2986 *eax = 0;
2987 *ebx = 0;
2988 *ecx = 0;
2989 *edx = 0;
2991 break;
2992 case 0x40000001:
2993 *eax = 0;
2994 *ebx = 0;
2995 *ecx = 0;
2996 *edx = 0;
2997 break;
2998 case 0x80000000:
2999 *eax = env->cpuid_xlevel;
3000 *ebx = env->cpuid_vendor1;
3001 *edx = env->cpuid_vendor2;
3002 *ecx = env->cpuid_vendor3;
3003 break;
3004 case 0x80000001:
3005 *eax = env->cpuid_version;
3006 *ebx = 0;
3007 *ecx = env->features[FEAT_8000_0001_ECX];
3008 *edx = env->features[FEAT_8000_0001_EDX];
3010 /* The Linux kernel checks for the CMPLegacy bit and
3011 * discards multiple thread information if it is set.
3012 * So don't set it here for Intel to make Linux guests happy.
3014 if (cs->nr_cores * cs->nr_threads > 1) {
3015 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
3016 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
3017 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
3018 *ecx |= 1 << 1; /* CmpLegacy bit */
3021 break;
3022 case 0x80000002:
3023 case 0x80000003:
3024 case 0x80000004:
3025 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
3026 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
3027 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
3028 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
3029 break;
3030 case 0x80000005:
3031 /* cache info (L1 cache) */
3032 if (cpu->cache_info_passthrough) {
3033 host_cpuid(index, 0, eax, ebx, ecx, edx);
3034 break;
3036 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
3037 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
3038 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
3039 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
3040 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
3041 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
3042 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
3043 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
3044 break;
3045 case 0x80000006:
3046 /* cache info (L2 cache) */
3047 if (cpu->cache_info_passthrough) {
3048 host_cpuid(index, 0, eax, ebx, ecx, edx);
3049 break;
3051 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
3052 (L2_DTLB_2M_ENTRIES << 16) | \
3053 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
3054 (L2_ITLB_2M_ENTRIES);
3055 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
3056 (L2_DTLB_4K_ENTRIES << 16) | \
3057 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
3058 (L2_ITLB_4K_ENTRIES);
3059 *ecx = (L2_SIZE_KB_AMD << 16) | \
3060 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
3061 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
3062 if (!cpu->enable_l3_cache) {
3063 *edx = ((L3_SIZE_KB / 512) << 18) | \
3064 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
3065 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
3066 } else {
3067 *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
3068 (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
3069 (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
3071 break;
3072 case 0x80000007:
3073 *eax = 0;
3074 *ebx = 0;
3075 *ecx = 0;
3076 *edx = env->features[FEAT_8000_0007_EDX];
3077 break;
3078 case 0x80000008:
3079 /* virtual & phys address size in low 2 bytes. */
3080 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
3081 /* 64 bit processor */
3082 *eax = cpu->phys_bits; /* configurable physical bits */
3083 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
3084 *eax |= 0x00003900; /* 57 bits virtual */
3085 } else {
3086 *eax |= 0x00003000; /* 48 bits virtual */
3088 } else {
3089 *eax = cpu->phys_bits;
3091 *ebx = 0;
3092 *ecx = 0;
3093 *edx = 0;
3094 if (cs->nr_cores * cs->nr_threads > 1) {
3095 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
3097 break;
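/*
 * Worked example for leaf 0x80000008: a 64-bit guest with phys-bits=40 and
 * no LA57 reports EAX = 0x00003028 (48-bit virtual, 40-bit physical), and
 * with e.g. 8 logical CPUs ECX[7:0] = 7.
 */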
3098 case 0x8000000A:
3099 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
3100 *eax = 0x00000001; /* SVM Revision */
3101 *ebx = 0x00000010; /* nr of ASIDs */
3102 *ecx = 0;
3103 *edx = env->features[FEAT_SVM]; /* optional features */
3104 } else {
3105 *eax = 0;
3106 *ebx = 0;
3107 *ecx = 0;
3108 *edx = 0;
3110 break;
3111 case 0xC0000000:
3112 *eax = env->cpuid_xlevel2;
3113 *ebx = 0;
3114 *ecx = 0;
3115 *edx = 0;
3116 break;
3117 case 0xC0000001:
3118 /* Support for VIA CPU's CPUID instruction */
3119 *eax = env->cpuid_version;
3120 *ebx = 0;
3121 *ecx = 0;
3122 *edx = env->features[FEAT_C000_0001_EDX];
3123 break;
3124 case 0xC0000002:
3125 case 0xC0000003:
3126 case 0xC0000004:
3127 /* Reserved for the future, and now filled with zero */
3128 *eax = 0;
3129 *ebx = 0;
3130 *ecx = 0;
3131 *edx = 0;
3132 break;
3133 default:
3134 /* reserved values: zero */
3135 *eax = 0;
3136 *ebx = 0;
3137 *ecx = 0;
3138 *edx = 0;
3139 break;
3143 /* CPUClass::reset() */
3144 static void x86_cpu_reset(CPUState *s)
3146 X86CPU *cpu = X86_CPU(s);
3147 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
3148 CPUX86State *env = &cpu->env;
3149 target_ulong cr4;
3150 uint64_t xcr0;
3151 int i;
3153 xcc->parent_reset(s);
3155 memset(env, 0, offsetof(CPUX86State, end_reset_fields));
3157 env->old_exception = -1;
3159 /* init to reset state */
3161 env->hflags2 |= HF2_GIF_MASK;
3163 cpu_x86_update_cr0(env, 0x60000010);
3164 env->a20_mask = ~0x0;
3165 env->smbase = 0x30000;
3167 env->idt.limit = 0xffff;
3168 env->gdt.limit = 0xffff;
3169 env->ldt.limit = 0xffff;
3170 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
3171 env->tr.limit = 0xffff;
3172 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
3174 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
3175 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
3176 DESC_R_MASK | DESC_A_MASK);
3177 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
3178 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3179 DESC_A_MASK);
3180 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
3181 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3182 DESC_A_MASK);
3183 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
3184 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3185 DESC_A_MASK);
3186 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
3187 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3188 DESC_A_MASK);
3189 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
3190 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3191 DESC_A_MASK);
3193 env->eip = 0xfff0;
3194 env->regs[R_EDX] = env->cpuid_version;
3196 env->eflags = 0x2;
3198 /* FPU init */
3199 for (i = 0; i < 8; i++) {
3200 env->fptags[i] = 1;
3202 cpu_set_fpuc(env, 0x37f);
3204 env->mxcsr = 0x1f80;
3205 /* All units are in INIT state. */
3206 env->xstate_bv = 0;
3208 env->pat = 0x0007040600070406ULL;
3209 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
3211 memset(env->dr, 0, sizeof(env->dr));
3212 env->dr[6] = DR6_FIXED_1;
3213 env->dr[7] = DR7_FIXED_1;
3214 cpu_breakpoint_remove_all(s, BP_CPU);
3215 cpu_watchpoint_remove_all(s, BP_CPU);
3217 cr4 = 0;
3218 xcr0 = XSTATE_FP_MASK;
3220 #ifdef CONFIG_USER_ONLY
3221 /* Enable all the features for user-mode. */
3222 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
3223 xcr0 |= XSTATE_SSE_MASK;
3225 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
3226 const ExtSaveArea *esa = &x86_ext_save_areas[i];
3227 if (env->features[esa->feature] & esa->bits) {
3228 xcr0 |= 1ull << i;
3232 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
3233 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
3235 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
3236 cr4 |= CR4_FSGSBASE_MASK;
3238 #endif
3240 env->xcr0 = xcr0;
3241 cpu_x86_update_cr4(env, cr4);
3244 * SDM 11.11.5 requires:
3245 * - IA32_MTRR_DEF_TYPE MSR.E = 0
3246 * - IA32_MTRR_PHYSMASKn.V = 0
3247 * All other bits are undefined. For simplification, zero it all.
3249 env->mtrr_deftype = 0;
3250 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
3251 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
3253 #if !defined(CONFIG_USER_ONLY)
3254 /* We hard-wire the BSP to the first CPU. */
3255 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
3257 s->halted = !cpu_is_bsp(cpu);
3259 if (kvm_enabled()) {
3260 kvm_arch_reset_vcpu(cpu);
3262 #endif
3265 #ifndef CONFIG_USER_ONLY
3266 bool cpu_is_bsp(X86CPU *cpu)
3268 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
3271 /* TODO: remove me, when reset over QOM tree is implemented */
3272 static void x86_cpu_machine_reset_cb(void *opaque)
3274 X86CPU *cpu = opaque;
3275 cpu_reset(CPU(cpu));
3277 #endif
3279 static void mce_init(X86CPU *cpu)
3281 CPUX86State *cenv = &cpu->env;
3282 unsigned int bank;
3284 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
3285 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
3286 (CPUID_MCE | CPUID_MCA)) {
3287 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
3288 (cpu->enable_lmce ? MCG_LMCE_P : 0);
3289 cenv->mcg_ctl = ~(uint64_t)0;
3290 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
3291 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
3296 #ifndef CONFIG_USER_ONLY
3297 APICCommonClass *apic_get_class(void)
3299 const char *apic_type = "apic";
3301 if (kvm_apic_in_kernel()) {
3302 apic_type = "kvm-apic";
3303 } else if (xen_enabled()) {
3304 apic_type = "xen-apic";
3307 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
3310 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
3312 APICCommonState *apic;
3313 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());
3315 cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));
3317 object_property_add_child(OBJECT(cpu), "lapic",
3318 OBJECT(cpu->apic_state), &error_abort);
3319 object_unref(OBJECT(cpu->apic_state));
3321 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
3322 /* TODO: convert to link<> */
3323 apic = APIC_COMMON(cpu->apic_state);
3324 apic->cpu = cpu;
3325 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
3328 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
3330 APICCommonState *apic;
3331 static bool apic_mmio_map_once;
3333 if (cpu->apic_state == NULL) {
3334 return;
3336 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
3337 errp);
3339 /* Map APIC MMIO area */
3340 apic = APIC_COMMON(cpu->apic_state);
3341 if (!apic_mmio_map_once) {
3342 memory_region_add_subregion_overlap(get_system_memory(),
3343 apic->apicbase &
3344 MSR_IA32_APICBASE_BASE,
3345 &apic->io_memory,
3346 0x1000);
3347 apic_mmio_map_once = true;
3351 static void x86_cpu_machine_done(Notifier *n, void *unused)
3353 X86CPU *cpu = container_of(n, X86CPU, machine_done);
3354 MemoryRegion *smram =
3355 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
3357 if (smram) {
3358 cpu->smram = g_new(MemoryRegion, 1);
3359 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
3360 smram, 0, 1ull << 32);
3361 memory_region_set_enabled(cpu->smram, true);
3362 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
3365 #else
3366 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
3369 #endif
3371 /* Note: Only safe for use on x86(-64) hosts */
3372 static uint32_t x86_host_phys_bits(void)
3374 uint32_t eax;
3375 uint32_t host_phys_bits;
3377 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
3378 if (eax >= 0x80000008) {
3379 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
3380 /* Note: According to AMD doc 25481 rev 2.34 they have a field
3381  * at 23:16 that can specify the maximum physical address bits for
3382 * the guest that can override this value; but I've not seen
3383 * anything with that set.
3385 host_phys_bits = eax & 0xff;
3386 } else {
3387 /* It's an odd 64-bit machine that doesn't have the leaf for
3388  * physical address bits; fall back to 36, which is what most
3389  * older Intel parts provide.
3391 host_phys_bits = 36;
3394 return host_phys_bits;
3397 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
3399 if (*min < value) {
3400 *min = value;
3404 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
3405 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
3407 CPUX86State *env = &cpu->env;
3408 FeatureWordInfo *fi = &feature_word_info[w];
3409 uint32_t eax = fi->cpuid_eax;
3410 uint32_t region = eax & 0xF0000000;
3412 if (!env->features[w]) {
3413 return;
3416 switch (region) {
3417 case 0x00000000:
3418 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
3419 break;
3420 case 0x80000000:
3421 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
3422 break;
3423 case 0xC0000000:
3424 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
3425 break;
3429 /* Calculate XSAVE components based on the configured CPU feature flags */
3430 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
3432 CPUX86State *env = &cpu->env;
3433 int i;
3434 uint64_t mask;
3436 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
3437 return;
3440 mask = 0;
3441 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
3442 const ExtSaveArea *esa = &x86_ext_save_areas[i];
3443 if (env->features[esa->feature] & esa->bits) {
3444 mask |= (1ULL << i);
3448 env->features[FEAT_XSAVE_COMP_LO] = mask;
3449 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
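/*
 * Worked example (assuming the usual x86_ext_save_areas layout, with x87,
 * SSE and AVX at component indexes 0-2): if only those three states are
 * enabled, mask = 0x7, so FEAT_XSAVE_COMP_LO = 0x7 and FEAT_XSAVE_COMP_HI
 * = 0, which is exactly what CPUID[0xD].EAX/EDX report in cpu_x86_cpuid().
 */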
3452 /***** Steps involved on loading and filtering CPUID data
3454 * When initializing and realizing a CPU object, the steps
3455 * involved in setting up CPUID data are:
3457 * 1) Loading CPU model definition (X86CPUDefinition). This is
3458 * implemented by x86_cpu_load_def() and should be completely
3459 * transparent, as it is done automatically by instance_init.
3460 * No code should need to look at X86CPUDefinition structs
3461 * outside instance_init.
3463 * 2) CPU expansion. This is done by realize before CPUID
3464 * filtering, and will make sure host/accelerator data is
3465 * loaded for CPU models that depend on host capabilities
3466 * (e.g. "host"). Done by x86_cpu_expand_features().
3468 * 3) CPUID filtering. This initializes extra data related to
3469 * CPUID, and checks if the host supports all capabilities
3470 * required by the CPU. Runnability of a CPU model is
3471 * determined at this step. Done by x86_cpu_filter_features().
3473 * Some operations don't require all steps to be performed.
3474 * More precisely:
3476 * - CPU instance creation (instance_init) will run only CPU
3477 * model loading. CPU expansion can't run at instance_init-time
3478 * because host/accelerator data may be not available yet.
3479 * - CPU realization will perform both CPU model expansion and CPUID
3480 * filtering, and return an error in case one of them fails.
3481 * - query-cpu-definitions needs to run all 3 steps. It needs
3482 * to run CPUID filtering, as the 'unavailable-features'
3483 * field is set based on the filtering results.
3484 * - The query-cpu-model-expansion QMP command only needs to run
3485 * CPU model loading and CPU expansion. It should not filter
3486 * any CPUID data based on host capabilities.
3489 /* Expand CPU configuration data, based on configured features
3490 * and host/accelerator capabilities when appropriate.
3492 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
3494 CPUX86State *env = &cpu->env;
3495 FeatureWord w;
3496 GList *l;
3497 Error *local_err = NULL;
3499 /* TODO: Now that cpu->max_features doesn't overwrite features
3500  * set using QOM properties, we can convert
3501  * plus_features & minus_features to global properties
3502  * inside x86_cpu_parse_featurestr() too.
3504 if (cpu->max_features) {
3505 for (w = 0; w < FEATURE_WORDS; w++) {
3506 /* Override only features that weren't set explicitly
3507 * by the user.
3509 env->features[w] |=
3510 x86_cpu_get_supported_feature_word(w, cpu->migratable) &
3511 ~env->user_features[w];
3515 for (l = plus_features; l; l = l->next) {
3516 const char *prop = l->data;
3517 object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
3518 if (local_err) {
3519 goto out;
3523 for (l = minus_features; l; l = l->next) {
3524 const char *prop = l->data;
3525 object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
3526 if (local_err) {
3527 goto out;
3531 if (!kvm_enabled() || !cpu->expose_kvm) {
3532 env->features[FEAT_KVM] = 0;
3535 x86_cpu_enable_xsave_components(cpu);
3537 /* CPUID[EAX=7,ECX=0].EBX always increases the level automatically: */
3538 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
3539 if (cpu->full_cpuid_auto_level) {
3540 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
3541 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
3542 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
3543 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
3544 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
3545 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
3546 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
3547 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
3548 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
3549 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
3550 /* SVM requires CPUID[0x8000000A] */
3551 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
3552 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
3556 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
3557 if (env->cpuid_level == UINT32_MAX) {
3558 env->cpuid_level = env->cpuid_min_level;
3560 if (env->cpuid_xlevel == UINT32_MAX) {
3561 env->cpuid_xlevel = env->cpuid_min_xlevel;
3563 if (env->cpuid_xlevel2 == UINT32_MAX) {
3564 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
3567 out:
3568 if (local_err != NULL) {
3569 error_propagate(errp, local_err);
3574 * Finishes initialization of CPUID data, filters CPU feature
3575 * words based on host availability of each feature.
3577 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
3579 static int x86_cpu_filter_features(X86CPU *cpu)
3581 CPUX86State *env = &cpu->env;
3582 FeatureWord w;
3583 int rv = 0;
3585 for (w = 0; w < FEATURE_WORDS; w++) {
3586 uint32_t host_feat =
3587 x86_cpu_get_supported_feature_word(w, false);
3588 uint32_t requested_features = env->features[w];
3589 env->features[w] &= host_feat;
3590 cpu->filtered_features[w] = requested_features & ~env->features[w];
3591 if (cpu->filtered_features[w]) {
3592 rv = 1;
3596 return rv;
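/*
 * Example of how the result is consumed (see x86_cpu_realizefn() below):
 * requesting a flag the accelerator cannot provide, say avx512f under
 * plain TCG, leaves the corresponding bit set in cpu->filtered_features[]
 * and makes this function return 1; with check=on this only produces
 * warnings, while enforce=on turns it into a realize failure.
 */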
3599 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
3600 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
3601 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
3602 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
3603 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
3604 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
3605 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
3607 CPUState *cs = CPU(dev);
3608 X86CPU *cpu = X86_CPU(dev);
3609 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
3610 CPUX86State *env = &cpu->env;
3611 Error *local_err = NULL;
3612 static bool ht_warned;
3614 if (xcc->kvm_required && !kvm_enabled()) {
3615 char *name = x86_cpu_class_get_model_name(xcc);
3616 error_setg(&local_err, "CPU model '%s' requires KVM", name);
3617 g_free(name);
3618 goto out;
3621 if (cpu->apic_id == UNASSIGNED_APIC_ID) {
3622 error_setg(errp, "apic-id property was not initialized properly");
3623 return;
3626 x86_cpu_expand_features(cpu, &local_err);
3627 if (local_err) {
3628 goto out;
3631 if (x86_cpu_filter_features(cpu) &&
3632 (cpu->check_cpuid || cpu->enforce_cpuid)) {
3633 x86_cpu_report_filtered_features(cpu);
3634 if (cpu->enforce_cpuid) {
3635 error_setg(&local_err,
3636 kvm_enabled() ?
3637 "Host doesn't support requested features" :
3638 "TCG doesn't support requested features");
3639 goto out;
3643 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
3644 * CPUID[1].EDX.
3646 if (IS_AMD_CPU(env)) {
3647 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
3648 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
3649 & CPUID_EXT2_AMD_ALIASES);
3652 /* For 64-bit systems, think about the number of physical bits to present.
3653  * Ideally this should be the same as the host; anything other than matching
3654  * the host can cause incorrect guest behaviour.
3655  * QEMU used to pick the magic value of 40 bits, which corresponds to
3656  * consumer AMD devices but nothing else.
3658 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
3659 if (kvm_enabled()) {
3660 uint32_t host_phys_bits = x86_host_phys_bits();
3661 static bool warned;
3663 if (cpu->host_phys_bits) {
3664 /* The user asked for us to use the host physical bits */
3665 cpu->phys_bits = host_phys_bits;
3668 /* Print a warning if the user set it to a value that's not the
3669 * host value.
3671 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
3672 !warned) {
3673 warn_report("Host physical bits (%u)"
3674 " does not match phys-bits property (%u)",
3675 host_phys_bits, cpu->phys_bits);
3676 warned = true;
3679 if (cpu->phys_bits &&
3680 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
3681 cpu->phys_bits < 32)) {
3682 error_setg(errp, "phys-bits should be between 32 and %u"
3683 " (but is %u)",
3684 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
3685 return;
3687 } else {
3688 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
3689 error_setg(errp, "TCG only supports phys-bits=%u",
3690 TCG_PHYS_ADDR_BITS);
3691 return;
3694 /* 0 means it was not explicitly set by the user (or by machine
3695 * compat_props or by the host code above). In this case, the default
3696 * is the value used by TCG (40).
3698 if (cpu->phys_bits == 0) {
3699 cpu->phys_bits = TCG_PHYS_ADDR_BITS;
3701 } else {
3702 /* For 32-bit systems, don't use the user-set value, but keep
3703  * phys_bits consistent with what we tell the guest.
3705 if (cpu->phys_bits != 0) {
3706 error_setg(errp, "phys-bits is not user-configurable in 32 bit");
3707 return;
3710 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
3711 cpu->phys_bits = 36;
3712 } else {
3713 cpu->phys_bits = 32;
3716 cpu_exec_realizefn(cs, &local_err);
3717 if (local_err != NULL) {
3718 error_propagate(errp, local_err);
3719 return;
3722 if (tcg_enabled()) {
3723 tcg_x86_init();
3726 #ifndef CONFIG_USER_ONLY
3727 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
3729 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
3730 x86_cpu_apic_create(cpu, &local_err);
3731 if (local_err != NULL) {
3732 goto out;
3735 #endif
3737 mce_init(cpu);
3739 #ifndef CONFIG_USER_ONLY
3740 if (tcg_enabled()) {
3741 AddressSpace *as_normal = g_new0(AddressSpace, 1);
3742 AddressSpace *as_smm = g_new(AddressSpace, 1);
3744 address_space_init(as_normal, cs->memory, "cpu-memory");
3746 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
3747 cpu->cpu_as_root = g_new(MemoryRegion, 1);
3749 /* Outer container... */
3750 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
3751 memory_region_set_enabled(cpu->cpu_as_root, true);
3753 /* ... with two regions inside: normal system memory with low
3754 * priority, and...
3756 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
3757 get_system_memory(), 0, ~0ull);
3758 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
3759 memory_region_set_enabled(cpu->cpu_as_mem, true);
3760 address_space_init(as_smm, cpu->cpu_as_root, "CPU");
3762 cs->num_ases = 2;
3763 cpu_address_space_init(cs, as_normal, 0);
3764 cpu_address_space_init(cs, as_smm, 1);
3766 /* ... SMRAM with higher priority, linked from /machine/smram. */
3767 cpu->machine_done.notify = x86_cpu_machine_done;
3768 qemu_add_machine_init_done_notifier(&cpu->machine_done);
3769 }
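/* The TCG CPU thus gets two address spaces: index 0 is plain system
 * memory, index 1 ("CPU") is rooted at cpu_as_root so that
 * x86_cpu_machine_done can later overlay SMRAM on top of it at higher
 * priority; x86_asidx_from_attrs() picks between them per access.
 */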
3770 #endif
3772 qemu_init_vcpu(cs);
3774 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
3775 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
3776 * based on inputs (sockets, cores, threads), it is still better to give
3777 * users a warning.
3778 *
3779 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
3780 * cs->nr_threads hasn't been populated yet and the checking is incorrect.
3781 */
3782 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
3783 error_report("AMD CPU doesn't support hyperthreading. Please configure"
3784 " -smp options properly.");
3785 ht_warned = true;
3786 }
3788 x86_cpu_apic_realize(cpu, &local_err);
3789 if (local_err != NULL) {
3790 goto out;
3791 }
3792 cpu_reset(cs);
3794 xcc->parent_realize(dev, &local_err);
3796 out:
3797 if (local_err != NULL) {
3798 error_propagate(errp, local_err);
3799 return;
3800 }
3801 }
3803 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
3804 {
3805 X86CPU *cpu = X86_CPU(dev);
3806 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
3807 Error *local_err = NULL;
3809 #ifndef CONFIG_USER_ONLY
3810 cpu_remove_sync(CPU(dev));
3811 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
3812 #endif
3814 if (cpu->apic_state) {
3815 object_unparent(OBJECT(cpu->apic_state));
3816 cpu->apic_state = NULL;
3817 }
3819 xcc->parent_unrealize(dev, &local_err);
3820 if (local_err != NULL) {
3821 error_propagate(errp, local_err);
3822 return;
3823 }
3824 }
3826 typedef struct BitProperty {
3827 FeatureWord w;
3828 uint32_t mask;
3829 } BitProperty;
3831 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
3832 void *opaque, Error **errp)
3833 {
3834 X86CPU *cpu = X86_CPU(obj);
3835 BitProperty *fp = opaque;
3836 uint32_t f = cpu->env.features[fp->w];
3837 bool value = (f & fp->mask) == fp->mask;
3838 visit_type_bool(v, name, &value, errp);
3839 }
3841 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
3842 void *opaque, Error **errp)
3843 {
3844 DeviceState *dev = DEVICE(obj);
3845 X86CPU *cpu = X86_CPU(obj);
3846 BitProperty *fp = opaque;
3847 Error *local_err = NULL;
3848 bool value;
3850 if (dev->realized) {
3851 qdev_prop_set_after_realize(dev, name, errp);
3852 return;
3853 }
3855 visit_type_bool(v, name, &value, &local_err);
3856 if (local_err) {
3857 error_propagate(errp, local_err);
3858 return;
3859 }
3861 if (value) {
3862 cpu->env.features[fp->w] |= fp->mask;
3863 } else {
3864 cpu->env.features[fp->w] &= ~fp->mask;
3865 }
3866 cpu->env.user_features[fp->w] |= fp->mask;
3867 }
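/* The setter above also records the bit in user_features, presumably so
 * that later feature expansion and filtering can distinguish explicit user
 * choices from model defaults.
 */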
3869 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3870 void *opaque)
3871 {
3872 BitProperty *prop = opaque;
3873 g_free(prop);
3874 }
3876 /* Register a boolean property to get/set a single bit in a uint32_t field.
3877 *
3878 * The same property name can be registered multiple times to make it affect
3879 * multiple bits in the same FeatureWord. In that case, the getter will return
3880 * true only if all bits are set.
3881 */
3882 static void x86_cpu_register_bit_prop(X86CPU *cpu,
3883 const char *prop_name,
3884 FeatureWord w,
3885 int bitnr)
3886 {
3887 BitProperty *fp;
3888 ObjectProperty *op;
3889 uint32_t mask = (1UL << bitnr);
3891 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3892 if (op) {
3893 fp = op->opaque;
3894 assert(fp->w == w);
3895 fp->mask |= mask;
3896 } else {
3897 fp = g_new0(BitProperty, 1);
3898 fp->w = w;
3899 fp->mask = mask;
3900 object_property_add(OBJECT(cpu), prop_name, "bool",
3901 x86_cpu_get_bit_prop,
3902 x86_cpu_set_bit_prop,
3903 x86_cpu_release_bit_prop, fp, &error_abort);
3904 }
3905 }
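/* For example, "-cpu qemu64,avx=on" resolves the "avx" property registered
 * here to bit 28 of FEAT_1_ECX, so the setter above ORs CPUID_EXT_AVX into
 * env->features[FEAT_1_ECX] before realize.
 */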
3907 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3908 FeatureWord w,
3909 int bitnr)
3910 {
3911 FeatureWordInfo *fi = &feature_word_info[w];
3912 const char *name = fi->feat_names[bitnr];
3914 if (!name) {
3915 return;
3916 }
3918 /* Property names should use "-" instead of "_".
3919 * Old names containing underscores are registered as aliases
3920 * using object_property_add_alias()
3921 */
3922 assert(!strchr(name, '_'));
3923 /* Aliases don't use "|" delimiters anymore; they are registered
3924 * manually using object_property_add_alias() */
3925 assert(!strchr(name, '|'));
3926 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
3927 }
3929 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
3930 {
3931 X86CPU *cpu = X86_CPU(cs);
3932 CPUX86State *env = &cpu->env;
3933 GuestPanicInformation *panic_info = NULL;
3935 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
3936 panic_info = g_malloc0(sizeof(GuestPanicInformation));
3938 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
3940 assert(HV_CRASH_PARAMS >= 5);
3941 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
3942 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
3943 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
3944 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
3945 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
3946 }
3948 return panic_info;
3949 }
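/* The five arguments mirror the Hyper-V crash MSRs HV_X64_MSR_CRASH_P0..P4
 * written by a Windows guest on a bugcheck; the structure built above is
 * what the GUEST_PANICKED QMP event and the "crash-information" property
 * registered below expose to management software.
 */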
3950 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
3951 const char *name, void *opaque,
3952 Error **errp)
3953 {
3954 CPUState *cs = CPU(obj);
3955 GuestPanicInformation *panic_info;
3957 if (!cs->crash_occurred) {
3958 error_setg(errp, "No crash occurred");
3959 return;
3960 }
3962 panic_info = x86_cpu_get_crash_info(cs);
3963 if (panic_info == NULL) {
3964 error_setg(errp, "No crash information");
3965 return;
3966 }
3968 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
3969 errp);
3970 qapi_free_GuestPanicInformation(panic_info);
3971 }
3973 static void x86_cpu_initfn(Object *obj)
3974 {
3975 CPUState *cs = CPU(obj);
3976 X86CPU *cpu = X86_CPU(obj);
3977 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
3978 CPUX86State *env = &cpu->env;
3979 FeatureWord w;
3981 cs->env_ptr = env;
3983 object_property_add(obj, "family", "int",
3984 x86_cpuid_version_get_family,
3985 x86_cpuid_version_set_family, NULL, NULL, NULL);
3986 object_property_add(obj, "model", "int",
3987 x86_cpuid_version_get_model,
3988 x86_cpuid_version_set_model, NULL, NULL, NULL);
3989 object_property_add(obj, "stepping", "int",
3990 x86_cpuid_version_get_stepping,
3991 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
3992 object_property_add_str(obj, "vendor",
3993 x86_cpuid_get_vendor,
3994 x86_cpuid_set_vendor, NULL);
3995 object_property_add_str(obj, "model-id",
3996 x86_cpuid_get_model_id,
3997 x86_cpuid_set_model_id, NULL);
3998 object_property_add(obj, "tsc-frequency", "int",
3999 x86_cpuid_get_tsc_freq,
4000 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
4001 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
4002 x86_cpu_get_feature_words,
4003 NULL, NULL, (void *)env->features, NULL);
4004 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
4005 x86_cpu_get_feature_words,
4006 NULL, NULL, (void *)cpu->filtered_features, NULL);
4008 object_property_add(obj, "crash-information", "GuestPanicInformation",
4009 x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);
4011 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
4013 for (w = 0; w < FEATURE_WORDS; w++) {
4014 int bitnr;
4016 for (bitnr = 0; bitnr < 32; bitnr++) {
4017 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
4018 }
4019 }
4021 object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
4022 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
4023 object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
4024 object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
4025 object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
4026 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
4027 object_property_add_alias(obj, "i64", obj, "lm", &error_abort);
4029 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
4030 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
4031 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
4032 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
4033 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
4034 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
4035 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
4036 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
4037 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
4038 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
4039 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
4040 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
4041 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
4042 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
4043 object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
4044 object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
4045 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
4046 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
4047 object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
4048 object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
4049 object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);
4051 if (xcc->cpu_def) {
4052 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
4053 }
4054 }
4056 static int64_t x86_cpu_get_arch_id(CPUState *cs)
4057 {
4058 X86CPU *cpu = X86_CPU(cs);
4060 return cpu->apic_id;
4061 }
4063 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
4064 {
4065 X86CPU *cpu = X86_CPU(cs);
4067 return cpu->env.cr[0] & CR0_PG_MASK;
4068 }
4070 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
4071 {
4072 X86CPU *cpu = X86_CPU(cs);
4074 cpu->env.eip = value;
4075 }
4077 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
4078 {
4079 X86CPU *cpu = X86_CPU(cs);
4081 cpu->env.eip = tb->pc - tb->cs_base;
4082 }
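/* tb->pc holds the linear address the TB was generated for, so the
 * CS-relative EIP is recovered by subtracting the segment base recorded in
 * tb->cs_base at translation time.
 */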
4084 static bool x86_cpu_has_work(CPUState *cs)
4085 {
4086 X86CPU *cpu = X86_CPU(cs);
4087 CPUX86State *env = &cpu->env;
4089 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
4090 CPU_INTERRUPT_POLL)) &&
4091 (env->eflags & IF_MASK)) ||
4092 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
4093 CPU_INTERRUPT_INIT |
4094 CPU_INTERRUPT_SIPI |
4095 CPU_INTERRUPT_MCE)) ||
4096 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
4097 !(env->hflags & HF_SMM_MASK);
4098 }
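/* Note that an SMI only makes the CPU runnable when it is not already in
 * System Management Mode (HF_SMM_MASK), while NMI, INIT, SIPI and machine
 * checks are honoured regardless of EFLAGS.IF.
 */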
4100 static Property x86_cpu_properties[] = {
4101 #ifdef CONFIG_USER_ONLY
4102 /* apic_id = 0 by default for *-user, see commit 9886e834 */
4103 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
4104 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
4105 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
4106 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
4107 #else
4108 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
4109 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
4110 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
4111 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
4112 #endif
4113 DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
4114 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
4115 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
4116 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
4117 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
4118 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
4119 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
4120 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
4121 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
4122 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
4123 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
4124 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
4125 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
4126 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
4127 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
4128 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
4129 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
4130 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
4131 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
4132 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
4133 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
4134 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
4135 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
4136 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
4137 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
4138 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
4139 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
4140 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
4141 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
4142 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
4143 false),
4144 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
4145 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
4148 * From "Requirements for Implementing the Microsoft
4149 * Hypervisor Interface":
4150 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
4151 *
4152 * "Starting with Windows Server 2012 and Windows 8, if
4153 * CPUID.40000005.EAX contains a value of -1, Windows assumes that
4154 * the hypervisor imposes no specific limit to the number of VPs.
4155 * In this case, Windows Server 2012 guest VMs may use more than
4156 * 64 VPs, up to the maximum supported number of processors applicable
4157 * to the specific Windows version being used."
4158 */
4159 DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
4160 DEFINE_PROP_END_OF_LIST()
4161 };
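/* An illustrative Hyper-V enlightenment configuration for a Windows guest,
 * using the properties above (KVM only):
 *   -cpu host,hv-relaxed,hv-vapic,hv-time,hv-spinlocks=0x1fff
 * hv-spinlocks sets the spinlock retry count reported in the Hyper-V
 * recommendations CPUID leaf; the HYPERV_SPINLOCK_NEVER_RETRY default
 * means no retry hint is advertised.
 */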
4163 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
4164 {
4165 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4166 CPUClass *cc = CPU_CLASS(oc);
4167 DeviceClass *dc = DEVICE_CLASS(oc);
4169 xcc->parent_realize = dc->realize;
4170 xcc->parent_unrealize = dc->unrealize;
4171 dc->realize = x86_cpu_realizefn;
4172 dc->unrealize = x86_cpu_unrealizefn;
4173 dc->props = x86_cpu_properties;
4175 xcc->parent_reset = cc->reset;
4176 cc->reset = x86_cpu_reset;
4177 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
4179 cc->class_by_name = x86_cpu_class_by_name;
4180 cc->parse_features = x86_cpu_parse_featurestr;
4181 cc->has_work = x86_cpu_has_work;
4182 #ifdef CONFIG_TCG
4183 cc->do_interrupt = x86_cpu_do_interrupt;
4184 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
4185 #endif
4186 cc->dump_state = x86_cpu_dump_state;
4187 cc->get_crash_info = x86_cpu_get_crash_info;
4188 cc->set_pc = x86_cpu_set_pc;
4189 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
4190 cc->gdb_read_register = x86_cpu_gdb_read_register;
4191 cc->gdb_write_register = x86_cpu_gdb_write_register;
4192 cc->get_arch_id = x86_cpu_get_arch_id;
4193 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
4194 #ifdef CONFIG_USER_ONLY
4195 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
4196 #else
4197 cc->asidx_from_attrs = x86_asidx_from_attrs;
4198 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
4199 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
4200 cc->write_elf64_note = x86_cpu_write_elf64_note;
4201 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
4202 cc->write_elf32_note = x86_cpu_write_elf32_note;
4203 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
4204 cc->vmsd = &vmstate_x86_cpu;
4205 #endif
4206 cc->gdb_arch_name = x86_gdb_arch_name;
4207 #ifdef TARGET_X86_64
4208 cc->gdb_core_xml_file = "i386-64bit.xml";
4209 cc->gdb_num_core_regs = 57;
4210 #else
4211 cc->gdb_core_xml_file = "i386-32bit.xml";
4212 cc->gdb_num_core_regs = 41;
4213 #endif
4214 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
4215 cc->debug_excp_handler = breakpoint_handler;
4216 #endif
4217 cc->cpu_exec_enter = x86_cpu_exec_enter;
4218 cc->cpu_exec_exit = x86_cpu_exec_exit;
4220 dc->user_creatable = true;
4221 }
4223 static const TypeInfo x86_cpu_type_info = {
4224 .name = TYPE_X86_CPU,
4225 .parent = TYPE_CPU,
4226 .instance_size = sizeof(X86CPU),
4227 .instance_init = x86_cpu_initfn,
4228 .abstract = true,
4229 .class_size = sizeof(X86CPUClass),
4230 .class_init = x86_cpu_common_class_init,
4231 };
4234 /* "base" CPU model, used by query-cpu-model-expansion */
4235 static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
4236 {
4237 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4239 xcc->static_model = true;
4240 xcc->migration_safe = true;
4241 xcc->model_description = "base CPU model type with no features enabled";
4242 xcc->ordering = 8;
4243 }
4245 static const TypeInfo x86_base_cpu_type_info = {
4246 .name = X86_CPU_TYPE_NAME("base"),
4247 .parent = TYPE_X86_CPU,
4248 .class_init = x86_cpu_base_class_init,
4249 };
4251 static void x86_cpu_register_types(void)
4252 {
4253 int i;
4255 type_register_static(&x86_cpu_type_info);
4256 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
4257 x86_register_cpudef_type(&builtin_x86_defs[i]);
4258 }
4259 type_register_static(&max_x86_cpu_type_info);
4260 type_register_static(&x86_base_cpu_type_info);
4261 #ifdef CONFIG_KVM
4262 type_register_static(&host_x86_cpu_type_info);
4263 #endif
4264 }
4266 type_init(x86_cpu_register_types)