i386: Update comment about XSAVES on Skylake-Client
[qemu/kevin.git] / target / i386 / cpu.c
blobe702691795789b922ae0415aa41aac4ef5772f59
1 /*
2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
20 #include "qemu/cutils.h"
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/kvm.h"
25 #include "sysemu/cpus.h"
26 #include "kvm_i386.h"
28 #include "qemu/error-report.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
32 #include "qapi/qmp/types.h"
34 #include "qapi-types.h"
35 #include "qapi-visit.h"
36 #include "qapi/visitor.h"
37 #include "qom/qom-qobject.h"
38 #include "sysemu/arch_init.h"
40 #if defined(CONFIG_KVM)
41 #include <linux/kvm_para.h>
42 #endif
44 #include "sysemu/sysemu.h"
45 #include "hw/qdev-properties.h"
46 #include "hw/i386/topology.h"
47 #ifndef CONFIG_USER_ONLY
48 #include "exec/address-spaces.h"
49 #include "hw/hw.h"
50 #include "hw/xen/xen.h"
51 #include "hw/i386/apic_internal.h"
52 #endif
55 /* Cache topology CPUID constants: */
57 /* CPUID Leaf 2 Descriptors */
59 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
60 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
61 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
62 #define CPUID_2_L3_16MB_16WAY_64B 0x4d
65 /* CPUID Leaf 4 constants: */
67 /* EAX: */
68 #define CPUID_4_TYPE_DCACHE 1
69 #define CPUID_4_TYPE_ICACHE 2
70 #define CPUID_4_TYPE_UNIFIED 3
72 #define CPUID_4_LEVEL(l) ((l) << 5)
74 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
75 #define CPUID_4_FULLY_ASSOC (1 << 9)
77 /* EDX: */
78 #define CPUID_4_NO_INVD_SHARING (1 << 0)
79 #define CPUID_4_INCLUSIVE (1 << 1)
80 #define CPUID_4_COMPLEX_IDX (1 << 2)
#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006:
 * maps a ways-of-associativity count to the 4-bit encoding defined for
 * CPUID Fn8000_0006.  Every use of the argument is parenthesized so the
 * macro stays correct when called with a non-trivial expression.
 */
#define AMD_ENC_ASSOC(a) ((a) <=   1 ? (a)   : \
                          (a) ==   2 ? 0x2   : \
                          (a) ==   4 ? 0x4   : \
                          (a) ==   8 ? 0x6   : \
                          (a) ==  16 ? 0x8   : \
                          (a) ==  32 ? 0xA   : \
                          (a) ==  48 ? 0xB   : \
                          (a) ==  64 ? 0xC   : \
                          (a) ==  96 ? 0xD   : \
                          (a) == 128 ? 0xE   : \
                          (a) == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
99 /* Definitions of the hardcoded cache entries we expose: */
101 /* L1 data cache: */
102 #define L1D_LINE_SIZE 64
103 #define L1D_ASSOCIATIVITY 8
104 #define L1D_SETS 64
105 #define L1D_PARTITIONS 1
106 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
107 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
108 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
109 #define L1D_LINES_PER_TAG 1
110 #define L1D_SIZE_KB_AMD 64
111 #define L1D_ASSOCIATIVITY_AMD 2
113 /* L1 instruction cache: */
114 #define L1I_LINE_SIZE 64
115 #define L1I_ASSOCIATIVITY 8
116 #define L1I_SETS 64
117 #define L1I_PARTITIONS 1
118 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
119 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
120 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
121 #define L1I_LINES_PER_TAG 1
122 #define L1I_SIZE_KB_AMD 64
123 #define L1I_ASSOCIATIVITY_AMD 2
125 /* Level 2 unified cache: */
126 #define L2_LINE_SIZE 64
127 #define L2_ASSOCIATIVITY 16
128 #define L2_SETS 4096
129 #define L2_PARTITIONS 1
130 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
131 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
132 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
133 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
134 #define L2_LINES_PER_TAG 1
135 #define L2_SIZE_KB_AMD 512
137 /* Level 3 unified cache: */
138 #define L3_SIZE_KB 0 /* disabled */
139 #define L3_ASSOCIATIVITY 0 /* disabled */
140 #define L3_LINES_PER_TAG 0 /* disabled */
141 #define L3_LINE_SIZE 0 /* disabled */
142 #define L3_N_LINE_SIZE 64
143 #define L3_N_ASSOCIATIVITY 16
144 #define L3_N_SETS 16384
145 #define L3_N_PARTITIONS 1
146 #define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
147 #define L3_N_LINES_PER_TAG 1
148 #define L3_N_SIZE_KB_AMD 16384
150 /* TLB definitions: */
152 #define L1_DTLB_2M_ASSOC 1
153 #define L1_DTLB_2M_ENTRIES 255
154 #define L1_DTLB_4K_ASSOC 1
155 #define L1_DTLB_4K_ENTRIES 255
157 #define L1_ITLB_2M_ASSOC 1
158 #define L1_ITLB_2M_ENTRIES 255
159 #define L1_ITLB_4K_ASSOC 1
160 #define L1_ITLB_4K_ENTRIES 255
162 #define L2_DTLB_2M_ASSOC 0 /* disabled */
163 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
164 #define L2_DTLB_4K_ASSOC 4
165 #define L2_DTLB_4K_ENTRIES 512
167 #define L2_ITLB_2M_ASSOC 0 /* disabled */
168 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
169 #define L2_ITLB_4K_ASSOC 4
170 #define L2_ITLB_4K_ENTRIES 512
174 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
175 uint32_t vendor2, uint32_t vendor3)
177 int i;
178 for (i = 0; i < 4; i++) {
179 dst[i] = vendor1 >> (8 * i);
180 dst[i + 4] = vendor2 >> (8 * i);
181 dst[i + 8] = vendor3 >> (8 * i);
183 dst[CPUID_VENDOR_SZ] = '\0';
186 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
187 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
188 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
189 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
190 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
191 CPUID_PSE36 | CPUID_FXSR)
192 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
193 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
194 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
195 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
196 CPUID_PAE | CPUID_SEP | CPUID_APIC)
198 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
199 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
200 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
201 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
202 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
203 /* partly implemented:
204 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
205 /* missing:
206 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
207 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
208 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
209 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
210 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
211 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
212 /* missing:
213 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
214 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
215 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
216 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
217 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
219 #ifdef TARGET_X86_64
220 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
221 #else
222 #define TCG_EXT2_X86_64_FEATURES 0
223 #endif
225 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
226 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
227 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
228 TCG_EXT2_X86_64_FEATURES)
229 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
230 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
231 #define TCG_EXT4_FEATURES 0
232 #define TCG_SVM_FEATURES 0
233 #define TCG_KVM_FEATURES 0
234 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
235 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
236 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
237 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
238 CPUID_7_0_EBX_ERMS)
239 /* missing:
240 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
241 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
242 CPUID_7_0_EBX_RDSEED */
243 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE | \
244 CPUID_7_0_ECX_LA57)
245 #define TCG_7_0_EDX_FEATURES 0
246 #define TCG_APM_FEATURES 0
247 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
248 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
249 /* missing:
250 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
/* Description of one CPUID feature word: where it comes from, how its
 * bits are named, and which bits TCG can emulate / migration supports.
 */
typedef struct FeatureWordInfo {
    /* Feature flag names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[32];
    uint32_t cpuid_eax;   /* Input EAX for CPUID */
    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;   /* Input ECX value for CPUID */
    int cpuid_reg;        /* output register (R_* constant) */
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
    uint32_t migratable_flags; /* Feature flags known to be migratable */
} FeatureWordInfo;
268 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
269 [FEAT_1_EDX] = {
270 .feat_names = {
271 "fpu", "vme", "de", "pse",
272 "tsc", "msr", "pae", "mce",
273 "cx8", "apic", NULL, "sep",
274 "mtrr", "pge", "mca", "cmov",
275 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
276 NULL, "ds" /* Intel dts */, "acpi", "mmx",
277 "fxsr", "sse", "sse2", "ss",
278 "ht" /* Intel htt */, "tm", "ia64", "pbe",
280 .cpuid_eax = 1, .cpuid_reg = R_EDX,
281 .tcg_features = TCG_FEATURES,
283 [FEAT_1_ECX] = {
284 .feat_names = {
285 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
286 "ds-cpl", "vmx", "smx", "est",
287 "tm2", "ssse3", "cid", NULL,
288 "fma", "cx16", "xtpr", "pdcm",
289 NULL, "pcid", "dca", "sse4.1",
290 "sse4.2", "x2apic", "movbe", "popcnt",
291 "tsc-deadline", "aes", "xsave", "osxsave",
292 "avx", "f16c", "rdrand", "hypervisor",
294 .cpuid_eax = 1, .cpuid_reg = R_ECX,
295 .tcg_features = TCG_EXT_FEATURES,
297 /* Feature names that are already defined on feature_name[] but
298 * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
299 * names on feat_names below. They are copied automatically
300 * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
302 [FEAT_8000_0001_EDX] = {
303 .feat_names = {
304 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
305 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
306 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
307 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
308 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
309 "nx", NULL, "mmxext", NULL /* mmx */,
310 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
311 NULL, "lm", "3dnowext", "3dnow",
313 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
314 .tcg_features = TCG_EXT2_FEATURES,
316 [FEAT_8000_0001_ECX] = {
317 .feat_names = {
318 "lahf-lm", "cmp-legacy", "svm", "extapic",
319 "cr8legacy", "abm", "sse4a", "misalignsse",
320 "3dnowprefetch", "osvw", "ibs", "xop",
321 "skinit", "wdt", NULL, "lwp",
322 "fma4", "tce", NULL, "nodeid-msr",
323 NULL, "tbm", "topoext", "perfctr-core",
324 "perfctr-nb", NULL, NULL, NULL,
325 NULL, NULL, NULL, NULL,
327 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
328 .tcg_features = TCG_EXT3_FEATURES,
330 [FEAT_C000_0001_EDX] = {
331 .feat_names = {
332 NULL, NULL, "xstore", "xstore-en",
333 NULL, NULL, "xcrypt", "xcrypt-en",
334 "ace2", "ace2-en", "phe", "phe-en",
335 "pmm", "pmm-en", NULL, NULL,
336 NULL, NULL, NULL, NULL,
337 NULL, NULL, NULL, NULL,
338 NULL, NULL, NULL, NULL,
339 NULL, NULL, NULL, NULL,
341 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
342 .tcg_features = TCG_EXT4_FEATURES,
344 [FEAT_KVM] = {
345 .feat_names = {
346 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
347 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
348 NULL, NULL, NULL, NULL,
349 NULL, NULL, NULL, NULL,
350 NULL, NULL, NULL, NULL,
351 NULL, NULL, NULL, NULL,
352 "kvmclock-stable-bit", NULL, NULL, NULL,
353 NULL, NULL, NULL, NULL,
355 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
356 .tcg_features = TCG_KVM_FEATURES,
358 [FEAT_HYPERV_EAX] = {
359 .feat_names = {
360 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
361 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
362 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
363 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
364 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
365 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
366 NULL, NULL, NULL, NULL,
367 NULL, NULL, NULL, NULL,
368 NULL, NULL, NULL, NULL,
369 NULL, NULL, NULL, NULL,
370 NULL, NULL, NULL, NULL,
372 .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
374 [FEAT_HYPERV_EBX] = {
375 .feat_names = {
376 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
377 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
378 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
379 NULL /* hv_create_port */, NULL /* hv_connect_port */,
380 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
381 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
382 NULL, NULL,
383 NULL, NULL, NULL, NULL,
384 NULL, NULL, NULL, NULL,
385 NULL, NULL, NULL, NULL,
386 NULL, NULL, NULL, NULL,
388 .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
390 [FEAT_HYPERV_EDX] = {
391 .feat_names = {
392 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
393 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
394 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
395 NULL, NULL,
396 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
397 NULL, NULL, NULL, NULL,
398 NULL, NULL, NULL, NULL,
399 NULL, NULL, NULL, NULL,
400 NULL, NULL, NULL, NULL,
401 NULL, NULL, NULL, NULL,
403 .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
405 [FEAT_SVM] = {
406 .feat_names = {
407 "npt", "lbrv", "svm-lock", "nrip-save",
408 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
409 NULL, NULL, "pause-filter", NULL,
410 "pfthreshold", NULL, NULL, NULL,
411 NULL, NULL, NULL, NULL,
412 NULL, NULL, NULL, NULL,
413 NULL, NULL, NULL, NULL,
414 NULL, NULL, NULL, NULL,
416 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
417 .tcg_features = TCG_SVM_FEATURES,
419 [FEAT_7_0_EBX] = {
420 .feat_names = {
421 "fsgsbase", "tsc-adjust", NULL, "bmi1",
422 "hle", "avx2", NULL, "smep",
423 "bmi2", "erms", "invpcid", "rtm",
424 NULL, NULL, "mpx", NULL,
425 "avx512f", "avx512dq", "rdseed", "adx",
426 "smap", "avx512ifma", "pcommit", "clflushopt",
427 "clwb", NULL, "avx512pf", "avx512er",
428 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
430 .cpuid_eax = 7,
431 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
432 .cpuid_reg = R_EBX,
433 .tcg_features = TCG_7_0_EBX_FEATURES,
435 [FEAT_7_0_ECX] = {
436 .feat_names = {
437 NULL, "avx512vbmi", "umip", "pku",
438 "ospke", NULL, NULL, NULL,
439 NULL, NULL, NULL, NULL,
440 NULL, NULL, "avx512-vpopcntdq", NULL,
441 "la57", NULL, NULL, NULL,
442 NULL, NULL, "rdpid", NULL,
443 NULL, NULL, NULL, NULL,
444 NULL, NULL, NULL, NULL,
446 .cpuid_eax = 7,
447 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
448 .cpuid_reg = R_ECX,
449 .tcg_features = TCG_7_0_ECX_FEATURES,
451 [FEAT_7_0_EDX] = {
452 .feat_names = {
453 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
454 NULL, NULL, NULL, NULL,
455 NULL, NULL, NULL, NULL,
456 NULL, NULL, NULL, NULL,
457 NULL, NULL, NULL, NULL,
458 NULL, NULL, NULL, NULL,
459 NULL, NULL, NULL, NULL,
460 NULL, NULL, NULL, NULL,
462 .cpuid_eax = 7,
463 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
464 .cpuid_reg = R_EDX,
465 .tcg_features = TCG_7_0_EDX_FEATURES,
467 [FEAT_8000_0007_EDX] = {
468 .feat_names = {
469 NULL, NULL, NULL, NULL,
470 NULL, NULL, NULL, NULL,
471 "invtsc", NULL, NULL, NULL,
472 NULL, NULL, NULL, NULL,
473 NULL, NULL, NULL, NULL,
474 NULL, NULL, NULL, NULL,
475 NULL, NULL, NULL, NULL,
476 NULL, NULL, NULL, NULL,
478 .cpuid_eax = 0x80000007,
479 .cpuid_reg = R_EDX,
480 .tcg_features = TCG_APM_FEATURES,
481 .unmigratable_flags = CPUID_APM_INVTSC,
483 [FEAT_XSAVE] = {
484 .feat_names = {
485 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
486 NULL, NULL, NULL, NULL,
487 NULL, NULL, NULL, NULL,
488 NULL, NULL, NULL, NULL,
489 NULL, NULL, NULL, NULL,
490 NULL, NULL, NULL, NULL,
491 NULL, NULL, NULL, NULL,
492 NULL, NULL, NULL, NULL,
494 .cpuid_eax = 0xd,
495 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
496 .cpuid_reg = R_EAX,
497 .tcg_features = TCG_XSAVE_FEATURES,
499 [FEAT_6_EAX] = {
500 .feat_names = {
501 NULL, NULL, "arat", NULL,
502 NULL, NULL, NULL, NULL,
503 NULL, NULL, NULL, NULL,
504 NULL, NULL, NULL, NULL,
505 NULL, NULL, NULL, NULL,
506 NULL, NULL, NULL, NULL,
507 NULL, NULL, NULL, NULL,
508 NULL, NULL, NULL, NULL,
510 .cpuid_eax = 6, .cpuid_reg = R_EAX,
511 .tcg_features = TCG_6_EAX_FEATURES,
513 [FEAT_XSAVE_COMP_LO] = {
514 .cpuid_eax = 0xD,
515 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
516 .cpuid_reg = R_EAX,
517 .tcg_features = ~0U,
518 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
519 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
520 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
521 XSTATE_PKRU_MASK,
523 [FEAT_XSAVE_COMP_HI] = {
524 .cpuid_eax = 0xD,
525 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
526 .cpuid_reg = R_EDX,
527 .tcg_features = ~0U,
531 typedef struct X86RegisterInfo32 {
532 /* Name of register */
533 const char *name;
534 /* QAPI enum value register */
535 X86CPURegister32 qapi_enum;
536 } X86RegisterInfo32;
538 #define REGISTER(reg) \
539 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
540 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
541 REGISTER(EAX),
542 REGISTER(ECX),
543 REGISTER(EDX),
544 REGISTER(EBX),
545 REGISTER(ESP),
546 REGISTER(EBP),
547 REGISTER(ESI),
548 REGISTER(EDI),
550 #undef REGISTER
/* One XSAVE state component: the CPUID feature that enables it and its
 * location (offset/size) inside the XSAVE area.
 */
typedef struct ExtSaveArea {
    uint32_t feature, bits;
    uint32_t offset, size;
} ExtSaveArea;
557 static const ExtSaveArea x86_ext_save_areas[] = {
558 [XSTATE_FP_BIT] = {
559 /* x87 FP state component is always enabled if XSAVE is supported */
560 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
561 /* x87 state is in the legacy region of the XSAVE area */
562 .offset = 0,
563 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
565 [XSTATE_SSE_BIT] = {
566 /* SSE state component is always enabled if XSAVE is supported */
567 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
568 /* SSE state is in the legacy region of the XSAVE area */
569 .offset = 0,
570 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
572 [XSTATE_YMM_BIT] =
573 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
574 .offset = offsetof(X86XSaveArea, avx_state),
575 .size = sizeof(XSaveAVX) },
576 [XSTATE_BNDREGS_BIT] =
577 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
578 .offset = offsetof(X86XSaveArea, bndreg_state),
579 .size = sizeof(XSaveBNDREG) },
580 [XSTATE_BNDCSR_BIT] =
581 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
582 .offset = offsetof(X86XSaveArea, bndcsr_state),
583 .size = sizeof(XSaveBNDCSR) },
584 [XSTATE_OPMASK_BIT] =
585 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
586 .offset = offsetof(X86XSaveArea, opmask_state),
587 .size = sizeof(XSaveOpmask) },
588 [XSTATE_ZMM_Hi256_BIT] =
589 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
590 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
591 .size = sizeof(XSaveZMM_Hi256) },
592 [XSTATE_Hi16_ZMM_BIT] =
593 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
594 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
595 .size = sizeof(XSaveHi16_ZMM) },
596 [XSTATE_PKRU_BIT] =
597 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
598 .offset = offsetof(X86XSaveArea, pkru_state),
599 .size = sizeof(XSavePKRU) },
602 static uint32_t xsave_area_size(uint64_t mask)
604 int i;
605 uint64_t ret = 0;
607 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
608 const ExtSaveArea *esa = &x86_ext_save_areas[i];
609 if ((mask >> i) & 1) {
610 ret = MAX(ret, esa->offset + esa->size);
613 return ret;
616 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
618 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
619 cpu->env.features[FEAT_XSAVE_COMP_LO];
622 const char *get_register_name_32(unsigned int reg)
624 if (reg >= CPU_NB_REGS32) {
625 return NULL;
627 return x86_reg_info_32[reg].name;
631 * Returns the set of feature flags that are supported and migratable by
632 * QEMU, for a given FeatureWord.
634 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
636 FeatureWordInfo *wi = &feature_word_info[w];
637 uint32_t r = 0;
638 int i;
640 for (i = 0; i < 32; i++) {
641 uint32_t f = 1U << i;
643 /* If the feature name is known, it is implicitly considered migratable,
644 * unless it is explicitly set in unmigratable_flags */
645 if ((wi->migratable_flags & f) ||
646 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
647 r |= f;
650 return r;
653 void host_cpuid(uint32_t function, uint32_t count,
654 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
656 uint32_t vec[4];
658 #ifdef __x86_64__
659 asm volatile("cpuid"
660 : "=a"(vec[0]), "=b"(vec[1]),
661 "=c"(vec[2]), "=d"(vec[3])
662 : "0"(function), "c"(count) : "cc");
663 #elif defined(__i386__)
664 asm volatile("pusha \n\t"
665 "cpuid \n\t"
666 "mov %%eax, 0(%2) \n\t"
667 "mov %%ebx, 4(%2) \n\t"
668 "mov %%ecx, 8(%2) \n\t"
669 "mov %%edx, 12(%2) \n\t"
670 "popa"
671 : : "a"(function), "c"(count), "S"(vec)
672 : "memory", "cc");
673 #else
674 abort();
675 #endif
677 if (eax)
678 *eax = vec[0];
679 if (ebx)
680 *ebx = vec[1];
681 if (ecx)
682 *ecx = vec[2];
683 if (edx)
684 *edx = vec[3];
687 void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
689 uint32_t eax, ebx, ecx, edx;
691 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
692 x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
694 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
695 if (family) {
696 *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
698 if (model) {
699 *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
701 if (stepping) {
702 *stepping = eax & 0x0F;
706 /* CPU class name definitions: */
708 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
709 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
711 /* Return type name for a given CPU model name
712 * Caller is responsible for freeing the returned string.
714 static char *x86_cpu_type_name(const char *model_name)
716 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
719 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
721 ObjectClass *oc;
722 char *typename;
724 if (cpu_model == NULL) {
725 return NULL;
728 typename = x86_cpu_type_name(cpu_model);
729 oc = object_class_by_name(typename);
730 g_free(typename);
731 return oc;
734 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
736 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
737 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
738 return g_strndup(class_name,
739 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
742 struct X86CPUDefinition {
743 const char *name;
744 uint32_t level;
745 uint32_t xlevel;
746 /* vendor is zero-terminated, 12 character ASCII string */
747 char vendor[CPUID_VENDOR_SZ + 1];
748 int family;
749 int model;
750 int stepping;
751 FeatureWordArray features;
752 char model_id[48];
755 static X86CPUDefinition builtin_x86_defs[] = {
757 .name = "qemu64",
758 .level = 0xd,
759 .vendor = CPUID_VENDOR_AMD,
760 .family = 6,
761 .model = 6,
762 .stepping = 3,
763 .features[FEAT_1_EDX] =
764 PPRO_FEATURES |
765 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
766 CPUID_PSE36,
767 .features[FEAT_1_ECX] =
768 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
769 .features[FEAT_8000_0001_EDX] =
770 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
771 .features[FEAT_8000_0001_ECX] =
772 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
773 .xlevel = 0x8000000A,
774 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
777 .name = "phenom",
778 .level = 5,
779 .vendor = CPUID_VENDOR_AMD,
780 .family = 16,
781 .model = 2,
782 .stepping = 3,
783 /* Missing: CPUID_HT */
784 .features[FEAT_1_EDX] =
785 PPRO_FEATURES |
786 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
787 CPUID_PSE36 | CPUID_VME,
788 .features[FEAT_1_ECX] =
789 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
790 CPUID_EXT_POPCNT,
791 .features[FEAT_8000_0001_EDX] =
792 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
793 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
794 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
795 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
796 CPUID_EXT3_CR8LEG,
797 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
798 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
799 .features[FEAT_8000_0001_ECX] =
800 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
801 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
802 /* Missing: CPUID_SVM_LBRV */
803 .features[FEAT_SVM] =
804 CPUID_SVM_NPT,
805 .xlevel = 0x8000001A,
806 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
809 .name = "core2duo",
810 .level = 10,
811 .vendor = CPUID_VENDOR_INTEL,
812 .family = 6,
813 .model = 15,
814 .stepping = 11,
815 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
816 .features[FEAT_1_EDX] =
817 PPRO_FEATURES |
818 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
819 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
820 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
821 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
822 .features[FEAT_1_ECX] =
823 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
824 CPUID_EXT_CX16,
825 .features[FEAT_8000_0001_EDX] =
826 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
827 .features[FEAT_8000_0001_ECX] =
828 CPUID_EXT3_LAHF_LM,
829 .xlevel = 0x80000008,
830 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
833 .name = "kvm64",
834 .level = 0xd,
835 .vendor = CPUID_VENDOR_INTEL,
836 .family = 15,
837 .model = 6,
838 .stepping = 1,
839 /* Missing: CPUID_HT */
840 .features[FEAT_1_EDX] =
841 PPRO_FEATURES | CPUID_VME |
842 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
843 CPUID_PSE36,
844 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
845 .features[FEAT_1_ECX] =
846 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
847 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
848 .features[FEAT_8000_0001_EDX] =
849 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
850 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
851 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
852 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
853 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
854 .features[FEAT_8000_0001_ECX] =
856 .xlevel = 0x80000008,
857 .model_id = "Common KVM processor"
860 .name = "qemu32",
861 .level = 4,
862 .vendor = CPUID_VENDOR_INTEL,
863 .family = 6,
864 .model = 6,
865 .stepping = 3,
866 .features[FEAT_1_EDX] =
867 PPRO_FEATURES,
868 .features[FEAT_1_ECX] =
869 CPUID_EXT_SSE3,
870 .xlevel = 0x80000004,
871 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
874 .name = "kvm32",
875 .level = 5,
876 .vendor = CPUID_VENDOR_INTEL,
877 .family = 15,
878 .model = 6,
879 .stepping = 1,
880 .features[FEAT_1_EDX] =
881 PPRO_FEATURES | CPUID_VME |
882 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
883 .features[FEAT_1_ECX] =
884 CPUID_EXT_SSE3,
885 .features[FEAT_8000_0001_ECX] =
887 .xlevel = 0x80000008,
888 .model_id = "Common 32-bit KVM processor"
891 .name = "coreduo",
892 .level = 10,
893 .vendor = CPUID_VENDOR_INTEL,
894 .family = 6,
895 .model = 14,
896 .stepping = 8,
897 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
898 .features[FEAT_1_EDX] =
899 PPRO_FEATURES | CPUID_VME |
900 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
901 CPUID_SS,
902 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
903 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
904 .features[FEAT_1_ECX] =
905 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
906 .features[FEAT_8000_0001_EDX] =
907 CPUID_EXT2_NX,
908 .xlevel = 0x80000008,
909 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
912 .name = "486",
913 .level = 1,
914 .vendor = CPUID_VENDOR_INTEL,
915 .family = 4,
916 .model = 8,
917 .stepping = 0,
918 .features[FEAT_1_EDX] =
919 I486_FEATURES,
920 .xlevel = 0,
923 .name = "pentium",
924 .level = 1,
925 .vendor = CPUID_VENDOR_INTEL,
926 .family = 5,
927 .model = 4,
928 .stepping = 3,
929 .features[FEAT_1_EDX] =
930 PENTIUM_FEATURES,
931 .xlevel = 0,
934 .name = "pentium2",
935 .level = 2,
936 .vendor = CPUID_VENDOR_INTEL,
937 .family = 6,
938 .model = 5,
939 .stepping = 2,
940 .features[FEAT_1_EDX] =
941 PENTIUM2_FEATURES,
942 .xlevel = 0,
945 .name = "pentium3",
946 .level = 3,
947 .vendor = CPUID_VENDOR_INTEL,
948 .family = 6,
949 .model = 7,
950 .stepping = 3,
951 .features[FEAT_1_EDX] =
952 PENTIUM3_FEATURES,
953 .xlevel = 0,
956 .name = "athlon",
957 .level = 2,
958 .vendor = CPUID_VENDOR_AMD,
959 .family = 6,
960 .model = 2,
961 .stepping = 3,
962 .features[FEAT_1_EDX] =
963 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
964 CPUID_MCA,
965 .features[FEAT_8000_0001_EDX] =
966 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
967 .xlevel = 0x80000008,
968 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
971 .name = "n270",
972 .level = 10,
973 .vendor = CPUID_VENDOR_INTEL,
974 .family = 6,
975 .model = 28,
976 .stepping = 2,
977 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
978 .features[FEAT_1_EDX] =
979 PPRO_FEATURES |
980 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
981 CPUID_ACPI | CPUID_SS,
982 /* Some CPUs got no CPUID_SEP */
983 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
984 * CPUID_EXT_XTPR */
985 .features[FEAT_1_ECX] =
986 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
987 CPUID_EXT_MOVBE,
988 .features[FEAT_8000_0001_EDX] =
989 CPUID_EXT2_NX,
990 .features[FEAT_8000_0001_ECX] =
991 CPUID_EXT3_LAHF_LM,
992 .xlevel = 0x80000008,
993 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
996 .name = "Conroe",
997 .level = 10,
998 .vendor = CPUID_VENDOR_INTEL,
999 .family = 6,
1000 .model = 15,
1001 .stepping = 3,
1002 .features[FEAT_1_EDX] =
1003 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1004 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1005 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1006 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1007 CPUID_DE | CPUID_FP87,
1008 .features[FEAT_1_ECX] =
1009 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1010 .features[FEAT_8000_0001_EDX] =
1011 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1012 .features[FEAT_8000_0001_ECX] =
1013 CPUID_EXT3_LAHF_LM,
1014 .xlevel = 0x80000008,
1015 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1018 .name = "Penryn",
1019 .level = 10,
1020 .vendor = CPUID_VENDOR_INTEL,
1021 .family = 6,
1022 .model = 23,
1023 .stepping = 3,
1024 .features[FEAT_1_EDX] =
1025 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1026 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1027 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1028 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1029 CPUID_DE | CPUID_FP87,
1030 .features[FEAT_1_ECX] =
1031 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1032 CPUID_EXT_SSE3,
1033 .features[FEAT_8000_0001_EDX] =
1034 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1035 .features[FEAT_8000_0001_ECX] =
1036 CPUID_EXT3_LAHF_LM,
1037 .xlevel = 0x80000008,
1038 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1041 .name = "Nehalem",
1042 .level = 11,
1043 .vendor = CPUID_VENDOR_INTEL,
1044 .family = 6,
1045 .model = 26,
1046 .stepping = 3,
1047 .features[FEAT_1_EDX] =
1048 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1049 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1050 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1051 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1052 CPUID_DE | CPUID_FP87,
1053 .features[FEAT_1_ECX] =
1054 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1055 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1056 .features[FEAT_8000_0001_EDX] =
1057 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1058 .features[FEAT_8000_0001_ECX] =
1059 CPUID_EXT3_LAHF_LM,
1060 .xlevel = 0x80000008,
1061 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1064 .name = "Westmere",
1065 .level = 11,
1066 .vendor = CPUID_VENDOR_INTEL,
1067 .family = 6,
1068 .model = 44,
1069 .stepping = 1,
1070 .features[FEAT_1_EDX] =
1071 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1072 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1073 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1074 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1075 CPUID_DE | CPUID_FP87,
1076 .features[FEAT_1_ECX] =
1077 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1078 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1079 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1080 .features[FEAT_8000_0001_EDX] =
1081 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1082 .features[FEAT_8000_0001_ECX] =
1083 CPUID_EXT3_LAHF_LM,
1084 .features[FEAT_6_EAX] =
1085 CPUID_6_EAX_ARAT,
1086 .xlevel = 0x80000008,
1087 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1090 .name = "SandyBridge",
1091 .level = 0xd,
1092 .vendor = CPUID_VENDOR_INTEL,
1093 .family = 6,
1094 .model = 42,
1095 .stepping = 1,
1096 .features[FEAT_1_EDX] =
1097 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1098 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1099 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1100 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1101 CPUID_DE | CPUID_FP87,
1102 .features[FEAT_1_ECX] =
1103 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1104 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1105 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1106 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1107 CPUID_EXT_SSE3,
1108 .features[FEAT_8000_0001_EDX] =
1109 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1110 CPUID_EXT2_SYSCALL,
1111 .features[FEAT_8000_0001_ECX] =
1112 CPUID_EXT3_LAHF_LM,
1113 .features[FEAT_XSAVE] =
1114 CPUID_XSAVE_XSAVEOPT,
1115 .features[FEAT_6_EAX] =
1116 CPUID_6_EAX_ARAT,
1117 .xlevel = 0x80000008,
1118 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1121 .name = "IvyBridge",
1122 .level = 0xd,
1123 .vendor = CPUID_VENDOR_INTEL,
1124 .family = 6,
1125 .model = 58,
1126 .stepping = 9,
1127 .features[FEAT_1_EDX] =
1128 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1129 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1130 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1131 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1132 CPUID_DE | CPUID_FP87,
1133 .features[FEAT_1_ECX] =
1134 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1135 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1136 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1137 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1138 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1139 .features[FEAT_7_0_EBX] =
1140 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1141 CPUID_7_0_EBX_ERMS,
1142 .features[FEAT_8000_0001_EDX] =
1143 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1144 CPUID_EXT2_SYSCALL,
1145 .features[FEAT_8000_0001_ECX] =
1146 CPUID_EXT3_LAHF_LM,
1147 .features[FEAT_XSAVE] =
1148 CPUID_XSAVE_XSAVEOPT,
1149 .features[FEAT_6_EAX] =
1150 CPUID_6_EAX_ARAT,
1151 .xlevel = 0x80000008,
1152 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1155 .name = "Haswell-noTSX",
1156 .level = 0xd,
1157 .vendor = CPUID_VENDOR_INTEL,
1158 .family = 6,
1159 .model = 60,
1160 .stepping = 1,
1161 .features[FEAT_1_EDX] =
1162 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1163 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1164 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1165 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1166 CPUID_DE | CPUID_FP87,
1167 .features[FEAT_1_ECX] =
1168 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1169 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1170 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1171 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1172 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1173 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1174 .features[FEAT_8000_0001_EDX] =
1175 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1176 CPUID_EXT2_SYSCALL,
1177 .features[FEAT_8000_0001_ECX] =
1178 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1179 .features[FEAT_7_0_EBX] =
1180 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1181 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1182 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1183 .features[FEAT_XSAVE] =
1184 CPUID_XSAVE_XSAVEOPT,
1185 .features[FEAT_6_EAX] =
1186 CPUID_6_EAX_ARAT,
1187 .xlevel = 0x80000008,
1188 .model_id = "Intel Core Processor (Haswell, no TSX)",
1189 }, {
1190 .name = "Haswell",
1191 .level = 0xd,
1192 .vendor = CPUID_VENDOR_INTEL,
1193 .family = 6,
1194 .model = 60,
1195 .stepping = 4,
1196 .features[FEAT_1_EDX] =
1197 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1198 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1199 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1200 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1201 CPUID_DE | CPUID_FP87,
1202 .features[FEAT_1_ECX] =
1203 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1204 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1205 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1206 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1207 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1208 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1209 .features[FEAT_8000_0001_EDX] =
1210 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1211 CPUID_EXT2_SYSCALL,
1212 .features[FEAT_8000_0001_ECX] =
1213 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1214 .features[FEAT_7_0_EBX] =
1215 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1216 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1217 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1218 CPUID_7_0_EBX_RTM,
1219 .features[FEAT_XSAVE] =
1220 CPUID_XSAVE_XSAVEOPT,
1221 .features[FEAT_6_EAX] =
1222 CPUID_6_EAX_ARAT,
1223 .xlevel = 0x80000008,
1224 .model_id = "Intel Core Processor (Haswell)",
1227 .name = "Broadwell-noTSX",
1228 .level = 0xd,
1229 .vendor = CPUID_VENDOR_INTEL,
1230 .family = 6,
1231 .model = 61,
1232 .stepping = 2,
1233 .features[FEAT_1_EDX] =
1234 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1235 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1236 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1237 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1238 CPUID_DE | CPUID_FP87,
1239 .features[FEAT_1_ECX] =
1240 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1241 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1242 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1243 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1244 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1245 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1246 .features[FEAT_8000_0001_EDX] =
1247 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1248 CPUID_EXT2_SYSCALL,
1249 .features[FEAT_8000_0001_ECX] =
1250 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1251 .features[FEAT_7_0_EBX] =
1252 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1253 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1254 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1255 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1256 CPUID_7_0_EBX_SMAP,
1257 .features[FEAT_XSAVE] =
1258 CPUID_XSAVE_XSAVEOPT,
1259 .features[FEAT_6_EAX] =
1260 CPUID_6_EAX_ARAT,
1261 .xlevel = 0x80000008,
1262 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1265 .name = "Broadwell",
1266 .level = 0xd,
1267 .vendor = CPUID_VENDOR_INTEL,
1268 .family = 6,
1269 .model = 61,
1270 .stepping = 2,
1271 .features[FEAT_1_EDX] =
1272 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1273 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1274 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1275 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1276 CPUID_DE | CPUID_FP87,
1277 .features[FEAT_1_ECX] =
1278 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1279 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1280 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1281 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1282 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1283 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1284 .features[FEAT_8000_0001_EDX] =
1285 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1286 CPUID_EXT2_SYSCALL,
1287 .features[FEAT_8000_0001_ECX] =
1288 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1289 .features[FEAT_7_0_EBX] =
1290 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1291 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1292 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1293 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1294 CPUID_7_0_EBX_SMAP,
1295 .features[FEAT_XSAVE] =
1296 CPUID_XSAVE_XSAVEOPT,
1297 .features[FEAT_6_EAX] =
1298 CPUID_6_EAX_ARAT,
1299 .xlevel = 0x80000008,
1300 .model_id = "Intel Core Processor (Broadwell)",
1303 .name = "Skylake-Client",
1304 .level = 0xd,
1305 .vendor = CPUID_VENDOR_INTEL,
1306 .family = 6,
1307 .model = 94,
1308 .stepping = 3,
1309 .features[FEAT_1_EDX] =
1310 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1311 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1312 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1313 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1314 CPUID_DE | CPUID_FP87,
1315 .features[FEAT_1_ECX] =
1316 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1317 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1318 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1319 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1320 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1321 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1322 .features[FEAT_8000_0001_EDX] =
1323 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1324 CPUID_EXT2_SYSCALL,
1325 .features[FEAT_8000_0001_ECX] =
1326 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1327 .features[FEAT_7_0_EBX] =
1328 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1329 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1330 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1331 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1332 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
        /* Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component,
         * and the only one defined in Skylake (processor tracing)
         * probably will block migration anyway.
         */
1339 .features[FEAT_XSAVE] =
1340 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1341 CPUID_XSAVE_XGETBV1,
1342 .features[FEAT_6_EAX] =
1343 CPUID_6_EAX_ARAT,
1344 .xlevel = 0x80000008,
1345 .model_id = "Intel Core Processor (Skylake)",
1348 .name = "Opteron_G1",
1349 .level = 5,
1350 .vendor = CPUID_VENDOR_AMD,
1351 .family = 15,
1352 .model = 6,
1353 .stepping = 1,
1354 .features[FEAT_1_EDX] =
1355 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1356 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1357 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1358 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1359 CPUID_DE | CPUID_FP87,
1360 .features[FEAT_1_ECX] =
1361 CPUID_EXT_SSE3,
1362 .features[FEAT_8000_0001_EDX] =
1363 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1364 .xlevel = 0x80000008,
1365 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1368 .name = "Opteron_G2",
1369 .level = 5,
1370 .vendor = CPUID_VENDOR_AMD,
1371 .family = 15,
1372 .model = 6,
1373 .stepping = 1,
1374 .features[FEAT_1_EDX] =
1375 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1376 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1377 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1378 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1379 CPUID_DE | CPUID_FP87,
1380 .features[FEAT_1_ECX] =
1381 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1382 /* Missing: CPUID_EXT2_RDTSCP */
1383 .features[FEAT_8000_0001_EDX] =
1384 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1385 .features[FEAT_8000_0001_ECX] =
1386 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1387 .xlevel = 0x80000008,
1388 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1391 .name = "Opteron_G3",
1392 .level = 5,
1393 .vendor = CPUID_VENDOR_AMD,
1394 .family = 16,
1395 .model = 2,
1396 .stepping = 3,
1397 .features[FEAT_1_EDX] =
1398 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1399 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1400 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1401 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1402 CPUID_DE | CPUID_FP87,
1403 .features[FEAT_1_ECX] =
1404 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1405 CPUID_EXT_SSE3,
1406 /* Missing: CPUID_EXT2_RDTSCP */
1407 .features[FEAT_8000_0001_EDX] =
1408 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1409 .features[FEAT_8000_0001_ECX] =
1410 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1411 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1412 .xlevel = 0x80000008,
1413 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1416 .name = "Opteron_G4",
1417 .level = 0xd,
1418 .vendor = CPUID_VENDOR_AMD,
1419 .family = 21,
1420 .model = 1,
1421 .stepping = 2,
1422 .features[FEAT_1_EDX] =
1423 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1424 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1425 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1426 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1427 CPUID_DE | CPUID_FP87,
1428 .features[FEAT_1_ECX] =
1429 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1430 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1431 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1432 CPUID_EXT_SSE3,
1433 /* Missing: CPUID_EXT2_RDTSCP */
1434 .features[FEAT_8000_0001_EDX] =
1435 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
1436 CPUID_EXT2_SYSCALL,
1437 .features[FEAT_8000_0001_ECX] =
1438 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1439 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1440 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1441 CPUID_EXT3_LAHF_LM,
1442 /* no xsaveopt! */
1443 .xlevel = 0x8000001A,
1444 .model_id = "AMD Opteron 62xx class CPU",
1447 .name = "Opteron_G5",
1448 .level = 0xd,
1449 .vendor = CPUID_VENDOR_AMD,
1450 .family = 21,
1451 .model = 2,
1452 .stepping = 0,
1453 .features[FEAT_1_EDX] =
1454 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1455 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1456 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1457 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1458 CPUID_DE | CPUID_FP87,
1459 .features[FEAT_1_ECX] =
1460 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1461 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1462 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1463 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1464 /* Missing: CPUID_EXT2_RDTSCP */
1465 .features[FEAT_8000_0001_EDX] =
1466 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
1467 CPUID_EXT2_SYSCALL,
1468 .features[FEAT_8000_0001_ECX] =
1469 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1470 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1471 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1472 CPUID_EXT3_LAHF_LM,
1473 /* no xsaveopt! */
1474 .xlevel = 0x8000001A,
1475 .model_id = "AMD Opteron 63xx class CPU",
/* Simple property name/value string pair, used for the default-property
 * tables below (kvm_default_props, tcg_default_props).
 */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;
1483 /* KVM-specific features that are automatically added/removed
1484 * from all CPU models when KVM is enabled.
1486 static PropValue kvm_default_props[] = {
1487 { "kvmclock", "on" },
1488 { "kvm-nopiodelay", "on" },
1489 { "kvm-asyncpf", "on" },
1490 { "kvm-steal-time", "on" },
1491 { "kvm-pv-eoi", "on" },
1492 { "kvmclock-stable-bit", "on" },
1493 { "x2apic", "on" },
1494 { "acpi", "off" },
1495 { "monitor", "off" },
1496 { "svm", "off" },
1497 { NULL, NULL },
1500 /* TCG-specific defaults that override all CPU models when using TCG
1502 static PropValue tcg_default_props[] = {
1503 { "vme", "off" },
1504 { NULL, NULL },
1508 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1510 PropValue *pv;
1511 for (pv = kvm_default_props; pv->prop; pv++) {
1512 if (!strcmp(pv->prop, prop)) {
1513 pv->value = value;
1514 break;
1518 /* It is valid to call this function only for properties that
1519 * are already present in the kvm_default_props table.
1521 assert(pv->prop);
1524 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1525 bool migratable_only);
1527 static bool lmce_supported(void)
1529 uint64_t mce_cap = 0;
1531 #ifdef CONFIG_KVM
1532 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
1533 return false;
1535 #endif
1537 return !!(mce_cap & MCG_LMCE_P);
/* Fill @str (at least 48 bytes) with the host CPU model string, read from
 * CPUID leaves 0x80000002..0x80000004 (16 bytes per leaf, EAX..EDX order).
 * Always returns 0.
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
    int i;

    for (i = 0; i < 3; i++) {
        host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
        memcpy(str + i * 16 +  0, &eax, 4);
        memcpy(str + i * 16 +  4, &ebx, 4);
        memcpy(str + i * 16 +  8, &ecx, 4);
        memcpy(str + i * 16 + 12, &edx, 4);
    }
    return 0;
}
1555 static Property max_x86_cpu_properties[] = {
1556 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1557 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
1558 DEFINE_PROP_END_OF_LIST()
1561 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
1563 DeviceClass *dc = DEVICE_CLASS(oc);
1564 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1566 xcc->ordering = 9;
1568 xcc->model_description =
1569 "Enables all features supported by the accelerator in the current host";
1571 dc->props = max_x86_cpu_properties;
1574 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
1576 static void max_x86_cpu_initfn(Object *obj)
1578 X86CPU *cpu = X86_CPU(obj);
1579 CPUX86State *env = &cpu->env;
1580 KVMState *s = kvm_state;
1582 /* We can't fill the features array here because we don't know yet if
1583 * "migratable" is true or false.
1585 cpu->max_features = true;
1587 if (kvm_enabled()) {
1588 X86CPUDefinition host_cpudef = { };
1589 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1591 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1592 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1594 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1595 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1596 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1597 host_cpudef.stepping = eax & 0x0F;
1599 cpu_x86_fill_model_id(host_cpudef.model_id);
1601 x86_cpu_load_def(cpu, &host_cpudef, &error_abort);
1603 env->cpuid_min_level =
1604 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1605 env->cpuid_min_xlevel =
1606 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1607 env->cpuid_min_xlevel2 =
1608 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1610 if (lmce_supported()) {
1611 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
1613 } else {
1614 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
1615 "vendor", &error_abort);
1616 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
1617 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
1618 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
1619 object_property_set_str(OBJECT(cpu),
1620 "QEMU TCG CPU version " QEMU_HW_VERSION,
1621 "model-id", &error_abort);
1624 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1627 static const TypeInfo max_x86_cpu_type_info = {
1628 .name = X86_CPU_TYPE_NAME("max"),
1629 .parent = TYPE_X86_CPU,
1630 .instance_init = max_x86_cpu_initfn,
1631 .class_init = max_x86_cpu_class_init,
#ifdef CONFIG_KVM

/* Class init for the "host" CPU model: like "max", but KVM-only. */
static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->kvm_required = true;
    /* Sort before "max" (ordering 9) in CPU model listings */
    xcc->ordering = 8;

    xcc->model_description =
        "KVM processor with all supported host features "
        "(only available in KVM mode)";
}

/* "host" inherits everything from "max" except the class init above. */
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = X86_CPU_TYPE_NAME("max"),
    .class_init = host_x86_cpu_class_init,
};

#endif
1656 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1658 FeatureWordInfo *f = &feature_word_info[w];
1659 int i;
1661 for (i = 0; i < 32; ++i) {
1662 if ((1UL << i) & mask) {
1663 const char *reg = get_register_name_32(f->cpuid_reg);
1664 assert(reg);
1665 fprintf(stderr, "warning: %s doesn't support requested feature: "
1666 "CPUID.%02XH:%s%s%s [bit %d]\n",
1667 kvm_enabled() ? "host" : "TCG",
1668 f->cpuid_eax, reg,
1669 f->feat_names[i] ? "." : "",
1670 f->feat_names[i] ? f->feat_names[i] : "", i);
1675 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1676 const char *name, void *opaque,
1677 Error **errp)
1679 X86CPU *cpu = X86_CPU(obj);
1680 CPUX86State *env = &cpu->env;
1681 int64_t value;
1683 value = (env->cpuid_version >> 8) & 0xf;
1684 if (value == 0xf) {
1685 value += (env->cpuid_version >> 20) & 0xff;
1687 visit_type_int(v, name, &value, errp);
1690 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1691 const char *name, void *opaque,
1692 Error **errp)
1694 X86CPU *cpu = X86_CPU(obj);
1695 CPUX86State *env = &cpu->env;
1696 const int64_t min = 0;
1697 const int64_t max = 0xff + 0xf;
1698 Error *local_err = NULL;
1699 int64_t value;
1701 visit_type_int(v, name, &value, &local_err);
1702 if (local_err) {
1703 error_propagate(errp, local_err);
1704 return;
1706 if (value < min || value > max) {
1707 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1708 name ? name : "null", value, min, max);
1709 return;
1712 env->cpuid_version &= ~0xff00f00;
1713 if (value > 0x0f) {
1714 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1715 } else {
1716 env->cpuid_version |= value << 8;
1720 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1721 const char *name, void *opaque,
1722 Error **errp)
1724 X86CPU *cpu = X86_CPU(obj);
1725 CPUX86State *env = &cpu->env;
1726 int64_t value;
1728 value = (env->cpuid_version >> 4) & 0xf;
1729 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1730 visit_type_int(v, name, &value, errp);
1733 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1734 const char *name, void *opaque,
1735 Error **errp)
1737 X86CPU *cpu = X86_CPU(obj);
1738 CPUX86State *env = &cpu->env;
1739 const int64_t min = 0;
1740 const int64_t max = 0xff;
1741 Error *local_err = NULL;
1742 int64_t value;
1744 visit_type_int(v, name, &value, &local_err);
1745 if (local_err) {
1746 error_propagate(errp, local_err);
1747 return;
1749 if (value < min || value > max) {
1750 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1751 name ? name : "null", value, min, max);
1752 return;
1755 env->cpuid_version &= ~0xf00f0;
1756 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1759 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1760 const char *name, void *opaque,
1761 Error **errp)
1763 X86CPU *cpu = X86_CPU(obj);
1764 CPUX86State *env = &cpu->env;
1765 int64_t value;
1767 value = env->cpuid_version & 0xf;
1768 visit_type_int(v, name, &value, errp);
1771 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1772 const char *name, void *opaque,
1773 Error **errp)
1775 X86CPU *cpu = X86_CPU(obj);
1776 CPUX86State *env = &cpu->env;
1777 const int64_t min = 0;
1778 const int64_t max = 0xf;
1779 Error *local_err = NULL;
1780 int64_t value;
1782 visit_type_int(v, name, &value, &local_err);
1783 if (local_err) {
1784 error_propagate(errp, local_err);
1785 return;
1787 if (value < min || value > max) {
1788 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1789 name ? name : "null", value, min, max);
1790 return;
1793 env->cpuid_version &= ~0xf;
1794 env->cpuid_version |= value & 0xf;
1797 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1799 X86CPU *cpu = X86_CPU(obj);
1800 CPUX86State *env = &cpu->env;
1801 char *value;
1803 value = g_malloc(CPUID_VENDOR_SZ + 1);
1804 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1805 env->cpuid_vendor3);
1806 return value;
1809 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1810 Error **errp)
1812 X86CPU *cpu = X86_CPU(obj);
1813 CPUX86State *env = &cpu->env;
1814 int i;
1816 if (strlen(value) != CPUID_VENDOR_SZ) {
1817 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1818 return;
1821 env->cpuid_vendor1 = 0;
1822 env->cpuid_vendor2 = 0;
1823 env->cpuid_vendor3 = 0;
1824 for (i = 0; i < 4; i++) {
1825 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1826 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1827 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1831 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1833 X86CPU *cpu = X86_CPU(obj);
1834 CPUX86State *env = &cpu->env;
1835 char *value;
1836 int i;
1838 value = g_malloc(48 + 1);
1839 for (i = 0; i < 48; i++) {
1840 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1842 value[48] = '\0';
1843 return value;
1846 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1847 Error **errp)
1849 X86CPU *cpu = X86_CPU(obj);
1850 CPUX86State *env = &cpu->env;
1851 int c, len, i;
1853 if (model_id == NULL) {
1854 model_id = "";
1856 len = strlen(model_id);
1857 memset(env->cpuid_model, 0, 48);
1858 for (i = 0; i < 48; i++) {
1859 if (i >= len) {
1860 c = '\0';
1861 } else {
1862 c = (uint8_t)model_id[i];
1864 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1868 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1869 void *opaque, Error **errp)
1871 X86CPU *cpu = X86_CPU(obj);
1872 int64_t value;
1874 value = cpu->env.tsc_khz * 1000;
1875 visit_type_int(v, name, &value, errp);
1878 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1879 void *opaque, Error **errp)
1881 X86CPU *cpu = X86_CPU(obj);
1882 const int64_t min = 0;
1883 const int64_t max = INT64_MAX;
1884 Error *local_err = NULL;
1885 int64_t value;
1887 visit_type_int(v, name, &value, &local_err);
1888 if (local_err) {
1889 error_propagate(errp, local_err);
1890 return;
1892 if (value < min || value > max) {
1893 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1894 name ? name : "null", value, min, max);
1895 return;
1898 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
1901 /* Generic getter for "feature-words" and "filtered-features" properties */
1902 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
1903 const char *name, void *opaque,
1904 Error **errp)
1906 uint32_t *array = (uint32_t *)opaque;
1907 FeatureWord w;
1908 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1909 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1910 X86CPUFeatureWordInfoList *list = NULL;
1912 for (w = 0; w < FEATURE_WORDS; w++) {
1913 FeatureWordInfo *wi = &feature_word_info[w];
1914 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1915 qwi->cpuid_input_eax = wi->cpuid_eax;
1916 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1917 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1918 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1919 qwi->features = array[w];
1921 /* List will be in reverse order, but order shouldn't matter */
1922 list_entries[w].next = list;
1923 list_entries[w].value = &word_infos[w];
1924 list = &list_entries[w];
1927 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
1930 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1931 void *opaque, Error **errp)
1933 X86CPU *cpu = X86_CPU(obj);
1934 int64_t value = cpu->hyperv_spinlock_attempts;
1936 visit_type_int(v, name, &value, errp);
1939 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1940 void *opaque, Error **errp)
1942 const int64_t min = 0xFFF;
1943 const int64_t max = UINT_MAX;
1944 X86CPU *cpu = X86_CPU(obj);
1945 Error *err = NULL;
1946 int64_t value;
1948 visit_type_int(v, name, &value, &err);
1949 if (err) {
1950 error_propagate(errp, err);
1951 return;
1954 if (value < min || value > max) {
1955 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1956 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1957 object_get_typename(obj), name ? name : "null",
1958 value, min, max);
1959 return;
1961 cpu->hyperv_spinlock_attempts = value;
1964 static const PropertyInfo qdev_prop_spinlocks = {
1965 .name = "int",
1966 .get = x86_get_hv_spinlocks,
1967 .set = x86_set_hv_spinlocks,
/* Convert all '_' in a feature string option name to '-', to make feature
 * name conform to QOM property naming rule, which uses '-' instead of '_'.
 * Modifies @s in place.
 */
static inline void feat2prop(char *s)
{
    while ((s = strchr(s, '_'))) {
        *s = '-';
    }
}
1980 /* Return the feature property name for a feature flag bit */
1981 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
1983 /* XSAVE components are automatically enabled by other features,
1984 * so return the original feature name instead
1986 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
1987 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
1989 if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
1990 x86_ext_save_areas[comp].bits) {
1991 w = x86_ext_save_areas[comp].feature;
1992 bitnr = ctz32(x86_ext_save_areas[comp].bits);
1996 assert(bitnr < 32);
1997 assert(w < FEATURE_WORDS);
1998 return feature_word_info[w].feat_names[bitnr];
2001 /* Compatibily hack to maintain legacy +-feat semantic,
2002 * where +-feat overwrites any feature set by
2003 * feat=on|feat even if the later is parsed after +-feat
2004 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
2006 static GList *plus_features, *minus_features;
2008 static gint compare_string(gconstpointer a, gconstpointer b)
2010 return g_strcmp0(a, b);
/* Parse "+feature,-feature,feature=foo" CPU feature string.
 *
 * Registers a global property per "feat=val" entry (applied to every CPU of
 * @typename); legacy "+feat"/"-feat" entries are only collected into
 * plus_features/minus_features here and applied later.  Runs at most once
 * per process (guarded by cpu_globals_initialized).  @features is modified
 * in place (strtok / '=' splitting).
 */
static void x86_cpu_parse_featurestr(const char *typename, char *features,
                                     Error **errp)
{
    char *featurestr; /* Single 'key=value" string being parsed */
    static bool cpu_globals_initialized;
    bool ambiguous = false;

    if (cpu_globals_initialized) {
        return;
    }
    cpu_globals_initialized = true;

    if (!features) {
        return;
    }

    for (featurestr = strtok(features, ",");
         featurestr;
         featurestr = strtok(NULL, ",")) {
        const char *name;
        const char *val = NULL;
        char *eq = NULL;
        char num[32];
        GlobalProperty *prop;

        /* Compatibility syntax: */
        if (featurestr[0] == '+') {
            plus_features = g_list_append(plus_features,
                                          g_strdup(featurestr + 1));
            continue;
        } else if (featurestr[0] == '-') {
            minus_features = g_list_append(minus_features,
                                           g_strdup(featurestr + 1));
            continue;
        }

        /* Split "key=value"; a bare "feat" means "feat=on" */
        eq = strchr(featurestr, '=');
        if (eq) {
            *eq++ = 0;
            val = eq;
        } else {
            val = "on";
        }

        feat2prop(featurestr);
        name = featurestr;

        /* Warn when the same feature appears both as +/-feat and feat=val,
         * because the legacy +/-feat form silently wins. */
        if (g_list_find_custom(plus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"+%s\" and \"%s=%s\"",
                        name, name, val);
            ambiguous = true;
        }
        if (g_list_find_custom(minus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"-%s\" and \"%s=%s\"",
                        name, name, val);
            ambiguous = true;
        }

        /* Special case: "tsc-freq" accepts metric suffixes and maps to the
         * "tsc-frequency" property. */
        if (!strcmp(name, "tsc-freq")) {
            int ret;
            uint64_t tsc_freq;

            ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
            if (ret < 0 || tsc_freq > INT64_MAX) {
                error_setg(errp, "bad numerical value %s", val);
                return;
            }
            snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
            val = num;
            name = "tsc-frequency";
        }

        prop = g_new0(typeof(*prop), 1);
        prop->driver = typename;
        prop->property = g_strdup(name);
        prop->value = g_strdup(val);
        prop->errp = &error_fatal;
        qdev_prop_register_global(prop);
    }

    if (ambiguous) {
        warn_report("Compatibility of ambiguous CPU model "
                    "strings won't be kept on future QEMU versions");
    }
}
2104 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
2105 static int x86_cpu_filter_features(X86CPU *cpu);
2107 /* Check for missing features that may prevent the CPU class from
2108 * running using the current machine and accelerator.
2110 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
2111 strList **missing_feats)
2113 X86CPU *xc;
2114 FeatureWord w;
2115 Error *err = NULL;
2116 strList **next = missing_feats;
2118 if (xcc->kvm_required && !kvm_enabled()) {
2119 strList *new = g_new0(strList, 1);
2120 new->value = g_strdup("kvm");;
2121 *missing_feats = new;
2122 return;
2125 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
2127 x86_cpu_expand_features(xc, &err);
2128 if (err) {
2129 /* Errors at x86_cpu_expand_features should never happen,
2130 * but in case it does, just report the model as not
2131 * runnable at all using the "type" property.
2133 strList *new = g_new0(strList, 1);
2134 new->value = g_strdup("type");
2135 *next = new;
2136 next = &new->next;
2139 x86_cpu_filter_features(xc);
2141 for (w = 0; w < FEATURE_WORDS; w++) {
2142 uint32_t filtered = xc->filtered_features[w];
2143 int i;
2144 for (i = 0; i < 32; i++) {
2145 if (filtered & (1UL << i)) {
2146 strList *new = g_new0(strList, 1);
2147 new->value = g_strdup(x86_cpu_feature_name(w, i));
2148 *next = new;
2149 next = &new->next;
2154 object_unref(OBJECT(xc));
2157 /* Print all cpuid feature names in featureset
2159 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2161 int bit;
2162 bool first = true;
2164 for (bit = 0; bit < 32; bit++) {
2165 if (featureset[bit]) {
2166 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2167 first = false;
2172 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
2173 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
2175 ObjectClass *class_a = (ObjectClass *)a;
2176 ObjectClass *class_b = (ObjectClass *)b;
2177 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
2178 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
2179 const char *name_a, *name_b;
2181 if (cc_a->ordering != cc_b->ordering) {
2182 return cc_a->ordering - cc_b->ordering;
2183 } else {
2184 name_a = object_class_get_name(class_a);
2185 name_b = object_class_get_name(class_b);
2186 return strcmp(name_a, name_b);
2190 static GSList *get_sorted_cpu_model_list(void)
2192 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
2193 list = g_slist_sort(list, x86_cpu_list_compare);
2194 return list;
2197 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
2199 ObjectClass *oc = data;
2200 X86CPUClass *cc = X86_CPU_CLASS(oc);
2201 CPUListState *s = user_data;
2202 char *name = x86_cpu_class_get_model_name(cc);
2203 const char *desc = cc->model_description;
2204 if (!desc && cc->cpu_def) {
2205 desc = cc->cpu_def->model_id;
2208 (*s->cpu_fprintf)(s->file, "x86 %16s %-48s\n",
2209 name, desc);
2210 g_free(name);
2213 /* list available CPU models and flags */
2214 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2216 int i;
2217 CPUListState s = {
2218 .file = f,
2219 .cpu_fprintf = cpu_fprintf,
2221 GSList *list;
2223 (*cpu_fprintf)(f, "Available CPUs:\n");
2224 list = get_sorted_cpu_model_list();
2225 g_slist_foreach(list, x86_cpu_list_entry, &s);
2226 g_slist_free(list);
2228 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2229 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2230 FeatureWordInfo *fw = &feature_word_info[i];
2232 (*cpu_fprintf)(f, " ");
2233 listflags(f, cpu_fprintf, fw->feat_names);
2234 (*cpu_fprintf)(f, "\n");
2238 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
2240 ObjectClass *oc = data;
2241 X86CPUClass *cc = X86_CPU_CLASS(oc);
2242 CpuDefinitionInfoList **cpu_list = user_data;
2243 CpuDefinitionInfoList *entry;
2244 CpuDefinitionInfo *info;
2246 info = g_malloc0(sizeof(*info));
2247 info->name = x86_cpu_class_get_model_name(cc);
2248 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
2249 info->has_unavailable_features = true;
2250 info->q_typename = g_strdup(object_class_get_name(oc));
2251 info->migration_safe = cc->migration_safe;
2252 info->has_migration_safe = true;
2253 info->q_static = cc->static_model;
2255 entry = g_malloc0(sizeof(*entry));
2256 entry->value = info;
2257 entry->next = *cpu_list;
2258 *cpu_list = entry;
2261 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2263 CpuDefinitionInfoList *cpu_list = NULL;
2264 GSList *list = get_sorted_cpu_model_list();
2265 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
2266 g_slist_free(list);
2267 return cpu_list;
/* Return the feature bits of word @w supported by the current accelerator.
 *
 * KVM: asks the kernel via GET_SUPPORTED_CPUID; TCG: static table; any
 * other accelerator: everything (~0).  With @migratable_only, bits that
 * would block migration are masked out.
 */
static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t r;

    if (kvm_enabled()) {
        r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
                                                    wi->cpuid_ecx,
                                                    wi->cpuid_reg);
    } else if (tcg_enabled()) {
        r = wi->tcg_features;
    } else {
        return ~0;
    }
    if (migratable_only) {
        r &= x86_cpu_get_migratable_flags(w);
    }
    return r;
}
2291 static void x86_cpu_report_filtered_features(X86CPU *cpu)
2293 FeatureWord w;
2295 for (w = 0; w < FEATURE_WORDS; w++) {
2296 report_unavailable_features(w, cpu->filtered_features[w]);
2300 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2302 PropValue *pv;
2303 for (pv = props; pv->prop; pv++) {
2304 if (!pv->value) {
2305 continue;
2307 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2308 &error_abort);
/* Load data from X86CPUDefinition into a X86CPU object.
 *
 * Sets QOM properties on @cpu from the model table entry @def, then applies
 * accelerator-specific defaults.  Errors from individual property sets are
 * accumulated into @errp.
 */
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    /*NOTE: any property set by this function should be returned by
     * x86_cpu_static_props(), so static expansion of
     * query-cpu-model-expansion is always complete.
     */

    /* CPU models only set _minimum_ values for level/xlevel: */
    object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
    object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);

    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    /* Copy the model's feature words straight into the CPU state */
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* Special cases not set in the X86CPUDefinition structs: */
    if (kvm_enabled()) {
        if (!kvm_irqchip_in_kernel()) {
            /* Userspace irqchip can't support x2apic */
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    } else if (tcg_enabled()) {
        x86_cpu_apply_props(cpu, tcg_default_props);
    }

    /* Always advertise that we are running under a hypervisor */
    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (kvm_enabled()) {
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);

}
/* Return a QDict containing keys for all properties that can be included
 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
 * must be included in the dictionary.
 *
 * The dict maps every property name to qnull(); it is built once and cached
 * in a static for later calls.
 */
static QDict *x86_cpu_static_props(void)
{
    FeatureWord w;
    int i;
    static const char *props[] = {
        "min-level",
        "min-xlevel",
        "family",
        "model",
        "stepping",
        "model-id",
        "vendor",
        "lmce",
        NULL,
    };
    static QDict *d;

    /* Already built on a previous call */
    if (d) {
        return d;
    }

    d = qdict_new();
    /* Fixed property names first... */
    for (i = 0; props[i]; i++) {
        qdict_put_obj(d, props[i], qnull());
    }

    /* ...then every named feature flag bit */
    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *fi = &feature_word_info[w];
        int bit;
        for (bit = 0; bit < 32; bit++) {
            if (!fi->feat_names[bit]) {
                continue;
            }
            qdict_put_obj(d, fi->feat_names[bit], qnull());
        }
    }

    return d;
}
2414 /* Add an entry to @props dict, with the value for property. */
2415 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
2417 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
2418 &error_abort);
2420 qdict_put_obj(props, prop, value);
2423 /* Convert CPU model data from X86CPU object to a property dictionary
2424 * that can recreate exactly the same CPU model.
2426 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
2428 QDict *sprops = x86_cpu_static_props();
2429 const QDictEntry *e;
2431 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
2432 const char *prop = qdict_entry_key(e);
2433 x86_cpu_expand_prop(cpu, props, prop);
/* Convert CPU model data from X86CPU object to a property dictionary
 * that can recreate exactly the same CPU model, including every
 * writeable QOM property.
 */
static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
{
    ObjectPropertyIterator iter;
    ObjectProperty *prop;

    object_property_iter_init(&iter, OBJECT(cpu));
    while ((prop = object_property_iter_next(&iter))) {
        /* skip read-only or write-only properties */
        if (!prop->get || !prop->set) {
            continue;
        }

        /* "hotplugged" is the only property that is configurable
         * on the command-line but will be set differently on CPUs
         * created using "-cpu ... -smp ..." and by CPUs created
         * on the fly by x86_cpu_from_model() for querying. Skip it.
         */
        if (!strcmp(prop->name, "hotplugged")) {
            continue;
        }
        x86_cpu_expand_prop(cpu, props, prop->name);
    }
}
2465 static void object_apply_props(Object *obj, QDict *props, Error **errp)
2467 const QDictEntry *prop;
2468 Error *err = NULL;
2470 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
2471 object_property_set_qobject(obj, qdict_entry_value(prop),
2472 qdict_entry_key(prop), &err);
2473 if (err) {
2474 break;
2478 error_propagate(errp, err);
/* Create X86CPU object according to model+props specification.
 *
 * Returns a new reference the caller must unref, or NULL (with @errp set)
 * when the model is unknown, a property fails to apply, or feature
 * expansion fails.
 */
static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
{
    X86CPU *xc = NULL;
    X86CPUClass *xcc;
    Error *err = NULL;

    xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
    if (xcc == NULL) {
        error_setg(&err, "CPU model '%s' not found", model);
        goto out;
    }

    xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
    if (props) {
        object_apply_props(OBJECT(xc), props, &err);
        if (err) {
            goto out;
        }
    }

    x86_cpu_expand_features(xc, &err);
    if (err) {
        goto out;
    }

out:
    if (err) {
        /* goto-style cleanup: drop the half-built CPU on any failure */
        error_propagate(errp, err);
        object_unref(OBJECT(xc));
        xc = NULL;
    }
    return xc;
}
2516 CpuModelExpansionInfo *
2517 arch_query_cpu_model_expansion(CpuModelExpansionType type,
2518 CpuModelInfo *model,
2519 Error **errp)
2521 X86CPU *xc = NULL;
2522 Error *err = NULL;
2523 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
2524 QDict *props = NULL;
2525 const char *base_name;
2527 xc = x86_cpu_from_model(model->name,
2528 model->has_props ?
2529 qobject_to_qdict(model->props) :
2530 NULL, &err);
2531 if (err) {
2532 goto out;
2535 props = qdict_new();
2537 switch (type) {
2538 case CPU_MODEL_EXPANSION_TYPE_STATIC:
2539 /* Static expansion will be based on "base" only */
2540 base_name = "base";
2541 x86_cpu_to_dict(xc, props);
2542 break;
2543 case CPU_MODEL_EXPANSION_TYPE_FULL:
2544 /* As we don't return every single property, full expansion needs
2545 * to keep the original model name+props, and add extra
2546 * properties on top of that.
2548 base_name = model->name;
2549 x86_cpu_to_dict_full(xc, props);
2550 break;
2551 default:
2552 error_setg(&err, "Unsupportted expansion type");
2553 goto out;
2556 if (!props) {
2557 props = qdict_new();
2559 x86_cpu_to_dict(xc, props);
2561 ret->model = g_new0(CpuModelInfo, 1);
2562 ret->model->name = g_strdup(base_name);
2563 ret->model->props = QOBJECT(props);
2564 ret->model->has_props = true;
2566 out:
2567 object_unref(OBJECT(xc));
2568 if (err) {
2569 error_propagate(errp, err);
2570 qapi_free_CpuModelExpansionInfo(ret);
2571 ret = NULL;
2573 return ret;
/* Return the gdb architecture name for this CPU; caller frees the string. */
static gchar *x86_gdb_arch_name(CPUState *cs)
{
#ifdef TARGET_X86_64
    return g_strdup("i386:x86-64");
#else
    return g_strdup("i386");
#endif
}
/* Convenience wrapper: create and realize an X86CPU from a model string. */
X86CPU *cpu_x86_init(const char *cpu_model)
{
    return X86_CPU(cpu_generic_init(TYPE_X86_CPU, cpu_model));
}
2590 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2592 X86CPUDefinition *cpudef = data;
2593 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2595 xcc->cpu_def = cpudef;
2596 xcc->migration_safe = true;
/* Register a QOM type for one entry of the CPU model table. */
static void x86_register_cpudef_type(X86CPUDefinition *def)
{
    char *typename = x86_cpu_type_name(def->name);
    TypeInfo ti = {
        .name = typename,
        .parent = TYPE_X86_CPU,
        .class_init = x86_cpu_cpudef_class_init,
        .class_data = def,
    };

    /* AMD aliases are handled at runtime based on CPUID vendor, so
     * they shouldn't be set on the CPU model table.
     */
    assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));

    type_register(&ti);
    g_free(typename);
}
#if !defined(CONFIG_USER_ONLY)

/* Remove the APIC bit from CPUID[1].EDX (used when no APIC is present). */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}

#endif /* !CONFIG_USER_ONLY */
/* Emulate the CPUID instruction for @env.
 *
 * @index/@count are the requested leaf (EAX) and sub-leaf (ECX); results
 * are written through @eax/@ebx/@ecx/@edx.  Out-of-range leaves are
 * clamped per-range (basic/hypervisor/extended/Centaur) before dispatch.
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint32_t pkg_offset;
    uint32_t limit;
    uint32_t signature[3];

    /* Calculate & apply limits for different index ranges */
    if (index >= 0xC0000000) {
        limit = env->cpuid_xlevel2;
    } else if (index >= 0x80000000) {
        limit = env->cpuid_xlevel;
    } else if (index >= 0x40000000) {
        limit = 0x40000001;
    } else {
        limit = env->cpuid_level;
    }

    if (index > limit) {
        /* Intel documentation states that invalid EAX input will
         * return the same information as EAX=cpuid_level
         * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
         */
        index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        /* OSXSAVE reflects guest CR4, not a static feature bit */
        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
            *ecx |= CPUID_EXT_OSXSAVE;
        }
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= CPUID_HT;
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        if (!cpu->enable_l3_cache) {
            *ecx = 0;
        } else {
            *ecx = L3_N_DESCRIPTOR;
        }
        *edx = (L1D_DESCRIPTOR << 16) | \
               (L1I_DESCRIPTOR <<  8) | \
               (L2_DESCRIPTOR);
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            /* QEMU has its own core/thread topology (bits 31..26 cleared
             * below), so mask the host's here too */
            *eax &= ~0xFC000000;
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                *eax |= CPUID_4_TYPE_DCACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1D_LINE_SIZE - 1) | \
                       ((L1D_PARTITIONS - 1) << 12) | \
                       ((L1D_ASSOCIATIVITY - 1) << 22);
                *ecx = L1D_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 1: /* L1 icache info */
                *eax |= CPUID_4_TYPE_ICACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1I_LINE_SIZE - 1) | \
                       ((L1I_PARTITIONS - 1) << 12) | \
                       ((L1I_ASSOCIATIVITY - 1) << 22);
                *ecx = L1I_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 2: /* L2 cache info */
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(2) | \
                        CPUID_4_SELF_INIT_LEVEL;
                if (cs->nr_threads > 1) {
                    *eax |= (cs->nr_threads - 1) << 14;
                }
                *ebx = (L2_LINE_SIZE - 1) | \
                       ((L2_PARTITIONS - 1) << 12) | \
                       ((L2_ASSOCIATIVITY - 1) << 22);
                *ecx = L2_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 3: /* L3 cache info */
                if (!cpu->enable_l3_cache) {
                    *eax = 0;
                    *ebx = 0;
                    *ecx = 0;
                    *edx = 0;
                    break;
                }
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(3) | \
                        CPUID_4_SELF_INIT_LEVEL;
                pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
                *eax |= ((1 << pkg_offset) - 1) << 14;
                *ebx = (L3_N_LINE_SIZE - 1) | \
                       ((L3_N_PARTITIONS - 1) << 12) | \
                       ((L3_N_ASSOCIATIVITY - 1) << 22);
                *ecx = L3_N_SETS - 1;
                *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
            }
        }

        /* QEMU gives out its own APIC IDs, never pass down bits 31..26.  */
        if ((*eax & 31) && cs->nr_cores > 1) {
            *eax |= (cs->nr_cores - 1) << 26;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            /* OSPKE reflects guest CR4.PKE, like OSXSAVE above */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xB:
        /* Extended Topology Enumeration Leaf */
        if (!cpu->enable_cpuid_0xb) {
                *eax = *ebx = *ecx = *edx = 0;
                break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;

        switch (count) {
        case 0:
            *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }

        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0xD: {
        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }

        if (count == 0) {
            *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
            *eax = env->features[FEAT_XSAVE_COMP_LO];
            *edx = env->features[FEAT_XSAVE_COMP_HI];
            *ebx = *ecx;
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            /* Per-component size/offset, only for enabled components */
            if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
                const ExtSaveArea *esa = &x86_ext_save_areas[count];
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x40000000:
        /*
         * CPUID code in kvm_arch_init_vcpu() ignores stuff
         * set here, but we restrict to TCG none the less.
         */
        if (tcg_enabled() && cpu->expose_tcg) {
            memcpy(signature, "TCGTCGTCGTCG", 12);
            *eax = 0x40000001;
            *ebx = signature[0];
            *ecx = signature[1];
            *edx = signature[2];
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0x40000001:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Processor brand string, 16 bytes per leaf */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
        *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
               (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
        *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
               (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        *ecx = (L2_SIZE_KB_AMD << 16) | \
               (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
               (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
        if (!cpu->enable_l3_cache) {
            *edx = ((L3_SIZE_KB / 512) << 18) | \
                   (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
                   (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
        } else {
            *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
                   (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
                   (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
        }
        break;
    case 0x80000007:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor */
            *eax = cpu->phys_bits; /* configurable physical bits */
            if  (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
                *eax |= 0x00003900; /* 57 bits virtual */
            } else {
                *eax |= 0x00003000; /* 48 bits virtual */
            }
        } else {
            *eax = cpu->phys_bits;
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xC0000000:
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
/* CPUClass::reset()
 *
 * Put the CPU into its architectural power-on/RESET state: real mode at
 * F000:FFF0, FPU/SSE defaults, cleared debug and MTRR state.  Under
 * CONFIG_USER_ONLY, XCR0/CR4 are additionally opened up so user-mode code
 * can use every configured xsave component.
 */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    /* Fields past end_reset_fields survive reset (e.g. feature words) */
    memset(env, 0, offsetof(CPUX86State, end_reset_fields));

    env->old_exception = -1;

    /* init to reset state */

    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* Real-mode segments; CS base 0xffff0000 so CS:IP hits the reset vector */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state.  */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode. */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
#endif
}
#ifndef CONFIG_USER_ONLY
/* True when @cpu is the bootstrap processor (BSP bit in its APIC base MSR). */
bool cpu_is_bsp(X86CPU *cpu)
{
    return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
}

/* TODO: remove me, when reset over QOM tree is implemented */
static void x86_cpu_machine_reset_cb(void *opaque)
{
    X86CPU *cpu = opaque;
    cpu_reset(CPU(cpu));
}
#endif
/* Initialize machine-check (MCE/MCA) state: only for family >= 6 CPUs
 * that advertise both MCE and MCA, all banks are enabled and LMCE is
 * reported when the CPU's enable_lmce property is set. */
static void mce_init(X86CPU *cpu)
{
    CPUX86State *cenv = &cpu->env;
    unsigned int bank;

    if (((cenv->cpuid_version >> 8) & 0xf) >= 6
        && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
            (CPUID_MCE | CPUID_MCA)) {
        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
                        (cpu->enable_lmce ? MCG_LMCE_P : 0);
        cenv->mcg_ctl = ~(uint64_t)0;
        for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
            cenv->mce_banks[bank * 4] = ~(uint64_t)0;
        }
    }
}
3200 #ifndef CONFIG_USER_ONLY
3201 APICCommonClass *apic_get_class(void)
3203 const char *apic_type = "apic";
3205 if (kvm_apic_in_kernel()) {
3206 apic_type = "kvm-apic";
3207 } else if (xen_enabled()) {
3208 apic_type = "xen-apic";
3211 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
/* Create the local APIC device for @cpu and wire it up as the QOM child
 * "lapic".  The APIC is not realized here (see x86_cpu_apic_realize). */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());

    cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));

    object_property_add_child(OBJECT(cpu), "lapic",
                              OBJECT(cpu->apic_state), &error_abort);
    /* The child property now holds the reference; drop ours */
    object_unref(OBJECT(cpu->apic_state));

    qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}
/* Realize the CPU's APIC device (if one was created) and, on first call,
 * map the APIC MMIO region into system memory.  The mapping is done only
 * once because all CPUs share the same default APIC base address.
 */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    static bool apic_mmio_map_once;

    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
    }
}
/* machine-init-done notifier: if the machine exposes /machine/smram,
 * alias it into this CPU's address-space root with higher priority so
 * SMM code sees SMRAM overlaying normal memory.
 */
static void x86_cpu_machine_done(Notifier *n, void *unused)
{
    X86CPU *cpu = container_of(n, X86CPU, machine_done);
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    if (smram) {
        cpu->smram = g_new(MemoryRegion, 1);
        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
                                 smram, 0, 1ull << 32);
        memory_region_set_enabled(cpu->smram, true);
        /* Priority 1: overlays cpu_as_mem which was added at priority 0 */
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
    }
}
3269 #else
/* *-user builds have no APIC device: nothing to realize. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
3273 #endif
/* Query the host CPU for its number of physical address bits via
 * CPUID leaf 0x80000008.  Note: Only safe for use on x86(-64) hosts.
 */
static uint32_t x86_host_phys_bits(void)
{
    uint32_t eax;

    host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
    if (eax < 0x80000008) {
        /* It's an odd 64 bit machine that doesn't have the leaf for
         * physical address bits; fall back to 36 that's most older
         * Intel.
         */
        return 36;
    }

    host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
    /* Note: According to AMD doc 25481 rev 2.34 they have a field
     * at 23:16 that can specify a maximum physical address bits for
     * the guest that can override this value; but I've not seen
     * anything with that set.
     */
    return eax & 0xff;
}
3301 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
3303 if (*min < value) {
3304 *min = value;
/* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate.
 * If feature word @w has any bit enabled, the CPUID level range that
 * contains its leaf (basic 0x0..., extended 0x8..., Centaur 0xC...)
 * must be raised to cover that leaf.
 */
static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
{
    CPUX86State *env = &cpu->env;
    FeatureWordInfo *fi = &feature_word_info[w];
    uint32_t eax = fi->cpuid_eax;
    /* Top nibble selects which of the three CPUID leaf ranges this is */
    uint32_t region = eax & 0xF0000000;

    if (!env->features[w]) {
        return;
    }

    switch (region) {
    case 0x00000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
        break;
    case 0x80000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
        break;
    case 0xC0000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
        break;
    }
}
/* Calculate XSAVE components based on the configured CPU feature flags.
 * Builds the XCR0-style component bitmap (FEAT_XSAVE_COMP_LO/HI) from
 * whichever extended save areas are enabled by the feature words.
 * Does nothing if the CPU doesn't expose XSAVE at all.
 */
static void x86_cpu_enable_xsave_components(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int i;
    uint64_t mask;

    if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
        return;
    }

    mask = 0;
    for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        /* Component i is enabled iff its controlling feature bit is set */
        if (env->features[esa->feature] & esa->bits) {
            mask |= (1ULL << i);
        }
    }

    env->features[FEAT_XSAVE_COMP_LO] = mask;
    env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
}
3356 /***** Steps involved on loading and filtering CPUID data
3358 * When initializing and realizing a CPU object, the steps
3359 * involved in setting up CPUID data are:
3361 * 1) Loading CPU model definition (X86CPUDefinition). This is
3362 * implemented by x86_cpu_load_def() and should be completely
3363 * transparent, as it is done automatically by instance_init.
3364 * No code should need to look at X86CPUDefinition structs
3365 * outside instance_init.
3367 * 2) CPU expansion. This is done by realize before CPUID
3368 * filtering, and will make sure host/accelerator data is
3369 * loaded for CPU models that depend on host capabilities
3370 * (e.g. "host"). Done by x86_cpu_expand_features().
3372 * 3) CPUID filtering. This initializes extra data related to
3373 * CPUID, and checks if the host supports all capabilities
3374 * required by the CPU. Runnability of a CPU model is
3375 * determined at this step. Done by x86_cpu_filter_features().
3377 * Some operations don't require all steps to be performed.
3378 * More precisely:
3380 * - CPU instance creation (instance_init) will run only CPU
3381 * model loading. CPU expansion can't run at instance_init-time
3382 * because host/accelerator data may be not available yet.
3383 * - CPU realization will perform both CPU model expansion and CPUID
3384 * filtering, and return an error in case one of them fails.
3385 * - query-cpu-definitions needs to run all 3 steps. It needs
3386 * to run CPUID filtering, as the 'unavailable-features'
3387 * field is set based on the filtering results.
3388 * - The query-cpu-model-expansion QMP command only needs to run
3389 * CPU model loading and CPU expansion. It should not filter
3390 * any CPUID data based on host capabilities.
/* Expand CPU configuration data, based on configured features
 * and host/accelerator capabilities when appropriate.
 *
 * Ordering matters here: max_features expansion runs first, then the
 * legacy +feature/-feature lists, then XSAVE component calculation and
 * automatic CPUID level adjustment based on the final feature set.
 */
static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    GList *l;
    Error *local_err = NULL;

    /*TODO: Now cpu->max_features doesn't overwrite features
     * set using QOM properties, and we can convert
     * plus_features & minus_features to global properties
     * inside x86_cpu_parse_featurestr() too.
     */
    if (cpu->max_features) {
        for (w = 0; w < FEATURE_WORDS; w++) {
            /* Override only features that weren't set explicitly
             * by the user.
             */
            env->features[w] |=
                x86_cpu_get_supported_feature_word(w, cpu->migratable) &
                ~env->user_features[w];
        }
    }

    for (l = plus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    for (l = minus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /* KVM paravirt features are hidden unless KVM is in use and exposed */
    if (!kvm_enabled() || !cpu->expose_kvm) {
        env->features[FEAT_KVM] = 0;
    }

    x86_cpu_enable_xsave_components(cpu);

    /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
    x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
    if (cpu->full_cpuid_auto_level) {
        x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
        x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
        x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
        /* SVM requires CPUID[0x8000000A] */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
        }
    }

    /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
    if (env->cpuid_level == UINT32_MAX) {
        env->cpuid_level = env->cpuid_min_level;
    }
    if (env->cpuid_xlevel == UINT32_MAX) {
        env->cpuid_xlevel = env->cpuid_min_xlevel;
    }
    if (env->cpuid_xlevel2 == UINT32_MAX) {
        env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
    }

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
}
3478 * Finishes initialization of CPUID data, filters CPU feature
3479 * words based on host availability of each feature.
3481 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
3483 static int x86_cpu_filter_features(X86CPU *cpu)
3485 CPUX86State *env = &cpu->env;
3486 FeatureWord w;
3487 int rv = 0;
3489 for (w = 0; w < FEATURE_WORDS; w++) {
3490 uint32_t host_feat =
3491 x86_cpu_get_supported_feature_word(w, false);
3492 uint32_t requested_features = env->features[w];
3493 env->features[w] &= host_feat;
3494 cpu->filtered_features[w] = requested_features & ~env->features[w];
3495 if (cpu->filtered_features[w]) {
3496 rv = 1;
3500 return rv;
/* Vendor checks: compare the three CPUID vendor-string registers
 * (EBX/EDX/ECX of leaf 0) against the known Intel/AMD signatures.
 */
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
/* Realize the X86CPU device: expand and filter CPUID features, apply
 * AMD CPUID aliasing, pick the physical address width, create/realize
 * the APIC, set up TCG address spaces, and finally reset the CPU.
 * The order of these steps is significant (e.g. filtering must follow
 * expansion; the hyperthreading warning must follow qemu_init_vcpu()).
 */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;

    if (xcc->kvm_required && !kvm_enabled()) {
        char *name = x86_cpu_class_get_model_name(xcc);
        error_setg(&local_err, "CPU model '%s' requires KVM", name);
        g_free(name);
        goto out;
    }

    if (cpu->apic_id == UNASSIGNED_APIC_ID) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    x86_cpu_expand_features(cpu, &local_err);
    if (local_err) {
        goto out;
    }

    if (x86_cpu_filter_features(cpu) &&
        (cpu->check_cpuid || cpu->enforce_cpuid)) {
        x86_cpu_report_filtered_features(cpu);
        if (cpu->enforce_cpuid) {
            error_setg(&local_err,
                       kvm_enabled() ?
                           "Host doesn't support requested features" :
                           "TCG doesn't support requested features");
            goto out;
        }
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
           & CPUID_EXT2_AMD_ALIASES);
    }

    /* For 64bit systems think about the number of physical bits to present.
     * ideally this should be the same as the host; anything other than matching
     * the host can cause incorrect guest behaviour.
     * QEMU used to pick the magic value of 40 bits that corresponds to
     * consumer AMD devices but nothing else.
     */
    if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
        if (kvm_enabled()) {
            uint32_t host_phys_bits = x86_host_phys_bits();
            static bool warned;

            if (cpu->host_phys_bits) {
                /* The user asked for us to use the host physical bits */
                cpu->phys_bits = host_phys_bits;
            }

            /* Print a warning if the user set it to a value that's not the
             * host value.
             */
            if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
                !warned) {
                warn_report("Host physical bits (%u)"
                            " does not match phys-bits property (%u)",
                            host_phys_bits, cpu->phys_bits);
                warned = true;
            }

            if (cpu->phys_bits &&
                (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
                cpu->phys_bits < 32)) {
                error_setg(errp, "phys-bits should be between 32 and %u "
                           " (but is %u)",
                           TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
                return;
            }
        } else {
            if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
                error_setg(errp, "TCG only supports phys-bits=%u",
                           TCG_PHYS_ADDR_BITS);
                return;
            }
        }
        /* 0 means it was not explicitly set by the user (or by machine
         * compat_props or by the host code above). In this case, the default
         * is the value used by TCG (40).
         */
        if (cpu->phys_bits == 0) {
            cpu->phys_bits = TCG_PHYS_ADDR_BITS;
        }
    } else {
        /* For 32 bit systems don't use the user set value, but keep
         * phys_bits consistent with what we tell the guest.
         */
        if (cpu->phys_bits != 0) {
            error_setg(errp, "phys-bits is not user-configurable in 32 bit");
            return;
        }

        if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
            cpu->phys_bits = 36;
        } else {
            cpu->phys_bits = 32;
        }
    }
    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    if (tcg_enabled()) {
        tcg_x86_init();
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        AddressSpace *as_normal = address_space_init_shareable(cs->memory,
                                                               "cpu-memory");
        AddressSpace *as_smm = g_new(AddressSpace, 1);

        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);
        address_space_init(as_smm, cpu->cpu_as_root, "CPU");

        cs->num_ases = 2;
        cpu_address_space_init(cs, as_normal, 0);
        cpu_address_space_init(cs, as_smm, 1);

        /* ... SMRAM with higher priority, linked from /machine/smram. */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
     * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to gives
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't be populated yet and the checking is incorrect.
     */
    if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
        error_report("AMD CPU doesn't support hyperthreading. Please configure"
                     " -smp options properly.");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
/* Unrealize the X86CPU device: stop the vCPU, drop the reset hook,
 * destroy the APIC child, and chain to the parent class unrealize.
 */
static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
{
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu_remove_sync(CPU(dev));
    qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
#endif

    if (cpu->apic_state) {
        object_unparent(OBJECT(cpu->apic_state));
        cpu->apic_state = NULL;
    }

    xcc->parent_unrealize(dev, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
/* Opaque state for a feature-flag boolean QOM property: identifies the
 * feature word and the bit mask within it that the property controls.
 */
typedef struct BitProperty {
    FeatureWord w;      /* which env->features[] word */
    uint32_t mask;      /* bit(s) in that word; may cover multiple bits */
} BitProperty;
3734 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
3735 void *opaque, Error **errp)
3737 X86CPU *cpu = X86_CPU(obj);
3738 BitProperty *fp = opaque;
3739 uint32_t f = cpu->env.features[fp->w];
3740 bool value = (f & fp->mask) == fp->mask;
3741 visit_type_bool(v, name, &value, errp);
/* QOM setter for a feature-bit property: sets or clears the mask bits in
 * the feature word and records the bits as user-set so later automatic
 * expansion won't override them.  Rejected after the device is realized.
 */
static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    DeviceState *dev = DEVICE(obj);
    X86CPU *cpu = X86_CPU(obj);
    BitProperty *fp = opaque;
    Error *local_err = NULL;
    bool value;

    if (dev->realized) {
        qdev_prop_set_after_realize(dev, name, errp);
        return;
    }

    visit_type_bool(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (value) {
        cpu->env.features[fp->w] |= fp->mask;
    } else {
        cpu->env.features[fp->w] &= ~fp->mask;
    }
    /* Mark these bits as explicitly chosen by the user */
    cpu->env.user_features[fp->w] |= fp->mask;
}
3772 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3773 void *opaque)
3775 BitProperty *prop = opaque;
3776 g_free(prop);
/* Register a boolean property to get/set a single bit in a uint32_t field.
 *
 * The same property name can be registered multiple times to make it affect
 * multiple bits in the same FeatureWord. In that case, the getter will return
 * true only if all bits are set.
 */
static void x86_cpu_register_bit_prop(X86CPU *cpu,
                                      const char *prop_name,
                                      FeatureWord w,
                                      int bitnr)
{
    BitProperty *fp;
    ObjectProperty *op;
    uint32_t mask = (1UL << bitnr);

    op = object_property_find(OBJECT(cpu), prop_name, NULL);
    if (op) {
        /* Name already registered: extend the existing property's mask.
         * It must refer to the same feature word.
         */
        fp = op->opaque;
        assert(fp->w == w);
        fp->mask |= mask;
    } else {
        fp = g_new0(BitProperty, 1);
        fp->w = w;
        fp->mask = mask;
        object_property_add(OBJECT(cpu), prop_name, "bool",
                            x86_cpu_get_bit_prop,
                            x86_cpu_set_bit_prop,
                            x86_cpu_release_bit_prop, fp, &error_abort);
    }
}
/* Register the QOM property for bit @bitnr of feature word @w, using the
 * canonical name from the feature_word_info table.  Unnamed bits get no
 * property.
 */
static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
                                               FeatureWord w,
                                               int bitnr)
{
    FeatureWordInfo *fi = &feature_word_info[w];
    const char *name = fi->feat_names[bitnr];

    if (!name) {
        return;
    }

    /* Property names should use "-" instead of "_".
     * Old names containing underscores are registered as aliases
     * using object_property_add_alias()
     */
    assert(!strchr(name, '_'));
    /* aliases don't use "|" delimiters anymore, they are registered
     * manually using object_property_add_alias() */
    assert(!strchr(name, '|'));
    x86_cpu_register_bit_prop(cpu, name, w, bitnr);
}
/* Build a GuestPanicInformation record from the Hyper-V crash MSRs, if
 * the Hyper-V crash-reporting feature is exposed to the guest.
 * Returns NULL when no crash information is available; the caller owns
 * (and must free) the returned structure.
 */
static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    GuestPanicInformation *panic_info = NULL;

    if (env->features[FEAT_HYPERV_EDX] & HV_X64_GUEST_CRASH_MSR_AVAILABLE) {
        panic_info = g_malloc0(sizeof(GuestPanicInformation));

        panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;

        /* The five argument MSRs must all exist */
        assert(HV_X64_MSR_CRASH_PARAMS >= 5);
        panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
        panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
        panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
        panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
        panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
    }

    return panic_info;
}
3853 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
3854 const char *name, void *opaque,
3855 Error **errp)
3857 CPUState *cs = CPU(obj);
3858 GuestPanicInformation *panic_info;
3860 if (!cs->crash_occurred) {
3861 error_setg(errp, "No crash occured");
3862 return;
3865 panic_info = x86_cpu_get_crash_info(cs);
3866 if (panic_info == NULL) {
3867 error_setg(errp, "No crash information");
3868 return;
3871 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
3872 errp);
3873 qapi_free_GuestPanicInformation(panic_info);
/* Instance init for the X86CPU QOM type: wire up env pointer, register
 * the CPUID-related QOM properties, per-bit feature properties, legacy
 * property-name aliases, and finally load the class's CPU model
 * definition (step 1 of the CPUID setup sequence described above).
 */
static void x86_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    cs->env_ptr = env;

    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);

    object_property_add(obj, "crash-information", "GuestPanicInformation",
                        x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;

    /* One boolean property per named CPUID feature bit */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    /* Alternative spellings kept for compatibility */
    object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
    object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
    object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
    object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
    object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "i64", obj, "lm", &error_abort);

    /* Legacy underscore spellings of dash-named properties */
    object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
    object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
    object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
    object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
    object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
    object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
    object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
    object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
    object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
    object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
    object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
    object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
    object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
    object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
    object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
    object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
    object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
    object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
    object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);

    if (xcc->cpu_def) {
        x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
    }
}
3959 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3961 X86CPU *cpu = X86_CPU(cs);
3963 return cpu->apic_id;
3966 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3968 X86CPU *cpu = X86_CPU(cs);
3970 return cpu->env.cr[0] & CR0_PG_MASK;
3973 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3975 X86CPU *cpu = X86_CPU(cs);
3977 cpu->env.eip = value;
3980 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3982 X86CPU *cpu = X86_CPU(cs);
3984 cpu->env.eip = tb->pc - tb->cs_base;
/* Return true if the CPU has a deliverable event pending:
 * - a hard/poll interrupt while IF is set,
 * - NMI/INIT/SIPI/MCE (not maskable by IF),
 * - or SMI while not already in SMM.
 */
static bool x86_cpu_has_work(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
                                      CPU_INTERRUPT_POLL)) &&
            (env->eflags & IF_MASK)) ||
           (cs->interrupt_request & (CPU_INTERRUPT_NMI |
                                     CPU_INTERRUPT_INIT |
                                     CPU_INTERRUPT_SIPI |
                                     CPU_INTERRUPT_MCE)) ||
           ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
            !(env->hflags & HF_SMM_MASK));
}
/* qdev properties of the X86CPU device: topology IDs, Hyper-V
 * enlightenments, CPUID level control, and misc compat knobs.
 */
static Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
#else
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    /* Hyper-V enlightenments */
    { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
    DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
    DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
    DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
    DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
    DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
    DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
    /* UINT32_MAX = "derive from min-level automatically" */
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
    DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
    DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
    DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
    DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
    DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
    DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
                     false),
    DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
    DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
    DEFINE_PROP_END_OF_LIST()
};
/* Class init for the abstract X86CPU type: install realize/unrealize,
 * reset, properties, and all the CPUClass hooks (interrupts, GDB,
 * dumps, ELF notes, migration).
 */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    /* Save parent handlers so ours can chain to them */
    xcc->parent_realize = dc->realize;
    xcc->parent_unrealize = dc->unrealize;
    dc->realize = x86_cpu_realizefn;
    dc->unrealize = x86_cpu_unrealizefn;
    dc->props = x86_cpu_properties;

    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
#ifdef CONFIG_TCG
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
#endif
    cc->dump_state = x86_cpu_dump_state;
    cc->get_crash_info = x86_cpu_get_crash_info;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
    cc->asidx_from_attrs = x86_asidx_from_attrs;
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_arch_name = x86_gdb_arch_name;
#ifdef TARGET_X86_64
    cc->gdb_core_xml_file = "i386-64bit.xml";
    cc->gdb_num_core_regs = 57;
#else
    cc->gdb_core_xml_file = "i386-32bit.xml";
    cc->gdb_num_core_regs = 41;
#endif
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;

    dc->user_creatable = true;
}
/* Abstract base QOM type for all x86 CPU models. */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};
/* "base" CPU model, used by query-cpu-model-expansion */
static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->static_model = true;
    xcc->migration_safe = true;
    xcc->model_description = "base CPU model type with no features enabled";
    /* Sort after the named models in model listings */
    xcc->ordering = 8;
}
/* QOM type for the featureless "base" CPU model. */
static const TypeInfo x86_base_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("base"),
    .parent = TYPE_X86_CPU,
    .class_init = x86_cpu_base_class_init,
};
/* Register the abstract base type, every built-in CPU model, and the
 * special "max"/"base" (and, with KVM, "host") model types.
 */
static void x86_cpu_register_types(void)
{
    int i;

    type_register_static(&x86_cpu_type_info);
    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
        x86_register_cpudef_type(&builtin_x86_defs[i]);
    }
    type_register_static(&max_x86_cpu_type_info);
    type_register_static(&x86_base_cpu_type_info);
#ifdef CONFIG_KVM
    type_register_static(&host_x86_cpu_type_info);
#endif
}
4155 type_init(x86_cpu_register_types)