target-i386: xsave: Calculate set of xsave components on realize
[qemu/ar7.git] / target-i386 / cpu.c
blob 8bef3cf8214d9bd2403663ba3d648f4e37941f1c
1 /*
2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
20 #include "qemu/cutils.h"
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/kvm.h"
25 #include "sysemu/cpus.h"
26 #include "kvm_i386.h"
28 #include "qemu/error-report.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
33 #include "qapi-types.h"
34 #include "qapi-visit.h"
35 #include "qapi/visitor.h"
36 #include "sysemu/arch_init.h"
38 #if defined(CONFIG_KVM)
39 #include <linux/kvm_para.h>
40 #endif
42 #include "sysemu/sysemu.h"
43 #include "hw/qdev-properties.h"
44 #include "hw/i386/topology.h"
45 #ifndef CONFIG_USER_ONLY
46 #include "exec/address-spaces.h"
47 #include "hw/hw.h"
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
50 #endif
53 /* Cache topology CPUID constants: */
55 /* CPUID Leaf 2 Descriptors */
57 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
58 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
59 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
60 #define CPUID_2_L3_16MB_16WAY_64B 0x4d
63 /* CPUID Leaf 4 constants: */
65 /* EAX: */
66 #define CPUID_4_TYPE_DCACHE 1
67 #define CPUID_4_TYPE_ICACHE 2
68 #define CPUID_4_TYPE_UNIFIED 3
70 #define CPUID_4_LEVEL(l) ((l) << 5)
72 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
73 #define CPUID_4_FULLY_ASSOC (1 << 9)
75 /* EDX: */
76 #define CPUID_4_NO_INVD_SHARING (1 << 0)
77 #define CPUID_4_INCLUSIVE (1 << 1)
78 #define CPUID_4_COMPLEX_IDX (1 << 2)
80 #define ASSOC_FULL 0xFF
82 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
83 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
84 a == 2 ? 0x2 : \
85 a == 4 ? 0x4 : \
86 a == 8 ? 0x6 : \
87 a == 16 ? 0x8 : \
88 a == 32 ? 0xA : \
89 a == 48 ? 0xB : \
90 a == 64 ? 0xC : \
91 a == 96 ? 0xD : \
92 a == 128 ? 0xE : \
93 a == ASSOC_FULL ? 0xF : \
94 0 /* invalid value */)
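/* Illustrative note (editor's addition, not part of the original file):
 * AMD_ENC_ASSOC maps a way count to the 4-bit associativity field used by
 * CPUID leaf 0x80000006.  For example, AMD_ENC_ASSOC(16) evaluates to 0x8
 * and AMD_ENC_ASSOC(ASSOC_FULL) to 0xF; a way count with no defined
 * encoding (e.g. 24) falls through to 0, i.e. "invalid".  A sketch of how
 * such a value could be packed into ECX for leaf 0x80000006:
 *
 *   uint32_t ecx = (L2_SIZE_KB_AMD << 16) |
 *                  (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) |
 *                  (L2_LINES_PER_TAG << 8) | L2_LINE_SIZE;
 *
 * The encoding actually performed later in this file may differ in detail;
 * this only shows how the macro is consumed.
 */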
97 /* Definitions of the hardcoded cache entries we expose: */
99 /* L1 data cache: */
100 #define L1D_LINE_SIZE 64
101 #define L1D_ASSOCIATIVITY 8
102 #define L1D_SETS 64
103 #define L1D_PARTITIONS 1
104 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
105 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
106 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
107 #define L1D_LINES_PER_TAG 1
108 #define L1D_SIZE_KB_AMD 64
109 #define L1D_ASSOCIATIVITY_AMD 2
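/* Worked example (editor's note, not in the original file): the leaf-4 style
 * geometry above gives
 *   L1D size = L1D_LINE_SIZE * L1D_ASSOCIATIVITY * L1D_SETS * L1D_PARTITIONS
 *            = 64 * 8 * 64 * 1 = 32768 bytes = 32 KiB,
 * matching CPUID_2_L1D_32KB_8WAY_64B, while the AMD leaf 0x80000005 values
 * above (64 KiB, 2-way) describe a different cache -- hence the FIXME.
 */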
111 /* L1 instruction cache: */
112 #define L1I_LINE_SIZE 64
113 #define L1I_ASSOCIATIVITY 8
114 #define L1I_SETS 64
115 #define L1I_PARTITIONS 1
116 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
117 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
118 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
119 #define L1I_LINES_PER_TAG 1
120 #define L1I_SIZE_KB_AMD 64
121 #define L1I_ASSOCIATIVITY_AMD 2
123 /* Level 2 unified cache: */
124 #define L2_LINE_SIZE 64
125 #define L2_ASSOCIATIVITY 16
126 #define L2_SETS 4096
127 #define L2_PARTITIONS 1
128 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
129 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
130 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
131 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
132 #define L2_LINES_PER_TAG 1
133 #define L2_SIZE_KB_AMD 512
135 /* Level 3 unified cache: */
136 #define L3_SIZE_KB 0 /* disabled */
137 #define L3_ASSOCIATIVITY 0 /* disabled */
138 #define L3_LINES_PER_TAG 0 /* disabled */
139 #define L3_LINE_SIZE 0 /* disabled */
140 #define L3_N_LINE_SIZE 64
141 #define L3_N_ASSOCIATIVITY 16
142 #define L3_N_SETS 16384
143 #define L3_N_PARTITIONS 1
144 #define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
145 #define L3_N_LINES_PER_TAG 1
146 #define L3_N_SIZE_KB_AMD 16384
148 /* TLB definitions: */
150 #define L1_DTLB_2M_ASSOC 1
151 #define L1_DTLB_2M_ENTRIES 255
152 #define L1_DTLB_4K_ASSOC 1
153 #define L1_DTLB_4K_ENTRIES 255
155 #define L1_ITLB_2M_ASSOC 1
156 #define L1_ITLB_2M_ENTRIES 255
157 #define L1_ITLB_4K_ASSOC 1
158 #define L1_ITLB_4K_ENTRIES 255
160 #define L2_DTLB_2M_ASSOC 0 /* disabled */
161 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
162 #define L2_DTLB_4K_ASSOC 4
163 #define L2_DTLB_4K_ENTRIES 512
165 #define L2_ITLB_2M_ASSOC 0 /* disabled */
166 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
167 #define L2_ITLB_4K_ASSOC 4
168 #define L2_ITLB_4K_ENTRIES 512
172 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
173 uint32_t vendor2, uint32_t vendor3)
175 int i;
176 for (i = 0; i < 4; i++) {
177 dst[i] = vendor1 >> (8 * i);
178 dst[i + 4] = vendor2 >> (8 * i);
179 dst[i + 8] = vendor3 >> (8 * i);
181 dst[CPUID_VENDOR_SZ] = '\0';
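/* Example (editor's illustration): the 12-byte vendor string is stored as
 * three little-endian 32-bit words.  For "GenuineIntel":
 *   vendor1 = 0x756e6547 ("Genu"), vendor2 = 0x49656e69 ("ineI"),
 *   vendor3 = 0x6c65746e ("ntel")
 * so x86_cpu_vendor_words2str(dst, 0x756e6547, 0x49656e69, 0x6c65746e)
 * yields dst == "GenuineIntel".  These are the EBX/EDX/ECX values returned
 * by CPUID leaf 0.
 */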
184 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
185 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
186 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
187 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
188 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
189 CPUID_PSE36 | CPUID_FXSR)
190 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
191 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
192 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
193 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
194 CPUID_PAE | CPUID_SEP | CPUID_APIC)
196 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
197 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
198 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
199 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
200 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
201 /* partly implemented:
202 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
203 /* missing:
204 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
205 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
206 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
207 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
208 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
209 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
210 /* missing:
211 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
212 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
213 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
214 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
215 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
217 #ifdef TARGET_X86_64
218 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
219 #else
220 #define TCG_EXT2_X86_64_FEATURES 0
221 #endif
223 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
224 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
225 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
226 TCG_EXT2_X86_64_FEATURES)
227 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
228 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
229 #define TCG_EXT4_FEATURES 0
230 #define TCG_SVM_FEATURES 0
231 #define TCG_KVM_FEATURES 0
232 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
233 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
234 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
235 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
236 CPUID_7_0_EBX_ERMS)
237 /* missing:
238 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
239 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
240 CPUID_7_0_EBX_RDSEED */
241 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE)
242 #define TCG_APM_FEATURES 0
243 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
244 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
245 /* missing:
246 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
248 typedef struct FeatureWordInfo {
249 /* feature flag names are taken from "Intel Processor Identification and
250 * the CPUID Instruction" and AMD's "CPUID Specification".
251 * In cases of disagreement between feature naming conventions,
252 * aliases may be added.
254 const char *feat_names[32];
255 uint32_t cpuid_eax; /* Input EAX for CPUID */
256 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
257 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
258 int cpuid_reg; /* output register (R_* constant) */
259 uint32_t tcg_features; /* Feature flags supported by TCG */
260 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
261 } FeatureWordInfo;
263 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
264 [FEAT_1_EDX] = {
265 .feat_names = {
266 "fpu", "vme", "de", "pse",
267 "tsc", "msr", "pae", "mce",
268 "cx8", "apic", NULL, "sep",
269 "mtrr", "pge", "mca", "cmov",
270 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
271 NULL, "ds" /* Intel dts */, "acpi", "mmx",
272 "fxsr", "sse", "sse2", "ss",
273 "ht" /* Intel htt */, "tm", "ia64", "pbe",
275 .cpuid_eax = 1, .cpuid_reg = R_EDX,
276 .tcg_features = TCG_FEATURES,
278 [FEAT_1_ECX] = {
279 .feat_names = {
280 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
281 "ds_cpl", "vmx", "smx", "est",
282 "tm2", "ssse3", "cid", NULL,
283 "fma", "cx16", "xtpr", "pdcm",
284 NULL, "pcid", "dca", "sse4.1|sse4_1",
285 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
286 "tsc-deadline", "aes", "xsave", "osxsave",
287 "avx", "f16c", "rdrand", "hypervisor",
289 .cpuid_eax = 1, .cpuid_reg = R_ECX,
290 .tcg_features = TCG_EXT_FEATURES,
292 /* Feature names that are already defined in feature_name[] but
293 * are set in CPUID[8000_0001].EDX on AMD CPUs don't have their
294 * names listed in feat_names below. They are copied automatically
295 * to features[FEAT_8000_0001_EDX] if and only if the CPU vendor is AMD.
297 [FEAT_8000_0001_EDX] = {
298 .feat_names = {
299 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
300 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
301 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
302 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
303 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
304 "nx|xd", NULL, "mmxext", NULL /* mmx */,
305 NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb", "rdtscp",
306 NULL, "lm|i64", "3dnowext", "3dnow",
308 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
309 .tcg_features = TCG_EXT2_FEATURES,
311 [FEAT_8000_0001_ECX] = {
312 .feat_names = {
313 "lahf_lm", "cmp_legacy", "svm", "extapic",
314 "cr8legacy", "abm", "sse4a", "misalignsse",
315 "3dnowprefetch", "osvw", "ibs", "xop",
316 "skinit", "wdt", NULL, "lwp",
317 "fma4", "tce", NULL, "nodeid_msr",
318 NULL, "tbm", "topoext", "perfctr_core",
319 "perfctr_nb", NULL, NULL, NULL,
320 NULL, NULL, NULL, NULL,
322 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
323 .tcg_features = TCG_EXT3_FEATURES,
325 [FEAT_C000_0001_EDX] = {
326 .feat_names = {
327 NULL, NULL, "xstore", "xstore-en",
328 NULL, NULL, "xcrypt", "xcrypt-en",
329 "ace2", "ace2-en", "phe", "phe-en",
330 "pmm", "pmm-en", NULL, NULL,
331 NULL, NULL, NULL, NULL,
332 NULL, NULL, NULL, NULL,
333 NULL, NULL, NULL, NULL,
334 NULL, NULL, NULL, NULL,
336 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
337 .tcg_features = TCG_EXT4_FEATURES,
339 [FEAT_KVM] = {
340 .feat_names = {
341 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
342 "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
343 NULL, NULL, NULL, NULL,
344 NULL, NULL, NULL, NULL,
345 NULL, NULL, NULL, NULL,
346 NULL, NULL, NULL, NULL,
347 "kvmclock-stable-bit", NULL, NULL, NULL,
348 NULL, NULL, NULL, NULL,
350 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
351 .tcg_features = TCG_KVM_FEATURES,
353 [FEAT_HYPERV_EAX] = {
354 .feat_names = {
355 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
356 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
357 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
358 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
359 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
360 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
361 NULL, NULL, NULL, NULL,
362 NULL, NULL, NULL, NULL,
363 NULL, NULL, NULL, NULL,
364 NULL, NULL, NULL, NULL,
365 NULL, NULL, NULL, NULL,
367 .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
369 [FEAT_HYPERV_EBX] = {
370 .feat_names = {
371 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
372 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
373 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
374 NULL /* hv_create_port */, NULL /* hv_connect_port */,
375 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
376 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
377 NULL, NULL,
378 NULL, NULL, NULL, NULL,
379 NULL, NULL, NULL, NULL,
380 NULL, NULL, NULL, NULL,
381 NULL, NULL, NULL, NULL,
383 .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
385 [FEAT_HYPERV_EDX] = {
386 .feat_names = {
387 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
388 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
389 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
390 NULL, NULL,
391 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
392 NULL, NULL, NULL, NULL,
393 NULL, NULL, NULL, NULL,
394 NULL, NULL, NULL, NULL,
395 NULL, NULL, NULL, NULL,
396 NULL, NULL, NULL, NULL,
398 .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
400 [FEAT_SVM] = {
401 .feat_names = {
402 "npt", "lbrv", "svm_lock", "nrip_save",
403 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
404 NULL, NULL, "pause_filter", NULL,
405 "pfthreshold", NULL, NULL, NULL,
406 NULL, NULL, NULL, NULL,
407 NULL, NULL, NULL, NULL,
408 NULL, NULL, NULL, NULL,
409 NULL, NULL, NULL, NULL,
411 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
412 .tcg_features = TCG_SVM_FEATURES,
414 [FEAT_7_0_EBX] = {
415 .feat_names = {
416 "fsgsbase", "tsc_adjust", NULL, "bmi1",
417 "hle", "avx2", NULL, "smep",
418 "bmi2", "erms", "invpcid", "rtm",
419 NULL, NULL, "mpx", NULL,
420 "avx512f", "avx512dq", "rdseed", "adx",
421 "smap", "avx512ifma", "pcommit", "clflushopt",
422 "clwb", NULL, "avx512pf", "avx512er",
423 "avx512cd", NULL, "avx512bw", "avx512vl",
425 .cpuid_eax = 7,
426 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
427 .cpuid_reg = R_EBX,
428 .tcg_features = TCG_7_0_EBX_FEATURES,
430 [FEAT_7_0_ECX] = {
431 .feat_names = {
432 NULL, "avx512vbmi", "umip", "pku",
433 "ospke", NULL, NULL, NULL,
434 NULL, NULL, NULL, NULL,
435 NULL, NULL, NULL, NULL,
436 NULL, NULL, NULL, NULL,
437 NULL, NULL, "rdpid", NULL,
438 NULL, NULL, NULL, NULL,
439 NULL, NULL, NULL, NULL,
441 .cpuid_eax = 7,
442 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
443 .cpuid_reg = R_ECX,
444 .tcg_features = TCG_7_0_ECX_FEATURES,
446 [FEAT_8000_0007_EDX] = {
447 .feat_names = {
448 NULL, NULL, NULL, NULL,
449 NULL, NULL, NULL, NULL,
450 "invtsc", NULL, NULL, NULL,
451 NULL, NULL, NULL, NULL,
452 NULL, NULL, NULL, NULL,
453 NULL, NULL, NULL, NULL,
454 NULL, NULL, NULL, NULL,
455 NULL, NULL, NULL, NULL,
457 .cpuid_eax = 0x80000007,
458 .cpuid_reg = R_EDX,
459 .tcg_features = TCG_APM_FEATURES,
460 .unmigratable_flags = CPUID_APM_INVTSC,
462 [FEAT_XSAVE] = {
463 .feat_names = {
464 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
465 NULL, NULL, NULL, NULL,
466 NULL, NULL, NULL, NULL,
467 NULL, NULL, NULL, NULL,
468 NULL, NULL, NULL, NULL,
469 NULL, NULL, NULL, NULL,
470 NULL, NULL, NULL, NULL,
471 NULL, NULL, NULL, NULL,
473 .cpuid_eax = 0xd,
474 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
475 .cpuid_reg = R_EAX,
476 .tcg_features = TCG_XSAVE_FEATURES,
478 [FEAT_6_EAX] = {
479 .feat_names = {
480 NULL, NULL, "arat", NULL,
481 NULL, NULL, NULL, NULL,
482 NULL, NULL, NULL, NULL,
483 NULL, NULL, NULL, NULL,
484 NULL, NULL, NULL, NULL,
485 NULL, NULL, NULL, NULL,
486 NULL, NULL, NULL, NULL,
487 NULL, NULL, NULL, NULL,
489 .cpuid_eax = 6, .cpuid_reg = R_EAX,
490 .tcg_features = TCG_6_EAX_FEATURES,
494 typedef struct X86RegisterInfo32 {
495 /* Name of register */
496 const char *name;
497 /* QAPI enum value register */
498 X86CPURegister32 qapi_enum;
499 } X86RegisterInfo32;
501 #define REGISTER(reg) \
502 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
503 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
504 REGISTER(EAX),
505 REGISTER(ECX),
506 REGISTER(EDX),
507 REGISTER(EBX),
508 REGISTER(ESP),
509 REGISTER(EBP),
510 REGISTER(ESI),
511 REGISTER(EDI),
513 #undef REGISTER
515 typedef struct ExtSaveArea {
516 uint32_t feature, bits;
517 uint32_t offset, size;
518 } ExtSaveArea;
520 static const ExtSaveArea x86_ext_save_areas[] = {
521 [XSTATE_YMM_BIT] =
522 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
523 .offset = offsetof(X86XSaveArea, avx_state),
524 .size = sizeof(XSaveAVX) },
525 [XSTATE_BNDREGS_BIT] =
526 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
527 .offset = offsetof(X86XSaveArea, bndreg_state),
528 .size = sizeof(XSaveBNDREG) },
529 [XSTATE_BNDCSR_BIT] =
530 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
531 .offset = offsetof(X86XSaveArea, bndcsr_state),
532 .size = sizeof(XSaveBNDCSR) },
533 [XSTATE_OPMASK_BIT] =
534 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
535 .offset = offsetof(X86XSaveArea, opmask_state),
536 .size = sizeof(XSaveOpmask) },
537 [XSTATE_ZMM_Hi256_BIT] =
538 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
539 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
540 .size = sizeof(XSaveZMM_Hi256) },
541 [XSTATE_Hi16_ZMM_BIT] =
542 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
543 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
544 .size = sizeof(XSaveHi16_ZMM) },
545 [XSTATE_PKRU_BIT] =
546 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
547 .offset = offsetof(X86XSaveArea, pkru_state),
548 .size = sizeof(XSavePKRU) },
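/* Editor's sketch (assumption, not the function this commit actually adds):
 * the table above is what allows the set of XSAVE components to be derived
 * from the CPU's enabled feature bits at realize time, roughly like this:
 */
static uint64_t example_xsave_components(X86CPU *cpu)
{
    /* x87 FP and SSE state are always part of the XSAVE set */
    uint64_t mask = (1ULL << XSTATE_FP_BIT) | (1ULL << XSTATE_SSE_BIT);
    int i;

    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        /* enable a component only if its controlling feature bit is set */
        if (cpu->env.features[esa->feature] & esa->bits) {
            mask |= (1ULL << i);
        }
    }
    return mask;
}
/* The real realize-time implementation may differ in naming and in where the
 * resulting mask is stored. */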
551 static uint32_t xsave_area_size(uint64_t mask)
553 int i;
554 uint64_t ret = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader);
556 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
557 const ExtSaveArea *esa = &x86_ext_save_areas[i];
558 if ((mask >> i) & 1) {
559 ret = MAX(ret, esa->offset + esa->size);
562 return ret;
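/* Worked example (editor's note): the legacy region plus header occupy
 * 512 + 64 = 576 bytes.  For a mask with only bits 0 and 1 set (x87 FP and
 * SSE), the loop above finds no extended component and xsave_area_size()
 * returns 576; adding bit 2 (AVX/YMM) extends the result to
 * offsetof(X86XSaveArea, avx_state) + sizeof(XSaveAVX), i.e. 576 + 256 = 832
 * bytes with the standard layout.
 */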
565 const char *get_register_name_32(unsigned int reg)
567 if (reg >= CPU_NB_REGS32) {
568 return NULL;
570 return x86_reg_info_32[reg].name;
574 * Returns the set of feature flags that are supported and migratable by
575 * QEMU, for a given FeatureWord.
577 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
579 FeatureWordInfo *wi = &feature_word_info[w];
580 uint32_t r = 0;
581 int i;
583 for (i = 0; i < 32; i++) {
584 uint32_t f = 1U << i;
585 /* If the feature name is unknown, it is not supported by QEMU yet */
586 if (!wi->feat_names[i]) {
587 continue;
589 /* Skip features known to QEMU, but explicitly marked as unmigratable */
590 if (wi->unmigratable_flags & f) {
591 continue;
593 r |= f;
595 return r;
598 void host_cpuid(uint32_t function, uint32_t count,
599 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
601 uint32_t vec[4];
603 #ifdef __x86_64__
604 asm volatile("cpuid"
605 : "=a"(vec[0]), "=b"(vec[1]),
606 "=c"(vec[2]), "=d"(vec[3])
607 : "0"(function), "c"(count) : "cc");
608 #elif defined(__i386__)
609 asm volatile("pusha \n\t"
610 "cpuid \n\t"
611 "mov %%eax, 0(%2) \n\t"
612 "mov %%ebx, 4(%2) \n\t"
613 "mov %%ecx, 8(%2) \n\t"
614 "mov %%edx, 12(%2) \n\t"
615 "popa"
616 : : "a"(function), "c"(count), "S"(vec)
617 : "memory", "cc");
618 #else
619 abort();
620 #endif
622 if (eax)
623 *eax = vec[0];
624 if (ebx)
625 *ebx = vec[1];
626 if (ecx)
627 *ecx = vec[2];
628 if (edx)
629 *edx = vec[3];
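/* Example use (editor's illustration): reading the host's family/model/
 * stepping word, as host_x86_cpu_class_init() does further down:
 *
 *   uint32_t eax;
 *   host_cpuid(1, 0, &eax, NULL, NULL, NULL);
 *
 * Passing NULL for outputs that are not needed is fine because of the
 * "if (eax) ..." checks above.
 */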
632 #define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
634 /* general substring compare of *[s1..e1) and *[s2..e2). sx is start of
635 * a substring. ex if !NULL points to the first char after a substring,
636 * otherwise the string is assumed to be sized by a terminating nul.
637 * Return lexical ordering of *s1:*s2.
639 static int sstrcmp(const char *s1, const char *e1,
640 const char *s2, const char *e2)
642 for (;;) {
643 if (!*s1 || !*s2 || *s1 != *s2)
644 return (*s1 - *s2);
645 ++s1, ++s2;
646 if (s1 == e1 && s2 == e2)
647 return (0);
648 else if (s1 == e1)
649 return (*s2);
650 else if (s2 == e2)
651 return (*s1);
655 /* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
656 * '|' delimited (possibly empty) strings in which case search for a match
657 * within the alternatives proceeds left to right. Return 0 for success,
658 * non-zero otherwise.
660 static int altcmp(const char *s, const char *e, const char *altstr)
662 const char *p, *q;
664 for (q = p = altstr; ; ) {
665 while (*p && *p != '|')
666 ++p;
667 if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
668 return (0);
669 if (!*p)
670 return (1);
671 else
672 q = ++p;
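/* Example (editor's note): with the alias strings used in feat_names[],
 * altcmp(s, e, "sse4.1|sse4_1") returns 0 when *[s..e) is either "sse4.1"
 * or "sse4_1", and non-zero otherwise; an empty alternative (as in "foo|")
 * matches an empty substring.  lookup_feature() below relies on this to
 * accept either spelling of a flag on the command line.
 */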
676 /* search featureset for flag *[s..e), if found set corresponding bit in
677 * *pval and return true, otherwise return false
679 static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
680 const char **featureset)
682 uint32_t mask;
683 const char **ppc;
684 bool found = false;
686 for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
687 if (*ppc && !altcmp(s, e, *ppc)) {
688 *pval |= mask;
689 found = true;
692 return found;
695 static void add_flagname_to_bitmaps(const char *flagname,
696 FeatureWordArray words,
697 Error **errp)
699 FeatureWord w;
700 for (w = 0; w < FEATURE_WORDS; w++) {
701 FeatureWordInfo *wi = &feature_word_info[w];
702 if (lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
703 break;
706 if (w == FEATURE_WORDS) {
707 error_setg(errp, "CPU feature %s not found", flagname);
711 /* CPU class name definitions: */
713 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
714 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
716 /* Return type name for a given CPU model name.
717 * Caller is responsible for freeing the returned string.
719 static char *x86_cpu_type_name(const char *model_name)
721 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
724 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
726 ObjectClass *oc;
727 char *typename;
729 if (cpu_model == NULL) {
730 return NULL;
733 typename = x86_cpu_type_name(cpu_model);
734 oc = object_class_by_name(typename);
735 g_free(typename);
736 return oc;
739 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
741 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
742 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
743 return g_strndup(class_name,
744 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
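/* Example (editor's note): with X86_CPU_TYPE_SUFFIX == "-" TYPE_X86_CPU,
 * x86_cpu_type_name("SandyBridge") builds the QOM type name
 * "SandyBridge-x86_64-cpu" (or the i386 equivalent on 32-bit targets), and
 * x86_cpu_class_get_model_name() strips that suffix again to recover
 * "SandyBridge".
 */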
747 struct X86CPUDefinition {
748 const char *name;
749 uint32_t level;
750 uint32_t xlevel;
751 /* vendor is zero-terminated, 12 character ASCII string */
752 char vendor[CPUID_VENDOR_SZ + 1];
753 int family;
754 int model;
755 int stepping;
756 FeatureWordArray features;
757 char model_id[48];
760 static X86CPUDefinition builtin_x86_defs[] = {
762 .name = "qemu64",
763 .level = 0xd,
764 .vendor = CPUID_VENDOR_AMD,
765 .family = 6,
766 .model = 6,
767 .stepping = 3,
768 .features[FEAT_1_EDX] =
769 PPRO_FEATURES |
770 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
771 CPUID_PSE36,
772 .features[FEAT_1_ECX] =
773 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
774 .features[FEAT_8000_0001_EDX] =
775 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
776 .features[FEAT_8000_0001_ECX] =
777 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
778 .xlevel = 0x8000000A,
779 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
782 .name = "phenom",
783 .level = 5,
784 .vendor = CPUID_VENDOR_AMD,
785 .family = 16,
786 .model = 2,
787 .stepping = 3,
788 /* Missing: CPUID_HT */
789 .features[FEAT_1_EDX] =
790 PPRO_FEATURES |
791 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
792 CPUID_PSE36 | CPUID_VME,
793 .features[FEAT_1_ECX] =
794 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
795 CPUID_EXT_POPCNT,
796 .features[FEAT_8000_0001_EDX] =
797 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
798 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
799 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
800 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
801 CPUID_EXT3_CR8LEG,
802 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
803 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
804 .features[FEAT_8000_0001_ECX] =
805 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
806 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
807 /* Missing: CPUID_SVM_LBRV */
808 .features[FEAT_SVM] =
809 CPUID_SVM_NPT,
810 .xlevel = 0x8000001A,
811 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
814 .name = "core2duo",
815 .level = 10,
816 .vendor = CPUID_VENDOR_INTEL,
817 .family = 6,
818 .model = 15,
819 .stepping = 11,
820 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
821 .features[FEAT_1_EDX] =
822 PPRO_FEATURES |
823 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
824 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
825 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
826 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
827 .features[FEAT_1_ECX] =
828 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
829 CPUID_EXT_CX16,
830 .features[FEAT_8000_0001_EDX] =
831 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
832 .features[FEAT_8000_0001_ECX] =
833 CPUID_EXT3_LAHF_LM,
834 .xlevel = 0x80000008,
835 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
838 .name = "kvm64",
839 .level = 0xd,
840 .vendor = CPUID_VENDOR_INTEL,
841 .family = 15,
842 .model = 6,
843 .stepping = 1,
844 /* Missing: CPUID_HT */
845 .features[FEAT_1_EDX] =
846 PPRO_FEATURES | CPUID_VME |
847 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
848 CPUID_PSE36,
849 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
850 .features[FEAT_1_ECX] =
851 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
852 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
853 .features[FEAT_8000_0001_EDX] =
854 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
855 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
856 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
857 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
858 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
859 .features[FEAT_8000_0001_ECX] =
861 .xlevel = 0x80000008,
862 .model_id = "Common KVM processor"
865 .name = "qemu32",
866 .level = 4,
867 .vendor = CPUID_VENDOR_INTEL,
868 .family = 6,
869 .model = 6,
870 .stepping = 3,
871 .features[FEAT_1_EDX] =
872 PPRO_FEATURES,
873 .features[FEAT_1_ECX] =
874 CPUID_EXT_SSE3,
875 .xlevel = 0x80000004,
876 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
879 .name = "kvm32",
880 .level = 5,
881 .vendor = CPUID_VENDOR_INTEL,
882 .family = 15,
883 .model = 6,
884 .stepping = 1,
885 .features[FEAT_1_EDX] =
886 PPRO_FEATURES | CPUID_VME |
887 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
888 .features[FEAT_1_ECX] =
889 CPUID_EXT_SSE3,
890 .features[FEAT_8000_0001_ECX] =
892 .xlevel = 0x80000008,
893 .model_id = "Common 32-bit KVM processor"
896 .name = "coreduo",
897 .level = 10,
898 .vendor = CPUID_VENDOR_INTEL,
899 .family = 6,
900 .model = 14,
901 .stepping = 8,
902 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
903 .features[FEAT_1_EDX] =
904 PPRO_FEATURES | CPUID_VME |
905 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
906 CPUID_SS,
907 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
908 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
909 .features[FEAT_1_ECX] =
910 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
911 .features[FEAT_8000_0001_EDX] =
912 CPUID_EXT2_NX,
913 .xlevel = 0x80000008,
914 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
917 .name = "486",
918 .level = 1,
919 .vendor = CPUID_VENDOR_INTEL,
920 .family = 4,
921 .model = 8,
922 .stepping = 0,
923 .features[FEAT_1_EDX] =
924 I486_FEATURES,
925 .xlevel = 0,
928 .name = "pentium",
929 .level = 1,
930 .vendor = CPUID_VENDOR_INTEL,
931 .family = 5,
932 .model = 4,
933 .stepping = 3,
934 .features[FEAT_1_EDX] =
935 PENTIUM_FEATURES,
936 .xlevel = 0,
939 .name = "pentium2",
940 .level = 2,
941 .vendor = CPUID_VENDOR_INTEL,
942 .family = 6,
943 .model = 5,
944 .stepping = 2,
945 .features[FEAT_1_EDX] =
946 PENTIUM2_FEATURES,
947 .xlevel = 0,
950 .name = "pentium3",
951 .level = 3,
952 .vendor = CPUID_VENDOR_INTEL,
953 .family = 6,
954 .model = 7,
955 .stepping = 3,
956 .features[FEAT_1_EDX] =
957 PENTIUM3_FEATURES,
958 .xlevel = 0,
961 .name = "athlon",
962 .level = 2,
963 .vendor = CPUID_VENDOR_AMD,
964 .family = 6,
965 .model = 2,
966 .stepping = 3,
967 .features[FEAT_1_EDX] =
968 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
969 CPUID_MCA,
970 .features[FEAT_8000_0001_EDX] =
971 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
972 .xlevel = 0x80000008,
973 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
976 .name = "n270",
977 .level = 10,
978 .vendor = CPUID_VENDOR_INTEL,
979 .family = 6,
980 .model = 28,
981 .stepping = 2,
982 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
983 .features[FEAT_1_EDX] =
984 PPRO_FEATURES |
985 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
986 CPUID_ACPI | CPUID_SS,
987 /* Some CPUs have no CPUID_SEP */
988 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
989 * CPUID_EXT_XTPR */
990 .features[FEAT_1_ECX] =
991 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
992 CPUID_EXT_MOVBE,
993 .features[FEAT_8000_0001_EDX] =
994 CPUID_EXT2_NX,
995 .features[FEAT_8000_0001_ECX] =
996 CPUID_EXT3_LAHF_LM,
997 .xlevel = 0x80000008,
998 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
1001 .name = "Conroe",
1002 .level = 10,
1003 .vendor = CPUID_VENDOR_INTEL,
1004 .family = 6,
1005 .model = 15,
1006 .stepping = 3,
1007 .features[FEAT_1_EDX] =
1008 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1009 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1010 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1011 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1012 CPUID_DE | CPUID_FP87,
1013 .features[FEAT_1_ECX] =
1014 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1015 .features[FEAT_8000_0001_EDX] =
1016 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1017 .features[FEAT_8000_0001_ECX] =
1018 CPUID_EXT3_LAHF_LM,
1019 .xlevel = 0x80000008,
1020 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1023 .name = "Penryn",
1024 .level = 10,
1025 .vendor = CPUID_VENDOR_INTEL,
1026 .family = 6,
1027 .model = 23,
1028 .stepping = 3,
1029 .features[FEAT_1_EDX] =
1030 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1031 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1032 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1033 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1034 CPUID_DE | CPUID_FP87,
1035 .features[FEAT_1_ECX] =
1036 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1037 CPUID_EXT_SSE3,
1038 .features[FEAT_8000_0001_EDX] =
1039 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1040 .features[FEAT_8000_0001_ECX] =
1041 CPUID_EXT3_LAHF_LM,
1042 .xlevel = 0x80000008,
1043 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1046 .name = "Nehalem",
1047 .level = 11,
1048 .vendor = CPUID_VENDOR_INTEL,
1049 .family = 6,
1050 .model = 26,
1051 .stepping = 3,
1052 .features[FEAT_1_EDX] =
1053 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1054 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1055 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1056 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1057 CPUID_DE | CPUID_FP87,
1058 .features[FEAT_1_ECX] =
1059 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1060 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1061 .features[FEAT_8000_0001_EDX] =
1062 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1063 .features[FEAT_8000_0001_ECX] =
1064 CPUID_EXT3_LAHF_LM,
1065 .xlevel = 0x80000008,
1066 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1069 .name = "Westmere",
1070 .level = 11,
1071 .vendor = CPUID_VENDOR_INTEL,
1072 .family = 6,
1073 .model = 44,
1074 .stepping = 1,
1075 .features[FEAT_1_EDX] =
1076 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1077 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1078 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1079 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1080 CPUID_DE | CPUID_FP87,
1081 .features[FEAT_1_ECX] =
1082 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1083 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1084 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1085 .features[FEAT_8000_0001_EDX] =
1086 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1087 .features[FEAT_8000_0001_ECX] =
1088 CPUID_EXT3_LAHF_LM,
1089 .features[FEAT_6_EAX] =
1090 CPUID_6_EAX_ARAT,
1091 .xlevel = 0x80000008,
1092 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1095 .name = "SandyBridge",
1096 .level = 0xd,
1097 .vendor = CPUID_VENDOR_INTEL,
1098 .family = 6,
1099 .model = 42,
1100 .stepping = 1,
1101 .features[FEAT_1_EDX] =
1102 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1103 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1104 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1105 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1106 CPUID_DE | CPUID_FP87,
1107 .features[FEAT_1_ECX] =
1108 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1109 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1110 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1111 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1112 CPUID_EXT_SSE3,
1113 .features[FEAT_8000_0001_EDX] =
1114 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1115 CPUID_EXT2_SYSCALL,
1116 .features[FEAT_8000_0001_ECX] =
1117 CPUID_EXT3_LAHF_LM,
1118 .features[FEAT_XSAVE] =
1119 CPUID_XSAVE_XSAVEOPT,
1120 .features[FEAT_6_EAX] =
1121 CPUID_6_EAX_ARAT,
1122 .xlevel = 0x80000008,
1123 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1126 .name = "IvyBridge",
1127 .level = 0xd,
1128 .vendor = CPUID_VENDOR_INTEL,
1129 .family = 6,
1130 .model = 58,
1131 .stepping = 9,
1132 .features[FEAT_1_EDX] =
1133 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1134 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1135 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1136 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1137 CPUID_DE | CPUID_FP87,
1138 .features[FEAT_1_ECX] =
1139 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1140 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1141 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1142 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1143 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1144 .features[FEAT_7_0_EBX] =
1145 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1146 CPUID_7_0_EBX_ERMS,
1147 .features[FEAT_8000_0001_EDX] =
1148 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1149 CPUID_EXT2_SYSCALL,
1150 .features[FEAT_8000_0001_ECX] =
1151 CPUID_EXT3_LAHF_LM,
1152 .features[FEAT_XSAVE] =
1153 CPUID_XSAVE_XSAVEOPT,
1154 .features[FEAT_6_EAX] =
1155 CPUID_6_EAX_ARAT,
1156 .xlevel = 0x80000008,
1157 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1160 .name = "Haswell-noTSX",
1161 .level = 0xd,
1162 .vendor = CPUID_VENDOR_INTEL,
1163 .family = 6,
1164 .model = 60,
1165 .stepping = 1,
1166 .features[FEAT_1_EDX] =
1167 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1168 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1169 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1170 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1171 CPUID_DE | CPUID_FP87,
1172 .features[FEAT_1_ECX] =
1173 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1174 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1175 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1176 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1177 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1178 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1179 .features[FEAT_8000_0001_EDX] =
1180 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1181 CPUID_EXT2_SYSCALL,
1182 .features[FEAT_8000_0001_ECX] =
1183 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1184 .features[FEAT_7_0_EBX] =
1185 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1186 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1187 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1188 .features[FEAT_XSAVE] =
1189 CPUID_XSAVE_XSAVEOPT,
1190 .features[FEAT_6_EAX] =
1191 CPUID_6_EAX_ARAT,
1192 .xlevel = 0x80000008,
1193 .model_id = "Intel Core Processor (Haswell, no TSX)",
1194 }, {
1195 .name = "Haswell",
1196 .level = 0xd,
1197 .vendor = CPUID_VENDOR_INTEL,
1198 .family = 6,
1199 .model = 60,
1200 .stepping = 1,
1201 .features[FEAT_1_EDX] =
1202 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1203 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1204 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1205 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1206 CPUID_DE | CPUID_FP87,
1207 .features[FEAT_1_ECX] =
1208 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1209 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1210 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1211 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1212 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1213 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1214 .features[FEAT_8000_0001_EDX] =
1215 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1216 CPUID_EXT2_SYSCALL,
1217 .features[FEAT_8000_0001_ECX] =
1218 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1219 .features[FEAT_7_0_EBX] =
1220 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1221 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1222 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1223 CPUID_7_0_EBX_RTM,
1224 .features[FEAT_XSAVE] =
1225 CPUID_XSAVE_XSAVEOPT,
1226 .features[FEAT_6_EAX] =
1227 CPUID_6_EAX_ARAT,
1228 .xlevel = 0x80000008,
1229 .model_id = "Intel Core Processor (Haswell)",
1232 .name = "Broadwell-noTSX",
1233 .level = 0xd,
1234 .vendor = CPUID_VENDOR_INTEL,
1235 .family = 6,
1236 .model = 61,
1237 .stepping = 2,
1238 .features[FEAT_1_EDX] =
1239 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1240 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1241 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1242 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1243 CPUID_DE | CPUID_FP87,
1244 .features[FEAT_1_ECX] =
1245 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1246 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1247 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1248 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1249 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1250 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1251 .features[FEAT_8000_0001_EDX] =
1252 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1253 CPUID_EXT2_SYSCALL,
1254 .features[FEAT_8000_0001_ECX] =
1255 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1256 .features[FEAT_7_0_EBX] =
1257 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1258 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1259 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1260 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1261 CPUID_7_0_EBX_SMAP,
1262 .features[FEAT_XSAVE] =
1263 CPUID_XSAVE_XSAVEOPT,
1264 .features[FEAT_6_EAX] =
1265 CPUID_6_EAX_ARAT,
1266 .xlevel = 0x80000008,
1267 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1270 .name = "Broadwell",
1271 .level = 0xd,
1272 .vendor = CPUID_VENDOR_INTEL,
1273 .family = 6,
1274 .model = 61,
1275 .stepping = 2,
1276 .features[FEAT_1_EDX] =
1277 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1278 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1279 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1280 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1281 CPUID_DE | CPUID_FP87,
1282 .features[FEAT_1_ECX] =
1283 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1284 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1285 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1286 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1287 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1288 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1289 .features[FEAT_8000_0001_EDX] =
1290 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1291 CPUID_EXT2_SYSCALL,
1292 .features[FEAT_8000_0001_ECX] =
1293 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1294 .features[FEAT_7_0_EBX] =
1295 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1296 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1297 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1298 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1299 CPUID_7_0_EBX_SMAP,
1300 .features[FEAT_XSAVE] =
1301 CPUID_XSAVE_XSAVEOPT,
1302 .features[FEAT_6_EAX] =
1303 CPUID_6_EAX_ARAT,
1304 .xlevel = 0x80000008,
1305 .model_id = "Intel Core Processor (Broadwell)",
1308 .name = "Skylake-Client",
1309 .level = 0xd,
1310 .vendor = CPUID_VENDOR_INTEL,
1311 .family = 6,
1312 .model = 94,
1313 .stepping = 3,
1314 .features[FEAT_1_EDX] =
1315 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1316 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1317 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1318 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1319 CPUID_DE | CPUID_FP87,
1320 .features[FEAT_1_ECX] =
1321 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1322 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1323 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1324 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1325 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1326 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1327 .features[FEAT_8000_0001_EDX] =
1328 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1329 CPUID_EXT2_SYSCALL,
1330 .features[FEAT_8000_0001_ECX] =
1331 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1332 .features[FEAT_7_0_EBX] =
1333 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1334 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1335 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1336 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1337 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1338 /* Missing: XSAVES (not supported by some Linux versions,
1339 * including v4.1 to v4.6).
1340 * KVM doesn't yet expose any XSAVES state save component,
1341 * and the only one defined in Skylake (processor tracing)
1342 * probably will block migration anyway.
1344 .features[FEAT_XSAVE] =
1345 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1346 CPUID_XSAVE_XGETBV1,
1347 .features[FEAT_6_EAX] =
1348 CPUID_6_EAX_ARAT,
1349 .xlevel = 0x80000008,
1350 .model_id = "Intel Core Processor (Skylake)",
1353 .name = "Opteron_G1",
1354 .level = 5,
1355 .vendor = CPUID_VENDOR_AMD,
1356 .family = 15,
1357 .model = 6,
1358 .stepping = 1,
1359 .features[FEAT_1_EDX] =
1360 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1361 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1362 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1363 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1364 CPUID_DE | CPUID_FP87,
1365 .features[FEAT_1_ECX] =
1366 CPUID_EXT_SSE3,
1367 .features[FEAT_8000_0001_EDX] =
1368 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1369 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1370 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1371 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1372 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1373 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1374 .xlevel = 0x80000008,
1375 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1378 .name = "Opteron_G2",
1379 .level = 5,
1380 .vendor = CPUID_VENDOR_AMD,
1381 .family = 15,
1382 .model = 6,
1383 .stepping = 1,
1384 .features[FEAT_1_EDX] =
1385 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1386 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1387 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1388 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1389 CPUID_DE | CPUID_FP87,
1390 .features[FEAT_1_ECX] =
1391 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1392 /* Missing: CPUID_EXT2_RDTSCP */
1393 .features[FEAT_8000_0001_EDX] =
1394 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1395 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1396 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1397 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1398 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1399 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1400 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1401 .features[FEAT_8000_0001_ECX] =
1402 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1403 .xlevel = 0x80000008,
1404 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1407 .name = "Opteron_G3",
1408 .level = 5,
1409 .vendor = CPUID_VENDOR_AMD,
1410 .family = 15,
1411 .model = 6,
1412 .stepping = 1,
1413 .features[FEAT_1_EDX] =
1414 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1415 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1416 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1417 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1418 CPUID_DE | CPUID_FP87,
1419 .features[FEAT_1_ECX] =
1420 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1421 CPUID_EXT_SSE3,
1422 /* Missing: CPUID_EXT2_RDTSCP */
1423 .features[FEAT_8000_0001_EDX] =
1424 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1425 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1426 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1427 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1428 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1429 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1430 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1431 .features[FEAT_8000_0001_ECX] =
1432 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1433 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1434 .xlevel = 0x80000008,
1435 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1438 .name = "Opteron_G4",
1439 .level = 0xd,
1440 .vendor = CPUID_VENDOR_AMD,
1441 .family = 21,
1442 .model = 1,
1443 .stepping = 2,
1444 .features[FEAT_1_EDX] =
1445 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1446 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1447 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1448 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1449 CPUID_DE | CPUID_FP87,
1450 .features[FEAT_1_ECX] =
1451 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1452 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1453 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1454 CPUID_EXT_SSE3,
1455 /* Missing: CPUID_EXT2_RDTSCP */
1456 .features[FEAT_8000_0001_EDX] =
1457 CPUID_EXT2_LM |
1458 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1459 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1460 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1461 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1462 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1463 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1464 .features[FEAT_8000_0001_ECX] =
1465 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1466 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1467 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1468 CPUID_EXT3_LAHF_LM,
1469 /* no xsaveopt! */
1470 .xlevel = 0x8000001A,
1471 .model_id = "AMD Opteron 62xx class CPU",
1474 .name = "Opteron_G5",
1475 .level = 0xd,
1476 .vendor = CPUID_VENDOR_AMD,
1477 .family = 21,
1478 .model = 2,
1479 .stepping = 0,
1480 .features[FEAT_1_EDX] =
1481 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1482 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1483 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1484 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1485 CPUID_DE | CPUID_FP87,
1486 .features[FEAT_1_ECX] =
1487 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1488 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1489 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1490 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1491 /* Missing: CPUID_EXT2_RDTSCP */
1492 .features[FEAT_8000_0001_EDX] =
1493 CPUID_EXT2_LM |
1494 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1495 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1496 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1497 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1498 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1499 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1500 .features[FEAT_8000_0001_ECX] =
1501 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1502 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1503 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1504 CPUID_EXT3_LAHF_LM,
1505 /* no xsaveopt! */
1506 .xlevel = 0x8000001A,
1507 .model_id = "AMD Opteron 63xx class CPU",
1511 typedef struct PropValue {
1512 const char *prop, *value;
1513 } PropValue;
1515 /* KVM-specific features that are automatically added/removed
1516 * from all CPU models when KVM is enabled.
1518 static PropValue kvm_default_props[] = {
1519 { "kvmclock", "on" },
1520 { "kvm-nopiodelay", "on" },
1521 { "kvm-asyncpf", "on" },
1522 { "kvm-steal-time", "on" },
1523 { "kvm-pv-eoi", "on" },
1524 { "kvmclock-stable-bit", "on" },
1525 { "x2apic", "on" },
1526 { "acpi", "off" },
1527 { "monitor", "off" },
1528 { "svm", "off" },
1529 { NULL, NULL },
1532 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1534 PropValue *pv;
1535 for (pv = kvm_default_props; pv->prop; pv++) {
1536 if (!strcmp(pv->prop, prop)) {
1537 pv->value = value;
1538 break;
1542 /* It is valid to call this function only for properties that
1543 * are already present in the kvm_default_props table.
1545 assert(pv->prop);
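/* Example (editor's illustration): machine code can tweak these defaults
 * before CPUs are created, e.g.
 *
 *   x86_cpu_change_kvm_default("svm", "on");
 *
 * This only works for properties already listed in kvm_default_props;
 * anything else trips the assert above.
 */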
1548 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1549 bool migratable_only);
1551 #ifdef CONFIG_KVM
1553 static bool lmce_supported(void)
1555 uint64_t mce_cap;
1557 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
1558 return false;
1561 return !!(mce_cap & MCG_LMCE_P);
1564 static int cpu_x86_fill_model_id(char *str)
1566 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1567 int i;
1569 for (i = 0; i < 3; i++) {
1570 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
1571 memcpy(str + i * 16 + 0, &eax, 4);
1572 memcpy(str + i * 16 + 4, &ebx, 4);
1573 memcpy(str + i * 16 + 8, &ecx, 4);
1574 memcpy(str + i * 16 + 12, &edx, 4);
1576 return 0;
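/* Example (editor's note): CPUID leaves 0x80000002..0x80000004 each return
 * 16 bytes of the processor brand string in EAX/EBX/ECX/EDX, so the three
 * iterations above fill str[0..47] with e.g.
 * "Intel(R) Core(TM) i7-4770 CPU @ 3.40GHz" padded with NUL bytes.
 * The caller must provide a buffer of at least 48 bytes.
 */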
1579 static X86CPUDefinition host_cpudef;
1581 static Property host_x86_cpu_properties[] = {
1582 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1583 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
1584 DEFINE_PROP_END_OF_LIST()
1587 /* class_init for the "host" CPU model
1589 * This function may be called before KVM is initialized.
1591 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1593 DeviceClass *dc = DEVICE_CLASS(oc);
1594 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1595 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1597 xcc->kvm_required = true;
1599 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1600 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1602 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1603 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1604 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1605 host_cpudef.stepping = eax & 0x0F;
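/* Worked example (editor's note): for a host returning EAX = 0x000306A9
 * (an Ivy Bridge part), the lines above give
 *   family   = ((0x000306A9 >> 8) & 0x0F) + ((0x000306A9 >> 20) & 0xFF)
 *            = 0x6 + 0x0 = 6
 *   model    = ((0x000306A9 >> 4) & 0x0F) | ((0x000306A9 & 0xF0000) >> 12)
 *            = 0xA | 0x30 = 0x3A = 58
 *   stepping = 0x9
 */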
1607 cpu_x86_fill_model_id(host_cpudef.model_id);
1609 xcc->cpu_def = &host_cpudef;
1611 /* level, xlevel, xlevel2, and the feature words are initialized on
1612 * instance_init, because they require KVM to be initialized.
1615 dc->props = host_x86_cpu_properties;
1616 /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
1617 dc->cannot_destroy_with_object_finalize_yet = true;
1620 static void host_x86_cpu_initfn(Object *obj)
1622 X86CPU *cpu = X86_CPU(obj);
1623 CPUX86State *env = &cpu->env;
1624 KVMState *s = kvm_state;
1626 /* We can't fill the features array here because we don't know yet if
1627 * "migratable" is true or false.
1629 cpu->host_features = true;
1631 /* If KVM is disabled, x86_cpu_realizefn() will report an error later */
1632 if (kvm_enabled()) {
1633 env->cpuid_min_level =
1634 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1635 env->cpuid_min_xlevel =
1636 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1637 env->cpuid_min_xlevel2 =
1638 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1640 if (lmce_supported()) {
1641 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
1645 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1648 static const TypeInfo host_x86_cpu_type_info = {
1649 .name = X86_CPU_TYPE_NAME("host"),
1650 .parent = TYPE_X86_CPU,
1651 .instance_init = host_x86_cpu_initfn,
1652 .class_init = host_x86_cpu_class_init,
1655 #endif
1657 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1659 FeatureWordInfo *f = &feature_word_info[w];
1660 int i;
1662 for (i = 0; i < 32; ++i) {
1663 if ((1UL << i) & mask) {
1664 const char *reg = get_register_name_32(f->cpuid_reg);
1665 assert(reg);
1666 fprintf(stderr, "warning: %s doesn't support requested feature: "
1667 "CPUID.%02XH:%s%s%s [bit %d]\n",
1668 kvm_enabled() ? "host" : "TCG",
1669 f->cpuid_eax, reg,
1670 f->feat_names[i] ? "." : "",
1671 f->feat_names[i] ? f->feat_names[i] : "", i);
1676 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1677 const char *name, void *opaque,
1678 Error **errp)
1680 X86CPU *cpu = X86_CPU(obj);
1681 CPUX86State *env = &cpu->env;
1682 int64_t value;
1684 value = (env->cpuid_version >> 8) & 0xf;
1685 if (value == 0xf) {
1686 value += (env->cpuid_version >> 20) & 0xff;
1688 visit_type_int(v, name, &value, errp);
1691 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1692 const char *name, void *opaque,
1693 Error **errp)
1695 X86CPU *cpu = X86_CPU(obj);
1696 CPUX86State *env = &cpu->env;
1697 const int64_t min = 0;
1698 const int64_t max = 0xff + 0xf;
1699 Error *local_err = NULL;
1700 int64_t value;
1702 visit_type_int(v, name, &value, &local_err);
1703 if (local_err) {
1704 error_propagate(errp, local_err);
1705 return;
1707 if (value < min || value > max) {
1708 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1709 name ? name : "null", value, min, max);
1710 return;
1713 env->cpuid_version &= ~0xff00f00;
1714 if (value > 0x0f) {
1715 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1716 } else {
1717 env->cpuid_version |= value << 8;
1721 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1722 const char *name, void *opaque,
1723 Error **errp)
1725 X86CPU *cpu = X86_CPU(obj);
1726 CPUX86State *env = &cpu->env;
1727 int64_t value;
1729 value = (env->cpuid_version >> 4) & 0xf;
1730 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1731 visit_type_int(v, name, &value, errp);
1734 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1735 const char *name, void *opaque,
1736 Error **errp)
1738 X86CPU *cpu = X86_CPU(obj);
1739 CPUX86State *env = &cpu->env;
1740 const int64_t min = 0;
1741 const int64_t max = 0xff;
1742 Error *local_err = NULL;
1743 int64_t value;
1745 visit_type_int(v, name, &value, &local_err);
1746 if (local_err) {
1747 error_propagate(errp, local_err);
1748 return;
1750 if (value < min || value > max) {
1751 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1752 name ? name : "null", value, min, max);
1753 return;
1756 env->cpuid_version &= ~0xf00f0;
1757 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1760 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1761 const char *name, void *opaque,
1762 Error **errp)
1764 X86CPU *cpu = X86_CPU(obj);
1765 CPUX86State *env = &cpu->env;
1766 int64_t value;
1768 value = env->cpuid_version & 0xf;
1769 visit_type_int(v, name, &value, errp);
1772 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1773 const char *name, void *opaque,
1774 Error **errp)
1776 X86CPU *cpu = X86_CPU(obj);
1777 CPUX86State *env = &cpu->env;
1778 const int64_t min = 0;
1779 const int64_t max = 0xf;
1780 Error *local_err = NULL;
1781 int64_t value;
1783 visit_type_int(v, name, &value, &local_err);
1784 if (local_err) {
1785 error_propagate(errp, local_err);
1786 return;
1788 if (value < min || value > max) {
1789 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1790 name ? name : "null", value, min, max);
1791 return;
1794 env->cpuid_version &= ~0xf;
1795 env->cpuid_version |= value & 0xf;
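/* The 12-character vendor string is packed into three little-endian
 * 32-bit words, in the order CPUID.0 returns them (EBX, EDX, ECX).
 * For example, "GenuineIntel" is stored as vendor1 = "Genu",
 * vendor2 = "ineI", vendor3 = "ntel".
 */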
1798 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1800 X86CPU *cpu = X86_CPU(obj);
1801 CPUX86State *env = &cpu->env;
1802 char *value;
1804 value = g_malloc(CPUID_VENDOR_SZ + 1);
1805 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1806 env->cpuid_vendor3);
1807 return value;
1810 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1811 Error **errp)
1813 X86CPU *cpu = X86_CPU(obj);
1814 CPUX86State *env = &cpu->env;
1815 int i;
1817 if (strlen(value) != CPUID_VENDOR_SZ) {
1818 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1819 return;
1822 env->cpuid_vendor1 = 0;
1823 env->cpuid_vendor2 = 0;
1824 env->cpuid_vendor3 = 0;
1825 for (i = 0; i < 4; i++) {
1826 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1827 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1828 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
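/* The model-id string (up to 48 characters) is stored as twelve
 * little-endian 32-bit words in cpuid_model[], returned four at a time
 * by CPUID leaves 0x80000002..0x80000004.
 */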
1832 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1834 X86CPU *cpu = X86_CPU(obj);
1835 CPUX86State *env = &cpu->env;
1836 char *value;
1837 int i;
1839 value = g_malloc(48 + 1);
1840 for (i = 0; i < 48; i++) {
1841 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1843 value[48] = '\0';
1844 return value;
1847 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1848 Error **errp)
1850 X86CPU *cpu = X86_CPU(obj);
1851 CPUX86State *env = &cpu->env;
1852 int c, len, i;
1854 if (model_id == NULL) {
1855 model_id = "";
1857 len = strlen(model_id);
1858 memset(env->cpuid_model, 0, 48);
1859 for (i = 0; i < 48; i++) {
1860 if (i >= len) {
1861 c = '\0';
1862 } else {
1863 c = (uint8_t)model_id[i];
1865 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1869 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1870 void *opaque, Error **errp)
1872 X86CPU *cpu = X86_CPU(obj);
1873 int64_t value;
1875 value = cpu->env.tsc_khz * 1000;
1876 visit_type_int(v, name, &value, errp);
1879 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1880 void *opaque, Error **errp)
1882 X86CPU *cpu = X86_CPU(obj);
1883 const int64_t min = 0;
1884 const int64_t max = INT64_MAX;
1885 Error *local_err = NULL;
1886 int64_t value;
1888 visit_type_int(v, name, &value, &local_err);
1889 if (local_err) {
1890 error_propagate(errp, local_err);
1891 return;
1893 if (value < min || value > max) {
1894 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1895 name ? name : "null", value, min, max);
1896 return;
1899 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
1902 /* Generic getter for "feature-words" and "filtered-features" properties */
1903 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
1904 const char *name, void *opaque,
1905 Error **errp)
1907 uint32_t *array = (uint32_t *)opaque;
1908 FeatureWord w;
1909 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1910 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1911 X86CPUFeatureWordInfoList *list = NULL;
1913 for (w = 0; w < FEATURE_WORDS; w++) {
1914 FeatureWordInfo *wi = &feature_word_info[w];
1915 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1916 qwi->cpuid_input_eax = wi->cpuid_eax;
1917 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1918 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1919 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1920 qwi->features = array[w];
1922 /* List will be in reverse order, but order shouldn't matter */
1923 list_entries[w].next = list;
1924 list_entries[w].value = &word_infos[w];
1925 list = &list_entries[w];
1928 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
1931 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1932 void *opaque, Error **errp)
1934 X86CPU *cpu = X86_CPU(obj);
1935 int64_t value = cpu->hyperv_spinlock_attempts;
1937 visit_type_int(v, name, &value, errp);
1940 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1941 void *opaque, Error **errp)
1943 const int64_t min = 0xFFF;
1944 const int64_t max = UINT_MAX;
1945 X86CPU *cpu = X86_CPU(obj);
1946 Error *err = NULL;
1947 int64_t value;
1949 visit_type_int(v, name, &value, &err);
1950 if (err) {
1951 error_propagate(errp, err);
1952 return;
1955 if (value < min || value > max) {
1956 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1957 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1958 object_get_typename(obj), name ? name : "null",
1959 value, min, max);
1960 return;
1962 cpu->hyperv_spinlock_attempts = value;
1965 static PropertyInfo qdev_prop_spinlocks = {
1966 .name = "int",
1967 .get = x86_get_hv_spinlocks,
1968 .set = x86_set_hv_spinlocks,
1971 /* Convert all '_' in a feature string option name to '-', to make feature
1972 * names conform to the QOM property naming rule, which uses '-' instead of '_'.
1974 static inline void feat2prop(char *s)
1976 while ((s = strchr(s, '_'))) {
1977 *s = '-';
1981 /* Compatibility hack to maintain the legacy +-feat semantics,
1982 * where +-feat overwrites any feature set by
1983 * feat=on|feat, even if the latter is parsed after +-feat
1984 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
1986 static FeatureWordArray plus_features = { 0 };
1987 static FeatureWordArray minus_features = { 0 };
1989 /* Parse "+feature,-feature,feature=foo" CPU feature string
1991 static void x86_cpu_parse_featurestr(const char *typename, char *features,
1992 Error **errp)
1994 char *featurestr; /* Single "key=value" string being parsed */
1995 Error *local_err = NULL;
1996 static bool cpu_globals_initialized;
1998 if (cpu_globals_initialized) {
1999 return;
2001 cpu_globals_initialized = true;
2003 if (!features) {
2004 return;
2007 for (featurestr = strtok(features, ",");
2008 featurestr && !local_err;
2009 featurestr = strtok(NULL, ",")) {
2010 const char *name;
2011 const char *val = NULL;
2012 char *eq = NULL;
2013 char num[32];
2014 GlobalProperty *prop;
2016 /* Compatibility syntax: */
2017 if (featurestr[0] == '+') {
2018 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
2019 continue;
2020 } else if (featurestr[0] == '-') {
2021 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
2022 continue;
2025 eq = strchr(featurestr, '=');
2026 if (eq) {
2027 *eq++ = 0;
2028 val = eq;
2029 } else {
2030 val = "on";
2033 feat2prop(featurestr);
2034 name = featurestr;
2036 /* Special case: */
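/* "tsc-freq" accepts a size-style suffix (e.g. "tsc-freq=2.5G" means
 * 2.5 GHz) and is forwarded to the "tsc-frequency" property as a plain
 * number of Hz.
 */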
2037 if (!strcmp(name, "tsc-freq")) {
2038 int64_t tsc_freq;
2039 char *err;
2041 tsc_freq = qemu_strtosz_suffix_unit(val, &err,
2042 QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
2043 if (tsc_freq < 0 || *err) {
2044 error_setg(errp, "bad numerical value %s", val);
2045 return;
2047 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
2048 val = num;
2049 name = "tsc-frequency";
2052 prop = g_new0(typeof(*prop), 1);
2053 prop->driver = typename;
2054 prop->property = g_strdup(name);
2055 prop->value = g_strdup(val);
2056 prop->errp = &error_fatal;
2057 qdev_prop_register_global(prop);
2060 if (local_err) {
2061 error_propagate(errp, local_err);
2065 /* Print all CPUID feature names in featureset
2067 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2069 int bit;
2070 bool first = true;
2072 for (bit = 0; bit < 32; bit++) {
2073 if (featureset[bit]) {
2074 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2075 first = false;
2080 /* generate CPU information. */
2081 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2083 X86CPUDefinition *def;
2084 char buf[256];
2085 int i;
2087 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2088 def = &builtin_x86_defs[i];
2089 snprintf(buf, sizeof(buf), "%s", def->name);
2090 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
2092 #ifdef CONFIG_KVM
2093 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
2094 "KVM processor with all supported host features "
2095 "(only available in KVM mode)");
2096 #endif
2098 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2099 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2100 FeatureWordInfo *fw = &feature_word_info[i];
2102 (*cpu_fprintf)(f, " ");
2103 listflags(f, cpu_fprintf, fw->feat_names);
2104 (*cpu_fprintf)(f, "\n");
2108 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2110 CpuDefinitionInfoList *cpu_list = NULL;
2111 X86CPUDefinition *def;
2112 int i;
2114 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2115 CpuDefinitionInfoList *entry;
2116 CpuDefinitionInfo *info;
2118 def = &builtin_x86_defs[i];
2119 info = g_malloc0(sizeof(*info));
2120 info->name = g_strdup(def->name);
2122 entry = g_malloc0(sizeof(*entry));
2123 entry->value = info;
2124 entry->next = cpu_list;
2125 cpu_list = entry;
2128 return cpu_list;
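/* Return the set of feature bits in word @w that the current accelerator
 * can support.  If @migratable_only is true, non-migratable bits are
 * masked out.  Accelerators other than KVM and TCG impose no restriction
 * (all bits are returned).
 */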
2131 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2132 bool migratable_only)
2134 FeatureWordInfo *wi = &feature_word_info[w];
2135 uint32_t r;
2137 if (kvm_enabled()) {
2138 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2139 wi->cpuid_ecx,
2140 wi->cpuid_reg);
2141 } else if (tcg_enabled()) {
2142 r = wi->tcg_features;
2143 } else {
2144 return ~0;
2146 if (migratable_only) {
2147 r &= x86_cpu_get_migratable_flags(w);
2149 return r;
2153 * Filters CPU feature words based on host availability of each feature.
2155 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
2157 static int x86_cpu_filter_features(X86CPU *cpu)
2159 CPUX86State *env = &cpu->env;
2160 FeatureWord w;
2161 int rv = 0;
2163 for (w = 0; w < FEATURE_WORDS; w++) {
2164 uint32_t host_feat =
2165 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2166 uint32_t requested_features = env->features[w];
2167 env->features[w] &= host_feat;
2168 cpu->filtered_features[w] = requested_features & ~env->features[w];
2169 if (cpu->filtered_features[w]) {
2170 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2171 report_unavailable_features(w, cpu->filtered_features[w]);
2173 rv = 1;
2177 return rv;
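/* Apply a list of property values (terminated by an entry with a NULL
 * prop name) to @cpu.  Entries whose value is NULL are skipped.
 */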
2180 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2182 PropValue *pv;
2183 for (pv = props; pv->prop; pv++) {
2184 if (!pv->value) {
2185 continue;
2187 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2188 &error_abort);
2192 /* Load data from X86CPUDefinition
2194 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2196 CPUX86State *env = &cpu->env;
2197 const char *vendor;
2198 char host_vendor[CPUID_VENDOR_SZ + 1];
2199 FeatureWord w;
2201 /* CPU models only set _minimum_ values for level/xlevel: */
2202 object_property_set_int(OBJECT(cpu), def->level, "min-level", errp);
2203 object_property_set_int(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
2205 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2206 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2207 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2208 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2209 for (w = 0; w < FEATURE_WORDS; w++) {
2210 env->features[w] = def->features[w];
2213 /* Special cases not set in the X86CPUDefinition structs: */
2214 if (kvm_enabled()) {
2215 if (!kvm_irqchip_in_kernel()) {
2216 x86_cpu_change_kvm_default("x2apic", "off");
2219 x86_cpu_apply_props(cpu, kvm_default_props);
2222 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2224 /* sysenter isn't supported in compatibility mode on AMD,
2225 * syscall isn't supported in compatibility mode on Intel.
2226 * Normally we advertise the actual CPU vendor, but you can
2227 * override this using the 'vendor' property if you want to use
2228 * KVM's sysenter/syscall emulation in compatibility mode and
2229 * when doing cross vendor migration
2231 vendor = def->vendor;
2232 if (kvm_enabled()) {
2233 uint32_t ebx = 0, ecx = 0, edx = 0;
2234 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2235 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2236 vendor = host_vendor;
2239 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2243 X86CPU *cpu_x86_init(const char *cpu_model)
2245 return X86_CPU(cpu_generic_init(TYPE_X86_CPU, cpu_model));
2248 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2250 X86CPUDefinition *cpudef = data;
2251 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2253 xcc->cpu_def = cpudef;
2256 static void x86_register_cpudef_type(X86CPUDefinition *def)
2258 char *typename = x86_cpu_type_name(def->name);
2259 TypeInfo ti = {
2260 .name = typename,
2261 .parent = TYPE_X86_CPU,
2262 .class_init = x86_cpu_cpudef_class_init,
2263 .class_data = def,
2266 type_register(&ti);
2267 g_free(typename);
2270 #if !defined(CONFIG_USER_ONLY)
2272 void cpu_clear_apic_feature(CPUX86State *env)
2274 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2277 #endif /* !CONFIG_USER_ONLY */
2279 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2280 uint32_t *eax, uint32_t *ebx,
2281 uint32_t *ecx, uint32_t *edx)
2283 X86CPU *cpu = x86_env_get_cpu(env);
2284 CPUState *cs = CPU(cpu);
2285 uint32_t pkg_offset;
2287 /* Test whether the maximum CPUID index has been reached */
2288 if (index & 0x80000000) {
2289 if (index > env->cpuid_xlevel) {
2290 if (env->cpuid_xlevel2 > 0) {
2291 /* Handle the Centaur's CPUID instruction. */
2292 if (index > env->cpuid_xlevel2) {
2293 index = env->cpuid_xlevel2;
2294 } else if (index < 0xC0000000) {
2295 index = env->cpuid_xlevel;
2297 } else {
2298 /* Intel documentation states that invalid EAX input will
2299 * return the same information as EAX=cpuid_level
2300 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2302 index = env->cpuid_level;
2305 } else {
2306 if (index > env->cpuid_level)
2307 index = env->cpuid_level;
2310 switch(index) {
2311 case 0:
2312 *eax = env->cpuid_level;
2313 *ebx = env->cpuid_vendor1;
2314 *edx = env->cpuid_vendor2;
2315 *ecx = env->cpuid_vendor3;
2316 break;
2317 case 1:
2318 *eax = env->cpuid_version;
2319 *ebx = (cpu->apic_id << 24) |
2320 8 << 8; /* CLFLUSH line size in quadwords (8 * 8 = 64 bytes); Linux wants it. */
2321 *ecx = env->features[FEAT_1_ECX];
2322 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
2323 *ecx |= CPUID_EXT_OSXSAVE;
2325 *edx = env->features[FEAT_1_EDX];
2326 if (cs->nr_cores * cs->nr_threads > 1) {
2327 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2328 *edx |= CPUID_HT;
2330 break;
2331 case 2:
2332 /* cache info: needed for Pentium Pro compatibility */
2333 if (cpu->cache_info_passthrough) {
2334 host_cpuid(index, 0, eax, ebx, ecx, edx);
2335 break;
2337 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2338 *ebx = 0;
2339 if (!cpu->enable_l3_cache) {
2340 *ecx = 0;
2341 } else {
2342 *ecx = L3_N_DESCRIPTOR;
2344 *edx = (L1D_DESCRIPTOR << 16) | \
2345 (L1I_DESCRIPTOR << 8) | \
2346 (L2_DESCRIPTOR);
2347 break;
2348 case 4:
2349 /* cache info: needed for Core compatibility */
2350 if (cpu->cache_info_passthrough) {
2351 host_cpuid(index, count, eax, ebx, ecx, edx);
2352 *eax &= ~0xFC000000;
2353 } else {
2354 *eax = 0;
2355 switch (count) {
2356 case 0: /* L1 dcache info */
2357 *eax |= CPUID_4_TYPE_DCACHE | \
2358 CPUID_4_LEVEL(1) | \
2359 CPUID_4_SELF_INIT_LEVEL;
2360 *ebx = (L1D_LINE_SIZE - 1) | \
2361 ((L1D_PARTITIONS - 1) << 12) | \
2362 ((L1D_ASSOCIATIVITY - 1) << 22);
2363 *ecx = L1D_SETS - 1;
2364 *edx = CPUID_4_NO_INVD_SHARING;
2365 break;
2366 case 1: /* L1 icache info */
2367 *eax |= CPUID_4_TYPE_ICACHE | \
2368 CPUID_4_LEVEL(1) | \
2369 CPUID_4_SELF_INIT_LEVEL;
2370 *ebx = (L1I_LINE_SIZE - 1) | \
2371 ((L1I_PARTITIONS - 1) << 12) | \
2372 ((L1I_ASSOCIATIVITY - 1) << 22);
2373 *ecx = L1I_SETS - 1;
2374 *edx = CPUID_4_NO_INVD_SHARING;
2375 break;
2376 case 2: /* L2 cache info */
2377 *eax |= CPUID_4_TYPE_UNIFIED | \
2378 CPUID_4_LEVEL(2) | \
2379 CPUID_4_SELF_INIT_LEVEL;
2380 if (cs->nr_threads > 1) {
2381 *eax |= (cs->nr_threads - 1) << 14;
2383 *ebx = (L2_LINE_SIZE - 1) | \
2384 ((L2_PARTITIONS - 1) << 12) | \
2385 ((L2_ASSOCIATIVITY - 1) << 22);
2386 *ecx = L2_SETS - 1;
2387 *edx = CPUID_4_NO_INVD_SHARING;
2388 break;
2389 case 3: /* L3 cache info */
2390 if (!cpu->enable_l3_cache) {
2391 *eax = 0;
2392 *ebx = 0;
2393 *ecx = 0;
2394 *edx = 0;
2395 break;
2397 *eax |= CPUID_4_TYPE_UNIFIED | \
2398 CPUID_4_LEVEL(3) | \
2399 CPUID_4_SELF_INIT_LEVEL;
2400 pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
2401 *eax |= ((1 << pkg_offset) - 1) << 14;
2402 *ebx = (L3_N_LINE_SIZE - 1) | \
2403 ((L3_N_PARTITIONS - 1) << 12) | \
2404 ((L3_N_ASSOCIATIVITY - 1) << 22);
2405 *ecx = L3_N_SETS - 1;
2406 *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
2407 break;
2408 default: /* end of info */
2409 *eax = 0;
2410 *ebx = 0;
2411 *ecx = 0;
2412 *edx = 0;
2413 break;
2417 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2418 if ((*eax & 31) && cs->nr_cores > 1) {
2419 *eax |= (cs->nr_cores - 1) << 26;
2421 break;
2422 case 5:
2423 /* mwait info: needed for Core compatibility */
2424 *eax = 0; /* Smallest monitor-line size in bytes */
2425 *ebx = 0; /* Largest monitor-line size in bytes */
2426 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2427 *edx = 0;
2428 break;
2429 case 6:
2430 /* Thermal and Power Leaf */
2431 *eax = env->features[FEAT_6_EAX];
2432 *ebx = 0;
2433 *ecx = 0;
2434 *edx = 0;
2435 break;
2436 case 7:
2437 /* Structured Extended Feature Flags Enumeration Leaf */
2438 if (count == 0) {
2439 *eax = 0; /* Maximum ECX value for sub-leaves */
2440 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2441 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
2442 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
2443 *ecx |= CPUID_7_0_ECX_OSPKE;
2445 *edx = 0; /* Reserved */
2446 } else {
2447 *eax = 0;
2448 *ebx = 0;
2449 *ecx = 0;
2450 *edx = 0;
2452 break;
2453 case 9:
2454 /* Direct Cache Access Information Leaf */
2455 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2456 *ebx = 0;
2457 *ecx = 0;
2458 *edx = 0;
2459 break;
2460 case 0xA:
2461 /* Architectural Performance Monitoring Leaf */
2462 if (kvm_enabled() && cpu->enable_pmu) {
2463 KVMState *s = cs->kvm_state;
2465 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2466 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2467 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2468 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2469 } else {
2470 *eax = 0;
2471 *ebx = 0;
2472 *ecx = 0;
2473 *edx = 0;
2475 break;
2476 case 0xB:
2477 /* Extended Topology Enumeration Leaf */
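/* Sub-leaf 0 describes the SMT (thread) level and sub-leaf 1 the core
 * level; ECX echoes the sub-leaf number plus the level type, and EDX
 * returns the full x2APIC ID of the logical processor.
 */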
2478 if (!cpu->enable_cpuid_0xb) {
2479 *eax = *ebx = *ecx = *edx = 0;
2480 break;
2483 *ecx = count & 0xff;
2484 *edx = cpu->apic_id;
2486 switch (count) {
2487 case 0:
2488 *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
2489 *ebx = cs->nr_threads;
2490 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
2491 break;
2492 case 1:
2493 *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
2494 *ebx = cs->nr_cores * cs->nr_threads;
2495 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
2496 break;
2497 default:
2498 *eax = 0;
2499 *ebx = 0;
2500 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
2503 assert(!(*eax & ~0x1f));
2504 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
2505 break;
2506 case 0xD: {
2507 /* Processor Extended State */
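/* Sub-leaf 0 reports the supported XCR0 bits (xsave_components) in
 * EAX:EDX and the XSAVE area size in EBX/ECX; sub-leaf 1 reports the
 * XSAVE feature flags (XSAVEOPT etc.); sub-leaves >= 2 report the size
 * and offset of each enabled state component.
 */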
2508 *eax = 0;
2509 *ebx = 0;
2510 *ecx = 0;
2511 *edx = 0;
2512 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
2513 break;
2516 if (count == 0) {
2517 *ecx = xsave_area_size(env->xsave_components);
2518 *eax = env->xsave_components;
2519 *edx = env->xsave_components >> 32;
2520 *ebx = *ecx;
2521 } else if (count == 1) {
2522 *eax = env->features[FEAT_XSAVE];
2523 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
2524 const ExtSaveArea *esa = &x86_ext_save_areas[count];
2525 if ((env->xsave_components >> count) & 1) {
2526 *eax = esa->size;
2527 *ebx = esa->offset;
2530 break;
2532 case 0x80000000:
2533 *eax = env->cpuid_xlevel;
2534 *ebx = env->cpuid_vendor1;
2535 *edx = env->cpuid_vendor2;
2536 *ecx = env->cpuid_vendor3;
2537 break;
2538 case 0x80000001:
2539 *eax = env->cpuid_version;
2540 *ebx = 0;
2541 *ecx = env->features[FEAT_8000_0001_ECX];
2542 *edx = env->features[FEAT_8000_0001_EDX];
2544 /* The Linux kernel checks for the CMPLegacy bit and
2545 * discards multiple thread information if it is set.
2546 * So don't set it here for Intel to make Linux guests happy.
2548 if (cs->nr_cores * cs->nr_threads > 1) {
2549 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2550 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2551 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2552 *ecx |= 1 << 1; /* CmpLegacy bit */
2555 break;
2556 case 0x80000002:
2557 case 0x80000003:
2558 case 0x80000004:
2559 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2560 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2561 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2562 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2563 break;
2564 case 0x80000005:
2565 /* cache info (L1 cache) */
2566 if (cpu->cache_info_passthrough) {
2567 host_cpuid(index, 0, eax, ebx, ecx, edx);
2568 break;
2570 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2571 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2572 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2573 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2574 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2575 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2576 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2577 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2578 break;
2579 case 0x80000006:
2580 /* cache info (L2 cache) */
2581 if (cpu->cache_info_passthrough) {
2582 host_cpuid(index, 0, eax, ebx, ecx, edx);
2583 break;
2585 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2586 (L2_DTLB_2M_ENTRIES << 16) | \
2587 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2588 (L2_ITLB_2M_ENTRIES);
2589 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2590 (L2_DTLB_4K_ENTRIES << 16) | \
2591 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2592 (L2_ITLB_4K_ENTRIES);
2593 *ecx = (L2_SIZE_KB_AMD << 16) | \
2594 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2595 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2596 if (!cpu->enable_l3_cache) {
2597 *edx = ((L3_SIZE_KB / 512) << 18) | \
2598 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2599 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2600 } else {
2601 *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
2602 (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
2603 (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
2605 break;
2606 case 0x80000007:
2607 *eax = 0;
2608 *ebx = 0;
2609 *ecx = 0;
2610 *edx = env->features[FEAT_8000_0007_EDX];
2611 break;
2612 case 0x80000008:
2613 /* virtual & phys address size in low 2 bytes. */
2614 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2615 /* 64 bit processor, 48 bits virtual, configurable
2616 * physical bits.
2618 *eax = 0x00003000 + cpu->phys_bits;
2619 } else {
2620 *eax = cpu->phys_bits;
2622 *ebx = 0;
2623 *ecx = 0;
2624 *edx = 0;
2625 if (cs->nr_cores * cs->nr_threads > 1) {
2626 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2628 break;
2629 case 0x8000000A:
2630 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2631 *eax = 0x00000001; /* SVM Revision */
2632 *ebx = 0x00000010; /* nr of ASIDs */
2633 *ecx = 0;
2634 *edx = env->features[FEAT_SVM]; /* optional features */
2635 } else {
2636 *eax = 0;
2637 *ebx = 0;
2638 *ecx = 0;
2639 *edx = 0;
2641 break;
2642 case 0xC0000000:
2643 *eax = env->cpuid_xlevel2;
2644 *ebx = 0;
2645 *ecx = 0;
2646 *edx = 0;
2647 break;
2648 case 0xC0000001:
2649 /* Support for VIA CPU's CPUID instruction */
2650 *eax = env->cpuid_version;
2651 *ebx = 0;
2652 *ecx = 0;
2653 *edx = env->features[FEAT_C000_0001_EDX];
2654 break;
2655 case 0xC0000002:
2656 case 0xC0000003:
2657 case 0xC0000004:
2658 /* Reserved for future use; currently filled with zero */
2659 *eax = 0;
2660 *ebx = 0;
2661 *ecx = 0;
2662 *edx = 0;
2663 break;
2664 default:
2665 /* reserved values: zero */
2666 *eax = 0;
2667 *ebx = 0;
2668 *ecx = 0;
2669 *edx = 0;
2670 break;
2674 /* CPUClass::reset() */
2675 static void x86_cpu_reset(CPUState *s)
2677 X86CPU *cpu = X86_CPU(s);
2678 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2679 CPUX86State *env = &cpu->env;
2680 target_ulong cr4;
2681 uint64_t xcr0;
2682 int i;
2684 xcc->parent_reset(s);
2686 memset(env, 0, offsetof(CPUX86State, end_reset_fields));
2688 tlb_flush(s, 1);
2690 env->old_exception = -1;
2692 /* init to reset state */
2694 env->hflags2 |= HF2_GIF_MASK;
2696 cpu_x86_update_cr0(env, 0x60000010);
2697 env->a20_mask = ~0x0;
2698 env->smbase = 0x30000;
2700 env->idt.limit = 0xffff;
2701 env->gdt.limit = 0xffff;
2702 env->ldt.limit = 0xffff;
2703 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2704 env->tr.limit = 0xffff;
2705 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
2707 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2708 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2709 DESC_R_MASK | DESC_A_MASK);
2710 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2711 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2712 DESC_A_MASK);
2713 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2714 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2715 DESC_A_MASK);
2716 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2717 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2718 DESC_A_MASK);
2719 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2720 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2721 DESC_A_MASK);
2722 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2723 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2724 DESC_A_MASK);
2726 env->eip = 0xfff0;
2727 env->regs[R_EDX] = env->cpuid_version;
2729 env->eflags = 0x2;
2731 /* FPU init */
2732 for (i = 0; i < 8; i++) {
2733 env->fptags[i] = 1;
2735 cpu_set_fpuc(env, 0x37f);
2737 env->mxcsr = 0x1f80;
2738 /* All units are in INIT state. */
2739 env->xstate_bv = 0;
2741 env->pat = 0x0007040600070406ULL;
2742 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2744 memset(env->dr, 0, sizeof(env->dr));
2745 env->dr[6] = DR6_FIXED_1;
2746 env->dr[7] = DR7_FIXED_1;
2747 cpu_breakpoint_remove_all(s, BP_CPU);
2748 cpu_watchpoint_remove_all(s, BP_CPU);
2750 cr4 = 0;
2751 xcr0 = XSTATE_FP_MASK;
2753 #ifdef CONFIG_USER_ONLY
2754 /* Enable all the features for user-mode. */
2755 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
2756 xcr0 |= XSTATE_SSE_MASK;
2758 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
2759 const ExtSaveArea *esa = &x86_ext_save_areas[i];
2760 if (env->features[esa->feature] & esa->bits) {
2761 xcr0 |= 1ull << i;
2765 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
2766 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
2768 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
2769 cr4 |= CR4_FSGSBASE_MASK;
2771 #endif
2773 env->xcr0 = xcr0;
2774 cpu_x86_update_cr4(env, cr4);
2777 * SDM 11.11.5 requires:
2778 * - IA32_MTRR_DEF_TYPE MSR.E = 0
2779 * - IA32_MTRR_PHYSMASKn.V = 0
2780 * All other bits are undefined. For simplification, zero it all.
2782 env->mtrr_deftype = 0;
2783 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
2784 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
2786 #if !defined(CONFIG_USER_ONLY)
2787 /* We hard-wire the BSP to the first CPU. */
2788 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
2790 s->halted = !cpu_is_bsp(cpu);
2792 if (kvm_enabled()) {
2793 kvm_arch_reset_vcpu(cpu);
2795 #endif
2798 #ifndef CONFIG_USER_ONLY
2799 bool cpu_is_bsp(X86CPU *cpu)
2801 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2804 /* TODO: remove me when reset over QOM tree is implemented */
2805 static void x86_cpu_machine_reset_cb(void *opaque)
2807 X86CPU *cpu = opaque;
2808 cpu_reset(CPU(cpu));
2810 #endif
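/* Set up MCE/MCA capability state: the default set of banks is enabled
 * only when the CPU family is >= 6 and both CPUID_MCE and CPUID_MCA are
 * advertised.
 */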
2812 static void mce_init(X86CPU *cpu)
2814 CPUX86State *cenv = &cpu->env;
2815 unsigned int bank;
2817 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2818 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2819 (CPUID_MCE | CPUID_MCA)) {
2820 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
2821 (cpu->enable_lmce ? MCG_LMCE_P : 0);
2822 cenv->mcg_ctl = ~(uint64_t)0;
2823 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2824 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2829 #ifndef CONFIG_USER_ONLY
2830 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2832 APICCommonState *apic;
2833 const char *apic_type = "apic";
2835 if (kvm_apic_in_kernel()) {
2836 apic_type = "kvm-apic";
2837 } else if (xen_enabled()) {
2838 apic_type = "xen-apic";
2841 cpu->apic_state = DEVICE(object_new(apic_type));
2843 object_property_add_child(OBJECT(cpu), "lapic",
2844 OBJECT(cpu->apic_state), &error_abort);
2845 object_unref(OBJECT(cpu->apic_state));
2847 qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
2848 /* TODO: convert to link<> */
2849 apic = APIC_COMMON(cpu->apic_state);
2850 apic->cpu = cpu;
2851 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
2854 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2856 APICCommonState *apic;
2857 static bool apic_mmio_map_once;
2859 if (cpu->apic_state == NULL) {
2860 return;
2862 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
2863 errp);
2865 /* Map APIC MMIO area */
2866 apic = APIC_COMMON(cpu->apic_state);
2867 if (!apic_mmio_map_once) {
2868 memory_region_add_subregion_overlap(get_system_memory(),
2869 apic->apicbase &
2870 MSR_IA32_APICBASE_BASE,
2871 &apic->io_memory,
2872 0x1000);
2873 apic_mmio_map_once = true;
2877 static void x86_cpu_machine_done(Notifier *n, void *unused)
2879 X86CPU *cpu = container_of(n, X86CPU, machine_done);
2880 MemoryRegion *smram =
2881 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
2883 if (smram) {
2884 cpu->smram = g_new(MemoryRegion, 1);
2885 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
2886 smram, 0, 1ull << 32);
2887 memory_region_set_enabled(cpu->smram, false);
2888 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
2891 #else
2892 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2895 #endif
2897 /* Note: Only safe for use on x86(-64) hosts */
2898 static uint32_t x86_host_phys_bits(void)
2900 uint32_t eax;
2901 uint32_t host_phys_bits;
2903 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
2904 if (eax >= 0x80000008) {
2905 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
2906 /* Note: According to AMD doc 25481 rev 2.34, there is a field
2907 * at bits 23:16 that can specify the maximum physical address bits
2908 * for the guest, overriding this value; but I've not seen
2909 * anything with that field set.
2911 host_phys_bits = eax & 0xff;
2912 } else {
2913 /* It's an odd 64-bit machine that doesn't have the leaf for
2914 * physical address bits; fall back to 36, which matches most
2915 * older Intel CPUs.
2917 host_phys_bits = 36;
2920 return host_phys_bits;
2923 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
2925 if (*min < value) {
2926 *min = value;
2930 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
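/* For example, enabling any bit in FEAT_7_0_EBX (CPUID[EAX=7,ECX=0].EBX)
 * raises cpuid_min_level to at least 7.
 */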
2931 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
2933 CPUX86State *env = &cpu->env;
2934 FeatureWordInfo *fi = &feature_word_info[w];
2935 uint32_t eax = fi->cpuid_eax;
2936 uint32_t region = eax & 0xF0000000;
2938 if (!env->features[w]) {
2939 return;
2942 switch (region) {
2943 case 0x00000000:
2944 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
2945 break;
2946 case 0x80000000:
2947 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
2948 break;
2949 case 0xC0000000:
2950 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
2951 break;
2955 /* Calculate XSAVE components based on the configured CPU feature flags */
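/* x87 (bit 0) and SSE (bit 1) state are always included; every other
 * component is included only if the CPUID feature bit that requires it
 * is enabled (e.g. enabling AVX adds the YMM state component, bit 2).
 * Under KVM the result is additionally masked by what the host kernel
 * reports through CPUID[0xD].EDX:EAX.
 */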
2956 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
2958 CPUX86State *env = &cpu->env;
2959 int i;
2961 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
2962 return;
2965 env->xsave_components = (XSTATE_FP_MASK | XSTATE_SSE_MASK);
2966 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
2967 const ExtSaveArea *esa = &x86_ext_save_areas[i];
2968 if (env->features[esa->feature] & esa->bits) {
2969 env->xsave_components |= (1ULL << i);
2973 if (kvm_enabled()) {
2974 KVMState *s = kvm_state;
2975 uint64_t kvm_mask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX);
2976 kvm_mask <<= 32;
2977 kvm_mask |= kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
2978 env->xsave_components &= kvm_mask;
2982 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
2983 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
2984 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
2985 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
2986 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
2987 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
2988 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
2990 CPUState *cs = CPU(dev);
2991 X86CPU *cpu = X86_CPU(dev);
2992 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
2993 CPUX86State *env = &cpu->env;
2994 Error *local_err = NULL;
2995 static bool ht_warned;
2996 FeatureWord w;
2998 if (xcc->kvm_required && !kvm_enabled()) {
2999 char *name = x86_cpu_class_get_model_name(xcc);
3000 error_setg(&local_err, "CPU model '%s' requires KVM", name);
3001 g_free(name);
3002 goto out;
3005 if (cpu->apic_id == UNASSIGNED_APIC_ID) {
3006 error_setg(errp, "apic-id property was not initialized properly");
3007 return;
3010 /* TODO: cpu->host_features incorrectly overwrites features
3011 * set using "feat=on|off". Once we fix this, we can convert
3012 * plus_features & minus_features to global properties
3013 * inside x86_cpu_parse_featurestr() too.
3015 if (cpu->host_features) {
3016 for (w = 0; w < FEATURE_WORDS; w++) {
3017 env->features[w] =
3018 x86_cpu_get_supported_feature_word(w, cpu->migratable);
3022 for (w = 0; w < FEATURE_WORDS; w++) {
3023 cpu->env.features[w] |= plus_features[w];
3024 cpu->env.features[w] &= ~minus_features[w];
3027 x86_cpu_enable_xsave_components(cpu);
3029 /* CPUID[EAX=7,ECX=0].EBX always increases the minimum level automatically: */
3030 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
3031 if (cpu->full_cpuid_auto_level) {
3032 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
3033 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
3034 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
3035 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
3036 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
3037 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
3038 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
3039 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
3040 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
3041 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
3042 /* SVM requires CPUID[0x8000000A] */
3043 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
3044 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
3048 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
3049 if (env->cpuid_level == UINT32_MAX) {
3050 env->cpuid_level = env->cpuid_min_level;
3052 if (env->cpuid_xlevel == UINT32_MAX) {
3053 env->cpuid_xlevel = env->cpuid_min_xlevel;
3055 if (env->cpuid_xlevel2 == UINT32_MAX) {
3056 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
3059 if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
3060 error_setg(&local_err,
3061 kvm_enabled() ?
3062 "Host doesn't support requested features" :
3063 "TCG doesn't support requested features");
3064 goto out;
3067 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
3068 * CPUID[1].EDX.
3070 if (IS_AMD_CPU(env)) {
3071 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
3072 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
3073 & CPUID_EXT2_AMD_ALIASES);
3076 /* For 64-bit systems, decide how many physical address bits to present.
3077 * Ideally this should be the same as the host; anything other than matching
3078 * the host can cause incorrect guest behaviour.
3079 * QEMU used to pick the magic value of 40 bits, which corresponds to
3080 * consumer AMD devices but nothing else.
3082 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
3083 if (kvm_enabled()) {
3084 uint32_t host_phys_bits = x86_host_phys_bits();
3085 static bool warned;
3087 if (cpu->host_phys_bits) {
3088 /* The user asked for us to use the host physical bits */
3089 cpu->phys_bits = host_phys_bits;
3092 /* Print a warning if the user set it to a value that's not the
3093 * host value.
3095 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
3096 !warned) {
3097 error_report("Warning: Host physical bits (%u)"
3098 " does not match phys-bits property (%u)",
3099 host_phys_bits, cpu->phys_bits);
3100 warned = true;
3103 if (cpu->phys_bits &&
3104 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
3105 cpu->phys_bits < 32)) {
3106 error_setg(errp, "phys-bits should be between 32 and %u "
3107 " (but is %u)",
3108 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
3109 return;
3111 } else {
3112 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
3113 error_setg(errp, "TCG only supports phys-bits=%u",
3114 TCG_PHYS_ADDR_BITS);
3115 return;
3118 /* 0 means it was not explicitly set by the user (or by machine
3119 * compat_props or by the host code above). In this case, the default
3120 * is the value used by TCG (40).
3122 if (cpu->phys_bits == 0) {
3123 cpu->phys_bits = TCG_PHYS_ADDR_BITS;
3125 } else {
3126 /* For 32-bit systems, don't use the user-set value, but keep
3127 * phys_bits consistent with what we tell the guest.
3129 if (cpu->phys_bits != 0) {
3130 error_setg(errp, "phys-bits is not user-configurable in 32 bit");
3131 return;
3134 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
3135 cpu->phys_bits = 36;
3136 } else {
3137 cpu->phys_bits = 32;
3140 cpu_exec_init(cs, &error_abort);
3142 if (tcg_enabled()) {
3143 tcg_x86_init();
3146 #ifndef CONFIG_USER_ONLY
3147 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
3149 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
3150 x86_cpu_apic_create(cpu, &local_err);
3151 if (local_err != NULL) {
3152 goto out;
3155 #endif
3157 mce_init(cpu);
3159 #ifndef CONFIG_USER_ONLY
3160 if (tcg_enabled()) {
3161 AddressSpace *newas = g_new(AddressSpace, 1);
3163 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
3164 cpu->cpu_as_root = g_new(MemoryRegion, 1);
3166 /* Outer container... */
3167 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
3168 memory_region_set_enabled(cpu->cpu_as_root, true);
3170 /* ... with two regions inside: normal system memory with low
3171 * priority, and...
3173 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
3174 get_system_memory(), 0, ~0ull);
3175 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
3176 memory_region_set_enabled(cpu->cpu_as_mem, true);
3177 address_space_init(newas, cpu->cpu_as_root, "CPU");
3178 cs->num_ases = 1;
3179 cpu_address_space_init(cs, newas, 0);
3181 /* ... SMRAM with higher priority, linked from /machine/smram. */
3182 cpu->machine_done.notify = x86_cpu_machine_done;
3183 qemu_add_machine_init_done_notifier(&cpu->machine_done);
3185 #endif
3187 qemu_init_vcpu(cs);
3189 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
3190 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
3191 * based on inputs (sockets,cores,threads), it is still better to give
3192 * users a warning.
3194 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
3195 * cs->nr_threads hasn't been populated yet and the check would be incorrect.
3197 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
3198 error_report("AMD CPU doesn't support hyperthreading. Please configure"
3199 " -smp options properly.");
3200 ht_warned = true;
3203 x86_cpu_apic_realize(cpu, &local_err);
3204 if (local_err != NULL) {
3205 goto out;
3207 cpu_reset(cs);
3209 xcc->parent_realize(dev, &local_err);
3211 out:
3212 if (local_err != NULL) {
3213 error_propagate(errp, local_err);
3214 return;
3218 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
3220 X86CPU *cpu = X86_CPU(dev);
3222 #ifndef CONFIG_USER_ONLY
3223 cpu_remove_sync(CPU(dev));
3224 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
3225 #endif
3227 if (cpu->apic_state) {
3228 object_unparent(OBJECT(cpu->apic_state));
3229 cpu->apic_state = NULL;
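/* A boolean QOM property backed by one or more bits of a uint32_t field
 * (typically a feature word).  Reading it returns true only when all of
 * the bits in the mask are set.
 */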
3233 typedef struct BitProperty {
3234 uint32_t *ptr;
3235 uint32_t mask;
3236 } BitProperty;
3238 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
3239 void *opaque, Error **errp)
3241 BitProperty *fp = opaque;
3242 bool value = (*fp->ptr & fp->mask) == fp->mask;
3243 visit_type_bool(v, name, &value, errp);
3246 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
3247 void *opaque, Error **errp)
3249 DeviceState *dev = DEVICE(obj);
3250 BitProperty *fp = opaque;
3251 Error *local_err = NULL;
3252 bool value;
3254 if (dev->realized) {
3255 qdev_prop_set_after_realize(dev, name, errp);
3256 return;
3259 visit_type_bool(v, name, &value, &local_err);
3260 if (local_err) {
3261 error_propagate(errp, local_err);
3262 return;
3265 if (value) {
3266 *fp->ptr |= fp->mask;
3267 } else {
3268 *fp->ptr &= ~fp->mask;
3272 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3273 void *opaque)
3275 BitProperty *prop = opaque;
3276 g_free(prop);
3279 /* Register a boolean property to get/set a single bit in a uint32_t field.
3281 * The same property name can be registered multiple times to make it affect
3282 * multiple bits in the same FeatureWord. In that case, the getter will return
3283 * true only if all bits are set.
3285 static void x86_cpu_register_bit_prop(X86CPU *cpu,
3286 const char *prop_name,
3287 uint32_t *field,
3288 int bitnr)
3290 BitProperty *fp;
3291 ObjectProperty *op;
3292 uint32_t mask = (1UL << bitnr);
3294 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3295 if (op) {
3296 fp = op->opaque;
3297 assert(fp->ptr == field);
3298 fp->mask |= mask;
3299 } else {
3300 fp = g_new0(BitProperty, 1);
3301 fp->ptr = field;
3302 fp->mask = mask;
3303 object_property_add(OBJECT(cpu), prop_name, "bool",
3304 x86_cpu_get_bit_prop,
3305 x86_cpu_set_bit_prop,
3306 x86_cpu_release_bit_prop, fp, &error_abort);
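/* Register the QOM property (or properties) for bit @bitnr of feature
 * word @w.  Names separated by '|' in feat_names are aliases: the first
 * name becomes the real property and the rest are registered as aliases
 * of it.
 */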
3310 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3311 FeatureWord w,
3312 int bitnr)
3314 Object *obj = OBJECT(cpu);
3315 int i;
3316 char **names;
3317 FeatureWordInfo *fi = &feature_word_info[w];
3319 if (!fi->feat_names[bitnr]) {
3320 return;
3323 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
3325 feat2prop(names[0]);
3326 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
3328 for (i = 1; names[i]; i++) {
3329 feat2prop(names[i]);
3330 object_property_add_alias(obj, names[i], obj, names[0],
3331 &error_abort);
3334 g_strfreev(names);
3337 static void x86_cpu_initfn(Object *obj)
3339 CPUState *cs = CPU(obj);
3340 X86CPU *cpu = X86_CPU(obj);
3341 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
3342 CPUX86State *env = &cpu->env;
3343 FeatureWord w;
3345 cs->env_ptr = env;
3347 object_property_add(obj, "family", "int",
3348 x86_cpuid_version_get_family,
3349 x86_cpuid_version_set_family, NULL, NULL, NULL);
3350 object_property_add(obj, "model", "int",
3351 x86_cpuid_version_get_model,
3352 x86_cpuid_version_set_model, NULL, NULL, NULL);
3353 object_property_add(obj, "stepping", "int",
3354 x86_cpuid_version_get_stepping,
3355 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
3356 object_property_add_str(obj, "vendor",
3357 x86_cpuid_get_vendor,
3358 x86_cpuid_set_vendor, NULL);
3359 object_property_add_str(obj, "model-id",
3360 x86_cpuid_get_model_id,
3361 x86_cpuid_set_model_id, NULL);
3362 object_property_add(obj, "tsc-frequency", "int",
3363 x86_cpuid_get_tsc_freq,
3364 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
3365 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
3366 x86_cpu_get_feature_words,
3367 NULL, NULL, (void *)env->features, NULL);
3368 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
3369 x86_cpu_get_feature_words,
3370 NULL, NULL, (void *)cpu->filtered_features, NULL);
3372 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
3374 for (w = 0; w < FEATURE_WORDS; w++) {
3375 int bitnr;
3377 for (bitnr = 0; bitnr < 32; bitnr++) {
3378 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
3382 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
3385 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3387 X86CPU *cpu = X86_CPU(cs);
3389 return cpu->apic_id;
3392 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3394 X86CPU *cpu = X86_CPU(cs);
3396 return cpu->env.cr[0] & CR0_PG_MASK;
3399 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3401 X86CPU *cpu = X86_CPU(cs);
3403 cpu->env.eip = value;
3406 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3408 X86CPU *cpu = X86_CPU(cs);
3410 cpu->env.eip = tb->pc - tb->cs_base;
3413 static bool x86_cpu_has_work(CPUState *cs)
3415 X86CPU *cpu = X86_CPU(cs);
3416 CPUX86State *env = &cpu->env;
3418 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3419 CPU_INTERRUPT_POLL)) &&
3420 (env->eflags & IF_MASK)) ||
3421 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3422 CPU_INTERRUPT_INIT |
3423 CPU_INTERRUPT_SIPI |
3424 CPU_INTERRUPT_MCE)) ||
3425 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3426 !(env->hflags & HF_SMM_MASK));
3429 static Property x86_cpu_properties[] = {
3430 #ifdef CONFIG_USER_ONLY
3431 /* apic_id = 0 by default for *-user, see commit 9886e834 */
3432 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
3433 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
3434 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
3435 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
3436 #else
3437 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
3438 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
3439 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
3440 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
3441 #endif
3442 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
3443 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
3444 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
3445 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
3446 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
3447 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
3448 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
3449 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
3450 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
3451 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
3452 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
3453 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
3454 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
3455 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
3456 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
3457 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
3458 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
3459 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
3460 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
3461 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
3462 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
3463 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
3464 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
3465 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
3466 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
3467 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
3468 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
3469 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
3470 DEFINE_PROP_END_OF_LIST()
3473 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
3475 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3476 CPUClass *cc = CPU_CLASS(oc);
3477 DeviceClass *dc = DEVICE_CLASS(oc);
3479 xcc->parent_realize = dc->realize;
3480 dc->realize = x86_cpu_realizefn;
3481 dc->unrealize = x86_cpu_unrealizefn;
3482 dc->props = x86_cpu_properties;
3484 xcc->parent_reset = cc->reset;
3485 cc->reset = x86_cpu_reset;
3486 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
3488 cc->class_by_name = x86_cpu_class_by_name;
3489 cc->parse_features = x86_cpu_parse_featurestr;
3490 cc->has_work = x86_cpu_has_work;
3491 cc->do_interrupt = x86_cpu_do_interrupt;
3492 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
3493 cc->dump_state = x86_cpu_dump_state;
3494 cc->set_pc = x86_cpu_set_pc;
3495 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
3496 cc->gdb_read_register = x86_cpu_gdb_read_register;
3497 cc->gdb_write_register = x86_cpu_gdb_write_register;
3498 cc->get_arch_id = x86_cpu_get_arch_id;
3499 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
3500 #ifdef CONFIG_USER_ONLY
3501 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
3502 #else
3503 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
3504 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
3505 cc->write_elf64_note = x86_cpu_write_elf64_note;
3506 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
3507 cc->write_elf32_note = x86_cpu_write_elf32_note;
3508 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
3509 cc->vmsd = &vmstate_x86_cpu;
3510 #endif
3511 cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
3512 #ifndef CONFIG_USER_ONLY
3513 cc->debug_excp_handler = breakpoint_handler;
3514 #endif
3515 cc->cpu_exec_enter = x86_cpu_exec_enter;
3516 cc->cpu_exec_exit = x86_cpu_exec_exit;
3518 dc->cannot_instantiate_with_device_add_yet = false;
3520 * Reason: x86_cpu_initfn() calls cpu_exec_init(), which saves the
3521 * object in cpus -> dangling pointer after final object_unref().
3523 dc->cannot_destroy_with_object_finalize_yet = true;
3526 static const TypeInfo x86_cpu_type_info = {
3527 .name = TYPE_X86_CPU,
3528 .parent = TYPE_CPU,
3529 .instance_size = sizeof(X86CPU),
3530 .instance_init = x86_cpu_initfn,
3531 .abstract = true,
3532 .class_size = sizeof(X86CPUClass),
3533 .class_init = x86_cpu_common_class_init,
3536 static void x86_cpu_register_types(void)
3538 int i;
3540 type_register_static(&x86_cpu_type_info);
3541 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3542 x86_register_cpudef_type(&builtin_x86_defs[i]);
3544 #ifdef CONFIG_KVM
3545 type_register_static(&host_x86_cpu_type_info);
3546 #endif
3549 type_init(x86_cpu_register_types)