Merge remote-tracking branch 'remotes/kraxel/tags/pull-input-20160928-1' into staging
[qemu/kevin.git] / target-i386 / cpu.c
blob333309b9a70e4bb523ce79479497df0d0661b959
1 /*
2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
20 #include "qemu/cutils.h"
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/kvm.h"
25 #include "sysemu/cpus.h"
26 #include "kvm_i386.h"
28 #include "qemu/error-report.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
33 #include "qapi-types.h"
34 #include "qapi-visit.h"
35 #include "qapi/visitor.h"
36 #include "sysemu/arch_init.h"
38 #if defined(CONFIG_KVM)
39 #include <linux/kvm_para.h>
40 #endif
42 #include "sysemu/sysemu.h"
43 #include "hw/qdev-properties.h"
44 #include "hw/i386/topology.h"
45 #ifndef CONFIG_USER_ONLY
46 #include "exec/address-spaces.h"
47 #include "hw/hw.h"
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
50 #endif
/* Cache topology CPUID constants: */

/* CPUID Leaf 2 Descriptors */
#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
#define CPUID_2_L1I_32KB_8WAY_64B 0x30
#define CPUID_2_L2_2MB_8WAY_64B   0x7d
#define CPUID_2_L3_16MB_16WAY_64B 0x4d

/* CPUID Leaf 4 constants: */

/* EAX: */
#define CPUID_4_TYPE_DCACHE  1
#define CPUID_4_TYPE_ICACHE  2
#define CPUID_4_TYPE_UNIFIED 3

#define CPUID_4_LEVEL(l)          ((l) << 5)

#define CPUID_4_SELF_INIT_LEVEL (1 << 8)
#define CPUID_4_FULLY_ASSOC     (1 << 9)

/* EDX: */
#define CPUID_4_NO_INVD_SHARING (1 << 0)
#define CPUID_4_INCLUSIVE       (1 << 1)
#define CPUID_4_COMPLEX_IDX     (1 << 2)

#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006: */
#define AMD_ENC_ASSOC(a) (a <=   1 ? a   : \
                          a ==   2 ? 0x2 : \
                          a ==   4 ? 0x4 : \
                          a ==   8 ? 0x6 : \
                          a ==  16 ? 0x8 : \
                          a ==  32 ? 0xA : \
                          a ==  48 ? 0xB : \
                          a ==  64 ? 0xC : \
                          a ==  96 ? 0xD : \
                          a == 128 ? 0xE : \
                          a == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)

/* Definitions of the hardcoded cache entries we expose: */

/* L1 data cache: */
#define L1D_LINE_SIZE         64
#define L1D_ASSOCIATIVITY      8
#define L1D_SETS              64
#define L1D_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1D_LINES_PER_TAG      1
#define L1D_SIZE_KB_AMD       64
#define L1D_ASSOCIATIVITY_AMD  2

/* L1 instruction cache: */
#define L1I_LINE_SIZE         64
#define L1I_ASSOCIATIVITY      8
#define L1I_SETS              64
#define L1I_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1I_LINES_PER_TAG      1
#define L1I_SIZE_KB_AMD       64
#define L1I_ASSOCIATIVITY_AMD  2

/* Level 2 unified cache: */
#define L2_LINE_SIZE          64
#define L2_ASSOCIATIVITY      16
#define L2_SETS             4096
#define L2_PARTITIONS          1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
#define L2_LINES_PER_TAG       1
#define L2_SIZE_KB_AMD       512

/* Level 3 unified cache: */
#define L3_SIZE_KB             0 /* disabled */
#define L3_ASSOCIATIVITY       0 /* disabled */
#define L3_LINES_PER_TAG       0 /* disabled */
#define L3_LINE_SIZE           0 /* disabled */
#define L3_N_LINE_SIZE        64
#define L3_N_ASSOCIATIVITY    16
#define L3_N_SETS          16384
#define L3_N_PARTITIONS        1
#define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
#define L3_N_LINES_PER_TAG     1
#define L3_N_SIZE_KB_AMD   16384

/* TLB definitions: */

#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512
172 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
173 uint32_t vendor2, uint32_t vendor3)
175 int i;
176 for (i = 0; i < 4; i++) {
177 dst[i] = vendor1 >> (8 * i);
178 dst[i + 4] = vendor2 >> (8 * i);
179 dst[i + 8] = vendor3 >> (8 * i);
181 dst[CPUID_VENDOR_SZ] = '\0';
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */   \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
          CPUID_EXT_F16C, CPUID_EXT_RDRAND */

#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES 0
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
          CPUID_7_0_EBX_ERMS)
          /* missing:
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE)
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
/* Per-feature-word CPUID lookup information.
 *
 * Feature flag names are taken from "Intel Processor Identification and
 * the CPUID Instruction" and AMD's "CPUID Specification".  In cases of
 * disagreement between the two naming conventions, aliases may be added.
 */
typedef struct FeatureWordInfo {
    const char *feat_names[32];  /* one name per bit; NULL = unknown to QEMU */
    uint32_t cpuid_eax;          /* Input EAX for CPUID */
    bool cpuid_needs_ecx;        /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;          /* Input ECX value for CPUID */
    int cpuid_reg;               /* output register (R_* constant) */
    uint32_t tcg_features;       /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
} FeatureWordInfo;
263 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
264 [FEAT_1_EDX] = {
265 .feat_names = {
266 "fpu", "vme", "de", "pse",
267 "tsc", "msr", "pae", "mce",
268 "cx8", "apic", NULL, "sep",
269 "mtrr", "pge", "mca", "cmov",
270 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
271 NULL, "ds" /* Intel dts */, "acpi", "mmx",
272 "fxsr", "sse", "sse2", "ss",
273 "ht" /* Intel htt */, "tm", "ia64", "pbe",
275 .cpuid_eax = 1, .cpuid_reg = R_EDX,
276 .tcg_features = TCG_FEATURES,
278 [FEAT_1_ECX] = {
279 .feat_names = {
280 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
281 "ds_cpl", "vmx", "smx", "est",
282 "tm2", "ssse3", "cid", NULL,
283 "fma", "cx16", "xtpr", "pdcm",
284 NULL, "pcid", "dca", "sse4.1|sse4_1",
285 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
286 "tsc-deadline", "aes", "xsave", "osxsave",
287 "avx", "f16c", "rdrand", "hypervisor",
289 .cpuid_eax = 1, .cpuid_reg = R_ECX,
290 .tcg_features = TCG_EXT_FEATURES,
292 /* Feature names that are already defined on feature_name[] but
293 * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
294 * names on feat_names below. They are copied automatically
295 * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
297 [FEAT_8000_0001_EDX] = {
298 .feat_names = {
299 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
300 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
301 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
302 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
303 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
304 "nx|xd", NULL, "mmxext", NULL /* mmx */,
305 NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb", "rdtscp",
306 NULL, "lm|i64", "3dnowext", "3dnow",
308 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
309 .tcg_features = TCG_EXT2_FEATURES,
311 [FEAT_8000_0001_ECX] = {
312 .feat_names = {
313 "lahf_lm", "cmp_legacy", "svm", "extapic",
314 "cr8legacy", "abm", "sse4a", "misalignsse",
315 "3dnowprefetch", "osvw", "ibs", "xop",
316 "skinit", "wdt", NULL, "lwp",
317 "fma4", "tce", NULL, "nodeid_msr",
318 NULL, "tbm", "topoext", "perfctr_core",
319 "perfctr_nb", NULL, NULL, NULL,
320 NULL, NULL, NULL, NULL,
322 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
323 .tcg_features = TCG_EXT3_FEATURES,
325 [FEAT_C000_0001_EDX] = {
326 .feat_names = {
327 NULL, NULL, "xstore", "xstore-en",
328 NULL, NULL, "xcrypt", "xcrypt-en",
329 "ace2", "ace2-en", "phe", "phe-en",
330 "pmm", "pmm-en", NULL, NULL,
331 NULL, NULL, NULL, NULL,
332 NULL, NULL, NULL, NULL,
333 NULL, NULL, NULL, NULL,
334 NULL, NULL, NULL, NULL,
336 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
337 .tcg_features = TCG_EXT4_FEATURES,
339 [FEAT_KVM] = {
340 .feat_names = {
341 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
342 "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
343 NULL, NULL, NULL, NULL,
344 NULL, NULL, NULL, NULL,
345 NULL, NULL, NULL, NULL,
346 NULL, NULL, NULL, NULL,
347 "kvmclock-stable-bit", NULL, NULL, NULL,
348 NULL, NULL, NULL, NULL,
350 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
351 .tcg_features = TCG_KVM_FEATURES,
353 [FEAT_HYPERV_EAX] = {
354 .feat_names = {
355 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
356 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
357 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
358 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
359 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
360 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
361 NULL, NULL, NULL, NULL,
362 NULL, NULL, NULL, NULL,
363 NULL, NULL, NULL, NULL,
364 NULL, NULL, NULL, NULL,
365 NULL, NULL, NULL, NULL,
367 .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
369 [FEAT_HYPERV_EBX] = {
370 .feat_names = {
371 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
372 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
373 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
374 NULL /* hv_create_port */, NULL /* hv_connect_port */,
375 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
376 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
377 NULL, NULL,
378 NULL, NULL, NULL, NULL,
379 NULL, NULL, NULL, NULL,
380 NULL, NULL, NULL, NULL,
381 NULL, NULL, NULL, NULL,
383 .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
385 [FEAT_HYPERV_EDX] = {
386 .feat_names = {
387 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
388 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
389 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
390 NULL, NULL,
391 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
392 NULL, NULL, NULL, NULL,
393 NULL, NULL, NULL, NULL,
394 NULL, NULL, NULL, NULL,
395 NULL, NULL, NULL, NULL,
396 NULL, NULL, NULL, NULL,
398 .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
400 [FEAT_SVM] = {
401 .feat_names = {
402 "npt", "lbrv", "svm_lock", "nrip_save",
403 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
404 NULL, NULL, "pause_filter", NULL,
405 "pfthreshold", NULL, NULL, NULL,
406 NULL, NULL, NULL, NULL,
407 NULL, NULL, NULL, NULL,
408 NULL, NULL, NULL, NULL,
409 NULL, NULL, NULL, NULL,
411 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
412 .tcg_features = TCG_SVM_FEATURES,
414 [FEAT_7_0_EBX] = {
415 .feat_names = {
416 "fsgsbase", "tsc_adjust", NULL, "bmi1",
417 "hle", "avx2", NULL, "smep",
418 "bmi2", "erms", "invpcid", "rtm",
419 NULL, NULL, "mpx", NULL,
420 "avx512f", "avx512dq", "rdseed", "adx",
421 "smap", "avx512ifma", "pcommit", "clflushopt",
422 "clwb", NULL, "avx512pf", "avx512er",
423 "avx512cd", NULL, "avx512bw", "avx512vl",
425 .cpuid_eax = 7,
426 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
427 .cpuid_reg = R_EBX,
428 .tcg_features = TCG_7_0_EBX_FEATURES,
430 [FEAT_7_0_ECX] = {
431 .feat_names = {
432 NULL, "avx512vbmi", "umip", "pku",
433 "ospke", NULL, NULL, NULL,
434 NULL, NULL, NULL, NULL,
435 NULL, NULL, NULL, NULL,
436 NULL, NULL, NULL, NULL,
437 NULL, NULL, "rdpid", NULL,
438 NULL, NULL, NULL, NULL,
439 NULL, NULL, NULL, NULL,
441 .cpuid_eax = 7,
442 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
443 .cpuid_reg = R_ECX,
444 .tcg_features = TCG_7_0_ECX_FEATURES,
446 [FEAT_8000_0007_EDX] = {
447 .feat_names = {
448 NULL, NULL, NULL, NULL,
449 NULL, NULL, NULL, NULL,
450 "invtsc", NULL, NULL, NULL,
451 NULL, NULL, NULL, NULL,
452 NULL, NULL, NULL, NULL,
453 NULL, NULL, NULL, NULL,
454 NULL, NULL, NULL, NULL,
455 NULL, NULL, NULL, NULL,
457 .cpuid_eax = 0x80000007,
458 .cpuid_reg = R_EDX,
459 .tcg_features = TCG_APM_FEATURES,
460 .unmigratable_flags = CPUID_APM_INVTSC,
462 [FEAT_XSAVE] = {
463 .feat_names = {
464 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
465 NULL, NULL, NULL, NULL,
466 NULL, NULL, NULL, NULL,
467 NULL, NULL, NULL, NULL,
468 NULL, NULL, NULL, NULL,
469 NULL, NULL, NULL, NULL,
470 NULL, NULL, NULL, NULL,
471 NULL, NULL, NULL, NULL,
473 .cpuid_eax = 0xd,
474 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
475 .cpuid_reg = R_EAX,
476 .tcg_features = TCG_XSAVE_FEATURES,
478 [FEAT_6_EAX] = {
479 .feat_names = {
480 NULL, NULL, "arat", NULL,
481 NULL, NULL, NULL, NULL,
482 NULL, NULL, NULL, NULL,
483 NULL, NULL, NULL, NULL,
484 NULL, NULL, NULL, NULL,
485 NULL, NULL, NULL, NULL,
486 NULL, NULL, NULL, NULL,
487 NULL, NULL, NULL, NULL,
489 .cpuid_eax = 6, .cpuid_reg = R_EAX,
490 .tcg_features = TCG_6_EAX_FEATURES,
492 [FEAT_XSAVE_COMP_LO] = {
493 .cpuid_eax = 0xD,
494 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
495 .cpuid_reg = R_EAX,
496 .tcg_features = ~0U,
498 [FEAT_XSAVE_COMP_HI] = {
499 .cpuid_eax = 0xD,
500 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
501 .cpuid_reg = R_EDX,
502 .tcg_features = ~0U,
506 typedef struct X86RegisterInfo32 {
507 /* Name of register */
508 const char *name;
509 /* QAPI enum value register */
510 X86CPURegister32 qapi_enum;
511 } X86RegisterInfo32;
513 #define REGISTER(reg) \
514 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
515 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
516 REGISTER(EAX),
517 REGISTER(ECX),
518 REGISTER(EDX),
519 REGISTER(EBX),
520 REGISTER(ESP),
521 REGISTER(EBP),
522 REGISTER(ESI),
523 REGISTER(EDI),
525 #undef REGISTER
527 typedef struct ExtSaveArea {
528 uint32_t feature, bits;
529 uint32_t offset, size;
530 } ExtSaveArea;
532 static const ExtSaveArea x86_ext_save_areas[] = {
533 [XSTATE_YMM_BIT] =
534 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
535 .offset = offsetof(X86XSaveArea, avx_state),
536 .size = sizeof(XSaveAVX) },
537 [XSTATE_BNDREGS_BIT] =
538 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
539 .offset = offsetof(X86XSaveArea, bndreg_state),
540 .size = sizeof(XSaveBNDREG) },
541 [XSTATE_BNDCSR_BIT] =
542 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
543 .offset = offsetof(X86XSaveArea, bndcsr_state),
544 .size = sizeof(XSaveBNDCSR) },
545 [XSTATE_OPMASK_BIT] =
546 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
547 .offset = offsetof(X86XSaveArea, opmask_state),
548 .size = sizeof(XSaveOpmask) },
549 [XSTATE_ZMM_Hi256_BIT] =
550 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
551 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
552 .size = sizeof(XSaveZMM_Hi256) },
553 [XSTATE_Hi16_ZMM_BIT] =
554 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
555 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
556 .size = sizeof(XSaveHi16_ZMM) },
557 [XSTATE_PKRU_BIT] =
558 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
559 .offset = offsetof(X86XSaveArea, pkru_state),
560 .size = sizeof(XSavePKRU) },
563 static uint32_t xsave_area_size(uint64_t mask)
565 int i;
566 uint64_t ret = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader);
568 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
569 const ExtSaveArea *esa = &x86_ext_save_areas[i];
570 if ((mask >> i) & 1) {
571 ret = MAX(ret, esa->offset + esa->size);
574 return ret;
577 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
579 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
580 cpu->env.features[FEAT_XSAVE_COMP_LO];
583 const char *get_register_name_32(unsigned int reg)
585 if (reg >= CPU_NB_REGS32) {
586 return NULL;
588 return x86_reg_info_32[reg].name;
592 * Returns the set of feature flags that are supported and migratable by
593 * QEMU, for a given FeatureWord.
595 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
597 FeatureWordInfo *wi = &feature_word_info[w];
598 uint32_t r = 0;
599 int i;
601 for (i = 0; i < 32; i++) {
602 uint32_t f = 1U << i;
603 /* If the feature name is unknown, it is not supported by QEMU yet */
604 if (!wi->feat_names[i]) {
605 continue;
607 /* Skip features known to QEMU, but explicitly marked as unmigratable */
608 if (wi->unmigratable_flags & f) {
609 continue;
611 r |= f;
613 return r;
616 void host_cpuid(uint32_t function, uint32_t count,
617 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
619 uint32_t vec[4];
621 #ifdef __x86_64__
622 asm volatile("cpuid"
623 : "=a"(vec[0]), "=b"(vec[1]),
624 "=c"(vec[2]), "=d"(vec[3])
625 : "0"(function), "c"(count) : "cc");
626 #elif defined(__i386__)
627 asm volatile("pusha \n\t"
628 "cpuid \n\t"
629 "mov %%eax, 0(%2) \n\t"
630 "mov %%ebx, 4(%2) \n\t"
631 "mov %%ecx, 8(%2) \n\t"
632 "mov %%edx, 12(%2) \n\t"
633 "popa"
634 : : "a"(function), "c"(count), "S"(vec)
635 : "memory", "cc");
636 #else
637 abort();
638 #endif
640 if (eax)
641 *eax = vec[0];
642 if (ebx)
643 *ebx = vec[1];
644 if (ecx)
645 *ecx = vec[2];
646 if (edx)
647 *edx = vec[3];
/* True for NUL-terminated "whitespace": any non-NUL char that is not a
 * printable ASCII character (<= ' ' or > '~'). */
#define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))

/* general substring compare of *[s1..e1) and *[s2..e2). sx is start of
 * a substring. ex if !NULL points to the first char after a substring,
 * otherwise the string is assumed to sized by a terminating nul.
 * Return lexical ordering of *s1:*s2.
 */
static int sstrcmp(const char *s1, const char *e1,
                   const char *s2, const char *e2)
{
    for (;;) {
        /* Mismatch or end of either NUL-terminated string: order by char. */
        if (!*s1 || !*s2 || *s1 != *s2) {
            return *s1 - *s2;
        }
        ++s1;
        ++s2;
        if (s1 == e1 && s2 == e2) {
            return 0;           /* both bounded substrings fully matched */
        }
        if (s1 == e1) {
            return *s2;         /* s1 exhausted first: s2's next char decides */
        }
        if (s2 == e2) {
            return *s1;         /* s2 exhausted first: s1's next char decides */
        }
    }
}
/* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
 * '|' delimited (possibly empty) strings in which case search for a match
 * within the alternatives proceeds left to right. Return 0 for success,
 * non-zero otherwise.
 */
static int altcmp(const char *s, const char *e, const char *altstr)
{
    const char *alt_start, *alt_end;

    for (alt_end = alt_start = altstr; ; ) {
        /* Advance to the end of the current '|'-delimited alternative. */
        while (*alt_end && *alt_end != '|') {
            ++alt_end;
        }
        /* An empty alternative matches only an empty subject string. */
        if ((alt_start == alt_end && !*s) ||
            (alt_start != alt_end && !sstrcmp(s, e, alt_start, alt_end))) {
            return 0;
        }
        if (!*alt_end) {
            return 1;           /* no more alternatives: no match */
        }
        alt_start = ++alt_end;  /* skip '|' and try the next alternative */
    }
}
/* search featureset for flag *[s..e), if found set corresponding bit in
 * *pval and return true, otherwise return false
 */
static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
                           const char **featureset)
{
    const char **name = featureset;
    uint32_t mask;
    bool found = false;

    /* One name slot per bit; mask walks all 32 bits then becomes 0. */
    for (mask = 1; mask; mask <<= 1, ++name) {
        if (*name && !altcmp(s, e, *name)) {
            *pval |= mask;
            found = true;
        }
    }
    return found;
}
713 static void add_flagname_to_bitmaps(const char *flagname,
714 FeatureWordArray words,
715 Error **errp)
717 FeatureWord w;
718 for (w = 0; w < FEATURE_WORDS; w++) {
719 FeatureWordInfo *wi = &feature_word_info[w];
720 if (lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
721 break;
724 if (w == FEATURE_WORDS) {
725 error_setg(errp, "CPU feature %s not found", flagname);
729 /* CPU class name definitions: */
731 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
732 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
734 /* Return type name for a given CPU model name
735 * Caller is responsible for freeing the returned string.
737 static char *x86_cpu_type_name(const char *model_name)
739 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
742 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
744 ObjectClass *oc;
745 char *typename;
747 if (cpu_model == NULL) {
748 return NULL;
751 typename = x86_cpu_type_name(cpu_model);
752 oc = object_class_by_name(typename);
753 g_free(typename);
754 return oc;
757 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
759 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
760 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
761 return g_strndup(class_name,
762 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
765 struct X86CPUDefinition {
766 const char *name;
767 uint32_t level;
768 uint32_t xlevel;
769 /* vendor is zero-terminated, 12 character ASCII string */
770 char vendor[CPUID_VENDOR_SZ + 1];
771 int family;
772 int model;
773 int stepping;
774 FeatureWordArray features;
775 char model_id[48];
778 static X86CPUDefinition builtin_x86_defs[] = {
780 .name = "qemu64",
781 .level = 0xd,
782 .vendor = CPUID_VENDOR_AMD,
783 .family = 6,
784 .model = 6,
785 .stepping = 3,
786 .features[FEAT_1_EDX] =
787 PPRO_FEATURES |
788 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
789 CPUID_PSE36,
790 .features[FEAT_1_ECX] =
791 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
792 .features[FEAT_8000_0001_EDX] =
793 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
794 .features[FEAT_8000_0001_ECX] =
795 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
796 .xlevel = 0x8000000A,
797 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
800 .name = "phenom",
801 .level = 5,
802 .vendor = CPUID_VENDOR_AMD,
803 .family = 16,
804 .model = 2,
805 .stepping = 3,
806 /* Missing: CPUID_HT */
807 .features[FEAT_1_EDX] =
808 PPRO_FEATURES |
809 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
810 CPUID_PSE36 | CPUID_VME,
811 .features[FEAT_1_ECX] =
812 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
813 CPUID_EXT_POPCNT,
814 .features[FEAT_8000_0001_EDX] =
815 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
816 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
817 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
818 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
819 CPUID_EXT3_CR8LEG,
820 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
821 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
822 .features[FEAT_8000_0001_ECX] =
823 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
824 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
825 /* Missing: CPUID_SVM_LBRV */
826 .features[FEAT_SVM] =
827 CPUID_SVM_NPT,
828 .xlevel = 0x8000001A,
829 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
832 .name = "core2duo",
833 .level = 10,
834 .vendor = CPUID_VENDOR_INTEL,
835 .family = 6,
836 .model = 15,
837 .stepping = 11,
838 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
839 .features[FEAT_1_EDX] =
840 PPRO_FEATURES |
841 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
842 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
843 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
844 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
845 .features[FEAT_1_ECX] =
846 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
847 CPUID_EXT_CX16,
848 .features[FEAT_8000_0001_EDX] =
849 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
850 .features[FEAT_8000_0001_ECX] =
851 CPUID_EXT3_LAHF_LM,
852 .xlevel = 0x80000008,
853 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
856 .name = "kvm64",
857 .level = 0xd,
858 .vendor = CPUID_VENDOR_INTEL,
859 .family = 15,
860 .model = 6,
861 .stepping = 1,
862 /* Missing: CPUID_HT */
863 .features[FEAT_1_EDX] =
864 PPRO_FEATURES | CPUID_VME |
865 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
866 CPUID_PSE36,
867 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
868 .features[FEAT_1_ECX] =
869 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
870 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
871 .features[FEAT_8000_0001_EDX] =
872 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
873 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
874 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
875 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
876 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
877 .features[FEAT_8000_0001_ECX] =
879 .xlevel = 0x80000008,
880 .model_id = "Common KVM processor"
883 .name = "qemu32",
884 .level = 4,
885 .vendor = CPUID_VENDOR_INTEL,
886 .family = 6,
887 .model = 6,
888 .stepping = 3,
889 .features[FEAT_1_EDX] =
890 PPRO_FEATURES,
891 .features[FEAT_1_ECX] =
892 CPUID_EXT_SSE3,
893 .xlevel = 0x80000004,
894 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
897 .name = "kvm32",
898 .level = 5,
899 .vendor = CPUID_VENDOR_INTEL,
900 .family = 15,
901 .model = 6,
902 .stepping = 1,
903 .features[FEAT_1_EDX] =
904 PPRO_FEATURES | CPUID_VME |
905 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
906 .features[FEAT_1_ECX] =
907 CPUID_EXT_SSE3,
908 .features[FEAT_8000_0001_ECX] =
910 .xlevel = 0x80000008,
911 .model_id = "Common 32-bit KVM processor"
914 .name = "coreduo",
915 .level = 10,
916 .vendor = CPUID_VENDOR_INTEL,
917 .family = 6,
918 .model = 14,
919 .stepping = 8,
920 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
921 .features[FEAT_1_EDX] =
922 PPRO_FEATURES | CPUID_VME |
923 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
924 CPUID_SS,
925 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
926 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
927 .features[FEAT_1_ECX] =
928 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
929 .features[FEAT_8000_0001_EDX] =
930 CPUID_EXT2_NX,
931 .xlevel = 0x80000008,
932 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
935 .name = "486",
936 .level = 1,
937 .vendor = CPUID_VENDOR_INTEL,
938 .family = 4,
939 .model = 8,
940 .stepping = 0,
941 .features[FEAT_1_EDX] =
942 I486_FEATURES,
943 .xlevel = 0,
946 .name = "pentium",
947 .level = 1,
948 .vendor = CPUID_VENDOR_INTEL,
949 .family = 5,
950 .model = 4,
951 .stepping = 3,
952 .features[FEAT_1_EDX] =
953 PENTIUM_FEATURES,
954 .xlevel = 0,
957 .name = "pentium2",
958 .level = 2,
959 .vendor = CPUID_VENDOR_INTEL,
960 .family = 6,
961 .model = 5,
962 .stepping = 2,
963 .features[FEAT_1_EDX] =
964 PENTIUM2_FEATURES,
965 .xlevel = 0,
968 .name = "pentium3",
969 .level = 3,
970 .vendor = CPUID_VENDOR_INTEL,
971 .family = 6,
972 .model = 7,
973 .stepping = 3,
974 .features[FEAT_1_EDX] =
975 PENTIUM3_FEATURES,
976 .xlevel = 0,
979 .name = "athlon",
980 .level = 2,
981 .vendor = CPUID_VENDOR_AMD,
982 .family = 6,
983 .model = 2,
984 .stepping = 3,
985 .features[FEAT_1_EDX] =
986 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
987 CPUID_MCA,
988 .features[FEAT_8000_0001_EDX] =
989 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
990 .xlevel = 0x80000008,
991 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
994 .name = "n270",
995 .level = 10,
996 .vendor = CPUID_VENDOR_INTEL,
997 .family = 6,
998 .model = 28,
999 .stepping = 2,
1000 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1001 .features[FEAT_1_EDX] =
1002 PPRO_FEATURES |
1003 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
1004 CPUID_ACPI | CPUID_SS,
1005 /* Some CPUs got no CPUID_SEP */
1006 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
1007 * CPUID_EXT_XTPR */
1008 .features[FEAT_1_ECX] =
1009 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1010 CPUID_EXT_MOVBE,
1011 .features[FEAT_8000_0001_EDX] =
1012 CPUID_EXT2_NX,
1013 .features[FEAT_8000_0001_ECX] =
1014 CPUID_EXT3_LAHF_LM,
1015 .xlevel = 0x80000008,
1016 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
1019 .name = "Conroe",
1020 .level = 10,
1021 .vendor = CPUID_VENDOR_INTEL,
1022 .family = 6,
1023 .model = 15,
1024 .stepping = 3,
1025 .features[FEAT_1_EDX] =
1026 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1027 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1028 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1029 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1030 CPUID_DE | CPUID_FP87,
1031 .features[FEAT_1_ECX] =
1032 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1033 .features[FEAT_8000_0001_EDX] =
1034 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1035 .features[FEAT_8000_0001_ECX] =
1036 CPUID_EXT3_LAHF_LM,
1037 .xlevel = 0x80000008,
1038 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1041 .name = "Penryn",
1042 .level = 10,
1043 .vendor = CPUID_VENDOR_INTEL,
1044 .family = 6,
1045 .model = 23,
1046 .stepping = 3,
1047 .features[FEAT_1_EDX] =
1048 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1049 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1050 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1051 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1052 CPUID_DE | CPUID_FP87,
1053 .features[FEAT_1_ECX] =
1054 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1055 CPUID_EXT_SSE3,
1056 .features[FEAT_8000_0001_EDX] =
1057 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1058 .features[FEAT_8000_0001_ECX] =
1059 CPUID_EXT3_LAHF_LM,
1060 .xlevel = 0x80000008,
1061 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1064 .name = "Nehalem",
1065 .level = 11,
1066 .vendor = CPUID_VENDOR_INTEL,
1067 .family = 6,
1068 .model = 26,
1069 .stepping = 3,
1070 .features[FEAT_1_EDX] =
1071 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1072 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1073 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1074 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1075 CPUID_DE | CPUID_FP87,
1076 .features[FEAT_1_ECX] =
1077 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1078 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1079 .features[FEAT_8000_0001_EDX] =
1080 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1081 .features[FEAT_8000_0001_ECX] =
1082 CPUID_EXT3_LAHF_LM,
1083 .xlevel = 0x80000008,
1084 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1087 .name = "Westmere",
1088 .level = 11,
1089 .vendor = CPUID_VENDOR_INTEL,
1090 .family = 6,
1091 .model = 44,
1092 .stepping = 1,
1093 .features[FEAT_1_EDX] =
1094 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1095 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1096 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1097 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1098 CPUID_DE | CPUID_FP87,
1099 .features[FEAT_1_ECX] =
1100 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1101 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1102 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1103 .features[FEAT_8000_0001_EDX] =
1104 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1105 .features[FEAT_8000_0001_ECX] =
1106 CPUID_EXT3_LAHF_LM,
1107 .features[FEAT_6_EAX] =
1108 CPUID_6_EAX_ARAT,
1109 .xlevel = 0x80000008,
1110 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1113 .name = "SandyBridge",
1114 .level = 0xd,
1115 .vendor = CPUID_VENDOR_INTEL,
1116 .family = 6,
1117 .model = 42,
1118 .stepping = 1,
1119 .features[FEAT_1_EDX] =
1120 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1121 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1122 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1123 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1124 CPUID_DE | CPUID_FP87,
1125 .features[FEAT_1_ECX] =
1126 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1127 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1128 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1129 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1130 CPUID_EXT_SSE3,
1131 .features[FEAT_8000_0001_EDX] =
1132 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1133 CPUID_EXT2_SYSCALL,
1134 .features[FEAT_8000_0001_ECX] =
1135 CPUID_EXT3_LAHF_LM,
1136 .features[FEAT_XSAVE] =
1137 CPUID_XSAVE_XSAVEOPT,
1138 .features[FEAT_6_EAX] =
1139 CPUID_6_EAX_ARAT,
1140 .xlevel = 0x80000008,
1141 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1144 .name = "IvyBridge",
1145 .level = 0xd,
1146 .vendor = CPUID_VENDOR_INTEL,
1147 .family = 6,
1148 .model = 58,
1149 .stepping = 9,
1150 .features[FEAT_1_EDX] =
1151 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1152 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1153 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1154 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1155 CPUID_DE | CPUID_FP87,
1156 .features[FEAT_1_ECX] =
1157 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1158 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1159 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1160 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1161 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1162 .features[FEAT_7_0_EBX] =
1163 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1164 CPUID_7_0_EBX_ERMS,
1165 .features[FEAT_8000_0001_EDX] =
1166 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1167 CPUID_EXT2_SYSCALL,
1168 .features[FEAT_8000_0001_ECX] =
1169 CPUID_EXT3_LAHF_LM,
1170 .features[FEAT_XSAVE] =
1171 CPUID_XSAVE_XSAVEOPT,
1172 .features[FEAT_6_EAX] =
1173 CPUID_6_EAX_ARAT,
1174 .xlevel = 0x80000008,
1175 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1178 .name = "Haswell-noTSX",
1179 .level = 0xd,
1180 .vendor = CPUID_VENDOR_INTEL,
1181 .family = 6,
1182 .model = 60,
1183 .stepping = 1,
1184 .features[FEAT_1_EDX] =
1185 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1186 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1187 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1188 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1189 CPUID_DE | CPUID_FP87,
1190 .features[FEAT_1_ECX] =
1191 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1192 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1193 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1194 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1195 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1196 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1197 .features[FEAT_8000_0001_EDX] =
1198 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1199 CPUID_EXT2_SYSCALL,
1200 .features[FEAT_8000_0001_ECX] =
1201 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1202 .features[FEAT_7_0_EBX] =
1203 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1204 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1205 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1206 .features[FEAT_XSAVE] =
1207 CPUID_XSAVE_XSAVEOPT,
1208 .features[FEAT_6_EAX] =
1209 CPUID_6_EAX_ARAT,
1210 .xlevel = 0x80000008,
1211 .model_id = "Intel Core Processor (Haswell, no TSX)",
1212 }, {
1213 .name = "Haswell",
1214 .level = 0xd,
1215 .vendor = CPUID_VENDOR_INTEL,
1216 .family = 6,
1217 .model = 60,
1218 .stepping = 1,
1219 .features[FEAT_1_EDX] =
1220 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1221 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1222 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1223 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1224 CPUID_DE | CPUID_FP87,
1225 .features[FEAT_1_ECX] =
1226 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1227 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1228 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1229 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1230 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1231 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1232 .features[FEAT_8000_0001_EDX] =
1233 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1234 CPUID_EXT2_SYSCALL,
1235 .features[FEAT_8000_0001_ECX] =
1236 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1237 .features[FEAT_7_0_EBX] =
1238 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1239 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1240 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1241 CPUID_7_0_EBX_RTM,
1242 .features[FEAT_XSAVE] =
1243 CPUID_XSAVE_XSAVEOPT,
1244 .features[FEAT_6_EAX] =
1245 CPUID_6_EAX_ARAT,
1246 .xlevel = 0x80000008,
1247 .model_id = "Intel Core Processor (Haswell)",
1250 .name = "Broadwell-noTSX",
1251 .level = 0xd,
1252 .vendor = CPUID_VENDOR_INTEL,
1253 .family = 6,
1254 .model = 61,
1255 .stepping = 2,
1256 .features[FEAT_1_EDX] =
1257 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1258 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1259 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1260 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1261 CPUID_DE | CPUID_FP87,
1262 .features[FEAT_1_ECX] =
1263 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1264 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1265 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1266 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1267 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1268 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1269 .features[FEAT_8000_0001_EDX] =
1270 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1271 CPUID_EXT2_SYSCALL,
1272 .features[FEAT_8000_0001_ECX] =
1273 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1274 .features[FEAT_7_0_EBX] =
1275 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1276 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1277 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1278 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1279 CPUID_7_0_EBX_SMAP,
1280 .features[FEAT_XSAVE] =
1281 CPUID_XSAVE_XSAVEOPT,
1282 .features[FEAT_6_EAX] =
1283 CPUID_6_EAX_ARAT,
1284 .xlevel = 0x80000008,
1285 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1288 .name = "Broadwell",
1289 .level = 0xd,
1290 .vendor = CPUID_VENDOR_INTEL,
1291 .family = 6,
1292 .model = 61,
1293 .stepping = 2,
1294 .features[FEAT_1_EDX] =
1295 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1296 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1297 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1298 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1299 CPUID_DE | CPUID_FP87,
1300 .features[FEAT_1_ECX] =
1301 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1302 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1303 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1304 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1305 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1306 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1307 .features[FEAT_8000_0001_EDX] =
1308 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1309 CPUID_EXT2_SYSCALL,
1310 .features[FEAT_8000_0001_ECX] =
1311 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1312 .features[FEAT_7_0_EBX] =
1313 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1314 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1315 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1316 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1317 CPUID_7_0_EBX_SMAP,
1318 .features[FEAT_XSAVE] =
1319 CPUID_XSAVE_XSAVEOPT,
1320 .features[FEAT_6_EAX] =
1321 CPUID_6_EAX_ARAT,
1322 .xlevel = 0x80000008,
1323 .model_id = "Intel Core Processor (Broadwell)",
1326 .name = "Skylake-Client",
1327 .level = 0xd,
1328 .vendor = CPUID_VENDOR_INTEL,
1329 .family = 6,
1330 .model = 94,
1331 .stepping = 3,
1332 .features[FEAT_1_EDX] =
1333 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1334 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1335 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1336 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1337 CPUID_DE | CPUID_FP87,
1338 .features[FEAT_1_ECX] =
1339 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1340 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1341 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1342 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1343 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1344 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1345 .features[FEAT_8000_0001_EDX] =
1346 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1347 CPUID_EXT2_SYSCALL,
1348 .features[FEAT_8000_0001_ECX] =
1349 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1350 .features[FEAT_7_0_EBX] =
1351 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1352 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1353 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1354 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1355 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1356 /* Missing: XSAVES (not supported by some Linux versions,
1357 * including v4.1 to v4.6).
1358 * KVM doesn't yet expose any XSAVES state save component,
1359 * and the only one defined in Skylake (processor tracing)
1360 * probably will block migration anyway.
1362 .features[FEAT_XSAVE] =
1363 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1364 CPUID_XSAVE_XGETBV1,
1365 .features[FEAT_6_EAX] =
1366 CPUID_6_EAX_ARAT,
1367 .xlevel = 0x80000008,
1368 .model_id = "Intel Core Processor (Skylake)",
1371 .name = "Opteron_G1",
1372 .level = 5,
1373 .vendor = CPUID_VENDOR_AMD,
1374 .family = 15,
1375 .model = 6,
1376 .stepping = 1,
1377 .features[FEAT_1_EDX] =
1378 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1379 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1380 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1381 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1382 CPUID_DE | CPUID_FP87,
1383 .features[FEAT_1_ECX] =
1384 CPUID_EXT_SSE3,
1385 .features[FEAT_8000_0001_EDX] =
1386 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1387 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1388 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1389 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1390 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1391 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1392 .xlevel = 0x80000008,
1393 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1396 .name = "Opteron_G2",
1397 .level = 5,
1398 .vendor = CPUID_VENDOR_AMD,
1399 .family = 15,
1400 .model = 6,
1401 .stepping = 1,
1402 .features[FEAT_1_EDX] =
1403 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1404 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1405 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1406 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1407 CPUID_DE | CPUID_FP87,
1408 .features[FEAT_1_ECX] =
1409 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1410 /* Missing: CPUID_EXT2_RDTSCP */
1411 .features[FEAT_8000_0001_EDX] =
1412 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1413 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1414 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1415 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1416 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1417 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1418 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1419 .features[FEAT_8000_0001_ECX] =
1420 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1421 .xlevel = 0x80000008,
1422 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1425 .name = "Opteron_G3",
1426 .level = 5,
1427 .vendor = CPUID_VENDOR_AMD,
1428 .family = 15,
1429 .model = 6,
1430 .stepping = 1,
1431 .features[FEAT_1_EDX] =
1432 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1433 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1434 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1435 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1436 CPUID_DE | CPUID_FP87,
1437 .features[FEAT_1_ECX] =
1438 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1439 CPUID_EXT_SSE3,
1440 /* Missing: CPUID_EXT2_RDTSCP */
1441 .features[FEAT_8000_0001_EDX] =
1442 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1443 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1444 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1445 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1446 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1447 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1448 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1449 .features[FEAT_8000_0001_ECX] =
1450 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1451 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1452 .xlevel = 0x80000008,
1453 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1456 .name = "Opteron_G4",
1457 .level = 0xd,
1458 .vendor = CPUID_VENDOR_AMD,
1459 .family = 21,
1460 .model = 1,
1461 .stepping = 2,
1462 .features[FEAT_1_EDX] =
1463 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1464 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1465 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1466 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1467 CPUID_DE | CPUID_FP87,
1468 .features[FEAT_1_ECX] =
1469 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1470 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1471 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1472 CPUID_EXT_SSE3,
1473 /* Missing: CPUID_EXT2_RDTSCP */
1474 .features[FEAT_8000_0001_EDX] =
1475 CPUID_EXT2_LM |
1476 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1477 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1478 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1479 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1480 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1481 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1482 .features[FEAT_8000_0001_ECX] =
1483 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1484 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1485 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1486 CPUID_EXT3_LAHF_LM,
1487 /* no xsaveopt! */
1488 .xlevel = 0x8000001A,
1489 .model_id = "AMD Opteron 62xx class CPU",
1492 .name = "Opteron_G5",
1493 .level = 0xd,
1494 .vendor = CPUID_VENDOR_AMD,
1495 .family = 21,
1496 .model = 2,
1497 .stepping = 0,
1498 .features[FEAT_1_EDX] =
1499 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1500 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1501 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1502 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1503 CPUID_DE | CPUID_FP87,
1504 .features[FEAT_1_ECX] =
1505 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1506 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1507 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1508 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1509 /* Missing: CPUID_EXT2_RDTSCP */
1510 .features[FEAT_8000_0001_EDX] =
1511 CPUID_EXT2_LM |
1512 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1513 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1514 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1515 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1516 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1517 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1518 .features[FEAT_8000_0001_ECX] =
1519 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1520 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1521 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1522 CPUID_EXT3_LAHF_LM,
1523 /* no xsaveopt! */
1524 .xlevel = 0x8000001A,
1525 .model_id = "AMD Opteron 63xx class CPU",
/* Simple (property name, property value) pair, used for static tables of
 * qdev property defaults (see kvm_default_props below).
 */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;
/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "x2apic", "on" },
    { "acpi", "off" },
    { "monitor", "off" },
    { "svm", "off" },
    { NULL, NULL },     /* end-of-table marker */
};
1550 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1552 PropValue *pv;
1553 for (pv = kvm_default_props; pv->prop; pv++) {
1554 if (!strcmp(pv->prop, prop)) {
1555 pv->value = value;
1556 break;
1560 /* It is valid to call this function only for properties that
1561 * are already present in the kvm_default_props table.
1563 assert(pv->prop);
1566 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1567 bool migratable_only);
1569 #ifdef CONFIG_KVM
1571 static bool lmce_supported(void)
1573 uint64_t mce_cap;
1575 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
1576 return false;
1579 return !!(mce_cap & MCG_LMCE_P);
/* Copy the host's 48-byte CPU brand string into @str.
 *
 * The string lives in CPUID leaves 0x80000002..0x80000004, 16 bytes per
 * leaf in EAX/EBX/ECX/EDX order.  @str must have room for 48 bytes; no
 * NUL terminator is appended here.  Always returns 0.
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t regs[4] = { 0, 0, 0, 0 };
    int i;

    for (i = 0; i < 3; i++) {
        host_cpuid(0x80000002 + i, 0,
                   &regs[0], &regs[1], &regs[2], &regs[3]);
        /* The four registers are contiguous, so one copy suffices */
        memcpy(str + i * 16, regs, sizeof(regs));
    }
    return 0;
}
/* CPU model definition for "-cpu host"; filled in from the host's CPUID
 * by host_x86_cpu_class_init().
 */
static X86CPUDefinition host_cpudef;

/* qdev properties specific to the "host" CPU model */
static Property host_x86_cpu_properties[] = {
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};
/* class_init for the "host" CPU model
 *
 * This function may be called before KVM is initialized.
 */
static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    xcc->kvm_required = true;

    /* CPUID leaf 0: vendor string is packed into EBX/EDX/ECX */
    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);

    /* CPUID leaf 1 EAX: combine base and extended family/model fields */
    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    host_cpudef.stepping = eax & 0x0F;

    cpu_x86_fill_model_id(host_cpudef.model_id);

    xcc->cpu_def = &host_cpudef;

    /* level, xlevel, xlevel2, and the feature words are initialized on
     * instance_init, because they require KVM to be initialized.
     */

    dc->props = host_x86_cpu_properties;
    /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
    dc->cannot_destroy_with_object_finalize_yet = true;
}
/* instance_init for the "host" CPU model: pull the minimum CPUID levels
 * from KVM and enable host-dependent optional features.
 */
static void host_x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    KVMState *s = kvm_state;

    /* We can't fill the features array here because we don't know yet if
     * "migratable" is true or false.
     */
    cpu->host_features = true;

    /* If KVM is disabled, x86_cpu_realizefn() will report an error later */
    if (kvm_enabled()) {
        /* Basic, extended (0x8000xxxx) and Centaur (0xC000xxxx) level
         * floors, taken from what the host KVM actually supports.
         */
        env->cpuid_min_level =
            kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
        env->cpuid_min_xlevel =
            kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
        env->cpuid_min_xlevel2 =
            kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);

        if (lmce_supported()) {
            object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
        }
    }

    object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}
/* QOM type registration info for the KVM-only "host" CPU model */
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = TYPE_X86_CPU,
    .instance_init = host_x86_cpu_initfn,
    .class_init = host_x86_cpu_class_init,
};
1673 #endif
1675 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1677 FeatureWordInfo *f = &feature_word_info[w];
1678 int i;
1680 for (i = 0; i < 32; ++i) {
1681 if ((1UL << i) & mask) {
1682 const char *reg = get_register_name_32(f->cpuid_reg);
1683 assert(reg);
1684 fprintf(stderr, "warning: %s doesn't support requested feature: "
1685 "CPUID.%02XH:%s%s%s [bit %d]\n",
1686 kvm_enabled() ? "host" : "TCG",
1687 f->cpuid_eax, reg,
1688 f->feat_names[i] ? "." : "",
1689 f->feat_names[i] ? f->feat_names[i] : "", i);
1694 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1695 const char *name, void *opaque,
1696 Error **errp)
1698 X86CPU *cpu = X86_CPU(obj);
1699 CPUX86State *env = &cpu->env;
1700 int64_t value;
1702 value = (env->cpuid_version >> 8) & 0xf;
1703 if (value == 0xf) {
1704 value += (env->cpuid_version >> 20) & 0xff;
1706 visit_type_int(v, name, &value, errp);
1709 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1710 const char *name, void *opaque,
1711 Error **errp)
1713 X86CPU *cpu = X86_CPU(obj);
1714 CPUX86State *env = &cpu->env;
1715 const int64_t min = 0;
1716 const int64_t max = 0xff + 0xf;
1717 Error *local_err = NULL;
1718 int64_t value;
1720 visit_type_int(v, name, &value, &local_err);
1721 if (local_err) {
1722 error_propagate(errp, local_err);
1723 return;
1725 if (value < min || value > max) {
1726 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1727 name ? name : "null", value, min, max);
1728 return;
1731 env->cpuid_version &= ~0xff00f00;
1732 if (value > 0x0f) {
1733 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1734 } else {
1735 env->cpuid_version |= value << 8;
1739 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1740 const char *name, void *opaque,
1741 Error **errp)
1743 X86CPU *cpu = X86_CPU(obj);
1744 CPUX86State *env = &cpu->env;
1745 int64_t value;
1747 value = (env->cpuid_version >> 4) & 0xf;
1748 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1749 visit_type_int(v, name, &value, errp);
1752 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1753 const char *name, void *opaque,
1754 Error **errp)
1756 X86CPU *cpu = X86_CPU(obj);
1757 CPUX86State *env = &cpu->env;
1758 const int64_t min = 0;
1759 const int64_t max = 0xff;
1760 Error *local_err = NULL;
1761 int64_t value;
1763 visit_type_int(v, name, &value, &local_err);
1764 if (local_err) {
1765 error_propagate(errp, local_err);
1766 return;
1768 if (value < min || value > max) {
1769 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1770 name ? name : "null", value, min, max);
1771 return;
1774 env->cpuid_version &= ~0xf00f0;
1775 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1778 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1779 const char *name, void *opaque,
1780 Error **errp)
1782 X86CPU *cpu = X86_CPU(obj);
1783 CPUX86State *env = &cpu->env;
1784 int64_t value;
1786 value = env->cpuid_version & 0xf;
1787 visit_type_int(v, name, &value, errp);
1790 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1791 const char *name, void *opaque,
1792 Error **errp)
1794 X86CPU *cpu = X86_CPU(obj);
1795 CPUX86State *env = &cpu->env;
1796 const int64_t min = 0;
1797 const int64_t max = 0xf;
1798 Error *local_err = NULL;
1799 int64_t value;
1801 visit_type_int(v, name, &value, &local_err);
1802 if (local_err) {
1803 error_propagate(errp, local_err);
1804 return;
1806 if (value < min || value > max) {
1807 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1808 name ? name : "null", value, min, max);
1809 return;
1812 env->cpuid_version &= ~0xf;
1813 env->cpuid_version |= value & 0xf;
1816 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1818 X86CPU *cpu = X86_CPU(obj);
1819 CPUX86State *env = &cpu->env;
1820 char *value;
1822 value = g_malloc(CPUID_VENDOR_SZ + 1);
1823 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1824 env->cpuid_vendor3);
1825 return value;
1828 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1829 Error **errp)
1831 X86CPU *cpu = X86_CPU(obj);
1832 CPUX86State *env = &cpu->env;
1833 int i;
1835 if (strlen(value) != CPUID_VENDOR_SZ) {
1836 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1837 return;
1840 env->cpuid_vendor1 = 0;
1841 env->cpuid_vendor2 = 0;
1842 env->cpuid_vendor3 = 0;
1843 for (i = 0; i < 4; i++) {
1844 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1845 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1846 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1850 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1852 X86CPU *cpu = X86_CPU(obj);
1853 CPUX86State *env = &cpu->env;
1854 char *value;
1855 int i;
1857 value = g_malloc(48 + 1);
1858 for (i = 0; i < 48; i++) {
1859 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1861 value[48] = '\0';
1862 return value;
1865 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1866 Error **errp)
1868 X86CPU *cpu = X86_CPU(obj);
1869 CPUX86State *env = &cpu->env;
1870 int c, len, i;
1872 if (model_id == NULL) {
1873 model_id = "";
1875 len = strlen(model_id);
1876 memset(env->cpuid_model, 0, 48);
1877 for (i = 0; i < 48; i++) {
1878 if (i >= len) {
1879 c = '\0';
1880 } else {
1881 c = (uint8_t)model_id[i];
1883 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1887 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1888 void *opaque, Error **errp)
1890 X86CPU *cpu = X86_CPU(obj);
1891 int64_t value;
1893 value = cpu->env.tsc_khz * 1000;
1894 visit_type_int(v, name, &value, errp);
1897 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1898 void *opaque, Error **errp)
1900 X86CPU *cpu = X86_CPU(obj);
1901 const int64_t min = 0;
1902 const int64_t max = INT64_MAX;
1903 Error *local_err = NULL;
1904 int64_t value;
1906 visit_type_int(v, name, &value, &local_err);
1907 if (local_err) {
1908 error_propagate(errp, local_err);
1909 return;
1911 if (value < min || value > max) {
1912 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1913 name ? name : "null", value, min, max);
1914 return;
1917 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
/* Generic getter for "feature-words" and "filtered-features" properties */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    /* @opaque points at the uint32_t[FEATURE_WORDS] array to report */
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    /* List nodes live on the stack; the visitor consumes them within
     * this call, so no heap allocation is needed.
     */
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        qwi->cpuid_input_eax = wi->cpuid_eax;
        qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid_ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
}
1949 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1950 void *opaque, Error **errp)
1952 X86CPU *cpu = X86_CPU(obj);
1953 int64_t value = cpu->hyperv_spinlock_attempts;
1955 visit_type_int(v, name, &value, errp);
1958 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1959 void *opaque, Error **errp)
1961 const int64_t min = 0xFFF;
1962 const int64_t max = UINT_MAX;
1963 X86CPU *cpu = X86_CPU(obj);
1964 Error *err = NULL;
1965 int64_t value;
1967 visit_type_int(v, name, &value, &err);
1968 if (err) {
1969 error_propagate(errp, err);
1970 return;
1973 if (value < min || value > max) {
1974 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1975 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1976 object_get_typename(obj), name ? name : "null",
1977 value, min, max);
1978 return;
1980 cpu->hyperv_spinlock_attempts = value;
/* Custom qdev property type backed by the Hyper-V spinlock accessors
 * above (x86_get_hv_spinlocks / x86_set_hv_spinlocks).
 */
static PropertyInfo qdev_prop_spinlocks = {
    .name = "int",
    .get = x86_get_hv_spinlocks,
    .set = x86_set_hv_spinlocks,
};
/* Convert all '_' in a feature string option name to '-', to make feature
 * name conform to QOM property naming rule, which uses '-' instead of '_'.
 */
static inline void feat2prop(char *s)
{
    char *p;

    for (p = strchr(s, '_'); p != NULL; p = strchr(p + 1, '_')) {
        *p = '-';
    }
}
/* Compatibility hack to maintain legacy +-feat semantics,
 * where +-feat overwrites any feature set by
 * feat=on|feat even if the latter is parsed after +-feat
 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
 */
static FeatureWordArray plus_features = { 0 };
static FeatureWordArray minus_features = { 0 };
/* Parse "+feature,-feature,feature=foo" CPU feature string
 *
 * Registers each "feature=value" item as a global qdev property default
 * for @typename; legacy "+feat"/"-feat" items are accumulated in the
 * plus_features/minus_features bitmaps instead.  Runs only once per
 * process (guarded by cpu_globals_initialized).  @features is modified
 * in place by strtok()/the '=' split.
 */
static void x86_cpu_parse_featurestr(const char *typename, char *features,
                                     Error **errp)
{
    char *featurestr; /* Single 'key=value" string being parsed */
    Error *local_err = NULL;
    static bool cpu_globals_initialized;

    if (cpu_globals_initialized) {
        return;
    }
    cpu_globals_initialized = true;

    if (!features) {
        return;
    }

    for (featurestr = strtok(features, ",");
         featurestr && !local_err;
         featurestr = strtok(NULL, ",")) {
        const char *name;
        const char *val = NULL;
        char *eq = NULL;
        char num[32];
        GlobalProperty *prop;

        /* Compatibility syntax: */
        if (featurestr[0] == '+') {
            add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
            continue;
        } else if (featurestr[0] == '-') {
            add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
            continue;
        }

        /* Split "key=value"; a bare "key" means "key=on" */
        eq = strchr(featurestr, '=');
        if (eq) {
            *eq++ = 0;
            val = eq;
        } else {
            val = "on";
        }

        feat2prop(featurestr);
        name = featurestr;

        /* Special case: "tsc-freq" accepts size suffixes and maps to the
         * "tsc-frequency" property.
         */
        if (!strcmp(name, "tsc-freq")) {
            int64_t tsc_freq;
            char *err;

            tsc_freq = qemu_strtosz_suffix_unit(val, &err,
                                           QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
            if (tsc_freq < 0 || *err) {
                error_setg(errp, "bad numerical value %s", val);
                return;
            }
            snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
            val = num;
            name = "tsc-frequency";
        }

        /* Register as a machine-wide default; errors are fatal at the
         * time the property is actually applied.
         */
        prop = g_new0(typeof(*prop), 1);
        prop->driver = typename;
        prop->property = g_strdup(name);
        prop->value = g_strdup(val);
        prop->errp = &error_fatal;
        qdev_prop_register_global(prop);
    }

    if (local_err) {
        error_propagate(errp, local_err);
    }
}
2083 /* Print all cpuid feature names in featureset
2085 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2087 int bit;
2088 bool first = true;
2090 for (bit = 0; bit < 32; bit++) {
2091 if (featureset[bit]) {
2092 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2093 first = false;
2098 /* generate CPU information. */
2099 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2101 X86CPUDefinition *def;
2102 char buf[256];
2103 int i;
2105 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2106 def = &builtin_x86_defs[i];
2107 snprintf(buf, sizeof(buf), "%s", def->name);
2108 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
2110 #ifdef CONFIG_KVM
2111 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
2112 "KVM processor with all supported host features "
2113 "(only available in KVM mode)");
2114 #endif
2116 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2117 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2118 FeatureWordInfo *fw = &feature_word_info[i];
2120 (*cpu_fprintf)(f, " ");
2121 listflags(f, cpu_fprintf, fw->feat_names);
2122 (*cpu_fprintf)(f, "\n");
2126 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2128 CpuDefinitionInfoList *cpu_list = NULL;
2129 X86CPUDefinition *def;
2130 int i;
2132 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2133 CpuDefinitionInfoList *entry;
2134 CpuDefinitionInfo *info;
2136 def = &builtin_x86_defs[i];
2137 info = g_malloc0(sizeof(*info));
2138 info->name = g_strdup(def->name);
2140 entry = g_malloc0(sizeof(*entry));
2141 entry->value = info;
2142 entry->next = cpu_list;
2143 cpu_list = entry;
2146 return cpu_list;
2149 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2150 bool migratable_only)
2152 FeatureWordInfo *wi = &feature_word_info[w];
2153 uint32_t r;
2155 if (kvm_enabled()) {
2156 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2157 wi->cpuid_ecx,
2158 wi->cpuid_reg);
2159 } else if (tcg_enabled()) {
2160 r = wi->tcg_features;
2161 } else {
2162 return ~0;
2164 if (migratable_only) {
2165 r &= x86_cpu_get_migratable_flags(w);
2167 return r;
2171 * Filters CPU feature words based on host availability of each feature.
2173 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
2175 static int x86_cpu_filter_features(X86CPU *cpu)
2177 CPUX86State *env = &cpu->env;
2178 FeatureWord w;
2179 int rv = 0;
2181 for (w = 0; w < FEATURE_WORDS; w++) {
2182 uint32_t host_feat =
2183 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2184 uint32_t requested_features = env->features[w];
2185 env->features[w] &= host_feat;
2186 cpu->filtered_features[w] = requested_features & ~env->features[w];
2187 if (cpu->filtered_features[w]) {
2188 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2189 report_unavailable_features(w, cpu->filtered_features[w]);
2191 rv = 1;
2195 return rv;
2198 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2200 PropValue *pv;
2201 for (pv = props; pv->prop; pv++) {
2202 if (!pv->value) {
2203 continue;
2205 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2206 &error_abort);
/* Load data from X86CPUDefinition into an X86CPU object.
 *
 * Copies the model definition's CPUID levels, family/model/stepping,
 * model-id, feature words and vendor into @cpu via QOM properties, then
 * applies the KVM-specific adjustments that are not expressed in the
 * definition structs.  Errors from the individual property sets are
 * accumulated into @errp.
 */
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    /* CPU models only set _minimum_ values for level/xlevel: */
    object_property_set_int(OBJECT(cpu), def->level, "min-level", errp);
    object_property_set_int(OBJECT(cpu), def->xlevel, "min-xlevel", errp);

    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* Special cases not set in the X86CPUDefinition structs: */
    if (kvm_enabled()) {
        /* Without an in-kernel irqchip, x2apic cannot work; disable the
         * KVM default for it. */
        if (!kvm_irqchip_in_kernel()) {
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    }

    /* All QEMU guests see the hypervisor bit. */
    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (kvm_enabled()) {
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
}
/* Create and realize an X86CPU from a "model[,features]" string.
 * Returns NULL on failure (cpu_generic_init reports the error). */
X86CPU *cpu_x86_init(const char *cpu_model)
{
    return X86_CPU(cpu_generic_init(TYPE_X86_CPU, cpu_model));
}
/* class_init for the per-model CPU types registered by
 * x86_register_cpudef_type(); @data is the X86CPUDefinition. */
static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
{
    X86CPUDefinition *cpudef = data;
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->cpu_def = cpudef;
}
2274 static void x86_register_cpudef_type(X86CPUDefinition *def)
2276 char *typename = x86_cpu_type_name(def->name);
2277 TypeInfo ti = {
2278 .name = typename,
2279 .parent = TYPE_X86_CPU,
2280 .class_init = x86_cpu_cpudef_class_init,
2281 .class_data = def,
2284 type_register(&ti);
2285 g_free(typename);
#if !defined(CONFIG_USER_ONLY)

/* Strip the APIC bit from CPUID[1].EDX, for boards without a local APIC. */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}

#endif /* !CONFIG_USER_ONLY */
/* Emulate the CPUID instruction for the guest.
 *
 * Fills *eax/*ebx/*ecx/*edx for leaf @index and sub-leaf @count based on
 * the configured feature words and topology in @env.  Out-of-range leaves
 * are first clamped per vendor convention (Intel: fold back to
 * cpuid_level; Centaur: the 0xC0000000 range is clamped separately).
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint32_t pkg_offset;

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel) {
            if (env->cpuid_xlevel2 > 0) {
                /* Handle the Centaur's CPUID instruction. */
                if (index > env->cpuid_xlevel2) {
                    index = env->cpuid_xlevel2;
                } else if (index < 0xC0000000) {
                    index = env->cpuid_xlevel;
                }
            } else {
                /* Intel documentation states that invalid EAX input will
                 * return the same information as EAX=cpuid_level
                 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
                 */
                index = env->cpuid_level;
            }
        }
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* Vendor string and maximum basic leaf.  Note the EBX/EDX/ECX
         * register order of the vendor words. */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        /* Version, APIC ID, feature flags. */
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        /* OSXSAVE mirrors CR4.OSXSAVE rather than being a static flag. */
        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
            *ecx |= CPUID_EXT_OSXSAVE;
        }
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= CPUID_HT;
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        if (!cpu->enable_l3_cache) {
            *ecx = 0;
        } else {
            *ecx = L3_N_DESCRIPTOR;
        }
        *edx = (L1D_DESCRIPTOR << 16) | \
               (L1I_DESCRIPTOR <<  8) | \
               (L2_DESCRIPTOR);
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            *eax &= ~0xFC000000;
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                *eax |= CPUID_4_TYPE_DCACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1D_LINE_SIZE - 1) | \
                       ((L1D_PARTITIONS - 1) << 12) | \
                       ((L1D_ASSOCIATIVITY - 1) << 22);
                *ecx = L1D_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 1: /* L1 icache info */
                *eax |= CPUID_4_TYPE_ICACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1I_LINE_SIZE - 1) | \
                       ((L1I_PARTITIONS - 1) << 12) | \
                       ((L1I_ASSOCIATIVITY - 1) << 22);
                *ecx = L1I_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 2: /* L2 cache info */
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(2) | \
                        CPUID_4_SELF_INIT_LEVEL;
                if (cs->nr_threads > 1) {
                    *eax |= (cs->nr_threads - 1) << 14;
                }
                *ebx = (L2_LINE_SIZE - 1) | \
                       ((L2_PARTITIONS - 1) << 12) | \
                       ((L2_ASSOCIATIVITY - 1) << 22);
                *ecx = L2_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 3: /* L3 cache info */
                if (!cpu->enable_l3_cache) {
                    *eax = 0;
                    *ebx = 0;
                    *ecx = 0;
                    *edx = 0;
                    break;
                }
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(3) | \
                        CPUID_4_SELF_INIT_LEVEL;
                pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
                *eax |= ((1 << pkg_offset) - 1) << 14;
                *ebx = (L3_N_LINE_SIZE - 1) | \
                       ((L3_N_PARTITIONS - 1) << 12) | \
                       ((L3_N_ASSOCIATIVITY - 1) << 22);
                *ecx = L3_N_SETS - 1;
                *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
            }
        }

        /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
        if ((*eax & 31) && cs->nr_cores > 1) {
            *eax |= (cs->nr_cores - 1) << 26;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            /* OSPKE mirrors CR4.PKE, like OSXSAVE in leaf 1. */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = 0; /* Reserved */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xB:
        /* Extended Topology Enumeration Leaf */
        if (!cpu->enable_cpuid_0xb) {
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;

        switch (count) {
        case 0:
            *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }

        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0xD: {
        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }

        if (count == 0) {
            *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
            *eax = env->features[FEAT_XSAVE_COMP_LO];
            *edx = env->features[FEAT_XSAVE_COMP_HI];
            *ebx = *ecx;
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            /* Per-component sub-leaves: size/offset of each enabled
             * extended save area. */
            if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
                const ExtSaveArea *esa = &x86_ext_save_areas[count];
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x80000000:
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Processor brand string, 16 bytes per leaf. */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
        *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
               (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
        *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
               (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        *ecx = (L2_SIZE_KB_AMD << 16) | \
               (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
               (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
        if (!cpu->enable_l3_cache) {
            *edx = ((L3_SIZE_KB / 512) << 18) | \
                   (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
                   (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
        } else {
            *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
                   (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
                   (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
        }
        break;
    case 0x80000007:
        /* Advanced Power Management Information */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor, 48 bits virtual, configurable
             * physical bits.
             */
            *eax = 0x00003000 + cpu->phys_bits;
        } else {
            *eax = cpu->phys_bits;
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        /* SVM leaf, only populated when SVM is advertised. */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xC0000000:
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
/* CPUClass::reset()
 *
 * Put the vCPU into its architectural power-on state: real mode at
 * CS:IP = F000:FFF0, flat segment caches, FPU/SSE/XSAVE units in INIT
 * state, debug registers and MTRRs cleared.  The statement order below
 * is deliberate (e.g. CR0 before segment caches); do not reorder.
 */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    /* Only the fields before end_reset_fields survive reset. */
    memset(env, 0, offsetof(CPUX86State, end_reset_fields));

    tlb_flush(s, 1);

    env->old_exception = -1;

    /* init to reset state */

    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* Real-mode segment caches; CS base 0xffff0000 so that the first
     * fetch lands at the reset vector 0xfffffff0. */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state. */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode. */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
#endif
}
#ifndef CONFIG_USER_ONLY
/* Report whether @cpu is the bootstrap processor, based on the BSP bit
 * of its APIC base MSR. */
bool cpu_is_bsp(X86CPU *cpu)
{
    return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
}
/* TODO: remove me, when reset over QOM tree is implemented */
/* Machine-level reset hook: forward to the generic CPU reset. */
static void x86_cpu_machine_reset_cb(void *opaque)
{
    X86CPU *cpu = opaque;
    cpu_reset(CPU(cpu));
}
#endif
2830 static void mce_init(X86CPU *cpu)
2832 CPUX86State *cenv = &cpu->env;
2833 unsigned int bank;
2835 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2836 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2837 (CPUID_MCE | CPUID_MCA)) {
2838 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
2839 (cpu->enable_lmce ? MCG_LMCE_P : 0);
2840 cenv->mcg_ctl = ~(uint64_t)0;
2841 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2842 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
#ifndef CONFIG_USER_ONLY
/* Create (but do not realize) the local APIC device for @cpu.
 *
 * Picks the APIC implementation matching the accelerator: in-kernel KVM
 * APIC, Xen APIC, or the userspace "apic" otherwise.  The device is
 * parented to the CPU as "lapic" and configured with the CPU's APIC ID.
 */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    const char *apic_type = "apic";

    if (kvm_apic_in_kernel()) {
        apic_type = "kvm-apic";
    } else if (xen_enabled()) {
        apic_type = "xen-apic";
    }

    cpu->apic_state = DEVICE(object_new(apic_type));

    object_property_add_child(OBJECT(cpu), "lapic",
                              OBJECT(cpu->apic_state), &error_abort);
    /* Drop our reference; the child property now owns the device. */
    object_unref(OBJECT(cpu->apic_state));

    qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}
/* Realize the CPU's APIC device (if one was created) and, once per
 * machine, map the shared APIC MMIO region into system memory. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    static bool apic_mmio_map_once;

    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
    }
}
/* machine-init-done notifier: if the machine exposes /machine/smram,
 * alias it into this CPU's address space (disabled by default; SMM code
 * enables it). */
static void x86_cpu_machine_done(Notifier *n, void *unused)
{
    X86CPU *cpu = container_of(n, X86CPU, machine_done);
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    if (smram) {
        cpu->smram = g_new(MemoryRegion, 1);
        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
                                 smram, 0, 1ull << 32);
        memory_region_set_enabled(cpu->smram, false);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
    }
}
#else
/* User-mode emulation has no APIC; realize is a no-op. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
#endif
2915 /* Note: Only safe for use on x86(-64) hosts */
2916 static uint32_t x86_host_phys_bits(void)
2918 uint32_t eax;
2919 uint32_t host_phys_bits;
2921 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
2922 if (eax >= 0x80000008) {
2923 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
2924 /* Note: According to AMD doc 25481 rev 2.34 they have a field
2925 * at 23:16 that can specify a maximum physical address bits for
2926 * the guest that can override this value; but I've not seen
2927 * anything with that set.
2929 host_phys_bits = eax & 0xff;
2930 } else {
2931 /* It's an odd 64 bit machine that doesn't have the leaf for
2932 * physical address bits; fall back to 36 that's most older
2933 * Intel.
2935 host_phys_bits = 36;
2938 return host_phys_bits;
2941 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
2943 if (*min < value) {
2944 *min = value;
2948 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
2949 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
2951 CPUX86State *env = &cpu->env;
2952 FeatureWordInfo *fi = &feature_word_info[w];
2953 uint32_t eax = fi->cpuid_eax;
2954 uint32_t region = eax & 0xF0000000;
2956 if (!env->features[w]) {
2957 return;
2960 switch (region) {
2961 case 0x00000000:
2962 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
2963 break;
2964 case 0x80000000:
2965 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
2966 break;
2967 case 0xC0000000:
2968 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
2969 break;
2973 /* Calculate XSAVE components based on the configured CPU feature flags */
2974 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
2976 CPUX86State *env = &cpu->env;
2977 int i;
2978 uint64_t mask;
2980 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
2981 return;
2984 mask = (XSTATE_FP_MASK | XSTATE_SSE_MASK);
2985 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
2986 const ExtSaveArea *esa = &x86_ext_save_areas[i];
2987 if (env->features[esa->feature] & esa->bits) {
2988 mask |= (1ULL << i);
2992 env->features[FEAT_XSAVE_COMP_LO] = mask;
2993 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
/* True if the guest's CPUID vendor words match the Intel vendor string. */
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
/* True if the guest's CPUID vendor words match the AMD vendor string. */
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
3002 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
3004 CPUState *cs = CPU(dev);
3005 X86CPU *cpu = X86_CPU(dev);
3006 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
3007 CPUX86State *env = &cpu->env;
3008 Error *local_err = NULL;
3009 static bool ht_warned;
3010 FeatureWord w;
3012 if (xcc->kvm_required && !kvm_enabled()) {
3013 char *name = x86_cpu_class_get_model_name(xcc);
3014 error_setg(&local_err, "CPU model '%s' requires KVM", name);
3015 g_free(name);
3016 goto out;
3019 if (cpu->apic_id == UNASSIGNED_APIC_ID) {
3020 error_setg(errp, "apic-id property was not initialized properly");
3021 return;
3024 /*TODO: cpu->host_features incorrectly overwrites features
3025 * set using "feat=on|off". Once we fix this, we can convert
3026 * plus_features & minus_features to global properties
3027 * inside x86_cpu_parse_featurestr() too.
3029 if (cpu->host_features) {
3030 for (w = 0; w < FEATURE_WORDS; w++) {
3031 env->features[w] =
3032 x86_cpu_get_supported_feature_word(w, cpu->migratable);
3036 for (w = 0; w < FEATURE_WORDS; w++) {
3037 cpu->env.features[w] |= plus_features[w];
3038 cpu->env.features[w] &= ~minus_features[w];
3041 if (!kvm_enabled() || !cpu->expose_kvm) {
3042 env->features[FEAT_KVM] = 0;
3045 x86_cpu_enable_xsave_components(cpu);
3047 /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
3048 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
3049 if (cpu->full_cpuid_auto_level) {
3050 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
3051 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
3052 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
3053 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
3054 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
3055 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
3056 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
3057 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
3058 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
3059 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
3060 /* SVM requires CPUID[0x8000000A] */
3061 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
3062 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
3066 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
3067 if (env->cpuid_level == UINT32_MAX) {
3068 env->cpuid_level = env->cpuid_min_level;
3070 if (env->cpuid_xlevel == UINT32_MAX) {
3071 env->cpuid_xlevel = env->cpuid_min_xlevel;
3073 if (env->cpuid_xlevel2 == UINT32_MAX) {
3074 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
3077 if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
3078 error_setg(&local_err,
3079 kvm_enabled() ?
3080 "Host doesn't support requested features" :
3081 "TCG doesn't support requested features");
3082 goto out;
3085 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
3086 * CPUID[1].EDX.
3088 if (IS_AMD_CPU(env)) {
3089 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
3090 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
3091 & CPUID_EXT2_AMD_ALIASES);
3094 /* For 64bit systems think about the number of physical bits to present.
3095 * ideally this should be the same as the host; anything other than matching
3096 * the host can cause incorrect guest behaviour.
3097 * QEMU used to pick the magic value of 40 bits that corresponds to
3098 * consumer AMD devices but nothing else.
3100 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
3101 if (kvm_enabled()) {
3102 uint32_t host_phys_bits = x86_host_phys_bits();
3103 static bool warned;
3105 if (cpu->host_phys_bits) {
3106 /* The user asked for us to use the host physical bits */
3107 cpu->phys_bits = host_phys_bits;
3110 /* Print a warning if the user set it to a value that's not the
3111 * host value.
3113 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
3114 !warned) {
3115 error_report("Warning: Host physical bits (%u)"
3116 " does not match phys-bits property (%u)",
3117 host_phys_bits, cpu->phys_bits);
3118 warned = true;
3121 if (cpu->phys_bits &&
3122 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
3123 cpu->phys_bits < 32)) {
3124 error_setg(errp, "phys-bits should be between 32 and %u "
3125 " (but is %u)",
3126 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
3127 return;
3129 } else {
3130 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
3131 error_setg(errp, "TCG only supports phys-bits=%u",
3132 TCG_PHYS_ADDR_BITS);
3133 return;
3136 /* 0 means it was not explicitly set by the user (or by machine
3137 * compat_props or by the host code above). In this case, the default
3138 * is the value used by TCG (40).
3140 if (cpu->phys_bits == 0) {
3141 cpu->phys_bits = TCG_PHYS_ADDR_BITS;
3143 } else {
3144 /* For 32 bit systems don't use the user set value, but keep
3145 * phys_bits consistent with what we tell the guest.
3147 if (cpu->phys_bits != 0) {
3148 error_setg(errp, "phys-bits is not user-configurable in 32 bit");
3149 return;
3152 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
3153 cpu->phys_bits = 36;
3154 } else {
3155 cpu->phys_bits = 32;
3158 cpu_exec_init(cs, &error_abort);
3160 if (tcg_enabled()) {
3161 tcg_x86_init();
3164 #ifndef CONFIG_USER_ONLY
3165 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
3167 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
3168 x86_cpu_apic_create(cpu, &local_err);
3169 if (local_err != NULL) {
3170 goto out;
3173 #endif
3175 mce_init(cpu);
3177 #ifndef CONFIG_USER_ONLY
3178 if (tcg_enabled()) {
3179 AddressSpace *newas = g_new(AddressSpace, 1);
3181 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
3182 cpu->cpu_as_root = g_new(MemoryRegion, 1);
3184 /* Outer container... */
3185 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
3186 memory_region_set_enabled(cpu->cpu_as_root, true);
3188 /* ... with two regions inside: normal system memory with low
3189 * priority, and...
3191 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
3192 get_system_memory(), 0, ~0ull);
3193 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
3194 memory_region_set_enabled(cpu->cpu_as_mem, true);
3195 address_space_init(newas, cpu->cpu_as_root, "CPU");
3196 cs->num_ases = 1;
3197 cpu_address_space_init(cs, newas, 0);
3199 /* ... SMRAM with higher priority, linked from /machine/smram. */
3200 cpu->machine_done.notify = x86_cpu_machine_done;
3201 qemu_add_machine_init_done_notifier(&cpu->machine_done);
3203 #endif
3205 qemu_init_vcpu(cs);
3207 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
3208 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
3209 * based on inputs (sockets,cores,threads), it is still better to gives
3210 * users a warning.
3212 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
3213 * cs->nr_threads hasn't be populated yet and the checking is incorrect.
3215 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
3216 error_report("AMD CPU doesn't support hyperthreading. Please configure"
3217 " -smp options properly.");
3218 ht_warned = true;
3221 x86_cpu_apic_realize(cpu, &local_err);
3222 if (local_err != NULL) {
3223 goto out;
3225 cpu_reset(cs);
3227 xcc->parent_realize(dev, &local_err);
3229 out:
3230 if (local_err != NULL) {
3231 error_propagate(errp, local_err);
3232 return;
/* Device "unrealize" handler: undo what x86_cpu_realizefn() set up.
 *
 * Order matters: the vCPU thread is torn down and the reset callback
 * unregistered before the APIC child object is released.
 */
static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
{
    X86CPU *cpu = X86_CPU(dev);

#ifndef CONFIG_USER_ONLY
    cpu_remove_sync(CPU(dev));
    qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
#endif

    if (cpu->apic_state) {
        object_unparent(OBJECT(cpu->apic_state));
        cpu->apic_state = NULL;
    }
}
/* Backing state for a QOM boolean property that mirrors one or more bits
 * of a CPU feature word.  A single BitProperty is shared by every bit
 * registered under the same property name (see x86_cpu_register_bit_prop).
 */
typedef struct BitProperty {
    uint32_t *ptr;  /* feature word the property reads/writes */
    uint32_t mask;  /* bit(s) within *ptr controlled by this property */
} BitProperty;
3256 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
3257 void *opaque, Error **errp)
3259 BitProperty *fp = opaque;
3260 bool value = (*fp->ptr & fp->mask) == fp->mask;
3261 visit_type_bool(v, name, &value, errp);
3264 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
3265 void *opaque, Error **errp)
3267 DeviceState *dev = DEVICE(obj);
3268 BitProperty *fp = opaque;
3269 Error *local_err = NULL;
3270 bool value;
3272 if (dev->realized) {
3273 qdev_prop_set_after_realize(dev, name, errp);
3274 return;
3277 visit_type_bool(v, name, &value, &local_err);
3278 if (local_err) {
3279 error_propagate(errp, local_err);
3280 return;
3283 if (value) {
3284 *fp->ptr |= fp->mask;
3285 } else {
3286 *fp->ptr &= ~fp->mask;
3290 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3291 void *opaque)
3293 BitProperty *prop = opaque;
3294 g_free(prop);
/* Register a boolean property to get/set a single bit in a uint32_t field.
 *
 * The same property name can be registered multiple times to make it affect
 * multiple bits in the same FeatureWord. In that case, the getter will return
 * true only if all bits are set.
 */
static void x86_cpu_register_bit_prop(X86CPU *cpu,
                                      const char *prop_name,
                                      uint32_t *field,
                                      int bitnr)
{
    BitProperty *fp;
    ObjectProperty *op;
    uint32_t mask = (1UL << bitnr);

    op = object_property_find(OBJECT(cpu), prop_name, NULL);
    if (op) {
        /* Name already registered: reuse its BitProperty and widen the mask.
         * It must refer to the same feature word. */
        fp = op->opaque;
        assert(fp->ptr == field);
        fp->mask |= mask;
    } else {
        fp = g_new0(BitProperty, 1);
        fp->ptr = field;
        fp->mask = mask;
        /* fp ownership passes to the property; freed by the release hook */
        object_property_add(OBJECT(cpu), prop_name, "bool",
                            x86_cpu_get_bit_prop,
                            x86_cpu_set_bit_prop,
                            x86_cpu_release_bit_prop, fp, &error_abort);
    }
}
/* Register the QOM properties for bit <bitnr> of feature word <w>.
 *
 * The bit's name string may contain several '|'-separated spellings; the
 * first becomes the canonical bit property and the rest become aliases of
 * it.  Unnamed bits get no property.
 */
static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
                                               FeatureWord w,
                                               int bitnr)
{
    Object *obj = OBJECT(cpu);
    int i;
    char **names;
    FeatureWordInfo *fi = &feature_word_info[w];

    if (!fi->feat_names[bitnr]) {
        return;
    }

    names = g_strsplit(fi->feat_names[bitnr], "|", 0);

    /* feat2prop() canonicalizes the property spelling in place */
    feat2prop(names[0]);
    x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);

    for (i = 1; names[i]; i++) {
        feat2prop(names[i]);
        object_property_add_alias(obj, names[i], obj, names[0],
                                  &error_abort);
    }

    g_strfreev(names);
}
/* QOM instance_init for TYPE_X86_CPU: registers per-instance properties
 * (CPUID version fields, vendor/model strings, TSC frequency, feature-word
 * introspection, and one bool property per named feature bit), then loads
 * the class's CPU model definition as the starting configuration.
 */
static void x86_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    cs->env_ptr = env;

    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    /* Read-only views of the feature words requested / filtered out */
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;

    /* One bool property per named bit of every feature word */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    /* Apply the CPU model definition attached to this QOM class */
    x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
}
3403 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3405 X86CPU *cpu = X86_CPU(cs);
3407 return cpu->apic_id;
3410 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3412 X86CPU *cpu = X86_CPU(cs);
3414 return cpu->env.cr[0] & CR0_PG_MASK;
3417 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3419 X86CPU *cpu = X86_CPU(cs);
3421 cpu->env.eip = value;
3424 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3426 X86CPU *cpu = X86_CPU(cs);
3428 cpu->env.eip = tb->pc - tb->cs_base;
3431 static bool x86_cpu_has_work(CPUState *cs)
3433 X86CPU *cpu = X86_CPU(cs);
3434 CPUX86State *env = &cpu->env;
3436 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3437 CPU_INTERRUPT_POLL)) &&
3438 (env->eflags & IF_MASK)) ||
3439 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3440 CPU_INTERRUPT_INIT |
3441 CPU_INTERRUPT_SIPI |
3442 CPU_INTERRUPT_MCE)) ||
3443 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3444 !(env->hflags & HF_SMM_MASK));
/* qdev properties common to every x86 CPU model. */
static Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
#else
    /* System emulation: topology ids start unassigned (-1 / sentinel) */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    /* Hyper-V enlightenment knobs */
    { .name  = "hv-spinlocks", .info  = &qdev_prop_spinlocks },
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
    DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
    DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
    DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
    DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    /* Physical-address-width handling; see x86_cpu_realizefn() */
    DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
    DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
    DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
    /* CPUID level limits; UINT32_MAX means "not set by the user" */
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
    DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
    DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
    DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
    DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
    DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
    DEFINE_PROP_END_OF_LIST()
};
/* class_init for the abstract TYPE_X86_CPU type: wires up DeviceClass
 * realize/unrealize, CPUClass hooks, and the common property list shared
 * by every x86 CPU model subclass.
 */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    /* Chain up: keep the parent's realize so ours can call it last */
    xcc->parent_realize = dc->realize;
    dc->realize = x86_cpu_realizefn;
    dc->unrealize = x86_cpu_unrealizefn;
    dc->props = x86_cpu_properties;

    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
    cc->dump_state = x86_cpu_dump_state;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
#ifndef CONFIG_USER_ONLY
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;

    dc->cannot_instantiate_with_device_add_yet = false;
    /*
     * Reason: x86_cpu_initfn() calls cpu_exec_init(), which saves the
     * object in cpus -> dangling pointer after final object_unref().
     */
    dc->cannot_destroy_with_object_finalize_yet = true;
}
/* Abstract base QOM type shared by all concrete x86 CPU model types. */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,          /* only model subclasses are instantiable */
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};
3554 static void x86_cpu_register_types(void)
3556 int i;
3558 type_register_static(&x86_cpu_type_info);
3559 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3560 x86_register_cpudef_type(&builtin_x86_defs[i]);
3562 #ifdef CONFIG_KVM
3563 type_register_static(&host_x86_cpu_type_info);
3564 #endif
/* Hook the registration function into QEMU's module init machinery. */
type_init(x86_cpu_register_types)