/*
 * Source: qemu/kevin.git, target-i386/cpu.c
 * (blob 0f8a8fbd3b4a379ab270f4ad2059d795327cc25f, as of the merge of
 * 'remotes/mdroth/tags/qga-pull-2016-10-31-tag' into staging)
 */
/*
 * i386 CPUID helper functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
19 #include "qemu/osdep.h"
20 #include "qemu/cutils.h"
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/kvm.h"
25 #include "sysemu/cpus.h"
26 #include "kvm_i386.h"
28 #include "qemu/error-report.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
33 #include "qapi-types.h"
34 #include "qapi-visit.h"
35 #include "qapi/visitor.h"
36 #include "sysemu/arch_init.h"
38 #if defined(CONFIG_KVM)
39 #include <linux/kvm_para.h>
40 #endif
42 #include "sysemu/sysemu.h"
43 #include "hw/qdev-properties.h"
44 #include "hw/i386/topology.h"
45 #ifndef CONFIG_USER_ONLY
46 #include "exec/address-spaces.h"
47 #include "hw/hw.h"
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
50 #endif
/* Cache topology CPUID constants: */

/* CPUID Leaf 2 Descriptors */

#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
#define CPUID_2_L1I_32KB_8WAY_64B 0x30
#define CPUID_2_L2_2MB_8WAY_64B   0x7d
#define CPUID_2_L3_16MB_16WAY_64B 0x4d

/* CPUID Leaf 4 constants: */

/* EAX: */
#define CPUID_4_TYPE_DCACHE  1
#define CPUID_4_TYPE_ICACHE  2
#define CPUID_4_TYPE_UNIFIED 3
#define CPUID_4_LEVEL(l)        ((l) << 5)
#define CPUID_4_SELF_INIT_LEVEL (1 << 8)
#define CPUID_4_FULLY_ASSOC     (1 << 9)

/* EDX: */
#define CPUID_4_NO_INVD_SHARING (1 << 0)
#define CPUID_4_INCLUSIVE       (1 << 1)
#define CPUID_4_COMPLEX_IDX     (1 << 2)

#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006: */
#define AMD_ENC_ASSOC(a) (a <=   1 ? a   : \
                          a ==   2 ? 0x2 : \
                          a ==   4 ? 0x4 : \
                          a ==   8 ? 0x6 : \
                          a ==  16 ? 0x8 : \
                          a ==  32 ? 0xA : \
                          a ==  48 ? 0xB : \
                          a ==  64 ? 0xC : \
                          a ==  96 ? 0xD : \
                          a == 128 ? 0xE : \
                          a == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)

/* Definitions of the hardcoded cache entries we expose: */

/* L1 data cache: */
#define L1D_LINE_SIZE         64
#define L1D_ASSOCIATIVITY      8
#define L1D_SETS              64
#define L1D_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1D_LINES_PER_TAG      1
#define L1D_SIZE_KB_AMD       64
#define L1D_ASSOCIATIVITY_AMD  2

/* L1 instruction cache: */
#define L1I_LINE_SIZE         64
#define L1I_ASSOCIATIVITY      8
#define L1I_SETS              64
#define L1I_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1I_LINES_PER_TAG      1
#define L1I_SIZE_KB_AMD       64
#define L1I_ASSOCIATIVITY_AMD  2

/* Level 2 unified cache: */
#define L2_LINE_SIZE          64
#define L2_ASSOCIATIVITY      16
#define L2_SETS             4096
#define L2_PARTITIONS          1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
#define L2_LINES_PER_TAG       1
#define L2_SIZE_KB_AMD       512

/* Level 3 unified cache: */
#define L3_SIZE_KB             0 /* disabled */
#define L3_ASSOCIATIVITY       0 /* disabled */
#define L3_LINES_PER_TAG       0 /* disabled */
#define L3_LINE_SIZE           0 /* disabled */
#define L3_N_LINE_SIZE        64
#define L3_N_ASSOCIATIVITY    16
#define L3_N_SETS          16384
#define L3_N_PARTITIONS        1
#define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
#define L3_N_LINES_PER_TAG     1
#define L3_N_SIZE_KB_AMD   16384

/* TLB definitions: */

#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512
172 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
173 uint32_t vendor2, uint32_t vendor3)
175 int i;
176 for (i = 0; i < 4; i++) {
177 dst[i] = vendor1 >> (8 * i);
178 dst[i + 4] = vendor2 >> (8 * i);
179 dst[i + 8] = vendor3 >> (8 * i);
181 dst[CPUID_VENDOR_SZ] = '\0';
/* Feature-bit sets for the builtin CPU models and for what TCG emulates: */

#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
          CPUID_EXT_F16C, CPUID_EXT_RDRAND */

#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES 0
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
          CPUID_7_0_EBX_ERMS)
          /* missing:
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE)
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
/* Per-feature-word metadata: where the word comes from in CPUID space and
 * which of its bits are named, TCG-supported, and (un)migratable. */
typedef struct FeatureWordInfo {
    /* feature flags names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[32];
    uint32_t cpuid_eax;   /* Input EAX for CPUID */
    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;   /* Input ECX value for CPUID */
    int cpuid_reg;        /* output register (R_* constant) */
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
    uint32_t migratable_flags; /* Feature flags known to be migratable */
} FeatureWordInfo;
264 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
265 [FEAT_1_EDX] = {
266 .feat_names = {
267 "fpu", "vme", "de", "pse",
268 "tsc", "msr", "pae", "mce",
269 "cx8", "apic", NULL, "sep",
270 "mtrr", "pge", "mca", "cmov",
271 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
272 NULL, "ds" /* Intel dts */, "acpi", "mmx",
273 "fxsr", "sse", "sse2", "ss",
274 "ht" /* Intel htt */, "tm", "ia64", "pbe",
276 .cpuid_eax = 1, .cpuid_reg = R_EDX,
277 .tcg_features = TCG_FEATURES,
279 [FEAT_1_ECX] = {
280 .feat_names = {
281 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
282 "ds-cpl", "vmx", "smx", "est",
283 "tm2", "ssse3", "cid", NULL,
284 "fma", "cx16", "xtpr", "pdcm",
285 NULL, "pcid", "dca", "sse4.1",
286 "sse4.2", "x2apic", "movbe", "popcnt",
287 "tsc-deadline", "aes", "xsave", "osxsave",
288 "avx", "f16c", "rdrand", "hypervisor",
290 .cpuid_eax = 1, .cpuid_reg = R_ECX,
291 .tcg_features = TCG_EXT_FEATURES,
293 /* Feature names that are already defined on feature_name[] but
294 * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
295 * names on feat_names below. They are copied automatically
296 * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
298 [FEAT_8000_0001_EDX] = {
299 .feat_names = {
300 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
301 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
302 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
303 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
304 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
305 "nx", NULL, "mmxext", NULL /* mmx */,
306 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
307 NULL, "lm", "3dnowext", "3dnow",
309 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
310 .tcg_features = TCG_EXT2_FEATURES,
312 [FEAT_8000_0001_ECX] = {
313 .feat_names = {
314 "lahf-lm", "cmp-legacy", "svm", "extapic",
315 "cr8legacy", "abm", "sse4a", "misalignsse",
316 "3dnowprefetch", "osvw", "ibs", "xop",
317 "skinit", "wdt", NULL, "lwp",
318 "fma4", "tce", NULL, "nodeid-msr",
319 NULL, "tbm", "topoext", "perfctr-core",
320 "perfctr-nb", NULL, NULL, NULL,
321 NULL, NULL, NULL, NULL,
323 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
324 .tcg_features = TCG_EXT3_FEATURES,
326 [FEAT_C000_0001_EDX] = {
327 .feat_names = {
328 NULL, NULL, "xstore", "xstore-en",
329 NULL, NULL, "xcrypt", "xcrypt-en",
330 "ace2", "ace2-en", "phe", "phe-en",
331 "pmm", "pmm-en", NULL, NULL,
332 NULL, NULL, NULL, NULL,
333 NULL, NULL, NULL, NULL,
334 NULL, NULL, NULL, NULL,
335 NULL, NULL, NULL, NULL,
337 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
338 .tcg_features = TCG_EXT4_FEATURES,
340 [FEAT_KVM] = {
341 .feat_names = {
342 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
343 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
344 NULL, NULL, NULL, NULL,
345 NULL, NULL, NULL, NULL,
346 NULL, NULL, NULL, NULL,
347 NULL, NULL, NULL, NULL,
348 "kvmclock-stable-bit", NULL, NULL, NULL,
349 NULL, NULL, NULL, NULL,
351 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
352 .tcg_features = TCG_KVM_FEATURES,
354 [FEAT_HYPERV_EAX] = {
355 .feat_names = {
356 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
357 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
358 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
359 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
360 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
361 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
362 NULL, NULL, NULL, NULL,
363 NULL, NULL, NULL, NULL,
364 NULL, NULL, NULL, NULL,
365 NULL, NULL, NULL, NULL,
366 NULL, NULL, NULL, NULL,
368 .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
370 [FEAT_HYPERV_EBX] = {
371 .feat_names = {
372 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
373 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
374 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
375 NULL /* hv_create_port */, NULL /* hv_connect_port */,
376 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
377 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
378 NULL, NULL,
379 NULL, NULL, NULL, NULL,
380 NULL, NULL, NULL, NULL,
381 NULL, NULL, NULL, NULL,
382 NULL, NULL, NULL, NULL,
384 .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
386 [FEAT_HYPERV_EDX] = {
387 .feat_names = {
388 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
389 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
390 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
391 NULL, NULL,
392 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
393 NULL, NULL, NULL, NULL,
394 NULL, NULL, NULL, NULL,
395 NULL, NULL, NULL, NULL,
396 NULL, NULL, NULL, NULL,
397 NULL, NULL, NULL, NULL,
399 .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
401 [FEAT_SVM] = {
402 .feat_names = {
403 "npt", "lbrv", "svm-lock", "nrip-save",
404 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
405 NULL, NULL, "pause-filter", NULL,
406 "pfthreshold", NULL, NULL, NULL,
407 NULL, NULL, NULL, NULL,
408 NULL, NULL, NULL, NULL,
409 NULL, NULL, NULL, NULL,
410 NULL, NULL, NULL, NULL,
412 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
413 .tcg_features = TCG_SVM_FEATURES,
415 [FEAT_7_0_EBX] = {
416 .feat_names = {
417 "fsgsbase", "tsc-adjust", NULL, "bmi1",
418 "hle", "avx2", NULL, "smep",
419 "bmi2", "erms", "invpcid", "rtm",
420 NULL, NULL, "mpx", NULL,
421 "avx512f", "avx512dq", "rdseed", "adx",
422 "smap", "avx512ifma", "pcommit", "clflushopt",
423 "clwb", NULL, "avx512pf", "avx512er",
424 "avx512cd", NULL, "avx512bw", "avx512vl",
426 .cpuid_eax = 7,
427 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
428 .cpuid_reg = R_EBX,
429 .tcg_features = TCG_7_0_EBX_FEATURES,
431 [FEAT_7_0_ECX] = {
432 .feat_names = {
433 NULL, "avx512vbmi", "umip", "pku",
434 "ospke", NULL, NULL, NULL,
435 NULL, NULL, NULL, NULL,
436 NULL, NULL, NULL, NULL,
437 NULL, NULL, NULL, NULL,
438 NULL, NULL, "rdpid", NULL,
439 NULL, NULL, NULL, NULL,
440 NULL, NULL, NULL, NULL,
442 .cpuid_eax = 7,
443 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
444 .cpuid_reg = R_ECX,
445 .tcg_features = TCG_7_0_ECX_FEATURES,
447 [FEAT_8000_0007_EDX] = {
448 .feat_names = {
449 NULL, NULL, NULL, NULL,
450 NULL, NULL, NULL, NULL,
451 "invtsc", NULL, NULL, NULL,
452 NULL, NULL, NULL, NULL,
453 NULL, NULL, NULL, NULL,
454 NULL, NULL, NULL, NULL,
455 NULL, NULL, NULL, NULL,
456 NULL, NULL, NULL, NULL,
458 .cpuid_eax = 0x80000007,
459 .cpuid_reg = R_EDX,
460 .tcg_features = TCG_APM_FEATURES,
461 .unmigratable_flags = CPUID_APM_INVTSC,
463 [FEAT_XSAVE] = {
464 .feat_names = {
465 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
466 NULL, NULL, NULL, NULL,
467 NULL, NULL, NULL, NULL,
468 NULL, NULL, NULL, NULL,
469 NULL, NULL, NULL, NULL,
470 NULL, NULL, NULL, NULL,
471 NULL, NULL, NULL, NULL,
472 NULL, NULL, NULL, NULL,
474 .cpuid_eax = 0xd,
475 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
476 .cpuid_reg = R_EAX,
477 .tcg_features = TCG_XSAVE_FEATURES,
479 [FEAT_6_EAX] = {
480 .feat_names = {
481 NULL, NULL, "arat", NULL,
482 NULL, NULL, NULL, NULL,
483 NULL, NULL, NULL, NULL,
484 NULL, NULL, NULL, NULL,
485 NULL, NULL, NULL, NULL,
486 NULL, NULL, NULL, NULL,
487 NULL, NULL, NULL, NULL,
488 NULL, NULL, NULL, NULL,
490 .cpuid_eax = 6, .cpuid_reg = R_EAX,
491 .tcg_features = TCG_6_EAX_FEATURES,
493 [FEAT_XSAVE_COMP_LO] = {
494 .cpuid_eax = 0xD,
495 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
496 .cpuid_reg = R_EAX,
497 .tcg_features = ~0U,
498 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
499 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
500 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
501 XSTATE_PKRU_MASK,
503 [FEAT_XSAVE_COMP_HI] = {
504 .cpuid_eax = 0xD,
505 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
506 .cpuid_reg = R_EDX,
507 .tcg_features = ~0U,
511 typedef struct X86RegisterInfo32 {
512 /* Name of register */
513 const char *name;
514 /* QAPI enum value register */
515 X86CPURegister32 qapi_enum;
516 } X86RegisterInfo32;
518 #define REGISTER(reg) \
519 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
520 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
521 REGISTER(EAX),
522 REGISTER(ECX),
523 REGISTER(EDX),
524 REGISTER(EBX),
525 REGISTER(ESP),
526 REGISTER(EBP),
527 REGISTER(ESI),
528 REGISTER(EDI),
530 #undef REGISTER
532 typedef struct ExtSaveArea {
533 uint32_t feature, bits;
534 uint32_t offset, size;
535 } ExtSaveArea;
537 static const ExtSaveArea x86_ext_save_areas[] = {
538 [XSTATE_FP_BIT] = {
539 /* x87 FP state component is always enabled if XSAVE is supported */
540 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
541 /* x87 state is in the legacy region of the XSAVE area */
542 .offset = 0,
543 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
545 [XSTATE_SSE_BIT] = {
546 /* SSE state component is always enabled if XSAVE is supported */
547 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
548 /* SSE state is in the legacy region of the XSAVE area */
549 .offset = 0,
550 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
552 [XSTATE_YMM_BIT] =
553 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
554 .offset = offsetof(X86XSaveArea, avx_state),
555 .size = sizeof(XSaveAVX) },
556 [XSTATE_BNDREGS_BIT] =
557 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
558 .offset = offsetof(X86XSaveArea, bndreg_state),
559 .size = sizeof(XSaveBNDREG) },
560 [XSTATE_BNDCSR_BIT] =
561 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
562 .offset = offsetof(X86XSaveArea, bndcsr_state),
563 .size = sizeof(XSaveBNDCSR) },
564 [XSTATE_OPMASK_BIT] =
565 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
566 .offset = offsetof(X86XSaveArea, opmask_state),
567 .size = sizeof(XSaveOpmask) },
568 [XSTATE_ZMM_Hi256_BIT] =
569 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
570 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
571 .size = sizeof(XSaveZMM_Hi256) },
572 [XSTATE_Hi16_ZMM_BIT] =
573 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
574 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
575 .size = sizeof(XSaveHi16_ZMM) },
576 [XSTATE_PKRU_BIT] =
577 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
578 .offset = offsetof(X86XSaveArea, pkru_state),
579 .size = sizeof(XSavePKRU) },
582 static uint32_t xsave_area_size(uint64_t mask)
584 int i;
585 uint64_t ret = 0;
587 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
588 const ExtSaveArea *esa = &x86_ext_save_areas[i];
589 if ((mask >> i) & 1) {
590 ret = MAX(ret, esa->offset + esa->size);
593 return ret;
596 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
598 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
599 cpu->env.features[FEAT_XSAVE_COMP_LO];
602 const char *get_register_name_32(unsigned int reg)
604 if (reg >= CPU_NB_REGS32) {
605 return NULL;
607 return x86_reg_info_32[reg].name;
611 * Returns the set of feature flags that are supported and migratable by
612 * QEMU, for a given FeatureWord.
614 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
616 FeatureWordInfo *wi = &feature_word_info[w];
617 uint32_t r = 0;
618 int i;
620 for (i = 0; i < 32; i++) {
621 uint32_t f = 1U << i;
623 /* If the feature name is known, it is implicitly considered migratable,
624 * unless it is explicitly set in unmigratable_flags */
625 if ((wi->migratable_flags & f) ||
626 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
627 r |= f;
630 return r;
633 void host_cpuid(uint32_t function, uint32_t count,
634 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
636 uint32_t vec[4];
638 #ifdef __x86_64__
639 asm volatile("cpuid"
640 : "=a"(vec[0]), "=b"(vec[1]),
641 "=c"(vec[2]), "=d"(vec[3])
642 : "0"(function), "c"(count) : "cc");
643 #elif defined(__i386__)
644 asm volatile("pusha \n\t"
645 "cpuid \n\t"
646 "mov %%eax, 0(%2) \n\t"
647 "mov %%ebx, 4(%2) \n\t"
648 "mov %%ecx, 8(%2) \n\t"
649 "mov %%edx, 12(%2) \n\t"
650 "popa"
651 : : "a"(function), "c"(count), "S"(vec)
652 : "memory", "cc");
653 #else
654 abort();
655 #endif
657 if (eax)
658 *eax = vec[0];
659 if (ebx)
660 *ebx = vec[1];
661 if (ecx)
662 *ecx = vec[2];
663 if (edx)
664 *edx = vec[3];
667 /* CPU class name definitions: */
669 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
670 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
672 /* Return type name for a given CPU model name
673 * Caller is responsible for freeing the returned string.
675 static char *x86_cpu_type_name(const char *model_name)
677 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
680 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
682 ObjectClass *oc;
683 char *typename;
685 if (cpu_model == NULL) {
686 return NULL;
689 typename = x86_cpu_type_name(cpu_model);
690 oc = object_class_by_name(typename);
691 g_free(typename);
692 return oc;
695 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
697 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
698 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
699 return g_strndup(class_name,
700 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
703 struct X86CPUDefinition {
704 const char *name;
705 uint32_t level;
706 uint32_t xlevel;
707 /* vendor is zero-terminated, 12 character ASCII string */
708 char vendor[CPUID_VENDOR_SZ + 1];
709 int family;
710 int model;
711 int stepping;
712 FeatureWordArray features;
713 char model_id[48];
716 static X86CPUDefinition builtin_x86_defs[] = {
718 .name = "qemu64",
719 .level = 0xd,
720 .vendor = CPUID_VENDOR_AMD,
721 .family = 6,
722 .model = 6,
723 .stepping = 3,
724 .features[FEAT_1_EDX] =
725 PPRO_FEATURES |
726 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
727 CPUID_PSE36,
728 .features[FEAT_1_ECX] =
729 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
730 .features[FEAT_8000_0001_EDX] =
731 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
732 .features[FEAT_8000_0001_ECX] =
733 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
734 .xlevel = 0x8000000A,
735 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
738 .name = "phenom",
739 .level = 5,
740 .vendor = CPUID_VENDOR_AMD,
741 .family = 16,
742 .model = 2,
743 .stepping = 3,
744 /* Missing: CPUID_HT */
745 .features[FEAT_1_EDX] =
746 PPRO_FEATURES |
747 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
748 CPUID_PSE36 | CPUID_VME,
749 .features[FEAT_1_ECX] =
750 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
751 CPUID_EXT_POPCNT,
752 .features[FEAT_8000_0001_EDX] =
753 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
754 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
755 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
756 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
757 CPUID_EXT3_CR8LEG,
758 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
759 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
760 .features[FEAT_8000_0001_ECX] =
761 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
762 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
763 /* Missing: CPUID_SVM_LBRV */
764 .features[FEAT_SVM] =
765 CPUID_SVM_NPT,
766 .xlevel = 0x8000001A,
767 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
770 .name = "core2duo",
771 .level = 10,
772 .vendor = CPUID_VENDOR_INTEL,
773 .family = 6,
774 .model = 15,
775 .stepping = 11,
776 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
777 .features[FEAT_1_EDX] =
778 PPRO_FEATURES |
779 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
780 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
781 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
782 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
783 .features[FEAT_1_ECX] =
784 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
785 CPUID_EXT_CX16,
786 .features[FEAT_8000_0001_EDX] =
787 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
788 .features[FEAT_8000_0001_ECX] =
789 CPUID_EXT3_LAHF_LM,
790 .xlevel = 0x80000008,
791 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
794 .name = "kvm64",
795 .level = 0xd,
796 .vendor = CPUID_VENDOR_INTEL,
797 .family = 15,
798 .model = 6,
799 .stepping = 1,
800 /* Missing: CPUID_HT */
801 .features[FEAT_1_EDX] =
802 PPRO_FEATURES | CPUID_VME |
803 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
804 CPUID_PSE36,
805 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
806 .features[FEAT_1_ECX] =
807 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
808 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
809 .features[FEAT_8000_0001_EDX] =
810 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
811 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
812 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
813 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
814 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
815 .features[FEAT_8000_0001_ECX] =
817 .xlevel = 0x80000008,
818 .model_id = "Common KVM processor"
821 .name = "qemu32",
822 .level = 4,
823 .vendor = CPUID_VENDOR_INTEL,
824 .family = 6,
825 .model = 6,
826 .stepping = 3,
827 .features[FEAT_1_EDX] =
828 PPRO_FEATURES,
829 .features[FEAT_1_ECX] =
830 CPUID_EXT_SSE3,
831 .xlevel = 0x80000004,
832 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
835 .name = "kvm32",
836 .level = 5,
837 .vendor = CPUID_VENDOR_INTEL,
838 .family = 15,
839 .model = 6,
840 .stepping = 1,
841 .features[FEAT_1_EDX] =
842 PPRO_FEATURES | CPUID_VME |
843 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
844 .features[FEAT_1_ECX] =
845 CPUID_EXT_SSE3,
846 .features[FEAT_8000_0001_ECX] =
848 .xlevel = 0x80000008,
849 .model_id = "Common 32-bit KVM processor"
852 .name = "coreduo",
853 .level = 10,
854 .vendor = CPUID_VENDOR_INTEL,
855 .family = 6,
856 .model = 14,
857 .stepping = 8,
858 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
859 .features[FEAT_1_EDX] =
860 PPRO_FEATURES | CPUID_VME |
861 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
862 CPUID_SS,
863 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
864 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
865 .features[FEAT_1_ECX] =
866 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
867 .features[FEAT_8000_0001_EDX] =
868 CPUID_EXT2_NX,
869 .xlevel = 0x80000008,
870 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
873 .name = "486",
874 .level = 1,
875 .vendor = CPUID_VENDOR_INTEL,
876 .family = 4,
877 .model = 8,
878 .stepping = 0,
879 .features[FEAT_1_EDX] =
880 I486_FEATURES,
881 .xlevel = 0,
884 .name = "pentium",
885 .level = 1,
886 .vendor = CPUID_VENDOR_INTEL,
887 .family = 5,
888 .model = 4,
889 .stepping = 3,
890 .features[FEAT_1_EDX] =
891 PENTIUM_FEATURES,
892 .xlevel = 0,
895 .name = "pentium2",
896 .level = 2,
897 .vendor = CPUID_VENDOR_INTEL,
898 .family = 6,
899 .model = 5,
900 .stepping = 2,
901 .features[FEAT_1_EDX] =
902 PENTIUM2_FEATURES,
903 .xlevel = 0,
906 .name = "pentium3",
907 .level = 3,
908 .vendor = CPUID_VENDOR_INTEL,
909 .family = 6,
910 .model = 7,
911 .stepping = 3,
912 .features[FEAT_1_EDX] =
913 PENTIUM3_FEATURES,
914 .xlevel = 0,
917 .name = "athlon",
918 .level = 2,
919 .vendor = CPUID_VENDOR_AMD,
920 .family = 6,
921 .model = 2,
922 .stepping = 3,
923 .features[FEAT_1_EDX] =
924 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
925 CPUID_MCA,
926 .features[FEAT_8000_0001_EDX] =
927 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
928 .xlevel = 0x80000008,
929 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
932 .name = "n270",
933 .level = 10,
934 .vendor = CPUID_VENDOR_INTEL,
935 .family = 6,
936 .model = 28,
937 .stepping = 2,
938 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
939 .features[FEAT_1_EDX] =
940 PPRO_FEATURES |
941 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
942 CPUID_ACPI | CPUID_SS,
943 /* Some CPUs got no CPUID_SEP */
944 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
945 * CPUID_EXT_XTPR */
946 .features[FEAT_1_ECX] =
947 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
948 CPUID_EXT_MOVBE,
949 .features[FEAT_8000_0001_EDX] =
950 CPUID_EXT2_NX,
951 .features[FEAT_8000_0001_ECX] =
952 CPUID_EXT3_LAHF_LM,
953 .xlevel = 0x80000008,
954 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
957 .name = "Conroe",
958 .level = 10,
959 .vendor = CPUID_VENDOR_INTEL,
960 .family = 6,
961 .model = 15,
962 .stepping = 3,
963 .features[FEAT_1_EDX] =
964 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
965 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
966 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
967 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
968 CPUID_DE | CPUID_FP87,
969 .features[FEAT_1_ECX] =
970 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
971 .features[FEAT_8000_0001_EDX] =
972 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
973 .features[FEAT_8000_0001_ECX] =
974 CPUID_EXT3_LAHF_LM,
975 .xlevel = 0x80000008,
976 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
979 .name = "Penryn",
980 .level = 10,
981 .vendor = CPUID_VENDOR_INTEL,
982 .family = 6,
983 .model = 23,
984 .stepping = 3,
985 .features[FEAT_1_EDX] =
986 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
987 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
988 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
989 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
990 CPUID_DE | CPUID_FP87,
991 .features[FEAT_1_ECX] =
992 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
993 CPUID_EXT_SSE3,
994 .features[FEAT_8000_0001_EDX] =
995 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
996 .features[FEAT_8000_0001_ECX] =
997 CPUID_EXT3_LAHF_LM,
998 .xlevel = 0x80000008,
999 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1002 .name = "Nehalem",
1003 .level = 11,
1004 .vendor = CPUID_VENDOR_INTEL,
1005 .family = 6,
1006 .model = 26,
1007 .stepping = 3,
1008 .features[FEAT_1_EDX] =
1009 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1010 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1011 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1012 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1013 CPUID_DE | CPUID_FP87,
1014 .features[FEAT_1_ECX] =
1015 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1016 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1017 .features[FEAT_8000_0001_EDX] =
1018 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1019 .features[FEAT_8000_0001_ECX] =
1020 CPUID_EXT3_LAHF_LM,
1021 .xlevel = 0x80000008,
1022 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1025 .name = "Westmere",
1026 .level = 11,
1027 .vendor = CPUID_VENDOR_INTEL,
1028 .family = 6,
1029 .model = 44,
1030 .stepping = 1,
1031 .features[FEAT_1_EDX] =
1032 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1033 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1034 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1035 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1036 CPUID_DE | CPUID_FP87,
1037 .features[FEAT_1_ECX] =
1038 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1039 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1040 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1041 .features[FEAT_8000_0001_EDX] =
1042 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1043 .features[FEAT_8000_0001_ECX] =
1044 CPUID_EXT3_LAHF_LM,
1045 .features[FEAT_6_EAX] =
1046 CPUID_6_EAX_ARAT,
1047 .xlevel = 0x80000008,
1048 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1051 .name = "SandyBridge",
1052 .level = 0xd,
1053 .vendor = CPUID_VENDOR_INTEL,
1054 .family = 6,
1055 .model = 42,
1056 .stepping = 1,
1057 .features[FEAT_1_EDX] =
1058 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1059 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1060 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1061 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1062 CPUID_DE | CPUID_FP87,
1063 .features[FEAT_1_ECX] =
1064 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1065 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1066 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1067 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1068 CPUID_EXT_SSE3,
1069 .features[FEAT_8000_0001_EDX] =
1070 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1071 CPUID_EXT2_SYSCALL,
1072 .features[FEAT_8000_0001_ECX] =
1073 CPUID_EXT3_LAHF_LM,
1074 .features[FEAT_XSAVE] =
1075 CPUID_XSAVE_XSAVEOPT,
1076 .features[FEAT_6_EAX] =
1077 CPUID_6_EAX_ARAT,
1078 .xlevel = 0x80000008,
1079 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1082 .name = "IvyBridge",
1083 .level = 0xd,
1084 .vendor = CPUID_VENDOR_INTEL,
1085 .family = 6,
1086 .model = 58,
1087 .stepping = 9,
1088 .features[FEAT_1_EDX] =
1089 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1090 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1091 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1092 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1093 CPUID_DE | CPUID_FP87,
1094 .features[FEAT_1_ECX] =
1095 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1096 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1097 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1098 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1099 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1100 .features[FEAT_7_0_EBX] =
1101 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1102 CPUID_7_0_EBX_ERMS,
1103 .features[FEAT_8000_0001_EDX] =
1104 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1105 CPUID_EXT2_SYSCALL,
1106 .features[FEAT_8000_0001_ECX] =
1107 CPUID_EXT3_LAHF_LM,
1108 .features[FEAT_XSAVE] =
1109 CPUID_XSAVE_XSAVEOPT,
1110 .features[FEAT_6_EAX] =
1111 CPUID_6_EAX_ARAT,
1112 .xlevel = 0x80000008,
1113 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1116 .name = "Haswell-noTSX",
1117 .level = 0xd,
1118 .vendor = CPUID_VENDOR_INTEL,
1119 .family = 6,
1120 .model = 60,
1121 .stepping = 1,
1122 .features[FEAT_1_EDX] =
1123 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1124 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1125 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1126 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1127 CPUID_DE | CPUID_FP87,
1128 .features[FEAT_1_ECX] =
1129 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1130 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1131 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1132 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1133 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1134 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1135 .features[FEAT_8000_0001_EDX] =
1136 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1137 CPUID_EXT2_SYSCALL,
1138 .features[FEAT_8000_0001_ECX] =
1139 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1140 .features[FEAT_7_0_EBX] =
1141 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1142 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1143 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1144 .features[FEAT_XSAVE] =
1145 CPUID_XSAVE_XSAVEOPT,
1146 .features[FEAT_6_EAX] =
1147 CPUID_6_EAX_ARAT,
1148 .xlevel = 0x80000008,
1149 .model_id = "Intel Core Processor (Haswell, no TSX)",
1150 }, {
1151 .name = "Haswell",
1152 .level = 0xd,
1153 .vendor = CPUID_VENDOR_INTEL,
1154 .family = 6,
1155 .model = 60,
1156 .stepping = 1,
1157 .features[FEAT_1_EDX] =
1158 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1159 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1160 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1161 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1162 CPUID_DE | CPUID_FP87,
1163 .features[FEAT_1_ECX] =
1164 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1165 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1166 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1167 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1168 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1169 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1170 .features[FEAT_8000_0001_EDX] =
1171 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1172 CPUID_EXT2_SYSCALL,
1173 .features[FEAT_8000_0001_ECX] =
1174 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1175 .features[FEAT_7_0_EBX] =
1176 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1177 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1178 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1179 CPUID_7_0_EBX_RTM,
1180 .features[FEAT_XSAVE] =
1181 CPUID_XSAVE_XSAVEOPT,
1182 .features[FEAT_6_EAX] =
1183 CPUID_6_EAX_ARAT,
1184 .xlevel = 0x80000008,
1185 .model_id = "Intel Core Processor (Haswell)",
1188 .name = "Broadwell-noTSX",
1189 .level = 0xd,
1190 .vendor = CPUID_VENDOR_INTEL,
1191 .family = 6,
1192 .model = 61,
1193 .stepping = 2,
1194 .features[FEAT_1_EDX] =
1195 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1196 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1197 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1198 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1199 CPUID_DE | CPUID_FP87,
1200 .features[FEAT_1_ECX] =
1201 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1202 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1203 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1204 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1205 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1206 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1207 .features[FEAT_8000_0001_EDX] =
1208 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1209 CPUID_EXT2_SYSCALL,
1210 .features[FEAT_8000_0001_ECX] =
1211 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1212 .features[FEAT_7_0_EBX] =
1213 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1214 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1215 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1216 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1217 CPUID_7_0_EBX_SMAP,
1218 .features[FEAT_XSAVE] =
1219 CPUID_XSAVE_XSAVEOPT,
1220 .features[FEAT_6_EAX] =
1221 CPUID_6_EAX_ARAT,
1222 .xlevel = 0x80000008,
1223 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1226 .name = "Broadwell",
1227 .level = 0xd,
1228 .vendor = CPUID_VENDOR_INTEL,
1229 .family = 6,
1230 .model = 61,
1231 .stepping = 2,
1232 .features[FEAT_1_EDX] =
1233 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1234 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1235 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1236 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1237 CPUID_DE | CPUID_FP87,
1238 .features[FEAT_1_ECX] =
1239 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1240 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1241 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1242 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1243 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1244 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1245 .features[FEAT_8000_0001_EDX] =
1246 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1247 CPUID_EXT2_SYSCALL,
1248 .features[FEAT_8000_0001_ECX] =
1249 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1250 .features[FEAT_7_0_EBX] =
1251 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1252 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1253 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1254 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1255 CPUID_7_0_EBX_SMAP,
1256 .features[FEAT_XSAVE] =
1257 CPUID_XSAVE_XSAVEOPT,
1258 .features[FEAT_6_EAX] =
1259 CPUID_6_EAX_ARAT,
1260 .xlevel = 0x80000008,
1261 .model_id = "Intel Core Processor (Broadwell)",
1264 .name = "Skylake-Client",
1265 .level = 0xd,
1266 .vendor = CPUID_VENDOR_INTEL,
1267 .family = 6,
1268 .model = 94,
1269 .stepping = 3,
1270 .features[FEAT_1_EDX] =
1271 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1272 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1273 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1274 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1275 CPUID_DE | CPUID_FP87,
1276 .features[FEAT_1_ECX] =
1277 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1278 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1279 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1280 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1281 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1282 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1283 .features[FEAT_8000_0001_EDX] =
1284 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1285 CPUID_EXT2_SYSCALL,
1286 .features[FEAT_8000_0001_ECX] =
1287 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1288 .features[FEAT_7_0_EBX] =
1289 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1290 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1291 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1292 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1293 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1294 /* Missing: XSAVES (not supported by some Linux versions,
1295 * including v4.1 to v4.6).
1296 * KVM doesn't yet expose any XSAVES state save component,
1297 * and the only one defined in Skylake (processor tracing)
1298 * probably will block migration anyway.
1300 .features[FEAT_XSAVE] =
1301 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1302 CPUID_XSAVE_XGETBV1,
1303 .features[FEAT_6_EAX] =
1304 CPUID_6_EAX_ARAT,
1305 .xlevel = 0x80000008,
1306 .model_id = "Intel Core Processor (Skylake)",
1309 .name = "Opteron_G1",
1310 .level = 5,
1311 .vendor = CPUID_VENDOR_AMD,
1312 .family = 15,
1313 .model = 6,
1314 .stepping = 1,
1315 .features[FEAT_1_EDX] =
1316 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1317 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1318 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1319 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1320 CPUID_DE | CPUID_FP87,
1321 .features[FEAT_1_ECX] =
1322 CPUID_EXT_SSE3,
1323 .features[FEAT_8000_0001_EDX] =
1324 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1325 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1326 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1327 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1328 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1329 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1330 .xlevel = 0x80000008,
1331 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1334 .name = "Opteron_G2",
1335 .level = 5,
1336 .vendor = CPUID_VENDOR_AMD,
1337 .family = 15,
1338 .model = 6,
1339 .stepping = 1,
1340 .features[FEAT_1_EDX] =
1341 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1342 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1343 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1344 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1345 CPUID_DE | CPUID_FP87,
1346 .features[FEAT_1_ECX] =
1347 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1348 /* Missing: CPUID_EXT2_RDTSCP */
1349 .features[FEAT_8000_0001_EDX] =
1350 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1351 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1352 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1353 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1354 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1355 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1356 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1357 .features[FEAT_8000_0001_ECX] =
1358 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1359 .xlevel = 0x80000008,
1360 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1363 .name = "Opteron_G3",
1364 .level = 5,
1365 .vendor = CPUID_VENDOR_AMD,
1366 .family = 16,
1367 .model = 2,
1368 .stepping = 3,
1369 .features[FEAT_1_EDX] =
1370 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1371 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1372 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1373 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1374 CPUID_DE | CPUID_FP87,
1375 .features[FEAT_1_ECX] =
1376 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1377 CPUID_EXT_SSE3,
1378 /* Missing: CPUID_EXT2_RDTSCP */
1379 .features[FEAT_8000_0001_EDX] =
1380 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1381 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1382 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1383 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1384 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1385 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1386 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1387 .features[FEAT_8000_0001_ECX] =
1388 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1389 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1390 .xlevel = 0x80000008,
1391 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1394 .name = "Opteron_G4",
1395 .level = 0xd,
1396 .vendor = CPUID_VENDOR_AMD,
1397 .family = 21,
1398 .model = 1,
1399 .stepping = 2,
1400 .features[FEAT_1_EDX] =
1401 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1402 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1403 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1404 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1405 CPUID_DE | CPUID_FP87,
1406 .features[FEAT_1_ECX] =
1407 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1408 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1409 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1410 CPUID_EXT_SSE3,
1411 /* Missing: CPUID_EXT2_RDTSCP */
1412 .features[FEAT_8000_0001_EDX] =
1413 CPUID_EXT2_LM |
1414 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1415 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1416 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1417 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1418 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1419 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1420 .features[FEAT_8000_0001_ECX] =
1421 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1422 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1423 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1424 CPUID_EXT3_LAHF_LM,
1425 /* no xsaveopt! */
1426 .xlevel = 0x8000001A,
1427 .model_id = "AMD Opteron 62xx class CPU",
1430 .name = "Opteron_G5",
1431 .level = 0xd,
1432 .vendor = CPUID_VENDOR_AMD,
1433 .family = 21,
1434 .model = 2,
1435 .stepping = 0,
1436 .features[FEAT_1_EDX] =
1437 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1438 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1439 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1440 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1441 CPUID_DE | CPUID_FP87,
1442 .features[FEAT_1_ECX] =
1443 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1444 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1445 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1446 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1447 /* Missing: CPUID_EXT2_RDTSCP */
1448 .features[FEAT_8000_0001_EDX] =
1449 CPUID_EXT2_LM |
1450 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1451 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1452 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1453 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1454 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1455 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1456 .features[FEAT_8000_0001_ECX] =
1457 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1458 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1459 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1460 CPUID_EXT3_LAHF_LM,
1461 /* no xsaveopt! */
1462 .xlevel = 0x8000001A,
1463 .model_id = "AMD Opteron 63xx class CPU",
/* Simple property-name/value string pair, used for the per-accelerator
 * CPU property default tables below. */
1467 typedef struct PropValue {
1468 const char *prop, *value;
1469 } PropValue;
1471 /* KVM-specific features that are automatically added/removed
1472 * from all CPU models when KVM is enabled.  Terminated by a
 * { NULL, NULL } entry. */
1474 static PropValue kvm_default_props[] = {
1475 { "kvmclock", "on" },
1476 { "kvm-nopiodelay", "on" },
1477 { "kvm-asyncpf", "on" },
1478 { "kvm-steal-time", "on" },
1479 { "kvm-pv-eoi", "on" },
1480 { "kvmclock-stable-bit", "on" },
1481 { "x2apic", "on" },
1482 { "acpi", "off" },
1483 { "monitor", "off" },
1484 { "svm", "off" },
1485 { NULL, NULL },
1488 /* TCG-specific defaults that override all CPU models when using TCG.
 * Terminated by a { NULL, NULL } entry. */
1490 static PropValue tcg_default_props[] = {
1491 { "vme", "off" },
1492 { NULL, NULL },
1496 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1498 PropValue *pv;
1499 for (pv = kvm_default_props; pv->prop; pv++) {
1500 if (!strcmp(pv->prop, prop)) {
1501 pv->value = value;
1502 break;
1506 /* It is valid to call this function only for properties that
1507 * are already present in the kvm_default_props table.
1509 assert(pv->prop);
1512 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1513 bool migratable_only);
1515 #ifdef CONFIG_KVM
/* Return true when KVM reports LMCE (local machine-check exception)
 * support via the KVM_X86_GET_MCE_CAP_SUPPORTED ioctl. */
1517 static bool lmce_supported(void)
1519 uint64_t mce_cap;
1521 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
/* Treat an ioctl failure as "not supported". */
1522 return false;
1525 return !!(mce_cap & MCG_LMCE_P);
/* Copy the host CPU's model-id string into STR, read from CPUID leaves
 * 0x80000002..0x80000004.  STR must have room for 48 bytes (the string
 * may not be NUL-terminated).  Always returns 0.
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t regs[4];
    int i;

    for (i = 0; i < 3; i++) {
        regs[0] = regs[1] = regs[2] = regs[3] = 0;
        host_cpuid(0x80000002 + i, 0, &regs[0], &regs[1], &regs[2], &regs[3]);
        /* Each leaf contributes 16 bytes: EAX, EBX, ECX, EDX in order. */
        memcpy(str + i * 16, regs, sizeof(regs));
    }
    return 0;
}
1543 static X86CPUDefinition host_cpudef;
/* qdev properties specific to the "host" CPU model. */
1545 static Property host_x86_cpu_properties[] = {
1546 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1547 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
1548 DEFINE_PROP_END_OF_LIST()
1551 /* class_init for the "host" CPU model
1553 * This function may be called before KVM is initialized.  It fills
 * host_cpudef with vendor/family/model/stepping/model-id read directly
 * from the host via CPUID. */
1555 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1557 DeviceClass *dc = DEVICE_CLASS(oc);
1558 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1559 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
/* The "host" model cannot work without KVM. */
1561 xcc->kvm_required = true;
/* Leaf 0: vendor string lives in EBX/EDX/ECX (in that order). */
1563 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1564 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
/* Leaf 1 EAX: decode family (base + extended), model (base + extended
 * nibble) and stepping fields. */
1566 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1567 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1568 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1569 host_cpudef.stepping = eax & 0x0F;
1571 cpu_x86_fill_model_id(host_cpudef.model_id);
1573 xcc->cpu_def = &host_cpudef;
1574 xcc->model_description =
1575 "KVM processor with all supported host features "
1576 "(only available in KVM mode)";
1578 /* level, xlevel, xlevel2, and the feature words are initialized on
1579 * instance_init, because they require KVM to be initialized. */
1582 dc->props = host_x86_cpu_properties;
1583 /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
1584 dc->cannot_destroy_with_object_finalize_yet = true;
/* instance_init for the "host" CPU model: query KVM for the minimum
 * CPUID levels and enable lmce/pmu where available. */
1587 static void host_x86_cpu_initfn(Object *obj)
1589 X86CPU *cpu = X86_CPU(obj);
1590 CPUX86State *env = &cpu->env;
1591 KVMState *s = kvm_state;
1593 /* We can't fill the features array here because we don't know yet if
1594 * "migratable" is true or false. */
1596 cpu->host_features = true;
1598 /* If KVM is disabled, x86_cpu_realizefn() will report an error later */
1599 if (kvm_enabled()) {
1600 env->cpuid_min_level =
1601 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1602 env->cpuid_min_xlevel =
1603 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1604 env->cpuid_min_xlevel2 =
1605 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1607 if (lmce_supported()) {
1608 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
/* The host model always exposes a PMU. */
1612 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
/* QOM type registration for the KVM-only "host" CPU model. */
1615 static const TypeInfo host_x86_cpu_type_info = {
1616 .name = X86_CPU_TYPE_NAME("host"),
1617 .parent = TYPE_X86_CPU,
1618 .instance_init = host_x86_cpu_initfn,
1619 .class_init = host_x86_cpu_class_init,
1622 #endif
1624 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1626 FeatureWordInfo *f = &feature_word_info[w];
1627 int i;
1629 for (i = 0; i < 32; ++i) {
1630 if ((1UL << i) & mask) {
1631 const char *reg = get_register_name_32(f->cpuid_reg);
1632 assert(reg);
1633 fprintf(stderr, "warning: %s doesn't support requested feature: "
1634 "CPUID.%02XH:%s%s%s [bit %d]\n",
1635 kvm_enabled() ? "host" : "TCG",
1636 f->cpuid_eax, reg,
1637 f->feat_names[i] ? "." : "",
1638 f->feat_names[i] ? f->feat_names[i] : "", i);
1643 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1644 const char *name, void *opaque,
1645 Error **errp)
1647 X86CPU *cpu = X86_CPU(obj);
1648 CPUX86State *env = &cpu->env;
1649 int64_t value;
1651 value = (env->cpuid_version >> 8) & 0xf;
1652 if (value == 0xf) {
1653 value += (env->cpuid_version >> 20) & 0xff;
1655 visit_type_int(v, name, &value, errp);
/* QOM setter for "family".  Encodes the value into the base family
 * field (bits 11:8) and, for values above 0xf, the extended family
 * field (bits 27:20) of cpuid_version. */
1658 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1659 const char *name, void *opaque,
1660 Error **errp)
1662 X86CPU *cpu = X86_CPU(obj);
1663 CPUX86State *env = &cpu->env;
1664 const int64_t min = 0;
/* Largest encodable family: extended field max (0xff) + base max (0xf). */
1665 const int64_t max = 0xff + 0xf;
1666 Error *local_err = NULL;
1667 int64_t value;
1669 visit_type_int(v, name, &value, &local_err);
1670 if (local_err) {
1671 error_propagate(errp, local_err);
1672 return;
1674 if (value < min || value > max) {
1675 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1676 name ? name : "null", value, min, max);
1677 return;
/* Clear both family fields, then re-encode. */
1680 env->cpuid_version &= ~0xff00f00;
1681 if (value > 0x0f) {
/* Base field saturates at 0xf; the remainder goes into the
 * extended family field. */
1682 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1683 } else {
1684 env->cpuid_version |= value << 8;
1688 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1689 const char *name, void *opaque,
1690 Error **errp)
1692 X86CPU *cpu = X86_CPU(obj);
1693 CPUX86State *env = &cpu->env;
1694 int64_t value;
1696 value = (env->cpuid_version >> 4) & 0xf;
1697 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1698 visit_type_int(v, name, &value, errp);
/* QOM setter for "model".  Splits the value into the base model nibble
 * (bits 7:4) and the extended-model nibble (bits 19:16) of
 * cpuid_version. */
1701 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1702 const char *name, void *opaque,
1703 Error **errp)
1705 X86CPU *cpu = X86_CPU(obj);
1706 CPUX86State *env = &cpu->env;
1707 const int64_t min = 0;
1708 const int64_t max = 0xff;
1709 Error *local_err = NULL;
1710 int64_t value;
1712 visit_type_int(v, name, &value, &local_err);
1713 if (local_err) {
1714 error_propagate(errp, local_err);
1715 return;
1717 if (value < min || value > max) {
1718 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1719 name ? name : "null", value, min, max);
1720 return;
/* Clear both model nibbles, then re-encode low/high nibbles. */
1723 env->cpuid_version &= ~0xf00f0;
1724 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1727 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1728 const char *name, void *opaque,
1729 Error **errp)
1731 X86CPU *cpu = X86_CPU(obj);
1732 CPUX86State *env = &cpu->env;
1733 int64_t value;
1735 value = env->cpuid_version & 0xf;
1736 visit_type_int(v, name, &value, errp);
/* QOM setter for "stepping".  Accepts 0..0xf and writes the low nibble
 * of cpuid_version. */
1739 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1740 const char *name, void *opaque,
1741 Error **errp)
1743 X86CPU *cpu = X86_CPU(obj);
1744 CPUX86State *env = &cpu->env;
1745 const int64_t min = 0;
1746 const int64_t max = 0xf;
1747 Error *local_err = NULL;
1748 int64_t value;
1750 visit_type_int(v, name, &value, &local_err);
1751 if (local_err) {
1752 error_propagate(errp, local_err);
1753 return;
1755 if (value < min || value > max) {
1756 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1757 name ? name : "null", value, min, max);
1758 return;
/* Replace the stepping nibble. */
1761 env->cpuid_version &= ~0xf;
1762 env->cpuid_version |= value & 0xf;
1765 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1767 X86CPU *cpu = X86_CPU(obj);
1768 CPUX86State *env = &cpu->env;
1769 char *value;
1771 value = g_malloc(CPUID_VENDOR_SZ + 1);
1772 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1773 env->cpuid_vendor3);
1774 return value;
/* QOM setter for "vendor".  Requires exactly CPUID_VENDOR_SZ (12)
 * characters; packs them 4 bytes per 32-bit vendor word, little-endian
 * byte order within each word. */
1777 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1778 Error **errp)
1780 X86CPU *cpu = X86_CPU(obj);
1781 CPUX86State *env = &cpu->env;
1782 int i;
1784 if (strlen(value) != CPUID_VENDOR_SZ) {
1785 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1786 return;
1789 env->cpuid_vendor1 = 0;
1790 env->cpuid_vendor2 = 0;
1791 env->cpuid_vendor3 = 0;
1792 for (i = 0; i < 4; i++) {
1793 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1794 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1795 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1799 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1801 X86CPU *cpu = X86_CPU(obj);
1802 CPUX86State *env = &cpu->env;
1803 char *value;
1804 int i;
1806 value = g_malloc(48 + 1);
1807 for (i = 0; i < 48; i++) {
1808 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1810 value[48] = '\0';
1811 return value;
/* QOM setter for "model-id".  Packs up to 48 characters of MODEL_ID
 * into the cpuid_model word array, NUL-padding to the full 48 bytes.
 * A NULL argument is treated as the empty string. */
1814 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1815 Error **errp)
1817 X86CPU *cpu = X86_CPU(obj);
1818 CPUX86State *env = &cpu->env;
1819 int c, len, i;
1821 if (model_id == NULL) {
1822 model_id = "";
1824 len = strlen(model_id);
1825 memset(env->cpuid_model, 0, 48);
1826 for (i = 0; i < 48; i++) {
1827 if (i >= len) {
1828 c = '\0';
1829 } else {
1830 c = (uint8_t)model_id[i];
/* 4 characters per 32-bit word, low byte first. */
1832 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1836 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1837 void *opaque, Error **errp)
1839 X86CPU *cpu = X86_CPU(obj);
1840 int64_t value;
1842 value = cpu->env.tsc_khz * 1000;
1843 visit_type_int(v, name, &value, errp);
/* QOM setter for "tsc-frequency".  The property value is in Hz; it is
 * stored in kHz (integer division truncates sub-kHz remainders). */
1846 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1847 void *opaque, Error **errp)
1849 X86CPU *cpu = X86_CPU(obj);
1850 const int64_t min = 0;
1851 const int64_t max = INT64_MAX;
1852 Error *local_err = NULL;
1853 int64_t value;
1855 visit_type_int(v, name, &value, &local_err);
1856 if (local_err) {
1857 error_propagate(errp, local_err);
1858 return;
1860 if (value < min || value > max) {
1861 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1862 name ? name : "null", value, min, max);
1863 return;
/* Record both the effective and the user-requested frequency. */
1866 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
1869 /* Generic getter for "feature-words" and "filtered-features" properties */
/* OPAQUE points at the feature-word array (uint32_t[FEATURE_WORDS]) to
 * expose.  Builds a linked list of X86CPUFeatureWordInfo on the stack
 * and hands it to the visitor. */
1870 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
1871 const char *name, void *opaque,
1872 Error **errp)
1874 uint32_t *array = (uint32_t *)opaque;
1875 FeatureWord w;
1876 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1877 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1878 X86CPUFeatureWordInfoList *list = NULL;
1880 for (w = 0; w < FEATURE_WORDS; w++) {
1881 FeatureWordInfo *wi = &feature_word_info[w];
1882 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1883 qwi->cpuid_input_eax = wi->cpuid_eax;
1884 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1885 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1886 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1887 qwi->features = array[w];
1889 /* List will be in reverse order, but order shouldn't matter */
1890 list_entries[w].next = list;
1891 list_entries[w].value = &word_infos[w];
1892 list = &list_entries[w];
/* The visitor serializes the list before this frame is torn down, so
 * the stack-allocated entries are safe here. */
1895 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
1898 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1899 void *opaque, Error **errp)
1901 X86CPU *cpu = X86_CPU(obj);
1902 int64_t value = cpu->hyperv_spinlock_attempts;
1904 visit_type_int(v, name, &value, errp);
/* QOM setter for "hv-spinlocks".  Rejects values below 0xFFF or above
 * UINT_MAX. */
1907 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1908 void *opaque, Error **errp)
1910 const int64_t min = 0xFFF;
1911 const int64_t max = UINT_MAX;
1912 X86CPU *cpu = X86_CPU(obj);
1913 Error *err = NULL;
1914 int64_t value;
1916 visit_type_int(v, name, &value, &err);
1917 if (err) {
1918 error_propagate(errp, err);
1919 return;
1922 if (value < min || value > max) {
1923 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1924 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1925 object_get_typename(obj), name ? name : "null",
1926 value, min, max);
1927 return;
1929 cpu->hyperv_spinlock_attempts = value;
/* Property type glue for "hv-spinlocks" (custom get/set above). */
1932 static PropertyInfo qdev_prop_spinlocks = {
1933 .name = "int",
1934 .get = x86_get_hv_spinlocks,
1935 .set = x86_set_hv_spinlocks,
/* Convert all '_' in a feature string option name to '-', in place, to
 * make the feature name conform to the QOM property naming rule, which
 * uses '-' instead of '_'.
 */
static inline void feat2prop(char *s)
{
    char *p = strchr(s, '_');

    while (p) {
        *p = '-';
        p = strchr(p + 1, '_');
    }
}
1948 /* Return the feature property name for a feature flag bit */
/* May return NULL when the bit has no registered name. */
1949 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
1951 /* XSAVE components are automatically enabled by other features,
1952 * so return the original feature name instead */
1954 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
1955 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
1957 if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
1958 x86_ext_save_areas[comp].bits) {
/* Redirect to the feature word/bit that enables this component. */
1959 w = x86_ext_save_areas[comp].feature;
1960 bitnr = ctz32(x86_ext_save_areas[comp].bits);
1964 assert(bitnr < 32);
1965 assert(w < FEATURE_WORDS);
1966 return feature_word_info[w].feat_names[bitnr];
1969 /* Compatibility hack to maintain legacy +-feat semantic,
1970 * where +-feat overwrites any feature set by
1971 * feat=on|feat even if the latter is parsed after +-feat
1972 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled) */
1974 static GList *plus_features, *minus_features;
/* GCompareFunc wrapper around g_strcmp0() (NULL-safe strcmp) for the
 * feature-name lists above. */
1976 static gint compare_string(gconstpointer a, gconstpointer b)
1978 return g_strcmp0(a, b);
1981 /* Parse "+feature,-feature,feature=foo" CPU feature string */
/* Registers each "feat=val" pair as a global property on TYPENAME, and
 * records legacy "+feat"/"-feat" entries in plus_features /
 * minus_features for later application.  Only the first call has any
 * effect (see cpu_globals_initialized guard below).
 * NOTE(review): uses strtok(), which keeps static state — assumes this
 * runs single-threaded during startup; confirm before reusing. */
1983 static void x86_cpu_parse_featurestr(const char *typename, char *features,
1984 Error **errp)
1986 char *featurestr; /* Single 'key=value" string being parsed */
1987 Error *local_err = NULL;
1988 static bool cpu_globals_initialized;
1989 bool ambiguous = false;
1991 if (cpu_globals_initialized) {
1992 return;
1994 cpu_globals_initialized = true;
1996 if (!features) {
1997 return;
2000 for (featurestr = strtok(features, ",");
2001 featurestr && !local_err;
2002 featurestr = strtok(NULL, ",")) {
2003 const char *name;
2004 const char *val = NULL;
2005 char *eq = NULL;
2006 char num[32];
2007 GlobalProperty *prop;
2009 /* Compatibility syntax: */
2010 if (featurestr[0] == '+') {
2011 plus_features = g_list_append(plus_features,
2012 g_strdup(featurestr + 1));
2013 continue;
2014 } else if (featurestr[0] == '-') {
2015 minus_features = g_list_append(minus_features,
2016 g_strdup(featurestr + 1));
2017 continue;
/* Split "name=val"; a bare name means "name=on". */
2020 eq = strchr(featurestr, '=');
2021 if (eq) {
2022 *eq++ = 0;
2023 val = eq;
2024 } else {
2025 val = "on";
2028 feat2prop(featurestr);
2029 name = featurestr;
/* Warn when the same feature appears in both legacy and
 * property form; the legacy form wins. */
2031 if (g_list_find_custom(plus_features, name, compare_string)) {
2032 error_report("warning: Ambiguous CPU model string. "
2033 "Don't mix both \"+%s\" and \"%s=%s\"",
2034 name, name, val);
2035 ambiguous = true;
2037 if (g_list_find_custom(minus_features, name, compare_string)) {
2038 error_report("warning: Ambiguous CPU model string. "
2039 "Don't mix both \"-%s\" and \"%s=%s\"",
2040 name, name, val);
2041 ambiguous = true;
2044 /* Special case: */
2045 if (!strcmp(name, "tsc-freq")) {
2046 int64_t tsc_freq;
2047 char *err;
/* Accept size suffixes (e.g. "2G") with a base unit of 1000. */
2049 tsc_freq = qemu_strtosz_suffix_unit(val, &err,
2050 QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
2051 if (tsc_freq < 0 || *err) {
2052 error_setg(errp, "bad numerical value %s", val);
2053 return;
2055 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
2056 val = num;
2057 name = "tsc-frequency";
2060 prop = g_new0(typeof(*prop), 1);
2061 prop->driver = typename;
2062 prop->property = g_strdup(name);
2063 prop->value = g_strdup(val);
2064 prop->errp = &error_fatal;
2065 qdev_prop_register_global(prop);
2068 if (ambiguous) {
2069 error_report("warning: Compatibility of ambiguous CPU model "
2070 "strings won't be kept on future QEMU versions");
2073 if (local_err) {
2074 error_propagate(errp, local_err);
2078 static void x86_cpu_load_features(X86CPU *cpu, Error **errp);
2079 static int x86_cpu_filter_features(X86CPU *cpu);
2081 /* Check for missing features that may prevent the CPU class from
2082 * running using the current machine and accelerator.
2084 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
2085 strList **missing_feats)
2087 X86CPU *xc;
2088 FeatureWord w;
2089 Error *err = NULL;
2090 strList **next = missing_feats;
2092 if (xcc->kvm_required && !kvm_enabled()) {
2093 strList *new = g_new0(strList, 1);
2094 new->value = g_strdup("kvm");;
2095 *missing_feats = new;
2096 return;
2099 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
2101 x86_cpu_load_features(xc, &err);
2102 if (err) {
2103 /* Errors at x86_cpu_load_features should never happen,
2104 * but in case it does, just report the model as not
2105 * runnable at all using the "type" property.
2107 strList *new = g_new0(strList, 1);
2108 new->value = g_strdup("type");
2109 *next = new;
2110 next = &new->next;
2113 x86_cpu_filter_features(xc);
2115 for (w = 0; w < FEATURE_WORDS; w++) {
2116 uint32_t filtered = xc->filtered_features[w];
2117 int i;
2118 for (i = 0; i < 32; i++) {
2119 if (filtered & (1UL << i)) {
2120 strList *new = g_new0(strList, 1);
2121 new->value = g_strdup(x86_cpu_feature_name(w, i));
2122 *next = new;
2123 next = &new->next;
2128 object_unref(OBJECT(xc));
2131 /* Print all cpuid feature names in featureset
2133 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2135 int bit;
2136 bool first = true;
2138 for (bit = 0; bit < 32; bit++) {
2139 if (featureset[bit]) {
2140 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2141 first = false;
2146 /* Sort alphabetically by type name, listing kvm_required models last. */
2147 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
2149 ObjectClass *class_a = (ObjectClass *)a;
2150 ObjectClass *class_b = (ObjectClass *)b;
2151 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
2152 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
2153 const char *name_a, *name_b;
2155 if (cc_a->kvm_required != cc_b->kvm_required) {
2156 /* kvm_required items go last */
2157 return cc_a->kvm_required ? 1 : -1;
2158 } else {
2159 name_a = object_class_get_name(class_a);
2160 name_b = object_class_get_name(class_b);
2161 return strcmp(name_a, name_b);
2165 static GSList *get_sorted_cpu_model_list(void)
2167 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
2168 list = g_slist_sort(list, x86_cpu_list_compare);
2169 return list;
2172 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
2174 ObjectClass *oc = data;
2175 X86CPUClass *cc = X86_CPU_CLASS(oc);
2176 CPUListState *s = user_data;
2177 char *name = x86_cpu_class_get_model_name(cc);
2178 const char *desc = cc->model_description;
2179 if (!desc) {
2180 desc = cc->cpu_def->model_id;
2183 (*s->cpu_fprintf)(s->file, "x86 %16s %-48s\n",
2184 name, desc);
2185 g_free(name);
2188 /* list available CPU models and flags */
2189 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2191 int i;
2192 CPUListState s = {
2193 .file = f,
2194 .cpu_fprintf = cpu_fprintf,
2196 GSList *list;
2198 (*cpu_fprintf)(f, "Available CPUs:\n");
2199 list = get_sorted_cpu_model_list();
2200 g_slist_foreach(list, x86_cpu_list_entry, &s);
2201 g_slist_free(list);
2203 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2204 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2205 FeatureWordInfo *fw = &feature_word_info[i];
2207 (*cpu_fprintf)(f, " ");
2208 listflags(f, cpu_fprintf, fw->feat_names);
2209 (*cpu_fprintf)(f, "\n");
2213 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
2215 ObjectClass *oc = data;
2216 X86CPUClass *cc = X86_CPU_CLASS(oc);
2217 CpuDefinitionInfoList **cpu_list = user_data;
2218 CpuDefinitionInfoList *entry;
2219 CpuDefinitionInfo *info;
2221 info = g_malloc0(sizeof(*info));
2222 info->name = x86_cpu_class_get_model_name(cc);
2223 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
2224 info->has_unavailable_features = true;
2226 entry = g_malloc0(sizeof(*entry));
2227 entry->value = info;
2228 entry->next = *cpu_list;
2229 *cpu_list = entry;
2232 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2234 CpuDefinitionInfoList *cpu_list = NULL;
2235 GSList *list = get_sorted_cpu_model_list();
2236 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
2237 g_slist_free(list);
2238 return cpu_list;
2241 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2242 bool migratable_only)
2244 FeatureWordInfo *wi = &feature_word_info[w];
2245 uint32_t r;
2247 if (kvm_enabled()) {
2248 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2249 wi->cpuid_ecx,
2250 wi->cpuid_reg);
2251 } else if (tcg_enabled()) {
2252 r = wi->tcg_features;
2253 } else {
2254 return ~0;
2256 if (migratable_only) {
2257 r &= x86_cpu_get_migratable_flags(w);
2259 return r;
2263 * Filters CPU feature words based on host availability of each feature.
2265 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
2267 static int x86_cpu_filter_features(X86CPU *cpu)
2269 CPUX86State *env = &cpu->env;
2270 FeatureWord w;
2271 int rv = 0;
2273 for (w = 0; w < FEATURE_WORDS; w++) {
2274 uint32_t host_feat =
2275 x86_cpu_get_supported_feature_word(w, false);
2276 uint32_t requested_features = env->features[w];
2277 env->features[w] &= host_feat;
2278 cpu->filtered_features[w] = requested_features & ~env->features[w];
2279 if (cpu->filtered_features[w]) {
2280 rv = 1;
2284 return rv;
2287 static void x86_cpu_report_filtered_features(X86CPU *cpu)
2289 FeatureWord w;
2291 for (w = 0; w < FEATURE_WORDS; w++) {
2292 report_unavailable_features(w, cpu->filtered_features[w]);
2296 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2298 PropValue *pv;
2299 for (pv = props; pv->prop; pv++) {
2300 if (!pv->value) {
2301 continue;
2303 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2304 &error_abort);
/* Copies one static CPU model definition into a fresh X86CPU instance.
 * Each property setter writes into the same errp; the caller is expected
 * to check it once after the call (QOM setters no-op once *errp is set).
 */
2308 /* Load data from X86CPUDefinition
2310 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2312 CPUX86State *env = &cpu->env;
2313 const char *vendor;
2314 char host_vendor[CPUID_VENDOR_SZ + 1];
2315 FeatureWord w;
2317 /* CPU models only set _minimum_ values for level/xlevel: */
2318 object_property_set_int(OBJECT(cpu), def->level, "min-level", errp);
2319 object_property_set_int(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
2321 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2322 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2323 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2324 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2325 for (w = 0; w < FEATURE_WORDS; w++) {
2326 env->features[w] = def->features[w];
2329 /* Special cases not set in the X86CPUDefinition structs: */
2330 if (kvm_enabled()) {
2331 if (!kvm_irqchip_in_kernel()) {
2332 x86_cpu_change_kvm_default("x2apic", "off");
2335 x86_cpu_apply_props(cpu, kvm_default_props);
2336 } else if (tcg_enabled()) {
2337 x86_cpu_apply_props(cpu, tcg_default_props);
/* Guests can always detect they run under a hypervisor */
2340 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2342 /* sysenter isn't supported in compatibility mode on AMD,
2343 * syscall isn't supported in compatibility mode on Intel.
2344 * Normally we advertise the actual CPU vendor, but you can
2345 * override this using the 'vendor' property if you want to use
2346 * KVM's sysenter/syscall emulation in compatibility mode and
2347 * when doing cross vendor migration
2349 vendor = def->vendor;
2350 if (kvm_enabled()) {
2351 uint32_t ebx = 0, ecx = 0, edx = 0;
2352 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2353 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2354 vendor = host_vendor;
2357 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2361 X86CPU *cpu_x86_init(const char *cpu_model)
2363 return X86_CPU(cpu_generic_init(TYPE_X86_CPU, cpu_model));
2366 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2368 X86CPUDefinition *cpudef = data;
2369 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2371 xcc->cpu_def = cpudef;
/* Register a QOM type for one static CPU model definition.
 * NOTE(review): typename is freed immediately after type_register(), which
 * implies type_register() keeps its own copy of the name — confirm against
 * the QOM type registry API if touching this. */
2374 static void x86_register_cpudef_type(X86CPUDefinition *def)
2376 char *typename = x86_cpu_type_name(def->name);
2377 TypeInfo ti = {
2378 .name = typename,
2379 .parent = TYPE_X86_CPU,
2380 .class_init = x86_cpu_cpudef_class_init,
2381 .class_data = def,
2384 type_register(&ti);
2385 g_free(typename);
2388 #if !defined(CONFIG_USER_ONLY)
/* Clear the APIC feature bit from CPUID leaf 1 EDX */
2390 void cpu_clear_apic_feature(CPUX86State *env)
2392 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2395 #endif /* !CONFIG_USER_ONLY */
/* Compute the guest-visible CPUID output for leaf 'index', sub-leaf 'count',
 * from the configured feature words in 'env' plus topology information from
 * the CPUState. Out-of-range leaves are first clamped to
 * cpuid_level / cpuid_xlevel / cpuid_xlevel2. Only the four output
 * registers are written; no CPU state is modified. */
2397 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2398 uint32_t *eax, uint32_t *ebx,
2399 uint32_t *ecx, uint32_t *edx)
2401 X86CPU *cpu = x86_env_get_cpu(env);
2402 CPUState *cs = CPU(cpu);
2403 uint32_t pkg_offset;
2405 /* test if maximum index reached */
2406 if (index & 0x80000000) {
2407 if (index > env->cpuid_xlevel) {
2408 if (env->cpuid_xlevel2 > 0) {
2409 /* Handle the Centaur's CPUID instruction. */
2410 if (index > env->cpuid_xlevel2) {
2411 index = env->cpuid_xlevel2;
2412 } else if (index < 0xC0000000) {
2413 index = env->cpuid_xlevel;
2415 } else {
2416 /* Intel documentation states that invalid EAX input will
2417 * return the same information as EAX=cpuid_level
2418 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2420 index = env->cpuid_level;
2423 } else {
2424 if (index > env->cpuid_level)
2425 index = env->cpuid_level;
2428 switch(index) {
2429 case 0:
2430 *eax = env->cpuid_level;
2431 *ebx = env->cpuid_vendor1;
2432 *edx = env->cpuid_vendor2;
2433 *ecx = env->cpuid_vendor3;
2434 break;
2435 case 1:
2436 *eax = env->cpuid_version;
2437 *ebx = (cpu->apic_id << 24) |
2438 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2439 *ecx = env->features[FEAT_1_ECX];
2440 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
2441 *ecx |= CPUID_EXT_OSXSAVE;
2443 *edx = env->features[FEAT_1_EDX];
2444 if (cs->nr_cores * cs->nr_threads > 1) {
2445 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2446 *edx |= CPUID_HT;
2448 break;
2449 case 2:
2450 /* cache info: needed for Pentium Pro compatibility */
2451 if (cpu->cache_info_passthrough) {
2452 host_cpuid(index, 0, eax, ebx, ecx, edx);
2453 break;
2455 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2456 *ebx = 0;
2457 if (!cpu->enable_l3_cache) {
2458 *ecx = 0;
2459 } else {
2460 *ecx = L3_N_DESCRIPTOR;
2462 *edx = (L1D_DESCRIPTOR << 16) | \
2463 (L1I_DESCRIPTOR << 8) | \
2464 (L2_DESCRIPTOR);
2465 break;
2466 case 4:
2467 /* cache info: needed for Core compatibility */
2468 if (cpu->cache_info_passthrough) {
2469 host_cpuid(index, count, eax, ebx, ecx, edx);
2470 *eax &= ~0xFC000000;
2471 } else {
2472 *eax = 0;
2473 switch (count) {
2474 case 0: /* L1 dcache info */
2475 *eax |= CPUID_4_TYPE_DCACHE | \
2476 CPUID_4_LEVEL(1) | \
2477 CPUID_4_SELF_INIT_LEVEL;
2478 *ebx = (L1D_LINE_SIZE - 1) | \
2479 ((L1D_PARTITIONS - 1) << 12) | \
2480 ((L1D_ASSOCIATIVITY - 1) << 22);
2481 *ecx = L1D_SETS - 1;
2482 *edx = CPUID_4_NO_INVD_SHARING;
2483 break;
2484 case 1: /* L1 icache info */
2485 *eax |= CPUID_4_TYPE_ICACHE | \
2486 CPUID_4_LEVEL(1) | \
2487 CPUID_4_SELF_INIT_LEVEL;
2488 *ebx = (L1I_LINE_SIZE - 1) | \
2489 ((L1I_PARTITIONS - 1) << 12) | \
2490 ((L1I_ASSOCIATIVITY - 1) << 22);
2491 *ecx = L1I_SETS - 1;
2492 *edx = CPUID_4_NO_INVD_SHARING;
2493 break;
2494 case 2: /* L2 cache info */
2495 *eax |= CPUID_4_TYPE_UNIFIED | \
2496 CPUID_4_LEVEL(2) | \
2497 CPUID_4_SELF_INIT_LEVEL;
2498 if (cs->nr_threads > 1) {
2499 *eax |= (cs->nr_threads - 1) << 14;
2501 *ebx = (L2_LINE_SIZE - 1) | \
2502 ((L2_PARTITIONS - 1) << 12) | \
2503 ((L2_ASSOCIATIVITY - 1) << 22);
2504 *ecx = L2_SETS - 1;
2505 *edx = CPUID_4_NO_INVD_SHARING;
2506 break;
2507 case 3: /* L3 cache info */
2508 if (!cpu->enable_l3_cache) {
2509 *eax = 0;
2510 *ebx = 0;
2511 *ecx = 0;
2512 *edx = 0;
2513 break;
2515 *eax |= CPUID_4_TYPE_UNIFIED | \
2516 CPUID_4_LEVEL(3) | \
2517 CPUID_4_SELF_INIT_LEVEL;
2518 pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
2519 *eax |= ((1 << pkg_offset) - 1) << 14;
2520 *ebx = (L3_N_LINE_SIZE - 1) | \
2521 ((L3_N_PARTITIONS - 1) << 12) | \
2522 ((L3_N_ASSOCIATIVITY - 1) << 22);
2523 *ecx = L3_N_SETS - 1;
2524 *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
2525 break;
2526 default: /* end of info */
2527 *eax = 0;
2528 *ebx = 0;
2529 *ecx = 0;
2530 *edx = 0;
2531 break;
2535 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2536 if ((*eax & 31) && cs->nr_cores > 1) {
2537 *eax |= (cs->nr_cores - 1) << 26;
2539 break;
2540 case 5:
2541 /* mwait info: needed for Core compatibility */
2542 *eax = 0; /* Smallest monitor-line size in bytes */
2543 *ebx = 0; /* Largest monitor-line size in bytes */
2544 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2545 *edx = 0;
2546 break;
2547 case 6:
2548 /* Thermal and Power Leaf */
2549 *eax = env->features[FEAT_6_EAX];
2550 *ebx = 0;
2551 *ecx = 0;
2552 *edx = 0;
2553 break;
2554 case 7:
2555 /* Structured Extended Feature Flags Enumeration Leaf */
2556 if (count == 0) {
2557 *eax = 0; /* Maximum ECX value for sub-leaves */
2558 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2559 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
2560 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
2561 *ecx |= CPUID_7_0_ECX_OSPKE;
2563 *edx = 0; /* Reserved */
2564 } else {
2565 *eax = 0;
2566 *ebx = 0;
2567 *ecx = 0;
2568 *edx = 0;
2570 break;
2571 case 9:
2572 /* Direct Cache Access Information Leaf */
2573 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2574 *ebx = 0;
2575 *ecx = 0;
2576 *edx = 0;
2577 break;
2578 case 0xA:
2579 /* Architectural Performance Monitoring Leaf */
2580 if (kvm_enabled() && cpu->enable_pmu) {
2581 KVMState *s = cs->kvm_state;
2583 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2584 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2585 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2586 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2587 } else {
2588 *eax = 0;
2589 *ebx = 0;
2590 *ecx = 0;
2591 *edx = 0;
2593 break;
2594 case 0xB:
2595 /* Extended Topology Enumeration Leaf */
2596 if (!cpu->enable_cpuid_0xb) {
2597 *eax = *ebx = *ecx = *edx = 0;
2598 break;
2601 *ecx = count & 0xff;
2602 *edx = cpu->apic_id;
2604 switch (count) {
2605 case 0:
2606 *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
2607 *ebx = cs->nr_threads;
2608 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
2609 break;
2610 case 1:
2611 *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
2612 *ebx = cs->nr_cores * cs->nr_threads;
2613 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
2614 break;
2615 default:
2616 *eax = 0;
2617 *ebx = 0;
2618 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
2621 assert(!(*eax & ~0x1f));
2622 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
2623 break;
2624 case 0xD: {
2625 /* Processor Extended State */
2626 *eax = 0;
2627 *ebx = 0;
2628 *ecx = 0;
2629 *edx = 0;
2630 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
2631 break;
2634 if (count == 0) {
2635 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
2636 *eax = env->features[FEAT_XSAVE_COMP_LO];
2637 *edx = env->features[FEAT_XSAVE_COMP_HI];
2638 *ebx = *ecx;
2639 } else if (count == 1) {
2640 *eax = env->features[FEAT_XSAVE];
2641 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
2642 if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
2643 const ExtSaveArea *esa = &x86_ext_save_areas[count];
2644 *eax = esa->size;
2645 *ebx = esa->offset;
2648 break;
2650 case 0x80000000:
2651 *eax = env->cpuid_xlevel;
2652 *ebx = env->cpuid_vendor1;
2653 *edx = env->cpuid_vendor2;
2654 *ecx = env->cpuid_vendor3;
2655 break;
2656 case 0x80000001:
2657 *eax = env->cpuid_version;
2658 *ebx = 0;
2659 *ecx = env->features[FEAT_8000_0001_ECX];
2660 *edx = env->features[FEAT_8000_0001_EDX];
2662 /* The Linux kernel checks for the CMPLegacy bit and
2663 * discards multiple thread information if it is set.
2664 * So don't set it here for Intel to make Linux guests happy.
2666 if (cs->nr_cores * cs->nr_threads > 1) {
2667 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2668 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2669 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2670 *ecx |= 1 << 1; /* CmpLegacy bit */
2673 break;
2674 case 0x80000002:
2675 case 0x80000003:
2676 case 0x80000004:
2677 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2678 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2679 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2680 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2681 break;
2682 case 0x80000005:
2683 /* cache info (L1 cache) */
2684 if (cpu->cache_info_passthrough) {
2685 host_cpuid(index, 0, eax, ebx, ecx, edx);
2686 break;
2688 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2689 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2690 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2691 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2692 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2693 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2694 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2695 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2696 break;
2697 case 0x80000006:
2698 /* cache info (L2 cache) */
2699 if (cpu->cache_info_passthrough) {
2700 host_cpuid(index, 0, eax, ebx, ecx, edx);
2701 break;
2703 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2704 (L2_DTLB_2M_ENTRIES << 16) | \
2705 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2706 (L2_ITLB_2M_ENTRIES);
2707 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2708 (L2_DTLB_4K_ENTRIES << 16) | \
2709 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2710 (L2_ITLB_4K_ENTRIES);
2711 *ecx = (L2_SIZE_KB_AMD << 16) | \
2712 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2713 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2714 if (!cpu->enable_l3_cache) {
2715 *edx = ((L3_SIZE_KB / 512) << 18) | \
2716 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2717 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2718 } else {
2719 *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
2720 (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
2721 (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
2723 break;
2724 case 0x80000007:
2725 *eax = 0;
2726 *ebx = 0;
2727 *ecx = 0;
2728 *edx = env->features[FEAT_8000_0007_EDX];
2729 break;
2730 case 0x80000008:
2731 /* virtual & phys address size in low 2 bytes. */
2732 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2733 /* 64 bit processor, 48 bits virtual, configurable
2734 * physical bits.
2736 *eax = 0x00003000 + cpu->phys_bits;
2737 } else {
2738 *eax = cpu->phys_bits;
2740 *ebx = 0;
2741 *ecx = 0;
2742 *edx = 0;
2743 if (cs->nr_cores * cs->nr_threads > 1) {
2744 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2746 break;
2747 case 0x8000000A:
2748 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2749 *eax = 0x00000001; /* SVM Revision */
2750 *ebx = 0x00000010; /* nr of ASIDs */
2751 *ecx = 0;
2752 *edx = env->features[FEAT_SVM]; /* optional features */
2753 } else {
2754 *eax = 0;
2755 *ebx = 0;
2756 *ecx = 0;
2757 *edx = 0;
2759 break;
2760 case 0xC0000000:
2761 *eax = env->cpuid_xlevel2;
2762 *ebx = 0;
2763 *ecx = 0;
2764 *edx = 0;
2765 break;
2766 case 0xC0000001:
2767 /* Support for VIA CPU's CPUID instruction */
2768 *eax = env->cpuid_version;
2769 *ebx = 0;
2770 *ecx = 0;
2771 *edx = env->features[FEAT_C000_0001_EDX];
2772 break;
2773 case 0xC0000002:
2774 case 0xC0000003:
2775 case 0xC0000004:
2776 /* Reserved for the future, and now filled with zero */
2777 *eax = 0;
2778 *ebx = 0;
2779 *ecx = 0;
2780 *edx = 0;
2781 break;
2782 default:
2783 /* reserved values: zero */
2784 *eax = 0;
2785 *ebx = 0;
2786 *ecx = 0;
2787 *edx = 0;
2788 break;
2792 /* CPUClass::reset() */
/* Puts the CPU into the architectural power-on state: real mode with
 * CS:IP = F000:FFF0, empty FPU tag word, default MXCSR, MTRRs cleared.
 * BSP designation and KVM vcpu reset happen at the end (system emulation
 * only). */
2793 static void x86_cpu_reset(CPUState *s)
2795 X86CPU *cpu = X86_CPU(s);
2796 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2797 CPUX86State *env = &cpu->env;
2798 target_ulong cr4;
2799 uint64_t xcr0;
2800 int i;
2802 xcc->parent_reset(s);
/* Zero everything up to (but excluding) the fields preserved across reset */
2804 memset(env, 0, offsetof(CPUX86State, end_reset_fields));
2806 tlb_flush(s, 1);
2808 env->old_exception = -1;
2810 /* init to reset state */
2812 env->hflags2 |= HF2_GIF_MASK;
2814 cpu_x86_update_cr0(env, 0x60000010);
2815 env->a20_mask = ~0x0;
2816 env->smbase = 0x30000;
2818 env->idt.limit = 0xffff;
2819 env->gdt.limit = 0xffff;
2820 env->ldt.limit = 0xffff;
2821 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2822 env->tr.limit = 0xffff;
2823 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
2825 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2826 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2827 DESC_R_MASK | DESC_A_MASK);
2828 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2829 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2830 DESC_A_MASK);
2831 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2832 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2833 DESC_A_MASK);
2834 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2835 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2836 DESC_A_MASK);
2837 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2838 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2839 DESC_A_MASK);
2840 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2841 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2842 DESC_A_MASK);
2844 env->eip = 0xfff0;
2845 env->regs[R_EDX] = env->cpuid_version;
2847 env->eflags = 0x2;
2849 /* FPU init */
2850 for (i = 0; i < 8; i++) {
2851 env->fptags[i] = 1;
2853 cpu_set_fpuc(env, 0x37f);
2855 env->mxcsr = 0x1f80;
2856 /* All units are in INIT state. */
2857 env->xstate_bv = 0;
2859 env->pat = 0x0007040600070406ULL;
2860 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2862 memset(env->dr, 0, sizeof(env->dr));
2863 env->dr[6] = DR6_FIXED_1;
2864 env->dr[7] = DR7_FIXED_1;
2865 cpu_breakpoint_remove_all(s, BP_CPU);
2866 cpu_watchpoint_remove_all(s, BP_CPU);
2868 cr4 = 0;
2869 xcr0 = XSTATE_FP_MASK;
2871 #ifdef CONFIG_USER_ONLY
2872 /* Enable all the features for user-mode. */
2873 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
2874 xcr0 |= XSTATE_SSE_MASK;
2876 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
2877 const ExtSaveArea *esa = &x86_ext_save_areas[i];
2878 if (env->features[esa->feature] & esa->bits) {
2879 xcr0 |= 1ull << i;
2883 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
2884 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
2886 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
2887 cr4 |= CR4_FSGSBASE_MASK;
2889 #endif
2891 env->xcr0 = xcr0;
2892 cpu_x86_update_cr4(env, cr4);
2895 * SDM 11.11.5 requires:
2896 * - IA32_MTRR_DEF_TYPE MSR.E = 0
2897 * - IA32_MTRR_PHYSMASKn.V = 0
2898 * All other bits are undefined. For simplification, zero it all.
2900 env->mtrr_deftype = 0;
2901 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
2902 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
2904 #if !defined(CONFIG_USER_ONLY)
2905 /* We hard-wire the BSP to the first CPU. */
2906 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
2908 s->halted = !cpu_is_bsp(cpu);
2910 if (kvm_enabled()) {
2911 kvm_arch_reset_vcpu(cpu);
2913 #endif
2916 #ifndef CONFIG_USER_ONLY
2917 bool cpu_is_bsp(X86CPU *cpu)
2919 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2922 /* TODO: remove me, when reset over QOM tree is implemented */
2923 static void x86_cpu_machine_reset_cb(void *opaque)
2925 X86CPU *cpu = opaque;
2926 cpu_reset(CPU(cpu));
2928 #endif
2930 static void mce_init(X86CPU *cpu)
2932 CPUX86State *cenv = &cpu->env;
2933 unsigned int bank;
2935 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2936 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2937 (CPUID_MCE | CPUID_MCA)) {
2938 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
2939 (cpu->enable_lmce ? MCG_LMCE_P : 0);
2940 cenv->mcg_ctl = ~(uint64_t)0;
2941 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2942 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2947 #ifndef CONFIG_USER_ONLY
2948 APICCommonClass *apic_get_class(void)
2950 const char *apic_type = "apic";
2952 if (kvm_apic_in_kernel()) {
2953 apic_type = "kvm-apic";
2954 } else if (xen_enabled()) {
2955 apic_type = "xen-apic";
2958 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
2961 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2963 APICCommonState *apic;
2964 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());
2966 cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));
2968 object_property_add_child(OBJECT(cpu), "lapic",
2969 OBJECT(cpu->apic_state), &error_abort);
2970 object_unref(OBJECT(cpu->apic_state));
2972 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
2973 /* TODO: convert to link<> */
2974 apic = APIC_COMMON(cpu->apic_state);
2975 apic->cpu = cpu;
2976 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
2979 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2981 APICCommonState *apic;
2982 static bool apic_mmio_map_once;
2984 if (cpu->apic_state == NULL) {
2985 return;
2987 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
2988 errp);
2990 /* Map APIC MMIO area */
2991 apic = APIC_COMMON(cpu->apic_state);
2992 if (!apic_mmio_map_once) {
2993 memory_region_add_subregion_overlap(get_system_memory(),
2994 apic->apicbase &
2995 MSR_IA32_APICBASE_BASE,
2996 &apic->io_memory,
2997 0x1000);
2998 apic_mmio_map_once = true;
3002 static void x86_cpu_machine_done(Notifier *n, void *unused)
3004 X86CPU *cpu = container_of(n, X86CPU, machine_done);
3005 MemoryRegion *smram =
3006 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
3008 if (smram) {
3009 cpu->smram = g_new(MemoryRegion, 1);
3010 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
3011 smram, 0, 1ull << 32);
3012 memory_region_set_enabled(cpu->smram, false);
3013 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
3016 #else
/* User-mode emulation builds: no APIC device exists, realize is a no-op */
3017 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
3020 #endif
/* Note: Only safe for use on x86(-64) hosts */
static uint32_t x86_host_phys_bits(void)
{
    uint32_t eax;

    host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
    if (eax < 0x80000008) {
        /* It's an odd 64 bit machine that doesn't have the leaf for
         * physical address bits; fall back to 36 that's most older
         * Intel.
         */
        return 36;
    }

    host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
    /* Note: According to AMD doc 25481 rev 2.34 they have a field
     * at 23:16 that can specify a maximum physical address bits for
     * the guest that can override this value; but I've not seen
     * anything with that set.
     */
    return eax & 0xff;
}
3048 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
3050 if (*min < value) {
3051 *min = value;
3055 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
3056 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
3058 CPUX86State *env = &cpu->env;
3059 FeatureWordInfo *fi = &feature_word_info[w];
3060 uint32_t eax = fi->cpuid_eax;
3061 uint32_t region = eax & 0xF0000000;
3063 if (!env->features[w]) {
3064 return;
3067 switch (region) {
3068 case 0x00000000:
3069 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
3070 break;
3071 case 0x80000000:
3072 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
3073 break;
3074 case 0xC0000000:
3075 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
3076 break;
3080 /* Calculate XSAVE components based on the configured CPU feature flags */
3081 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
3083 CPUX86State *env = &cpu->env;
3084 int i;
3085 uint64_t mask;
3087 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
3088 return;
3091 mask = 0;
3092 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
3093 const ExtSaveArea *esa = &x86_ext_save_areas[i];
3094 if (env->features[esa->feature] & esa->bits) {
3095 mask |= (1ULL << i);
3099 env->features[FEAT_XSAVE_COMP_LO] = mask;
3100 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
3103 /* Load CPUID data based on configured features */
/* Applies, in order: "host" feature pass-through, the legacy +feat/-feat
 * lists, KVM paravirt feature masking, XSAVE component calculation, and
 * automatic CPUID level adjustment. Property-set errors jump to 'out' and
 * are propagated to errp. */
3104 static void x86_cpu_load_features(X86CPU *cpu, Error **errp)
3106 CPUX86State *env = &cpu->env;
3107 FeatureWord w;
3108 GList *l;
3109 Error *local_err = NULL;
3111 /*TODO: cpu->host_features incorrectly overwrites features
3112 * set using "feat=on|off". Once we fix this, we can convert
3113 * plus_features & minus_features to global properties
3114 * inside x86_cpu_parse_featurestr() too.
3116 if (cpu->host_features) {
3117 for (w = 0; w < FEATURE_WORDS; w++) {
3118 env->features[w] =
3119 x86_cpu_get_supported_feature_word(w, cpu->migratable);
3123 for (l = plus_features; l; l = l->next) {
3124 const char *prop = l->data;
3125 object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
3126 if (local_err) {
3127 goto out;
3131 for (l = minus_features; l; l = l->next) {
3132 const char *prop = l->data;
3133 object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
3134 if (local_err) {
3135 goto out;
3139 if (!kvm_enabled() || !cpu->expose_kvm) {
3140 env->features[FEAT_KVM] = 0;
3143 x86_cpu_enable_xsave_components(cpu);
3145 /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
3146 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
3147 if (cpu->full_cpuid_auto_level) {
3148 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
3149 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
3150 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
3151 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
3152 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
3153 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
3154 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
3155 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
3156 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
3157 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
3158 /* SVM requires CPUID[0x8000000A] */
3159 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
3160 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
3164 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
3165 if (env->cpuid_level == UINT32_MAX) {
3166 env->cpuid_level = env->cpuid_min_level;
3168 if (env->cpuid_xlevel == UINT32_MAX) {
3169 env->cpuid_xlevel = env->cpuid_min_xlevel;
3171 if (env->cpuid_xlevel2 == UINT32_MAX) {
3172 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
3175 out:
3176 if (local_err != NULL) {
3177 error_propagate(errp, local_err);
/* Vendor checks: true only when all three CPUID vendor-id dwords match
 * the corresponding Intel/AMD constants. */
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
/* DeviceClass::realize for TYPE_X86_CPU.
 *
 * Validates configuration (KVM requirement, apic-id, feature filtering,
 * phys-bits), finalizes CPUID state, creates the per-CPU address space
 * and APIC, then starts the vcpu.  NOTE: statement order matters — see
 * the inline comments (e.g. the nr_threads check must follow
 * qemu_init_vcpu()).
 */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;  /* warn about hyperthreading only once */

    if (xcc->kvm_required && !kvm_enabled()) {
        char *name = x86_cpu_class_get_model_name(xcc);
        error_setg(&local_err, "CPU model '%s' requires KVM", name);
        g_free(name);
        goto out;
    }

    if (cpu->apic_id == UNASSIGNED_APIC_ID) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    x86_cpu_load_features(cpu, &local_err);
    if (local_err) {
        goto out;
    }

    /* Drop unsupported feature bits; report (and with enforce, reject) */
    if (x86_cpu_filter_features(cpu) &&
        (cpu->check_cpuid || cpu->enforce_cpuid)) {
        x86_cpu_report_filtered_features(cpu);
        if (cpu->enforce_cpuid) {
            error_setg(&local_err,
                       kvm_enabled() ?
                           "Host doesn't support requested features" :
                           "TCG doesn't support requested features");
            goto out;
        }
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
           & CPUID_EXT2_AMD_ALIASES);
    }

    /* For 64bit systems think about the number of physical bits to present.
     * ideally this should be the same as the host; anything other than matching
     * the host can cause incorrect guest behaviour.
     * QEMU used to pick the magic value of 40 bits that corresponds to
     * consumer AMD devices but nothing else.
     */
    if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
        if (kvm_enabled()) {
            uint32_t host_phys_bits = x86_host_phys_bits();
            static bool warned;

            if (cpu->host_phys_bits) {
                /* The user asked for us to use the host physical bits */
                cpu->phys_bits = host_phys_bits;
            }

            /* Print a warning if the user set it to a value that's not the
             * host value.
             */
            if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
                !warned) {
                error_report("Warning: Host physical bits (%u)"
                             " does not match phys-bits property (%u)",
                             host_phys_bits, cpu->phys_bits);
                warned = true;
            }

            if (cpu->phys_bits &&
                (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
                cpu->phys_bits < 32)) {
                error_setg(errp, "phys-bits should be between 32 and %u "
                                 " (but is %u)",
                                 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
                return;
            }
        } else {
            if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
                error_setg(errp, "TCG only supports phys-bits=%u",
                                  TCG_PHYS_ADDR_BITS);
                return;
            }
        }
        /* 0 means it was not explicitly set by the user (or by machine
         * compat_props or by the host code above). In this case, the default
         * is the value used by TCG (40).
         */
        if (cpu->phys_bits == 0) {
            cpu->phys_bits = TCG_PHYS_ADDR_BITS;
        }
    } else {
        /* For 32 bit systems don't use the user set value, but keep
         * phys_bits consistent with what we tell the guest.
         */
        if (cpu->phys_bits != 0) {
            error_setg(errp, "phys-bits is not user-configurable in 32 bit");
            return;
        }

        if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
            cpu->phys_bits = 36;
        } else {
            cpu->phys_bits = 32;
        }
    }

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    if (tcg_enabled()) {
        tcg_x86_init();
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        AddressSpace *newas = g_new(AddressSpace, 1);

        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);
        address_space_init(newas, cpu->cpu_as_root, "CPU");
        cs->num_ases = 1;
        cpu_address_space_init(cs, newas, 0);

        /* ... SMRAM with higher priority, linked from /machine/smram. */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
     * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to gives
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't be populated yet and the checking is incorrect.
     */
    if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
        error_report("AMD CPU doesn't support hyperthreading. Please configure"
                     " -smp options properly.");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
3380 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
3382 X86CPU *cpu = X86_CPU(dev);
3383 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
3384 Error *local_err = NULL;
3386 #ifndef CONFIG_USER_ONLY
3387 cpu_remove_sync(CPU(dev));
3388 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
3389 #endif
3391 if (cpu->apic_state) {
3392 object_unparent(OBJECT(cpu->apic_state));
3393 cpu->apic_state = NULL;
3396 xcc->parent_unrealize(dev, &local_err);
3397 if (local_err != NULL) {
3398 error_propagate(errp, local_err);
3399 return;
/* Opaque state for a per-feature-bit boolean QOM property: the set of
 * bits (mask) inside a feature word (ptr) that the property controls. */
typedef struct BitProperty {
    uint32_t *ptr;   /* pointer into cpu->env.features[] */
    uint32_t mask;   /* one or more bits affected by this property */
} BitProperty;
3408 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
3409 void *opaque, Error **errp)
3411 BitProperty *fp = opaque;
3412 bool value = (*fp->ptr & fp->mask) == fp->mask;
3413 visit_type_bool(v, name, &value, errp);
3416 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
3417 void *opaque, Error **errp)
3419 DeviceState *dev = DEVICE(obj);
3420 BitProperty *fp = opaque;
3421 Error *local_err = NULL;
3422 bool value;
3424 if (dev->realized) {
3425 qdev_prop_set_after_realize(dev, name, errp);
3426 return;
3429 visit_type_bool(v, name, &value, &local_err);
3430 if (local_err) {
3431 error_propagate(errp, local_err);
3432 return;
3435 if (value) {
3436 *fp->ptr |= fp->mask;
3437 } else {
3438 *fp->ptr &= ~fp->mask;
3442 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3443 void *opaque)
3445 BitProperty *prop = opaque;
3446 g_free(prop);
3449 /* Register a boolean property to get/set a single bit in a uint32_t field.
3451 * The same property name can be registered multiple times to make it affect
3452 * multiple bits in the same FeatureWord. In that case, the getter will return
3453 * true only if all bits are set.
3455 static void x86_cpu_register_bit_prop(X86CPU *cpu,
3456 const char *prop_name,
3457 uint32_t *field,
3458 int bitnr)
3460 BitProperty *fp;
3461 ObjectProperty *op;
3462 uint32_t mask = (1UL << bitnr);
3464 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3465 if (op) {
3466 fp = op->opaque;
3467 assert(fp->ptr == field);
3468 fp->mask |= mask;
3469 } else {
3470 fp = g_new0(BitProperty, 1);
3471 fp->ptr = field;
3472 fp->mask = mask;
3473 object_property_add(OBJECT(cpu), prop_name, "bool",
3474 x86_cpu_get_bit_prop,
3475 x86_cpu_set_bit_prop,
3476 x86_cpu_release_bit_prop, fp, &error_abort);
3480 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3481 FeatureWord w,
3482 int bitnr)
3484 FeatureWordInfo *fi = &feature_word_info[w];
3485 const char *name = fi->feat_names[bitnr];
3487 if (!name) {
3488 return;
3491 /* Property names should use "-" instead of "_".
3492 * Old names containing underscores are registered as aliases
3493 * using object_property_add_alias()
3495 assert(!strchr(name, '_'));
3496 /* aliases don't use "|" delimiters anymore, they are registered
3497 * manually using object_property_add_alias() */
3498 assert(!strchr(name, '|'));
3499 x86_cpu_register_bit_prop(cpu, name, &cpu->env.features[w], bitnr);
/* instance_init for TYPE_X86_CPU: registers the CPUID-related QOM
 * properties, the per-feature-bit boolean properties and their legacy
 * aliases, then loads the model defaults from the class's cpu_def. */
static void x86_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    cs->env_ptr = env;

    /* CPUID version/vendor/model properties */
    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    /* Read-only introspection of enabled and filtered feature words */
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;

    /* One boolean property per named bit of every feature word */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    /* Legacy alternate names for some feature bits */
    object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
    object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
    object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
    object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
    object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "i64", obj, "lm", &error_abort);

    /* Old underscore-spelled names kept as aliases of the "-" names */
    object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
    object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
    object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
    object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
    object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
    object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
    object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
    object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
    object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
    object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
    object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
    object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
    object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
    object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
    object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
    object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
    object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
    object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
    object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);

    x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
}
3580 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3582 X86CPU *cpu = X86_CPU(cs);
3584 return cpu->apic_id;
3587 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3589 X86CPU *cpu = X86_CPU(cs);
3591 return cpu->env.cr[0] & CR0_PG_MASK;
3594 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3596 X86CPU *cpu = X86_CPU(cs);
3598 cpu->env.eip = value;
3601 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3603 X86CPU *cpu = X86_CPU(cs);
3605 cpu->env.eip = tb->pc - tb->cs_base;
3608 static bool x86_cpu_has_work(CPUState *cs)
3610 X86CPU *cpu = X86_CPU(cs);
3611 CPUX86State *env = &cpu->env;
3613 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3614 CPU_INTERRUPT_POLL)) &&
3615 (env->eflags & IF_MASK)) ||
3616 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3617 CPU_INTERRUPT_INIT |
3618 CPU_INTERRUPT_SIPI |
3619 CPU_INTERRUPT_MCE)) ||
3620 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3621 !(env->hflags & HF_SMM_MASK));
/* qdev properties common to all X86CPU subclasses.  UINT32_MAX on the
 * level/xlevel properties means "not set by the user"; see
 * x86_cpu_load_features() for how the min-*level defaults are applied. */
static Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
#else
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    { .name  = "hv-spinlocks", .info  = &qdev_prop_spinlocks },
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
    DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
    DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
    DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
    DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
    DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
    DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
    DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
    DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
    DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
    DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
    DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
    DEFINE_PROP_END_OF_LIST()
};
/* class_init for TYPE_X86_CPU: saves the parent's realize/unrealize/reset
 * hooks so the subclass implementations can chain to them, installs the
 * property table, and fills in the CPUClass vtable. */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    xcc->parent_realize = dc->realize;
    xcc->parent_unrealize = dc->unrealize;
    dc->realize = x86_cpu_realizefn;
    dc->unrealize = x86_cpu_unrealizefn;
    dc->props = x86_cpu_properties;

    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
    cc->dump_state = x86_cpu_dump_state;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
#ifndef CONFIG_USER_ONLY
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;

    dc->cannot_instantiate_with_device_add_yet = false;
}
/* Abstract base type for all x86 CPU models; concrete models are
 * registered per-cpudef in x86_cpu_register_types(). */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};
3727 static void x86_cpu_register_types(void)
3729 int i;
3731 type_register_static(&x86_cpu_type_info);
3732 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3733 x86_register_cpudef_type(&builtin_x86_defs[i]);
3735 #ifdef CONFIG_KVM
3736 type_register_static(&host_x86_cpu_type_info);
3737 #endif
3740 type_init(x86_cpu_register_types)