/*
 * target/i386/cpu.c — i386 CPUID helper functions
 * (mirrored from gitweb blob 642519a7fc6018e2ce8cbf28877caccd1ca83b7e,
 *  qemu/kevin.git; the scraped page header cited an unrelated vhost commit
 *  subject, which did not belong to this file)
 */
1 /*
2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
20 #include "qemu/cutils.h"
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/kvm.h"
25 #include "sysemu/cpus.h"
26 #include "kvm_i386.h"
28 #include "qemu/error-report.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
32 #include "qapi/qmp/types.h"
34 #include "qapi-types.h"
35 #include "qapi-visit.h"
36 #include "qapi/visitor.h"
37 #include "qom/qom-qobject.h"
38 #include "sysemu/arch_init.h"
40 #if defined(CONFIG_KVM)
41 #include <linux/kvm_para.h>
42 #endif
44 #include "sysemu/sysemu.h"
45 #include "hw/qdev-properties.h"
46 #include "hw/i386/topology.h"
47 #ifndef CONFIG_USER_ONLY
48 #include "exec/address-spaces.h"
49 #include "hw/hw.h"
50 #include "hw/xen/xen.h"
51 #include "hw/i386/apic_internal.h"
52 #endif
/* Cache topology CPUID constants: */

/* CPUID Leaf 2 Descriptors */

#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
#define CPUID_2_L1I_32KB_8WAY_64B 0x30
#define CPUID_2_L2_2MB_8WAY_64B   0x7d
#define CPUID_2_L3_16MB_16WAY_64B 0x4d


/* CPUID Leaf 4 constants: */

/* EAX: */
#define CPUID_4_TYPE_DCACHE  1
#define CPUID_4_TYPE_ICACHE  2
#define CPUID_4_TYPE_UNIFIED 3

/* Cache level is reported in EAX bits 7:5 */
#define CPUID_4_LEVEL(l)          ((l) << 5)

#define CPUID_4_SELF_INIT_LEVEL (1 << 8)
#define CPUID_4_FULLY_ASSOC     (1 << 9)

/* EDX: */
#define CPUID_4_NO_INVD_SHARING (1 << 0)
#define CPUID_4_INCLUSIVE       (1 << 1)
#define CPUID_4_COMPLEX_IDX     (1 << 2)
#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006:
 * maps a raw way count to the 4-bit field value.  Every use of the
 * argument is parenthesized so that expression arguments expand safely.
 */
#define AMD_ENC_ASSOC(a) ((a) <=   1 ? (a)  : \
                          (a) ==   2 ? 0x2 : \
                          (a) ==   4 ? 0x4 : \
                          (a) ==   8 ? 0x6 : \
                          (a) ==  16 ? 0x8 : \
                          (a) ==  32 ? 0xA : \
                          (a) ==  48 ? 0xB : \
                          (a) ==  64 ? 0xC : \
                          (a) ==  96 ? 0xD : \
                          (a) == 128 ? 0xE : \
                          (a) == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
/* Definitions of the hardcoded cache entries we expose: */

/* L1 data cache: */
#define L1D_LINE_SIZE         64
#define L1D_ASSOCIATIVITY      8
#define L1D_SETS              64
#define L1D_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1D_LINES_PER_TAG      1
#define L1D_SIZE_KB_AMD       64
#define L1D_ASSOCIATIVITY_AMD  2

/* L1 instruction cache: */
#define L1I_LINE_SIZE         64
#define L1I_ASSOCIATIVITY      8
#define L1I_SETS              64
#define L1I_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1I_LINES_PER_TAG      1
#define L1I_SIZE_KB_AMD       64
#define L1I_ASSOCIATIVITY_AMD  2

/* Level 2 unified cache: */
#define L2_LINE_SIZE          64
#define L2_ASSOCIATIVITY      16
#define L2_SETS             4096
#define L2_PARTITIONS          1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
#define L2_LINES_PER_TAG       1
#define L2_SIZE_KB_AMD       512

/* Level 3 unified cache: */
#define L3_SIZE_KB             0 /* disabled */
#define L3_ASSOCIATIVITY       0 /* disabled */
#define L3_LINES_PER_TAG       0 /* disabled */
#define L3_LINE_SIZE           0 /* disabled */
#define L3_N_LINE_SIZE        64
#define L3_N_ASSOCIATIVITY    16
#define L3_N_SETS          16384
#define L3_N_PARTITIONS        1
#define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
#define L3_N_LINES_PER_TAG     1
#define L3_N_SIZE_KB_AMD   16384
/* TLB definitions: */

#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512
174 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
175 uint32_t vendor2, uint32_t vendor3)
177 int i;
178 for (i = 0; i < 4; i++) {
179 dst[i] = vendor1 >> (8 * i);
180 dst[i + 4] = vendor2 >> (8 * i);
181 dst[i + 8] = vendor3 >> (8 * i);
183 dst[CPUID_VENDOR_SZ] = '\0';
/* Feature-set macros for the builtin CPU models and for the subset of
 * CPUID flags the TCG emulator can actually provide. */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
          CPUID_EXT_F16C, CPUID_EXT_RDRAND */

#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES 0
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
          CPUID_7_0_EBX_ERMS)
          /* missing:
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE | \
          CPUID_7_0_ECX_LA57)
#define TCG_7_0_EDX_FEATURES 0
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
/* Describes one 32-bit CPUID feature word: how to query it via CPUID and
 * which of its bits TCG supports / can(not) be migrated. */
typedef struct FeatureWordInfo {
    /* feature flags names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[32];
    uint32_t cpuid_eax;   /* Input EAX for CPUID */
    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;   /* Input ECX value for CPUID */
    int cpuid_reg;        /* output register (R_* constant) */
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
    uint32_t migratable_flags; /* Feature flags known to be migratable */
} FeatureWordInfo;
268 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
269 [FEAT_1_EDX] = {
270 .feat_names = {
271 "fpu", "vme", "de", "pse",
272 "tsc", "msr", "pae", "mce",
273 "cx8", "apic", NULL, "sep",
274 "mtrr", "pge", "mca", "cmov",
275 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
276 NULL, "ds" /* Intel dts */, "acpi", "mmx",
277 "fxsr", "sse", "sse2", "ss",
278 "ht" /* Intel htt */, "tm", "ia64", "pbe",
280 .cpuid_eax = 1, .cpuid_reg = R_EDX,
281 .tcg_features = TCG_FEATURES,
283 [FEAT_1_ECX] = {
284 .feat_names = {
285 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
286 "ds-cpl", "vmx", "smx", "est",
287 "tm2", "ssse3", "cid", NULL,
288 "fma", "cx16", "xtpr", "pdcm",
289 NULL, "pcid", "dca", "sse4.1",
290 "sse4.2", "x2apic", "movbe", "popcnt",
291 "tsc-deadline", "aes", "xsave", "osxsave",
292 "avx", "f16c", "rdrand", "hypervisor",
294 .cpuid_eax = 1, .cpuid_reg = R_ECX,
295 .tcg_features = TCG_EXT_FEATURES,
297 /* Feature names that are already defined on feature_name[] but
298 * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
299 * names on feat_names below. They are copied automatically
300 * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
302 [FEAT_8000_0001_EDX] = {
303 .feat_names = {
304 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
305 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
306 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
307 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
308 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
309 "nx", NULL, "mmxext", NULL /* mmx */,
310 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
311 NULL, "lm", "3dnowext", "3dnow",
313 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
314 .tcg_features = TCG_EXT2_FEATURES,
316 [FEAT_8000_0001_ECX] = {
317 .feat_names = {
318 "lahf-lm", "cmp-legacy", "svm", "extapic",
319 "cr8legacy", "abm", "sse4a", "misalignsse",
320 "3dnowprefetch", "osvw", "ibs", "xop",
321 "skinit", "wdt", NULL, "lwp",
322 "fma4", "tce", NULL, "nodeid-msr",
323 NULL, "tbm", "topoext", "perfctr-core",
324 "perfctr-nb", NULL, NULL, NULL,
325 NULL, NULL, NULL, NULL,
327 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
328 .tcg_features = TCG_EXT3_FEATURES,
330 [FEAT_C000_0001_EDX] = {
331 .feat_names = {
332 NULL, NULL, "xstore", "xstore-en",
333 NULL, NULL, "xcrypt", "xcrypt-en",
334 "ace2", "ace2-en", "phe", "phe-en",
335 "pmm", "pmm-en", NULL, NULL,
336 NULL, NULL, NULL, NULL,
337 NULL, NULL, NULL, NULL,
338 NULL, NULL, NULL, NULL,
339 NULL, NULL, NULL, NULL,
341 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
342 .tcg_features = TCG_EXT4_FEATURES,
344 [FEAT_KVM] = {
345 .feat_names = {
346 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
347 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
348 NULL, NULL, NULL, NULL,
349 NULL, NULL, NULL, NULL,
350 NULL, NULL, NULL, NULL,
351 NULL, NULL, NULL, NULL,
352 "kvmclock-stable-bit", NULL, NULL, NULL,
353 NULL, NULL, NULL, NULL,
355 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
356 .tcg_features = TCG_KVM_FEATURES,
358 [FEAT_HYPERV_EAX] = {
359 .feat_names = {
360 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
361 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
362 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
363 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
364 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
365 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
366 NULL, NULL, NULL, NULL,
367 NULL, NULL, NULL, NULL,
368 NULL, NULL, NULL, NULL,
369 NULL, NULL, NULL, NULL,
370 NULL, NULL, NULL, NULL,
372 .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
374 [FEAT_HYPERV_EBX] = {
375 .feat_names = {
376 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
377 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
378 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
379 NULL /* hv_create_port */, NULL /* hv_connect_port */,
380 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
381 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
382 NULL, NULL,
383 NULL, NULL, NULL, NULL,
384 NULL, NULL, NULL, NULL,
385 NULL, NULL, NULL, NULL,
386 NULL, NULL, NULL, NULL,
388 .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
390 [FEAT_HYPERV_EDX] = {
391 .feat_names = {
392 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
393 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
394 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
395 NULL, NULL,
396 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
397 NULL, NULL, NULL, NULL,
398 NULL, NULL, NULL, NULL,
399 NULL, NULL, NULL, NULL,
400 NULL, NULL, NULL, NULL,
401 NULL, NULL, NULL, NULL,
403 .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
405 [FEAT_SVM] = {
406 .feat_names = {
407 "npt", "lbrv", "svm-lock", "nrip-save",
408 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
409 NULL, NULL, "pause-filter", NULL,
410 "pfthreshold", NULL, NULL, NULL,
411 NULL, NULL, NULL, NULL,
412 NULL, NULL, NULL, NULL,
413 NULL, NULL, NULL, NULL,
414 NULL, NULL, NULL, NULL,
416 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
417 .tcg_features = TCG_SVM_FEATURES,
419 [FEAT_7_0_EBX] = {
420 .feat_names = {
421 "fsgsbase", "tsc-adjust", NULL, "bmi1",
422 "hle", "avx2", NULL, "smep",
423 "bmi2", "erms", "invpcid", "rtm",
424 NULL, NULL, "mpx", NULL,
425 "avx512f", "avx512dq", "rdseed", "adx",
426 "smap", "avx512ifma", "pcommit", "clflushopt",
427 "clwb", NULL, "avx512pf", "avx512er",
428 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
430 .cpuid_eax = 7,
431 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
432 .cpuid_reg = R_EBX,
433 .tcg_features = TCG_7_0_EBX_FEATURES,
435 [FEAT_7_0_ECX] = {
436 .feat_names = {
437 NULL, "avx512vbmi", "umip", "pku",
438 "ospke", NULL, NULL, NULL,
439 NULL, NULL, NULL, NULL,
440 NULL, NULL, "avx512-vpopcntdq", NULL,
441 "la57", NULL, NULL, NULL,
442 NULL, NULL, "rdpid", NULL,
443 NULL, NULL, NULL, NULL,
444 NULL, NULL, NULL, NULL,
446 .cpuid_eax = 7,
447 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
448 .cpuid_reg = R_ECX,
449 .tcg_features = TCG_7_0_ECX_FEATURES,
451 [FEAT_7_0_EDX] = {
452 .feat_names = {
453 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
454 NULL, NULL, NULL, NULL,
455 NULL, NULL, NULL, NULL,
456 NULL, NULL, NULL, NULL,
457 NULL, NULL, NULL, NULL,
458 NULL, NULL, NULL, NULL,
459 NULL, NULL, NULL, NULL,
460 NULL, NULL, NULL, NULL,
462 .cpuid_eax = 7,
463 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
464 .cpuid_reg = R_EDX,
465 .tcg_features = TCG_7_0_EDX_FEATURES,
467 [FEAT_8000_0007_EDX] = {
468 .feat_names = {
469 NULL, NULL, NULL, NULL,
470 NULL, NULL, NULL, NULL,
471 "invtsc", NULL, NULL, NULL,
472 NULL, NULL, NULL, NULL,
473 NULL, NULL, NULL, NULL,
474 NULL, NULL, NULL, NULL,
475 NULL, NULL, NULL, NULL,
476 NULL, NULL, NULL, NULL,
478 .cpuid_eax = 0x80000007,
479 .cpuid_reg = R_EDX,
480 .tcg_features = TCG_APM_FEATURES,
481 .unmigratable_flags = CPUID_APM_INVTSC,
483 [FEAT_XSAVE] = {
484 .feat_names = {
485 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
486 NULL, NULL, NULL, NULL,
487 NULL, NULL, NULL, NULL,
488 NULL, NULL, NULL, NULL,
489 NULL, NULL, NULL, NULL,
490 NULL, NULL, NULL, NULL,
491 NULL, NULL, NULL, NULL,
492 NULL, NULL, NULL, NULL,
494 .cpuid_eax = 0xd,
495 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
496 .cpuid_reg = R_EAX,
497 .tcg_features = TCG_XSAVE_FEATURES,
499 [FEAT_6_EAX] = {
500 .feat_names = {
501 NULL, NULL, "arat", NULL,
502 NULL, NULL, NULL, NULL,
503 NULL, NULL, NULL, NULL,
504 NULL, NULL, NULL, NULL,
505 NULL, NULL, NULL, NULL,
506 NULL, NULL, NULL, NULL,
507 NULL, NULL, NULL, NULL,
508 NULL, NULL, NULL, NULL,
510 .cpuid_eax = 6, .cpuid_reg = R_EAX,
511 .tcg_features = TCG_6_EAX_FEATURES,
513 [FEAT_XSAVE_COMP_LO] = {
514 .cpuid_eax = 0xD,
515 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
516 .cpuid_reg = R_EAX,
517 .tcg_features = ~0U,
518 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
519 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
520 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
521 XSTATE_PKRU_MASK,
523 [FEAT_XSAVE_COMP_HI] = {
524 .cpuid_eax = 0xD,
525 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
526 .cpuid_reg = R_EDX,
527 .tcg_features = ~0U,
531 typedef struct X86RegisterInfo32 {
532 /* Name of register */
533 const char *name;
534 /* QAPI enum value register */
535 X86CPURegister32 qapi_enum;
536 } X86RegisterInfo32;
538 #define REGISTER(reg) \
539 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
540 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
541 REGISTER(EAX),
542 REGISTER(ECX),
543 REGISTER(EDX),
544 REGISTER(EBX),
545 REGISTER(ESP),
546 REGISTER(EBP),
547 REGISTER(ESI),
548 REGISTER(EDI),
550 #undef REGISTER
/* Describes one XSAVE state component: the CPUID feature that enables it
 * and its offset/size within the XSAVE area. */
typedef struct ExtSaveArea {
    uint32_t feature, bits;
    uint32_t offset, size;
} ExtSaveArea;
557 static const ExtSaveArea x86_ext_save_areas[] = {
558 [XSTATE_FP_BIT] = {
559 /* x87 FP state component is always enabled if XSAVE is supported */
560 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
561 /* x87 state is in the legacy region of the XSAVE area */
562 .offset = 0,
563 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
565 [XSTATE_SSE_BIT] = {
566 /* SSE state component is always enabled if XSAVE is supported */
567 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
568 /* SSE state is in the legacy region of the XSAVE area */
569 .offset = 0,
570 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
572 [XSTATE_YMM_BIT] =
573 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
574 .offset = offsetof(X86XSaveArea, avx_state),
575 .size = sizeof(XSaveAVX) },
576 [XSTATE_BNDREGS_BIT] =
577 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
578 .offset = offsetof(X86XSaveArea, bndreg_state),
579 .size = sizeof(XSaveBNDREG) },
580 [XSTATE_BNDCSR_BIT] =
581 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
582 .offset = offsetof(X86XSaveArea, bndcsr_state),
583 .size = sizeof(XSaveBNDCSR) },
584 [XSTATE_OPMASK_BIT] =
585 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
586 .offset = offsetof(X86XSaveArea, opmask_state),
587 .size = sizeof(XSaveOpmask) },
588 [XSTATE_ZMM_Hi256_BIT] =
589 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
590 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
591 .size = sizeof(XSaveZMM_Hi256) },
592 [XSTATE_Hi16_ZMM_BIT] =
593 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
594 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
595 .size = sizeof(XSaveHi16_ZMM) },
596 [XSTATE_PKRU_BIT] =
597 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
598 .offset = offsetof(X86XSaveArea, pkru_state),
599 .size = sizeof(XSavePKRU) },
602 static uint32_t xsave_area_size(uint64_t mask)
604 int i;
605 uint64_t ret = 0;
607 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
608 const ExtSaveArea *esa = &x86_ext_save_areas[i];
609 if ((mask >> i) & 1) {
610 ret = MAX(ret, esa->offset + esa->size);
613 return ret;
616 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
618 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
619 cpu->env.features[FEAT_XSAVE_COMP_LO];
622 const char *get_register_name_32(unsigned int reg)
624 if (reg >= CPU_NB_REGS32) {
625 return NULL;
627 return x86_reg_info_32[reg].name;
631 * Returns the set of feature flags that are supported and migratable by
632 * QEMU, for a given FeatureWord.
634 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
636 FeatureWordInfo *wi = &feature_word_info[w];
637 uint32_t r = 0;
638 int i;
640 for (i = 0; i < 32; i++) {
641 uint32_t f = 1U << i;
643 /* If the feature name is known, it is implicitly considered migratable,
644 * unless it is explicitly set in unmigratable_flags */
645 if ((wi->migratable_flags & f) ||
646 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
647 r |= f;
650 return r;
653 void host_cpuid(uint32_t function, uint32_t count,
654 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
656 uint32_t vec[4];
658 #ifdef __x86_64__
659 asm volatile("cpuid"
660 : "=a"(vec[0]), "=b"(vec[1]),
661 "=c"(vec[2]), "=d"(vec[3])
662 : "0"(function), "c"(count) : "cc");
663 #elif defined(__i386__)
664 asm volatile("pusha \n\t"
665 "cpuid \n\t"
666 "mov %%eax, 0(%2) \n\t"
667 "mov %%ebx, 4(%2) \n\t"
668 "mov %%ecx, 8(%2) \n\t"
669 "mov %%edx, 12(%2) \n\t"
670 "popa"
671 : : "a"(function), "c"(count), "S"(vec)
672 : "memory", "cc");
673 #else
674 abort();
675 #endif
677 if (eax)
678 *eax = vec[0];
679 if (ebx)
680 *ebx = vec[1];
681 if (ecx)
682 *ecx = vec[2];
683 if (edx)
684 *edx = vec[3];
/* Query the host CPU's vendor string (leaf 0) and family/model/stepping
 * (leaf 1).  vendor must hold CPUID_VENDOR_SZ + 1 bytes; family, model and
 * stepping may each be NULL if not wanted.
 */
void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
{
    uint32_t eax, ebx, ecx, edx;

    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    if (family) {
        /* family = base family + extended family */
        *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    }
    if (model) {
        /* model = base model with extended model in the high nibble */
        *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    }
    if (stepping) {
        *stepping = eax & 0x0F;
    }
}
706 /* CPU class name definitions: */
708 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
709 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
711 /* Return type name for a given CPU model name
712 * Caller is responsible for freeing the returned string.
714 static char *x86_cpu_type_name(const char *model_name)
716 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
719 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
721 ObjectClass *oc;
722 char *typename;
724 if (cpu_model == NULL) {
725 return NULL;
728 typename = x86_cpu_type_name(cpu_model);
729 oc = object_class_by_name(typename);
730 g_free(typename);
731 return oc;
734 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
736 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
737 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
738 return g_strndup(class_name,
739 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
742 struct X86CPUDefinition {
743 const char *name;
744 uint32_t level;
745 uint32_t xlevel;
746 /* vendor is zero-terminated, 12 character ASCII string */
747 char vendor[CPUID_VENDOR_SZ + 1];
748 int family;
749 int model;
750 int stepping;
751 FeatureWordArray features;
752 char model_id[48];
755 static X86CPUDefinition builtin_x86_defs[] = {
757 .name = "qemu64",
758 .level = 0xd,
759 .vendor = CPUID_VENDOR_AMD,
760 .family = 6,
761 .model = 6,
762 .stepping = 3,
763 .features[FEAT_1_EDX] =
764 PPRO_FEATURES |
765 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
766 CPUID_PSE36,
767 .features[FEAT_1_ECX] =
768 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
769 .features[FEAT_8000_0001_EDX] =
770 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
771 .features[FEAT_8000_0001_ECX] =
772 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
773 .xlevel = 0x8000000A,
774 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
777 .name = "phenom",
778 .level = 5,
779 .vendor = CPUID_VENDOR_AMD,
780 .family = 16,
781 .model = 2,
782 .stepping = 3,
783 /* Missing: CPUID_HT */
784 .features[FEAT_1_EDX] =
785 PPRO_FEATURES |
786 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
787 CPUID_PSE36 | CPUID_VME,
788 .features[FEAT_1_ECX] =
789 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
790 CPUID_EXT_POPCNT,
791 .features[FEAT_8000_0001_EDX] =
792 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
793 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
794 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
795 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
796 CPUID_EXT3_CR8LEG,
797 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
798 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
799 .features[FEAT_8000_0001_ECX] =
800 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
801 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
802 /* Missing: CPUID_SVM_LBRV */
803 .features[FEAT_SVM] =
804 CPUID_SVM_NPT,
805 .xlevel = 0x8000001A,
806 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
809 .name = "core2duo",
810 .level = 10,
811 .vendor = CPUID_VENDOR_INTEL,
812 .family = 6,
813 .model = 15,
814 .stepping = 11,
815 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
816 .features[FEAT_1_EDX] =
817 PPRO_FEATURES |
818 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
819 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
820 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
821 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
822 .features[FEAT_1_ECX] =
823 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
824 CPUID_EXT_CX16,
825 .features[FEAT_8000_0001_EDX] =
826 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
827 .features[FEAT_8000_0001_ECX] =
828 CPUID_EXT3_LAHF_LM,
829 .xlevel = 0x80000008,
830 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
833 .name = "kvm64",
834 .level = 0xd,
835 .vendor = CPUID_VENDOR_INTEL,
836 .family = 15,
837 .model = 6,
838 .stepping = 1,
839 /* Missing: CPUID_HT */
840 .features[FEAT_1_EDX] =
841 PPRO_FEATURES | CPUID_VME |
842 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
843 CPUID_PSE36,
844 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
845 .features[FEAT_1_ECX] =
846 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
847 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
848 .features[FEAT_8000_0001_EDX] =
849 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
850 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
851 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
852 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
853 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
854 .features[FEAT_8000_0001_ECX] =
856 .xlevel = 0x80000008,
857 .model_id = "Common KVM processor"
860 .name = "qemu32",
861 .level = 4,
862 .vendor = CPUID_VENDOR_INTEL,
863 .family = 6,
864 .model = 6,
865 .stepping = 3,
866 .features[FEAT_1_EDX] =
867 PPRO_FEATURES,
868 .features[FEAT_1_ECX] =
869 CPUID_EXT_SSE3,
870 .xlevel = 0x80000004,
871 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
874 .name = "kvm32",
875 .level = 5,
876 .vendor = CPUID_VENDOR_INTEL,
877 .family = 15,
878 .model = 6,
879 .stepping = 1,
880 .features[FEAT_1_EDX] =
881 PPRO_FEATURES | CPUID_VME |
882 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
883 .features[FEAT_1_ECX] =
884 CPUID_EXT_SSE3,
885 .features[FEAT_8000_0001_ECX] =
887 .xlevel = 0x80000008,
888 .model_id = "Common 32-bit KVM processor"
891 .name = "coreduo",
892 .level = 10,
893 .vendor = CPUID_VENDOR_INTEL,
894 .family = 6,
895 .model = 14,
896 .stepping = 8,
897 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
898 .features[FEAT_1_EDX] =
899 PPRO_FEATURES | CPUID_VME |
900 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
901 CPUID_SS,
902 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
903 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
904 .features[FEAT_1_ECX] =
905 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
906 .features[FEAT_8000_0001_EDX] =
907 CPUID_EXT2_NX,
908 .xlevel = 0x80000008,
909 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
912 .name = "486",
913 .level = 1,
914 .vendor = CPUID_VENDOR_INTEL,
915 .family = 4,
916 .model = 8,
917 .stepping = 0,
918 .features[FEAT_1_EDX] =
919 I486_FEATURES,
920 .xlevel = 0,
923 .name = "pentium",
924 .level = 1,
925 .vendor = CPUID_VENDOR_INTEL,
926 .family = 5,
927 .model = 4,
928 .stepping = 3,
929 .features[FEAT_1_EDX] =
930 PENTIUM_FEATURES,
931 .xlevel = 0,
934 .name = "pentium2",
935 .level = 2,
936 .vendor = CPUID_VENDOR_INTEL,
937 .family = 6,
938 .model = 5,
939 .stepping = 2,
940 .features[FEAT_1_EDX] =
941 PENTIUM2_FEATURES,
942 .xlevel = 0,
945 .name = "pentium3",
946 .level = 3,
947 .vendor = CPUID_VENDOR_INTEL,
948 .family = 6,
949 .model = 7,
950 .stepping = 3,
951 .features[FEAT_1_EDX] =
952 PENTIUM3_FEATURES,
953 .xlevel = 0,
956 .name = "athlon",
957 .level = 2,
958 .vendor = CPUID_VENDOR_AMD,
959 .family = 6,
960 .model = 2,
961 .stepping = 3,
962 .features[FEAT_1_EDX] =
963 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
964 CPUID_MCA,
965 .features[FEAT_8000_0001_EDX] =
966 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
967 .xlevel = 0x80000008,
968 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
971 .name = "n270",
972 .level = 10,
973 .vendor = CPUID_VENDOR_INTEL,
974 .family = 6,
975 .model = 28,
976 .stepping = 2,
977 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
978 .features[FEAT_1_EDX] =
979 PPRO_FEATURES |
980 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
981 CPUID_ACPI | CPUID_SS,
982 /* Some CPUs got no CPUID_SEP */
983 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
984 * CPUID_EXT_XTPR */
985 .features[FEAT_1_ECX] =
986 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
987 CPUID_EXT_MOVBE,
988 .features[FEAT_8000_0001_EDX] =
989 CPUID_EXT2_NX,
990 .features[FEAT_8000_0001_ECX] =
991 CPUID_EXT3_LAHF_LM,
992 .xlevel = 0x80000008,
993 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
996 .name = "Conroe",
997 .level = 10,
998 .vendor = CPUID_VENDOR_INTEL,
999 .family = 6,
1000 .model = 15,
1001 .stepping = 3,
1002 .features[FEAT_1_EDX] =
1003 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1004 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1005 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1006 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1007 CPUID_DE | CPUID_FP87,
1008 .features[FEAT_1_ECX] =
1009 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1010 .features[FEAT_8000_0001_EDX] =
1011 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1012 .features[FEAT_8000_0001_ECX] =
1013 CPUID_EXT3_LAHF_LM,
1014 .xlevel = 0x80000008,
1015 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1018 .name = "Penryn",
1019 .level = 10,
1020 .vendor = CPUID_VENDOR_INTEL,
1021 .family = 6,
1022 .model = 23,
1023 .stepping = 3,
1024 .features[FEAT_1_EDX] =
1025 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1026 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1027 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1028 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1029 CPUID_DE | CPUID_FP87,
1030 .features[FEAT_1_ECX] =
1031 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1032 CPUID_EXT_SSE3,
1033 .features[FEAT_8000_0001_EDX] =
1034 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1035 .features[FEAT_8000_0001_ECX] =
1036 CPUID_EXT3_LAHF_LM,
1037 .xlevel = 0x80000008,
1038 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1041 .name = "Nehalem",
1042 .level = 11,
1043 .vendor = CPUID_VENDOR_INTEL,
1044 .family = 6,
1045 .model = 26,
1046 .stepping = 3,
1047 .features[FEAT_1_EDX] =
1048 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1049 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1050 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1051 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1052 CPUID_DE | CPUID_FP87,
1053 .features[FEAT_1_ECX] =
1054 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1055 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1056 .features[FEAT_8000_0001_EDX] =
1057 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1058 .features[FEAT_8000_0001_ECX] =
1059 CPUID_EXT3_LAHF_LM,
1060 .xlevel = 0x80000008,
1061 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1064 .name = "Westmere",
1065 .level = 11,
1066 .vendor = CPUID_VENDOR_INTEL,
1067 .family = 6,
1068 .model = 44,
1069 .stepping = 1,
1070 .features[FEAT_1_EDX] =
1071 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1072 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1073 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1074 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1075 CPUID_DE | CPUID_FP87,
1076 .features[FEAT_1_ECX] =
1077 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1078 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1079 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1080 .features[FEAT_8000_0001_EDX] =
1081 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1082 .features[FEAT_8000_0001_ECX] =
1083 CPUID_EXT3_LAHF_LM,
1084 .features[FEAT_6_EAX] =
1085 CPUID_6_EAX_ARAT,
1086 .xlevel = 0x80000008,
1087 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1090 .name = "SandyBridge",
1091 .level = 0xd,
1092 .vendor = CPUID_VENDOR_INTEL,
1093 .family = 6,
1094 .model = 42,
1095 .stepping = 1,
1096 .features[FEAT_1_EDX] =
1097 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1098 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1099 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1100 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1101 CPUID_DE | CPUID_FP87,
1102 .features[FEAT_1_ECX] =
1103 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1104 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1105 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1106 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1107 CPUID_EXT_SSE3,
1108 .features[FEAT_8000_0001_EDX] =
1109 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1110 CPUID_EXT2_SYSCALL,
1111 .features[FEAT_8000_0001_ECX] =
1112 CPUID_EXT3_LAHF_LM,
1113 .features[FEAT_XSAVE] =
1114 CPUID_XSAVE_XSAVEOPT,
1115 .features[FEAT_6_EAX] =
1116 CPUID_6_EAX_ARAT,
1117 .xlevel = 0x80000008,
1118 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1121 .name = "IvyBridge",
1122 .level = 0xd,
1123 .vendor = CPUID_VENDOR_INTEL,
1124 .family = 6,
1125 .model = 58,
1126 .stepping = 9,
1127 .features[FEAT_1_EDX] =
1128 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1129 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1130 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1131 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1132 CPUID_DE | CPUID_FP87,
1133 .features[FEAT_1_ECX] =
1134 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1135 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1136 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1137 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1138 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1139 .features[FEAT_7_0_EBX] =
1140 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1141 CPUID_7_0_EBX_ERMS,
1142 .features[FEAT_8000_0001_EDX] =
1143 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1144 CPUID_EXT2_SYSCALL,
1145 .features[FEAT_8000_0001_ECX] =
1146 CPUID_EXT3_LAHF_LM,
1147 .features[FEAT_XSAVE] =
1148 CPUID_XSAVE_XSAVEOPT,
1149 .features[FEAT_6_EAX] =
1150 CPUID_6_EAX_ARAT,
1151 .xlevel = 0x80000008,
1152 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1155 .name = "Haswell-noTSX",
1156 .level = 0xd,
1157 .vendor = CPUID_VENDOR_INTEL,
1158 .family = 6,
1159 .model = 60,
1160 .stepping = 1,
1161 .features[FEAT_1_EDX] =
1162 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1163 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1164 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1165 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1166 CPUID_DE | CPUID_FP87,
1167 .features[FEAT_1_ECX] =
1168 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1169 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1170 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1171 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1172 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1173 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1174 .features[FEAT_8000_0001_EDX] =
1175 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1176 CPUID_EXT2_SYSCALL,
1177 .features[FEAT_8000_0001_ECX] =
1178 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1179 .features[FEAT_7_0_EBX] =
1180 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1181 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1182 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1183 .features[FEAT_XSAVE] =
1184 CPUID_XSAVE_XSAVEOPT,
1185 .features[FEAT_6_EAX] =
1186 CPUID_6_EAX_ARAT,
1187 .xlevel = 0x80000008,
1188 .model_id = "Intel Core Processor (Haswell, no TSX)",
1189 }, {
1190 .name = "Haswell",
1191 .level = 0xd,
1192 .vendor = CPUID_VENDOR_INTEL,
1193 .family = 6,
1194 .model = 60,
1195 .stepping = 4,
1196 .features[FEAT_1_EDX] =
1197 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1198 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1199 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1200 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1201 CPUID_DE | CPUID_FP87,
1202 .features[FEAT_1_ECX] =
1203 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1204 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1205 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1206 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1207 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1208 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1209 .features[FEAT_8000_0001_EDX] =
1210 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1211 CPUID_EXT2_SYSCALL,
1212 .features[FEAT_8000_0001_ECX] =
1213 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1214 .features[FEAT_7_0_EBX] =
1215 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1216 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1217 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1218 CPUID_7_0_EBX_RTM,
1219 .features[FEAT_XSAVE] =
1220 CPUID_XSAVE_XSAVEOPT,
1221 .features[FEAT_6_EAX] =
1222 CPUID_6_EAX_ARAT,
1223 .xlevel = 0x80000008,
1224 .model_id = "Intel Core Processor (Haswell)",
1227 .name = "Broadwell-noTSX",
1228 .level = 0xd,
1229 .vendor = CPUID_VENDOR_INTEL,
1230 .family = 6,
1231 .model = 61,
1232 .stepping = 2,
1233 .features[FEAT_1_EDX] =
1234 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1235 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1236 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1237 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1238 CPUID_DE | CPUID_FP87,
1239 .features[FEAT_1_ECX] =
1240 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1241 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1242 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1243 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1244 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1245 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1246 .features[FEAT_8000_0001_EDX] =
1247 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1248 CPUID_EXT2_SYSCALL,
1249 .features[FEAT_8000_0001_ECX] =
1250 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1251 .features[FEAT_7_0_EBX] =
1252 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1253 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1254 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1255 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1256 CPUID_7_0_EBX_SMAP,
1257 .features[FEAT_XSAVE] =
1258 CPUID_XSAVE_XSAVEOPT,
1259 .features[FEAT_6_EAX] =
1260 CPUID_6_EAX_ARAT,
1261 .xlevel = 0x80000008,
1262 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1265 .name = "Broadwell",
1266 .level = 0xd,
1267 .vendor = CPUID_VENDOR_INTEL,
1268 .family = 6,
1269 .model = 61,
1270 .stepping = 2,
1271 .features[FEAT_1_EDX] =
1272 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1273 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1274 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1275 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1276 CPUID_DE | CPUID_FP87,
1277 .features[FEAT_1_ECX] =
1278 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1279 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1280 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1281 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1282 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1283 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1284 .features[FEAT_8000_0001_EDX] =
1285 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1286 CPUID_EXT2_SYSCALL,
1287 .features[FEAT_8000_0001_ECX] =
1288 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1289 .features[FEAT_7_0_EBX] =
1290 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1291 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1292 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1293 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1294 CPUID_7_0_EBX_SMAP,
1295 .features[FEAT_XSAVE] =
1296 CPUID_XSAVE_XSAVEOPT,
1297 .features[FEAT_6_EAX] =
1298 CPUID_6_EAX_ARAT,
1299 .xlevel = 0x80000008,
1300 .model_id = "Intel Core Processor (Broadwell)",
1303 .name = "Skylake-Client",
1304 .level = 0xd,
1305 .vendor = CPUID_VENDOR_INTEL,
1306 .family = 6,
1307 .model = 94,
1308 .stepping = 3,
1309 .features[FEAT_1_EDX] =
1310 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1311 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1312 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1313 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1314 CPUID_DE | CPUID_FP87,
1315 .features[FEAT_1_ECX] =
1316 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1317 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1318 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1319 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1320 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1321 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1322 .features[FEAT_8000_0001_EDX] =
1323 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1324 CPUID_EXT2_SYSCALL,
1325 .features[FEAT_8000_0001_ECX] =
1326 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1327 .features[FEAT_7_0_EBX] =
1328 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1329 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1330 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1331 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1332 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1333 /* Missing: XSAVES (not supported by some Linux versions,
1334 * including v4.1 to v4.6).
1335 * KVM doesn't yet expose any XSAVES state save component,
1336 * and the only one defined in Skylake (processor tracing)
1337 * probably will block migration anyway.
1339 .features[FEAT_XSAVE] =
1340 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1341 CPUID_XSAVE_XGETBV1,
1342 .features[FEAT_6_EAX] =
1343 CPUID_6_EAX_ARAT,
1344 .xlevel = 0x80000008,
1345 .model_id = "Intel Core Processor (Skylake)",
1348 .name = "Opteron_G1",
1349 .level = 5,
1350 .vendor = CPUID_VENDOR_AMD,
1351 .family = 15,
1352 .model = 6,
1353 .stepping = 1,
1354 .features[FEAT_1_EDX] =
1355 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1356 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1357 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1358 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1359 CPUID_DE | CPUID_FP87,
1360 .features[FEAT_1_ECX] =
1361 CPUID_EXT_SSE3,
1362 .features[FEAT_8000_0001_EDX] =
1363 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1364 .xlevel = 0x80000008,
1365 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1368 .name = "Opteron_G2",
1369 .level = 5,
1370 .vendor = CPUID_VENDOR_AMD,
1371 .family = 15,
1372 .model = 6,
1373 .stepping = 1,
1374 .features[FEAT_1_EDX] =
1375 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1376 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1377 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1378 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1379 CPUID_DE | CPUID_FP87,
1380 .features[FEAT_1_ECX] =
1381 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1382 /* Missing: CPUID_EXT2_RDTSCP */
1383 .features[FEAT_8000_0001_EDX] =
1384 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1385 .features[FEAT_8000_0001_ECX] =
1386 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1387 .xlevel = 0x80000008,
1388 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1391 .name = "Opteron_G3",
1392 .level = 5,
1393 .vendor = CPUID_VENDOR_AMD,
1394 .family = 16,
1395 .model = 2,
1396 .stepping = 3,
1397 .features[FEAT_1_EDX] =
1398 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1399 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1400 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1401 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1402 CPUID_DE | CPUID_FP87,
1403 .features[FEAT_1_ECX] =
1404 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1405 CPUID_EXT_SSE3,
1406 /* Missing: CPUID_EXT2_RDTSCP */
1407 .features[FEAT_8000_0001_EDX] =
1408 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1409 .features[FEAT_8000_0001_ECX] =
1410 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1411 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1412 .xlevel = 0x80000008,
1413 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1416 .name = "Opteron_G4",
1417 .level = 0xd,
1418 .vendor = CPUID_VENDOR_AMD,
1419 .family = 21,
1420 .model = 1,
1421 .stepping = 2,
1422 .features[FEAT_1_EDX] =
1423 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1424 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1425 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1426 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1427 CPUID_DE | CPUID_FP87,
1428 .features[FEAT_1_ECX] =
1429 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1430 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1431 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1432 CPUID_EXT_SSE3,
1433 /* Missing: CPUID_EXT2_RDTSCP */
1434 .features[FEAT_8000_0001_EDX] =
1435 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
1436 CPUID_EXT2_SYSCALL,
1437 .features[FEAT_8000_0001_ECX] =
1438 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1439 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1440 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1441 CPUID_EXT3_LAHF_LM,
1442 /* no xsaveopt! */
1443 .xlevel = 0x8000001A,
1444 .model_id = "AMD Opteron 62xx class CPU",
1447 .name = "Opteron_G5",
1448 .level = 0xd,
1449 .vendor = CPUID_VENDOR_AMD,
1450 .family = 21,
1451 .model = 2,
1452 .stepping = 0,
1453 .features[FEAT_1_EDX] =
1454 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1455 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1456 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1457 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1458 CPUID_DE | CPUID_FP87,
1459 .features[FEAT_1_ECX] =
1460 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1461 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1462 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1463 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1464 /* Missing: CPUID_EXT2_RDTSCP */
1465 .features[FEAT_8000_0001_EDX] =
1466 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
1467 CPUID_EXT2_SYSCALL,
1468 .features[FEAT_8000_0001_ECX] =
1469 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1470 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1471 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1472 CPUID_EXT3_LAHF_LM,
1473 /* no xsaveopt! */
1474 .xlevel = 0x8000001A,
1475 .model_id = "AMD Opteron 63xx class CPU",
/* A (qdev property name, value) pair, used in tables of CPU property
 * defaults that get applied when a CPU object is created. */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;
/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 * The list is NULL-terminated; entries can be overridden at runtime
 * through x86_cpu_change_kvm_default().
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "x2apic", "on" },
    { "acpi", "off" },
    { "monitor", "off" },
    { "svm", "off" },
    { NULL, NULL },
};
/* TCG-specific defaults that override all CPU models when using TCG.
 * NULL-terminated, same layout as kvm_default_props above.
 */
static PropValue tcg_default_props[] = {
    { "vme", "off" },
    { NULL, NULL },
};
1508 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1510 PropValue *pv;
1511 for (pv = kvm_default_props; pv->prop; pv++) {
1512 if (!strcmp(pv->prop, prop)) {
1513 pv->value = value;
1514 break;
1518 /* It is valid to call this function only for properties that
1519 * are already present in the kvm_default_props table.
1521 assert(pv->prop);
1524 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1525 bool migratable_only);
/* Return true if the host KVM supports LMCE (local machine-check
 * exception), i.e. MCG_LMCE_P is set in the supported MCE capabilities.
 * Without CONFIG_KVM, mce_cap stays 0 and the result is always false.
 */
static bool lmce_supported(void)
{
    uint64_t mce_cap = 0;

#ifdef CONFIG_KVM
    if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
        return false;
    }
#endif

    return !!(mce_cap & MCG_LMCE_P);
}
/* Fill @str with the host CPU brand string read via CPUID leaves
 * 0x80000002..0x80000004 (16 bytes per leaf, EAX..EDX in order).
 * Writes exactly 48 bytes; no NUL terminator is appended here.
 * NOTE(review): assumes @str has room for 48 bytes -- confirm at callers.
 * Always returns 0.
 */
static int cpu_x86_fill_model_id(char *str)
{
    int leaf;

    for (leaf = 0; leaf < 3; leaf++) {
        uint32_t regs[4] = { 0, 0, 0, 0 };
        int r;

        host_cpuid(0x80000002 + leaf, 0,
                   &regs[0], &regs[1], &regs[2], &regs[3]);
        for (r = 0; r < 4; r++) {
            memcpy(str + leaf * 16 + r * 4, &regs[r], 4);
        }
    }
    return 0;
}
/* qdev properties specific to the "max" CPU model */
static Property max_x86_cpu_properties[] = {
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};
1561 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
1563 DeviceClass *dc = DEVICE_CLASS(oc);
1564 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1566 xcc->ordering = 9;
1568 xcc->model_description =
1569 "Enables all features supported by the accelerator in the current host";
1571 dc->props = max_x86_cpu_properties;
1574 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
/* Instance init for the "max" CPU model.
 *
 * With KVM: build an X86CPUDefinition from the host's CPUID leaves
 * (vendor, family/model/stepping, brand string), load it into @obj, and
 * record the minimum CPUID levels KVM supports.  Without KVM (TCG): use a
 * fixed generic AMD-vendor 6/6/3 identity.  In both cases "pmu" is
 * enabled at the end.
 */
static void max_x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    KVMState *s = kvm_state;

    /* We can't fill the features array here because we don't know yet if
     * "migratable" is true or false.
     */
    cpu->max_features = true;

    if (kvm_enabled()) {
        X86CPUDefinition host_cpudef = { };
        uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

        /* Leaf 0: vendor string lives in EBX/EDX/ECX (in that order) */
        host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);

        /* Leaf 1 EAX: stepping [3:0], model [7:4]+ext [19:16],
         * family [11:8]+ext [27:20] */
        host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
        host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
        host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
        host_cpudef.stepping = eax & 0x0F;

        cpu_x86_fill_model_id(host_cpudef.model_id);

        x86_cpu_load_def(cpu, &host_cpudef, &error_abort);

        env->cpuid_min_level =
            kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
        env->cpuid_min_xlevel =
            kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
        env->cpuid_min_xlevel2 =
            kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);

        if (lmce_supported()) {
            object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
        }
    } else {
        object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
                                "vendor", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
        object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
        object_property_set_str(OBJECT(cpu),
                                "QEMU TCG CPU version " QEMU_HW_VERSION,
                                "model-id", &error_abort);
    }

    object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}
/* QOM type registration for the "max" CPU model */
static const TypeInfo max_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("max"),
    .parent = TYPE_X86_CPU,
    .instance_init = max_x86_cpu_initfn,
    .class_init = max_x86_cpu_class_init,
};
1634 #ifdef CONFIG_KVM
1636 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1638 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1640 xcc->kvm_required = true;
1641 xcc->ordering = 8;
1643 xcc->model_description =
1644 "KVM processor with all supported host features "
1645 "(only available in KVM mode)";
/* QOM type registration for the "host" CPU model; inherits the "max"
 * model's instance_init. */
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = X86_CPU_TYPE_NAME("max"),
    .class_init = host_x86_cpu_class_init,
};
1654 #endif
/* Print a warning to stderr for every bit set in @mask: each such bit
 * names a feature in word @w that was requested but is not supported by
 * the current accelerator (KVM host or TCG).
 */
static void report_unavailable_features(FeatureWord w, uint32_t mask)
{
    FeatureWordInfo *f = &feature_word_info[w];
    int i;

    for (i = 0; i < 32; ++i) {
        if ((1UL << i) & mask) {
            const char *reg = get_register_name_32(f->cpuid_reg);
            assert(reg);
            fprintf(stderr, "warning: %s doesn't support requested feature: "
                    "CPUID.%02XH:%s%s%s [bit %d]\n",
                    kvm_enabled() ? "host" : "TCG",
                    f->cpuid_eax, reg,
                    f->feat_names[i] ? "." : "",
                    f->feat_names[i] ? f->feat_names[i] : "", i);
        }
    }
}
/* QOM getter for the "family" property: decode the displayed family from
 * cpuid_version.  Base family is bits [11:8]; when it is 0xf the extended
 * family (bits [27:20]) is added on top, per the CPUID leaf 1 encoding.
 */
static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
                                         const char *name, void *opaque,
                                         Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = (env->cpuid_version >> 8) & 0xf;
    if (value == 0xf) {
        value += (env->cpuid_version >> 20) & 0xff;
    }
    visit_type_int(v, name, &value, errp);
}
/* QOM setter for the "family" property: encode @value into cpuid_version.
 * Valid range is 0..0xff + 0xf (base family saturates at 0xf and the
 * remainder spills into the extended family field, bits [27:20]).
 */
static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
                                         const char *name, void *opaque,
                                         Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff + 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    /* Clear base-family [11:8] and extended-family [27:20], then re-encode */
    env->cpuid_version &= ~0xff00f00;
    if (value > 0x0f) {
        env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
    } else {
        env->cpuid_version |= value << 8;
    }
}
/* QOM getter for the "model" property: low nibble comes from
 * cpuid_version bits [7:4], high nibble from the extended model field,
 * bits [19:16].
 */
static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = (env->cpuid_version >> 4) & 0xf;
    value |= ((env->cpuid_version >> 16) & 0xf) << 4;
    visit_type_int(v, name, &value, errp);
}
/* QOM setter for the "model" property: valid range 0..0xff.  The low
 * nibble goes to cpuid_version bits [7:4] and the high nibble to the
 * extended model field, bits [19:16].
 */
static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    env->cpuid_version &= ~0xf00f0;
    env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
}
/* QOM getter for the "stepping" property: cpuid_version bits [3:0] */
static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
                                           const char *name, void *opaque,
                                           Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = env->cpuid_version & 0xf;
    visit_type_int(v, name, &value, errp);
}
/* QOM setter for the "stepping" property: valid range 0..0xf, stored in
 * cpuid_version bits [3:0].
 */
static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
                                           const char *name, void *opaque,
                                           Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    env->cpuid_version &= ~0xf;
    env->cpuid_version |= value & 0xf;
}
/* QOM getter for the "vendor" property: return the 12-character vendor
 * string decoded from the three cpuid_vendor words.  The returned buffer
 * is heap-allocated; ownership passes to the caller (freed with g_free).
 */
static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    char *value;

    value = g_malloc(CPUID_VENDOR_SZ + 1);
    x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
                             env->cpuid_vendor3);
    return value;
}
/* QOM setter for the "vendor" property: @value must be exactly
 * CPUID_VENDOR_SZ (12) characters.  The string is packed into the three
 * vendor words, 4 bytes each, little-endian.
 */
static void x86_cpuid_set_vendor(Object *obj, const char *value,
                                 Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int i;

    if (strlen(value) != CPUID_VENDOR_SZ) {
        error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
        return;
    }

    env->cpuid_vendor1 = 0;
    env->cpuid_vendor2 = 0;
    env->cpuid_vendor3 = 0;
    for (i = 0; i < 4; i++) {
        env->cpuid_vendor1 |= ((uint8_t)value[i    ]) << (8 * i);
        env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
        env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
    }
}
/* QOM getter for the "model-id" property: unpack the 48-byte brand
 * string from the cpuid_model word array (4 chars per 32-bit word,
 * little-endian).  Returns a heap-allocated, NUL-terminated string owned
 * by the caller.
 */
static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    char *value;
    int i;

    value = g_malloc(48 + 1);
    for (i = 0; i < 48; i++) {
        value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
    }
    value[48] = '\0';
    return value;
}
1846 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1847 Error **errp)
1849 X86CPU *cpu = X86_CPU(obj);
1850 CPUX86State *env = &cpu->env;
1851 int c, len, i;
1853 if (model_id == NULL) {
1854 model_id = "";
1856 len = strlen(model_id);
1857 memset(env->cpuid_model, 0, 48);
1858 for (i = 0; i < 48; i++) {
1859 if (i >= len) {
1860 c = '\0';
1861 } else {
1862 c = (uint8_t)model_id[i];
1864 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
/* QOM getter for the "tsc-frequency" property: report env.tsc_khz in Hz */
static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    int64_t value;

    value = cpu->env.tsc_khz * 1000;
    visit_type_int(v, name, &value, errp);
}
/* QOM setter for the "tsc-frequency" property: accept a value in Hz
 * (0..INT64_MAX) and store it in kHz in both tsc_khz and user_tsc_khz.
 */
static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    const int64_t min = 0;
    const int64_t max = INT64_MAX;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
}
/* Generic getter for "feature-words" and "filtered-features" properties.
 * @opaque points at the uint32_t feature-word array to report.  Builds a
 * stack-allocated X86CPUFeatureWordInfoList describing every feature word
 * (CPUID leaf, optional ECX input, register, current bits) and hands it
 * to the visitor.
 */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        qwi->cpuid_input_eax = wi->cpuid_eax;
        qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid_ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
}
/* QOM getter for the "hv-spinlocks" property */
static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    int64_t value = cpu->hyperv_spinlock_attempts;

    visit_type_int(v, name, &value, errp);
}
/* QOM setter for the "hv-spinlocks" property.  Accepted range is
 * 0xFFF..UINT_MAX (0xFFF is the smallest retry count the code allows).
 */
static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const int64_t min = 0xFFF;
    const int64_t max = UINT_MAX;
    X86CPU *cpu = X86_CPU(obj);
    Error *err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    if (value < min || value > max) {
        error_setg(errp, "Property %s.%s doesn't take value %" PRId64
                   " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
                   object_get_typename(obj), name ? name : "null",
                   value, min, max);
        return;
    }
    cpu->hyperv_spinlock_attempts = value;
}
/* PropertyInfo wiring the hv-spinlocks getter/setter into qdev */
static PropertyInfo qdev_prop_spinlocks = {
    .name  = "int",
    .get   = x86_get_hv_spinlocks,
    .set   = x86_set_hv_spinlocks,
};
/* Convert all '_' in a feature string option name to '-', to make the
 * feature name conform to the QOM property naming rule, which uses '-'
 * instead of '_'.  Modifies @s in place.
 */
static inline void feat2prop(char *s)
{
    char *p;

    for (p = s; *p != '\0'; p++) {
        if (*p == '_') {
            *p = '-';
        }
    }
}
/* Return the feature property name for a feature flag bit.
 *
 * XSAVE component words (FEAT_XSAVE_COMP_LO/HI) have no property names of
 * their own: their bits are enabled automatically by other features, so
 * the name of the feature that owns the save area is returned instead.
 */
static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
{
    if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
        /* HI word covers components 32..63 */
        int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;

        if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
            x86_ext_save_areas[comp].bits) {
            w = x86_ext_save_areas[comp].feature;
            bitnr = ctz32(x86_ext_save_areas[comp].bits);
        }
    }

    assert(bitnr < 32);
    assert(w < FEATURE_WORDS);
    return feature_word_info[w].feat_names[bitnr];
}
/* Compatibility hack to maintain legacy +-feat semantics, where +-feat
 * overwrites any feature set by feat=on|feat even if the latter is parsed
 * after +-feat
 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
 */
static GList *plus_features, *minus_features;
/* GCompareFunc adapter around g_strcmp0(), for g_list_find_custom() */
static gint compare_string(gconstpointer a, gconstpointer b)
{
    return g_strcmp0(a, b);
}
/* Parse "+feature,-feature,feature=foo" CPU feature string.
 *
 * Registers each "feature=value" pair as a global property default for
 * @typename.  "+feat"/"-feat" legacy syntax is collected into the
 * plus_features/minus_features lists instead.  Runs at most once per
 * process (guarded by cpu_globals_initialized).  @features is modified
 * in place by strtok()/'=' splitting.
 */
static void x86_cpu_parse_featurestr(const char *typename, char *features,
                                     Error **errp)
{
    char *featurestr; /* Single 'key=value" string being parsed */
    static bool cpu_globals_initialized;
    bool ambiguous = false;

    if (cpu_globals_initialized) {
        return;
    }
    cpu_globals_initialized = true;

    if (!features) {
        return;
    }

    for (featurestr = strtok(features, ",");
         featurestr;
         featurestr = strtok(NULL, ",")) {
        const char *name;
        const char *val = NULL;
        char *eq = NULL;
        char num[32];
        GlobalProperty *prop;

        /* Compatibility syntax: */
        if (featurestr[0] == '+') {
            plus_features = g_list_append(plus_features,
                                          g_strdup(featurestr + 1));
            continue;
        } else if (featurestr[0] == '-') {
            minus_features = g_list_append(minus_features,
                                           g_strdup(featurestr + 1));
            continue;
        }

        /* Split "name=value"; a bare name means "name=on" */
        eq = strchr(featurestr, '=');
        if (eq) {
            *eq++ = 0;
            val = eq;
        } else {
            val = "on";
        }

        feat2prop(featurestr);
        name = featurestr;

        /* Warn when the same feature appears in both legacy and
         * property syntax -- resolution order is surprising */
        if (g_list_find_custom(plus_features, name, compare_string)) {
            error_report("warning: Ambiguous CPU model string. "
                         "Don't mix both \"+%s\" and \"%s=%s\"",
                         name, name, val);
            ambiguous = true;
        }
        if (g_list_find_custom(minus_features, name, compare_string)) {
            error_report("warning: Ambiguous CPU model string. "
                         "Don't mix both \"-%s\" and \"%s=%s\"",
                         name, name, val);
            ambiguous = true;
        }

        /* Special case: "tsc-freq" takes a size-style suffixed value and
         * maps to the "tsc-frequency" property */
        if (!strcmp(name, "tsc-freq")) {
            int ret;
            uint64_t tsc_freq;

            ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
            if (ret < 0 || tsc_freq > INT64_MAX) {
                error_setg(errp, "bad numerical value %s", val);
                return;
            }
            snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
            val = num;
            name = "tsc-frequency";
        }

        prop = g_new0(typeof(*prop), 1);
        prop->driver = typename;
        prop->property = g_strdup(name);
        prop->value = g_strdup(val);
        prop->errp = &error_fatal;
        qdev_prop_register_global(prop);
    }

    if (ambiguous) {
        error_report("warning: Compatibility of ambiguous CPU model "
                     "strings won't be kept on future QEMU versions");
    }
}
2104 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
2105 static int x86_cpu_filter_features(X86CPU *cpu);
2107 /* Check for missing features that may prevent the CPU class from
2108 * running using the current machine and accelerator.
2110 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
2111 strList **missing_feats)
2113 X86CPU *xc;
2114 FeatureWord w;
2115 Error *err = NULL;
2116 strList **next = missing_feats;
2118 if (xcc->kvm_required && !kvm_enabled()) {
2119 strList *new = g_new0(strList, 1);
2120 new->value = g_strdup("kvm");;
2121 *missing_feats = new;
2122 return;
2125 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
2127 x86_cpu_expand_features(xc, &err);
2128 if (err) {
2129 /* Errors at x86_cpu_expand_features should never happen,
2130 * but in case it does, just report the model as not
2131 * runnable at all using the "type" property.
2133 strList *new = g_new0(strList, 1);
2134 new->value = g_strdup("type");
2135 *next = new;
2136 next = &new->next;
2139 x86_cpu_filter_features(xc);
2141 for (w = 0; w < FEATURE_WORDS; w++) {
2142 uint32_t filtered = xc->filtered_features[w];
2143 int i;
2144 for (i = 0; i < 32; i++) {
2145 if (filtered & (1UL << i)) {
2146 strList *new = g_new0(strList, 1);
2147 new->value = g_strdup(x86_cpu_feature_name(w, i));
2148 *next = new;
2149 next = &new->next;
2154 object_unref(OBJECT(xc));
2157 /* Print all cpuid feature names in featureset
2159 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2161 int bit;
2162 bool first = true;
2164 for (bit = 0; bit < 32; bit++) {
2165 if (featureset[bit]) {
2166 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2167 first = false;
2172 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
2173 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
2175 ObjectClass *class_a = (ObjectClass *)a;
2176 ObjectClass *class_b = (ObjectClass *)b;
2177 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
2178 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
2179 const char *name_a, *name_b;
2181 if (cc_a->ordering != cc_b->ordering) {
2182 return cc_a->ordering - cc_b->ordering;
2183 } else {
2184 name_a = object_class_get_name(class_a);
2185 name_b = object_class_get_name(class_b);
2186 return strcmp(name_a, name_b);
2190 static GSList *get_sorted_cpu_model_list(void)
2192 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
2193 list = g_slist_sort(list, x86_cpu_list_compare);
2194 return list;
2197 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
2199 ObjectClass *oc = data;
2200 X86CPUClass *cc = X86_CPU_CLASS(oc);
2201 CPUListState *s = user_data;
2202 char *name = x86_cpu_class_get_model_name(cc);
2203 const char *desc = cc->model_description;
2204 if (!desc && cc->cpu_def) {
2205 desc = cc->cpu_def->model_id;
2208 (*s->cpu_fprintf)(s->file, "x86 %16s %-48s\n",
2209 name, desc);
2210 g_free(name);
2213 /* list available CPU models and flags */
2214 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2216 int i;
2217 CPUListState s = {
2218 .file = f,
2219 .cpu_fprintf = cpu_fprintf,
2221 GSList *list;
2223 (*cpu_fprintf)(f, "Available CPUs:\n");
2224 list = get_sorted_cpu_model_list();
2225 g_slist_foreach(list, x86_cpu_list_entry, &s);
2226 g_slist_free(list);
2228 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2229 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2230 FeatureWordInfo *fw = &feature_word_info[i];
2232 (*cpu_fprintf)(f, " ");
2233 listflags(f, cpu_fprintf, fw->feat_names);
2234 (*cpu_fprintf)(f, "\n");
2238 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
2240 ObjectClass *oc = data;
2241 X86CPUClass *cc = X86_CPU_CLASS(oc);
2242 CpuDefinitionInfoList **cpu_list = user_data;
2243 CpuDefinitionInfoList *entry;
2244 CpuDefinitionInfo *info;
2246 info = g_malloc0(sizeof(*info));
2247 info->name = x86_cpu_class_get_model_name(cc);
2248 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
2249 info->has_unavailable_features = true;
2250 info->q_typename = g_strdup(object_class_get_name(oc));
2251 info->migration_safe = cc->migration_safe;
2252 info->has_migration_safe = true;
2253 info->q_static = cc->static_model;
2255 entry = g_malloc0(sizeof(*entry));
2256 entry->value = info;
2257 entry->next = *cpu_list;
2258 *cpu_list = entry;
2261 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2263 CpuDefinitionInfoList *cpu_list = NULL;
2264 GSList *list = get_sorted_cpu_model_list();
2265 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
2266 g_slist_free(list);
2267 return cpu_list;
2270 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2271 bool migratable_only)
2273 FeatureWordInfo *wi = &feature_word_info[w];
2274 uint32_t r;
2276 if (kvm_enabled()) {
2277 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2278 wi->cpuid_ecx,
2279 wi->cpuid_reg);
2280 } else if (tcg_enabled()) {
2281 r = wi->tcg_features;
2282 } else {
2283 return ~0;
2285 if (migratable_only) {
2286 r &= x86_cpu_get_migratable_flags(w);
2288 return r;
2291 static void x86_cpu_report_filtered_features(X86CPU *cpu)
2293 FeatureWord w;
2295 for (w = 0; w < FEATURE_WORDS; w++) {
2296 report_unavailable_features(w, cpu->filtered_features[w]);
2300 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2302 PropValue *pv;
2303 for (pv = props; pv->prop; pv++) {
2304 if (!pv->value) {
2305 continue;
2307 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2308 &error_abort);
2312 /* Load data from X86CPUDefinition into a X86CPU object
2314 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2316 CPUX86State *env = &cpu->env;
2317 const char *vendor;
2318 char host_vendor[CPUID_VENDOR_SZ + 1];
2319 FeatureWord w;
2321 /*NOTE: any property set by this function should be returned by
2322 * x86_cpu_static_props(), so static expansion of
2323 * query-cpu-model-expansion is always complete.
2326 /* CPU models only set _minimum_ values for level/xlevel: */
2327 object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
2328 object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
2330 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2331 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2332 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2333 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2334 for (w = 0; w < FEATURE_WORDS; w++) {
2335 env->features[w] = def->features[w];
2338 /* Special cases not set in the X86CPUDefinition structs: */
2339 if (kvm_enabled()) {
2340 if (!kvm_irqchip_in_kernel()) {
2341 x86_cpu_change_kvm_default("x2apic", "off");
2344 x86_cpu_apply_props(cpu, kvm_default_props);
2345 } else if (tcg_enabled()) {
2346 x86_cpu_apply_props(cpu, tcg_default_props);
2349 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2351 /* sysenter isn't supported in compatibility mode on AMD,
2352 * syscall isn't supported in compatibility mode on Intel.
2353 * Normally we advertise the actual CPU vendor, but you can
2354 * override this using the 'vendor' property if you want to use
2355 * KVM's sysenter/syscall emulation in compatibility mode and
2356 * when doing cross vendor migration
2358 vendor = def->vendor;
2359 if (kvm_enabled()) {
2360 uint32_t ebx = 0, ecx = 0, edx = 0;
2361 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2362 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2363 vendor = host_vendor;
2366 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2370 /* Return a QDict containing keys for all properties that can be included
2371 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
2372 * must be included in the dictionary.
2374 static QDict *x86_cpu_static_props(void)
2376 FeatureWord w;
2377 int i;
2378 static const char *props[] = {
2379 "min-level",
2380 "min-xlevel",
2381 "family",
2382 "model",
2383 "stepping",
2384 "model-id",
2385 "vendor",
2386 "lmce",
2387 NULL,
2389 static QDict *d;
2391 if (d) {
2392 return d;
2395 d = qdict_new();
2396 for (i = 0; props[i]; i++) {
2397 qdict_put_obj(d, props[i], qnull());
2400 for (w = 0; w < FEATURE_WORDS; w++) {
2401 FeatureWordInfo *fi = &feature_word_info[w];
2402 int bit;
2403 for (bit = 0; bit < 32; bit++) {
2404 if (!fi->feat_names[bit]) {
2405 continue;
2407 qdict_put_obj(d, fi->feat_names[bit], qnull());
2411 return d;
2414 /* Add an entry to @props dict, with the value for property. */
2415 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
2417 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
2418 &error_abort);
2420 qdict_put_obj(props, prop, value);
2423 /* Convert CPU model data from X86CPU object to a property dictionary
2424 * that can recreate exactly the same CPU model.
2426 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
2428 QDict *sprops = x86_cpu_static_props();
2429 const QDictEntry *e;
2431 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
2432 const char *prop = qdict_entry_key(e);
2433 x86_cpu_expand_prop(cpu, props, prop);
2437 /* Convert CPU model data from X86CPU object to a property dictionary
2438 * that can recreate exactly the same CPU model, including every
2439 * writeable QOM property.
2441 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
2443 ObjectPropertyIterator iter;
2444 ObjectProperty *prop;
2446 object_property_iter_init(&iter, OBJECT(cpu));
2447 while ((prop = object_property_iter_next(&iter))) {
2448 /* skip read-only or write-only properties */
2449 if (!prop->get || !prop->set) {
2450 continue;
2453 /* "hotplugged" is the only property that is configurable
2454 * on the command-line but will be set differently on CPUs
2455 * created using "-cpu ... -smp ..." and by CPUs created
2456 * on the fly by x86_cpu_from_model() for querying. Skip it.
2458 if (!strcmp(prop->name, "hotplugged")) {
2459 continue;
2461 x86_cpu_expand_prop(cpu, props, prop->name);
2465 static void object_apply_props(Object *obj, QDict *props, Error **errp)
2467 const QDictEntry *prop;
2468 Error *err = NULL;
2470 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
2471 object_property_set_qobject(obj, qdict_entry_value(prop),
2472 qdict_entry_key(prop), &err);
2473 if (err) {
2474 break;
2478 error_propagate(errp, err);
2481 /* Create X86CPU object according to model+props specification */
2482 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
2484 X86CPU *xc = NULL;
2485 X86CPUClass *xcc;
2486 Error *err = NULL;
2488 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
2489 if (xcc == NULL) {
2490 error_setg(&err, "CPU model '%s' not found", model);
2491 goto out;
2494 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
2495 if (props) {
2496 object_apply_props(OBJECT(xc), props, &err);
2497 if (err) {
2498 goto out;
2502 x86_cpu_expand_features(xc, &err);
2503 if (err) {
2504 goto out;
2507 out:
2508 if (err) {
2509 error_propagate(errp, err);
2510 object_unref(OBJECT(xc));
2511 xc = NULL;
2513 return xc;
2516 CpuModelExpansionInfo *
2517 arch_query_cpu_model_expansion(CpuModelExpansionType type,
2518 CpuModelInfo *model,
2519 Error **errp)
2521 X86CPU *xc = NULL;
2522 Error *err = NULL;
2523 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
2524 QDict *props = NULL;
2525 const char *base_name;
2527 xc = x86_cpu_from_model(model->name,
2528 model->has_props ?
2529 qobject_to_qdict(model->props) :
2530 NULL, &err);
2531 if (err) {
2532 goto out;
2535 props = qdict_new();
2537 switch (type) {
2538 case CPU_MODEL_EXPANSION_TYPE_STATIC:
2539 /* Static expansion will be based on "base" only */
2540 base_name = "base";
2541 x86_cpu_to_dict(xc, props);
2542 break;
2543 case CPU_MODEL_EXPANSION_TYPE_FULL:
2544 /* As we don't return every single property, full expansion needs
2545 * to keep the original model name+props, and add extra
2546 * properties on top of that.
2548 base_name = model->name;
2549 x86_cpu_to_dict_full(xc, props);
2550 break;
2551 default:
2552 error_setg(&err, "Unsupportted expansion type");
2553 goto out;
2556 if (!props) {
2557 props = qdict_new();
2559 x86_cpu_to_dict(xc, props);
2561 ret->model = g_new0(CpuModelInfo, 1);
2562 ret->model->name = g_strdup(base_name);
2563 ret->model->props = QOBJECT(props);
2564 ret->model->has_props = true;
2566 out:
2567 object_unref(OBJECT(xc));
2568 if (err) {
2569 error_propagate(errp, err);
2570 qapi_free_CpuModelExpansionInfo(ret);
2571 ret = NULL;
2573 return ret;
2576 static gchar *x86_gdb_arch_name(CPUState *cs)
2578 #ifdef TARGET_X86_64
2579 return g_strdup("i386:x86-64");
2580 #else
2581 return g_strdup("i386");
2582 #endif
2585 X86CPU *cpu_x86_init(const char *cpu_model)
2587 return X86_CPU(cpu_generic_init(TYPE_X86_CPU, cpu_model));
2590 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2592 X86CPUDefinition *cpudef = data;
2593 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2595 xcc->cpu_def = cpudef;
2596 xcc->migration_safe = true;
2599 static void x86_register_cpudef_type(X86CPUDefinition *def)
2601 char *typename = x86_cpu_type_name(def->name);
2602 TypeInfo ti = {
2603 .name = typename,
2604 .parent = TYPE_X86_CPU,
2605 .class_init = x86_cpu_cpudef_class_init,
2606 .class_data = def,
2609 /* AMD aliases are handled at runtime based on CPUID vendor, so
2610 * they shouldn't be set on the CPU model table.
2612 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
2614 type_register(&ti);
2615 g_free(typename);
2618 #if !defined(CONFIG_USER_ONLY)
2620 void cpu_clear_apic_feature(CPUX86State *env)
2622 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2625 #endif /* !CONFIG_USER_ONLY */
2627 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2628 uint32_t *eax, uint32_t *ebx,
2629 uint32_t *ecx, uint32_t *edx)
2631 X86CPU *cpu = x86_env_get_cpu(env);
2632 CPUState *cs = CPU(cpu);
2633 uint32_t pkg_offset;
2634 uint32_t limit;
2636 /* Calculate & apply limits for different index ranges */
2637 if (index >= 0xC0000000) {
2638 limit = env->cpuid_xlevel2;
2639 } else if (index >= 0x80000000) {
2640 limit = env->cpuid_xlevel;
2641 } else {
2642 limit = env->cpuid_level;
2645 if (index > limit) {
2646 /* Intel documentation states that invalid EAX input will
2647 * return the same information as EAX=cpuid_level
2648 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2650 index = env->cpuid_level;
2653 switch(index) {
2654 case 0:
2655 *eax = env->cpuid_level;
2656 *ebx = env->cpuid_vendor1;
2657 *edx = env->cpuid_vendor2;
2658 *ecx = env->cpuid_vendor3;
2659 break;
2660 case 1:
2661 *eax = env->cpuid_version;
2662 *ebx = (cpu->apic_id << 24) |
2663 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2664 *ecx = env->features[FEAT_1_ECX];
2665 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
2666 *ecx |= CPUID_EXT_OSXSAVE;
2668 *edx = env->features[FEAT_1_EDX];
2669 if (cs->nr_cores * cs->nr_threads > 1) {
2670 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2671 *edx |= CPUID_HT;
2673 break;
2674 case 2:
2675 /* cache info: needed for Pentium Pro compatibility */
2676 if (cpu->cache_info_passthrough) {
2677 host_cpuid(index, 0, eax, ebx, ecx, edx);
2678 break;
2680 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2681 *ebx = 0;
2682 if (!cpu->enable_l3_cache) {
2683 *ecx = 0;
2684 } else {
2685 *ecx = L3_N_DESCRIPTOR;
2687 *edx = (L1D_DESCRIPTOR << 16) | \
2688 (L1I_DESCRIPTOR << 8) | \
2689 (L2_DESCRIPTOR);
2690 break;
2691 case 4:
2692 /* cache info: needed for Core compatibility */
2693 if (cpu->cache_info_passthrough) {
2694 host_cpuid(index, count, eax, ebx, ecx, edx);
2695 *eax &= ~0xFC000000;
2696 } else {
2697 *eax = 0;
2698 switch (count) {
2699 case 0: /* L1 dcache info */
2700 *eax |= CPUID_4_TYPE_DCACHE | \
2701 CPUID_4_LEVEL(1) | \
2702 CPUID_4_SELF_INIT_LEVEL;
2703 *ebx = (L1D_LINE_SIZE - 1) | \
2704 ((L1D_PARTITIONS - 1) << 12) | \
2705 ((L1D_ASSOCIATIVITY - 1) << 22);
2706 *ecx = L1D_SETS - 1;
2707 *edx = CPUID_4_NO_INVD_SHARING;
2708 break;
2709 case 1: /* L1 icache info */
2710 *eax |= CPUID_4_TYPE_ICACHE | \
2711 CPUID_4_LEVEL(1) | \
2712 CPUID_4_SELF_INIT_LEVEL;
2713 *ebx = (L1I_LINE_SIZE - 1) | \
2714 ((L1I_PARTITIONS - 1) << 12) | \
2715 ((L1I_ASSOCIATIVITY - 1) << 22);
2716 *ecx = L1I_SETS - 1;
2717 *edx = CPUID_4_NO_INVD_SHARING;
2718 break;
2719 case 2: /* L2 cache info */
2720 *eax |= CPUID_4_TYPE_UNIFIED | \
2721 CPUID_4_LEVEL(2) | \
2722 CPUID_4_SELF_INIT_LEVEL;
2723 if (cs->nr_threads > 1) {
2724 *eax |= (cs->nr_threads - 1) << 14;
2726 *ebx = (L2_LINE_SIZE - 1) | \
2727 ((L2_PARTITIONS - 1) << 12) | \
2728 ((L2_ASSOCIATIVITY - 1) << 22);
2729 *ecx = L2_SETS - 1;
2730 *edx = CPUID_4_NO_INVD_SHARING;
2731 break;
2732 case 3: /* L3 cache info */
2733 if (!cpu->enable_l3_cache) {
2734 *eax = 0;
2735 *ebx = 0;
2736 *ecx = 0;
2737 *edx = 0;
2738 break;
2740 *eax |= CPUID_4_TYPE_UNIFIED | \
2741 CPUID_4_LEVEL(3) | \
2742 CPUID_4_SELF_INIT_LEVEL;
2743 pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
2744 *eax |= ((1 << pkg_offset) - 1) << 14;
2745 *ebx = (L3_N_LINE_SIZE - 1) | \
2746 ((L3_N_PARTITIONS - 1) << 12) | \
2747 ((L3_N_ASSOCIATIVITY - 1) << 22);
2748 *ecx = L3_N_SETS - 1;
2749 *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
2750 break;
2751 default: /* end of info */
2752 *eax = 0;
2753 *ebx = 0;
2754 *ecx = 0;
2755 *edx = 0;
2756 break;
2760 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2761 if ((*eax & 31) && cs->nr_cores > 1) {
2762 *eax |= (cs->nr_cores - 1) << 26;
2764 break;
2765 case 5:
2766 /* mwait info: needed for Core compatibility */
2767 *eax = 0; /* Smallest monitor-line size in bytes */
2768 *ebx = 0; /* Largest monitor-line size in bytes */
2769 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2770 *edx = 0;
2771 break;
2772 case 6:
2773 /* Thermal and Power Leaf */
2774 *eax = env->features[FEAT_6_EAX];
2775 *ebx = 0;
2776 *ecx = 0;
2777 *edx = 0;
2778 break;
2779 case 7:
2780 /* Structured Extended Feature Flags Enumeration Leaf */
2781 if (count == 0) {
2782 *eax = 0; /* Maximum ECX value for sub-leaves */
2783 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2784 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
2785 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
2786 *ecx |= CPUID_7_0_ECX_OSPKE;
2788 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
2789 } else {
2790 *eax = 0;
2791 *ebx = 0;
2792 *ecx = 0;
2793 *edx = 0;
2795 break;
2796 case 9:
2797 /* Direct Cache Access Information Leaf */
2798 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2799 *ebx = 0;
2800 *ecx = 0;
2801 *edx = 0;
2802 break;
2803 case 0xA:
2804 /* Architectural Performance Monitoring Leaf */
2805 if (kvm_enabled() && cpu->enable_pmu) {
2806 KVMState *s = cs->kvm_state;
2808 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2809 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2810 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2811 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2812 } else {
2813 *eax = 0;
2814 *ebx = 0;
2815 *ecx = 0;
2816 *edx = 0;
2818 break;
2819 case 0xB:
2820 /* Extended Topology Enumeration Leaf */
2821 if (!cpu->enable_cpuid_0xb) {
2822 *eax = *ebx = *ecx = *edx = 0;
2823 break;
2826 *ecx = count & 0xff;
2827 *edx = cpu->apic_id;
2829 switch (count) {
2830 case 0:
2831 *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
2832 *ebx = cs->nr_threads;
2833 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
2834 break;
2835 case 1:
2836 *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
2837 *ebx = cs->nr_cores * cs->nr_threads;
2838 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
2839 break;
2840 default:
2841 *eax = 0;
2842 *ebx = 0;
2843 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
2846 assert(!(*eax & ~0x1f));
2847 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
2848 break;
2849 case 0xD: {
2850 /* Processor Extended State */
2851 *eax = 0;
2852 *ebx = 0;
2853 *ecx = 0;
2854 *edx = 0;
2855 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
2856 break;
2859 if (count == 0) {
2860 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
2861 *eax = env->features[FEAT_XSAVE_COMP_LO];
2862 *edx = env->features[FEAT_XSAVE_COMP_HI];
2863 *ebx = *ecx;
2864 } else if (count == 1) {
2865 *eax = env->features[FEAT_XSAVE];
2866 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
2867 if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
2868 const ExtSaveArea *esa = &x86_ext_save_areas[count];
2869 *eax = esa->size;
2870 *ebx = esa->offset;
2873 break;
2875 case 0x80000000:
2876 *eax = env->cpuid_xlevel;
2877 *ebx = env->cpuid_vendor1;
2878 *edx = env->cpuid_vendor2;
2879 *ecx = env->cpuid_vendor3;
2880 break;
2881 case 0x80000001:
2882 *eax = env->cpuid_version;
2883 *ebx = 0;
2884 *ecx = env->features[FEAT_8000_0001_ECX];
2885 *edx = env->features[FEAT_8000_0001_EDX];
2887 /* The Linux kernel checks for the CMPLegacy bit and
2888 * discards multiple thread information if it is set.
2889 * So don't set it here for Intel to make Linux guests happy.
2891 if (cs->nr_cores * cs->nr_threads > 1) {
2892 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2893 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2894 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2895 *ecx |= 1 << 1; /* CmpLegacy bit */
2898 break;
2899 case 0x80000002:
2900 case 0x80000003:
2901 case 0x80000004:
2902 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2903 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2904 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2905 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2906 break;
2907 case 0x80000005:
2908 /* cache info (L1 cache) */
2909 if (cpu->cache_info_passthrough) {
2910 host_cpuid(index, 0, eax, ebx, ecx, edx);
2911 break;
2913 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2914 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2915 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2916 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2917 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2918 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2919 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2920 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2921 break;
2922 case 0x80000006:
2923 /* cache info (L2 cache) */
2924 if (cpu->cache_info_passthrough) {
2925 host_cpuid(index, 0, eax, ebx, ecx, edx);
2926 break;
2928 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2929 (L2_DTLB_2M_ENTRIES << 16) | \
2930 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2931 (L2_ITLB_2M_ENTRIES);
2932 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2933 (L2_DTLB_4K_ENTRIES << 16) | \
2934 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2935 (L2_ITLB_4K_ENTRIES);
2936 *ecx = (L2_SIZE_KB_AMD << 16) | \
2937 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2938 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2939 if (!cpu->enable_l3_cache) {
2940 *edx = ((L3_SIZE_KB / 512) << 18) | \
2941 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2942 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2943 } else {
2944 *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
2945 (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
2946 (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
2948 break;
2949 case 0x80000007:
2950 *eax = 0;
2951 *ebx = 0;
2952 *ecx = 0;
2953 *edx = env->features[FEAT_8000_0007_EDX];
2954 break;
2955 case 0x80000008:
2956 /* virtual & phys address size in low 2 bytes. */
2957 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2958 /* 64 bit processor */
2959 *eax = cpu->phys_bits; /* configurable physical bits */
2960 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
2961 *eax |= 0x00003900; /* 57 bits virtual */
2962 } else {
2963 *eax |= 0x00003000; /* 48 bits virtual */
2965 } else {
2966 *eax = cpu->phys_bits;
2968 *ebx = 0;
2969 *ecx = 0;
2970 *edx = 0;
2971 if (cs->nr_cores * cs->nr_threads > 1) {
2972 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2974 break;
2975 case 0x8000000A:
2976 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2977 *eax = 0x00000001; /* SVM Revision */
2978 *ebx = 0x00000010; /* nr of ASIDs */
2979 *ecx = 0;
2980 *edx = env->features[FEAT_SVM]; /* optional features */
2981 } else {
2982 *eax = 0;
2983 *ebx = 0;
2984 *ecx = 0;
2985 *edx = 0;
2987 break;
2988 case 0xC0000000:
2989 *eax = env->cpuid_xlevel2;
2990 *ebx = 0;
2991 *ecx = 0;
2992 *edx = 0;
2993 break;
2994 case 0xC0000001:
2995 /* Support for VIA CPU's CPUID instruction */
2996 *eax = env->cpuid_version;
2997 *ebx = 0;
2998 *ecx = 0;
2999 *edx = env->features[FEAT_C000_0001_EDX];
3000 break;
3001 case 0xC0000002:
3002 case 0xC0000003:
3003 case 0xC0000004:
3004 /* Reserved for the future, and now filled with zero */
3005 *eax = 0;
3006 *ebx = 0;
3007 *ecx = 0;
3008 *edx = 0;
3009 break;
3010 default:
3011 /* reserved values: zero */
3012 *eax = 0;
3013 *ebx = 0;
3014 *ecx = 0;
3015 *edx = 0;
3016 break;
3020 /* CPUClass::reset() */
3021 static void x86_cpu_reset(CPUState *s)
3023 X86CPU *cpu = X86_CPU(s);
3024 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
3025 CPUX86State *env = &cpu->env;
3026 target_ulong cr4;
3027 uint64_t xcr0;
3028 int i;
3030 xcc->parent_reset(s);
3032 memset(env, 0, offsetof(CPUX86State, end_reset_fields));
3034 env->old_exception = -1;
3036 /* init to reset state */
3038 env->hflags2 |= HF2_GIF_MASK;
3040 cpu_x86_update_cr0(env, 0x60000010);
3041 env->a20_mask = ~0x0;
3042 env->smbase = 0x30000;
3044 env->idt.limit = 0xffff;
3045 env->gdt.limit = 0xffff;
3046 env->ldt.limit = 0xffff;
3047 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
3048 env->tr.limit = 0xffff;
3049 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
3051 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
3052 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
3053 DESC_R_MASK | DESC_A_MASK);
3054 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
3055 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3056 DESC_A_MASK);
3057 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
3058 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3059 DESC_A_MASK);
3060 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
3061 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3062 DESC_A_MASK);
3063 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
3064 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3065 DESC_A_MASK);
3066 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
3067 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3068 DESC_A_MASK);
3070 env->eip = 0xfff0;
3071 env->regs[R_EDX] = env->cpuid_version;
3073 env->eflags = 0x2;
3075 /* FPU init */
3076 for (i = 0; i < 8; i++) {
3077 env->fptags[i] = 1;
3079 cpu_set_fpuc(env, 0x37f);
3081 env->mxcsr = 0x1f80;
3082 /* All units are in INIT state. */
3083 env->xstate_bv = 0;
3085 env->pat = 0x0007040600070406ULL;
3086 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
3088 memset(env->dr, 0, sizeof(env->dr));
3089 env->dr[6] = DR6_FIXED_1;
3090 env->dr[7] = DR7_FIXED_1;
3091 cpu_breakpoint_remove_all(s, BP_CPU);
3092 cpu_watchpoint_remove_all(s, BP_CPU);
3094 cr4 = 0;
3095 xcr0 = XSTATE_FP_MASK;
3097 #ifdef CONFIG_USER_ONLY
3098 /* Enable all the features for user-mode. */
3099 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
3100 xcr0 |= XSTATE_SSE_MASK;
3102 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
3103 const ExtSaveArea *esa = &x86_ext_save_areas[i];
3104 if (env->features[esa->feature] & esa->bits) {
3105 xcr0 |= 1ull << i;
3109 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
3110 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
3112 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
3113 cr4 |= CR4_FSGSBASE_MASK;
3115 #endif
3117 env->xcr0 = xcr0;
3118 cpu_x86_update_cr4(env, cr4);
3121 * SDM 11.11.5 requires:
3122 * - IA32_MTRR_DEF_TYPE MSR.E = 0
3123 * - IA32_MTRR_PHYSMASKn.V = 0
3124 * All other bits are undefined. For simplification, zero it all.
3126 env->mtrr_deftype = 0;
3127 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
3128 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
3130 #if !defined(CONFIG_USER_ONLY)
3131 /* We hard-wire the BSP to the first CPU. */
3132 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
3134 s->halted = !cpu_is_bsp(cpu);
3136 if (kvm_enabled()) {
3137 kvm_arch_reset_vcpu(cpu);
3139 #endif
3142 #ifndef CONFIG_USER_ONLY
3143 bool cpu_is_bsp(X86CPU *cpu)
3145 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
3148 /* TODO: remove me, when reset over QOM tree is implemented */
3149 static void x86_cpu_machine_reset_cb(void *opaque)
3151 X86CPU *cpu = opaque;
3152 cpu_reset(CPU(cpu));
3154 #endif
3156 static void mce_init(X86CPU *cpu)
3158 CPUX86State *cenv = &cpu->env;
3159 unsigned int bank;
3161 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
3162 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
3163 (CPUID_MCE | CPUID_MCA)) {
3164 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
3165 (cpu->enable_lmce ? MCG_LMCE_P : 0);
3166 cenv->mcg_ctl = ~(uint64_t)0;
3167 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
3168 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
3173 #ifndef CONFIG_USER_ONLY
3174 APICCommonClass *apic_get_class(void)
3176 const char *apic_type = "apic";
3178 if (kvm_apic_in_kernel()) {
3179 apic_type = "kvm-apic";
3180 } else if (xen_enabled()) {
3181 apic_type = "xen-apic";
3184 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
3187 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
3189 APICCommonState *apic;
3190 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());
3192 cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));
3194 object_property_add_child(OBJECT(cpu), "lapic",
3195 OBJECT(cpu->apic_state), &error_abort);
3196 object_unref(OBJECT(cpu->apic_state));
3198 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
3199 /* TODO: convert to link<> */
3200 apic = APIC_COMMON(cpu->apic_state);
3201 apic->cpu = cpu;
3202 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
/* Realize the CPU's APIC device and, once per VM, map the APIC MMIO
 * page into system memory.  No-op when the CPU has no APIC.
 */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    static bool apic_mmio_map_once;

    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        /* All APICs share the same MMIO window, so only the first
         * realized CPU performs the mapping.
         */
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
    }
}
3228 static void x86_cpu_machine_done(Notifier *n, void *unused)
3230 X86CPU *cpu = container_of(n, X86CPU, machine_done);
3231 MemoryRegion *smram =
3232 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
3234 if (smram) {
3235 cpu->smram = g_new(MemoryRegion, 1);
3236 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
3237 smram, 0, 1ull << 32);
3238 memory_region_set_enabled(cpu->smram, true);
3239 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
3242 #else
3243 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
3246 #endif
3248 /* Note: Only safe for use on x86(-64) hosts */
3249 static uint32_t x86_host_phys_bits(void)
3251 uint32_t eax;
3252 uint32_t host_phys_bits;
3254 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
3255 if (eax >= 0x80000008) {
3256 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
3257 /* Note: According to AMD doc 25481 rev 2.34 they have a field
3258 * at 23:16 that can specify a maximum physical address bits for
3259 * the guest that can override this value; but I've not seen
3260 * anything with that set.
3262 host_phys_bits = eax & 0xff;
3263 } else {
3264 /* It's an odd 64 bit machine that doesn't have the leaf for
3265 * physical address bits; fall back to 36 that's most older
3266 * Intel.
3268 host_phys_bits = 36;
3271 return host_phys_bits;
3274 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
3276 if (*min < value) {
3277 *min = value;
3281 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
3282 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
3284 CPUX86State *env = &cpu->env;
3285 FeatureWordInfo *fi = &feature_word_info[w];
3286 uint32_t eax = fi->cpuid_eax;
3287 uint32_t region = eax & 0xF0000000;
3289 if (!env->features[w]) {
3290 return;
3293 switch (region) {
3294 case 0x00000000:
3295 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
3296 break;
3297 case 0x80000000:
3298 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
3299 break;
3300 case 0xC0000000:
3301 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
3302 break;
3306 /* Calculate XSAVE components based on the configured CPU feature flags */
3307 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
3309 CPUX86State *env = &cpu->env;
3310 int i;
3311 uint64_t mask;
3313 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
3314 return;
3317 mask = 0;
3318 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
3319 const ExtSaveArea *esa = &x86_ext_save_areas[i];
3320 if (env->features[esa->feature] & esa->bits) {
3321 mask |= (1ULL << i);
3325 env->features[FEAT_XSAVE_COMP_LO] = mask;
3326 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
3329 /***** Steps involved on loading and filtering CPUID data
3331 * When initializing and realizing a CPU object, the steps
3332 * involved in setting up CPUID data are:
3334 * 1) Loading CPU model definition (X86CPUDefinition). This is
3335 * implemented by x86_cpu_load_def() and should be completely
3336 * transparent, as it is done automatically by instance_init.
3337 * No code should need to look at X86CPUDefinition structs
3338 * outside instance_init.
3340 * 2) CPU expansion. This is done by realize before CPUID
3341 * filtering, and will make sure host/accelerator data is
3342 * loaded for CPU models that depend on host capabilities
3343 * (e.g. "host"). Done by x86_cpu_expand_features().
3345 * 3) CPUID filtering. This initializes extra data related to
3346 * CPUID, and checks if the host supports all capabilities
3347 * required by the CPU. Runnability of a CPU model is
3348 * determined at this step. Done by x86_cpu_filter_features().
3350 * Some operations don't require all steps to be performed.
3351 * More precisely:
3353 * - CPU instance creation (instance_init) will run only CPU
3354 * model loading. CPU expansion can't run at instance_init-time
3355 * because host/accelerator data may be not available yet.
3356 * - CPU realization will perform both CPU model expansion and CPUID
3357 * filtering, and return an error in case one of them fails.
3358 * - query-cpu-definitions needs to run all 3 steps. It needs
3359 * to run CPUID filtering, as the 'unavailable-features'
3360 * field is set based on the filtering results.
3361 * - The query-cpu-model-expansion QMP command only needs to run
3362 * CPU model loading and CPU expansion. It should not filter
3363 * any CPUID data based on host capabilities.
/* Expand CPU configuration data, based on configured features
 * and host/accelerator capabilities when appropriate.
 *
 * Order matters here: "+feature"/"-feature" overrides are applied after
 * max_features expansion, and level auto-adjustment runs last so that it
 * sees the final feature set.
 */
static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    GList *l;
    Error *local_err = NULL;

    /*TODO: Now cpu->max_features doesn't overwrite features
     * set using QOM properties, and we can convert
     * plus_features & minus_features to global properties
     * inside x86_cpu_parse_featurestr() too.
     */
    if (cpu->max_features) {
        for (w = 0; w < FEATURE_WORDS; w++) {
            /* Override only features that weren't set explicitly
             * by the user.
             */
            env->features[w] |=
                x86_cpu_get_supported_feature_word(w, cpu->migratable) &
                ~env->user_features[w];
        }
    }

    /* Apply legacy "+feature" overrides from the -cpu string. */
    for (l = plus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /* Apply legacy "-feature" overrides from the -cpu string. */
    for (l = minus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /* KVM paravirt features are only exposed on KVM with kvm=on. */
    if (!kvm_enabled() || !cpu->expose_kvm) {
        env->features[FEAT_KVM] = 0;
    }

    x86_cpu_enable_xsave_components(cpu);

    /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
    x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
    if (cpu->full_cpuid_auto_level) {
        x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
        x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
        x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
        /* SVM requires CPUID[0x8000000A] */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
        }
    }

    /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
    if (env->cpuid_level == UINT32_MAX) {
        env->cpuid_level = env->cpuid_min_level;
    }
    if (env->cpuid_xlevel == UINT32_MAX) {
        env->cpuid_xlevel = env->cpuid_min_xlevel;
    }
    if (env->cpuid_xlevel2 == UINT32_MAX) {
        env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
    }

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
}
3451 * Finishes initialization of CPUID data, filters CPU feature
3452 * words based on host availability of each feature.
3454 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
3456 static int x86_cpu_filter_features(X86CPU *cpu)
3458 CPUX86State *env = &cpu->env;
3459 FeatureWord w;
3460 int rv = 0;
3462 for (w = 0; w < FEATURE_WORDS; w++) {
3463 uint32_t host_feat =
3464 x86_cpu_get_supported_feature_word(w, false);
3465 uint32_t requested_features = env->features[w];
3466 env->features[w] &= host_feat;
3467 cpu->filtered_features[w] = requested_features & ~env->features[w];
3468 if (cpu->filtered_features[w]) {
3469 rv = 1;
3473 return rv;
/* Vendor checks based on the three CPUID vendor-string registers
 * (EBX/EDX/ECX of leaf 0).
 */
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
/* Device realize hook for x86 CPUs.  Expands and filters CPUID data,
 * decides the physical address width, sets up per-CPU address spaces
 * (TCG), creates/realizes the APIC, and finally resets the CPU and
 * chains to the parent realize.  Error paths either set errp directly
 * and return, or set local_err and jump to "out".
 */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;

    if (xcc->kvm_required && !kvm_enabled()) {
        char *name = x86_cpu_class_get_model_name(xcc);
        error_setg(&local_err, "CPU model '%s' requires KVM", name);
        g_free(name);
        goto out;
    }

    if (cpu->apic_id == UNASSIGNED_APIC_ID) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    /* Step 2 of CPUID setup: expand features from host/accelerator. */
    x86_cpu_expand_features(cpu, &local_err);
    if (local_err) {
        goto out;
    }

    /* Step 3: drop features the host can't provide; fail if enforcing. */
    if (x86_cpu_filter_features(cpu) &&
        (cpu->check_cpuid || cpu->enforce_cpuid)) {
        x86_cpu_report_filtered_features(cpu);
        if (cpu->enforce_cpuid) {
            error_setg(&local_err,
                       kvm_enabled() ?
                           "Host doesn't support requested features" :
                           "TCG doesn't support requested features");
            goto out;
        }
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
           & CPUID_EXT2_AMD_ALIASES);
    }

    /* For 64bit systems think about the number of physical bits to present.
     * ideally this should be the same as the host; anything other than matching
     * the host can cause incorrect guest behaviour.
     * QEMU used to pick the magic value of 40 bits that corresponds to
     * consumer AMD devices but nothing else.
     */
    if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
        if (kvm_enabled()) {
            uint32_t host_phys_bits = x86_host_phys_bits();
            static bool warned;

            if (cpu->host_phys_bits) {
                /* The user asked for us to use the host physical bits */
                cpu->phys_bits = host_phys_bits;
            }

            /* Print a warning if the user set it to a value that's not the
             * host value.
             */
            if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
                !warned) {
                error_report("Warning: Host physical bits (%u)"
                             " does not match phys-bits property (%u)",
                             host_phys_bits, cpu->phys_bits);
                warned = true;
            }

            if (cpu->phys_bits &&
                (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
                cpu->phys_bits < 32)) {
                error_setg(errp, "phys-bits should be between 32 and %u "
                                 " (but is %u)",
                                 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
                return;
            }
        } else {
            if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
                error_setg(errp, "TCG only supports phys-bits=%u",
                                  TCG_PHYS_ADDR_BITS);
                return;
            }
        }
        /* 0 means it was not explicitly set by the user (or by machine
         * compat_props or by the host code above). In this case, the default
         * is the value used by TCG (40).
         */
        if (cpu->phys_bits == 0) {
            cpu->phys_bits = TCG_PHYS_ADDR_BITS;
        }
    } else {
        /* For 32 bit systems don't use the user set value, but keep
         * phys_bits consistent with what we tell the guest.
         */
        if (cpu->phys_bits != 0) {
            error_setg(errp, "phys-bits is not user-configurable in 32 bit");
            return;
        }

        if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
            cpu->phys_bits = 36;
        } else {
            cpu->phys_bits = 32;
        }
    }
    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    if (tcg_enabled()) {
        tcg_x86_init();
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        AddressSpace *as_normal = address_space_init_shareable(cs->memory,
                                                               "cpu-memory");
        AddressSpace *as_smm = g_new(AddressSpace, 1);

        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);
        address_space_init(as_smm, cpu->cpu_as_root, "CPU");

        cs->num_ases = 2;
        cpu_address_space_init(cs, as_normal, 0);
        cpu_address_space_init(cs, as_smm, 1);

        /* ... SMRAM with higher priority, linked from /machine/smram. */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
     * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to gives
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't be populated yet and the checking is incorrect.
     */
    if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
        error_report("AMD CPU doesn't support hyperthreading. Please configure"
                     " -smp options properly.");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
3679 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
3681 X86CPU *cpu = X86_CPU(dev);
3682 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
3683 Error *local_err = NULL;
3685 #ifndef CONFIG_USER_ONLY
3686 cpu_remove_sync(CPU(dev));
3687 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
3688 #endif
3690 if (cpu->apic_state) {
3691 object_unparent(OBJECT(cpu->apic_state));
3692 cpu->apic_state = NULL;
3695 xcc->parent_unrealize(dev, &local_err);
3696 if (local_err != NULL) {
3697 error_propagate(errp, local_err);
3698 return;
/* Opaque state for the per-feature-bit boolean QOM properties:
 * identifies which bit(s) of which feature word a property controls.
 */
typedef struct BitProperty {
    FeatureWord w;   /* feature word the property belongs to */
    uint32_t mask;   /* bit mask within that word */
} BitProperty;
3707 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
3708 void *opaque, Error **errp)
3710 X86CPU *cpu = X86_CPU(obj);
3711 BitProperty *fp = opaque;
3712 uint32_t f = cpu->env.features[fp->w];
3713 bool value = (f & fp->mask) == fp->mask;
3714 visit_type_bool(v, name, &value, errp);
3717 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
3718 void *opaque, Error **errp)
3720 DeviceState *dev = DEVICE(obj);
3721 X86CPU *cpu = X86_CPU(obj);
3722 BitProperty *fp = opaque;
3723 Error *local_err = NULL;
3724 bool value;
3726 if (dev->realized) {
3727 qdev_prop_set_after_realize(dev, name, errp);
3728 return;
3731 visit_type_bool(v, name, &value, &local_err);
3732 if (local_err) {
3733 error_propagate(errp, local_err);
3734 return;
3737 if (value) {
3738 cpu->env.features[fp->w] |= fp->mask;
3739 } else {
3740 cpu->env.features[fp->w] &= ~fp->mask;
3742 cpu->env.user_features[fp->w] |= fp->mask;
3745 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3746 void *opaque)
3748 BitProperty *prop = opaque;
3749 g_free(prop);
3752 /* Register a boolean property to get/set a single bit in a uint32_t field.
3754 * The same property name can be registered multiple times to make it affect
3755 * multiple bits in the same FeatureWord. In that case, the getter will return
3756 * true only if all bits are set.
3758 static void x86_cpu_register_bit_prop(X86CPU *cpu,
3759 const char *prop_name,
3760 FeatureWord w,
3761 int bitnr)
3763 BitProperty *fp;
3764 ObjectProperty *op;
3765 uint32_t mask = (1UL << bitnr);
3767 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3768 if (op) {
3769 fp = op->opaque;
3770 assert(fp->w == w);
3771 fp->mask |= mask;
3772 } else {
3773 fp = g_new0(BitProperty, 1);
3774 fp->w = w;
3775 fp->mask = mask;
3776 object_property_add(OBJECT(cpu), prop_name, "bool",
3777 x86_cpu_get_bit_prop,
3778 x86_cpu_set_bit_prop,
3779 x86_cpu_release_bit_prop, fp, &error_abort);
3783 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3784 FeatureWord w,
3785 int bitnr)
3787 FeatureWordInfo *fi = &feature_word_info[w];
3788 const char *name = fi->feat_names[bitnr];
3790 if (!name) {
3791 return;
3794 /* Property names should use "-" instead of "_".
3795 * Old names containing underscores are registered as aliases
3796 * using object_property_add_alias()
3798 assert(!strchr(name, '_'));
3799 /* aliases don't use "|" delimiters anymore, they are registered
3800 * manually using object_property_add_alias() */
3801 assert(!strchr(name, '|'));
3802 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
3805 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
3807 X86CPU *cpu = X86_CPU(cs);
3808 CPUX86State *env = &cpu->env;
3809 GuestPanicInformation *panic_info = NULL;
3811 if (env->features[FEAT_HYPERV_EDX] & HV_X64_GUEST_CRASH_MSR_AVAILABLE) {
3812 panic_info = g_malloc0(sizeof(GuestPanicInformation));
3814 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
3816 assert(HV_X64_MSR_CRASH_PARAMS >= 5);
3817 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
3818 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
3819 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
3820 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
3821 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
3824 return panic_info;
3826 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
3827 const char *name, void *opaque,
3828 Error **errp)
3830 CPUState *cs = CPU(obj);
3831 GuestPanicInformation *panic_info;
3833 if (!cs->crash_occurred) {
3834 error_setg(errp, "No crash occured");
3835 return;
3838 panic_info = x86_cpu_get_crash_info(cs);
3839 if (panic_info == NULL) {
3840 error_setg(errp, "No crash information");
3841 return;
3844 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
3845 errp);
3846 qapi_free_GuestPanicInformation(panic_info);
/* Instance init for every x86 CPU object: registers the CPUID-version
 * and feature-word QOM properties, one boolean property per named
 * feature bit plus the legacy-name aliases, and finally loads the
 * class's CPU model definition (if any).  Aliases must be added after
 * the bit properties they refer to.
 */
static void x86_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    cs->env_ptr = env;

    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);

    object_property_add(obj, "crash-information", "GuestPanicInformation",
                        x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;

    /* One boolean property per named bit of every feature word. */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    /* Legacy alternate spellings, kept for command-line compatibility. */
    object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
    object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
    object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
    object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
    object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "i64", obj, "lm", &error_abort);

    /* Old underscore spellings aliased to the dash spellings. */
    object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
    object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
    object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
    object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
    object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
    object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
    object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
    object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
    object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
    object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
    object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
    object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
    object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
    object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
    object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
    object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
    object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
    object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
    object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);

    if (xcc->cpu_def) {
        x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
    }
}
3932 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3934 X86CPU *cpu = X86_CPU(cs);
3936 return cpu->apic_id;
3939 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3941 X86CPU *cpu = X86_CPU(cs);
3943 return cpu->env.cr[0] & CR0_PG_MASK;
3946 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3948 X86CPU *cpu = X86_CPU(cs);
3950 cpu->env.eip = value;
3953 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3955 X86CPU *cpu = X86_CPU(cs);
3957 cpu->env.eip = tb->pc - tb->cs_base;
3960 static bool x86_cpu_has_work(CPUState *cs)
3962 X86CPU *cpu = X86_CPU(cs);
3963 CPUX86State *env = &cpu->env;
3965 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3966 CPU_INTERRUPT_POLL)) &&
3967 (env->eflags & IF_MASK)) ||
3968 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3969 CPU_INTERRUPT_INIT |
3970 CPU_INTERRUPT_SIPI |
3971 CPU_INTERRUPT_MCE)) ||
3972 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3973 !(env->hflags & HF_SMM_MASK));
/* qdev properties common to all x86 CPU models.  Topology ids default
 * to "unset" for system emulation so the board can assign them, and to
 * fixed values for *-user.
 */
static Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
#else
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    /* Hyper-V enlightenment properties: */
    { .name  = "hv-spinlocks", .info  = &qdev_prop_spinlocks },
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
    DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
    DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
    DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
    DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
    DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
    DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
    /* UINT32_MAX means "use the auto-computed minimum level". */
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
    DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
    DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
    DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
    DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
    DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
    DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
                     false),
    DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
    DEFINE_PROP_END_OF_LIST()
};
/* Class init for the abstract TYPE_X86_CPU: hooks up device
 * realize/unrealize, reset and all CPUClass callbacks shared by every
 * x86 CPU model, saving the parent implementations so our hooks can
 * chain to them.
 */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    xcc->parent_realize = dc->realize;
    xcc->parent_unrealize = dc->unrealize;
    dc->realize = x86_cpu_realizefn;
    dc->unrealize = x86_cpu_unrealizefn;
    dc->props = x86_cpu_properties;

    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
    cc->dump_state = x86_cpu_dump_state;
    cc->get_crash_info = x86_cpu_get_crash_info;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
    cc->asidx_from_attrs = x86_asidx_from_attrs;
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_arch_name = x86_gdb_arch_name;
#ifdef TARGET_X86_64
    cc->gdb_core_xml_file = "i386-64bit.xml";
    cc->gdb_num_core_regs = 57;
#else
    cc->gdb_core_xml_file = "i386-32bit.xml";
    cc->gdb_num_core_regs = 41;
#endif
#ifndef CONFIG_USER_ONLY
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;

    dc->user_creatable = true;
}
/* Abstract base type for every x86 CPU model; concrete models are
 * registered as subclasses in x86_cpu_register_types().
 */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};
4093 /* "base" CPU model, used by query-cpu-model-expansion */
4094 static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
4096 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4098 xcc->static_model = true;
4099 xcc->migration_safe = true;
4100 xcc->model_description = "base CPU model type with no features enabled";
4101 xcc->ordering = 8;
/* Type registration for the featureless "base" CPU model. */
static const TypeInfo x86_base_cpu_type_info = {
        .name = X86_CPU_TYPE_NAME("base"),
        .parent = TYPE_X86_CPU,
        .class_init = x86_cpu_base_class_init,
};
/* Register the abstract base type, all built-in CPU model types, and
 * the special "max"/"base" (and, under KVM, "host") model types.
 */
static void x86_cpu_register_types(void)
{
    int i;

    type_register_static(&x86_cpu_type_info);
    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
        x86_register_cpudef_type(&builtin_x86_defs[i]);
    }
    type_register_static(&max_x86_cpu_type_info);
    type_register_static(&x86_base_cpu_type_info);
#ifdef CONFIG_KVM
    type_register_static(&host_x86_cpu_type_info);
#endif
}
4125 type_init(x86_cpu_register_types)