qemu-img: add support for --object command line arg
[qemu/kevin.git] / target-i386 / cpu.c
blob0af43a3ae1683fc8819c7df5df0829e6e83b8c5f
1 /*
2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include <stdlib.h>
20 #include <stdio.h>
21 #include <string.h>
22 #include <inttypes.h>
24 #include "cpu.h"
25 #include "sysemu/kvm.h"
26 #include "sysemu/cpus.h"
27 #include "kvm_i386.h"
29 #include "qemu/error-report.h"
30 #include "qemu/option.h"
31 #include "qemu/config-file.h"
32 #include "qapi/qmp/qerror.h"
34 #include "qapi-types.h"
35 #include "qapi-visit.h"
36 #include "qapi/visitor.h"
37 #include "sysemu/arch_init.h"
39 #include "hw/hw.h"
40 #if defined(CONFIG_KVM)
41 #include <linux/kvm_para.h>
42 #endif
44 #include "sysemu/sysemu.h"
45 #include "hw/qdev-properties.h"
46 #ifndef CONFIG_USER_ONLY
47 #include "exec/address-spaces.h"
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
50 #endif
/* Cache topology CPUID constants: */

/* CPUID Leaf 2 Descriptors: one-byte descriptor values, each standing
 * for a fixed cache geometry. */
#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
#define CPUID_2_L1I_32KB_8WAY_64B 0x30
#define CPUID_2_L2_2MB_8WAY_64B   0x7d

/* CPUID Leaf 4 constants: */

/* EAX: cache type field */
#define CPUID_4_TYPE_DCACHE  1
#define CPUID_4_TYPE_ICACHE  2
#define CPUID_4_TYPE_UNIFIED 3

/* EAX: cache level, shifted into its field position */
#define CPUID_4_LEVEL(l)        ((l) << 5)

#define CPUID_4_SELF_INIT_LEVEL (1 << 8)
#define CPUID_4_FULLY_ASSOC     (1 << 9)

/* EDX: */
#define CPUID_4_NO_INVD_SHARING (1 << 0)
#define CPUID_4_INCLUSIVE       (1 << 1)
#define CPUID_4_COMPLEX_IDX     (1 << 2)
#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006:
 * maps a ways-of-associativity count to the 4-bit field encoding defined
 * by AMD's CPUID Specification; values with no defined encoding map to 0.
 * The argument is parenthesized so expression arguments expand correctly,
 * but it is still evaluated multiple times, so it must not have side
 * effects. */
#define AMD_ENC_ASSOC(a) ((a) <=   1 ? (a) : \
                          (a) ==   2 ? 0x2 : \
                          (a) ==   4 ? 0x4 : \
                          (a) ==   8 ? 0x6 : \
                          (a) ==  16 ? 0x8 : \
                          (a) ==  32 ? 0xA : \
                          (a) ==  48 ? 0xB : \
                          (a) ==  64 ? 0xC : \
                          (a) ==  96 ? 0xD : \
                          (a) == 128 ? 0xE : \
                          (a) == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
/* Definitions of the hardcoded cache entries we expose: */

/* L1 data cache: */
#define L1D_LINE_SIZE         64
#define L1D_ASSOCIATIVITY      8
#define L1D_SETS              64
#define L1D_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1D_LINES_PER_TAG      1
#define L1D_SIZE_KB_AMD       64
#define L1D_ASSOCIATIVITY_AMD  2

/* L1 instruction cache: */
#define L1I_LINE_SIZE         64
#define L1I_ASSOCIATIVITY      8
#define L1I_SETS              64
#define L1I_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1I_LINES_PER_TAG      1
#define L1I_SIZE_KB_AMD       64
#define L1I_ASSOCIATIVITY_AMD  2

/* Level 2 unified cache: */
#define L2_LINE_SIZE          64
#define L2_ASSOCIATIVITY      16
#define L2_SETS             4096
#define L2_PARTITIONS          1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
#define L2_LINES_PER_TAG       1
#define L2_SIZE_KB_AMD       512

/* No L3 cache: */
#define L3_SIZE_KB             0 /* disabled */
#define L3_ASSOCIATIVITY       0 /* disabled */
#define L3_LINES_PER_TAG       0 /* disabled */
#define L3_LINE_SIZE           0 /* disabled */
/* TLB definitions: */

#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512
164 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
165 uint32_t vendor2, uint32_t vendor3)
167 int i;
168 for (i = 0; i < 4; i++) {
169 dst[i] = vendor1 >> (8 * i);
170 dst[i + 4] = vendor2 >> (8 * i);
171 dst[i + 8] = vendor3 >> (8 * i);
173 dst[CPUID_VENDOR_SZ] = '\0';
/* CPUID[1].EDX feature flag names, indexed by bit position; NULL marks a
 * bit with no name known to QEMU.  Taken from "Intel Processor
 * Identification and the CPUID Instruction" and AMD's "CPUID
 * Specification".  In cases of disagreement between feature naming
 * conventions, aliases may be added. */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse",
    "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep",
    "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
    NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss",
    "ht" /* Intel htt */, "tm", "ia64", "pbe",
};
/* CPUID[1].ECX feature flag names, indexed by bit position.  Entries of
 * the form "a|b" are '|'-separated aliases for the same bit. */
static const char *ext_feature_name[] = {
    "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
    "ds_cpl", "vmx", "smx", "est",
    "tm2", "ssse3", "cid", NULL,
    "fma", "cx16", "xtpr", "pdcm",
    NULL, "pcid", "dca", "sse4.1|sse4_1",
    "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
    "tsc-deadline", "aes", "xsave", "osxsave",
    "avx", "f16c", "rdrand", "hypervisor",
};
/* CPUID[8000_0001].EDX feature flag names, indexed by bit position.
 * Feature names that are already defined on feature_name[] but are set
 * on CPUID[8000_0001].EDX on AMD CPUs don't have their names here; they
 * are copied automatically to cpuid_ext2_features if and only if the CPU
 * vendor is AMD. */
static const char *ext2_feature_name[] = {
    NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
    NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
    NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
    NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
    NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
    "nx|xd", NULL, "mmxext", NULL /* mmx */,
    NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
    NULL, "lm|i64", "3dnowext", "3dnow",
};
/* CPUID[8000_0001].ECX feature flag names, indexed by bit position. */
static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm",
    "extapic" /* AMD ExtApicSpace */,
    "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", "ibs", "xop",
    "skinit", "wdt", NULL, "lwp",
    "fma4", "tce", NULL, "nodeid_msr",
    NULL, "tbm", "topoext", "perfctr_core",
    "perfctr_nb", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* CPUID[C000_0001].EDX feature flag names (Centaur/VIA leaf), indexed by
 * bit position. */
static const char *ext4_feature_name[] = {
    NULL, NULL, "xstore", "xstore-en",
    NULL, NULL, "xcrypt", "xcrypt-en",
    "ace2", "ace2-en", "phe", "phe-en",
    "pmm", "pmm-en", NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* KVM paravirtual feature flag names, indexed by bit position.  Note
 * "kvmclock" intentionally appears twice: presumably both KVM clocksource
 * feature bits map to the same user-visible name — confirm against the
 * KVM CPUID ABI. */
static const char *kvm_feature_name[] = {
    "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
    "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    "kvmclock-stable-bit", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* CPUID[8000_000A].EDX (SVM) feature flag names, indexed by bit
 * position. */
static const char *svm_feature_name[] = {
    "npt", "lbrv", "svm_lock", "nrip_save",
    "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
    NULL, NULL, "pause_filter", NULL,
    "pfthreshold", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* CPUID[EAX=7,ECX=0].EBX feature flag names, indexed by bit position. */
static const char *cpuid_7_0_ebx_feature_name[] = {
    "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
    "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
    "avx512f", NULL, "rdseed", "adx", "smap", NULL, "pcommit", "clflushopt",
    "clwb", NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
};
/* CPUID[EAX=7,ECX=0].ECX feature flag names, indexed by bit position. */
static const char *cpuid_7_0_ecx_feature_name[] = {
    NULL, NULL, NULL, "pku",
    "ospke", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* CPUID[8000_0007].EDX (APM) feature flag names, indexed by bit
 * position; only "invtsc" (bit 8) is named. */
static const char *cpuid_apm_edx_feature_name[] = {
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    "invtsc", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* CPUID[EAX=0xd,ECX=1].EAX (XSAVE) feature flag names, indexed by bit
 * position. */
static const char *cpuid_xsave_feature_name[] = {
    "xsaveopt", "xsavec", "xgetbv1", "xsaves",
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* CPUID[6].EAX feature flag names, indexed by bit position; only "arat"
 * (bit 2) is named. */
static const char *cpuid_6_feature_name[] = {
    NULL, NULL, "arat", NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* Baseline feature sets for the classic CPU generations: */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

/* Feature flags the TCG emulation implements, per feature word: */
#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
          CPUID_EXT_F16C, CPUID_EXT_RDRAND */

/* 64-bit-only flags are advertised only when TCG targets x86_64: */
#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES 0
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE)
          /* missing:
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES 0
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
/* Per-feature-word description: how the word is queried via CPUID and
 * what QEMU knows about its flags. */
typedef struct FeatureWordInfo {
    const char **feat_names;     /* flag names, indexed by bit position */
    uint32_t cpuid_eax;          /* Input EAX for CPUID */
    bool cpuid_needs_ecx;        /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;          /* Input ECX value for CPUID */
    int cpuid_reg;               /* output register (R_* constant) */
    uint32_t tcg_features;       /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
} FeatureWordInfo;
383 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
384 [FEAT_1_EDX] = {
385 .feat_names = feature_name,
386 .cpuid_eax = 1, .cpuid_reg = R_EDX,
387 .tcg_features = TCG_FEATURES,
389 [FEAT_1_ECX] = {
390 .feat_names = ext_feature_name,
391 .cpuid_eax = 1, .cpuid_reg = R_ECX,
392 .tcg_features = TCG_EXT_FEATURES,
394 [FEAT_8000_0001_EDX] = {
395 .feat_names = ext2_feature_name,
396 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
397 .tcg_features = TCG_EXT2_FEATURES,
399 [FEAT_8000_0001_ECX] = {
400 .feat_names = ext3_feature_name,
401 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
402 .tcg_features = TCG_EXT3_FEATURES,
404 [FEAT_C000_0001_EDX] = {
405 .feat_names = ext4_feature_name,
406 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
407 .tcg_features = TCG_EXT4_FEATURES,
409 [FEAT_KVM] = {
410 .feat_names = kvm_feature_name,
411 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
412 .tcg_features = TCG_KVM_FEATURES,
414 [FEAT_SVM] = {
415 .feat_names = svm_feature_name,
416 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
417 .tcg_features = TCG_SVM_FEATURES,
419 [FEAT_7_0_EBX] = {
420 .feat_names = cpuid_7_0_ebx_feature_name,
421 .cpuid_eax = 7,
422 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
423 .cpuid_reg = R_EBX,
424 .tcg_features = TCG_7_0_EBX_FEATURES,
426 [FEAT_7_0_ECX] = {
427 .feat_names = cpuid_7_0_ecx_feature_name,
428 .cpuid_eax = 7,
429 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
430 .cpuid_reg = R_ECX,
431 .tcg_features = TCG_7_0_ECX_FEATURES,
433 [FEAT_8000_0007_EDX] = {
434 .feat_names = cpuid_apm_edx_feature_name,
435 .cpuid_eax = 0x80000007,
436 .cpuid_reg = R_EDX,
437 .tcg_features = TCG_APM_FEATURES,
438 .unmigratable_flags = CPUID_APM_INVTSC,
440 [FEAT_XSAVE] = {
441 .feat_names = cpuid_xsave_feature_name,
442 .cpuid_eax = 0xd,
443 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
444 .cpuid_reg = R_EAX,
445 .tcg_features = TCG_XSAVE_FEATURES,
447 [FEAT_6_EAX] = {
448 .feat_names = cpuid_6_feature_name,
449 .cpuid_eax = 6, .cpuid_reg = R_EAX,
450 .tcg_features = TCG_6_EAX_FEATURES,
454 typedef struct X86RegisterInfo32 {
455 /* Name of register */
456 const char *name;
457 /* QAPI enum value register */
458 X86CPURegister32 qapi_enum;
459 } X86RegisterInfo32;
461 #define REGISTER(reg) \
462 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
463 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
464 REGISTER(EAX),
465 REGISTER(ECX),
466 REGISTER(EDX),
467 REGISTER(EBX),
468 REGISTER(ESP),
469 REGISTER(EBP),
470 REGISTER(ESI),
471 REGISTER(EDI),
473 #undef REGISTER
475 const ExtSaveArea x86_ext_save_areas[] = {
476 [2] = { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
477 .offset = 0x240, .size = 0x100 },
478 [3] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
479 .offset = 0x3c0, .size = 0x40 },
480 [4] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
481 .offset = 0x400, .size = 0x40 },
482 [5] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
483 .offset = 0x440, .size = 0x40 },
484 [6] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
485 .offset = 0x480, .size = 0x200 },
486 [7] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
487 .offset = 0x680, .size = 0x400 },
488 [9] = { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
489 .offset = 0xA80, .size = 0x8 },
492 const char *get_register_name_32(unsigned int reg)
494 if (reg >= CPU_NB_REGS32) {
495 return NULL;
497 return x86_reg_info_32[reg].name;
501 * Returns the set of feature flags that are supported and migratable by
502 * QEMU, for a given FeatureWord.
504 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
506 FeatureWordInfo *wi = &feature_word_info[w];
507 uint32_t r = 0;
508 int i;
510 for (i = 0; i < 32; i++) {
511 uint32_t f = 1U << i;
512 /* If the feature name is unknown, it is not supported by QEMU yet */
513 if (!wi->feat_names[i]) {
514 continue;
516 /* Skip features known to QEMU, but explicitly marked as unmigratable */
517 if (wi->unmigratable_flags & f) {
518 continue;
520 r |= f;
522 return r;
525 void host_cpuid(uint32_t function, uint32_t count,
526 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
528 uint32_t vec[4];
530 #ifdef __x86_64__
531 asm volatile("cpuid"
532 : "=a"(vec[0]), "=b"(vec[1]),
533 "=c"(vec[2]), "=d"(vec[3])
534 : "0"(function), "c"(count) : "cc");
535 #elif defined(__i386__)
536 asm volatile("pusha \n\t"
537 "cpuid \n\t"
538 "mov %%eax, 0(%2) \n\t"
539 "mov %%ebx, 4(%2) \n\t"
540 "mov %%ecx, 8(%2) \n\t"
541 "mov %%edx, 12(%2) \n\t"
542 "popa"
543 : : "a"(function), "c"(count), "S"(vec)
544 : "memory", "cc");
545 #else
546 abort();
547 #endif
549 if (eax)
550 *eax = vec[0];
551 if (ebx)
552 *ebx = vec[1];
553 if (ecx)
554 *ecx = vec[2];
555 if (edx)
556 *edx = vec[3];
/* True for any non-NUL character outside the printable ASCII range
 * '!'..'~' — i.e. space, control characters, DEL and above.  Broader
 * than isspace(). */
#define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
/* General substring compare of *[s1..e1) and *[s2..e2).  sx is the start
 * of a substring; ex, if non-NULL, points to the first char after the
 * substring, otherwise the string is assumed to be NUL-terminated.
 * Returns the lexical ordering of *s1 vs *s2. */
static int sstrcmp(const char *s1, const char *e1,
                   const char *s2, const char *e2)
{
    while (1) {
        if (!*s1 || !*s2 || *s1 != *s2) {
            return *s1 - *s2;
        }
        s1++;
        s2++;
        if (s1 == e1 && s2 == e2) {
            return 0;
        }
        if (s1 == e1) {
            return *s2;
        }
        if (s2 == e2) {
            return *s1;
        }
    }
}
/* Compare *[s..e) to *altstr.  *altstr may be a simple string or multiple
 * '|'-delimited (possibly empty) alternatives, in which case the search
 * proceeds left to right.  Returns 0 for a match, non-zero otherwise. */
static int altcmp(const char *s, const char *e, const char *altstr)
{
    const char *alt_start = altstr;
    const char *alt_end = altstr;

    while (1) {
        /* advance alt_end past the current alternative */
        while (*alt_end && *alt_end != '|') {
            alt_end++;
        }
        /* an empty alternative matches only the empty string */
        if ((alt_start == alt_end && !*s) ||
            (alt_start != alt_end && !sstrcmp(s, e, alt_start, alt_end))) {
            return 0;
        }
        if (!*alt_end) {
            return 1;
        }
        alt_start = ++alt_end;
    }
}
/* Search featureset for flag *[s..e); if found, set the corresponding
 * bit(s) in *pval and return true, otherwise return false.  All 32
 * entries are scanned, so a name appearing at several bit positions sets
 * every matching bit. */
static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
                           const char **featureset)
{
    uint32_t mask;
    const char **name;
    bool found = false;

    for (mask = 1, name = featureset; mask; mask <<= 1, name++) {
        if (*name && !altcmp(s, e, *name)) {
            *pval |= mask;
            found = true;
        }
    }
    return found;
}
622 static void add_flagname_to_bitmaps(const char *flagname,
623 FeatureWordArray words,
624 Error **errp)
626 FeatureWord w;
627 for (w = 0; w < FEATURE_WORDS; w++) {
628 FeatureWordInfo *wi = &feature_word_info[w];
629 if (wi->feat_names &&
630 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
631 break;
634 if (w == FEATURE_WORDS) {
635 error_setg(errp, "CPU feature %s not found", flagname);
639 /* CPU class name definitions: */
641 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
642 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
644 /* Return type name for a given CPU model name
645 * Caller is responsible for freeing the returned string.
647 static char *x86_cpu_type_name(const char *model_name)
649 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
652 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
654 ObjectClass *oc;
655 char *typename;
657 if (cpu_model == NULL) {
658 return NULL;
661 typename = x86_cpu_type_name(cpu_model);
662 oc = object_class_by_name(typename);
663 g_free(typename);
664 return oc;
667 struct X86CPUDefinition {
668 const char *name;
669 uint32_t level;
670 uint32_t xlevel;
671 uint32_t xlevel2;
672 /* vendor is zero-terminated, 12 character ASCII string */
673 char vendor[CPUID_VENDOR_SZ + 1];
674 int family;
675 int model;
676 int stepping;
677 FeatureWordArray features;
678 char model_id[48];
681 static X86CPUDefinition builtin_x86_defs[] = {
683 .name = "qemu64",
684 .level = 0xd,
685 .vendor = CPUID_VENDOR_AMD,
686 .family = 6,
687 .model = 6,
688 .stepping = 3,
689 .features[FEAT_1_EDX] =
690 PPRO_FEATURES |
691 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
692 CPUID_PSE36,
693 .features[FEAT_1_ECX] =
694 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
695 .features[FEAT_8000_0001_EDX] =
696 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
697 .features[FEAT_8000_0001_ECX] =
698 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
699 .xlevel = 0x8000000A,
702 .name = "phenom",
703 .level = 5,
704 .vendor = CPUID_VENDOR_AMD,
705 .family = 16,
706 .model = 2,
707 .stepping = 3,
708 /* Missing: CPUID_HT */
709 .features[FEAT_1_EDX] =
710 PPRO_FEATURES |
711 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
712 CPUID_PSE36 | CPUID_VME,
713 .features[FEAT_1_ECX] =
714 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
715 CPUID_EXT_POPCNT,
716 .features[FEAT_8000_0001_EDX] =
717 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
718 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
719 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
720 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
721 CPUID_EXT3_CR8LEG,
722 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
723 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
724 .features[FEAT_8000_0001_ECX] =
725 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
726 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
727 /* Missing: CPUID_SVM_LBRV */
728 .features[FEAT_SVM] =
729 CPUID_SVM_NPT,
730 .xlevel = 0x8000001A,
731 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
734 .name = "core2duo",
735 .level = 10,
736 .vendor = CPUID_VENDOR_INTEL,
737 .family = 6,
738 .model = 15,
739 .stepping = 11,
740 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
741 .features[FEAT_1_EDX] =
742 PPRO_FEATURES |
743 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
744 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
745 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
746 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
747 .features[FEAT_1_ECX] =
748 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
749 CPUID_EXT_CX16,
750 .features[FEAT_8000_0001_EDX] =
751 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
752 .features[FEAT_8000_0001_ECX] =
753 CPUID_EXT3_LAHF_LM,
754 .xlevel = 0x80000008,
755 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
758 .name = "kvm64",
759 .level = 0xd,
760 .vendor = CPUID_VENDOR_INTEL,
761 .family = 15,
762 .model = 6,
763 .stepping = 1,
764 /* Missing: CPUID_HT */
765 .features[FEAT_1_EDX] =
766 PPRO_FEATURES | CPUID_VME |
767 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
768 CPUID_PSE36,
769 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
770 .features[FEAT_1_ECX] =
771 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
772 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
773 .features[FEAT_8000_0001_EDX] =
774 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
775 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
776 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
777 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
778 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
779 .features[FEAT_8000_0001_ECX] =
781 .xlevel = 0x80000008,
782 .model_id = "Common KVM processor"
785 .name = "qemu32",
786 .level = 4,
787 .vendor = CPUID_VENDOR_INTEL,
788 .family = 6,
789 .model = 6,
790 .stepping = 3,
791 .features[FEAT_1_EDX] =
792 PPRO_FEATURES,
793 .features[FEAT_1_ECX] =
794 CPUID_EXT_SSE3,
795 .xlevel = 0x80000004,
798 .name = "kvm32",
799 .level = 5,
800 .vendor = CPUID_VENDOR_INTEL,
801 .family = 15,
802 .model = 6,
803 .stepping = 1,
804 .features[FEAT_1_EDX] =
805 PPRO_FEATURES | CPUID_VME |
806 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
807 .features[FEAT_1_ECX] =
808 CPUID_EXT_SSE3,
809 .features[FEAT_8000_0001_ECX] =
811 .xlevel = 0x80000008,
812 .model_id = "Common 32-bit KVM processor"
815 .name = "coreduo",
816 .level = 10,
817 .vendor = CPUID_VENDOR_INTEL,
818 .family = 6,
819 .model = 14,
820 .stepping = 8,
821 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
822 .features[FEAT_1_EDX] =
823 PPRO_FEATURES | CPUID_VME |
824 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
825 CPUID_SS,
826 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
827 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
828 .features[FEAT_1_ECX] =
829 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
830 .features[FEAT_8000_0001_EDX] =
831 CPUID_EXT2_NX,
832 .xlevel = 0x80000008,
833 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
836 .name = "486",
837 .level = 1,
838 .vendor = CPUID_VENDOR_INTEL,
839 .family = 4,
840 .model = 8,
841 .stepping = 0,
842 .features[FEAT_1_EDX] =
843 I486_FEATURES,
844 .xlevel = 0,
847 .name = "pentium",
848 .level = 1,
849 .vendor = CPUID_VENDOR_INTEL,
850 .family = 5,
851 .model = 4,
852 .stepping = 3,
853 .features[FEAT_1_EDX] =
854 PENTIUM_FEATURES,
855 .xlevel = 0,
858 .name = "pentium2",
859 .level = 2,
860 .vendor = CPUID_VENDOR_INTEL,
861 .family = 6,
862 .model = 5,
863 .stepping = 2,
864 .features[FEAT_1_EDX] =
865 PENTIUM2_FEATURES,
866 .xlevel = 0,
869 .name = "pentium3",
870 .level = 3,
871 .vendor = CPUID_VENDOR_INTEL,
872 .family = 6,
873 .model = 7,
874 .stepping = 3,
875 .features[FEAT_1_EDX] =
876 PENTIUM3_FEATURES,
877 .xlevel = 0,
880 .name = "athlon",
881 .level = 2,
882 .vendor = CPUID_VENDOR_AMD,
883 .family = 6,
884 .model = 2,
885 .stepping = 3,
886 .features[FEAT_1_EDX] =
887 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
888 CPUID_MCA,
889 .features[FEAT_8000_0001_EDX] =
890 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
891 .xlevel = 0x80000008,
894 .name = "n270",
895 .level = 10,
896 .vendor = CPUID_VENDOR_INTEL,
897 .family = 6,
898 .model = 28,
899 .stepping = 2,
900 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
901 .features[FEAT_1_EDX] =
902 PPRO_FEATURES |
903 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
904 CPUID_ACPI | CPUID_SS,
905 /* Some CPUs got no CPUID_SEP */
906 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
907 * CPUID_EXT_XTPR */
908 .features[FEAT_1_ECX] =
909 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
910 CPUID_EXT_MOVBE,
911 .features[FEAT_8000_0001_EDX] =
912 CPUID_EXT2_NX,
913 .features[FEAT_8000_0001_ECX] =
914 CPUID_EXT3_LAHF_LM,
915 .xlevel = 0x80000008,
916 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
919 .name = "Conroe",
920 .level = 10,
921 .vendor = CPUID_VENDOR_INTEL,
922 .family = 6,
923 .model = 15,
924 .stepping = 3,
925 .features[FEAT_1_EDX] =
926 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
927 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
928 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
929 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
930 CPUID_DE | CPUID_FP87,
931 .features[FEAT_1_ECX] =
932 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
933 .features[FEAT_8000_0001_EDX] =
934 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
935 .features[FEAT_8000_0001_ECX] =
936 CPUID_EXT3_LAHF_LM,
937 .xlevel = 0x80000008,
938 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
941 .name = "Penryn",
942 .level = 10,
943 .vendor = CPUID_VENDOR_INTEL,
944 .family = 6,
945 .model = 23,
946 .stepping = 3,
947 .features[FEAT_1_EDX] =
948 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
949 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
950 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
951 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
952 CPUID_DE | CPUID_FP87,
953 .features[FEAT_1_ECX] =
954 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
955 CPUID_EXT_SSE3,
956 .features[FEAT_8000_0001_EDX] =
957 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
958 .features[FEAT_8000_0001_ECX] =
959 CPUID_EXT3_LAHF_LM,
960 .xlevel = 0x80000008,
961 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
964 .name = "Nehalem",
965 .level = 11,
966 .vendor = CPUID_VENDOR_INTEL,
967 .family = 6,
968 .model = 26,
969 .stepping = 3,
970 .features[FEAT_1_EDX] =
971 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
972 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
973 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
974 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
975 CPUID_DE | CPUID_FP87,
976 .features[FEAT_1_ECX] =
977 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
978 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
979 .features[FEAT_8000_0001_EDX] =
980 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
981 .features[FEAT_8000_0001_ECX] =
982 CPUID_EXT3_LAHF_LM,
983 .xlevel = 0x80000008,
984 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
987 .name = "Westmere",
988 .level = 11,
989 .vendor = CPUID_VENDOR_INTEL,
990 .family = 6,
991 .model = 44,
992 .stepping = 1,
993 .features[FEAT_1_EDX] =
994 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
995 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
996 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
997 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
998 CPUID_DE | CPUID_FP87,
999 .features[FEAT_1_ECX] =
1000 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1001 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1002 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1003 .features[FEAT_8000_0001_EDX] =
1004 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1005 .features[FEAT_8000_0001_ECX] =
1006 CPUID_EXT3_LAHF_LM,
1007 .features[FEAT_6_EAX] =
1008 CPUID_6_EAX_ARAT,
1009 .xlevel = 0x80000008,
1010 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1013 .name = "SandyBridge",
1014 .level = 0xd,
1015 .vendor = CPUID_VENDOR_INTEL,
1016 .family = 6,
1017 .model = 42,
1018 .stepping = 1,
1019 .features[FEAT_1_EDX] =
1020 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1021 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1022 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1023 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1024 CPUID_DE | CPUID_FP87,
1025 .features[FEAT_1_ECX] =
1026 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1027 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1028 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1029 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1030 CPUID_EXT_SSE3,
1031 .features[FEAT_8000_0001_EDX] =
1032 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1033 CPUID_EXT2_SYSCALL,
1034 .features[FEAT_8000_0001_ECX] =
1035 CPUID_EXT3_LAHF_LM,
1036 .features[FEAT_XSAVE] =
1037 CPUID_XSAVE_XSAVEOPT,
1038 .features[FEAT_6_EAX] =
1039 CPUID_6_EAX_ARAT,
1040 .xlevel = 0x80000008,
1041 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1044 .name = "IvyBridge",
1045 .level = 0xd,
1046 .vendor = CPUID_VENDOR_INTEL,
1047 .family = 6,
1048 .model = 58,
1049 .stepping = 9,
1050 .features[FEAT_1_EDX] =
1051 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1052 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1053 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1054 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1055 CPUID_DE | CPUID_FP87,
1056 .features[FEAT_1_ECX] =
1057 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1058 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1059 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1060 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1061 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1062 .features[FEAT_7_0_EBX] =
1063 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1064 CPUID_7_0_EBX_ERMS,
1065 .features[FEAT_8000_0001_EDX] =
1066 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1067 CPUID_EXT2_SYSCALL,
1068 .features[FEAT_8000_0001_ECX] =
1069 CPUID_EXT3_LAHF_LM,
1070 .features[FEAT_XSAVE] =
1071 CPUID_XSAVE_XSAVEOPT,
1072 .features[FEAT_6_EAX] =
1073 CPUID_6_EAX_ARAT,
1074 .xlevel = 0x80000008,
1075 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1078 .name = "Haswell-noTSX",
1079 .level = 0xd,
1080 .vendor = CPUID_VENDOR_INTEL,
1081 .family = 6,
1082 .model = 60,
1083 .stepping = 1,
1084 .features[FEAT_1_EDX] =
1085 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1086 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1087 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1088 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1089 CPUID_DE | CPUID_FP87,
1090 .features[FEAT_1_ECX] =
1091 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1092 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1093 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1094 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1095 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1096 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1097 .features[FEAT_8000_0001_EDX] =
1098 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1099 CPUID_EXT2_SYSCALL,
1100 .features[FEAT_8000_0001_ECX] =
1101 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1102 .features[FEAT_7_0_EBX] =
1103 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1104 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1105 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1106 .features[FEAT_XSAVE] =
1107 CPUID_XSAVE_XSAVEOPT,
1108 .features[FEAT_6_EAX] =
1109 CPUID_6_EAX_ARAT,
1110 .xlevel = 0x80000008,
1111 .model_id = "Intel Core Processor (Haswell, no TSX)",
1112 }, {
1113 .name = "Haswell",
1114 .level = 0xd,
1115 .vendor = CPUID_VENDOR_INTEL,
1116 .family = 6,
1117 .model = 60,
1118 .stepping = 1,
1119 .features[FEAT_1_EDX] =
1120 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1121 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1122 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1123 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1124 CPUID_DE | CPUID_FP87,
1125 .features[FEAT_1_ECX] =
1126 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1127 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1128 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1129 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1130 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1131 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1132 .features[FEAT_8000_0001_EDX] =
1133 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1134 CPUID_EXT2_SYSCALL,
1135 .features[FEAT_8000_0001_ECX] =
1136 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1137 .features[FEAT_7_0_EBX] =
1138 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1139 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1140 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1141 CPUID_7_0_EBX_RTM,
1142 .features[FEAT_XSAVE] =
1143 CPUID_XSAVE_XSAVEOPT,
1144 .features[FEAT_6_EAX] =
1145 CPUID_6_EAX_ARAT,
1146 .xlevel = 0x80000008,
1147 .model_id = "Intel Core Processor (Haswell)",
1150 .name = "Broadwell-noTSX",
1151 .level = 0xd,
1152 .vendor = CPUID_VENDOR_INTEL,
1153 .family = 6,
1154 .model = 61,
1155 .stepping = 2,
1156 .features[FEAT_1_EDX] =
1157 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1158 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1159 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1160 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1161 CPUID_DE | CPUID_FP87,
1162 .features[FEAT_1_ECX] =
1163 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1164 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1165 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1166 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1167 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1168 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1169 .features[FEAT_8000_0001_EDX] =
1170 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1171 CPUID_EXT2_SYSCALL,
1172 .features[FEAT_8000_0001_ECX] =
1173 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1174 .features[FEAT_7_0_EBX] =
1175 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1176 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1177 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1178 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1179 CPUID_7_0_EBX_SMAP,
1180 .features[FEAT_XSAVE] =
1181 CPUID_XSAVE_XSAVEOPT,
1182 .features[FEAT_6_EAX] =
1183 CPUID_6_EAX_ARAT,
1184 .xlevel = 0x80000008,
1185 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1188 .name = "Broadwell",
1189 .level = 0xd,
1190 .vendor = CPUID_VENDOR_INTEL,
1191 .family = 6,
1192 .model = 61,
1193 .stepping = 2,
1194 .features[FEAT_1_EDX] =
1195 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1196 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1197 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1198 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1199 CPUID_DE | CPUID_FP87,
1200 .features[FEAT_1_ECX] =
1201 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1202 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1203 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1204 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1205 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1206 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1207 .features[FEAT_8000_0001_EDX] =
1208 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1209 CPUID_EXT2_SYSCALL,
1210 .features[FEAT_8000_0001_ECX] =
1211 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1212 .features[FEAT_7_0_EBX] =
1213 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1214 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1215 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1216 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1217 CPUID_7_0_EBX_SMAP,
1218 .features[FEAT_XSAVE] =
1219 CPUID_XSAVE_XSAVEOPT,
1220 .features[FEAT_6_EAX] =
1221 CPUID_6_EAX_ARAT,
1222 .xlevel = 0x80000008,
1223 .model_id = "Intel Core Processor (Broadwell)",
1226 .name = "Opteron_G1",
1227 .level = 5,
1228 .vendor = CPUID_VENDOR_AMD,
1229 .family = 15,
1230 .model = 6,
1231 .stepping = 1,
1232 .features[FEAT_1_EDX] =
1233 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1234 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1235 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1236 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1237 CPUID_DE | CPUID_FP87,
1238 .features[FEAT_1_ECX] =
1239 CPUID_EXT_SSE3,
1240 .features[FEAT_8000_0001_EDX] =
1241 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1242 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1243 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1244 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1245 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1246 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1247 .xlevel = 0x80000008,
1248 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1251 .name = "Opteron_G2",
1252 .level = 5,
1253 .vendor = CPUID_VENDOR_AMD,
1254 .family = 15,
1255 .model = 6,
1256 .stepping = 1,
1257 .features[FEAT_1_EDX] =
1258 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1259 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1260 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1261 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1262 CPUID_DE | CPUID_FP87,
1263 .features[FEAT_1_ECX] =
1264 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1265 /* Missing: CPUID_EXT2_RDTSCP */
1266 .features[FEAT_8000_0001_EDX] =
1267 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1268 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1269 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1270 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1271 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1272 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1273 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1274 .features[FEAT_8000_0001_ECX] =
1275 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1276 .xlevel = 0x80000008,
1277 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1280 .name = "Opteron_G3",
1281 .level = 5,
1282 .vendor = CPUID_VENDOR_AMD,
1283 .family = 15,
1284 .model = 6,
1285 .stepping = 1,
1286 .features[FEAT_1_EDX] =
1287 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1288 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1289 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1290 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1291 CPUID_DE | CPUID_FP87,
1292 .features[FEAT_1_ECX] =
1293 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1294 CPUID_EXT_SSE3,
1295 /* Missing: CPUID_EXT2_RDTSCP */
1296 .features[FEAT_8000_0001_EDX] =
1297 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1298 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1299 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1300 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1301 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1302 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1303 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1304 .features[FEAT_8000_0001_ECX] =
1305 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1306 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1307 .xlevel = 0x80000008,
1308 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1311 .name = "Opteron_G4",
1312 .level = 0xd,
1313 .vendor = CPUID_VENDOR_AMD,
1314 .family = 21,
1315 .model = 1,
1316 .stepping = 2,
1317 .features[FEAT_1_EDX] =
1318 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1319 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1320 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1321 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1322 CPUID_DE | CPUID_FP87,
1323 .features[FEAT_1_ECX] =
1324 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1325 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1326 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1327 CPUID_EXT_SSE3,
1328 /* Missing: CPUID_EXT2_RDTSCP */
1329 .features[FEAT_8000_0001_EDX] =
1330 CPUID_EXT2_LM |
1331 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1332 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1333 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1334 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1335 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1336 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1337 .features[FEAT_8000_0001_ECX] =
1338 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1339 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1340 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1341 CPUID_EXT3_LAHF_LM,
1342 /* no xsaveopt! */
1343 .xlevel = 0x8000001A,
1344 .model_id = "AMD Opteron 62xx class CPU",
1347 .name = "Opteron_G5",
1348 .level = 0xd,
1349 .vendor = CPUID_VENDOR_AMD,
1350 .family = 21,
1351 .model = 2,
1352 .stepping = 0,
1353 .features[FEAT_1_EDX] =
1354 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1355 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1356 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1357 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1358 CPUID_DE | CPUID_FP87,
1359 .features[FEAT_1_ECX] =
1360 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1361 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1362 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1363 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1364 /* Missing: CPUID_EXT2_RDTSCP */
1365 .features[FEAT_8000_0001_EDX] =
1366 CPUID_EXT2_LM |
1367 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1368 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1369 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1370 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1371 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1372 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1373 .features[FEAT_8000_0001_ECX] =
1374 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1375 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1376 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1377 CPUID_EXT3_LAHF_LM,
1378 /* no xsaveopt! */
1379 .xlevel = 0x8000001A,
1380 .model_id = "AMD Opteron 63xx class CPU",
/* A (property name, property value) pair, used for tables of QOM
 * property defaults applied with x86_cpu_apply_props().
 */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;

/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 *
 * Entries may be overridden at runtime via x86_cpu_change_kvm_default();
 * an entry whose value is set to NULL is skipped by
 * x86_cpu_apply_props().  The table is terminated by a { NULL, NULL }
 * sentinel.
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "x2apic", "on" },
    { "acpi", "off" },
    { "monitor", "off" },
    { "svm", "off" },
    { NULL, NULL },
};
/* Change the default value of one property in kvm_default_props.
 *
 * @prop: name of an entry that must already exist in kvm_default_props
 * @value: new default value (the pointer is stored, not copied; a NULL
 *         value makes x86_cpu_apply_props() skip the entry)
 *
 * Aborts (via assert) if @prop is not present in the table.
 */
void x86_cpu_change_kvm_default(const char *prop, const char *value)
{
    PropValue *pv;
    for (pv = kvm_default_props; pv->prop; pv++) {
        if (!strcmp(pv->prop, prop)) {
            pv->value = value;
            break;
        }
    }

    /* It is valid to call this function only for properties that
     * are already present in the kvm_default_props table.
     */
    assert(pv->prop);
}
1421 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1422 bool migratable_only);
1424 #ifdef CONFIG_KVM
/* Fill @str with the host CPU's 48-byte brand (model-id) string, read
 * from CPUID leaves 0x80000002..0x80000004 (16 bytes per leaf, in
 * EAX/EBX/ECX/EDX order).  @str must have room for 48 bytes; no NUL
 * terminator is appended here.  Always returns 0.
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t regs[4] = { 0, 0, 0, 0 };
    int leaf;

    for (leaf = 0; leaf < 3; leaf++) {
        host_cpuid(0x80000002 + leaf, 0,
                   &regs[0], &regs[1], &regs[2], &regs[3]);
        /* regs[] is contiguous, so one copy covers EAX..EDX. */
        memcpy(str + leaf * 16, regs, sizeof(regs));
    }
    return 0;
}
/* CPU model definition backing the "host" CPU model; populated from the
 * host's CPUID data by host_x86_cpu_class_init().
 */
static X86CPUDefinition host_cpudef;

/* qdev properties specific to the "host" CPU model. */
static Property host_x86_cpu_properties[] = {
    /* Only expose migratable features by default */
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    /* Pass the host cache topology through to the guest (off by default) */
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};
/* class_init for the "host" CPU model
 *
 * This function may be called before KVM is initialized.
 */
static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    xcc->kvm_required = true;

    /* Leaf 0: vendor string is returned scattered across EBX/EDX/ECX. */
    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);

    /* Leaf 1, EAX: family[11:8] + extended family[27:20],
     * model[7:4] combined with extended model[19:16], stepping[3:0].
     */
    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    host_cpudef.stepping = eax & 0x0F;

    cpu_x86_fill_model_id(host_cpudef.model_id);

    xcc->cpu_def = &host_cpudef;

    /* level, xlevel, xlevel2, and the feature words are initialized on
     * instance_init, because they require KVM to be initialized.
     */

    dc->props = host_x86_cpu_properties;
    /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
    dc->cannot_destroy_with_object_finalize_yet = true;
}
/* instance_init for the "host" CPU model: query KVM for the maximum
 * supported CPUID levels.  Requires KVM to be initialized and enabled
 * (asserted below).
 */
static void host_x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    KVMState *s = kvm_state;

    assert(kvm_enabled());

    /* We can't fill the features array here because we don't know yet if
     * "migratable" is true or false.
     */
    cpu->host_features = true;

    /* Maximum basic, extended, and Centaur CPUID leaves supported by KVM. */
    env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
    env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
    env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);

    object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}
/* QOM type registration info for the "host" CPU model. */
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = TYPE_X86_CPU,
    .instance_init = host_x86_cpu_initfn,
    .class_init = host_x86_cpu_class_init,
};

#endif
1511 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1513 FeatureWordInfo *f = &feature_word_info[w];
1514 int i;
1516 for (i = 0; i < 32; ++i) {
1517 if ((1UL << i) & mask) {
1518 const char *reg = get_register_name_32(f->cpuid_reg);
1519 assert(reg);
1520 fprintf(stderr, "warning: %s doesn't support requested feature: "
1521 "CPUID.%02XH:%s%s%s [bit %d]\n",
1522 kvm_enabled() ? "host" : "TCG",
1523 f->cpuid_eax, reg,
1524 f->feat_names[i] ? "." : "",
1525 f->feat_names[i] ? f->feat_names[i] : "", i);
1530 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1531 const char *name, void *opaque,
1532 Error **errp)
1534 X86CPU *cpu = X86_CPU(obj);
1535 CPUX86State *env = &cpu->env;
1536 int64_t value;
1538 value = (env->cpuid_version >> 8) & 0xf;
1539 if (value == 0xf) {
1540 value += (env->cpuid_version >> 20) & 0xff;
1542 visit_type_int(v, name, &value, errp);
1545 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1546 const char *name, void *opaque,
1547 Error **errp)
1549 X86CPU *cpu = X86_CPU(obj);
1550 CPUX86State *env = &cpu->env;
1551 const int64_t min = 0;
1552 const int64_t max = 0xff + 0xf;
1553 Error *local_err = NULL;
1554 int64_t value;
1556 visit_type_int(v, name, &value, &local_err);
1557 if (local_err) {
1558 error_propagate(errp, local_err);
1559 return;
1561 if (value < min || value > max) {
1562 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1563 name ? name : "null", value, min, max);
1564 return;
1567 env->cpuid_version &= ~0xff00f00;
1568 if (value > 0x0f) {
1569 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1570 } else {
1571 env->cpuid_version |= value << 8;
1575 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1576 const char *name, void *opaque,
1577 Error **errp)
1579 X86CPU *cpu = X86_CPU(obj);
1580 CPUX86State *env = &cpu->env;
1581 int64_t value;
1583 value = (env->cpuid_version >> 4) & 0xf;
1584 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1585 visit_type_int(v, name, &value, errp);
1588 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1589 const char *name, void *opaque,
1590 Error **errp)
1592 X86CPU *cpu = X86_CPU(obj);
1593 CPUX86State *env = &cpu->env;
1594 const int64_t min = 0;
1595 const int64_t max = 0xff;
1596 Error *local_err = NULL;
1597 int64_t value;
1599 visit_type_int(v, name, &value, &local_err);
1600 if (local_err) {
1601 error_propagate(errp, local_err);
1602 return;
1604 if (value < min || value > max) {
1605 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1606 name ? name : "null", value, min, max);
1607 return;
1610 env->cpuid_version &= ~0xf00f0;
1611 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1614 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1615 const char *name, void *opaque,
1616 Error **errp)
1618 X86CPU *cpu = X86_CPU(obj);
1619 CPUX86State *env = &cpu->env;
1620 int64_t value;
1622 value = env->cpuid_version & 0xf;
1623 visit_type_int(v, name, &value, errp);
1626 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1627 const char *name, void *opaque,
1628 Error **errp)
1630 X86CPU *cpu = X86_CPU(obj);
1631 CPUX86State *env = &cpu->env;
1632 const int64_t min = 0;
1633 const int64_t max = 0xf;
1634 Error *local_err = NULL;
1635 int64_t value;
1637 visit_type_int(v, name, &value, &local_err);
1638 if (local_err) {
1639 error_propagate(errp, local_err);
1640 return;
1642 if (value < min || value > max) {
1643 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1644 name ? name : "null", value, min, max);
1645 return;
1648 env->cpuid_version &= ~0xf;
1649 env->cpuid_version |= value & 0xf;
1652 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1654 X86CPU *cpu = X86_CPU(obj);
1655 CPUX86State *env = &cpu->env;
1656 char *value;
1658 value = g_malloc(CPUID_VENDOR_SZ + 1);
1659 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1660 env->cpuid_vendor3);
1661 return value;
1664 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1665 Error **errp)
1667 X86CPU *cpu = X86_CPU(obj);
1668 CPUX86State *env = &cpu->env;
1669 int i;
1671 if (strlen(value) != CPUID_VENDOR_SZ) {
1672 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1673 return;
1676 env->cpuid_vendor1 = 0;
1677 env->cpuid_vendor2 = 0;
1678 env->cpuid_vendor3 = 0;
1679 for (i = 0; i < 4; i++) {
1680 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1681 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1682 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1686 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1688 X86CPU *cpu = X86_CPU(obj);
1689 CPUX86State *env = &cpu->env;
1690 char *value;
1691 int i;
1693 value = g_malloc(48 + 1);
1694 for (i = 0; i < 48; i++) {
1695 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1697 value[48] = '\0';
1698 return value;
1701 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1702 Error **errp)
1704 X86CPU *cpu = X86_CPU(obj);
1705 CPUX86State *env = &cpu->env;
1706 int c, len, i;
1708 if (model_id == NULL) {
1709 model_id = "";
1711 len = strlen(model_id);
1712 memset(env->cpuid_model, 0, 48);
1713 for (i = 0; i < 48; i++) {
1714 if (i >= len) {
1715 c = '\0';
1716 } else {
1717 c = (uint8_t)model_id[i];
1719 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1723 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1724 void *opaque, Error **errp)
1726 X86CPU *cpu = X86_CPU(obj);
1727 int64_t value;
1729 value = cpu->env.tsc_khz * 1000;
1730 visit_type_int(v, name, &value, errp);
1733 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1734 void *opaque, Error **errp)
1736 X86CPU *cpu = X86_CPU(obj);
1737 const int64_t min = 0;
1738 const int64_t max = INT64_MAX;
1739 Error *local_err = NULL;
1740 int64_t value;
1742 visit_type_int(v, name, &value, &local_err);
1743 if (local_err) {
1744 error_propagate(errp, local_err);
1745 return;
1747 if (value < min || value > max) {
1748 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1749 name ? name : "null", value, min, max);
1750 return;
1753 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
1756 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, const char *name,
1757 void *opaque, Error **errp)
1759 X86CPU *cpu = X86_CPU(obj);
1760 int64_t value = cpu->apic_id;
1762 visit_type_int(v, name, &value, errp);
1765 static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, const char *name,
1766 void *opaque, Error **errp)
1768 X86CPU *cpu = X86_CPU(obj);
1769 DeviceState *dev = DEVICE(obj);
1770 const int64_t min = 0;
1771 const int64_t max = UINT32_MAX;
1772 Error *error = NULL;
1773 int64_t value;
1775 if (dev->realized) {
1776 error_setg(errp, "Attempt to set property '%s' on '%s' after "
1777 "it was realized", name, object_get_typename(obj));
1778 return;
1781 visit_type_int(v, name, &value, &error);
1782 if (error) {
1783 error_propagate(errp, error);
1784 return;
1786 if (value < min || value > max) {
1787 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1788 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
1789 object_get_typename(obj), name, value, min, max);
1790 return;
1793 if ((value != cpu->apic_id) && cpu_exists(value)) {
1794 error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
1795 return;
1797 cpu->apic_id = value;
/* Generic getter for "feature-words" and "filtered-features" properties */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    /* @opaque points at the per-CPU feature-word array to expose. */
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    Error *err = NULL;
    /* Both arrays live on the stack; the visitor consumes the list
     * before this function returns, so no heap allocation is needed.
     */
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        qwi->cpuid_input_eax = wi->cpuid_eax;
        qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid_ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, &err);
    error_propagate(errp, err);
}
1831 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1832 void *opaque, Error **errp)
1834 X86CPU *cpu = X86_CPU(obj);
1835 int64_t value = cpu->hyperv_spinlock_attempts;
1837 visit_type_int(v, name, &value, errp);
1840 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1841 void *opaque, Error **errp)
1843 const int64_t min = 0xFFF;
1844 const int64_t max = UINT_MAX;
1845 X86CPU *cpu = X86_CPU(obj);
1846 Error *err = NULL;
1847 int64_t value;
1849 visit_type_int(v, name, &value, &err);
1850 if (err) {
1851 error_propagate(errp, err);
1852 return;
1855 if (value < min || value > max) {
1856 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1857 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1858 object_get_typename(obj), name ? name : "null",
1859 value, min, max);
1860 return;
1862 cpu->hyperv_spinlock_attempts = value;
1865 static PropertyInfo qdev_prop_spinlocks = {
1866 .name = "int",
1867 .get = x86_get_hv_spinlocks,
1868 .set = x86_set_hv_spinlocks,
/* Convert all '_' in a feature string option name to '-', to make feature
 * name conform to QOM property naming rule, which uses '-' instead of '_'.
 */
static inline void feat2prop(char *s)
{
    char *p;

    for (p = s; *p != '\0'; p++) {
        if (*p == '_') {
            *p = '-';
        }
    }
}
1881 /* Parse "+feature,-feature,feature=foo" CPU feature string
1883 static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
1884 Error **errp)
1886 X86CPU *cpu = X86_CPU(cs);
1887 char *featurestr; /* Single 'key=value" string being parsed */
1888 FeatureWord w;
1889 /* Features to be added */
1890 FeatureWordArray plus_features = { 0 };
1891 /* Features to be removed */
1892 FeatureWordArray minus_features = { 0 };
1893 uint32_t numvalue;
1894 CPUX86State *env = &cpu->env;
1895 Error *local_err = NULL;
1897 featurestr = features ? strtok(features, ",") : NULL;
1899 while (featurestr) {
1900 char *val;
1901 if (featurestr[0] == '+') {
1902 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
1903 } else if (featurestr[0] == '-') {
1904 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
1905 } else if ((val = strchr(featurestr, '='))) {
1906 *val = 0; val++;
1907 feat2prop(featurestr);
1908 if (!strcmp(featurestr, "xlevel")) {
1909 char *err;
1910 char num[32];
1912 numvalue = strtoul(val, &err, 0);
1913 if (!*val || *err) {
1914 error_setg(errp, "bad numerical value %s", val);
1915 return;
1917 if (numvalue < 0x80000000) {
1918 error_report("xlevel value shall always be >= 0x80000000"
1919 ", fixup will be removed in future versions");
1920 numvalue += 0x80000000;
1922 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1923 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1924 } else if (!strcmp(featurestr, "tsc-freq")) {
1925 int64_t tsc_freq;
1926 char *err;
1927 char num[32];
1929 tsc_freq = qemu_strtosz_suffix_unit(val, &err,
1930 QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
1931 if (tsc_freq < 0 || *err) {
1932 error_setg(errp, "bad numerical value %s", val);
1933 return;
1935 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
1936 object_property_parse(OBJECT(cpu), num, "tsc-frequency",
1937 &local_err);
1938 } else if (!strcmp(featurestr, "hv-spinlocks")) {
1939 char *err;
1940 const int min = 0xFFF;
1941 char num[32];
1942 numvalue = strtoul(val, &err, 0);
1943 if (!*val || *err) {
1944 error_setg(errp, "bad numerical value %s", val);
1945 return;
1947 if (numvalue < min) {
1948 error_report("hv-spinlocks value shall always be >= 0x%x"
1949 ", fixup will be removed in future versions",
1950 min);
1951 numvalue = min;
1953 snprintf(num, sizeof(num), "%" PRId32, numvalue);
1954 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1955 } else {
1956 object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
1958 } else {
1959 feat2prop(featurestr);
1960 object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
1962 if (local_err) {
1963 error_propagate(errp, local_err);
1964 return;
1966 featurestr = strtok(NULL, ",");
1969 if (cpu->host_features) {
1970 for (w = 0; w < FEATURE_WORDS; w++) {
1971 env->features[w] =
1972 x86_cpu_get_supported_feature_word(w, cpu->migratable);
1976 for (w = 0; w < FEATURE_WORDS; w++) {
1977 env->features[w] |= plus_features[w];
1978 env->features[w] &= ~minus_features[w];
1982 /* Print all cpuid feature names in featureset
1984 static void listflags(FILE *f, fprintf_function print, const char **featureset)
1986 int bit;
1987 bool first = true;
1989 for (bit = 0; bit < 32; bit++) {
1990 if (featureset[bit]) {
1991 print(f, "%s%s", first ? "" : " ", featureset[bit]);
1992 first = false;
/* generate CPU information.
 *
 * Prints, via @cpu_fprintf, one line per built-in CPU model (name and
 * model_id), the "host" model when built with KVM support, and the list
 * of recognized CPUID flag names grouped by feature word.
 */
void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    X86CPUDefinition *def;
    char buf[256];
    int i;

    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
        def = &builtin_x86_defs[i];
        snprintf(buf, sizeof(buf), "%s", def->name);
        (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
    }
#ifdef CONFIG_KVM
    (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
                   "KVM processor with all supported host features "
                   "(only available in KVM mode)");
#endif

    (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
    for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
        FeatureWordInfo *fw = &feature_word_info[i];

        (*cpu_fprintf)(f, "  ");
        listflags(f, cpu_fprintf, fw->feat_names);
        (*cpu_fprintf)(f, "\n");
    }
}
2025 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2027 CpuDefinitionInfoList *cpu_list = NULL;
2028 X86CPUDefinition *def;
2029 int i;
2031 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2032 CpuDefinitionInfoList *entry;
2033 CpuDefinitionInfo *info;
2035 def = &builtin_x86_defs[i];
2036 info = g_malloc0(sizeof(*info));
2037 info->name = g_strdup(def->name);
2039 entry = g_malloc0(sizeof(*entry));
2040 entry->value = info;
2041 entry->next = cpu_list;
2042 cpu_list = entry;
2045 return cpu_list;
/* Return the feature bits of word @w supported by the current
 * accelerator: KVM's reported CPUID for KVM, the static tcg_features
 * mask for TCG, and all-ones for any other accelerator.  When
 * @migratable_only is set, bits that would block migration are masked
 * out.
 */
static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t r;

    if (kvm_enabled()) {
        r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
                                                    wi->cpuid_ecx,
                                                    wi->cpuid_reg);
    } else if (tcg_enabled()) {
        r = wi->tcg_features;
    } else {
        /* Unknown accelerator: assume everything is supported. */
        return ~0;
    }
    if (migratable_only) {
        r &= x86_cpu_get_migratable_flags(w);
    }
    return r;
}
2070 * Filters CPU feature words based on host availability of each feature.
2072 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
2074 static int x86_cpu_filter_features(X86CPU *cpu)
2076 CPUX86State *env = &cpu->env;
2077 FeatureWord w;
2078 int rv = 0;
2080 for (w = 0; w < FEATURE_WORDS; w++) {
2081 uint32_t host_feat =
2082 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2083 uint32_t requested_features = env->features[w];
2084 env->features[w] &= host_feat;
2085 cpu->filtered_features[w] = requested_features & ~env->features[w];
2086 if (cpu->filtered_features[w]) {
2087 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2088 report_unavailable_features(w, cpu->filtered_features[w]);
2090 rv = 1;
2094 return rv;
2097 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2099 PropValue *pv;
2100 for (pv = props; pv->prop; pv++) {
2101 if (!pv->value) {
2102 continue;
2104 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2105 &error_abort);
2109 /* Load data from X86CPUDefinition
2111 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2113 CPUX86State *env = &cpu->env;
2114 const char *vendor;
2115 char host_vendor[CPUID_VENDOR_SZ + 1];
2116 FeatureWord w;
2118 object_property_set_int(OBJECT(cpu), def->level, "level", errp);
2119 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2120 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2121 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2122 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
2123 object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
2124 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2125 for (w = 0; w < FEATURE_WORDS; w++) {
2126 env->features[w] = def->features[w];
2129 /* Special cases not set in the X86CPUDefinition structs: */
2130 if (kvm_enabled()) {
2131 x86_cpu_apply_props(cpu, kvm_default_props);
2134 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2136 /* sysenter isn't supported in compatibility mode on AMD,
2137 * syscall isn't supported in compatibility mode on Intel.
2138 * Normally we advertise the actual CPU vendor, but you can
2139 * override this using the 'vendor' property if you want to use
2140 * KVM's sysenter/syscall emulation in compatibility mode and
2141 * when doing cross vendor migration
2143 vendor = def->vendor;
2144 if (kvm_enabled()) {
2145 uint32_t ebx = 0, ecx = 0, edx = 0;
2146 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2147 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2148 vendor = host_vendor;
2151 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2155 X86CPU *cpu_x86_create(const char *cpu_model, Error **errp)
2157 X86CPU *cpu = NULL;
2158 X86CPUClass *xcc;
2159 ObjectClass *oc;
2160 gchar **model_pieces;
2161 char *name, *features;
2162 Error *error = NULL;
2164 model_pieces = g_strsplit(cpu_model, ",", 2);
2165 if (!model_pieces[0]) {
2166 error_setg(&error, "Invalid/empty CPU model name");
2167 goto out;
2169 name = model_pieces[0];
2170 features = model_pieces[1];
2172 oc = x86_cpu_class_by_name(name);
2173 if (oc == NULL) {
2174 error_setg(&error, "Unable to find CPU definition: %s", name);
2175 goto out;
2177 xcc = X86_CPU_CLASS(oc);
2179 if (xcc->kvm_required && !kvm_enabled()) {
2180 error_setg(&error, "CPU model '%s' requires KVM", name);
2181 goto out;
2184 cpu = X86_CPU(object_new(object_class_get_name(oc)));
2186 x86_cpu_parse_featurestr(CPU(cpu), features, &error);
2187 if (error) {
2188 goto out;
2191 out:
2192 if (error != NULL) {
2193 error_propagate(errp, error);
2194 if (cpu) {
2195 object_unref(OBJECT(cpu));
2196 cpu = NULL;
2199 g_strfreev(model_pieces);
2200 return cpu;
2203 X86CPU *cpu_x86_init(const char *cpu_model)
2205 Error *error = NULL;
2206 X86CPU *cpu;
2208 cpu = cpu_x86_create(cpu_model, &error);
2209 if (error) {
2210 goto out;
2213 object_property_set_bool(OBJECT(cpu), true, "realized", &error);
2215 out:
2216 if (error) {
2217 error_report_err(error);
2218 if (cpu != NULL) {
2219 object_unref(OBJECT(cpu));
2220 cpu = NULL;
2223 return cpu;
2226 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2228 X86CPUDefinition *cpudef = data;
2229 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2231 xcc->cpu_def = cpudef;
2234 static void x86_register_cpudef_type(X86CPUDefinition *def)
2236 char *typename = x86_cpu_type_name(def->name);
2237 TypeInfo ti = {
2238 .name = typename,
2239 .parent = TYPE_X86_CPU,
2240 .class_init = x86_cpu_cpudef_class_init,
2241 .class_data = def,
2244 type_register(&ti);
2245 g_free(typename);
2248 #if !defined(CONFIG_USER_ONLY)
2250 void cpu_clear_apic_feature(CPUX86State *env)
2252 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2255 #endif /* !CONFIG_USER_ONLY */
2257 /* Initialize list of CPU models, filling some non-static fields if necessary
2259 void x86_cpudef_setup(void)
2261 int i, j;
2262 static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
2264 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
2265 X86CPUDefinition *def = &builtin_x86_defs[i];
2267 /* Look for specific "cpudef" models that */
2268 /* have the QEMU version in .model_id */
2269 for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
2270 if (strcmp(model_with_versions[j], def->name) == 0) {
2271 pstrcpy(def->model_id, sizeof(def->model_id),
2272 "QEMU Virtual CPU version ");
2273 pstrcat(def->model_id, sizeof(def->model_id),
2274 qemu_hw_version());
2275 break;
2281 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2282 uint32_t *eax, uint32_t *ebx,
2283 uint32_t *ecx, uint32_t *edx)
2285 X86CPU *cpu = x86_env_get_cpu(env);
2286 CPUState *cs = CPU(cpu);
2288 /* test if maximum index reached */
2289 if (index & 0x80000000) {
2290 if (index > env->cpuid_xlevel) {
2291 if (env->cpuid_xlevel2 > 0) {
2292 /* Handle the Centaur's CPUID instruction. */
2293 if (index > env->cpuid_xlevel2) {
2294 index = env->cpuid_xlevel2;
2295 } else if (index < 0xC0000000) {
2296 index = env->cpuid_xlevel;
2298 } else {
2299 /* Intel documentation states that invalid EAX input will
2300 * return the same information as EAX=cpuid_level
2301 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2303 index = env->cpuid_level;
2306 } else {
2307 if (index > env->cpuid_level)
2308 index = env->cpuid_level;
2311 switch(index) {
2312 case 0:
2313 *eax = env->cpuid_level;
2314 *ebx = env->cpuid_vendor1;
2315 *edx = env->cpuid_vendor2;
2316 *ecx = env->cpuid_vendor3;
2317 break;
2318 case 1:
2319 *eax = env->cpuid_version;
2320 *ebx = (cpu->apic_id << 24) |
2321 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2322 *ecx = env->features[FEAT_1_ECX];
2323 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
2324 *ecx |= CPUID_EXT_OSXSAVE;
2326 *edx = env->features[FEAT_1_EDX];
2327 if (cs->nr_cores * cs->nr_threads > 1) {
2328 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2329 *edx |= CPUID_HT;
2331 break;
2332 case 2:
2333 /* cache info: needed for Pentium Pro compatibility */
2334 if (cpu->cache_info_passthrough) {
2335 host_cpuid(index, 0, eax, ebx, ecx, edx);
2336 break;
2338 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2339 *ebx = 0;
2340 *ecx = 0;
2341 *edx = (L1D_DESCRIPTOR << 16) | \
2342 (L1I_DESCRIPTOR << 8) | \
2343 (L2_DESCRIPTOR);
2344 break;
2345 case 4:
2346 /* cache info: needed for Core compatibility */
2347 if (cpu->cache_info_passthrough) {
2348 host_cpuid(index, count, eax, ebx, ecx, edx);
2349 *eax &= ~0xFC000000;
2350 } else {
2351 *eax = 0;
2352 switch (count) {
2353 case 0: /* L1 dcache info */
2354 *eax |= CPUID_4_TYPE_DCACHE | \
2355 CPUID_4_LEVEL(1) | \
2356 CPUID_4_SELF_INIT_LEVEL;
2357 *ebx = (L1D_LINE_SIZE - 1) | \
2358 ((L1D_PARTITIONS - 1) << 12) | \
2359 ((L1D_ASSOCIATIVITY - 1) << 22);
2360 *ecx = L1D_SETS - 1;
2361 *edx = CPUID_4_NO_INVD_SHARING;
2362 break;
2363 case 1: /* L1 icache info */
2364 *eax |= CPUID_4_TYPE_ICACHE | \
2365 CPUID_4_LEVEL(1) | \
2366 CPUID_4_SELF_INIT_LEVEL;
2367 *ebx = (L1I_LINE_SIZE - 1) | \
2368 ((L1I_PARTITIONS - 1) << 12) | \
2369 ((L1I_ASSOCIATIVITY - 1) << 22);
2370 *ecx = L1I_SETS - 1;
2371 *edx = CPUID_4_NO_INVD_SHARING;
2372 break;
2373 case 2: /* L2 cache info */
2374 *eax |= CPUID_4_TYPE_UNIFIED | \
2375 CPUID_4_LEVEL(2) | \
2376 CPUID_4_SELF_INIT_LEVEL;
2377 if (cs->nr_threads > 1) {
2378 *eax |= (cs->nr_threads - 1) << 14;
2380 *ebx = (L2_LINE_SIZE - 1) | \
2381 ((L2_PARTITIONS - 1) << 12) | \
2382 ((L2_ASSOCIATIVITY - 1) << 22);
2383 *ecx = L2_SETS - 1;
2384 *edx = CPUID_4_NO_INVD_SHARING;
2385 break;
2386 default: /* end of info */
2387 *eax = 0;
2388 *ebx = 0;
2389 *ecx = 0;
2390 *edx = 0;
2391 break;
2395 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2396 if ((*eax & 31) && cs->nr_cores > 1) {
2397 *eax |= (cs->nr_cores - 1) << 26;
2399 break;
2400 case 5:
2401 /* mwait info: needed for Core compatibility */
2402 *eax = 0; /* Smallest monitor-line size in bytes */
2403 *ebx = 0; /* Largest monitor-line size in bytes */
2404 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2405 *edx = 0;
2406 break;
2407 case 6:
2408 /* Thermal and Power Leaf */
2409 *eax = env->features[FEAT_6_EAX];
2410 *ebx = 0;
2411 *ecx = 0;
2412 *edx = 0;
2413 break;
2414 case 7:
2415 /* Structured Extended Feature Flags Enumeration Leaf */
2416 if (count == 0) {
2417 *eax = 0; /* Maximum ECX value for sub-leaves */
2418 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2419 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
2420 *edx = 0; /* Reserved */
2421 } else {
2422 *eax = 0;
2423 *ebx = 0;
2424 *ecx = 0;
2425 *edx = 0;
2427 break;
2428 case 9:
2429 /* Direct Cache Access Information Leaf */
2430 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2431 *ebx = 0;
2432 *ecx = 0;
2433 *edx = 0;
2434 break;
2435 case 0xA:
2436 /* Architectural Performance Monitoring Leaf */
2437 if (kvm_enabled() && cpu->enable_pmu) {
2438 KVMState *s = cs->kvm_state;
2440 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2441 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2442 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2443 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2444 } else {
2445 *eax = 0;
2446 *ebx = 0;
2447 *ecx = 0;
2448 *edx = 0;
2450 break;
2451 case 0xD: {
2452 KVMState *s = cs->kvm_state;
2453 uint64_t ena_mask;
2454 int i;
2456 /* Processor Extended State */
2457 *eax = 0;
2458 *ebx = 0;
2459 *ecx = 0;
2460 *edx = 0;
2461 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
2462 break;
2464 if (kvm_enabled()) {
2465 ena_mask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX);
2466 ena_mask <<= 32;
2467 ena_mask |= kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
2468 } else {
2469 ena_mask = -1;
2472 if (count == 0) {
2473 *ecx = 0x240;
2474 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
2475 const ExtSaveArea *esa = &x86_ext_save_areas[i];
2476 if ((env->features[esa->feature] & esa->bits) == esa->bits
2477 && ((ena_mask >> i) & 1) != 0) {
2478 if (i < 32) {
2479 *eax |= 1u << i;
2480 } else {
2481 *edx |= 1u << (i - 32);
2483 *ecx = MAX(*ecx, esa->offset + esa->size);
2486 *eax |= ena_mask & (XSTATE_FP | XSTATE_SSE);
2487 *ebx = *ecx;
2488 } else if (count == 1) {
2489 *eax = env->features[FEAT_XSAVE];
2490 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
2491 const ExtSaveArea *esa = &x86_ext_save_areas[count];
2492 if ((env->features[esa->feature] & esa->bits) == esa->bits
2493 && ((ena_mask >> count) & 1) != 0) {
2494 *eax = esa->size;
2495 *ebx = esa->offset;
2498 break;
2500 case 0x80000000:
2501 *eax = env->cpuid_xlevel;
2502 *ebx = env->cpuid_vendor1;
2503 *edx = env->cpuid_vendor2;
2504 *ecx = env->cpuid_vendor3;
2505 break;
2506 case 0x80000001:
2507 *eax = env->cpuid_version;
2508 *ebx = 0;
2509 *ecx = env->features[FEAT_8000_0001_ECX];
2510 *edx = env->features[FEAT_8000_0001_EDX];
2512 /* The Linux kernel checks for the CMPLegacy bit and
2513 * discards multiple thread information if it is set.
2514 * So dont set it here for Intel to make Linux guests happy.
2516 if (cs->nr_cores * cs->nr_threads > 1) {
2517 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2518 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2519 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2520 *ecx |= 1 << 1; /* CmpLegacy bit */
2523 break;
2524 case 0x80000002:
2525 case 0x80000003:
2526 case 0x80000004:
2527 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2528 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2529 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2530 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2531 break;
2532 case 0x80000005:
2533 /* cache info (L1 cache) */
2534 if (cpu->cache_info_passthrough) {
2535 host_cpuid(index, 0, eax, ebx, ecx, edx);
2536 break;
2538 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2539 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2540 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2541 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2542 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2543 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2544 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2545 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2546 break;
2547 case 0x80000006:
2548 /* cache info (L2 cache) */
2549 if (cpu->cache_info_passthrough) {
2550 host_cpuid(index, 0, eax, ebx, ecx, edx);
2551 break;
2553 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2554 (L2_DTLB_2M_ENTRIES << 16) | \
2555 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2556 (L2_ITLB_2M_ENTRIES);
2557 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2558 (L2_DTLB_4K_ENTRIES << 16) | \
2559 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2560 (L2_ITLB_4K_ENTRIES);
2561 *ecx = (L2_SIZE_KB_AMD << 16) | \
2562 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2563 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2564 *edx = ((L3_SIZE_KB/512) << 18) | \
2565 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2566 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2567 break;
2568 case 0x80000007:
2569 *eax = 0;
2570 *ebx = 0;
2571 *ecx = 0;
2572 *edx = env->features[FEAT_8000_0007_EDX];
2573 break;
2574 case 0x80000008:
2575 /* virtual & phys address size in low 2 bytes. */
2576 /* XXX: This value must match the one used in the MMU code. */
2577 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2578 /* 64 bit processor */
2579 /* XXX: The physical address space is limited to 42 bits in exec.c. */
2580 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
2581 } else {
2582 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
2583 *eax = 0x00000024; /* 36 bits physical */
2584 } else {
2585 *eax = 0x00000020; /* 32 bits physical */
2588 *ebx = 0;
2589 *ecx = 0;
2590 *edx = 0;
2591 if (cs->nr_cores * cs->nr_threads > 1) {
2592 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2594 break;
2595 case 0x8000000A:
2596 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2597 *eax = 0x00000001; /* SVM Revision */
2598 *ebx = 0x00000010; /* nr of ASIDs */
2599 *ecx = 0;
2600 *edx = env->features[FEAT_SVM]; /* optional features */
2601 } else {
2602 *eax = 0;
2603 *ebx = 0;
2604 *ecx = 0;
2605 *edx = 0;
2607 break;
2608 case 0xC0000000:
2609 *eax = env->cpuid_xlevel2;
2610 *ebx = 0;
2611 *ecx = 0;
2612 *edx = 0;
2613 break;
2614 case 0xC0000001:
2615 /* Support for VIA CPU's CPUID instruction */
2616 *eax = env->cpuid_version;
2617 *ebx = 0;
2618 *ecx = 0;
2619 *edx = env->features[FEAT_C000_0001_EDX];
2620 break;
2621 case 0xC0000002:
2622 case 0xC0000003:
2623 case 0xC0000004:
2624 /* Reserved for the future, and now filled with zero */
2625 *eax = 0;
2626 *ebx = 0;
2627 *ecx = 0;
2628 *edx = 0;
2629 break;
2630 default:
2631 /* reserved values: zero */
2632 *eax = 0;
2633 *ebx = 0;
2634 *ecx = 0;
2635 *edx = 0;
2636 break;
2640 /* CPUClass::reset() */
2641 static void x86_cpu_reset(CPUState *s)
2643 X86CPU *cpu = X86_CPU(s);
2644 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2645 CPUX86State *env = &cpu->env;
2646 target_ulong cr4;
2647 uint64_t xcr0;
2648 int i;
2650 xcc->parent_reset(s);
2652 memset(env, 0, offsetof(CPUX86State, cpuid_level));
2654 tlb_flush(s, 1);
2656 env->old_exception = -1;
2658 /* init to reset state */
2660 #ifdef CONFIG_SOFTMMU
2661 env->hflags |= HF_SOFTMMU_MASK;
2662 #endif
2663 env->hflags2 |= HF2_GIF_MASK;
2665 cpu_x86_update_cr0(env, 0x60000010);
2666 env->a20_mask = ~0x0;
2667 env->smbase = 0x30000;
2669 env->idt.limit = 0xffff;
2670 env->gdt.limit = 0xffff;
2671 env->ldt.limit = 0xffff;
2672 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2673 env->tr.limit = 0xffff;
2674 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
2676 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2677 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2678 DESC_R_MASK | DESC_A_MASK);
2679 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2680 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2681 DESC_A_MASK);
2682 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2683 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2684 DESC_A_MASK);
2685 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2686 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2687 DESC_A_MASK);
2688 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2689 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2690 DESC_A_MASK);
2691 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2692 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2693 DESC_A_MASK);
2695 env->eip = 0xfff0;
2696 env->regs[R_EDX] = env->cpuid_version;
2698 env->eflags = 0x2;
2700 /* FPU init */
2701 for (i = 0; i < 8; i++) {
2702 env->fptags[i] = 1;
2704 cpu_set_fpuc(env, 0x37f);
2706 env->mxcsr = 0x1f80;
2707 /* All units are in INIT state. */
2708 env->xstate_bv = 0;
2710 env->pat = 0x0007040600070406ULL;
2711 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2713 memset(env->dr, 0, sizeof(env->dr));
2714 env->dr[6] = DR6_FIXED_1;
2715 env->dr[7] = DR7_FIXED_1;
2716 cpu_breakpoint_remove_all(s, BP_CPU);
2717 cpu_watchpoint_remove_all(s, BP_CPU);
2719 cr4 = 0;
2720 xcr0 = XSTATE_FP;
2722 #ifdef CONFIG_USER_ONLY
2723 /* Enable all the features for user-mode. */
2724 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
2725 xcr0 |= XSTATE_SSE;
2727 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_MPX) {
2728 xcr0 |= XSTATE_BNDREGS | XSTATE_BNDCSR;
2730 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
2731 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
2733 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
2734 cr4 |= CR4_FSGSBASE_MASK;
2736 #endif
2738 env->xcr0 = xcr0;
2739 cpu_x86_update_cr4(env, cr4);
2742 * SDM 11.11.5 requires:
2743 * - IA32_MTRR_DEF_TYPE MSR.E = 0
2744 * - IA32_MTRR_PHYSMASKn.V = 0
2745 * All other bits are undefined. For simplification, zero it all.
2747 env->mtrr_deftype = 0;
2748 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
2749 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
2751 #if !defined(CONFIG_USER_ONLY)
2752 /* We hard-wire the BSP to the first CPU. */
2753 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
2755 s->halted = !cpu_is_bsp(cpu);
2757 if (kvm_enabled()) {
2758 kvm_arch_reset_vcpu(cpu);
2760 #endif
2763 #ifndef CONFIG_USER_ONLY
2764 bool cpu_is_bsp(X86CPU *cpu)
2766 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2769 /* TODO: remove me, when reset over QOM tree is implemented */
2770 static void x86_cpu_machine_reset_cb(void *opaque)
2772 X86CPU *cpu = opaque;
2773 cpu_reset(CPU(cpu));
2775 #endif
2777 static void mce_init(X86CPU *cpu)
2779 CPUX86State *cenv = &cpu->env;
2780 unsigned int bank;
2782 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2783 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2784 (CPUID_MCE | CPUID_MCA)) {
2785 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2786 cenv->mcg_ctl = ~(uint64_t)0;
2787 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2788 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2793 #ifndef CONFIG_USER_ONLY
2794 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2796 APICCommonState *apic;
2797 const char *apic_type = "apic";
2799 if (kvm_apic_in_kernel()) {
2800 apic_type = "kvm-apic";
2801 } else if (xen_enabled()) {
2802 apic_type = "xen-apic";
2805 cpu->apic_state = DEVICE(object_new(apic_type));
2807 object_property_add_child(OBJECT(cpu), "apic",
2808 OBJECT(cpu->apic_state), NULL);
2809 qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
2810 /* TODO: convert to link<> */
2811 apic = APIC_COMMON(cpu->apic_state);
2812 apic->cpu = cpu;
2813 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
2816 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2818 APICCommonState *apic;
2819 static bool apic_mmio_map_once;
2821 if (cpu->apic_state == NULL) {
2822 return;
2824 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
2825 errp);
2827 /* Map APIC MMIO area */
2828 apic = APIC_COMMON(cpu->apic_state);
2829 if (!apic_mmio_map_once) {
2830 memory_region_add_subregion_overlap(get_system_memory(),
2831 apic->apicbase &
2832 MSR_IA32_APICBASE_BASE,
2833 &apic->io_memory,
2834 0x1000);
2835 apic_mmio_map_once = true;
2839 static void x86_cpu_machine_done(Notifier *n, void *unused)
2841 X86CPU *cpu = container_of(n, X86CPU, machine_done);
2842 MemoryRegion *smram =
2843 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
2845 if (smram) {
2846 cpu->smram = g_new(MemoryRegion, 1);
2847 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
2848 smram, 0, 1ull << 32);
2849 memory_region_set_enabled(cpu->smram, false);
2850 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
2853 #else
2854 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2857 #endif
/* Vendor checks against the three CPUID[0] vendor-string words. */
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
2866 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
2868 CPUState *cs = CPU(dev);
2869 X86CPU *cpu = X86_CPU(dev);
2870 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
2871 CPUX86State *env = &cpu->env;
2872 Error *local_err = NULL;
2873 static bool ht_warned;
2875 if (cpu->apic_id < 0) {
2876 error_setg(errp, "apic-id property was not initialized properly");
2877 return;
2880 if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
2881 env->cpuid_level = 7;
2884 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
2885 * CPUID[1].EDX.
2887 if (IS_AMD_CPU(env)) {
2888 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
2889 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
2890 & CPUID_EXT2_AMD_ALIASES);
2894 if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
2895 error_setg(&local_err,
2896 kvm_enabled() ?
2897 "Host doesn't support requested features" :
2898 "TCG doesn't support requested features");
2899 goto out;
2902 #ifndef CONFIG_USER_ONLY
2903 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
2905 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
2906 x86_cpu_apic_create(cpu, &local_err);
2907 if (local_err != NULL) {
2908 goto out;
2911 #endif
2913 mce_init(cpu);
2915 #ifndef CONFIG_USER_ONLY
2916 if (tcg_enabled()) {
2917 AddressSpace *newas = g_new(AddressSpace, 1);
2919 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
2920 cpu->cpu_as_root = g_new(MemoryRegion, 1);
2922 /* Outer container... */
2923 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
2924 memory_region_set_enabled(cpu->cpu_as_root, true);
2926 /* ... with two regions inside: normal system memory with low
2927 * priority, and...
2929 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
2930 get_system_memory(), 0, ~0ull);
2931 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
2932 memory_region_set_enabled(cpu->cpu_as_mem, true);
2933 address_space_init(newas, cpu->cpu_as_root, "CPU");
2934 cs->num_ases = 1;
2935 cpu_address_space_init(cs, newas, 0);
2937 /* ... SMRAM with higher priority, linked from /machine/smram. */
2938 cpu->machine_done.notify = x86_cpu_machine_done;
2939 qemu_add_machine_init_done_notifier(&cpu->machine_done);
2941 #endif
2943 qemu_init_vcpu(cs);
2945 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
2946 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
2947 * based on inputs (sockets,cores,threads), it is still better to gives
2948 * users a warning.
2950 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
2951 * cs->nr_threads hasn't be populated yet and the checking is incorrect.
2953 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
2954 error_report("AMD CPU doesn't support hyperthreading. Please configure"
2955 " -smp options properly.");
2956 ht_warned = true;
2959 x86_cpu_apic_realize(cpu, &local_err);
2960 if (local_err != NULL) {
2961 goto out;
2963 cpu_reset(cs);
2965 xcc->parent_realize(dev, &local_err);
2967 out:
2968 if (local_err != NULL) {
2969 error_propagate(errp, local_err);
2970 return;
2974 typedef struct BitProperty {
2975 uint32_t *ptr;
2976 uint32_t mask;
2977 } BitProperty;
2979 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
2980 void *opaque, Error **errp)
2982 BitProperty *fp = opaque;
2983 bool value = (*fp->ptr & fp->mask) == fp->mask;
2984 visit_type_bool(v, name, &value, errp);
2987 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
2988 void *opaque, Error **errp)
2990 DeviceState *dev = DEVICE(obj);
2991 BitProperty *fp = opaque;
2992 Error *local_err = NULL;
2993 bool value;
2995 if (dev->realized) {
2996 qdev_prop_set_after_realize(dev, name, errp);
2997 return;
3000 visit_type_bool(v, name, &value, &local_err);
3001 if (local_err) {
3002 error_propagate(errp, local_err);
3003 return;
3006 if (value) {
3007 *fp->ptr |= fp->mask;
3008 } else {
3009 *fp->ptr &= ~fp->mask;
3013 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3014 void *opaque)
3016 BitProperty *prop = opaque;
3017 g_free(prop);
3020 /* Register a boolean property to get/set a single bit in a uint32_t field.
3022 * The same property name can be registered multiple times to make it affect
3023 * multiple bits in the same FeatureWord. In that case, the getter will return
3024 * true only if all bits are set.
3026 static void x86_cpu_register_bit_prop(X86CPU *cpu,
3027 const char *prop_name,
3028 uint32_t *field,
3029 int bitnr)
3031 BitProperty *fp;
3032 ObjectProperty *op;
3033 uint32_t mask = (1UL << bitnr);
3035 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3036 if (op) {
3037 fp = op->opaque;
3038 assert(fp->ptr == field);
3039 fp->mask |= mask;
3040 } else {
3041 fp = g_new0(BitProperty, 1);
3042 fp->ptr = field;
3043 fp->mask = mask;
3044 object_property_add(OBJECT(cpu), prop_name, "bool",
3045 x86_cpu_get_bit_prop,
3046 x86_cpu_set_bit_prop,
3047 x86_cpu_release_bit_prop, fp, &error_abort);
3051 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3052 FeatureWord w,
3053 int bitnr)
3055 Object *obj = OBJECT(cpu);
3056 int i;
3057 char **names;
3058 FeatureWordInfo *fi = &feature_word_info[w];
3060 if (!fi->feat_names) {
3061 return;
3063 if (!fi->feat_names[bitnr]) {
3064 return;
3067 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
3069 feat2prop(names[0]);
3070 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
3072 for (i = 1; names[i]; i++) {
3073 feat2prop(names[i]);
3074 object_property_add_alias(obj, names[i], obj, names[0],
3075 &error_abort);
3078 g_strfreev(names);
3081 static void x86_cpu_initfn(Object *obj)
3083 CPUState *cs = CPU(obj);
3084 X86CPU *cpu = X86_CPU(obj);
3085 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
3086 CPUX86State *env = &cpu->env;
3087 FeatureWord w;
3088 static int inited;
3090 cs->env_ptr = env;
3091 cpu_exec_init(cs, &error_abort);
3093 object_property_add(obj, "family", "int",
3094 x86_cpuid_version_get_family,
3095 x86_cpuid_version_set_family, NULL, NULL, NULL);
3096 object_property_add(obj, "model", "int",
3097 x86_cpuid_version_get_model,
3098 x86_cpuid_version_set_model, NULL, NULL, NULL);
3099 object_property_add(obj, "stepping", "int",
3100 x86_cpuid_version_get_stepping,
3101 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
3102 object_property_add_str(obj, "vendor",
3103 x86_cpuid_get_vendor,
3104 x86_cpuid_set_vendor, NULL);
3105 object_property_add_str(obj, "model-id",
3106 x86_cpuid_get_model_id,
3107 x86_cpuid_set_model_id, NULL);
3108 object_property_add(obj, "tsc-frequency", "int",
3109 x86_cpuid_get_tsc_freq,
3110 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
3111 object_property_add(obj, "apic-id", "int",
3112 x86_cpuid_get_apic_id,
3113 x86_cpuid_set_apic_id, NULL, NULL, NULL);
3114 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
3115 x86_cpu_get_feature_words,
3116 NULL, NULL, (void *)env->features, NULL);
3117 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
3118 x86_cpu_get_feature_words,
3119 NULL, NULL, (void *)cpu->filtered_features, NULL);
3121 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
3123 #ifndef CONFIG_USER_ONLY
3124 /* Any code creating new X86CPU objects have to set apic-id explicitly */
3125 cpu->apic_id = -1;
3126 #endif
3128 for (w = 0; w < FEATURE_WORDS; w++) {
3129 int bitnr;
3131 for (bitnr = 0; bitnr < 32; bitnr++) {
3132 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
3136 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
3138 /* init various static tables used in TCG mode */
3139 if (tcg_enabled() && !inited) {
3140 inited = 1;
3141 tcg_x86_init();
3145 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3147 X86CPU *cpu = X86_CPU(cs);
3149 return cpu->apic_id;
3152 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3154 X86CPU *cpu = X86_CPU(cs);
3156 return cpu->env.cr[0] & CR0_PG_MASK;
3159 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3161 X86CPU *cpu = X86_CPU(cs);
3163 cpu->env.eip = value;
3166 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3168 X86CPU *cpu = X86_CPU(cs);
3170 cpu->env.eip = tb->pc - tb->cs_base;
3173 static bool x86_cpu_has_work(CPUState *cs)
3175 X86CPU *cpu = X86_CPU(cs);
3176 CPUX86State *env = &cpu->env;
3178 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3179 CPU_INTERRUPT_POLL)) &&
3180 (env->eflags & IF_MASK)) ||
3181 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3182 CPU_INTERRUPT_INIT |
3183 CPU_INTERRUPT_SIPI |
3184 CPU_INTERRUPT_MCE)) ||
3185 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3186 !(env->hflags & HF_SMM_MASK));
/* qdev properties for X86CPU: each entry maps a user-visible -cpu
 * sub-option name onto an X86CPU struct field. */
static Property x86_cpu_properties[] = {
    /* Expose the performance monitoring unit (vPMU) to the guest. */
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    /* Hyper-V enlightenments for Windows guests (hv-* options): */
    { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
    DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
    DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
    DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
    DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
    /* check: warn on host/guest feature mismatch (on by default);
     * enforce: make such a mismatch a hard error. */
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    /* Expose the KVM paravirt CPUID leaves to the guest. */
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    /* Override maximum basic/extended CPUID levels (0 = use model default). */
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_END_OF_LIST()
};
3211 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
3213 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3214 CPUClass *cc = CPU_CLASS(oc);
3215 DeviceClass *dc = DEVICE_CLASS(oc);
3217 xcc->parent_realize = dc->realize;
3218 dc->realize = x86_cpu_realizefn;
3219 dc->props = x86_cpu_properties;
3221 xcc->parent_reset = cc->reset;
3222 cc->reset = x86_cpu_reset;
3223 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
3225 cc->class_by_name = x86_cpu_class_by_name;
3226 cc->parse_features = x86_cpu_parse_featurestr;
3227 cc->has_work = x86_cpu_has_work;
3228 cc->do_interrupt = x86_cpu_do_interrupt;
3229 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
3230 cc->dump_state = x86_cpu_dump_state;
3231 cc->set_pc = x86_cpu_set_pc;
3232 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
3233 cc->gdb_read_register = x86_cpu_gdb_read_register;
3234 cc->gdb_write_register = x86_cpu_gdb_write_register;
3235 cc->get_arch_id = x86_cpu_get_arch_id;
3236 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
3237 #ifdef CONFIG_USER_ONLY
3238 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
3239 #else
3240 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
3241 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
3242 cc->write_elf64_note = x86_cpu_write_elf64_note;
3243 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
3244 cc->write_elf32_note = x86_cpu_write_elf32_note;
3245 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
3246 cc->vmsd = &vmstate_x86_cpu;
3247 #endif
3248 cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
3249 #ifndef CONFIG_USER_ONLY
3250 cc->debug_excp_handler = breakpoint_handler;
3251 #endif
3252 cc->cpu_exec_enter = x86_cpu_exec_enter;
3253 cc->cpu_exec_exit = x86_cpu_exec_exit;
3256 * Reason: x86_cpu_initfn() calls cpu_exec_init(), which saves the
3257 * object in cpus -> dangling pointer after final object_unref().
3259 dc->cannot_destroy_with_object_finalize_yet = true;
/* QOM type description for the abstract X86 CPU base type; concrete CPU
 * models are registered as subtypes of this. */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    /* Abstract: only named CPU-model subclasses are instantiable. */
    .abstract = true,
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};
3272 static void x86_cpu_register_types(void)
3274 int i;
3276 type_register_static(&x86_cpu_type_info);
3277 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3278 x86_register_cpudef_type(&builtin_x86_defs[i]);
3280 #ifdef CONFIG_KVM
3281 type_register_static(&host_x86_cpu_type_info);
3282 #endif
3285 type_init(x86_cpu_register_types)