[qemu/ar7.git] / target-i386 / cpu.c
1 /*
2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include <stdlib.h>
20 #include <stdio.h>
21 #include <string.h>
22 #include <inttypes.h>
24 #include "cpu.h"
25 #include "sysemu/kvm.h"
26 #include "sysemu/cpus.h"
27 #include "kvm_i386.h"
29 #include "qemu/error-report.h"
30 #include "qemu/option.h"
31 #include "qemu/config-file.h"
32 #include "qapi/qmp/qerror.h"
34 #include "qapi-types.h"
35 #include "qapi-visit.h"
36 #include "qapi/visitor.h"
37 #include "sysemu/arch_init.h"
39 #include "hw/hw.h"
40 #if defined(CONFIG_KVM)
41 #include <linux/kvm_para.h>
42 #endif
44 #include "sysemu/sysemu.h"
45 #include "hw/qdev-properties.h"
46 #include "hw/cpu/icc_bus.h"
47 #ifndef CONFIG_USER_ONLY
48 #include "exec/address-spaces.h"
49 #include "hw/xen/xen.h"
50 #include "hw/i386/apic_internal.h"
51 #endif
54 /* Cache topology CPUID constants: */
56 /* CPUID Leaf 2 Descriptors */
58 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
59 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
60 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
63 /* CPUID Leaf 4 constants: */
65 /* EAX: */
66 #define CPUID_4_TYPE_DCACHE 1
67 #define CPUID_4_TYPE_ICACHE 2
68 #define CPUID_4_TYPE_UNIFIED 3
70 #define CPUID_4_LEVEL(l) ((l) << 5)
72 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
73 #define CPUID_4_FULLY_ASSOC (1 << 9)
75 /* EDX: */
76 #define CPUID_4_NO_INVD_SHARING (1 << 0)
77 #define CPUID_4_INCLUSIVE (1 << 1)
78 #define CPUID_4_COMPLEX_IDX (1 << 2)
80 #define ASSOC_FULL 0xFF
82 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
83 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
84 a == 2 ? 0x2 : \
85 a == 4 ? 0x4 : \
86 a == 8 ? 0x6 : \
87 a == 16 ? 0x8 : \
88 a == 32 ? 0xA : \
89 a == 48 ? 0xB : \
90 a == 64 ? 0xC : \
91 a == 96 ? 0xD : \
92 a == 128 ? 0xE : \
93 a == ASSOC_FULL ? 0xF : \
94 0 /* invalid value */)
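/* Illustrative sketch, not part of the original cpu.c: AMD_ENC_ASSOC() is
 * intended to be folded into the CPUID 0x80000006 register layout, where the
 * L2 associativity occupies ECX bits 15:12.  The helper name below is
 * hypothetical; e.g. an associativity of 16 ways encodes as 0x8 in that field.
 */
static inline uint32_t example_l2_assoc_field(unsigned int assoc)
{
    return (uint32_t)AMD_ENC_ASSOC(assoc) << 12;
}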
97 /* Definitions of the hardcoded cache entries we expose: */
99 /* L1 data cache: */
100 #define L1D_LINE_SIZE 64
101 #define L1D_ASSOCIATIVITY 8
102 #define L1D_SETS 64
103 #define L1D_PARTITIONS 1
104 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
105 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
106 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
107 #define L1D_LINES_PER_TAG 1
108 #define L1D_SIZE_KB_AMD 64
109 #define L1D_ASSOCIATIVITY_AMD 2
111 /* L1 instruction cache: */
112 #define L1I_LINE_SIZE 64
113 #define L1I_ASSOCIATIVITY 8
114 #define L1I_SETS 64
115 #define L1I_PARTITIONS 1
116 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
117 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
118 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
119 #define L1I_LINES_PER_TAG 1
120 #define L1I_SIZE_KB_AMD 64
121 #define L1I_ASSOCIATIVITY_AMD 2
123 /* Level 2 unified cache: */
124 #define L2_LINE_SIZE 64
125 #define L2_ASSOCIATIVITY 16
126 #define L2_SETS 4096
127 #define L2_PARTITIONS 1
128 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
129 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
130 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
131 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
132 #define L2_LINES_PER_TAG 1
133 #define L2_SIZE_KB_AMD 512
135 /* No L3 cache: */
136 #define L3_SIZE_KB 0 /* disabled */
137 #define L3_ASSOCIATIVITY 0 /* disabled */
138 #define L3_LINES_PER_TAG 0 /* disabled */
139 #define L3_LINE_SIZE 0 /* disabled */
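/* Consistency check, not part of the original cpu.c: the CPUID leaf 4
 * geometry above multiplies out to the sizes the comments claim:
 * 64 B * 8 ways * 64 sets * 1 partition = 32 KiB for L1D/L1I, and
 * 64 B * 16 ways * 4096 sets * 1 partition = 4 MiB for L2.  A build-time
 * assertion along these lines (QEMU_BUILD_BUG_ON is QEMU's usual helper)
 * would make the invariant explicit.
 */
QEMU_BUILD_BUG_ON(L1D_LINE_SIZE * L1D_ASSOCIATIVITY * L1D_SETS *
                  L1D_PARTITIONS != 32 * 1024);
QEMU_BUILD_BUG_ON(L2_LINE_SIZE * L2_ASSOCIATIVITY * L2_SETS *
                  L2_PARTITIONS != 4 * 1024 * 1024);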
141 /* TLB definitions: */
143 #define L1_DTLB_2M_ASSOC 1
144 #define L1_DTLB_2M_ENTRIES 255
145 #define L1_DTLB_4K_ASSOC 1
146 #define L1_DTLB_4K_ENTRIES 255
148 #define L1_ITLB_2M_ASSOC 1
149 #define L1_ITLB_2M_ENTRIES 255
150 #define L1_ITLB_4K_ASSOC 1
151 #define L1_ITLB_4K_ENTRIES 255
153 #define L2_DTLB_2M_ASSOC 0 /* disabled */
154 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
155 #define L2_DTLB_4K_ASSOC 4
156 #define L2_DTLB_4K_ENTRIES 512
158 #define L2_ITLB_2M_ASSOC 0 /* disabled */
159 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
160 #define L2_ITLB_4K_ASSOC 4
161 #define L2_ITLB_4K_ENTRIES 512
165 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
166                                      uint32_t vendor2, uint32_t vendor3)
167 {
168     int i;
169     for (i = 0; i < 4; i++) {
170         dst[i] = vendor1 >> (8 * i);
171         dst[i + 4] = vendor2 >> (8 * i);
172         dst[i + 8] = vendor3 >> (8 * i);
173     }
174     dst[CPUID_VENDOR_SZ] = '\0';
175 }
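/* Usage sketch, not part of the original cpu.c: CPUID leaf 0 returns the
 * vendor string as three little-endian dwords in EBX, EDX and ECX.  For
 * "AuthenticAMD" those are 0x68747541, 0x69746e65 and 0x444d4163; passing
 * them to the helper above in that order reassembles the 12-byte string.
 * The function name is hypothetical and only illustrates the byte order.
 */
static void example_decode_vendor(void)
{
    char vendor[CPUID_VENDOR_SZ + 1];

    x86_cpu_vendor_words2str(vendor, 0x68747541, 0x69746e65, 0x444d4163);
    /* vendor now holds "AuthenticAMD" */
}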
177 /* feature flags taken from "Intel Processor Identification and the CPUID
178 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
179 * between feature naming conventions, aliases may be added.
181 static const char *feature_name[] = {
182 "fpu", "vme", "de", "pse",
183 "tsc", "msr", "pae", "mce",
184 "cx8", "apic", NULL, "sep",
185 "mtrr", "pge", "mca", "cmov",
186 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
187 NULL, "ds" /* Intel dts */, "acpi", "mmx",
188 "fxsr", "sse", "sse2", "ss",
189 "ht" /* Intel htt */, "tm", "ia64", "pbe",
191 static const char *ext_feature_name[] = {
192 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
193 "ds_cpl", "vmx", "smx", "est",
194 "tm2", "ssse3", "cid", NULL,
195 "fma", "cx16", "xtpr", "pdcm",
196 NULL, "pcid", "dca", "sse4.1|sse4_1",
197 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
198 "tsc-deadline", "aes", "xsave", "osxsave",
199 "avx", "f16c", "rdrand", "hypervisor",
201 /* Feature names that are already defined on feature_name[] but are set on
202 * CPUID[8000_0001].EDX on AMD CPUs don't have their names on
203 * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
204 * if and only if CPU vendor is AMD.
206 static const char *ext2_feature_name[] = {
207 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
208 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
209 NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
210 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
211 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
212 "nx|xd", NULL, "mmxext", NULL /* mmx */,
213 NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
214 NULL, "lm|i64", "3dnowext", "3dnow",
216 static const char *ext3_feature_name[] = {
217 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
218 "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
219 "3dnowprefetch", "osvw", "ibs", "xop",
220 "skinit", "wdt", NULL, "lwp",
221 "fma4", "tce", NULL, "nodeid_msr",
222 NULL, "tbm", "topoext", "perfctr_core",
223 "perfctr_nb", NULL, NULL, NULL,
224 NULL, NULL, NULL, NULL,
227 static const char *ext4_feature_name[] = {
228 NULL, NULL, "xstore", "xstore-en",
229 NULL, NULL, "xcrypt", "xcrypt-en",
230 "ace2", "ace2-en", "phe", "phe-en",
231 "pmm", "pmm-en", NULL, NULL,
232 NULL, NULL, NULL, NULL,
233 NULL, NULL, NULL, NULL,
234 NULL, NULL, NULL, NULL,
235 NULL, NULL, NULL, NULL,
238 static const char *kvm_feature_name[] = {
239 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
240 "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
241 NULL, NULL, NULL, NULL,
242 NULL, NULL, NULL, NULL,
243 NULL, NULL, NULL, NULL,
244 NULL, NULL, NULL, NULL,
245 "kvmclock-stable-bit", NULL, NULL, NULL,
246 NULL, NULL, NULL, NULL,
249 static const char *svm_feature_name[] = {
250 "npt", "lbrv", "svm_lock", "nrip_save",
251 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
252 NULL, NULL, "pause_filter", NULL,
253 "pfthreshold", NULL, NULL, NULL,
254 NULL, NULL, NULL, NULL,
255 NULL, NULL, NULL, NULL,
256 NULL, NULL, NULL, NULL,
257 NULL, NULL, NULL, NULL,
260 static const char *cpuid_7_0_ebx_feature_name[] = {
261 "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
262 "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
263 "avx512f", NULL, "rdseed", "adx", "smap", NULL, NULL, NULL,
264 NULL, NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
267 static const char *cpuid_apm_edx_feature_name[] = {
268 NULL, NULL, NULL, NULL,
269 NULL, NULL, NULL, NULL,
270 "invtsc", NULL, NULL, NULL,
271 NULL, NULL, NULL, NULL,
272 NULL, NULL, NULL, NULL,
273 NULL, NULL, NULL, NULL,
274 NULL, NULL, NULL, NULL,
275 NULL, NULL, NULL, NULL,
278 static const char *cpuid_xsave_feature_name[] = {
279 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
280 NULL, NULL, NULL, NULL,
281 NULL, NULL, NULL, NULL,
282 NULL, NULL, NULL, NULL,
283 NULL, NULL, NULL, NULL,
284 NULL, NULL, NULL, NULL,
285 NULL, NULL, NULL, NULL,
286 NULL, NULL, NULL, NULL,
289 static const char *cpuid_6_feature_name[] = {
290 NULL, NULL, "arat", NULL,
291 NULL, NULL, NULL, NULL,
292 NULL, NULL, NULL, NULL,
293 NULL, NULL, NULL, NULL,
294 NULL, NULL, NULL, NULL,
295 NULL, NULL, NULL, NULL,
296 NULL, NULL, NULL, NULL,
297 NULL, NULL, NULL, NULL,
300 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
301 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
302 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
303 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
304 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
305 CPUID_PSE36 | CPUID_FXSR)
306 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
307 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
308 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
309 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
310 CPUID_PAE | CPUID_SEP | CPUID_APIC)
312 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
313 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
314 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
315 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
316 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS)
317 /* partly implemented:
318 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
319 /* missing:
320 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
321 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
322 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
323 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
324 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
325 /* missing:
326 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
327 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
328 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
329 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_XSAVE,
330 CPUID_EXT_OSXSAVE, CPUID_EXT_AVX, CPUID_EXT_F16C,
331 CPUID_EXT_RDRAND */
333 #ifdef TARGET_X86_64
334 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
335 #else
336 #define TCG_EXT2_X86_64_FEATURES 0
337 #endif
339 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
340 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
341 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
342 TCG_EXT2_X86_64_FEATURES)
343 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
344 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
345 #define TCG_EXT4_FEATURES 0
346 #define TCG_SVM_FEATURES 0
347 #define TCG_KVM_FEATURES 0
348 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
349 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX)
350 /* missing:
351 CPUID_7_0_EBX_FSGSBASE, CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
352 CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
353 CPUID_7_0_EBX_RDSEED */
354 #define TCG_APM_FEATURES 0
355 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
358 typedef struct FeatureWordInfo {
359 const char **feat_names;
360 uint32_t cpuid_eax; /* Input EAX for CPUID */
361 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
362 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
363 int cpuid_reg; /* output register (R_* constant) */
364 uint32_t tcg_features; /* Feature flags supported by TCG */
365 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
366 } FeatureWordInfo;
368 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
369 [FEAT_1_EDX] = {
370 .feat_names = feature_name,
371 .cpuid_eax = 1, .cpuid_reg = R_EDX,
372 .tcg_features = TCG_FEATURES,
374 [FEAT_1_ECX] = {
375 .feat_names = ext_feature_name,
376 .cpuid_eax = 1, .cpuid_reg = R_ECX,
377 .tcg_features = TCG_EXT_FEATURES,
379 [FEAT_8000_0001_EDX] = {
380 .feat_names = ext2_feature_name,
381 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
382 .tcg_features = TCG_EXT2_FEATURES,
384 [FEAT_8000_0001_ECX] = {
385 .feat_names = ext3_feature_name,
386 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
387 .tcg_features = TCG_EXT3_FEATURES,
389 [FEAT_C000_0001_EDX] = {
390 .feat_names = ext4_feature_name,
391 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
392 .tcg_features = TCG_EXT4_FEATURES,
394 [FEAT_KVM] = {
395 .feat_names = kvm_feature_name,
396 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
397 .tcg_features = TCG_KVM_FEATURES,
399 [FEAT_SVM] = {
400 .feat_names = svm_feature_name,
401 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
402 .tcg_features = TCG_SVM_FEATURES,
404 [FEAT_7_0_EBX] = {
405 .feat_names = cpuid_7_0_ebx_feature_name,
406 .cpuid_eax = 7,
407 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
408 .cpuid_reg = R_EBX,
409 .tcg_features = TCG_7_0_EBX_FEATURES,
411 [FEAT_8000_0007_EDX] = {
412 .feat_names = cpuid_apm_edx_feature_name,
413 .cpuid_eax = 0x80000007,
414 .cpuid_reg = R_EDX,
415 .tcg_features = TCG_APM_FEATURES,
416 .unmigratable_flags = CPUID_APM_INVTSC,
418 [FEAT_XSAVE] = {
419 .feat_names = cpuid_xsave_feature_name,
420 .cpuid_eax = 0xd,
421 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
422 .cpuid_reg = R_EAX,
423 .tcg_features = 0,
425 [FEAT_6_EAX] = {
426 .feat_names = cpuid_6_feature_name,
427 .cpuid_eax = 6, .cpuid_reg = R_EAX,
428 .tcg_features = TCG_6_EAX_FEATURES,
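/* Sketch, not part of the original cpu.c: how a FeatureWordInfo entry drives
 * a raw host CPUID probe.  The wrapper name is hypothetical; the real code
 * paths go through kvm_arch_get_supported_cpuid() when KVM is in use.
 */
static uint32_t example_probe_host_feature_word(FeatureWord w)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t regs[4] = { 0, 0, 0, 0 };

    host_cpuid(wi->cpuid_eax, wi->cpuid_needs_ecx ? wi->cpuid_ecx : 0,
               &regs[R_EAX], &regs[R_EBX], &regs[R_ECX], &regs[R_EDX]);
    return regs[wi->cpuid_reg];
}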
432 typedef struct X86RegisterInfo32 {
433 /* Name of register */
434 const char *name;
435 /* QAPI enum value register */
436 X86CPURegister32 qapi_enum;
437 } X86RegisterInfo32;
439 #define REGISTER(reg) \
440 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
441 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
442 REGISTER(EAX),
443 REGISTER(ECX),
444 REGISTER(EDX),
445 REGISTER(EBX),
446 REGISTER(ESP),
447 REGISTER(EBP),
448 REGISTER(ESI),
449 REGISTER(EDI),
451 #undef REGISTER
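/* For reference, not part of the original cpu.c: REGISTER(EAX) above expands
 * to
 *     [R_EAX] = { .name = "EAX", .qapi_enum = X86_CPU_REGISTER32_EAX }
 * so x86_reg_info_32[] can be indexed directly with the R_* constants used
 * throughout this file.
 */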
453 typedef struct ExtSaveArea {
454 uint32_t feature, bits;
455 uint32_t offset, size;
456 } ExtSaveArea;
458 static const ExtSaveArea ext_save_areas[] = {
459 [2] = { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
460 .offset = 0x240, .size = 0x100 },
461 [3] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
462 .offset = 0x3c0, .size = 0x40 },
463 [4] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
464 .offset = 0x400, .size = 0x40 },
465 [5] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
466 .offset = 0x440, .size = 0x40 },
467 [6] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
468 .offset = 0x480, .size = 0x200 },
469 [7] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
470 .offset = 0x680, .size = 0x400 },
473 const char *get_register_name_32(unsigned int reg)
474 {
475     if (reg >= CPU_NB_REGS32) {
476         return NULL;
477     }
478     return x86_reg_info_32[reg].name;
479 }
481 /* KVM-specific features that are automatically added to all CPU models
482 * when KVM is enabled.
484 static uint32_t kvm_default_features[FEATURE_WORDS] = {
485 [FEAT_KVM] = (1 << KVM_FEATURE_CLOCKSOURCE) |
486 (1 << KVM_FEATURE_NOP_IO_DELAY) |
487 (1 << KVM_FEATURE_CLOCKSOURCE2) |
488 (1 << KVM_FEATURE_ASYNC_PF) |
489 (1 << KVM_FEATURE_STEAL_TIME) |
490 (1 << KVM_FEATURE_PV_EOI) |
491 (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT),
492 [FEAT_1_ECX] = CPUID_EXT_X2APIC,
495 /* Features that are not added by default to any CPU model when KVM is enabled.
497 static uint32_t kvm_default_unset_features[FEATURE_WORDS] = {
498 [FEAT_1_EDX] = CPUID_ACPI,
499 [FEAT_1_ECX] = CPUID_EXT_MONITOR,
500 [FEAT_8000_0001_ECX] = CPUID_EXT3_SVM,
503 void x86_cpu_compat_kvm_no_autoenable(FeatureWord w, uint32_t features)
505 kvm_default_features[w] &= ~features;
508 void x86_cpu_compat_kvm_no_autodisable(FeatureWord w, uint32_t features)
510 kvm_default_unset_features[w] &= ~features;
514 * Returns the set of feature flags that are supported and migratable by
515 * QEMU, for a given FeatureWord.
517 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
519 FeatureWordInfo *wi = &feature_word_info[w];
520 uint32_t r = 0;
521 int i;
523 for (i = 0; i < 32; i++) {
524 uint32_t f = 1U << i;
525 /* If the feature name is unknown, it is not supported by QEMU yet */
526 if (!wi->feat_names[i]) {
527 continue;
529 /* Skip features known to QEMU, but explicitly marked as unmigratable */
530 if (wi->unmigratable_flags & f) {
531 continue;
533 r |= f;
535 return r;
538 void host_cpuid(uint32_t function, uint32_t count,
539 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
541 uint32_t vec[4];
543 #ifdef __x86_64__
544 asm volatile("cpuid"
545 : "=a"(vec[0]), "=b"(vec[1]),
546 "=c"(vec[2]), "=d"(vec[3])
547 : "0"(function), "c"(count) : "cc");
548 #elif defined(__i386__)
549 asm volatile("pusha \n\t"
550 "cpuid \n\t"
551 "mov %%eax, 0(%2) \n\t"
552 "mov %%ebx, 4(%2) \n\t"
553 "mov %%ecx, 8(%2) \n\t"
554 "mov %%edx, 12(%2) \n\t"
555 "popa"
556 : : "a"(function), "c"(count), "S"(vec)
557 : "memory", "cc");
558 #else
559 abort();
560 #endif
562 if (eax)
563 *eax = vec[0];
564 if (ebx)
565 *ebx = vec[1];
566 if (ecx)
567 *ecx = vec[2];
568 if (edx)
569 *edx = vec[3];
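/* Usage sketch, not part of the original cpu.c: host_cpuid() is also how the
 * "host" CPU model discovers the CPUID levels it may expose.  Leaf 0 returns
 * the maximum basic leaf in EAX, and leaf 0x80000000 the maximum extended
 * leaf; the helper below (hypothetical name) mirrors that pattern.
 */
static uint32_t example_host_max_extended_leaf(void)
{
    uint32_t eax, ebx, ecx, edx;

    host_cpuid(0x80000000, 0, &eax, &ebx, &ecx, &edx);
    return eax;   /* highest supported 0x8000xxxx leaf on this host */
}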
572 #define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
574 /* General substring compare of *[s1..e1) and *[s2..e2).  sx is the start of
575  * a substring; ex, if not NULL, points to the first char after the substring,
576  * otherwise the string is assumed to be sized by a terminating nul.
577  * Returns the lexical ordering of *s1:*s2.
578  */
579 static int sstrcmp(const char *s1, const char *e1,
580                    const char *s2, const char *e2)
581 {
582     for (;;) {
583         if (!*s1 || !*s2 || *s1 != *s2)
584             return (*s1 - *s2);
585         ++s1, ++s2;
586         if (s1 == e1 && s2 == e2)
587             return (0);
588         else if (s1 == e1)
589             return (*s2);
590         else if (s2 == e2)
591             return (*s1);
592     }
593 }
595 /* Compare *[s..e) to *altstr.  *altstr may be a simple string or multiple
596  * '|'-delimited (possibly empty) strings, in which case the search for a
597  * match within the alternatives proceeds left to right.  Return 0 for
598  * success, non-zero otherwise.
599  */
600 static int altcmp(const char *s, const char *e, const char *altstr)
601 {
602     const char *p, *q;
603 
604     for (q = p = altstr; ; ) {
605         while (*p && *p != '|')
606             ++p;
607         if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
608             return (0);
609         if (!*p)
610             return (1);
611         else
612             q = ++p;
613     }
614 }
616 /* Search featureset for the flag *[s..e); if found, set the corresponding
617  * bit in *pval and return true, otherwise return false.
618  */
619 static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
620                            const char **featureset)
621 {
622     uint32_t mask;
623     const char **ppc;
624     bool found = false;
625 
626     for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
627         if (*ppc && !altcmp(s, e, *ppc)) {
628             *pval |= mask;
629             found = true;
630         }
631     }
632     return found;
633 }
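/* Example, not part of the original cpu.c: the "a|b" alias syntax in the
 * feature name tables is resolved through altcmp(), so both spellings of a
 * flag select the same bit.  The function name is hypothetical.
 */
static void example_alias_lookup(void)
{
    uint32_t val = 0;

    /* ext_feature_name[19] is "sse4.1|sse4_1", i.e. CPUID_EXT_SSE41 */
    lookup_feature(&val, "sse4_1", NULL, ext_feature_name);
    /* val now equals CPUID_EXT_SSE41 (bit 19) */
}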
635 static void add_flagname_to_bitmaps(const char *flagname,
636 FeatureWordArray words,
637 Error **errp)
639 FeatureWord w;
640 for (w = 0; w < FEATURE_WORDS; w++) {
641 FeatureWordInfo *wi = &feature_word_info[w];
642 if (wi->feat_names &&
643 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
644 break;
647 if (w == FEATURE_WORDS) {
648 error_setg(errp, "CPU feature %s not found", flagname);
652 /* CPU class name definitions: */
654 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
655 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
657 /* Return type name for a given CPU model name
658  * Caller is responsible for freeing the returned string.
659  */
660 static char *x86_cpu_type_name(const char *model_name)
661 {
662     return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
663 }
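/* Example, not part of the original cpu.c: on a 64-bit target TYPE_X86_CPU is
 * "x86_64-cpu", so x86_cpu_type_name("qemu64") returns the heap-allocated
 * string "qemu64-x86_64-cpu", which the caller must g_free().
 */
static void example_model_to_typename(void)
{
    char *typename = x86_cpu_type_name("qemu64");

    /* typename is "qemu64-x86_64-cpu" here (or "qemu64-i386-cpu" on i386) */
    g_free(typename);
}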
665 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
667 ObjectClass *oc;
668 char *typename;
670 if (cpu_model == NULL) {
671 return NULL;
674 typename = x86_cpu_type_name(cpu_model);
675 oc = object_class_by_name(typename);
676 g_free(typename);
677 return oc;
680 struct X86CPUDefinition {
681 const char *name;
682 uint32_t level;
683 uint32_t xlevel;
684 uint32_t xlevel2;
685 /* vendor is zero-terminated, 12 character ASCII string */
686 char vendor[CPUID_VENDOR_SZ + 1];
687 int family;
688 int model;
689 int stepping;
690 FeatureWordArray features;
691 char model_id[48];
692 bool cache_info_passthrough;
695 static X86CPUDefinition builtin_x86_defs[] = {
697 .name = "qemu64",
698 .level = 4,
699 .vendor = CPUID_VENDOR_AMD,
700 .family = 6,
701 .model = 6,
702 .stepping = 3,
703 .features[FEAT_1_EDX] =
704 PPRO_FEATURES |
705 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
706 CPUID_PSE36,
707 .features[FEAT_1_ECX] =
708 CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT,
709 .features[FEAT_8000_0001_EDX] =
710 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
711 .features[FEAT_8000_0001_ECX] =
712 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
713 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
714 .xlevel = 0x8000000A,
717 .name = "phenom",
718 .level = 5,
719 .vendor = CPUID_VENDOR_AMD,
720 .family = 16,
721 .model = 2,
722 .stepping = 3,
723 /* Missing: CPUID_HT */
724 .features[FEAT_1_EDX] =
725 PPRO_FEATURES |
726 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
727 CPUID_PSE36 | CPUID_VME,
728 .features[FEAT_1_ECX] =
729 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
730 CPUID_EXT_POPCNT,
731 .features[FEAT_8000_0001_EDX] =
732 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
733 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
734 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
735 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
736 CPUID_EXT3_CR8LEG,
737 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
738 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
739 .features[FEAT_8000_0001_ECX] =
740 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
741 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
742 /* Missing: CPUID_SVM_LBRV */
743 .features[FEAT_SVM] =
744 CPUID_SVM_NPT,
745 .xlevel = 0x8000001A,
746 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
749 .name = "core2duo",
750 .level = 10,
751 .vendor = CPUID_VENDOR_INTEL,
752 .family = 6,
753 .model = 15,
754 .stepping = 11,
755 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
756 .features[FEAT_1_EDX] =
757 PPRO_FEATURES |
758 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
759 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
760 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
761 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
762 .features[FEAT_1_ECX] =
763 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
764 CPUID_EXT_CX16,
765 .features[FEAT_8000_0001_EDX] =
766 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
767 .features[FEAT_8000_0001_ECX] =
768 CPUID_EXT3_LAHF_LM,
769 .xlevel = 0x80000008,
770 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
773 .name = "kvm64",
774 .level = 5,
775 .vendor = CPUID_VENDOR_INTEL,
776 .family = 15,
777 .model = 6,
778 .stepping = 1,
779 /* Missing: CPUID_HT */
780 .features[FEAT_1_EDX] =
781 PPRO_FEATURES | CPUID_VME |
782 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
783 CPUID_PSE36,
784 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
785 .features[FEAT_1_ECX] =
786 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
787 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
788 .features[FEAT_8000_0001_EDX] =
789 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
790 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
791 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
792 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
793 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
794 .features[FEAT_8000_0001_ECX] =
796 .xlevel = 0x80000008,
797 .model_id = "Common KVM processor"
800 .name = "qemu32",
801 .level = 4,
802 .vendor = CPUID_VENDOR_INTEL,
803 .family = 6,
804 .model = 6,
805 .stepping = 3,
806 .features[FEAT_1_EDX] =
807 PPRO_FEATURES,
808 .features[FEAT_1_ECX] =
809 CPUID_EXT_SSE3 | CPUID_EXT_POPCNT,
810 .xlevel = 0x80000004,
813 .name = "kvm32",
814 .level = 5,
815 .vendor = CPUID_VENDOR_INTEL,
816 .family = 15,
817 .model = 6,
818 .stepping = 1,
819 .features[FEAT_1_EDX] =
820 PPRO_FEATURES | CPUID_VME |
821 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
822 .features[FEAT_1_ECX] =
823 CPUID_EXT_SSE3,
824 .features[FEAT_8000_0001_ECX] =
826 .xlevel = 0x80000008,
827 .model_id = "Common 32-bit KVM processor"
830 .name = "coreduo",
831 .level = 10,
832 .vendor = CPUID_VENDOR_INTEL,
833 .family = 6,
834 .model = 14,
835 .stepping = 8,
836 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
837 .features[FEAT_1_EDX] =
838 PPRO_FEATURES | CPUID_VME |
839 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
840 CPUID_SS,
841 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
842 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
843 .features[FEAT_1_ECX] =
844 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
845 .features[FEAT_8000_0001_EDX] =
846 CPUID_EXT2_NX,
847 .xlevel = 0x80000008,
848 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
851 .name = "486",
852 .level = 1,
853 .vendor = CPUID_VENDOR_INTEL,
854 .family = 4,
855 .model = 8,
856 .stepping = 0,
857 .features[FEAT_1_EDX] =
858 I486_FEATURES,
859 .xlevel = 0,
862 .name = "pentium",
863 .level = 1,
864 .vendor = CPUID_VENDOR_INTEL,
865 .family = 5,
866 .model = 4,
867 .stepping = 3,
868 .features[FEAT_1_EDX] =
869 PENTIUM_FEATURES,
870 .xlevel = 0,
873 .name = "pentium2",
874 .level = 2,
875 .vendor = CPUID_VENDOR_INTEL,
876 .family = 6,
877 .model = 5,
878 .stepping = 2,
879 .features[FEAT_1_EDX] =
880 PENTIUM2_FEATURES,
881 .xlevel = 0,
884 .name = "pentium3",
885 .level = 2,
886 .vendor = CPUID_VENDOR_INTEL,
887 .family = 6,
888 .model = 7,
889 .stepping = 3,
890 .features[FEAT_1_EDX] =
891 PENTIUM3_FEATURES,
892 .xlevel = 0,
895 .name = "athlon",
896 .level = 2,
897 .vendor = CPUID_VENDOR_AMD,
898 .family = 6,
899 .model = 2,
900 .stepping = 3,
901 .features[FEAT_1_EDX] =
902 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
903 CPUID_MCA,
904 .features[FEAT_8000_0001_EDX] =
905 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
906 .xlevel = 0x80000008,
909 .name = "n270",
910 /* original is on level 10 */
911 .level = 5,
912 .vendor = CPUID_VENDOR_INTEL,
913 .family = 6,
914 .model = 28,
915 .stepping = 2,
916 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
917 .features[FEAT_1_EDX] =
918 PPRO_FEATURES |
919 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
920 CPUID_ACPI | CPUID_SS,
921 /* Some CPUs got no CPUID_SEP */
922 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
923 * CPUID_EXT_XTPR */
924 .features[FEAT_1_ECX] =
925 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
926 CPUID_EXT_MOVBE,
927 .features[FEAT_8000_0001_EDX] =
928 CPUID_EXT2_NX,
929 .features[FEAT_8000_0001_ECX] =
930 CPUID_EXT3_LAHF_LM,
931 .xlevel = 0x8000000A,
932 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
935 .name = "Conroe",
936 .level = 4,
937 .vendor = CPUID_VENDOR_INTEL,
938 .family = 6,
939 .model = 15,
940 .stepping = 3,
941 .features[FEAT_1_EDX] =
942 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
943 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
944 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
945 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
946 CPUID_DE | CPUID_FP87,
947 .features[FEAT_1_ECX] =
948 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
949 .features[FEAT_8000_0001_EDX] =
950 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
951 .features[FEAT_8000_0001_ECX] =
952 CPUID_EXT3_LAHF_LM,
953 .xlevel = 0x8000000A,
954 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
957 .name = "Penryn",
958 .level = 4,
959 .vendor = CPUID_VENDOR_INTEL,
960 .family = 6,
961 .model = 23,
962 .stepping = 3,
963 .features[FEAT_1_EDX] =
964 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
965 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
966 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
967 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
968 CPUID_DE | CPUID_FP87,
969 .features[FEAT_1_ECX] =
970 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
971 CPUID_EXT_SSE3,
972 .features[FEAT_8000_0001_EDX] =
973 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
974 .features[FEAT_8000_0001_ECX] =
975 CPUID_EXT3_LAHF_LM,
976 .xlevel = 0x8000000A,
977 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
980 .name = "Nehalem",
981 .level = 4,
982 .vendor = CPUID_VENDOR_INTEL,
983 .family = 6,
984 .model = 26,
985 .stepping = 3,
986 .features[FEAT_1_EDX] =
987 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
988 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
989 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
990 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
991 CPUID_DE | CPUID_FP87,
992 .features[FEAT_1_ECX] =
993 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
994 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
995 .features[FEAT_8000_0001_EDX] =
996 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
997 .features[FEAT_8000_0001_ECX] =
998 CPUID_EXT3_LAHF_LM,
999 .xlevel = 0x8000000A,
1000 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1003 .name = "Westmere",
1004 .level = 11,
1005 .vendor = CPUID_VENDOR_INTEL,
1006 .family = 6,
1007 .model = 44,
1008 .stepping = 1,
1009 .features[FEAT_1_EDX] =
1010 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1011 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1012 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1013 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1014 CPUID_DE | CPUID_FP87,
1015 .features[FEAT_1_ECX] =
1016 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1017 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1018 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1019 .features[FEAT_8000_0001_EDX] =
1020 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1021 .features[FEAT_8000_0001_ECX] =
1022 CPUID_EXT3_LAHF_LM,
1023 .features[FEAT_6_EAX] =
1024 CPUID_6_EAX_ARAT,
1025 .xlevel = 0x8000000A,
1026 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1029 .name = "SandyBridge",
1030 .level = 0xd,
1031 .vendor = CPUID_VENDOR_INTEL,
1032 .family = 6,
1033 .model = 42,
1034 .stepping = 1,
1035 .features[FEAT_1_EDX] =
1036 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1037 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1038 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1039 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1040 CPUID_DE | CPUID_FP87,
1041 .features[FEAT_1_ECX] =
1042 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1043 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1044 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1045 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1046 CPUID_EXT_SSE3,
1047 .features[FEAT_8000_0001_EDX] =
1048 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1049 CPUID_EXT2_SYSCALL,
1050 .features[FEAT_8000_0001_ECX] =
1051 CPUID_EXT3_LAHF_LM,
1052 .features[FEAT_XSAVE] =
1053 CPUID_XSAVE_XSAVEOPT,
1054 .features[FEAT_6_EAX] =
1055 CPUID_6_EAX_ARAT,
1056 .xlevel = 0x8000000A,
1057 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1060 .name = "IvyBridge",
1061 .level = 0xd,
1062 .vendor = CPUID_VENDOR_INTEL,
1063 .family = 6,
1064 .model = 58,
1065 .stepping = 9,
1066 .features[FEAT_1_EDX] =
1067 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1068 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1069 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1070 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1071 CPUID_DE | CPUID_FP87,
1072 .features[FEAT_1_ECX] =
1073 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1074 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1075 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1076 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1077 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1078 .features[FEAT_7_0_EBX] =
1079 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1080 CPUID_7_0_EBX_ERMS,
1081 .features[FEAT_8000_0001_EDX] =
1082 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1083 CPUID_EXT2_SYSCALL,
1084 .features[FEAT_8000_0001_ECX] =
1085 CPUID_EXT3_LAHF_LM,
1086 .features[FEAT_XSAVE] =
1087 CPUID_XSAVE_XSAVEOPT,
1088 .features[FEAT_6_EAX] =
1089 CPUID_6_EAX_ARAT,
1090 .xlevel = 0x8000000A,
1091 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1094 .name = "Haswell-noTSX",
1095 .level = 0xd,
1096 .vendor = CPUID_VENDOR_INTEL,
1097 .family = 6,
1098 .model = 60,
1099 .stepping = 1,
1100 .features[FEAT_1_EDX] =
1101 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1102 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1103 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1104 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1105 CPUID_DE | CPUID_FP87,
1106 .features[FEAT_1_ECX] =
1107 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1108 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1109 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1110 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1111 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1112 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1113 .features[FEAT_8000_0001_EDX] =
1114 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1115 CPUID_EXT2_SYSCALL,
1116 .features[FEAT_8000_0001_ECX] =
1117 CPUID_EXT3_LAHF_LM,
1118 .features[FEAT_7_0_EBX] =
1119 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1120 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1121 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1122 .features[FEAT_XSAVE] =
1123 CPUID_XSAVE_XSAVEOPT,
1124 .features[FEAT_6_EAX] =
1125 CPUID_6_EAX_ARAT,
1126 .xlevel = 0x8000000A,
1127 .model_id = "Intel Core Processor (Haswell, no TSX)",
1128 }, {
1129 .name = "Haswell",
1130 .level = 0xd,
1131 .vendor = CPUID_VENDOR_INTEL,
1132 .family = 6,
1133 .model = 60,
1134 .stepping = 1,
1135 .features[FEAT_1_EDX] =
1136 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1137 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1138 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1139 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1140 CPUID_DE | CPUID_FP87,
1141 .features[FEAT_1_ECX] =
1142 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1143 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1144 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1145 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1146 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1147 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1148 .features[FEAT_8000_0001_EDX] =
1149 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1150 CPUID_EXT2_SYSCALL,
1151 .features[FEAT_8000_0001_ECX] =
1152 CPUID_EXT3_LAHF_LM,
1153 .features[FEAT_7_0_EBX] =
1154 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1155 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1156 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1157 CPUID_7_0_EBX_RTM,
1158 .features[FEAT_XSAVE] =
1159 CPUID_XSAVE_XSAVEOPT,
1160 .features[FEAT_6_EAX] =
1161 CPUID_6_EAX_ARAT,
1162 .xlevel = 0x8000000A,
1163 .model_id = "Intel Core Processor (Haswell)",
1166 .name = "Broadwell-noTSX",
1167 .level = 0xd,
1168 .vendor = CPUID_VENDOR_INTEL,
1169 .family = 6,
1170 .model = 61,
1171 .stepping = 2,
1172 .features[FEAT_1_EDX] =
1173 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1174 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1175 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1176 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1177 CPUID_DE | CPUID_FP87,
1178 .features[FEAT_1_ECX] =
1179 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1180 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1181 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1182 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1183 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1184 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1185 .features[FEAT_8000_0001_EDX] =
1186 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1187 CPUID_EXT2_SYSCALL,
1188 .features[FEAT_8000_0001_ECX] =
1189 CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1190 .features[FEAT_7_0_EBX] =
1191 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1192 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1193 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1194 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1195 CPUID_7_0_EBX_SMAP,
1196 .features[FEAT_XSAVE] =
1197 CPUID_XSAVE_XSAVEOPT,
1198 .features[FEAT_6_EAX] =
1199 CPUID_6_EAX_ARAT,
1200 .xlevel = 0x8000000A,
1201 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1204 .name = "Broadwell",
1205 .level = 0xd,
1206 .vendor = CPUID_VENDOR_INTEL,
1207 .family = 6,
1208 .model = 61,
1209 .stepping = 2,
1210 .features[FEAT_1_EDX] =
1211 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1212 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1213 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1214 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1215 CPUID_DE | CPUID_FP87,
1216 .features[FEAT_1_ECX] =
1217 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1218 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1219 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1220 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1221 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1222 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1223 .features[FEAT_8000_0001_EDX] =
1224 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1225 CPUID_EXT2_SYSCALL,
1226 .features[FEAT_8000_0001_ECX] =
1227 CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1228 .features[FEAT_7_0_EBX] =
1229 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1230 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1231 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1232 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1233 CPUID_7_0_EBX_SMAP,
1234 .features[FEAT_XSAVE] =
1235 CPUID_XSAVE_XSAVEOPT,
1236 .features[FEAT_6_EAX] =
1237 CPUID_6_EAX_ARAT,
1238 .xlevel = 0x8000000A,
1239 .model_id = "Intel Core Processor (Broadwell)",
1242 .name = "Opteron_G1",
1243 .level = 5,
1244 .vendor = CPUID_VENDOR_AMD,
1245 .family = 15,
1246 .model = 6,
1247 .stepping = 1,
1248 .features[FEAT_1_EDX] =
1249 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1250 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1251 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1252 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1253 CPUID_DE | CPUID_FP87,
1254 .features[FEAT_1_ECX] =
1255 CPUID_EXT_SSE3,
1256 .features[FEAT_8000_0001_EDX] =
1257 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1258 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1259 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1260 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1261 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1262 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1263 .xlevel = 0x80000008,
1264 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1267 .name = "Opteron_G2",
1268 .level = 5,
1269 .vendor = CPUID_VENDOR_AMD,
1270 .family = 15,
1271 .model = 6,
1272 .stepping = 1,
1273 .features[FEAT_1_EDX] =
1274 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1275 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1276 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1277 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1278 CPUID_DE | CPUID_FP87,
1279 .features[FEAT_1_ECX] =
1280 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1281 .features[FEAT_8000_0001_EDX] =
1282 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1283 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1284 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1285 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1286 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1287 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1288 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1289 .features[FEAT_8000_0001_ECX] =
1290 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1291 .xlevel = 0x80000008,
1292 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1295 .name = "Opteron_G3",
1296 .level = 5,
1297 .vendor = CPUID_VENDOR_AMD,
1298 .family = 15,
1299 .model = 6,
1300 .stepping = 1,
1301 .features[FEAT_1_EDX] =
1302 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1303 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1304 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1305 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1306 CPUID_DE | CPUID_FP87,
1307 .features[FEAT_1_ECX] =
1308 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1309 CPUID_EXT_SSE3,
1310 .features[FEAT_8000_0001_EDX] =
1311 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1312 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1313 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1314 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1315 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1316 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1317 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1318 .features[FEAT_8000_0001_ECX] =
1319 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1320 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1321 .xlevel = 0x80000008,
1322 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1325 .name = "Opteron_G4",
1326 .level = 0xd,
1327 .vendor = CPUID_VENDOR_AMD,
1328 .family = 21,
1329 .model = 1,
1330 .stepping = 2,
1331 .features[FEAT_1_EDX] =
1332 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1333 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1334 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1335 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1336 CPUID_DE | CPUID_FP87,
1337 .features[FEAT_1_ECX] =
1338 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1339 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1340 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1341 CPUID_EXT_SSE3,
1342 .features[FEAT_8000_0001_EDX] =
1343 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1344 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1345 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1346 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1347 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1348 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1349 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1350 .features[FEAT_8000_0001_ECX] =
1351 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1352 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1353 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1354 CPUID_EXT3_LAHF_LM,
1355 /* no xsaveopt! */
1356 .xlevel = 0x8000001A,
1357 .model_id = "AMD Opteron 62xx class CPU",
1360 .name = "Opteron_G5",
1361 .level = 0xd,
1362 .vendor = CPUID_VENDOR_AMD,
1363 .family = 21,
1364 .model = 2,
1365 .stepping = 0,
1366 .features[FEAT_1_EDX] =
1367 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1368 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1369 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1370 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1371 CPUID_DE | CPUID_FP87,
1372 .features[FEAT_1_ECX] =
1373 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1374 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1375 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1376 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1377 .features[FEAT_8000_0001_EDX] =
1378 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1379 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1380 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1381 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1382 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1383 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1384 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1385 .features[FEAT_8000_0001_ECX] =
1386 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1387 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1388 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1389 CPUID_EXT3_LAHF_LM,
1390 /* no xsaveopt! */
1391 .xlevel = 0x8000001A,
1392 .model_id = "AMD Opteron 63xx class CPU",
1397 * x86_cpu_compat_set_features:
1398 * @cpu_model: CPU model name to be changed. If NULL, all CPU models are changed
1399 * @w: Identifies the feature word to be changed.
1400 * @feat_add: Feature bits to be added to feature word
1401 * @feat_remove: Feature bits to be removed from feature word
1403 * Change CPU model feature bits for compatibility.
1405 * This function may be used by machine-type compatibility functions
1406 * to enable or disable feature bits on specific CPU models.
1408 void x86_cpu_compat_set_features(const char *cpu_model, FeatureWord w,
1409 uint32_t feat_add, uint32_t feat_remove)
1411 X86CPUDefinition *def;
1412 int i;
1413 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1414 def = &builtin_x86_defs[i];
1415 if (!cpu_model || !strcmp(cpu_model, def->name)) {
1416 def->features[w] |= feat_add;
1417 def->features[w] &= ~feat_remove;
1422 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1423 bool migratable_only);
1425 #ifdef CONFIG_KVM
1427 static int cpu_x86_fill_model_id(char *str)
1429 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1430 int i;
1432 for (i = 0; i < 3; i++) {
1433 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
1434 memcpy(str + i * 16 + 0, &eax, 4);
1435 memcpy(str + i * 16 + 4, &ebx, 4);
1436 memcpy(str + i * 16 + 8, &ecx, 4);
1437 memcpy(str + i * 16 + 12, &edx, 4);
1439 return 0;
1442 static X86CPUDefinition host_cpudef;
1444 static Property host_x86_cpu_properties[] = {
1445 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1446 DEFINE_PROP_END_OF_LIST()
1449 /* class_init for the "host" CPU model
1451 * This function may be called before KVM is initialized.
1453 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1455 DeviceClass *dc = DEVICE_CLASS(oc);
1456 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1457 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1459 xcc->kvm_required = true;
1461 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1462 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1464 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1465 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1466 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1467 host_cpudef.stepping = eax & 0x0F;
1469 cpu_x86_fill_model_id(host_cpudef.model_id);
1471 xcc->cpu_def = &host_cpudef;
1472 host_cpudef.cache_info_passthrough = true;
1474 /* level, xlevel, xlevel2, and the feature words are initialized on
1475 * instance_init, because they require KVM to be initialized.
1478 dc->props = host_x86_cpu_properties;
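/* Worked example, not part of the original cpu.c: if CPUID leaf 1 returns
 * EAX = 0x000306C3 on the host, the decode above yields
 *   family   = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff) = 6 + 0 = 6
 *   model    = ((eax >> 4) & 0xf) | ((eax & 0xf0000) >> 12) = 0xc | 0x30 = 0x3c (60)
 *   stepping = eax & 0xf = 3
 * i.e. a family 6, model 60, stepping 3 (Haswell-class) part.
 */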
1481 static void host_x86_cpu_initfn(Object *obj)
1483 X86CPU *cpu = X86_CPU(obj);
1484 CPUX86State *env = &cpu->env;
1485 KVMState *s = kvm_state;
1487 assert(kvm_enabled());
1489 /* We can't fill the features array here because we don't know yet if
1490 * "migratable" is true or false.
1492 cpu->host_features = true;
1494 env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1495 env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1496 env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1498 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1501 static const TypeInfo host_x86_cpu_type_info = {
1502 .name = X86_CPU_TYPE_NAME("host"),
1503 .parent = TYPE_X86_CPU,
1504 .instance_init = host_x86_cpu_initfn,
1505 .class_init = host_x86_cpu_class_init,
1508 #endif
1510 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1512 FeatureWordInfo *f = &feature_word_info[w];
1513 int i;
1515 for (i = 0; i < 32; ++i) {
1516 if (1 << i & mask) {
1517 const char *reg = get_register_name_32(f->cpuid_reg);
1518 assert(reg);
1519 fprintf(stderr, "warning: %s doesn't support requested feature: "
1520 "CPUID.%02XH:%s%s%s [bit %d]\n",
1521 kvm_enabled() ? "host" : "TCG",
1522 f->cpuid_eax, reg,
1523 f->feat_names[i] ? "." : "",
1524 f->feat_names[i] ? f->feat_names[i] : "", i);
1529 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, void *opaque,
1530 const char *name, Error **errp)
1532 X86CPU *cpu = X86_CPU(obj);
1533 CPUX86State *env = &cpu->env;
1534 int64_t value;
1536 value = (env->cpuid_version >> 8) & 0xf;
1537 if (value == 0xf) {
1538 value += (env->cpuid_version >> 20) & 0xff;
1540 visit_type_int(v, &value, name, errp);
1543 static void x86_cpuid_version_set_family(Object *obj, Visitor *v, void *opaque,
1544 const char *name, Error **errp)
1546 X86CPU *cpu = X86_CPU(obj);
1547 CPUX86State *env = &cpu->env;
1548 const int64_t min = 0;
1549 const int64_t max = 0xff + 0xf;
1550 Error *local_err = NULL;
1551 int64_t value;
1553 visit_type_int(v, &value, name, &local_err);
1554 if (local_err) {
1555 error_propagate(errp, local_err);
1556 return;
1558 if (value < min || value > max) {
1559 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1560 name ? name : "null", value, min, max);
1561 return;
1564 env->cpuid_version &= ~0xff00f00;
1565 if (value > 0x0f) {
1566 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1567 } else {
1568 env->cpuid_version |= value << 8;
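/* Worked example, not part of the original cpu.c: setting "family" to 21
 * (0x15, as the Opteron_G4/G5 definitions do) takes the value > 0x0f branch
 * above: the base family field is written as 0xf and the extended family
 * field as 21 - 15 = 6, so cpuid_version gains 0xf00 | (6 << 20) = 0x600f00.
 */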
1572 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, void *opaque,
1573 const char *name, Error **errp)
1575 X86CPU *cpu = X86_CPU(obj);
1576 CPUX86State *env = &cpu->env;
1577 int64_t value;
1579 value = (env->cpuid_version >> 4) & 0xf;
1580 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1581 visit_type_int(v, &value, name, errp);
1584 static void x86_cpuid_version_set_model(Object *obj, Visitor *v, void *opaque,
1585 const char *name, Error **errp)
1587 X86CPU *cpu = X86_CPU(obj);
1588 CPUX86State *env = &cpu->env;
1589 const int64_t min = 0;
1590 const int64_t max = 0xff;
1591 Error *local_err = NULL;
1592 int64_t value;
1594 visit_type_int(v, &value, name, &local_err);
1595 if (local_err) {
1596 error_propagate(errp, local_err);
1597 return;
1599 if (value < min || value > max) {
1600 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1601 name ? name : "null", value, min, max);
1602 return;
1605 env->cpuid_version &= ~0xf00f0;
1606 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1609 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1610 void *opaque, const char *name,
1611 Error **errp)
1613 X86CPU *cpu = X86_CPU(obj);
1614 CPUX86State *env = &cpu->env;
1615 int64_t value;
1617 value = env->cpuid_version & 0xf;
1618 visit_type_int(v, &value, name, errp);
1621 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1622 void *opaque, const char *name,
1623 Error **errp)
1625 X86CPU *cpu = X86_CPU(obj);
1626 CPUX86State *env = &cpu->env;
1627 const int64_t min = 0;
1628 const int64_t max = 0xf;
1629 Error *local_err = NULL;
1630 int64_t value;
1632 visit_type_int(v, &value, name, &local_err);
1633 if (local_err) {
1634 error_propagate(errp, local_err);
1635 return;
1637 if (value < min || value > max) {
1638 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1639 name ? name : "null", value, min, max);
1640 return;
1643 env->cpuid_version &= ~0xf;
1644 env->cpuid_version |= value & 0xf;
1647 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1649 X86CPU *cpu = X86_CPU(obj);
1650 CPUX86State *env = &cpu->env;
1651 char *value;
1653 value = g_malloc(CPUID_VENDOR_SZ + 1);
1654 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1655 env->cpuid_vendor3);
1656 return value;
1659 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1660 Error **errp)
1662 X86CPU *cpu = X86_CPU(obj);
1663 CPUX86State *env = &cpu->env;
1664 int i;
1666 if (strlen(value) != CPUID_VENDOR_SZ) {
1667 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1668 return;
1671 env->cpuid_vendor1 = 0;
1672 env->cpuid_vendor2 = 0;
1673 env->cpuid_vendor3 = 0;
1674 for (i = 0; i < 4; i++) {
1675 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1676 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1677 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1681 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1683 X86CPU *cpu = X86_CPU(obj);
1684 CPUX86State *env = &cpu->env;
1685 char *value;
1686 int i;
1688 value = g_malloc(48 + 1);
1689 for (i = 0; i < 48; i++) {
1690 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1692 value[48] = '\0';
1693 return value;
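/* Worked example, not part of the original cpu.c: the 48-character brand
 * string is packed four characters per 32-bit cpuid_model[] element in
 * little-endian byte order, so character 5 lives in cpuid_model[5 >> 2] ==
 * cpuid_model[1] at byte (5 & 3) == 1, which is exactly the byte that the
 * shift by 8 * (i & 3) above extracts.
 */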
1696 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1697 Error **errp)
1699 X86CPU *cpu = X86_CPU(obj);
1700 CPUX86State *env = &cpu->env;
1701 int c, len, i;
1703 if (model_id == NULL) {
1704 model_id = "";
1706 len = strlen(model_id);
1707 memset(env->cpuid_model, 0, 48);
1708 for (i = 0; i < 48; i++) {
1709 if (i >= len) {
1710 c = '\0';
1711 } else {
1712 c = (uint8_t)model_id[i];
1714 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1718 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, void *opaque,
1719 const char *name, Error **errp)
1721 X86CPU *cpu = X86_CPU(obj);
1722 int64_t value;
1724 value = cpu->env.tsc_khz * 1000;
1725 visit_type_int(v, &value, name, errp);
1728 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, void *opaque,
1729 const char *name, Error **errp)
1731 X86CPU *cpu = X86_CPU(obj);
1732 const int64_t min = 0;
1733 const int64_t max = INT64_MAX;
1734 Error *local_err = NULL;
1735 int64_t value;
1737 visit_type_int(v, &value, name, &local_err);
1738 if (local_err) {
1739 error_propagate(errp, local_err);
1740 return;
1742 if (value < min || value > max) {
1743 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1744 name ? name : "null", value, min, max);
1745 return;
1748 cpu->env.tsc_khz = value / 1000;
1751 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, void *opaque,
1752 const char *name, Error **errp)
1754 X86CPU *cpu = X86_CPU(obj);
1755 int64_t value = cpu->apic_id;
1757 visit_type_int(v, &value, name, errp);
1760 static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, void *opaque,
1761 const char *name, Error **errp)
1763 X86CPU *cpu = X86_CPU(obj);
1764 DeviceState *dev = DEVICE(obj);
1765 const int64_t min = 0;
1766 const int64_t max = UINT32_MAX;
1767 Error *error = NULL;
1768 int64_t value;
1770 if (dev->realized) {
1771 error_setg(errp, "Attempt to set property '%s' on '%s' after "
1772 "it was realized", name, object_get_typename(obj));
1773 return;
1776 visit_type_int(v, &value, name, &error);
1777 if (error) {
1778 error_propagate(errp, error);
1779 return;
1781 if (value < min || value > max) {
1782 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1783 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
1784 object_get_typename(obj), name, value, min, max);
1785 return;
1788 if ((value != cpu->apic_id) && cpu_exists(value)) {
1789 error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
1790 return;
1792 cpu->apic_id = value;
1795 /* Generic getter for "feature-words" and "filtered-features" properties */
1796 static void x86_cpu_get_feature_words(Object *obj, Visitor *v, void *opaque,
1797 const char *name, Error **errp)
1799 uint32_t *array = (uint32_t *)opaque;
1800 FeatureWord w;
1801 Error *err = NULL;
1802 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1803 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1804 X86CPUFeatureWordInfoList *list = NULL;
1806 for (w = 0; w < FEATURE_WORDS; w++) {
1807 FeatureWordInfo *wi = &feature_word_info[w];
1808 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1809 qwi->cpuid_input_eax = wi->cpuid_eax;
1810 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1811 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1812 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1813 qwi->features = array[w];
1815 /* List will be in reverse order, but order shouldn't matter */
1816 list_entries[w].next = list;
1817 list_entries[w].value = &word_infos[w];
1818 list = &list_entries[w];
1821 visit_type_X86CPUFeatureWordInfoList(v, &list, "feature-words", &err);
1822 error_propagate(errp, err);
1825 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1826 const char *name, Error **errp)
1828 X86CPU *cpu = X86_CPU(obj);
1829 int64_t value = cpu->hyperv_spinlock_attempts;
1831 visit_type_int(v, &value, name, errp);
1834 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1835 const char *name, Error **errp)
1837 const int64_t min = 0xFFF;
1838 const int64_t max = UINT_MAX;
1839 X86CPU *cpu = X86_CPU(obj);
1840 Error *err = NULL;
1841 int64_t value;
1843 visit_type_int(v, &value, name, &err);
1844 if (err) {
1845 error_propagate(errp, err);
1846 return;
1849 if (value < min || value > max) {
1850 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1851 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1852 object_get_typename(obj), name ? name : "null",
1853 value, min, max);
1854 return;
1856 cpu->hyperv_spinlock_attempts = value;
1859 static PropertyInfo qdev_prop_spinlocks = {
1860 .name = "int",
1861 .get = x86_get_hv_spinlocks,
1862 .set = x86_set_hv_spinlocks,
1865 /* Convert all '_' in a feature string option name to '-', to make the feature
1866 * name conform to the QOM property naming rule, which uses '-' instead of '_'.
1867 */
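/* Illustrative example (not part of the original code): a flag spelled
 * "lahf_lm" on the command line would be rewritten in place to "lahf-lm"
 * before being looked up as a QOM property name.
 */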
1868 static inline void feat2prop(char *s)
1870 while ((s = strchr(s, '_'))) {
1871 *s = '-';
1875 /* Parse "+feature,-feature,feature=foo" CPU feature string
1876 */
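/* Illustrative example (an editorial sketch, not from the original source):
 * a string such as "+avx,-nx,xlevel=0x8000000A,hv-spinlocks=0x1FFF" is handled
 * by the parser below as follows: "+avx" and "-nx" are collected into the
 * plus/minus feature bitmaps, while key=value entries are converted with
 * feat2prop() and forwarded to object_property_parse(), subject to the legacy
 * fixups shown below (an xlevel below 0x80000000 has 0x80000000 added, and
 * hv-spinlocks is clamped to a minimum of 0xFFF).
 */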
1877 static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
1878 Error **errp)
1880 X86CPU *cpu = X86_CPU(cs);
1881 char *featurestr; /* Single "key=value" string being parsed */
1882 FeatureWord w;
1883 /* Features to be added */
1884 FeatureWordArray plus_features = { 0 };
1885 /* Features to be removed */
1886 FeatureWordArray minus_features = { 0 };
1887 uint32_t numvalue;
1888 CPUX86State *env = &cpu->env;
1889 Error *local_err = NULL;
1891 featurestr = features ? strtok(features, ",") : NULL;
1893 while (featurestr) {
1894 char *val;
1895 if (featurestr[0] == '+') {
1896 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
1897 } else if (featurestr[0] == '-') {
1898 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
1899 } else if ((val = strchr(featurestr, '='))) {
1900 *val = 0; val++;
1901 feat2prop(featurestr);
1902 if (!strcmp(featurestr, "xlevel")) {
1903 char *err;
1904 char num[32];
1906 numvalue = strtoul(val, &err, 0);
1907 if (!*val || *err) {
1908 error_setg(errp, "bad numerical value %s", val);
1909 return;
1911 if (numvalue < 0x80000000) {
1912 error_report("xlevel value shall always be >= 0x80000000"
1913 ", fixup will be removed in future versions");
1914 numvalue += 0x80000000;
1916 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1917 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1918 } else if (!strcmp(featurestr, "tsc-freq")) {
1919 int64_t tsc_freq;
1920 char *err;
1921 char num[32];
1923 tsc_freq = strtosz_suffix_unit(val, &err,
1924 STRTOSZ_DEFSUFFIX_B, 1000);
1925 if (tsc_freq < 0 || *err) {
1926 error_setg(errp, "bad numerical value %s", val);
1927 return;
1929 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
1930 object_property_parse(OBJECT(cpu), num, "tsc-frequency",
1931 &local_err);
1932 } else if (!strcmp(featurestr, "hv-spinlocks")) {
1933 char *err;
1934 const int min = 0xFFF;
1935 char num[32];
1936 numvalue = strtoul(val, &err, 0);
1937 if (!*val || *err) {
1938 error_setg(errp, "bad numerical value %s", val);
1939 return;
1941 if (numvalue < min) {
1942 error_report("hv-spinlocks value shall always be >= 0x%x"
1943 ", fixup will be removed in future versions",
1944 min);
1945 numvalue = min;
1947 snprintf(num, sizeof(num), "%" PRId32, numvalue);
1948 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1949 } else {
1950 object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
1952 } else {
1953 feat2prop(featurestr);
1954 object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
1956 if (local_err) {
1957 error_propagate(errp, local_err);
1958 return;
1960 featurestr = strtok(NULL, ",");
1963 if (cpu->host_features) {
1964 for (w = 0; w < FEATURE_WORDS; w++) {
1965 env->features[w] =
1966 x86_cpu_get_supported_feature_word(w, cpu->migratable);
1970 for (w = 0; w < FEATURE_WORDS; w++) {
1971 env->features[w] |= plus_features[w];
1972 env->features[w] &= ~minus_features[w];
1976 /* Print all cpuid feature names in featureset
1977 */
1978 static void listflags(FILE *f, fprintf_function print, const char **featureset)
1980 int bit;
1981 bool first = true;
1983 for (bit = 0; bit < 32; bit++) {
1984 if (featureset[bit]) {
1985 print(f, "%s%s", first ? "" : " ", featureset[bit]);
1986 first = false;
1991 /* generate CPU information. */
1992 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
1994 X86CPUDefinition *def;
1995 char buf[256];
1996 int i;
1998 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1999 def = &builtin_x86_defs[i];
2000 snprintf(buf, sizeof(buf), "%s", def->name);
2001 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
2003 #ifdef CONFIG_KVM
2004 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
2005 "KVM processor with all supported host features "
2006 "(only available in KVM mode)");
2007 #endif
2009 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2010 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2011 FeatureWordInfo *fw = &feature_word_info[i];
2013 (*cpu_fprintf)(f, " ");
2014 listflags(f, cpu_fprintf, fw->feat_names);
2015 (*cpu_fprintf)(f, "\n");
2019 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2021 CpuDefinitionInfoList *cpu_list = NULL;
2022 X86CPUDefinition *def;
2023 int i;
2025 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2026 CpuDefinitionInfoList *entry;
2027 CpuDefinitionInfo *info;
2029 def = &builtin_x86_defs[i];
2030 info = g_malloc0(sizeof(*info));
2031 info->name = g_strdup(def->name);
2033 entry = g_malloc0(sizeof(*entry));
2034 entry->value = info;
2035 entry->next = cpu_list;
2036 cpu_list = entry;
2039 return cpu_list;
2042 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2043 bool migratable_only)
2045 FeatureWordInfo *wi = &feature_word_info[w];
2046 uint32_t r;
2048 if (kvm_enabled()) {
2049 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2050 wi->cpuid_ecx,
2051 wi->cpuid_reg);
2052 } else if (tcg_enabled()) {
2053 r = wi->tcg_features;
2054 } else {
2055 return ~0;
2057 if (migratable_only) {
2058 r &= x86_cpu_get_migratable_flags(w);
2060 return r;
2063 /*
2064 * Filters CPU feature words based on host availability of each feature.
2065 *
2066 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
2067 */
2068 static int x86_cpu_filter_features(X86CPU *cpu)
2070 CPUX86State *env = &cpu->env;
2071 FeatureWord w;
2072 int rv = 0;
2074 for (w = 0; w < FEATURE_WORDS; w++) {
2075 uint32_t host_feat =
2076 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2077 uint32_t requested_features = env->features[w];
2078 env->features[w] &= host_feat;
2079 cpu->filtered_features[w] = requested_features & ~env->features[w];
2080 if (cpu->filtered_features[w]) {
2081 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2082 report_unavailable_features(w, cpu->filtered_features[w]);
2084 rv = 1;
2088 return rv;
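/* Note (an observation, not from the original source): "check" and "enforce"
 * only control reporting here; the caller decides what to do with the return
 * value. In x86_cpu_realizefn() below, enforce_cpuid additionally turns a
 * non-zero result into a realize failure, while plain "check" merely warns
 * about the filtered bits.
 */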
2091 /* Load data from X86CPUDefinition
2092 */
2093 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2095 CPUX86State *env = &cpu->env;
2096 const char *vendor;
2097 char host_vendor[CPUID_VENDOR_SZ + 1];
2098 FeatureWord w;
2100 object_property_set_int(OBJECT(cpu), def->level, "level", errp);
2101 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2102 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2103 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2104 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
2105 object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
2106 cpu->cache_info_passthrough = def->cache_info_passthrough;
2107 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2108 for (w = 0; w < FEATURE_WORDS; w++) {
2109 env->features[w] = def->features[w];
2112 /* Special cases not set in the X86CPUDefinition structs: */
2113 if (kvm_enabled()) {
2114 FeatureWord w;
2115 for (w = 0; w < FEATURE_WORDS; w++) {
2116 env->features[w] |= kvm_default_features[w];
2117 env->features[w] &= ~kvm_default_unset_features[w];
2121 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2123 /* sysenter isn't supported in compatibility mode on AMD,
2124 * syscall isn't supported in compatibility mode on Intel.
2125 * Normally we advertise the actual CPU vendor, but you can
2126 * override this using the 'vendor' property if you want to use
2127 * KVM's sysenter/syscall emulation in compatibility mode and
2128 * when doing cross vendor migration
2129 */
2130 vendor = def->vendor;
2131 if (kvm_enabled()) {
2132 uint32_t ebx = 0, ecx = 0, edx = 0;
2133 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2134 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2135 vendor = host_vendor;
2138 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2142 X86CPU *cpu_x86_create(const char *cpu_model, Error **errp)
2144 X86CPU *cpu = NULL;
2145 X86CPUClass *xcc;
2146 ObjectClass *oc;
2147 gchar **model_pieces;
2148 char *name, *features;
2149 Error *error = NULL;
2151 model_pieces = g_strsplit(cpu_model, ",", 2);
2152 if (!model_pieces[0]) {
2153 error_setg(&error, "Invalid/empty CPU model name");
2154 goto out;
2156 name = model_pieces[0];
2157 features = model_pieces[1];
2159 oc = x86_cpu_class_by_name(name);
2160 if (oc == NULL) {
2161 error_setg(&error, "Unable to find CPU definition: %s", name);
2162 goto out;
2164 xcc = X86_CPU_CLASS(oc);
2166 if (xcc->kvm_required && !kvm_enabled()) {
2167 error_setg(&error, "CPU model '%s' requires KVM", name);
2168 goto out;
2171 cpu = X86_CPU(object_new(object_class_get_name(oc)));
2173 x86_cpu_parse_featurestr(CPU(cpu), features, &error);
2174 if (error) {
2175 goto out;
2178 out:
2179 if (error != NULL) {
2180 error_propagate(errp, error);
2181 if (cpu) {
2182 object_unref(OBJECT(cpu));
2183 cpu = NULL;
2186 g_strfreev(model_pieces);
2187 return cpu;
2190 X86CPU *cpu_x86_init(const char *cpu_model)
2192 Error *error = NULL;
2193 X86CPU *cpu;
2195 cpu = cpu_x86_create(cpu_model, &error);
2196 if (error) {
2197 goto out;
2200 object_property_set_bool(OBJECT(cpu), true, "realized", &error);
2202 out:
2203 if (error) {
2204 error_report_err(error);
2205 if (cpu != NULL) {
2206 object_unref(OBJECT(cpu));
2207 cpu = NULL;
2210 return cpu;
2213 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2215 X86CPUDefinition *cpudef = data;
2216 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2218 xcc->cpu_def = cpudef;
2221 static void x86_register_cpudef_type(X86CPUDefinition *def)
2223 char *typename = x86_cpu_type_name(def->name);
2224 TypeInfo ti = {
2225 .name = typename,
2226 .parent = TYPE_X86_CPU,
2227 .class_init = x86_cpu_cpudef_class_init,
2228 .class_data = def,
2231 type_register(&ti);
2232 g_free(typename);
2235 #if !defined(CONFIG_USER_ONLY)
2237 void cpu_clear_apic_feature(CPUX86State *env)
2239 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2242 #endif /* !CONFIG_USER_ONLY */
2244 /* Initialize list of CPU models, filling some non-static fields if necessary
2245 */
2246 void x86_cpudef_setup(void)
2248 int i, j;
2249 static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
2251 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
2252 X86CPUDefinition *def = &builtin_x86_defs[i];
2254 /* Look for specific "cpudef" models that */
2255 /* have the QEMU version in .model_id */
2256 for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
2257 if (strcmp(model_with_versions[j], def->name) == 0) {
2258 pstrcpy(def->model_id, sizeof(def->model_id),
2259 "QEMU Virtual CPU version ");
2260 pstrcat(def->model_id, sizeof(def->model_id),
2261 qemu_get_version());
2262 break;
2268 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2269 uint32_t *eax, uint32_t *ebx,
2270 uint32_t *ecx, uint32_t *edx)
2272 X86CPU *cpu = x86_env_get_cpu(env);
2273 CPUState *cs = CPU(cpu);
2275 /* test if maximum index reached */
2276 if (index & 0x80000000) {
2277 if (index > env->cpuid_xlevel) {
2278 if (env->cpuid_xlevel2 > 0) {
2279 /* Handle the Centaur's CPUID instruction. */
2280 if (index > env->cpuid_xlevel2) {
2281 index = env->cpuid_xlevel2;
2282 } else if (index < 0xC0000000) {
2283 index = env->cpuid_xlevel;
2285 } else {
2286 /* Intel documentation states that invalid EAX input will
2287 * return the same information as EAX=cpuid_level
2288 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2289 */
2290 index = env->cpuid_level;
2293 } else {
2294 if (index > env->cpuid_level)
2295 index = env->cpuid_level;
2298 switch(index) {
2299 case 0:
2300 *eax = env->cpuid_level;
2301 *ebx = env->cpuid_vendor1;
2302 *edx = env->cpuid_vendor2;
2303 *ecx = env->cpuid_vendor3;
2304 break;
2305 case 1:
2306 *eax = env->cpuid_version;
2307 *ebx = (cpu->apic_id << 24) |
2308 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2309 *ecx = env->features[FEAT_1_ECX];
2310 *edx = env->features[FEAT_1_EDX];
2311 if (cs->nr_cores * cs->nr_threads > 1) {
2312 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2313 *edx |= 1 << 28; /* HTT bit */
2315 break;
2316 case 2:
2317 /* cache info: needed for Pentium Pro compatibility */
2318 if (cpu->cache_info_passthrough) {
2319 host_cpuid(index, 0, eax, ebx, ecx, edx);
2320 break;
2322 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2323 *ebx = 0;
2324 *ecx = 0;
2325 *edx = (L1D_DESCRIPTOR << 16) | \
2326 (L1I_DESCRIPTOR << 8) | \
2327 (L2_DESCRIPTOR);
2328 break;
2329 case 4:
2330 /* cache info: needed for Core compatibility */
2331 if (cpu->cache_info_passthrough) {
2332 host_cpuid(index, count, eax, ebx, ecx, edx);
2333 *eax &= ~0xFC000000;
2334 } else {
2335 *eax = 0;
2336 switch (count) {
2337 case 0: /* L1 dcache info */
2338 *eax |= CPUID_4_TYPE_DCACHE | \
2339 CPUID_4_LEVEL(1) | \
2340 CPUID_4_SELF_INIT_LEVEL;
2341 *ebx = (L1D_LINE_SIZE - 1) | \
2342 ((L1D_PARTITIONS - 1) << 12) | \
2343 ((L1D_ASSOCIATIVITY - 1) << 22);
2344 *ecx = L1D_SETS - 1;
2345 *edx = CPUID_4_NO_INVD_SHARING;
2346 break;
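/* Note on the encoding used above (per the architectural definition of CPUID
 * leaf 4, not specific to this file): cache size in bytes is
 * (EBX[31:22]+1) ways * (EBX[21:12]+1) partitions * (EBX[11:0]+1) bytes per
 * line * (ECX+1) sets, which is why each field is stored minus one here.
 */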
2347 case 1: /* L1 icache info */
2348 *eax |= CPUID_4_TYPE_ICACHE | \
2349 CPUID_4_LEVEL(1) | \
2350 CPUID_4_SELF_INIT_LEVEL;
2351 *ebx = (L1I_LINE_SIZE - 1) | \
2352 ((L1I_PARTITIONS - 1) << 12) | \
2353 ((L1I_ASSOCIATIVITY - 1) << 22);
2354 *ecx = L1I_SETS - 1;
2355 *edx = CPUID_4_NO_INVD_SHARING;
2356 break;
2357 case 2: /* L2 cache info */
2358 *eax |= CPUID_4_TYPE_UNIFIED | \
2359 CPUID_4_LEVEL(2) | \
2360 CPUID_4_SELF_INIT_LEVEL;
2361 if (cs->nr_threads > 1) {
2362 *eax |= (cs->nr_threads - 1) << 14;
2364 *ebx = (L2_LINE_SIZE - 1) | \
2365 ((L2_PARTITIONS - 1) << 12) | \
2366 ((L2_ASSOCIATIVITY - 1) << 22);
2367 *ecx = L2_SETS - 1;
2368 *edx = CPUID_4_NO_INVD_SHARING;
2369 break;
2370 default: /* end of info */
2371 *eax = 0;
2372 *ebx = 0;
2373 *ecx = 0;
2374 *edx = 0;
2375 break;
2379 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2380 if ((*eax & 31) && cs->nr_cores > 1) {
2381 *eax |= (cs->nr_cores - 1) << 26;
2383 break;
2384 case 5:
2385 /* mwait info: needed for Core compatibility */
2386 *eax = 0; /* Smallest monitor-line size in bytes */
2387 *ebx = 0; /* Largest monitor-line size in bytes */
2388 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2389 *edx = 0;
2390 break;
2391 case 6:
2392 /* Thermal and Power Leaf */
2393 *eax = env->features[FEAT_6_EAX];
2394 *ebx = 0;
2395 *ecx = 0;
2396 *edx = 0;
2397 break;
2398 case 7:
2399 /* Structured Extended Feature Flags Enumeration Leaf */
2400 if (count == 0) {
2401 *eax = 0; /* Maximum ECX value for sub-leaves */
2402 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2403 *ecx = 0; /* Reserved */
2404 *edx = 0; /* Reserved */
2405 } else {
2406 *eax = 0;
2407 *ebx = 0;
2408 *ecx = 0;
2409 *edx = 0;
2411 break;
2412 case 9:
2413 /* Direct Cache Access Information Leaf */
2414 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2415 *ebx = 0;
2416 *ecx = 0;
2417 *edx = 0;
2418 break;
2419 case 0xA:
2420 /* Architectural Performance Monitoring Leaf */
2421 if (kvm_enabled() && cpu->enable_pmu) {
2422 KVMState *s = cs->kvm_state;
2424 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2425 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2426 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2427 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2428 } else {
2429 *eax = 0;
2430 *ebx = 0;
2431 *ecx = 0;
2432 *edx = 0;
2434 break;
2435 case 0xD: {
2436 KVMState *s = cs->kvm_state;
2437 uint64_t kvm_mask;
2438 int i;
2440 /* Processor Extended State */
2441 *eax = 0;
2442 *ebx = 0;
2443 *ecx = 0;
2444 *edx = 0;
2445 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) || !kvm_enabled()) {
2446 break;
2448 kvm_mask =
2449 kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX) |
2450 ((uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32);
2452 if (count == 0) {
2453 *ecx = 0x240;
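/* Note (illustrative, not from the original source): 0x240 = 576 bytes, i.e.
 * the 512-byte legacy FXSAVE/FXRSTOR region plus the 64-byte XSAVE header;
 * the loop below then grows ECX to cover any additional enabled state
 * components.
 */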
2454 for (i = 2; i < ARRAY_SIZE(ext_save_areas); i++) {
2455 const ExtSaveArea *esa = &ext_save_areas[i];
2456 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2457 (kvm_mask & (1 << i)) != 0) {
2458 if (i < 32) {
2459 *eax |= 1 << i;
2460 } else {
2461 *edx |= 1 << (i - 32);
2463 *ecx = MAX(*ecx, esa->offset + esa->size);
2466 *eax |= kvm_mask & (XSTATE_FP | XSTATE_SSE);
2467 *ebx = *ecx;
2468 } else if (count == 1) {
2469 *eax = env->features[FEAT_XSAVE];
2470 } else if (count < ARRAY_SIZE(ext_save_areas)) {
2471 const ExtSaveArea *esa = &ext_save_areas[count];
2472 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2473 (kvm_mask & (1 << count)) != 0) {
2474 *eax = esa->size;
2475 *ebx = esa->offset;
2478 break;
2480 case 0x80000000:
2481 *eax = env->cpuid_xlevel;
2482 *ebx = env->cpuid_vendor1;
2483 *edx = env->cpuid_vendor2;
2484 *ecx = env->cpuid_vendor3;
2485 break;
2486 case 0x80000001:
2487 *eax = env->cpuid_version;
2488 *ebx = 0;
2489 *ecx = env->features[FEAT_8000_0001_ECX];
2490 *edx = env->features[FEAT_8000_0001_EDX];
2492 /* The Linux kernel checks for the CMPLegacy bit and
2493 * discards multiple thread information if it is set.
2494 * So don't set it here for Intel to make Linux guests happy.
2495 */
2496 if (cs->nr_cores * cs->nr_threads > 1) {
2497 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2498 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2499 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2500 *ecx |= 1 << 1; /* CmpLegacy bit */
2503 break;
2504 case 0x80000002:
2505 case 0x80000003:
2506 case 0x80000004:
2507 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2508 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2509 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2510 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2511 break;
2512 case 0x80000005:
2513 /* cache info (L1 cache) */
2514 if (cpu->cache_info_passthrough) {
2515 host_cpuid(index, 0, eax, ebx, ecx, edx);
2516 break;
2518 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2519 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2520 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2521 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2522 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2523 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2524 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2525 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2526 break;
2527 case 0x80000006:
2528 /* cache info (L2 cache) */
2529 if (cpu->cache_info_passthrough) {
2530 host_cpuid(index, 0, eax, ebx, ecx, edx);
2531 break;
2533 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2534 (L2_DTLB_2M_ENTRIES << 16) | \
2535 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2536 (L2_ITLB_2M_ENTRIES);
2537 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2538 (L2_DTLB_4K_ENTRIES << 16) | \
2539 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2540 (L2_ITLB_4K_ENTRIES);
2541 *ecx = (L2_SIZE_KB_AMD << 16) | \
2542 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2543 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2544 *edx = ((L3_SIZE_KB/512) << 18) | \
2545 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2546 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
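/* Note (an assumption based on the AMD definition of this leaf, not stated in
 * the original code): the L3 size lives in EDX[31:18] and is expressed in
 * 512 KiB units, hence the division by 512 and the shift by 18 above.
 */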
2547 break;
2548 case 0x80000007:
2549 *eax = 0;
2550 *ebx = 0;
2551 *ecx = 0;
2552 *edx = env->features[FEAT_8000_0007_EDX];
2553 break;
2554 case 0x80000008:
2555 /* virtual & phys address size in low 2 bytes. */
2556 /* XXX: This value must match the one used in the MMU code. */
2557 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2558 /* 64 bit processor */
2559 /* XXX: The physical address space is limited to 42 bits in exec.c. */
2560 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
2561 } else {
2562 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
2563 *eax = 0x00000024; /* 36 bits physical */
2564 } else {
2565 *eax = 0x00000020; /* 32 bits physical */
2568 *ebx = 0;
2569 *ecx = 0;
2570 *edx = 0;
2571 if (cs->nr_cores * cs->nr_threads > 1) {
2572 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2574 break;
2575 case 0x8000000A:
2576 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2577 *eax = 0x00000001; /* SVM Revision */
2578 *ebx = 0x00000010; /* nr of ASIDs */
2579 *ecx = 0;
2580 *edx = env->features[FEAT_SVM]; /* optional features */
2581 } else {
2582 *eax = 0;
2583 *ebx = 0;
2584 *ecx = 0;
2585 *edx = 0;
2587 break;
2588 case 0xC0000000:
2589 *eax = env->cpuid_xlevel2;
2590 *ebx = 0;
2591 *ecx = 0;
2592 *edx = 0;
2593 break;
2594 case 0xC0000001:
2595 /* Support for VIA CPU's CPUID instruction */
2596 *eax = env->cpuid_version;
2597 *ebx = 0;
2598 *ecx = 0;
2599 *edx = env->features[FEAT_C000_0001_EDX];
2600 break;
2601 case 0xC0000002:
2602 case 0xC0000003:
2603 case 0xC0000004:
2604 /* Reserved for future use; currently returns all zeroes */
2605 *eax = 0;
2606 *ebx = 0;
2607 *ecx = 0;
2608 *edx = 0;
2609 break;
2610 default:
2611 /* reserved values: zero */
2612 *eax = 0;
2613 *ebx = 0;
2614 *ecx = 0;
2615 *edx = 0;
2616 break;
2620 /* CPUClass::reset() */
2621 static void x86_cpu_reset(CPUState *s)
2623 X86CPU *cpu = X86_CPU(s);
2624 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2625 CPUX86State *env = &cpu->env;
2626 int i;
2628 xcc->parent_reset(s);
2630 memset(env, 0, offsetof(CPUX86State, cpuid_level));
2632 tlb_flush(s, 1);
2634 env->old_exception = -1;
2636 /* init to reset state */
2638 #ifdef CONFIG_SOFTMMU
2639 env->hflags |= HF_SOFTMMU_MASK;
2640 #endif
2641 env->hflags2 |= HF2_GIF_MASK;
2643 cpu_x86_update_cr0(env, 0x60000010);
2644 env->a20_mask = ~0x0;
2645 env->smbase = 0x30000;
2647 env->idt.limit = 0xffff;
2648 env->gdt.limit = 0xffff;
2649 env->ldt.limit = 0xffff;
2650 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2651 env->tr.limit = 0xffff;
2652 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
2654 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2655 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2656 DESC_R_MASK | DESC_A_MASK);
2657 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2658 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2659 DESC_A_MASK);
2660 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2661 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2662 DESC_A_MASK);
2663 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2664 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2665 DESC_A_MASK);
2666 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2667 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2668 DESC_A_MASK);
2669 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2670 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2671 DESC_A_MASK);
2673 env->eip = 0xfff0;
2674 env->regs[R_EDX] = env->cpuid_version;
2676 env->eflags = 0x2;
2678 /* FPU init */
2679 for (i = 0; i < 8; i++) {
2680 env->fptags[i] = 1;
2682 cpu_set_fpuc(env, 0x37f);
2684 env->mxcsr = 0x1f80;
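/* Note (illustrative, not from the original source): 0x37F and 0x1F80 are the
 * architectural power-up defaults for the x87 control word and MXCSR (all
 * exceptions masked).
 */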
2685 env->xstate_bv = XSTATE_FP | XSTATE_SSE;
2687 env->pat = 0x0007040600070406ULL;
2688 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2690 memset(env->dr, 0, sizeof(env->dr));
2691 env->dr[6] = DR6_FIXED_1;
2692 env->dr[7] = DR7_FIXED_1;
2693 cpu_breakpoint_remove_all(s, BP_CPU);
2694 cpu_watchpoint_remove_all(s, BP_CPU);
2696 env->xcr0 = 1;
2698 /*
2699 * SDM 11.11.5 requires:
2700 * - IA32_MTRR_DEF_TYPE MSR.E = 0
2701 * - IA32_MTRR_PHYSMASKn.V = 0
2702 * All other bits are undefined. For simplification, zero it all.
2703 */
2704 env->mtrr_deftype = 0;
2705 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
2706 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
2708 #if !defined(CONFIG_USER_ONLY)
2709 /* We hard-wire the BSP to the first CPU. */
2710 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
2712 s->halted = !cpu_is_bsp(cpu);
2714 if (kvm_enabled()) {
2715 kvm_arch_reset_vcpu(cpu);
2717 #endif
2720 #ifndef CONFIG_USER_ONLY
2721 bool cpu_is_bsp(X86CPU *cpu)
2723 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2726 /* TODO: remove me, when reset over QOM tree is implemented */
2727 static void x86_cpu_machine_reset_cb(void *opaque)
2729 X86CPU *cpu = opaque;
2730 cpu_reset(CPU(cpu));
2732 #endif
2734 static void mce_init(X86CPU *cpu)
2736 CPUX86State *cenv = &cpu->env;
2737 unsigned int bank;
2739 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2740 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2741 (CPUID_MCE | CPUID_MCA)) {
2742 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2743 cenv->mcg_ctl = ~(uint64_t)0;
2744 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2745 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2750 #ifndef CONFIG_USER_ONLY
2751 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2753 DeviceState *dev = DEVICE(cpu);
2754 APICCommonState *apic;
2755 const char *apic_type = "apic";
2757 if (kvm_irqchip_in_kernel()) {
2758 apic_type = "kvm-apic";
2759 } else if (xen_enabled()) {
2760 apic_type = "xen-apic";
2763 cpu->apic_state = qdev_try_create(qdev_get_parent_bus(dev), apic_type);
2764 if (cpu->apic_state == NULL) {
2765 error_setg(errp, "APIC device '%s' could not be created", apic_type);
2766 return;
2769 object_property_add_child(OBJECT(cpu), "apic",
2770 OBJECT(cpu->apic_state), NULL);
2771 qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
2772 /* TODO: convert to link<> */
2773 apic = APIC_COMMON(cpu->apic_state);
2774 apic->cpu = cpu;
2777 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2779 if (cpu->apic_state == NULL) {
2780 return;
2782 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
2783 errp);
2786 static void x86_cpu_machine_done(Notifier *n, void *unused)
2788 X86CPU *cpu = container_of(n, X86CPU, machine_done);
2789 MemoryRegion *smram =
2790 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
2792 if (smram) {
2793 cpu->smram = g_new(MemoryRegion, 1);
2794 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
2795 smram, 0, 1ull << 32);
2796 memory_region_set_enabled(cpu->smram, false);
2797 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
2800 #else
2801 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2804 #endif
2807 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
2808 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
2809 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
2810 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
2811 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
2812 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
2813 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
2815 CPUState *cs = CPU(dev);
2816 X86CPU *cpu = X86_CPU(dev);
2817 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
2818 CPUX86State *env = &cpu->env;
2819 Error *local_err = NULL;
2820 static bool ht_warned;
2822 if (cpu->apic_id < 0) {
2823 error_setg(errp, "apic-id property was not initialized properly");
2824 return;
2827 if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
2828 env->cpuid_level = 7;
2831 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
2832 * CPUID[1].EDX.
2834 if (IS_AMD_CPU(env)) {
2835 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
2836 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
2837 & CPUID_EXT2_AMD_ALIASES);
2841 if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
2842 error_setg(&local_err,
2843 kvm_enabled() ?
2844 "Host doesn't support requested features" :
2845 "TCG doesn't support requested features");
2846 goto out;
2849 #ifndef CONFIG_USER_ONLY
2850 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
2852 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
2853 x86_cpu_apic_create(cpu, &local_err);
2854 if (local_err != NULL) {
2855 goto out;
2858 #endif
2860 mce_init(cpu);
2862 #ifndef CONFIG_USER_ONLY
2863 if (tcg_enabled()) {
2864 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
2865 cpu->cpu_as_root = g_new(MemoryRegion, 1);
2866 cs->as = g_new(AddressSpace, 1);
2868 /* Outer container... */
2869 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
2870 memory_region_set_enabled(cpu->cpu_as_root, true);
2872 /* ... with two regions inside: normal system memory with low
2873 * priority, and...
2874 */
2875 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
2876 get_system_memory(), 0, ~0ull);
2877 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
2878 memory_region_set_enabled(cpu->cpu_as_mem, true);
2879 address_space_init(cs->as, cpu->cpu_as_root, "CPU");
2881 /* ... SMRAM with higher priority, linked from /machine/smram. */
2882 cpu->machine_done.notify = x86_cpu_machine_done;
2883 qemu_add_machine_init_done_notifier(&cpu->machine_done);
2885 #endif
2887 qemu_init_vcpu(cs);
2889 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
2890 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
2891 * based on inputs (sockets, cores, threads), it is still better to give
2892 * users a warning.
2893 *
2894 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
2895 * cs->nr_threads hasn't been populated yet and the check would be wrong.
2896 */
2897 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
2898 error_report("AMD CPU doesn't support hyperthreading. Please configure"
2899 " -smp options properly.");
2900 ht_warned = true;
2903 x86_cpu_apic_realize(cpu, &local_err);
2904 if (local_err != NULL) {
2905 goto out;
2907 cpu_reset(cs);
2909 xcc->parent_realize(dev, &local_err);
2911 out:
2912 if (local_err != NULL) {
2913 error_propagate(errp, local_err);
2914 return;
2918 typedef struct BitProperty {
2919 uint32_t *ptr;
2920 uint32_t mask;
2921 } BitProperty;
2923 static void x86_cpu_get_bit_prop(Object *obj,
2924 struct Visitor *v,
2925 void *opaque,
2926 const char *name,
2927 Error **errp)
2929 BitProperty *fp = opaque;
2930 bool value = (*fp->ptr & fp->mask) == fp->mask;
2931 visit_type_bool(v, &value, name, errp);
2934 static void x86_cpu_set_bit_prop(Object *obj,
2935 struct Visitor *v,
2936 void *opaque,
2937 const char *name,
2938 Error **errp)
2940 DeviceState *dev = DEVICE(obj);
2941 BitProperty *fp = opaque;
2942 Error *local_err = NULL;
2943 bool value;
2945 if (dev->realized) {
2946 qdev_prop_set_after_realize(dev, name, errp);
2947 return;
2950 visit_type_bool(v, &value, name, &local_err);
2951 if (local_err) {
2952 error_propagate(errp, local_err);
2953 return;
2956 if (value) {
2957 *fp->ptr |= fp->mask;
2958 } else {
2959 *fp->ptr &= ~fp->mask;
2963 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
2964 void *opaque)
2966 BitProperty *prop = opaque;
2967 g_free(prop);
2970 /* Register a boolean property to get/set a single bit in a uint32_t field.
2972 * The same property name can be registered multiple times to make it affect
2973 * multiple bits in the same FeatureWord. In that case, the getter will return
2974 * true only if all bits are set.
2975 */
2976 static void x86_cpu_register_bit_prop(X86CPU *cpu,
2977 const char *prop_name,
2978 uint32_t *field,
2979 int bitnr)
2981 BitProperty *fp;
2982 ObjectProperty *op;
2983 uint32_t mask = (1UL << bitnr);
2985 op = object_property_find(OBJECT(cpu), prop_name, NULL);
2986 if (op) {
2987 fp = op->opaque;
2988 assert(fp->ptr == field);
2989 fp->mask |= mask;
2990 } else {
2991 fp = g_new0(BitProperty, 1);
2992 fp->ptr = field;
2993 fp->mask = mask;
2994 object_property_add(OBJECT(cpu), prop_name, "bool",
2995 x86_cpu_get_bit_prop,
2996 x86_cpu_set_bit_prop,
2997 x86_cpu_release_bit_prop, fp, &error_abort);
3001 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3002 FeatureWord w,
3003 int bitnr)
3005 Object *obj = OBJECT(cpu);
3006 int i;
3007 char **names;
3008 FeatureWordInfo *fi = &feature_word_info[w];
3010 if (!fi->feat_names) {
3011 return;
3013 if (!fi->feat_names[bitnr]) {
3014 return;
3017 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
3019 feat2prop(names[0]);
3020 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
3022 for (i = 1; names[i]; i++) {
3023 feat2prop(names[i]);
3024 object_property_add_alias(obj, names[i], obj, g_strdup(names[0]),
3025 &error_abort);
3028 g_strfreev(names);
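/* Illustrative example (the exact spellings live in feature_word_info[] and
 * are only assumed here): a feat_names entry written as "pni|sse3" would make
 * "pni" the canonical bool property and register "sse3" as an alias pointing
 * at it.
 */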
3031 static void x86_cpu_initfn(Object *obj)
3033 CPUState *cs = CPU(obj);
3034 X86CPU *cpu = X86_CPU(obj);
3035 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
3036 CPUX86State *env = &cpu->env;
3037 FeatureWord w;
3038 static int inited;
3040 cs->env_ptr = env;
3041 cpu_exec_init(cs, &error_abort);
3043 object_property_add(obj, "family", "int",
3044 x86_cpuid_version_get_family,
3045 x86_cpuid_version_set_family, NULL, NULL, NULL);
3046 object_property_add(obj, "model", "int",
3047 x86_cpuid_version_get_model,
3048 x86_cpuid_version_set_model, NULL, NULL, NULL);
3049 object_property_add(obj, "stepping", "int",
3050 x86_cpuid_version_get_stepping,
3051 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
3052 object_property_add_str(obj, "vendor",
3053 x86_cpuid_get_vendor,
3054 x86_cpuid_set_vendor, NULL);
3055 object_property_add_str(obj, "model-id",
3056 x86_cpuid_get_model_id,
3057 x86_cpuid_set_model_id, NULL);
3058 object_property_add(obj, "tsc-frequency", "int",
3059 x86_cpuid_get_tsc_freq,
3060 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
3061 object_property_add(obj, "apic-id", "int",
3062 x86_cpuid_get_apic_id,
3063 x86_cpuid_set_apic_id, NULL, NULL, NULL);
3064 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
3065 x86_cpu_get_feature_words,
3066 NULL, NULL, (void *)env->features, NULL);
3067 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
3068 x86_cpu_get_feature_words,
3069 NULL, NULL, (void *)cpu->filtered_features, NULL);
3071 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
3073 #ifndef CONFIG_USER_ONLY
3074 /* Any code creating new X86CPU objects has to set apic-id explicitly */
3075 cpu->apic_id = -1;
3076 #endif
3078 for (w = 0; w < FEATURE_WORDS; w++) {
3079 int bitnr;
3081 for (bitnr = 0; bitnr < 32; bitnr++) {
3082 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
3086 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
3088 /* init various static tables used in TCG mode */
3089 if (tcg_enabled() && !inited) {
3090 inited = 1;
3091 optimize_flags_init();
3095 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3097 X86CPU *cpu = X86_CPU(cs);
3099 return cpu->apic_id;
3102 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3104 X86CPU *cpu = X86_CPU(cs);
3106 return cpu->env.cr[0] & CR0_PG_MASK;
3109 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3111 X86CPU *cpu = X86_CPU(cs);
3113 cpu->env.eip = value;
3116 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3118 X86CPU *cpu = X86_CPU(cs);
3120 cpu->env.eip = tb->pc - tb->cs_base;
3123 static bool x86_cpu_has_work(CPUState *cs)
3125 X86CPU *cpu = X86_CPU(cs);
3126 CPUX86State *env = &cpu->env;
3128 #if !defined(CONFIG_USER_ONLY)
3129 if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
3130 apic_poll_irq(cpu->apic_state);
3131 cpu_reset_interrupt(cs, CPU_INTERRUPT_POLL);
3133 #endif
3135 return ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
3136 (env->eflags & IF_MASK)) ||
3137 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3138 CPU_INTERRUPT_INIT |
3139 CPU_INTERRUPT_SIPI |
3140 CPU_INTERRUPT_MCE)) ||
3141 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3142 !(env->hflags & HF_SMM_MASK));
3145 static Property x86_cpu_properties[] = {
3146 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
3147 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
3148 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
3149 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
3150 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
3151 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, false),
3152 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
3153 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
3154 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
3155 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
3156 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
3157 DEFINE_PROP_END_OF_LIST()
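/* Illustrative example (an editorial sketch, not from the original source):
 * a command line such as
 *   -cpu qemu64,enforce,pmu=on,hv-relaxed,hv-spinlocks=0x1FFF
 * ends up here: "enforce", "pmu" and "hv-relaxed" map onto the static
 * properties above, while "hv-spinlocks" goes through the custom
 * qdev_prop_spinlocks getter/setter defined earlier in this file.
 */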
3160 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
3162 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3163 CPUClass *cc = CPU_CLASS(oc);
3164 DeviceClass *dc = DEVICE_CLASS(oc);
3166 xcc->parent_realize = dc->realize;
3167 dc->realize = x86_cpu_realizefn;
3168 dc->bus_type = TYPE_ICC_BUS;
3169 dc->props = x86_cpu_properties;
3171 xcc->parent_reset = cc->reset;
3172 cc->reset = x86_cpu_reset;
3173 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
3175 cc->class_by_name = x86_cpu_class_by_name;
3176 cc->parse_features = x86_cpu_parse_featurestr;
3177 cc->has_work = x86_cpu_has_work;
3178 cc->do_interrupt = x86_cpu_do_interrupt;
3179 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
3180 cc->dump_state = x86_cpu_dump_state;
3181 cc->set_pc = x86_cpu_set_pc;
3182 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
3183 cc->gdb_read_register = x86_cpu_gdb_read_register;
3184 cc->gdb_write_register = x86_cpu_gdb_write_register;
3185 cc->get_arch_id = x86_cpu_get_arch_id;
3186 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
3187 #ifdef CONFIG_USER_ONLY
3188 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
3189 #else
3190 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
3191 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
3192 cc->write_elf64_note = x86_cpu_write_elf64_note;
3193 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
3194 cc->write_elf32_note = x86_cpu_write_elf32_note;
3195 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
3196 cc->vmsd = &vmstate_x86_cpu;
3197 #endif
3198 cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
3199 #ifndef CONFIG_USER_ONLY
3200 cc->debug_excp_handler = breakpoint_handler;
3201 #endif
3202 cc->cpu_exec_enter = x86_cpu_exec_enter;
3203 cc->cpu_exec_exit = x86_cpu_exec_exit;
3206 static const TypeInfo x86_cpu_type_info = {
3207 .name = TYPE_X86_CPU,
3208 .parent = TYPE_CPU,
3209 .instance_size = sizeof(X86CPU),
3210 .instance_init = x86_cpu_initfn,
3211 .abstract = true,
3212 .class_size = sizeof(X86CPUClass),
3213 .class_init = x86_cpu_common_class_init,
3216 static void x86_cpu_register_types(void)
3218 int i;
3220 type_register_static(&x86_cpu_type_info);
3221 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3222 x86_register_cpudef_type(&builtin_x86_defs[i]);
3224 #ifdef CONFIG_KVM
3225 type_register_static(&host_x86_cpu_type_info);
3226 #endif
3229 type_init(x86_cpu_register_types)