target-i386/cpu.c (qemu.git, blob 3fa14bf171c3128ce9b773ef67cb01ce5d0c4664)
1 /*
2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include <stdlib.h>
20 #include <stdio.h>
21 #include <string.h>
22 #include <inttypes.h>
24 #include "cpu.h"
25 #include "sysemu/kvm.h"
26 #include "sysemu/cpus.h"
27 #include "kvm_i386.h"
29 #include "qemu/error-report.h"
30 #include "qemu/option.h"
31 #include "qemu/config-file.h"
32 #include "qapi/qmp/qerror.h"
34 #include "qapi-types.h"
35 #include "qapi-visit.h"
36 #include "qapi/visitor.h"
37 #include "sysemu/arch_init.h"
39 #include "hw/hw.h"
40 #if defined(CONFIG_KVM)
41 #include <linux/kvm_para.h>
42 #endif
44 #include "sysemu/sysemu.h"
45 #include "hw/qdev-properties.h"
46 #ifndef CONFIG_USER_ONLY
47 #include "exec/address-spaces.h"
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
50 #endif
53 /* Cache topology CPUID constants: */
55 /* CPUID Leaf 2 Descriptors */
57 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
58 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
59 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
62 /* CPUID Leaf 4 constants: */
64 /* EAX: */
65 #define CPUID_4_TYPE_DCACHE 1
66 #define CPUID_4_TYPE_ICACHE 2
67 #define CPUID_4_TYPE_UNIFIED 3
69 #define CPUID_4_LEVEL(l) ((l) << 5)
71 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
72 #define CPUID_4_FULLY_ASSOC (1 << 9)
74 /* EDX: */
75 #define CPUID_4_NO_INVD_SHARING (1 << 0)
76 #define CPUID_4_INCLUSIVE (1 << 1)
77 #define CPUID_4_COMPLEX_IDX (1 << 2)
79 #define ASSOC_FULL 0xFF
81 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
82 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
83 a == 2 ? 0x2 : \
84 a == 4 ? 0x4 : \
85 a == 8 ? 0x6 : \
86 a == 16 ? 0x8 : \
87 a == 32 ? 0xA : \
88 a == 48 ? 0xB : \
89 a == 64 ? 0xC : \
90 a == 96 ? 0xD : \
91 a == 128 ? 0xE : \
92 a == ASSOC_FULL ? 0xF : \
93 0 /* invalid value */)
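/* A few sample values of this encoding, for reference:
 *   AMD_ENC_ASSOC(1)          -> 0x1  (direct mapped)
 *   AMD_ENC_ASSOC(16)         -> 0x8  (16-way)
 *   AMD_ENC_ASSOC(ASSOC_FULL) -> 0xF  (fully associative)
 * Any associativity not listed above (e.g. 3) encodes as 0, the
 * invalid marker.
 */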
96 /* Definitions of the hardcoded cache entries we expose: */
98 /* L1 data cache: */
99 #define L1D_LINE_SIZE 64
100 #define L1D_ASSOCIATIVITY 8
101 #define L1D_SETS 64
102 #define L1D_PARTITIONS 1
103 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
104 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
105 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
106 #define L1D_LINES_PER_TAG 1
107 #define L1D_SIZE_KB_AMD 64
108 #define L1D_ASSOCIATIVITY_AMD 2
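/* As a cross-check of the leaf-4 constants above:
 *   64 B/line * 8 ways * 64 sets * 1 partition = 32768 B = 32 KiB,
 * which is what the CPUID_2_L1D_32KB_8WAY_64B descriptor advertises.
 * The AMD leaf 0x80000005 values (64 KiB, 2-way) describe a different
 * cache, which is what the FIXME above refers to.
 */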
110 /* L1 instruction cache: */
111 #define L1I_LINE_SIZE 64
112 #define L1I_ASSOCIATIVITY 8
113 #define L1I_SETS 64
114 #define L1I_PARTITIONS 1
115 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
116 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
117 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
118 #define L1I_LINES_PER_TAG 1
119 #define L1I_SIZE_KB_AMD 64
120 #define L1I_ASSOCIATIVITY_AMD 2
122 /* Level 2 unified cache: */
123 #define L2_LINE_SIZE 64
124 #define L2_ASSOCIATIVITY 16
125 #define L2_SETS 4096
126 #define L2_PARTITIONS 1
127 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
128 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
129 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
130 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
131 #define L2_LINES_PER_TAG 1
132 #define L2_SIZE_KB_AMD 512
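/* The same cross-check for the L2 constants:
 *   64 B/line * 16 ways * 4096 sets * 1 partition = 4 MiB (leaf 4),
 * while the leaf-2 descriptor claims 2 MB and the AMD leaf 0x80000006
 * size is 512 KiB; three different answers, as the FIXMEs note.
 */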
134 /* No L3 cache: */
135 #define L3_SIZE_KB 0 /* disabled */
136 #define L3_ASSOCIATIVITY 0 /* disabled */
137 #define L3_LINES_PER_TAG 0 /* disabled */
138 #define L3_LINE_SIZE 0 /* disabled */
140 /* TLB definitions: */
142 #define L1_DTLB_2M_ASSOC 1
143 #define L1_DTLB_2M_ENTRIES 255
144 #define L1_DTLB_4K_ASSOC 1
145 #define L1_DTLB_4K_ENTRIES 255
147 #define L1_ITLB_2M_ASSOC 1
148 #define L1_ITLB_2M_ENTRIES 255
149 #define L1_ITLB_4K_ASSOC 1
150 #define L1_ITLB_4K_ENTRIES 255
152 #define L2_DTLB_2M_ASSOC 0 /* disabled */
153 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
154 #define L2_DTLB_4K_ASSOC 4
155 #define L2_DTLB_4K_ENTRIES 512
157 #define L2_ITLB_2M_ASSOC 0 /* disabled */
158 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
159 #define L2_ITLB_4K_ASSOC 4
160 #define L2_ITLB_4K_ENTRIES 512
164 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
165 uint32_t vendor2, uint32_t vendor3)
167 int i;
168 for (i = 0; i < 4; i++) {
169 dst[i] = vendor1 >> (8 * i);
170 dst[i + 4] = vendor2 >> (8 * i);
171 dst[i + 8] = vendor3 >> (8 * i);
173 dst[CPUID_VENDOR_SZ] = '\0';
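/* For example, passing the Intel vendor words in EBX/EDX/ECX order,
 *   x86_cpu_vendor_words2str(buf, 0x756e6547, 0x49656e69, 0x6c65746e);
 * produces "GenuineIntel": each 32-bit word holds four ASCII
 * characters, least-significant byte first.
 */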
176 /* feature flags taken from "Intel Processor Identification and the CPUID
177 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
178 * between feature naming conventions, aliases may be added.
180 static const char *feature_name[] = {
181 "fpu", "vme", "de", "pse",
182 "tsc", "msr", "pae", "mce",
183 "cx8", "apic", NULL, "sep",
184 "mtrr", "pge", "mca", "cmov",
185 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
186 NULL, "ds" /* Intel dts */, "acpi", "mmx",
187 "fxsr", "sse", "sse2", "ss",
188 "ht" /* Intel htt */, "tm", "ia64", "pbe",
190 static const char *ext_feature_name[] = {
191 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
192 "ds_cpl", "vmx", "smx", "est",
193 "tm2", "ssse3", "cid", NULL,
194 "fma", "cx16", "xtpr", "pdcm",
195 NULL, "pcid", "dca", "sse4.1|sse4_1",
196 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
197 "tsc-deadline", "aes", "xsave", "osxsave",
198 "avx", "f16c", "rdrand", "hypervisor",
200 /* Feature names that are already defined on feature_name[] but are set on
201 * CPUID[8000_0001].EDX on AMD CPUs don't have their names on
202 * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
203 * if and only if CPU vendor is AMD.
205 static const char *ext2_feature_name[] = {
206 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
207 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
208 NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
209 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
210 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
211 "nx|xd", NULL, "mmxext", NULL /* mmx */,
212 NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
213 NULL, "lm|i64", "3dnowext", "3dnow",
215 static const char *ext3_feature_name[] = {
216 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
217 "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
218 "3dnowprefetch", "osvw", "ibs", "xop",
219 "skinit", "wdt", NULL, "lwp",
220 "fma4", "tce", NULL, "nodeid_msr",
221 NULL, "tbm", "topoext", "perfctr_core",
222 "perfctr_nb", NULL, NULL, NULL,
223 NULL, NULL, NULL, NULL,
226 static const char *ext4_feature_name[] = {
227 NULL, NULL, "xstore", "xstore-en",
228 NULL, NULL, "xcrypt", "xcrypt-en",
229 "ace2", "ace2-en", "phe", "phe-en",
230 "pmm", "pmm-en", NULL, NULL,
231 NULL, NULL, NULL, NULL,
232 NULL, NULL, NULL, NULL,
233 NULL, NULL, NULL, NULL,
234 NULL, NULL, NULL, NULL,
237 static const char *kvm_feature_name[] = {
238 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
239 "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
240 NULL, NULL, NULL, NULL,
241 NULL, NULL, NULL, NULL,
242 NULL, NULL, NULL, NULL,
243 NULL, NULL, NULL, NULL,
244 "kvmclock-stable-bit", NULL, NULL, NULL,
245 NULL, NULL, NULL, NULL,
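/* Note: "kvmclock" appears twice above on purpose: KVM exposes two
 * clocksource feature bits (bit 0, KVM_FEATURE_CLOCKSOURCE, and bit 3,
 * KVM_FEATURE_CLOCKSOURCE2) under the same user-visible name.
 */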
248 static const char *svm_feature_name[] = {
249 "npt", "lbrv", "svm_lock", "nrip_save",
250 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
251 NULL, NULL, "pause_filter", NULL,
252 "pfthreshold", NULL, NULL, NULL,
253 NULL, NULL, NULL, NULL,
254 NULL, NULL, NULL, NULL,
255 NULL, NULL, NULL, NULL,
256 NULL, NULL, NULL, NULL,
259 static const char *cpuid_7_0_ebx_feature_name[] = {
260 "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
261 "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
262 "avx512f", NULL, "rdseed", "adx", "smap", NULL, "pcommit", "clflushopt",
263 "clwb", NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
266 static const char *cpuid_7_0_ecx_feature_name[] = {
267 NULL, NULL, NULL, "pku",
268 "ospke", NULL, NULL, NULL,
269 NULL, NULL, NULL, NULL,
270 NULL, NULL, NULL, NULL,
271 NULL, NULL, NULL, NULL,
272 NULL, NULL, NULL, NULL,
273 NULL, NULL, NULL, NULL,
274 NULL, NULL, NULL, NULL,
277 static const char *cpuid_apm_edx_feature_name[] = {
278 NULL, NULL, NULL, NULL,
279 NULL, NULL, NULL, NULL,
280 "invtsc", NULL, NULL, NULL,
281 NULL, NULL, NULL, NULL,
282 NULL, NULL, NULL, NULL,
283 NULL, NULL, NULL, NULL,
284 NULL, NULL, NULL, NULL,
285 NULL, NULL, NULL, NULL,
288 static const char *cpuid_xsave_feature_name[] = {
289 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
290 NULL, NULL, NULL, NULL,
291 NULL, NULL, NULL, NULL,
292 NULL, NULL, NULL, NULL,
293 NULL, NULL, NULL, NULL,
294 NULL, NULL, NULL, NULL,
295 NULL, NULL, NULL, NULL,
296 NULL, NULL, NULL, NULL,
299 static const char *cpuid_6_feature_name[] = {
300 NULL, NULL, "arat", NULL,
301 NULL, NULL, NULL, NULL,
302 NULL, NULL, NULL, NULL,
303 NULL, NULL, NULL, NULL,
304 NULL, NULL, NULL, NULL,
305 NULL, NULL, NULL, NULL,
306 NULL, NULL, NULL, NULL,
307 NULL, NULL, NULL, NULL,
310 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
311 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
312 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
313 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
314 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
315 CPUID_PSE36 | CPUID_FXSR)
316 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
317 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
318 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
319 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
320 CPUID_PAE | CPUID_SEP | CPUID_APIC)
322 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
323 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
324 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
325 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
326 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
327 /* partly implemented:
328 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
329 /* missing:
330 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
331 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
332 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
333 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
334 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
335 /* missing:
336 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
337 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
338 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
339 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_XSAVE,
340 CPUID_EXT_OSXSAVE, CPUID_EXT_AVX, CPUID_EXT_F16C,
341 CPUID_EXT_RDRAND */
343 #ifdef TARGET_X86_64
344 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
345 #else
346 #define TCG_EXT2_X86_64_FEATURES 0
347 #endif
349 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
350 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
351 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
352 TCG_EXT2_X86_64_FEATURES)
353 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
354 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
355 #define TCG_EXT4_FEATURES 0
356 #define TCG_SVM_FEATURES 0
357 #define TCG_KVM_FEATURES 0
358 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
359 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
360 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
361 CPUID_7_0_EBX_CLWB)
362 /* missing:
363 CPUID_7_0_EBX_FSGSBASE, CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
364 CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
365 CPUID_7_0_EBX_RDSEED */
366 #define TCG_7_0_ECX_FEATURES 0
367 #define TCG_APM_FEATURES 0
368 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
371 typedef struct FeatureWordInfo {
372 const char **feat_names;
373 uint32_t cpuid_eax; /* Input EAX for CPUID */
374 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
375 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
376 int cpuid_reg; /* output register (R_* constant) */
377 uint32_t tcg_features; /* Feature flags supported by TCG */
378 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
379 } FeatureWordInfo;
381 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
382 [FEAT_1_EDX] = {
383 .feat_names = feature_name,
384 .cpuid_eax = 1, .cpuid_reg = R_EDX,
385 .tcg_features = TCG_FEATURES,
387 [FEAT_1_ECX] = {
388 .feat_names = ext_feature_name,
389 .cpuid_eax = 1, .cpuid_reg = R_ECX,
390 .tcg_features = TCG_EXT_FEATURES,
392 [FEAT_8000_0001_EDX] = {
393 .feat_names = ext2_feature_name,
394 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
395 .tcg_features = TCG_EXT2_FEATURES,
397 [FEAT_8000_0001_ECX] = {
398 .feat_names = ext3_feature_name,
399 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
400 .tcg_features = TCG_EXT3_FEATURES,
402 [FEAT_C000_0001_EDX] = {
403 .feat_names = ext4_feature_name,
404 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
405 .tcg_features = TCG_EXT4_FEATURES,
407 [FEAT_KVM] = {
408 .feat_names = kvm_feature_name,
409 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
410 .tcg_features = TCG_KVM_FEATURES,
412 [FEAT_SVM] = {
413 .feat_names = svm_feature_name,
414 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
415 .tcg_features = TCG_SVM_FEATURES,
417 [FEAT_7_0_EBX] = {
418 .feat_names = cpuid_7_0_ebx_feature_name,
419 .cpuid_eax = 7,
420 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
421 .cpuid_reg = R_EBX,
422 .tcg_features = TCG_7_0_EBX_FEATURES,
424 [FEAT_7_0_ECX] = {
425 .feat_names = cpuid_7_0_ecx_feature_name,
426 .cpuid_eax = 7,
427 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
428 .cpuid_reg = R_ECX,
429 .tcg_features = TCG_7_0_ECX_FEATURES,
431 [FEAT_8000_0007_EDX] = {
432 .feat_names = cpuid_apm_edx_feature_name,
433 .cpuid_eax = 0x80000007,
434 .cpuid_reg = R_EDX,
435 .tcg_features = TCG_APM_FEATURES,
436 .unmigratable_flags = CPUID_APM_INVTSC,
438 [FEAT_XSAVE] = {
439 .feat_names = cpuid_xsave_feature_name,
440 .cpuid_eax = 0xd,
441 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
442 .cpuid_reg = R_EAX,
443 .tcg_features = 0,
445 [FEAT_6_EAX] = {
446 .feat_names = cpuid_6_feature_name,
447 .cpuid_eax = 6, .cpuid_reg = R_EAX,
448 .tcg_features = TCG_6_EAX_FEATURES,
452 typedef struct X86RegisterInfo32 {
453 /* Name of register */
454 const char *name;
455 /* QAPI enum value register */
456 X86CPURegister32 qapi_enum;
457 } X86RegisterInfo32;
459 #define REGISTER(reg) \
460 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
461 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
462 REGISTER(EAX),
463 REGISTER(ECX),
464 REGISTER(EDX),
465 REGISTER(EBX),
466 REGISTER(ESP),
467 REGISTER(EBP),
468 REGISTER(ESI),
469 REGISTER(EDI),
471 #undef REGISTER
473 typedef struct ExtSaveArea {
474 uint32_t feature, bits;
475 uint32_t offset, size;
476 } ExtSaveArea;
478 static const ExtSaveArea ext_save_areas[] = {
479 [2] = { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
480 .offset = 0x240, .size = 0x100 },
481 [3] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
482 .offset = 0x3c0, .size = 0x40 },
483 [4] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
484 .offset = 0x400, .size = 0x40 },
485 [5] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
486 .offset = 0x440, .size = 0x40 },
487 [6] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
488 .offset = 0x480, .size = 0x200 },
489 [7] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
490 .offset = 0x680, .size = 0x400 },
491 [9] = { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
492 .offset = 0xA80, .size = 0x8 },
495 const char *get_register_name_32(unsigned int reg)
497 if (reg >= CPU_NB_REGS32) {
498 return NULL;
500 return x86_reg_info_32[reg].name;
504 * Returns the set of feature flags that are supported and migratable by
505 * QEMU, for a given FeatureWord.
507 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
509 FeatureWordInfo *wi = &feature_word_info[w];
510 uint32_t r = 0;
511 int i;
513 for (i = 0; i < 32; i++) {
514 uint32_t f = 1U << i;
515 /* If the feature name is unknown, it is not supported by QEMU yet */
516 if (!wi->feat_names[i]) {
517 continue;
519 /* Skip features known to QEMU, but explicitly marked as unmigratable */
520 if (wi->unmigratable_flags & f) {
521 continue;
523 r |= f;
525 return r;
528 void host_cpuid(uint32_t function, uint32_t count,
529 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
531 uint32_t vec[4];
533 #ifdef __x86_64__
534 asm volatile("cpuid"
535 : "=a"(vec[0]), "=b"(vec[1]),
536 "=c"(vec[2]), "=d"(vec[3])
537 : "0"(function), "c"(count) : "cc");
538 #elif defined(__i386__)
539 asm volatile("pusha \n\t"
540 "cpuid \n\t"
541 "mov %%eax, 0(%2) \n\t"
542 "mov %%ebx, 4(%2) \n\t"
543 "mov %%ecx, 8(%2) \n\t"
544 "mov %%edx, 12(%2) \n\t"
545 "popa"
546 : : "a"(function), "c"(count), "S"(vec)
547 : "memory", "cc");
548 #else
549 abort();
550 #endif
552 if (eax)
553 *eax = vec[0];
554 if (ebx)
555 *ebx = vec[1];
556 if (ecx)
557 *ecx = vec[2];
558 if (edx)
559 *edx = vec[3];
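/* Typical use, as in host_x86_cpu_class_init() below:
 *   uint32_t eax, ebx, ecx, edx;
 *   host_cpuid(0, 0, &eax, &ebx, &ecx, &edx);
 * which leaves the highest basic leaf in eax and the host vendor
 * string spread across ebx/edx/ecx.
 */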
562 #define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
564 /* general substring compare of *[s1..e1) and *[s2..e2). sx is start of
565 * a substring. ex, if not NULL, points to the first char after a substring;
566 * otherwise the string is assumed to be sized by a terminating nul.
567 * Return lexical ordering of *s1:*s2.
569 static int sstrcmp(const char *s1, const char *e1,
570 const char *s2, const char *e2)
572 for (;;) {
573 if (!*s1 || !*s2 || *s1 != *s2)
574 return (*s1 - *s2);
575 ++s1, ++s2;
576 if (s1 == e1 && s2 == e2)
577 return (0);
578 else if (s1 == e1)
579 return (*s2);
580 else if (s2 == e2)
581 return (*s1);
585 /* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
586 * '|' delimited (possibly empty) strings, in which case the search for a
587 * match within the alternatives proceeds left to right. Return 0 for success,
588 * non-zero otherwise.
590 static int altcmp(const char *s, const char *e, const char *altstr)
592 const char *p, *q;
594 for (q = p = altstr; ; ) {
595 while (*p && *p != '|')
596 ++p;
597 if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
598 return (0);
599 if (!*p)
600 return (1);
601 else
602 q = ++p;
606 /* search featureset for the flag *[s..e); if found, set the corresponding
607 * bit in *pval and return true, otherwise return false
609 static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
610 const char **featureset)
612 uint32_t mask;
613 const char **ppc;
614 bool found = false;
616 for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
617 if (*ppc && !altcmp(s, e, *ppc)) {
618 *pval |= mask;
619 found = true;
622 return found;
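/* Example: ext_feature_name[19] is "sse4.1|sse4_1", so
 *   lookup_feature(&val, "sse4_1", NULL, ext_feature_name)
 * matches the second alternative via altcmp(), ORs bit 19
 * (CPUID_EXT_SSE41) into val, and returns true.
 */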
625 static void add_flagname_to_bitmaps(const char *flagname,
626 FeatureWordArray words,
627 Error **errp)
629 FeatureWord w;
630 for (w = 0; w < FEATURE_WORDS; w++) {
631 FeatureWordInfo *wi = &feature_word_info[w];
632 if (wi->feat_names &&
633 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
634 break;
637 if (w == FEATURE_WORDS) {
638 error_setg(errp, "CPU feature %s not found", flagname);
642 /* CPU class name definitions: */
644 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
645 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
647 /* Return type name for a given CPU model name
648 * Caller is responsible for freeing the returned string.
650 static char *x86_cpu_type_name(const char *model_name)
652 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
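/* For instance, x86_cpu_type_name("qemu64") returns the model name
 * with X86_CPU_TYPE_SUFFIX appended, e.g. "qemu64-x86_64-cpu" on a
 * 64-bit target build; the caller must g_free() the result.
 */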
655 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
657 ObjectClass *oc;
658 char *typename;
660 if (cpu_model == NULL) {
661 return NULL;
664 typename = x86_cpu_type_name(cpu_model);
665 oc = object_class_by_name(typename);
666 g_free(typename);
667 return oc;
670 struct X86CPUDefinition {
671 const char *name;
672 uint32_t level;
673 uint32_t xlevel;
674 uint32_t xlevel2;
675 /* vendor is zero-terminated, 12 character ASCII string */
676 char vendor[CPUID_VENDOR_SZ + 1];
677 int family;
678 int model;
679 int stepping;
680 FeatureWordArray features;
681 char model_id[48];
684 static X86CPUDefinition builtin_x86_defs[] = {
686 .name = "qemu64",
687 .level = 0xd,
688 .vendor = CPUID_VENDOR_AMD,
689 .family = 6,
690 .model = 6,
691 .stepping = 3,
692 .features[FEAT_1_EDX] =
693 PPRO_FEATURES |
694 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
695 CPUID_PSE36,
696 .features[FEAT_1_ECX] =
697 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
698 .features[FEAT_8000_0001_EDX] =
699 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
700 .features[FEAT_8000_0001_ECX] =
701 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
702 .xlevel = 0x8000000A,
705 .name = "phenom",
706 .level = 5,
707 .vendor = CPUID_VENDOR_AMD,
708 .family = 16,
709 .model = 2,
710 .stepping = 3,
711 /* Missing: CPUID_HT */
712 .features[FEAT_1_EDX] =
713 PPRO_FEATURES |
714 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
715 CPUID_PSE36 | CPUID_VME,
716 .features[FEAT_1_ECX] =
717 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
718 CPUID_EXT_POPCNT,
719 .features[FEAT_8000_0001_EDX] =
720 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
721 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
722 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
723 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
724 CPUID_EXT3_CR8LEG,
725 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
726 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
727 .features[FEAT_8000_0001_ECX] =
728 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
729 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
730 /* Missing: CPUID_SVM_LBRV */
731 .features[FEAT_SVM] =
732 CPUID_SVM_NPT,
733 .xlevel = 0x8000001A,
734 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
737 .name = "core2duo",
738 .level = 10,
739 .vendor = CPUID_VENDOR_INTEL,
740 .family = 6,
741 .model = 15,
742 .stepping = 11,
743 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
744 .features[FEAT_1_EDX] =
745 PPRO_FEATURES |
746 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
747 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
748 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
749 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
750 .features[FEAT_1_ECX] =
751 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
752 CPUID_EXT_CX16,
753 .features[FEAT_8000_0001_EDX] =
754 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
755 .features[FEAT_8000_0001_ECX] =
756 CPUID_EXT3_LAHF_LM,
757 .xlevel = 0x80000008,
758 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
761 .name = "kvm64",
762 .level = 0xd,
763 .vendor = CPUID_VENDOR_INTEL,
764 .family = 15,
765 .model = 6,
766 .stepping = 1,
767 /* Missing: CPUID_HT */
768 .features[FEAT_1_EDX] =
769 PPRO_FEATURES | CPUID_VME |
770 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
771 CPUID_PSE36,
772 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
773 .features[FEAT_1_ECX] =
774 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
775 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
776 .features[FEAT_8000_0001_EDX] =
777 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
778 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
779 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
780 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
781 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
782 .features[FEAT_8000_0001_ECX] =
783 0,
784 .xlevel = 0x80000008,
785 .model_id = "Common KVM processor"
788 .name = "qemu32",
789 .level = 4,
790 .vendor = CPUID_VENDOR_INTEL,
791 .family = 6,
792 .model = 6,
793 .stepping = 3,
794 .features[FEAT_1_EDX] =
795 PPRO_FEATURES,
796 .features[FEAT_1_ECX] =
797 CPUID_EXT_SSE3,
798 .xlevel = 0x80000004,
801 .name = "kvm32",
802 .level = 5,
803 .vendor = CPUID_VENDOR_INTEL,
804 .family = 15,
805 .model = 6,
806 .stepping = 1,
807 .features[FEAT_1_EDX] =
808 PPRO_FEATURES | CPUID_VME |
809 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
810 .features[FEAT_1_ECX] =
811 CPUID_EXT_SSE3,
812 .features[FEAT_8000_0001_ECX] =
813 0,
814 .xlevel = 0x80000008,
815 .model_id = "Common 32-bit KVM processor"
818 .name = "coreduo",
819 .level = 10,
820 .vendor = CPUID_VENDOR_INTEL,
821 .family = 6,
822 .model = 14,
823 .stepping = 8,
824 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
825 .features[FEAT_1_EDX] =
826 PPRO_FEATURES | CPUID_VME |
827 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
828 CPUID_SS,
829 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
830 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
831 .features[FEAT_1_ECX] =
832 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
833 .features[FEAT_8000_0001_EDX] =
834 CPUID_EXT2_NX,
835 .xlevel = 0x80000008,
836 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
839 .name = "486",
840 .level = 1,
841 .vendor = CPUID_VENDOR_INTEL,
842 .family = 4,
843 .model = 8,
844 .stepping = 0,
845 .features[FEAT_1_EDX] =
846 I486_FEATURES,
847 .xlevel = 0,
850 .name = "pentium",
851 .level = 1,
852 .vendor = CPUID_VENDOR_INTEL,
853 .family = 5,
854 .model = 4,
855 .stepping = 3,
856 .features[FEAT_1_EDX] =
857 PENTIUM_FEATURES,
858 .xlevel = 0,
861 .name = "pentium2",
862 .level = 2,
863 .vendor = CPUID_VENDOR_INTEL,
864 .family = 6,
865 .model = 5,
866 .stepping = 2,
867 .features[FEAT_1_EDX] =
868 PENTIUM2_FEATURES,
869 .xlevel = 0,
872 .name = "pentium3",
873 .level = 3,
874 .vendor = CPUID_VENDOR_INTEL,
875 .family = 6,
876 .model = 7,
877 .stepping = 3,
878 .features[FEAT_1_EDX] =
879 PENTIUM3_FEATURES,
880 .xlevel = 0,
883 .name = "athlon",
884 .level = 2,
885 .vendor = CPUID_VENDOR_AMD,
886 .family = 6,
887 .model = 2,
888 .stepping = 3,
889 .features[FEAT_1_EDX] =
890 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
891 CPUID_MCA,
892 .features[FEAT_8000_0001_EDX] =
893 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
894 .xlevel = 0x80000008,
897 .name = "n270",
898 .level = 10,
899 .vendor = CPUID_VENDOR_INTEL,
900 .family = 6,
901 .model = 28,
902 .stepping = 2,
903 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
904 .features[FEAT_1_EDX] =
905 PPRO_FEATURES |
906 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
907 CPUID_ACPI | CPUID_SS,
908 /* Some CPUs have no CPUID_SEP */
909 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
910 * CPUID_EXT_XTPR */
911 .features[FEAT_1_ECX] =
912 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
913 CPUID_EXT_MOVBE,
914 .features[FEAT_8000_0001_EDX] =
915 CPUID_EXT2_NX,
916 .features[FEAT_8000_0001_ECX] =
917 CPUID_EXT3_LAHF_LM,
918 .xlevel = 0x80000008,
919 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
922 .name = "Conroe",
923 .level = 10,
924 .vendor = CPUID_VENDOR_INTEL,
925 .family = 6,
926 .model = 15,
927 .stepping = 3,
928 .features[FEAT_1_EDX] =
929 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
930 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
931 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
932 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
933 CPUID_DE | CPUID_FP87,
934 .features[FEAT_1_ECX] =
935 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
936 .features[FEAT_8000_0001_EDX] =
937 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
938 .features[FEAT_8000_0001_ECX] =
939 CPUID_EXT3_LAHF_LM,
940 .xlevel = 0x80000008,
941 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
944 .name = "Penryn",
945 .level = 10,
946 .vendor = CPUID_VENDOR_INTEL,
947 .family = 6,
948 .model = 23,
949 .stepping = 3,
950 .features[FEAT_1_EDX] =
951 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
952 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
953 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
954 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
955 CPUID_DE | CPUID_FP87,
956 .features[FEAT_1_ECX] =
957 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
958 CPUID_EXT_SSE3,
959 .features[FEAT_8000_0001_EDX] =
960 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
961 .features[FEAT_8000_0001_ECX] =
962 CPUID_EXT3_LAHF_LM,
963 .xlevel = 0x80000008,
964 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
967 .name = "Nehalem",
968 .level = 11,
969 .vendor = CPUID_VENDOR_INTEL,
970 .family = 6,
971 .model = 26,
972 .stepping = 3,
973 .features[FEAT_1_EDX] =
974 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
975 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
976 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
977 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
978 CPUID_DE | CPUID_FP87,
979 .features[FEAT_1_ECX] =
980 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
981 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
982 .features[FEAT_8000_0001_EDX] =
983 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
984 .features[FEAT_8000_0001_ECX] =
985 CPUID_EXT3_LAHF_LM,
986 .xlevel = 0x80000008,
987 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
990 .name = "Westmere",
991 .level = 11,
992 .vendor = CPUID_VENDOR_INTEL,
993 .family = 6,
994 .model = 44,
995 .stepping = 1,
996 .features[FEAT_1_EDX] =
997 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
998 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
999 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1000 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1001 CPUID_DE | CPUID_FP87,
1002 .features[FEAT_1_ECX] =
1003 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1004 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1005 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1006 .features[FEAT_8000_0001_EDX] =
1007 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1008 .features[FEAT_8000_0001_ECX] =
1009 CPUID_EXT3_LAHF_LM,
1010 .features[FEAT_6_EAX] =
1011 CPUID_6_EAX_ARAT,
1012 .xlevel = 0x80000008,
1013 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1016 .name = "SandyBridge",
1017 .level = 0xd,
1018 .vendor = CPUID_VENDOR_INTEL,
1019 .family = 6,
1020 .model = 42,
1021 .stepping = 1,
1022 .features[FEAT_1_EDX] =
1023 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1024 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1025 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1026 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1027 CPUID_DE | CPUID_FP87,
1028 .features[FEAT_1_ECX] =
1029 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1030 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1031 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1032 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1033 CPUID_EXT_SSE3,
1034 .features[FEAT_8000_0001_EDX] =
1035 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1036 CPUID_EXT2_SYSCALL,
1037 .features[FEAT_8000_0001_ECX] =
1038 CPUID_EXT3_LAHF_LM,
1039 .features[FEAT_XSAVE] =
1040 CPUID_XSAVE_XSAVEOPT,
1041 .features[FEAT_6_EAX] =
1042 CPUID_6_EAX_ARAT,
1043 .xlevel = 0x80000008,
1044 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1047 .name = "IvyBridge",
1048 .level = 0xd,
1049 .vendor = CPUID_VENDOR_INTEL,
1050 .family = 6,
1051 .model = 58,
1052 .stepping = 9,
1053 .features[FEAT_1_EDX] =
1054 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1055 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1056 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1057 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1058 CPUID_DE | CPUID_FP87,
1059 .features[FEAT_1_ECX] =
1060 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1061 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1062 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1063 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1064 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1065 .features[FEAT_7_0_EBX] =
1066 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1067 CPUID_7_0_EBX_ERMS,
1068 .features[FEAT_8000_0001_EDX] =
1069 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1070 CPUID_EXT2_SYSCALL,
1071 .features[FEAT_8000_0001_ECX] =
1072 CPUID_EXT3_LAHF_LM,
1073 .features[FEAT_XSAVE] =
1074 CPUID_XSAVE_XSAVEOPT,
1075 .features[FEAT_6_EAX] =
1076 CPUID_6_EAX_ARAT,
1077 .xlevel = 0x80000008,
1078 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1081 .name = "Haswell-noTSX",
1082 .level = 0xd,
1083 .vendor = CPUID_VENDOR_INTEL,
1084 .family = 6,
1085 .model = 60,
1086 .stepping = 1,
1087 .features[FEAT_1_EDX] =
1088 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1089 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1090 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1091 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1092 CPUID_DE | CPUID_FP87,
1093 .features[FEAT_1_ECX] =
1094 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1095 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1096 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1097 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1098 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1099 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1100 .features[FEAT_8000_0001_EDX] =
1101 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1102 CPUID_EXT2_SYSCALL,
1103 .features[FEAT_8000_0001_ECX] =
1104 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1105 .features[FEAT_7_0_EBX] =
1106 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1107 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1108 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1109 .features[FEAT_XSAVE] =
1110 CPUID_XSAVE_XSAVEOPT,
1111 .features[FEAT_6_EAX] =
1112 CPUID_6_EAX_ARAT,
1113 .xlevel = 0x80000008,
1114 .model_id = "Intel Core Processor (Haswell, no TSX)",
1115 }, {
1116 .name = "Haswell",
1117 .level = 0xd,
1118 .vendor = CPUID_VENDOR_INTEL,
1119 .family = 6,
1120 .model = 60,
1121 .stepping = 1,
1122 .features[FEAT_1_EDX] =
1123 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1124 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1125 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1126 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1127 CPUID_DE | CPUID_FP87,
1128 .features[FEAT_1_ECX] =
1129 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1130 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1131 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1132 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1133 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1134 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1135 .features[FEAT_8000_0001_EDX] =
1136 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1137 CPUID_EXT2_SYSCALL,
1138 .features[FEAT_8000_0001_ECX] =
1139 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1140 .features[FEAT_7_0_EBX] =
1141 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1142 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1143 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1144 CPUID_7_0_EBX_RTM,
1145 .features[FEAT_XSAVE] =
1146 CPUID_XSAVE_XSAVEOPT,
1147 .features[FEAT_6_EAX] =
1148 CPUID_6_EAX_ARAT,
1149 .xlevel = 0x80000008,
1150 .model_id = "Intel Core Processor (Haswell)",
1153 .name = "Broadwell-noTSX",
1154 .level = 0xd,
1155 .vendor = CPUID_VENDOR_INTEL,
1156 .family = 6,
1157 .model = 61,
1158 .stepping = 2,
1159 .features[FEAT_1_EDX] =
1160 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1161 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1162 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1163 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1164 CPUID_DE | CPUID_FP87,
1165 .features[FEAT_1_ECX] =
1166 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1167 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1168 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1169 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1170 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1171 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1172 .features[FEAT_8000_0001_EDX] =
1173 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1174 CPUID_EXT2_SYSCALL,
1175 .features[FEAT_8000_0001_ECX] =
1176 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1177 .features[FEAT_7_0_EBX] =
1178 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1179 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1180 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1181 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1182 CPUID_7_0_EBX_SMAP,
1183 .features[FEAT_XSAVE] =
1184 CPUID_XSAVE_XSAVEOPT,
1185 .features[FEAT_6_EAX] =
1186 CPUID_6_EAX_ARAT,
1187 .xlevel = 0x80000008,
1188 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1191 .name = "Broadwell",
1192 .level = 0xd,
1193 .vendor = CPUID_VENDOR_INTEL,
1194 .family = 6,
1195 .model = 61,
1196 .stepping = 2,
1197 .features[FEAT_1_EDX] =
1198 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1199 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1200 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1201 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1202 CPUID_DE | CPUID_FP87,
1203 .features[FEAT_1_ECX] =
1204 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1205 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1206 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1207 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1208 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1209 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1210 .features[FEAT_8000_0001_EDX] =
1211 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1212 CPUID_EXT2_SYSCALL,
1213 .features[FEAT_8000_0001_ECX] =
1214 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1215 .features[FEAT_7_0_EBX] =
1216 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1217 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1218 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1219 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1220 CPUID_7_0_EBX_SMAP,
1221 .features[FEAT_XSAVE] =
1222 CPUID_XSAVE_XSAVEOPT,
1223 .features[FEAT_6_EAX] =
1224 CPUID_6_EAX_ARAT,
1225 .xlevel = 0x80000008,
1226 .model_id = "Intel Core Processor (Broadwell)",
1229 .name = "Opteron_G1",
1230 .level = 5,
1231 .vendor = CPUID_VENDOR_AMD,
1232 .family = 15,
1233 .model = 6,
1234 .stepping = 1,
1235 .features[FEAT_1_EDX] =
1236 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1237 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1238 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1239 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1240 CPUID_DE | CPUID_FP87,
1241 .features[FEAT_1_ECX] =
1242 CPUID_EXT_SSE3,
1243 .features[FEAT_8000_0001_EDX] =
1244 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1245 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1246 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1247 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1248 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1249 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1250 .xlevel = 0x80000008,
1251 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1254 .name = "Opteron_G2",
1255 .level = 5,
1256 .vendor = CPUID_VENDOR_AMD,
1257 .family = 15,
1258 .model = 6,
1259 .stepping = 1,
1260 .features[FEAT_1_EDX] =
1261 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1262 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1263 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1264 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1265 CPUID_DE | CPUID_FP87,
1266 .features[FEAT_1_ECX] =
1267 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1268 /* Missing: CPUID_EXT2_RDTSCP */
1269 .features[FEAT_8000_0001_EDX] =
1270 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1271 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1272 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1273 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1274 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1275 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1276 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1277 .features[FEAT_8000_0001_ECX] =
1278 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1279 .xlevel = 0x80000008,
1280 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1283 .name = "Opteron_G3",
1284 .level = 5,
1285 .vendor = CPUID_VENDOR_AMD,
1286 .family = 15,
1287 .model = 6,
1288 .stepping = 1,
1289 .features[FEAT_1_EDX] =
1290 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1291 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1292 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1293 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1294 CPUID_DE | CPUID_FP87,
1295 .features[FEAT_1_ECX] =
1296 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1297 CPUID_EXT_SSE3,
1298 /* Missing: CPUID_EXT2_RDTSCP */
1299 .features[FEAT_8000_0001_EDX] =
1300 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1301 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1302 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1303 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1304 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1305 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1306 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1307 .features[FEAT_8000_0001_ECX] =
1308 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1309 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1310 .xlevel = 0x80000008,
1311 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1314 .name = "Opteron_G4",
1315 .level = 0xd,
1316 .vendor = CPUID_VENDOR_AMD,
1317 .family = 21,
1318 .model = 1,
1319 .stepping = 2,
1320 .features[FEAT_1_EDX] =
1321 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1322 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1323 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1324 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1325 CPUID_DE | CPUID_FP87,
1326 .features[FEAT_1_ECX] =
1327 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1328 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1329 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1330 CPUID_EXT_SSE3,
1331 /* Missing: CPUID_EXT2_RDTSCP */
1332 .features[FEAT_8000_0001_EDX] =
1333 CPUID_EXT2_LM |
1334 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1335 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1336 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1337 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1338 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1339 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1340 .features[FEAT_8000_0001_ECX] =
1341 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1342 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1343 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1344 CPUID_EXT3_LAHF_LM,
1345 /* no xsaveopt! */
1346 .xlevel = 0x8000001A,
1347 .model_id = "AMD Opteron 62xx class CPU",
1350 .name = "Opteron_G5",
1351 .level = 0xd,
1352 .vendor = CPUID_VENDOR_AMD,
1353 .family = 21,
1354 .model = 2,
1355 .stepping = 0,
1356 .features[FEAT_1_EDX] =
1357 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1358 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1359 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1360 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1361 CPUID_DE | CPUID_FP87,
1362 .features[FEAT_1_ECX] =
1363 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1364 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1365 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1366 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1367 /* Missing: CPUID_EXT2_RDTSCP */
1368 .features[FEAT_8000_0001_EDX] =
1369 CPUID_EXT2_LM |
1370 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1371 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1372 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1373 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1374 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1375 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1376 .features[FEAT_8000_0001_ECX] =
1377 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1378 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1379 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1380 CPUID_EXT3_LAHF_LM,
1381 /* no xsaveopt! */
1382 .xlevel = 0x8000001A,
1383 .model_id = "AMD Opteron 63xx class CPU",
1387 typedef struct PropValue {
1388 const char *prop, *value;
1389 } PropValue;
1391 /* KVM-specific features that are automatically added/removed
1392 * from all CPU models when KVM is enabled.
1394 static PropValue kvm_default_props[] = {
1395 { "kvmclock", "on" },
1396 { "kvm-nopiodelay", "on" },
1397 { "kvm-asyncpf", "on" },
1398 { "kvm-steal-time", "on" },
1399 { "kvm-pv-eoi", "on" },
1400 { "kvmclock-stable-bit", "on" },
1401 { "x2apic", "on" },
1402 { "acpi", "off" },
1403 { "monitor", "off" },
1404 { "svm", "off" },
1405 { NULL, NULL },
1408 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1410 PropValue *pv;
1411 for (pv = kvm_default_props; pv->prop; pv++) {
1412 if (!strcmp(pv->prop, prop)) {
1413 pv->value = value;
1414 break;
1418 /* It is valid to call this function only for properties that
1419 * are already present in the kvm_default_props table.
1421 assert(pv->prop);
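/* Usage sketch (hypothetical caller): machine-type compat code could
 * override one of the defaults above, e.g.
 *   x86_cpu_change_kvm_default("kvm-pv-eoi", "off");
 * Only properties already listed in kvm_default_props[] are accepted;
 * anything else trips the assert above.
 */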
1424 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1425 bool migratable_only);
1427 #ifdef CONFIG_KVM
1429 static int cpu_x86_fill_model_id(char *str)
1431 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1432 int i;
1434 for (i = 0; i < 3; i++) {
1435 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
1436 memcpy(str + i * 16 + 0, &eax, 4);
1437 memcpy(str + i * 16 + 4, &ebx, 4);
1438 memcpy(str + i * 16 + 8, &ecx, 4);
1439 memcpy(str + i * 16 + 12, &edx, 4);
1441 return 0;
1444 static X86CPUDefinition host_cpudef;
1446 static Property host_x86_cpu_properties[] = {
1447 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1448 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
1449 DEFINE_PROP_END_OF_LIST()
1452 /* class_init for the "host" CPU model
1454 * This function may be called before KVM is initialized.
1456 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1458 DeviceClass *dc = DEVICE_CLASS(oc);
1459 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1460 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1462 xcc->kvm_required = true;
1464 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1465 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1467 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1468 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1469 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1470 host_cpudef.stepping = eax & 0x0F;
1472 cpu_x86_fill_model_id(host_cpudef.model_id);
1474 xcc->cpu_def = &host_cpudef;
1476 /* level, xlevel, xlevel2, and the feature words are initialized on
1477 * instance_init, because they require KVM to be initialized.
1480 dc->props = host_x86_cpu_properties;
1481 /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
1482 dc->cannot_destroy_with_object_finalize_yet = true;
1485 static void host_x86_cpu_initfn(Object *obj)
1487 X86CPU *cpu = X86_CPU(obj);
1488 CPUX86State *env = &cpu->env;
1489 KVMState *s = kvm_state;
1491 assert(kvm_enabled());
1493 /* We can't fill the features array here because we don't know yet if
1494 * "migratable" is true or false.
1496 cpu->host_features = true;
1498 env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1499 env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1500 env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1502 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1505 static const TypeInfo host_x86_cpu_type_info = {
1506 .name = X86_CPU_TYPE_NAME("host"),
1507 .parent = TYPE_X86_CPU,
1508 .instance_init = host_x86_cpu_initfn,
1509 .class_init = host_x86_cpu_class_init,
1512 #endif
1514 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1516 FeatureWordInfo *f = &feature_word_info[w];
1517 int i;
1519 for (i = 0; i < 32; ++i) {
1520 if ((1UL << i) & mask) {
1521 const char *reg = get_register_name_32(f->cpuid_reg);
1522 assert(reg);
1523 fprintf(stderr, "warning: %s doesn't support requested feature: "
1524 "CPUID.%02XH:%s%s%s [bit %d]\n",
1525 kvm_enabled() ? "host" : "TCG",
1526 f->cpuid_eax, reg,
1527 f->feat_names[i] ? "." : "",
1528 f->feat_names[i] ? f->feat_names[i] : "", i);
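/* An example of the resulting warning, for VMX (CPUID.01H ECX bit 5)
 * requested on a TCG guest:
 *   warning: TCG doesn't support requested feature: CPUID.01H:ECX.vmx [bit 5]
 */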
1533 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1534 const char *name, void *opaque,
1535 Error **errp)
1537 X86CPU *cpu = X86_CPU(obj);
1538 CPUX86State *env = &cpu->env;
1539 int64_t value;
1541 value = (env->cpuid_version >> 8) & 0xf;
1542 if (value == 0xf) {
1543 value += (env->cpuid_version >> 20) & 0xff;
1545 visit_type_int(v, name, &value, errp);
1548 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1549 const char *name, void *opaque,
1550 Error **errp)
1552 X86CPU *cpu = X86_CPU(obj);
1553 CPUX86State *env = &cpu->env;
1554 const int64_t min = 0;
1555 const int64_t max = 0xff + 0xf;
1556 Error *local_err = NULL;
1557 int64_t value;
1559 visit_type_int(v, name, &value, &local_err);
1560 if (local_err) {
1561 error_propagate(errp, local_err);
1562 return;
1564 if (value < min || value > max) {
1565 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1566 name ? name : "null", value, min, max);
1567 return;
1570 env->cpuid_version &= ~0xff00f00;
1571 if (value > 0x0f) {
1572 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1573 } else {
1574 env->cpuid_version |= value << 8;
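/* Worked example: setting "family" to 21 (as the Opteron_G4/G5 models
 * do) takes the value > 0x0f branch and stores
 *   0xf00 | ((21 - 0x0f) << 20)
 * i.e. base family 0xF plus extended family 6; the getter above then
 * reconstructs 0xf + 6 = 21.
 */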
1578 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1579 const char *name, void *opaque,
1580 Error **errp)
1582 X86CPU *cpu = X86_CPU(obj);
1583 CPUX86State *env = &cpu->env;
1584 int64_t value;
1586 value = (env->cpuid_version >> 4) & 0xf;
1587 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1588 visit_type_int(v, name, &value, errp);
1591 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1592 const char *name, void *opaque,
1593 Error **errp)
1595 X86CPU *cpu = X86_CPU(obj);
1596 CPUX86State *env = &cpu->env;
1597 const int64_t min = 0;
1598 const int64_t max = 0xff;
1599 Error *local_err = NULL;
1600 int64_t value;
1602 visit_type_int(v, name, &value, &local_err);
1603 if (local_err) {
1604 error_propagate(errp, local_err);
1605 return;
1607 if (value < min || value > max) {
1608 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1609 name ? name : "null", value, min, max);
1610 return;
1613 env->cpuid_version &= ~0xf00f0;
1614 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
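/* Worked example: model 58 (0x3A, the IvyBridge definition above)
 * stores 0xA in bits 4-7 and 0x3 in bits 16-19; the getter above
 * reassembles 0xA | (0x3 << 4) = 0x3A = 58.
 */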
1617 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1618 const char *name, void *opaque,
1619 Error **errp)
1621 X86CPU *cpu = X86_CPU(obj);
1622 CPUX86State *env = &cpu->env;
1623 int64_t value;
1625 value = env->cpuid_version & 0xf;
1626 visit_type_int(v, name, &value, errp);
1629 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1630 const char *name, void *opaque,
1631 Error **errp)
1633 X86CPU *cpu = X86_CPU(obj);
1634 CPUX86State *env = &cpu->env;
1635 const int64_t min = 0;
1636 const int64_t max = 0xf;
1637 Error *local_err = NULL;
1638 int64_t value;
1640 visit_type_int(v, name, &value, &local_err);
1641 if (local_err) {
1642 error_propagate(errp, local_err);
1643 return;
1645 if (value < min || value > max) {
1646 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1647 name ? name : "null", value, min, max);
1648 return;
1651 env->cpuid_version &= ~0xf;
1652 env->cpuid_version |= value & 0xf;
1655 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1657 X86CPU *cpu = X86_CPU(obj);
1658 CPUX86State *env = &cpu->env;
1659 char *value;
1661 value = g_malloc(CPUID_VENDOR_SZ + 1);
1662 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1663 env->cpuid_vendor3);
1664 return value;
1667 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1668 Error **errp)
1670 X86CPU *cpu = X86_CPU(obj);
1671 CPUX86State *env = &cpu->env;
1672 int i;
1674 if (strlen(value) != CPUID_VENDOR_SZ) {
1675 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1676 return;
1679 env->cpuid_vendor1 = 0;
1680 env->cpuid_vendor2 = 0;
1681 env->cpuid_vendor3 = 0;
1682 for (i = 0; i < 4; i++) {
1683 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1684 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1685 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1689 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1691 X86CPU *cpu = X86_CPU(obj);
1692 CPUX86State *env = &cpu->env;
1693 char *value;
1694 int i;
1696 value = g_malloc(48 + 1);
1697 for (i = 0; i < 48; i++) {
1698 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1700 value[48] = '\0';
1701 return value;
1704 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1705 Error **errp)
1707 X86CPU *cpu = X86_CPU(obj);
1708 CPUX86State *env = &cpu->env;
1709 int c, len, i;
1711 if (model_id == NULL) {
1712 model_id = "";
1714 len = strlen(model_id);
1715 memset(env->cpuid_model, 0, 48);
1716 for (i = 0; i < 48; i++) {
1717 if (i >= len) {
1718 c = '\0';
1719 } else {
1720 c = (uint8_t)model_id[i];
1722 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1726 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1727 void *opaque, Error **errp)
1729 X86CPU *cpu = X86_CPU(obj);
1730 int64_t value;
1732 value = cpu->env.tsc_khz * 1000;
1733 visit_type_int(v, name, &value, errp);
1736 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1737 void *opaque, Error **errp)
1739 X86CPU *cpu = X86_CPU(obj);
1740 const int64_t min = 0;
1741 const int64_t max = INT64_MAX;
1742 Error *local_err = NULL;
1743 int64_t value;
1745 visit_type_int(v, name, &value, &local_err);
1746 if (local_err) {
1747 error_propagate(errp, local_err);
1748 return;
1750 if (value < min || value > max) {
1751 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1752 name ? name : "null", value, min, max);
1753 return;
1756 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
1759 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, const char *name,
1760 void *opaque, Error **errp)
1762 X86CPU *cpu = X86_CPU(obj);
1763 int64_t value = cpu->apic_id;
1765 visit_type_int(v, name, &value, errp);
1768 static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, const char *name,
1769 void *opaque, Error **errp)
1771 X86CPU *cpu = X86_CPU(obj);
1772 DeviceState *dev = DEVICE(obj);
1773 const int64_t min = 0;
1774 const int64_t max = UINT32_MAX;
1775 Error *error = NULL;
1776 int64_t value;
1778 if (dev->realized) {
1779 error_setg(errp, "Attempt to set property '%s' on '%s' after "
1780 "it was realized", name, object_get_typename(obj));
1781 return;
1784 visit_type_int(v, name, &value, &error);
1785 if (error) {
1786 error_propagate(errp, error);
1787 return;
1789 if (value < min || value > max) {
1790 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1791 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
1792 object_get_typename(obj), name, value, min, max);
1793 return;
1796 if ((value != cpu->apic_id) && cpu_exists(value)) {
1797 error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
1798 return;
1800 cpu->apic_id = value;
1803 /* Generic getter for "feature-words" and "filtered-features" properties */
1804 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
1805 const char *name, void *opaque,
1806 Error **errp)
1808 uint32_t *array = (uint32_t *)opaque;
1809 FeatureWord w;
1810 Error *err = NULL;
1811 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1812 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1813 X86CPUFeatureWordInfoList *list = NULL;
1815 for (w = 0; w < FEATURE_WORDS; w++) {
1816 FeatureWordInfo *wi = &feature_word_info[w];
1817 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1818 qwi->cpuid_input_eax = wi->cpuid_eax;
1819 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1820 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1821 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1822 qwi->features = array[w];
1824 /* List will be in reverse order, but order shouldn't matter */
1825 list_entries[w].next = list;
1826 list_entries[w].value = &word_infos[w];
1827 list = &list_entries[w];
1830 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, &err);
1831 error_propagate(errp, err);
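/*
 * As the comment above notes, prepending each entry builds the list in
 * reverse order.  A tiny standalone illustration with a plain int list
 * (not part of the build):
 */
#if 0
#include <stdio.h>

struct node {
    int value;
    struct node *next;
};

int main(void)
{
    struct node entries[3];
    struct node *list = NULL;
    int i;

    for (i = 0; i < 3; i++) {        /* prepend 0, 1, 2 */
        entries[i].value = i;
        entries[i].next = list;
        list = &entries[i];
    }
    for (; list; list = list->next) {
        printf("%d ", list->value);  /* prints "2 1 0" */
    }
    printf("\n");
    return 0;
}
#endif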
1834 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1835 void *opaque, Error **errp)
1837 X86CPU *cpu = X86_CPU(obj);
1838 int64_t value = cpu->hyperv_spinlock_attempts;
1840 visit_type_int(v, name, &value, errp);
1843 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1844 void *opaque, Error **errp)
1846 const int64_t min = 0xFFF;
1847 const int64_t max = UINT_MAX;
1848 X86CPU *cpu = X86_CPU(obj);
1849 Error *err = NULL;
1850 int64_t value;
1852 visit_type_int(v, name, &value, &err);
1853 if (err) {
1854 error_propagate(errp, err);
1855 return;
1858 if (value < min || value > max) {
1859 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1860 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1861 object_get_typename(obj), name ? name : "null",
1862 value, min, max);
1863 return;
1865 cpu->hyperv_spinlock_attempts = value;
1868 static PropertyInfo qdev_prop_spinlocks = {
1869 .name = "int",
1870 .get = x86_get_hv_spinlocks,
1871 .set = x86_set_hv_spinlocks,
1874 /* Convert all '_' in a feature string option name to '-', to make the feature
1875  * name conform to the QOM property naming rule, which uses '-' instead of '_'. */
1877 static inline void feat2prop(char *s)
1879 while ((s = strchr(s, '_'))) {
1880 *s = '-';
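/*
 * Usage sketch (hypothetical input; not part of the build): a command-line
 * spelling such as "lahf_lm" becomes the QOM property name "lahf-lm".
 */
#if 0
#include <stdio.h>
#include <string.h>

int main(void)
{
    char name[] = "lahf_lm";
    char *s = name;

    while ((s = strchr(s, '_'))) {   /* same loop as feat2prop() above */
        *s = '-';
    }
    printf("%s\n", name);            /* prints "lahf-lm" */
    return 0;
}
#endif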
1884 /* Parse "+feature,-feature,feature=foo" CPU feature string. */
1886 static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
1887 Error **errp)
1889 X86CPU *cpu = X86_CPU(cs);
1890 char *featurestr; /* Single "key=value" string being parsed */
1891 FeatureWord w;
1892 /* Features to be added */
1893 FeatureWordArray plus_features = { 0 };
1894 /* Features to be removed */
1895 FeatureWordArray minus_features = { 0 };
1896 uint32_t numvalue;
1897 CPUX86State *env = &cpu->env;
1898 Error *local_err = NULL;
1900 featurestr = features ? strtok(features, ",") : NULL;
1902 while (featurestr) {
1903 char *val;
1904 if (featurestr[0] == '+') {
1905 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
1906 } else if (featurestr[0] == '-') {
1907 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
1908 } else if ((val = strchr(featurestr, '='))) {
1909 *val = 0; val++;
1910 feat2prop(featurestr);
1911 if (!strcmp(featurestr, "xlevel")) {
1912 char *err;
1913 char num[32];
1915 numvalue = strtoul(val, &err, 0);
1916 if (!*val || *err) {
1917 error_setg(errp, "bad numerical value %s", val);
1918 return;
1920 if (numvalue < 0x80000000) {
1921 error_report("xlevel value shall always be >= 0x80000000"
1922 ", fixup will be removed in future versions");
1923 numvalue += 0x80000000;
1925 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1926 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1927 } else if (!strcmp(featurestr, "tsc-freq")) {
1928 int64_t tsc_freq;
1929 char *err;
1930 char num[32];
1932 tsc_freq = qemu_strtosz_suffix_unit(val, &err,
1933 QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
1934 if (tsc_freq < 0 || *err) {
1935 error_setg(errp, "bad numerical value %s", val);
1936 return;
1938 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
1939 object_property_parse(OBJECT(cpu), num, "tsc-frequency",
1940 &local_err);
1941 } else if (!strcmp(featurestr, "hv-spinlocks")) {
1942 char *err;
1943 const int min = 0xFFF;
1944 char num[32];
1945 numvalue = strtoul(val, &err, 0);
1946 if (!*val || *err) {
1947 error_setg(errp, "bad numerical value %s", val);
1948 return;
1950 if (numvalue < min) {
1951 error_report("hv-spinlocks value shall always be >= 0x%x"
1952 ", fixup will be removed in future versions",
1953 min);
1954 numvalue = min;
1956 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1957 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1958 } else {
1959 object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
1961 } else {
1962 feat2prop(featurestr);
1963 object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
1965 if (local_err) {
1966 error_propagate(errp, local_err);
1967 return;
1969 featurestr = strtok(NULL, ",");
1972 if (cpu->host_features) {
1973 for (w = 0; w < FEATURE_WORDS; w++) {
1974 env->features[w] =
1975 x86_cpu_get_supported_feature_word(w, cpu->migratable);
1979 for (w = 0; w < FEATURE_WORDS; w++) {
1980 env->features[w] |= plus_features[w];
1981 env->features[w] &= ~minus_features[w];
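/*
 * A standalone sketch (made-up feature string, standard C only, not part of
 * the build) of the tokenizing scheme used by the parser above: tokens are
 * split on ',', a leading '+'/'-' selects a feature bitmap, and "key=value"
 * or bare names become QOM property writes.
 */
#if 0
#include <stdio.h>
#include <string.h>

int main(void)
{
    char features[] = "+avx,-vme,xlevel=0x8000000a,check";
    char *tok = strtok(features, ",");

    while (tok) {
        char *val;
        if (tok[0] == '+') {
            printf("add feature bit:    %s\n", tok + 1);
        } else if (tok[0] == '-') {
            printf("remove feature bit: %s\n", tok + 1);
        } else if ((val = strchr(tok, '='))) {
            *val++ = '\0';
            printf("set property %s = %s\n", tok, val);
        } else {
            printf("set property %s = on\n", tok);
        }
        tok = strtok(NULL, ",");
    }
    return 0;
}
#endif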
1985 /* Print all cpuid feature names in featureset. */
1987 static void listflags(FILE *f, fprintf_function print, const char **featureset)
1989 int bit;
1990 bool first = true;
1992 for (bit = 0; bit < 32; bit++) {
1993 if (featureset[bit]) {
1994 print(f, "%s%s", first ? "" : " ", featureset[bit]);
1995 first = false;
2000 /* generate CPU information. */
2001 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2003 X86CPUDefinition *def;
2004 char buf[256];
2005 int i;
2007 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2008 def = &builtin_x86_defs[i];
2009 snprintf(buf, sizeof(buf), "%s", def->name);
2010 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
2012 #ifdef CONFIG_KVM
2013 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
2014 "KVM processor with all supported host features "
2015 "(only available in KVM mode)");
2016 #endif
2018 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2019 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2020 FeatureWordInfo *fw = &feature_word_info[i];
2022 (*cpu_fprintf)(f, " ");
2023 listflags(f, cpu_fprintf, fw->feat_names);
2024 (*cpu_fprintf)(f, "\n");
2028 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2030 CpuDefinitionInfoList *cpu_list = NULL;
2031 X86CPUDefinition *def;
2032 int i;
2034 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2035 CpuDefinitionInfoList *entry;
2036 CpuDefinitionInfo *info;
2038 def = &builtin_x86_defs[i];
2039 info = g_malloc0(sizeof(*info));
2040 info->name = g_strdup(def->name);
2042 entry = g_malloc0(sizeof(*entry));
2043 entry->value = info;
2044 entry->next = cpu_list;
2045 cpu_list = entry;
2048 return cpu_list;
2051 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2052 bool migratable_only)
2054 FeatureWordInfo *wi = &feature_word_info[w];
2055 uint32_t r;
2057 if (kvm_enabled()) {
2058 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2059 wi->cpuid_ecx,
2060 wi->cpuid_reg);
2061 } else if (tcg_enabled()) {
2062 r = wi->tcg_features;
2063 } else {
2064 return ~0;
2066 if (migratable_only) {
2067 r &= x86_cpu_get_migratable_flags(w);
2069 return r;
2073 /* Filters CPU feature words based on host availability of each feature.
2075  * Returns: 0 if all flags are supported by the host, non-zero otherwise. */
2077 static int x86_cpu_filter_features(X86CPU *cpu)
2079 CPUX86State *env = &cpu->env;
2080 FeatureWord w;
2081 int rv = 0;
2083 for (w = 0; w < FEATURE_WORDS; w++) {
2084 uint32_t host_feat =
2085 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2086 uint32_t requested_features = env->features[w];
2087 env->features[w] &= host_feat;
2088 cpu->filtered_features[w] = requested_features & ~env->features[w];
2089 if (cpu->filtered_features[w]) {
2090 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2091 report_unavailable_features(w, cpu->filtered_features[w]);
2093 rv = 1;
2097 return rv;
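/*
 * The filtering above boils down to two bitmask operations per feature word;
 * a tiny standalone illustration with made-up masks (not part of the build):
 */
#if 0
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t requested = 0x0000001f;          /* what the CPU model asks for */
    uint32_t host      = 0x00000017;          /* what KVM/TCG can provide    */
    uint32_t usable    = requested & host;    /* env->features[w]            */
    uint32_t filtered  = requested & ~usable; /* cpu->filtered_features[w]   */

    /* prints usable=00000017 filtered=00000008 */
    printf("usable=%08" PRIx32 " filtered=%08" PRIx32 "\n", usable, filtered);
    return 0;
}
#endif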
2100 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2102 PropValue *pv;
2103 for (pv = props; pv->prop; pv++) {
2104 if (!pv->value) {
2105 continue;
2107 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2108 &error_abort);
2112 /* Load data from X86CPUDefinition. */
2114 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2116 CPUX86State *env = &cpu->env;
2117 const char *vendor;
2118 char host_vendor[CPUID_VENDOR_SZ + 1];
2119 FeatureWord w;
2121 object_property_set_int(OBJECT(cpu), def->level, "level", errp);
2122 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2123 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2124 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2125 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
2126 object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
2127 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2128 for (w = 0; w < FEATURE_WORDS; w++) {
2129 env->features[w] = def->features[w];
2132 /* Special cases not set in the X86CPUDefinition structs: */
2133 if (kvm_enabled()) {
2134 x86_cpu_apply_props(cpu, kvm_default_props);
2137 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2139 /* sysenter isn't supported in compatibility mode on AMD,
2140 * syscall isn't supported in compatibility mode on Intel.
2141 * Normally we advertise the actual CPU vendor, but you can
2142 * override this using the 'vendor' property if you want to use
2143 * KVM's sysenter/syscall emulation in compatibility mode and
2144 * when doing cross-vendor migration. */
2146 vendor = def->vendor;
2147 if (kvm_enabled()) {
2148 uint32_t ebx = 0, ecx = 0, edx = 0;
2149 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2150 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2151 vendor = host_vendor;
2154 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2158 X86CPU *cpu_x86_create(const char *cpu_model, Error **errp)
2160 X86CPU *cpu = NULL;
2161 X86CPUClass *xcc;
2162 ObjectClass *oc;
2163 gchar **model_pieces;
2164 char *name, *features;
2165 Error *error = NULL;
2167 model_pieces = g_strsplit(cpu_model, ",", 2);
2168 if (!model_pieces[0]) {
2169 error_setg(&error, "Invalid/empty CPU model name");
2170 goto out;
2172 name = model_pieces[0];
2173 features = model_pieces[1];
2175 oc = x86_cpu_class_by_name(name);
2176 if (oc == NULL) {
2177 error_setg(&error, "Unable to find CPU definition: %s", name);
2178 goto out;
2180 xcc = X86_CPU_CLASS(oc);
2182 if (xcc->kvm_required && !kvm_enabled()) {
2183 error_setg(&error, "CPU model '%s' requires KVM", name);
2184 goto out;
2187 cpu = X86_CPU(object_new(object_class_get_name(oc)));
2189 x86_cpu_parse_featurestr(CPU(cpu), features, &error);
2190 if (error) {
2191 goto out;
2194 out:
2195 if (error != NULL) {
2196 error_propagate(errp, error);
2197 if (cpu) {
2198 object_unref(OBJECT(cpu));
2199 cpu = NULL;
2202 g_strfreev(model_pieces);
2203 return cpu;
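/*
 * A small illustration (example string, requires GLib; not part of the build)
 * of the g_strsplit() call above: with max_tokens = 2 only the first ','
 * splits, so everything after the model name stays together as the feature
 * string handed to x86_cpu_parse_featurestr().
 */
#if 0
#include <glib.h>
#include <stdio.h>

int main(void)
{
    gchar **pieces = g_strsplit("Nehalem,+aes,-vme,xlevel=0x8000000a", ",", 2);

    printf("model name: %s\n", pieces[0]);   /* "Nehalem"                     */
    printf("featurestr: %s\n", pieces[1]);   /* "+aes,-vme,xlevel=0x8000000a" */
    g_strfreev(pieces);
    return 0;
}
#endif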
2206 X86CPU *cpu_x86_init(const char *cpu_model)
2208 Error *error = NULL;
2209 X86CPU *cpu;
2211 cpu = cpu_x86_create(cpu_model, &error);
2212 if (error) {
2213 goto out;
2216 object_property_set_bool(OBJECT(cpu), true, "realized", &error);
2218 out:
2219 if (error) {
2220 error_report_err(error);
2221 if (cpu != NULL) {
2222 object_unref(OBJECT(cpu));
2223 cpu = NULL;
2226 return cpu;
2229 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2231 X86CPUDefinition *cpudef = data;
2232 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2234 xcc->cpu_def = cpudef;
2237 static void x86_register_cpudef_type(X86CPUDefinition *def)
2239 char *typename = x86_cpu_type_name(def->name);
2240 TypeInfo ti = {
2241 .name = typename,
2242 .parent = TYPE_X86_CPU,
2243 .class_init = x86_cpu_cpudef_class_init,
2244 .class_data = def,
2247 type_register(&ti);
2248 g_free(typename);
2251 #if !defined(CONFIG_USER_ONLY)
2253 void cpu_clear_apic_feature(CPUX86State *env)
2255 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2258 #endif /* !CONFIG_USER_ONLY */
2260 /* Initialize list of CPU models, filling some non-static fields if necessary. */
2262 void x86_cpudef_setup(void)
2264 int i, j;
2265 static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
2267 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
2268 X86CPUDefinition *def = &builtin_x86_defs[i];
2270 /* Look for specific "cpudef" models that
2271  * have the QEMU version in .model_id */
2272 for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
2273 if (strcmp(model_with_versions[j], def->name) == 0) {
2274 pstrcpy(def->model_id, sizeof(def->model_id),
2275 "QEMU Virtual CPU version ");
2276 pstrcat(def->model_id, sizeof(def->model_id),
2277 qemu_hw_version());
2278 break;
2284 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2285 uint32_t *eax, uint32_t *ebx,
2286 uint32_t *ecx, uint32_t *edx)
2288 X86CPU *cpu = x86_env_get_cpu(env);
2289 CPUState *cs = CPU(cpu);
2291 /* test if maximum index reached */
2292 if (index & 0x80000000) {
2293 if (index > env->cpuid_xlevel) {
2294 if (env->cpuid_xlevel2 > 0) {
2295 /* Handle the Centaur's CPUID instruction. */
2296 if (index > env->cpuid_xlevel2) {
2297 index = env->cpuid_xlevel2;
2298 } else if (index < 0xC0000000) {
2299 index = env->cpuid_xlevel;
2301 } else {
2302 /* Intel documentation states that invalid EAX input will
2303 * return the same information as EAX=cpuid_level
2304 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID). */
2306 index = env->cpuid_level;
2309 } else {
2310 if (index > env->cpuid_level)
2311 index = env->cpuid_level;
2314 switch(index) {
2315 case 0:
2316 *eax = env->cpuid_level;
2317 *ebx = env->cpuid_vendor1;
2318 *edx = env->cpuid_vendor2;
2319 *ecx = env->cpuid_vendor3;
2320 break;
2321 case 1:
2322 *eax = env->cpuid_version;
2323 *ebx = (cpu->apic_id << 24) |
2324 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2325 *ecx = env->features[FEAT_1_ECX];
2326 *edx = env->features[FEAT_1_EDX];
2327 if (cs->nr_cores * cs->nr_threads > 1) {
2328 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2329 *edx |= 1 << 28; /* HTT bit */
2331 break;
2332 case 2:
2333 /* cache info: needed for Pentium Pro compatibility */
2334 if (cpu->cache_info_passthrough) {
2335 host_cpuid(index, 0, eax, ebx, ecx, edx);
2336 break;
2338 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2339 *ebx = 0;
2340 *ecx = 0;
2341 *edx = (L1D_DESCRIPTOR << 16) | \
2342 (L1I_DESCRIPTOR << 8) | \
2343 (L2_DESCRIPTOR);
2344 break;
2345 case 4:
2346 /* cache info: needed for Core compatibility */
2347 if (cpu->cache_info_passthrough) {
2348 host_cpuid(index, count, eax, ebx, ecx, edx);
2349 *eax &= ~0xFC000000;
2350 } else {
2351 *eax = 0;
2352 switch (count) {
2353 case 0: /* L1 dcache info */
2354 *eax |= CPUID_4_TYPE_DCACHE | \
2355 CPUID_4_LEVEL(1) | \
2356 CPUID_4_SELF_INIT_LEVEL;
2357 *ebx = (L1D_LINE_SIZE - 1) | \
2358 ((L1D_PARTITIONS - 1) << 12) | \
2359 ((L1D_ASSOCIATIVITY - 1) << 22);
2360 *ecx = L1D_SETS - 1;
2361 *edx = CPUID_4_NO_INVD_SHARING;
2362 break;
2363 case 1: /* L1 icache info */
2364 *eax |= CPUID_4_TYPE_ICACHE | \
2365 CPUID_4_LEVEL(1) | \
2366 CPUID_4_SELF_INIT_LEVEL;
2367 *ebx = (L1I_LINE_SIZE - 1) | \
2368 ((L1I_PARTITIONS - 1) << 12) | \
2369 ((L1I_ASSOCIATIVITY - 1) << 22);
2370 *ecx = L1I_SETS - 1;
2371 *edx = CPUID_4_NO_INVD_SHARING;
2372 break;
2373 case 2: /* L2 cache info */
2374 *eax |= CPUID_4_TYPE_UNIFIED | \
2375 CPUID_4_LEVEL(2) | \
2376 CPUID_4_SELF_INIT_LEVEL;
2377 if (cs->nr_threads > 1) {
2378 *eax |= (cs->nr_threads - 1) << 14;
2380 *ebx = (L2_LINE_SIZE - 1) | \
2381 ((L2_PARTITIONS - 1) << 12) | \
2382 ((L2_ASSOCIATIVITY - 1) << 22);
2383 *ecx = L2_SETS - 1;
2384 *edx = CPUID_4_NO_INVD_SHARING;
2385 break;
2386 default: /* end of info */
2387 *eax = 0;
2388 *ebx = 0;
2389 *ecx = 0;
2390 *edx = 0;
2391 break;
2395 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2396 if ((*eax & 31) && cs->nr_cores > 1) {
2397 *eax |= (cs->nr_cores - 1) << 26;
2399 break;
2400 case 5:
2401 /* mwait info: needed for Core compatibility */
2402 *eax = 0; /* Smallest monitor-line size in bytes */
2403 *ebx = 0; /* Largest monitor-line size in bytes */
2404 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2405 *edx = 0;
2406 break;
2407 case 6:
2408 /* Thermal and Power Leaf */
2409 *eax = env->features[FEAT_6_EAX];
2410 *ebx = 0;
2411 *ecx = 0;
2412 *edx = 0;
2413 break;
2414 case 7:
2415 /* Structured Extended Feature Flags Enumeration Leaf */
2416 if (count == 0) {
2417 *eax = 0; /* Maximum ECX value for sub-leaves */
2418 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2419 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
2420 *edx = 0; /* Reserved */
2421 } else {
2422 *eax = 0;
2423 *ebx = 0;
2424 *ecx = 0;
2425 *edx = 0;
2427 break;
2428 case 9:
2429 /* Direct Cache Access Information Leaf */
2430 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2431 *ebx = 0;
2432 *ecx = 0;
2433 *edx = 0;
2434 break;
2435 case 0xA:
2436 /* Architectural Performance Monitoring Leaf */
2437 if (kvm_enabled() && cpu->enable_pmu) {
2438 KVMState *s = cs->kvm_state;
2440 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2441 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2442 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2443 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2444 } else {
2445 *eax = 0;
2446 *ebx = 0;
2447 *ecx = 0;
2448 *edx = 0;
2450 break;
2451 case 0xD: {
2452 KVMState *s = cs->kvm_state;
2453 uint64_t kvm_mask;
2454 int i;
2456 /* Processor Extended State */
2457 *eax = 0;
2458 *ebx = 0;
2459 *ecx = 0;
2460 *edx = 0;
2461 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) || !kvm_enabled()) {
2462 break;
2464 kvm_mask =
2465 kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX) |
2466 ((uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32);
2468 if (count == 0) {
2469 *ecx = 0x240;
2470 for (i = 2; i < ARRAY_SIZE(ext_save_areas); i++) {
2471 const ExtSaveArea *esa = &ext_save_areas[i];
2472 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2473 (kvm_mask & (1 << i)) != 0) {
2474 if (i < 32) {
2475 *eax |= 1 << i;
2476 } else {
2477 *edx |= 1 << (i - 32);
2479 *ecx = MAX(*ecx, esa->offset + esa->size);
2482 *eax |= kvm_mask & (XSTATE_FP | XSTATE_SSE);
2483 *ebx = *ecx;
2484 } else if (count == 1) {
2485 *eax = env->features[FEAT_XSAVE];
2486 } else if (count < ARRAY_SIZE(ext_save_areas)) {
2487 const ExtSaveArea *esa = &ext_save_areas[count];
2488 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2489 (kvm_mask & (1 << count)) != 0) {
2490 *eax = esa->size;
2491 *ebx = esa->offset;
2494 break;
2496 case 0x80000000:
2497 *eax = env->cpuid_xlevel;
2498 *ebx = env->cpuid_vendor1;
2499 *edx = env->cpuid_vendor2;
2500 *ecx = env->cpuid_vendor3;
2501 break;
2502 case 0x80000001:
2503 *eax = env->cpuid_version;
2504 *ebx = 0;
2505 *ecx = env->features[FEAT_8000_0001_ECX];
2506 *edx = env->features[FEAT_8000_0001_EDX];
2508 /* The Linux kernel checks for the CMPLegacy bit and
2509 * discards multiple thread information if it is set.
2510 * So don't set it here for Intel, to make Linux guests happy. */
2512 if (cs->nr_cores * cs->nr_threads > 1) {
2513 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2514 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2515 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2516 *ecx |= 1 << 1; /* CmpLegacy bit */
2519 break;
2520 case 0x80000002:
2521 case 0x80000003:
2522 case 0x80000004:
2523 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2524 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2525 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2526 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2527 break;
2528 case 0x80000005:
2529 /* cache info (L1 cache) */
2530 if (cpu->cache_info_passthrough) {
2531 host_cpuid(index, 0, eax, ebx, ecx, edx);
2532 break;
2534 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2535 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2536 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2537 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2538 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2539 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2540 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2541 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2542 break;
2543 case 0x80000006:
2544 /* cache info (L2 cache) */
2545 if (cpu->cache_info_passthrough) {
2546 host_cpuid(index, 0, eax, ebx, ecx, edx);
2547 break;
2549 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2550 (L2_DTLB_2M_ENTRIES << 16) | \
2551 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2552 (L2_ITLB_2M_ENTRIES);
2553 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2554 (L2_DTLB_4K_ENTRIES << 16) | \
2555 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2556 (L2_ITLB_4K_ENTRIES);
2557 *ecx = (L2_SIZE_KB_AMD << 16) | \
2558 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2559 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2560 *edx = ((L3_SIZE_KB/512) << 18) | \
2561 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2562 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2563 break;
2564 case 0x80000007:
2565 *eax = 0;
2566 *ebx = 0;
2567 *ecx = 0;
2568 *edx = env->features[FEAT_8000_0007_EDX];
2569 break;
2570 case 0x80000008:
2571 /* virtual & phys address size in low 2 bytes. */
2572 /* XXX: This value must match the one used in the MMU code. */
2573 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2574 /* 64 bit processor */
2575 /* XXX: The physical address space is limited to 42 bits in exec.c. */
2576 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
2577 } else {
2578 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
2579 *eax = 0x00000024; /* 36 bits physical */
2580 } else {
2581 *eax = 0x00000020; /* 32 bits physical */
2584 *ebx = 0;
2585 *ecx = 0;
2586 *edx = 0;
2587 if (cs->nr_cores * cs->nr_threads > 1) {
2588 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2590 break;
2591 case 0x8000000A:
2592 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2593 *eax = 0x00000001; /* SVM Revision */
2594 *ebx = 0x00000010; /* nr of ASIDs */
2595 *ecx = 0;
2596 *edx = env->features[FEAT_SVM]; /* optional features */
2597 } else {
2598 *eax = 0;
2599 *ebx = 0;
2600 *ecx = 0;
2601 *edx = 0;
2603 break;
2604 case 0xC0000000:
2605 *eax = env->cpuid_xlevel2;
2606 *ebx = 0;
2607 *ecx = 0;
2608 *edx = 0;
2609 break;
2610 case 0xC0000001:
2611 /* Support for VIA CPU's CPUID instruction */
2612 *eax = env->cpuid_version;
2613 *ebx = 0;
2614 *ecx = 0;
2615 *edx = env->features[FEAT_C000_0001_EDX];
2616 break;
2617 case 0xC0000002:
2618 case 0xC0000003:
2619 case 0xC0000004:
2620 /* Reserved for the future, and now filled with zero */
2621 *eax = 0;
2622 *ebx = 0;
2623 *ecx = 0;
2624 *edx = 0;
2625 break;
2626 default:
2627 /* reserved values: zero */
2628 *eax = 0;
2629 *ebx = 0;
2630 *ecx = 0;
2631 *edx = 0;
2632 break;
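/*
 * A standalone sketch (not part of the build) decoding the CPUID.01H:EBX
 * value built in case 1 above, for a hypothetical apic_id of 2 and four
 * logical CPUs per package.  Bits 31..24 hold the initial APIC ID,
 * bits 23..16 the logical processor count, and bits 15..8 the CLFLUSH line
 * size in 8-byte units (so the hard-coded 8 means 64-byte lines).
 */
#if 0
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t ebx = (2u << 24) | (4u << 16) | (8u << 8);

    printf("initial APIC ID  : %" PRIu32 "\n", (ebx >> 24) & 0xff);
    printf("logical CPUs     : %" PRIu32 "\n", (ebx >> 16) & 0xff);
    printf("CLFLUSH line size: %" PRIu32 " bytes\n", ((ebx >> 8) & 0xff) * 8);
    return 0;
}
#endif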
2636 /* CPUClass::reset() */
2637 static void x86_cpu_reset(CPUState *s)
2639 X86CPU *cpu = X86_CPU(s);
2640 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2641 CPUX86State *env = &cpu->env;
2642 int i;
2644 xcc->parent_reset(s);
2646 memset(env, 0, offsetof(CPUX86State, cpuid_level));
2648 tlb_flush(s, 1);
2650 env->old_exception = -1;
2652 /* init to reset state */
2654 #ifdef CONFIG_SOFTMMU
2655 env->hflags |= HF_SOFTMMU_MASK;
2656 #endif
2657 env->hflags2 |= HF2_GIF_MASK;
2659 cpu_x86_update_cr0(env, 0x60000010);
2660 env->a20_mask = ~0x0;
2661 env->smbase = 0x30000;
2663 env->idt.limit = 0xffff;
2664 env->gdt.limit = 0xffff;
2665 env->ldt.limit = 0xffff;
2666 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2667 env->tr.limit = 0xffff;
2668 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
2670 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2671 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2672 DESC_R_MASK | DESC_A_MASK);
2673 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2674 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2675 DESC_A_MASK);
2676 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2677 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2678 DESC_A_MASK);
2679 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2680 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2681 DESC_A_MASK);
2682 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2683 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2684 DESC_A_MASK);
2685 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2686 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2687 DESC_A_MASK);
2689 env->eip = 0xfff0;
2690 env->regs[R_EDX] = env->cpuid_version;
2692 env->eflags = 0x2;
2694 /* FPU init */
2695 for (i = 0; i < 8; i++) {
2696 env->fptags[i] = 1;
2698 cpu_set_fpuc(env, 0x37f);
2700 env->mxcsr = 0x1f80;
2701 env->xstate_bv = XSTATE_FP | XSTATE_SSE;
2703 env->pat = 0x0007040600070406ULL;
2704 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2706 memset(env->dr, 0, sizeof(env->dr));
2707 env->dr[6] = DR6_FIXED_1;
2708 env->dr[7] = DR7_FIXED_1;
2709 cpu_breakpoint_remove_all(s, BP_CPU);
2710 cpu_watchpoint_remove_all(s, BP_CPU);
2712 env->xcr0 = 1;
2715 /* SDM 11.11.5 requires:
2716 * - IA32_MTRR_DEF_TYPE MSR.E = 0
2717 * - IA32_MTRR_PHYSMASKn.V = 0
2718 * All other bits are undefined. For simplification, zero it all. */
2720 env->mtrr_deftype = 0;
2721 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
2722 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
2724 #if !defined(CONFIG_USER_ONLY)
2725 /* We hard-wire the BSP to the first CPU. */
2726 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
2728 s->halted = !cpu_is_bsp(cpu);
2730 if (kvm_enabled()) {
2731 kvm_arch_reset_vcpu(cpu);
2733 #endif
2736 #ifndef CONFIG_USER_ONLY
2737 bool cpu_is_bsp(X86CPU *cpu)
2739 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2742 /* TODO: remove me, when reset over QOM tree is implemented */
2743 static void x86_cpu_machine_reset_cb(void *opaque)
2745 X86CPU *cpu = opaque;
2746 cpu_reset(CPU(cpu));
2748 #endif
2750 static void mce_init(X86CPU *cpu)
2752 CPUX86State *cenv = &cpu->env;
2753 unsigned int bank;
2755 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2756 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2757 (CPUID_MCE | CPUID_MCA)) {
2758 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2759 cenv->mcg_ctl = ~(uint64_t)0;
2760 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2761 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
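/*
 * mce_init() keys off the base family field of cpuid_version.  For reference,
 * a standalone sketch (not part of the build) of the full architectural
 * decode of that dword, using a made-up value for family 6, model 0x2a,
 * stepping 7:
 */
#if 0
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t version  = 0x000206a7;
    uint32_t stepping = version & 0xf;
    uint32_t model    = (version >> 4) & 0xf;
    uint32_t family   = (version >> 8) & 0xf;

    if (family == 6 || family == 0xf) {
        model |= ((version >> 16) & 0xf) << 4;   /* extended model */
    }
    if (family == 0xf) {
        family += (version >> 20) & 0xff;        /* extended family */
    }
    /* prints "family 6 model 0x2a stepping 7" */
    printf("family %" PRIu32 " model 0x%" PRIx32 " stepping %" PRIu32 "\n",
           family, model, stepping);
    return 0;
}
#endif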
2766 #ifndef CONFIG_USER_ONLY
2767 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2769 APICCommonState *apic;
2770 const char *apic_type = "apic";
2772 if (kvm_apic_in_kernel()) {
2773 apic_type = "kvm-apic";
2774 } else if (xen_enabled()) {
2775 apic_type = "xen-apic";
2778 cpu->apic_state = DEVICE(object_new(apic_type));
2780 object_property_add_child(OBJECT(cpu), "apic",
2781 OBJECT(cpu->apic_state), NULL);
2782 qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
2783 /* TODO: convert to link<> */
2784 apic = APIC_COMMON(cpu->apic_state);
2785 apic->cpu = cpu;
2786 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
2789 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2791 APICCommonState *apic;
2792 static bool apic_mmio_map_once;
2794 if (cpu->apic_state == NULL) {
2795 return;
2797 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
2798 errp);
2800 /* Map APIC MMIO area */
2801 apic = APIC_COMMON(cpu->apic_state);
2802 if (!apic_mmio_map_once) {
2803 memory_region_add_subregion_overlap(get_system_memory(),
2804 apic->apicbase &
2805 MSR_IA32_APICBASE_BASE,
2806 &apic->io_memory,
2807 0x1000);
2808 apic_mmio_map_once = true;
2812 static void x86_cpu_machine_done(Notifier *n, void *unused)
2814 X86CPU *cpu = container_of(n, X86CPU, machine_done);
2815 MemoryRegion *smram =
2816 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
2818 if (smram) {
2819 cpu->smram = g_new(MemoryRegion, 1);
2820 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
2821 smram, 0, 1ull << 32);
2822 memory_region_set_enabled(cpu->smram, false);
2823 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
2826 #else
2827 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2830 #endif
2833 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
2834 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
2835 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
2836 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
2837 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
2838 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
2839 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
2841 CPUState *cs = CPU(dev);
2842 X86CPU *cpu = X86_CPU(dev);
2843 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
2844 CPUX86State *env = &cpu->env;
2845 Error *local_err = NULL;
2846 static bool ht_warned;
2848 if (cpu->apic_id < 0) {
2849 error_setg(errp, "apic-id property was not initialized properly");
2850 return;
2853 if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
2854 env->cpuid_level = 7;
2857 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
2858 * CPUID[1].EDX. */
2860 if (IS_AMD_CPU(env)) {
2861 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
2862 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
2863 & CPUID_EXT2_AMD_ALIASES);
2867 if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
2868 error_setg(&local_err,
2869 kvm_enabled() ?
2870 "Host doesn't support requested features" :
2871 "TCG doesn't support requested features");
2872 goto out;
2875 #ifndef CONFIG_USER_ONLY
2876 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
2878 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
2879 x86_cpu_apic_create(cpu, &local_err);
2880 if (local_err != NULL) {
2881 goto out;
2884 #endif
2886 mce_init(cpu);
2888 #ifndef CONFIG_USER_ONLY
2889 if (tcg_enabled()) {
2890 AddressSpace *newas = g_new(AddressSpace, 1);
2892 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
2893 cpu->cpu_as_root = g_new(MemoryRegion, 1);
2895 /* Outer container... */
2896 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
2897 memory_region_set_enabled(cpu->cpu_as_root, true);
2899 /* ... with two regions inside: normal system memory with low
2900 * priority, and... */
2902 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
2903 get_system_memory(), 0, ~0ull);
2904 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
2905 memory_region_set_enabled(cpu->cpu_as_mem, true);
2906 address_space_init(newas, cpu->cpu_as_root, "CPU");
2907 cs->num_ases = 1;
2908 cpu_address_space_init(cs, newas, 0);
2910 /* ... SMRAM with higher priority, linked from /machine/smram. */
2911 cpu->machine_done.notify = x86_cpu_machine_done;
2912 qemu_add_machine_init_done_notifier(&cpu->machine_done);
2914 #endif
2916 qemu_init_vcpu(cs);
2918 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
2919 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
2920 * based on inputs (sockets,cores,threads), it is still better to give
2921 * users a warning.
2923 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
2924 * cs->nr_threads hasn't been populated yet and the check is incorrect. */
2926 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
2927 error_report("AMD CPU doesn't support hyperthreading. Please configure"
2928 " -smp options properly.");
2929 ht_warned = true;
2932 x86_cpu_apic_realize(cpu, &local_err);
2933 if (local_err != NULL) {
2934 goto out;
2936 cpu_reset(cs);
2938 xcc->parent_realize(dev, &local_err);
2940 out:
2941 if (local_err != NULL) {
2942 error_propagate(errp, local_err);
2943 return;
2947 typedef struct BitProperty {
2948 uint32_t *ptr;
2949 uint32_t mask;
2950 } BitProperty;
2952 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
2953 void *opaque, Error **errp)
2955 BitProperty *fp = opaque;
2956 bool value = (*fp->ptr & fp->mask) == fp->mask;
2957 visit_type_bool(v, name, &value, errp);
2960 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
2961 void *opaque, Error **errp)
2963 DeviceState *dev = DEVICE(obj);
2964 BitProperty *fp = opaque;
2965 Error *local_err = NULL;
2966 bool value;
2968 if (dev->realized) {
2969 qdev_prop_set_after_realize(dev, name, errp);
2970 return;
2973 visit_type_bool(v, name, &value, &local_err);
2974 if (local_err) {
2975 error_propagate(errp, local_err);
2976 return;
2979 if (value) {
2980 *fp->ptr |= fp->mask;
2981 } else {
2982 *fp->ptr &= ~fp->mask;
2986 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
2987 void *opaque)
2989 BitProperty *prop = opaque;
2990 g_free(prop);
2993 /* Register a boolean property to get/set a single bit in a uint32_t field.
2995 * The same property name can be registered multiple times to make it affect
2996 * multiple bits in the same FeatureWord. In that case, the getter will return
2997 * true only if all bits are set. */
2999 static void x86_cpu_register_bit_prop(X86CPU *cpu,
3000 const char *prop_name,
3001 uint32_t *field,
3002 int bitnr)
3004 BitProperty *fp;
3005 ObjectProperty *op;
3006 uint32_t mask = (1UL << bitnr);
3008 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3009 if (op) {
3010 fp = op->opaque;
3011 assert(fp->ptr == field);
3012 fp->mask |= mask;
3013 } else {
3014 fp = g_new0(BitProperty, 1);
3015 fp->ptr = field;
3016 fp->mask = mask;
3017 object_property_add(OBJECT(cpu), prop_name, "bool",
3018 x86_cpu_get_bit_prop,
3019 x86_cpu_set_bit_prop,
3020 x86_cpu_release_bit_prop, fp, &error_abort);
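/*
 * The getter/setter pair registered above treats the accumulated mask as a
 * unit: the property reads as true only when every bit it covers is set.
 * A tiny standalone illustration with a made-up two-bit mask (not part of
 * the build):
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t word = 0;
    uint32_t mask = (1u << 3) | (1u << 5);   /* one name registered for bits 3 and 5 */

    word |= mask;                            /* setter with value == true  */
    printf("%d\n", (word & mask) == mask);   /* prints 1                   */

    word &= ~(1u << 5);                      /* clear just one of the bits */
    printf("%d\n", (word & mask) == mask);   /* prints 0                   */
    return 0;
}
#endif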
3024 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3025 FeatureWord w,
3026 int bitnr)
3028 Object *obj = OBJECT(cpu);
3029 int i;
3030 char **names;
3031 FeatureWordInfo *fi = &feature_word_info[w];
3033 if (!fi->feat_names) {
3034 return;
3036 if (!fi->feat_names[bitnr]) {
3037 return;
3040 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
3042 feat2prop(names[0]);
3043 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
3045 for (i = 1; names[i]; i++) {
3046 feat2prop(names[i]);
3047 object_property_add_alias(obj, names[i], obj, names[0],
3048 &error_abort);
3051 g_strfreev(names);
3054 static void x86_cpu_initfn(Object *obj)
3056 CPUState *cs = CPU(obj);
3057 X86CPU *cpu = X86_CPU(obj);
3058 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
3059 CPUX86State *env = &cpu->env;
3060 FeatureWord w;
3061 static int inited;
3063 cs->env_ptr = env;
3064 cpu_exec_init(cs, &error_abort);
3066 object_property_add(obj, "family", "int",
3067 x86_cpuid_version_get_family,
3068 x86_cpuid_version_set_family, NULL, NULL, NULL);
3069 object_property_add(obj, "model", "int",
3070 x86_cpuid_version_get_model,
3071 x86_cpuid_version_set_model, NULL, NULL, NULL);
3072 object_property_add(obj, "stepping", "int",
3073 x86_cpuid_version_get_stepping,
3074 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
3075 object_property_add_str(obj, "vendor",
3076 x86_cpuid_get_vendor,
3077 x86_cpuid_set_vendor, NULL);
3078 object_property_add_str(obj, "model-id",
3079 x86_cpuid_get_model_id,
3080 x86_cpuid_set_model_id, NULL);
3081 object_property_add(obj, "tsc-frequency", "int",
3082 x86_cpuid_get_tsc_freq,
3083 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
3084 object_property_add(obj, "apic-id", "int",
3085 x86_cpuid_get_apic_id,
3086 x86_cpuid_set_apic_id, NULL, NULL, NULL);
3087 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
3088 x86_cpu_get_feature_words,
3089 NULL, NULL, (void *)env->features, NULL);
3090 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
3091 x86_cpu_get_feature_words,
3092 NULL, NULL, (void *)cpu->filtered_features, NULL);
3094 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
3096 #ifndef CONFIG_USER_ONLY
3097 /* Any code creating new X86CPU objects has to set apic-id explicitly */
3098 cpu->apic_id = -1;
3099 #endif
3101 for (w = 0; w < FEATURE_WORDS; w++) {
3102 int bitnr;
3104 for (bitnr = 0; bitnr < 32; bitnr++) {
3105 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
3109 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
3111 /* init various static tables used in TCG mode */
3112 if (tcg_enabled() && !inited) {
3113 inited = 1;
3114 tcg_x86_init();
3118 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3120 X86CPU *cpu = X86_CPU(cs);
3122 return cpu->apic_id;
3125 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3127 X86CPU *cpu = X86_CPU(cs);
3129 return cpu->env.cr[0] & CR0_PG_MASK;
3132 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3134 X86CPU *cpu = X86_CPU(cs);
3136 cpu->env.eip = value;
3139 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3141 X86CPU *cpu = X86_CPU(cs);
3143 cpu->env.eip = tb->pc - tb->cs_base;
3146 static bool x86_cpu_has_work(CPUState *cs)
3148 X86CPU *cpu = X86_CPU(cs);
3149 CPUX86State *env = &cpu->env;
3151 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3152 CPU_INTERRUPT_POLL)) &&
3153 (env->eflags & IF_MASK)) ||
3154 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3155 CPU_INTERRUPT_INIT |
3156 CPU_INTERRUPT_SIPI |
3157 CPU_INTERRUPT_MCE)) ||
3158 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3159 !(env->hflags & HF_SMM_MASK));
3162 static Property x86_cpu_properties[] = {
3163 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
3164 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
3165 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
3166 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
3167 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
3168 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
3169 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
3170 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
3171 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
3172 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
3173 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
3174 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
3175 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
3176 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
3177 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
3178 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
3179 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
3180 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
3181 DEFINE_PROP_END_OF_LIST()
3184 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
3186 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3187 CPUClass *cc = CPU_CLASS(oc);
3188 DeviceClass *dc = DEVICE_CLASS(oc);
3190 xcc->parent_realize = dc->realize;
3191 dc->realize = x86_cpu_realizefn;
3192 dc->props = x86_cpu_properties;
3194 xcc->parent_reset = cc->reset;
3195 cc->reset = x86_cpu_reset;
3196 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
3198 cc->class_by_name = x86_cpu_class_by_name;
3199 cc->parse_features = x86_cpu_parse_featurestr;
3200 cc->has_work = x86_cpu_has_work;
3201 cc->do_interrupt = x86_cpu_do_interrupt;
3202 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
3203 cc->dump_state = x86_cpu_dump_state;
3204 cc->set_pc = x86_cpu_set_pc;
3205 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
3206 cc->gdb_read_register = x86_cpu_gdb_read_register;
3207 cc->gdb_write_register = x86_cpu_gdb_write_register;
3208 cc->get_arch_id = x86_cpu_get_arch_id;
3209 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
3210 #ifdef CONFIG_USER_ONLY
3211 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
3212 #else
3213 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
3214 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
3215 cc->write_elf64_note = x86_cpu_write_elf64_note;
3216 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
3217 cc->write_elf32_note = x86_cpu_write_elf32_note;
3218 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
3219 cc->vmsd = &vmstate_x86_cpu;
3220 #endif
3221 cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
3222 #ifndef CONFIG_USER_ONLY
3223 cc->debug_excp_handler = breakpoint_handler;
3224 #endif
3225 cc->cpu_exec_enter = x86_cpu_exec_enter;
3226 cc->cpu_exec_exit = x86_cpu_exec_exit;
3229 /* Reason: x86_cpu_initfn() calls cpu_exec_init(), which saves the
3230 * object in cpus -> dangling pointer after final object_unref(). */
3232 dc->cannot_destroy_with_object_finalize_yet = true;
3235 static const TypeInfo x86_cpu_type_info = {
3236 .name = TYPE_X86_CPU,
3237 .parent = TYPE_CPU,
3238 .instance_size = sizeof(X86CPU),
3239 .instance_init = x86_cpu_initfn,
3240 .abstract = true,
3241 .class_size = sizeof(X86CPUClass),
3242 .class_init = x86_cpu_common_class_init,
3245 static void x86_cpu_register_types(void)
3247 int i;
3249 type_register_static(&x86_cpu_type_info);
3250 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3251 x86_register_cpudef_type(&builtin_x86_defs[i]);
3253 #ifdef CONFIG_KVM
3254 type_register_static(&host_x86_cpu_type_info);
3255 #endif
3258 type_init(x86_cpu_register_types)