loader: fix NEGATIVE_RETURNS
[qemu.git] / target-i386 / cpu.c
blob3f13dfe5f5f03b121ed43107223301144a09b506
1 /*
2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include <stdlib.h>
20 #include <stdio.h>
21 #include <string.h>
22 #include <inttypes.h>
24 #include "cpu.h"
25 #include "sysemu/kvm.h"
26 #include "sysemu/cpus.h"
27 #include "kvm_i386.h"
28 #include "topology.h"
30 #include "qemu/option.h"
31 #include "qemu/config-file.h"
32 #include "qapi/qmp/qerror.h"
34 #include "qapi-types.h"
35 #include "qapi-visit.h"
36 #include "qapi/visitor.h"
37 #include "sysemu/arch_init.h"
39 #include "hw/hw.h"
40 #if defined(CONFIG_KVM)
41 #include <linux/kvm_para.h>
42 #endif
44 #include "sysemu/sysemu.h"
45 #include "hw/qdev-properties.h"
46 #include "hw/cpu/icc_bus.h"
47 #ifndef CONFIG_USER_ONLY
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
50 #endif
53 /* Cache topology CPUID constants: */
55 /* CPUID Leaf 2 Descriptors */
57 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
58 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
59 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
62 /* CPUID Leaf 4 constants: */
64 /* EAX: */
65 #define CPUID_4_TYPE_DCACHE 1
66 #define CPUID_4_TYPE_ICACHE 2
67 #define CPUID_4_TYPE_UNIFIED 3
69 #define CPUID_4_LEVEL(l) ((l) << 5)
71 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
72 #define CPUID_4_FULLY_ASSOC (1 << 9)
74 /* EDX: */
75 #define CPUID_4_NO_INVD_SHARING (1 << 0)
76 #define CPUID_4_INCLUSIVE (1 << 1)
77 #define CPUID_4_COMPLEX_IDX (1 << 2)
79 #define ASSOC_FULL 0xFF
81 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
82 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
83 a == 2 ? 0x2 : \
84 a == 4 ? 0x4 : \
85 a == 8 ? 0x6 : \
86 a == 16 ? 0x8 : \
87 a == 32 ? 0xA : \
88 a == 48 ? 0xB : \
89 a == 64 ? 0xC : \
90 a == 96 ? 0xD : \
91 a == 128 ? 0xE : \
92 a == ASSOC_FULL ? 0xF : \
93 0 /* invalid value */)
96 /* Definitions of the hardcoded cache entries we expose: */
98 /* L1 data cache: */
99 #define L1D_LINE_SIZE 64
100 #define L1D_ASSOCIATIVITY 8
101 #define L1D_SETS 64
102 #define L1D_PARTITIONS 1
103 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
104 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
105 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
106 #define L1D_LINES_PER_TAG 1
107 #define L1D_SIZE_KB_AMD 64
108 #define L1D_ASSOCIATIVITY_AMD 2
110 /* L1 instruction cache: */
111 #define L1I_LINE_SIZE 64
112 #define L1I_ASSOCIATIVITY 8
113 #define L1I_SETS 64
114 #define L1I_PARTITIONS 1
115 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
116 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
117 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
118 #define L1I_LINES_PER_TAG 1
119 #define L1I_SIZE_KB_AMD 64
120 #define L1I_ASSOCIATIVITY_AMD 2
122 /* Level 2 unified cache: */
123 #define L2_LINE_SIZE 64
124 #define L2_ASSOCIATIVITY 16
125 #define L2_SETS 4096
126 #define L2_PARTITIONS 1
127 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
128 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
129 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
130 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
131 #define L2_LINES_PER_TAG 1
132 #define L2_SIZE_KB_AMD 512
134 /* No L3 cache: */
135 #define L3_SIZE_KB 0 /* disabled */
136 #define L3_ASSOCIATIVITY 0 /* disabled */
137 #define L3_LINES_PER_TAG 0 /* disabled */
138 #define L3_LINE_SIZE 0 /* disabled */
140 /* TLB definitions: */
142 #define L1_DTLB_2M_ASSOC 1
143 #define L1_DTLB_2M_ENTRIES 255
144 #define L1_DTLB_4K_ASSOC 1
145 #define L1_DTLB_4K_ENTRIES 255
147 #define L1_ITLB_2M_ASSOC 1
148 #define L1_ITLB_2M_ENTRIES 255
149 #define L1_ITLB_4K_ASSOC 1
150 #define L1_ITLB_4K_ENTRIES 255
152 #define L2_DTLB_2M_ASSOC 0 /* disabled */
153 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
154 #define L2_DTLB_4K_ASSOC 4
155 #define L2_DTLB_4K_ENTRIES 512
157 #define L2_ITLB_2M_ASSOC 0 /* disabled */
158 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
159 #define L2_ITLB_4K_ASSOC 4
160 #define L2_ITLB_4K_ENTRIES 512
164 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
165 uint32_t vendor2, uint32_t vendor3)
167 int i;
168 for (i = 0; i < 4; i++) {
169 dst[i] = vendor1 >> (8 * i);
170 dst[i + 4] = vendor2 >> (8 * i);
171 dst[i + 8] = vendor3 >> (8 * i);
173 dst[CPUID_VENDOR_SZ] = '\0';
/* feature flags taken from "Intel Processor Identification and the CPUID
 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
 * between feature naming conventions, aliases may be added.
 *
 * Bit-indexed names for CPUID[EAX=1].EDX; NULL marks bits with no name yet.
 */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse",
    "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep",
    "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
    NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss",
    "ht" /* Intel htt */, "tm", "ia64", "pbe",
};
/* Bit-indexed names for CPUID[EAX=1].ECX; '|' separates accepted aliases. */
static const char *ext_feature_name[] = {
    "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
    "ds_cpl", "vmx", "smx", "est",
    "tm2", "ssse3", "cid", NULL,
    "fma", "cx16", "xtpr", "pdcm",
    NULL, "pcid", "dca", "sse4.1|sse4_1",
    "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
    "tsc-deadline", "aes", "xsave", "osxsave",
    "avx", "f16c", "rdrand", "hypervisor",
};
/* Feature names that are already defined on feature_name[] but are set on
 * CPUID[8000_0001].EDX on AMD CPUs don't have their names on
 * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
 * if and only if CPU vendor is AMD.
 */
static const char *ext2_feature_name[] = {
    NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
    NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
    NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
    NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
    NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
    "nx|xd", NULL, "mmxext", NULL /* mmx */,
    NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
    NULL, "lm|i64", "3dnowext", "3dnow",
};
/* Bit-indexed names for CPUID[EAX=8000_0001h].ECX. */
static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
    "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", "ibs", "xop",
    "skinit", "wdt", NULL, "lwp",
    "fma4", "tce", NULL, "nodeid_msr",
    NULL, "tbm", "topoext", "perfctr_core",
    "perfctr_nb", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* Bit-indexed names for CPUID[EAX=C000_0001h].EDX (VIA/Centaur leaf). */
static const char *ext4_feature_name[] = {
    NULL, NULL, "xstore", "xstore-en",
    NULL, NULL, "xcrypt", "xcrypt-en",
    "ace2", "ace2-en", "phe", "phe-en",
    "pmm", "pmm-en", NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* Bit-indexed names for the KVM paravirt feature leaf (KVM_CPUID_FEATURES).
 * Note: "kvmclock" appears twice on purpose — bit 0 (KVM_FEATURE_CLOCKSOURCE)
 * and bit 3 (KVM_FEATURE_CLOCKSOURCE2) are both exposed under the same name.
 */
static const char *kvm_feature_name[] = {
    "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
    "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    "kvmclock-stable-bit", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* Bit-indexed names for CPUID[EAX=8000_000Ah].EDX (SVM sub-features). */
static const char *svm_feature_name[] = {
    "npt", "lbrv", "svm_lock", "nrip_save",
    "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
    NULL, NULL, "pause_filter", NULL,
    "pfthreshold", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
/* Bit-indexed names for CPUID[EAX=7,ECX=0].EBX. */
static const char *cpuid_7_0_ebx_feature_name[] = {
    "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
    "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
    "avx512f", NULL, "rdseed", "adx", "smap", NULL, NULL, NULL,
    NULL, NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
};
/* Bit-indexed names for CPUID[EAX=8000_0007h].EDX (APM / power management);
 * only bit 8 (invariant TSC) currently has a name.
 */
static const char *cpuid_apm_edx_feature_name[] = {
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    "invtsc", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
277 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
278 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
279 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
280 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
281 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
282 CPUID_PSE36 | CPUID_FXSR)
283 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
284 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
285 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
286 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
287 CPUID_PAE | CPUID_SEP | CPUID_APIC)
289 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
290 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
291 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
292 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
293 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS)
294 /* partly implemented:
295 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
296 /* missing:
297 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
298 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
299 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
300 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
301 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
302 /* missing:
303 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
304 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
305 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
306 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_XSAVE,
307 CPUID_EXT_OSXSAVE, CPUID_EXT_AVX, CPUID_EXT_F16C,
308 CPUID_EXT_RDRAND */
310 #ifdef TARGET_X86_64
311 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
312 #else
313 #define TCG_EXT2_X86_64_FEATURES 0
314 #endif
316 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
317 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
318 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
319 TCG_EXT2_X86_64_FEATURES)
320 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
321 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
322 #define TCG_EXT4_FEATURES 0
323 #define TCG_SVM_FEATURES 0
324 #define TCG_KVM_FEATURES 0
325 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
326 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX)
327 /* missing:
328 CPUID_7_0_EBX_FSGSBASE, CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
329 CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
330 CPUID_7_0_EBX_RDSEED */
331 #define TCG_APM_FEATURES 0
/* Describes one feature word: where its bits come from in CPUID space,
 * the per-bit name table used for parsing/printing, and which bits TCG
 * can emulate or migration cannot carry.
 */
typedef struct FeatureWordInfo {
    const char **feat_names;     /* bit-number -> feature-name table */
    uint32_t cpuid_eax;          /* Input EAX for CPUID */
    bool cpuid_needs_ecx;        /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;          /* Input ECX value for CPUID */
    int cpuid_reg;               /* output register (R_* constant) */
    uint32_t tcg_features;       /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
} FeatureWordInfo;
344 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
345 [FEAT_1_EDX] = {
346 .feat_names = feature_name,
347 .cpuid_eax = 1, .cpuid_reg = R_EDX,
348 .tcg_features = TCG_FEATURES,
350 [FEAT_1_ECX] = {
351 .feat_names = ext_feature_name,
352 .cpuid_eax = 1, .cpuid_reg = R_ECX,
353 .tcg_features = TCG_EXT_FEATURES,
355 [FEAT_8000_0001_EDX] = {
356 .feat_names = ext2_feature_name,
357 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
358 .tcg_features = TCG_EXT2_FEATURES,
360 [FEAT_8000_0001_ECX] = {
361 .feat_names = ext3_feature_name,
362 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
363 .tcg_features = TCG_EXT3_FEATURES,
365 [FEAT_C000_0001_EDX] = {
366 .feat_names = ext4_feature_name,
367 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
368 .tcg_features = TCG_EXT4_FEATURES,
370 [FEAT_KVM] = {
371 .feat_names = kvm_feature_name,
372 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
373 .tcg_features = TCG_KVM_FEATURES,
375 [FEAT_SVM] = {
376 .feat_names = svm_feature_name,
377 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
378 .tcg_features = TCG_SVM_FEATURES,
380 [FEAT_7_0_EBX] = {
381 .feat_names = cpuid_7_0_ebx_feature_name,
382 .cpuid_eax = 7,
383 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
384 .cpuid_reg = R_EBX,
385 .tcg_features = TCG_7_0_EBX_FEATURES,
387 [FEAT_8000_0007_EDX] = {
388 .feat_names = cpuid_apm_edx_feature_name,
389 .cpuid_eax = 0x80000007,
390 .cpuid_reg = R_EDX,
391 .tcg_features = TCG_APM_FEATURES,
392 .unmigratable_flags = CPUID_APM_INVTSC,
396 typedef struct X86RegisterInfo32 {
397 /* Name of register */
398 const char *name;
399 /* QAPI enum value register */
400 X86CPURegister32 qapi_enum;
401 } X86RegisterInfo32;
403 #define REGISTER(reg) \
404 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
405 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
406 REGISTER(EAX),
407 REGISTER(ECX),
408 REGISTER(EDX),
409 REGISTER(EBX),
410 REGISTER(ESP),
411 REGISTER(EBP),
412 REGISTER(ESI),
413 REGISTER(EDI),
415 #undef REGISTER
417 typedef struct ExtSaveArea {
418 uint32_t feature, bits;
419 uint32_t offset, size;
420 } ExtSaveArea;
422 static const ExtSaveArea ext_save_areas[] = {
423 [2] = { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
424 .offset = 0x240, .size = 0x100 },
425 [3] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
426 .offset = 0x3c0, .size = 0x40 },
427 [4] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
428 .offset = 0x400, .size = 0x40 },
429 [5] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
430 .offset = 0x440, .size = 0x40 },
431 [6] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
432 .offset = 0x480, .size = 0x200 },
433 [7] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
434 .offset = 0x680, .size = 0x400 },
437 const char *get_register_name_32(unsigned int reg)
439 if (reg >= CPU_NB_REGS32) {
440 return NULL;
442 return x86_reg_info_32[reg].name;
445 /* KVM-specific features that are automatically added to all CPU models
446 * when KVM is enabled.
448 static uint32_t kvm_default_features[FEATURE_WORDS] = {
449 [FEAT_KVM] = (1 << KVM_FEATURE_CLOCKSOURCE) |
450 (1 << KVM_FEATURE_NOP_IO_DELAY) |
451 (1 << KVM_FEATURE_CLOCKSOURCE2) |
452 (1 << KVM_FEATURE_ASYNC_PF) |
453 (1 << KVM_FEATURE_STEAL_TIME) |
454 (1 << KVM_FEATURE_PV_EOI) |
455 (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT),
456 [FEAT_1_ECX] = CPUID_EXT_X2APIC,
459 /* Features that are not added by default to any CPU model when KVM is enabled.
461 static uint32_t kvm_default_unset_features[FEATURE_WORDS] = {
462 [FEAT_1_EDX] = CPUID_ACPI,
463 [FEAT_1_ECX] = CPUID_EXT_MONITOR,
464 [FEAT_8000_0001_ECX] = CPUID_EXT3_SVM,
467 void x86_cpu_compat_kvm_no_autoenable(FeatureWord w, uint32_t features)
469 kvm_default_features[w] &= ~features;
472 void x86_cpu_compat_kvm_no_autodisable(FeatureWord w, uint32_t features)
474 kvm_default_unset_features[w] &= ~features;
478 * Returns the set of feature flags that are supported and migratable by
479 * QEMU, for a given FeatureWord.
481 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
483 FeatureWordInfo *wi = &feature_word_info[w];
484 uint32_t r = 0;
485 int i;
487 for (i = 0; i < 32; i++) {
488 uint32_t f = 1U << i;
489 /* If the feature name is unknown, it is not supported by QEMU yet */
490 if (!wi->feat_names[i]) {
491 continue;
493 /* Skip features known to QEMU, but explicitly marked as unmigratable */
494 if (wi->unmigratable_flags & f) {
495 continue;
497 r |= f;
499 return r;
/* Execute the CPUID instruction on the host and return the four result
 * registers.  Any of the output pointers may be NULL to ignore that
 * register.  Aborts at runtime on non-x86 hosts.
 *
 * Uses __asm__/__volatile__ (not the bare `asm` keyword) so the code also
 * compiles under strict ISO modes such as -std=c11, where GCC disables
 * the `asm` keyword.
 */
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    __asm__ __volatile__("cpuid"
                         : "=a"(vec[0]), "=b"(vec[1]),
                           "=c"(vec[2]), "=d"(vec[3])
                         : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    /* pusha/popa preserve all GPRs around CPUID — presumably because EBX
     * may be reserved as the PIC register on i386; store results through
     * the vec pointer instead of register outputs.
     */
    __asm__ __volatile__("pusha \n\t"
                         "cpuid \n\t"
                         "mov %%eax, 0(%2) \n\t"
                         "mov %%ebx, 4(%2) \n\t"
                         "mov %%ecx, 8(%2) \n\t"
                         "mov %%edx, 12(%2) \n\t"
                         "popa"
                         : : "a"(function), "c"(count), "S"(vec)
                         : "memory", "cc");
#else
    abort();
#endif

    if (eax) {
        *eax = vec[0];
    }
    if (ebx) {
        *ebx = vec[1];
    }
    if (ecx) {
        *ecx = vec[2];
    }
    if (edx) {
        *edx = vec[3];
    }
}
536 #define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
/* general substring compare of *[s1..e1) and *[s2..e2). sx is start of
 * a substring. ex if !NULL points to the first char after a substring,
 * otherwise the string is assumed to sized by a terminating nul.
 * Return lexical ordering of *s1:*s2.
 */
static int sstrcmp(const char *s1, const char *e1,
                   const char *s2, const char *e2)
{
    while (1) {
        /* mismatch or a nul terminator decides the ordering immediately */
        if (!*s1 || !*s2 || *s1 != *s2) {
            return *s1 - *s2;
        }
        s1++;
        s2++;
        if (s1 == e1 && s2 == e2) {
            return 0;
        }
        if (s1 == e1) {
            /* s1 exhausted: equal iff s2 also ends here (nul) */
            return *s2;
        }
        if (s2 == e2) {
            return *s1;
        }
    }
}
/* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
 * '|' delimited (possibly empty) strings in which case search for a match
 * within the alternatives proceeds left to right. Return 0 for success,
 * non-zero otherwise.
 */
static int altcmp(const char *s, const char *e, const char *altstr)
{
    const char *start, *end;

    start = end = altstr;
    for (;;) {
        /* advance end to the '|' separator or terminating nul */
        while (*end && *end != '|') {
            end++;
        }
        /* an empty alternative matches only an empty s */
        if ((start == end && !*s) ||
            (start != end && !sstrcmp(s, e, start, end))) {
            return 0;
        }
        if (!*end) {
            return 1;
        }
        start = ++end;
    }
}
/* search featureset for flag *[s..e), if found set corresponding bit in
 * *pval and return true, otherwise return false
 */
static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
                           const char **featureset)
{
    uint32_t mask;
    const char **name = featureset;
    bool found = false;

    /* walk all 32 bit positions; mask wraps to 0 after bit 31 */
    for (mask = 1; mask != 0; mask <<= 1, name++) {
        if (*name && !altcmp(s, e, *name)) {
            *pval |= mask;
            found = true;
        }
    }
    return found;
}
599 static void add_flagname_to_bitmaps(const char *flagname,
600 FeatureWordArray words,
601 Error **errp)
603 FeatureWord w;
604 for (w = 0; w < FEATURE_WORDS; w++) {
605 FeatureWordInfo *wi = &feature_word_info[w];
606 if (wi->feat_names &&
607 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
608 break;
611 if (w == FEATURE_WORDS) {
612 error_setg(errp, "CPU feature %s not found", flagname);
616 /* CPU class name definitions: */
618 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
619 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
/* Return type name for a given CPU model name
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}
629 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
631 ObjectClass *oc;
632 char *typename;
634 if (cpu_model == NULL) {
635 return NULL;
638 typename = x86_cpu_type_name(cpu_model);
639 oc = object_class_by_name(typename);
640 g_free(typename);
641 return oc;
644 struct X86CPUDefinition {
645 const char *name;
646 uint32_t level;
647 uint32_t xlevel;
648 uint32_t xlevel2;
649 /* vendor is zero-terminated, 12 character ASCII string */
650 char vendor[CPUID_VENDOR_SZ + 1];
651 int family;
652 int model;
653 int stepping;
654 FeatureWordArray features;
655 char model_id[48];
656 bool cache_info_passthrough;
659 static X86CPUDefinition builtin_x86_defs[] = {
661 .name = "qemu64",
662 .level = 4,
663 .vendor = CPUID_VENDOR_AMD,
664 .family = 6,
665 .model = 6,
666 .stepping = 3,
667 .features[FEAT_1_EDX] =
668 PPRO_FEATURES |
669 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
670 CPUID_PSE36,
671 .features[FEAT_1_ECX] =
672 CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT,
673 .features[FEAT_8000_0001_EDX] =
674 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
675 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
676 .features[FEAT_8000_0001_ECX] =
677 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
678 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
679 .xlevel = 0x8000000A,
682 .name = "phenom",
683 .level = 5,
684 .vendor = CPUID_VENDOR_AMD,
685 .family = 16,
686 .model = 2,
687 .stepping = 3,
688 /* Missing: CPUID_HT */
689 .features[FEAT_1_EDX] =
690 PPRO_FEATURES |
691 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
692 CPUID_PSE36 | CPUID_VME,
693 .features[FEAT_1_ECX] =
694 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
695 CPUID_EXT_POPCNT,
696 .features[FEAT_8000_0001_EDX] =
697 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
698 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
699 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
700 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
701 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
702 CPUID_EXT3_CR8LEG,
703 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
704 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
705 .features[FEAT_8000_0001_ECX] =
706 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
707 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
708 /* Missing: CPUID_SVM_LBRV */
709 .features[FEAT_SVM] =
710 CPUID_SVM_NPT,
711 .xlevel = 0x8000001A,
712 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
715 .name = "core2duo",
716 .level = 10,
717 .vendor = CPUID_VENDOR_INTEL,
718 .family = 6,
719 .model = 15,
720 .stepping = 11,
721 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
722 .features[FEAT_1_EDX] =
723 PPRO_FEATURES |
724 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
725 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
726 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
727 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
728 .features[FEAT_1_ECX] =
729 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
730 CPUID_EXT_CX16,
731 .features[FEAT_8000_0001_EDX] =
732 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
733 .features[FEAT_8000_0001_ECX] =
734 CPUID_EXT3_LAHF_LM,
735 .xlevel = 0x80000008,
736 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
739 .name = "kvm64",
740 .level = 5,
741 .vendor = CPUID_VENDOR_INTEL,
742 .family = 15,
743 .model = 6,
744 .stepping = 1,
745 /* Missing: CPUID_VME, CPUID_HT */
746 .features[FEAT_1_EDX] =
747 PPRO_FEATURES |
748 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
749 CPUID_PSE36,
750 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
751 .features[FEAT_1_ECX] =
752 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
753 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
754 .features[FEAT_8000_0001_EDX] =
755 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
756 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
757 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
758 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
759 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
760 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
761 .features[FEAT_8000_0001_ECX] =
763 .xlevel = 0x80000008,
764 .model_id = "Common KVM processor"
767 .name = "qemu32",
768 .level = 4,
769 .vendor = CPUID_VENDOR_INTEL,
770 .family = 6,
771 .model = 6,
772 .stepping = 3,
773 .features[FEAT_1_EDX] =
774 PPRO_FEATURES,
775 .features[FEAT_1_ECX] =
776 CPUID_EXT_SSE3 | CPUID_EXT_POPCNT,
777 .xlevel = 0x80000004,
780 .name = "kvm32",
781 .level = 5,
782 .vendor = CPUID_VENDOR_INTEL,
783 .family = 15,
784 .model = 6,
785 .stepping = 1,
786 .features[FEAT_1_EDX] =
787 PPRO_FEATURES |
788 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
789 .features[FEAT_1_ECX] =
790 CPUID_EXT_SSE3,
791 .features[FEAT_8000_0001_EDX] =
792 PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES,
793 .features[FEAT_8000_0001_ECX] =
795 .xlevel = 0x80000008,
796 .model_id = "Common 32-bit KVM processor"
799 .name = "coreduo",
800 .level = 10,
801 .vendor = CPUID_VENDOR_INTEL,
802 .family = 6,
803 .model = 14,
804 .stepping = 8,
805 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
806 .features[FEAT_1_EDX] =
807 PPRO_FEATURES | CPUID_VME |
808 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
809 CPUID_SS,
810 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
811 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
812 .features[FEAT_1_ECX] =
813 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
814 .features[FEAT_8000_0001_EDX] =
815 CPUID_EXT2_NX,
816 .xlevel = 0x80000008,
817 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
820 .name = "486",
821 .level = 1,
822 .vendor = CPUID_VENDOR_INTEL,
823 .family = 4,
824 .model = 8,
825 .stepping = 0,
826 .features[FEAT_1_EDX] =
827 I486_FEATURES,
828 .xlevel = 0,
831 .name = "pentium",
832 .level = 1,
833 .vendor = CPUID_VENDOR_INTEL,
834 .family = 5,
835 .model = 4,
836 .stepping = 3,
837 .features[FEAT_1_EDX] =
838 PENTIUM_FEATURES,
839 .xlevel = 0,
842 .name = "pentium2",
843 .level = 2,
844 .vendor = CPUID_VENDOR_INTEL,
845 .family = 6,
846 .model = 5,
847 .stepping = 2,
848 .features[FEAT_1_EDX] =
849 PENTIUM2_FEATURES,
850 .xlevel = 0,
853 .name = "pentium3",
854 .level = 2,
855 .vendor = CPUID_VENDOR_INTEL,
856 .family = 6,
857 .model = 7,
858 .stepping = 3,
859 .features[FEAT_1_EDX] =
860 PENTIUM3_FEATURES,
861 .xlevel = 0,
864 .name = "athlon",
865 .level = 2,
866 .vendor = CPUID_VENDOR_AMD,
867 .family = 6,
868 .model = 2,
869 .stepping = 3,
870 .features[FEAT_1_EDX] =
871 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
872 CPUID_MCA,
873 .features[FEAT_8000_0001_EDX] =
874 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
875 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
876 .xlevel = 0x80000008,
879 .name = "n270",
880 /* original is on level 10 */
881 .level = 5,
882 .vendor = CPUID_VENDOR_INTEL,
883 .family = 6,
884 .model = 28,
885 .stepping = 2,
886 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
887 .features[FEAT_1_EDX] =
888 PPRO_FEATURES |
889 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
890 CPUID_ACPI | CPUID_SS,
891 /* Some CPUs got no CPUID_SEP */
892 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
893 * CPUID_EXT_XTPR */
894 .features[FEAT_1_ECX] =
895 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
896 CPUID_EXT_MOVBE,
897 .features[FEAT_8000_0001_EDX] =
898 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
899 CPUID_EXT2_NX,
900 .features[FEAT_8000_0001_ECX] =
901 CPUID_EXT3_LAHF_LM,
902 .xlevel = 0x8000000A,
903 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
906 .name = "Conroe",
907 .level = 4,
908 .vendor = CPUID_VENDOR_INTEL,
909 .family = 6,
910 .model = 15,
911 .stepping = 3,
912 .features[FEAT_1_EDX] =
913 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
914 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
915 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
916 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
917 CPUID_DE | CPUID_FP87,
918 .features[FEAT_1_ECX] =
919 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
920 .features[FEAT_8000_0001_EDX] =
921 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
922 .features[FEAT_8000_0001_ECX] =
923 CPUID_EXT3_LAHF_LM,
924 .xlevel = 0x8000000A,
925 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
928 .name = "Penryn",
929 .level = 4,
930 .vendor = CPUID_VENDOR_INTEL,
931 .family = 6,
932 .model = 23,
933 .stepping = 3,
934 .features[FEAT_1_EDX] =
935 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
936 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
937 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
938 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
939 CPUID_DE | CPUID_FP87,
940 .features[FEAT_1_ECX] =
941 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
942 CPUID_EXT_SSE3,
943 .features[FEAT_8000_0001_EDX] =
944 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
945 .features[FEAT_8000_0001_ECX] =
946 CPUID_EXT3_LAHF_LM,
947 .xlevel = 0x8000000A,
948 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
951 .name = "Nehalem",
952 .level = 4,
953 .vendor = CPUID_VENDOR_INTEL,
954 .family = 6,
955 .model = 26,
956 .stepping = 3,
957 .features[FEAT_1_EDX] =
958 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
959 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
960 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
961 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
962 CPUID_DE | CPUID_FP87,
963 .features[FEAT_1_ECX] =
964 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
965 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
966 .features[FEAT_8000_0001_EDX] =
967 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
968 .features[FEAT_8000_0001_ECX] =
969 CPUID_EXT3_LAHF_LM,
970 .xlevel = 0x8000000A,
971 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
974 .name = "Westmere",
975 .level = 11,
976 .vendor = CPUID_VENDOR_INTEL,
977 .family = 6,
978 .model = 44,
979 .stepping = 1,
980 .features[FEAT_1_EDX] =
981 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
982 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
983 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
984 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
985 CPUID_DE | CPUID_FP87,
986 .features[FEAT_1_ECX] =
987 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
988 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
989 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
990 .features[FEAT_8000_0001_EDX] =
991 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
992 .features[FEAT_8000_0001_ECX] =
993 CPUID_EXT3_LAHF_LM,
994 .xlevel = 0x8000000A,
995 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
998 .name = "SandyBridge",
999 .level = 0xd,
1000 .vendor = CPUID_VENDOR_INTEL,
1001 .family = 6,
1002 .model = 42,
1003 .stepping = 1,
1004 .features[FEAT_1_EDX] =
1005 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1006 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1007 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1008 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1009 CPUID_DE | CPUID_FP87,
1010 .features[FEAT_1_ECX] =
1011 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1012 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1013 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1014 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1015 CPUID_EXT_SSE3,
1016 .features[FEAT_8000_0001_EDX] =
1017 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1018 CPUID_EXT2_SYSCALL,
1019 .features[FEAT_8000_0001_ECX] =
1020 CPUID_EXT3_LAHF_LM,
1021 .xlevel = 0x8000000A,
1022 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1025 .name = "Haswell",
1026 .level = 0xd,
1027 .vendor = CPUID_VENDOR_INTEL,
1028 .family = 6,
1029 .model = 60,
1030 .stepping = 1,
1031 .features[FEAT_1_EDX] =
1032 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1033 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1034 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1035 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1036 CPUID_DE | CPUID_FP87,
1037 .features[FEAT_1_ECX] =
1038 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1039 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1040 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1041 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1042 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1043 CPUID_EXT_PCID,
1044 .features[FEAT_8000_0001_EDX] =
1045 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1046 CPUID_EXT2_SYSCALL,
1047 .features[FEAT_8000_0001_ECX] =
1048 CPUID_EXT3_LAHF_LM,
1049 .features[FEAT_7_0_EBX] =
1050 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1051 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1052 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1053 CPUID_7_0_EBX_RTM,
1054 .xlevel = 0x8000000A,
1055 .model_id = "Intel Core Processor (Haswell)",
1058 .name = "Broadwell",
1059 .level = 0xd,
1060 .vendor = CPUID_VENDOR_INTEL,
1061 .family = 6,
1062 .model = 61,
1063 .stepping = 2,
1064 .features[FEAT_1_EDX] =
1065 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1066 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1067 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1068 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1069 CPUID_DE | CPUID_FP87,
1070 .features[FEAT_1_ECX] =
1071 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1072 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1073 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1074 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1075 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1076 CPUID_EXT_PCID,
1077 .features[FEAT_8000_0001_EDX] =
1078 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1079 CPUID_EXT2_SYSCALL,
1080 .features[FEAT_8000_0001_ECX] =
1081 CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1082 .features[FEAT_7_0_EBX] =
1083 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1084 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1085 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1086 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1087 CPUID_7_0_EBX_SMAP,
1088 .xlevel = 0x8000000A,
1089 .model_id = "Intel Core Processor (Broadwell)",
1092 .name = "Opteron_G1",
1093 .level = 5,
1094 .vendor = CPUID_VENDOR_AMD,
1095 .family = 15,
1096 .model = 6,
1097 .stepping = 1,
1098 .features[FEAT_1_EDX] =
1099 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1100 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1101 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1102 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1103 CPUID_DE | CPUID_FP87,
1104 .features[FEAT_1_ECX] =
1105 CPUID_EXT_SSE3,
1106 .features[FEAT_8000_0001_EDX] =
1107 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1108 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1109 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1110 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1111 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1112 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1113 .xlevel = 0x80000008,
1114 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1117 .name = "Opteron_G2",
1118 .level = 5,
1119 .vendor = CPUID_VENDOR_AMD,
1120 .family = 15,
1121 .model = 6,
1122 .stepping = 1,
1123 .features[FEAT_1_EDX] =
1124 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1125 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1126 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1127 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1128 CPUID_DE | CPUID_FP87,
1129 .features[FEAT_1_ECX] =
1130 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1131 .features[FEAT_8000_0001_EDX] =
1132 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1133 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1134 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1135 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1136 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1137 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1138 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1139 .features[FEAT_8000_0001_ECX] =
1140 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1141 .xlevel = 0x80000008,
1142 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1145 .name = "Opteron_G3",
1146 .level = 5,
1147 .vendor = CPUID_VENDOR_AMD,
1148 .family = 15,
1149 .model = 6,
1150 .stepping = 1,
1151 .features[FEAT_1_EDX] =
1152 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1153 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1154 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1155 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1156 CPUID_DE | CPUID_FP87,
1157 .features[FEAT_1_ECX] =
1158 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1159 CPUID_EXT_SSE3,
1160 .features[FEAT_8000_0001_EDX] =
1161 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1162 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1163 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1164 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1165 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1166 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1167 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1168 .features[FEAT_8000_0001_ECX] =
1169 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1170 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1171 .xlevel = 0x80000008,
1172 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1175 .name = "Opteron_G4",
1176 .level = 0xd,
1177 .vendor = CPUID_VENDOR_AMD,
1178 .family = 21,
1179 .model = 1,
1180 .stepping = 2,
1181 .features[FEAT_1_EDX] =
1182 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1183 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1184 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1185 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1186 CPUID_DE | CPUID_FP87,
1187 .features[FEAT_1_ECX] =
1188 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1189 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1190 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1191 CPUID_EXT_SSE3,
1192 .features[FEAT_8000_0001_EDX] =
1193 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1194 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1195 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1196 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1197 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1198 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1199 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1200 .features[FEAT_8000_0001_ECX] =
1201 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1202 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1203 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1204 CPUID_EXT3_LAHF_LM,
1205 .xlevel = 0x8000001A,
1206 .model_id = "AMD Opteron 62xx class CPU",
1209 .name = "Opteron_G5",
1210 .level = 0xd,
1211 .vendor = CPUID_VENDOR_AMD,
1212 .family = 21,
1213 .model = 2,
1214 .stepping = 0,
1215 .features[FEAT_1_EDX] =
1216 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1217 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1218 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1219 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1220 CPUID_DE | CPUID_FP87,
1221 .features[FEAT_1_ECX] =
1222 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1223 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1224 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1225 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1226 .features[FEAT_8000_0001_EDX] =
1227 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1228 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1229 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1230 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1231 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1232 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1233 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1234 .features[FEAT_8000_0001_ECX] =
1235 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1236 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1237 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1238 CPUID_EXT3_LAHF_LM,
1239 .xlevel = 0x8000001A,
1240 .model_id = "AMD Opteron 63xx class CPU",
1245 * x86_cpu_compat_set_features:
1246 * @cpu_model: CPU model name to be changed. If NULL, all CPU models are changed
1247 * @w: Identifies the feature word to be changed.
1248 * @feat_add: Feature bits to be added to feature word
1249 * @feat_remove: Feature bits to be removed from feature word
1251 * Change CPU model feature bits for compatibility.
1253 * This function may be used by machine-type compatibility functions
1254 * to enable or disable feature bits on specific CPU models.
1256 void x86_cpu_compat_set_features(const char *cpu_model, FeatureWord w,
1257 uint32_t feat_add, uint32_t feat_remove)
1259 X86CPUDefinition *def;
1260 int i;
1261 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1262 def = &builtin_x86_defs[i];
1263 if (!cpu_model || !strcmp(cpu_model, def->name)) {
1264 def->features[w] |= feat_add;
1265 def->features[w] &= ~feat_remove;
1270 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1271 bool migratable_only);
1273 #ifdef CONFIG_KVM
/* Fill @str (48 bytes) with the host's CPUID brand string.
 * Leaves 0x80000002..0x80000004 each yield 16 bytes (EAX/EBX/ECX/EDX).
 * Always returns 0.
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
    int leaf;

    for (leaf = 0; leaf < 3; leaf++) {
        char *dst = str + leaf * 16;

        host_cpuid(0x80000002 + leaf, 0, &eax, &ebx, &ecx, &edx);
        memcpy(dst + 0, &eax, 4);
        memcpy(dst + 4, &ebx, 4);
        memcpy(dst + 8, &ecx, 4);
        memcpy(dst + 12, &edx, 4);
    }
    return 0;
}
/* Model definition for "-cpu host": filled in at class_init/instance_init
 * time from the real host's CPUID, not a static table entry. */
1290 static X86CPUDefinition host_cpudef;
/* qdev properties of the "host" CPU model ("migratable" defaults to true). */
1292 static Property host_x86_cpu_properties[] = {
1293 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1294 DEFINE_PROP_END_OF_LIST()
1297 /* class_init for the "host" CPU model
1299 * This function may be called before KVM is initialized.
1301 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1303 DeviceClass *dc = DEVICE_CLASS(oc);
1304 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1305 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1307 xcc->kvm_required = true;
1309 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1310 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1312 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1313 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1314 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1315 host_cpudef.stepping = eax & 0x0F;
1317 cpu_x86_fill_model_id(host_cpudef.model_id);
1319 xcc->cpu_def = &host_cpudef;
1320 host_cpudef.cache_info_passthrough = true;
1322 /* level, xlevel, xlevel2, and the feature words are initialized on
1323 * instance_init, because they require KVM to be initialized.
1326 dc->props = host_x86_cpu_properties;
1329 static void host_x86_cpu_initfn(Object *obj)
1331 X86CPU *cpu = X86_CPU(obj);
1332 CPUX86State *env = &cpu->env;
1333 KVMState *s = kvm_state;
1335 assert(kvm_enabled());
1337 /* We can't fill the features array here because we don't know yet if
1338 * "migratable" is true or false.
1340 cpu->host_features = true;
1342 env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1343 env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1344 env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1346 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
/* QOM type registration for the KVM-only "host" CPU model. */
1349 static const TypeInfo host_x86_cpu_type_info = {
1350 .name = X86_CPU_TYPE_NAME("host"),
1351 .parent = TYPE_X86_CPU,
1352 .instance_init = host_x86_cpu_initfn,
1353 .class_init = host_x86_cpu_class_init,
1356 #endif
1358 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1360 FeatureWordInfo *f = &feature_word_info[w];
1361 int i;
1363 for (i = 0; i < 32; ++i) {
1364 if (1 << i & mask) {
1365 const char *reg = get_register_name_32(f->cpuid_reg);
1366 assert(reg);
1367 fprintf(stderr, "warning: %s doesn't support requested feature: "
1368 "CPUID.%02XH:%s%s%s [bit %d]\n",
1369 kvm_enabled() ? "host" : "TCG",
1370 f->cpuid_eax, reg,
1371 f->feat_names[i] ? "." : "",
1372 f->feat_names[i] ? f->feat_names[i] : "", i);
1377 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, void *opaque,
1378 const char *name, Error **errp)
1380 X86CPU *cpu = X86_CPU(obj);
1381 CPUX86State *env = &cpu->env;
1382 int64_t value;
1384 value = (env->cpuid_version >> 8) & 0xf;
1385 if (value == 0xf) {
1386 value += (env->cpuid_version >> 20) & 0xff;
1388 visit_type_int(v, &value, name, errp);
1391 static void x86_cpuid_version_set_family(Object *obj, Visitor *v, void *opaque,
1392 const char *name, Error **errp)
1394 X86CPU *cpu = X86_CPU(obj);
1395 CPUX86State *env = &cpu->env;
1396 const int64_t min = 0;
1397 const int64_t max = 0xff + 0xf;
1398 Error *local_err = NULL;
1399 int64_t value;
1401 visit_type_int(v, &value, name, &local_err);
1402 if (local_err) {
1403 error_propagate(errp, local_err);
1404 return;
1406 if (value < min || value > max) {
1407 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1408 name ? name : "null", value, min, max);
1409 return;
1412 env->cpuid_version &= ~0xff00f00;
1413 if (value > 0x0f) {
1414 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1415 } else {
1416 env->cpuid_version |= value << 8;
1420 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, void *opaque,
1421 const char *name, Error **errp)
1423 X86CPU *cpu = X86_CPU(obj);
1424 CPUX86State *env = &cpu->env;
1425 int64_t value;
1427 value = (env->cpuid_version >> 4) & 0xf;
1428 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1429 visit_type_int(v, &value, name, errp);
1432 static void x86_cpuid_version_set_model(Object *obj, Visitor *v, void *opaque,
1433 const char *name, Error **errp)
1435 X86CPU *cpu = X86_CPU(obj);
1436 CPUX86State *env = &cpu->env;
1437 const int64_t min = 0;
1438 const int64_t max = 0xff;
1439 Error *local_err = NULL;
1440 int64_t value;
1442 visit_type_int(v, &value, name, &local_err);
1443 if (local_err) {
1444 error_propagate(errp, local_err);
1445 return;
1447 if (value < min || value > max) {
1448 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1449 name ? name : "null", value, min, max);
1450 return;
1453 env->cpuid_version &= ~0xf00f0;
1454 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1457 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1458 void *opaque, const char *name,
1459 Error **errp)
1461 X86CPU *cpu = X86_CPU(obj);
1462 CPUX86State *env = &cpu->env;
1463 int64_t value;
1465 value = env->cpuid_version & 0xf;
1466 visit_type_int(v, &value, name, errp);
1469 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1470 void *opaque, const char *name,
1471 Error **errp)
1473 X86CPU *cpu = X86_CPU(obj);
1474 CPUX86State *env = &cpu->env;
1475 const int64_t min = 0;
1476 const int64_t max = 0xf;
1477 Error *local_err = NULL;
1478 int64_t value;
1480 visit_type_int(v, &value, name, &local_err);
1481 if (local_err) {
1482 error_propagate(errp, local_err);
1483 return;
1485 if (value < min || value > max) {
1486 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1487 name ? name : "null", value, min, max);
1488 return;
1491 env->cpuid_version &= ~0xf;
1492 env->cpuid_version |= value & 0xf;
1495 static void x86_cpuid_get_level(Object *obj, Visitor *v, void *opaque,
1496 const char *name, Error **errp)
1498 X86CPU *cpu = X86_CPU(obj);
1500 visit_type_uint32(v, &cpu->env.cpuid_level, name, errp);
1503 static void x86_cpuid_set_level(Object *obj, Visitor *v, void *opaque,
1504 const char *name, Error **errp)
1506 X86CPU *cpu = X86_CPU(obj);
1508 visit_type_uint32(v, &cpu->env.cpuid_level, name, errp);
1511 static void x86_cpuid_get_xlevel(Object *obj, Visitor *v, void *opaque,
1512 const char *name, Error **errp)
1514 X86CPU *cpu = X86_CPU(obj);
1516 visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp);
1519 static void x86_cpuid_set_xlevel(Object *obj, Visitor *v, void *opaque,
1520 const char *name, Error **errp)
1522 X86CPU *cpu = X86_CPU(obj);
1524 visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp);
1527 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1529 X86CPU *cpu = X86_CPU(obj);
1530 CPUX86State *env = &cpu->env;
1531 char *value;
1533 value = (char *)g_malloc(CPUID_VENDOR_SZ + 1);
1534 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1535 env->cpuid_vendor3);
1536 return value;
1539 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1540 Error **errp)
1542 X86CPU *cpu = X86_CPU(obj);
1543 CPUX86State *env = &cpu->env;
1544 int i;
1546 if (strlen(value) != CPUID_VENDOR_SZ) {
1547 error_set(errp, QERR_PROPERTY_VALUE_BAD, "",
1548 "vendor", value);
1549 return;
1552 env->cpuid_vendor1 = 0;
1553 env->cpuid_vendor2 = 0;
1554 env->cpuid_vendor3 = 0;
1555 for (i = 0; i < 4; i++) {
1556 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1557 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1558 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1562 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1564 X86CPU *cpu = X86_CPU(obj);
1565 CPUX86State *env = &cpu->env;
1566 char *value;
1567 int i;
1569 value = g_malloc(48 + 1);
1570 for (i = 0; i < 48; i++) {
1571 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1573 value[48] = '\0';
1574 return value;
1577 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1578 Error **errp)
1580 X86CPU *cpu = X86_CPU(obj);
1581 CPUX86State *env = &cpu->env;
1582 int c, len, i;
1584 if (model_id == NULL) {
1585 model_id = "";
1587 len = strlen(model_id);
1588 memset(env->cpuid_model, 0, 48);
1589 for (i = 0; i < 48; i++) {
1590 if (i >= len) {
1591 c = '\0';
1592 } else {
1593 c = (uint8_t)model_id[i];
1595 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1599 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, void *opaque,
1600 const char *name, Error **errp)
1602 X86CPU *cpu = X86_CPU(obj);
1603 int64_t value;
1605 value = cpu->env.tsc_khz * 1000;
1606 visit_type_int(v, &value, name, errp);
1609 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, void *opaque,
1610 const char *name, Error **errp)
1612 X86CPU *cpu = X86_CPU(obj);
1613 const int64_t min = 0;
1614 const int64_t max = INT64_MAX;
1615 Error *local_err = NULL;
1616 int64_t value;
1618 visit_type_int(v, &value, name, &local_err);
1619 if (local_err) {
1620 error_propagate(errp, local_err);
1621 return;
1623 if (value < min || value > max) {
1624 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1625 name ? name : "null", value, min, max);
1626 return;
1629 cpu->env.tsc_khz = value / 1000;
1632 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, void *opaque,
1633 const char *name, Error **errp)
1635 X86CPU *cpu = X86_CPU(obj);
1636 int64_t value = cpu->env.cpuid_apic_id;
1638 visit_type_int(v, &value, name, errp);
1641 static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, void *opaque,
1642 const char *name, Error **errp)
1644 X86CPU *cpu = X86_CPU(obj);
1645 DeviceState *dev = DEVICE(obj);
1646 const int64_t min = 0;
1647 const int64_t max = UINT32_MAX;
1648 Error *error = NULL;
1649 int64_t value;
1651 if (dev->realized) {
1652 error_setg(errp, "Attempt to set property '%s' on '%s' after "
1653 "it was realized", name, object_get_typename(obj));
1654 return;
1657 visit_type_int(v, &value, name, &error);
1658 if (error) {
1659 error_propagate(errp, error);
1660 return;
1662 if (value < min || value > max) {
1663 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1664 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
1665 object_get_typename(obj), name, value, min, max);
1666 return;
1669 if ((value != cpu->env.cpuid_apic_id) && cpu_exists(value)) {
1670 error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
1671 return;
1673 cpu->env.cpuid_apic_id = value;
1676 /* Generic getter for "feature-words" and "filtered-features" properties */
1677 static void x86_cpu_get_feature_words(Object *obj, Visitor *v, void *opaque,
1678 const char *name, Error **errp)
1680 uint32_t *array = (uint32_t *)opaque;
1681 FeatureWord w;
1682 Error *err = NULL;
1683 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1684 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1685 X86CPUFeatureWordInfoList *list = NULL;
1687 for (w = 0; w < FEATURE_WORDS; w++) {
1688 FeatureWordInfo *wi = &feature_word_info[w];
1689 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1690 qwi->cpuid_input_eax = wi->cpuid_eax;
1691 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1692 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1693 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1694 qwi->features = array[w];
1696 /* List will be in reverse order, but order shouldn't matter */
1697 list_entries[w].next = list;
1698 list_entries[w].value = &word_infos[w];
1699 list = &list_entries[w];
1702 visit_type_X86CPUFeatureWordInfoList(v, &list, "feature-words", &err);
1703 error_propagate(errp, err);
1706 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1707 const char *name, Error **errp)
1709 X86CPU *cpu = X86_CPU(obj);
1710 int64_t value = cpu->hyperv_spinlock_attempts;
1712 visit_type_int(v, &value, name, errp);
1715 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1716 const char *name, Error **errp)
1718 const int64_t min = 0xFFF;
1719 const int64_t max = UINT_MAX;
1720 X86CPU *cpu = X86_CPU(obj);
1721 Error *err = NULL;
1722 int64_t value;
1724 visit_type_int(v, &value, name, &err);
1725 if (err) {
1726 error_propagate(errp, err);
1727 return;
1730 if (value < min || value > max) {
1731 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1732 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1733 object_get_typename(obj), name ? name : "null",
1734 value, min, max);
1735 return;
1737 cpu->hyperv_spinlock_attempts = value;
/* qdev property type backing the "hv-spinlocks" property. */
1740 static PropertyInfo qdev_prop_spinlocks = {
1741 .name = "int",
1742 .get = x86_get_hv_spinlocks,
1743 .set = x86_set_hv_spinlocks,
/* Convert all '_' in a feature string option name to '-', to make feature
 * name conform to QOM property naming rule, which uses '-' instead of '_'.
 */
static inline void feat2prop(char *s)
{
    for (; *s; s++) {
        if (*s == '_') {
            *s = '-';
        }
    }
}
1756 /* Parse "+feature,-feature,feature=foo" CPU feature string
1758 static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
1759 Error **errp)
1761 X86CPU *cpu = X86_CPU(cs);
1762 char *featurestr; /* Single 'key=value" string being parsed */
1763 FeatureWord w;
1764 /* Features to be added */
1765 FeatureWordArray plus_features = { 0 };
1766 /* Features to be removed */
1767 FeatureWordArray minus_features = { 0 };
1768 uint32_t numvalue;
1769 CPUX86State *env = &cpu->env;
1770 Error *local_err = NULL;
1772 featurestr = features ? strtok(features, ",") : NULL;
1774 while (featurestr) {
1775 char *val;
1776 if (featurestr[0] == '+') {
1777 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
1778 } else if (featurestr[0] == '-') {
1779 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
1780 } else if ((val = strchr(featurestr, '='))) {
1781 *val = 0; val++;
1782 feat2prop(featurestr);
1783 if (!strcmp(featurestr, "xlevel")) {
1784 char *err;
1785 char num[32];
1787 numvalue = strtoul(val, &err, 0);
1788 if (!*val || *err) {
1789 error_setg(errp, "bad numerical value %s", val);
1790 return;
1792 if (numvalue < 0x80000000) {
1793 error_report("xlevel value shall always be >= 0x80000000"
1794 ", fixup will be removed in future versions");
1795 numvalue += 0x80000000;
1797 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1798 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1799 } else if (!strcmp(featurestr, "tsc-freq")) {
1800 int64_t tsc_freq;
1801 char *err;
1802 char num[32];
1804 tsc_freq = strtosz_suffix_unit(val, &err,
1805 STRTOSZ_DEFSUFFIX_B, 1000);
1806 if (tsc_freq < 0 || *err) {
1807 error_setg(errp, "bad numerical value %s", val);
1808 return;
1810 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
1811 object_property_parse(OBJECT(cpu), num, "tsc-frequency",
1812 &local_err);
1813 } else if (!strcmp(featurestr, "hv-spinlocks")) {
1814 char *err;
1815 const int min = 0xFFF;
1816 char num[32];
1817 numvalue = strtoul(val, &err, 0);
1818 if (!*val || *err) {
1819 error_setg(errp, "bad numerical value %s", val);
1820 return;
1822 if (numvalue < min) {
1823 error_report("hv-spinlocks value shall always be >= 0x%x"
1824 ", fixup will be removed in future versions",
1825 min);
1826 numvalue = min;
1828 snprintf(num, sizeof(num), "%" PRId32, numvalue);
1829 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1830 } else {
1831 object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
1833 } else {
1834 feat2prop(featurestr);
1835 object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
1837 if (local_err) {
1838 error_propagate(errp, local_err);
1839 return;
1841 featurestr = strtok(NULL, ",");
1844 if (cpu->host_features) {
1845 for (w = 0; w < FEATURE_WORDS; w++) {
1846 env->features[w] =
1847 x86_cpu_get_supported_feature_word(w, cpu->migratable);
1851 for (w = 0; w < FEATURE_WORDS; w++) {
1852 env->features[w] |= plus_features[w];
1853 env->features[w] &= ~minus_features[w];
/* generate a composite string into buf of all cpuid names in featureset
 * selected by fbits. indicate truncation at bufsize in the event of overflow.
 * if flags, suppress names undefined in featureset.
 */
static void listflags(char *buf, int bufsize, uint32_t fbits,
                      const char **featureset, uint32_t flags)
{
    const char **p = &featureset[31];
    char *q, *b, bit;
    int nc;

    /* Reserve room at the end for a "..." truncation marker, if it fits */
    b = 4 <= bufsize ? buf + (bufsize -= 3) - 1 : NULL;
    *buf = '\0';
    /* Walk bits 31..0; use 1U so that shifting to bit 31 never overflows
     * a signed int (which would be undefined behavior). */
    for (q = buf, bit = 31; fbits && bufsize; --p, fbits &= ~(1U << bit), --bit) {
        if (fbits & (1U << bit) && (*p || !flags)) {
            if (*p) {
                nc = snprintf(q, bufsize, "%s%s", q == buf ? "" : " ", *p);
            } else {
                /* Unnamed bit: print its index as "[n]" */
                nc = snprintf(q, bufsize, "%s[%d]", q == buf ? "" : " ", bit);
            }
            if (bufsize <= nc) {
                if (b) {
                    memcpy(b, "...", sizeof("..."));
                }
                return;
            }
            q += nc;
            bufsize -= nc;
        }
    }
}
1887 /* generate CPU information. */
1888 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
1890 X86CPUDefinition *def;
1891 char buf[256];
1892 int i;
1894 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1895 def = &builtin_x86_defs[i];
1896 snprintf(buf, sizeof(buf), "%s", def->name);
1897 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
1899 #ifdef CONFIG_KVM
1900 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
1901 "KVM processor with all supported host features "
1902 "(only available in KVM mode)");
1903 #endif
1905 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
1906 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
1907 FeatureWordInfo *fw = &feature_word_info[i];
1909 listflags(buf, sizeof(buf), (uint32_t)~0, fw->feat_names, 1);
1910 (*cpu_fprintf)(f, " %s\n", buf);
1914 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
1916 CpuDefinitionInfoList *cpu_list = NULL;
1917 X86CPUDefinition *def;
1918 int i;
1920 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1921 CpuDefinitionInfoList *entry;
1922 CpuDefinitionInfo *info;
1924 def = &builtin_x86_defs[i];
1925 info = g_malloc0(sizeof(*info));
1926 info->name = g_strdup(def->name);
1928 entry = g_malloc0(sizeof(*entry));
1929 entry->value = info;
1930 entry->next = cpu_list;
1931 cpu_list = entry;
1934 return cpu_list;
1937 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1938 bool migratable_only)
1940 FeatureWordInfo *wi = &feature_word_info[w];
1941 uint32_t r;
1943 if (kvm_enabled()) {
1944 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
1945 wi->cpuid_ecx,
1946 wi->cpuid_reg);
1947 } else if (tcg_enabled()) {
1948 r = wi->tcg_features;
1949 } else {
1950 return ~0;
1952 if (migratable_only) {
1953 r &= x86_cpu_get_migratable_flags(w);
1955 return r;
1959 * Filters CPU feature words based on host availability of each feature.
1961 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
1963 static int x86_cpu_filter_features(X86CPU *cpu)
1965 CPUX86State *env = &cpu->env;
1966 FeatureWord w;
1967 int rv = 0;
1969 for (w = 0; w < FEATURE_WORDS; w++) {
1970 uint32_t host_feat =
1971 x86_cpu_get_supported_feature_word(w, cpu->migratable);
1972 uint32_t requested_features = env->features[w];
1973 env->features[w] &= host_feat;
1974 cpu->filtered_features[w] = requested_features & ~env->features[w];
1975 if (cpu->filtered_features[w]) {
1976 if (cpu->check_cpuid || cpu->enforce_cpuid) {
1977 report_unavailable_features(w, cpu->filtered_features[w]);
1979 rv = 1;
1983 return rv;
1986 /* Load data from X86CPUDefinition
1988 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
1990 CPUX86State *env = &cpu->env;
1991 const char *vendor;
1992 char host_vendor[CPUID_VENDOR_SZ + 1];
1993 FeatureWord w;
1995 object_property_set_int(OBJECT(cpu), def->level, "level", errp);
1996 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
1997 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
1998 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
1999 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
2000 env->cpuid_xlevel2 = def->xlevel2;
2001 cpu->cache_info_passthrough = def->cache_info_passthrough;
2002 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2003 for (w = 0; w < FEATURE_WORDS; w++) {
2004 env->features[w] = def->features[w];
2007 /* Special cases not set in the X86CPUDefinition structs: */
2008 if (kvm_enabled()) {
2009 FeatureWord w;
2010 for (w = 0; w < FEATURE_WORDS; w++) {
2011 env->features[w] |= kvm_default_features[w];
2012 env->features[w] &= ~kvm_default_unset_features[w];
2016 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2018 /* sysenter isn't supported in compatibility mode on AMD,
2019 * syscall isn't supported in compatibility mode on Intel.
2020 * Normally we advertise the actual CPU vendor, but you can
2021 * override this using the 'vendor' property if you want to use
2022 * KVM's sysenter/syscall emulation in compatibility mode and
2023 * when doing cross vendor migration
2025 vendor = def->vendor;
2026 if (kvm_enabled()) {
2027 uint32_t ebx = 0, ecx = 0, edx = 0;
2028 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2029 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2030 vendor = host_vendor;
2033 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
/* Create (but do not realize) an X86CPU for @cpu_model.
 *
 * @cpu_model: "name[,features...]" — split at the first ',' into the CPU
 *             model name and the feature string.
 * @icc_bridge: parent ICC bridge device; required in system emulation.
 * @errp: set on failure, in which case NULL is returned.
 *
 * Returns the new CPU object, or NULL on error.  In system emulation the
 * ICC bus ends up owning the object's reference (see unref below).
 */
X86CPU *cpu_x86_create(const char *cpu_model, DeviceState *icc_bridge,
                       Error **errp)
{
    X86CPU *cpu = NULL;
    X86CPUClass *xcc;
    ObjectClass *oc;
    gchar **model_pieces;
    char *name, *features;
    Error *error = NULL;

    /* Split at most once: pieces[0] = name, pieces[1] = feature list. */
    model_pieces = g_strsplit(cpu_model, ",", 2);
    if (!model_pieces[0]) {
        error_setg(&error, "Invalid/empty CPU model name");
        goto out;
    }
    name = model_pieces[0];
    features = model_pieces[1];

    oc = x86_cpu_class_by_name(name);
    if (oc == NULL) {
        error_setg(&error, "Unable to find CPU definition: %s", name);
        goto out;
    }
    xcc = X86_CPU_CLASS(oc);

    /* e.g. the "host" model is meaningless without KVM */
    if (xcc->kvm_required && !kvm_enabled()) {
        error_setg(&error, "CPU model '%s' requires KVM", name);
        goto out;
    }

    cpu = X86_CPU(object_new(object_class_get_name(oc)));

#ifndef CONFIG_USER_ONLY
    if (icc_bridge == NULL) {
        error_setg(&error, "Invalid icc-bridge value");
        goto out;
    }
    /* Parenting the device onto the ICC bus takes a reference; drop ours
     * so the bus becomes the sole owner. */
    qdev_set_parent_bus(DEVICE(cpu), qdev_get_child_bus(icc_bridge, "icc"));
    object_unref(OBJECT(cpu));
#endif

    x86_cpu_parse_featurestr(CPU(cpu), features, &error);
    if (error) {
        goto out;
    }

out:
    if (error != NULL) {
        error_propagate(errp, error);
        if (cpu) {
            object_unref(OBJECT(cpu));
            cpu = NULL;
        }
    }
    g_strfreev(model_pieces);
    return cpu;
}
2095 X86CPU *cpu_x86_init(const char *cpu_model)
2097 Error *error = NULL;
2098 X86CPU *cpu;
2100 cpu = cpu_x86_create(cpu_model, NULL, &error);
2101 if (error) {
2102 goto out;
2105 object_property_set_bool(OBJECT(cpu), true, "realized", &error);
2107 out:
2108 if (error) {
2109 error_report("%s", error_get_pretty(error));
2110 error_free(error);
2111 if (cpu != NULL) {
2112 object_unref(OBJECT(cpu));
2113 cpu = NULL;
2116 return cpu;
2119 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2121 X86CPUDefinition *cpudef = data;
2122 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2124 xcc->cpu_def = cpudef;
2127 static void x86_register_cpudef_type(X86CPUDefinition *def)
2129 char *typename = x86_cpu_type_name(def->name);
2130 TypeInfo ti = {
2131 .name = typename,
2132 .parent = TYPE_X86_CPU,
2133 .class_init = x86_cpu_cpudef_class_init,
2134 .class_data = def,
2137 type_register(&ti);
2138 g_free(typename);
#if !defined(CONFIG_USER_ONLY)

/* Mask the APIC feature bit out of CPUID[1].EDX — used by boards/configs
 * that provide no local APIC. */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}

#endif /* !CONFIG_USER_ONLY */
2150 /* Initialize list of CPU models, filling some non-static fields if necessary
2152 void x86_cpudef_setup(void)
2154 int i, j;
2155 static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
2157 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
2158 X86CPUDefinition *def = &builtin_x86_defs[i];
2160 /* Look for specific "cpudef" models that */
2161 /* have the QEMU version in .model_id */
2162 for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
2163 if (strcmp(model_with_versions[j], def->name) == 0) {
2164 pstrcpy(def->model_id, sizeof(def->model_id),
2165 "QEMU Virtual CPU version ");
2166 pstrcat(def->model_id, sizeof(def->model_id),
2167 qemu_get_version());
2168 break;
2174 static void get_cpuid_vendor(CPUX86State *env, uint32_t *ebx,
2175 uint32_t *ecx, uint32_t *edx)
2177 *ebx = env->cpuid_vendor1;
2178 *edx = env->cpuid_vendor2;
2179 *ecx = env->cpuid_vendor3;
2182 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2183 uint32_t *eax, uint32_t *ebx,
2184 uint32_t *ecx, uint32_t *edx)
2186 X86CPU *cpu = x86_env_get_cpu(env);
2187 CPUState *cs = CPU(cpu);
2189 /* test if maximum index reached */
2190 if (index & 0x80000000) {
2191 if (index > env->cpuid_xlevel) {
2192 if (env->cpuid_xlevel2 > 0) {
2193 /* Handle the Centaur's CPUID instruction. */
2194 if (index > env->cpuid_xlevel2) {
2195 index = env->cpuid_xlevel2;
2196 } else if (index < 0xC0000000) {
2197 index = env->cpuid_xlevel;
2199 } else {
2200 /* Intel documentation states that invalid EAX input will
2201 * return the same information as EAX=cpuid_level
2202 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2204 index = env->cpuid_level;
2207 } else {
2208 if (index > env->cpuid_level)
2209 index = env->cpuid_level;
2212 switch(index) {
2213 case 0:
2214 *eax = env->cpuid_level;
2215 get_cpuid_vendor(env, ebx, ecx, edx);
2216 break;
2217 case 1:
2218 *eax = env->cpuid_version;
2219 *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2220 *ecx = env->features[FEAT_1_ECX];
2221 *edx = env->features[FEAT_1_EDX];
2222 if (cs->nr_cores * cs->nr_threads > 1) {
2223 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2224 *edx |= 1 << 28; /* HTT bit */
2226 break;
2227 case 2:
2228 /* cache info: needed for Pentium Pro compatibility */
2229 if (cpu->cache_info_passthrough) {
2230 host_cpuid(index, 0, eax, ebx, ecx, edx);
2231 break;
2233 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2234 *ebx = 0;
2235 *ecx = 0;
2236 *edx = (L1D_DESCRIPTOR << 16) | \
2237 (L1I_DESCRIPTOR << 8) | \
2238 (L2_DESCRIPTOR);
2239 break;
2240 case 4:
2241 /* cache info: needed for Core compatibility */
2242 if (cpu->cache_info_passthrough) {
2243 host_cpuid(index, count, eax, ebx, ecx, edx);
2244 *eax &= ~0xFC000000;
2245 } else {
2246 *eax = 0;
2247 switch (count) {
2248 case 0: /* L1 dcache info */
2249 *eax |= CPUID_4_TYPE_DCACHE | \
2250 CPUID_4_LEVEL(1) | \
2251 CPUID_4_SELF_INIT_LEVEL;
2252 *ebx = (L1D_LINE_SIZE - 1) | \
2253 ((L1D_PARTITIONS - 1) << 12) | \
2254 ((L1D_ASSOCIATIVITY - 1) << 22);
2255 *ecx = L1D_SETS - 1;
2256 *edx = CPUID_4_NO_INVD_SHARING;
2257 break;
2258 case 1: /* L1 icache info */
2259 *eax |= CPUID_4_TYPE_ICACHE | \
2260 CPUID_4_LEVEL(1) | \
2261 CPUID_4_SELF_INIT_LEVEL;
2262 *ebx = (L1I_LINE_SIZE - 1) | \
2263 ((L1I_PARTITIONS - 1) << 12) | \
2264 ((L1I_ASSOCIATIVITY - 1) << 22);
2265 *ecx = L1I_SETS - 1;
2266 *edx = CPUID_4_NO_INVD_SHARING;
2267 break;
2268 case 2: /* L2 cache info */
2269 *eax |= CPUID_4_TYPE_UNIFIED | \
2270 CPUID_4_LEVEL(2) | \
2271 CPUID_4_SELF_INIT_LEVEL;
2272 if (cs->nr_threads > 1) {
2273 *eax |= (cs->nr_threads - 1) << 14;
2275 *ebx = (L2_LINE_SIZE - 1) | \
2276 ((L2_PARTITIONS - 1) << 12) | \
2277 ((L2_ASSOCIATIVITY - 1) << 22);
2278 *ecx = L2_SETS - 1;
2279 *edx = CPUID_4_NO_INVD_SHARING;
2280 break;
2281 default: /* end of info */
2282 *eax = 0;
2283 *ebx = 0;
2284 *ecx = 0;
2285 *edx = 0;
2286 break;
2290 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2291 if ((*eax & 31) && cs->nr_cores > 1) {
2292 *eax |= (cs->nr_cores - 1) << 26;
2294 break;
2295 case 5:
2296 /* mwait info: needed for Core compatibility */
2297 *eax = 0; /* Smallest monitor-line size in bytes */
2298 *ebx = 0; /* Largest monitor-line size in bytes */
2299 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2300 *edx = 0;
2301 break;
2302 case 6:
2303 /* Thermal and Power Leaf */
2304 *eax = 0;
2305 *ebx = 0;
2306 *ecx = 0;
2307 *edx = 0;
2308 break;
2309 case 7:
2310 /* Structured Extended Feature Flags Enumeration Leaf */
2311 if (count == 0) {
2312 *eax = 0; /* Maximum ECX value for sub-leaves */
2313 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2314 *ecx = 0; /* Reserved */
2315 *edx = 0; /* Reserved */
2316 } else {
2317 *eax = 0;
2318 *ebx = 0;
2319 *ecx = 0;
2320 *edx = 0;
2322 break;
2323 case 9:
2324 /* Direct Cache Access Information Leaf */
2325 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2326 *ebx = 0;
2327 *ecx = 0;
2328 *edx = 0;
2329 break;
2330 case 0xA:
2331 /* Architectural Performance Monitoring Leaf */
2332 if (kvm_enabled() && cpu->enable_pmu) {
2333 KVMState *s = cs->kvm_state;
2335 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2336 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2337 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2338 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2339 } else {
2340 *eax = 0;
2341 *ebx = 0;
2342 *ecx = 0;
2343 *edx = 0;
2345 break;
2346 case 0xD: {
2347 KVMState *s = cs->kvm_state;
2348 uint64_t kvm_mask;
2349 int i;
2351 /* Processor Extended State */
2352 *eax = 0;
2353 *ebx = 0;
2354 *ecx = 0;
2355 *edx = 0;
2356 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) || !kvm_enabled()) {
2357 break;
2359 kvm_mask =
2360 kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX) |
2361 ((uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32);
2363 if (count == 0) {
2364 *ecx = 0x240;
2365 for (i = 2; i < ARRAY_SIZE(ext_save_areas); i++) {
2366 const ExtSaveArea *esa = &ext_save_areas[i];
2367 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2368 (kvm_mask & (1 << i)) != 0) {
2369 if (i < 32) {
2370 *eax |= 1 << i;
2371 } else {
2372 *edx |= 1 << (i - 32);
2374 *ecx = MAX(*ecx, esa->offset + esa->size);
2377 *eax |= kvm_mask & (XSTATE_FP | XSTATE_SSE);
2378 *ebx = *ecx;
2379 } else if (count == 1) {
2380 *eax = kvm_arch_get_supported_cpuid(s, 0xd, 1, R_EAX);
2381 } else if (count < ARRAY_SIZE(ext_save_areas)) {
2382 const ExtSaveArea *esa = &ext_save_areas[count];
2383 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2384 (kvm_mask & (1 << count)) != 0) {
2385 *eax = esa->size;
2386 *ebx = esa->offset;
2389 break;
2391 case 0x80000000:
2392 *eax = env->cpuid_xlevel;
2393 *ebx = env->cpuid_vendor1;
2394 *edx = env->cpuid_vendor2;
2395 *ecx = env->cpuid_vendor3;
2396 break;
2397 case 0x80000001:
2398 *eax = env->cpuid_version;
2399 *ebx = 0;
2400 *ecx = env->features[FEAT_8000_0001_ECX];
2401 *edx = env->features[FEAT_8000_0001_EDX];
2403 /* The Linux kernel checks for the CMPLegacy bit and
2404 * discards multiple thread information if it is set.
2405 * So dont set it here for Intel to make Linux guests happy.
2407 if (cs->nr_cores * cs->nr_threads > 1) {
2408 uint32_t tebx, tecx, tedx;
2409 get_cpuid_vendor(env, &tebx, &tecx, &tedx);
2410 if (tebx != CPUID_VENDOR_INTEL_1 ||
2411 tedx != CPUID_VENDOR_INTEL_2 ||
2412 tecx != CPUID_VENDOR_INTEL_3) {
2413 *ecx |= 1 << 1; /* CmpLegacy bit */
2416 break;
2417 case 0x80000002:
2418 case 0x80000003:
2419 case 0x80000004:
2420 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2421 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2422 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2423 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2424 break;
2425 case 0x80000005:
2426 /* cache info (L1 cache) */
2427 if (cpu->cache_info_passthrough) {
2428 host_cpuid(index, 0, eax, ebx, ecx, edx);
2429 break;
2431 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2432 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2433 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2434 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2435 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2436 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2437 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2438 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2439 break;
2440 case 0x80000006:
2441 /* cache info (L2 cache) */
2442 if (cpu->cache_info_passthrough) {
2443 host_cpuid(index, 0, eax, ebx, ecx, edx);
2444 break;
2446 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2447 (L2_DTLB_2M_ENTRIES << 16) | \
2448 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2449 (L2_ITLB_2M_ENTRIES);
2450 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2451 (L2_DTLB_4K_ENTRIES << 16) | \
2452 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2453 (L2_ITLB_4K_ENTRIES);
2454 *ecx = (L2_SIZE_KB_AMD << 16) | \
2455 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2456 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2457 *edx = ((L3_SIZE_KB/512) << 18) | \
2458 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2459 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2460 break;
2461 case 0x80000007:
2462 *eax = 0;
2463 *ebx = 0;
2464 *ecx = 0;
2465 *edx = env->features[FEAT_8000_0007_EDX];
2466 break;
2467 case 0x80000008:
2468 /* virtual & phys address size in low 2 bytes. */
2469 /* XXX: This value must match the one used in the MMU code. */
2470 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2471 /* 64 bit processor */
2472 /* XXX: The physical address space is limited to 42 bits in exec.c. */
2473 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
2474 } else {
2475 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
2476 *eax = 0x00000024; /* 36 bits physical */
2477 } else {
2478 *eax = 0x00000020; /* 32 bits physical */
2481 *ebx = 0;
2482 *ecx = 0;
2483 *edx = 0;
2484 if (cs->nr_cores * cs->nr_threads > 1) {
2485 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2487 break;
2488 case 0x8000000A:
2489 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2490 *eax = 0x00000001; /* SVM Revision */
2491 *ebx = 0x00000010; /* nr of ASIDs */
2492 *ecx = 0;
2493 *edx = env->features[FEAT_SVM]; /* optional features */
2494 } else {
2495 *eax = 0;
2496 *ebx = 0;
2497 *ecx = 0;
2498 *edx = 0;
2500 break;
2501 case 0xC0000000:
2502 *eax = env->cpuid_xlevel2;
2503 *ebx = 0;
2504 *ecx = 0;
2505 *edx = 0;
2506 break;
2507 case 0xC0000001:
2508 /* Support for VIA CPU's CPUID instruction */
2509 *eax = env->cpuid_version;
2510 *ebx = 0;
2511 *ecx = 0;
2512 *edx = env->features[FEAT_C000_0001_EDX];
2513 break;
2514 case 0xC0000002:
2515 case 0xC0000003:
2516 case 0xC0000004:
2517 /* Reserved for the future, and now filled with zero */
2518 *eax = 0;
2519 *ebx = 0;
2520 *ecx = 0;
2521 *edx = 0;
2522 break;
2523 default:
2524 /* reserved values: zero */
2525 *eax = 0;
2526 *ebx = 0;
2527 *ecx = 0;
2528 *edx = 0;
2529 break;
/* CPUClass::reset() — put the CPU into the architectural power-on/reset
 * state (real mode, EIP at the reset vector, FPU/SSE defaults), while
 * preserving the CPUID/feature configuration. */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    int i;

    xcc->parent_reset(s);

    /* Zero only the mutable state: everything from cpuid_level onward is
     * configuration that must survive reset. */
    memset(env, 0, offsetof(CPUX86State, cpuid_level));

    tlb_flush(s, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* CS starts at the reset vector segment f000 (base 0xffff0000); the
     * other segments are flat 16-bit segments based at 0. */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    env->xstate_bv = XSTATE_FP | XSTATE_SSE;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    env->xcr0 = 1;

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    if (s->cpu_index == 0) {
        apic_designate_bsp(cpu->apic_state);
    }

    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
#endif
}
#ifndef CONFIG_USER_ONLY
/* A CPU is the bootstrap processor iff its APIC base MSR has the BSP bit
 * set (we designate the first CPU as BSP at reset time). */
bool cpu_is_bsp(X86CPU *cpu)
{
    return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
}

/* TODO: remove me, when reset over QOM tree is implemented */
static void x86_cpu_machine_reset_cb(void *opaque)
{
    X86CPU *cpu = opaque;
    cpu_reset(CPU(cpu));
}
#endif
2649 static void mce_init(X86CPU *cpu)
2651 CPUX86State *cenv = &cpu->env;
2652 unsigned int bank;
2654 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2655 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2656 (CPUID_MCE | CPUID_MCA)) {
2657 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2658 cenv->mcg_ctl = ~(uint64_t)0;
2659 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2660 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
#ifndef CONFIG_USER_ONLY
/* Create (but do not initialize) the CPU's local APIC device, picking the
 * implementation that matches the accelerator: in-kernel KVM APIC, Xen
 * APIC, or the emulated "apic".  Sets @errp if device creation fails. */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    CPUX86State *env = &cpu->env;
    DeviceState *dev = DEVICE(cpu);
    APICCommonState *apic;
    const char *apic_type = "apic";

    if (kvm_irqchip_in_kernel()) {
        apic_type = "kvm-apic";
    } else if (xen_enabled()) {
        apic_type = "xen-apic";
    }

    cpu->apic_state = qdev_try_create(qdev_get_parent_bus(dev), apic_type);
    if (cpu->apic_state == NULL) {
        error_setg(errp, "APIC device '%s' could not be created", apic_type);
        return;
    }

    object_property_add_child(OBJECT(cpu), "apic",
                              OBJECT(cpu->apic_state), NULL);
    /* The APIC id mirrors the CPU's initial APIC ID. */
    qdev_prop_set_uint8(cpu->apic_state, "id", env->cpuid_apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
}

/* Initialize the APIC created above; a no-op when no APIC was created. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    if (cpu->apic_state == NULL) {
        return;
    }

    if (qdev_init(cpu->apic_state)) {
        error_setg(errp, "APIC device '%s' could not be initialized",
                   object_get_typename(OBJECT(cpu->apic_state)));
        return;
    }
}
#else
/* User-mode emulation has no APIC; keep the call site unconditional. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
#endif
/* True when the configured CPUID vendor words match the Intel vendor id. */
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
/* True when the configured CPUID vendor words match the AMD vendor id. */
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
/* DeviceClass::realize for X86CPU: finalize the CPUID configuration,
 * create the APIC when needed, start the vCPU, and reset it.  On failure
 * @errp is set and the parent realize is not invoked. */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;

    /* Advertising CPUID[7] features requires cpuid_level >= 7. */
    if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
        env->cpuid_level = 7;
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
           & CPUID_EXT2_AMD_ALIASES);
    }

    /* Features the accelerator lacks get filtered out; with "enforce"
     * that is a hard error instead of a silent downgrade. */
    if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
        error_setg(&local_err,
                   kvm_enabled() ?
                       "Host doesn't support requested features" :
                       "TCG doesn't support requested features");
        goto out;
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    /* An APIC is needed when the CPU advertises one or in any SMP setup. */
    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);
    qemu_init_vcpu(cs);

    /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
     * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to gives
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't be populated yet and the checking is incorrect.
     */
    if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
        error_report("AMD CPU doesn't support hyperthreading. Please configure"
                     " -smp options properly.");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);
out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
/* Enables contiguous-apic-ID mode, for compatibility */
/* When set, APIC IDs are handed out as plain CPU indexes instead of the
 * topology-derived IDs (old machine types depend on this). */
static bool compat_apic_id_mode;

void enable_compat_apic_id_mode(void)
{
    compat_apic_id_mode = true;
}
2799 /* Calculates initial APIC ID for a specific CPU index
2801 * Currently we need to be able to calculate the APIC ID from the CPU index
2802 * alone (without requiring a CPU object), as the QEMU<->Seabios interfaces have
2803 * no concept of "CPU index", and the NUMA tables on fw_cfg need the APIC ID of
2804 * all CPUs up to max_cpus.
2806 uint32_t x86_cpu_apic_id_from_index(unsigned int cpu_index)
2808 uint32_t correct_id;
2809 static bool warned;
2811 correct_id = x86_apicid_from_cpu_idx(smp_cores, smp_threads, cpu_index);
2812 if (compat_apic_id_mode) {
2813 if (cpu_index != correct_id && !warned) {
2814 error_report("APIC IDs set in compatibility mode, "
2815 "CPU topology won't match the configuration");
2816 warned = true;
2818 return cpu_index;
2819 } else {
2820 return correct_id;
/* TypeInfo::instance_init — wire up the env pointer, register all QOM
 * properties, assign the initial APIC ID, and load the class's model
 * definition.  One-time TCG table init happens on the first instance. */
static void x86_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    static int inited;

    cs->env_ptr = env;
    cpu_exec_init(env);

    /* CPUID identification fields (family/model/stepping pack into
     * cpuid_version; level/xlevel bound the basic/extended leaf ranges). */
    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add(obj, "level", "int",
                        x86_cpuid_get_level,
                        x86_cpuid_set_level, NULL, NULL, NULL);
    object_property_add(obj, "xlevel", "int",
                        x86_cpuid_get_xlevel,
                        x86_cpuid_set_xlevel, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    object_property_add(obj, "apic-id", "int",
                        x86_cpuid_get_apic_id,
                        x86_cpuid_set_apic_id, NULL, NULL, NULL);
    /* Read-only introspection of the requested vs. filtered feature sets. */
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
    env->cpuid_apic_id = x86_cpu_apic_id_from_index(cs->cpu_index);

    x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);

    /* init various static tables used in TCG mode */
    if (tcg_enabled() && !inited) {
        inited = 1;
        optimize_flags_init();
    }
}
2881 static int64_t x86_cpu_get_arch_id(CPUState *cs)
2883 X86CPU *cpu = X86_CPU(cs);
2884 CPUX86State *env = &cpu->env;
2886 return env->cpuid_apic_id;
2889 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
2891 X86CPU *cpu = X86_CPU(cs);
2893 return cpu->env.cr[0] & CR0_PG_MASK;
2896 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
2898 X86CPU *cpu = X86_CPU(cs);
2900 cpu->env.eip = value;
2903 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
2905 X86CPU *cpu = X86_CPU(cs);
2907 cpu->env.eip = tb->pc - tb->cs_base;
2910 static bool x86_cpu_has_work(CPUState *cs)
2912 X86CPU *cpu = X86_CPU(cs);
2913 CPUX86State *env = &cpu->env;
2915 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
2916 CPU_INTERRUPT_POLL)) &&
2917 (env->eflags & IF_MASK)) ||
2918 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
2919 CPU_INTERRUPT_INIT |
2920 CPU_INTERRUPT_SIPI |
2921 CPU_INTERRUPT_MCE));
static Property x86_cpu_properties[] = {
    /* Expose a guest PMU via CPUID[0xA] (KVM only; see cpu_x86_cpuid). */
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    /* Hyper-V enlightenments exposed to Windows guests. */
    { .name  = "hv-spinlocks", .info  = &qdev_prop_spinlocks },
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    /* "check" warns about, "enforce" rejects, features the host cannot
     * provide (see x86_cpu_filter_features usage in realize). */
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, false),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    /* Expose the KVM paravirt CPUID leaves to the guest. */
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_END_OF_LIST()
};
/* Class init for the abstract TYPE_X86_CPU base: hook realize/reset into
 * the device model and fill in every CPUClass callback. */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    /* Chain our realize in front of the device default. */
    xcc->parent_realize = dc->realize;
    dc->realize = x86_cpu_realizefn;
    dc->bus_type = TYPE_ICC_BUS;
    dc->props = x86_cpu_properties;

    /* Likewise chain reset. */
    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
    cc->dump_state = x86_cpu_dump_state;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
    /* System-mode only: physical memory introspection, dump support and
     * migration state. */
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
#ifndef CONFIG_USER_ONLY
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;
}
/* Abstract base type for all x86 CPU models; concrete models are
 * registered per builtin definition in x86_cpu_register_types(). */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};
2992 static void x86_cpu_register_types(void)
2994 int i;
2996 type_register_static(&x86_cpu_type_info);
2997 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2998 x86_register_cpudef_type(&builtin_x86_defs[i]);
3000 #ifdef CONFIG_KVM
3001 type_register_static(&host_x86_cpu_type_info);
3002 #endif
3005 type_init(x86_cpu_register_types)