qemu/ar7.git: target-i386/cpu.c
1 /*
2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include <stdlib.h>
20 #include <stdio.h>
21 #include <string.h>
22 #include <inttypes.h>
24 #include "cpu.h"
25 #include "sysemu/kvm.h"
26 #include "sysemu/cpus.h"
27 #include "kvm_i386.h"
28 #include "topology.h"
30 #include "qemu/option.h"
31 #include "qemu/config-file.h"
32 #include "qapi/qmp/qerror.h"
34 #include "qapi-types.h"
35 #include "qapi-visit.h"
36 #include "qapi/visitor.h"
37 #include "sysemu/arch_init.h"
39 #include "hw/hw.h"
40 #if defined(CONFIG_KVM)
41 #include <linux/kvm_para.h>
42 #endif
44 #include "sysemu/sysemu.h"
45 #include "hw/qdev-properties.h"
46 #include "hw/cpu/icc_bus.h"
47 #ifndef CONFIG_USER_ONLY
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
50 #endif
53 /* Cache topology CPUID constants: */
55 /* CPUID Leaf 2 Descriptors */
57 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
58 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
59 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
62 /* CPUID Leaf 4 constants: */
64 /* EAX: */
65 #define CPUID_4_TYPE_DCACHE 1
66 #define CPUID_4_TYPE_ICACHE 2
67 #define CPUID_4_TYPE_UNIFIED 3
69 #define CPUID_4_LEVEL(l) ((l) << 5)
71 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
72 #define CPUID_4_FULLY_ASSOC (1 << 9)
74 /* EDX: */
75 #define CPUID_4_NO_INVD_SHARING (1 << 0)
76 #define CPUID_4_INCLUSIVE (1 << 1)
77 #define CPUID_4_COMPLEX_IDX (1 << 2)
79 #define ASSOC_FULL 0xFF
81 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
82 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
83 a == 2 ? 0x2 : \
84 a == 4 ? 0x4 : \
85 a == 8 ? 0x6 : \
86 a == 16 ? 0x8 : \
87 a == 32 ? 0xA : \
88 a == 48 ? 0xB : \
89 a == 64 ? 0xC : \
90 a == 96 ? 0xD : \
91 a == 128 ? 0xE : \
92 a == ASSOC_FULL ? 0xF : \
93 0 /* invalid value */)
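/* Editor's illustration (not part of the original source): how AMD_ENC_ASSOC()
 * is meant to be read.  The 4-bit field in CPUID leaf 0x80000006 is a lookup
 * code, not the raw way count, so for example:
 *
 *   AMD_ENC_ASSOC(1)          == 0x1   (direct mapped)
 *   AMD_ENC_ASSOC(16)         == 0x8   (the 16-way L2 defined below)
 *   AMD_ENC_ASSOC(ASSOC_FULL) == 0xF   (fully associative)
 *   AMD_ENC_ASSOC(3)          == 0     (no encoding exists; reported as invalid)
 */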
96 /* Definitions of the hardcoded cache entries we expose: */
98 /* L1 data cache: */
99 #define L1D_LINE_SIZE 64
100 #define L1D_ASSOCIATIVITY 8
101 #define L1D_SETS 64
102 #define L1D_PARTITIONS 1
103 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
104 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
105 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
106 #define L1D_LINES_PER_TAG 1
107 #define L1D_SIZE_KB_AMD 64
108 #define L1D_ASSOCIATIVITY_AMD 2
110 /* L1 instruction cache: */
111 #define L1I_LINE_SIZE 64
112 #define L1I_ASSOCIATIVITY 8
113 #define L1I_SETS 64
114 #define L1I_PARTITIONS 1
115 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
116 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
117 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
118 #define L1I_LINES_PER_TAG 1
119 #define L1I_SIZE_KB_AMD 64
120 #define L1I_ASSOCIATIVITY_AMD 2
122 /* Level 2 unified cache: */
123 #define L2_LINE_SIZE 64
124 #define L2_ASSOCIATIVITY 16
125 #define L2_SETS 4096
126 #define L2_PARTITIONS 1
127 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
128 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
129 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
130 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
131 #define L2_LINES_PER_TAG 1
132 #define L2_SIZE_KB_AMD 512
134 /* No L3 cache: */
135 #define L3_SIZE_KB 0 /* disabled */
136 #define L3_ASSOCIATIVITY 0 /* disabled */
137 #define L3_LINES_PER_TAG 0 /* disabled */
138 #define L3_LINE_SIZE 0 /* disabled */
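/* Editor's note, worked numbers for the constants above (illustrative only):
 * the CPUID leaf 4 geometry gives
 *   L1D/L1I: 64 B line * 8 ways * 64 sets * 1 partition   = 32 KiB
 *   L2:      64 B line * 16 ways * 4096 sets * 1 partition = 4 MiB
 * while the AMD-style leaves advertise 64 KiB 2-way L1 caches and a 512 KiB
 * L2, and the leaf 2 descriptor claims a 2 MB L2; the FIXME comments above
 * flag exactly these mismatches.
 */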
140 /* TLB definitions: */
142 #define L1_DTLB_2M_ASSOC 1
143 #define L1_DTLB_2M_ENTRIES 255
144 #define L1_DTLB_4K_ASSOC 1
145 #define L1_DTLB_4K_ENTRIES 255
147 #define L1_ITLB_2M_ASSOC 1
148 #define L1_ITLB_2M_ENTRIES 255
149 #define L1_ITLB_4K_ASSOC 1
150 #define L1_ITLB_4K_ENTRIES 255
152 #define L2_DTLB_2M_ASSOC 0 /* disabled */
153 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
154 #define L2_DTLB_4K_ASSOC 4
155 #define L2_DTLB_4K_ENTRIES 512
157 #define L2_ITLB_2M_ASSOC 0 /* disabled */
158 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
159 #define L2_ITLB_4K_ASSOC 4
160 #define L2_ITLB_4K_ENTRIES 512
164 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
165 uint32_t vendor2, uint32_t vendor3)
167 int i;
168 for (i = 0; i < 4; i++) {
169 dst[i] = vendor1 >> (8 * i);
170 dst[i + 4] = vendor2 >> (8 * i);
171 dst[i + 8] = vendor3 >> (8 * i);
173 dst[CPUID_VENDOR_SZ] = '\0';
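/* Example (added for illustration, not in the original file): for
 * "GenuineIntel", CPUID.0 reports EBX=0x756e6547 ("Genu"), EDX=0x49656e69
 * ("ineI") and ECX=0x6c65746e ("ntel"), so
 *
 *   char v[CPUID_VENDOR_SZ + 1];
 *   x86_cpu_vendor_words2str(v, 0x756e6547, 0x49656e69, 0x6c65746e);
 *
 * yields v == "GenuineIntel"; each 32-bit word is unpacked least
 * significant byte first, which is why the register order is EBX, EDX, ECX.
 */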
176 /* feature flags taken from "Intel Processor Identification and the CPUID
177 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
178 * between feature naming conventions, aliases may be added.
179 */
180 static const char *feature_name[] = {
181 "fpu", "vme", "de", "pse",
182 "tsc", "msr", "pae", "mce",
183 "cx8", "apic", NULL, "sep",
184 "mtrr", "pge", "mca", "cmov",
185 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
186 NULL, "ds" /* Intel dts */, "acpi", "mmx",
187 "fxsr", "sse", "sse2", "ss",
188 "ht" /* Intel htt */, "tm", "ia64", "pbe",
190 static const char *ext_feature_name[] = {
191 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
192 "ds_cpl", "vmx", "smx", "est",
193 "tm2", "ssse3", "cid", NULL,
194 "fma", "cx16", "xtpr", "pdcm",
195 NULL, "pcid", "dca", "sse4.1|sse4_1",
196 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
197 "tsc-deadline", "aes", "xsave", "osxsave",
198 "avx", "f16c", "rdrand", "hypervisor",
200 /* Feature names that are already defined on feature_name[] but are set on
201 * CPUID[8000_0001].EDX on AMD CPUs don't have their names on
202 * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
203 * if and only if CPU vendor is AMD.
204 */
205 static const char *ext2_feature_name[] = {
206 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
207 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
208 NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
209 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
210 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
211 "nx|xd", NULL, "mmxext", NULL /* mmx */,
212 NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
213 NULL, "lm|i64", "3dnowext", "3dnow",
215 static const char *ext3_feature_name[] = {
216 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
217 "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
218 "3dnowprefetch", "osvw", "ibs", "xop",
219 "skinit", "wdt", NULL, "lwp",
220 "fma4", "tce", NULL, "nodeid_msr",
221 NULL, "tbm", "topoext", "perfctr_core",
222 "perfctr_nb", NULL, NULL, NULL,
223 NULL, NULL, NULL, NULL,
226 static const char *ext4_feature_name[] = {
227 NULL, NULL, "xstore", "xstore-en",
228 NULL, NULL, "xcrypt", "xcrypt-en",
229 "ace2", "ace2-en", "phe", "phe-en",
230 "pmm", "pmm-en", NULL, NULL,
231 NULL, NULL, NULL, NULL,
232 NULL, NULL, NULL, NULL,
233 NULL, NULL, NULL, NULL,
234 NULL, NULL, NULL, NULL,
237 static const char *kvm_feature_name[] = {
238 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
239 "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
240 NULL, NULL, NULL, NULL,
241 NULL, NULL, NULL, NULL,
242 NULL, NULL, NULL, NULL,
243 NULL, NULL, NULL, NULL,
244 "kvmclock-stable-bit", NULL, NULL, NULL,
245 NULL, NULL, NULL, NULL,
248 static const char *svm_feature_name[] = {
249 "npt", "lbrv", "svm_lock", "nrip_save",
250 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
251 NULL, NULL, "pause_filter", NULL,
252 "pfthreshold", NULL, NULL, NULL,
253 NULL, NULL, NULL, NULL,
254 NULL, NULL, NULL, NULL,
255 NULL, NULL, NULL, NULL,
256 NULL, NULL, NULL, NULL,
259 static const char *cpuid_7_0_ebx_feature_name[] = {
260 "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
261 "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
262 NULL, NULL, "rdseed", "adx", "smap", NULL, NULL, NULL,
263 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
266 static const char *cpuid_apm_edx_feature_name[] = {
267 NULL, NULL, NULL, NULL,
268 NULL, NULL, NULL, NULL,
269 "invtsc", NULL, NULL, NULL,
270 NULL, NULL, NULL, NULL,
271 NULL, NULL, NULL, NULL,
272 NULL, NULL, NULL, NULL,
273 NULL, NULL, NULL, NULL,
274 NULL, NULL, NULL, NULL,
277 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
278 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
279 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
280 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
281 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
282 CPUID_PSE36 | CPUID_FXSR)
283 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
284 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
285 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
286 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
287 CPUID_PAE | CPUID_SEP | CPUID_APIC)
289 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
290 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
291 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
292 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
293 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS)
294 /* partly implemented:
295 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
296 /* missing:
297 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
298 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
299 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
300 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
301 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
302 /* missing:
303 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
304 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
305 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
306 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_XSAVE,
307 CPUID_EXT_OSXSAVE, CPUID_EXT_AVX, CPUID_EXT_F16C,
308 CPUID_EXT_RDRAND */
310 #ifdef TARGET_X86_64
311 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
312 #else
313 #define TCG_EXT2_X86_64_FEATURES 0
314 #endif
316 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
317 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
318 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
319 TCG_EXT2_X86_64_FEATURES)
320 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
321 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
322 #define TCG_EXT4_FEATURES 0
323 #define TCG_SVM_FEATURES 0
324 #define TCG_KVM_FEATURES 0
325 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
326 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX)
327 /* missing:
328 CPUID_7_0_EBX_FSGSBASE, CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
329 CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
330 CPUID_7_0_EBX_RDSEED */
331 #define TCG_APM_FEATURES 0
334 typedef struct FeatureWordInfo {
335 const char **feat_names;
336 uint32_t cpuid_eax; /* Input EAX for CPUID */
337 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
338 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
339 int cpuid_reg; /* output register (R_* constant) */
340 uint32_t tcg_features; /* Feature flags supported by TCG */
341 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
342 } FeatureWordInfo;
344 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
345 [FEAT_1_EDX] = {
346 .feat_names = feature_name,
347 .cpuid_eax = 1, .cpuid_reg = R_EDX,
348 .tcg_features = TCG_FEATURES,
350 [FEAT_1_ECX] = {
351 .feat_names = ext_feature_name,
352 .cpuid_eax = 1, .cpuid_reg = R_ECX,
353 .tcg_features = TCG_EXT_FEATURES,
355 [FEAT_8000_0001_EDX] = {
356 .feat_names = ext2_feature_name,
357 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
358 .tcg_features = TCG_EXT2_FEATURES,
360 [FEAT_8000_0001_ECX] = {
361 .feat_names = ext3_feature_name,
362 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
363 .tcg_features = TCG_EXT3_FEATURES,
365 [FEAT_C000_0001_EDX] = {
366 .feat_names = ext4_feature_name,
367 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
368 .tcg_features = TCG_EXT4_FEATURES,
370 [FEAT_KVM] = {
371 .feat_names = kvm_feature_name,
372 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
373 .tcg_features = TCG_KVM_FEATURES,
375 [FEAT_SVM] = {
376 .feat_names = svm_feature_name,
377 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
378 .tcg_features = TCG_SVM_FEATURES,
380 [FEAT_7_0_EBX] = {
381 .feat_names = cpuid_7_0_ebx_feature_name,
382 .cpuid_eax = 7,
383 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
384 .cpuid_reg = R_EBX,
385 .tcg_features = TCG_7_0_EBX_FEATURES,
387 [FEAT_8000_0007_EDX] = {
388 .feat_names = cpuid_apm_edx_feature_name,
389 .cpuid_eax = 0x80000007,
390 .cpuid_reg = R_EDX,
391 .tcg_features = TCG_APM_FEATURES,
392 .unmigratable_flags = CPUID_APM_INVTSC,
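/* Editor's sketch (hypothetical helper, not in the original file): the table
 * above already carries everything needed to read one feature word straight
 * from the host CPU.  The real code goes through
 * kvm_arch_get_supported_cpuid() in x86_cpu_get_supported_feature_word()
 * further below; this is only meant to show how the cpuid_eax / cpuid_ecx /
 * cpuid_reg fields are intended to be used.
 */
#if 0
static uint32_t probe_feature_word_on_host(FeatureWord w)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t regs[4]; /* indexed by the R_* constants */

    host_cpuid(wi->cpuid_eax, wi->cpuid_needs_ecx ? wi->cpuid_ecx : 0,
               &regs[R_EAX], &regs[R_EBX], &regs[R_ECX], &regs[R_EDX]);
    return regs[wi->cpuid_reg];
}
#endif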
396 typedef struct X86RegisterInfo32 {
397 /* Name of register */
398 const char *name;
399 /* QAPI enum value register */
400 X86CPURegister32 qapi_enum;
401 } X86RegisterInfo32;
403 #define REGISTER(reg) \
404 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
405 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
406 REGISTER(EAX),
407 REGISTER(ECX),
408 REGISTER(EDX),
409 REGISTER(EBX),
410 REGISTER(ESP),
411 REGISTER(EBP),
412 REGISTER(ESI),
413 REGISTER(EDI),
415 #undef REGISTER
417 typedef struct ExtSaveArea {
418 uint32_t feature, bits;
419 uint32_t offset, size;
420 } ExtSaveArea;
422 static const ExtSaveArea ext_save_areas[] = {
423 [2] = { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
424 .offset = 0x240, .size = 0x100 },
425 [3] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
426 .offset = 0x3c0, .size = 0x40 },
427 [4] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
428 .offset = 0x400, .size = 0x40 },
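/* Editor's note (illustrative arithmetic, not in the original): the sizes
 * above follow from the XSAVE state components: component 2 (AVX) holds the
 * upper 128 bits of the 16 YMM registers, 16 * 16 bytes = 0x100 at offset
 * 0x240; components 3 and 4 (MPX) hold the four 16-byte bound registers
 * (4 * 16 = 0x40) at 0x3c0 and the 0x40-byte BNDCSR (BNDCFGU/BNDSTATUS)
 * area at 0x400.
 */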
431 const char *get_register_name_32(unsigned int reg)
433 if (reg >= CPU_NB_REGS32) {
434 return NULL;
436 return x86_reg_info_32[reg].name;
439 /* collects per-function cpuid data
440 */
441 typedef struct model_features_t {
442 uint32_t *guest_feat;
443 uint32_t *host_feat;
444 FeatureWord feat_word;
445 } model_features_t;
447 /* KVM-specific features that are automatically added to all CPU models
448 * when KVM is enabled.
449 */
450 static uint32_t kvm_default_features[FEATURE_WORDS] = {
451 [FEAT_KVM] = (1 << KVM_FEATURE_CLOCKSOURCE) |
452 (1 << KVM_FEATURE_NOP_IO_DELAY) |
453 (1 << KVM_FEATURE_CLOCKSOURCE2) |
454 (1 << KVM_FEATURE_ASYNC_PF) |
455 (1 << KVM_FEATURE_STEAL_TIME) |
456 (1 << KVM_FEATURE_PV_EOI) |
457 (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT),
458 [FEAT_1_ECX] = CPUID_EXT_X2APIC,
461 /* Features that are not added by default to any CPU model when KVM is enabled.
462 */
463 static uint32_t kvm_default_unset_features[FEATURE_WORDS] = {
464 [FEAT_1_ECX] = CPUID_EXT_MONITOR,
467 void x86_cpu_compat_disable_kvm_features(FeatureWord w, uint32_t features)
469 kvm_default_features[w] &= ~features;
472 /*
473 * Returns the set of feature flags that are supported and migratable by
474 * QEMU, for a given FeatureWord.
475 */
476 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
478 FeatureWordInfo *wi = &feature_word_info[w];
479 uint32_t r = 0;
480 int i;
482 for (i = 0; i < 32; i++) {
483 uint32_t f = 1U << i;
484 /* If the feature name is unknown, it is not supported by QEMU yet */
485 if (!wi->feat_names[i]) {
486 continue;
488 /* Skip features known to QEMU, but explicitly marked as unmigratable */
489 if (wi->unmigratable_flags & f) {
490 continue;
492 r |= f;
494 return r;
497 void host_cpuid(uint32_t function, uint32_t count,
498 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
500 uint32_t vec[4];
502 #ifdef __x86_64__
503 asm volatile("cpuid"
504 : "=a"(vec[0]), "=b"(vec[1]),
505 "=c"(vec[2]), "=d"(vec[3])
506 : "0"(function), "c"(count) : "cc");
507 #elif defined(__i386__)
508 asm volatile("pusha \n\t"
509 "cpuid \n\t"
510 "mov %%eax, 0(%2) \n\t"
511 "mov %%ebx, 4(%2) \n\t"
512 "mov %%ecx, 8(%2) \n\t"
513 "mov %%edx, 12(%2) \n\t"
514 "popa"
515 : : "a"(function), "c"(count), "S"(vec)
516 : "memory", "cc");
517 #else
518 abort();
519 #endif
521 if (eax)
522 *eax = vec[0];
523 if (ebx)
524 *ebx = vec[1];
525 if (ecx)
526 *ecx = vec[2];
527 if (edx)
528 *edx = vec[3];
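/* Usage example (editor's addition, illustrative only): the "host" CPU class
 * below uses this helper to read the vendor and version words, e.g.:
 *
 *   uint32_t eax, ebx, ecx, edx;
 *   char vendor[CPUID_VENDOR_SZ + 1];
 *
 *   host_cpuid(0, 0, &eax, &ebx, &ecx, &edx);
 *   x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
 *   (eax now holds the highest standard leaf, vendor e.g. "GenuineIntel")
 *
 * Any of the output pointers may be NULL when the caller does not care about
 * that register.
 */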
531 #define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
533 /* general substring compare of *[s1..e1) and *[s2..e2). sx is the start of
534 * a substring. ex, if not NULL, points to the first char after the substring,
535 * otherwise the string is assumed to be sized by a terminating nul.
536 * Return lexical ordering of *s1:*s2.
537 */
538 static int sstrcmp(const char *s1, const char *e1, const char *s2,
539 const char *e2)
541 for (;;) {
542 if (!*s1 || !*s2 || *s1 != *s2)
543 return (*s1 - *s2);
544 ++s1, ++s2;
545 if (s1 == e1 && s2 == e2)
546 return (0);
547 else if (s1 == e1)
548 return (*s2);
549 else if (s2 == e2)
550 return (*s1);
554 /* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
555 * '|' delimited (possibly empty) strings in which case search for a match
556 * within the alternatives proceeds left to right. Return 0 for success,
557 * non-zero otherwise.
558 */
559 static int altcmp(const char *s, const char *e, const char *altstr)
561 const char *p, *q;
563 for (q = p = altstr; ; ) {
564 while (*p && *p != '|')
565 ++p;
566 if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
567 return (0);
568 if (!*p)
569 return (1);
570 else
571 q = ++p;
575 /* search featureset for flag *[s..e), if found set corresponding bit in
576 * *pval and return true, otherwise return false
577 */
578 static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
579 const char **featureset)
581 uint32_t mask;
582 const char **ppc;
583 bool found = false;
585 for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
586 if (*ppc && !altcmp(s, e, *ppc)) {
587 *pval |= mask;
588 found = true;
591 return found;
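/* Worked example (editor's note): with ext_feature_name[] above,
 * lookup_feature(&val, "sse4_1", NULL, ext_feature_name) matches the
 * "sse4.1|sse4_1" alternatives at bit 19 and sets val |= (1 << 19), i.e.
 * CPUID_EXT_SSE41, so both spellings on the command line resolve to the
 * same feature bit.
 */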
594 static void add_flagname_to_bitmaps(const char *flagname,
595 FeatureWordArray words,
596 Error **errp)
598 FeatureWord w;
599 for (w = 0; w < FEATURE_WORDS; w++) {
600 FeatureWordInfo *wi = &feature_word_info[w];
601 if (wi->feat_names &&
602 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
603 break;
606 if (w == FEATURE_WORDS) {
607 error_setg(errp, "CPU feature %s not found", flagname);
611 /* CPU class name definitions: */
613 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
614 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
616 /* Return type name for a given CPU model name
617 * Caller is responsible for freeing the returned string.
618 */
619 static char *x86_cpu_type_name(const char *model_name)
621 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
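/* Example (editor's note): on an x86_64 target build, where TYPE_X86_CPU is
 * "x86_64-cpu", x86_cpu_type_name("SandyBridge") returns the QOM type name
 * "SandyBridge-x86_64-cpu"; the caller must g_free() the result.
 */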
624 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
626 ObjectClass *oc;
627 char *typename;
629 if (cpu_model == NULL) {
630 return NULL;
633 typename = x86_cpu_type_name(cpu_model);
634 oc = object_class_by_name(typename);
635 g_free(typename);
636 return oc;
639 struct X86CPUDefinition {
640 const char *name;
641 uint32_t level;
642 uint32_t xlevel;
643 uint32_t xlevel2;
644 /* vendor is zero-terminated, 12 character ASCII string */
645 char vendor[CPUID_VENDOR_SZ + 1];
646 int family;
647 int model;
648 int stepping;
649 FeatureWordArray features;
650 char model_id[48];
651 bool cache_info_passthrough;
654 static X86CPUDefinition builtin_x86_defs[] = {
656 .name = "qemu64",
657 .level = 4,
658 .vendor = CPUID_VENDOR_AMD,
659 .family = 6,
660 .model = 6,
661 .stepping = 3,
662 .features[FEAT_1_EDX] =
663 PPRO_FEATURES |
664 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
665 CPUID_PSE36,
666 .features[FEAT_1_ECX] =
667 CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT,
668 .features[FEAT_8000_0001_EDX] =
669 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
670 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
671 .features[FEAT_8000_0001_ECX] =
672 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
673 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
674 .xlevel = 0x8000000A,
677 .name = "phenom",
678 .level = 5,
679 .vendor = CPUID_VENDOR_AMD,
680 .family = 16,
681 .model = 2,
682 .stepping = 3,
683 .features[FEAT_1_EDX] =
684 PPRO_FEATURES |
685 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
686 CPUID_PSE36 | CPUID_VME | CPUID_HT,
687 .features[FEAT_1_ECX] =
688 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
689 CPUID_EXT_POPCNT,
690 .features[FEAT_8000_0001_EDX] =
691 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
692 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
693 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
694 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
695 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
696 CPUID_EXT3_CR8LEG,
697 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
698 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
699 .features[FEAT_8000_0001_ECX] =
700 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
701 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
702 .features[FEAT_SVM] =
703 CPUID_SVM_NPT | CPUID_SVM_LBRV,
704 .xlevel = 0x8000001A,
705 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
708 .name = "core2duo",
709 .level = 10,
710 .vendor = CPUID_VENDOR_INTEL,
711 .family = 6,
712 .model = 15,
713 .stepping = 11,
714 .features[FEAT_1_EDX] =
715 PPRO_FEATURES |
716 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
717 CPUID_PSE36 | CPUID_VME | CPUID_DTS | CPUID_ACPI | CPUID_SS |
718 CPUID_HT | CPUID_TM | CPUID_PBE,
719 .features[FEAT_1_ECX] =
720 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
721 CPUID_EXT_DTES64 | CPUID_EXT_DSCPL | CPUID_EXT_VMX | CPUID_EXT_EST |
722 CPUID_EXT_TM2 | CPUID_EXT_CX16 | CPUID_EXT_XTPR | CPUID_EXT_PDCM,
723 .features[FEAT_8000_0001_EDX] =
724 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
725 .features[FEAT_8000_0001_ECX] =
726 CPUID_EXT3_LAHF_LM,
727 .xlevel = 0x80000008,
728 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
731 .name = "kvm64",
732 .level = 5,
733 .vendor = CPUID_VENDOR_INTEL,
734 .family = 15,
735 .model = 6,
736 .stepping = 1,
737 /* Missing: CPUID_VME, CPUID_HT */
738 .features[FEAT_1_EDX] =
739 PPRO_FEATURES |
740 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
741 CPUID_PSE36,
742 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
743 .features[FEAT_1_ECX] =
744 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
745 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
746 .features[FEAT_8000_0001_EDX] =
747 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
748 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
749 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
750 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
751 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
752 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
753 .features[FEAT_8000_0001_ECX] =
754 0,
755 .xlevel = 0x80000008,
756 .model_id = "Common KVM processor"
759 .name = "qemu32",
760 .level = 4,
761 .vendor = CPUID_VENDOR_INTEL,
762 .family = 6,
763 .model = 6,
764 .stepping = 3,
765 .features[FEAT_1_EDX] =
766 PPRO_FEATURES,
767 .features[FEAT_1_ECX] =
768 CPUID_EXT_SSE3 | CPUID_EXT_POPCNT,
769 .xlevel = 0x80000004,
772 .name = "kvm32",
773 .level = 5,
774 .vendor = CPUID_VENDOR_INTEL,
775 .family = 15,
776 .model = 6,
777 .stepping = 1,
778 .features[FEAT_1_EDX] =
779 PPRO_FEATURES |
780 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
781 .features[FEAT_1_ECX] =
782 CPUID_EXT_SSE3,
783 .features[FEAT_8000_0001_EDX] =
784 PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES,
785 .features[FEAT_8000_0001_ECX] =
786 0,
787 .xlevel = 0x80000008,
788 .model_id = "Common 32-bit KVM processor"
791 .name = "coreduo",
792 .level = 10,
793 .vendor = CPUID_VENDOR_INTEL,
794 .family = 6,
795 .model = 14,
796 .stepping = 8,
797 .features[FEAT_1_EDX] =
798 PPRO_FEATURES | CPUID_VME |
799 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_DTS | CPUID_ACPI |
800 CPUID_SS | CPUID_HT | CPUID_TM | CPUID_PBE,
801 .features[FEAT_1_ECX] =
802 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_VMX |
803 CPUID_EXT_EST | CPUID_EXT_TM2 | CPUID_EXT_XTPR | CPUID_EXT_PDCM,
804 .features[FEAT_8000_0001_EDX] =
805 CPUID_EXT2_NX,
806 .xlevel = 0x80000008,
807 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
810 .name = "486",
811 .level = 1,
812 .vendor = CPUID_VENDOR_INTEL,
813 .family = 4,
814 .model = 8,
815 .stepping = 0,
816 .features[FEAT_1_EDX] =
817 I486_FEATURES,
818 .xlevel = 0,
821 .name = "pentium",
822 .level = 1,
823 .vendor = CPUID_VENDOR_INTEL,
824 .family = 5,
825 .model = 4,
826 .stepping = 3,
827 .features[FEAT_1_EDX] =
828 PENTIUM_FEATURES,
829 .xlevel = 0,
832 .name = "pentium2",
833 .level = 2,
834 .vendor = CPUID_VENDOR_INTEL,
835 .family = 6,
836 .model = 5,
837 .stepping = 2,
838 .features[FEAT_1_EDX] =
839 PENTIUM2_FEATURES,
840 .xlevel = 0,
843 .name = "pentium3",
844 .level = 2,
845 .vendor = CPUID_VENDOR_INTEL,
846 .family = 6,
847 .model = 7,
848 .stepping = 3,
849 .features[FEAT_1_EDX] =
850 PENTIUM3_FEATURES,
851 .xlevel = 0,
854 .name = "athlon",
855 .level = 2,
856 .vendor = CPUID_VENDOR_AMD,
857 .family = 6,
858 .model = 2,
859 .stepping = 3,
860 .features[FEAT_1_EDX] =
861 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
862 CPUID_MCA,
863 .features[FEAT_8000_0001_EDX] =
864 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
865 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
866 .xlevel = 0x80000008,
869 .name = "n270",
870 /* original is on level 10 */
871 .level = 5,
872 .vendor = CPUID_VENDOR_INTEL,
873 .family = 6,
874 .model = 28,
875 .stepping = 2,
876 .features[FEAT_1_EDX] =
877 PPRO_FEATURES |
878 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME | CPUID_DTS |
879 CPUID_ACPI | CPUID_SS | CPUID_HT | CPUID_TM | CPUID_PBE,
880 /* Some CPUs have no CPUID_SEP */
881 .features[FEAT_1_ECX] =
882 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
883 CPUID_EXT_DSCPL | CPUID_EXT_EST | CPUID_EXT_TM2 | CPUID_EXT_XTPR |
884 CPUID_EXT_MOVBE,
885 .features[FEAT_8000_0001_EDX] =
886 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
887 CPUID_EXT2_NX,
888 .features[FEAT_8000_0001_ECX] =
889 CPUID_EXT3_LAHF_LM,
890 .xlevel = 0x8000000A,
891 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
894 .name = "Conroe",
895 .level = 4,
896 .vendor = CPUID_VENDOR_INTEL,
897 .family = 6,
898 .model = 15,
899 .stepping = 3,
900 .features[FEAT_1_EDX] =
901 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
902 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
903 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
904 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
905 CPUID_DE | CPUID_FP87,
906 .features[FEAT_1_ECX] =
907 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
908 .features[FEAT_8000_0001_EDX] =
909 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
910 .features[FEAT_8000_0001_ECX] =
911 CPUID_EXT3_LAHF_LM,
912 .xlevel = 0x8000000A,
913 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
916 .name = "Penryn",
917 .level = 4,
918 .vendor = CPUID_VENDOR_INTEL,
919 .family = 6,
920 .model = 23,
921 .stepping = 3,
922 .features[FEAT_1_EDX] =
923 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
924 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
925 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
926 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
927 CPUID_DE | CPUID_FP87,
928 .features[FEAT_1_ECX] =
929 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
930 CPUID_EXT_SSE3,
931 .features[FEAT_8000_0001_EDX] =
932 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
933 .features[FEAT_8000_0001_ECX] =
934 CPUID_EXT3_LAHF_LM,
935 .xlevel = 0x8000000A,
936 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
939 .name = "Nehalem",
940 .level = 4,
941 .vendor = CPUID_VENDOR_INTEL,
942 .family = 6,
943 .model = 26,
944 .stepping = 3,
945 .features[FEAT_1_EDX] =
946 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
947 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
948 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
949 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
950 CPUID_DE | CPUID_FP87,
951 .features[FEAT_1_ECX] =
952 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
953 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
954 .features[FEAT_8000_0001_EDX] =
955 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
956 .features[FEAT_8000_0001_ECX] =
957 CPUID_EXT3_LAHF_LM,
958 .xlevel = 0x8000000A,
959 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
962 .name = "Westmere",
963 .level = 11,
964 .vendor = CPUID_VENDOR_INTEL,
965 .family = 6,
966 .model = 44,
967 .stepping = 1,
968 .features[FEAT_1_EDX] =
969 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
970 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
971 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
972 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
973 CPUID_DE | CPUID_FP87,
974 .features[FEAT_1_ECX] =
975 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
976 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
977 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
978 .features[FEAT_8000_0001_EDX] =
979 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
980 .features[FEAT_8000_0001_ECX] =
981 CPUID_EXT3_LAHF_LM,
982 .xlevel = 0x8000000A,
983 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
986 .name = "SandyBridge",
987 .level = 0xd,
988 .vendor = CPUID_VENDOR_INTEL,
989 .family = 6,
990 .model = 42,
991 .stepping = 1,
992 .features[FEAT_1_EDX] =
993 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
994 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
995 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
996 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
997 CPUID_DE | CPUID_FP87,
998 .features[FEAT_1_ECX] =
999 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1000 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1001 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1002 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1003 CPUID_EXT_SSE3,
1004 .features[FEAT_8000_0001_EDX] =
1005 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1006 CPUID_EXT2_SYSCALL,
1007 .features[FEAT_8000_0001_ECX] =
1008 CPUID_EXT3_LAHF_LM,
1009 .xlevel = 0x8000000A,
1010 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1013 .name = "Haswell",
1014 .level = 0xd,
1015 .vendor = CPUID_VENDOR_INTEL,
1016 .family = 6,
1017 .model = 60,
1018 .stepping = 1,
1019 .features[FEAT_1_EDX] =
1020 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1021 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1022 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1023 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1024 CPUID_DE | CPUID_FP87,
1025 .features[FEAT_1_ECX] =
1026 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1027 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1028 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1029 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1030 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1031 CPUID_EXT_PCID,
1032 .features[FEAT_8000_0001_EDX] =
1033 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1034 CPUID_EXT2_SYSCALL,
1035 .features[FEAT_8000_0001_ECX] =
1036 CPUID_EXT3_LAHF_LM,
1037 .features[FEAT_7_0_EBX] =
1038 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1039 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1040 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1041 CPUID_7_0_EBX_RTM,
1042 .xlevel = 0x8000000A,
1043 .model_id = "Intel Core Processor (Haswell)",
1046 .name = "Broadwell",
1047 .level = 0xd,
1048 .vendor = CPUID_VENDOR_INTEL,
1049 .family = 6,
1050 .model = 61,
1051 .stepping = 2,
1052 .features[FEAT_1_EDX] =
1053 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1054 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1055 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1056 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1057 CPUID_DE | CPUID_FP87,
1058 .features[FEAT_1_ECX] =
1059 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1060 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1061 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1062 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1063 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1064 CPUID_EXT_PCID,
1065 .features[FEAT_8000_0001_EDX] =
1066 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1067 CPUID_EXT2_SYSCALL,
1068 .features[FEAT_8000_0001_ECX] =
1069 CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1070 .features[FEAT_7_0_EBX] =
1071 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1072 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1073 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1074 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1075 CPUID_7_0_EBX_SMAP,
1076 .xlevel = 0x8000000A,
1077 .model_id = "Intel Core Processor (Broadwell)",
1080 .name = "Opteron_G1",
1081 .level = 5,
1082 .vendor = CPUID_VENDOR_AMD,
1083 .family = 15,
1084 .model = 6,
1085 .stepping = 1,
1086 .features[FEAT_1_EDX] =
1087 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1088 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1089 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1090 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1091 CPUID_DE | CPUID_FP87,
1092 .features[FEAT_1_ECX] =
1093 CPUID_EXT_SSE3,
1094 .features[FEAT_8000_0001_EDX] =
1095 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1096 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1097 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1098 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1099 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1100 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1101 .xlevel = 0x80000008,
1102 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1105 .name = "Opteron_G2",
1106 .level = 5,
1107 .vendor = CPUID_VENDOR_AMD,
1108 .family = 15,
1109 .model = 6,
1110 .stepping = 1,
1111 .features[FEAT_1_EDX] =
1112 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1113 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1114 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1115 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1116 CPUID_DE | CPUID_FP87,
1117 .features[FEAT_1_ECX] =
1118 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1119 .features[FEAT_8000_0001_EDX] =
1120 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1121 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1122 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1123 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1124 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1125 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1126 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1127 .features[FEAT_8000_0001_ECX] =
1128 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1129 .xlevel = 0x80000008,
1130 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1133 .name = "Opteron_G3",
1134 .level = 5,
1135 .vendor = CPUID_VENDOR_AMD,
1136 .family = 15,
1137 .model = 6,
1138 .stepping = 1,
1139 .features[FEAT_1_EDX] =
1140 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1141 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1142 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1143 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1144 CPUID_DE | CPUID_FP87,
1145 .features[FEAT_1_ECX] =
1146 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1147 CPUID_EXT_SSE3,
1148 .features[FEAT_8000_0001_EDX] =
1149 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1150 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1151 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1152 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1153 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1154 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1155 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1156 .features[FEAT_8000_0001_ECX] =
1157 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1158 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1159 .xlevel = 0x80000008,
1160 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1163 .name = "Opteron_G4",
1164 .level = 0xd,
1165 .vendor = CPUID_VENDOR_AMD,
1166 .family = 21,
1167 .model = 1,
1168 .stepping = 2,
1169 .features[FEAT_1_EDX] =
1170 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1171 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1172 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1173 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1174 CPUID_DE | CPUID_FP87,
1175 .features[FEAT_1_ECX] =
1176 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1177 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1178 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1179 CPUID_EXT_SSE3,
1180 .features[FEAT_8000_0001_EDX] =
1181 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1182 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1183 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1184 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1185 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1186 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1187 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1188 .features[FEAT_8000_0001_ECX] =
1189 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1190 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1191 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1192 CPUID_EXT3_LAHF_LM,
1193 .xlevel = 0x8000001A,
1194 .model_id = "AMD Opteron 62xx class CPU",
1197 .name = "Opteron_G5",
1198 .level = 0xd,
1199 .vendor = CPUID_VENDOR_AMD,
1200 .family = 21,
1201 .model = 2,
1202 .stepping = 0,
1203 .features[FEAT_1_EDX] =
1204 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1205 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1206 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1207 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1208 CPUID_DE | CPUID_FP87,
1209 .features[FEAT_1_ECX] =
1210 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1211 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1212 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1213 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1214 .features[FEAT_8000_0001_EDX] =
1215 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1216 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1217 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1218 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1219 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1220 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1221 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1222 .features[FEAT_8000_0001_ECX] =
1223 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1224 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1225 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1226 CPUID_EXT3_LAHF_LM,
1227 .xlevel = 0x8000001A,
1228 .model_id = "AMD Opteron 63xx class CPU",
1232 /*
1233 * x86_cpu_compat_set_features:
1234 * @cpu_model: CPU model name to be changed. If NULL, all CPU models are changed
1235 * @w: Identifies the feature word to be changed.
1236 * @feat_add: Feature bits to be added to feature word
1237 * @feat_remove: Feature bits to be removed from feature word
1238 *
1239 * Change CPU model feature bits for compatibility.
1240 *
1241 * This function may be used by machine-type compatibility functions
1242 * to enable or disable feature bits on specific CPU models.
1243 */
1244 void x86_cpu_compat_set_features(const char *cpu_model, FeatureWord w,
1245 uint32_t feat_add, uint32_t feat_remove)
1247 X86CPUDefinition *def;
1248 int i;
1249 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1250 def = &builtin_x86_defs[i];
1251 if (!cpu_model || !strcmp(cpu_model, def->name)) {
1252 def->features[w] |= feat_add;
1253 def->features[w] &= ~feat_remove;
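/* Example (editor's illustration, hypothetical call sites): a machine-type
 * compat hook could strip a flag from every built-in model,
 *
 *   x86_cpu_compat_set_features(NULL, FEAT_1_ECX, 0, CPUID_EXT_X2APIC);
 *
 * or add one to a single model only,
 *
 *   x86_cpu_compat_set_features("kvm64", FEAT_1_ECX, CPUID_EXT_POPCNT, 0);
 *
 * Note this rewrites the builtin_x86_defs[] table itself, so it only affects
 * CPUs instantiated after the call.
 */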
1258 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1259 bool migratable_only);
1261 #ifdef CONFIG_KVM
1263 static int cpu_x86_fill_model_id(char *str)
1265 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1266 int i;
1268 for (i = 0; i < 3; i++) {
1269 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
1270 memcpy(str + i * 16 + 0, &eax, 4);
1271 memcpy(str + i * 16 + 4, &ebx, 4);
1272 memcpy(str + i * 16 + 8, &ecx, 4);
1273 memcpy(str + i * 16 + 12, &edx, 4);
1275 return 0;
1278 static X86CPUDefinition host_cpudef;
1280 static Property host_x86_cpu_properties[] = {
1281 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1282 DEFINE_PROP_END_OF_LIST()
1285 /* class_init for the "host" CPU model
1286 *
1287 * This function may be called before KVM is initialized.
1288 */
1289 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1291 DeviceClass *dc = DEVICE_CLASS(oc);
1292 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1293 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1295 xcc->kvm_required = true;
1297 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1298 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1300 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1301 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1302 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1303 host_cpudef.stepping = eax & 0x0F;
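/* Worked example (editor's note): a host reporting CPUID.1 EAX = 0x000306C3
 * (a Haswell part) decodes as
 *   family   = ((0x306C3 >> 8) & 0xf) + ((0x306C3 >> 20) & 0xff) = 6 + 0 = 6
 *   model    = ((0x306C3 >> 4) & 0xf) | ((0x306C3 & 0xf0000) >> 12) = 0xc | 0x30 = 60
 *   stepping = 0x306C3 & 0xf = 3
 * which matches the family/model used by the "Haswell" definition above.
 */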
1305 cpu_x86_fill_model_id(host_cpudef.model_id);
1307 xcc->cpu_def = &host_cpudef;
1308 host_cpudef.cache_info_passthrough = true;
1310 /* level, xlevel, xlevel2, and the feature words are initialized on
1311 * instance_init, because they require KVM to be initialized.
1312 */
1314 dc->props = host_x86_cpu_properties;
1317 static void host_x86_cpu_initfn(Object *obj)
1319 X86CPU *cpu = X86_CPU(obj);
1320 CPUX86State *env = &cpu->env;
1321 KVMState *s = kvm_state;
1323 assert(kvm_enabled());
1325 /* We can't fill the features array here because we don't know yet if
1326 * "migratable" is true or false.
1327 */
1328 cpu->host_features = true;
1330 env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1331 env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1332 env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1334 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1337 static const TypeInfo host_x86_cpu_type_info = {
1338 .name = X86_CPU_TYPE_NAME("host"),
1339 .parent = TYPE_X86_CPU,
1340 .instance_init = host_x86_cpu_initfn,
1341 .class_init = host_x86_cpu_class_init,
1344 #endif
1346 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1348 FeatureWordInfo *f = &feature_word_info[w];
1349 int i;
1351 for (i = 0; i < 32; ++i) {
1352 if (1 << i & mask) {
1353 const char *reg = get_register_name_32(f->cpuid_reg);
1354 assert(reg);
1355 fprintf(stderr, "warning: %s doesn't support requested feature: "
1356 "CPUID.%02XH:%s%s%s [bit %d]\n",
1357 kvm_enabled() ? "host" : "TCG",
1358 f->cpuid_eax, reg,
1359 f->feat_names[i] ? "." : "",
1360 f->feat_names[i] ? f->feat_names[i] : "", i);
1365 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, void *opaque,
1366 const char *name, Error **errp)
1368 X86CPU *cpu = X86_CPU(obj);
1369 CPUX86State *env = &cpu->env;
1370 int64_t value;
1372 value = (env->cpuid_version >> 8) & 0xf;
1373 if (value == 0xf) {
1374 value += (env->cpuid_version >> 20) & 0xff;
1376 visit_type_int(v, &value, name, errp);
1379 static void x86_cpuid_version_set_family(Object *obj, Visitor *v, void *opaque,
1380 const char *name, Error **errp)
1382 X86CPU *cpu = X86_CPU(obj);
1383 CPUX86State *env = &cpu->env;
1384 const int64_t min = 0;
1385 const int64_t max = 0xff + 0xf;
1386 Error *local_err = NULL;
1387 int64_t value;
1389 visit_type_int(v, &value, name, &local_err);
1390 if (local_err) {
1391 error_propagate(errp, local_err);
1392 return;
1394 if (value < min || value > max) {
1395 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1396 name ? name : "null", value, min, max);
1397 return;
1400 env->cpuid_version &= ~0xff00f00;
1401 if (value > 0x0f) {
1402 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1403 } else {
1404 env->cpuid_version |= value << 8;
1408 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, void *opaque,
1409 const char *name, Error **errp)
1411 X86CPU *cpu = X86_CPU(obj);
1412 CPUX86State *env = &cpu->env;
1413 int64_t value;
1415 value = (env->cpuid_version >> 4) & 0xf;
1416 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1417 visit_type_int(v, &value, name, errp);
1420 static void x86_cpuid_version_set_model(Object *obj, Visitor *v, void *opaque,
1421 const char *name, Error **errp)
1423 X86CPU *cpu = X86_CPU(obj);
1424 CPUX86State *env = &cpu->env;
1425 const int64_t min = 0;
1426 const int64_t max = 0xff;
1427 Error *local_err = NULL;
1428 int64_t value;
1430 visit_type_int(v, &value, name, &local_err);
1431 if (local_err) {
1432 error_propagate(errp, local_err);
1433 return;
1435 if (value < min || value > max) {
1436 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1437 name ? name : "null", value, min, max);
1438 return;
1441 env->cpuid_version &= ~0xf00f0;
1442 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1445 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1446 void *opaque, const char *name,
1447 Error **errp)
1449 X86CPU *cpu = X86_CPU(obj);
1450 CPUX86State *env = &cpu->env;
1451 int64_t value;
1453 value = env->cpuid_version & 0xf;
1454 visit_type_int(v, &value, name, errp);
1457 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1458 void *opaque, const char *name,
1459 Error **errp)
1461 X86CPU *cpu = X86_CPU(obj);
1462 CPUX86State *env = &cpu->env;
1463 const int64_t min = 0;
1464 const int64_t max = 0xf;
1465 Error *local_err = NULL;
1466 int64_t value;
1468 visit_type_int(v, &value, name, &local_err);
1469 if (local_err) {
1470 error_propagate(errp, local_err);
1471 return;
1473 if (value < min || value > max) {
1474 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1475 name ? name : "null", value, min, max);
1476 return;
1479 env->cpuid_version &= ~0xf;
1480 env->cpuid_version |= value & 0xf;
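/* Worked example (editor's note): setting family=21, model=1, stepping=2
 * (the Opteron_G4 definition above) through these setters produces
 *   family:   21 > 0xf, so bits 11:8 = 0xf and extended family = 21 - 15 = 6
 *   model:    low nibble 0x1 in bits 7:4, high nibble 0x0 in bits 19:16
 *   stepping: 0x2 in bits 3:0
 * i.e. cpuid_version = 0x00600F12, which is what CPUID.1 EAX reports for
 * such a CPU.
 */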
1483 static void x86_cpuid_get_level(Object *obj, Visitor *v, void *opaque,
1484 const char *name, Error **errp)
1486 X86CPU *cpu = X86_CPU(obj);
1488 visit_type_uint32(v, &cpu->env.cpuid_level, name, errp);
1491 static void x86_cpuid_set_level(Object *obj, Visitor *v, void *opaque,
1492 const char *name, Error **errp)
1494 X86CPU *cpu = X86_CPU(obj);
1496 visit_type_uint32(v, &cpu->env.cpuid_level, name, errp);
1499 static void x86_cpuid_get_xlevel(Object *obj, Visitor *v, void *opaque,
1500 const char *name, Error **errp)
1502 X86CPU *cpu = X86_CPU(obj);
1504 visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp);
1507 static void x86_cpuid_set_xlevel(Object *obj, Visitor *v, void *opaque,
1508 const char *name, Error **errp)
1510 X86CPU *cpu = X86_CPU(obj);
1512 visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp);
1515 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1517 X86CPU *cpu = X86_CPU(obj);
1518 CPUX86State *env = &cpu->env;
1519 char *value;
1521 value = (char *)g_malloc(CPUID_VENDOR_SZ + 1);
1522 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1523 env->cpuid_vendor3);
1524 return value;
1527 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1528 Error **errp)
1530 X86CPU *cpu = X86_CPU(obj);
1531 CPUX86State *env = &cpu->env;
1532 int i;
1534 if (strlen(value) != CPUID_VENDOR_SZ) {
1535 error_set(errp, QERR_PROPERTY_VALUE_BAD, "",
1536 "vendor", value);
1537 return;
1540 env->cpuid_vendor1 = 0;
1541 env->cpuid_vendor2 = 0;
1542 env->cpuid_vendor3 = 0;
1543 for (i = 0; i < 4; i++) {
1544 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1545 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1546 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1550 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1552 X86CPU *cpu = X86_CPU(obj);
1553 CPUX86State *env = &cpu->env;
1554 char *value;
1555 int i;
1557 value = g_malloc(48 + 1);
1558 for (i = 0; i < 48; i++) {
1559 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1561 value[48] = '\0';
1562 return value;
1565 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1566 Error **errp)
1568 X86CPU *cpu = X86_CPU(obj);
1569 CPUX86State *env = &cpu->env;
1570 int c, len, i;
1572 if (model_id == NULL) {
1573 model_id = "";
1575 len = strlen(model_id);
1576 memset(env->cpuid_model, 0, 48);
1577 for (i = 0; i < 48; i++) {
1578 if (i >= len) {
1579 c = '\0';
1580 } else {
1581 c = (uint8_t)model_id[i];
1583 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1587 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, void *opaque,
1588 const char *name, Error **errp)
1590 X86CPU *cpu = X86_CPU(obj);
1591 int64_t value;
1593 value = cpu->env.tsc_khz * 1000;
1594 visit_type_int(v, &value, name, errp);
1597 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, void *opaque,
1598 const char *name, Error **errp)
1600 X86CPU *cpu = X86_CPU(obj);
1601 const int64_t min = 0;
1602 const int64_t max = INT64_MAX;
1603 Error *local_err = NULL;
1604 int64_t value;
1606 visit_type_int(v, &value, name, &local_err);
1607 if (local_err) {
1608 error_propagate(errp, local_err);
1609 return;
1611 if (value < min || value > max) {
1612 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1613 name ? name : "null", value, min, max);
1614 return;
1617 cpu->env.tsc_khz = value / 1000;
1620 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, void *opaque,
1621 const char *name, Error **errp)
1623 X86CPU *cpu = X86_CPU(obj);
1624 int64_t value = cpu->env.cpuid_apic_id;
1626 visit_type_int(v, &value, name, errp);
1629 static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, void *opaque,
1630 const char *name, Error **errp)
1632 X86CPU *cpu = X86_CPU(obj);
1633 DeviceState *dev = DEVICE(obj);
1634 const int64_t min = 0;
1635 const int64_t max = UINT32_MAX;
1636 Error *error = NULL;
1637 int64_t value;
1639 if (dev->realized) {
1640 error_setg(errp, "Attempt to set property '%s' on '%s' after "
1641 "it was realized", name, object_get_typename(obj));
1642 return;
1645 visit_type_int(v, &value, name, &error);
1646 if (error) {
1647 error_propagate(errp, error);
1648 return;
1650 if (value < min || value > max) {
1651 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1652 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
1653 object_get_typename(obj), name, value, min, max);
1654 return;
1657 if ((value != cpu->env.cpuid_apic_id) && cpu_exists(value)) {
1658 error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
1659 return;
1661 cpu->env.cpuid_apic_id = value;
1664 /* Generic getter for "feature-words" and "filtered-features" properties */
1665 static void x86_cpu_get_feature_words(Object *obj, Visitor *v, void *opaque,
1666 const char *name, Error **errp)
1668 uint32_t *array = (uint32_t *)opaque;
1669 FeatureWord w;
1670 Error *err = NULL;
1671 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1672 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1673 X86CPUFeatureWordInfoList *list = NULL;
1675 for (w = 0; w < FEATURE_WORDS; w++) {
1676 FeatureWordInfo *wi = &feature_word_info[w];
1677 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1678 qwi->cpuid_input_eax = wi->cpuid_eax;
1679 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1680 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1681 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1682 qwi->features = array[w];
1684 /* List will be in reverse order, but order shouldn't matter */
1685 list_entries[w].next = list;
1686 list_entries[w].value = &word_infos[w];
1687 list = &list_entries[w];
1690 visit_type_X86CPUFeatureWordInfoList(v, &list, "feature-words", &err);
1691 error_propagate(errp, err);
1694 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1695 const char *name, Error **errp)
1697 X86CPU *cpu = X86_CPU(obj);
1698 int64_t value = cpu->hyperv_spinlock_attempts;
1700 visit_type_int(v, &value, name, errp);
1703 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1704 const char *name, Error **errp)
1706 const int64_t min = 0xFFF;
1707 const int64_t max = UINT_MAX;
1708 X86CPU *cpu = X86_CPU(obj);
1709 Error *err = NULL;
1710 int64_t value;
1712 visit_type_int(v, &value, name, &err);
1713 if (err) {
1714 error_propagate(errp, err);
1715 return;
1718 if (value < min || value > max) {
1719 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1720 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1721 object_get_typename(obj), name ? name : "null",
1722 value, min, max);
1723 return;
1725 cpu->hyperv_spinlock_attempts = value;
1728 static PropertyInfo qdev_prop_spinlocks = {
1729 .name = "int",
1730 .get = x86_get_hv_spinlocks,
1731 .set = x86_set_hv_spinlocks,
1734 /* Convert all '_' in a feature string option name to '-', to make feature
1735 * name conform to QOM property naming rule, which uses '-' instead of '_'.
1736 */
1737 static inline void feat2prop(char *s)
1739 while ((s = strchr(s, '_'))) {
1740 *s = '-';
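/* Example (editor's note): feat2prop() rewrites "hv_spinlocks" to
 * "hv-spinlocks" and "tsc_freq" to "tsc-freq" in place, so the legacy '_'
 * spellings on the command line still match the property names compared
 * against in the parser below.
 */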
1744 /* Parse "+feature,-feature,feature=foo" CPU feature string
1745 */
1746 static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
1747 Error **errp)
1749 X86CPU *cpu = X86_CPU(cs);
1750 char *featurestr; /* Single 'key=value" string being parsed */
1751 FeatureWord w;
1752 /* Features to be added */
1753 FeatureWordArray plus_features = { 0 };
1754 /* Features to be removed */
1755 FeatureWordArray minus_features = { 0 };
1756 uint32_t numvalue;
1757 CPUX86State *env = &cpu->env;
1758 Error *local_err = NULL;
1760 featurestr = features ? strtok(features, ",") : NULL;
1762 while (featurestr) {
1763 char *val;
1764 if (featurestr[0] == '+') {
1765 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
1766 } else if (featurestr[0] == '-') {
1767 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
1768 } else if ((val = strchr(featurestr, '='))) {
1769 *val = 0; val++;
1770 feat2prop(featurestr);
1771 if (!strcmp(featurestr, "xlevel")) {
1772 char *err;
1773 char num[32];
1775 numvalue = strtoul(val, &err, 0);
1776 if (!*val || *err) {
1777 error_setg(errp, "bad numerical value %s", val);
1778 return;
1780 if (numvalue < 0x80000000) {
1781 error_report("xlevel value shall always be >= 0x80000000"
1782 ", fixup will be removed in future versions");
1783 numvalue += 0x80000000;
1785 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1786 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1787 } else if (!strcmp(featurestr, "tsc-freq")) {
1788 int64_t tsc_freq;
1789 char *err;
1790 char num[32];
1792 tsc_freq = strtosz_suffix_unit(val, &err,
1793 STRTOSZ_DEFSUFFIX_B, 1000);
1794 if (tsc_freq < 0 || *err) {
1795 error_setg(errp, "bad numerical value %s", val);
1796 return;
1798 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
1799 object_property_parse(OBJECT(cpu), num, "tsc-frequency",
1800 &local_err);
1801 } else if (!strcmp(featurestr, "hv-spinlocks")) {
1802 char *err;
1803 const int min = 0xFFF;
1804 char num[32];
1805 numvalue = strtoul(val, &err, 0);
1806 if (!*val || *err) {
1807 error_setg(errp, "bad numerical value %s", val);
1808 return;
1810 if (numvalue < min) {
1811 error_report("hv-spinlocks value shall always be >= 0x%x"
1812 ", fixup will be removed in future versions",
1813 min);
1814 numvalue = min;
1816 snprintf(num, sizeof(num), "%" PRId32, numvalue);
1817 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1818 } else {
1819 object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
1821 } else {
1822 feat2prop(featurestr);
1823 object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
1825 if (local_err) {
1826 error_propagate(errp, local_err);
1827 return;
1829 featurestr = strtok(NULL, ",");
1832 if (cpu->host_features) {
1833 for (w = 0; w < FEATURE_WORDS; w++) {
1834 env->features[w] =
1835 x86_cpu_get_supported_feature_word(w, cpu->migratable);
1839 for (w = 0; w < FEATURE_WORDS; w++) {
1840 env->features[w] |= plus_features[w];
1841 env->features[w] &= ~minus_features[w];
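/* Illustrative usage (not part of the original file): a command line such as
 *
 *     qemu-system-x86_64 -cpu qemu64,+avx,-vme,xlevel=0x8000000A
 *
 * is split on ',' above: "+avx" and "-vme" go into plus_features and
 * minus_features, while "xlevel=0x8000000A" takes the key=value path through
 * the numeric fixups and object_property_parse().  The plus/minus bitmaps are
 * OR-ed in and masked out only after all key=value options have been handled.
 */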
1845 /* Generate a composite string into buf of all CPUID feature names in
1846 * featureset selected by fbits. Indicate truncation at bufsize in the event
1847 * of overflow. If flags is non-zero, suppress names undefined in featureset.
1849 static void listflags(char *buf, int bufsize, uint32_t fbits,
1850 const char **featureset, uint32_t flags)
1852 const char **p = &featureset[31];
1853 char *q, *b, bit;
1854 int nc;
1856 b = 4 <= bufsize ? buf + (bufsize -= 3) - 1 : NULL;
1857 *buf = '\0';
1858 for (q = buf, bit = 31; fbits && bufsize; --p, fbits &= ~(1 << bit), --bit)
1859 if (fbits & 1 << bit && (*p || !flags)) {
1860 if (*p)
1861 nc = snprintf(q, bufsize, "%s%s", q == buf ? "" : " ", *p);
1862 else
1863 nc = snprintf(q, bufsize, "%s[%d]", q == buf ? "" : " ", bit);
1864 if (bufsize <= nc) {
1865 if (b) {
1866 memcpy(b, "...", sizeof("..."));
1868 return;
1870 q += nc;
1871 bufsize -= nc;
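/* Illustrative example (not part of the original file): bits are walked from
 * 31 down to 0, so for a feature word where bit 0 is named "fpu", bit 1 is
 * unnamed (NULL) and bit 2 is named "de", listflags(buf, sizeof(buf), 0x7,
 * names, 0) yields "de [1] fpu", while a non-zero flags argument drops the
 * unnamed "[1]" entry.
 */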
1875 /* Print the list of available CPU models and the recognized CPUID flags. */
1876 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
1878 X86CPUDefinition *def;
1879 char buf[256];
1880 int i;
1882 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1883 def = &builtin_x86_defs[i];
1884 snprintf(buf, sizeof(buf), "%s", def->name);
1885 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
1887 #ifdef CONFIG_KVM
1888 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
1889 "KVM processor with all supported host features "
1890 "(only available in KVM mode)");
1891 #endif
1893 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
1894 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
1895 FeatureWordInfo *fw = &feature_word_info[i];
1897 listflags(buf, sizeof(buf), (uint32_t)~0, fw->feat_names, 1);
1898 (*cpu_fprintf)(f, " %s\n", buf);
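/* Illustrative usage (not part of the original file): this is the listing
 * produced by
 *
 *     qemu-system-x86_64 -cpu help
 *
 * one line per built-in model, followed by the "Recognized CPUID flags"
 * section generated by listflags() above.
 */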
1902 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
1904 CpuDefinitionInfoList *cpu_list = NULL;
1905 X86CPUDefinition *def;
1906 int i;
1908 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1909 CpuDefinitionInfoList *entry;
1910 CpuDefinitionInfo *info;
1912 def = &builtin_x86_defs[i];
1913 info = g_malloc0(sizeof(*info));
1914 info->name = g_strdup(def->name);
1916 entry = g_malloc0(sizeof(*entry));
1917 entry->value = info;
1918 entry->next = cpu_list;
1919 cpu_list = entry;
1922 return cpu_list;
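/* Illustrative usage (not part of the original file): this is the x86 backend
 * of the QMP "query-cpu-definitions" command, e.g.
 *
 *     { "execute": "query-cpu-definitions" }
 *
 * which returns one { "name": "<model>" } entry per builtin_x86_defs element.
 */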
1925 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1926 bool migratable_only)
1928 FeatureWordInfo *wi = &feature_word_info[w];
1929 uint32_t r;
1931 if (kvm_enabled()) {
1932 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
1933 wi->cpuid_ecx,
1934 wi->cpuid_reg);
1935 } else if (tcg_enabled()) {
1936 r = wi->tcg_features;
1937 } else {
1938 return ~0;
1940 if (migratable_only) {
1941 r &= x86_cpu_get_migratable_flags(w);
1943 return r;
1947 * Filters CPU feature words based on host availability of each feature.
1949 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
1951 static int x86_cpu_filter_features(X86CPU *cpu)
1953 CPUX86State *env = &cpu->env;
1954 FeatureWord w;
1955 int rv = 0;
1957 for (w = 0; w < FEATURE_WORDS; w++) {
1958 uint32_t host_feat =
1959 x86_cpu_get_supported_feature_word(w, cpu->migratable);
1960 uint32_t requested_features = env->features[w];
1961 env->features[w] &= host_feat;
1962 cpu->filtered_features[w] = requested_features & ~env->features[w];
1963 if (cpu->filtered_features[w]) {
1964 if (cpu->check_cpuid || cpu->enforce_cpuid) {
1965 report_unavailable_features(w, cpu->filtered_features[w]);
1967 rv = 1;
1971 return rv;
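/* Illustrative note (not part of the original file): with "-cpu Model,check"
 * unavailable flags are only reported here (the features are still silently
 * masked off), whereas x86_cpu_realizefn() below turns a non-zero return into
 * a hard error when "enforce" is set, e.g. "-cpu Haswell,enforce" refuses to
 * start if the host or TCG lacks a requested feature.
 */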
1974 /* Load data from X86CPUDefinition
1976 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
1978 CPUX86State *env = &cpu->env;
1979 const char *vendor;
1980 char host_vendor[CPUID_VENDOR_SZ + 1];
1981 FeatureWord w;
1983 object_property_set_int(OBJECT(cpu), def->level, "level", errp);
1984 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
1985 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
1986 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
1987 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
1988 env->cpuid_xlevel2 = def->xlevel2;
1989 cpu->cache_info_passthrough = def->cache_info_passthrough;
1990 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
1991 for (w = 0; w < FEATURE_WORDS; w++) {
1992 env->features[w] = def->features[w];
1995 /* Special cases not set in the X86CPUDefinition structs: */
1996 if (kvm_enabled()) {
1997 FeatureWord w;
1998 for (w = 0; w < FEATURE_WORDS; w++) {
1999 env->features[w] |= kvm_default_features[w];
2000 env->features[w] &= ~kvm_default_unset_features[w];
2004 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2006 /* sysenter isn't supported in compatibility mode on AMD,
2007 * syscall isn't supported in compatibility mode on Intel.
2008 * Normally we advertise the actual CPU vendor, but you can
2009 * override this using the 'vendor' property if you want to use
2010 * KVM's sysenter/syscall emulation in compatibility mode and
2011 * when doing cross-vendor migration.
2013 vendor = def->vendor;
2014 if (kvm_enabled()) {
2015 uint32_t ebx = 0, ecx = 0, edx = 0;
2016 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2017 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2018 vendor = host_vendor;
2021 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2025 X86CPU *cpu_x86_create(const char *cpu_model, DeviceState *icc_bridge,
2026 Error **errp)
2028 X86CPU *cpu = NULL;
2029 X86CPUClass *xcc;
2030 ObjectClass *oc;
2031 gchar **model_pieces;
2032 char *name, *features;
2033 Error *error = NULL;
2035 model_pieces = g_strsplit(cpu_model, ",", 2);
2036 if (!model_pieces[0]) {
2037 error_setg(&error, "Invalid/empty CPU model name");
2038 goto out;
2040 name = model_pieces[0];
2041 features = model_pieces[1];
2043 oc = x86_cpu_class_by_name(name);
2044 if (oc == NULL) {
2045 error_setg(&error, "Unable to find CPU definition: %s", name);
2046 goto out;
2048 xcc = X86_CPU_CLASS(oc);
2050 if (xcc->kvm_required && !kvm_enabled()) {
2051 error_setg(&error, "CPU model '%s' requires KVM", name);
2052 goto out;
2055 cpu = X86_CPU(object_new(object_class_get_name(oc)));
2057 #ifndef CONFIG_USER_ONLY
2058 if (icc_bridge == NULL) {
2059 error_setg(&error, "Invalid icc-bridge value");
2060 goto out;
2062 qdev_set_parent_bus(DEVICE(cpu), qdev_get_child_bus(icc_bridge, "icc"));
2063 object_unref(OBJECT(cpu));
2064 #endif
2066 x86_cpu_parse_featurestr(CPU(cpu), features, &error);
2067 if (error) {
2068 goto out;
2071 out:
2072 if (error != NULL) {
2073 error_propagate(errp, error);
2074 if (cpu) {
2075 object_unref(OBJECT(cpu));
2076 cpu = NULL;
2079 g_strfreev(model_pieces);
2080 return cpu;
2083 X86CPU *cpu_x86_init(const char *cpu_model)
2085 Error *error = NULL;
2086 X86CPU *cpu;
2088 cpu = cpu_x86_create(cpu_model, NULL, &error);
2089 if (error) {
2090 goto out;
2093 object_property_set_bool(OBJECT(cpu), true, "realized", &error);
2095 out:
2096 if (error) {
2097 error_report("%s", error_get_pretty(error));
2098 error_free(error);
2099 if (cpu != NULL) {
2100 object_unref(OBJECT(cpu));
2101 cpu = NULL;
2104 return cpu;
2107 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2109 X86CPUDefinition *cpudef = data;
2110 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2112 xcc->cpu_def = cpudef;
2115 static void x86_register_cpudef_type(X86CPUDefinition *def)
2117 char *typename = x86_cpu_type_name(def->name);
2118 TypeInfo ti = {
2119 .name = typename,
2120 .parent = TYPE_X86_CPU,
2121 .class_init = x86_cpu_cpudef_class_init,
2122 .class_data = def,
2125 type_register(&ti);
2126 g_free(typename);
2129 #if !defined(CONFIG_USER_ONLY)
2131 void cpu_clear_apic_feature(CPUX86State *env)
2133 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2136 #endif /* !CONFIG_USER_ONLY */
2138 /* Initialize list of CPU models, filling some non-static fields if necessary
2140 void x86_cpudef_setup(void)
2142 int i, j;
2143 static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
2145 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
2146 X86CPUDefinition *def = &builtin_x86_defs[i];
2148 /* Look for specific "cpudef" models that */
2149 /* have the QEMU version in .model_id */
2150 for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
2151 if (strcmp(model_with_versions[j], def->name) == 0) {
2152 pstrcpy(def->model_id, sizeof(def->model_id),
2153 "QEMU Virtual CPU version ");
2154 pstrcat(def->model_id, sizeof(def->model_id),
2155 qemu_get_version());
2156 break;
2162 static void get_cpuid_vendor(CPUX86State *env, uint32_t *ebx,
2163 uint32_t *ecx, uint32_t *edx)
2165 *ebx = env->cpuid_vendor1;
2166 *edx = env->cpuid_vendor2;
2167 *ecx = env->cpuid_vendor3;
2170 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2171 uint32_t *eax, uint32_t *ebx,
2172 uint32_t *ecx, uint32_t *edx)
2174 X86CPU *cpu = x86_env_get_cpu(env);
2175 CPUState *cs = CPU(cpu);
2177 /* test if maximum index reached */
2178 if (index & 0x80000000) {
2179 if (index > env->cpuid_xlevel) {
2180 if (env->cpuid_xlevel2 > 0) {
2181 /* Handle the Centaur's CPUID instruction. */
2182 if (index > env->cpuid_xlevel2) {
2183 index = env->cpuid_xlevel2;
2184 } else if (index < 0xC0000000) {
2185 index = env->cpuid_xlevel;
2187 } else {
2188 /* Intel documentation states that invalid EAX input will
2189 * return the same information as EAX=cpuid_level
2190 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2192 index = env->cpuid_level;
2195 } else {
2196 if (index > env->cpuid_level)
2197 index = env->cpuid_level;
2200 switch(index) {
2201 case 0:
2202 *eax = env->cpuid_level;
2203 get_cpuid_vendor(env, ebx, ecx, edx);
2204 break;
2205 case 1:
2206 *eax = env->cpuid_version;
2207 *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quadwords (8 * 8 = 64 bytes); Linux wants it. */
2208 *ecx = env->features[FEAT_1_ECX];
2209 *edx = env->features[FEAT_1_EDX];
2210 if (cs->nr_cores * cs->nr_threads > 1) {
2211 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2212 *edx |= 1 << 28; /* HTT bit */
2214 break;
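/* Illustrative note (not part of the original file): in CPUID.01H, EBX[15:8]
 * is the CLFLUSH line size in 8-byte units (8 => 64 bytes), EBX[23:16] is the
 * maximum number of addressable logical processor IDs in the package, and
 * EDX bit 28 (HTT), set above, tells the guest that the EBX[23:16] count is
 * valid.
 */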
2215 case 2:
2216 /* cache info: needed for Pentium Pro compatibility */
2217 if (cpu->cache_info_passthrough) {
2218 host_cpuid(index, 0, eax, ebx, ecx, edx);
2219 break;
2221 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2222 *ebx = 0;
2223 *ecx = 0;
2224 *edx = (L1D_DESCRIPTOR << 16) | \
2225 (L1I_DESCRIPTOR << 8) | \
2226 (L2_DESCRIPTOR);
2227 break;
2228 case 4:
2229 /* cache info: needed for Core compatibility */
2230 if (cpu->cache_info_passthrough) {
2231 host_cpuid(index, count, eax, ebx, ecx, edx);
2232 *eax &= ~0xFC000000;
2233 } else {
2234 *eax = 0;
2235 switch (count) {
2236 case 0: /* L1 dcache info */
2237 *eax |= CPUID_4_TYPE_DCACHE | \
2238 CPUID_4_LEVEL(1) | \
2239 CPUID_4_SELF_INIT_LEVEL;
2240 *ebx = (L1D_LINE_SIZE - 1) | \
2241 ((L1D_PARTITIONS - 1) << 12) | \
2242 ((L1D_ASSOCIATIVITY - 1) << 22);
2243 *ecx = L1D_SETS - 1;
2244 *edx = CPUID_4_NO_INVD_SHARING;
2245 break;
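/* Worked example (not part of the original file): with L1D_LINE_SIZE 64,
 * L1D_PARTITIONS 1 and L1D_ASSOCIATIVITY 8, EBX above becomes
 * (64-1) | ((1-1) << 12) | ((8-1) << 22) = 0x01C0003F, and ECX = 63 sets,
 * so the reported size is 64 * 1 * 8 * 64 = 32 KiB, matching the leaf-2
 * descriptor CPUID_2_L1D_32KB_8WAY_64B.
 */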
2246 case 1: /* L1 icache info */
2247 *eax |= CPUID_4_TYPE_ICACHE | \
2248 CPUID_4_LEVEL(1) | \
2249 CPUID_4_SELF_INIT_LEVEL;
2250 *ebx = (L1I_LINE_SIZE - 1) | \
2251 ((L1I_PARTITIONS - 1) << 12) | \
2252 ((L1I_ASSOCIATIVITY - 1) << 22);
2253 *ecx = L1I_SETS - 1;
2254 *edx = CPUID_4_NO_INVD_SHARING;
2255 break;
2256 case 2: /* L2 cache info */
2257 *eax |= CPUID_4_TYPE_UNIFIED | \
2258 CPUID_4_LEVEL(2) | \
2259 CPUID_4_SELF_INIT_LEVEL;
2260 if (cs->nr_threads > 1) {
2261 *eax |= (cs->nr_threads - 1) << 14;
2263 *ebx = (L2_LINE_SIZE - 1) | \
2264 ((L2_PARTITIONS - 1) << 12) | \
2265 ((L2_ASSOCIATIVITY - 1) << 22);
2266 *ecx = L2_SETS - 1;
2267 *edx = CPUID_4_NO_INVD_SHARING;
2268 break;
2269 default: /* end of info */
2270 *eax = 0;
2271 *ebx = 0;
2272 *ecx = 0;
2273 *edx = 0;
2274 break;
2278 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2279 if ((*eax & 31) && cs->nr_cores > 1) {
2280 *eax |= (cs->nr_cores - 1) << 26;
2282 break;
2283 case 5:
2284 /* mwait info: needed for Core compatibility */
2285 *eax = 0; /* Smallest monitor-line size in bytes */
2286 *ebx = 0; /* Largest monitor-line size in bytes */
2287 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2288 *edx = 0;
2289 break;
2290 case 6:
2291 /* Thermal and Power Leaf */
2292 *eax = 0;
2293 *ebx = 0;
2294 *ecx = 0;
2295 *edx = 0;
2296 break;
2297 case 7:
2298 /* Structured Extended Feature Flags Enumeration Leaf */
2299 if (count == 0) {
2300 *eax = 0; /* Maximum ECX value for sub-leaves */
2301 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2302 *ecx = 0; /* Reserved */
2303 *edx = 0; /* Reserved */
2304 } else {
2305 *eax = 0;
2306 *ebx = 0;
2307 *ecx = 0;
2308 *edx = 0;
2310 break;
2311 case 9:
2312 /* Direct Cache Access Information Leaf */
2313 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2314 *ebx = 0;
2315 *ecx = 0;
2316 *edx = 0;
2317 break;
2318 case 0xA:
2319 /* Architectural Performance Monitoring Leaf */
2320 if (kvm_enabled() && cpu->enable_pmu) {
2321 KVMState *s = cs->kvm_state;
2323 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2324 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2325 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2326 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2327 } else {
2328 *eax = 0;
2329 *ebx = 0;
2330 *ecx = 0;
2331 *edx = 0;
2333 break;
2334 case 0xD: {
2335 KVMState *s = cs->kvm_state;
2336 uint64_t kvm_mask;
2337 int i;
2339 /* Processor Extended State */
2340 *eax = 0;
2341 *ebx = 0;
2342 *ecx = 0;
2343 *edx = 0;
2344 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) || !kvm_enabled()) {
2345 break;
2347 kvm_mask =
2348 kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX) |
2349 ((uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32);
2351 if (count == 0) {
2352 *ecx = 0x240;
2353 for (i = 2; i < ARRAY_SIZE(ext_save_areas); i++) {
2354 const ExtSaveArea *esa = &ext_save_areas[i];
2355 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2356 (kvm_mask & (1 << i)) != 0) {
2357 if (i < 32) {
2358 *eax |= 1 << i;
2359 } else {
2360 *edx |= 1 << (i - 32);
2362 *ecx = MAX(*ecx, esa->offset + esa->size);
2365 *eax |= kvm_mask & (XSTATE_FP | XSTATE_SSE);
2366 *ebx = *ecx;
2367 } else if (count == 1) {
2368 *eax = kvm_arch_get_supported_cpuid(s, 0xd, 1, R_EAX);
2369 } else if (count < ARRAY_SIZE(ext_save_areas)) {
2370 const ExtSaveArea *esa = &ext_save_areas[count];
2371 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2372 (kvm_mask & (1 << count)) != 0) {
2373 *eax = esa->size;
2374 *ebx = esa->offset;
2377 break;
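/* Illustrative note (not part of the original file): for sub-leaf 0 the code
 * above starts ECX (the required XSAVE buffer size) at 0x240 = 576 bytes,
 * i.e. the 512-byte legacy FXSAVE region plus the 64-byte XSAVE header, and
 * then grows it to cover every enabled extended save area.
 */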
2379 case 0x80000000:
2380 *eax = env->cpuid_xlevel;
2381 *ebx = env->cpuid_vendor1;
2382 *edx = env->cpuid_vendor2;
2383 *ecx = env->cpuid_vendor3;
2384 break;
2385 case 0x80000001:
2386 *eax = env->cpuid_version;
2387 *ebx = 0;
2388 *ecx = env->features[FEAT_8000_0001_ECX];
2389 *edx = env->features[FEAT_8000_0001_EDX];
2391 /* The Linux kernel checks for the CMPLegacy bit and
2392 * discards multiple thread information if it is set.
2393 * So don't set it here for Intel, to keep Linux guests happy.
2395 if (cs->nr_cores * cs->nr_threads > 1) {
2396 uint32_t tebx, tecx, tedx;
2397 get_cpuid_vendor(env, &tebx, &tecx, &tedx);
2398 if (tebx != CPUID_VENDOR_INTEL_1 ||
2399 tedx != CPUID_VENDOR_INTEL_2 ||
2400 tecx != CPUID_VENDOR_INTEL_3) {
2401 *ecx |= 1 << 1; /* CmpLegacy bit */
2404 break;
2405 case 0x80000002:
2406 case 0x80000003:
2407 case 0x80000004:
2408 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2409 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2410 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2411 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2412 break;
2413 case 0x80000005:
2414 /* cache info (L1 cache) */
2415 if (cpu->cache_info_passthrough) {
2416 host_cpuid(index, 0, eax, ebx, ecx, edx);
2417 break;
2419 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2420 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2421 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2422 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2423 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2424 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2425 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2426 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2427 break;
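/* Worked example (not part of the original file): with L1D_SIZE_KB_AMD 64,
 * L1D_ASSOCIATIVITY_AMD 2, L1D_LINES_PER_TAG 1 and L1D_LINE_SIZE 64, ECX above
 * encodes to (64 << 24) | (2 << 16) | (1 << 8) | 64 = 0x40020140, i.e. a
 * 64 KiB, 2-way L1 data cache with 64-byte lines (the AMD-style numbers the
 * FIXME near the L1D_* definitions refers to).
 */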
2428 case 0x80000006:
2429 /* cache info (L2 cache) */
2430 if (cpu->cache_info_passthrough) {
2431 host_cpuid(index, 0, eax, ebx, ecx, edx);
2432 break;
2434 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2435 (L2_DTLB_2M_ENTRIES << 16) | \
2436 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2437 (L2_ITLB_2M_ENTRIES);
2438 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2439 (L2_DTLB_4K_ENTRIES << 16) | \
2440 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2441 (L2_ITLB_4K_ENTRIES);
2442 *ecx = (L2_SIZE_KB_AMD << 16) | \
2443 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2444 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2445 *edx = ((L3_SIZE_KB/512) << 18) | \
2446 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2447 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2448 break;
2449 case 0x80000007:
2450 *eax = 0;
2451 *ebx = 0;
2452 *ecx = 0;
2453 *edx = env->features[FEAT_8000_0007_EDX];
2454 break;
2455 case 0x80000008:
2456 /* virtual & phys address size in low 2 bytes. */
2457 /* XXX: This value must match the one used in the MMU code. */
2458 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2459 /* 64 bit processor */
2460 /* XXX: The physical address space is limited to 42 bits in exec.c. */
2461 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
2462 } else {
2463 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
2464 *eax = 0x00000024; /* 36 bits physical */
2465 } else {
2466 *eax = 0x00000020; /* 32 bits physical */
2469 *ebx = 0;
2470 *ecx = 0;
2471 *edx = 0;
2472 if (cs->nr_cores * cs->nr_threads > 1) {
2473 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2475 break;
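/* Illustrative note (not part of the original file): in CPUID.80000008H EAX,
 * bits 7:0 are the physical and bits 15:8 the linear address width, so the
 * 0x00003028 above decodes to 0x28 = 40 physical / 0x30 = 48 virtual bits
 * for long-mode capable CPUs.
 */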
2476 case 0x8000000A:
2477 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2478 *eax = 0x00000001; /* SVM Revision */
2479 *ebx = 0x00000010; /* nr of ASIDs */
2480 *ecx = 0;
2481 *edx = env->features[FEAT_SVM]; /* optional features */
2482 } else {
2483 *eax = 0;
2484 *ebx = 0;
2485 *ecx = 0;
2486 *edx = 0;
2488 break;
2489 case 0xC0000000:
2490 *eax = env->cpuid_xlevel2;
2491 *ebx = 0;
2492 *ecx = 0;
2493 *edx = 0;
2494 break;
2495 case 0xC0000001:
2496 /* Support for VIA CPU's CPUID instruction */
2497 *eax = env->cpuid_version;
2498 *ebx = 0;
2499 *ecx = 0;
2500 *edx = env->features[FEAT_C000_0001_EDX];
2501 break;
2502 case 0xC0000002:
2503 case 0xC0000003:
2504 case 0xC0000004:
2505 /* Reserved for future use; currently filled with zero */
2506 *eax = 0;
2507 *ebx = 0;
2508 *ecx = 0;
2509 *edx = 0;
2510 break;
2511 default:
2512 /* reserved values: zero */
2513 *eax = 0;
2514 *ebx = 0;
2515 *ecx = 0;
2516 *edx = 0;
2517 break;
2521 /* CPUClass::reset() */
2522 static void x86_cpu_reset(CPUState *s)
2524 X86CPU *cpu = X86_CPU(s);
2525 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2526 CPUX86State *env = &cpu->env;
2527 int i;
2529 xcc->parent_reset(s);
2531 memset(env, 0, offsetof(CPUX86State, cpuid_level));
2533 tlb_flush(s, 1);
2535 env->old_exception = -1;
2537 /* init to reset state */
2539 #ifdef CONFIG_SOFTMMU
2540 env->hflags |= HF_SOFTMMU_MASK;
2541 #endif
2542 env->hflags2 |= HF2_GIF_MASK;
2544 cpu_x86_update_cr0(env, 0x60000010);
2545 env->a20_mask = ~0x0;
2546 env->smbase = 0x30000;
2548 env->idt.limit = 0xffff;
2549 env->gdt.limit = 0xffff;
2550 env->ldt.limit = 0xffff;
2551 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2552 env->tr.limit = 0xffff;
2553 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
2555 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2556 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2557 DESC_R_MASK | DESC_A_MASK);
2558 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2559 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2560 DESC_A_MASK);
2561 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2562 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2563 DESC_A_MASK);
2564 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2565 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2566 DESC_A_MASK);
2567 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2568 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2569 DESC_A_MASK);
2570 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2571 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2572 DESC_A_MASK);
2574 env->eip = 0xfff0;
2575 env->regs[R_EDX] = env->cpuid_version;
2577 env->eflags = 0x2;
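/* Illustrative note (not part of the original file): the CS setup above
 * (selector 0xf000, base 0xffff0000) combined with EIP = 0xfff0 makes the
 * first instruction fetch hit the conventional x86 reset vector at physical
 * address 0xfffffff0.
 */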
2579 /* FPU init */
2580 for (i = 0; i < 8; i++) {
2581 env->fptags[i] = 1;
2583 cpu_set_fpuc(env, 0x37f);
2585 env->mxcsr = 0x1f80;
2586 env->xstate_bv = XSTATE_FP | XSTATE_SSE;
2588 env->pat = 0x0007040600070406ULL;
2589 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2591 memset(env->dr, 0, sizeof(env->dr));
2592 env->dr[6] = DR6_FIXED_1;
2593 env->dr[7] = DR7_FIXED_1;
2594 cpu_breakpoint_remove_all(s, BP_CPU);
2595 cpu_watchpoint_remove_all(s, BP_CPU);
2597 env->xcr0 = 1;
2600 * SDM 11.11.5 requires:
2601 * - IA32_MTRR_DEF_TYPE MSR.E = 0
2602 * - IA32_MTRR_PHYSMASKn.V = 0
2603 * All other bits are undefined. For simplification, zero it all.
2605 env->mtrr_deftype = 0;
2606 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
2607 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
2609 #if !defined(CONFIG_USER_ONLY)
2610 /* We hard-wire the BSP to the first CPU. */
2611 if (s->cpu_index == 0) {
2612 apic_designate_bsp(cpu->apic_state);
2615 s->halted = !cpu_is_bsp(cpu);
2617 if (kvm_enabled()) {
2618 kvm_arch_reset_vcpu(cpu);
2620 #endif
2623 #ifndef CONFIG_USER_ONLY
2624 bool cpu_is_bsp(X86CPU *cpu)
2626 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2629 /* TODO: remove me when reset over the QOM tree is implemented */
2630 static void x86_cpu_machine_reset_cb(void *opaque)
2632 X86CPU *cpu = opaque;
2633 cpu_reset(CPU(cpu));
2635 #endif
2637 static void mce_init(X86CPU *cpu)
2639 CPUX86State *cenv = &cpu->env;
2640 unsigned int bank;
2642 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2643 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2644 (CPUID_MCE | CPUID_MCA)) {
2645 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2646 cenv->mcg_ctl = ~(uint64_t)0;
2647 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2648 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
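/* Illustrative note (not part of the original file): mce_banks[] stores four
 * 64-bit registers (CTL, STATUS, ADDR, MISC) per bank, so writing all-ones to
 * mce_banks[bank * 4] sets each bank's MCi_CTL, enabling reporting of every
 * error type in all MCE_BANKS_DEF banks.
 */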
2653 #ifndef CONFIG_USER_ONLY
2654 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2656 CPUX86State *env = &cpu->env;
2657 DeviceState *dev = DEVICE(cpu);
2658 APICCommonState *apic;
2659 const char *apic_type = "apic";
2661 if (kvm_irqchip_in_kernel()) {
2662 apic_type = "kvm-apic";
2663 } else if (xen_enabled()) {
2664 apic_type = "xen-apic";
2667 cpu->apic_state = qdev_try_create(qdev_get_parent_bus(dev), apic_type);
2668 if (cpu->apic_state == NULL) {
2669 error_setg(errp, "APIC device '%s' could not be created", apic_type);
2670 return;
2673 object_property_add_child(OBJECT(cpu), "apic",
2674 OBJECT(cpu->apic_state), NULL);
2675 qdev_prop_set_uint8(cpu->apic_state, "id", env->cpuid_apic_id);
2676 /* TODO: convert to link<> */
2677 apic = APIC_COMMON(cpu->apic_state);
2678 apic->cpu = cpu;
2681 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2683 if (cpu->apic_state == NULL) {
2684 return;
2687 if (qdev_init(cpu->apic_state)) {
2688 error_setg(errp, "APIC device '%s' could not be initialized",
2689 object_get_typename(OBJECT(cpu->apic_state)));
2690 return;
2693 #else
2694 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2697 #endif
2700 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
2701 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
2702 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
2703 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
2704 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
2705 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
2706 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
2708 CPUState *cs = CPU(dev);
2709 X86CPU *cpu = X86_CPU(dev);
2710 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
2711 CPUX86State *env = &cpu->env;
2712 Error *local_err = NULL;
2713 static bool ht_warned;
2715 if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
2716 env->cpuid_level = 7;
2719 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
2720 * CPUID[1].EDX.
2722 if (IS_AMD_CPU(env)) {
2723 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
2724 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
2725 & CPUID_EXT2_AMD_ALIASES);
2729 if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
2730 error_setg(&local_err,
2731 kvm_enabled() ?
2732 "Host doesn't support requested features" :
2733 "TCG doesn't support requested features");
2734 goto out;
2737 #ifndef CONFIG_USER_ONLY
2738 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
2740 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
2741 x86_cpu_apic_create(cpu, &local_err);
2742 if (local_err != NULL) {
2743 goto out;
2746 #endif
2748 mce_init(cpu);
2749 qemu_init_vcpu(cs);
2751 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
2752 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
2753 * based on the (sockets, cores, threads) inputs, it is still better to give
2754 * users a warning.
2756 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
2757 * cs->nr_threads hasn't been populated yet and the check would be incorrect.
2759 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
2760 error_report("AMD CPU doesn't support hyperthreading. Please configure"
2761 " -smp options properly.");
2762 ht_warned = true;
2765 x86_cpu_apic_realize(cpu, &local_err);
2766 if (local_err != NULL) {
2767 goto out;
2769 cpu_reset(cs);
2771 xcc->parent_realize(dev, &local_err);
2772 out:
2773 if (local_err != NULL) {
2774 error_propagate(errp, local_err);
2775 return;
2779 /* Enables contiguous-apic-ID mode, for compatibility */
2780 static bool compat_apic_id_mode;
2782 void enable_compat_apic_id_mode(void)
2784 compat_apic_id_mode = true;
2787 /* Calculates initial APIC ID for a specific CPU index
2789 * Currently we need to be able to calculate the APIC ID from the CPU index
2790 * alone (without requiring a CPU object), as the QEMU<->SeaBIOS interfaces have
2791 * no concept of "CPU index", and the NUMA tables on fw_cfg need the APIC ID of
2792 * all CPUs up to max_cpus.
2794 uint32_t x86_cpu_apic_id_from_index(unsigned int cpu_index)
2796 uint32_t correct_id;
2797 static bool warned;
2799 correct_id = x86_apicid_from_cpu_idx(smp_cores, smp_threads, cpu_index);
2800 if (compat_apic_id_mode) {
2801 if (cpu_index != correct_id && !warned) {
2802 error_report("APIC IDs set in compatibility mode, "
2803 "CPU topology won't match the configuration");
2804 warned = true;
2806 return cpu_index;
2807 } else {
2808 return correct_id;
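/* Illustrative example (not part of the original file, assuming the topology
 * encoding from topology.h): with -smp cores=3,threads=1, core IDs need two
 * bits, so cpu_index 3 lands in package 1, core 0 and gets APIC ID 4; in
 * compat mode the function instead returns the contiguous value 3 and prints
 * the warning above.
 */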
2812 static void x86_cpu_initfn(Object *obj)
2814 CPUState *cs = CPU(obj);
2815 X86CPU *cpu = X86_CPU(obj);
2816 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
2817 CPUX86State *env = &cpu->env;
2818 static int inited;
2820 cs->env_ptr = env;
2821 cpu_exec_init(env);
2823 object_property_add(obj, "family", "int",
2824 x86_cpuid_version_get_family,
2825 x86_cpuid_version_set_family, NULL, NULL, NULL);
2826 object_property_add(obj, "model", "int",
2827 x86_cpuid_version_get_model,
2828 x86_cpuid_version_set_model, NULL, NULL, NULL);
2829 object_property_add(obj, "stepping", "int",
2830 x86_cpuid_version_get_stepping,
2831 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
2832 object_property_add(obj, "level", "int",
2833 x86_cpuid_get_level,
2834 x86_cpuid_set_level, NULL, NULL, NULL);
2835 object_property_add(obj, "xlevel", "int",
2836 x86_cpuid_get_xlevel,
2837 x86_cpuid_set_xlevel, NULL, NULL, NULL);
2838 object_property_add_str(obj, "vendor",
2839 x86_cpuid_get_vendor,
2840 x86_cpuid_set_vendor, NULL);
2841 object_property_add_str(obj, "model-id",
2842 x86_cpuid_get_model_id,
2843 x86_cpuid_set_model_id, NULL);
2844 object_property_add(obj, "tsc-frequency", "int",
2845 x86_cpuid_get_tsc_freq,
2846 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
2847 object_property_add(obj, "apic-id", "int",
2848 x86_cpuid_get_apic_id,
2849 x86_cpuid_set_apic_id, NULL, NULL, NULL);
2850 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
2851 x86_cpu_get_feature_words,
2852 NULL, NULL, (void *)env->features, NULL);
2853 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
2854 x86_cpu_get_feature_words,
2855 NULL, NULL, (void *)cpu->filtered_features, NULL);
2857 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
2858 env->cpuid_apic_id = x86_cpu_apic_id_from_index(cs->cpu_index);
2860 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
2862 /* init various static tables used in TCG mode */
2863 if (tcg_enabled() && !inited) {
2864 inited = 1;
2865 optimize_flags_init();
2869 static int64_t x86_cpu_get_arch_id(CPUState *cs)
2871 X86CPU *cpu = X86_CPU(cs);
2872 CPUX86State *env = &cpu->env;
2874 return env->cpuid_apic_id;
2877 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
2879 X86CPU *cpu = X86_CPU(cs);
2881 return cpu->env.cr[0] & CR0_PG_MASK;
2884 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
2886 X86CPU *cpu = X86_CPU(cs);
2888 cpu->env.eip = value;
2891 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
2893 X86CPU *cpu = X86_CPU(cs);
2895 cpu->env.eip = tb->pc - tb->cs_base;
2898 static bool x86_cpu_has_work(CPUState *cs)
2900 X86CPU *cpu = X86_CPU(cs);
2901 CPUX86State *env = &cpu->env;
2903 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
2904 CPU_INTERRUPT_POLL)) &&
2905 (env->eflags & IF_MASK)) ||
2906 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
2907 CPU_INTERRUPT_INIT |
2908 CPU_INTERRUPT_SIPI |
2909 CPU_INTERRUPT_MCE));
2912 static Property x86_cpu_properties[] = {
2913 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
2914 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
2915 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
2916 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
2917 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
2918 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, false),
2919 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
2920 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
2921 DEFINE_PROP_END_OF_LIST()
2924 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
2926 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2927 CPUClass *cc = CPU_CLASS(oc);
2928 DeviceClass *dc = DEVICE_CLASS(oc);
2930 xcc->parent_realize = dc->realize;
2931 dc->realize = x86_cpu_realizefn;
2932 dc->bus_type = TYPE_ICC_BUS;
2933 dc->props = x86_cpu_properties;
2935 xcc->parent_reset = cc->reset;
2936 cc->reset = x86_cpu_reset;
2937 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
2939 cc->class_by_name = x86_cpu_class_by_name;
2940 cc->parse_features = x86_cpu_parse_featurestr;
2941 cc->has_work = x86_cpu_has_work;
2942 cc->do_interrupt = x86_cpu_do_interrupt;
2943 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
2944 cc->dump_state = x86_cpu_dump_state;
2945 cc->set_pc = x86_cpu_set_pc;
2946 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
2947 cc->gdb_read_register = x86_cpu_gdb_read_register;
2948 cc->gdb_write_register = x86_cpu_gdb_write_register;
2949 cc->get_arch_id = x86_cpu_get_arch_id;
2950 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
2951 #ifdef CONFIG_USER_ONLY
2952 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
2953 #else
2954 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
2955 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
2956 cc->write_elf64_note = x86_cpu_write_elf64_note;
2957 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
2958 cc->write_elf32_note = x86_cpu_write_elf32_note;
2959 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
2960 cc->vmsd = &vmstate_x86_cpu;
2961 #endif
2962 cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
2963 #ifndef CONFIG_USER_ONLY
2964 cc->debug_excp_handler = breakpoint_handler;
2965 #endif
2966 cc->cpu_exec_enter = x86_cpu_exec_enter;
2967 cc->cpu_exec_exit = x86_cpu_exec_exit;
2970 static const TypeInfo x86_cpu_type_info = {
2971 .name = TYPE_X86_CPU,
2972 .parent = TYPE_CPU,
2973 .instance_size = sizeof(X86CPU),
2974 .instance_init = x86_cpu_initfn,
2975 .abstract = true,
2976 .class_size = sizeof(X86CPUClass),
2977 .class_init = x86_cpu_common_class_init,
2980 static void x86_cpu_register_types(void)
2982 int i;
2984 type_register_static(&x86_cpu_type_info);
2985 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2986 x86_register_cpudef_type(&builtin_x86_defs[i]);
2988 #ifdef CONFIG_KVM
2989 type_register_static(&host_x86_cpu_type_info);
2990 #endif
2993 type_init(x86_cpu_register_types)