1 /*
2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include <stdlib.h>
20 #include <stdio.h>
21 #include <string.h>
22 #include <inttypes.h>
24 #include "cpu.h"
25 #include "sysemu/kvm.h"
26 #include "sysemu/cpus.h"
27 #include "kvm_i386.h"
28 #include "topology.h"
30 #include "qemu/option.h"
31 #include "qemu/config-file.h"
32 #include "qapi/qmp/qerror.h"
34 #include "qapi-types.h"
35 #include "qapi-visit.h"
36 #include "qapi/visitor.h"
37 #include "sysemu/arch_init.h"
39 #include "hw/hw.h"
40 #if defined(CONFIG_KVM)
41 #include <linux/kvm_para.h>
42 #endif
44 #include "sysemu/sysemu.h"
45 #include "hw/qdev-properties.h"
46 #include "hw/cpu/icc_bus.h"
47 #ifndef CONFIG_USER_ONLY
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
50 #endif
53 /* Cache topology CPUID constants: */
55 /* CPUID Leaf 2 Descriptors */
57 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
58 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
59 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
62 /* CPUID Leaf 4 constants: */
64 /* EAX: */
65 #define CPUID_4_TYPE_DCACHE 1
66 #define CPUID_4_TYPE_ICACHE 2
67 #define CPUID_4_TYPE_UNIFIED 3
69 #define CPUID_4_LEVEL(l) ((l) << 5)
71 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
72 #define CPUID_4_FULLY_ASSOC (1 << 9)
74 /* EDX: */
75 #define CPUID_4_NO_INVD_SHARING (1 << 0)
76 #define CPUID_4_INCLUSIVE (1 << 1)
77 #define CPUID_4_COMPLEX_IDX (1 << 2)
79 #define ASSOC_FULL 0xFF
81 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
82 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
83 a == 2 ? 0x2 : \
84 a == 4 ? 0x4 : \
85 a == 8 ? 0x6 : \
86 a == 16 ? 0x8 : \
87 a == 32 ? 0xA : \
88 a == 48 ? 0xB : \
89 a == 64 ? 0xC : \
90 a == 96 ? 0xD : \
91 a == 128 ? 0xE : \
92 a == ASSOC_FULL ? 0xF : \
93 0 /* invalid value */)
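/* Added illustration, not in the original file: a minimal compile-time
 * sanity check, assuming QEMU_BUILD_BUG_ON() (qemu/compiler.h, pulled in
 * via the includes above) is usable at file scope.  Per the mapping above,
 * an 8-way cache encodes as 0x6 and a fully associative one as 0xF, which
 * is the encoding CPUID leaf 0x80000006 expects in its associativity
 * fields. */
QEMU_BUILD_BUG_ON(AMD_ENC_ASSOC(8) != 0x6);
QEMU_BUILD_BUG_ON(AMD_ENC_ASSOC(ASSOC_FULL) != 0xF);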
96 /* Definitions of the hardcoded cache entries we expose: */
98 /* L1 data cache: */
99 #define L1D_LINE_SIZE 64
100 #define L1D_ASSOCIATIVITY 8
101 #define L1D_SETS 64
102 #define L1D_PARTITIONS 1
103 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
104 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
105 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
106 #define L1D_LINES_PER_TAG 1
107 #define L1D_SIZE_KB_AMD 64
108 #define L1D_ASSOCIATIVITY_AMD 2
110 /* L1 instruction cache: */
111 #define L1I_LINE_SIZE 64
112 #define L1I_ASSOCIATIVITY 8
113 #define L1I_SETS 64
114 #define L1I_PARTITIONS 1
115 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
116 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
117 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
118 #define L1I_LINES_PER_TAG 1
119 #define L1I_SIZE_KB_AMD 64
120 #define L1I_ASSOCIATIVITY_AMD 2
122 /* Level 2 unified cache: */
123 #define L2_LINE_SIZE 64
124 #define L2_ASSOCIATIVITY 16
125 #define L2_SETS 4096
126 #define L2_PARTITIONS 1
127 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
128 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
129 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
130 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
131 #define L2_LINES_PER_TAG 1
132 #define L2_SIZE_KB_AMD 512
134 /* No L3 cache: */
135 #define L3_SIZE_KB 0 /* disabled */
136 #define L3_ASSOCIATIVITY 0 /* disabled */
137 #define L3_LINES_PER_TAG 0 /* disabled */
138 #define L3_LINE_SIZE 0 /* disabled */
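/* Worked check of the size formula quoted in the comments above (added
 * sketch, not in the original file; again assumes QEMU_BUILD_BUG_ON() is
 * in scope): 64 B/line * 8 ways * 64 sets * 1 partition = 32 KiB for each
 * L1 cache, and 64 * 16 * 4096 * 1 = 4 MiB for the unified L2. */
QEMU_BUILD_BUG_ON(L1D_LINE_SIZE * L1D_ASSOCIATIVITY *
                  L1D_SETS * L1D_PARTITIONS != 32 * 1024);
QEMU_BUILD_BUG_ON(L1I_LINE_SIZE * L1I_ASSOCIATIVITY *
                  L1I_SETS * L1I_PARTITIONS != 32 * 1024);
QEMU_BUILD_BUG_ON(L2_LINE_SIZE * L2_ASSOCIATIVITY *
                  L2_SETS * L2_PARTITIONS != 4 * 1024 * 1024);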
140 /* TLB definitions: */
142 #define L1_DTLB_2M_ASSOC 1
143 #define L1_DTLB_2M_ENTRIES 255
144 #define L1_DTLB_4K_ASSOC 1
145 #define L1_DTLB_4K_ENTRIES 255
147 #define L1_ITLB_2M_ASSOC 1
148 #define L1_ITLB_2M_ENTRIES 255
149 #define L1_ITLB_4K_ASSOC 1
150 #define L1_ITLB_4K_ENTRIES 255
152 #define L2_DTLB_2M_ASSOC 0 /* disabled */
153 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
154 #define L2_DTLB_4K_ASSOC 4
155 #define L2_DTLB_4K_ENTRIES 512
157 #define L2_ITLB_2M_ASSOC 0 /* disabled */
158 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
159 #define L2_ITLB_4K_ASSOC 4
160 #define L2_ITLB_4K_ENTRIES 512
164 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
165 uint32_t vendor2, uint32_t vendor3)
167 int i;
168 for (i = 0; i < 4; i++) {
169 dst[i] = vendor1 >> (8 * i);
170 dst[i + 4] = vendor2 >> (8 * i);
171 dst[i + 8] = vendor3 >> (8 * i);
173 dst[CPUID_VENDOR_SZ] = '\0';
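/* Example, added for illustration: CPUID leaf 0 on an Intel host returns
 * EBX=0x756e6547 ("Genu"), EDX=0x49656e69 ("ineI") and ECX=0x6c65746e
 * ("ntel"), so host_x86_cpu_class_init() below calls
 * x86_cpu_vendor_words2str(dst, ebx, edx, ecx) to recover the familiar
 * "GenuineIntel" string. */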
176 /* feature flags taken from "Intel Processor Identification and the CPUID
177 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
178 * between feature naming conventions, aliases may be added.
180 static const char *feature_name[] = {
181 "fpu", "vme", "de", "pse",
182 "tsc", "msr", "pae", "mce",
183 "cx8", "apic", NULL, "sep",
184 "mtrr", "pge", "mca", "cmov",
185 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
186 NULL, "ds" /* Intel dts */, "acpi", "mmx",
187 "fxsr", "sse", "sse2", "ss",
188 "ht" /* Intel htt */, "tm", "ia64", "pbe",
190 static const char *ext_feature_name[] = {
191 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
192 "ds_cpl", "vmx", "smx", "est",
193 "tm2", "ssse3", "cid", NULL,
194 "fma", "cx16", "xtpr", "pdcm",
195 NULL, "pcid", "dca", "sse4.1|sse4_1",
196 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
197 "tsc-deadline", "aes", "xsave", "osxsave",
198 "avx", "f16c", "rdrand", "hypervisor",
200 /* Feature names that are already defined in feature_name[] but are set in
201 * CPUID[8000_0001].EDX on AMD CPUs don't have their names in
202 * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
203 * if and only if the CPU vendor is AMD.
205 static const char *ext2_feature_name[] = {
206 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
207 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
208 NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
209 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
210 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
211 "nx|xd", NULL, "mmxext", NULL /* mmx */,
212 NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
213 NULL, "lm|i64", "3dnowext", "3dnow",
215 static const char *ext3_feature_name[] = {
216 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
217 "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
218 "3dnowprefetch", "osvw", "ibs", "xop",
219 "skinit", "wdt", NULL, "lwp",
220 "fma4", "tce", NULL, "nodeid_msr",
221 NULL, "tbm", "topoext", "perfctr_core",
222 "perfctr_nb", NULL, NULL, NULL,
223 NULL, NULL, NULL, NULL,
226 static const char *ext4_feature_name[] = {
227 NULL, NULL, "xstore", "xstore-en",
228 NULL, NULL, "xcrypt", "xcrypt-en",
229 "ace2", "ace2-en", "phe", "phe-en",
230 "pmm", "pmm-en", NULL, NULL,
231 NULL, NULL, NULL, NULL,
232 NULL, NULL, NULL, NULL,
233 NULL, NULL, NULL, NULL,
234 NULL, NULL, NULL, NULL,
237 static const char *kvm_feature_name[] = {
238 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
239 "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
240 NULL, NULL, NULL, NULL,
241 NULL, NULL, NULL, NULL,
242 NULL, NULL, NULL, NULL,
243 NULL, NULL, NULL, NULL,
244 NULL, NULL, NULL, NULL,
245 NULL, NULL, NULL, NULL,
248 static const char *svm_feature_name[] = {
249 "npt", "lbrv", "svm_lock", "nrip_save",
250 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
251 NULL, NULL, "pause_filter", NULL,
252 "pfthreshold", NULL, NULL, NULL,
253 NULL, NULL, NULL, NULL,
254 NULL, NULL, NULL, NULL,
255 NULL, NULL, NULL, NULL,
256 NULL, NULL, NULL, NULL,
259 static const char *cpuid_7_0_ebx_feature_name[] = {
260 "fsgsbase", NULL, NULL, "bmi1", "hle", "avx2", NULL, "smep",
261 "bmi2", "erms", "invpcid", "rtm", NULL, NULL, NULL, NULL,
262 NULL, NULL, "rdseed", "adx", "smap", NULL, NULL, NULL,
263 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
266 static const char *cpuid_apm_edx_feature_name[] = {
267 NULL, NULL, NULL, NULL,
268 NULL, NULL, NULL, NULL,
269 "invtsc", NULL, NULL, NULL,
270 NULL, NULL, NULL, NULL,
271 NULL, NULL, NULL, NULL,
272 NULL, NULL, NULL, NULL,
273 NULL, NULL, NULL, NULL,
274 NULL, NULL, NULL, NULL,
277 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
278 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
279 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
280 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
281 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
282 CPUID_PSE36 | CPUID_FXSR)
283 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
284 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
285 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
286 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
287 CPUID_PAE | CPUID_SEP | CPUID_APIC)
289 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
290 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
291 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
292 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
293 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS)
294 /* partly implemented:
295 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
296 /* missing:
297 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
298 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
299 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
300 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
301 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
302 /* missing:
303 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
304 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
305 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
306 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_XSAVE,
307 CPUID_EXT_OSXSAVE, CPUID_EXT_AVX, CPUID_EXT_F16C,
308 CPUID_EXT_RDRAND */
310 #ifdef TARGET_X86_64
311 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
312 #else
313 #define TCG_EXT2_X86_64_FEATURES 0
314 #endif
316 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
317 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
318 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
319 TCG_EXT2_X86_64_FEATURES)
320 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
321 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
322 #define TCG_EXT4_FEATURES 0
323 #define TCG_SVM_FEATURES 0
324 #define TCG_KVM_FEATURES 0
325 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
326 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX)
327 /* missing:
328 CPUID_7_0_EBX_FSGSBASE, CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
329 CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
330 CPUID_7_0_EBX_RDSEED */
331 #define TCG_APM_FEATURES 0
334 typedef struct FeatureWordInfo {
335 const char **feat_names;
336 uint32_t cpuid_eax; /* Input EAX for CPUID */
337 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
338 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
339 int cpuid_reg; /* output register (R_* constant) */
340 uint32_t tcg_features; /* Feature flags supported by TCG */
341 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
342 } FeatureWordInfo;
344 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
345 [FEAT_1_EDX] = {
346 .feat_names = feature_name,
347 .cpuid_eax = 1, .cpuid_reg = R_EDX,
348 .tcg_features = TCG_FEATURES,
350 [FEAT_1_ECX] = {
351 .feat_names = ext_feature_name,
352 .cpuid_eax = 1, .cpuid_reg = R_ECX,
353 .tcg_features = TCG_EXT_FEATURES,
355 [FEAT_8000_0001_EDX] = {
356 .feat_names = ext2_feature_name,
357 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
358 .tcg_features = TCG_EXT2_FEATURES,
360 [FEAT_8000_0001_ECX] = {
361 .feat_names = ext3_feature_name,
362 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
363 .tcg_features = TCG_EXT3_FEATURES,
365 [FEAT_C000_0001_EDX] = {
366 .feat_names = ext4_feature_name,
367 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
368 .tcg_features = TCG_EXT4_FEATURES,
370 [FEAT_KVM] = {
371 .feat_names = kvm_feature_name,
372 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
373 .tcg_features = TCG_KVM_FEATURES,
375 [FEAT_SVM] = {
376 .feat_names = svm_feature_name,
377 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
378 .tcg_features = TCG_SVM_FEATURES,
380 [FEAT_7_0_EBX] = {
381 .feat_names = cpuid_7_0_ebx_feature_name,
382 .cpuid_eax = 7,
383 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
384 .cpuid_reg = R_EBX,
385 .tcg_features = TCG_7_0_EBX_FEATURES,
387 [FEAT_8000_0007_EDX] = {
388 .feat_names = cpuid_apm_edx_feature_name,
389 .cpuid_eax = 0x80000007,
390 .cpuid_reg = R_EDX,
391 .tcg_features = TCG_APM_FEATURES,
392 .unmigratable_flags = CPUID_APM_INVTSC,
396 typedef struct X86RegisterInfo32 {
397 /* Name of register */
398 const char *name;
399 /* QAPI enum value register */
400 X86CPURegister32 qapi_enum;
401 } X86RegisterInfo32;
403 #define REGISTER(reg) \
404 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
405 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
406 REGISTER(EAX),
407 REGISTER(ECX),
408 REGISTER(EDX),
409 REGISTER(EBX),
410 REGISTER(ESP),
411 REGISTER(EBP),
412 REGISTER(ESI),
413 REGISTER(EDI),
415 #undef REGISTER
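/* Added note: with the REGISTER() macro above, REGISTER(EAX) expands to
 *     [R_EAX] = { .name = "EAX", .qapi_enum = X86_CPU_REGISTER32_EAX }
 * so get_register_name_32(R_EAX) returns "EAX" and the matching QAPI enum
 * value can be reported through the "feature-words" property below. */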
417 typedef struct ExtSaveArea {
418 uint32_t feature, bits;
419 uint32_t offset, size;
420 } ExtSaveArea;
422 static const ExtSaveArea ext_save_areas[] = {
423 [2] = { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
424 .offset = 0x240, .size = 0x100 },
425 [3] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
426 .offset = 0x3c0, .size = 0x40 },
427 [4] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
428 .offset = 0x400, .size = 0x40 },
431 const char *get_register_name_32(unsigned int reg)
433 if (reg >= CPU_NB_REGS32) {
434 return NULL;
436 return x86_reg_info_32[reg].name;
439 /* collects per-function cpuid data
441 typedef struct model_features_t {
442 uint32_t *guest_feat;
443 uint32_t *host_feat;
444 FeatureWord feat_word;
445 } model_features_t;
447 /* KVM-specific features that are automatically added to all CPU models
448 * when KVM is enabled.
450 static uint32_t kvm_default_features[FEATURE_WORDS] = {
451 [FEAT_KVM] = (1 << KVM_FEATURE_CLOCKSOURCE) |
452 (1 << KVM_FEATURE_NOP_IO_DELAY) |
453 (1 << KVM_FEATURE_CLOCKSOURCE2) |
454 (1 << KVM_FEATURE_ASYNC_PF) |
455 (1 << KVM_FEATURE_STEAL_TIME) |
456 (1 << KVM_FEATURE_PV_EOI) |
457 (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT),
458 [FEAT_1_ECX] = CPUID_EXT_X2APIC,
461 /* Features that are not added by default to any CPU model when KVM is enabled.
463 static uint32_t kvm_default_unset_features[FEATURE_WORDS] = {
464 [FEAT_1_ECX] = CPUID_EXT_MONITOR,
467 void x86_cpu_compat_disable_kvm_features(FeatureWord w, uint32_t features)
469 kvm_default_features[w] &= ~features;
473 * Returns the set of feature flags that are supported and migratable by
474 * QEMU, for a given FeatureWord.
476 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
478 FeatureWordInfo *wi = &feature_word_info[w];
479 uint32_t r = 0;
480 int i;
482 for (i = 0; i < 32; i++) {
483 uint32_t f = 1U << i;
484 /* If the feature name is unknown, it is not supported by QEMU yet */
485 if (!wi->feat_names[i]) {
486 continue;
488 /* Skip features known to QEMU, but explicitly marked as unmigratable */
489 if (wi->unmigratable_flags & f) {
490 continue;
492 r |= f;
494 return r;
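/* Added note: for example, FEAT_8000_0007_EDX sets .unmigratable_flags =
 * CPUID_APM_INVTSC in feature_word_info[], so the "invtsc" bit is removed
 * from the mask returned here even though QEMU knows its name; flags with
 * no name in feat_names[] are skipped as well. */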
497 void host_cpuid(uint32_t function, uint32_t count,
498 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
500 uint32_t vec[4];
502 #ifdef __x86_64__
503 asm volatile("cpuid"
504 : "=a"(vec[0]), "=b"(vec[1]),
505 "=c"(vec[2]), "=d"(vec[3])
506 : "0"(function), "c"(count) : "cc");
507 #elif defined(__i386__)
508 asm volatile("pusha \n\t"
509 "cpuid \n\t"
510 "mov %%eax, 0(%2) \n\t"
511 "mov %%ebx, 4(%2) \n\t"
512 "mov %%ecx, 8(%2) \n\t"
513 "mov %%edx, 12(%2) \n\t"
514 "popa"
515 : : "a"(function), "c"(count), "S"(vec)
516 : "memory", "cc");
517 #else
518 abort();
519 #endif
521 if (eax)
522 *eax = vec[0];
523 if (ebx)
524 *ebx = vec[1];
525 if (ecx)
526 *ecx = vec[2];
527 if (edx)
528 *edx = vec[3];
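/* Added note on the 32-bit path above: the results are written through
 * %esi with pusha/popa around the CPUID instruction, so the asm statement
 * never names EBX as an output; EBX is the PIC base register on i386 and
 * some compilers reject it in asm constraints of PIC builds. */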
531 #define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
533 /* general substring compare of *[s1..e1) and *[s2..e2). sx is start of
534 * a substring. ex if !NULL points to the first char after a substring,
535 * otherwise the string is assumed to be sized by a terminating nul.
536 * Return lexical ordering of *s1:*s2.
538 static int sstrcmp(const char *s1, const char *e1, const char *s2,
539 const char *e2)
541 for (;;) {
542 if (!*s1 || !*s2 || *s1 != *s2)
543 return (*s1 - *s2);
544 ++s1, ++s2;
545 if (s1 == e1 && s2 == e2)
546 return (0);
547 else if (s1 == e1)
548 return (*s2);
549 else if (s2 == e2)
550 return (*s1);
554 /* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
555 * '|' delimited (possibly empty) strings in which case search for a match
556 * within the alternatives proceeds left to right. Return 0 for success,
557 * non-zero otherwise.
559 static int altcmp(const char *s, const char *e, const char *altstr)
561 const char *p, *q;
563 for (q = p = altstr; ; ) {
564 while (*p && *p != '|')
565 ++p;
566 if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
567 return (0);
568 if (!*p)
569 return (1);
570 else
571 q = ++p;
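/* Example, added for illustration: altcmp(s, e, "sse4.1|sse4_1") returns 0
 * when [s..e) matches either spelling, which is how lookup_feature() below
 * accepts the '|'-separated aliases listed in ext_feature_name[] above. */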
575 /* search featureset for flag *[s..e), if found set corresponding bit in
576 * *pval and return true, otherwise return false
578 static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
579 const char **featureset)
581 uint32_t mask;
582 const char **ppc;
583 bool found = false;
585 for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
586 if (*ppc && !altcmp(s, e, *ppc)) {
587 *pval |= mask;
588 found = true;
591 return found;
594 static void add_flagname_to_bitmaps(const char *flagname,
595 FeatureWordArray words)
597 FeatureWord w;
598 for (w = 0; w < FEATURE_WORDS; w++) {
599 FeatureWordInfo *wi = &feature_word_info[w];
600 if (wi->feat_names &&
601 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
602 break;
605 if (w == FEATURE_WORDS) {
606 fprintf(stderr, "CPU feature %s not found\n", flagname);
610 /* CPU class name definitions: */
612 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
613 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
615 /* Return type name for a given CPU model name
616 * Caller is responsible for freeing the returned string.
618 static char *x86_cpu_type_name(const char *model_name)
620 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
623 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
625 ObjectClass *oc;
626 char *typename;
628 if (cpu_model == NULL) {
629 return NULL;
632 typename = x86_cpu_type_name(cpu_model);
633 oc = object_class_by_name(typename);
634 g_free(typename);
635 return oc;
638 struct X86CPUDefinition {
639 const char *name;
640 uint32_t level;
641 uint32_t xlevel;
642 uint32_t xlevel2;
643 /* vendor is a zero-terminated, 12-character ASCII string */
644 char vendor[CPUID_VENDOR_SZ + 1];
645 int family;
646 int model;
647 int stepping;
648 FeatureWordArray features;
649 char model_id[48];
650 bool cache_info_passthrough;
653 static X86CPUDefinition builtin_x86_defs[] = {
655 .name = "qemu64",
656 .level = 4,
657 .vendor = CPUID_VENDOR_AMD,
658 .family = 6,
659 .model = 6,
660 .stepping = 3,
661 .features[FEAT_1_EDX] =
662 PPRO_FEATURES |
663 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
664 CPUID_PSE36,
665 .features[FEAT_1_ECX] =
666 CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT,
667 .features[FEAT_8000_0001_EDX] =
668 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
669 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
670 .features[FEAT_8000_0001_ECX] =
671 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
672 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
673 .xlevel = 0x8000000A,
676 .name = "phenom",
677 .level = 5,
678 .vendor = CPUID_VENDOR_AMD,
679 .family = 16,
680 .model = 2,
681 .stepping = 3,
682 .features[FEAT_1_EDX] =
683 PPRO_FEATURES |
684 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
685 CPUID_PSE36 | CPUID_VME | CPUID_HT,
686 .features[FEAT_1_ECX] =
687 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
688 CPUID_EXT_POPCNT,
689 .features[FEAT_8000_0001_EDX] =
690 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
691 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
692 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
693 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
694 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
695 CPUID_EXT3_CR8LEG,
696 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
697 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
698 .features[FEAT_8000_0001_ECX] =
699 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
700 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
701 .features[FEAT_SVM] =
702 CPUID_SVM_NPT | CPUID_SVM_LBRV,
703 .xlevel = 0x8000001A,
704 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
707 .name = "core2duo",
708 .level = 10,
709 .vendor = CPUID_VENDOR_INTEL,
710 .family = 6,
711 .model = 15,
712 .stepping = 11,
713 .features[FEAT_1_EDX] =
714 PPRO_FEATURES |
715 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
716 CPUID_PSE36 | CPUID_VME | CPUID_DTS | CPUID_ACPI | CPUID_SS |
717 CPUID_HT | CPUID_TM | CPUID_PBE,
718 .features[FEAT_1_ECX] =
719 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
720 CPUID_EXT_DTES64 | CPUID_EXT_DSCPL | CPUID_EXT_VMX | CPUID_EXT_EST |
721 CPUID_EXT_TM2 | CPUID_EXT_CX16 | CPUID_EXT_XTPR | CPUID_EXT_PDCM,
722 .features[FEAT_8000_0001_EDX] =
723 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
724 .features[FEAT_8000_0001_ECX] =
725 CPUID_EXT3_LAHF_LM,
726 .xlevel = 0x80000008,
727 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
730 .name = "kvm64",
731 .level = 5,
732 .vendor = CPUID_VENDOR_INTEL,
733 .family = 15,
734 .model = 6,
735 .stepping = 1,
736 /* Missing: CPUID_VME, CPUID_HT */
737 .features[FEAT_1_EDX] =
738 PPRO_FEATURES |
739 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
740 CPUID_PSE36,
741 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
742 .features[FEAT_1_ECX] =
743 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
744 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
745 .features[FEAT_8000_0001_EDX] =
746 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
747 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
748 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
749 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
750 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
751 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
752 .features[FEAT_8000_0001_ECX] =
754 .xlevel = 0x80000008,
755 .model_id = "Common KVM processor"
758 .name = "qemu32",
759 .level = 4,
760 .vendor = CPUID_VENDOR_INTEL,
761 .family = 6,
762 .model = 6,
763 .stepping = 3,
764 .features[FEAT_1_EDX] =
765 PPRO_FEATURES,
766 .features[FEAT_1_ECX] =
767 CPUID_EXT_SSE3 | CPUID_EXT_POPCNT,
768 .xlevel = 0x80000004,
771 .name = "kvm32",
772 .level = 5,
773 .vendor = CPUID_VENDOR_INTEL,
774 .family = 15,
775 .model = 6,
776 .stepping = 1,
777 .features[FEAT_1_EDX] =
778 PPRO_FEATURES |
779 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
780 .features[FEAT_1_ECX] =
781 CPUID_EXT_SSE3,
782 .features[FEAT_8000_0001_EDX] =
783 PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES,
784 .features[FEAT_8000_0001_ECX] =
786 .xlevel = 0x80000008,
787 .model_id = "Common 32-bit KVM processor"
790 .name = "coreduo",
791 .level = 10,
792 .vendor = CPUID_VENDOR_INTEL,
793 .family = 6,
794 .model = 14,
795 .stepping = 8,
796 .features[FEAT_1_EDX] =
797 PPRO_FEATURES | CPUID_VME |
798 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_DTS | CPUID_ACPI |
799 CPUID_SS | CPUID_HT | CPUID_TM | CPUID_PBE,
800 .features[FEAT_1_ECX] =
801 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_VMX |
802 CPUID_EXT_EST | CPUID_EXT_TM2 | CPUID_EXT_XTPR | CPUID_EXT_PDCM,
803 .features[FEAT_8000_0001_EDX] =
804 CPUID_EXT2_NX,
805 .xlevel = 0x80000008,
806 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
809 .name = "486",
810 .level = 1,
811 .vendor = CPUID_VENDOR_INTEL,
812 .family = 4,
813 .model = 8,
814 .stepping = 0,
815 .features[FEAT_1_EDX] =
816 I486_FEATURES,
817 .xlevel = 0,
820 .name = "pentium",
821 .level = 1,
822 .vendor = CPUID_VENDOR_INTEL,
823 .family = 5,
824 .model = 4,
825 .stepping = 3,
826 .features[FEAT_1_EDX] =
827 PENTIUM_FEATURES,
828 .xlevel = 0,
831 .name = "pentium2",
832 .level = 2,
833 .vendor = CPUID_VENDOR_INTEL,
834 .family = 6,
835 .model = 5,
836 .stepping = 2,
837 .features[FEAT_1_EDX] =
838 PENTIUM2_FEATURES,
839 .xlevel = 0,
842 .name = "pentium3",
843 .level = 2,
844 .vendor = CPUID_VENDOR_INTEL,
845 .family = 6,
846 .model = 7,
847 .stepping = 3,
848 .features[FEAT_1_EDX] =
849 PENTIUM3_FEATURES,
850 .xlevel = 0,
853 .name = "athlon",
854 .level = 2,
855 .vendor = CPUID_VENDOR_AMD,
856 .family = 6,
857 .model = 2,
858 .stepping = 3,
859 .features[FEAT_1_EDX] =
860 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
861 CPUID_MCA,
862 .features[FEAT_8000_0001_EDX] =
863 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
864 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
865 .xlevel = 0x80000008,
868 .name = "n270",
869 /* original is on level 10 */
870 .level = 5,
871 .vendor = CPUID_VENDOR_INTEL,
872 .family = 6,
873 .model = 28,
874 .stepping = 2,
875 .features[FEAT_1_EDX] =
876 PPRO_FEATURES |
877 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME | CPUID_DTS |
878 CPUID_ACPI | CPUID_SS | CPUID_HT | CPUID_TM | CPUID_PBE,
879 /* Some CPUs lack CPUID_SEP */
880 .features[FEAT_1_ECX] =
881 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
882 CPUID_EXT_DSCPL | CPUID_EXT_EST | CPUID_EXT_TM2 | CPUID_EXT_XTPR |
883 CPUID_EXT_MOVBE,
884 .features[FEAT_8000_0001_EDX] =
885 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
886 CPUID_EXT2_NX,
887 .features[FEAT_8000_0001_ECX] =
888 CPUID_EXT3_LAHF_LM,
889 .xlevel = 0x8000000A,
890 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
893 .name = "Conroe",
894 .level = 4,
895 .vendor = CPUID_VENDOR_INTEL,
896 .family = 6,
897 .model = 15,
898 .stepping = 3,
899 .features[FEAT_1_EDX] =
900 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
901 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
902 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
903 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
904 CPUID_DE | CPUID_FP87,
905 .features[FEAT_1_ECX] =
906 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
907 .features[FEAT_8000_0001_EDX] =
908 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
909 .features[FEAT_8000_0001_ECX] =
910 CPUID_EXT3_LAHF_LM,
911 .xlevel = 0x8000000A,
912 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
915 .name = "Penryn",
916 .level = 4,
917 .vendor = CPUID_VENDOR_INTEL,
918 .family = 6,
919 .model = 23,
920 .stepping = 3,
921 .features[FEAT_1_EDX] =
922 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
923 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
924 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
925 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
926 CPUID_DE | CPUID_FP87,
927 .features[FEAT_1_ECX] =
928 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
929 CPUID_EXT_SSE3,
930 .features[FEAT_8000_0001_EDX] =
931 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
932 .features[FEAT_8000_0001_ECX] =
933 CPUID_EXT3_LAHF_LM,
934 .xlevel = 0x8000000A,
935 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
938 .name = "Nehalem",
939 .level = 4,
940 .vendor = CPUID_VENDOR_INTEL,
941 .family = 6,
942 .model = 26,
943 .stepping = 3,
944 .features[FEAT_1_EDX] =
945 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
946 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
947 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
948 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
949 CPUID_DE | CPUID_FP87,
950 .features[FEAT_1_ECX] =
951 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
952 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
953 .features[FEAT_8000_0001_EDX] =
954 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
955 .features[FEAT_8000_0001_ECX] =
956 CPUID_EXT3_LAHF_LM,
957 .xlevel = 0x8000000A,
958 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
961 .name = "Westmere",
962 .level = 11,
963 .vendor = CPUID_VENDOR_INTEL,
964 .family = 6,
965 .model = 44,
966 .stepping = 1,
967 .features[FEAT_1_EDX] =
968 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
969 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
970 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
971 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
972 CPUID_DE | CPUID_FP87,
973 .features[FEAT_1_ECX] =
974 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
975 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
976 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
977 .features[FEAT_8000_0001_EDX] =
978 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
979 .features[FEAT_8000_0001_ECX] =
980 CPUID_EXT3_LAHF_LM,
981 .xlevel = 0x8000000A,
982 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
985 .name = "SandyBridge",
986 .level = 0xd,
987 .vendor = CPUID_VENDOR_INTEL,
988 .family = 6,
989 .model = 42,
990 .stepping = 1,
991 .features[FEAT_1_EDX] =
992 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
993 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
994 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
995 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
996 CPUID_DE | CPUID_FP87,
997 .features[FEAT_1_ECX] =
998 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
999 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1000 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1001 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1002 CPUID_EXT_SSE3,
1003 .features[FEAT_8000_0001_EDX] =
1004 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1005 CPUID_EXT2_SYSCALL,
1006 .features[FEAT_8000_0001_ECX] =
1007 CPUID_EXT3_LAHF_LM,
1008 .xlevel = 0x8000000A,
1009 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1012 .name = "Haswell",
1013 .level = 0xd,
1014 .vendor = CPUID_VENDOR_INTEL,
1015 .family = 6,
1016 .model = 60,
1017 .stepping = 1,
1018 .features[FEAT_1_EDX] =
1019 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1020 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1021 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1022 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1023 CPUID_DE | CPUID_FP87,
1024 .features[FEAT_1_ECX] =
1025 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1026 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1027 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1028 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1029 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1030 CPUID_EXT_PCID,
1031 .features[FEAT_8000_0001_EDX] =
1032 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1033 CPUID_EXT2_SYSCALL,
1034 .features[FEAT_8000_0001_ECX] =
1035 CPUID_EXT3_LAHF_LM,
1036 .features[FEAT_7_0_EBX] =
1037 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1038 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1039 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1040 CPUID_7_0_EBX_RTM,
1041 .xlevel = 0x8000000A,
1042 .model_id = "Intel Core Processor (Haswell)",
1045 .name = "Broadwell",
1046 .level = 0xd,
1047 .vendor = CPUID_VENDOR_INTEL,
1048 .family = 6,
1049 .model = 61,
1050 .stepping = 2,
1051 .features[FEAT_1_EDX] =
1052 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1053 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1054 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1055 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1056 CPUID_DE | CPUID_FP87,
1057 .features[FEAT_1_ECX] =
1058 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1059 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1060 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1061 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1062 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1063 CPUID_EXT_PCID,
1064 .features[FEAT_8000_0001_EDX] =
1065 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1066 CPUID_EXT2_SYSCALL,
1067 .features[FEAT_8000_0001_ECX] =
1068 CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1069 .features[FEAT_7_0_EBX] =
1070 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1071 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1072 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1073 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1074 CPUID_7_0_EBX_SMAP,
1075 .xlevel = 0x8000000A,
1076 .model_id = "Intel Core Processor (Broadwell)",
1079 .name = "Opteron_G1",
1080 .level = 5,
1081 .vendor = CPUID_VENDOR_AMD,
1082 .family = 15,
1083 .model = 6,
1084 .stepping = 1,
1085 .features[FEAT_1_EDX] =
1086 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1087 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1088 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1089 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1090 CPUID_DE | CPUID_FP87,
1091 .features[FEAT_1_ECX] =
1092 CPUID_EXT_SSE3,
1093 .features[FEAT_8000_0001_EDX] =
1094 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1095 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1096 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1097 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1098 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1099 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1100 .xlevel = 0x80000008,
1101 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1104 .name = "Opteron_G2",
1105 .level = 5,
1106 .vendor = CPUID_VENDOR_AMD,
1107 .family = 15,
1108 .model = 6,
1109 .stepping = 1,
1110 .features[FEAT_1_EDX] =
1111 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1112 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1113 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1114 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1115 CPUID_DE | CPUID_FP87,
1116 .features[FEAT_1_ECX] =
1117 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1118 .features[FEAT_8000_0001_EDX] =
1119 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1120 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1121 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1122 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1123 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1124 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1125 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1126 .features[FEAT_8000_0001_ECX] =
1127 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1128 .xlevel = 0x80000008,
1129 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1132 .name = "Opteron_G3",
1133 .level = 5,
1134 .vendor = CPUID_VENDOR_AMD,
1135 .family = 15,
1136 .model = 6,
1137 .stepping = 1,
1138 .features[FEAT_1_EDX] =
1139 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1140 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1141 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1142 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1143 CPUID_DE | CPUID_FP87,
1144 .features[FEAT_1_ECX] =
1145 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1146 CPUID_EXT_SSE3,
1147 .features[FEAT_8000_0001_EDX] =
1148 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1149 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1150 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1151 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1152 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1153 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1154 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1155 .features[FEAT_8000_0001_ECX] =
1156 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1157 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1158 .xlevel = 0x80000008,
1159 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1162 .name = "Opteron_G4",
1163 .level = 0xd,
1164 .vendor = CPUID_VENDOR_AMD,
1165 .family = 21,
1166 .model = 1,
1167 .stepping = 2,
1168 .features[FEAT_1_EDX] =
1169 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1170 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1171 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1172 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1173 CPUID_DE | CPUID_FP87,
1174 .features[FEAT_1_ECX] =
1175 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1176 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1177 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1178 CPUID_EXT_SSE3,
1179 .features[FEAT_8000_0001_EDX] =
1180 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1181 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1182 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1183 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1184 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1185 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1186 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1187 .features[FEAT_8000_0001_ECX] =
1188 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1189 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1190 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1191 CPUID_EXT3_LAHF_LM,
1192 .xlevel = 0x8000001A,
1193 .model_id = "AMD Opteron 62xx class CPU",
1196 .name = "Opteron_G5",
1197 .level = 0xd,
1198 .vendor = CPUID_VENDOR_AMD,
1199 .family = 21,
1200 .model = 2,
1201 .stepping = 0,
1202 .features[FEAT_1_EDX] =
1203 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1204 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1205 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1206 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1207 CPUID_DE | CPUID_FP87,
1208 .features[FEAT_1_ECX] =
1209 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1210 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1211 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1212 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1213 .features[FEAT_8000_0001_EDX] =
1214 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1215 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1216 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1217 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1218 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1219 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1220 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1221 .features[FEAT_8000_0001_ECX] =
1222 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1223 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1224 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1225 CPUID_EXT3_LAHF_LM,
1226 .xlevel = 0x8000001A,
1227 .model_id = "AMD Opteron 63xx class CPU",
1232 * x86_cpu_compat_set_features:
1233 * @cpu_model: CPU model name to be changed. If NULL, all CPU models are changed
1234 * @w: Identifies the feature word to be changed.
1235 * @feat_add: Feature bits to be added to feature word
1236 * @feat_remove: Feature bits to be removed from feature word
1238 * Change CPU model feature bits for compatibility.
1240 * This function may be used by machine-type compatibility functions
1241 * to enable or disable feature bits on specific CPU models.
1243 void x86_cpu_compat_set_features(const char *cpu_model, FeatureWord w,
1244 uint32_t feat_add, uint32_t feat_remove)
1246 X86CPUDefinition *def;
1247 int i;
1248 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1249 def = &builtin_x86_defs[i];
1250 if (!cpu_model || !strcmp(cpu_model, def->name)) {
1251 def->features[w] |= feat_add;
1252 def->features[w] &= ~feat_remove;
1257 #ifdef CONFIG_KVM
1259 static int cpu_x86_fill_model_id(char *str)
1261 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1262 int i;
1264 for (i = 0; i < 3; i++) {
1265 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
1266 memcpy(str + i * 16 + 0, &eax, 4);
1267 memcpy(str + i * 16 + 4, &ebx, 4);
1268 memcpy(str + i * 16 + 8, &ecx, 4);
1269 memcpy(str + i * 16 + 12, &edx, 4);
1271 return 0;
1274 static X86CPUDefinition host_cpudef;
1276 static Property host_x86_cpu_properties[] = {
1277 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1278 DEFINE_PROP_END_OF_LIST()
1281 /* class_init for the "host" CPU model
1283 * This function may be called before KVM is initialized.
1285 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1287 DeviceClass *dc = DEVICE_CLASS(oc);
1288 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1289 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1291 xcc->kvm_required = true;
1293 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1294 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1296 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1297 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1298 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1299 host_cpudef.stepping = eax & 0x0F;
1301 cpu_x86_fill_model_id(host_cpudef.model_id);
1303 xcc->cpu_def = &host_cpudef;
1304 host_cpudef.cache_info_passthrough = true;
1306 /* level, xlevel, xlevel2, and the feature words are initialized on
1307 * instance_init, because they require KVM to be initialized.
1310 dc->props = host_x86_cpu_properties;
1313 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1314 bool migratable_only);
1316 static void host_x86_cpu_initfn(Object *obj)
1318 X86CPU *cpu = X86_CPU(obj);
1319 CPUX86State *env = &cpu->env;
1320 KVMState *s = kvm_state;
1321 FeatureWord w;
1323 assert(kvm_enabled());
1325 env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1326 env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1327 env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1329 for (w = 0; w < FEATURE_WORDS; w++) {
1330 env->features[w] =
1331 x86_cpu_get_supported_feature_word(w, cpu->migratable);
1333 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1336 static const TypeInfo host_x86_cpu_type_info = {
1337 .name = X86_CPU_TYPE_NAME("host"),
1338 .parent = TYPE_X86_CPU,
1339 .instance_init = host_x86_cpu_initfn,
1340 .class_init = host_x86_cpu_class_init,
1343 #endif
1345 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1347 FeatureWordInfo *f = &feature_word_info[w];
1348 int i;
1350 for (i = 0; i < 32; ++i) {
1351 if (1 << i & mask) {
1352 const char *reg = get_register_name_32(f->cpuid_reg);
1353 assert(reg);
1354 fprintf(stderr, "warning: %s doesn't support requested feature: "
1355 "CPUID.%02XH:%s%s%s [bit %d]\n",
1356 kvm_enabled() ? "host" : "TCG",
1357 f->cpuid_eax, reg,
1358 f->feat_names[i] ? "." : "",
1359 f->feat_names[i] ? f->feat_names[i] : "", i);
1364 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, void *opaque,
1365 const char *name, Error **errp)
1367 X86CPU *cpu = X86_CPU(obj);
1368 CPUX86State *env = &cpu->env;
1369 int64_t value;
1371 value = (env->cpuid_version >> 8) & 0xf;
1372 if (value == 0xf) {
1373 value += (env->cpuid_version >> 20) & 0xff;
1375 visit_type_int(v, &value, name, errp);
1378 static void x86_cpuid_version_set_family(Object *obj, Visitor *v, void *opaque,
1379 const char *name, Error **errp)
1381 X86CPU *cpu = X86_CPU(obj);
1382 CPUX86State *env = &cpu->env;
1383 const int64_t min = 0;
1384 const int64_t max = 0xff + 0xf;
1385 Error *local_err = NULL;
1386 int64_t value;
1388 visit_type_int(v, &value, name, &local_err);
1389 if (local_err) {
1390 error_propagate(errp, local_err);
1391 return;
1393 if (value < min || value > max) {
1394 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1395 name ? name : "null", value, min, max);
1396 return;
1399 env->cpuid_version &= ~0xff00f00;
1400 if (value > 0x0f) {
1401 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1402 } else {
1403 env->cpuid_version |= value << 8;
1407 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, void *opaque,
1408 const char *name, Error **errp)
1410 X86CPU *cpu = X86_CPU(obj);
1411 CPUX86State *env = &cpu->env;
1412 int64_t value;
1414 value = (env->cpuid_version >> 4) & 0xf;
1415 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1416 visit_type_int(v, &value, name, errp);
1419 static void x86_cpuid_version_set_model(Object *obj, Visitor *v, void *opaque,
1420 const char *name, Error **errp)
1422 X86CPU *cpu = X86_CPU(obj);
1423 CPUX86State *env = &cpu->env;
1424 const int64_t min = 0;
1425 const int64_t max = 0xff;
1426 Error *local_err = NULL;
1427 int64_t value;
1429 visit_type_int(v, &value, name, &local_err);
1430 if (local_err) {
1431 error_propagate(errp, local_err);
1432 return;
1434 if (value < min || value > max) {
1435 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1436 name ? name : "null", value, min, max);
1437 return;
1440 env->cpuid_version &= ~0xf00f0;
1441 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1444 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1445 void *opaque, const char *name,
1446 Error **errp)
1448 X86CPU *cpu = X86_CPU(obj);
1449 CPUX86State *env = &cpu->env;
1450 int64_t value;
1452 value = env->cpuid_version & 0xf;
1453 visit_type_int(v, &value, name, errp);
1456 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1457 void *opaque, const char *name,
1458 Error **errp)
1460 X86CPU *cpu = X86_CPU(obj);
1461 CPUX86State *env = &cpu->env;
1462 const int64_t min = 0;
1463 const int64_t max = 0xf;
1464 Error *local_err = NULL;
1465 int64_t value;
1467 visit_type_int(v, &value, name, &local_err);
1468 if (local_err) {
1469 error_propagate(errp, local_err);
1470 return;
1472 if (value < min || value > max) {
1473 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1474 name ? name : "null", value, min, max);
1475 return;
1478 env->cpuid_version &= ~0xf;
1479 env->cpuid_version |= value & 0xf;
1482 static void x86_cpuid_get_level(Object *obj, Visitor *v, void *opaque,
1483 const char *name, Error **errp)
1485 X86CPU *cpu = X86_CPU(obj);
1487 visit_type_uint32(v, &cpu->env.cpuid_level, name, errp);
1490 static void x86_cpuid_set_level(Object *obj, Visitor *v, void *opaque,
1491 const char *name, Error **errp)
1493 X86CPU *cpu = X86_CPU(obj);
1495 visit_type_uint32(v, &cpu->env.cpuid_level, name, errp);
1498 static void x86_cpuid_get_xlevel(Object *obj, Visitor *v, void *opaque,
1499 const char *name, Error **errp)
1501 X86CPU *cpu = X86_CPU(obj);
1503 visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp);
1506 static void x86_cpuid_set_xlevel(Object *obj, Visitor *v, void *opaque,
1507 const char *name, Error **errp)
1509 X86CPU *cpu = X86_CPU(obj);
1511 visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp);
1514 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1516 X86CPU *cpu = X86_CPU(obj);
1517 CPUX86State *env = &cpu->env;
1518 char *value;
1520 value = (char *)g_malloc(CPUID_VENDOR_SZ + 1);
1521 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1522 env->cpuid_vendor3);
1523 return value;
1526 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1527 Error **errp)
1529 X86CPU *cpu = X86_CPU(obj);
1530 CPUX86State *env = &cpu->env;
1531 int i;
1533 if (strlen(value) != CPUID_VENDOR_SZ) {
1534 error_set(errp, QERR_PROPERTY_VALUE_BAD, "",
1535 "vendor", value);
1536 return;
1539 env->cpuid_vendor1 = 0;
1540 env->cpuid_vendor2 = 0;
1541 env->cpuid_vendor3 = 0;
1542 for (i = 0; i < 4; i++) {
1543 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1544 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1545 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1549 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1551 X86CPU *cpu = X86_CPU(obj);
1552 CPUX86State *env = &cpu->env;
1553 char *value;
1554 int i;
1556 value = g_malloc(48 + 1);
1557 for (i = 0; i < 48; i++) {
1558 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1560 value[48] = '\0';
1561 return value;
1564 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1565 Error **errp)
1567 X86CPU *cpu = X86_CPU(obj);
1568 CPUX86State *env = &cpu->env;
1569 int c, len, i;
1571 if (model_id == NULL) {
1572 model_id = "";
1574 len = strlen(model_id);
1575 memset(env->cpuid_model, 0, 48);
1576 for (i = 0; i < 48; i++) {
1577 if (i >= len) {
1578 c = '\0';
1579 } else {
1580 c = (uint8_t)model_id[i];
1582 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1586 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, void *opaque,
1587 const char *name, Error **errp)
1589 X86CPU *cpu = X86_CPU(obj);
1590 int64_t value;
1592 value = cpu->env.tsc_khz * 1000;
1593 visit_type_int(v, &value, name, errp);
1596 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, void *opaque,
1597 const char *name, Error **errp)
1599 X86CPU *cpu = X86_CPU(obj);
1600 const int64_t min = 0;
1601 const int64_t max = INT64_MAX;
1602 Error *local_err = NULL;
1603 int64_t value;
1605 visit_type_int(v, &value, name, &local_err);
1606 if (local_err) {
1607 error_propagate(errp, local_err);
1608 return;
1610 if (value < min || value > max) {
1611 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1612 name ? name : "null", value, min, max);
1613 return;
1616 cpu->env.tsc_khz = value / 1000;
1619 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, void *opaque,
1620 const char *name, Error **errp)
1622 X86CPU *cpu = X86_CPU(obj);
1623 int64_t value = cpu->env.cpuid_apic_id;
1625 visit_type_int(v, &value, name, errp);
1628 static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, void *opaque,
1629 const char *name, Error **errp)
1631 X86CPU *cpu = X86_CPU(obj);
1632 DeviceState *dev = DEVICE(obj);
1633 const int64_t min = 0;
1634 const int64_t max = UINT32_MAX;
1635 Error *error = NULL;
1636 int64_t value;
1638 if (dev->realized) {
1639 error_setg(errp, "Attempt to set property '%s' on '%s' after "
1640 "it was realized", name, object_get_typename(obj));
1641 return;
1644 visit_type_int(v, &value, name, &error);
1645 if (error) {
1646 error_propagate(errp, error);
1647 return;
1649 if (value < min || value > max) {
1650 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1651 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
1652 object_get_typename(obj), name, value, min, max);
1653 return;
1656 if ((value != cpu->env.cpuid_apic_id) && cpu_exists(value)) {
1657 error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
1658 return;
1660 cpu->env.cpuid_apic_id = value;
1663 /* Generic getter for "feature-words" and "filtered-features" properties */
1664 static void x86_cpu_get_feature_words(Object *obj, Visitor *v, void *opaque,
1665 const char *name, Error **errp)
1667 uint32_t *array = (uint32_t *)opaque;
1668 FeatureWord w;
1669 Error *err = NULL;
1670 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1671 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1672 X86CPUFeatureWordInfoList *list = NULL;
1674 for (w = 0; w < FEATURE_WORDS; w++) {
1675 FeatureWordInfo *wi = &feature_word_info[w];
1676 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1677 qwi->cpuid_input_eax = wi->cpuid_eax;
1678 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1679 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1680 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1681 qwi->features = array[w];
1683 /* List will be in reverse order, but order shouldn't matter */
1684 list_entries[w].next = list;
1685 list_entries[w].value = &word_infos[w];
1686 list = &list_entries[w];
1689 visit_type_X86CPUFeatureWordInfoList(v, &list, "feature-words", &err);
1690 error_propagate(errp, err);
1693 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1694 const char *name, Error **errp)
1696 X86CPU *cpu = X86_CPU(obj);
1697 int64_t value = cpu->hyperv_spinlock_attempts;
1699 visit_type_int(v, &value, name, errp);
1702 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1703 const char *name, Error **errp)
1705 const int64_t min = 0xFFF;
1706 const int64_t max = UINT_MAX;
1707 X86CPU *cpu = X86_CPU(obj);
1708 Error *err = NULL;
1709 int64_t value;
1711 visit_type_int(v, &value, name, &err);
1712 if (err) {
1713 error_propagate(errp, err);
1714 return;
1717 if (value < min || value > max) {
1718 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1719 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1720 object_get_typename(obj), name ? name : "null",
1721 value, min, max);
1722 return;
1724 cpu->hyperv_spinlock_attempts = value;
1727 static PropertyInfo qdev_prop_spinlocks = {
1728 .name = "int",
1729 .get = x86_get_hv_spinlocks,
1730 .set = x86_set_hv_spinlocks,
1733 /* Convert all '_' in a feature string option name to '-', to make the feature
1734 * name conform to the QOM property naming rule, which uses '-' instead of '_'.
1736 static inline void feat2prop(char *s)
1738 while ((s = strchr(s, '_'))) {
1739 *s = '-';
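/* Example, added for illustration: feat2prop() rewrites "hv_spinlocks"
 * into "hv-spinlocks" and "tsc_freq" into "tsc-freq", so underscore
 * spellings given on the command line still reach the QOM properties
 * handled by the parser below. */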
1743 /* Parse "+feature,-feature,feature=foo" CPU feature string
1745 static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
1746 Error **errp)
1748 X86CPU *cpu = X86_CPU(cs);
1749 char *featurestr; /* Single 'key=value" string being parsed */
1750 FeatureWord w;
1751 /* Features to be added */
1752 FeatureWordArray plus_features = { 0 };
1753 /* Features to be removed */
1754 FeatureWordArray minus_features = { 0 };
1755 uint32_t numvalue;
1756 CPUX86State *env = &cpu->env;
1757 Error *local_err = NULL;
1759 featurestr = features ? strtok(features, ",") : NULL;
1761 while (featurestr) {
1762 char *val;
1763 if (featurestr[0] == '+') {
1764 add_flagname_to_bitmaps(featurestr + 1, plus_features);
1765 } else if (featurestr[0] == '-') {
1766 add_flagname_to_bitmaps(featurestr + 1, minus_features);
1767 } else if ((val = strchr(featurestr, '='))) {
1768 *val = 0; val++;
1769 feat2prop(featurestr);
1770 if (!strcmp(featurestr, "xlevel")) {
1771 char *err;
1772 char num[32];
1774 numvalue = strtoul(val, &err, 0);
1775 if (!*val || *err) {
1776 error_setg(errp, "bad numerical value %s", val);
1777 return;
1779 if (numvalue < 0x80000000) {
1780 error_report("xlevel value shall always be >= 0x80000000"
1781 ", fixup will be removed in future versions");
1782 numvalue += 0x80000000;
1784 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1785 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1786 } else if (!strcmp(featurestr, "tsc-freq")) {
1787 int64_t tsc_freq;
1788 char *err;
1789 char num[32];
1791 tsc_freq = strtosz_suffix_unit(val, &err,
1792 STRTOSZ_DEFSUFFIX_B, 1000);
1793 if (tsc_freq < 0 || *err) {
1794 error_setg(errp, "bad numerical value %s", val);
1795 return;
1797 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
1798 object_property_parse(OBJECT(cpu), num, "tsc-frequency",
1799 &local_err);
1800 } else if (!strcmp(featurestr, "hv-spinlocks")) {
1801 char *err;
1802 const int min = 0xFFF;
1803 char num[32];
1804 numvalue = strtoul(val, &err, 0);
1805 if (!*val || *err) {
1806 error_setg(errp, "bad numerical value %s", val);
1807 return;
1809 if (numvalue < min) {
1810 error_report("hv-spinlocks value shall always be >= 0x%x"
1811 ", fixup will be removed in future versions",
1812 min);
1813 numvalue = min;
1815 snprintf(num, sizeof(num), "%" PRId32, numvalue);
1816 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1817 } else {
1818 object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
1820 } else {
1821 feat2prop(featurestr);
1822 object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
1824 if (local_err) {
1825 error_propagate(errp, local_err);
1826 return;
1828 featurestr = strtok(NULL, ",");
1831 for (w = 0; w < FEATURE_WORDS; w++) {
1832 env->features[w] |= plus_features[w];
1833 env->features[w] &= ~minus_features[w];
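/* Example, added for illustration: a command line such as
 *     -cpu Haswell,+avx2,-rtm,xlevel=0x80000008,hv_spinlocks=0x1fff
 * reaches the parser above as the feature string after the model name:
 * AVX2 is added via plus_features, RTM is queued in minus_features, and
 * the xlevel and hv-spinlocks values are forwarded to the corresponding
 * QOM properties. */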
1837 /* Generate a composite string into buf of all CPUID names in featureset
1838 * selected by fbits. Indicate truncation at bufsize in the event of overflow.
1839 * If flags is non-zero, suppress names undefined in featureset.
1841 static void listflags(char *buf, int bufsize, uint32_t fbits,
1842 const char **featureset, uint32_t flags)
1844 const char **p = &featureset[31];
1845 char *q, *b, bit;
1846 int nc;
1848 b = 4 <= bufsize ? buf + (bufsize -= 3) - 1 : NULL;
1849 *buf = '\0';
1850 for (q = buf, bit = 31; fbits && bufsize; --p, fbits &= ~(1 << bit), --bit)
1851 if (fbits & 1 << bit && (*p || !flags)) {
1852 if (*p)
1853 nc = snprintf(q, bufsize, "%s%s", q == buf ? "" : " ", *p);
1854 else
1855 nc = snprintf(q, bufsize, "%s[%d]", q == buf ? "" : " ", bit);
1856 if (bufsize <= nc) {
1857 if (b) {
1858 memcpy(b, "...", sizeof("..."));
1860 return;
1862 q += nc;
1863 bufsize -= nc;
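/* Illustration (editor's note, hypothetical name table): with fbits = 3,
 * featureset[0] = "fpu", featureset[1] = NULL and flags = 0, the output is
 * "[1] fpu" (high bits first, unnamed bits shown as "[bit]"); with a
 * non-zero flags argument the unnamed bit 1 would be suppressed.
 */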
1867 /* Print the list of built-in CPU models and the recognized CPUID flag names. */
1868 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
1870 X86CPUDefinition *def;
1871 char buf[256];
1872 int i;
1874 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1875 def = &builtin_x86_defs[i];
1876 snprintf(buf, sizeof(buf), "%s", def->name);
1877 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
1879 #ifdef CONFIG_KVM
1880 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
1881 "KVM processor with all supported host features "
1882 "(only available in KVM mode)");
1883 #endif
1885 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
1886 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
1887 FeatureWordInfo *fw = &feature_word_info[i];
1889 listflags(buf, sizeof(buf), (uint32_t)~0, fw->feat_names, 1);
1890 (*cpu_fprintf)(f, " %s\n", buf);
1894 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
1896 CpuDefinitionInfoList *cpu_list = NULL;
1897 X86CPUDefinition *def;
1898 int i;
1900 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1901 CpuDefinitionInfoList *entry;
1902 CpuDefinitionInfo *info;
1904 def = &builtin_x86_defs[i];
1905 info = g_malloc0(sizeof(*info));
1906 info->name = g_strdup(def->name);
1908 entry = g_malloc0(sizeof(*entry));
1909 entry->value = info;
1910 entry->next = cpu_list;
1911 cpu_list = entry;
1914 return cpu_list;
1917 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1918 bool migratable_only)
1920 FeatureWordInfo *wi = &feature_word_info[w];
1921 uint32_t r;
1923 if (kvm_enabled()) {
1924 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
1925 wi->cpuid_ecx,
1926 wi->cpuid_reg);
1927 } else if (tcg_enabled()) {
1928 r = wi->tcg_features;
1929 } else {
1930 return ~0;
1932 if (migratable_only) {
1933 r &= x86_cpu_get_migratable_flags(w);
1935 return r;
1939 * Filters CPU feature words based on host availability of each feature.
1941 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
1943 static int x86_cpu_filter_features(X86CPU *cpu)
1945 CPUX86State *env = &cpu->env;
1946 FeatureWord w;
1947 int rv = 0;
1949 for (w = 0; w < FEATURE_WORDS; w++) {
1950 uint32_t host_feat =
1951 x86_cpu_get_supported_feature_word(w, cpu->migratable);
1952 uint32_t requested_features = env->features[w];
1953 env->features[w] &= host_feat;
1954 cpu->filtered_features[w] = requested_features & ~env->features[w];
1955 if (cpu->filtered_features[w]) {
1956 if (cpu->check_cpuid || cpu->enforce_cpuid) {
1957 report_unavailable_features(w, cpu->filtered_features[w]);
1959 rv = 1;
1963 return rv;
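/* Example (editor's note): if a CPU model requests "avx" but KVM/TCG does
 * not report it, the bit is cleared from env->features[FEAT_1_ECX] and
 * recorded in cpu->filtered_features; with the "check" or "enforce"
 * properties set the missing flag is reported, and "enforce" additionally
 * makes realize fail (see x86_cpu_realizefn below).
 */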
1966 /* Load CPU model data from an X86CPUDefinition struct into an X86CPU object
1968 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
1970 CPUX86State *env = &cpu->env;
1971 const char *vendor;
1972 char host_vendor[CPUID_VENDOR_SZ + 1];
1973 FeatureWord w;
1975 object_property_set_int(OBJECT(cpu), def->level, "level", errp);
1976 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
1977 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
1978 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
1979 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
1980 env->cpuid_xlevel2 = def->xlevel2;
1981 cpu->cache_info_passthrough = def->cache_info_passthrough;
1982 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
1983 for (w = 0; w < FEATURE_WORDS; w++) {
1984 env->features[w] = def->features[w];
1987 /* Special cases not set in the X86CPUDefinition structs: */
1988 if (kvm_enabled()) {
1989 FeatureWord w;
1990 for (w = 0; w < FEATURE_WORDS; w++) {
1991 env->features[w] |= kvm_default_features[w];
1992 env->features[w] &= ~kvm_default_unset_features[w];
1996 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
1998 /* sysenter isn't supported in compatibility mode on AMD,
1999 * syscall isn't supported in compatibility mode on Intel.
2000 * Normally we advertise the actual CPU vendor, but you can
2001 * override this using the 'vendor' property if you want to use
2002 * KVM's sysenter/syscall emulation in compatibility mode and
2003 * when doing cross-vendor migration.
2005 vendor = def->vendor;
2006 if (kvm_enabled()) {
2007 uint32_t ebx = 0, ecx = 0, edx = 0;
2008 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2009 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2010 vendor = host_vendor;
2013 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
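/* Note (editor's addition): the vendor string advertised here can be
 * overridden on the command line, e.g. "-cpu Opteron_G3,vendor=GenuineIntel",
 * which is useful for cross-vendor migration as described above.
 */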
2017 X86CPU *cpu_x86_create(const char *cpu_model, DeviceState *icc_bridge,
2018 Error **errp)
2020 X86CPU *cpu = NULL;
2021 X86CPUClass *xcc;
2022 ObjectClass *oc;
2023 gchar **model_pieces;
2024 char *name, *features;
2025 Error *error = NULL;
2027 model_pieces = g_strsplit(cpu_model, ",", 2);
2028 if (!model_pieces[0]) {
2029 error_setg(&error, "Invalid/empty CPU model name");
2030 goto out;
2032 name = model_pieces[0];
2033 features = model_pieces[1];
2035 oc = x86_cpu_class_by_name(name);
2036 if (oc == NULL) {
2037 error_setg(&error, "Unable to find CPU definition: %s", name);
2038 goto out;
2040 xcc = X86_CPU_CLASS(oc);
2042 if (xcc->kvm_required && !kvm_enabled()) {
2043 error_setg(&error, "CPU model '%s' requires KVM", name);
2044 goto out;
2047 cpu = X86_CPU(object_new(object_class_get_name(oc)));
2049 #ifndef CONFIG_USER_ONLY
2050 if (icc_bridge == NULL) {
2051 error_setg(&error, "Invalid icc-bridge value");
2052 goto out;
2054 qdev_set_parent_bus(DEVICE(cpu), qdev_get_child_bus(icc_bridge, "icc"));
2055 object_unref(OBJECT(cpu));
2056 #endif
2058 x86_cpu_parse_featurestr(CPU(cpu), features, &error);
2059 if (error) {
2060 goto out;
2063 out:
2064 if (error != NULL) {
2065 error_propagate(errp, error);
2066 if (cpu) {
2067 object_unref(OBJECT(cpu));
2068 cpu = NULL;
2071 g_strfreev(model_pieces);
2072 return cpu;
2075 X86CPU *cpu_x86_init(const char *cpu_model)
2077 Error *error = NULL;
2078 X86CPU *cpu;
2080 cpu = cpu_x86_create(cpu_model, NULL, &error);
2081 if (error) {
2082 goto out;
2085 object_property_set_bool(OBJECT(cpu), true, "realized", &error);
2087 out:
2088 if (error) {
2089 error_report("%s", error_get_pretty(error));
2090 error_free(error);
2091 if (cpu != NULL) {
2092 object_unref(OBJECT(cpu));
2093 cpu = NULL;
2096 return cpu;
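/* Usage sketch (editor's addition): callers typically do something like
 *     X86CPU *cpu = cpu_x86_init("qemu64,+avx");
 * which creates and realizes a "qemu64" CPU with the AVX flag forced on,
 * or returns NULL (after printing the error) if creation fails.
 */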
2099 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2101 X86CPUDefinition *cpudef = data;
2102 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2104 xcc->cpu_def = cpudef;
2107 static void x86_register_cpudef_type(X86CPUDefinition *def)
2109 char *typename = x86_cpu_type_name(def->name);
2110 TypeInfo ti = {
2111 .name = typename,
2112 .parent = TYPE_X86_CPU,
2113 .class_init = x86_cpu_cpudef_class_init,
2114 .class_data = def,
2117 type_register(&ti);
2118 g_free(typename);
2121 #if !defined(CONFIG_USER_ONLY)
2123 void cpu_clear_apic_feature(CPUX86State *env)
2125 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2128 #endif /* !CONFIG_USER_ONLY */
2130 /* Initialize list of CPU models, filling some non-static fields if necessary
2132 void x86_cpudef_setup(void)
2134 int i, j;
2135 static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
2137 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
2138 X86CPUDefinition *def = &builtin_x86_defs[i];
2140 /* Look for specific "cpudef" models that
2141 * have the QEMU version in .model_id */
2142 for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
2143 if (strcmp(model_with_versions[j], def->name) == 0) {
2144 pstrcpy(def->model_id, sizeof(def->model_id),
2145 "QEMU Virtual CPU version ");
2146 pstrcat(def->model_id, sizeof(def->model_id),
2147 qemu_get_version());
2148 break;
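/* Effect (editor's note): for the models listed above, e.g. "qemu64",
 * .model_id ends up as "QEMU Virtual CPU version " followed by the
 * running QEMU version string (the exact version is build-dependent).
 */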
2154 static void get_cpuid_vendor(CPUX86State *env, uint32_t *ebx,
2155 uint32_t *ecx, uint32_t *edx)
2157 *ebx = env->cpuid_vendor1;
2158 *edx = env->cpuid_vendor2;
2159 *ecx = env->cpuid_vendor3;
2162 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2163 uint32_t *eax, uint32_t *ebx,
2164 uint32_t *ecx, uint32_t *edx)
2166 X86CPU *cpu = x86_env_get_cpu(env);
2167 CPUState *cs = CPU(cpu);
2169 /* test if maximum index reached */
2170 if (index & 0x80000000) {
2171 if (index > env->cpuid_xlevel) {
2172 if (env->cpuid_xlevel2 > 0) {
2173 /* Handle the Centaur's CPUID instruction. */
2174 if (index > env->cpuid_xlevel2) {
2175 index = env->cpuid_xlevel2;
2176 } else if (index < 0xC0000000) {
2177 index = env->cpuid_xlevel;
2179 } else {
2180 /* Intel documentation states that invalid EAX input will
2181 * return the same information as EAX=cpuid_level
2182 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2184 index = env->cpuid_level;
2187 } else {
2188 if (index > env->cpuid_level)
2189 index = env->cpuid_level;
2192 switch(index) {
2193 case 0:
2194 *eax = env->cpuid_level;
2195 get_cpuid_vendor(env, ebx, ecx, edx);
2196 break;
2197 case 1:
2198 *eax = env->cpuid_version;
2199 *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH line size, in 8-byte units (8 -> 64 bytes); Linux reads it. */
2200 *ecx = env->features[FEAT_1_ECX];
2201 *edx = env->features[FEAT_1_EDX];
2202 if (cs->nr_cores * cs->nr_threads > 1) {
2203 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2204 *edx |= 1 << 28; /* HTT bit */
2206 break;
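/* For reference (editor's note): CPUID.01H EBX packs, from high to low,
 * the initial APIC ID (bits 31..24), the logical processor count when HTT
 * is set (bits 23..16), and the CLFLUSH line size in 8-byte units
 * (bits 15..8); the code above fills exactly those three fields.
 */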
2207 case 2:
2208 /* cache info: needed for Pentium Pro compatibility */
2209 if (cpu->cache_info_passthrough) {
2210 host_cpuid(index, 0, eax, ebx, ecx, edx);
2211 break;
2213 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2214 *ebx = 0;
2215 *ecx = 0;
2216 *edx = (L1D_DESCRIPTOR << 16) | \
2217 (L1I_DESCRIPTOR << 8) | \
2218 (L2_DESCRIPTOR);
2219 break;
2220 case 4:
2221 /* cache info: needed for Core compatibility */
2222 if (cpu->cache_info_passthrough) {
2223 host_cpuid(index, count, eax, ebx, ecx, edx);
2224 *eax &= ~0xFC000000;
2225 } else {
2226 *eax = 0;
2227 switch (count) {
2228 case 0: /* L1 dcache info */
2229 *eax |= CPUID_4_TYPE_DCACHE | \
2230 CPUID_4_LEVEL(1) | \
2231 CPUID_4_SELF_INIT_LEVEL;
2232 *ebx = (L1D_LINE_SIZE - 1) | \
2233 ((L1D_PARTITIONS - 1) << 12) | \
2234 ((L1D_ASSOCIATIVITY - 1) << 22);
2235 *ecx = L1D_SETS - 1;
2236 *edx = CPUID_4_NO_INVD_SHARING;
2237 break;
2238 case 1: /* L1 icache info */
2239 *eax |= CPUID_4_TYPE_ICACHE | \
2240 CPUID_4_LEVEL(1) | \
2241 CPUID_4_SELF_INIT_LEVEL;
2242 *ebx = (L1I_LINE_SIZE - 1) | \
2243 ((L1I_PARTITIONS - 1) << 12) | \
2244 ((L1I_ASSOCIATIVITY - 1) << 22);
2245 *ecx = L1I_SETS - 1;
2246 *edx = CPUID_4_NO_INVD_SHARING;
2247 break;
2248 case 2: /* L2 cache info */
2249 *eax |= CPUID_4_TYPE_UNIFIED | \
2250 CPUID_4_LEVEL(2) | \
2251 CPUID_4_SELF_INIT_LEVEL;
2252 if (cs->nr_threads > 1) {
2253 *eax |= (cs->nr_threads - 1) << 14;
2255 *ebx = (L2_LINE_SIZE - 1) | \
2256 ((L2_PARTITIONS - 1) << 12) | \
2257 ((L2_ASSOCIATIVITY - 1) << 22);
2258 *ecx = L2_SETS - 1;
2259 *edx = CPUID_4_NO_INVD_SHARING;
2260 break;
2261 default: /* end of info */
2262 *eax = 0;
2263 *ebx = 0;
2264 *ecx = 0;
2265 *edx = 0;
2266 break;
2270 /* QEMU gives out its own APIC IDs; never pass down bits 31..26. */
2271 if ((*eax & 31) && cs->nr_cores > 1) {
2272 *eax |= (cs->nr_cores - 1) << 26;
2274 break;
2275 case 5:
2276 /* mwait info: needed for Core compatibility */
2277 *eax = 0; /* Smallest monitor-line size in bytes */
2278 *ebx = 0; /* Largest monitor-line size in bytes */
2279 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2280 *edx = 0;
2281 break;
2282 case 6:
2283 /* Thermal and Power Leaf */
2284 *eax = 0;
2285 *ebx = 0;
2286 *ecx = 0;
2287 *edx = 0;
2288 break;
2289 case 7:
2290 /* Structured Extended Feature Flags Enumeration Leaf */
2291 if (count == 0) {
2292 *eax = 0; /* Maximum ECX value for sub-leaves */
2293 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2294 *ecx = 0; /* Reserved */
2295 *edx = 0; /* Reserved */
2296 } else {
2297 *eax = 0;
2298 *ebx = 0;
2299 *ecx = 0;
2300 *edx = 0;
2302 break;
2303 case 9:
2304 /* Direct Cache Access Information Leaf */
2305 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2306 *ebx = 0;
2307 *ecx = 0;
2308 *edx = 0;
2309 break;
2310 case 0xA:
2311 /* Architectural Performance Monitoring Leaf */
2312 if (kvm_enabled() && cpu->enable_pmu) {
2313 KVMState *s = cs->kvm_state;
2315 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2316 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2317 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2318 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2319 } else {
2320 *eax = 0;
2321 *ebx = 0;
2322 *ecx = 0;
2323 *edx = 0;
2325 break;
2326 case 0xD: {
2327 KVMState *s = cs->kvm_state;
2328 uint64_t kvm_mask;
2329 int i;
2331 /* Processor Extended State */
2332 *eax = 0;
2333 *ebx = 0;
2334 *ecx = 0;
2335 *edx = 0;
2336 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) || !kvm_enabled()) {
2337 break;
2339 kvm_mask =
2340 kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX) |
2341 ((uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32);
2343 if (count == 0) {
2344 *ecx = 0x240;
2345 for (i = 2; i < ARRAY_SIZE(ext_save_areas); i++) {
2346 const ExtSaveArea *esa = &ext_save_areas[i];
2347 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2348 (kvm_mask & (1 << i)) != 0) {
2349 if (i < 32) {
2350 *eax |= 1 << i;
2351 } else {
2352 *edx |= 1 << (i - 32);
2354 *ecx = MAX(*ecx, esa->offset + esa->size);
2357 *eax |= kvm_mask & (XSTATE_FP | XSTATE_SSE);
2358 *ebx = *ecx;
2359 } else if (count == 1) {
2360 *eax = kvm_arch_get_supported_cpuid(s, 0xd, 1, R_EAX);
2361 } else if (count < ARRAY_SIZE(ext_save_areas)) {
2362 const ExtSaveArea *esa = &ext_save_areas[count];
2363 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2364 (kvm_mask & (1 << count)) != 0) {
2365 *eax = esa->size;
2366 *ebx = esa->offset;
2369 break;
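/* For reference (editor's note): in sub-leaf 0, EAX/EDX report the XSAVE
 * state-component bitmap supported by both KVM and the guest features, and
 * ECX/EBX report the required save-area size (0x240 = 512 bytes of legacy
 * FP/SSE state plus the 64-byte XSAVE header, growing as components are
 * enabled); sub-leaves >= 2 report the size and offset of each component.
 */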
2371 case 0x80000000:
2372 *eax = env->cpuid_xlevel;
2373 *ebx = env->cpuid_vendor1;
2374 *edx = env->cpuid_vendor2;
2375 *ecx = env->cpuid_vendor3;
2376 break;
2377 case 0x80000001:
2378 *eax = env->cpuid_version;
2379 *ebx = 0;
2380 *ecx = env->features[FEAT_8000_0001_ECX];
2381 *edx = env->features[FEAT_8000_0001_EDX];
2383 /* The Linux kernel checks for the CMPLegacy bit and
2384 * discards multiple thread information if it is set.
2385 * So don't set it here for Intel to make Linux guests happy.
2387 if (cs->nr_cores * cs->nr_threads > 1) {
2388 uint32_t tebx, tecx, tedx;
2389 get_cpuid_vendor(env, &tebx, &tecx, &tedx);
2390 if (tebx != CPUID_VENDOR_INTEL_1 ||
2391 tedx != CPUID_VENDOR_INTEL_2 ||
2392 tecx != CPUID_VENDOR_INTEL_3) {
2393 *ecx |= 1 << 1; /* CmpLegacy bit */
2396 break;
2397 case 0x80000002:
2398 case 0x80000003:
2399 case 0x80000004:
2400 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2401 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2402 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2403 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2404 break;
2405 case 0x80000005:
2406 /* cache info (L1 cache) */
2407 if (cpu->cache_info_passthrough) {
2408 host_cpuid(index, 0, eax, ebx, ecx, edx);
2409 break;
2411 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2412 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2413 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2414 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2415 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2416 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2417 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2418 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2419 break;
2420 case 0x80000006:
2421 /* cache info (L2 cache) */
2422 if (cpu->cache_info_passthrough) {
2423 host_cpuid(index, 0, eax, ebx, ecx, edx);
2424 break;
2426 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2427 (L2_DTLB_2M_ENTRIES << 16) | \
2428 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2429 (L2_ITLB_2M_ENTRIES);
2430 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2431 (L2_DTLB_4K_ENTRIES << 16) | \
2432 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2433 (L2_ITLB_4K_ENTRIES);
2434 *ecx = (L2_SIZE_KB_AMD << 16) | \
2435 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2436 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2437 *edx = ((L3_SIZE_KB/512) << 18) | \
2438 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2439 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2440 break;
2441 case 0x80000007:
2442 *eax = 0;
2443 *ebx = 0;
2444 *ecx = 0;
2445 *edx = env->features[FEAT_8000_0007_EDX];
2446 break;
2447 case 0x80000008:
2448 /* Virtual and physical address sizes in the low 2 bytes of EAX. */
2449 /* XXX: This value must match the one used in the MMU code. */
2450 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2451 /* 64 bit processor */
2452 /* XXX: The physical address space is limited to 42 bits in exec.c. */
2453 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
2454 } else {
2455 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
2456 *eax = 0x00000024; /* 36 bits physical */
2457 } else {
2458 *eax = 0x00000020; /* 32 bits physical */
2461 *ebx = 0;
2462 *ecx = 0;
2463 *edx = 0;
2464 if (cs->nr_cores * cs->nr_threads > 1) {
2465 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2467 break;
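/* Decoding note (editor's addition): EAX bits 7..0 give the physical
 * address width and bits 15..8 the virtual address width, so 0x00003028
 * above means 48-bit virtual / 40-bit physical addressing for 64-bit CPUs.
 */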
2468 case 0x8000000A:
2469 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2470 *eax = 0x00000001; /* SVM Revision */
2471 *ebx = 0x00000010; /* nr of ASIDs */
2472 *ecx = 0;
2473 *edx = env->features[FEAT_SVM]; /* optional features */
2474 } else {
2475 *eax = 0;
2476 *ebx = 0;
2477 *ecx = 0;
2478 *edx = 0;
2480 break;
2481 case 0xC0000000:
2482 *eax = env->cpuid_xlevel2;
2483 *ebx = 0;
2484 *ecx = 0;
2485 *edx = 0;
2486 break;
2487 case 0xC0000001:
2488 /* Support for VIA CPU's CPUID instruction */
2489 *eax = env->cpuid_version;
2490 *ebx = 0;
2491 *ecx = 0;
2492 *edx = env->features[FEAT_C000_0001_EDX];
2493 break;
2494 case 0xC0000002:
2495 case 0xC0000003:
2496 case 0xC0000004:
2497 /* Reserved for future use; currently filled with zero */
2498 *eax = 0;
2499 *ebx = 0;
2500 *ecx = 0;
2501 *edx = 0;
2502 break;
2503 default:
2504 /* reserved values: zero */
2505 *eax = 0;
2506 *ebx = 0;
2507 *ecx = 0;
2508 *edx = 0;
2509 break;
2513 /* CPUClass::reset() */
2514 static void x86_cpu_reset(CPUState *s)
2516 X86CPU *cpu = X86_CPU(s);
2517 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2518 CPUX86State *env = &cpu->env;
2519 int i;
2521 xcc->parent_reset(s);
2523 memset(env, 0, offsetof(CPUX86State, cpuid_level));
2525 tlb_flush(s, 1);
2527 env->old_exception = -1;
2529 /* init to reset state */
2531 #ifdef CONFIG_SOFTMMU
2532 env->hflags |= HF_SOFTMMU_MASK;
2533 #endif
2534 env->hflags2 |= HF2_GIF_MASK;
2536 cpu_x86_update_cr0(env, 0x60000010);
2537 env->a20_mask = ~0x0;
2538 env->smbase = 0x30000;
2540 env->idt.limit = 0xffff;
2541 env->gdt.limit = 0xffff;
2542 env->ldt.limit = 0xffff;
2543 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2544 env->tr.limit = 0xffff;
2545 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
2547 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2548 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2549 DESC_R_MASK | DESC_A_MASK);
2550 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2551 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2552 DESC_A_MASK);
2553 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2554 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2555 DESC_A_MASK);
2556 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2557 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2558 DESC_A_MASK);
2559 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2560 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2561 DESC_A_MASK);
2562 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2563 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2564 DESC_A_MASK);
2566 env->eip = 0xfff0;
2567 env->regs[R_EDX] = env->cpuid_version;
2569 env->eflags = 0x2;
2571 /* FPU init */
2572 for (i = 0; i < 8; i++) {
2573 env->fptags[i] = 1;
2575 env->fpuc = 0x37f;
2577 env->mxcsr = 0x1f80;
2578 env->xstate_bv = XSTATE_FP | XSTATE_SSE;
2580 env->pat = 0x0007040600070406ULL;
2581 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2583 memset(env->dr, 0, sizeof(env->dr));
2584 env->dr[6] = DR6_FIXED_1;
2585 env->dr[7] = DR7_FIXED_1;
2586 cpu_breakpoint_remove_all(s, BP_CPU);
2587 cpu_watchpoint_remove_all(s, BP_CPU);
2589 env->xcr0 = 1;
2591 #if !defined(CONFIG_USER_ONLY)
2592 /* We hard-wire the BSP to the first CPU. */
2593 if (s->cpu_index == 0) {
2594 apic_designate_bsp(cpu->apic_state);
2597 s->halted = !cpu_is_bsp(cpu);
2599 if (kvm_enabled()) {
2600 kvm_arch_reset_vcpu(cpu);
2602 #endif
2605 #ifndef CONFIG_USER_ONLY
2606 bool cpu_is_bsp(X86CPU *cpu)
2608 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2611 /* TODO: remove me when reset over the QOM tree is implemented */
2612 static void x86_cpu_machine_reset_cb(void *opaque)
2614 X86CPU *cpu = opaque;
2615 cpu_reset(CPU(cpu));
2617 #endif
2619 static void mce_init(X86CPU *cpu)
2621 CPUX86State *cenv = &cpu->env;
2622 unsigned int bank;
2624 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2625 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2626 (CPUID_MCE | CPUID_MCA)) {
2627 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2628 cenv->mcg_ctl = ~(uint64_t)0;
2629 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2630 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
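/* Note (editor's addition, layout as implied by the bank * 4 indexing):
 * each MCE bank occupies four consecutive mce_banks[] slots (CTL, STATUS,
 * ADDR, MISC), and the loop above enables every error type by writing
 * all-ones to each bank's MCi_CTL entry.
 */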
2635 #ifndef CONFIG_USER_ONLY
2636 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2638 CPUX86State *env = &cpu->env;
2639 DeviceState *dev = DEVICE(cpu);
2640 APICCommonState *apic;
2641 const char *apic_type = "apic";
2643 if (kvm_irqchip_in_kernel()) {
2644 apic_type = "kvm-apic";
2645 } else if (xen_enabled()) {
2646 apic_type = "xen-apic";
2649 cpu->apic_state = qdev_try_create(qdev_get_parent_bus(dev), apic_type);
2650 if (cpu->apic_state == NULL) {
2651 error_setg(errp, "APIC device '%s' could not be created", apic_type);
2652 return;
2655 object_property_add_child(OBJECT(cpu), "apic",
2656 OBJECT(cpu->apic_state), NULL);
2657 qdev_prop_set_uint8(cpu->apic_state, "id", env->cpuid_apic_id);
2658 /* TODO: convert to link<> */
2659 apic = APIC_COMMON(cpu->apic_state);
2660 apic->cpu = cpu;
2663 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2665 if (cpu->apic_state == NULL) {
2666 return;
2669 if (qdev_init(cpu->apic_state)) {
2670 error_setg(errp, "APIC device '%s' could not be initialized",
2671 object_get_typename(OBJECT(cpu->apic_state)));
2672 return;
2675 #else
2676 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2679 #endif
2681 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
2683 CPUState *cs = CPU(dev);
2684 X86CPU *cpu = X86_CPU(dev);
2685 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
2686 CPUX86State *env = &cpu->env;
2687 Error *local_err = NULL;
2689 if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
2690 env->cpuid_level = 7;
2693 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
2694 * CPUID[1].EDX.
2696 if (env->cpuid_vendor1 == CPUID_VENDOR_AMD_1 &&
2697 env->cpuid_vendor2 == CPUID_VENDOR_AMD_2 &&
2698 env->cpuid_vendor3 == CPUID_VENDOR_AMD_3) {
2699 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
2700 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
2701 & CPUID_EXT2_AMD_ALIASES);
2705 if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
2706 error_setg(&local_err,
2707 kvm_enabled() ?
2708 "Host doesn't support requested features" :
2709 "TCG doesn't support requested features");
2710 goto out;
2713 #ifndef CONFIG_USER_ONLY
2714 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
2716 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
2717 x86_cpu_apic_create(cpu, &local_err);
2718 if (local_err != NULL) {
2719 goto out;
2722 #endif
2724 mce_init(cpu);
2725 qemu_init_vcpu(cs);
2727 x86_cpu_apic_realize(cpu, &local_err);
2728 if (local_err != NULL) {
2729 goto out;
2731 cpu_reset(cs);
2733 xcc->parent_realize(dev, &local_err);
2734 out:
2735 if (local_err != NULL) {
2736 error_propagate(errp, local_err);
2737 return;
2741 /* Enables contiguous-apic-ID mode, for compatibility */
2742 static bool compat_apic_id_mode;
2744 void enable_compat_apic_id_mode(void)
2746 compat_apic_id_mode = true;
2749 /* Calculates initial APIC ID for a specific CPU index
2751 * Currently we need to be able to calculate the APIC ID from the CPU index
2752 * alone (without requiring a CPU object), as the QEMU<->SeaBIOS interfaces have
2753 * no concept of "CPU index", and the NUMA tables on fw_cfg need the APIC ID of
2754 * all CPUs up to max_cpus.
2756 uint32_t x86_cpu_apic_id_from_index(unsigned int cpu_index)
2758 uint32_t correct_id;
2759 static bool warned;
2761 correct_id = x86_apicid_from_cpu_idx(smp_cores, smp_threads, cpu_index);
2762 if (compat_apic_id_mode) {
2763 if (cpu_index != correct_id && !warned) {
2764 error_report("APIC IDs set in compatibility mode, "
2765 "CPU topology won't match the configuration");
2766 warned = true;
2768 return cpu_index;
2769 } else {
2770 return correct_id;
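/* Example (editor's sketch, assuming the power-of-two-rounded field widths
 * used by x86_apicid_from_cpu_idx() in topology.h): with smp_cores=2 and
 * smp_threads=2 the APIC ID equals the CPU index, while with smp_cores=3,
 * smp_threads=1 cpu_index 3 maps to APIC ID 4 because the core field is
 * rounded up to two bits, leaving a gap that the compat mode above avoids.
 */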
2774 static void x86_cpu_initfn(Object *obj)
2776 CPUState *cs = CPU(obj);
2777 X86CPU *cpu = X86_CPU(obj);
2778 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
2779 CPUX86State *env = &cpu->env;
2780 static int inited;
2782 cs->env_ptr = env;
2783 cpu_exec_init(env);
2785 object_property_add(obj, "family", "int",
2786 x86_cpuid_version_get_family,
2787 x86_cpuid_version_set_family, NULL, NULL, NULL);
2788 object_property_add(obj, "model", "int",
2789 x86_cpuid_version_get_model,
2790 x86_cpuid_version_set_model, NULL, NULL, NULL);
2791 object_property_add(obj, "stepping", "int",
2792 x86_cpuid_version_get_stepping,
2793 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
2794 object_property_add(obj, "level", "int",
2795 x86_cpuid_get_level,
2796 x86_cpuid_set_level, NULL, NULL, NULL);
2797 object_property_add(obj, "xlevel", "int",
2798 x86_cpuid_get_xlevel,
2799 x86_cpuid_set_xlevel, NULL, NULL, NULL);
2800 object_property_add_str(obj, "vendor",
2801 x86_cpuid_get_vendor,
2802 x86_cpuid_set_vendor, NULL);
2803 object_property_add_str(obj, "model-id",
2804 x86_cpuid_get_model_id,
2805 x86_cpuid_set_model_id, NULL);
2806 object_property_add(obj, "tsc-frequency", "int",
2807 x86_cpuid_get_tsc_freq,
2808 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
2809 object_property_add(obj, "apic-id", "int",
2810 x86_cpuid_get_apic_id,
2811 x86_cpuid_set_apic_id, NULL, NULL, NULL);
2812 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
2813 x86_cpu_get_feature_words,
2814 NULL, NULL, (void *)env->features, NULL);
2815 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
2816 x86_cpu_get_feature_words,
2817 NULL, NULL, (void *)cpu->filtered_features, NULL);
2819 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
2820 env->cpuid_apic_id = x86_cpu_apic_id_from_index(cs->cpu_index);
2822 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
2824 /* init various static tables used in TCG mode */
2825 if (tcg_enabled() && !inited) {
2826 inited = 1;
2827 optimize_flags_init();
2828 #ifndef CONFIG_USER_ONLY
2829 cpu_set_debug_excp_handler(breakpoint_handler);
2830 #endif
2834 static int64_t x86_cpu_get_arch_id(CPUState *cs)
2836 X86CPU *cpu = X86_CPU(cs);
2837 CPUX86State *env = &cpu->env;
2839 return env->cpuid_apic_id;
2842 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
2844 X86CPU *cpu = X86_CPU(cs);
2846 return cpu->env.cr[0] & CR0_PG_MASK;
2849 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
2851 X86CPU *cpu = X86_CPU(cs);
2853 cpu->env.eip = value;
2856 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
2858 X86CPU *cpu = X86_CPU(cs);
2860 cpu->env.eip = tb->pc - tb->cs_base;
2863 static bool x86_cpu_has_work(CPUState *cs)
2865 X86CPU *cpu = X86_CPU(cs);
2866 CPUX86State *env = &cpu->env;
2868 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
2869 CPU_INTERRUPT_POLL)) &&
2870 (env->eflags & IF_MASK)) ||
2871 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
2872 CPU_INTERRUPT_INIT |
2873 CPU_INTERRUPT_SIPI |
2874 CPU_INTERRUPT_MCE));
2877 static Property x86_cpu_properties[] = {
2878 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
2879 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
2880 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
2881 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
2882 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
2883 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, false),
2884 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
2885 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
2886 DEFINE_PROP_END_OF_LIST()
2889 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
2891 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2892 CPUClass *cc = CPU_CLASS(oc);
2893 DeviceClass *dc = DEVICE_CLASS(oc);
2895 xcc->parent_realize = dc->realize;
2896 dc->realize = x86_cpu_realizefn;
2897 dc->bus_type = TYPE_ICC_BUS;
2898 dc->props = x86_cpu_properties;
2900 xcc->parent_reset = cc->reset;
2901 cc->reset = x86_cpu_reset;
2902 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
2904 cc->class_by_name = x86_cpu_class_by_name;
2905 cc->parse_features = x86_cpu_parse_featurestr;
2906 cc->has_work = x86_cpu_has_work;
2907 cc->do_interrupt = x86_cpu_do_interrupt;
2908 cc->dump_state = x86_cpu_dump_state;
2909 cc->set_pc = x86_cpu_set_pc;
2910 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
2911 cc->gdb_read_register = x86_cpu_gdb_read_register;
2912 cc->gdb_write_register = x86_cpu_gdb_write_register;
2913 cc->get_arch_id = x86_cpu_get_arch_id;
2914 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
2915 #ifdef CONFIG_USER_ONLY
2916 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
2917 #else
2918 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
2919 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
2920 cc->write_elf64_note = x86_cpu_write_elf64_note;
2921 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
2922 cc->write_elf32_note = x86_cpu_write_elf32_note;
2923 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
2924 cc->vmsd = &vmstate_x86_cpu;
2925 #endif
2926 cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
2929 static const TypeInfo x86_cpu_type_info = {
2930 .name = TYPE_X86_CPU,
2931 .parent = TYPE_CPU,
2932 .instance_size = sizeof(X86CPU),
2933 .instance_init = x86_cpu_initfn,
2934 .abstract = true,
2935 .class_size = sizeof(X86CPUClass),
2936 .class_init = x86_cpu_common_class_init,
2939 static void x86_cpu_register_types(void)
2941 int i;
2943 type_register_static(&x86_cpu_type_info);
2944 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2945 x86_register_cpudef_type(&builtin_x86_defs[i]);
2947 #ifdef CONFIG_KVM
2948 type_register_static(&host_x86_cpu_type_info);
2949 #endif
2952 type_init(x86_cpu_register_types)