[qemu.git] / target-i386 / cpu.c
1 /*
2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include <stdlib.h>
20 #include <stdio.h>
21 #include <string.h>
22 #include <inttypes.h>
24 #include "cpu.h"
25 #include "sysemu/kvm.h"
26 #include "sysemu/cpus.h"
27 #include "topology.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
33 #include "qapi-types.h"
34 #include "qapi-visit.h"
35 #include "qapi/visitor.h"
36 #include "sysemu/arch_init.h"
38 #include "hw/hw.h"
39 #if defined(CONFIG_KVM)
40 #include <linux/kvm_para.h>
41 #endif
43 #include "sysemu/sysemu.h"
44 #include "hw/qdev-properties.h"
45 #include "hw/cpu/icc_bus.h"
46 #ifndef CONFIG_USER_ONLY
47 #include "hw/xen/xen.h"
48 #include "hw/i386/apic_internal.h"
49 #endif
52 /* Cache topology CPUID constants: */
54 /* CPUID Leaf 2 Descriptors */
56 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
57 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
58 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
61 /* CPUID Leaf 4 constants: */
63 /* EAX: */
64 #define CPUID_4_TYPE_DCACHE 1
65 #define CPUID_4_TYPE_ICACHE 2
66 #define CPUID_4_TYPE_UNIFIED 3
68 #define CPUID_4_LEVEL(l) ((l) << 5)
70 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
71 #define CPUID_4_FULLY_ASSOC (1 << 9)
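/* In CPUID leaf 4, EAX packs the cache type in bits 4:0, the level in bits 7:5
 * (hence CPUID_4_LEVEL(l) == (l) << 5), "self initializing" in bit 8 and
 * "fully associative" in bit 9.  For example, a self-initializing L2 unified
 * cache is described as
 *   CPUID_4_TYPE_UNIFIED | CPUID_4_LEVEL(2) | CPUID_4_SELF_INIT_LEVEL */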
73 /* EDX: */
74 #define CPUID_4_NO_INVD_SHARING (1 << 0)
75 #define CPUID_4_INCLUSIVE (1 << 1)
76 #define CPUID_4_COMPLEX_IDX (1 << 2)
78 #define ASSOC_FULL 0xFF
80 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
81 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
82 a == 2 ? 0x2 : \
83 a == 4 ? 0x4 : \
84 a == 8 ? 0x6 : \
85 a == 16 ? 0x8 : \
86 a == 32 ? 0xA : \
87 a == 48 ? 0xB : \
88 a == 64 ? 0xC : \
89 a == 96 ? 0xD : \
90 a == 128 ? 0xE : \
91 a == ASSOC_FULL ? 0xF : \
92 0 /* invalid value */)
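/* Example: with the values defined below, the 16-way L2 is reported as
 * AMD_ENC_ASSOC(16) == 0x8 in CPUID leaf 0x80000006, and a fully associative
 * cache (ASSOC_FULL) is reported as 0xF. */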
95 /* Definitions of the hardcoded cache entries we expose: */
97 /* L1 data cache: */
98 #define L1D_LINE_SIZE 64
99 #define L1D_ASSOCIATIVITY 8
100 #define L1D_SETS 64
101 #define L1D_PARTITIONS 1
102 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
103 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
104 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
105 #define L1D_LINES_PER_TAG 1
106 #define L1D_SIZE_KB_AMD 64
107 #define L1D_ASSOCIATIVITY_AMD 2
109 /* L1 instruction cache: */
110 #define L1I_LINE_SIZE 64
111 #define L1I_ASSOCIATIVITY 8
112 #define L1I_SETS 64
113 #define L1I_PARTITIONS 1
114 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
115 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
116 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
117 #define L1I_LINES_PER_TAG 1
118 #define L1I_SIZE_KB_AMD 64
119 #define L1I_ASSOCIATIVITY_AMD 2
121 /* Level 2 unified cache: */
122 #define L2_LINE_SIZE 64
123 #define L2_ASSOCIATIVITY 16
124 #define L2_SETS 4096
125 #define L2_PARTITIONS 1
126 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
127 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
128 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
129 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
130 #define L2_LINES_PER_TAG 1
131 #define L2_SIZE_KB_AMD 512
133 /* No L3 cache: */
134 #define L3_SIZE_KB 0 /* disabled */
135 #define L3_ASSOCIATIVITY 0 /* disabled */
136 #define L3_LINES_PER_TAG 0 /* disabled */
137 #define L3_LINE_SIZE 0 /* disabled */
139 /* TLB definitions: */
141 #define L1_DTLB_2M_ASSOC 1
142 #define L1_DTLB_2M_ENTRIES 255
143 #define L1_DTLB_4K_ASSOC 1
144 #define L1_DTLB_4K_ENTRIES 255
146 #define L1_ITLB_2M_ASSOC 1
147 #define L1_ITLB_2M_ENTRIES 255
148 #define L1_ITLB_4K_ASSOC 1
149 #define L1_ITLB_4K_ENTRIES 255
151 #define L2_DTLB_2M_ASSOC 0 /* disabled */
152 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
153 #define L2_DTLB_4K_ASSOC 4
154 #define L2_DTLB_4K_ENTRIES 512
156 #define L2_ITLB_2M_ASSOC 0 /* disabled */
157 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
158 #define L2_ITLB_4K_ASSOC 4
159 #define L2_ITLB_4K_ENTRIES 512
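/* Helper to turn the three CPUID vendor-id registers back into a string.
 * Example: on an Intel CPU, leaf 0 returns EBX=0x756e6547 ("Genu"),
 * EDX=0x49656e69 ("ineI") and ECX=0x6c65746e ("ntel"), which this function
 * reassembles into the 12-byte string "GenuineIntel". */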
163 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
164 uint32_t vendor2, uint32_t vendor3)
166 int i;
167 for (i = 0; i < 4; i++) {
168 dst[i] = vendor1 >> (8 * i);
169 dst[i + 4] = vendor2 >> (8 * i);
170 dst[i + 8] = vendor3 >> (8 * i);
172 dst[CPUID_VENDOR_SZ] = '\0';
175 /* feature flags taken from "Intel Processor Identification and the CPUID
176 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
177 * between feature naming conventions, aliases may be added.
179 static const char *feature_name[] = {
180 "fpu", "vme", "de", "pse",
181 "tsc", "msr", "pae", "mce",
182 "cx8", "apic", NULL, "sep",
183 "mtrr", "pge", "mca", "cmov",
184 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
185 NULL, "ds" /* Intel dts */, "acpi", "mmx",
186 "fxsr", "sse", "sse2", "ss",
187 "ht" /* Intel htt */, "tm", "ia64", "pbe",
189 static const char *ext_feature_name[] = {
190 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
191 "ds_cpl", "vmx", "smx", "est",
192 "tm2", "ssse3", "cid", NULL,
193 "fma", "cx16", "xtpr", "pdcm",
194 NULL, "pcid", "dca", "sse4.1|sse4_1",
195 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
196 "tsc-deadline", "aes", "xsave", "osxsave",
197 "avx", "f16c", "rdrand", "hypervisor",
199 /* Feature names that are already defined on feature_name[] but are set on
200 * CPUID[8000_0001].EDX on AMD CPUs don't have their names on
201 * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
202 * if and only if CPU vendor is AMD.
204 static const char *ext2_feature_name[] = {
205 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
206 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
207 NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
208 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
209 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
210 "nx|xd", NULL, "mmxext", NULL /* mmx */,
211 NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
212 NULL, "lm|i64", "3dnowext", "3dnow",
214 static const char *ext3_feature_name[] = {
215 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
216 "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
217 "3dnowprefetch", "osvw", "ibs", "xop",
218 "skinit", "wdt", NULL, "lwp",
219 "fma4", "tce", NULL, "nodeid_msr",
220 NULL, "tbm", "topoext", "perfctr_core",
221 "perfctr_nb", NULL, NULL, NULL,
222 NULL, NULL, NULL, NULL,
225 static const char *ext4_feature_name[] = {
226 NULL, NULL, "xstore", "xstore-en",
227 NULL, NULL, "xcrypt", "xcrypt-en",
228 "ace2", "ace2-en", "phe", "phe-en",
229 "pmm", "pmm-en", NULL, NULL,
230 NULL, NULL, NULL, NULL,
231 NULL, NULL, NULL, NULL,
232 NULL, NULL, NULL, NULL,
233 NULL, NULL, NULL, NULL,
236 static const char *kvm_feature_name[] = {
237 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
238 "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
239 NULL, NULL, NULL, NULL,
240 NULL, NULL, NULL, NULL,
241 NULL, NULL, NULL, NULL,
242 NULL, NULL, NULL, NULL,
243 NULL, NULL, NULL, NULL,
244 NULL, NULL, NULL, NULL,
247 static const char *svm_feature_name[] = {
248 "npt", "lbrv", "svm_lock", "nrip_save",
249 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
250 NULL, NULL, "pause_filter", NULL,
251 "pfthreshold", NULL, NULL, NULL,
252 NULL, NULL, NULL, NULL,
253 NULL, NULL, NULL, NULL,
254 NULL, NULL, NULL, NULL,
255 NULL, NULL, NULL, NULL,
258 static const char *cpuid_7_0_ebx_feature_name[] = {
259 "fsgsbase", NULL, NULL, "bmi1", "hle", "avx2", NULL, "smep",
260 "bmi2", "erms", "invpcid", "rtm", NULL, NULL, NULL, NULL,
261 NULL, NULL, "rdseed", "adx", "smap", NULL, NULL, NULL,
262 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
265 typedef struct FeatureWordInfo {
266 const char **feat_names;
267 uint32_t cpuid_eax; /* Input EAX for CPUID */
268 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
269 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
270 int cpuid_reg; /* output register (R_* constant) */
271 } FeatureWordInfo;
273 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
274 [FEAT_1_EDX] = {
275 .feat_names = feature_name,
276 .cpuid_eax = 1, .cpuid_reg = R_EDX,
278 [FEAT_1_ECX] = {
279 .feat_names = ext_feature_name,
280 .cpuid_eax = 1, .cpuid_reg = R_ECX,
282 [FEAT_8000_0001_EDX] = {
283 .feat_names = ext2_feature_name,
284 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
286 [FEAT_8000_0001_ECX] = {
287 .feat_names = ext3_feature_name,
288 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
290 [FEAT_C000_0001_EDX] = {
291 .feat_names = ext4_feature_name,
292 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
294 [FEAT_KVM] = {
295 .feat_names = kvm_feature_name,
296 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
298 [FEAT_SVM] = {
299 .feat_names = svm_feature_name,
300 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
302 [FEAT_7_0_EBX] = {
303 .feat_names = cpuid_7_0_ebx_feature_name,
304 .cpuid_eax = 7,
305 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
306 .cpuid_reg = R_EBX,
310 typedef struct X86RegisterInfo32 {
311 /* Name of register */
312 const char *name;
313 /* QAPI enum value register */
314 X86CPURegister32 qapi_enum;
315 } X86RegisterInfo32;
317 #define REGISTER(reg) \
318 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
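/* e.g. REGISTER(EAX) expands to
 *   [R_EAX] = { .name = "EAX", .qapi_enum = X86_CPU_REGISTER32_EAX } */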
319 X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
320 REGISTER(EAX),
321 REGISTER(ECX),
322 REGISTER(EDX),
323 REGISTER(EBX),
324 REGISTER(ESP),
325 REGISTER(EBP),
326 REGISTER(ESI),
327 REGISTER(EDI),
329 #undef REGISTER
331 typedef struct ExtSaveArea {
332 uint32_t feature, bits;
333 uint32_t offset, size;
334 } ExtSaveArea;
336 static const ExtSaveArea ext_save_areas[] = {
337 [2] = { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
338 .offset = 0x240, .size = 0x100 },
339 [3] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
340 .offset = 0x3c0, .size = 0x40 },
341 [4] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
342 .offset = 0x400, .size = 0x40 },
345 const char *get_register_name_32(unsigned int reg)
347 if (reg >= CPU_NB_REGS32) {
348 return NULL;
350 return x86_reg_info_32[reg].name;
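/* e.g. get_register_name_32(R_EBX) returns "EBX"; indexes outside the 32-bit
 * register set return NULL. */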
353 /* collects per-function cpuid data
355 typedef struct model_features_t {
356 uint32_t *guest_feat;
357 uint32_t *host_feat;
358 FeatureWord feat_word;
359 } model_features_t;
361 /* KVM-specific features that are automatically added to all CPU models
362 * when KVM is enabled.
364 static uint32_t kvm_default_features[FEATURE_WORDS] = {
365 [FEAT_KVM] = (1 << KVM_FEATURE_CLOCKSOURCE) |
366 (1 << KVM_FEATURE_NOP_IO_DELAY) |
367 (1 << KVM_FEATURE_CLOCKSOURCE2) |
368 (1 << KVM_FEATURE_ASYNC_PF) |
369 (1 << KVM_FEATURE_STEAL_TIME) |
370 (1 << KVM_FEATURE_PV_EOI) |
371 (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT),
372 [FEAT_1_ECX] = CPUID_EXT_X2APIC,
375 void x86_cpu_compat_disable_kvm_features(FeatureWord w, uint32_t features)
377 kvm_default_features[w] &= ~features;
380 void host_cpuid(uint32_t function, uint32_t count,
381 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
383 uint32_t vec[4];
385 #ifdef __x86_64__
386 asm volatile("cpuid"
387 : "=a"(vec[0]), "=b"(vec[1]),
388 "=c"(vec[2]), "=d"(vec[3])
389 : "0"(function), "c"(count) : "cc");
390 #elif defined(__i386__)
391 asm volatile("pusha \n\t"
392 "cpuid \n\t"
393 "mov %%eax, 0(%2) \n\t"
394 "mov %%ebx, 4(%2) \n\t"
395 "mov %%ecx, 8(%2) \n\t"
396 "mov %%edx, 12(%2) \n\t"
397 "popa"
398 : : "a"(function), "c"(count), "S"(vec)
399 : "memory", "cc");
400 #else
401 abort();
402 #endif
404 if (eax)
405 *eax = vec[0];
406 if (ebx)
407 *ebx = vec[1];
408 if (ecx)
409 *ecx = vec[2];
410 if (edx)
411 *edx = vec[3];
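/* Typical use: host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL) reads the
 * highest supported extended leaf into eax; any output pointer the caller
 * does not care about may be NULL. */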
414 #define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
416 /* general substring compare of *[s1..e1) and *[s2..e2). sx is start of
417 * a substring. ex if !NULL points to the first char after a substring,
418  * otherwise the string is assumed to be sized by a terminating nul.
419 * Return lexical ordering of *s1:*s2.
421 static int sstrcmp(const char *s1, const char *e1, const char *s2,
422 const char *e2)
424 for (;;) {
425 if (!*s1 || !*s2 || *s1 != *s2)
426 return (*s1 - *s2);
427 ++s1, ++s2;
428 if (s1 == e1 && s2 == e2)
429 return (0);
430 else if (s1 == e1)
431 return (*s2);
432 else if (s2 == e2)
433 return (*s1);
437 /* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
438 * '|' delimited (possibly empty) strings in which case search for a match
439 * within the alternatives proceeds left to right. Return 0 for success,
440 * non-zero otherwise.
442 static int altcmp(const char *s, const char *e, const char *altstr)
444 const char *p, *q;
446 for (q = p = altstr; ; ) {
447 while (*p && *p != '|')
448 ++p;
449 if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
450 return (0);
451 if (!*p)
452 return (1);
453 else
454 q = ++p;
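/* e.g. altcmp(s, e, "sse4.1|sse4_1") matches either spelling, which is how
 * the "a|b" aliases in the feature name tables above are handled. */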
458 /* search featureset for flag *[s..e), if found set corresponding bit in
459 * *pval and return true, otherwise return false
461 static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
462 const char **featureset)
464 uint32_t mask;
465 const char **ppc;
466 bool found = false;
468 for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
469 if (*ppc && !altcmp(s, e, *ppc)) {
470 *pval |= mask;
471 found = true;
474 return found;
477 static void add_flagname_to_bitmaps(const char *flagname,
478 FeatureWordArray words)
480 FeatureWord w;
481 for (w = 0; w < FEATURE_WORDS; w++) {
482 FeatureWordInfo *wi = &feature_word_info[w];
483 if (wi->feat_names &&
484 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
485 break;
488 if (w == FEATURE_WORDS) {
489 fprintf(stderr, "CPU feature %s not found\n", flagname);
493 /* CPU class name definitions: */
495 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
496 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
498 /* Return type name for a given CPU model name
499 * Caller is responsible for freeing the returned string.
501 static char *x86_cpu_type_name(const char *model_name)
503 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
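/* e.g. x86_cpu_type_name("qemu64") returns "qemu64-x86_64-cpu" (or
 * "qemu64-i386-cpu", depending on how TYPE_X86_CPU is defined for the
 * target), matching the QOM class names registered for the builtin models. */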
506 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
508 ObjectClass *oc;
509 char *typename;
511 if (cpu_model == NULL) {
512 return NULL;
515 typename = x86_cpu_type_name(cpu_model);
516 oc = object_class_by_name(typename);
517 g_free(typename);
518 return oc;
521 struct X86CPUDefinition {
522 const char *name;
523 uint32_t level;
524 uint32_t xlevel;
525 uint32_t xlevel2;
526 /* vendor is zero-terminated, 12 character ASCII string */
527 char vendor[CPUID_VENDOR_SZ + 1];
528 int family;
529 int model;
530 int stepping;
531 FeatureWordArray features;
532 char model_id[48];
533 bool cache_info_passthrough;
536 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
537 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
538 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
539 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
540 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
541 CPUID_PSE36 | CPUID_FXSR)
542 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
543 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
544 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
545 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
546 CPUID_PAE | CPUID_SEP | CPUID_APIC)
548 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
549 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
550 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
551 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
552 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS)
553 /* partly implemented:
554 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64)
555 CPUID_PSE36 (needed for Solaris) */
556 /* missing:
557 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
558 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
559 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
560 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
561 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
562 /* missing:
563 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
564 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
565 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
566 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_XSAVE,
567 CPUID_EXT_OSXSAVE, CPUID_EXT_AVX, CPUID_EXT_F16C,
568 CPUID_EXT_RDRAND */
569 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
570 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
571 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT)
572 /* missing:
573 CPUID_EXT2_PDPE1GB */
574 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
575 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
576 #define TCG_SVM_FEATURES 0
577 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
578           CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX)
579 /* missing:
580 CPUID_7_0_EBX_FSGSBASE, CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
581 CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
582 CPUID_7_0_EBX_RDSEED */
584 static X86CPUDefinition builtin_x86_defs[] = {
586 .name = "qemu64",
587 .level = 4,
588 .vendor = CPUID_VENDOR_AMD,
589 .family = 6,
590 .model = 6,
591 .stepping = 3,
592 .features[FEAT_1_EDX] =
593 PPRO_FEATURES |
594 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
595 CPUID_PSE36,
596 .features[FEAT_1_ECX] =
597 CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT,
598 .features[FEAT_8000_0001_EDX] =
599 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
600 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
601 .features[FEAT_8000_0001_ECX] =
602 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
603 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
604 .xlevel = 0x8000000A,
607 .name = "phenom",
608 .level = 5,
609 .vendor = CPUID_VENDOR_AMD,
610 .family = 16,
611 .model = 2,
612 .stepping = 3,
613 .features[FEAT_1_EDX] =
614 PPRO_FEATURES |
615 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
616 CPUID_PSE36 | CPUID_VME | CPUID_HT,
617 .features[FEAT_1_ECX] =
618 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
619 CPUID_EXT_POPCNT,
620 .features[FEAT_8000_0001_EDX] =
621 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
622 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
623 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
624 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
625 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
626 CPUID_EXT3_CR8LEG,
627 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
628 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
629 .features[FEAT_8000_0001_ECX] =
630 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
631 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
632 .features[FEAT_SVM] =
633 CPUID_SVM_NPT | CPUID_SVM_LBRV,
634 .xlevel = 0x8000001A,
635 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
638 .name = "core2duo",
639 .level = 10,
640 .vendor = CPUID_VENDOR_INTEL,
641 .family = 6,
642 .model = 15,
643 .stepping = 11,
644 .features[FEAT_1_EDX] =
645 PPRO_FEATURES |
646 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
647 CPUID_PSE36 | CPUID_VME | CPUID_DTS | CPUID_ACPI | CPUID_SS |
648 CPUID_HT | CPUID_TM | CPUID_PBE,
649 .features[FEAT_1_ECX] =
650 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
651 CPUID_EXT_DTES64 | CPUID_EXT_DSCPL | CPUID_EXT_VMX | CPUID_EXT_EST |
652 CPUID_EXT_TM2 | CPUID_EXT_CX16 | CPUID_EXT_XTPR | CPUID_EXT_PDCM,
653 .features[FEAT_8000_0001_EDX] =
654 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
655 .features[FEAT_8000_0001_ECX] =
656 CPUID_EXT3_LAHF_LM,
657 .xlevel = 0x80000008,
658 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
661 .name = "kvm64",
662 .level = 5,
663 .vendor = CPUID_VENDOR_INTEL,
664 .family = 15,
665 .model = 6,
666 .stepping = 1,
667 /* Missing: CPUID_VME, CPUID_HT */
668 .features[FEAT_1_EDX] =
669 PPRO_FEATURES |
670 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
671 CPUID_PSE36,
672 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
673 .features[FEAT_1_ECX] =
674 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
675 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
676 .features[FEAT_8000_0001_EDX] =
677 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
678 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
679 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
680 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
681 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
682 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
683 .features[FEAT_8000_0001_ECX] =
685 .xlevel = 0x80000008,
686 .model_id = "Common KVM processor"
689 .name = "qemu32",
690 .level = 4,
691 .vendor = CPUID_VENDOR_INTEL,
692 .family = 6,
693 .model = 6,
694 .stepping = 3,
695 .features[FEAT_1_EDX] =
696 PPRO_FEATURES,
697 .features[FEAT_1_ECX] =
698 CPUID_EXT_SSE3 | CPUID_EXT_POPCNT,
699 .xlevel = 0x80000004,
702 .name = "kvm32",
703 .level = 5,
704 .vendor = CPUID_VENDOR_INTEL,
705 .family = 15,
706 .model = 6,
707 .stepping = 1,
708 .features[FEAT_1_EDX] =
709 PPRO_FEATURES |
710 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
711 .features[FEAT_1_ECX] =
712 CPUID_EXT_SSE3,
713 .features[FEAT_8000_0001_EDX] =
714 PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES,
715 .features[FEAT_8000_0001_ECX] =
717 .xlevel = 0x80000008,
718 .model_id = "Common 32-bit KVM processor"
721 .name = "coreduo",
722 .level = 10,
723 .vendor = CPUID_VENDOR_INTEL,
724 .family = 6,
725 .model = 14,
726 .stepping = 8,
727 .features[FEAT_1_EDX] =
728 PPRO_FEATURES | CPUID_VME |
729 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_DTS | CPUID_ACPI |
730 CPUID_SS | CPUID_HT | CPUID_TM | CPUID_PBE,
731 .features[FEAT_1_ECX] =
732 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_VMX |
733 CPUID_EXT_EST | CPUID_EXT_TM2 | CPUID_EXT_XTPR | CPUID_EXT_PDCM,
734 .features[FEAT_8000_0001_EDX] =
735 CPUID_EXT2_NX,
736 .xlevel = 0x80000008,
737 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
740 .name = "486",
741 .level = 1,
742 .vendor = CPUID_VENDOR_INTEL,
743 .family = 4,
744 .model = 8,
745 .stepping = 0,
746 .features[FEAT_1_EDX] =
747 I486_FEATURES,
748 .xlevel = 0,
751 .name = "pentium",
752 .level = 1,
753 .vendor = CPUID_VENDOR_INTEL,
754 .family = 5,
755 .model = 4,
756 .stepping = 3,
757 .features[FEAT_1_EDX] =
758 PENTIUM_FEATURES,
759 .xlevel = 0,
762 .name = "pentium2",
763 .level = 2,
764 .vendor = CPUID_VENDOR_INTEL,
765 .family = 6,
766 .model = 5,
767 .stepping = 2,
768 .features[FEAT_1_EDX] =
769 PENTIUM2_FEATURES,
770 .xlevel = 0,
773 .name = "pentium3",
774 .level = 2,
775 .vendor = CPUID_VENDOR_INTEL,
776 .family = 6,
777 .model = 7,
778 .stepping = 3,
779 .features[FEAT_1_EDX] =
780 PENTIUM3_FEATURES,
781 .xlevel = 0,
784 .name = "athlon",
785 .level = 2,
786 .vendor = CPUID_VENDOR_AMD,
787 .family = 6,
788 .model = 2,
789 .stepping = 3,
790 .features[FEAT_1_EDX] =
791 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
792 CPUID_MCA,
793 .features[FEAT_8000_0001_EDX] =
794 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
795 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
796 .xlevel = 0x80000008,
799 .name = "n270",
800 /* original is on level 10 */
801 .level = 5,
802 .vendor = CPUID_VENDOR_INTEL,
803 .family = 6,
804 .model = 28,
805 .stepping = 2,
806 .features[FEAT_1_EDX] =
807 PPRO_FEATURES |
808 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME | CPUID_DTS |
809 CPUID_ACPI | CPUID_SS | CPUID_HT | CPUID_TM | CPUID_PBE,
810         /* Some CPUs lack CPUID_SEP */
811 .features[FEAT_1_ECX] =
812 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
813 CPUID_EXT_DSCPL | CPUID_EXT_EST | CPUID_EXT_TM2 | CPUID_EXT_XTPR |
814 CPUID_EXT_MOVBE,
815 .features[FEAT_8000_0001_EDX] =
816 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
817 CPUID_EXT2_NX,
818 .features[FEAT_8000_0001_ECX] =
819 CPUID_EXT3_LAHF_LM,
820 .xlevel = 0x8000000A,
821 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
824 .name = "Conroe",
825 .level = 4,
826 .vendor = CPUID_VENDOR_INTEL,
827 .family = 6,
828 .model = 15,
829 .stepping = 3,
830 .features[FEAT_1_EDX] =
831 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
832 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
833 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
834 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
835 CPUID_DE | CPUID_FP87,
836 .features[FEAT_1_ECX] =
837 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
838 .features[FEAT_8000_0001_EDX] =
839 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
840 .features[FEAT_8000_0001_ECX] =
841 CPUID_EXT3_LAHF_LM,
842 .xlevel = 0x8000000A,
843 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
846 .name = "Penryn",
847 .level = 4,
848 .vendor = CPUID_VENDOR_INTEL,
849 .family = 6,
850 .model = 23,
851 .stepping = 3,
852 .features[FEAT_1_EDX] =
853 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
854 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
855 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
856 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
857 CPUID_DE | CPUID_FP87,
858 .features[FEAT_1_ECX] =
859 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
860 CPUID_EXT_SSE3,
861 .features[FEAT_8000_0001_EDX] =
862 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
863 .features[FEAT_8000_0001_ECX] =
864 CPUID_EXT3_LAHF_LM,
865 .xlevel = 0x8000000A,
866 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
869 .name = "Nehalem",
870 .level = 4,
871 .vendor = CPUID_VENDOR_INTEL,
872 .family = 6,
873 .model = 26,
874 .stepping = 3,
875 .features[FEAT_1_EDX] =
876 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
877 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
878 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
879 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
880 CPUID_DE | CPUID_FP87,
881 .features[FEAT_1_ECX] =
882 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
883 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
884 .features[FEAT_8000_0001_EDX] =
885 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
886 .features[FEAT_8000_0001_ECX] =
887 CPUID_EXT3_LAHF_LM,
888 .xlevel = 0x8000000A,
889 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
892 .name = "Westmere",
893 .level = 11,
894 .vendor = CPUID_VENDOR_INTEL,
895 .family = 6,
896 .model = 44,
897 .stepping = 1,
898 .features[FEAT_1_EDX] =
899 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
900 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
901 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
902 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
903 CPUID_DE | CPUID_FP87,
904 .features[FEAT_1_ECX] =
905 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
906 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
907 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
908 .features[FEAT_8000_0001_EDX] =
909 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
910 .features[FEAT_8000_0001_ECX] =
911 CPUID_EXT3_LAHF_LM,
912 .xlevel = 0x8000000A,
913 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
916 .name = "SandyBridge",
917 .level = 0xd,
918 .vendor = CPUID_VENDOR_INTEL,
919 .family = 6,
920 .model = 42,
921 .stepping = 1,
922 .features[FEAT_1_EDX] =
923 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
924 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
925 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
926 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
927 CPUID_DE | CPUID_FP87,
928 .features[FEAT_1_ECX] =
929 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
930 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
931 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
932 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
933 CPUID_EXT_SSE3,
934 .features[FEAT_8000_0001_EDX] =
935 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
936 CPUID_EXT2_SYSCALL,
937 .features[FEAT_8000_0001_ECX] =
938 CPUID_EXT3_LAHF_LM,
939 .xlevel = 0x8000000A,
940 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
943 .name = "Haswell",
944 .level = 0xd,
945 .vendor = CPUID_VENDOR_INTEL,
946 .family = 6,
947 .model = 60,
948 .stepping = 1,
949 .features[FEAT_1_EDX] =
950 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
951 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
952 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
953 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
954 CPUID_DE | CPUID_FP87,
955 .features[FEAT_1_ECX] =
956 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
957 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
958 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
959 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
960 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
961 CPUID_EXT_PCID,
962 .features[FEAT_8000_0001_EDX] =
963 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
964 CPUID_EXT2_SYSCALL,
965 .features[FEAT_8000_0001_ECX] =
966 CPUID_EXT3_LAHF_LM,
967 .features[FEAT_7_0_EBX] =
968 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
969 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
970 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
971 CPUID_7_0_EBX_RTM,
972 .xlevel = 0x8000000A,
973 .model_id = "Intel Core Processor (Haswell)",
976 .name = "Opteron_G1",
977 .level = 5,
978 .vendor = CPUID_VENDOR_AMD,
979 .family = 15,
980 .model = 6,
981 .stepping = 1,
982 .features[FEAT_1_EDX] =
983 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
984 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
985 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
986 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
987 CPUID_DE | CPUID_FP87,
988 .features[FEAT_1_ECX] =
989 CPUID_EXT_SSE3,
990 .features[FEAT_8000_0001_EDX] =
991 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
992 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
993 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
994 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
995 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
996 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
997 .xlevel = 0x80000008,
998 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1001 .name = "Opteron_G2",
1002 .level = 5,
1003 .vendor = CPUID_VENDOR_AMD,
1004 .family = 15,
1005 .model = 6,
1006 .stepping = 1,
1007 .features[FEAT_1_EDX] =
1008 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1009 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1010 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1011 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1012 CPUID_DE | CPUID_FP87,
1013 .features[FEAT_1_ECX] =
1014 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1015 .features[FEAT_8000_0001_EDX] =
1016 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1017 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1018 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1019 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1020 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1021 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1022 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1023 .features[FEAT_8000_0001_ECX] =
1024 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1025 .xlevel = 0x80000008,
1026 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1029 .name = "Opteron_G3",
1030 .level = 5,
1031 .vendor = CPUID_VENDOR_AMD,
1032 .family = 15,
1033 .model = 6,
1034 .stepping = 1,
1035 .features[FEAT_1_EDX] =
1036 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1037 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1038 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1039 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1040 CPUID_DE | CPUID_FP87,
1041 .features[FEAT_1_ECX] =
1042 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1043 CPUID_EXT_SSE3,
1044 .features[FEAT_8000_0001_EDX] =
1045 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1046 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1047 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1048 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1049 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1050 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1051 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1052 .features[FEAT_8000_0001_ECX] =
1053 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1054 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1055 .xlevel = 0x80000008,
1056 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1059 .name = "Opteron_G4",
1060 .level = 0xd,
1061 .vendor = CPUID_VENDOR_AMD,
1062 .family = 21,
1063 .model = 1,
1064 .stepping = 2,
1065 .features[FEAT_1_EDX] =
1066 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1067 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1068 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1069 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1070 CPUID_DE | CPUID_FP87,
1071 .features[FEAT_1_ECX] =
1072 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1073 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1074 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1075 CPUID_EXT_SSE3,
1076 .features[FEAT_8000_0001_EDX] =
1077 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1078 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1079 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1080 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1081 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1082 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1083 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1084 .features[FEAT_8000_0001_ECX] =
1085 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1086 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1087 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1088 CPUID_EXT3_LAHF_LM,
1089 .xlevel = 0x8000001A,
1090 .model_id = "AMD Opteron 62xx class CPU",
1093 .name = "Opteron_G5",
1094 .level = 0xd,
1095 .vendor = CPUID_VENDOR_AMD,
1096 .family = 21,
1097 .model = 2,
1098 .stepping = 0,
1099 .features[FEAT_1_EDX] =
1100 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1101 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1102 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1103 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1104 CPUID_DE | CPUID_FP87,
1105 .features[FEAT_1_ECX] =
1106 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1107 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1108 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1109 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1110 .features[FEAT_8000_0001_EDX] =
1111 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1112 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1113 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1114 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1115 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1116 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1117 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1118 .features[FEAT_8000_0001_ECX] =
1119 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1120 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1121 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1122 CPUID_EXT3_LAHF_LM,
1123 .xlevel = 0x8000001A,
1124 .model_id = "AMD Opteron 63xx class CPU",
1129 * x86_cpu_compat_set_features:
1130 * @cpu_model: CPU model name to be changed. If NULL, all CPU models are changed
1131 * @w: Identifies the feature word to be changed.
1132 * @feat_add: Feature bits to be added to feature word
1133 * @feat_remove: Feature bits to be removed from feature word
1135 * Change CPU model feature bits for compatibility.
1137 * This function may be used by machine-type compatibility functions
1138 * to enable or disable feature bits on specific CPU models.
1140 void x86_cpu_compat_set_features(const char *cpu_model, FeatureWord w,
1141 uint32_t feat_add, uint32_t feat_remove)
1143 X86CPUDefinition *def;
1144 int i;
1145 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1146 def = &builtin_x86_defs[i];
1147 if (!cpu_model || !strcmp(cpu_model, def->name)) {
1148 def->features[w] |= feat_add;
1149 def->features[w] &= ~feat_remove;
1154 #ifdef CONFIG_KVM
1156 static int cpu_x86_fill_model_id(char *str)
1158 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1159 int i;
1161 for (i = 0; i < 3; i++) {
1162 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
1163 memcpy(str + i * 16 + 0, &eax, 4);
1164 memcpy(str + i * 16 + 4, &ebx, 4);
1165 memcpy(str + i * 16 + 8, &ecx, 4);
1166 memcpy(str + i * 16 + 12, &edx, 4);
1168 return 0;
1171 static X86CPUDefinition host_cpudef;
1173 /* class_init for the "host" CPU model
1175 * This function may be called before KVM is initialized.
1177 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1179 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1180 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1182 xcc->kvm_required = true;
1184 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1185 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1187 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1188 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1189 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1190 host_cpudef.stepping = eax & 0x0F;
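    /* CPUID.1:EAX layout: stepping in bits 3:0, model in 7:4, family in 11:8,
     * extended model in 19:16, extended family in 27:20.  The extended fields
     * are folded in above to obtain the effective family and model numbers. */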
1192 cpu_x86_fill_model_id(host_cpudef.model_id);
1194 xcc->cpu_def = &host_cpudef;
1195 host_cpudef.cache_info_passthrough = true;
1197 /* level, xlevel, xlevel2, and the feature words are initialized on
1198 * instance_init, because they require KVM to be initialized.
1202 static void host_x86_cpu_initfn(Object *obj)
1204 X86CPU *cpu = X86_CPU(obj);
1205 CPUX86State *env = &cpu->env;
1206 KVMState *s = kvm_state;
1207 FeatureWord w;
1209 assert(kvm_enabled());
1211 env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1212 env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1213 env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1215 for (w = 0; w < FEATURE_WORDS; w++) {
1216 FeatureWordInfo *wi = &feature_word_info[w];
1217 env->features[w] =
1218 kvm_arch_get_supported_cpuid(s, wi->cpuid_eax, wi->cpuid_ecx,
1219 wi->cpuid_reg);
1221 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1224 static const TypeInfo host_x86_cpu_type_info = {
1225 .name = X86_CPU_TYPE_NAME("host"),
1226 .parent = TYPE_X86_CPU,
1227 .instance_init = host_x86_cpu_initfn,
1228 .class_init = host_x86_cpu_class_init,
1231 #endif
1233 static int unavailable_host_feature(FeatureWordInfo *f, uint32_t mask)
1235 int i;
1237 for (i = 0; i < 32; ++i)
1238 if (1 << i & mask) {
1239 const char *reg = get_register_name_32(f->cpuid_reg);
1240 assert(reg);
1241 fprintf(stderr, "warning: host doesn't support requested feature: "
1242 "CPUID.%02XH:%s%s%s [bit %d]\n",
1243 f->cpuid_eax, reg,
1244 f->feat_names[i] ? "." : "",
1245 f->feat_names[i] ? f->feat_names[i] : "", i);
1246 break;
1248 return 0;
1251 /* Check if all requested cpu flags are making their way to the guest
1253 * Returns 0 if all flags are supported by the host, non-zero otherwise.
1255 * This function may be called only if KVM is enabled.
1257 static int kvm_check_features_against_host(KVMState *s, X86CPU *cpu)
1259 CPUX86State *env = &cpu->env;
1260 int rv = 0;
1261 FeatureWord w;
1263 assert(kvm_enabled());
1265 for (w = 0; w < FEATURE_WORDS; w++) {
1266 FeatureWordInfo *wi = &feature_word_info[w];
1267 uint32_t guest_feat = env->features[w];
1268 uint32_t host_feat = kvm_arch_get_supported_cpuid(s, wi->cpuid_eax,
1269 wi->cpuid_ecx,
1270 wi->cpuid_reg);
1271 uint32_t mask;
1272 for (mask = 1; mask; mask <<= 1) {
1273 if (guest_feat & mask && !(host_feat & mask)) {
1274 unavailable_host_feature(wi, mask);
1275 rv = 1;
1279 return rv;
1282 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, void *opaque,
1283 const char *name, Error **errp)
1285 X86CPU *cpu = X86_CPU(obj);
1286 CPUX86State *env = &cpu->env;
1287 int64_t value;
1289 value = (env->cpuid_version >> 8) & 0xf;
1290 if (value == 0xf) {
1291 value += (env->cpuid_version >> 20) & 0xff;
1293 visit_type_int(v, &value, name, errp);
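/* The setter below applies the inverse encoding: families up to 0xf are
 * stored directly in bits 11:8, larger values store 0xf there and put the
 * remainder in the extended family field (bits 27:20), e.g. family 21
 * becomes 0xf00 | (6 << 20). */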
1296 static void x86_cpuid_version_set_family(Object *obj, Visitor *v, void *opaque,
1297 const char *name, Error **errp)
1299 X86CPU *cpu = X86_CPU(obj);
1300 CPUX86State *env = &cpu->env;
1301 const int64_t min = 0;
1302 const int64_t max = 0xff + 0xf;
1303 int64_t value;
1305 visit_type_int(v, &value, name, errp);
1306 if (error_is_set(errp)) {
1307 return;
1309 if (value < min || value > max) {
1310 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1311 name ? name : "null", value, min, max);
1312 return;
1315 env->cpuid_version &= ~0xff00f00;
1316 if (value > 0x0f) {
1317 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1318 } else {
1319 env->cpuid_version |= value << 8;
1323 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, void *opaque,
1324 const char *name, Error **errp)
1326 X86CPU *cpu = X86_CPU(obj);
1327 CPUX86State *env = &cpu->env;
1328 int64_t value;
1330 value = (env->cpuid_version >> 4) & 0xf;
1331 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1332 visit_type_int(v, &value, name, errp);
1335 static void x86_cpuid_version_set_model(Object *obj, Visitor *v, void *opaque,
1336 const char *name, Error **errp)
1338 X86CPU *cpu = X86_CPU(obj);
1339 CPUX86State *env = &cpu->env;
1340 const int64_t min = 0;
1341 const int64_t max = 0xff;
1342 int64_t value;
1344 visit_type_int(v, &value, name, errp);
1345 if (error_is_set(errp)) {
1346 return;
1348 if (value < min || value > max) {
1349 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1350 name ? name : "null", value, min, max);
1351 return;
1354 env->cpuid_version &= ~0xf00f0;
1355 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1358 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1359 void *opaque, const char *name,
1360 Error **errp)
1362 X86CPU *cpu = X86_CPU(obj);
1363 CPUX86State *env = &cpu->env;
1364 int64_t value;
1366 value = env->cpuid_version & 0xf;
1367 visit_type_int(v, &value, name, errp);
1370 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1371 void *opaque, const char *name,
1372 Error **errp)
1374 X86CPU *cpu = X86_CPU(obj);
1375 CPUX86State *env = &cpu->env;
1376 const int64_t min = 0;
1377 const int64_t max = 0xf;
1378 int64_t value;
1380 visit_type_int(v, &value, name, errp);
1381 if (error_is_set(errp)) {
1382 return;
1384 if (value < min || value > max) {
1385 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1386 name ? name : "null", value, min, max);
1387 return;
1390 env->cpuid_version &= ~0xf;
1391 env->cpuid_version |= value & 0xf;
1394 static void x86_cpuid_get_level(Object *obj, Visitor *v, void *opaque,
1395 const char *name, Error **errp)
1397 X86CPU *cpu = X86_CPU(obj);
1399 visit_type_uint32(v, &cpu->env.cpuid_level, name, errp);
1402 static void x86_cpuid_set_level(Object *obj, Visitor *v, void *opaque,
1403 const char *name, Error **errp)
1405 X86CPU *cpu = X86_CPU(obj);
1407 visit_type_uint32(v, &cpu->env.cpuid_level, name, errp);
1410 static void x86_cpuid_get_xlevel(Object *obj, Visitor *v, void *opaque,
1411 const char *name, Error **errp)
1413 X86CPU *cpu = X86_CPU(obj);
1415 visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp);
1418 static void x86_cpuid_set_xlevel(Object *obj, Visitor *v, void *opaque,
1419 const char *name, Error **errp)
1421 X86CPU *cpu = X86_CPU(obj);
1423 visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp);
1426 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1428 X86CPU *cpu = X86_CPU(obj);
1429 CPUX86State *env = &cpu->env;
1430 char *value;
1432 value = (char *)g_malloc(CPUID_VENDOR_SZ + 1);
1433 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1434 env->cpuid_vendor3);
1435 return value;
1438 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1439 Error **errp)
1441 X86CPU *cpu = X86_CPU(obj);
1442 CPUX86State *env = &cpu->env;
1443 int i;
1445 if (strlen(value) != CPUID_VENDOR_SZ) {
1446 error_set(errp, QERR_PROPERTY_VALUE_BAD, "",
1447 "vendor", value);
1448 return;
1451 env->cpuid_vendor1 = 0;
1452 env->cpuid_vendor2 = 0;
1453 env->cpuid_vendor3 = 0;
1454 for (i = 0; i < 4; i++) {
1455 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1456 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1457 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1461 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1463 X86CPU *cpu = X86_CPU(obj);
1464 CPUX86State *env = &cpu->env;
1465 char *value;
1466 int i;
1468 value = g_malloc(48 + 1);
1469 for (i = 0; i < 48; i++) {
1470 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1472 value[48] = '\0';
1473 return value;
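/* The 48-character model ID is kept as twelve 32-bit words in cpuid_model[];
 * CPUID leaves 0x80000002..0x80000004 each return four of those words in
 * EAX..EDX (see cpu_x86_fill_model_id() above for the host-side direction). */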
1476 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1477 Error **errp)
1479 X86CPU *cpu = X86_CPU(obj);
1480 CPUX86State *env = &cpu->env;
1481 int c, len, i;
1483 if (model_id == NULL) {
1484 model_id = "";
1486 len = strlen(model_id);
1487 memset(env->cpuid_model, 0, 48);
1488 for (i = 0; i < 48; i++) {
1489 if (i >= len) {
1490 c = '\0';
1491 } else {
1492 c = (uint8_t)model_id[i];
1494 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1498 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, void *opaque,
1499 const char *name, Error **errp)
1501 X86CPU *cpu = X86_CPU(obj);
1502 int64_t value;
1504 value = cpu->env.tsc_khz * 1000;
1505 visit_type_int(v, &value, name, errp);
1508 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, void *opaque,
1509 const char *name, Error **errp)
1511 X86CPU *cpu = X86_CPU(obj);
1512 const int64_t min = 0;
1513 const int64_t max = INT64_MAX;
1514 int64_t value;
1516 visit_type_int(v, &value, name, errp);
1517 if (error_is_set(errp)) {
1518 return;
1520 if (value < min || value > max) {
1521 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1522 name ? name : "null", value, min, max);
1523 return;
1526 cpu->env.tsc_khz = value / 1000;
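/* The "tsc-frequency" property is specified in Hz externally but stored as
 * kHz in env->tsc_khz, hence the conversions in this getter/setter pair. */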
1529 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, void *opaque,
1530 const char *name, Error **errp)
1532 X86CPU *cpu = X86_CPU(obj);
1533 int64_t value = cpu->env.cpuid_apic_id;
1535 visit_type_int(v, &value, name, errp);
1538 static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, void *opaque,
1539 const char *name, Error **errp)
1541 X86CPU *cpu = X86_CPU(obj);
1542 DeviceState *dev = DEVICE(obj);
1543 const int64_t min = 0;
1544 const int64_t max = UINT32_MAX;
1545 Error *error = NULL;
1546 int64_t value;
1548 if (dev->realized) {
1549 error_setg(errp, "Attempt to set property '%s' on '%s' after "
1550 "it was realized", name, object_get_typename(obj));
1551 return;
1554 visit_type_int(v, &value, name, &error);
1555 if (error) {
1556 error_propagate(errp, error);
1557 return;
1559 if (value < min || value > max) {
1560 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1561 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
1562 object_get_typename(obj), name, value, min, max);
1563 return;
1566 if ((value != cpu->env.cpuid_apic_id) && cpu_exists(value)) {
1567 error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
1568 return;
1570 cpu->env.cpuid_apic_id = value;
1573 /* Generic getter for "feature-words" and "filtered-features" properties */
1574 static void x86_cpu_get_feature_words(Object *obj, Visitor *v, void *opaque,
1575 const char *name, Error **errp)
1577 uint32_t *array = (uint32_t *)opaque;
1578 FeatureWord w;
1579 Error *err = NULL;
1580 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1581 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1582 X86CPUFeatureWordInfoList *list = NULL;
1584 for (w = 0; w < FEATURE_WORDS; w++) {
1585 FeatureWordInfo *wi = &feature_word_info[w];
1586 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1587 qwi->cpuid_input_eax = wi->cpuid_eax;
1588 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1589 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1590 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1591 qwi->features = array[w];
1593 /* List will be in reverse order, but order shouldn't matter */
1594 list_entries[w].next = list;
1595 list_entries[w].value = &word_infos[w];
1596 list = &list_entries[w];
1599 visit_type_X86CPUFeatureWordInfoList(v, &list, "feature-words", &err);
1600 error_propagate(errp, err);
1603 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1604 const char *name, Error **errp)
1606 X86CPU *cpu = X86_CPU(obj);
1607 int64_t value = cpu->hyperv_spinlock_attempts;
1609 visit_type_int(v, &value, name, errp);
1612 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1613 const char *name, Error **errp)
1615 const int64_t min = 0xFFF;
1616 const int64_t max = UINT_MAX;
1617 X86CPU *cpu = X86_CPU(obj);
1618 Error *err = NULL;
1619 int64_t value;
1621 visit_type_int(v, &value, name, &err);
1622 if (err) {
1623 error_propagate(errp, err);
1624 return;
1627 if (value < min || value > max) {
1628 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1629 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1630 object_get_typename(obj), name ? name : "null",
1631 value, min, max);
1632 return;
1634 cpu->hyperv_spinlock_attempts = value;
1637 static PropertyInfo qdev_prop_spinlocks = {
1638 .name = "int",
1639 .get = x86_get_hv_spinlocks,
1640 .set = x86_set_hv_spinlocks,
1643 /* Convert all '_' in a feature string option name to '-', to make feature
1644  * names conform to the QOM property naming rule, which uses '-' instead of '_'.
1646 static inline void feat2prop(char *s)
1648 while ((s = strchr(s, '_'))) {
1649 *s = '-';
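/* e.g. a command-line "tsc_freq=..." or "hv_spinlocks=..." option is rewritten
 * to "tsc-freq"/"hv-spinlocks" before being matched by the parser below. */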
1653 /* Parse "+feature,-feature,feature=foo" CPU feature string
1655 static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
1656 Error **errp)
1658 X86CPU *cpu = X86_CPU(cs);
1659 char *featurestr; /* Single 'key=value" string being parsed */
1660 /* Features to be added */
1661 FeatureWordArray plus_features = { 0 };
1662 /* Features to be removed */
1663 FeatureWordArray minus_features = { 0 };
1664 uint32_t numvalue;
1665 CPUX86State *env = &cpu->env;
1666 Error *local_err = NULL;
1668 featurestr = features ? strtok(features, ",") : NULL;
1670 while (featurestr) {
1671 char *val;
1672 if (featurestr[0] == '+') {
1673 add_flagname_to_bitmaps(featurestr + 1, plus_features);
1674 } else if (featurestr[0] == '-') {
1675 add_flagname_to_bitmaps(featurestr + 1, minus_features);
1676 } else if ((val = strchr(featurestr, '='))) {
1677 *val = 0; val++;
1678 feat2prop(featurestr);
1679 if (!strcmp(featurestr, "xlevel")) {
1680 char *err;
1681 char num[32];
1683 numvalue = strtoul(val, &err, 0);
1684 if (!*val || *err) {
1685 error_setg(&local_err, "bad numerical value %s", val);
1686 goto out;
1688 if (numvalue < 0x80000000) {
1689 error_report("xlevel value shall always be >= 0x80000000"
1690 ", fixup will be removed in future versions");
1691 numvalue += 0x80000000;
1693 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1694 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1695 } else if (!strcmp(featurestr, "tsc-freq")) {
1696 int64_t tsc_freq;
1697 char *err;
1698 char num[32];
1700 tsc_freq = strtosz_suffix_unit(val, &err,
1701 STRTOSZ_DEFSUFFIX_B, 1000);
1702 if (tsc_freq < 0 || *err) {
1703 error_setg(&local_err, "bad numerical value %s", val);
1704 goto out;
1706 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
1707 object_property_parse(OBJECT(cpu), num, "tsc-frequency",
1708 &local_err);
1709 } else if (!strcmp(featurestr, "hv-spinlocks")) {
1710 char *err;
1711 const int min = 0xFFF;
1712 char num[32];
1713 numvalue = strtoul(val, &err, 0);
1714 if (!*val || *err) {
1715 error_setg(&local_err, "bad numerical value %s", val);
1716 goto out;
1718 if (numvalue < min) {
1719 error_report("hv-spinlocks value shall always be >= 0x%x"
1720 ", fixup will be removed in future versions",
1721 min);
1722 numvalue = min;
1724 snprintf(num, sizeof(num), "%" PRId32, numvalue);
1725 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1726 } else {
1727 object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
1729 } else {
1730 feat2prop(featurestr);
1731 object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
1733 if (local_err) {
1734 error_propagate(errp, local_err);
1735 goto out;
1737 featurestr = strtok(NULL, ",");
1739 env->features[FEAT_1_EDX] |= plus_features[FEAT_1_EDX];
1740 env->features[FEAT_1_ECX] |= plus_features[FEAT_1_ECX];
1741 env->features[FEAT_8000_0001_EDX] |= plus_features[FEAT_8000_0001_EDX];
1742 env->features[FEAT_8000_0001_ECX] |= plus_features[FEAT_8000_0001_ECX];
1743 env->features[FEAT_C000_0001_EDX] |= plus_features[FEAT_C000_0001_EDX];
1744 env->features[FEAT_KVM] |= plus_features[FEAT_KVM];
1745 env->features[FEAT_SVM] |= plus_features[FEAT_SVM];
1746 env->features[FEAT_7_0_EBX] |= plus_features[FEAT_7_0_EBX];
1747 env->features[FEAT_1_EDX] &= ~minus_features[FEAT_1_EDX];
1748 env->features[FEAT_1_ECX] &= ~minus_features[FEAT_1_ECX];
1749 env->features[FEAT_8000_0001_EDX] &= ~minus_features[FEAT_8000_0001_EDX];
1750 env->features[FEAT_8000_0001_ECX] &= ~minus_features[FEAT_8000_0001_ECX];
1751 env->features[FEAT_C000_0001_EDX] &= ~minus_features[FEAT_C000_0001_EDX];
1752 env->features[FEAT_KVM] &= ~minus_features[FEAT_KVM];
1753 env->features[FEAT_SVM] &= ~minus_features[FEAT_SVM];
1754 env->features[FEAT_7_0_EBX] &= ~minus_features[FEAT_7_0_EBX];
1756 out:
1757 return;
1760 /* generate a composite string into buf of all cpuid names in featureset
1761 * selected by fbits. indicate truncation at bufsize in the event of overflow.
1762 * if flags, suppress names undefined in featureset.
1764 static void listflags(char *buf, int bufsize, uint32_t fbits,
1765 const char **featureset, uint32_t flags)
1767 const char **p = &featureset[31];
1768 char *q, *b, bit;
1769 int nc;
1771 b = 4 <= bufsize ? buf + (bufsize -= 3) - 1 : NULL;
1772 *buf = '\0';
1773 for (q = buf, bit = 31; fbits && bufsize; --p, fbits &= ~(1 << bit), --bit)
1774 if (fbits & 1 << bit && (*p || !flags)) {
1775 if (*p)
1776 nc = snprintf(q, bufsize, "%s%s", q == buf ? "" : " ", *p);
1777 else
1778 nc = snprintf(q, bufsize, "%s[%d]", q == buf ? "" : " ", bit);
1779 if (bufsize <= nc) {
1780 if (b) {
1781 memcpy(b, "...", sizeof("..."));
1783 return;
1785 q += nc;
1786 bufsize -= nc;
1790 /* generate CPU information. */
1791 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
1793 X86CPUDefinition *def;
1794 char buf[256];
1795 int i;
1797 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1798 def = &builtin_x86_defs[i];
1799 snprintf(buf, sizeof(buf), "%s", def->name);
1800 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
1802 #ifdef CONFIG_KVM
1803 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
1804 "KVM processor with all supported host features "
1805 "(only available in KVM mode)");
1806 #endif
1808 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
1809 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
1810 FeatureWordInfo *fw = &feature_word_info[i];
1812 listflags(buf, sizeof(buf), (uint32_t)~0, fw->feat_names, 1);
1813 (*cpu_fprintf)(f, " %s\n", buf);
1817 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
1819 CpuDefinitionInfoList *cpu_list = NULL;
1820 X86CPUDefinition *def;
1821 int i;
1823 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1824 CpuDefinitionInfoList *entry;
1825 CpuDefinitionInfo *info;
1827 def = &builtin_x86_defs[i];
1828 info = g_malloc0(sizeof(*info));
1829 info->name = g_strdup(def->name);
1831 entry = g_malloc0(sizeof(*entry));
1832 entry->value = info;
1833 entry->next = cpu_list;
1834 cpu_list = entry;
1837 return cpu_list;
1840 static void filter_features_for_kvm(X86CPU *cpu)
1842 CPUX86State *env = &cpu->env;
1843 KVMState *s = kvm_state;
1844 FeatureWord w;
1846 for (w = 0; w < FEATURE_WORDS; w++) {
1847 FeatureWordInfo *wi = &feature_word_info[w];
1848 uint32_t host_feat = kvm_arch_get_supported_cpuid(s, wi->cpuid_eax,
1849 wi->cpuid_ecx,
1850 wi->cpuid_reg);
1851 uint32_t requested_features = env->features[w];
1852 env->features[w] &= host_feat;
1853 cpu->filtered_features[w] = requested_features & ~env->features[w];
1857 /* Load data from X86CPUDefinition */
1859 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
1861 CPUX86State *env = &cpu->env;
1862 const char *vendor;
1863 char host_vendor[CPUID_VENDOR_SZ + 1];
1865 object_property_set_int(OBJECT(cpu), def->level, "level", errp);
1866 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
1867 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
1868 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
1869 env->features[FEAT_1_EDX] = def->features[FEAT_1_EDX];
1870 env->features[FEAT_1_ECX] = def->features[FEAT_1_ECX];
1871 env->features[FEAT_8000_0001_EDX] = def->features[FEAT_8000_0001_EDX];
1872 env->features[FEAT_8000_0001_ECX] = def->features[FEAT_8000_0001_ECX];
1873 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
1874 env->features[FEAT_KVM] = def->features[FEAT_KVM];
1875 env->features[FEAT_SVM] = def->features[FEAT_SVM];
1876 env->features[FEAT_C000_0001_EDX] = def->features[FEAT_C000_0001_EDX];
1877 env->features[FEAT_7_0_EBX] = def->features[FEAT_7_0_EBX];
1878 env->cpuid_xlevel2 = def->xlevel2;
1879 cpu->cache_info_passthrough = def->cache_info_passthrough;
1881 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
1883 /* Special cases not set in the X86CPUDefinition structs: */
1884 if (kvm_enabled()) {
1885 FeatureWord w;
1886 for (w = 0; w < FEATURE_WORDS; w++) {
1887 env->features[w] |= kvm_default_features[w];
1891 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
1893 /* sysenter isn't supported in compatibility mode on AMD,
1894 * and syscall isn't supported in compatibility mode on Intel.
1895 * Normally we advertise the actual CPU vendor, but you can
1896 * override this using the 'vendor' property if you want to use
1897 * KVM's sysenter/syscall emulation in compatibility mode and
1898 * when doing cross-vendor migration.
1899 */
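/* Illustrative (hypothetical) command line: "-cpu SandyBridge,vendor=GenuineIntel"
 * forces the guest-visible vendor string regardless of the host CPU, e.g. to
 * keep it stable across a cross-vendor migration.  The value must be exactly
 * CPUID_VENDOR_SZ (12) characters long.
 */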
1900 vendor = def->vendor;
1901 if (kvm_enabled()) {
1902 uint32_t ebx = 0, ecx = 0, edx = 0;
1903 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
1904 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
1905 vendor = host_vendor;
1908 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
1912 X86CPU *cpu_x86_create(const char *cpu_model, DeviceState *icc_bridge,
1913 Error **errp)
1915 X86CPU *cpu = NULL;
1916 X86CPUClass *xcc;
1917 ObjectClass *oc;
1918 gchar **model_pieces;
1919 char *name, *features;
1920 Error *error = NULL;
1922 model_pieces = g_strsplit(cpu_model, ",", 2);
1923 if (!model_pieces[0]) {
1924 error_setg(&error, "Invalid/empty CPU model name");
1925 goto out;
1927 name = model_pieces[0];
1928 features = model_pieces[1];
1930 oc = x86_cpu_class_by_name(name);
1931 if (oc == NULL) {
1932 error_setg(&error, "Unable to find CPU definition: %s", name);
1933 goto out;
1935 xcc = X86_CPU_CLASS(oc);
1937 if (xcc->kvm_required && !kvm_enabled()) {
1938 error_setg(&error, "CPU model '%s' requires KVM", name);
1939 goto out;
1942 cpu = X86_CPU(object_new(object_class_get_name(oc)));
1944 #ifndef CONFIG_USER_ONLY
1945 if (icc_bridge == NULL) {
1946 error_setg(&error, "Invalid icc-bridge value");
1947 goto out;
1949 qdev_set_parent_bus(DEVICE(cpu), qdev_get_child_bus(icc_bridge, "icc"));
1950 object_unref(OBJECT(cpu));
1951 #endif
1953 x86_cpu_parse_featurestr(CPU(cpu), features, &error);
1954 if (error) {
1955 goto out;
1958 out:
1959 if (error != NULL) {
1960 error_propagate(errp, error);
1961 if (cpu) {
1962 object_unref(OBJECT(cpu));
1963 cpu = NULL;
1966 g_strfreev(model_pieces);
1967 return cpu;
1970 X86CPU *cpu_x86_init(const char *cpu_model)
1972 Error *error = NULL;
1973 X86CPU *cpu;
1975 cpu = cpu_x86_create(cpu_model, NULL, &error);
1976 if (error) {
1977 goto out;
1980 object_property_set_bool(OBJECT(cpu), true, "realized", &error);
1982 out:
1983 if (error) {
1984 error_report("%s", error_get_pretty(error));
1985 error_free(error);
1986 if (cpu != NULL) {
1987 object_unref(OBJECT(cpu));
1988 cpu = NULL;
1991 return cpu;
1994 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
1996 X86CPUDefinition *cpudef = data;
1997 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1999 xcc->cpu_def = cpudef;
2002 static void x86_register_cpudef_type(X86CPUDefinition *def)
2004 char *typename = x86_cpu_type_name(def->name);
2005 TypeInfo ti = {
2006 .name = typename,
2007 .parent = TYPE_X86_CPU,
2008 .class_init = x86_cpu_cpudef_class_init,
2009 .class_data = def,
2012 type_register(&ti);
2013 g_free(typename);
2016 #if !defined(CONFIG_USER_ONLY)
2018 void cpu_clear_apic_feature(CPUX86State *env)
2020 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2023 #endif /* !CONFIG_USER_ONLY */
2025 /* Initialize list of CPU models, filling some non-static fields if necessary.
2026 */
2027 void x86_cpudef_setup(void)
2029 int i, j;
2030 static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
2032 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
2033 X86CPUDefinition *def = &builtin_x86_defs[i];
2035 /* Look for specific "cpudef" models that have the QEMU version
2036 * in .model_id */
2037 for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
2038 if (strcmp(model_with_versions[j], def->name) == 0) {
2039 pstrcpy(def->model_id, sizeof(def->model_id),
2040 "QEMU Virtual CPU version ");
2041 pstrcat(def->model_id, sizeof(def->model_id),
2042 qemu_get_version());
2043 break;
2049 static void get_cpuid_vendor(CPUX86State *env, uint32_t *ebx,
2050 uint32_t *ecx, uint32_t *edx)
2052 *ebx = env->cpuid_vendor1;
2053 *edx = env->cpuid_vendor2;
2054 *ecx = env->cpuid_vendor3;
2057 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2058 uint32_t *eax, uint32_t *ebx,
2059 uint32_t *ecx, uint32_t *edx)
2061 X86CPU *cpu = x86_env_get_cpu(env);
2062 CPUState *cs = CPU(cpu);
2064 /* Clamp the requested leaf index to the maximum advertised level */
2065 if (index & 0x80000000) {
2066 if (index > env->cpuid_xlevel) {
2067 if (env->cpuid_xlevel2 > 0) {
2068 /* Handle the Centaur's CPUID instruction. */
2069 if (index > env->cpuid_xlevel2) {
2070 index = env->cpuid_xlevel2;
2071 } else if (index < 0xC0000000) {
2072 index = env->cpuid_xlevel;
2074 } else {
2075 /* Intel documentation states that invalid EAX input will
2076 * return the same information as EAX=cpuid_level
2077 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID).
2078 */
2079 index = env->cpuid_level;
2082 } else {
2083 if (index > env->cpuid_level)
2084 index = env->cpuid_level;
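/* Worked example of the clamping above (illustrative values): with
 * cpuid_level == 7, a basic-leaf request for 0x12 is answered as leaf 7.
 * With cpuid_xlevel2 == 0, an out-of-range extended leaf such as
 * 0x8000001F falls back to cpuid_level, matching the Intel-documented
 * behaviour for invalid EAX input.
 */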
2087 switch(index) {
2088 case 0:
2089 *eax = env->cpuid_level;
2090 get_cpuid_vendor(env, ebx, ecx, edx);
2091 break;
2092 case 1:
2093 *eax = env->cpuid_version;
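/* CPUID.01H:EBX layout produced here: bits 31..24 initial APIC ID,
 * bits 23..16 logical processor count (filled in below for SMP),
 * bits 15..8 CLFLUSH line size in quadwords, bits 7..0 brand index (0).
 * Illustrative example: apic_id 2 on a 4-thread socket gives EBX = 0x02040800.
 */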
2094 *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH line size in quadwords (8 * 8 = 64 bytes); Linux relies on it. */
2095 *ecx = env->features[FEAT_1_ECX];
2096 *edx = env->features[FEAT_1_EDX];
2097 if (cs->nr_cores * cs->nr_threads > 1) {
2098 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2099 *edx |= 1 << 28; /* HTT bit */
2101 break;
2102 case 2:
2103 /* cache info: needed for Pentium Pro compatibility */
2104 if (cpu->cache_info_passthrough) {
2105 host_cpuid(index, 0, eax, ebx, ecx, edx);
2106 break;
2108 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2109 *ebx = 0;
2110 *ecx = 0;
2111 *edx = (L1D_DESCRIPTOR << 16) | \
2112 (L1I_DESCRIPTOR << 8) | \
2113 (L2_DESCRIPTOR);
2114 break;
2115 case 4:
2116 /* cache info: needed for Core compatibility */
2117 if (cpu->cache_info_passthrough) {
2118 host_cpuid(index, count, eax, ebx, ecx, edx);
2119 *eax &= ~0xFC000000;
2120 } else {
2121 *eax = 0;
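/* Every valid sub-leaf below encodes its geometry the same way:
 *   EBX = (line_size - 1) | ((partitions - 1) << 12) | ((ways - 1) << 22)
 *   ECX = sets - 1
 * so cache size = ways * partitions * line_size * sets; for instance a
 * 64-byte-line, 8-way, 1-partition, 64-set cache works out to 32 KiB.
 */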
2122 switch (count) {
2123 case 0: /* L1 dcache info */
2124 *eax |= CPUID_4_TYPE_DCACHE | \
2125 CPUID_4_LEVEL(1) | \
2126 CPUID_4_SELF_INIT_LEVEL;
2127 *ebx = (L1D_LINE_SIZE - 1) | \
2128 ((L1D_PARTITIONS - 1) << 12) | \
2129 ((L1D_ASSOCIATIVITY - 1) << 22);
2130 *ecx = L1D_SETS - 1;
2131 *edx = CPUID_4_NO_INVD_SHARING;
2132 break;
2133 case 1: /* L1 icache info */
2134 *eax |= CPUID_4_TYPE_ICACHE | \
2135 CPUID_4_LEVEL(1) | \
2136 CPUID_4_SELF_INIT_LEVEL;
2137 *ebx = (L1I_LINE_SIZE - 1) | \
2138 ((L1I_PARTITIONS - 1) << 12) | \
2139 ((L1I_ASSOCIATIVITY - 1) << 22);
2140 *ecx = L1I_SETS - 1;
2141 *edx = CPUID_4_NO_INVD_SHARING;
2142 break;
2143 case 2: /* L2 cache info */
2144 *eax |= CPUID_4_TYPE_UNIFIED | \
2145 CPUID_4_LEVEL(2) | \
2146 CPUID_4_SELF_INIT_LEVEL;
2147 if (cs->nr_threads > 1) {
2148 *eax |= (cs->nr_threads - 1) << 14;
2150 *ebx = (L2_LINE_SIZE - 1) | \
2151 ((L2_PARTITIONS - 1) << 12) | \
2152 ((L2_ASSOCIATIVITY - 1) << 22);
2153 *ecx = L2_SETS - 1;
2154 *edx = CPUID_4_NO_INVD_SHARING;
2155 break;
2156 default: /* end of info */
2157 *eax = 0;
2158 *ebx = 0;
2159 *ecx = 0;
2160 *edx = 0;
2161 break;
2165 /* QEMU gives out its own APIC IDs; never pass down the host's bits 31..26. */
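/* EAX bits 31..26 hold the maximum number of addressable core IDs sharing
 * this cache, minus one; e.g. nr_cores == 4 is encoded as 3 in the
 * statement below and guests derive 4 cores from it.
 */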
2166 if ((*eax & 31) && cs->nr_cores > 1) {
2167 *eax |= (cs->nr_cores - 1) << 26;
2169 break;
2170 case 5:
2171 /* mwait info: needed for Core compatibility */
2172 *eax = 0; /* Smallest monitor-line size in bytes */
2173 *ebx = 0; /* Largest monitor-line size in bytes */
2174 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2175 *edx = 0;
2176 break;
2177 case 6:
2178 /* Thermal and Power Leaf */
2179 *eax = 0;
2180 *ebx = 0;
2181 *ecx = 0;
2182 *edx = 0;
2183 break;
2184 case 7:
2185 /* Structured Extended Feature Flags Enumeration Leaf */
2186 if (count == 0) {
2187 *eax = 0; /* Maximum ECX value for sub-leaves */
2188 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2189 *ecx = 0; /* Reserved */
2190 *edx = 0; /* Reserved */
2191 } else {
2192 *eax = 0;
2193 *ebx = 0;
2194 *ecx = 0;
2195 *edx = 0;
2197 break;
2198 case 9:
2199 /* Direct Cache Access Information Leaf */
2200 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2201 *ebx = 0;
2202 *ecx = 0;
2203 *edx = 0;
2204 break;
2205 case 0xA:
2206 /* Architectural Performance Monitoring Leaf */
2207 if (kvm_enabled() && cpu->enable_pmu) {
2208 KVMState *s = cs->kvm_state;
2210 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2211 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2212 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2213 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2214 } else {
2215 *eax = 0;
2216 *ebx = 0;
2217 *ecx = 0;
2218 *edx = 0;
2220 break;
2221 case 0xD: {
2222 KVMState *s = cs->kvm_state;
2223 uint64_t kvm_mask;
2224 int i;
2226 /* Processor Extended State */
2227 *eax = 0;
2228 *ebx = 0;
2229 *ecx = 0;
2230 *edx = 0;
2231 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) || !kvm_enabled()) {
2232 break;
2234 kvm_mask =
2235 kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX) |
2236 ((uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32);
2238 if (count == 0) {
2239 *ecx = 0x240;
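/* 0x240 = 576 bytes: the 512-byte legacy FXSAVE region plus the 64-byte
 * XSAVE header.  The loop below grows ECX to cover every extended save
 * area that is both enabled for the guest and supported by KVM.
 */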
2240 for (i = 2; i < ARRAY_SIZE(ext_save_areas); i++) {
2241 const ExtSaveArea *esa = &ext_save_areas[i];
2242 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2243 (kvm_mask & (1 << i)) != 0) {
2244 if (i < 32) {
2245 *eax |= 1 << i;
2246 } else {
2247 *edx |= 1 << (i - 32);
2249 *ecx = MAX(*ecx, esa->offset + esa->size);
2252 *eax |= kvm_mask & (XSTATE_FP | XSTATE_SSE);
2253 *ebx = *ecx;
2254 } else if (count == 1) {
2255 *eax = kvm_arch_get_supported_cpuid(s, 0xd, 1, R_EAX);
2256 } else if (count < ARRAY_SIZE(ext_save_areas)) {
2257 const ExtSaveArea *esa = &ext_save_areas[count];
2258 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2259 (kvm_mask & (1 << count)) != 0) {
2260 *eax = esa->size;
2261 *ebx = esa->offset;
2264 break;
2266 case 0x80000000:
2267 *eax = env->cpuid_xlevel;
2268 *ebx = env->cpuid_vendor1;
2269 *edx = env->cpuid_vendor2;
2270 *ecx = env->cpuid_vendor3;
2271 break;
2272 case 0x80000001:
2273 *eax = env->cpuid_version;
2274 *ebx = 0;
2275 *ecx = env->features[FEAT_8000_0001_ECX];
2276 *edx = env->features[FEAT_8000_0001_EDX];
2278 /* The Linux kernel checks for the CmpLegacy bit and
2279 * discards multiple-thread information if it is set.
2280 * So don't set it here for Intel, to keep Linux guests happy.
2281 */
2282 if (cs->nr_cores * cs->nr_threads > 1) {
2283 uint32_t tebx, tecx, tedx;
2284 get_cpuid_vendor(env, &tebx, &tecx, &tedx);
2285 if (tebx != CPUID_VENDOR_INTEL_1 ||
2286 tedx != CPUID_VENDOR_INTEL_2 ||
2287 tecx != CPUID_VENDOR_INTEL_3) {
2288 *ecx |= 1 << 1; /* CmpLegacy bit */
2291 break;
2292 case 0x80000002:
2293 case 0x80000003:
2294 case 0x80000004:
2295 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2296 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2297 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2298 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2299 break;
2300 case 0x80000005:
2301 /* cache info (L1 cache) */
2302 if (cpu->cache_info_passthrough) {
2303 host_cpuid(index, 0, eax, ebx, ecx, edx);
2304 break;
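/* AMD-style L1 reporting: each register packs four byte-sized fields.
 * EAX/EBX describe the 2 MiB and 4 KiB TLBs, while e.g.
 *   ECX = (size in KiB << 24) | (associativity << 16) |
 *         (lines per tag << 8) | line size
 * describes the data cache and EDX the instruction cache.
 */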
2306 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2307 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2308 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2309 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2310 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2311 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2312 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2313 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2314 break;
2315 case 0x80000006:
2316 /* cache info (L2 cache) */
2317 if (cpu->cache_info_passthrough) {
2318 host_cpuid(index, 0, eax, ebx, ecx, edx);
2319 break;
2321 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2322 (L2_DTLB_2M_ENTRIES << 16) | \
2323 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2324 (L2_ITLB_2M_ENTRIES);
2325 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2326 (L2_DTLB_4K_ENTRIES << 16) | \
2327 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2328 (L2_ITLB_4K_ENTRIES);
2329 *ecx = (L2_SIZE_KB_AMD << 16) | \
2330 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2331 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2332 *edx = ((L3_SIZE_KB/512) << 18) | \
2333 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2334 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2335 break;
2336 case 0x80000008:
2337 /* virtual & phys address size in low 2 bytes. */
2338 /* XXX: This value must match the one used in the MMU code. */
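/* EAX[7:0] is the physical-address width and EAX[15:8] the linear-address
 * width, so e.g. 0x3028 below decodes as 48 virtual / 40 physical bits.
 */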
2339 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2340 /* 64 bit processor */
2341 /* XXX: The physical address space is limited to 42 bits in exec.c. */
2342 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
2343 } else {
2344 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
2345 *eax = 0x00000024; /* 36 bits physical */
2346 } else {
2347 *eax = 0x00000020; /* 32 bits physical */
2350 *ebx = 0;
2351 *ecx = 0;
2352 *edx = 0;
2353 if (cs->nr_cores * cs->nr_threads > 1) {
2354 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2356 break;
2357 case 0x8000000A:
2358 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2359 *eax = 0x00000001; /* SVM Revision */
2360 *ebx = 0x00000010; /* nr of ASIDs */
2361 *ecx = 0;
2362 *edx = env->features[FEAT_SVM]; /* optional features */
2363 } else {
2364 *eax = 0;
2365 *ebx = 0;
2366 *ecx = 0;
2367 *edx = 0;
2369 break;
2370 case 0xC0000000:
2371 *eax = env->cpuid_xlevel2;
2372 *ebx = 0;
2373 *ecx = 0;
2374 *edx = 0;
2375 break;
2376 case 0xC0000001:
2377 /* Support for VIA CPU's CPUID instruction */
2378 *eax = env->cpuid_version;
2379 *ebx = 0;
2380 *ecx = 0;
2381 *edx = env->features[FEAT_C000_0001_EDX];
2382 break;
2383 case 0xC0000002:
2384 case 0xC0000003:
2385 case 0xC0000004:
2386 /* Reserved for future use; currently filled with zeros */
2387 *eax = 0;
2388 *ebx = 0;
2389 *ecx = 0;
2390 *edx = 0;
2391 break;
2392 default:
2393 /* reserved values: zero */
2394 *eax = 0;
2395 *ebx = 0;
2396 *ecx = 0;
2397 *edx = 0;
2398 break;
2402 /* CPUClass::reset() */
2403 static void x86_cpu_reset(CPUState *s)
2405 X86CPU *cpu = X86_CPU(s);
2406 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2407 CPUX86State *env = &cpu->env;
2408 int i;
2410 xcc->parent_reset(s);
2413 memset(env, 0, offsetof(CPUX86State, pat));
2415 tlb_flush(s, 1);
2417 env->old_exception = -1;
2419 /* init to reset state */
2421 #ifdef CONFIG_SOFTMMU
2422 env->hflags |= HF_SOFTMMU_MASK;
2423 #endif
2424 env->hflags2 |= HF2_GIF_MASK;
2426 cpu_x86_update_cr0(env, 0x60000010);
2427 env->a20_mask = ~0x0;
2428 env->smbase = 0x30000;
2430 env->idt.limit = 0xffff;
2431 env->gdt.limit = 0xffff;
2432 env->ldt.limit = 0xffff;
2433 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2434 env->tr.limit = 0xffff;
2435 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
2437 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2438 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2439 DESC_R_MASK | DESC_A_MASK);
2440 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2441 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2442 DESC_A_MASK);
2443 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2444 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2445 DESC_A_MASK);
2446 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2447 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2448 DESC_A_MASK);
2449 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2450 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2451 DESC_A_MASK);
2452 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2453 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2454 DESC_A_MASK);
2456 env->eip = 0xfff0;
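/* With CS.base = 0xffff0000 (loaded above), execution starts at physical
 * 0xfffffff0, the architectural x86 reset vector just below 4 GiB.
 */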
2457 env->regs[R_EDX] = env->cpuid_version;
2459 env->eflags = 0x2;
2461 /* FPU init */
2462 for (i = 0; i < 8; i++) {
2463 env->fptags[i] = 1;
2465 env->fpuc = 0x37f;
2467 env->mxcsr = 0x1f80;
2468 env->xstate_bv = XSTATE_FP | XSTATE_SSE;
2470 env->pat = 0x0007040600070406ULL;
2471 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2473 memset(env->dr, 0, sizeof(env->dr));
2474 env->dr[6] = DR6_FIXED_1;
2475 env->dr[7] = DR7_FIXED_1;
2476 cpu_breakpoint_remove_all(s, BP_CPU);
2477 cpu_watchpoint_remove_all(s, BP_CPU);
2479 env->tsc_adjust = 0;
2480 env->tsc = 0;
2482 #if !defined(CONFIG_USER_ONLY)
2483 /* We hard-wire the BSP to the first CPU. */
2484 if (s->cpu_index == 0) {
2485 apic_designate_bsp(cpu->apic_state);
2488 s->halted = !cpu_is_bsp(cpu);
2489 #endif
2492 #ifndef CONFIG_USER_ONLY
2493 bool cpu_is_bsp(X86CPU *cpu)
2495 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2498 /* TODO: remove me, when reset over QOM tree is implemented */
2499 static void x86_cpu_machine_reset_cb(void *opaque)
2501 X86CPU *cpu = opaque;
2502 cpu_reset(CPU(cpu));
2504 #endif
2506 static void mce_init(X86CPU *cpu)
2508 CPUX86State *cenv = &cpu->env;
2509 unsigned int bank;
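/* Expose MCE/MCA state only for family >= 6 parts that advertise both
 * CPUID.01H:EDX.MCE and .MCA: mcg_cap carries the bank count in its low
 * byte plus the default capability flags, and every bank's MCi_CTL is
 * enabled (all ones).
 */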
2511 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2512 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2513 (CPUID_MCE | CPUID_MCA)) {
2514 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2515 cenv->mcg_ctl = ~(uint64_t)0;
2516 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2517 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2522 #ifndef CONFIG_USER_ONLY
2523 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2525 CPUX86State *env = &cpu->env;
2526 DeviceState *dev = DEVICE(cpu);
2527 APICCommonState *apic;
2528 const char *apic_type = "apic";
2530 if (kvm_irqchip_in_kernel()) {
2531 apic_type = "kvm-apic";
2532 } else if (xen_enabled()) {
2533 apic_type = "xen-apic";
2536 cpu->apic_state = qdev_try_create(qdev_get_parent_bus(dev), apic_type);
2537 if (cpu->apic_state == NULL) {
2538 error_setg(errp, "APIC device '%s' could not be created", apic_type);
2539 return;
2542 object_property_add_child(OBJECT(cpu), "apic",
2543 OBJECT(cpu->apic_state), NULL);
2544 qdev_prop_set_uint8(cpu->apic_state, "id", env->cpuid_apic_id);
2545 /* TODO: convert to link<> */
2546 apic = APIC_COMMON(cpu->apic_state);
2547 apic->cpu = cpu;
2550 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2552 if (cpu->apic_state == NULL) {
2553 return;
2556 if (qdev_init(cpu->apic_state)) {
2557 error_setg(errp, "APIC device '%s' could not be initialized",
2558 object_get_typename(OBJECT(cpu->apic_state)));
2559 return;
2562 #else
2563 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2566 #endif
2568 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
2570 CPUState *cs = CPU(dev);
2571 X86CPU *cpu = X86_CPU(dev);
2572 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
2573 CPUX86State *env = &cpu->env;
2574 Error *local_err = NULL;
2576 if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
2577 env->cpuid_level = 7;
2580 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
2581 * CPUID[1].EDX.
2582 */
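/* For example, if the guest model sets CPUID[1].EDX.MMX, the same MMX bit
 * is mirrored into CPUID[8000_0001].EDX below; bits outside
 * CPUID_EXT2_AMD_ALIASES are left exactly as the model defined them.
 */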
2583 if (env->cpuid_vendor1 == CPUID_VENDOR_AMD_1 &&
2584 env->cpuid_vendor2 == CPUID_VENDOR_AMD_2 &&
2585 env->cpuid_vendor3 == CPUID_VENDOR_AMD_3) {
2586 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
2587 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
2588 & CPUID_EXT2_AMD_ALIASES);
2591 if (!kvm_enabled()) {
2592 env->features[FEAT_1_EDX] &= TCG_FEATURES;
2593 env->features[FEAT_1_ECX] &= TCG_EXT_FEATURES;
2594 env->features[FEAT_8000_0001_EDX] &= (TCG_EXT2_FEATURES
2595 #ifdef TARGET_X86_64
2596 | CPUID_EXT2_SYSCALL | CPUID_EXT2_LM
2597 #endif
2599 env->features[FEAT_8000_0001_ECX] &= TCG_EXT3_FEATURES;
2600 env->features[FEAT_SVM] &= TCG_SVM_FEATURES;
2601 } else {
2602 KVMState *s = kvm_state;
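/* "check" only warns about feature bits the host cannot provide (inside
 * kvm_check_features_against_host); "enforce" turns such a mismatch into
 * a hard error here.  Either way, unsupported bits are masked out by
 * filter_features_for_kvm() below.
 */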
2603 if ((cpu->check_cpuid || cpu->enforce_cpuid)
2604 && kvm_check_features_against_host(s, cpu) && cpu->enforce_cpuid) {
2605 error_setg(&local_err,
2606 "Host's CPU doesn't support requested features");
2607 goto out;
2609 filter_features_for_kvm(cpu);
2612 #ifndef CONFIG_USER_ONLY
2613 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
2615 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
2616 x86_cpu_apic_create(cpu, &local_err);
2617 if (local_err != NULL) {
2618 goto out;
2621 #endif
2623 mce_init(cpu);
2624 qemu_init_vcpu(cs);
2626 x86_cpu_apic_realize(cpu, &local_err);
2627 if (local_err != NULL) {
2628 goto out;
2630 cpu_reset(cs);
2632 xcc->parent_realize(dev, &local_err);
2633 out:
2634 if (local_err != NULL) {
2635 error_propagate(errp, local_err);
2636 return;
2640 /* Enables contiguous-apic-ID mode, for compatibility */
2641 static bool compat_apic_id_mode;
2643 void enable_compat_apic_id_mode(void)
2645 compat_apic_id_mode = true;
2648 /* Calculates initial APIC ID for a specific CPU index
2649 *
2650 * Currently we need to be able to calculate the APIC ID from the CPU index
2651 * alone (without requiring a CPU object), as the QEMU<->SeaBIOS interfaces have
2652 * no concept of "CPU index", and the NUMA tables on fw_cfg need the APIC ID of
2653 * all CPUs up to max_cpus.
2654 */
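/* Illustrative example, assuming the usual ceil-log2 field widths in
 * x86_apicid_from_cpu_idx(): with smp_cores=3, smp_threads=2 the core field
 * is widened to 2 bits, so the first CPU of the second socket (cpu_index 6)
 * gets APIC ID 8; compatibility mode would return 6 instead and print the
 * warning below.
 */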
2655 uint32_t x86_cpu_apic_id_from_index(unsigned int cpu_index)
2657 uint32_t correct_id;
2658 static bool warned;
2660 correct_id = x86_apicid_from_cpu_idx(smp_cores, smp_threads, cpu_index);
2661 if (compat_apic_id_mode) {
2662 if (cpu_index != correct_id && !warned) {
2663 error_report("APIC IDs set in compatibility mode, "
2664 "CPU topology won't match the configuration");
2665 warned = true;
2667 return cpu_index;
2668 } else {
2669 return correct_id;
2673 static void x86_cpu_initfn(Object *obj)
2675 CPUState *cs = CPU(obj);
2676 X86CPU *cpu = X86_CPU(obj);
2677 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
2678 CPUX86State *env = &cpu->env;
2679 static int inited;
2681 cs->env_ptr = env;
2682 cpu_exec_init(env);
2684 object_property_add(obj, "family", "int",
2685 x86_cpuid_version_get_family,
2686 x86_cpuid_version_set_family, NULL, NULL, NULL);
2687 object_property_add(obj, "model", "int",
2688 x86_cpuid_version_get_model,
2689 x86_cpuid_version_set_model, NULL, NULL, NULL);
2690 object_property_add(obj, "stepping", "int",
2691 x86_cpuid_version_get_stepping,
2692 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
2693 object_property_add(obj, "level", "int",
2694 x86_cpuid_get_level,
2695 x86_cpuid_set_level, NULL, NULL, NULL);
2696 object_property_add(obj, "xlevel", "int",
2697 x86_cpuid_get_xlevel,
2698 x86_cpuid_set_xlevel, NULL, NULL, NULL);
2699 object_property_add_str(obj, "vendor",
2700 x86_cpuid_get_vendor,
2701 x86_cpuid_set_vendor, NULL);
2702 object_property_add_str(obj, "model-id",
2703 x86_cpuid_get_model_id,
2704 x86_cpuid_set_model_id, NULL);
2705 object_property_add(obj, "tsc-frequency", "int",
2706 x86_cpuid_get_tsc_freq,
2707 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
2708 object_property_add(obj, "apic-id", "int",
2709 x86_cpuid_get_apic_id,
2710 x86_cpuid_set_apic_id, NULL, NULL, NULL);
2711 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
2712 x86_cpu_get_feature_words,
2713 NULL, NULL, (void *)env->features, NULL);
2714 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
2715 x86_cpu_get_feature_words,
2716 NULL, NULL, (void *)cpu->filtered_features, NULL);
2718 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
2719 env->cpuid_apic_id = x86_cpu_apic_id_from_index(cs->cpu_index);
2721 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
2723 /* init various static tables used in TCG mode */
2724 if (tcg_enabled() && !inited) {
2725 inited = 1;
2726 optimize_flags_init();
2727 #ifndef CONFIG_USER_ONLY
2728 cpu_set_debug_excp_handler(breakpoint_handler);
2729 #endif
2733 static int64_t x86_cpu_get_arch_id(CPUState *cs)
2735 X86CPU *cpu = X86_CPU(cs);
2736 CPUX86State *env = &cpu->env;
2738 return env->cpuid_apic_id;
2741 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
2743 X86CPU *cpu = X86_CPU(cs);
2745 return cpu->env.cr[0] & CR0_PG_MASK;
2748 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
2750 X86CPU *cpu = X86_CPU(cs);
2752 cpu->env.eip = value;
2755 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
2757 X86CPU *cpu = X86_CPU(cs);
2759 cpu->env.eip = tb->pc - tb->cs_base;
2762 static bool x86_cpu_has_work(CPUState *cs)
2764 X86CPU *cpu = X86_CPU(cs);
2765 CPUX86State *env = &cpu->env;
2767 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
2768 CPU_INTERRUPT_POLL)) &&
2769 (env->eflags & IF_MASK)) ||
2770 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
2771 CPU_INTERRUPT_INIT |
2772 CPU_INTERRUPT_SIPI |
2773 CPU_INTERRUPT_MCE));
2776 static Property x86_cpu_properties[] = {
2777 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
2778 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
2779 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
2780 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
2781 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
2782 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, false),
2783 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
2784 DEFINE_PROP_END_OF_LIST()
2787 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
2789 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2790 CPUClass *cc = CPU_CLASS(oc);
2791 DeviceClass *dc = DEVICE_CLASS(oc);
2793 xcc->parent_realize = dc->realize;
2794 dc->realize = x86_cpu_realizefn;
2795 dc->bus_type = TYPE_ICC_BUS;
2796 dc->props = x86_cpu_properties;
2798 xcc->parent_reset = cc->reset;
2799 cc->reset = x86_cpu_reset;
2800 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
2802 cc->class_by_name = x86_cpu_class_by_name;
2803 cc->parse_features = x86_cpu_parse_featurestr;
2804 cc->has_work = x86_cpu_has_work;
2805 cc->do_interrupt = x86_cpu_do_interrupt;
2806 cc->dump_state = x86_cpu_dump_state;
2807 cc->set_pc = x86_cpu_set_pc;
2808 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
2809 cc->gdb_read_register = x86_cpu_gdb_read_register;
2810 cc->gdb_write_register = x86_cpu_gdb_write_register;
2811 cc->get_arch_id = x86_cpu_get_arch_id;
2812 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
2813 #ifdef CONFIG_USER_ONLY
2814 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
2815 #else
2816 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
2817 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
2818 cc->write_elf64_note = x86_cpu_write_elf64_note;
2819 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
2820 cc->write_elf32_note = x86_cpu_write_elf32_note;
2821 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
2822 cc->vmsd = &vmstate_x86_cpu;
2823 #endif
2824 cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
2827 static const TypeInfo x86_cpu_type_info = {
2828 .name = TYPE_X86_CPU,
2829 .parent = TYPE_CPU,
2830 .instance_size = sizeof(X86CPU),
2831 .instance_init = x86_cpu_initfn,
2832 .abstract = true,
2833 .class_size = sizeof(X86CPUClass),
2834 .class_init = x86_cpu_common_class_init,
2837 static void x86_cpu_register_types(void)
2839 int i;
2841 type_register_static(&x86_cpu_type_info);
2842 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2843 x86_register_cpudef_type(&builtin_x86_defs[i]);
2845 #ifdef CONFIG_KVM
2846 type_register_static(&host_x86_cpu_type_info);
2847 #endif
2850 type_init(x86_cpu_register_types)