/*
 * i386 CPUID helper functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu-option.h"
#include "qemu-config.h"

#include "qapi/qapi-visit-core.h"
#include "arch_init.h"

#if defined(CONFIG_KVM)
#include <linux/kvm_para.h>
#endif
/* feature flags taken from "Intel Processor Identification and the CPUID
 * Instruction" and AMD's "CPUID Specification".  In cases of disagreement
 * between feature naming conventions, aliases may be added.
 */
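/* A '|' inside an entry below (e.g. "pni|sse3") lists alternative spellings
 * for the same bit; either name is accepted on the command line
 * (illustrative note, see altcmp() further down).
 */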
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse",
    "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep",
    "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
    NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss",
    "ht" /* Intel htt */, "tm", "ia64", "pbe",
};
static const char *ext_feature_name[] = {
    "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
    "ds_cpl", "vmx", "smx", "est",
    "tm2", "ssse3", "cid", NULL,
    "fma", "cx16", "xtpr", "pdcm",
    NULL, "pcid", "dca", "sse4.1|sse4_1",
    "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
    "tsc-deadline", "aes", "xsave", "osxsave",
    "avx", NULL, NULL, "hypervisor",
};
/* Feature names that are already defined on feature_name[] but are set on
 * CPUID[8000_0001].EDX on AMD CPUs don't have their names on
 * ext2_feature_name[].  They are copied automatically to cpuid_ext2_features
 * if and only if CPU vendor is AMD.
 */
static const char *ext2_feature_name[] = {
    NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
    NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
    NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
    NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
    NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
    "nx|xd", NULL, "mmxext", NULL /* mmx */,
    NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
    NULL, "lm|i64", "3dnowext", "3dnow",
};
static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
    "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", "ibs", "xop",
    "skinit", "wdt", NULL, NULL,
    "fma4", NULL, "cvt16", "nodeid_msr",
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
static const char *kvm_feature_name[] = {
    "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
    "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
static const char *svm_feature_name[] = {
    "npt", "lbrv", "svm_lock", "nrip_save",
    "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
    NULL, NULL, "pause_filter", NULL,
    "pfthreshold", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
static const char *cpuid_7_0_ebx_feature_name[] = {
    "fsgsbase", NULL, NULL, "bmi1", "hle", "avx2", NULL, "smep",
    "bmi2", "erms", "invpcid", "rtm", NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, "smap", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
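/* Note (descriptive): the index of each name above is its bit position in
 * the corresponding CPUID register; e.g. "fsgsbase" is bit 0 and "smep" is
 * bit 7 of CPUID[EAX=7,ECX=0].EBX.
 */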
/* collects per-function cpuid data
 */
typedef struct model_features_t {
    uint32_t *guest_feat;
    uint32_t *host_feat;
    uint32_t check_feat;
    const char **flag_names;
    uint32_t cpuid;
} model_features_t;

int check_cpuid = 0;
int enforce_cpuid = 0;
#if defined(CONFIG_KVM)
static uint32_t kvm_default_features = (1 << KVM_FEATURE_CLOCKSOURCE) |
        (1 << KVM_FEATURE_NOP_IO_DELAY) |
        (1 << KVM_FEATURE_MMU_OP) |
        (1 << KVM_FEATURE_CLOCKSOURCE2) |
        (1 << KVM_FEATURE_ASYNC_PF) |
        (1 << KVM_FEATURE_STEAL_TIME) |
        (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT);
static const uint32_t kvm_pv_eoi_features = (0x1 << KVM_FEATURE_PV_EOI);
#else
static uint32_t kvm_default_features = 0;
static const uint32_t kvm_pv_eoi_features = 0;
#endif
void enable_kvm_pv_eoi(void)
{
    kvm_default_features |= kvm_pv_eoi_features;
}
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
#if defined(CONFIG_KVM)
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#else
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#endif

    if (eax) {
        *eax = vec[0];
    }
    if (ebx) {
        *ebx = vec[1];
    }
    if (ecx) {
        *ecx = vec[2];
    }
    if (edx) {
        *edx = vec[3];
    }
#endif
}
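/* Typical use (illustrative), as cpu_x86_fill_host() does below:
 *
 *     uint32_t eax, ebx, ecx, edx;
 *     host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
 *     // ebx/edx/ecx now hold the 12-byte vendor string, eax the max leaf
 */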
#define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
/* general substring compare of *[s1..e1) and *[s2..e2).  sx is start of
 * a substring; ex, if not NULL, points to the first char after the
 * substring, otherwise the string is assumed to be sized by a terminating
 * nul.  Return lexical ordering of *s1:*s2.
 */
static int sstrcmp(const char *s1, const char *e1, const char *s2,
                   const char *e2)
{
    for (;;) {
        if (!*s1 || !*s2 || *s1 != *s2) {
            return *s1 - *s2;
        }
        ++s1, ++s2;
        if (s1 == e1 && s2 == e2) {
            return 0;
        } else if (s1 == e1) {
            return *s2;
        } else if (s2 == e2) {
            return *s1;
        }
    }
}
/* compare *[s..e) to *altstr.  *altstr may be a simple string or multiple
 * '|' delimited (possibly empty) strings in which case search for a match
 * within the alternatives proceeds left to right.  Return 0 for success,
 * non-zero otherwise.
 */
static int altcmp(const char *s, const char *e, const char *altstr)
{
    const char *p, *q;

    for (q = p = altstr; ; ) {
        while (*p && *p != '|') {
            ++p;
        }
        if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p))) {
            return 0;
        }
        if (!*p) {
            return 1;
        } else {
            q = ++p;
        }
    }
}
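/* Example (illustrative): altcmp(s, e, "sse4.1|sse4_1") returns 0 when
 * *[s..e) is either "sse4.1" or "sse4_1", because each '|'-separated
 * alternative is compared in turn with sstrcmp().
 */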
/* search featureset for flag *[s..e), if found set corresponding bit in
 * *pval and return true, otherwise return false
 */
static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
                           const char **featureset)
{
    uint32_t mask;
    const char **ppc;
    bool found = false;

    for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
        if (*ppc && !altcmp(s, e, *ppc)) {
            *pval |= mask;
            found = true;
        }
    }
    return found;
}
static void add_flagname_to_bitmaps(const char *flagname, uint32_t *features,
                                    uint32_t *ext_features,
                                    uint32_t *ext2_features,
                                    uint32_t *ext3_features,
                                    uint32_t *kvm_features,
                                    uint32_t *svm_features,
                                    uint32_t *cpuid_7_0_ebx_features)
{
    if (!lookup_feature(features, flagname, NULL, feature_name) &&
        !lookup_feature(ext_features, flagname, NULL, ext_feature_name) &&
        !lookup_feature(ext2_features, flagname, NULL, ext2_feature_name) &&
        !lookup_feature(ext3_features, flagname, NULL, ext3_feature_name) &&
        !lookup_feature(kvm_features, flagname, NULL, kvm_feature_name) &&
        !lookup_feature(svm_features, flagname, NULL, svm_feature_name) &&
        !lookup_feature(cpuid_7_0_ebx_features, flagname, NULL,
                        cpuid_7_0_ebx_feature_name))
            fprintf(stderr, "CPU feature %s not found\n", flagname);
}
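/* Example (illustrative): add_flagname_to_bitmaps("avx", ...) finds "avx" at
 * index 28 of ext_feature_name[] and so sets bit 28 (CPUID_EXT_AVX) in
 * *ext_features; an unknown name only produces the warning above.
 */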
typedef struct x86_def_t {
    struct x86_def_t *next;
    uint32_t vendor1, vendor2, vendor3;
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t kvm_features, svm_features;
    /* Store the results of Centaur's CPUID instructions */
    uint32_t ext4_features;
    /* The feature bits on CPUID[EAX=7,ECX=0].EBX */
    uint32_t cpuid_7_0_ebx_features;
} x86_def_t;
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS)
          /* partly implemented:
             CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64)
             CPUID_PSE36 (needed for Solaris) */
          /* missing:
             CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | \
          CPUID_EXT_CX16 | CPUID_EXT_POPCNT | \
          CPUID_EXT_HYPERVISOR)
          /* missing:
             CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
             CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_XSAVE */
#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT)
          /* missing:
             CPUID_EXT2_PDPE1GB */
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_SVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP)
/* maintains list of cpu model definitions
 */
static x86_def_t *x86_defs = {NULL};

/* built-in cpu model definitions (deprecated)
 */
static x86_def_t builtin_x86_defs[] = {
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT,
        .ext2_features = (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .ext3_features = CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
            CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
        .xlevel = 0x8000000A,

        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36 | CPUID_VME | CPUID_HT,
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
            CPUID_EXT_POPCNT,
        .ext2_features = (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
            CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
        /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
        .ext3_features = CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
            CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
        .svm_features = CPUID_SVM_NPT | CPUID_SVM_LBRV,
        .xlevel = 0x8000001A,
        .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36 | CPUID_VME | CPUID_DTS | CPUID_ACPI | CPUID_SS |
            CPUID_HT | CPUID_TM | CPUID_PBE,
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
            CPUID_EXT_DTES64 | CPUID_EXT_DSCPL | CPUID_EXT_VMX | CPUID_EXT_EST |
            CPUID_EXT_TM2 | CPUID_EXT_CX16 | CPUID_EXT_XTPR | CPUID_EXT_PDCM,
        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .ext3_features = CPUID_EXT3_LAHF_LM,
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
        .vendor1 = CPUID_VENDOR_INTEL_1,
        .vendor2 = CPUID_VENDOR_INTEL_2,
        .vendor3 = CPUID_VENDOR_INTEL_3,
        /* Missing: CPUID_VME, CPUID_HT */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_CX16,
        /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
        .ext2_features = (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
        .xlevel = 0x80000008,
        .model_id = "Common KVM processor"
        .features = PPRO_FEATURES,
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_POPCNT,
        .xlevel = 0x80000004,

        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3,
        .ext2_features = PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES,
        .xlevel = 0x80000008,
        .model_id = "Common 32-bit KVM processor"
        .features = PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_DTS | CPUID_ACPI |
            CPUID_SS | CPUID_HT | CPUID_TM | CPUID_PBE,
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_VMX |
            CPUID_EXT_EST | CPUID_EXT_TM2 | CPUID_EXT_XTPR | CPUID_EXT_PDCM,
        .ext2_features = CPUID_EXT2_NX,
        .xlevel = 0x80000008,
        .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
        .features = I486_FEATURES,

        .features = PENTIUM_FEATURES,

        .features = PENTIUM2_FEATURES,

        .features = PENTIUM3_FEATURES,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
            CPUID_MCA,
        .ext2_features = (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
            CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .xlevel = 0x80000008,
        /* original is on level 10 */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME | CPUID_DTS |
            CPUID_ACPI | CPUID_SS | CPUID_HT | CPUID_TM | CPUID_PBE,
            /* Some CPUs got no CPUID_SEP */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
            CPUID_EXT_DSCPL | CPUID_EXT_EST | CPUID_EXT_TM2 | CPUID_EXT_XTPR,
        .ext2_features = (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
            CPUID_EXT2_NX,
        .ext3_features = CPUID_EXT3_LAHF_LM,
        .xlevel = 0x8000000A,
        .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
        .vendor1 = CPUID_VENDOR_INTEL_1,
        .vendor2 = CPUID_VENDOR_INTEL_2,
        .vendor3 = CPUID_VENDOR_INTEL_3,
        .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .ext_features = CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .ext3_features = CPUID_EXT3_LAHF_LM,
        .xlevel = 0x8000000A,
        .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
        .vendor1 = CPUID_VENDOR_INTEL_1,
        .vendor2 = CPUID_VENDOR_INTEL_2,
        .vendor3 = CPUID_VENDOR_INTEL_3,
        .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .ext_features = CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_SSE3,
        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .ext3_features = CPUID_EXT3_LAHF_LM,
        .xlevel = 0x8000000A,
        .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
        .vendor1 = CPUID_VENDOR_INTEL_1,
        .vendor2 = CPUID_VENDOR_INTEL_2,
        .vendor3 = CPUID_VENDOR_INTEL_3,
        .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .ext_features = CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .ext3_features = CPUID_EXT3_LAHF_LM,
        .xlevel = 0x8000000A,
        .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
        .vendor1 = CPUID_VENDOR_INTEL_1,
        .vendor2 = CPUID_VENDOR_INTEL_2,
        .vendor3 = CPUID_VENDOR_INTEL_3,
        .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .ext_features = CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_SSE3,
        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .ext3_features = CPUID_EXT3_LAHF_LM,
        .xlevel = 0x8000000A,
        .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
        .name = "SandyBridge",
        .vendor1 = CPUID_VENDOR_INTEL_1,
        .vendor2 = CPUID_VENDOR_INTEL_2,
        .vendor3 = CPUID_VENDOR_INTEL_3,
        .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .ext_features = CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
            CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
            CPUID_EXT_SSE3,
        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .ext3_features = CPUID_EXT3_LAHF_LM,
        .xlevel = 0x8000000A,
        .model_id = "Intel Xeon E312xx (Sandy Bridge)",
        .name = "Opteron_G1",
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .ext_features = CPUID_EXT_SSE3,
        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
            CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
            CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
            CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
            CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
            CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
        .xlevel = 0x80000008,
        .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
        .name = "Opteron_G2",
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .ext_features = CPUID_EXT_CX16 | CPUID_EXT_SSE3,
        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
            CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
            CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
            CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
            CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
            CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
            CPUID_EXT2_DE | CPUID_EXT2_FPU,
        .ext3_features = CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
        .xlevel = 0x80000008,
        .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
        .name = "Opteron_G3",
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .ext_features = CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
            CPUID_EXT_SSE3,
        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
            CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
            CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
            CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
            CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
            CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
            CPUID_EXT2_DE | CPUID_EXT2_FPU,
        .ext3_features = CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
            CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
        .xlevel = 0x80000008,
        .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
        .name = "Opteron_G4",
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .features = CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .ext_features = CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
            CPUID_EXT_SSE3,
        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
            CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
            CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
            CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
            CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
            CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
            CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
        .ext3_features = CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
            CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
            CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
            CPUID_EXT3_LAHF_LM,
        .xlevel = 0x8000001A,
        .model_id = "AMD Opteron 62xx class CPU",
};
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
    int i;

    for (i = 0; i < 3; i++) {
        host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
        memcpy(str + i * 16 +  0, &eax, 4);
        memcpy(str + i * 16 +  4, &ebx, 4);
        memcpy(str + i * 16 +  8, &ecx, 4);
        memcpy(str + i * 16 + 12, &edx, 4);
    }
    return 0;
}
static int cpu_x86_fill_host(x86_def_t *x86_cpu_def)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    x86_cpu_def->name = "host";
    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->level = eax;
    x86_cpu_def->vendor1 = ebx;
    x86_cpu_def->vendor2 = edx;
    x86_cpu_def->vendor3 = ecx;

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    x86_cpu_def->model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    x86_cpu_def->stepping = eax & 0x0F;
    x86_cpu_def->ext_features = ecx;
    x86_cpu_def->features = edx;

    if (kvm_enabled() && x86_cpu_def->level >= 7) {
        x86_cpu_def->cpuid_7_0_ebx_features = kvm_arch_get_supported_cpuid(kvm_state, 0x7, 0, R_EBX);
    } else {
        x86_cpu_def->cpuid_7_0_ebx_features = 0;
    }

    host_cpuid(0x80000000, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->xlevel = eax;

    host_cpuid(0x80000001, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->ext2_features = edx;
    x86_cpu_def->ext3_features = ecx;
    cpu_x86_fill_model_id(x86_cpu_def->model_id);
    x86_cpu_def->vendor_override = 0;

    /* Call Centaur's CPUID instruction. */
    if (x86_cpu_def->vendor1 == CPUID_VENDOR_VIA_1 &&
        x86_cpu_def->vendor2 == CPUID_VENDOR_VIA_2 &&
        x86_cpu_def->vendor3 == CPUID_VENDOR_VIA_3) {
        host_cpuid(0xC0000000, 0, &eax, &ebx, &ecx, &edx);
        if (eax >= 0xC0000001) {
            /* Support VIA max extended level */
            x86_cpu_def->xlevel2 = eax;
            host_cpuid(0xC0000001, 0, &eax, &ebx, &ecx, &edx);
            x86_cpu_def->ext4_features = edx;
        }
    }

    /*
     * Every SVM feature requires emulation support in KVM - so we can't just
     * read the host features here.  KVM might even support SVM features not
     * available on the host hardware.  Just set all bits and mask out the
     * unsupported ones later.
     */
    x86_cpu_def->svm_features = -1;

    return 0;
}
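/* Worked example (illustrative): with CPUID[1].EAX = 0x000206A7 the decode
 * above yields family = 0x6 + 0x0 = 6, model = 0xA | (0x2 << 4) = 0x2A and
 * stepping = 7, i.e. the usual base/extended family and model composition.
 */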
static int unavailable_host_feature(struct model_features_t *f, uint32_t mask)
{
    int i;

    for (i = 0; i < 32; ++i)
        if (1 << i & mask) {
            fprintf(stderr, "warning: host cpuid %04x_%04x lacks requested"
                " flag '%s' [0x%08x]\n",
                f->cpuid >> 16, f->cpuid & 0xffff,
                f->flag_names[i] ? f->flag_names[i] : "[reserved]", mask);
            break;
        }
    return 0;
}
/* best effort attempt to inform user requested cpu flags aren't making
 * their way to the guest.  Note: ft[].check_feat ideally should be
 * specified via a guest_def field to suppress report of extraneous flags.
 */
static int check_features_against_host(x86_def_t *guest_def)
{
    x86_def_t host_def;
    uint32_t mask;
    int rv, i;
    struct model_features_t ft[] = {
        {&guest_def->features, &host_def.features,
            ~0, feature_name, 0x00000000},
        {&guest_def->ext_features, &host_def.ext_features,
            ~CPUID_EXT_HYPERVISOR, ext_feature_name, 0x00000001},
        {&guest_def->ext2_features, &host_def.ext2_features,
            ~PPRO_FEATURES, ext2_feature_name, 0x80000000},
        {&guest_def->ext3_features, &host_def.ext3_features,
            ~CPUID_EXT3_SVM, ext3_feature_name, 0x80000001}};

    cpu_x86_fill_host(&host_def);
    for (rv = 0, i = 0; i < ARRAY_SIZE(ft); ++i)
        for (mask = 1; mask; mask <<= 1)
            if (ft[i].check_feat & mask && *ft[i].guest_feat & mask &&
                !(*ft[i].host_feat & mask)) {
                unavailable_host_feature(&ft[i], mask);
                rv = 1;
            }
    return rv;
}
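/* Example output (illustrative): requesting "-cpu qemu64,+avx" on a host
 * whose CPUID lacks AVX prints
 *   warning: host cpuid 0000_0001 lacks requested flag 'avx' [0x10000000]
 * and makes this function return non-zero.
 */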
static void x86_cpuid_version_get_family(Object *obj, Visitor *v, void *opaque,
                                         const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = (env->cpuid_version >> 8) & 0xf;
    if (value == 0xf) {
        value += (env->cpuid_version >> 20) & 0xff;
    }
    visit_type_int(v, &value, name, errp);
}
static void x86_cpuid_version_set_family(Object *obj, Visitor *v, void *opaque,
                                         const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff + 0xf;
    int64_t value;

    visit_type_int(v, &value, name, errp);
    if (error_is_set(errp)) {
        return;
    }
    if (value < min || value > max) {
        error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                  name ? name : "null", value, min, max);
        return;
    }

    env->cpuid_version &= ~0xff00f00;
    if (value > 0x0f) {
        env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
    } else {
        env->cpuid_version |= value << 8;
    }
}
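/* Worked example (illustrative): setting family=21 (0x15) stores base family
 * 0xf plus extended family 0x6, i.e. cpuid_version |= 0xf00 | (0x6 << 20);
 * values of 0xf or less go straight into bits 11..8.
 */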
static void x86_cpuid_version_get_model(Object *obj, Visitor *v, void *opaque,
                                        const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = (env->cpuid_version >> 4) & 0xf;
    value |= ((env->cpuid_version >> 16) & 0xf) << 4;
    visit_type_int(v, &value, name, errp);
}
static void x86_cpuid_version_set_model(Object *obj, Visitor *v, void *opaque,
                                        const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff;
    int64_t value;

    visit_type_int(v, &value, name, errp);
    if (error_is_set(errp)) {
        return;
    }
    if (value < min || value > max) {
        error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                  name ? name : "null", value, min, max);
        return;
    }

    env->cpuid_version &= ~0xf00f0;
    env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
}
static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
                                           void *opaque, const char *name,
                                           Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = env->cpuid_version & 0xf;
    visit_type_int(v, &value, name, errp);
}
static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
                                           void *opaque, const char *name,
                                           Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xf;
    int64_t value;

    visit_type_int(v, &value, name, errp);
    if (error_is_set(errp)) {
        return;
    }
    if (value < min || value > max) {
        error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                  name ? name : "null", value, min, max);
        return;
    }

    env->cpuid_version &= ~0xf;
    env->cpuid_version |= value & 0xf;
}
static void x86_cpuid_get_level(Object *obj, Visitor *v, void *opaque,
                                const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);

    visit_type_uint32(v, &cpu->env.cpuid_level, name, errp);
}

static void x86_cpuid_set_level(Object *obj, Visitor *v, void *opaque,
                                const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);

    visit_type_uint32(v, &cpu->env.cpuid_level, name, errp);
}

static void x86_cpuid_get_xlevel(Object *obj, Visitor *v, void *opaque,
                                 const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);

    visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp);
}

static void x86_cpuid_set_xlevel(Object *obj, Visitor *v, void *opaque,
                                 const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);

    visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp);
}
static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    char *value;
    int i;

    value = (char *)g_malloc(12 + 1);
    for (i = 0; i < 4; i++) {
        value[i] = env->cpuid_vendor1 >> (8 * i);
        value[i + 4] = env->cpuid_vendor2 >> (8 * i);
        value[i + 8] = env->cpuid_vendor3 >> (8 * i);
    }
    value[12] = '\0';
    return value;
}
static void x86_cpuid_set_vendor(Object *obj, const char *value,
                                 Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int i;

    if (strlen(value) != 12) {
        error_set(errp, QERR_PROPERTY_VALUE_BAD, "",
                  "vendor", value);
        return;
    }

    env->cpuid_vendor1 = 0;
    env->cpuid_vendor2 = 0;
    env->cpuid_vendor3 = 0;
    for (i = 0; i < 4; i++) {
        env->cpuid_vendor1 |= ((uint8_t)value[i]) << (8 * i);
        env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
        env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
    }
    env->cpuid_vendor_override = 1;
}
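/* Example (illustrative): the string "GenuineIntel" is packed little-endian,
 * giving cpuid_vendor1 = 0x756e6547 ("Genu"), cpuid_vendor2 = 0x49656e69
 * ("ineI") and cpuid_vendor3 = 0x6c65746e ("ntel").
 */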
static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    char *value;
    int i;

    value = g_malloc(48 + 1);
    for (i = 0; i < 48; i++) {
        value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
    }
    value[48] = '\0';
    return value;
}
static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
                                   Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int c, len, i;

    if (model_id == NULL) {
        model_id = "";
    }
    len = strlen(model_id);
    memset(env->cpuid_model, 0, 48);
    for (i = 0; i < 48; i++) {
        if (i >= len) {
            c = '\0';
        } else {
            c = (uint8_t)model_id[i];
        }
        env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
    }
}
static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, void *opaque,
                                   const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    int64_t value;

    value = cpu->env.tsc_khz * 1000;
    visit_type_int(v, &value, name, errp);
}
static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, void *opaque,
                                   const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    const int64_t min = 0;
    const int64_t max = INT64_MAX;
    int64_t value;

    visit_type_int(v, &value, name, errp);
    if (error_is_set(errp)) {
        return;
    }
    if (value < min || value > max) {
        error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                  name ? name : "null", value, min, max);
        return;
    }

    cpu->env.tsc_khz = value / 1000;
}
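/* The parser below accepts a model name followed by comma-separated options,
 * e.g. (illustrative) "-cpu Nehalem,+aes,-sse4.2,family=6": '+'/'-' toggle
 * named feature bits, "key=value" pairs set numeric or string properties,
 * and bare "check"/"enforce" enable host-feature verification.
 */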
static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
{
    unsigned int i;
    x86_def_t *def;

    char *s = g_strdup(cpu_model);
    char *featurestr, *name = strtok(s, ",");
    /* Features to be added */
    uint32_t plus_features = 0, plus_ext_features = 0;
    uint32_t plus_ext2_features = 0, plus_ext3_features = 0;
    uint32_t plus_kvm_features = kvm_default_features, plus_svm_features = 0;
    uint32_t plus_7_0_ebx_features = 0;
    /* Features to be removed */
    uint32_t minus_features = 0, minus_ext_features = 0;
    uint32_t minus_ext2_features = 0, minus_ext3_features = 0;
    uint32_t minus_kvm_features = 0, minus_svm_features = 0;
    uint32_t minus_7_0_ebx_features = 0;
    uint32_t numvalue;

    for (def = x86_defs; def; def = def->next)
        if (name && !strcmp(name, def->name))
            break;
    if (kvm_enabled() && name && strcmp(name, "host") == 0) {
        cpu_x86_fill_host(x86_cpu_def);
    } else if (!def) {
        goto error;
    } else {
        memcpy(x86_cpu_def, def, sizeof(*def));
    }

    add_flagname_to_bitmaps("hypervisor", &plus_features,
        &plus_ext_features, &plus_ext2_features, &plus_ext3_features,
        &plus_kvm_features, &plus_svm_features, &plus_7_0_ebx_features);

    featurestr = strtok(NULL, ",");

    while (featurestr) {
        char *val, *err;
        if (featurestr[0] == '+') {
            add_flagname_to_bitmaps(featurestr + 1, &plus_features,
                            &plus_ext_features, &plus_ext2_features,
                            &plus_ext3_features, &plus_kvm_features,
                            &plus_svm_features, &plus_7_0_ebx_features);
        } else if (featurestr[0] == '-') {
            add_flagname_to_bitmaps(featurestr + 1, &minus_features,
                            &minus_ext_features, &minus_ext2_features,
                            &minus_ext3_features, &minus_kvm_features,
                            &minus_svm_features, &minus_7_0_ebx_features);
        } else if ((val = strchr(featurestr, '='))) {
            *val = 0;
            val++;
            if (!strcmp(featurestr, "family")) {
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err || numvalue > 0xff + 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->family = numvalue;
            } else if (!strcmp(featurestr, "model")) {
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err || numvalue > 0xff) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->model = numvalue;
            } else if (!strcmp(featurestr, "stepping")) {
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err || numvalue > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->stepping = numvalue;
            } else if (!strcmp(featurestr, "level")) {
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->level = numvalue;
            } else if (!strcmp(featurestr, "xlevel")) {
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                if (numvalue < 0x80000000) {
                    numvalue += 0x80000000;
                }
                x86_cpu_def->xlevel = numvalue;
            } else if (!strcmp(featurestr, "vendor")) {
                if (strlen(val) != 12) {
                    fprintf(stderr, "vendor string must be 12 chars long\n");
                    goto error;
                }
                x86_cpu_def->vendor1 = 0;
                x86_cpu_def->vendor2 = 0;
                x86_cpu_def->vendor3 = 0;
                for (i = 0; i < 4; i++) {
                    x86_cpu_def->vendor1 |= ((uint8_t)val[i]) << (8 * i);
                    x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
                    x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
                }
                x86_cpu_def->vendor_override = 1;
            } else if (!strcmp(featurestr, "model_id")) {
                pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
                        val);
            } else if (!strcmp(featurestr, "tsc_freq")) {
                int64_t tsc_freq;

                tsc_freq = strtosz_suffix_unit(val, &err,
                                               STRTOSZ_DEFSUFFIX_B, 1000);
                if (tsc_freq < 0 || *err) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->tsc_khz = tsc_freq / 1000;
            } else if (!strcmp(featurestr, "hv_spinlocks")) {
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                hyperv_set_spinlock_retries(numvalue);
            } else {
                fprintf(stderr, "unrecognized feature %s\n", featurestr);
                goto error;
            }
        } else if (!strcmp(featurestr, "check")) {
            check_cpuid = 1;
        } else if (!strcmp(featurestr, "enforce")) {
            check_cpuid = enforce_cpuid = 1;
        } else if (!strcmp(featurestr, "hv_relaxed")) {
            hyperv_enable_relaxed_timing(true);
        } else if (!strcmp(featurestr, "hv_vapic")) {
            hyperv_enable_vapic_recommended(true);
        } else {
            fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
            goto error;
        }
        featurestr = strtok(NULL, ",");
    }
    x86_cpu_def->features |= plus_features;
    x86_cpu_def->ext_features |= plus_ext_features;
    x86_cpu_def->ext2_features |= plus_ext2_features;
    x86_cpu_def->ext3_features |= plus_ext3_features;
    x86_cpu_def->kvm_features |= plus_kvm_features;
    x86_cpu_def->svm_features |= plus_svm_features;
    x86_cpu_def->cpuid_7_0_ebx_features |= plus_7_0_ebx_features;
    x86_cpu_def->features &= ~minus_features;
    x86_cpu_def->ext_features &= ~minus_ext_features;
    x86_cpu_def->ext2_features &= ~minus_ext2_features;
    x86_cpu_def->ext3_features &= ~minus_ext3_features;
    x86_cpu_def->kvm_features &= ~minus_kvm_features;
    x86_cpu_def->svm_features &= ~minus_svm_features;
    x86_cpu_def->cpuid_7_0_ebx_features &= ~minus_7_0_ebx_features;

    if (check_features_against_host(x86_cpu_def) && enforce_cpuid)
        goto error;

    if (x86_cpu_def->cpuid_7_0_ebx_features && x86_cpu_def->level < 7) {
        x86_cpu_def->level = 7;
    }

    g_free(s);
    return 0;

error:
    g_free(s);
    return -1;
}
/* generate a composite string into buf of all cpuid names in featureset
 * selected by fbits.  indicate truncation at bufsize in the event of overflow.
 * if flags, suppress names undefined in featureset.
 */
static void listflags(char *buf, int bufsize, uint32_t fbits,
    const char **featureset, uint32_t flags)
{
    const char **p = &featureset[31];
    char *q, *b, bit;
    int nc;

    b = 4 <= bufsize ? buf + (bufsize -= 3) - 1 : NULL;
    *buf = '\0';
    for (q = buf, bit = 31; fbits && bufsize; --p, fbits &= ~(1 << bit), --bit)
        if (fbits & 1 << bit && (*p || !flags)) {
            if (*p) {
                nc = snprintf(q, bufsize, "%s%s", q == buf ? "" : " ", *p);
            } else {
                nc = snprintf(q, bufsize, "%s[%d]", q == buf ? "" : " ", bit);
            }
            if (bufsize <= nc) {
                if (b) {
                    memcpy(b, "...", sizeof("..."));
                }
                return;
            }
            q += nc;
            bufsize -= nc;
        }
}
/* generate CPU information. */
void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    x86_def_t *def;
    char buf[256];

    for (def = x86_defs; def; def = def->next) {
        snprintf(buf, sizeof(buf), "%s", def->name);
        (*cpu_fprintf)(f, "x86 %16s  %-48s\n", buf, def->model_id);
    }
    if (kvm_enabled()) {
        (*cpu_fprintf)(f, "x86 %16s\n", "[host]");
    }
    (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
    listflags(buf, sizeof(buf), (uint32_t)~0, feature_name, 1);
    (*cpu_fprintf)(f, "  %s\n", buf);
    listflags(buf, sizeof(buf), (uint32_t)~0, ext_feature_name, 1);
    (*cpu_fprintf)(f, "  %s\n", buf);
    listflags(buf, sizeof(buf), (uint32_t)~0, ext2_feature_name, 1);
    (*cpu_fprintf)(f, "  %s\n", buf);
    listflags(buf, sizeof(buf), (uint32_t)~0, ext3_feature_name, 1);
    (*cpu_fprintf)(f, "  %s\n", buf);
}
CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    x86_def_t *def;

    for (def = x86_defs; def; def = def->next) {
        CpuDefinitionInfoList *entry;
        CpuDefinitionInfo *info;

        info = g_malloc0(sizeof(*info));
        info->name = g_strdup(def->name);

        entry = g_malloc0(sizeof(*entry));
        entry->value = info;
        entry->next = cpu_list;
        cpu_list = entry;
    }

    return cpu_list;
}
*cpu
)
1378 CPUX86State
*env
= &cpu
->env
;
1379 KVMState
*s
= kvm_state
;
1381 env
->cpuid_features
&=
1382 kvm_arch_get_supported_cpuid(s
, 1, 0, R_EDX
);
1383 env
->cpuid_ext_features
&=
1384 kvm_arch_get_supported_cpuid(s
, 1, 0, R_ECX
);
1385 env
->cpuid_ext2_features
&=
1386 kvm_arch_get_supported_cpuid(s
, 0x80000001, 0, R_EDX
);
1387 env
->cpuid_ext3_features
&=
1388 kvm_arch_get_supported_cpuid(s
, 0x80000001, 0, R_ECX
);
1389 env
->cpuid_svm_features
&=
1390 kvm_arch_get_supported_cpuid(s
, 0x8000000A, 0, R_EDX
);
1391 env
->cpuid_7_0_ebx_features
&=
1392 kvm_arch_get_supported_cpuid(s
, 7, 0, R_EBX
);
1393 env
->cpuid_kvm_features
&=
1394 kvm_arch_get_supported_cpuid(s
, KVM_CPUID_FEATURES
, 0, R_EAX
);
1395 env
->cpuid_ext4_features
&=
1396 kvm_arch_get_supported_cpuid(s
, 0xC0000001, 0, R_EDX
);
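/* Note (descriptive): the masking above only keeps feature words that the
 * kernel reports via kvm_arch_get_supported_cpuid(); e.g. (illustrative) a
 * model requesting "avx" on a kernel that cannot expose it simply loses the
 * bit here, without any warning.
 */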
int cpu_x86_register(X86CPU *cpu, const char *cpu_model)
{
    CPUX86State *env = &cpu->env;
    x86_def_t def1, *def = &def1;
    Error *error = NULL;

    memset(def, 0, sizeof(*def));

    if (cpu_x86_find_by_name(def, cpu_model) < 0)
        return -1;
    if (def->vendor1) {
        env->cpuid_vendor1 = def->vendor1;
        env->cpuid_vendor2 = def->vendor2;
        env->cpuid_vendor3 = def->vendor3;
    } else {
        env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
        env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
        env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
    }
    env->cpuid_vendor_override = def->vendor_override;
    object_property_set_int(OBJECT(cpu), def->level, "level", &error);
    object_property_set_int(OBJECT(cpu), def->family, "family", &error);
    object_property_set_int(OBJECT(cpu), def->model, "model", &error);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", &error);
    env->cpuid_features = def->features;
    env->cpuid_ext_features = def->ext_features;
    env->cpuid_ext2_features = def->ext2_features;
    env->cpuid_ext3_features = def->ext3_features;
    object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", &error);
    env->cpuid_kvm_features = def->kvm_features;
    env->cpuid_svm_features = def->svm_features;
    env->cpuid_ext4_features = def->ext4_features;
    env->cpuid_7_0_ebx_features = def->cpuid_7_0_ebx_features;
    env->cpuid_xlevel2 = def->xlevel2;
    object_property_set_int(OBJECT(cpu), (int64_t)def->tsc_khz * 1000,
                            "tsc-frequency", &error);

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (env->cpuid_vendor1 == CPUID_VENDOR_AMD_1 &&
        env->cpuid_vendor2 == CPUID_VENDOR_AMD_2 &&
        env->cpuid_vendor3 == CPUID_VENDOR_AMD_3) {
        env->cpuid_ext2_features &= ~CPUID_EXT2_AMD_ALIASES;
        env->cpuid_ext2_features |= (def->features & CPUID_EXT2_AMD_ALIASES);
    }

    if (!kvm_enabled()) {
        env->cpuid_features &= TCG_FEATURES;
        env->cpuid_ext_features &= TCG_EXT_FEATURES;
        env->cpuid_ext2_features &= (TCG_EXT2_FEATURES
#ifdef TARGET_X86_64
            | CPUID_EXT2_SYSCALL | CPUID_EXT2_LM
#endif
            );
        env->cpuid_ext3_features &= TCG_EXT3_FEATURES;
        env->cpuid_svm_features &= TCG_SVM_FEATURES;
    } else {
        filter_features_for_kvm(cpu);
    }
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", &error);
    if (error_is_set(&error)) {
        error_free(error);
        return -1;
    }
    return 0;
}
#if !defined(CONFIG_USER_ONLY)

void cpu_clear_apic_feature(CPUX86State *env)
{
    env->cpuid_features &= ~CPUID_APIC;
}

#endif /* !CONFIG_USER_ONLY */
/* Initialize list of CPU models, filling some non-static fields if necessary
 */
void x86_cpudef_setup(void)
{
    int i, j;
    static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };

    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
        x86_def_t *def = &builtin_x86_defs[i];
        def->next = x86_defs;

        /* Look for specific "cpudef" models that */
        /* have the QEMU version in .model_id */
        for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
            if (strcmp(model_with_versions[j], def->name) == 0) {
                pstrcpy(def->model_id, sizeof(def->model_id),
                        "QEMU Virtual CPU version ");
                pstrcat(def->model_id, sizeof(def->model_id),
                        qemu_get_version());
                break;
            }
        }

        x86_defs = def;
    }
}
static void get_cpuid_vendor(CPUX86State *env, uint32_t *ebx,
                             uint32_t *ecx, uint32_t *edx)
{
    *ebx = env->cpuid_vendor1;
    *edx = env->cpuid_vendor2;
    *ecx = env->cpuid_vendor3;

    /* sysenter isn't supported on compatibility mode on AMD, syscall
     * isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual cpu vendor, but you can override
     * this if you want to use KVM's sysenter/syscall emulation
     * in compatibility mode and when doing cross vendor migration
     */
    if (kvm_enabled() && !env->cpuid_vendor_override) {
        host_cpuid(0, 0, NULL, ebx, ecx, edx);
    }
}
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel) {
            if (env->cpuid_xlevel2 > 0) {
                /* Handle the Centaur's CPUID instruction. */
                if (index > env->cpuid_xlevel2) {
                    index = env->cpuid_xlevel2;
                } else if (index < 0xC0000000) {
                    index = env->cpuid_xlevel;
                }
            } else {
                index = env->cpuid_xlevel;
            }
        }
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch (index) {
    case 0:
        *eax = env->cpuid_level;
        get_cpuid_vendor(env, ebx, ecx, edx);
        break;
    case 1:
        *eax = env->cpuid_version;
        *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->cpuid_ext_features;
        *edx = env->cpuid_features;
        if (env->nr_cores * env->nr_threads > 1) {
            *ebx |= (env->nr_cores * env->nr_threads) << 16;
            *edx |= 1 << 28;    /* HTT bit */
        }
        break;
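    /* Leaf 1 EBX layout used above (descriptive): bits 31-24 carry the
     * initial APIC ID, bits 23-16 the logical processor count when HTT is
     * set, and bits 15-8 the CLFLUSH line size in 8-byte units
     * (8 => 64-byte cache lines).
     */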
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (env->nr_cores > 1) {
            *eax = (env->nr_cores - 1) << 26;
        }
        switch (count) {
        case 0: /* L1 dcache info */
            break;
        case 1: /* L1 icache info */
            break;
        case 2: /* L2 cache info */
            if (env->nr_threads > 1) {
                *eax |= (env->nr_threads - 1) << 14;
            }
            break;
        default: /* end of info */
            break;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        break;
    case 6:
        /* Thermal and Power Leaf */
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->cpuid_7_0_ebx_features; /* Feature flags */
            *ecx = 0; /* Reserved */
            *edx = 0; /* Reserved */
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled()) {
            KVMState *s = env->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        }
        break;
    case 0xD:
        /* Processor Extended State */
        if (!(env->cpuid_ext_features & CPUID_EXT_XSAVE)) {
            break;
        }
        if (kvm_enabled()) {
            KVMState *s = env->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xd, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xd, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xd, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xd, count, R_EDX);
        }
        break;
    case 0x80000000:
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ecx = env->cpuid_ext3_features;
        *edx = env->cpuid_ext2_features;

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (env->nr_cores * env->nr_threads > 1) {
            uint32_t tebx, tecx, tedx;
            get_cpuid_vendor(env, &tebx, &tecx, &tedx);
            if (tebx != CPUID_VENDOR_INTEL_1 ||
                tedx != CPUID_VENDOR_INTEL_2 ||
                tecx != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        /* XXX: This value must match the one used in the MMU code. */
        if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
            /* 64 bit processor */
            /* XXX: The physical address space is limited to 42 bits in exec.c. */
            *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
        } else {
            if (env->cpuid_features & CPUID_PSE36)
                *eax = 0x00000024; /* 36 bits physical */
            else
                *eax = 0x00000020; /* 32 bits physical */
        }
        if (env->nr_cores * env->nr_threads > 1) {
            *ecx |= (env->nr_cores * env->nr_threads) - 1;
        }
        break;
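    /* Encoding note (descriptive): EAX[7:0] is the physical and EAX[15:8]
     * the virtual address width, so 0x00003028 above advertises 40 physical
     * and 48 virtual bits, while 0x24/0x20 advertise 36/32 physical bits.
     */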
    case 0x8000000A:
        if (env->cpuid_ext3_features & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *edx = env->cpuid_svm_features; /* optional features */
        }
        break;
    case 0xC0000000:
        *eax = env->cpuid_xlevel2;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *edx = env->cpuid_ext4_features;
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        break;
    }
}
1795 static void x86_cpu_reset(CPUState
*s
)
1797 X86CPU
*cpu
= X86_CPU(s
);
1798 X86CPUClass
*xcc
= X86_CPU_GET_CLASS(cpu
);
1799 CPUX86State
*env
= &cpu
->env
;
1802 if (qemu_loglevel_mask(CPU_LOG_RESET
)) {
1803 qemu_log("CPU Reset (CPU %d)\n", env
->cpu_index
);
1804 log_cpu_state(env
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
1807 xcc
->parent_reset(s
);
1810 memset(env
, 0, offsetof(CPUX86State
, breakpoints
));
1814 env
->old_exception
= -1;
1816 /* init to reset state */
1818 #ifdef CONFIG_SOFTMMU
1819 env
->hflags
|= HF_SOFTMMU_MASK
;
1821 env
->hflags2
|= HF2_GIF_MASK
;
1823 cpu_x86_update_cr0(env
, 0x60000010);
1824 env
->a20_mask
= ~0x0;
1825 env
->smbase
= 0x30000;
1827 env
->idt
.limit
= 0xffff;
1828 env
->gdt
.limit
= 0xffff;
1829 env
->ldt
.limit
= 0xffff;
1830 env
->ldt
.flags
= DESC_P_MASK
| (2 << DESC_TYPE_SHIFT
);
1831 env
->tr
.limit
= 0xffff;
1832 env
->tr
.flags
= DESC_P_MASK
| (11 << DESC_TYPE_SHIFT
);
1834 cpu_x86_load_seg_cache(env
, R_CS
, 0xf000, 0xffff0000, 0xffff,
1835 DESC_P_MASK
| DESC_S_MASK
| DESC_CS_MASK
|
1836 DESC_R_MASK
| DESC_A_MASK
);
1837 cpu_x86_load_seg_cache(env
, R_DS
, 0, 0, 0xffff,
1838 DESC_P_MASK
| DESC_S_MASK
| DESC_W_MASK
|
1840 cpu_x86_load_seg_cache(env
, R_ES
, 0, 0, 0xffff,
1841 DESC_P_MASK
| DESC_S_MASK
| DESC_W_MASK
|
1843 cpu_x86_load_seg_cache(env
, R_SS
, 0, 0, 0xffff,
1844 DESC_P_MASK
| DESC_S_MASK
| DESC_W_MASK
|
1846 cpu_x86_load_seg_cache(env
, R_FS
, 0, 0, 0xffff,
1847 DESC_P_MASK
| DESC_S_MASK
| DESC_W_MASK
|
1849 cpu_x86_load_seg_cache(env
, R_GS
, 0, 0, 0xffff,
1850 DESC_P_MASK
| DESC_S_MASK
| DESC_W_MASK
|
1854 env
->regs
[R_EDX
] = env
->cpuid_version
;
1859 for (i
= 0; i
< 8; i
++) {
1864 env
->mxcsr
= 0x1f80;
1866 env
->pat
= 0x0007040600070406ULL
;
1867 env
->msr_ia32_misc_enable
= MSR_IA32_MISC_ENABLE_DEFAULT
;
1869 memset(env
->dr
, 0, sizeof(env
->dr
));
1870 env
->dr
[6] = DR6_FIXED_1
;
1871 env
->dr
[7] = DR7_FIXED_1
;
1872 cpu_breakpoint_remove_all(env
, BP_CPU
);
1873 cpu_watchpoint_remove_all(env
, BP_CPU
);
1875 #if !defined(CONFIG_USER_ONLY)
1876 /* We hard-wire the BSP to the first CPU. */
1877 if (env
->cpu_index
== 0) {
1878 apic_designate_bsp(env
->apic_state
);
1881 env
->halted
= !cpu_is_bsp(cpu
);
#ifndef CONFIG_USER_ONLY
bool cpu_is_bsp(X86CPU *cpu)
{
    return cpu_get_apic_base(cpu->env.apic_state) & MSR_IA32_APICBASE_BSP;
}
/* TODO: remove me, when reset over QOM tree is implemented */
static void x86_cpu_machine_reset_cb(void *opaque)
{
    X86CPU *cpu = opaque;
    cpu_reset(CPU(cpu));
}
#endif
static void mce_init(X86CPU *cpu)
{
    CPUX86State *cenv = &cpu->env;
    unsigned int bank;

    if (((cenv->cpuid_version >> 8) & 0xf) >= 6
        && (cenv->cpuid_features & (CPUID_MCE | CPUID_MCA)) ==
            (CPUID_MCE | CPUID_MCA)) {
        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
        cenv->mcg_ctl = ~(uint64_t)0;
        for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
            cenv->mce_banks[bank * 4] = ~(uint64_t)0;
        }
    }
}
void x86_cpu_realize(Object *obj, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
#endif

    qemu_init_vcpu(&cpu->env);
    cpu_reset(CPU(cpu));
}
static void x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    static int inited;

    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add(obj, "level", "int",
                        x86_cpuid_get_level,
                        x86_cpuid_set_level, NULL, NULL, NULL);
    object_property_add(obj, "xlevel", "int",
                        x86_cpuid_get_xlevel,
                        x86_cpuid_set_xlevel, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);

    env->cpuid_apic_id = env->cpu_index;

    /* init various static tables used in TCG mode */
    if (tcg_enabled() && !inited) {
        inited = 1;
        optimize_flags_init();
#ifndef CONFIG_USER_ONLY
        cpu_set_debug_excp_handler(breakpoint_handler);
#endif
    }
}
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);

    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
}

static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};

static void x86_cpu_register_types(void)
{
    type_register_static(&x86_cpu_type_info);
}

type_init(x86_cpu_register_types)