/*
 * i386 CPUID helper functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu-option.h"
#include "qemu-config.h"

#include "qapi/qapi-visit-core.h"

/* feature flags taken from "Intel Processor Identification and the CPUID
 * Instruction" and AMD's "CPUID Specification".  In cases of disagreement
 * between feature naming conventions, aliases may be added.
 */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse",
    "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep",
    "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
    NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss",
    "ht" /* Intel htt */, "tm", "ia64", "pbe",
};
static const char *ext_feature_name[] = {
    "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
    "ds_cpl", "vmx", "smx", "est",
    "tm2", "ssse3", "cid", NULL,
    "fma", "cx16", "xtpr", "pdcm",
    NULL, "pcid", "dca", "sse4.1|sse4_1",
    "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
    "tsc-deadline", "aes", "xsave", "osxsave",
    "avx", NULL, NULL, "hypervisor",
};
static const char *ext2_feature_name[] = {
    "fpu", "vme", "de", "pse",
    "tsc", "msr", "pae", "mce",
    "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall",
    "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", NULL, NULL /* Linux mp */,
    "nx|xd", NULL, "mmxext", "mmx",
    "fxsr", "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
    NULL, "lm|i64", "3dnowext", "3dnow",
};
static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
    "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", "ibs", "xop",
    "skinit", "wdt", NULL, NULL,
    "fma4", NULL, "cvt16", "nodeid_msr",
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

static const char *kvm_feature_name[] = {
    "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock", "kvm_asyncpf", NULL, "kvm_pv_eoi", NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};

static const char *svm_feature_name[] = {
    "npt", "lbrv", "svm_lock", "nrip_save",
    "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
    NULL, NULL, "pause_filter", NULL,
    "pfthreshold", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
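
/* Each table above names the flag for one bit of the corresponding CPUID
 * register, lowest bit first.  Entries of the form "a|b" list accepted
 * aliases for the same bit, and NULL marks bits that have no recognized
 * flag name.
 */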

/* collects per-function cpuid data
 */
typedef struct model_features_t {
    uint32_t *guest_feat;
    uint32_t *host_feat;
    uint32_t check_feat;
    const char **flag_names;
    uint32_t cpuid;
} model_features_t;

int check_cpuid = 0;
int enforce_cpuid = 0;

void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
#if defined(CONFIG_KVM)
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#else
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
#endif
}
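
/* a "white" character here is any non-NUL byte that is not printable ASCII;
 * the parsers below use it to delimit flag names */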
#define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))

/* general substring compare of *[s1..e1) and *[s2..e2).  sx is start of
 * a substring.  ex if !NULL points to the first char after a substring,
 * otherwise the string is assumed to be sized by a terminating nul.
 * Return lexical ordering of *s1:*s2.
 */
static int sstrcmp(const char *s1, const char *e1, const char *s2,
    const char *e2)
{
    for (;;) {
        if (!*s1 || !*s2 || *s1 != *s2)
            return *s1 - *s2;
        ++s1, ++s2;
        if (s1 == e1 && s2 == e2)
            return *s1 - *s2;
    }
}

/* compare *[s..e) to *altstr.  *altstr may be a simple string or multiple
 * '|' delimited (possibly empty) strings in which case search for a match
 * within the alternatives proceeds left to right.  Return 0 for success,
 * non-zero otherwise.
 */
static int altcmp(const char *s, const char *e, const char *altstr)
{
    const char *p, *q;

    for (q = p = altstr; ; ) {
        while (*p && *p != '|')
            ++p;
        if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
            return 0;
        if (!*p)
            return 1;
        else
            q = ++p;
    }
}

/* search featureset for flag *[s..e), if found set corresponding bit in
 * *pval and return true, otherwise return false
 */
static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
                           const char **featureset)
{
    uint32_t mask;
    const char **ppc;

    for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
        if (*ppc && !altcmp(s, e, *ppc)) {
            *pval |= mask;
            return true;
        }
    }
    return false;
}

static void add_flagname_to_bitmaps(const char *flagname, uint32_t *features,
                                    uint32_t *ext_features,
                                    uint32_t *ext2_features,
                                    uint32_t *ext3_features,
                                    uint32_t *kvm_features,
                                    uint32_t *svm_features)
{
    if (!lookup_feature(features, flagname, NULL, feature_name) &&
        !lookup_feature(ext_features, flagname, NULL, ext_feature_name) &&
        !lookup_feature(ext2_features, flagname, NULL, ext2_feature_name) &&
        !lookup_feature(ext3_features, flagname, NULL, ext3_feature_name) &&
        !lookup_feature(kvm_features, flagname, NULL, kvm_feature_name) &&
        !lookup_feature(svm_features, flagname, NULL, svm_feature_name))
            fprintf(stderr, "CPU feature %s not found\n", flagname);
}
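
/* Flags given as "+name" or "-name" on the -cpu command line are routed
 * through this helper: the first feature-name table that recognizes the name
 * gets the matching bit set in the caller's plus or minus bitmap.
 */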

typedef struct x86_def_t {
    struct x86_def_t *next;
    const char *name;
    uint32_t level;
    uint32_t vendor1, vendor2, vendor3;
    int family;
    int model;
    int stepping;
    int tsc_khz;
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t kvm_features, svm_features;
    uint32_t xlevel;
    char model_id[48];
    int vendor_override;
    uint32_t flags;
    /* Store the results of Centaur's CPUID instructions */
    uint32_t ext4_features;
    uint32_t xlevel2;
    /* The feature bits on CPUID[EAX=7,ECX=0].EBX */
    uint32_t cpuid_7_0_ebx_features;
} x86_def_t;

#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
#define EXT2_FEATURE_MASK 0x0183F3FF

#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64)
          CPUID_PSE36 (needed for Solaris) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | \
          CPUID_EXT_CX16 | CPUID_EXT_POPCNT | \
          CPUID_EXT_HYPERVISOR)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
          CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_XSAVE */
#define TCG_EXT2_FEATURES ((TCG_FEATURES & EXT2_FEATURE_MASK) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT)
          /* missing:
          CPUID_EXT2_PDPE1GB */
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_SVM_FEATURES 0

/* maintains list of cpu model definitions
 */
static x86_def_t *x86_defs = {NULL};

/* built-in cpu model definitions (deprecated)
 */
static x86_def_t builtin_x86_defs[] = {
    {
        .name = "qemu64",
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT,
        .ext2_features = (PPRO_FEATURES & EXT2_FEATURE_MASK) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .ext3_features = CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
            CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
        .xlevel = 0x8000000A,
    },
    {
        .name = "phenom",
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36 | CPUID_VME | CPUID_HT,
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
            CPUID_EXT_POPCNT,
        .ext2_features = (PPRO_FEATURES & EXT2_FEATURE_MASK) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
            CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
        /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_CR8LEG,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
        .ext3_features = CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
            CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
        .svm_features = CPUID_SVM_NPT | CPUID_SVM_LBRV,
        .xlevel = 0x8000001A,
        .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
    },
    {
        .name = "core2duo",
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36 | CPUID_VME | CPUID_DTS | CPUID_ACPI | CPUID_SS |
            CPUID_HT | CPUID_TM | CPUID_PBE,
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
            CPUID_EXT_DTES64 | CPUID_EXT_DSCPL | CPUID_EXT_VMX | CPUID_EXT_EST |
            CPUID_EXT_TM2 | CPUID_EXT_CX16 | CPUID_EXT_XTPR | CPUID_EXT_PDCM,
        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .ext3_features = CPUID_EXT3_LAHF_LM,
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
    },
    {
        .name = "kvm64",
        .vendor1 = CPUID_VENDOR_INTEL_1,
        .vendor2 = CPUID_VENDOR_INTEL_2,
        .vendor3 = CPUID_VENDOR_INTEL_3,
        /* Missing: CPUID_VME, CPUID_HT */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_CX16,
        /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
        .ext2_features = (PPRO_FEATURES & EXT2_FEATURE_MASK) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
        .ext3_features = 0,
        .xlevel = 0x80000008,
        .model_id = "Common KVM processor"
    },
    {
        .name = "qemu32",
        .features = PPRO_FEATURES,
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_POPCNT,
        .xlevel = 0x80000004,
    },
    {
        .name = "kvm32",
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3,
        .ext2_features = PPRO_FEATURES & EXT2_FEATURE_MASK,
        .xlevel = 0x80000008,
        .model_id = "Common 32-bit KVM processor"
    },
    {
        .name = "coreduo",
        .features = PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_DTS | CPUID_ACPI |
            CPUID_SS | CPUID_HT | CPUID_TM | CPUID_PBE,
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_VMX |
            CPUID_EXT_EST | CPUID_EXT_TM2 | CPUID_EXT_XTPR | CPUID_EXT_PDCM,
        .ext2_features = CPUID_EXT2_NX,
        .xlevel = 0x80000008,
        .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
    },
    {
        .name = "486",
        .features = I486_FEATURES,
    },
    {
        .name = "pentium",
        .features = PENTIUM_FEATURES,
    },
    {
        .name = "pentium2",
        .features = PENTIUM2_FEATURES,
    },
    {
        .name = "pentium3",
        .features = PENTIUM3_FEATURES,
    },
    {
        .name = "athlon",
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
        .ext2_features = (PPRO_FEATURES & EXT2_FEATURE_MASK) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .xlevel = 0x80000008,
    },
    {
        .name = "n270",
        /* original is on level 10 */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME | CPUID_DTS |
            CPUID_ACPI | CPUID_SS | CPUID_HT | CPUID_TM | CPUID_PBE,
        /* Some CPUs got no CPUID_SEP */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
            CPUID_EXT_DSCPL | CPUID_EXT_EST | CPUID_EXT_TM2 | CPUID_EXT_XTPR,
        .ext2_features = (PPRO_FEATURES & EXT2_FEATURE_MASK) | CPUID_EXT2_NX,
        .ext3_features = CPUID_EXT3_LAHF_LM,
        .xlevel = 0x8000000A,
        .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
    },
};

static int cpu_x86_fill_model_id(char *str)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
    int i;

    for (i = 0; i < 3; i++) {
        host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
        memcpy(str + i * 16 +  0, &eax, 4);
        memcpy(str + i * 16 +  4, &ebx, 4);
        memcpy(str + i * 16 +  8, &ecx, 4);
        memcpy(str + i * 16 + 12, &edx, 4);
    }
    return 0;
}
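
/* Fill an x86_def_t from the CPUID results of the host CPU.  CPUID leaf 1
 * EAX packs the version as stepping[3:0], model[7:4], family[11:8], extended
 * model[19:16] and extended family[27:20]; the decode below folds the
 * extended fields into the reported family and model.
 */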
static int cpu_x86_fill_host(x86_def_t *x86_cpu_def)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    x86_cpu_def->name = "host";
    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->level = eax;
    x86_cpu_def->vendor1 = ebx;
    x86_cpu_def->vendor2 = edx;
    x86_cpu_def->vendor3 = ecx;

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    x86_cpu_def->model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    x86_cpu_def->stepping = eax & 0x0F;
    x86_cpu_def->ext_features = ecx;
    x86_cpu_def->features = edx;

    if (kvm_enabled() && x86_cpu_def->level >= 7) {
        x86_cpu_def->cpuid_7_0_ebx_features = kvm_arch_get_supported_cpuid(kvm_state, 0x7, 0, R_EBX);
    } else {
        x86_cpu_def->cpuid_7_0_ebx_features = 0;
    }

    host_cpuid(0x80000000, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->xlevel = eax;

    host_cpuid(0x80000001, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->ext2_features = edx;
    x86_cpu_def->ext3_features = ecx;
    cpu_x86_fill_model_id(x86_cpu_def->model_id);
    x86_cpu_def->vendor_override = 0;

    /* Call Centaur's CPUID instruction. */
    if (x86_cpu_def->vendor1 == CPUID_VENDOR_VIA_1 &&
        x86_cpu_def->vendor2 == CPUID_VENDOR_VIA_2 &&
        x86_cpu_def->vendor3 == CPUID_VENDOR_VIA_3) {
        host_cpuid(0xC0000000, 0, &eax, &ebx, &ecx, &edx);
        if (eax >= 0xC0000001) {
            /* Support VIA max extended level */
            x86_cpu_def->xlevel2 = eax;
            host_cpuid(0xC0000001, 0, &eax, &ebx, &ecx, &edx);
            x86_cpu_def->ext4_features = edx;
        }
    }

    /*
     * Every SVM feature requires emulation support in KVM - so we can't just
     * read the host features here. KVM might even support SVM features not
     * available on the host hardware. Just set all bits and mask out the
     * unsupported ones later.
     */
    x86_cpu_def->svm_features = -1;

    return 0;
}

static int unavailable_host_feature(struct model_features_t *f, uint32_t mask)
{
    int i;

    for (i = 0; i < 32; ++i)
        if (1 << i & mask) {
            fprintf(stderr, "warning: host cpuid %04x_%04x lacks requested"
                " flag '%s' [0x%08x]\n",
                f->cpuid >> 16, f->cpuid & 0xffff,
                f->flag_names[i] ? f->flag_names[i] : "[reserved]", mask);
            break;
        }
    return 0;
}

/* best effort attempt to inform user requested cpu flags aren't making
 * their way to the guest.  Note: ft[].check_feat ideally should be
 * specified via a guest_def field to suppress report of extraneous flags.
 */
static int check_features_against_host(x86_def_t *guest_def)
{
    x86_def_t host_def;
    uint32_t mask;
    int rv, i;
    struct model_features_t ft[] = {
        {&guest_def->features, &host_def.features,
            ~0, feature_name, 0x00000000},
        {&guest_def->ext_features, &host_def.ext_features,
            ~CPUID_EXT_HYPERVISOR, ext_feature_name, 0x00000001},
        {&guest_def->ext2_features, &host_def.ext2_features,
            ~PPRO_FEATURES, ext2_feature_name, 0x80000000},
        {&guest_def->ext3_features, &host_def.ext3_features,
            ~CPUID_EXT3_SVM, ext3_feature_name, 0x80000001}};

    cpu_x86_fill_host(&host_def);
    for (rv = 0, i = 0; i < ARRAY_SIZE(ft); ++i)
        for (mask = 1; mask; mask <<= 1)
            if (ft[i].check_feat & mask && *ft[i].guest_feat & mask &&
                !(*ft[i].host_feat & mask)) {
                    unavailable_host_feature(&ft[i], mask);
                    rv = 1;
                }
    return rv;
}
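
/* The x86_cpuid_get_*()/x86_cpuid_set_*() functions below back the QOM
 * properties ("family", "model", "stepping", "level", "xlevel", "vendor",
 * "model-id", "tsc-frequency") registered in x86_cpu_initfn(); they read and
 * write the packed CPUID fields held in CPUX86State.
 */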
static void x86_cpuid_version_get_family(Object *obj, Visitor *v, void *opaque,
                                         const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = (env->cpuid_version >> 8) & 0xf;
    if (value == 0xf) {
        value += (env->cpuid_version >> 20) & 0xff;
    }
    visit_type_int(v, &value, name, errp);
}

static void x86_cpuid_version_set_family(Object *obj, Visitor *v, void *opaque,
                                         const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff + 0xf;
    int64_t value;

    visit_type_int(v, &value, name, errp);
    if (error_is_set(errp)) {
        return;
    }
    if (value < min || value > max) {
        error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                  name ? name : "null", value, min, max);
        return;
    }

    env->cpuid_version &= ~0xff00f00;
    if (value > 0x0f) {
        env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
    } else {
        env->cpuid_version |= value << 8;
    }
}

static void x86_cpuid_version_get_model(Object *obj, Visitor *v, void *opaque,
                                        const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = (env->cpuid_version >> 4) & 0xf;
    value |= ((env->cpuid_version >> 16) & 0xf) << 4;
    visit_type_int(v, &value, name, errp);
}

static void x86_cpuid_version_set_model(Object *obj, Visitor *v, void *opaque,
                                        const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff;
    int64_t value;

    visit_type_int(v, &value, name, errp);
    if (error_is_set(errp)) {
        return;
    }
    if (value < min || value > max) {
        error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                  name ? name : "null", value, min, max);
        return;
    }

    env->cpuid_version &= ~0xf00f0;
    env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
}

static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
                                           void *opaque, const char *name,
                                           Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = env->cpuid_version & 0xf;
    visit_type_int(v, &value, name, errp);
}

static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
                                           void *opaque, const char *name,
                                           Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xf;
    int64_t value;

    visit_type_int(v, &value, name, errp);
    if (error_is_set(errp)) {
        return;
    }
    if (value < min || value > max) {
        error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                  name ? name : "null", value, min, max);
        return;
    }

    env->cpuid_version &= ~0xf;
    env->cpuid_version |= value & 0xf;
}

static void x86_cpuid_get_level(Object *obj, Visitor *v, void *opaque,
                                const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);

    visit_type_uint32(v, &cpu->env.cpuid_level, name, errp);
}

static void x86_cpuid_set_level(Object *obj, Visitor *v, void *opaque,
                                const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);

    visit_type_uint32(v, &cpu->env.cpuid_level, name, errp);
}

static void x86_cpuid_get_xlevel(Object *obj, Visitor *v, void *opaque,
                                 const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);

    visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp);
}

static void x86_cpuid_set_xlevel(Object *obj, Visitor *v, void *opaque,
                                 const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);

    visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp);
}

static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    char *value;
    int i;

    value = (char *)g_malloc(12 + 1);
    for (i = 0; i < 4; i++) {
        value[i    ] = env->cpuid_vendor1 >> (8 * i);
        value[i + 4] = env->cpuid_vendor2 >> (8 * i);
        value[i + 8] = env->cpuid_vendor3 >> (8 * i);
    }
    value[12] = '\0';
    return value;
}

static void x86_cpuid_set_vendor(Object *obj, const char *value,
                                 Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int i;

    if (strlen(value) != 12) {
        error_set(errp, QERR_PROPERTY_VALUE_BAD, "",
                  "vendor", value);
        return;
    }

    env->cpuid_vendor1 = 0;
    env->cpuid_vendor2 = 0;
    env->cpuid_vendor3 = 0;
    for (i = 0; i < 4; i++) {
        env->cpuid_vendor1 |= ((uint8_t)value[i    ]) << (8 * i);
        env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
        env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
    }
    env->cpuid_vendor_override = 1;
}

static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    char *value;
    int i;

    value = g_malloc(48 + 1);
    for (i = 0; i < 48; i++) {
        value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
    }
    value[48] = '\0';
    return value;
}

static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
                                   Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int c, len, i;

    if (model_id == NULL) {
        model_id = "";
    }
    len = strlen(model_id);
    memset(env->cpuid_model, 0, 48);
    for (i = 0; i < 48; i++) {
        if (i >= len) {
            c = '\0';
        } else {
            c = (uint8_t)model_id[i];
        }
        env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
    }
}

static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, void *opaque,
                                   const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    int64_t value;

    value = cpu->env.tsc_khz * 1000;
    visit_type_int(v, &value, name, errp);
}

static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, void *opaque,
                                   const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    const int64_t min = 0;
    const int64_t max = INT_MAX;
    int64_t value;

    visit_type_int(v, &value, name, errp);
    if (error_is_set(errp)) {
        return;
    }
    if (value < min || value > max) {
        error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                  name ? name : "null", value, min, max);
        return;
    }

    cpu->env.tsc_khz = value / 1000;
}

static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
{
    unsigned int i;
    x86_def_t *def;

    char *s = g_strdup(cpu_model);
    char *featurestr, *name = strtok(s, ",");
    /* Features to be added */
    uint32_t plus_features = 0, plus_ext_features = 0;
    uint32_t plus_ext2_features = 0, plus_ext3_features = 0;
    uint32_t plus_kvm_features = 0, plus_svm_features = 0;
    /* Features to be removed */
    uint32_t minus_features = 0, minus_ext_features = 0;
    uint32_t minus_ext2_features = 0, minus_ext3_features = 0;
    uint32_t minus_kvm_features = 0, minus_svm_features = 0;
    uint32_t numvalue;

    for (def = x86_defs; def; def = def->next)
        if (name && !strcmp(name, def->name))
            break;
    if (kvm_enabled() && name && strcmp(name, "host") == 0) {
        cpu_x86_fill_host(x86_cpu_def);
    } else if (!def) {
        goto error;
    } else {
        memcpy(x86_cpu_def, def, sizeof(*def));
    }

    plus_kvm_features = ~0; /* not supported bits will be filtered out later */

    add_flagname_to_bitmaps("hypervisor", &plus_features,
            &plus_ext_features, &plus_ext2_features, &plus_ext3_features,
            &plus_kvm_features, &plus_svm_features);

    featurestr = strtok(NULL, ",");

    while (featurestr) {
        char *val;
        if (featurestr[0] == '+') {
            add_flagname_to_bitmaps(featurestr + 1, &plus_features,
                            &plus_ext_features, &plus_ext2_features,
                            &plus_ext3_features, &plus_kvm_features,
                            &plus_svm_features);
        } else if (featurestr[0] == '-') {
            add_flagname_to_bitmaps(featurestr + 1, &minus_features,
                            &minus_ext_features, &minus_ext2_features,
                            &minus_ext3_features, &minus_kvm_features,
                            &minus_svm_features);
        } else if ((val = strchr(featurestr, '='))) {
            *val = 0;
            val++;
            if (!strcmp(featurestr, "family")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err || numvalue > 0xff + 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->family = numvalue;
            } else if (!strcmp(featurestr, "model")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err || numvalue > 0xff) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->model = numvalue;
            } else if (!strcmp(featurestr, "stepping")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err || numvalue > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->stepping = numvalue;
            } else if (!strcmp(featurestr, "level")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->level = numvalue;
            } else if (!strcmp(featurestr, "xlevel")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                if (numvalue < 0x80000000) {
                    numvalue += 0x80000000;
                }
                x86_cpu_def->xlevel = numvalue;
            } else if (!strcmp(featurestr, "vendor")) {
                if (strlen(val) != 12) {
                    fprintf(stderr, "vendor string must be 12 chars long\n");
                    goto error;
                }
                x86_cpu_def->vendor1 = 0;
                x86_cpu_def->vendor2 = 0;
                x86_cpu_def->vendor3 = 0;
                for (i = 0; i < 4; i++) {
                    x86_cpu_def->vendor1 |= ((uint8_t)val[i]) << (8 * i);
                    x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
                    x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
                }
                x86_cpu_def->vendor_override = 1;
            } else if (!strcmp(featurestr, "model_id")) {
                pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
                        val);
            } else if (!strcmp(featurestr, "tsc_freq")) {
                int64_t tsc_freq;
                char *err;

                tsc_freq = strtosz_suffix_unit(val, &err,
                                               STRTOSZ_DEFSUFFIX_B, 1000);
                if (tsc_freq < 0 || *err) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->tsc_khz = tsc_freq / 1000;
            } else if (!strcmp(featurestr, "hv_spinlocks")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                hyperv_set_spinlock_retries(numvalue);
            } else {
                fprintf(stderr, "unrecognized feature %s\n", featurestr);
                goto error;
            }
        } else if (!strcmp(featurestr, "check")) {
            check_cpuid = 1;
        } else if (!strcmp(featurestr, "enforce")) {
            check_cpuid = enforce_cpuid = 1;
        } else if (!strcmp(featurestr, "hv_relaxed")) {
            hyperv_enable_relaxed_timing(true);
        } else if (!strcmp(featurestr, "hv_vapic")) {
            hyperv_enable_vapic_recommended(true);
        } else {
            fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
            goto error;
        }
        featurestr = strtok(NULL, ",");
    }
    x86_cpu_def->features |= plus_features;
    x86_cpu_def->ext_features |= plus_ext_features;
    x86_cpu_def->ext2_features |= plus_ext2_features;
    x86_cpu_def->ext3_features |= plus_ext3_features;
    x86_cpu_def->kvm_features |= plus_kvm_features;
    x86_cpu_def->svm_features |= plus_svm_features;
    x86_cpu_def->features &= ~minus_features;
    x86_cpu_def->ext_features &= ~minus_ext_features;
    x86_cpu_def->ext2_features &= ~minus_ext2_features;
    x86_cpu_def->ext3_features &= ~minus_ext3_features;
    x86_cpu_def->kvm_features &= ~minus_kvm_features;
    x86_cpu_def->svm_features &= ~minus_svm_features;
    if (check_cpuid) {
        if (check_features_against_host(x86_cpu_def) && enforce_cpuid)
            goto error;
    }
    g_free(s);
    return 0;

error:
    g_free(s);
    return -1;
}

/* generate a composite string into buf of all cpuid names in featureset
 * selected by fbits.  indicate truncation at bufsize in the event of overflow.
 * if flags, suppress names undefined in featureset.
 */
static void listflags(char *buf, int bufsize, uint32_t fbits,
    const char **featureset, uint32_t flags)
{
    const char **p = &featureset[31];
    char *q, *b, bit;
    int nc;

    b = 4 <= bufsize ? buf + (bufsize -= 3) - 1 : NULL;
    *buf = '\0';
    for (q = buf, bit = 31; fbits && bufsize; --p, fbits &= ~(1 << bit), --bit)
        if (fbits & 1 << bit && (*p || !flags)) {
            if (*p)
                nc = snprintf(q, bufsize, "%s%s", q == buf ? "" : " ", *p);
            else
                nc = snprintf(q, bufsize, "%s[%d]", q == buf ? "" : " ", bit);
            if (bufsize <= nc) {
                if (b) {
                    memcpy(b, "...", sizeof("..."));
                }
                return;
            }
            q += nc;
            bufsize -= nc;
        }
}

/* generate CPU information:
 * -?        list model names
 * -?model   list model names/IDs
 * -?dump    output all model (x86_def_t) data
 * -?cpuid   list all recognized cpuid flag names
 */
void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    unsigned char model = !strcmp("?model", optarg);
    unsigned char dump = !strcmp("?dump", optarg);
    unsigned char cpuid = !strcmp("?cpuid", optarg);
    x86_def_t *def;
    char buf[256];

    if (cpuid) {
        (*cpu_fprintf)(f, "Recognized CPUID flags:\n");
        listflags(buf, sizeof (buf), (uint32_t)~0, feature_name, 1);
        (*cpu_fprintf)(f, " f_edx: %s\n", buf);
        listflags(buf, sizeof (buf), (uint32_t)~0, ext_feature_name, 1);
        (*cpu_fprintf)(f, " f_ecx: %s\n", buf);
        listflags(buf, sizeof (buf), (uint32_t)~0, ext2_feature_name, 1);
        (*cpu_fprintf)(f, " extf_edx: %s\n", buf);
        listflags(buf, sizeof (buf), (uint32_t)~0, ext3_feature_name, 1);
        (*cpu_fprintf)(f, " extf_ecx: %s\n", buf);
        return;
    }
    for (def = x86_defs; def; def = def->next) {
        snprintf(buf, sizeof (buf), def->flags ? "[%s]": "%s", def->name);
        if (model || dump) {
            (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
        } else {
            (*cpu_fprintf)(f, "x86 %16s\n", buf);
        }
        if (dump) {
            memcpy(buf, &def->vendor1, sizeof (def->vendor1));
            memcpy(buf + 4, &def->vendor2, sizeof (def->vendor2));
            memcpy(buf + 8, &def->vendor3, sizeof (def->vendor3));
            buf[12] = '\0';
            (*cpu_fprintf)(f,
                " family %d model %d stepping %d level %d xlevel 0x%x"
                " vendor \"%s\"\n",
                def->family, def->model, def->stepping, def->level,
                def->xlevel, buf);
            listflags(buf, sizeof (buf), def->features, feature_name, 0);
            (*cpu_fprintf)(f, " feature_edx %08x (%s)\n", def->features,
                buf);
            listflags(buf, sizeof (buf), def->ext_features, ext_feature_name,
                0);
            (*cpu_fprintf)(f, " feature_ecx %08x (%s)\n", def->ext_features,
                buf);
            listflags(buf, sizeof (buf), def->ext2_features, ext2_feature_name,
                0);
            (*cpu_fprintf)(f, " extfeature_edx %08x (%s)\n",
                def->ext2_features, buf);
            listflags(buf, sizeof (buf), def->ext3_features, ext3_feature_name,
                0);
            (*cpu_fprintf)(f, " extfeature_ecx %08x (%s)\n",
                def->ext3_features, buf);
            (*cpu_fprintf)(f, "\n");
        }
    }
    if (kvm_enabled()) {
        (*cpu_fprintf)(f, "x86 %16s\n", "[host]");
    }
}

int cpu_x86_register(X86CPU *cpu, const char *cpu_model)
{
    CPUX86State *env = &cpu->env;
    x86_def_t def1, *def = &def1;
    Error *error = NULL;

    memset(def, 0, sizeof(*def));

    if (cpu_x86_find_by_name(def, cpu_model) < 0)
        return -1;
    if (def->vendor1) {
        env->cpuid_vendor1 = def->vendor1;
        env->cpuid_vendor2 = def->vendor2;
        env->cpuid_vendor3 = def->vendor3;
    } else {
        env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
        env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
        env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
    }
    env->cpuid_vendor_override = def->vendor_override;
    object_property_set_int(OBJECT(cpu), def->level, "level", &error);
    object_property_set_int(OBJECT(cpu), def->family, "family", &error);
    object_property_set_int(OBJECT(cpu), def->model, "model", &error);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", &error);
    env->cpuid_features = def->features;
    env->cpuid_ext_features = def->ext_features;
    env->cpuid_ext2_features = def->ext2_features;
    env->cpuid_ext3_features = def->ext3_features;
    object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", &error);
    env->cpuid_kvm_features = def->kvm_features;
    env->cpuid_svm_features = def->svm_features;
    env->cpuid_ext4_features = def->ext4_features;
    env->cpuid_7_0_ebx = def->cpuid_7_0_ebx_features;
    env->cpuid_xlevel2 = def->xlevel2;
    object_property_set_int(OBJECT(cpu), (int64_t)def->tsc_khz * 1000,
                            "tsc-frequency", &error);
    if (!kvm_enabled()) {
        env->cpuid_features &= TCG_FEATURES;
        env->cpuid_ext_features &= TCG_EXT_FEATURES;
        env->cpuid_ext2_features &= (TCG_EXT2_FEATURES
#ifdef TARGET_X86_64
            | CPUID_EXT2_SYSCALL | CPUID_EXT2_LM
#endif
            );
        env->cpuid_ext3_features &= TCG_EXT3_FEATURES;
        env->cpuid_svm_features &= TCG_SVM_FEATURES;
    }
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", &error);
    if (error_is_set(&error)) {
        error_free(error);
        return -1;
    }
    return 0;
}

#if !defined(CONFIG_USER_ONLY)
/* copy vendor id string to 32 bit register, nul pad as needed
 */
static void cpyid(const char *s, uint32_t *id)
{
    char *d = (char *)id;
    char i;

    for (i = sizeof (*id); i--; )
        *d++ = *s ? *s++ : '\0';
}

/* interpret radix and convert from string to arbitrary scalar,
 * otherwise flag failure
 */
#define setscalar(pval, str, perr)                      \
{                                                       \
    char *pend;                                         \
    unsigned long ul;                                   \
                                                        \
    ul = strtoul(str, &pend, 0);                        \
    *str && !*pend ? (*pval = ul) : (*perr = 1);        \
}
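
/* e.g. setscalar(&def->level, "0xa", &err) accepts decimal, octal or hex
 * input (strtoul with base 0) and sets *perr when the string is empty or
 * has trailing garbage */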

/* map cpuid options to feature bits, otherwise return failure
 * (option tags in *str are delimited by whitespace)
 */
static void setfeatures(uint32_t *pval, const char *str,
    const char **featureset, int *perr)
{
    const char *p, *q;

    for (q = p = str; *p || *q; q = p) {
        while (iswhite(*p))
            q = ++p;
        while (*p && !iswhite(*p))
            ++p;
        if (!*q && !*p)
            return;
        if (!lookup_feature(pval, q, p, featureset)) {
            fprintf(stderr, "error: feature \"%.*s\" not available in set\n",
                (int)(p - q), q);
            *perr = 1;
            return;
        }
    }
}
1233 static int cpudef_setfield(const char *name
, const char *str
, void *opaque
)
1235 x86_def_t
*def
= opaque
;
1238 if (!strcmp(name
, "name")) {
1239 g_free((void *)def
->name
);
1240 def
->name
= g_strdup(str
);
1241 } else if (!strcmp(name
, "model_id")) {
1242 strncpy(def
->model_id
, str
, sizeof (def
->model_id
));
1243 } else if (!strcmp(name
, "level")) {
1244 setscalar(&def
->level
, str
, &err
)
1245 } else if (!strcmp(name
, "vendor")) {
1246 cpyid(&str
[0], &def
->vendor1
);
1247 cpyid(&str
[4], &def
->vendor2
);
1248 cpyid(&str
[8], &def
->vendor3
);
1249 } else if (!strcmp(name
, "family")) {
1250 setscalar(&def
->family
, str
, &err
)
1251 } else if (!strcmp(name
, "model")) {
1252 setscalar(&def
->model
, str
, &err
)
1253 } else if (!strcmp(name
, "stepping")) {
1254 setscalar(&def
->stepping
, str
, &err
)
1255 } else if (!strcmp(name
, "feature_edx")) {
1256 setfeatures(&def
->features
, str
, feature_name
, &err
);
1257 } else if (!strcmp(name
, "feature_ecx")) {
1258 setfeatures(&def
->ext_features
, str
, ext_feature_name
, &err
);
1259 } else if (!strcmp(name
, "extfeature_edx")) {
1260 setfeatures(&def
->ext2_features
, str
, ext2_feature_name
, &err
);
1261 } else if (!strcmp(name
, "extfeature_ecx")) {
1262 setfeatures(&def
->ext3_features
, str
, ext3_feature_name
, &err
);
1263 } else if (!strcmp(name
, "xlevel")) {
1264 setscalar(&def
->xlevel
, str
, &err
)
1266 fprintf(stderr
, "error: unknown option [%s = %s]\n", name
, str
);
1270 fprintf(stderr
, "error: bad option value [%s = %s]\n", name
, str
);
1276 /* register config file entry as x86_def_t
1278 static int cpudef_register(QemuOpts
*opts
, void *opaque
)
1280 x86_def_t
*def
= g_malloc0(sizeof (x86_def_t
));
1282 qemu_opt_foreach(opts
, cpudef_setfield
, def
, 1);
1283 def
->next
= x86_defs
;
1288 void cpu_clear_apic_feature(CPUX86State
*env
)
1290 env
->cpuid_features
&= ~CPUID_APIC
;
1293 #endif /* !CONFIG_USER_ONLY */

/* register "cpudef" models defined in configuration file.  Here we first
 * preload any built-in definitions
 */
void x86_cpudef_setup(void)
{
    int i, j;
    static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };

    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
        builtin_x86_defs[i].next = x86_defs;
        builtin_x86_defs[i].flags = 1;

        /* Look for specific "cpudef" models that */
        /* have the QEMU version in .model_id */
        for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
            if (strcmp(model_with_versions[j], builtin_x86_defs[i].name) == 0) {
                pstrcpy(builtin_x86_defs[i].model_id, sizeof(builtin_x86_defs[i].model_id), "QEMU Virtual CPU version ");
                pstrcat(builtin_x86_defs[i].model_id, sizeof(builtin_x86_defs[i].model_id), qemu_get_version());
                break;
            }
        }

        x86_defs = &builtin_x86_defs[i];
    }
#if !defined(CONFIG_USER_ONLY)
    qemu_opts_foreach(qemu_find_opts("cpudef"), cpudef_register, NULL, 0);
#endif
}

static void get_cpuid_vendor(CPUX86State *env, uint32_t *ebx,
                             uint32_t *ecx, uint32_t *edx)
{
    *ebx = env->cpuid_vendor1;
    *edx = env->cpuid_vendor2;
    *ecx = env->cpuid_vendor3;

    /* sysenter isn't supported on compatibility mode on AMD, syscall
     * isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual cpu vendor, but you can override
     * this if you want to use KVM's sysenter/syscall emulation
     * in compatibility mode and when doing cross vendor migration
     */
    if (kvm_enabled() && !env->cpuid_vendor_override) {
        host_cpuid(0, 0, NULL, ebx, ecx, edx);
    }
}
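
/* cpu_x86_cpuid() first clamps out-of-range leaves to the highest supported
 * basic, extended or Centaur leaf, then dispatches on the (possibly clamped)
 * leaf index to fill EAX..EDX for the guest.
 */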
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel) {
            if (env->cpuid_xlevel2 > 0) {
                /* Handle the Centaur's CPUID instruction. */
                if (index > env->cpuid_xlevel2) {
                    index = env->cpuid_xlevel2;
                } else if (index < 0xC0000000) {
                    index = env->cpuid_xlevel;
                }
            } else {
                index = env->cpuid_xlevel;
            }
        }
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch (index) {
    case 0:
        *eax = env->cpuid_level;
        get_cpuid_vendor(env, ebx, ecx, edx);
        break;
    case 1:
        *eax = env->cpuid_version;
        *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->cpuid_ext_features;
        *edx = env->cpuid_features;
        if (env->nr_cores * env->nr_threads > 1) {
            *ebx |= (env->nr_cores * env->nr_threads) << 16;
            *edx |= 1 << 28; /* HTT bit */
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (env->nr_cores > 1) {
            *eax = (env->nr_cores - 1) << 26;
        } else {
            *eax = 0;
        }
        switch (count) {
            case 0: /* L1 dcache info */
                break;
            case 1: /* L1 icache info */
                break;
            case 2: /* L2 cache info */
                if (env->nr_threads > 1) {
                    *eax |= (env->nr_threads - 1) << 14;
                }
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->cpuid_7_0_ebx; /* Feature flags */
            *ecx = 0; /* Reserved */
            *edx = 0; /* Reserved */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled()) {
            KVMState *s = env->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xD:
        /* Processor Extended State */
        if (!(env->cpuid_ext_features & CPUID_EXT_XSAVE)) {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
            break;
        }
        if (kvm_enabled()) {
            KVMState *s = env->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xd, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xd, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xd, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xd, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0x80000000:
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->cpuid_ext3_features;
        *edx = env->cpuid_ext2_features;

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So dont set it here for Intel to make Linux guests happy.
         */
        if (env->nr_cores * env->nr_threads > 1) {
            uint32_t tebx, tecx, tedx;
            get_cpuid_vendor(env, &tebx, &tecx, &tedx);
            if (tebx != CPUID_VENDOR_INTEL_1 ||
                tedx != CPUID_VENDOR_INTEL_2 ||
                tecx != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1; /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        /* XXX: This value must match the one used in the MMU code. */
        if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
            /* 64 bit processor */
            /* XXX: The physical address space is limited to 42 bits in exec.c. */
            *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
        } else {
            if (env->cpuid_features & CPUID_PSE36)
                *eax = 0x00000024; /* 36 bits physical */
            else
                *eax = 0x00000020; /* 32 bits physical */
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (env->nr_cores * env->nr_threads > 1) {
            *ecx |= (env->nr_cores * env->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        if (env->cpuid_ext3_features & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->cpuid_svm_features; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xC0000000:
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->cpuid_ext4_features;
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}

/* CPUClass::reset() */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    int i;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
    }

    xcc->parent_reset(s);

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->regs[R_EDX] = env->cpuid_version;

    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }

    env->mxcsr = 0x1f80;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);

#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    if (env->cpu_index == 0) {
        apic_designate_bsp(env->apic_state);
    }

    env->halted = !cpu_is_bsp(cpu);
#endif
}

#ifndef CONFIG_USER_ONLY
bool cpu_is_bsp(X86CPU *cpu)
{
    return cpu_get_apic_base(cpu->env.apic_state) & MSR_IA32_APICBASE_BSP;
}

/* TODO: remove me, when reset over QOM tree is implemented */
static void x86_cpu_machine_reset_cb(void *opaque)
{
    X86CPU *cpu = opaque;
    cpu_reset(CPU(cpu));
}
#endif

static void mce_init(X86CPU *cpu)
{
    CPUX86State *cenv = &cpu->env;
    unsigned int bank;

    if (((cenv->cpuid_version >> 8) & 0xf) >= 6
        && (cenv->cpuid_features & (CPUID_MCE | CPUID_MCA)) ==
            (CPUID_MCE | CPUID_MCA)) {
        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
        cenv->mcg_ctl = ~(uint64_t)0;
        for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
            cenv->mce_banks[bank * 4] = ~(uint64_t)0;
        }
    }
}

void x86_cpu_realize(Object *obj, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
#endif

    mce_init(cpu);
    qemu_init_vcpu(&cpu->env);
    cpu_reset(CPU(cpu));
}

static void x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    static int inited;

    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add(obj, "level", "int",
                        x86_cpuid_get_level,
                        x86_cpuid_set_level, NULL, NULL, NULL);
    object_property_add(obj, "xlevel", "int",
                        x86_cpuid_get_xlevel,
                        x86_cpuid_set_xlevel, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);

    env->cpuid_apic_id = env->cpu_index;

    /* init various static tables used in TCG mode */
    if (tcg_enabled() && !inited) {
        inited = 1;
        optimize_flags_init();
#ifndef CONFIG_USER_ONLY
        cpu_set_debug_excp_handler(breakpoint_handler);
#endif
    }
}

static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);

    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
}

static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};

static void x86_cpu_register_types(void)
{
    type_register_static(&x86_cpu_type_info);
}

type_init(x86_cpu_register_types)