/*
 * i386 CPUID helper functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu-option.h"
#include "qemu-config.h"

#include "qapi/qapi-visit-core.h"
#include "arch_init.h"
/* feature flags taken from "Intel Processor Identification and the CPUID
 * Instruction" and AMD's "CPUID Specification".  In cases of disagreement
 * between feature naming conventions, aliases may be added.
 */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse",
    "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep",
    "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
    NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss",
    "ht" /* Intel htt */, "tm", "ia64", "pbe",
};
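/* Each name table below mirrors one 32-bit CPUID register: a name's index in
 * the array is its bit position in that register.  For example, "sep" above
 * is bit 11 of CPUID.01H:EDX and "clflush" is bit 19.  NULL entries mark
 * bits that cannot be requested by name. */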
static const char *ext_feature_name[] = {
    "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
    "ds_cpl", "vmx", "smx", "est",
    "tm2", "ssse3", "cid", NULL,
    "fma", "cx16", "xtpr", "pdcm",
    NULL, "pcid", "dca", "sse4.1|sse4_1",
    "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
    "tsc-deadline", "aes", "xsave", "osxsave",
    "avx", NULL, NULL, "hypervisor",
};
static const char *ext2_feature_name[] = {
    "fpu", "vme", "de", "pse",
    "tsc", "msr", "pae", "mce",
    "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall",
    "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", NULL, NULL /* Linux mp */,
    "nx|xd", NULL, "mmxext", "mmx",
    "fxsr", "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
    NULL, "lm|i64", "3dnowext", "3dnow",
};
static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
    "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", "ibs", "xop",
    "skinit", "wdt", NULL, NULL,
    "fma4", NULL, "cvt16", "nodeid_msr",
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
static const char *kvm_feature_name[] = {
    "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
    "kvm_asyncpf", NULL, "kvm_pv_eoi", NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
static const char *svm_feature_name[] = {
    "npt", "lbrv", "svm_lock", "nrip_save",
    "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
    NULL, NULL, "pause_filter", NULL,
    "pfthreshold", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
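/* Summary of which CPUID register each name table above corresponds to:
 *   feature_name      -> CPUID.01H:EDX
 *   ext_feature_name  -> CPUID.01H:ECX
 *   ext2_feature_name -> CPUID.8000_0001H:EDX
 *   ext3_feature_name -> CPUID.8000_0001H:ECX
 *   kvm_feature_name  -> the KVM paravirtual feature leaf
 *   svm_feature_name  -> CPUID.8000_000AH:EDX
 */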
/* collects per-function cpuid data
 */
typedef struct model_features_t {
    uint32_t *guest_feat;
    uint32_t *host_feat;
    uint32_t check_feat;
    const char **flag_names;
    uint32_t cpuid;
} model_features_t;

int check_cpuid = 0;
int enforce_cpuid = 0;
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
#if defined(CONFIG_KVM)
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#else
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
#endif
}
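/* Illustrative use only (it mirrors cpu_x86_fill_host() below): leaf 0
 * returns the highest basic leaf in EAX and the vendor string scattered
 * across EBX:EDX:ECX.
 *
 *     uint32_t eax, ebx, ecx, edx;
 *     host_cpuid(0, 0, &eax, &ebx, &ecx, &edx);
 *     // ebx/edx/ecx spell e.g. "AuthenticAMD" on real hardware
 */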
#define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))

/* general substring compare of *[s1..e1) and *[s2..e2).  sx is the start of
 * a substring, ex if !NULL points to the first char after a substring,
 * otherwise the string is assumed to be sized by a terminating nul.
 * Return lexical ordering of *s1:*s2.
 */
static int sstrcmp(const char *s1, const char *e1, const char *s2,
                   const char *e2)
{
    for (;;) {
        if (!*s1 || !*s2 || *s1 != *s2)
            return (*s1 - *s2);
        ++s1, ++s2;
        if (s1 == e1 && s2 == e2)
            return (0);
    }
}
/* compare *[s..e) to *altstr.  *altstr may be a simple string or multiple
 * '|' delimited (possibly empty) strings in which case search for a match
 * within the alternatives proceeds left to right.  Return 0 for success,
 * non-zero otherwise.
 */
static int altcmp(const char *s, const char *e, const char *altstr)
{
    const char *p, *q;

    for (q = p = altstr; ; ) {
        while (*p && *p != '|')
            ++p;
        if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
            return 0;
        if (!*p)
            return 1;
        else
            q = ++p;
    }
}
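/* For example, altcmp(s, e, "sse4.2|sse4_2") succeeds for either spelling,
 * which is how the '|'-separated aliases in the tables above are honoured. */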
/* search featureset for flag *[s..e), if found set corresponding bit in
 * *pval and return true, otherwise return false
 */
static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
                           const char **featureset)
{
    uint32_t mask;
    const char **ppc;
    bool found = false;

    for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
        if (*ppc && !altcmp(s, e, *ppc)) {
            *pval |= mask;
            found = true;
        }
    }
    return found;
}
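/* For example, lookup_feature(&val, "sep", NULL, feature_name) ORs bit 11
 * into val and returns true; an unknown name leaves val unchanged and
 * returns false. */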
static void add_flagname_to_bitmaps(const char *flagname, uint32_t *features,
                                    uint32_t *ext_features,
                                    uint32_t *ext2_features,
                                    uint32_t *ext3_features,
                                    uint32_t *kvm_features,
                                    uint32_t *svm_features)
{
    if (!lookup_feature(features, flagname, NULL, feature_name) &&
        !lookup_feature(ext_features, flagname, NULL, ext_feature_name) &&
        !lookup_feature(ext2_features, flagname, NULL, ext2_feature_name) &&
        !lookup_feature(ext3_features, flagname, NULL, ext3_feature_name) &&
        !lookup_feature(kvm_features, flagname, NULL, kvm_feature_name) &&
        !lookup_feature(svm_features, flagname, NULL, svm_feature_name))
            fprintf(stderr, "CPU feature %s not found\n", flagname);
}
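/* For example, "+x2apic" on the -cpu command line ends up here and sets only
 * bit 21 of *ext_features (CPUID.01H:ECX); a misspelled flag falls through
 * every table and is reported on stderr. */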
typedef struct x86_def_t {
    struct x86_def_t *next;
    const char *name;
    uint32_t level;
    uint32_t vendor1, vendor2, vendor3;
    int family;
    int model;
    int stepping;
    int tsc_khz;
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t kvm_features, svm_features;
    uint32_t xlevel;
    char model_id[48];
    int vendor_override;
    uint32_t flags;
    /* Store the results of Centaur's CPUID instructions */
    uint32_t ext4_features;
    uint32_t xlevel2;
    /* The feature bits on CPUID[EAX=7,ECX=0].EBX */
    uint32_t cpuid_7_0_ebx_features;
} x86_def_t;
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
#define EXT2_FEATURE_MASK 0x0183F3FF

#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64)
          CPUID_PSE36 (needed for Solaris) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | \
          CPUID_EXT_CX16 | CPUID_EXT_POPCNT | \
          CPUID_EXT_HYPERVISOR)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
          CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_XSAVE */
#define TCG_EXT2_FEATURES ((TCG_FEATURES & EXT2_FEATURE_MASK) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT)
          /* missing:
          CPUID_EXT2_PDPE1GB */
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_SVM_FEATURES 0
/* maintains list of cpu model definitions
 */
static x86_def_t *x86_defs = {NULL};
/* built-in cpu model definitions (deprecated)
 */
static x86_def_t builtin_x86_defs[] = {
    {
        .name = "qemu64",
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT,
        .ext2_features = (PPRO_FEATURES & EXT2_FEATURE_MASK) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .ext3_features = CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
            CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
        .xlevel = 0x8000000A,
    },
    {
        .name = "phenom",
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36 | CPUID_VME | CPUID_HT,
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
            CPUID_EXT_POPCNT,
        .ext2_features = (PPRO_FEATURES & EXT2_FEATURE_MASK) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
            CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
        /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
        .ext3_features = CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
            CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
        .svm_features = CPUID_SVM_NPT | CPUID_SVM_LBRV,
        .xlevel = 0x8000001A,
        .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
    },
    {
        .name = "core2duo",
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36 | CPUID_VME | CPUID_DTS | CPUID_ACPI | CPUID_SS |
            CPUID_HT | CPUID_TM | CPUID_PBE,
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
            CPUID_EXT_DTES64 | CPUID_EXT_DSCPL | CPUID_EXT_VMX | CPUID_EXT_EST |
            CPUID_EXT_TM2 | CPUID_EXT_CX16 | CPUID_EXT_XTPR | CPUID_EXT_PDCM,
        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .ext3_features = CPUID_EXT3_LAHF_LM,
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
    },
    {
        .name = "kvm64",
        .vendor1 = CPUID_VENDOR_INTEL_1,
        .vendor2 = CPUID_VENDOR_INTEL_2,
        .vendor3 = CPUID_VENDOR_INTEL_3,
        /* Missing: CPUID_VME, CPUID_HT */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_CX16,
        /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
        .ext2_features = (PPRO_FEATURES & EXT2_FEATURE_MASK) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
        .xlevel = 0x80000008,
        .model_id = "Common KVM processor"
    },
    {
        .name = "qemu32",
        .features = PPRO_FEATURES,
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_POPCNT,
        .xlevel = 0x80000004,
    },
    {
        .name = "kvm32",
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3,
        .ext2_features = PPRO_FEATURES & EXT2_FEATURE_MASK,
        .xlevel = 0x80000008,
        .model_id = "Common 32-bit KVM processor"
    },
    {
        .name = "coreduo",
        .features = PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_DTS | CPUID_ACPI |
            CPUID_SS | CPUID_HT | CPUID_TM | CPUID_PBE,
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_VMX |
            CPUID_EXT_EST | CPUID_EXT_TM2 | CPUID_EXT_XTPR | CPUID_EXT_PDCM,
        .ext2_features = CPUID_EXT2_NX,
        .xlevel = 0x80000008,
        .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
    },
    {
        .name = "486",
        .features = I486_FEATURES,
    },
    {
        .name = "pentium",
        .features = PENTIUM_FEATURES,
    },
    {
        .name = "pentium2",
        .features = PENTIUM2_FEATURES,
    },
    {
        .name = "pentium3",
        .features = PENTIUM3_FEATURES,
    },
    {
        .name = "athlon",
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
            CPUID_MCA,
        .ext2_features = (PPRO_FEATURES & EXT2_FEATURE_MASK) |
            CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .xlevel = 0x80000008,
    },
    {
        .name = "n270",
        /* original is on level 10 */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME | CPUID_DTS |
            CPUID_ACPI | CPUID_SS | CPUID_HT | CPUID_TM | CPUID_PBE,
        /* Some CPUs got no CPUID_SEP */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
            CPUID_EXT_DSCPL | CPUID_EXT_EST | CPUID_EXT_TM2 | CPUID_EXT_XTPR,
        .ext2_features = (PPRO_FEATURES & EXT2_FEATURE_MASK) | CPUID_EXT2_NX,
        .ext3_features = CPUID_EXT3_LAHF_LM,
        .xlevel = 0x8000000A,
        .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
    },
};
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
    int i;

    for (i = 0; i < 3; i++) {
        host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
        memcpy(str + i * 16 +  0, &eax, 4);
        memcpy(str + i * 16 +  4, &ebx, 4);
        memcpy(str + i * 16 +  8, &ecx, 4);
        memcpy(str + i * 16 + 12, &edx, 4);
    }
    return 0;
}
static int cpu_x86_fill_host(x86_def_t *x86_cpu_def)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    x86_cpu_def->name = "host";
    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->level = eax;
    x86_cpu_def->vendor1 = ebx;
    x86_cpu_def->vendor2 = edx;
    x86_cpu_def->vendor3 = ecx;

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    x86_cpu_def->model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    x86_cpu_def->stepping = eax & 0x0F;
    x86_cpu_def->ext_features = ecx;
    x86_cpu_def->features = edx;

    if (kvm_enabled() && x86_cpu_def->level >= 7) {
        x86_cpu_def->cpuid_7_0_ebx_features =
            kvm_arch_get_supported_cpuid(kvm_state, 0x7, 0, R_EBX);
    } else {
        x86_cpu_def->cpuid_7_0_ebx_features = 0;
    }

    host_cpuid(0x80000000, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->xlevel = eax;

    host_cpuid(0x80000001, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->ext2_features = edx;
    x86_cpu_def->ext3_features = ecx;
    cpu_x86_fill_model_id(x86_cpu_def->model_id);
    x86_cpu_def->vendor_override = 0;

    /* Call Centaur's CPUID instruction. */
    if (x86_cpu_def->vendor1 == CPUID_VENDOR_VIA_1 &&
        x86_cpu_def->vendor2 == CPUID_VENDOR_VIA_2 &&
        x86_cpu_def->vendor3 == CPUID_VENDOR_VIA_3) {
        host_cpuid(0xC0000000, 0, &eax, &ebx, &ecx, &edx);
        if (eax >= 0xC0000001) {
            /* Support VIA max extended level */
            x86_cpu_def->xlevel2 = eax;
            host_cpuid(0xC0000001, 0, &eax, &ebx, &ecx, &edx);
            x86_cpu_def->ext4_features = edx;
        }
    }

    /*
     * Every SVM feature requires emulation support in KVM - so we can't just
     * read the host features here.  KVM might even support SVM features not
     * available on the host hardware.  Just set all bits and mask out the
     * unsupported ones later.
     */
    x86_cpu_def->svm_features = -1;
    return 0;
}
static int unavailable_host_feature(struct model_features_t *f, uint32_t mask)
{
    int i;

    for (i = 0; i < 32; ++i)
        if (1 << i & mask) {
            fprintf(stderr, "warning: host cpuid %04x_%04x lacks requested"
                " flag '%s' [0x%08x]\n",
                f->cpuid >> 16, f->cpuid & 0xffff,
                f->flag_names[i] ? f->flag_names[i] : "[reserved]", mask);
            break;
        }
    return 0;
}
/* best effort attempt to inform the user that requested cpu flags aren't
 * making their way to the guest.  Note: ft[].check_feat ideally should be
 * specified via a guest_def field to suppress report of extraneous flags.
 */
static int check_features_against_host(x86_def_t *guest_def)
{
    x86_def_t host_def;
    uint32_t mask;
    int rv, i;
    struct model_features_t ft[] = {
        {&guest_def->features, &host_def.features,
            ~0, feature_name, 0x00000000},
        {&guest_def->ext_features, &host_def.ext_features,
            ~CPUID_EXT_HYPERVISOR, ext_feature_name, 0x00000001},
        {&guest_def->ext2_features, &host_def.ext2_features,
            ~PPRO_FEATURES, ext2_feature_name, 0x80000000},
        {&guest_def->ext3_features, &host_def.ext3_features,
            ~CPUID_EXT3_SVM, ext3_feature_name, 0x80000001}};

    cpu_x86_fill_host(&host_def);
    for (rv = 0, i = 0; i < ARRAY_SIZE(ft); ++i)
        for (mask = 1; mask; mask <<= 1)
            if (ft[i].check_feat & mask && *ft[i].guest_feat & mask &&
                !(*ft[i].host_feat & mask)) {
                unavailable_host_feature(&ft[i], mask);
                rv = 1;
            }
    return rv;
}
static void x86_cpuid_version_get_family(Object *obj, Visitor *v, void *opaque,
                                         const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = (env->cpuid_version >> 8) & 0xf;
    if (value == 0xf) {
        value += (env->cpuid_version >> 20) & 0xff;
    }
    visit_type_int(v, &value, name, errp);
}

static void x86_cpuid_version_set_family(Object *obj, Visitor *v, void *opaque,
                                         const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff + 0xf;
    int64_t value;

    visit_type_int(v, &value, name, errp);
    if (error_is_set(errp)) {
        return;
    }
    if (value < min || value > max) {
        error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                  name ? name : "null", value, min, max);
        return;
    }

    env->cpuid_version &= ~0xff00f00;
    if (value > 0x0f) {
        env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
    } else {
        env->cpuid_version |= value << 8;
    }
}
static void x86_cpuid_version_get_model(Object *obj, Visitor *v, void *opaque,
                                        const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = (env->cpuid_version >> 4) & 0xf;
    value |= ((env->cpuid_version >> 16) & 0xf) << 4;
    visit_type_int(v, &value, name, errp);
}
static void x86_cpuid_version_set_model(Object *obj, Visitor *v, void *opaque,
                                        const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff;
    int64_t value;

    visit_type_int(v, &value, name, errp);
    if (error_is_set(errp)) {
        return;
    }
    if (value < min || value > max) {
        error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                  name ? name : "null", value, min, max);
        return;
    }

    env->cpuid_version &= ~0xf00f0;
    env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
}
static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
                                           void *opaque, const char *name,
                                           Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = env->cpuid_version & 0xf;
    visit_type_int(v, &value, name, errp);
}
static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
                                           void *opaque, const char *name,
                                           Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xf;
    int64_t value;

    visit_type_int(v, &value, name, errp);
    if (error_is_set(errp)) {
        return;
    }
    if (value < min || value > max) {
        error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                  name ? name : "null", value, min, max);
        return;
    }

    env->cpuid_version &= ~0xf;
    env->cpuid_version |= value & 0xf;
}
static void x86_cpuid_get_level(Object *obj, Visitor *v, void *opaque,
                                const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);

    visit_type_uint32(v, &cpu->env.cpuid_level, name, errp);
}

static void x86_cpuid_set_level(Object *obj, Visitor *v, void *opaque,
                                const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);

    visit_type_uint32(v, &cpu->env.cpuid_level, name, errp);
}
static void x86_cpuid_get_xlevel(Object *obj, Visitor *v, void *opaque,
                                 const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);

    visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp);
}

static void x86_cpuid_set_xlevel(Object *obj, Visitor *v, void *opaque,
                                 const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);

    visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp);
}
static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    char *value;
    int i;

    value = (char *)g_malloc(12 + 1);
    for (i = 0; i < 4; i++) {
        value[i] = env->cpuid_vendor1 >> (8 * i);
        value[i + 4] = env->cpuid_vendor2 >> (8 * i);
        value[i + 8] = env->cpuid_vendor3 >> (8 * i);
    }
    value[12] = '\0';
    return value;
}
static void x86_cpuid_set_vendor(Object *obj, const char *value,
                                 Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int i;

    if (strlen(value) != 12) {
        error_set(errp, QERR_PROPERTY_VALUE_BAD, "",
                  "vendor", value);
        return;
    }

    env->cpuid_vendor1 = 0;
    env->cpuid_vendor2 = 0;
    env->cpuid_vendor3 = 0;
    for (i = 0; i < 4; i++) {
        env->cpuid_vendor1 |= ((uint8_t)value[i]) << (8 * i);
        env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
        env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
    }
    env->cpuid_vendor_override = 1;
}
static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    char *value;
    int i;

    value = g_malloc(48 + 1);
    for (i = 0; i < 48; i++) {
        value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
    }
    value[48] = '\0';
    return value;
}
static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
                                   Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int c, len, i;

    if (model_id == NULL) {
        model_id = "";
    }
    len = strlen(model_id);
    memset(env->cpuid_model, 0, 48);
    for (i = 0; i < 48; i++) {
        if (i >= len) {
            c = '\0';
        } else {
            c = (uint8_t)model_id[i];
        }
        env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
    }
}
static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, void *opaque,
                                   const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    int64_t value;

    value = cpu->env.tsc_khz * 1000;
    visit_type_int(v, &value, name, errp);
}
static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, void *opaque,
                                   const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    const int64_t min = 0;
    const int64_t max = INT_MAX;
    int64_t value;

    visit_type_int(v, &value, name, errp);
    if (error_is_set(errp)) {
        return;
    }
    if (value < min || value > max) {
        error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                  name ? name : "null", value, min, max);
        return;
    }

    cpu->env.tsc_khz = value / 1000;
}
static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
{
    unsigned int i;
    x86_def_t *def;

    char *s = g_strdup(cpu_model);
    char *featurestr, *name = strtok(s, ",");
    /* Features to be added */
    uint32_t plus_features = 0, plus_ext_features = 0;
    uint32_t plus_ext2_features = 0, plus_ext3_features = 0;
    uint32_t plus_kvm_features = 0, plus_svm_features = 0;
    /* Features to be removed */
    uint32_t minus_features = 0, minus_ext_features = 0;
    uint32_t minus_ext2_features = 0, minus_ext3_features = 0;
    uint32_t minus_kvm_features = 0, minus_svm_features = 0;
    uint32_t numvalue;

    for (def = x86_defs; def; def = def->next)
        if (name && !strcmp(name, def->name))
            break;
    if (kvm_enabled() && name && strcmp(name, "host") == 0) {
        cpu_x86_fill_host(x86_cpu_def);
    } else if (!def) {
        goto error;
    } else {
        memcpy(x86_cpu_def, def, sizeof(*def));
    }

    plus_kvm_features = ~0; /* not supported bits will be filtered out later */

    add_flagname_to_bitmaps("hypervisor", &plus_features,
            &plus_ext_features, &plus_ext2_features, &plus_ext3_features,
            &plus_kvm_features, &plus_svm_features);

    featurestr = strtok(NULL, ",");

    while (featurestr) {
        char *val;
        if (featurestr[0] == '+') {
            add_flagname_to_bitmaps(featurestr + 1, &plus_features,
                            &plus_ext_features, &plus_ext2_features,
                            &plus_ext3_features, &plus_kvm_features,
                            &plus_svm_features);
        } else if (featurestr[0] == '-') {
            add_flagname_to_bitmaps(featurestr + 1, &minus_features,
                            &minus_ext_features, &minus_ext2_features,
                            &minus_ext3_features, &minus_kvm_features,
                            &minus_svm_features);
        } else if ((val = strchr(featurestr, '='))) {
            *val = 0;
            val++;
            if (!strcmp(featurestr, "family")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err || numvalue > 0xff + 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->family = numvalue;
            } else if (!strcmp(featurestr, "model")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err || numvalue > 0xff) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->model = numvalue;
            } else if (!strcmp(featurestr, "stepping")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err || numvalue > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->stepping = numvalue;
            } else if (!strcmp(featurestr, "level")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->level = numvalue;
            } else if (!strcmp(featurestr, "xlevel")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                if (numvalue < 0x80000000) {
                    numvalue += 0x80000000;
                }
                x86_cpu_def->xlevel = numvalue;
            } else if (!strcmp(featurestr, "vendor")) {
                if (strlen(val) != 12) {
                    fprintf(stderr, "vendor string must be 12 chars long\n");
                    goto error;
                }
                x86_cpu_def->vendor1 = 0;
                x86_cpu_def->vendor2 = 0;
                x86_cpu_def->vendor3 = 0;
                for (i = 0; i < 4; i++) {
                    x86_cpu_def->vendor1 |= ((uint8_t)val[i]) << (8 * i);
                    x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
                    x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
                }
                x86_cpu_def->vendor_override = 1;
            } else if (!strcmp(featurestr, "model_id")) {
                pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
                        val);
            } else if (!strcmp(featurestr, "tsc_freq")) {
                int64_t tsc_freq;
                char *err;

                tsc_freq = strtosz_suffix_unit(val, &err,
                                               STRTOSZ_DEFSUFFIX_B, 1000);
                if (tsc_freq < 0 || *err) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->tsc_khz = tsc_freq / 1000;
            } else if (!strcmp(featurestr, "hv_spinlocks")) {
                char *err;
                numvalue = strtoul(val, &err, 0);
                if (!*val || *err) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                hyperv_set_spinlock_retries(numvalue);
            } else {
                fprintf(stderr, "unrecognized feature %s\n", featurestr);
                goto error;
            }
        } else if (!strcmp(featurestr, "check")) {
            check_cpuid = 1;
        } else if (!strcmp(featurestr, "enforce")) {
            check_cpuid = enforce_cpuid = 1;
        } else if (!strcmp(featurestr, "hv_relaxed")) {
            hyperv_enable_relaxed_timing(true);
        } else if (!strcmp(featurestr, "hv_vapic")) {
            hyperv_enable_vapic_recommended(true);
        } else {
            fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
            goto error;
        }
        featurestr = strtok(NULL, ",");
    }
    x86_cpu_def->features |= plus_features;
    x86_cpu_def->ext_features |= plus_ext_features;
    x86_cpu_def->ext2_features |= plus_ext2_features;
    x86_cpu_def->ext3_features |= plus_ext3_features;
    x86_cpu_def->kvm_features |= plus_kvm_features;
    x86_cpu_def->svm_features |= plus_svm_features;
    x86_cpu_def->features &= ~minus_features;
    x86_cpu_def->ext_features &= ~minus_ext_features;
    x86_cpu_def->ext2_features &= ~minus_ext2_features;
    x86_cpu_def->ext3_features &= ~minus_ext3_features;
    x86_cpu_def->kvm_features &= ~minus_kvm_features;
    x86_cpu_def->svm_features &= ~minus_svm_features;
    if (check_features_against_host(x86_cpu_def) && enforce_cpuid)
        goto error;
    g_free(s);
    return 0;

error:
    g_free(s);
    return -1;
}
1034 * selected by fbits. indicate truncation at bufsize in the event of overflow.
1035 * if flags, suppress names undefined in featureset.
1037 static void listflags(char *buf
, int bufsize
, uint32_t fbits
,
1038 const char **featureset
, uint32_t flags
)
1040 const char **p
= &featureset
[31];
1044 b
= 4 <= bufsize
? buf
+ (bufsize
-= 3) - 1 : NULL
;
1046 for (q
= buf
, bit
= 31; fbits
&& bufsize
; --p
, fbits
&= ~(1 << bit
), --bit
)
1047 if (fbits
& 1 << bit
&& (*p
|| !flags
)) {
1049 nc
= snprintf(q
, bufsize
, "%s%s", q
== buf
? "" : " ", *p
);
1051 nc
= snprintf(q
, bufsize
, "%s[%d]", q
== buf
? "" : " ", bit
);
1052 if (bufsize
<= nc
) {
1054 memcpy(b
, "...", sizeof("..."));
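/* Names are emitted from bit 31 down to bit 0, so for example
 * listflags(buf, sizeof(buf), 0x3, feature_name, 0) leaves "vme fpu" in buf.
 * An undefined bit is printed as its index, e.g. "[10]", and overflow is
 * indicated by a trailing "...". */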
/* generate CPU information:
 * -?        list model names
 * -?model   list model names/IDs
 * -?dump    output all model (x86_def_t) data
 * -?cpuid   list all recognized cpuid flag names
 */
void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
{
    unsigned char model = !strcmp("?model", optarg);
    unsigned char dump = !strcmp("?dump", optarg);
    unsigned char cpuid = !strcmp("?cpuid", optarg);
    x86_def_t *def;
    char buf[256];

    if (cpuid) {
        (*cpu_fprintf)(f, "Recognized CPUID flags:\n");
        listflags(buf, sizeof (buf), (uint32_t)~0, feature_name, 1);
        (*cpu_fprintf)(f, " f_edx: %s\n", buf);
        listflags(buf, sizeof (buf), (uint32_t)~0, ext_feature_name, 1);
        (*cpu_fprintf)(f, " f_ecx: %s\n", buf);
        listflags(buf, sizeof (buf), (uint32_t)~0, ext2_feature_name, 1);
        (*cpu_fprintf)(f, " extf_edx: %s\n", buf);
        listflags(buf, sizeof (buf), (uint32_t)~0, ext3_feature_name, 1);
        (*cpu_fprintf)(f, " extf_ecx: %s\n", buf);
        return;
    }

    for (def = x86_defs; def; def = def->next) {
        snprintf(buf, sizeof (buf), def->flags ? "[%s]": "%s", def->name);
        if (model || dump) {
            (*cpu_fprintf)(f, "x86 %16s  %-48s\n", buf, def->model_id);
        } else {
            (*cpu_fprintf)(f, "x86 %16s\n", buf);
        }
        if (dump) {
            memcpy(buf, &def->vendor1, sizeof (def->vendor1));
            memcpy(buf + 4, &def->vendor2, sizeof (def->vendor2));
            memcpy(buf + 8, &def->vendor3, sizeof (def->vendor3));
            buf[12] = '\0';
            (*cpu_fprintf)(f,
                " family %d model %d stepping %d level %d xlevel 0x%x"
                " vendor \"%s\"\n",
                def->family, def->model, def->stepping, def->level,
                def->xlevel, buf);
            listflags(buf, sizeof (buf), def->features, feature_name, 0);
            (*cpu_fprintf)(f, " feature_edx %08x (%s)\n", def->features,
                buf);
            listflags(buf, sizeof (buf), def->ext_features, ext_feature_name,
                0);
            (*cpu_fprintf)(f, " feature_ecx %08x (%s)\n", def->ext_features,
                buf);
            listflags(buf, sizeof (buf), def->ext2_features, ext2_feature_name,
                0);
            (*cpu_fprintf)(f, " extfeature_edx %08x (%s)\n",
                def->ext2_features, buf);
            listflags(buf, sizeof (buf), def->ext3_features, ext3_feature_name,
                0);
            (*cpu_fprintf)(f, " extfeature_ecx %08x (%s)\n",
                def->ext3_features, buf);
            (*cpu_fprintf)(f, "\n");
        }
    }
    if (kvm_enabled()) {
        (*cpu_fprintf)(f, "x86 %16s\n", "[host]");
    }
}
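/* With "-cpu ?model" the loop above prints one line per definition, e.g.
 * (illustrative):
 *
 *   x86          [kvm64]  Common KVM processor
 *
 * Bracketed names are the built-in definitions (x86_cpudef_setup() below
 * sets def->flags for them); models loaded from a "cpudef" config section
 * are printed without brackets.
 */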
CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    x86_def_t *def;

    for (def = x86_defs; def; def = def->next) {
        CpuDefinitionInfoList *entry;
        CpuDefinitionInfo *info;

        info = g_malloc0(sizeof(*info));
        info->name = g_strdup(def->name);

        entry = g_malloc0(sizeof(*entry));
        entry->value = info;
        entry->next = cpu_list;
        cpu_list = entry;
    }

    return cpu_list;
}
*cpu
, const char *cpu_model
)
1152 CPUX86State
*env
= &cpu
->env
;
1153 x86_def_t def1
, *def
= &def1
;
1154 Error
*error
= NULL
;
1156 memset(def
, 0, sizeof(*def
));
1158 if (cpu_x86_find_by_name(def
, cpu_model
) < 0)
1161 env
->cpuid_vendor1
= def
->vendor1
;
1162 env
->cpuid_vendor2
= def
->vendor2
;
1163 env
->cpuid_vendor3
= def
->vendor3
;
1165 env
->cpuid_vendor1
= CPUID_VENDOR_INTEL_1
;
1166 env
->cpuid_vendor2
= CPUID_VENDOR_INTEL_2
;
1167 env
->cpuid_vendor3
= CPUID_VENDOR_INTEL_3
;
1169 env
->cpuid_vendor_override
= def
->vendor_override
;
1170 object_property_set_int(OBJECT(cpu
), def
->level
, "level", &error
);
1171 object_property_set_int(OBJECT(cpu
), def
->family
, "family", &error
);
1172 object_property_set_int(OBJECT(cpu
), def
->model
, "model", &error
);
1173 object_property_set_int(OBJECT(cpu
), def
->stepping
, "stepping", &error
);
1174 env
->cpuid_features
= def
->features
;
1175 env
->cpuid_ext_features
= def
->ext_features
;
1176 env
->cpuid_ext2_features
= def
->ext2_features
;
1177 env
->cpuid_ext3_features
= def
->ext3_features
;
1178 object_property_set_int(OBJECT(cpu
), def
->xlevel
, "xlevel", &error
);
1179 env
->cpuid_kvm_features
= def
->kvm_features
;
1180 env
->cpuid_svm_features
= def
->svm_features
;
1181 env
->cpuid_ext4_features
= def
->ext4_features
;
1182 env
->cpuid_7_0_ebx
= def
->cpuid_7_0_ebx_features
;
1183 env
->cpuid_xlevel2
= def
->xlevel2
;
1184 object_property_set_int(OBJECT(cpu
), (int64_t)def
->tsc_khz
* 1000,
1185 "tsc-frequency", &error
);
1186 if (!kvm_enabled()) {
1187 env
->cpuid_features
&= TCG_FEATURES
;
1188 env
->cpuid_ext_features
&= TCG_EXT_FEATURES
;
1189 env
->cpuid_ext2_features
&= (TCG_EXT2_FEATURES
1190 #ifdef TARGET_X86_64
1191 | CPUID_EXT2_SYSCALL
| CPUID_EXT2_LM
1194 env
->cpuid_ext3_features
&= TCG_EXT3_FEATURES
;
1195 env
->cpuid_svm_features
&= TCG_SVM_FEATURES
;
1197 object_property_set_str(OBJECT(cpu
), def
->model_id
, "model-id", &error
);
1198 if (error_is_set(&error
)) {
#if !defined(CONFIG_USER_ONLY)
/* copy vendor id string to 32 bit register, nul pad as needed
 */
static void cpyid(const char *s, uint32_t *id)
{
    char *d = (char *)id;
    int i;

    for (i = sizeof (*id); i--; )
        *d++ = *s ? *s++ : '\0';
}
/* interpret radix and convert from string to arbitrary scalar,
 * otherwise flag failure
 */
#define setscalar(pval, str, perr)                      \
{                                                       \
    char *pend;                                         \
    unsigned long ul;                                   \
                                                        \
    ul = strtoul(str, &pend, 0);                        \
    *str && !*pend ? (*pval = ul) : (*perr = 1);        \
}
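/* setscalar() accepts decimal, octal ("0...") and hex ("0x...") input.  For
 * example, setscalar(&def->xlevel, "0x80000008", &err) stores 0x80000008,
 * while an empty string or trailing garbage sets err to 1 instead. */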
/* map cpuid options to feature bits, otherwise return failure
 * (option tags in *str are delimited by whitespace)
 */
static void setfeatures(uint32_t *pval, const char *str,
    const char **featureset, int *perr)
{
    const char *p, *q;

    for (q = p = str; *p || *q; q = p) {
        while (iswhite(*p))
            q = ++p;
        while (*p && !iswhite(*p))
            ++p;
        if (!*q && !*p)
            return;
        if (!lookup_feature(pval, q, p, featureset)) {
            fprintf(stderr, "error: feature \"%.*s\" not available in set\n",
                (int)(p - q), q);
            *perr = 1;
            return;
        }
    }
}
1255 static int cpudef_setfield(const char *name
, const char *str
, void *opaque
)
1257 x86_def_t
*def
= opaque
;
1260 if (!strcmp(name
, "name")) {
1261 g_free((void *)def
->name
);
1262 def
->name
= g_strdup(str
);
1263 } else if (!strcmp(name
, "model_id")) {
1264 strncpy(def
->model_id
, str
, sizeof (def
->model_id
));
1265 } else if (!strcmp(name
, "level")) {
1266 setscalar(&def
->level
, str
, &err
)
1267 } else if (!strcmp(name
, "vendor")) {
1268 cpyid(&str
[0], &def
->vendor1
);
1269 cpyid(&str
[4], &def
->vendor2
);
1270 cpyid(&str
[8], &def
->vendor3
);
1271 } else if (!strcmp(name
, "family")) {
1272 setscalar(&def
->family
, str
, &err
)
1273 } else if (!strcmp(name
, "model")) {
1274 setscalar(&def
->model
, str
, &err
)
1275 } else if (!strcmp(name
, "stepping")) {
1276 setscalar(&def
->stepping
, str
, &err
)
1277 } else if (!strcmp(name
, "feature_edx")) {
1278 setfeatures(&def
->features
, str
, feature_name
, &err
);
1279 } else if (!strcmp(name
, "feature_ecx")) {
1280 setfeatures(&def
->ext_features
, str
, ext_feature_name
, &err
);
1281 } else if (!strcmp(name
, "extfeature_edx")) {
1282 setfeatures(&def
->ext2_features
, str
, ext2_feature_name
, &err
);
1283 } else if (!strcmp(name
, "extfeature_ecx")) {
1284 setfeatures(&def
->ext3_features
, str
, ext3_feature_name
, &err
);
1285 } else if (!strcmp(name
, "xlevel")) {
1286 setscalar(&def
->xlevel
, str
, &err
)
1288 fprintf(stderr
, "error: unknown option [%s = %s]\n", name
, str
);
1292 fprintf(stderr
, "error: bad option value [%s = %s]\n", name
, str
);
/* register config file entry as x86_def_t
 */
static int cpudef_register(QemuOpts *opts, void *opaque)
{
    x86_def_t *def = g_malloc0(sizeof (x86_def_t));

    qemu_opt_foreach(opts, cpudef_setfield, def, 1);
    def->next = x86_defs;
    x86_defs = def;
    return (0);
}
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->cpuid_features &= ~CPUID_APIC;
}

#endif /* !CONFIG_USER_ONLY */
/* register "cpudef" models defined in configuration file.  Here we first
 * preload any built-in definitions
 */
void x86_cpudef_setup(void)
{
    int i, j;
    static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };

    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
        builtin_x86_defs[i].next = x86_defs;
        builtin_x86_defs[i].flags = 1;

        /* Look for specific "cpudef" models that */
        /* have the QEMU version in .model_id */
        for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
            if (strcmp(model_with_versions[j], builtin_x86_defs[i].name) == 0) {
                pstrcpy(builtin_x86_defs[i].model_id,
                        sizeof(builtin_x86_defs[i].model_id),
                        "QEMU Virtual CPU version ");
                pstrcat(builtin_x86_defs[i].model_id,
                        sizeof(builtin_x86_defs[i].model_id),
                        qemu_get_version());
                break;
            }
        }

        x86_defs = &builtin_x86_defs[i];
    }
#if !defined(CONFIG_USER_ONLY)
    qemu_opts_foreach(qemu_find_opts("cpudef"), cpudef_register, NULL, 0);
#endif
}
*env
, uint32_t *ebx
,
1347 uint32_t *ecx
, uint32_t *edx
)
1349 *ebx
= env
->cpuid_vendor1
;
1350 *edx
= env
->cpuid_vendor2
;
1351 *ecx
= env
->cpuid_vendor3
;
1353 /* sysenter isn't supported on compatibility mode on AMD, syscall
1354 * isn't supported in compatibility mode on Intel.
1355 * Normally we advertise the actual cpu vendor, but you can override
1356 * this if you want to use KVM's sysenter/syscall emulation
1357 * in compatibility mode and when doing cross vendor migration
1359 if (kvm_enabled() && ! env
->cpuid_vendor_override
) {
1360 host_cpuid(0, 0, NULL
, ebx
, ecx
, edx
);
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel) {
            if (env->cpuid_xlevel2 > 0) {
                /* Handle the Centaur's CPUID instruction. */
                if (index > env->cpuid_xlevel2) {
                    index = env->cpuid_xlevel2;
                } else if (index < 0xC0000000) {
                    index = env->cpuid_xlevel;
                }
            } else {
                index = env->cpuid_xlevel;
            }
        }
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch (index) {
    case 0:
        *eax = env->cpuid_level;
        get_cpuid_vendor(env, ebx, ecx, edx);
        break;
    case 1:
        *eax = env->cpuid_version;
        *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->cpuid_ext_features;
        *edx = env->cpuid_features;
        if (env->nr_cores * env->nr_threads > 1) {
            *ebx |= (env->nr_cores * env->nr_threads) << 16;
            *edx |= 1 << 28;    /* HTT bit */
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (env->nr_cores > 1) {
            *eax = (env->nr_cores - 1) << 26;
        }
        switch (count) {
        case 0: /* L1 dcache info */
            break;
        case 1: /* L1 icache info */
            break;
        case 2: /* L2 cache info */
            if (env->nr_threads > 1) {
                *eax |= (env->nr_threads - 1) << 14;
            }
            break;
        default: /* end of info */
            break;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        break;
    case 6:
        /* Thermal and Power Leaf */
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->cpuid_7_0_ebx; /* Feature flags */
            *ecx = 0; /* Reserved */
            *edx = 0; /* Reserved */
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled()) {
            KVMState *s = env->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        }
        break;
    case 0xD:
        /* Processor Extended State */
        if (!(env->cpuid_ext_features & CPUID_EXT_XSAVE)) {
            break;
        }
        if (kvm_enabled()) {
            KVMState *s = env->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xd, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xd, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xd, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xd, count, R_EDX);
        }
        break;
    case 0x80000000:
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ecx = env->cpuid_ext3_features;
        *edx = env->cpuid_ext2_features;

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (env->nr_cores * env->nr_threads > 1) {
            uint32_t tebx, tecx, tedx;
            get_cpuid_vendor(env, &tebx, &tecx, &tedx);
            if (tebx != CPUID_VENDOR_INTEL_1 ||
                tedx != CPUID_VENDOR_INTEL_2 ||
                tecx != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        /* XXX: This value must match the one used in the MMU code. */
        if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
            /* 64 bit processor */
            /* XXX: The physical address space is limited to 42 bits in exec.c. */
            *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
        } else {
            if (env->cpuid_features & CPUID_PSE36)
                *eax = 0x00000024; /* 36 bits physical */
            else
                *eax = 0x00000020; /* 32 bits physical */
        }
        if (env->nr_cores * env->nr_threads > 1) {
            *ecx |= (env->nr_cores * env->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        if (env->cpuid_ext3_features & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *edx = env->cpuid_svm_features; /* optional features */
        }
        break;
    case 0xC0000000:
        *eax = env->cpuid_xlevel2;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *edx = env->cpuid_ext4_features;
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        break;
    default:
        /* reserved values: zero */
        break;
    }
}
/* CPUClass::reset() */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    int i;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
    }

    xcc->parent_reset(s);

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->regs[R_EDX] = env->cpuid_version;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    env->mxcsr = 0x1f80;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);

#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    if (env->cpu_index == 0) {
        apic_designate_bsp(env->apic_state);
    }

    env->halted = !cpu_is_bsp(cpu);
#endif
}
#ifndef CONFIG_USER_ONLY
bool cpu_is_bsp(X86CPU *cpu)
{
    return cpu_get_apic_base(cpu->env.apic_state) & MSR_IA32_APICBASE_BSP;
}
/* TODO: remove me, when reset over QOM tree is implemented */
static void x86_cpu_machine_reset_cb(void *opaque)
{
    X86CPU *cpu = opaque;
    cpu_reset(CPU(cpu));
}
#endif
static void mce_init(X86CPU *cpu)
{
    CPUX86State *cenv = &cpu->env;
    unsigned int bank;

    if (((cenv->cpuid_version >> 8) & 0xf) >= 6
        && (cenv->cpuid_features & (CPUID_MCE | CPUID_MCA)) ==
            (CPUID_MCE | CPUID_MCA)) {
        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
        cenv->mcg_ctl = ~(uint64_t)0;
        for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
            cenv->mce_banks[bank * 4] = ~(uint64_t)0;
        }
    }
}

void x86_cpu_realize(Object *obj, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
#endif

    mce_init(cpu);
    qemu_init_vcpu(&cpu->env);
    cpu_reset(CPU(cpu));
}
static void x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    static int inited;

    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add(obj, "level", "int",
                        x86_cpuid_get_level,
                        x86_cpuid_set_level, NULL, NULL, NULL);
    object_property_add(obj, "xlevel", "int",
                        x86_cpuid_get_xlevel,
                        x86_cpuid_set_xlevel, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);

    env->cpuid_apic_id = env->cpu_index;

    /* init various static tables used in TCG mode */
    if (tcg_enabled() && !inited) {
        inited = 1;
        optimize_flags_init();
#ifndef CONFIG_USER_ONLY
        cpu_set_debug_excp_handler(breakpoint_handler);
#endif
    }
}
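/* The properties registered above make the CPUID fields scriptable through
 * QOM.  Illustrative use from C (error handling elided):
 *
 *     Error *err = NULL;
 *     object_property_set_int(OBJECT(cpu), 6, "family", &err);
 *     object_property_set_str(OBJECT(cpu), "My virtual CPU", "model-id", &err);
 *
 * cpu_x86_register() above applies an x86_def_t through these same calls.
 */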
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);

    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
}
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};
static void x86_cpu_register_types(void)
{
    type_register_static(&x86_cpu_type_info);
}

type_init(x86_cpu_register_types)