target-i386/cpu.c
1 /*
2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include <stdlib.h>
20 #include <stdio.h>
21 #include <string.h>
22 #include <inttypes.h>
24 #include "cpu.h"
25 #include "kvm.h"
27 #include "qemu-option.h"
28 #include "qemu-config.h"
30 #include "qapi/qapi-visit-core.h"
32 #include "hyperv.h"
34 #include "hw/hw.h"
36 /* feature flags taken from "Intel Processor Identification and the CPUID
37 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
38 * between feature naming conventions, aliases may be added.
40 static const char *feature_name[] = {
41 "fpu", "vme", "de", "pse",
42 "tsc", "msr", "pae", "mce",
43 "cx8", "apic", NULL, "sep",
44 "mtrr", "pge", "mca", "cmov",
45 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
46 NULL, "ds" /* Intel dts */, "acpi", "mmx",
47 "fxsr", "sse", "sse2", "ss",
48 "ht" /* Intel htt */, "tm", "ia64", "pbe",
50 static const char *ext_feature_name[] = {
51 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
52 "ds_cpl", "vmx", "smx", "est",
53 "tm2", "ssse3", "cid", NULL,
54 "fma", "cx16", "xtpr", "pdcm",
55 NULL, "pcid", "dca", "sse4.1|sse4_1",
56 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
57 "tsc-deadline", "aes", "xsave", "osxsave",
58 "avx", NULL, NULL, "hypervisor",
60 static const char *ext2_feature_name[] = {
61 "fpu", "vme", "de", "pse",
62 "tsc", "msr", "pae", "mce",
63 "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall",
64 "mtrr", "pge", "mca", "cmov",
65 "pat", "pse36", NULL, NULL /* Linux mp */,
66 "nx|xd", NULL, "mmxext", "mmx",
67 "fxsr", "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
68 NULL, "lm|i64", "3dnowext", "3dnow",
70 static const char *ext3_feature_name[] = {
71 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
72 "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
73 "3dnowprefetch", "osvw", "ibs", "xop",
74 "skinit", "wdt", NULL, NULL,
75 "fma4", NULL, "cvt16", "nodeid_msr",
76 NULL, NULL, NULL, NULL,
77 NULL, NULL, NULL, NULL,
78 NULL, NULL, NULL, NULL,
81 static const char *kvm_feature_name[] = {
82 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock", "kvm_asyncpf", NULL, "kvm_pv_eoi", NULL,
83 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
84 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
85 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
88 static const char *svm_feature_name[] = {
89 "npt", "lbrv", "svm_lock", "nrip_save",
90 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
91 NULL, NULL, "pause_filter", NULL,
92 "pfthreshold", NULL, NULL, NULL,
93 NULL, NULL, NULL, NULL,
94 NULL, NULL, NULL, NULL,
95 NULL, NULL, NULL, NULL,
96 NULL, NULL, NULL, NULL,
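/* Illustrative note (not part of the original file): in all of the name
 * tables above the array index equals the bit position in the corresponding
 * CPUID register, so a successful name lookup yields the feature mask
 * directly.  A minimal, compile-guarded sketch:
 */
#if 0
/* "sse2" is feature_name[26], i.e. bit 26 of CPUID.01H:EDX (CPUID_SSE2) */
uint32_t sse2_mask = 1u << 26;
#endif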
99 /* collects per-function cpuid data
101 typedef struct model_features_t {
102 uint32_t *guest_feat;
103 uint32_t *host_feat;
104 uint32_t check_feat;
105 const char **flag_names;
106 uint32_t cpuid;
107 } model_features_t;
109 int check_cpuid = 0;
110 int enforce_cpuid = 0;
112 void host_cpuid(uint32_t function, uint32_t count,
113 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
115 #if defined(CONFIG_KVM)
116 uint32_t vec[4];
118 #ifdef __x86_64__
119 asm volatile("cpuid"
120 : "=a"(vec[0]), "=b"(vec[1]),
121 "=c"(vec[2]), "=d"(vec[3])
122 : "0"(function), "c"(count) : "cc");
123 #else
124 asm volatile("pusha \n\t"
125 "cpuid \n\t"
126 "mov %%eax, 0(%2) \n\t"
127 "mov %%ebx, 4(%2) \n\t"
128 "mov %%ecx, 8(%2) \n\t"
129 "mov %%edx, 12(%2) \n\t"
130 "popa"
131 : : "a"(function), "c"(count), "S"(vec)
132 : "memory", "cc");
133 #endif
135 if (eax)
136 *eax = vec[0];
137 if (ebx)
138 *ebx = vec[1];
139 if (ecx)
140 *ecx = vec[2];
141 if (edx)
142 *edx = vec[3];
143 #endif
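/* Illustrative usage sketch (not part of the original file): host_cpuid()
 * executes CPUID on the host (only when CONFIG_KVM is defined, otherwise the
 * outputs stay untouched), so leaf 0 yields the host vendor string split
 * across EBX/EDX/ECX.  A minimal, compile-guarded example:
 */
#if 0
static void print_host_vendor(void)
{
    uint32_t eax, ebx, ecx, edx;
    char vendor[13];

    host_cpuid(0, 0, &eax, &ebx, &ecx, &edx);
    memcpy(vendor + 0, &ebx, 4);   /* e.g. "Genu" / "Auth" */
    memcpy(vendor + 4, &edx, 4);   /* e.g. "ineI" / "enti" */
    memcpy(vendor + 8, &ecx, 4);   /* e.g. "ntel" / "cAMD" */
    vendor[12] = '\0';
    printf("host vendor: %s, max basic leaf: %u\n", vendor, eax);
}
#endif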
146 #define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
148 /* general substring compare of *[s1..e1) and *[s2..e2). sx is start of
149 * a substring. ex if !NULL points to the first char after a substring,
150 * otherwise the string is assumed to be sized by a terminating nul.
151 * Return lexical ordering of *s1:*s2.
153 static int sstrcmp(const char *s1, const char *e1, const char *s2,
154 const char *e2)
156 for (;;) {
157 if (!*s1 || !*s2 || *s1 != *s2)
158 return (*s1 - *s2);
159 ++s1, ++s2;
160 if (s1 == e1 && s2 == e2)
161 return (0);
162 else if (s1 == e1)
163 return (*s2);
164 else if (s2 == e2)
165 return (*s1);
169 /* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
170 * '|' delimited (possibly empty) strings in which case search for a match
171 * within the alternatives proceeds left to right. Return 0 for success,
172 * non-zero otherwise.
174 static int altcmp(const char *s, const char *e, const char *altstr)
176 const char *p, *q;
178 for (q = p = altstr; ; ) {
179 while (*p && *p != '|')
180 ++p;
181 if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
182 return (0);
183 if (!*p)
184 return (1);
185 else
186 q = ++p;
190 /* search featureset for flag *[s..e), if found set corresponding bit in
191 * *pval and return true, otherwise return false
193 static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
194 const char **featureset)
196 uint32_t mask;
197 const char **ppc;
198 bool found = false;
200 for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
201 if (*ppc && !altcmp(s, e, *ppc)) {
202 *pval |= mask;
203 found = true;
206 return found;
209 static void add_flagname_to_bitmaps(const char *flagname, uint32_t *features,
210 uint32_t *ext_features,
211 uint32_t *ext2_features,
212 uint32_t *ext3_features,
213 uint32_t *kvm_features,
214 uint32_t *svm_features)
216 if (!lookup_feature(features, flagname, NULL, feature_name) &&
217 !lookup_feature(ext_features, flagname, NULL, ext_feature_name) &&
218 !lookup_feature(ext2_features, flagname, NULL, ext2_feature_name) &&
219 !lookup_feature(ext3_features, flagname, NULL, ext3_feature_name) &&
220 !lookup_feature(kvm_features, flagname, NULL, kvm_feature_name) &&
221 !lookup_feature(svm_features, flagname, NULL, svm_feature_name))
222 fprintf(stderr, "CPU feature %s not found\n", flagname);
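/* Illustrative usage sketch (not part of the original file): each name given
 * as "+name" or "-name" on the -cpu line is routed through this function into
 * the matching bitmap.  Aliases written as "sse4.2|sse4_2" in the tables mean
 * either spelling sets the same bit.  Compile-guarded example:
 */
#if 0
static void example_add_flag(void)
{
    uint32_t feat = 0, ext = 0, ext2 = 0, ext3 = 0, kvm = 0, svm = 0;

    /* matches the "sse4.2|sse4_2" entry, so bit 20 of the ECX feature word is set */
    add_flagname_to_bitmaps("sse4_2", &feat, &ext, &ext2, &ext3, &kvm, &svm);
    if (!(ext & (1u << 20))) {
        fprintf(stderr, "sse4_2 lookup failed\n");
    }
}
#endif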
225 typedef struct x86_def_t {
226 struct x86_def_t *next;
227 const char *name;
228 uint32_t level;
229 uint32_t vendor1, vendor2, vendor3;
230 int family;
231 int model;
232 int stepping;
233 int tsc_khz;
234 uint32_t features, ext_features, ext2_features, ext3_features;
235 uint32_t kvm_features, svm_features;
236 uint32_t xlevel;
237 char model_id[48];
238 int vendor_override;
239 uint32_t flags;
240 /* Store the results of Centaur's CPUID instructions */
241 uint32_t ext4_features;
242 uint32_t xlevel2;
243 /* The feature bits on CPUID[EAX=7,ECX=0].EBX */
244 uint32_t cpuid_7_0_ebx_features;
245 } x86_def_t;
247 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
248 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
249 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
250 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
251 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
252 CPUID_PSE36 | CPUID_FXSR)
253 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
254 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
255 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
256 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
257 CPUID_PAE | CPUID_SEP | CPUID_APIC)
258 #define EXT2_FEATURE_MASK 0x0183F3FF
260 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
261 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
262 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
263 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
264 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS)
265 /* partly implemented:
266 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64)
267 CPUID_PSE36 (needed for Solaris) */
268 /* missing:
269 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
270 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | \
271 CPUID_EXT_CX16 | CPUID_EXT_POPCNT | \
272 CPUID_EXT_HYPERVISOR)
273 /* missing:
274 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
275 CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_XSAVE */
276 #define TCG_EXT2_FEATURES ((TCG_FEATURES & EXT2_FEATURE_MASK) | \
277 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
278 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT)
279 /* missing:
280 CPUID_EXT2_PDPE1GB */
281 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
282 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
283 #define TCG_SVM_FEATURES 0
285 /* maintains list of cpu model definitions
287 static x86_def_t *x86_defs = {NULL};
289 /* built-in cpu model definitions (deprecated)
291 static x86_def_t builtin_x86_defs[] = {
293 .name = "qemu64",
294 .level = 4,
295 .vendor1 = CPUID_VENDOR_AMD_1,
296 .vendor2 = CPUID_VENDOR_AMD_2,
297 .vendor3 = CPUID_VENDOR_AMD_3,
298 .family = 6,
299 .model = 2,
300 .stepping = 3,
301 .features = PPRO_FEATURES |
302 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
303 CPUID_PSE36,
304 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT,
305 .ext2_features = (PPRO_FEATURES & EXT2_FEATURE_MASK) |
306 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
307 .ext3_features = CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
308 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
309 .xlevel = 0x8000000A,
312 .name = "phenom",
313 .level = 5,
314 .vendor1 = CPUID_VENDOR_AMD_1,
315 .vendor2 = CPUID_VENDOR_AMD_2,
316 .vendor3 = CPUID_VENDOR_AMD_3,
317 .family = 16,
318 .model = 2,
319 .stepping = 3,
320 .features = PPRO_FEATURES |
321 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
322 CPUID_PSE36 | CPUID_VME | CPUID_HT,
323 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
324 CPUID_EXT_POPCNT,
325 .ext2_features = (PPRO_FEATURES & EXT2_FEATURE_MASK) |
326 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
327 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
328 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
329 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
330 CPUID_EXT3_CR8LEG,
331 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
332 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
333 .ext3_features = CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
334 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
335 .svm_features = CPUID_SVM_NPT | CPUID_SVM_LBRV,
336 .xlevel = 0x8000001A,
337 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
340 .name = "core2duo",
341 .level = 10,
342 .family = 6,
343 .model = 15,
344 .stepping = 11,
345 .features = PPRO_FEATURES |
346 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
347 CPUID_PSE36 | CPUID_VME | CPUID_DTS | CPUID_ACPI | CPUID_SS |
348 CPUID_HT | CPUID_TM | CPUID_PBE,
349 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
350 CPUID_EXT_DTES64 | CPUID_EXT_DSCPL | CPUID_EXT_VMX | CPUID_EXT_EST |
351 CPUID_EXT_TM2 | CPUID_EXT_CX16 | CPUID_EXT_XTPR | CPUID_EXT_PDCM,
352 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
353 .ext3_features = CPUID_EXT3_LAHF_LM,
354 .xlevel = 0x80000008,
355 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
358 .name = "kvm64",
359 .level = 5,
360 .vendor1 = CPUID_VENDOR_INTEL_1,
361 .vendor2 = CPUID_VENDOR_INTEL_2,
362 .vendor3 = CPUID_VENDOR_INTEL_3,
363 .family = 15,
364 .model = 6,
365 .stepping = 1,
366 /* Missing: CPUID_VME, CPUID_HT */
367 .features = PPRO_FEATURES |
368 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
369 CPUID_PSE36,
370 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
371 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_CX16,
372 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
373 .ext2_features = (PPRO_FEATURES & EXT2_FEATURE_MASK) |
374 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
375 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
376 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
377 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
378 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
379 .ext3_features = 0,
380 .xlevel = 0x80000008,
381 .model_id = "Common KVM processor"
384 .name = "qemu32",
385 .level = 4,
386 .family = 6,
387 .model = 3,
388 .stepping = 3,
389 .features = PPRO_FEATURES,
390 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_POPCNT,
391 .xlevel = 0x80000004,
394 .name = "kvm32",
395 .level = 5,
396 .family = 15,
397 .model = 6,
398 .stepping = 1,
399 .features = PPRO_FEATURES |
400 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
401 .ext_features = CPUID_EXT_SSE3,
402 .ext2_features = PPRO_FEATURES & EXT2_FEATURE_MASK,
403 .ext3_features = 0,
404 .xlevel = 0x80000008,
405 .model_id = "Common 32-bit KVM processor"
408 .name = "coreduo",
409 .level = 10,
410 .family = 6,
411 .model = 14,
412 .stepping = 8,
413 .features = PPRO_FEATURES | CPUID_VME |
414 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_DTS | CPUID_ACPI |
415 CPUID_SS | CPUID_HT | CPUID_TM | CPUID_PBE,
416 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_VMX |
417 CPUID_EXT_EST | CPUID_EXT_TM2 | CPUID_EXT_XTPR | CPUID_EXT_PDCM,
418 .ext2_features = CPUID_EXT2_NX,
419 .xlevel = 0x80000008,
420 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
423 .name = "486",
424 .level = 1,
425 .family = 4,
426 .model = 0,
427 .stepping = 0,
428 .features = I486_FEATURES,
429 .xlevel = 0,
432 .name = "pentium",
433 .level = 1,
434 .family = 5,
435 .model = 4,
436 .stepping = 3,
437 .features = PENTIUM_FEATURES,
438 .xlevel = 0,
441 .name = "pentium2",
442 .level = 2,
443 .family = 6,
444 .model = 5,
445 .stepping = 2,
446 .features = PENTIUM2_FEATURES,
447 .xlevel = 0,
450 .name = "pentium3",
451 .level = 2,
452 .family = 6,
453 .model = 7,
454 .stepping = 3,
455 .features = PENTIUM3_FEATURES,
456 .xlevel = 0,
459 .name = "athlon",
460 .level = 2,
461 .vendor1 = CPUID_VENDOR_AMD_1,
462 .vendor2 = CPUID_VENDOR_AMD_2,
463 .vendor3 = CPUID_VENDOR_AMD_3,
464 .family = 6,
465 .model = 2,
466 .stepping = 3,
467 .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
468 .ext2_features = (PPRO_FEATURES & EXT2_FEATURE_MASK) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
469 .xlevel = 0x80000008,
472 .name = "n270",
473 /* the real N270 reports CPUID level 10 */
474 .level = 5,
475 .family = 6,
476 .model = 28,
477 .stepping = 2,
478 .features = PPRO_FEATURES |
479 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME | CPUID_DTS |
480 CPUID_ACPI | CPUID_SS | CPUID_HT | CPUID_TM | CPUID_PBE,
481 /* Some CPUs of this family lack CPUID_SEP (SYSENTER/SYSEXIT) */
482 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
483 CPUID_EXT_DSCPL | CPUID_EXT_EST | CPUID_EXT_TM2 | CPUID_EXT_XTPR,
484 .ext2_features = (PPRO_FEATURES & EXT2_FEATURE_MASK) | CPUID_EXT2_NX,
485 .ext3_features = CPUID_EXT3_LAHF_LM,
486 .xlevel = 0x8000000A,
487 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
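/* Illustrative sketch (not part of the original file): a new built-in model
 * is just another x86_def_t initializer in this array.  The entry below uses
 * hypothetical values, not a real product definition:
 */
#if 0
    {
        .name = "example-cpu",              /* hypothetical model name */
        .level = 4,
        .family = 6,
        .model = 3,
        .stepping = 3,
        .features = PPRO_FEATURES | CPUID_MTRR | CPUID_MCA,
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_CX16,
        .xlevel = 0x80000008,
        .model_id = "Example virtual CPU",
    },
#endif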
491 static int cpu_x86_fill_model_id(char *str)
493 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
494 int i;
496 for (i = 0; i < 3; i++) {
497 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
498 memcpy(str + i * 16 + 0, &eax, 4);
499 memcpy(str + i * 16 + 4, &ebx, 4);
500 memcpy(str + i * 16 + 8, &ecx, 4);
501 memcpy(str + i * 16 + 12, &edx, 4);
503 return 0;
506 static int cpu_x86_fill_host(x86_def_t *x86_cpu_def)
508 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
510 x86_cpu_def->name = "host";
511 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
512 x86_cpu_def->level = eax;
513 x86_cpu_def->vendor1 = ebx;
514 x86_cpu_def->vendor2 = edx;
515 x86_cpu_def->vendor3 = ecx;
517 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
518 x86_cpu_def->family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
519 x86_cpu_def->model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
520 x86_cpu_def->stepping = eax & 0x0F;
521 x86_cpu_def->ext_features = ecx;
522 x86_cpu_def->features = edx;
524 if (kvm_enabled() && x86_cpu_def->level >= 7) {
525 x86_cpu_def->cpuid_7_0_ebx_features = kvm_arch_get_supported_cpuid(kvm_state, 0x7, 0, R_EBX);
526 } else {
527 x86_cpu_def->cpuid_7_0_ebx_features = 0;
530 host_cpuid(0x80000000, 0, &eax, &ebx, &ecx, &edx);
531 x86_cpu_def->xlevel = eax;
533 host_cpuid(0x80000001, 0, &eax, &ebx, &ecx, &edx);
534 x86_cpu_def->ext2_features = edx;
535 x86_cpu_def->ext3_features = ecx;
536 cpu_x86_fill_model_id(x86_cpu_def->model_id);
537 x86_cpu_def->vendor_override = 0;
539 /* Call Centaur's CPUID instruction. */
540 if (x86_cpu_def->vendor1 == CPUID_VENDOR_VIA_1 &&
541 x86_cpu_def->vendor2 == CPUID_VENDOR_VIA_2 &&
542 x86_cpu_def->vendor3 == CPUID_VENDOR_VIA_3) {
543 host_cpuid(0xC0000000, 0, &eax, &ebx, &ecx, &edx);
544 if (eax >= 0xC0000001) {
545 /* Support VIA max extended level */
546 x86_cpu_def->xlevel2 = eax;
547 host_cpuid(0xC0000001, 0, &eax, &ebx, &ecx, &edx);
548 x86_cpu_def->ext4_features = edx;
553 * Every SVM feature requires emulation support in KVM - so we can't just
554 * read the host features here. KVM might even support SVM features not
555 * available on the host hardware. Just set all bits and mask out the
556 * unsupported ones later.
558 x86_cpu_def->svm_features = -1;
560 return 0;
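/* Worked example (not part of the original file): the bit-twiddling above
 * decodes CPUID.01H:EAX.  For eax = 0x000206A7 (a Sandy Bridge part):
 *   family   = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF)    = 6 + 0      = 6
 *   model    = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12) = 0xA | 0x20 = 42
 *   stepping = eax & 0x0F                                    = 7
 */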
563 static int unavailable_host_feature(struct model_features_t *f, uint32_t mask)
565 int i;
567 for (i = 0; i < 32; ++i)
568 if (1 << i & mask) {
569 fprintf(stderr, "warning: host cpuid %04x_%04x lacks requested"
570 " flag '%s' [0x%08x]\n",
571 f->cpuid >> 16, f->cpuid & 0xffff,
572 f->flag_names[i] ? f->flag_names[i] : "[reserved]", mask);
573 break;
575 return 0;
578 /* Best-effort attempt to inform the user that requested cpu flags are not
579  * making their way to the guest.  Note: ft[].check_feat ideally should be
580  * specified via a guest_def field to suppress reporting of extraneous flags.
582 static int check_features_against_host(x86_def_t *guest_def)
584 x86_def_t host_def;
585 uint32_t mask;
586 int rv, i;
587 struct model_features_t ft[] = {
588 {&guest_def->features, &host_def.features,
589 ~0, feature_name, 0x00000000},
590 {&guest_def->ext_features, &host_def.ext_features,
591 ~CPUID_EXT_HYPERVISOR, ext_feature_name, 0x00000001},
592 {&guest_def->ext2_features, &host_def.ext2_features,
593 ~PPRO_FEATURES, ext2_feature_name, 0x80000000},
594 {&guest_def->ext3_features, &host_def.ext3_features,
595 ~CPUID_EXT3_SVM, ext3_feature_name, 0x80000001}};
597 cpu_x86_fill_host(&host_def);
598 for (rv = 0, i = 0; i < ARRAY_SIZE(ft); ++i)
599 for (mask = 1; mask; mask <<= 1)
600 if (ft[i].check_feat & mask && *ft[i].guest_feat & mask &&
601 !(*ft[i].host_feat & mask)) {
602 unavailable_host_feature(&ft[i], mask);
603 rv = 1;
605 return rv;
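/* Illustrative sketch (not part of the original file): with "-cpu ...,check"
 * this comparison only warns, while ",enforce" also makes cpu_x86_find_by_name()
 * fail.  Assuming a host without AVX and the CPUID_EXT_AVX constant from
 * cpu.h, a guest_def with AVX set would trigger a warning built from the
 * format string above, along the lines of:
 *   warning: host cpuid 0000_0001 lacks requested flag 'avx' [0x10000000]
 * Compile-guarded example:
 */
#if 0
static int example_check(x86_def_t *guest_def)
{
    guest_def->ext_features |= CPUID_EXT_AVX;        /* bit 28 of leaf 1 ECX */
    return check_features_against_host(guest_def);   /* 1 if anything is missing */
}
#endif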
608 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, void *opaque,
609 const char *name, Error **errp)
611 X86CPU *cpu = X86_CPU(obj);
612 CPUX86State *env = &cpu->env;
613 int64_t value;
615 value = (env->cpuid_version >> 8) & 0xf;
616 if (value == 0xf) {
617 value += (env->cpuid_version >> 20) & 0xff;
619 visit_type_int(v, &value, name, errp);
622 static void x86_cpuid_version_set_family(Object *obj, Visitor *v, void *opaque,
623 const char *name, Error **errp)
625 X86CPU *cpu = X86_CPU(obj);
626 CPUX86State *env = &cpu->env;
627 const int64_t min = 0;
628 const int64_t max = 0xff + 0xf;
629 int64_t value;
631 visit_type_int(v, &value, name, errp);
632 if (error_is_set(errp)) {
633 return;
635 if (value < min || value > max) {
636 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
637 name ? name : "null", value, min, max);
638 return;
641 env->cpuid_version &= ~0xff00f00;
642 if (value > 0x0f) {
643 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
644 } else {
645 env->cpuid_version |= value << 8;
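/* Worked example (not part of the original file): families above 15 use the
 * extended-family field.  Setting the "family" property to 21 (0x15) stores
 *   0xf00 | ((21 - 15) << 20) == 0x00600f00
 * in cpuid_version, and the getter above reconstructs 0xf + 0x6 == 21.
 */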
649 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, void *opaque,
650 const char *name, Error **errp)
652 X86CPU *cpu = X86_CPU(obj);
653 CPUX86State *env = &cpu->env;
654 int64_t value;
656 value = (env->cpuid_version >> 4) & 0xf;
657 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
658 visit_type_int(v, &value, name, errp);
661 static void x86_cpuid_version_set_model(Object *obj, Visitor *v, void *opaque,
662 const char *name, Error **errp)
664 X86CPU *cpu = X86_CPU(obj);
665 CPUX86State *env = &cpu->env;
666 const int64_t min = 0;
667 const int64_t max = 0xff;
668 int64_t value;
670 visit_type_int(v, &value, name, errp);
671 if (error_is_set(errp)) {
672 return;
674 if (value < min || value > max) {
675 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
676 name ? name : "null", value, min, max);
677 return;
680 env->cpuid_version &= ~0xf00f0;
681 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
684 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
685 void *opaque, const char *name,
686 Error **errp)
688 X86CPU *cpu = X86_CPU(obj);
689 CPUX86State *env = &cpu->env;
690 int64_t value;
692 value = env->cpuid_version & 0xf;
693 visit_type_int(v, &value, name, errp);
696 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
697 void *opaque, const char *name,
698 Error **errp)
700 X86CPU *cpu = X86_CPU(obj);
701 CPUX86State *env = &cpu->env;
702 const int64_t min = 0;
703 const int64_t max = 0xf;
704 int64_t value;
706 visit_type_int(v, &value, name, errp);
707 if (error_is_set(errp)) {
708 return;
710 if (value < min || value > max) {
711 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
712 name ? name : "null", value, min, max);
713 return;
716 env->cpuid_version &= ~0xf;
717 env->cpuid_version |= value & 0xf;
720 static void x86_cpuid_get_level(Object *obj, Visitor *v, void *opaque,
721 const char *name, Error **errp)
723 X86CPU *cpu = X86_CPU(obj);
725 visit_type_uint32(v, &cpu->env.cpuid_level, name, errp);
728 static void x86_cpuid_set_level(Object *obj, Visitor *v, void *opaque,
729 const char *name, Error **errp)
731 X86CPU *cpu = X86_CPU(obj);
733 visit_type_uint32(v, &cpu->env.cpuid_level, name, errp);
736 static void x86_cpuid_get_xlevel(Object *obj, Visitor *v, void *opaque,
737 const char *name, Error **errp)
739 X86CPU *cpu = X86_CPU(obj);
741 visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp);
744 static void x86_cpuid_set_xlevel(Object *obj, Visitor *v, void *opaque,
745 const char *name, Error **errp)
747 X86CPU *cpu = X86_CPU(obj);
749 visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp);
752 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
754 X86CPU *cpu = X86_CPU(obj);
755 CPUX86State *env = &cpu->env;
756 char *value;
757 int i;
759 value = (char *)g_malloc(12 + 1);
760 for (i = 0; i < 4; i++) {
761 value[i ] = env->cpuid_vendor1 >> (8 * i);
762 value[i + 4] = env->cpuid_vendor2 >> (8 * i);
763 value[i + 8] = env->cpuid_vendor3 >> (8 * i);
765 value[12] = '\0';
766 return value;
769 static void x86_cpuid_set_vendor(Object *obj, const char *value,
770 Error **errp)
772 X86CPU *cpu = X86_CPU(obj);
773 CPUX86State *env = &cpu->env;
774 int i;
776 if (strlen(value) != 12) {
777 error_set(errp, QERR_PROPERTY_VALUE_BAD, "",
778 "vendor", value);
779 return;
782 env->cpuid_vendor1 = 0;
783 env->cpuid_vendor2 = 0;
784 env->cpuid_vendor3 = 0;
785 for (i = 0; i < 4; i++) {
786 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
787 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
788 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
790 env->cpuid_vendor_override = 1;
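/* Illustrative note (not part of the original file): the 12-character vendor
 * string is packed little-endian into three registers, four bytes each, in
 * EBX/EDX/ECX order.  For "GenuineIntel":
 *   cpuid_vendor1 (EBX) = 'u'<<24 | 'n'<<16 | 'e'<<8 | 'G' = 0x756e6547
 *   cpuid_vendor2 (EDX) = 'I'<<24 | 'e'<<16 | 'n'<<8 | 'i' = 0x49656e69
 *   cpuid_vendor3 (ECX) = 'l'<<24 | 'e'<<16 | 't'<<8 | 'n' = 0x6c65746e
 */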
793 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
795 X86CPU *cpu = X86_CPU(obj);
796 CPUX86State *env = &cpu->env;
797 char *value;
798 int i;
800 value = g_malloc(48 + 1);
801 for (i = 0; i < 48; i++) {
802 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
804 value[48] = '\0';
805 return value;
808 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
809 Error **errp)
811 X86CPU *cpu = X86_CPU(obj);
812 CPUX86State *env = &cpu->env;
813 int c, len, i;
815 if (model_id == NULL) {
816 model_id = "";
818 len = strlen(model_id);
819 memset(env->cpuid_model, 0, 48);
820 for (i = 0; i < 48; i++) {
821 if (i >= len) {
822 c = '\0';
823 } else {
824 c = (uint8_t)model_id[i];
826 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
830 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, void *opaque,
831 const char *name, Error **errp)
833 X86CPU *cpu = X86_CPU(obj);
834 int64_t value;
836 value = cpu->env.tsc_khz * 1000;
837 visit_type_int(v, &value, name, errp);
840 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, void *opaque,
841 const char *name, Error **errp)
843 X86CPU *cpu = X86_CPU(obj);
844 const int64_t min = 0;
845 const int64_t max = INT_MAX;
846 int64_t value;
848 visit_type_int(v, &value, name, errp);
849 if (error_is_set(errp)) {
850 return;
852 if (value < min || value > max) {
853 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
854 name ? name : "null", value, min, max);
855 return;
858 cpu->env.tsc_khz = value / 1000;
861 static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
863 unsigned int i;
864 x86_def_t *def;
866 char *s = g_strdup(cpu_model);
867 char *featurestr, *name = strtok(s, ",");
868 /* Features to be added*/
869 uint32_t plus_features = 0, plus_ext_features = 0;
870 uint32_t plus_ext2_features = 0, plus_ext3_features = 0;
871 uint32_t plus_kvm_features = 0, plus_svm_features = 0;
872 /* Features to be removed */
873 uint32_t minus_features = 0, minus_ext_features = 0;
874 uint32_t minus_ext2_features = 0, minus_ext3_features = 0;
875 uint32_t minus_kvm_features = 0, minus_svm_features = 0;
876 uint32_t numvalue;
878 for (def = x86_defs; def; def = def->next)
879 if (name && !strcmp(name, def->name))
880 break;
881 if (kvm_enabled() && name && strcmp(name, "host") == 0) {
882 cpu_x86_fill_host(x86_cpu_def);
883 } else if (!def) {
884 goto error;
885 } else {
886 memcpy(x86_cpu_def, def, sizeof(*def));
889 plus_kvm_features = ~0; /* not supported bits will be filtered out later */
891 add_flagname_to_bitmaps("hypervisor", &plus_features,
892 &plus_ext_features, &plus_ext2_features, &plus_ext3_features,
893 &plus_kvm_features, &plus_svm_features);
895 featurestr = strtok(NULL, ",");
897 while (featurestr) {
898 char *val;
899 if (featurestr[0] == '+') {
900 add_flagname_to_bitmaps(featurestr + 1, &plus_features,
901 &plus_ext_features, &plus_ext2_features,
902 &plus_ext3_features, &plus_kvm_features,
903 &plus_svm_features);
904 } else if (featurestr[0] == '-') {
905 add_flagname_to_bitmaps(featurestr + 1, &minus_features,
906 &minus_ext_features, &minus_ext2_features,
907 &minus_ext3_features, &minus_kvm_features,
908 &minus_svm_features);
909 } else if ((val = strchr(featurestr, '='))) {
910 *val = 0; val++;
911 if (!strcmp(featurestr, "family")) {
912 char *err;
913 numvalue = strtoul(val, &err, 0);
914 if (!*val || *err || numvalue > 0xff + 0xf) {
915 fprintf(stderr, "bad numerical value %s\n", val);
916 goto error;
918 x86_cpu_def->family = numvalue;
919 } else if (!strcmp(featurestr, "model")) {
920 char *err;
921 numvalue = strtoul(val, &err, 0);
922 if (!*val || *err || numvalue > 0xff) {
923 fprintf(stderr, "bad numerical value %s\n", val);
924 goto error;
926 x86_cpu_def->model = numvalue;
927 } else if (!strcmp(featurestr, "stepping")) {
928 char *err;
929 numvalue = strtoul(val, &err, 0);
930 if (!*val || *err || numvalue > 0xf) {
931 fprintf(stderr, "bad numerical value %s\n", val);
932 goto error;
934 x86_cpu_def->stepping = numvalue ;
935 } else if (!strcmp(featurestr, "level")) {
936 char *err;
937 numvalue = strtoul(val, &err, 0);
938 if (!*val || *err) {
939 fprintf(stderr, "bad numerical value %s\n", val);
940 goto error;
942 x86_cpu_def->level = numvalue;
943 } else if (!strcmp(featurestr, "xlevel")) {
944 char *err;
945 numvalue = strtoul(val, &err, 0);
946 if (!*val || *err) {
947 fprintf(stderr, "bad numerical value %s\n", val);
948 goto error;
950 if (numvalue < 0x80000000) {
951 numvalue += 0x80000000;
953 x86_cpu_def->xlevel = numvalue;
954 } else if (!strcmp(featurestr, "vendor")) {
955 if (strlen(val) != 12) {
956 fprintf(stderr, "vendor string must be 12 chars long\n");
957 goto error;
959 x86_cpu_def->vendor1 = 0;
960 x86_cpu_def->vendor2 = 0;
961 x86_cpu_def->vendor3 = 0;
962 for(i = 0; i < 4; i++) {
963 x86_cpu_def->vendor1 |= ((uint8_t)val[i ]) << (8 * i);
964 x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
965 x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
967 x86_cpu_def->vendor_override = 1;
968 } else if (!strcmp(featurestr, "model_id")) {
969 pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
970 val);
971 } else if (!strcmp(featurestr, "tsc_freq")) {
972 int64_t tsc_freq;
973 char *err;
975 tsc_freq = strtosz_suffix_unit(val, &err,
976 STRTOSZ_DEFSUFFIX_B, 1000);
977 if (tsc_freq < 0 || *err) {
978 fprintf(stderr, "bad numerical value %s\n", val);
979 goto error;
981 x86_cpu_def->tsc_khz = tsc_freq / 1000;
982 } else if (!strcmp(featurestr, "hv_spinlocks")) {
983 char *err;
984 numvalue = strtoul(val, &err, 0);
985 if (!*val || *err) {
986 fprintf(stderr, "bad numerical value %s\n", val);
987 goto error;
989 hyperv_set_spinlock_retries(numvalue);
990 } else {
991 fprintf(stderr, "unrecognized feature %s\n", featurestr);
992 goto error;
994 } else if (!strcmp(featurestr, "check")) {
995 check_cpuid = 1;
996 } else if (!strcmp(featurestr, "enforce")) {
997 check_cpuid = enforce_cpuid = 1;
998 } else if (!strcmp(featurestr, "hv_relaxed")) {
999 hyperv_enable_relaxed_timing(true);
1000 } else if (!strcmp(featurestr, "hv_vapic")) {
1001 hyperv_enable_vapic_recommended(true);
1002 } else {
1003 fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
1004 goto error;
1006 featurestr = strtok(NULL, ",");
1008 x86_cpu_def->features |= plus_features;
1009 x86_cpu_def->ext_features |= plus_ext_features;
1010 x86_cpu_def->ext2_features |= plus_ext2_features;
1011 x86_cpu_def->ext3_features |= plus_ext3_features;
1012 x86_cpu_def->kvm_features |= plus_kvm_features;
1013 x86_cpu_def->svm_features |= plus_svm_features;
1014 x86_cpu_def->features &= ~minus_features;
1015 x86_cpu_def->ext_features &= ~minus_ext_features;
1016 x86_cpu_def->ext2_features &= ~minus_ext2_features;
1017 x86_cpu_def->ext3_features &= ~minus_ext3_features;
1018 x86_cpu_def->kvm_features &= ~minus_kvm_features;
1019 x86_cpu_def->svm_features &= ~minus_svm_features;
1020 if (check_cpuid) {
1021 if (check_features_against_host(x86_cpu_def) && enforce_cpuid)
1022 goto error;
1024 g_free(s);
1025 return 0;
1027 error:
1028 g_free(s);
1029 return -1;
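/* Illustrative usage sketch (not part of the original file): this is the
 * parser behind the -cpu option, so strings like the following end up here
 * (model name first, then comma-separated '+flag', '-flag' and 'key=value'
 * items):
 *
 *   -cpu qemu64,+avx,-svm
 *   -cpu core2duo,family=6,check
 *   -cpu host            (KVM only: copies the host CPUID via cpu_x86_fill_host)
 *
 * Compile-guarded call example:
 */
#if 0
static void example_find(void)
{
    x86_def_t def;

    memset(&def, 0, sizeof(def));
    if (cpu_x86_find_by_name(&def, "qemu64,+avx,-svm") < 0) {
        fprintf(stderr, "unknown cpu model\n");
    }
}
#endif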
1032 /* generate into buf a composite string of all cpuid flag names in featureset
1033  * that are selected by fbits; indicate truncation with "..." if the names
1034  * overflow bufsize. if flags is non-zero, suppress bits that have no name in featureset.
1036 static void listflags(char *buf, int bufsize, uint32_t fbits,
1037 const char **featureset, uint32_t flags)
1039 const char **p = &featureset[31];
1040 char *q, *b, bit;
1041 int nc;
1043 b = 4 <= bufsize ? buf + (bufsize -= 3) - 1 : NULL;
1044 *buf = '\0';
1045 for (q = buf, bit = 31; fbits && bufsize; --p, fbits &= ~(1 << bit), --bit)
1046 if (fbits & 1 << bit && (*p || !flags)) {
1047 if (*p)
1048 nc = snprintf(q, bufsize, "%s%s", q == buf ? "" : " ", *p);
1049 else
1050 nc = snprintf(q, bufsize, "%s[%d]", q == buf ? "" : " ", bit);
1051 if (bufsize <= nc) {
1052 if (b) {
1053 memcpy(b, "...", sizeof("..."));
1055 return;
1057 q += nc;
1058 bufsize -= nc;
1062 /* generate CPU information:
1063 * -? list model names
1064 * -?model list model names/IDs
1065 * -?dump output all model (x86_def_t) data
1066 * -?cpuid list all recognized cpuid flag names
1068 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
1070 unsigned char model = !strcmp("?model", optarg);
1071 unsigned char dump = !strcmp("?dump", optarg);
1072 unsigned char cpuid = !strcmp("?cpuid", optarg);
1073 x86_def_t *def;
1074 char buf[256];
1076 if (cpuid) {
1077 (*cpu_fprintf)(f, "Recognized CPUID flags:\n");
1078 listflags(buf, sizeof (buf), (uint32_t)~0, feature_name, 1);
1079 (*cpu_fprintf)(f, " f_edx: %s\n", buf);
1080 listflags(buf, sizeof (buf), (uint32_t)~0, ext_feature_name, 1);
1081 (*cpu_fprintf)(f, " f_ecx: %s\n", buf);
1082 listflags(buf, sizeof (buf), (uint32_t)~0, ext2_feature_name, 1);
1083 (*cpu_fprintf)(f, " extf_edx: %s\n", buf);
1084 listflags(buf, sizeof (buf), (uint32_t)~0, ext3_feature_name, 1);
1085 (*cpu_fprintf)(f, " extf_ecx: %s\n", buf);
1086 return;
1088 for (def = x86_defs; def; def = def->next) {
1089 snprintf(buf, sizeof (buf), def->flags ? "[%s]": "%s", def->name);
1090 if (model || dump) {
1091 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
1092 } else {
1093 (*cpu_fprintf)(f, "x86 %16s\n", buf);
1095 if (dump) {
1096 memcpy(buf, &def->vendor1, sizeof (def->vendor1));
1097 memcpy(buf + 4, &def->vendor2, sizeof (def->vendor2));
1098 memcpy(buf + 8, &def->vendor3, sizeof (def->vendor3));
1099 buf[12] = '\0';
1100 (*cpu_fprintf)(f,
1101 " family %d model %d stepping %d level %d xlevel 0x%x"
1102 " vendor \"%s\"\n",
1103 def->family, def->model, def->stepping, def->level,
1104 def->xlevel, buf);
1105 listflags(buf, sizeof (buf), def->features, feature_name, 0);
1106 (*cpu_fprintf)(f, " feature_edx %08x (%s)\n", def->features,
1107 buf);
1108 listflags(buf, sizeof (buf), def->ext_features, ext_feature_name,
1110 (*cpu_fprintf)(f, " feature_ecx %08x (%s)\n", def->ext_features,
1111 buf);
1112 listflags(buf, sizeof (buf), def->ext2_features, ext2_feature_name,
1114 (*cpu_fprintf)(f, " extfeature_edx %08x (%s)\n",
1115 def->ext2_features, buf);
1116 listflags(buf, sizeof (buf), def->ext3_features, ext3_feature_name,
1118 (*cpu_fprintf)(f, " extfeature_ecx %08x (%s)\n",
1119 def->ext3_features, buf);
1120 (*cpu_fprintf)(f, "\n");
1123 if (kvm_enabled()) {
1124 (*cpu_fprintf)(f, "x86 %16s\n", "[host]");
1128 int cpu_x86_register(X86CPU *cpu, const char *cpu_model)
1130 CPUX86State *env = &cpu->env;
1131 x86_def_t def1, *def = &def1;
1132 Error *error = NULL;
1134 memset(def, 0, sizeof(*def));
1136 if (cpu_x86_find_by_name(def, cpu_model) < 0)
1137 return -1;
1138 if (def->vendor1) {
1139 env->cpuid_vendor1 = def->vendor1;
1140 env->cpuid_vendor2 = def->vendor2;
1141 env->cpuid_vendor3 = def->vendor3;
1142 } else {
1143 env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
1144 env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
1145 env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
1147 env->cpuid_vendor_override = def->vendor_override;
1148 object_property_set_int(OBJECT(cpu), def->level, "level", &error);
1149 object_property_set_int(OBJECT(cpu), def->family, "family", &error);
1150 object_property_set_int(OBJECT(cpu), def->model, "model", &error);
1151 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", &error);
1152 env->cpuid_features = def->features;
1153 env->cpuid_ext_features = def->ext_features;
1154 env->cpuid_ext2_features = def->ext2_features;
1155 env->cpuid_ext3_features = def->ext3_features;
1156 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", &error);
1157 env->cpuid_kvm_features = def->kvm_features;
1158 env->cpuid_svm_features = def->svm_features;
1159 env->cpuid_ext4_features = def->ext4_features;
1160 env->cpuid_7_0_ebx = def->cpuid_7_0_ebx_features;
1161 env->cpuid_xlevel2 = def->xlevel2;
1162 object_property_set_int(OBJECT(cpu), (int64_t)def->tsc_khz * 1000,
1163 "tsc-frequency", &error);
1164 if (!kvm_enabled()) {
1165 env->cpuid_features &= TCG_FEATURES;
1166 env->cpuid_ext_features &= TCG_EXT_FEATURES;
1167 env->cpuid_ext2_features &= (TCG_EXT2_FEATURES
1168 #ifdef TARGET_X86_64
1169 | CPUID_EXT2_SYSCALL | CPUID_EXT2_LM
1170 #endif
1172 env->cpuid_ext3_features &= TCG_EXT3_FEATURES;
1173 env->cpuid_svm_features &= TCG_SVM_FEATURES;
1175 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", &error);
1176 if (error_is_set(&error)) {
1177 error_free(error);
1178 return -1;
1180 return 0;
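/* Illustrative sketch (not part of the original file): a caller (e.g.
 * cpu_x86_init() elsewhere in the tree) would roughly combine QOM object
 * creation, this function and x86_cpu_realize().  The exact wiring lives
 * outside this file, so treat this compile-guarded example as an
 * approximation, with error cleanup omitted:
 */
#if 0
static X86CPU *example_create_cpu(const char *cpu_model)
{
    X86CPU *cpu = X86_CPU(object_new(TYPE_X86_CPU));

    if (cpu_x86_register(cpu, cpu_model) < 0) {
        return NULL;   /* object disposal omitted in this sketch */
    }
    x86_cpu_realize(OBJECT(cpu), NULL);
    return cpu;
}
#endif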
1183 #if !defined(CONFIG_USER_ONLY)
1184 /* copy vendor id string to 32 bit register, nul pad as needed
1186 static void cpyid(const char *s, uint32_t *id)
1188 char *d = (char *)id;
1189 char i;
1191 for (i = sizeof (*id); i--; )
1192 *d++ = *s ? *s++ : '\0';
1195 /* interpret radix and convert from string to arbitrary scalar,
1196 * otherwise flag failure
1198 #define setscalar(pval, str, perr) \
1200 char *pend; \
1201 unsigned long ul; \
1203 ul = strtoul(str, &pend, 0); \
1204 *str && !*pend ? (*pval = ul) : (*perr = 1); \
1207 /* map cpuid options to feature bits, otherwise return failure
1208 * (option tags in *str are delimited by whitespace)
1210 static void setfeatures(uint32_t *pval, const char *str,
1211 const char **featureset, int *perr)
1213 const char *p, *q;
1215 for (q = p = str; *p || *q; q = p) {
1216 while (iswhite(*p))
1217 q = ++p;
1218 while (*p && !iswhite(*p))
1219 ++p;
1220 if (!*q && !*p)
1221 return;
1222 if (!lookup_feature(pval, q, p, featureset)) {
1223 fprintf(stderr, "error: feature \"%.*s\" not available in set\n",
1224 (int)(p - q), q);
1225 *perr = 1;
1226 return;
1231 /* map config file options to x86_def_t form
1233 static int cpudef_setfield(const char *name, const char *str, void *opaque)
1235 x86_def_t *def = opaque;
1236 int err = 0;
1238 if (!strcmp(name, "name")) {
1239 g_free((void *)def->name);
1240 def->name = g_strdup(str);
1241 } else if (!strcmp(name, "model_id")) {
1242 strncpy(def->model_id, str, sizeof (def->model_id));
1243 } else if (!strcmp(name, "level")) {
1244 setscalar(&def->level, str, &err)
1245 } else if (!strcmp(name, "vendor")) {
1246 cpyid(&str[0], &def->vendor1);
1247 cpyid(&str[4], &def->vendor2);
1248 cpyid(&str[8], &def->vendor3);
1249 } else if (!strcmp(name, "family")) {
1250 setscalar(&def->family, str, &err)
1251 } else if (!strcmp(name, "model")) {
1252 setscalar(&def->model, str, &err)
1253 } else if (!strcmp(name, "stepping")) {
1254 setscalar(&def->stepping, str, &err)
1255 } else if (!strcmp(name, "feature_edx")) {
1256 setfeatures(&def->features, str, feature_name, &err);
1257 } else if (!strcmp(name, "feature_ecx")) {
1258 setfeatures(&def->ext_features, str, ext_feature_name, &err);
1259 } else if (!strcmp(name, "extfeature_edx")) {
1260 setfeatures(&def->ext2_features, str, ext2_feature_name, &err);
1261 } else if (!strcmp(name, "extfeature_ecx")) {
1262 setfeatures(&def->ext3_features, str, ext3_feature_name, &err);
1263 } else if (!strcmp(name, "xlevel")) {
1264 setscalar(&def->xlevel, str, &err)
1265 } else {
1266 fprintf(stderr, "error: unknown option [%s = %s]\n", name, str);
1267 return (1);
1269 if (err) {
1270 fprintf(stderr, "error: bad option value [%s = %s]\n", name, str);
1271 return (1);
1273 return (0);
1276 /* register config file entry as x86_def_t
1278 static int cpudef_register(QemuOpts *opts, void *opaque)
1280 x86_def_t *def = g_malloc0(sizeof (x86_def_t));
1282 qemu_opt_foreach(opts, cpudef_setfield, def, 1);
1283 def->next = x86_defs;
1284 x86_defs = def;
1285 return (0);
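/* Illustrative sketch (not part of the original file): cpudef entries are
 * read from a QemuOpts config section ("-readconfig file").  A hypothetical
 * entry in the syntax cpudef_setfield() expects; the feature lists are
 * whitespace-separated names (or aliases) from the tables at the top of this
 * file:
 *
 *   [cpudef]
 *      name = "example-model"
 *      level = "4"
 *      vendor = "GenuineIntel"
 *      family = "6"
 *      model = "15"
 *      stepping = "3"
 *      feature_edx = "fpu de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat sse sse2"
 *      feature_ecx = "sse3 ssse3 cx16"
 *      extfeature_edx = "syscall nx lm"
 *      extfeature_ecx = "lahf_lm"
 *      xlevel = "0x80000008"
 *      model_id = "Example cpudef model"
 */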
1288 void cpu_clear_apic_feature(CPUX86State *env)
1290 env->cpuid_features &= ~CPUID_APIC;
1293 #endif /* !CONFIG_USER_ONLY */
1295 /* register "cpudef" models defined in configuration file. Here we first
1296 * preload any built-in definitions
1298 void x86_cpudef_setup(void)
1300 int i, j;
1301 static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
1303 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
1304 builtin_x86_defs[i].next = x86_defs;
1305 builtin_x86_defs[i].flags = 1;
1307 /* Look for specific "cpudef" models that */
1308 /* have the QEMU version in .model_id */
1309 for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
1310 if (strcmp(model_with_versions[j], builtin_x86_defs[i].name) == 0) {
1311 pstrcpy(builtin_x86_defs[i].model_id, sizeof(builtin_x86_defs[i].model_id), "QEMU Virtual CPU version ");
1312 pstrcat(builtin_x86_defs[i].model_id, sizeof(builtin_x86_defs[i].model_id), qemu_get_version());
1313 break;
1317 x86_defs = &builtin_x86_defs[i];
1319 #if !defined(CONFIG_USER_ONLY)
1320 qemu_opts_foreach(qemu_find_opts("cpudef"), cpudef_register, NULL, 0);
1321 #endif
1324 static void get_cpuid_vendor(CPUX86State *env, uint32_t *ebx,
1325 uint32_t *ecx, uint32_t *edx)
1327 *ebx = env->cpuid_vendor1;
1328 *edx = env->cpuid_vendor2;
1329 *ecx = env->cpuid_vendor3;
1331 /* sysenter isn't supported in compatibility mode on AMD, and syscall
1332  * isn't supported in compatibility mode on Intel.
1333  * Normally we advertise the actual CPU vendor, but you can override
1334  * this if you want to use KVM's sysenter/syscall emulation
1335  * in compatibility mode and when doing cross-vendor migration.
1337 if (kvm_enabled() && ! env->cpuid_vendor_override) {
1338 host_cpuid(0, 0, NULL, ebx, ecx, edx);
1342 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
1343 uint32_t *eax, uint32_t *ebx,
1344 uint32_t *ecx, uint32_t *edx)
1346 /* test if maximum index reached */
1347 if (index & 0x80000000) {
1348 if (index > env->cpuid_xlevel) {
1349 if (env->cpuid_xlevel2 > 0) {
1350 /* Handle the Centaur's CPUID instruction. */
1351 if (index > env->cpuid_xlevel2) {
1352 index = env->cpuid_xlevel2;
1353 } else if (index < 0xC0000000) {
1354 index = env->cpuid_xlevel;
1356 } else {
1357 index = env->cpuid_xlevel;
1360 } else {
1361 if (index > env->cpuid_level)
1362 index = env->cpuid_level;
1365 switch(index) {
1366 case 0:
1367 *eax = env->cpuid_level;
1368 get_cpuid_vendor(env, ebx, ecx, edx);
1369 break;
1370 case 1:
1371 *eax = env->cpuid_version;
1372 *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
1373 *ecx = env->cpuid_ext_features;
1374 *edx = env->cpuid_features;
1375 if (env->nr_cores * env->nr_threads > 1) {
1376 *ebx |= (env->nr_cores * env->nr_threads) << 16;
1377 *edx |= 1 << 28; /* HTT bit */
1379 break;
1380 case 2:
1381 /* cache info: needed for Pentium Pro compatibility */
1382 *eax = 1;
1383 *ebx = 0;
1384 *ecx = 0;
1385 *edx = 0x2c307d;
1386 break;
1387 case 4:
1388 /* cache info: needed for Core compatibility */
1389 if (env->nr_cores > 1) {
1390 *eax = (env->nr_cores - 1) << 26;
1391 } else {
1392 *eax = 0;
1394 switch (count) {
1395 case 0: /* L1 dcache info */
1396 *eax |= 0x0000121;
1397 *ebx = 0x1c0003f;
1398 *ecx = 0x000003f;
1399 *edx = 0x0000001;
1400 break;
1401 case 1: /* L1 icache info */
1402 *eax |= 0x0000122;
1403 *ebx = 0x1c0003f;
1404 *ecx = 0x000003f;
1405 *edx = 0x0000001;
1406 break;
1407 case 2: /* L2 cache info */
1408 *eax |= 0x0000143;
1409 if (env->nr_threads > 1) {
1410 *eax |= (env->nr_threads - 1) << 14;
1412 *ebx = 0x3c0003f;
1413 *ecx = 0x0000fff;
1414 *edx = 0x0000001;
1415 break;
1416 default: /* end of info */
1417 *eax = 0;
1418 *ebx = 0;
1419 *ecx = 0;
1420 *edx = 0;
1421 break;
1423 break;
1424 case 5:
1425 /* mwait info: needed for Core compatibility */
1426 *eax = 0; /* Smallest monitor-line size in bytes */
1427 *ebx = 0; /* Largest monitor-line size in bytes */
1428 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
1429 *edx = 0;
1430 break;
1431 case 6:
1432 /* Thermal and Power Leaf */
1433 *eax = 0;
1434 *ebx = 0;
1435 *ecx = 0;
1436 *edx = 0;
1437 break;
1438 case 7:
1439 /* Structured Extended Feature Flags Enumeration Leaf */
1440 if (count == 0) {
1441 *eax = 0; /* Maximum ECX value for sub-leaves */
1442 *ebx = env->cpuid_7_0_ebx; /* Feature flags */
1443 *ecx = 0; /* Reserved */
1444 *edx = 0; /* Reserved */
1445 } else {
1446 *eax = 0;
1447 *ebx = 0;
1448 *ecx = 0;
1449 *edx = 0;
1451 break;
1452 case 9:
1453 /* Direct Cache Access Information Leaf */
1454 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
1455 *ebx = 0;
1456 *ecx = 0;
1457 *edx = 0;
1458 break;
1459 case 0xA:
1460 /* Architectural Performance Monitoring Leaf */
1461 if (kvm_enabled()) {
1462 KVMState *s = env->kvm_state;
1464 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
1465 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
1466 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
1467 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
1468 } else {
1469 *eax = 0;
1470 *ebx = 0;
1471 *ecx = 0;
1472 *edx = 0;
1474 break;
1475 case 0xD:
1476 /* Processor Extended State */
1477 if (!(env->cpuid_ext_features & CPUID_EXT_XSAVE)) {
1478 *eax = 0;
1479 *ebx = 0;
1480 *ecx = 0;
1481 *edx = 0;
1482 break;
1484 if (kvm_enabled()) {
1485 KVMState *s = env->kvm_state;
1487 *eax = kvm_arch_get_supported_cpuid(s, 0xd, count, R_EAX);
1488 *ebx = kvm_arch_get_supported_cpuid(s, 0xd, count, R_EBX);
1489 *ecx = kvm_arch_get_supported_cpuid(s, 0xd, count, R_ECX);
1490 *edx = kvm_arch_get_supported_cpuid(s, 0xd, count, R_EDX);
1491 } else {
1492 *eax = 0;
1493 *ebx = 0;
1494 *ecx = 0;
1495 *edx = 0;
1497 break;
1498 case 0x80000000:
1499 *eax = env->cpuid_xlevel;
1500 *ebx = env->cpuid_vendor1;
1501 *edx = env->cpuid_vendor2;
1502 *ecx = env->cpuid_vendor3;
1503 break;
1504 case 0x80000001:
1505 *eax = env->cpuid_version;
1506 *ebx = 0;
1507 *ecx = env->cpuid_ext3_features;
1508 *edx = env->cpuid_ext2_features;
1510 /* The Linux kernel checks for the CMPLegacy bit and
1511  * discards multiple thread information if it is set.
1512  * So don't set it here for Intel, to keep Linux guests happy.
1514 if (env->nr_cores * env->nr_threads > 1) {
1515 uint32_t tebx, tecx, tedx;
1516 get_cpuid_vendor(env, &tebx, &tecx, &tedx);
1517 if (tebx != CPUID_VENDOR_INTEL_1 ||
1518 tedx != CPUID_VENDOR_INTEL_2 ||
1519 tecx != CPUID_VENDOR_INTEL_3) {
1520 *ecx |= 1 << 1; /* CmpLegacy bit */
1523 break;
1524 case 0x80000002:
1525 case 0x80000003:
1526 case 0x80000004:
1527 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
1528 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
1529 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
1530 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
1531 break;
1532 case 0x80000005:
1533 /* cache info (L1 cache) */
1534 *eax = 0x01ff01ff;
1535 *ebx = 0x01ff01ff;
1536 *ecx = 0x40020140;
1537 *edx = 0x40020140;
1538 break;
1539 case 0x80000006:
1540 /* cache info (L2 cache) */
1541 *eax = 0;
1542 *ebx = 0x42004200;
1543 *ecx = 0x02008140;
1544 *edx = 0;
1545 break;
1546 case 0x80000008:
1547 /* virtual & phys address size in low 2 bytes. */
1548 /* XXX: This value must match the one used in the MMU code. */
1549 if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
1550 /* 64 bit processor */
1551 /* XXX: The physical address space is limited to 42 bits in exec.c. */
1552 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
1553 } else {
1554 if (env->cpuid_features & CPUID_PSE36)
1555 *eax = 0x00000024; /* 36 bits physical */
1556 else
1557 *eax = 0x00000020; /* 32 bits physical */
1559 *ebx = 0;
1560 *ecx = 0;
1561 *edx = 0;
1562 if (env->nr_cores * env->nr_threads > 1) {
1563 *ecx |= (env->nr_cores * env->nr_threads) - 1;
1565 break;
1566 case 0x8000000A:
1567 if (env->cpuid_ext3_features & CPUID_EXT3_SVM) {
1568 *eax = 0x00000001; /* SVM Revision */
1569 *ebx = 0x00000010; /* nr of ASIDs */
1570 *ecx = 0;
1571 *edx = env->cpuid_svm_features; /* optional features */
1572 } else {
1573 *eax = 0;
1574 *ebx = 0;
1575 *ecx = 0;
1576 *edx = 0;
1578 break;
1579 case 0xC0000000:
1580 *eax = env->cpuid_xlevel2;
1581 *ebx = 0;
1582 *ecx = 0;
1583 *edx = 0;
1584 break;
1585 case 0xC0000001:
1586 /* Support for VIA CPU's CPUID instruction */
1587 *eax = env->cpuid_version;
1588 *ebx = 0;
1589 *ecx = 0;
1590 *edx = env->cpuid_ext4_features;
1591 break;
1592 case 0xC0000002:
1593 case 0xC0000003:
1594 case 0xC0000004:
1595 /* Reserved for future use; currently filled with zero */
1596 *eax = 0;
1597 *ebx = 0;
1598 *ecx = 0;
1599 *edx = 0;
1600 break;
1601 default:
1602 /* reserved values: zero */
1603 *eax = 0;
1604 *ebx = 0;
1605 *ecx = 0;
1606 *edx = 0;
1607 break;
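/* Illustrative usage sketch (not part of the original file): this function is
 * what the guest's CPUID instruction emulation ultimately calls.  A
 * compile-guarded example reading the address-size leaf of a 64-bit guest CPU:
 */
#if 0
static void example_cpuid_query(CPUX86State *env)
{
    uint32_t eax, ebx, ecx, edx;

    cpu_x86_cpuid(env, 0x80000008, 0, &eax, &ebx, &ecx, &edx);
    /* for an LM-capable guest the code above reports 0x00003028:
     * 48 bits virtual (bits 15-8), 40 bits physical (bits 7-0) */
    printf("phys bits: %u, virt bits: %u\n", eax & 0xff, (eax >> 8) & 0xff);
}
#endif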
1611 /* CPUClass::reset() */
1612 static void x86_cpu_reset(CPUState *s)
1614 X86CPU *cpu = X86_CPU(s);
1615 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
1616 CPUX86State *env = &cpu->env;
1617 int i;
1619 if (qemu_loglevel_mask(CPU_LOG_RESET)) {
1620 qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
1621 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1624 xcc->parent_reset(s);
1627 memset(env, 0, offsetof(CPUX86State, breakpoints));
1629 tlb_flush(env, 1);
1631 env->old_exception = -1;
1633 /* init to reset state */
1635 #ifdef CONFIG_SOFTMMU
1636 env->hflags |= HF_SOFTMMU_MASK;
1637 #endif
1638 env->hflags2 |= HF2_GIF_MASK;
1640 cpu_x86_update_cr0(env, 0x60000010);
1641 env->a20_mask = ~0x0;
1642 env->smbase = 0x30000;
1644 env->idt.limit = 0xffff;
1645 env->gdt.limit = 0xffff;
1646 env->ldt.limit = 0xffff;
1647 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
1648 env->tr.limit = 0xffff;
1649 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
1651 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
1652 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
1653 DESC_R_MASK | DESC_A_MASK);
1654 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
1655 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
1656 DESC_A_MASK);
1657 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
1658 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
1659 DESC_A_MASK);
1660 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
1661 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
1662 DESC_A_MASK);
1663 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
1664 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
1665 DESC_A_MASK);
1666 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
1667 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
1668 DESC_A_MASK);
1670 env->eip = 0xfff0;
1671 env->regs[R_EDX] = env->cpuid_version;
1673 env->eflags = 0x2;
1675 /* FPU init */
1676 for (i = 0; i < 8; i++) {
1677 env->fptags[i] = 1;
1679 env->fpuc = 0x37f;
1681 env->mxcsr = 0x1f80;
1683 env->pat = 0x0007040600070406ULL;
1684 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
1686 memset(env->dr, 0, sizeof(env->dr));
1687 env->dr[6] = DR6_FIXED_1;
1688 env->dr[7] = DR7_FIXED_1;
1689 cpu_breakpoint_remove_all(env, BP_CPU);
1690 cpu_watchpoint_remove_all(env, BP_CPU);
1692 #if !defined(CONFIG_USER_ONLY)
1693 /* We hard-wire the BSP to the first CPU. */
1694 if (env->cpu_index == 0) {
1695 apic_designate_bsp(env->apic_state);
1698 env->halted = !cpu_is_bsp(cpu);
1699 #endif
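/* Worked example (not part of the original file): the segment setup above
 * reproduces the classic x86 reset vector.  The first instruction is fetched
 * from CS.base + EIP = 0xffff0000 + 0xfff0 = 0xfffffff0, i.e. 16 bytes below
 * 4 GB, where firmware maps its entry point.
 */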
1702 #ifndef CONFIG_USER_ONLY
1703 bool cpu_is_bsp(X86CPU *cpu)
1705 return cpu_get_apic_base(cpu->env.apic_state) & MSR_IA32_APICBASE_BSP;
1708 /* TODO: remove me, when reset over QOM tree is implemented */
1709 static void x86_cpu_machine_reset_cb(void *opaque)
1711 X86CPU *cpu = opaque;
1712 cpu_reset(CPU(cpu));
1714 #endif
1716 static void mce_init(X86CPU *cpu)
1718 CPUX86State *cenv = &cpu->env;
1719 unsigned int bank;
1721 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
1722 && (cenv->cpuid_features & (CPUID_MCE | CPUID_MCA)) ==
1723 (CPUID_MCE | CPUID_MCA)) {
1724 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
1725 cenv->mcg_ctl = ~(uint64_t)0;
1726 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
1727 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
1732 void x86_cpu_realize(Object *obj, Error **errp)
1734 X86CPU *cpu = X86_CPU(obj);
1736 #ifndef CONFIG_USER_ONLY
1737 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
1738 #endif
1740 mce_init(cpu);
1741 qemu_init_vcpu(&cpu->env);
1742 cpu_reset(CPU(cpu));
1745 static void x86_cpu_initfn(Object *obj)
1747 X86CPU *cpu = X86_CPU(obj);
1748 CPUX86State *env = &cpu->env;
1749 static int inited;
1751 cpu_exec_init(env);
1753 object_property_add(obj, "family", "int",
1754 x86_cpuid_version_get_family,
1755 x86_cpuid_version_set_family, NULL, NULL, NULL);
1756 object_property_add(obj, "model", "int",
1757 x86_cpuid_version_get_model,
1758 x86_cpuid_version_set_model, NULL, NULL, NULL);
1759 object_property_add(obj, "stepping", "int",
1760 x86_cpuid_version_get_stepping,
1761 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
1762 object_property_add(obj, "level", "int",
1763 x86_cpuid_get_level,
1764 x86_cpuid_set_level, NULL, NULL, NULL);
1765 object_property_add(obj, "xlevel", "int",
1766 x86_cpuid_get_xlevel,
1767 x86_cpuid_set_xlevel, NULL, NULL, NULL);
1768 object_property_add_str(obj, "vendor",
1769 x86_cpuid_get_vendor,
1770 x86_cpuid_set_vendor, NULL);
1771 object_property_add_str(obj, "model-id",
1772 x86_cpuid_get_model_id,
1773 x86_cpuid_set_model_id, NULL);
1774 object_property_add(obj, "tsc-frequency", "int",
1775 x86_cpuid_get_tsc_freq,
1776 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
1778 env->cpuid_apic_id = env->cpu_index;
1780 /* init various static tables used in TCG mode */
1781 if (tcg_enabled() && !inited) {
1782 inited = 1;
1783 optimize_flags_init();
1784 #ifndef CONFIG_USER_ONLY
1785 cpu_set_debug_excp_handler(breakpoint_handler);
1786 #endif
1790 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
1792 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1793 CPUClass *cc = CPU_CLASS(oc);
1795 xcc->parent_reset = cc->reset;
1796 cc->reset = x86_cpu_reset;
1799 static const TypeInfo x86_cpu_type_info = {
1800 .name = TYPE_X86_CPU,
1801 .parent = TYPE_CPU,
1802 .instance_size = sizeof(X86CPU),
1803 .instance_init = x86_cpu_initfn,
1804 .abstract = false,
1805 .class_size = sizeof(X86CPUClass),
1806 .class_init = x86_cpu_common_class_init,
1809 static void x86_cpu_register_types(void)
1811 type_register_static(&x86_cpu_type_info);
1814 type_init(x86_cpu_register_types)