target/i386/cpu.c (qemu/ar7.git)
1 /*
2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
23 #include "cpu.h"
24 #include "exec/exec-all.h"
25 #include "sysemu/kvm.h"
26 #include "sysemu/hvf.h"
27 #include "sysemu/cpus.h"
28 #include "kvm_i386.h"
29 #include "sev_i386.h"
31 #include "qemu/error-report.h"
32 #include "qemu/option.h"
33 #include "qemu/config-file.h"
34 #include "qapi/error.h"
35 #include "qapi/qapi-visit-misc.h"
36 #include "qapi/qapi-visit-run-state.h"
37 #include "qapi/qmp/qdict.h"
38 #include "qapi/qmp/qerror.h"
39 #include "qapi/visitor.h"
40 #include "qom/qom-qobject.h"
41 #include "sysemu/arch_init.h"
43 #include "standard-headers/asm-x86/kvm_para.h"
45 #include "sysemu/sysemu.h"
46 #include "hw/qdev-properties.h"
47 #include "hw/i386/topology.h"
48 #ifndef CONFIG_USER_ONLY
49 #include "exec/address-spaces.h"
50 #include "hw/hw.h"
51 #include "hw/xen/xen.h"
52 #include "hw/i386/apic_internal.h"
53 #endif
55 #include "disas/capstone.h"
57 /* Helpers for building CPUID[2] descriptors: */
59 struct CPUID2CacheDescriptorInfo {
60 enum CacheType type;
61 int level;
62 int size;
63 int line_size;
64 int associativity;
67 #define KiB 1024
68 #define MiB (1024 * 1024)
71 * Known CPUID 2 cache descriptors.
72 * From Intel SDM Volume 2A, CPUID instruction
74 struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
75 [0x06] = { .level = 1, .type = ICACHE, .size = 8 * KiB,
76 .associativity = 4, .line_size = 32, },
77 [0x08] = { .level = 1, .type = ICACHE, .size = 16 * KiB,
78 .associativity = 4, .line_size = 32, },
79 [0x09] = { .level = 1, .type = ICACHE, .size = 32 * KiB,
80 .associativity = 4, .line_size = 64, },
81 [0x0A] = { .level = 1, .type = DCACHE, .size = 8 * KiB,
82 .associativity = 2, .line_size = 32, },
83 [0x0C] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
84 .associativity = 4, .line_size = 32, },
85 [0x0D] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
86 .associativity = 4, .line_size = 64, },
87 [0x0E] = { .level = 1, .type = DCACHE, .size = 24 * KiB,
88 .associativity = 6, .line_size = 64, },
89 [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
90 .associativity = 2, .line_size = 64, },
91 [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
92 .associativity = 8, .line_size = 64, },
93 /* lines per sector is not supported by cpuid2_cache_descriptor(),
94  * so descriptors 0x22, 0x23 are not included */
96 [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
97 .associativity = 16, .line_size = 64, },
98 /* lines per sector is not supported by cpuid2_cache_descriptor(),
99  * so descriptors 0x25, 0x20 are not included */
101 [0x2C] = { .level = 1, .type = DCACHE, .size = 32 * KiB,
102 .associativity = 8, .line_size = 64, },
103 [0x30] = { .level = 1, .type = ICACHE, .size = 32 * KiB,
104 .associativity = 8, .line_size = 64, },
105 [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
106 .associativity = 4, .line_size = 32, },
107 [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
108 .associativity = 4, .line_size = 32, },
109 [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
110 .associativity = 4, .line_size = 32, },
111 [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
112 .associativity = 4, .line_size = 32, },
113 [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
114 .associativity = 4, .line_size = 32, },
115 [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
116 .associativity = 4, .line_size = 64, },
117 [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
118 .associativity = 8, .line_size = 64, },
119 [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB,
120 .associativity = 12, .line_size = 64, },
121 /* Descriptor 0x49 depends on CPU family/model, so it is not included */
122 [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
123 .associativity = 12, .line_size = 64, },
124 [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
125 .associativity = 16, .line_size = 64, },
126 [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
127 .associativity = 12, .line_size = 64, },
128 [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB,
129 .associativity = 16, .line_size = 64, },
130 [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB,
131 .associativity = 24, .line_size = 64, },
132 [0x60] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
133 .associativity = 8, .line_size = 64, },
134 [0x66] = { .level = 1, .type = DCACHE, .size = 8 * KiB,
135 .associativity = 4, .line_size = 64, },
136 [0x67] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
137 .associativity = 4, .line_size = 64, },
138 [0x68] = { .level = 1, .type = DCACHE, .size = 32 * KiB,
139 .associativity = 4, .line_size = 64, },
140 [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
141 .associativity = 4, .line_size = 64, },
142 /* lines per sector is not supported by cpuid2_cache_descriptor(),
143  * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included. */
145 [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
146 .associativity = 8, .line_size = 64, },
147 [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
148 .associativity = 2, .line_size = 64, },
149 [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
150 .associativity = 8, .line_size = 64, },
151 [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
152 .associativity = 8, .line_size = 32, },
153 [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
154 .associativity = 8, .line_size = 32, },
155 [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
156 .associativity = 8, .line_size = 32, },
157 [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
158 .associativity = 8, .line_size = 32, },
159 [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
160 .associativity = 4, .line_size = 64, },
161 [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
162 .associativity = 8, .line_size = 64, },
163 [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB,
164 .associativity = 4, .line_size = 64, },
165 [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
166 .associativity = 4, .line_size = 64, },
167 [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
168 .associativity = 4, .line_size = 64, },
169 [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
170 .associativity = 8, .line_size = 64, },
171 [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
172 .associativity = 8, .line_size = 64, },
173 [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
174 .associativity = 8, .line_size = 64, },
175 [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB,
176 .associativity = 12, .line_size = 64, },
177 [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB,
178 .associativity = 12, .line_size = 64, },
179 [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
180 .associativity = 12, .line_size = 64, },
181 [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
182 .associativity = 16, .line_size = 64, },
183 [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
184 .associativity = 16, .line_size = 64, },
185 [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
186 .associativity = 16, .line_size = 64, },
187 [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
188 .associativity = 24, .line_size = 64, },
189 [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB,
190 .associativity = 24, .line_size = 64, },
191 [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB,
192 .associativity = 24, .line_size = 64, },
196 * "CPUID leaf 2 does not report cache descriptor information,
197 * use CPUID leaf 4 to query cache parameters"
199 #define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF
202 * Return a CPUID 2 cache descriptor for a given cache.
203 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
205 static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
207 int i;
209 assert(cache->size > 0);
210 assert(cache->level > 0);
211 assert(cache->line_size > 0);
212 assert(cache->associativity > 0);
213 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
214 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
215 if (d->level == cache->level && d->type == cache->type &&
216 d->size == cache->size && d->line_size == cache->line_size &&
217 d->associativity == cache->associativity) {
218 return i;
222 return CACHE_DESCRIPTOR_UNAVAILABLE;
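/*
 * Illustrative example (editor's sketch, not part of the original source):
 * looking up the legacy CPUID-leaf-2 L2 cache defined further below
 * (legacy_l2_cache_cpuid2: unified, level 2, 2 MiB, 8-way, 64-byte lines)
 * matches the table entry at index 0x7D:
 *
 *     uint8_t desc = cpuid2_cache_descriptor(&legacy_l2_cache_cpuid2);
 *     assert(desc == 0x7D);
 *
 * A cache with no matching table entry (e.g. an unusual associativity)
 * would yield CACHE_DESCRIPTOR_UNAVAILABLE (0xFF) instead.
 */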
225 /* CPUID Leaf 4 constants: */
227 /* EAX: */
228 #define CACHE_TYPE_D 1
229 #define CACHE_TYPE_I 2
230 #define CACHE_TYPE_UNIFIED 3
232 #define CACHE_LEVEL(l) (l << 5)
234 #define CACHE_SELF_INIT_LEVEL (1 << 8)
236 /* EDX: */
237 #define CACHE_NO_INVD_SHARING (1 << 0)
238 #define CACHE_INCLUSIVE (1 << 1)
239 #define CACHE_COMPLEX_IDX (1 << 2)
241 /* Encode CacheType for CPUID[4].EAX */
242 #define CACHE_TYPE(t) (((t) == DCACHE) ? CACHE_TYPE_D : \
243 ((t) == ICACHE) ? CACHE_TYPE_I : \
244 ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
245 0 /* Invalid value */)
248 /* Encode cache info for CPUID[4] */
249 static void encode_cache_cpuid4(CPUCacheInfo *cache,
250 int num_apic_ids, int num_cores,
251 uint32_t *eax, uint32_t *ebx,
252 uint32_t *ecx, uint32_t *edx)
254 assert(cache->size == cache->line_size * cache->associativity *
255 cache->partitions * cache->sets);
257 assert(num_apic_ids > 0);
258 *eax = CACHE_TYPE(cache->type) |
259 CACHE_LEVEL(cache->level) |
260 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
261 ((num_cores - 1) << 26) |
262 ((num_apic_ids - 1) << 14);
264 assert(cache->line_size > 0);
265 assert(cache->partitions > 0);
266 assert(cache->associativity > 0);
267 /* We don't implement fully-associative caches */
268 assert(cache->associativity < cache->sets);
269 *ebx = (cache->line_size - 1) |
270 ((cache->partitions - 1) << 12) |
271 ((cache->associativity - 1) << 22);
273 assert(cache->sets > 0);
274 *ecx = cache->sets - 1;
276 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
277 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
278 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
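/*
 * Illustrative example (editor's sketch, not part of the original source):
 * encoding legacy_l1d_cache (32 KiB, 8-way, 64-byte lines, 64 sets,
 * 1 partition, defined below) yields
 *
 *     EBX = (64 - 1) | ((1 - 1) << 12) | ((8 - 1) << 22) = 0x01C0003F
 *     ECX = 64 - 1                                       = 0x0000003F
 *
 * while EAX additionally folds in the cache type/level bits and the
 * num_apic_ids/num_cores arguments supplied by the caller.
 */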
281 /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
282 static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
284 assert(cache->size % 1024 == 0);
285 assert(cache->lines_per_tag > 0);
286 assert(cache->associativity > 0);
287 assert(cache->line_size > 0);
288 return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
289 (cache->lines_per_tag << 8) | (cache->line_size);
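/*
 * Illustrative example (editor's sketch, not part of the original source):
 * encoding legacy_l1d_cache_amd (64 KiB, 2-way, 1 line per tag, 64-byte
 * lines, defined below) packs into
 *
 *     (64 << 24) | (2 << 16) | (1 << 8) | 64 = 0x40020140
 *
 * i.e. size in KiB, associativity, lines per tag and line size from the
 * most significant byte down to the least significant one.
 */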
292 #define ASSOC_FULL 0xFF
294 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
295 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
296 a == 2 ? 0x2 : \
297 a == 4 ? 0x4 : \
298 a == 8 ? 0x6 : \
299 a == 16 ? 0x8 : \
300 a == 32 ? 0xA : \
301 a == 48 ? 0xB : \
302 a == 64 ? 0xC : \
303 a == 96 ? 0xD : \
304 a == 128 ? 0xE : \
305 a == ASSOC_FULL ? 0xF : \
306 0 /* invalid value */)
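/*
 * Illustrative example (editor's sketch, not part of the original source):
 * the encoding above is a lookup, not a formula, so e.g.
 *
 *     AMD_ENC_ASSOC(16)         == 0x8
 *     AMD_ENC_ASSOC(ASSOC_FULL) == 0xF
 *     AMD_ENC_ASSOC(3)          == 0x0   (no defined encoding)
 */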
309 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
310 * @l3 can be NULL.
312 static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
313 CPUCacheInfo *l3,
314 uint32_t *ecx, uint32_t *edx)
316 assert(l2->size % 1024 == 0);
317 assert(l2->associativity > 0);
318 assert(l2->lines_per_tag > 0);
319 assert(l2->line_size > 0);
320 *ecx = ((l2->size / 1024) << 16) |
321 (AMD_ENC_ASSOC(l2->associativity) << 12) |
322 (l2->lines_per_tag << 8) | (l2->line_size);
324 if (l3) {
325 assert(l3->size % (512 * 1024) == 0);
326 assert(l3->associativity > 0);
327 assert(l3->lines_per_tag > 0);
328 assert(l3->line_size > 0);
329 *edx = ((l3->size / (512 * 1024)) << 18) |
330 (AMD_ENC_ASSOC(l3->associativity) << 12) |
331 (l3->lines_per_tag << 8) | (l3->line_size);
332 } else {
333 *edx = 0;
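/*
 * Illustrative example (editor's sketch, not part of the original source):
 * for legacy_l2_cache_amd (512 KiB, 16-way, 1 line per tag, 64-byte lines,
 * defined below) the L2 half of the encoding is
 *
 *     ECX = (512 << 16) | (AMD_ENC_ASSOC(16) << 12) | (1 << 8) | 64
 *         = 0x02008140
 *
 * The L3 field encodes its size in 512 KiB units, which is why @l3->size
 * is asserted to be a multiple of 512 KiB above.
 */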
338 /* Definitions of the hardcoded cache entries we expose:
339  * These are legacy cache values. If there is a need to change any
340  * of these values, please use builtin_x86_defs. */
343 /* L1 data cache: */
344 static CPUCacheInfo legacy_l1d_cache = {
345 .type = DCACHE,
346 .level = 1,
347 .size = 32 * KiB,
348 .self_init = 1,
349 .line_size = 64,
350 .associativity = 8,
351 .sets = 64,
352 .partitions = 1,
353 .no_invd_sharing = true,
356 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
357 static CPUCacheInfo legacy_l1d_cache_amd = {
358 .type = DCACHE,
359 .level = 1,
360 .size = 64 * KiB,
361 .self_init = 1,
362 .line_size = 64,
363 .associativity = 2,
364 .sets = 512,
365 .partitions = 1,
366 .lines_per_tag = 1,
367 .no_invd_sharing = true,
370 /* L1 instruction cache: */
371 static CPUCacheInfo legacy_l1i_cache = {
372 .type = ICACHE,
373 .level = 1,
374 .size = 32 * KiB,
375 .self_init = 1,
376 .line_size = 64,
377 .associativity = 8,
378 .sets = 64,
379 .partitions = 1,
380 .no_invd_sharing = true,
383 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
384 static CPUCacheInfo legacy_l1i_cache_amd = {
385 .type = ICACHE,
386 .level = 1,
387 .size = 64 * KiB,
388 .self_init = 1,
389 .line_size = 64,
390 .associativity = 2,
391 .sets = 512,
392 .partitions = 1,
393 .lines_per_tag = 1,
394 .no_invd_sharing = true,
397 /* Level 2 unified cache: */
398 static CPUCacheInfo legacy_l2_cache = {
399 .type = UNIFIED_CACHE,
400 .level = 2,
401 .size = 4 * MiB,
402 .self_init = 1,
403 .line_size = 64,
404 .associativity = 16,
405 .sets = 4096,
406 .partitions = 1,
407 .no_invd_sharing = true,
410 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
411 static CPUCacheInfo legacy_l2_cache_cpuid2 = {
412 .type = UNIFIED_CACHE,
413 .level = 2,
414 .size = 2 * MiB,
415 .line_size = 64,
416 .associativity = 8,
420 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
421 static CPUCacheInfo legacy_l2_cache_amd = {
422 .type = UNIFIED_CACHE,
423 .level = 2,
424 .size = 512 * KiB,
425 .line_size = 64,
426 .lines_per_tag = 1,
427 .associativity = 16,
428 .sets = 512,
429 .partitions = 1,
432 /* Level 3 unified cache: */
433 static CPUCacheInfo legacy_l3_cache = {
434 .type = UNIFIED_CACHE,
435 .level = 3,
436 .size = 16 * MiB,
437 .line_size = 64,
438 .associativity = 16,
439 .sets = 16384,
440 .partitions = 1,
441 .lines_per_tag = 1,
442 .self_init = true,
443 .inclusive = true,
444 .complex_indexing = true,
447 /* TLB definitions: */
449 #define L1_DTLB_2M_ASSOC 1
450 #define L1_DTLB_2M_ENTRIES 255
451 #define L1_DTLB_4K_ASSOC 1
452 #define L1_DTLB_4K_ENTRIES 255
454 #define L1_ITLB_2M_ASSOC 1
455 #define L1_ITLB_2M_ENTRIES 255
456 #define L1_ITLB_4K_ASSOC 1
457 #define L1_ITLB_4K_ENTRIES 255
459 #define L2_DTLB_2M_ASSOC 0 /* disabled */
460 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
461 #define L2_DTLB_4K_ASSOC 4
462 #define L2_DTLB_4K_ENTRIES 512
464 #define L2_ITLB_2M_ASSOC 0 /* disabled */
465 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
466 #define L2_ITLB_4K_ASSOC 4
467 #define L2_ITLB_4K_ENTRIES 512
469 /* CPUID Leaf 0x14 constants: */
470 #define INTEL_PT_MAX_SUBLEAF 0x1
472 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
473 * MSR can be accessed;
474 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
475 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
476 * of Intel PT MSRs across warm reset;
477 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
479 #define INTEL_PT_MINIMAL_EBX 0xf
481 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
482 * IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
483 * accessed;
484 * bit[01]: ToPA tables can hold any number of output entries, up to the
485 * maximum allowed by the MaskOrTableOffset field of
486 * IA32_RTIT_OUTPUT_MASK_PTRS;
487 * bit[02]: Support Single-Range Output scheme;
489 #define INTEL_PT_MINIMAL_ECX 0x7
490 /* generated packets which contain IP payloads have LIP values */
491 #define INTEL_PT_IP_LIP (1 << 31)
492 #define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
493 #define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
494 #define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */
495 #define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */
496 #define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
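/*
 * Editor's note (not part of the original source): the bitmaps above are
 * plain bit sets.  For instance 0x0249 is 0b0010_0100_1001, i.e. bits
 * 0, 3, 6 and 9, which is how INTEL_PT_MTC_BITMAP advertises the four
 * supported periods noted in its "ART(0,3,6,9)" comment.
 */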
498 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
499 uint32_t vendor2, uint32_t vendor3)
501 int i;
502 for (i = 0; i < 4; i++) {
503 dst[i] = vendor1 >> (8 * i);
504 dst[i + 4] = vendor2 >> (8 * i);
505 dst[i + 8] = vendor3 >> (8 * i);
507 dst[CPUID_VENDOR_SZ] = '\0';
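/*
 * Illustrative example (editor's sketch, not part of the original source):
 * CPUID leaf 0 returns the vendor string as three little-endian register
 * words.  For a GenuineIntel host EBX/EDX/ECX are 0x756E6547, 0x49656E69
 * and 0x6C65746E, so
 *
 *     char vendor[CPUID_VENDOR_SZ + 1];
 *     x86_cpu_vendor_words2str(vendor, 0x756E6547, 0x49656E69, 0x6C65746E);
 *
 * leaves vendor holding "GenuineIntel".
 */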
510 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
511 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
512 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
513 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
514 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
515 CPUID_PSE36 | CPUID_FXSR)
516 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
517 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
518 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
519 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
520 CPUID_PAE | CPUID_SEP | CPUID_APIC)
522 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
523 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
524 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
525 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
526 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
527 /* partly implemented:
528 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
529 /* missing:
530 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
531 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
532 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
533 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
534 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
535 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
536 /* missing:
537 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
538 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
539 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
540 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
541 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
543 #ifdef TARGET_X86_64
544 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
545 #else
546 #define TCG_EXT2_X86_64_FEATURES 0
547 #endif
549 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
550 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
551 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
552 TCG_EXT2_X86_64_FEATURES)
553 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
554 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
555 #define TCG_EXT4_FEATURES 0
556 #define TCG_SVM_FEATURES 0
557 #define TCG_KVM_FEATURES 0
558 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
559 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
560 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
561 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
562 CPUID_7_0_EBX_ERMS)
563 /* missing:
564 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
565 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
566 CPUID_7_0_EBX_RDSEED */
567 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE | \
568 CPUID_7_0_ECX_LA57)
569 #define TCG_7_0_EDX_FEATURES 0
570 #define TCG_APM_FEATURES 0
571 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
572 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
573 /* missing:
574 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
576 typedef struct FeatureWordInfo {
577 /* Feature flag names are taken from "Intel Processor Identification and
578  * the CPUID Instruction" and AMD's "CPUID Specification".
579  * In cases of disagreement between feature naming conventions,
580  * aliases may be added. */
582 const char *feat_names[32];
583 uint32_t cpuid_eax; /* Input EAX for CPUID */
584 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
585 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
586 int cpuid_reg; /* output register (R_* constant) */
587 uint32_t tcg_features; /* Feature flags supported by TCG */
588 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
589 uint32_t migratable_flags; /* Feature flags known to be migratable */
590 /* Features that shouldn't be auto-enabled by "-cpu host" */
591 uint32_t no_autoenable_flags;
592 } FeatureWordInfo;
594 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
595 [FEAT_1_EDX] = {
596 .feat_names = {
597 "fpu", "vme", "de", "pse",
598 "tsc", "msr", "pae", "mce",
599 "cx8", "apic", NULL, "sep",
600 "mtrr", "pge", "mca", "cmov",
601 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
602 NULL, "ds" /* Intel dts */, "acpi", "mmx",
603 "fxsr", "sse", "sse2", "ss",
604 "ht" /* Intel htt */, "tm", "ia64", "pbe",
606 .cpuid_eax = 1, .cpuid_reg = R_EDX,
607 .tcg_features = TCG_FEATURES,
609 [FEAT_1_ECX] = {
610 .feat_names = {
611 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
612 "ds-cpl", "vmx", "smx", "est",
613 "tm2", "ssse3", "cid", NULL,
614 "fma", "cx16", "xtpr", "pdcm",
615 NULL, "pcid", "dca", "sse4.1",
616 "sse4.2", "x2apic", "movbe", "popcnt",
617 "tsc-deadline", "aes", "xsave", "osxsave",
618 "avx", "f16c", "rdrand", "hypervisor",
620 .cpuid_eax = 1, .cpuid_reg = R_ECX,
621 .tcg_features = TCG_EXT_FEATURES,
623 /* Feature names that are already defined in feature_name[] but are
624  * also set in CPUID[8000_0001].EDX on AMD CPUs do not have their
625  * names repeated in feat_names below. They are copied automatically
626  * to features[FEAT_8000_0001_EDX] if and only if the CPU vendor is AMD. */
628 [FEAT_8000_0001_EDX] = {
629 .feat_names = {
630 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
631 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
632 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
633 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
634 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
635 "nx", NULL, "mmxext", NULL /* mmx */,
636 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
637 NULL, "lm", "3dnowext", "3dnow",
639 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
640 .tcg_features = TCG_EXT2_FEATURES,
642 [FEAT_8000_0001_ECX] = {
643 .feat_names = {
644 "lahf-lm", "cmp-legacy", "svm", "extapic",
645 "cr8legacy", "abm", "sse4a", "misalignsse",
646 "3dnowprefetch", "osvw", "ibs", "xop",
647 "skinit", "wdt", NULL, "lwp",
648 "fma4", "tce", NULL, "nodeid-msr",
649 NULL, "tbm", "topoext", "perfctr-core",
650 "perfctr-nb", NULL, NULL, NULL,
651 NULL, NULL, NULL, NULL,
653 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
654 .tcg_features = TCG_EXT3_FEATURES,
656 [FEAT_C000_0001_EDX] = {
657 .feat_names = {
658 NULL, NULL, "xstore", "xstore-en",
659 NULL, NULL, "xcrypt", "xcrypt-en",
660 "ace2", "ace2-en", "phe", "phe-en",
661 "pmm", "pmm-en", NULL, NULL,
662 NULL, NULL, NULL, NULL,
663 NULL, NULL, NULL, NULL,
664 NULL, NULL, NULL, NULL,
665 NULL, NULL, NULL, NULL,
667 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
668 .tcg_features = TCG_EXT4_FEATURES,
670 [FEAT_KVM] = {
671 .feat_names = {
672 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
673 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
674 NULL, "kvm-pv-tlb-flush", NULL, NULL,
675 NULL, NULL, NULL, NULL,
676 NULL, NULL, NULL, NULL,
677 NULL, NULL, NULL, NULL,
678 "kvmclock-stable-bit", NULL, NULL, NULL,
679 NULL, NULL, NULL, NULL,
681 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
682 .tcg_features = TCG_KVM_FEATURES,
684 [FEAT_KVM_HINTS] = {
685 .feat_names = {
686 "kvm-hint-dedicated", NULL, NULL, NULL,
687 NULL, NULL, NULL, NULL,
688 NULL, NULL, NULL, NULL,
689 NULL, NULL, NULL, NULL,
690 NULL, NULL, NULL, NULL,
691 NULL, NULL, NULL, NULL,
692 NULL, NULL, NULL, NULL,
693 NULL, NULL, NULL, NULL,
695 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EDX,
696 .tcg_features = TCG_KVM_FEATURES,
698 /* KVM hints aren't auto-enabled by -cpu host; they need to be
699  * enabled explicitly on the command line. */
701 .no_autoenable_flags = ~0U,
703 [FEAT_HYPERV_EAX] = {
704 .feat_names = {
705 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
706 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
707 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
708 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
709 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
710 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
711 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
712 NULL, NULL,
713 NULL, NULL, NULL, NULL,
714 NULL, NULL, NULL, NULL,
715 NULL, NULL, NULL, NULL,
716 NULL, NULL, NULL, NULL,
718 .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
720 [FEAT_HYPERV_EBX] = {
721 .feat_names = {
722 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
723 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
724 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
725 NULL /* hv_create_port */, NULL /* hv_connect_port */,
726 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
727 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
728 NULL, NULL,
729 NULL, NULL, NULL, NULL,
730 NULL, NULL, NULL, NULL,
731 NULL, NULL, NULL, NULL,
732 NULL, NULL, NULL, NULL,
734 .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
736 [FEAT_HYPERV_EDX] = {
737 .feat_names = {
738 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
739 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
740 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
741 NULL, NULL,
742 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
743 NULL, NULL, NULL, NULL,
744 NULL, NULL, NULL, NULL,
745 NULL, NULL, NULL, NULL,
746 NULL, NULL, NULL, NULL,
747 NULL, NULL, NULL, NULL,
749 .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
751 [FEAT_SVM] = {
752 .feat_names = {
753 "npt", "lbrv", "svm-lock", "nrip-save",
754 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
755 NULL, NULL, "pause-filter", NULL,
756 "pfthreshold", NULL, NULL, NULL,
757 NULL, NULL, NULL, NULL,
758 NULL, NULL, NULL, NULL,
759 NULL, NULL, NULL, NULL,
760 NULL, NULL, NULL, NULL,
762 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
763 .tcg_features = TCG_SVM_FEATURES,
765 [FEAT_7_0_EBX] = {
766 .feat_names = {
767 "fsgsbase", "tsc-adjust", NULL, "bmi1",
768 "hle", "avx2", NULL, "smep",
769 "bmi2", "erms", "invpcid", "rtm",
770 NULL, NULL, "mpx", NULL,
771 "avx512f", "avx512dq", "rdseed", "adx",
772 "smap", "avx512ifma", "pcommit", "clflushopt",
773 "clwb", "intel-pt", "avx512pf", "avx512er",
774 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
776 .cpuid_eax = 7,
777 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
778 .cpuid_reg = R_EBX,
779 .tcg_features = TCG_7_0_EBX_FEATURES,
781 [FEAT_7_0_ECX] = {
782 .feat_names = {
783 NULL, "avx512vbmi", "umip", "pku",
784 "ospke", NULL, "avx512vbmi2", NULL,
785 "gfni", "vaes", "vpclmulqdq", "avx512vnni",
786 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
787 "la57", NULL, NULL, NULL,
788 NULL, NULL, "rdpid", NULL,
789 NULL, "cldemote", NULL, NULL,
790 NULL, NULL, NULL, NULL,
792 .cpuid_eax = 7,
793 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
794 .cpuid_reg = R_ECX,
795 .tcg_features = TCG_7_0_ECX_FEATURES,
797 [FEAT_7_0_EDX] = {
798 .feat_names = {
799 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
800 NULL, NULL, NULL, NULL,
801 NULL, NULL, NULL, NULL,
802 NULL, NULL, NULL, NULL,
803 NULL, NULL, NULL, NULL,
804 NULL, NULL, NULL, NULL,
805 NULL, NULL, "spec-ctrl", NULL,
806 NULL, NULL, NULL, "ssbd",
808 .cpuid_eax = 7,
809 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
810 .cpuid_reg = R_EDX,
811 .tcg_features = TCG_7_0_EDX_FEATURES,
813 [FEAT_8000_0007_EDX] = {
814 .feat_names = {
815 NULL, NULL, NULL, NULL,
816 NULL, NULL, NULL, NULL,
817 "invtsc", NULL, NULL, NULL,
818 NULL, NULL, NULL, NULL,
819 NULL, NULL, NULL, NULL,
820 NULL, NULL, NULL, NULL,
821 NULL, NULL, NULL, NULL,
822 NULL, NULL, NULL, NULL,
824 .cpuid_eax = 0x80000007,
825 .cpuid_reg = R_EDX,
826 .tcg_features = TCG_APM_FEATURES,
827 .unmigratable_flags = CPUID_APM_INVTSC,
829 [FEAT_8000_0008_EBX] = {
830 .feat_names = {
831 NULL, NULL, NULL, NULL,
832 NULL, NULL, NULL, NULL,
833 NULL, NULL, NULL, NULL,
834 "ibpb", NULL, NULL, NULL,
835 NULL, NULL, NULL, NULL,
836 NULL, NULL, NULL, NULL,
837 NULL, "virt-ssbd", NULL, NULL,
838 NULL, NULL, NULL, NULL,
840 .cpuid_eax = 0x80000008,
841 .cpuid_reg = R_EBX,
842 .tcg_features = 0,
843 .unmigratable_flags = 0,
845 [FEAT_XSAVE] = {
846 .feat_names = {
847 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
848 NULL, NULL, NULL, NULL,
849 NULL, NULL, NULL, NULL,
850 NULL, NULL, NULL, NULL,
851 NULL, NULL, NULL, NULL,
852 NULL, NULL, NULL, NULL,
853 NULL, NULL, NULL, NULL,
854 NULL, NULL, NULL, NULL,
856 .cpuid_eax = 0xd,
857 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
858 .cpuid_reg = R_EAX,
859 .tcg_features = TCG_XSAVE_FEATURES,
861 [FEAT_6_EAX] = {
862 .feat_names = {
863 NULL, NULL, "arat", NULL,
864 NULL, NULL, NULL, NULL,
865 NULL, NULL, NULL, NULL,
866 NULL, NULL, NULL, NULL,
867 NULL, NULL, NULL, NULL,
868 NULL, NULL, NULL, NULL,
869 NULL, NULL, NULL, NULL,
870 NULL, NULL, NULL, NULL,
872 .cpuid_eax = 6, .cpuid_reg = R_EAX,
873 .tcg_features = TCG_6_EAX_FEATURES,
875 [FEAT_XSAVE_COMP_LO] = {
876 .cpuid_eax = 0xD,
877 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
878 .cpuid_reg = R_EAX,
879 .tcg_features = ~0U,
880 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
881 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
882 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
883 XSTATE_PKRU_MASK,
885 [FEAT_XSAVE_COMP_HI] = {
886 .cpuid_eax = 0xD,
887 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
888 .cpuid_reg = R_EDX,
889 .tcg_features = ~0U,
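/*
 * Editor's sketch (not part of the original source): each entry above fully
 * describes where its feature word lives in CPUID space, so a host-side
 * probe can be driven purely by the table, along these lines:
 *
 *     FeatureWordInfo *wi = &feature_word_info[FEAT_7_0_EBX];
 *     uint32_t regs[4];
 *     host_cpuid(wi->cpuid_eax, wi->cpuid_needs_ecx ? wi->cpuid_ecx : 0,
 *                &regs[R_EAX], &regs[R_EBX], &regs[R_ECX], &regs[R_EDX]);
 *     uint32_t host_bits = regs[wi->cpuid_reg];
 */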
893 typedef struct X86RegisterInfo32 {
894 /* Name of register */
895 const char *name;
896 /* QAPI enum value register */
897 X86CPURegister32 qapi_enum;
898 } X86RegisterInfo32;
900 #define REGISTER(reg) \
901 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
902 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
903 REGISTER(EAX),
904 REGISTER(ECX),
905 REGISTER(EDX),
906 REGISTER(EBX),
907 REGISTER(ESP),
908 REGISTER(EBP),
909 REGISTER(ESI),
910 REGISTER(EDI),
912 #undef REGISTER
914 typedef struct ExtSaveArea {
915 uint32_t feature, bits;
916 uint32_t offset, size;
917 } ExtSaveArea;
919 static const ExtSaveArea x86_ext_save_areas[] = {
920 [XSTATE_FP_BIT] = {
921 /* x87 FP state component is always enabled if XSAVE is supported */
922 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
923 /* x87 state is in the legacy region of the XSAVE area */
924 .offset = 0,
925 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
927 [XSTATE_SSE_BIT] = {
928 /* SSE state component is always enabled if XSAVE is supported */
929 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
930 /* SSE state is in the legacy region of the XSAVE area */
931 .offset = 0,
932 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
934 [XSTATE_YMM_BIT] =
935 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
936 .offset = offsetof(X86XSaveArea, avx_state),
937 .size = sizeof(XSaveAVX) },
938 [XSTATE_BNDREGS_BIT] =
939 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
940 .offset = offsetof(X86XSaveArea, bndreg_state),
941 .size = sizeof(XSaveBNDREG) },
942 [XSTATE_BNDCSR_BIT] =
943 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
944 .offset = offsetof(X86XSaveArea, bndcsr_state),
945 .size = sizeof(XSaveBNDCSR) },
946 [XSTATE_OPMASK_BIT] =
947 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
948 .offset = offsetof(X86XSaveArea, opmask_state),
949 .size = sizeof(XSaveOpmask) },
950 [XSTATE_ZMM_Hi256_BIT] =
951 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
952 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
953 .size = sizeof(XSaveZMM_Hi256) },
954 [XSTATE_Hi16_ZMM_BIT] =
955 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
956 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
957 .size = sizeof(XSaveHi16_ZMM) },
958 [XSTATE_PKRU_BIT] =
959 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
960 .offset = offsetof(X86XSaveArea, pkru_state),
961 .size = sizeof(XSavePKRU) },
964 static uint32_t xsave_area_size(uint64_t mask)
966 int i;
967 uint64_t ret = 0;
969 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
970 const ExtSaveArea *esa = &x86_ext_save_areas[i];
971 if ((mask >> i) & 1) {
972 ret = MAX(ret, esa->offset + esa->size);
975 return ret;
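/*
 * Illustrative example (editor's sketch, not part of the original source):
 * the returned size is simply the end offset of the highest enabled
 * component.  With only the x87 and SSE bits set, both components live in
 * the legacy region at offset 0, so
 *
 *     xsave_area_size(XSTATE_FP_MASK | XSTATE_SSE_MASK)
 *         == sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader)
 *
 * i.e., with the standard XSAVE layout, the 512-byte legacy area plus the
 * 64-byte XSAVE header.
 */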
978 static inline bool accel_uses_host_cpuid(void)
980 return kvm_enabled() || hvf_enabled();
983 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
985 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
986 cpu->env.features[FEAT_XSAVE_COMP_LO];
989 const char *get_register_name_32(unsigned int reg)
991 if (reg >= CPU_NB_REGS32) {
992 return NULL;
994 return x86_reg_info_32[reg].name;
998 * Returns the set of feature flags that are supported and migratable by
999 * QEMU, for a given FeatureWord.
1001 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
1003 FeatureWordInfo *wi = &feature_word_info[w];
1004 uint32_t r = 0;
1005 int i;
1007 for (i = 0; i < 32; i++) {
1008 uint32_t f = 1U << i;
1010 /* If the feature name is known, it is implicitly considered migratable,
1011 * unless it is explicitly set in unmigratable_flags */
1012 if ((wi->migratable_flags & f) ||
1013 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
1014 r |= f;
1017 return r;
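/*
 * Editor's note (not part of the original source): a concrete case of the
 * filtering above is "invtsc" in FEAT_8000_0007_EDX.  The flag has a name
 * in feat_names, but CPUID_APM_INVTSC is listed in that word's
 * unmigratable_flags, so x86_cpu_get_migratable_flags() never reports it.
 */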
1020 void host_cpuid(uint32_t function, uint32_t count,
1021 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
1023 uint32_t vec[4];
1025 #ifdef __x86_64__
1026 asm volatile("cpuid"
1027 : "=a"(vec[0]), "=b"(vec[1]),
1028 "=c"(vec[2]), "=d"(vec[3])
1029 : "0"(function), "c"(count) : "cc");
1030 #elif defined(__i386__)
1031 asm volatile("pusha \n\t"
1032 "cpuid \n\t"
1033 "mov %%eax, 0(%2) \n\t"
1034 "mov %%ebx, 4(%2) \n\t"
1035 "mov %%ecx, 8(%2) \n\t"
1036 "mov %%edx, 12(%2) \n\t"
1037 "popa"
1038 : : "a"(function), "c"(count), "S"(vec)
1039 : "memory", "cc");
1040 #else
1041 abort();
1042 #endif
1044 if (eax)
1045 *eax = vec[0];
1046 if (ebx)
1047 *ebx = vec[1];
1048 if (ecx)
1049 *ecx = vec[2];
1050 if (edx)
1051 *edx = vec[3];
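/*
 * Illustrative usage (editor's sketch, not part of the original source):
 * reading the maximum basic CPUID leaf of the host:
 *
 *     uint32_t max_leaf;
 *     host_cpuid(0, 0, &max_leaf, NULL, NULL, NULL);
 *
 * Any of the output pointers may be NULL; host_cpuid() only stores through
 * the pointers that were actually supplied.
 */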
1054 void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
1056 uint32_t eax, ebx, ecx, edx;
1058 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1059 x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
1061 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1062 if (family) {
1063 *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1065 if (model) {
1066 *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1068 if (stepping) {
1069 *stepping = eax & 0x0F;
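/*
 * Illustrative example (editor's sketch, not part of the original source,
 * using an illustrative CPUID.1 EAX value of 0x000306C3):
 *
 *     family   = ((0x306C3 >> 8) & 0x0F) + ((0x306C3 >> 20) & 0xFF)   = 6
 *     model    = ((0x306C3 >> 4) & 0x0F) | ((0x306C3 & 0xF0000) >> 12) = 0x3C
 *     stepping =   0x306C3 & 0x0F                                      = 3
 *
 * i.e. the extended family/model fields are folded into the base fields,
 * matching the family 6 / model 60 values used by the Haswell definitions
 * further below.
 */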
1073 /* CPU class name definitions: */
1075 /* Return type name for a given CPU model name
1076 * Caller is responsible for freeing the returned string.
1078 static char *x86_cpu_type_name(const char *model_name)
1080 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
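/*
 * Illustrative example (editor's sketch, not part of the original source):
 * the QOM type name is just the model name plus X86_CPU_TYPE_SUFFIX, so on
 * an x86_64 target something like
 *
 *     char *t = x86_cpu_type_name("qemu64");   // e.g. "qemu64-x86_64-cpu"
 *     g_free(t);
 *
 * x86_cpu_class_get_model_name() below performs the reverse mapping by
 * stripping that same suffix.
 */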
1083 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
1085 ObjectClass *oc;
1086 char *typename = x86_cpu_type_name(cpu_model);
1087 oc = object_class_by_name(typename);
1088 g_free(typename);
1089 return oc;
1092 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
1094 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
1095 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
1096 return g_strndup(class_name,
1097 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
1100 struct X86CPUDefinition {
1101 const char *name;
1102 uint32_t level;
1103 uint32_t xlevel;
1104 /* vendor is a zero-terminated, 12-character ASCII string */
1105 char vendor[CPUID_VENDOR_SZ + 1];
1106 int family;
1107 int model;
1108 int stepping;
1109 FeatureWordArray features;
1110 const char *model_id;
1111 CPUCaches *cache_info;
1114 static CPUCaches epyc_cache_info = {
1115 .l1d_cache = {
1116 .type = DCACHE,
1117 .level = 1,
1118 .size = 32 * KiB,
1119 .line_size = 64,
1120 .associativity = 8,
1121 .partitions = 1,
1122 .sets = 64,
1123 .lines_per_tag = 1,
1124 .self_init = 1,
1125 .no_invd_sharing = true,
1127 .l1i_cache = {
1128 .type = ICACHE,
1129 .level = 1,
1130 .size = 64 * KiB,
1131 .line_size = 64,
1132 .associativity = 4,
1133 .partitions = 1,
1134 .sets = 256,
1135 .lines_per_tag = 1,
1136 .self_init = 1,
1137 .no_invd_sharing = true,
1139 .l2_cache = {
1140 .type = UNIFIED_CACHE,
1141 .level = 2,
1142 .size = 512 * KiB,
1143 .line_size = 64,
1144 .associativity = 8,
1145 .partitions = 1,
1146 .sets = 1024,
1147 .lines_per_tag = 1,
1149 .l3_cache = {
1150 .type = UNIFIED_CACHE,
1151 .level = 3,
1152 .size = 8 * MiB,
1153 .line_size = 64,
1154 .associativity = 16,
1155 .partitions = 1,
1156 .sets = 8192,
1157 .lines_per_tag = 1,
1158 .self_init = true,
1159 .inclusive = true,
1160 .complex_indexing = true,
1164 static X86CPUDefinition builtin_x86_defs[] = {
1166 .name = "qemu64",
1167 .level = 0xd,
1168 .vendor = CPUID_VENDOR_AMD,
1169 .family = 6,
1170 .model = 6,
1171 .stepping = 3,
1172 .features[FEAT_1_EDX] =
1173 PPRO_FEATURES |
1174 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1175 CPUID_PSE36,
1176 .features[FEAT_1_ECX] =
1177 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1178 .features[FEAT_8000_0001_EDX] =
1179 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1180 .features[FEAT_8000_0001_ECX] =
1181 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
1182 .xlevel = 0x8000000A,
1183 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1186 .name = "phenom",
1187 .level = 5,
1188 .vendor = CPUID_VENDOR_AMD,
1189 .family = 16,
1190 .model = 2,
1191 .stepping = 3,
1192 /* Missing: CPUID_HT */
1193 .features[FEAT_1_EDX] =
1194 PPRO_FEATURES |
1195 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1196 CPUID_PSE36 | CPUID_VME,
1197 .features[FEAT_1_ECX] =
1198 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
1199 CPUID_EXT_POPCNT,
1200 .features[FEAT_8000_0001_EDX] =
1201 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
1202 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
1203 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
1204 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1205 CPUID_EXT3_CR8LEG,
1206 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1207 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
1208 .features[FEAT_8000_0001_ECX] =
1209 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
1210 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
1211 /* Missing: CPUID_SVM_LBRV */
1212 .features[FEAT_SVM] =
1213 CPUID_SVM_NPT,
1214 .xlevel = 0x8000001A,
1215 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
1218 .name = "core2duo",
1219 .level = 10,
1220 .vendor = CPUID_VENDOR_INTEL,
1221 .family = 6,
1222 .model = 15,
1223 .stepping = 11,
1224 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1225 .features[FEAT_1_EDX] =
1226 PPRO_FEATURES |
1227 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1228 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
1229 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
1230 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
1231 .features[FEAT_1_ECX] =
1232 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1233 CPUID_EXT_CX16,
1234 .features[FEAT_8000_0001_EDX] =
1235 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1236 .features[FEAT_8000_0001_ECX] =
1237 CPUID_EXT3_LAHF_LM,
1238 .xlevel = 0x80000008,
1239 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
1242 .name = "kvm64",
1243 .level = 0xd,
1244 .vendor = CPUID_VENDOR_INTEL,
1245 .family = 15,
1246 .model = 6,
1247 .stepping = 1,
1248 /* Missing: CPUID_HT */
1249 .features[FEAT_1_EDX] =
1250 PPRO_FEATURES | CPUID_VME |
1251 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1252 CPUID_PSE36,
1253 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
1254 .features[FEAT_1_ECX] =
1255 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1256 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
1257 .features[FEAT_8000_0001_EDX] =
1258 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1259 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1260 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
1261 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1262 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
1263 .features[FEAT_8000_0001_ECX] =
1265 .xlevel = 0x80000008,
1266 .model_id = "Common KVM processor"
1269 .name = "qemu32",
1270 .level = 4,
1271 .vendor = CPUID_VENDOR_INTEL,
1272 .family = 6,
1273 .model = 6,
1274 .stepping = 3,
1275 .features[FEAT_1_EDX] =
1276 PPRO_FEATURES,
1277 .features[FEAT_1_ECX] =
1278 CPUID_EXT_SSE3,
1279 .xlevel = 0x80000004,
1280 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1283 .name = "kvm32",
1284 .level = 5,
1285 .vendor = CPUID_VENDOR_INTEL,
1286 .family = 15,
1287 .model = 6,
1288 .stepping = 1,
1289 .features[FEAT_1_EDX] =
1290 PPRO_FEATURES | CPUID_VME |
1291 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
1292 .features[FEAT_1_ECX] =
1293 CPUID_EXT_SSE3,
1294 .features[FEAT_8000_0001_ECX] =
1296 .xlevel = 0x80000008,
1297 .model_id = "Common 32-bit KVM processor"
1300 .name = "coreduo",
1301 .level = 10,
1302 .vendor = CPUID_VENDOR_INTEL,
1303 .family = 6,
1304 .model = 14,
1305 .stepping = 8,
1306 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1307 .features[FEAT_1_EDX] =
1308 PPRO_FEATURES | CPUID_VME |
1309 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
1310 CPUID_SS,
1311 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
1312 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
1313 .features[FEAT_1_ECX] =
1314 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
1315 .features[FEAT_8000_0001_EDX] =
1316 CPUID_EXT2_NX,
1317 .xlevel = 0x80000008,
1318 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
1321 .name = "486",
1322 .level = 1,
1323 .vendor = CPUID_VENDOR_INTEL,
1324 .family = 4,
1325 .model = 8,
1326 .stepping = 0,
1327 .features[FEAT_1_EDX] =
1328 I486_FEATURES,
1329 .xlevel = 0,
1330 .model_id = "",
1333 .name = "pentium",
1334 .level = 1,
1335 .vendor = CPUID_VENDOR_INTEL,
1336 .family = 5,
1337 .model = 4,
1338 .stepping = 3,
1339 .features[FEAT_1_EDX] =
1340 PENTIUM_FEATURES,
1341 .xlevel = 0,
1342 .model_id = "",
1345 .name = "pentium2",
1346 .level = 2,
1347 .vendor = CPUID_VENDOR_INTEL,
1348 .family = 6,
1349 .model = 5,
1350 .stepping = 2,
1351 .features[FEAT_1_EDX] =
1352 PENTIUM2_FEATURES,
1353 .xlevel = 0,
1354 .model_id = "",
1357 .name = "pentium3",
1358 .level = 3,
1359 .vendor = CPUID_VENDOR_INTEL,
1360 .family = 6,
1361 .model = 7,
1362 .stepping = 3,
1363 .features[FEAT_1_EDX] =
1364 PENTIUM3_FEATURES,
1365 .xlevel = 0,
1366 .model_id = "",
1369 .name = "athlon",
1370 .level = 2,
1371 .vendor = CPUID_VENDOR_AMD,
1372 .family = 6,
1373 .model = 2,
1374 .stepping = 3,
1375 .features[FEAT_1_EDX] =
1376 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
1377 CPUID_MCA,
1378 .features[FEAT_8000_0001_EDX] =
1379 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
1380 .xlevel = 0x80000008,
1381 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1384 .name = "n270",
1385 .level = 10,
1386 .vendor = CPUID_VENDOR_INTEL,
1387 .family = 6,
1388 .model = 28,
1389 .stepping = 2,
1390 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1391 .features[FEAT_1_EDX] =
1392 PPRO_FEATURES |
1393 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
1394 CPUID_ACPI | CPUID_SS,
1395 /* Some CPUs have no CPUID_SEP */
1396 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
1397 * CPUID_EXT_XTPR */
1398 .features[FEAT_1_ECX] =
1399 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1400 CPUID_EXT_MOVBE,
1401 .features[FEAT_8000_0001_EDX] =
1402 CPUID_EXT2_NX,
1403 .features[FEAT_8000_0001_ECX] =
1404 CPUID_EXT3_LAHF_LM,
1405 .xlevel = 0x80000008,
1406 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
1409 .name = "Conroe",
1410 .level = 10,
1411 .vendor = CPUID_VENDOR_INTEL,
1412 .family = 6,
1413 .model = 15,
1414 .stepping = 3,
1415 .features[FEAT_1_EDX] =
1416 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1417 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1418 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1419 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1420 CPUID_DE | CPUID_FP87,
1421 .features[FEAT_1_ECX] =
1422 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1423 .features[FEAT_8000_0001_EDX] =
1424 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1425 .features[FEAT_8000_0001_ECX] =
1426 CPUID_EXT3_LAHF_LM,
1427 .xlevel = 0x80000008,
1428 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1431 .name = "Penryn",
1432 .level = 10,
1433 .vendor = CPUID_VENDOR_INTEL,
1434 .family = 6,
1435 .model = 23,
1436 .stepping = 3,
1437 .features[FEAT_1_EDX] =
1438 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1439 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1440 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1441 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1442 CPUID_DE | CPUID_FP87,
1443 .features[FEAT_1_ECX] =
1444 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1445 CPUID_EXT_SSE3,
1446 .features[FEAT_8000_0001_EDX] =
1447 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1448 .features[FEAT_8000_0001_ECX] =
1449 CPUID_EXT3_LAHF_LM,
1450 .xlevel = 0x80000008,
1451 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1454 .name = "Nehalem",
1455 .level = 11,
1456 .vendor = CPUID_VENDOR_INTEL,
1457 .family = 6,
1458 .model = 26,
1459 .stepping = 3,
1460 .features[FEAT_1_EDX] =
1461 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1462 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1463 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1464 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1465 CPUID_DE | CPUID_FP87,
1466 .features[FEAT_1_ECX] =
1467 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1468 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1469 .features[FEAT_8000_0001_EDX] =
1470 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1471 .features[FEAT_8000_0001_ECX] =
1472 CPUID_EXT3_LAHF_LM,
1473 .xlevel = 0x80000008,
1474 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1477 .name = "Nehalem-IBRS",
1478 .level = 11,
1479 .vendor = CPUID_VENDOR_INTEL,
1480 .family = 6,
1481 .model = 26,
1482 .stepping = 3,
1483 .features[FEAT_1_EDX] =
1484 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1485 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1486 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1487 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1488 CPUID_DE | CPUID_FP87,
1489 .features[FEAT_1_ECX] =
1490 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1491 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1492 .features[FEAT_7_0_EDX] =
1493 CPUID_7_0_EDX_SPEC_CTRL,
1494 .features[FEAT_8000_0001_EDX] =
1495 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1496 .features[FEAT_8000_0001_ECX] =
1497 CPUID_EXT3_LAHF_LM,
1498 .xlevel = 0x80000008,
1499 .model_id = "Intel Core i7 9xx (Nehalem Core i7, IBRS update)",
1502 .name = "Westmere",
1503 .level = 11,
1504 .vendor = CPUID_VENDOR_INTEL,
1505 .family = 6,
1506 .model = 44,
1507 .stepping = 1,
1508 .features[FEAT_1_EDX] =
1509 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1510 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1511 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1512 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1513 CPUID_DE | CPUID_FP87,
1514 .features[FEAT_1_ECX] =
1515 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1516 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1517 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1518 .features[FEAT_8000_0001_EDX] =
1519 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1520 .features[FEAT_8000_0001_ECX] =
1521 CPUID_EXT3_LAHF_LM,
1522 .features[FEAT_6_EAX] =
1523 CPUID_6_EAX_ARAT,
1524 .xlevel = 0x80000008,
1525 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1528 .name = "Westmere-IBRS",
1529 .level = 11,
1530 .vendor = CPUID_VENDOR_INTEL,
1531 .family = 6,
1532 .model = 44,
1533 .stepping = 1,
1534 .features[FEAT_1_EDX] =
1535 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1536 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1537 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1538 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1539 CPUID_DE | CPUID_FP87,
1540 .features[FEAT_1_ECX] =
1541 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1542 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1543 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1544 .features[FEAT_8000_0001_EDX] =
1545 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1546 .features[FEAT_8000_0001_ECX] =
1547 CPUID_EXT3_LAHF_LM,
1548 .features[FEAT_7_0_EDX] =
1549 CPUID_7_0_EDX_SPEC_CTRL,
1550 .features[FEAT_6_EAX] =
1551 CPUID_6_EAX_ARAT,
1552 .xlevel = 0x80000008,
1553 .model_id = "Westmere E56xx/L56xx/X56xx (IBRS update)",
1556 .name = "SandyBridge",
1557 .level = 0xd,
1558 .vendor = CPUID_VENDOR_INTEL,
1559 .family = 6,
1560 .model = 42,
1561 .stepping = 1,
1562 .features[FEAT_1_EDX] =
1563 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1564 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1565 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1566 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1567 CPUID_DE | CPUID_FP87,
1568 .features[FEAT_1_ECX] =
1569 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1570 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1571 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1572 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1573 CPUID_EXT_SSE3,
1574 .features[FEAT_8000_0001_EDX] =
1575 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1576 CPUID_EXT2_SYSCALL,
1577 .features[FEAT_8000_0001_ECX] =
1578 CPUID_EXT3_LAHF_LM,
1579 .features[FEAT_XSAVE] =
1580 CPUID_XSAVE_XSAVEOPT,
1581 .features[FEAT_6_EAX] =
1582 CPUID_6_EAX_ARAT,
1583 .xlevel = 0x80000008,
1584 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1587 .name = "SandyBridge-IBRS",
1588 .level = 0xd,
1589 .vendor = CPUID_VENDOR_INTEL,
1590 .family = 6,
1591 .model = 42,
1592 .stepping = 1,
1593 .features[FEAT_1_EDX] =
1594 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1595 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1596 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1597 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1598 CPUID_DE | CPUID_FP87,
1599 .features[FEAT_1_ECX] =
1600 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1601 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1602 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1603 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1604 CPUID_EXT_SSE3,
1605 .features[FEAT_8000_0001_EDX] =
1606 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1607 CPUID_EXT2_SYSCALL,
1608 .features[FEAT_8000_0001_ECX] =
1609 CPUID_EXT3_LAHF_LM,
1610 .features[FEAT_7_0_EDX] =
1611 CPUID_7_0_EDX_SPEC_CTRL,
1612 .features[FEAT_XSAVE] =
1613 CPUID_XSAVE_XSAVEOPT,
1614 .features[FEAT_6_EAX] =
1615 CPUID_6_EAX_ARAT,
1616 .xlevel = 0x80000008,
1617 .model_id = "Intel Xeon E312xx (Sandy Bridge, IBRS update)",
1620 .name = "IvyBridge",
1621 .level = 0xd,
1622 .vendor = CPUID_VENDOR_INTEL,
1623 .family = 6,
1624 .model = 58,
1625 .stepping = 9,
1626 .features[FEAT_1_EDX] =
1627 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1628 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1629 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1630 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1631 CPUID_DE | CPUID_FP87,
1632 .features[FEAT_1_ECX] =
1633 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1634 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1635 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1636 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1637 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1638 .features[FEAT_7_0_EBX] =
1639 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1640 CPUID_7_0_EBX_ERMS,
1641 .features[FEAT_8000_0001_EDX] =
1642 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1643 CPUID_EXT2_SYSCALL,
1644 .features[FEAT_8000_0001_ECX] =
1645 CPUID_EXT3_LAHF_LM,
1646 .features[FEAT_XSAVE] =
1647 CPUID_XSAVE_XSAVEOPT,
1648 .features[FEAT_6_EAX] =
1649 CPUID_6_EAX_ARAT,
1650 .xlevel = 0x80000008,
1651 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1654 .name = "IvyBridge-IBRS",
1655 .level = 0xd,
1656 .vendor = CPUID_VENDOR_INTEL,
1657 .family = 6,
1658 .model = 58,
1659 .stepping = 9,
1660 .features[FEAT_1_EDX] =
1661 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1662 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1663 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1664 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1665 CPUID_DE | CPUID_FP87,
1666 .features[FEAT_1_ECX] =
1667 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1668 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1669 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1670 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1671 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1672 .features[FEAT_7_0_EBX] =
1673 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1674 CPUID_7_0_EBX_ERMS,
1675 .features[FEAT_8000_0001_EDX] =
1676 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1677 CPUID_EXT2_SYSCALL,
1678 .features[FEAT_8000_0001_ECX] =
1679 CPUID_EXT3_LAHF_LM,
1680 .features[FEAT_7_0_EDX] =
1681 CPUID_7_0_EDX_SPEC_CTRL,
1682 .features[FEAT_XSAVE] =
1683 CPUID_XSAVE_XSAVEOPT,
1684 .features[FEAT_6_EAX] =
1685 CPUID_6_EAX_ARAT,
1686 .xlevel = 0x80000008,
1687 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)",
1690 .name = "Haswell-noTSX",
1691 .level = 0xd,
1692 .vendor = CPUID_VENDOR_INTEL,
1693 .family = 6,
1694 .model = 60,
1695 .stepping = 1,
1696 .features[FEAT_1_EDX] =
1697 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1698 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1699 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1700 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1701 CPUID_DE | CPUID_FP87,
1702 .features[FEAT_1_ECX] =
1703 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1704 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1705 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1706 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1707 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1708 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1709 .features[FEAT_8000_0001_EDX] =
1710 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1711 CPUID_EXT2_SYSCALL,
1712 .features[FEAT_8000_0001_ECX] =
1713 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1714 .features[FEAT_7_0_EBX] =
1715 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1716 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1717 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1718 .features[FEAT_XSAVE] =
1719 CPUID_XSAVE_XSAVEOPT,
1720 .features[FEAT_6_EAX] =
1721 CPUID_6_EAX_ARAT,
1722 .xlevel = 0x80000008,
1723 .model_id = "Intel Core Processor (Haswell, no TSX)",
1726 .name = "Haswell-noTSX-IBRS",
1727 .level = 0xd,
1728 .vendor = CPUID_VENDOR_INTEL,
1729 .family = 6,
1730 .model = 60,
1731 .stepping = 1,
1732 .features[FEAT_1_EDX] =
1733 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1734 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1735 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1736 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1737 CPUID_DE | CPUID_FP87,
1738 .features[FEAT_1_ECX] =
1739 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1740 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1741 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1742 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1743 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1744 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1745 .features[FEAT_8000_0001_EDX] =
1746 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1747 CPUID_EXT2_SYSCALL,
1748 .features[FEAT_8000_0001_ECX] =
1749 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1750 .features[FEAT_7_0_EDX] =
1751 CPUID_7_0_EDX_SPEC_CTRL,
1752 .features[FEAT_7_0_EBX] =
1753 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1754 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1755 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1756 .features[FEAT_XSAVE] =
1757 CPUID_XSAVE_XSAVEOPT,
1758 .features[FEAT_6_EAX] =
1759 CPUID_6_EAX_ARAT,
1760 .xlevel = 0x80000008,
1761 .model_id = "Intel Core Processor (Haswell, no TSX, IBRS)",
1764 .name = "Haswell",
1765 .level = 0xd,
1766 .vendor = CPUID_VENDOR_INTEL,
1767 .family = 6,
1768 .model = 60,
1769 .stepping = 4,
1770 .features[FEAT_1_EDX] =
1771 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1772 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1773 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1774 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1775 CPUID_DE | CPUID_FP87,
1776 .features[FEAT_1_ECX] =
1777 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1778 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1779 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1780 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1781 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1782 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1783 .features[FEAT_8000_0001_EDX] =
1784 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1785 CPUID_EXT2_SYSCALL,
1786 .features[FEAT_8000_0001_ECX] =
1787 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1788 .features[FEAT_7_0_EBX] =
1789 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1790 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1791 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1792 CPUID_7_0_EBX_RTM,
1793 .features[FEAT_XSAVE] =
1794 CPUID_XSAVE_XSAVEOPT,
1795 .features[FEAT_6_EAX] =
1796 CPUID_6_EAX_ARAT,
1797 .xlevel = 0x80000008,
1798 .model_id = "Intel Core Processor (Haswell)",
1801 .name = "Haswell-IBRS",
1802 .level = 0xd,
1803 .vendor = CPUID_VENDOR_INTEL,
1804 .family = 6,
1805 .model = 60,
1806 .stepping = 4,
1807 .features[FEAT_1_EDX] =
1808 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1809 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1810 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1811 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1812 CPUID_DE | CPUID_FP87,
1813 .features[FEAT_1_ECX] =
1814 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1815 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1816 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1817 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1818 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1819 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1820 .features[FEAT_8000_0001_EDX] =
1821 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1822 CPUID_EXT2_SYSCALL,
1823 .features[FEAT_8000_0001_ECX] =
1824 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1825 .features[FEAT_7_0_EDX] =
1826 CPUID_7_0_EDX_SPEC_CTRL,
1827 .features[FEAT_7_0_EBX] =
1828 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1829 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1830 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1831 CPUID_7_0_EBX_RTM,
1832 .features[FEAT_XSAVE] =
1833 CPUID_XSAVE_XSAVEOPT,
1834 .features[FEAT_6_EAX] =
1835 CPUID_6_EAX_ARAT,
1836 .xlevel = 0x80000008,
1837 .model_id = "Intel Core Processor (Haswell, IBRS)",
1840 .name = "Broadwell-noTSX",
1841 .level = 0xd,
1842 .vendor = CPUID_VENDOR_INTEL,
1843 .family = 6,
1844 .model = 61,
1845 .stepping = 2,
1846 .features[FEAT_1_EDX] =
1847 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1848 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1849 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1850 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1851 CPUID_DE | CPUID_FP87,
1852 .features[FEAT_1_ECX] =
1853 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1854 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1855 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1856 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1857 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1858 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1859 .features[FEAT_8000_0001_EDX] =
1860 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1861 CPUID_EXT2_SYSCALL,
1862 .features[FEAT_8000_0001_ECX] =
1863 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1864 .features[FEAT_7_0_EBX] =
1865 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1866 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1867 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1868 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1869 CPUID_7_0_EBX_SMAP,
1870 .features[FEAT_XSAVE] =
1871 CPUID_XSAVE_XSAVEOPT,
1872 .features[FEAT_6_EAX] =
1873 CPUID_6_EAX_ARAT,
1874 .xlevel = 0x80000008,
1875 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1878 .name = "Broadwell-noTSX-IBRS",
1879 .level = 0xd,
1880 .vendor = CPUID_VENDOR_INTEL,
1881 .family = 6,
1882 .model = 61,
1883 .stepping = 2,
1884 .features[FEAT_1_EDX] =
1885 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1886 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1887 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1888 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1889 CPUID_DE | CPUID_FP87,
1890 .features[FEAT_1_ECX] =
1891 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1892 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1893 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1894 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1895 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1896 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1897 .features[FEAT_8000_0001_EDX] =
1898 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1899 CPUID_EXT2_SYSCALL,
1900 .features[FEAT_8000_0001_ECX] =
1901 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1902 .features[FEAT_7_0_EDX] =
1903 CPUID_7_0_EDX_SPEC_CTRL,
1904 .features[FEAT_7_0_EBX] =
1905 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1906 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1907 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1908 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1909 CPUID_7_0_EBX_SMAP,
1910 .features[FEAT_XSAVE] =
1911 CPUID_XSAVE_XSAVEOPT,
1912 .features[FEAT_6_EAX] =
1913 CPUID_6_EAX_ARAT,
1914 .xlevel = 0x80000008,
1915 .model_id = "Intel Core Processor (Broadwell, no TSX, IBRS)",
1918 .name = "Broadwell",
1919 .level = 0xd,
1920 .vendor = CPUID_VENDOR_INTEL,
1921 .family = 6,
1922 .model = 61,
1923 .stepping = 2,
1924 .features[FEAT_1_EDX] =
1925 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1926 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1927 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1928 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1929 CPUID_DE | CPUID_FP87,
1930 .features[FEAT_1_ECX] =
1931 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1932 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1933 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1934 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1935 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1936 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1937 .features[FEAT_8000_0001_EDX] =
1938 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1939 CPUID_EXT2_SYSCALL,
1940 .features[FEAT_8000_0001_ECX] =
1941 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1942 .features[FEAT_7_0_EBX] =
1943 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1944 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1945 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1946 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1947 CPUID_7_0_EBX_SMAP,
1948 .features[FEAT_XSAVE] =
1949 CPUID_XSAVE_XSAVEOPT,
1950 .features[FEAT_6_EAX] =
1951 CPUID_6_EAX_ARAT,
1952 .xlevel = 0x80000008,
1953 .model_id = "Intel Core Processor (Broadwell)",
1956 .name = "Broadwell-IBRS",
1957 .level = 0xd,
1958 .vendor = CPUID_VENDOR_INTEL,
1959 .family = 6,
1960 .model = 61,
1961 .stepping = 2,
1962 .features[FEAT_1_EDX] =
1963 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1964 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1965 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1966 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1967 CPUID_DE | CPUID_FP87,
1968 .features[FEAT_1_ECX] =
1969 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1970 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1971 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1972 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1973 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1974 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1975 .features[FEAT_8000_0001_EDX] =
1976 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1977 CPUID_EXT2_SYSCALL,
1978 .features[FEAT_8000_0001_ECX] =
1979 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1980 .features[FEAT_7_0_EDX] =
1981 CPUID_7_0_EDX_SPEC_CTRL,
1982 .features[FEAT_7_0_EBX] =
1983 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1984 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1985 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1986 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1987 CPUID_7_0_EBX_SMAP,
1988 .features[FEAT_XSAVE] =
1989 CPUID_XSAVE_XSAVEOPT,
1990 .features[FEAT_6_EAX] =
1991 CPUID_6_EAX_ARAT,
1992 .xlevel = 0x80000008,
1993 .model_id = "Intel Core Processor (Broadwell, IBRS)",
1996 .name = "Skylake-Client",
1997 .level = 0xd,
1998 .vendor = CPUID_VENDOR_INTEL,
1999 .family = 6,
2000 .model = 94,
2001 .stepping = 3,
2002 .features[FEAT_1_EDX] =
2003 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2004 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2005 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2006 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2007 CPUID_DE | CPUID_FP87,
2008 .features[FEAT_1_ECX] =
2009 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2010 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2011 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2012 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2013 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2014 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2015 .features[FEAT_8000_0001_EDX] =
2016 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2017 CPUID_EXT2_SYSCALL,
2018 .features[FEAT_8000_0001_ECX] =
2019 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2020 .features[FEAT_7_0_EBX] =
2021 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2022 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2023 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2024 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2025 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
2026 /* Missing: XSAVES (not supported by some Linux versions,
2027 * including v4.1 to v4.12).
2028 * KVM doesn't yet expose any XSAVES state save component,
2029 * and the only one defined in Skylake (processor tracing)
2030 * probably will block migration anyway.
2032 .features[FEAT_XSAVE] =
2033 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2034 CPUID_XSAVE_XGETBV1,
2035 .features[FEAT_6_EAX] =
2036 CPUID_6_EAX_ARAT,
2037 .xlevel = 0x80000008,
2038 .model_id = "Intel Core Processor (Skylake)",
2041 .name = "Skylake-Client-IBRS",
2042 .level = 0xd,
2043 .vendor = CPUID_VENDOR_INTEL,
2044 .family = 6,
2045 .model = 94,
2046 .stepping = 3,
2047 .features[FEAT_1_EDX] =
2048 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2049 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2050 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2051 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2052 CPUID_DE | CPUID_FP87,
2053 .features[FEAT_1_ECX] =
2054 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2055 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2056 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2057 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2058 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2059 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2060 .features[FEAT_8000_0001_EDX] =
2061 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2062 CPUID_EXT2_SYSCALL,
2063 .features[FEAT_8000_0001_ECX] =
2064 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2065 .features[FEAT_7_0_EDX] =
2066 CPUID_7_0_EDX_SPEC_CTRL,
2067 .features[FEAT_7_0_EBX] =
2068 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2069 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2070 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2071 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2072 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
2073 /* Missing: XSAVES (not supported by some Linux versions,
2074 * including v4.1 to v4.12).
2075 * KVM doesn't yet expose any XSAVES state save component,
2076 * and the only one defined in Skylake (processor tracing)
2077 * probably will block migration anyway.
2079 .features[FEAT_XSAVE] =
2080 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2081 CPUID_XSAVE_XGETBV1,
2082 .features[FEAT_6_EAX] =
2083 CPUID_6_EAX_ARAT,
2084 .xlevel = 0x80000008,
2085 .model_id = "Intel Core Processor (Skylake, IBRS)",
2088 .name = "Skylake-Server",
2089 .level = 0xd,
2090 .vendor = CPUID_VENDOR_INTEL,
2091 .family = 6,
2092 .model = 85,
2093 .stepping = 4,
2094 .features[FEAT_1_EDX] =
2095 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2096 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2097 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2098 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2099 CPUID_DE | CPUID_FP87,
2100 .features[FEAT_1_ECX] =
2101 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2102 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2103 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2104 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2105 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2106 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2107 .features[FEAT_8000_0001_EDX] =
2108 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2109 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2110 .features[FEAT_8000_0001_ECX] =
2111 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2112 .features[FEAT_7_0_EBX] =
2113 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2114 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2115 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2116 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2117 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
2118 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2119 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2120 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2121 /* Missing: XSAVES (not supported by some Linux versions,
2122 * including v4.1 to v4.12).
2123 * KVM doesn't yet expose any XSAVES state save component,
2124 * and the only one defined in Skylake (processor tracing)
2125 * probably will block migration anyway.
2127 .features[FEAT_XSAVE] =
2128 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2129 CPUID_XSAVE_XGETBV1,
2130 .features[FEAT_6_EAX] =
2131 CPUID_6_EAX_ARAT,
2132 .xlevel = 0x80000008,
2133 .model_id = "Intel Xeon Processor (Skylake)",
2136 .name = "Skylake-Server-IBRS",
2137 .level = 0xd,
2138 .vendor = CPUID_VENDOR_INTEL,
2139 .family = 6,
2140 .model = 85,
2141 .stepping = 4,
2142 .features[FEAT_1_EDX] =
2143 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2144 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2145 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2146 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2147 CPUID_DE | CPUID_FP87,
2148 .features[FEAT_1_ECX] =
2149 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2150 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2151 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2152 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2153 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2154 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2155 .features[FEAT_8000_0001_EDX] =
2156 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2157 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2158 .features[FEAT_8000_0001_ECX] =
2159 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2160 .features[FEAT_7_0_EDX] =
2161 CPUID_7_0_EDX_SPEC_CTRL,
2162 .features[FEAT_7_0_EBX] =
2163 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2164 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2165 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2166 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2167 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
2168 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2169 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2170 CPUID_7_0_EBX_AVX512VL,
2171 /* Missing: XSAVES (not supported by some Linux versions,
2172 * including v4.1 to v4.12).
2173 * KVM doesn't yet expose any XSAVES state save component,
2174 * and the only one defined in Skylake (processor tracing)
2175 * probably will block migration anyway.
2177 .features[FEAT_XSAVE] =
2178 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2179 CPUID_XSAVE_XGETBV1,
2180 .features[FEAT_6_EAX] =
2181 CPUID_6_EAX_ARAT,
2182 .xlevel = 0x80000008,
2183 .model_id = "Intel Xeon Processor (Skylake, IBRS)",
2186 .name = "KnightsMill",
2187 .level = 0xd,
2188 .vendor = CPUID_VENDOR_INTEL,
2189 .family = 6,
2190 .model = 133,
2191 .stepping = 0,
2192 .features[FEAT_1_EDX] =
2193 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR |
2194 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV |
2195 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC |
2196 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC |
2197 CPUID_PSE | CPUID_DE | CPUID_FP87,
2198 .features[FEAT_1_ECX] =
2199 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2200 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2201 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2202 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2203 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2204 CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2205 .features[FEAT_8000_0001_EDX] =
2206 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2207 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2208 .features[FEAT_8000_0001_ECX] =
2209 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2210 .features[FEAT_7_0_EBX] =
2211 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2212 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS |
2213 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F |
2214 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF |
2215 CPUID_7_0_EBX_AVX512ER,
2216 .features[FEAT_7_0_ECX] =
2217 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
2218 .features[FEAT_7_0_EDX] =
2219 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS,
2220 .features[FEAT_XSAVE] =
2221 CPUID_XSAVE_XSAVEOPT,
2222 .features[FEAT_6_EAX] =
2223 CPUID_6_EAX_ARAT,
2224 .xlevel = 0x80000008,
2225 .model_id = "Intel Xeon Phi Processor (Knights Mill)",
2228 .name = "Opteron_G1",
2229 .level = 5,
2230 .vendor = CPUID_VENDOR_AMD,
2231 .family = 15,
2232 .model = 6,
2233 .stepping = 1,
2234 .features[FEAT_1_EDX] =
2235 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2236 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2237 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2238 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2239 CPUID_DE | CPUID_FP87,
2240 .features[FEAT_1_ECX] =
2241 CPUID_EXT_SSE3,
2242 .features[FEAT_8000_0001_EDX] =
2243 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2244 .xlevel = 0x80000008,
2245 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
2248 .name = "Opteron_G2",
2249 .level = 5,
2250 .vendor = CPUID_VENDOR_AMD,
2251 .family = 15,
2252 .model = 6,
2253 .stepping = 1,
2254 .features[FEAT_1_EDX] =
2255 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2256 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2257 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2258 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2259 CPUID_DE | CPUID_FP87,
2260 .features[FEAT_1_ECX] =
2261 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
2262 /* Missing: CPUID_EXT2_RDTSCP */
2263 .features[FEAT_8000_0001_EDX] =
2264 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2265 .features[FEAT_8000_0001_ECX] =
2266 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2267 .xlevel = 0x80000008,
2268 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
2271 .name = "Opteron_G3",
2272 .level = 5,
2273 .vendor = CPUID_VENDOR_AMD,
2274 .family = 16,
2275 .model = 2,
2276 .stepping = 3,
2277 .features[FEAT_1_EDX] =
2278 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2279 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2280 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2281 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2282 CPUID_DE | CPUID_FP87,
2283 .features[FEAT_1_ECX] =
2284 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
2285 CPUID_EXT_SSE3,
2286 /* Missing: CPUID_EXT2_RDTSCP */
2287 .features[FEAT_8000_0001_EDX] =
2288 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2289 .features[FEAT_8000_0001_ECX] =
2290 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
2291 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2292 .xlevel = 0x80000008,
2293 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
2296 .name = "Opteron_G4",
2297 .level = 0xd,
2298 .vendor = CPUID_VENDOR_AMD,
2299 .family = 21,
2300 .model = 1,
2301 .stepping = 2,
2302 .features[FEAT_1_EDX] =
2303 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2304 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2305 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2306 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2307 CPUID_DE | CPUID_FP87,
2308 .features[FEAT_1_ECX] =
2309 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2310 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2311 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2312 CPUID_EXT_SSE3,
2313 /* Missing: CPUID_EXT2_RDTSCP */
2314 .features[FEAT_8000_0001_EDX] =
2315 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2316 CPUID_EXT2_SYSCALL,
2317 .features[FEAT_8000_0001_ECX] =
2318 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2319 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2320 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2321 CPUID_EXT3_LAHF_LM,
2322 /* no xsaveopt! */
2323 .xlevel = 0x8000001A,
2324 .model_id = "AMD Opteron 62xx class CPU",
2327 .name = "Opteron_G5",
2328 .level = 0xd,
2329 .vendor = CPUID_VENDOR_AMD,
2330 .family = 21,
2331 .model = 2,
2332 .stepping = 0,
2333 .features[FEAT_1_EDX] =
2334 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2335 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2336 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2337 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2338 CPUID_DE | CPUID_FP87,
2339 .features[FEAT_1_ECX] =
2340 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
2341 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
2342 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
2343 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2344 /* Missing: CPUID_EXT2_RDTSCP */
2345 .features[FEAT_8000_0001_EDX] =
2346 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2347 CPUID_EXT2_SYSCALL,
2348 .features[FEAT_8000_0001_ECX] =
2349 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2350 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2351 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2352 CPUID_EXT3_LAHF_LM,
2353 /* no xsaveopt! */
2354 .xlevel = 0x8000001A,
2355 .model_id = "AMD Opteron 63xx class CPU",
2358 .name = "EPYC",
2359 .level = 0xd,
2360 .vendor = CPUID_VENDOR_AMD,
2361 .family = 23,
2362 .model = 1,
2363 .stepping = 2,
2364 .features[FEAT_1_EDX] =
2365 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2366 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2367 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2368 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2369 CPUID_VME | CPUID_FP87,
2370 .features[FEAT_1_ECX] =
2371 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2372 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2373 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2374 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2375 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2376 .features[FEAT_8000_0001_EDX] =
2377 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2378 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2379 CPUID_EXT2_SYSCALL,
2380 .features[FEAT_8000_0001_ECX] =
2381 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2382 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2383 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2384 .features[FEAT_7_0_EBX] =
2385 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2386 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2387 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2388 CPUID_7_0_EBX_SHA_NI,
2389 /* Missing: XSAVES (not supported by some Linux versions,
2390 * including v4.1 to v4.12).
2391 * KVM doesn't yet expose any XSAVES state save component.
2393 .features[FEAT_XSAVE] =
2394 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2395 CPUID_XSAVE_XGETBV1,
2396 .features[FEAT_6_EAX] =
2397 CPUID_6_EAX_ARAT,
2398 .xlevel = 0x8000000A,
2399 .model_id = "AMD EPYC Processor",
2400 .cache_info = &epyc_cache_info,
2403 .name = "EPYC-IBPB",
2404 .level = 0xd,
2405 .vendor = CPUID_VENDOR_AMD,
2406 .family = 23,
2407 .model = 1,
2408 .stepping = 2,
2409 .features[FEAT_1_EDX] =
2410 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2411 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2412 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2413 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2414 CPUID_VME | CPUID_FP87,
2415 .features[FEAT_1_ECX] =
2416 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2417 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2418 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2419 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2420 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2421 .features[FEAT_8000_0001_EDX] =
2422 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2423 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2424 CPUID_EXT2_SYSCALL,
2425 .features[FEAT_8000_0001_ECX] =
2426 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2427 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2428 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2429 .features[FEAT_8000_0008_EBX] =
2430 CPUID_8000_0008_EBX_IBPB,
2431 .features[FEAT_7_0_EBX] =
2432 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2433 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2434 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2435 CPUID_7_0_EBX_SHA_NI,
2436 /* Missing: XSAVES (not supported by some Linux versions,
2437 * including v4.1 to v4.12).
2438 * KVM doesn't yet expose any XSAVES state save component.
2440 .features[FEAT_XSAVE] =
2441 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2442 CPUID_XSAVE_XGETBV1,
2443 .features[FEAT_6_EAX] =
2444 CPUID_6_EAX_ARAT,
2445 .xlevel = 0x8000000A,
2446 .model_id = "AMD EPYC Processor (with IBPB)",
2447 .cache_info = &epyc_cache_info,
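/*
 * Editorial note (not in the original source): each entry in this table is
 * selectable by name on the QEMU command line, and individual bits can be
 * toggled there as well, e.g.
 *
 *   -cpu EPYC-IBPB
 *   -cpu Haswell,-hle,-rtm        (a Haswell guest without TSX)
 *
 * The "-IBRS"/"-IBPB" variants differ from their base models only by the
 * added SPEC_CTRL/IBPB speculation-control bits shown above.
 */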
2451 typedef struct PropValue {
2452 const char *prop, *value;
2453 } PropValue;
2455 /* KVM-specific features that are automatically added/removed
2456 * from all CPU models when KVM is enabled.
2458 static PropValue kvm_default_props[] = {
2459 { "kvmclock", "on" },
2460 { "kvm-nopiodelay", "on" },
2461 { "kvm-asyncpf", "on" },
2462 { "kvm-steal-time", "on" },
2463 { "kvm-pv-eoi", "on" },
2464 { "kvmclock-stable-bit", "on" },
2465 { "x2apic", "on" },
2466 { "acpi", "off" },
2467 { "monitor", "off" },
2468 { "svm", "off" },
2469 { NULL, NULL },
2472 /* TCG-specific defaults that override all CPU models when using TCG
2474 static PropValue tcg_default_props[] = {
2475 { "vme", "off" },
2476 { NULL, NULL },
2480 void x86_cpu_change_kvm_default(const char *prop, const char *value)
2482 PropValue *pv;
2483 for (pv = kvm_default_props; pv->prop; pv++) {
2484 if (!strcmp(pv->prop, prop)) {
2485 pv->value = value;
2486 break;
2490 /* It is valid to call this function only for properties that
2491 * are already present in the kvm_default_props table.
2493 assert(pv->prop);
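/*
 * Illustrative sketch (not in the original source): how machine or compat
 * code could flip one of the KVM defaults listed above before CPUs are
 * created.  The property named here is only an example; it must already be
 * present in kvm_default_props, otherwise the assert above fires.
 */
#if 0
static void example_disable_steal_time_by_default(void)
{
    /* "kvm-steal-time" appears in kvm_default_props, so this is legal */
    x86_cpu_change_kvm_default("kvm-steal-time", "off");
}
#endif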
2496 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2497 bool migratable_only);
2499 static bool lmce_supported(void)
2501 uint64_t mce_cap = 0;
2503 #ifdef CONFIG_KVM
2504 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
2505 return false;
2507 #endif
2509 return !!(mce_cap & MCG_LMCE_P);
2512 #define CPUID_MODEL_ID_SZ 48
2515 * cpu_x86_fill_model_id:
2516 * Get CPUID model ID string from host CPU.
2518 * @str should have at least CPUID_MODEL_ID_SZ bytes
2520 * The function does NOT add a null terminator to the string
2521 * automatically.
2523 static int cpu_x86_fill_model_id(char *str)
2525 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
2526 int i;
2528 for (i = 0; i < 3; i++) {
2529 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
2530 memcpy(str + i * 16 + 0, &eax, 4);
2531 memcpy(str + i * 16 + 4, &ebx, 4);
2532 memcpy(str + i * 16 + 8, &ecx, 4);
2533 memcpy(str + i * 16 + 12, &edx, 4);
2535 return 0;
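/*
 * Illustrative sketch (not in the original source): because the helper above
 * writes exactly CPUID_MODEL_ID_SZ bytes and never NUL-terminates, callers
 * (such as max_x86_cpu_initfn() below) reserve and zero one extra byte:
 */
#if 0
static void example_read_host_model_id(void)
{
    char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };   /* extra NUL byte */

    cpu_x86_fill_model_id(model_id);                /* fills 48 bytes */
    /* model_id is now a properly terminated C string */
}
#endif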
2538 static Property max_x86_cpu_properties[] = {
2539 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
2540 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
2541 DEFINE_PROP_END_OF_LIST()
2544 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
2546 DeviceClass *dc = DEVICE_CLASS(oc);
2547 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2549 xcc->ordering = 9;
2551 xcc->model_description =
2552 "Enables all features supported by the accelerator in the current host";
2554 dc->props = max_x86_cpu_properties;
2557 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
2559 static void max_x86_cpu_initfn(Object *obj)
2561 X86CPU *cpu = X86_CPU(obj);
2562 CPUX86State *env = &cpu->env;
2563 KVMState *s = kvm_state;
2565 /* We can't fill the features array here because we don't know yet if
2566 * "migratable" is true or false.
2568 cpu->max_features = true;
2570 if (accel_uses_host_cpuid()) {
2571 char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
2572 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
2573 int family, model, stepping;
2574 X86CPUDefinition host_cpudef = { };
2575 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
2577 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
2578 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
2580 host_vendor_fms(vendor, &family, &model, &stepping);
2582 cpu_x86_fill_model_id(model_id);
2584 object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
2585 object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
2586 object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
2587 object_property_set_int(OBJECT(cpu), stepping, "stepping",
2588 &error_abort);
2589 object_property_set_str(OBJECT(cpu), model_id, "model-id",
2590 &error_abort);
2592 if (kvm_enabled()) {
2593 env->cpuid_min_level =
2594 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
2595 env->cpuid_min_xlevel =
2596 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
2597 env->cpuid_min_xlevel2 =
2598 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
2599 } else {
2600 env->cpuid_min_level =
2601 hvf_get_supported_cpuid(0x0, 0, R_EAX);
2602 env->cpuid_min_xlevel =
2603 hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
2604 env->cpuid_min_xlevel2 =
2605 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
2608 if (lmce_supported()) {
2609 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
2611 } else {
2612 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
2613 "vendor", &error_abort);
2614 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
2615 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
2616 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
2617 object_property_set_str(OBJECT(cpu),
2618 "QEMU TCG CPU version " QEMU_HW_VERSION,
2619 "model-id", &error_abort);
2622 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
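/*
 * Editorial note (not in the original source): this initfn is what backs
 * "-cpu max" (and, through the subclass registered below, "-cpu host" when
 * KVM or HVF is in use).  Under TCG the else branch above is taken instead,
 * reporting the fixed "QEMU TCG CPU version ..." model-id with an AMD vendor
 * and family/model/stepping 6/6/3.
 */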
2625 static const TypeInfo max_x86_cpu_type_info = {
2626 .name = X86_CPU_TYPE_NAME("max"),
2627 .parent = TYPE_X86_CPU,
2628 .instance_init = max_x86_cpu_initfn,
2629 .class_init = max_x86_cpu_class_init,
2632 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
2633 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
2635 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2637 xcc->host_cpuid_required = true;
2638 xcc->ordering = 8;
2640 if (kvm_enabled()) {
2641 xcc->model_description =
2642 "KVM processor with all supported host features ";
2643 } else if (hvf_enabled()) {
2644 xcc->model_description =
2645 "HVF processor with all supported host features ";
2649 static const TypeInfo host_x86_cpu_type_info = {
2650 .name = X86_CPU_TYPE_NAME("host"),
2651 .parent = X86_CPU_TYPE_NAME("max"),
2652 .class_init = host_x86_cpu_class_init,
2655 #endif
2657 static void report_unavailable_features(FeatureWord w, uint32_t mask)
2659 FeatureWordInfo *f = &feature_word_info[w];
2660 int i;
2662 for (i = 0; i < 32; ++i) {
2663 if ((1UL << i) & mask) {
2664 const char *reg = get_register_name_32(f->cpuid_reg);
2665 assert(reg);
2666 warn_report("%s doesn't support requested feature: "
2667 "CPUID.%02XH:%s%s%s [bit %d]",
2668 accel_uses_host_cpuid() ? "host" : "TCG",
2669 f->cpuid_eax, reg,
2670 f->feat_names[i] ? "." : "",
2671 f->feat_names[i] ? f->feat_names[i] : "", i);
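/*
 * Illustrative example (not in the original source) of the warning the loop
 * above emits, assuming a guest model that requests HLE (CPUID leaf 07H,
 * EBX bit 4) on a host without it; the message comes out roughly as:
 *
 *   warning: host doesn't support requested feature: CPUID.07H:EBX.hle [bit 4]
 */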
2676 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
2677 const char *name, void *opaque,
2678 Error **errp)
2680 X86CPU *cpu = X86_CPU(obj);
2681 CPUX86State *env = &cpu->env;
2682 int64_t value;
2684 value = (env->cpuid_version >> 8) & 0xf;
2685 if (value == 0xf) {
2686 value += (env->cpuid_version >> 20) & 0xff;
2688 visit_type_int(v, name, &value, errp);
2691 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
2692 const char *name, void *opaque,
2693 Error **errp)
2695 X86CPU *cpu = X86_CPU(obj);
2696 CPUX86State *env = &cpu->env;
2697 const int64_t min = 0;
2698 const int64_t max = 0xff + 0xf;
2699 Error *local_err = NULL;
2700 int64_t value;
2702 visit_type_int(v, name, &value, &local_err);
2703 if (local_err) {
2704 error_propagate(errp, local_err);
2705 return;
2707 if (value < min || value > max) {
2708 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2709 name ? name : "null", value, min, max);
2710 return;
2713 env->cpuid_version &= ~0xff00f00;
2714 if (value > 0x0f) {
2715 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
2716 } else {
2717 env->cpuid_version |= value << 8;
2721 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
2722 const char *name, void *opaque,
2723 Error **errp)
2725 X86CPU *cpu = X86_CPU(obj);
2726 CPUX86State *env = &cpu->env;
2727 int64_t value;
2729 value = (env->cpuid_version >> 4) & 0xf;
2730 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
2731 visit_type_int(v, name, &value, errp);
2734 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
2735 const char *name, void *opaque,
2736 Error **errp)
2738 X86CPU *cpu = X86_CPU(obj);
2739 CPUX86State *env = &cpu->env;
2740 const int64_t min = 0;
2741 const int64_t max = 0xff;
2742 Error *local_err = NULL;
2743 int64_t value;
2745 visit_type_int(v, name, &value, &local_err);
2746 if (local_err) {
2747 error_propagate(errp, local_err);
2748 return;
2750 if (value < min || value > max) {
2751 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2752 name ? name : "null", value, min, max);
2753 return;
2756 env->cpuid_version &= ~0xf00f0;
2757 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
2760 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
2761 const char *name, void *opaque,
2762 Error **errp)
2764 X86CPU *cpu = X86_CPU(obj);
2765 CPUX86State *env = &cpu->env;
2766 int64_t value;
2768 value = env->cpuid_version & 0xf;
2769 visit_type_int(v, name, &value, errp);
2772 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
2773 const char *name, void *opaque,
2774 Error **errp)
2776 X86CPU *cpu = X86_CPU(obj);
2777 CPUX86State *env = &cpu->env;
2778 const int64_t min = 0;
2779 const int64_t max = 0xf;
2780 Error *local_err = NULL;
2781 int64_t value;
2783 visit_type_int(v, name, &value, &local_err);
2784 if (local_err) {
2785 error_propagate(errp, local_err);
2786 return;
2788 if (value < min || value > max) {
2789 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2790 name ? name : "null", value, min, max);
2791 return;
2794 env->cpuid_version &= ~0xf;
2795 env->cpuid_version |= value & 0xf;
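/*
 * Worked example (editorial note, not in the original source): the three
 * setters above pack family/model/stepping into env->cpuid_version the way
 * CPUID leaf 1 EAX encodes them.  For the EPYC definition (family 23,
 * model 1, stepping 2):
 *
 *   family 23 > 0x0f:  0xf00 | ((23 - 0x0f) << 20)         = 0x00800f00
 *   model 1:           ((1 & 0xf) << 4) | ((1 >> 4) << 16) = 0x00000010
 *   stepping 2:                                              0x00000002
 *
 *   cpuid_version = 0x00800f00 | 0x10 | 0x2 = 0x00800f12
 *
 * which matches the extended-family encoding AMD documents for
 * first-generation EPYC parts (family 17h, model 01h, stepping 2).
 */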
2798 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
2800 X86CPU *cpu = X86_CPU(obj);
2801 CPUX86State *env = &cpu->env;
2802 char *value;
2804 value = g_malloc(CPUID_VENDOR_SZ + 1);
2805 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
2806 env->cpuid_vendor3);
2807 return value;
2810 static void x86_cpuid_set_vendor(Object *obj, const char *value,
2811 Error **errp)
2813 X86CPU *cpu = X86_CPU(obj);
2814 CPUX86State *env = &cpu->env;
2815 int i;
2817 if (strlen(value) != CPUID_VENDOR_SZ) {
2818 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
2819 return;
2822 env->cpuid_vendor1 = 0;
2823 env->cpuid_vendor2 = 0;
2824 env->cpuid_vendor3 = 0;
2825 for (i = 0; i < 4; i++) {
2826 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
2827 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
2828 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
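/*
 * Worked example (editorial note, not in the original source): the loop
 * above packs the 12-character vendor string little-endian into the three
 * CPUID.0 registers.  For "GenuineIntel" this yields the familiar constants:
 *
 *   cpuid_vendor1 ("Genu") = 0x756e6547   (returned in EBX)
 *   cpuid_vendor2 ("ineI") = 0x49656e69   (returned in EDX)
 *   cpuid_vendor3 ("ntel") = 0x6c65746e   (returned in ECX)
 */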
2832 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
2834 X86CPU *cpu = X86_CPU(obj);
2835 CPUX86State *env = &cpu->env;
2836 char *value;
2837 int i;
2839 value = g_malloc(48 + 1);
2840 for (i = 0; i < 48; i++) {
2841 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
2843 value[48] = '\0';
2844 return value;
2847 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
2848 Error **errp)
2850 X86CPU *cpu = X86_CPU(obj);
2851 CPUX86State *env = &cpu->env;
2852 int c, len, i;
2854 if (model_id == NULL) {
2855 model_id = "";
2857 len = strlen(model_id);
2858 memset(env->cpuid_model, 0, 48);
2859 for (i = 0; i < 48; i++) {
2860 if (i >= len) {
2861 c = '\0';
2862 } else {
2863 c = (uint8_t)model_id[i];
2865 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
2869 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
2870 void *opaque, Error **errp)
2872 X86CPU *cpu = X86_CPU(obj);
2873 int64_t value;
2875 value = cpu->env.tsc_khz * 1000;
2876 visit_type_int(v, name, &value, errp);
2879 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
2880 void *opaque, Error **errp)
2882 X86CPU *cpu = X86_CPU(obj);
2883 const int64_t min = 0;
2884 const int64_t max = INT64_MAX;
2885 Error *local_err = NULL;
2886 int64_t value;
2888 visit_type_int(v, name, &value, &local_err);
2889 if (local_err) {
2890 error_propagate(errp, local_err);
2891 return;
2893 if (value < min || value > max) {
2894 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2895 name ? name : "null", value, min, max);
2896 return;
2899 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
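/*
 * Editorial note (not in the original source): the property value is given
 * in Hz and stored in kHz, so setting "tsc-frequency" to 2500000000 leaves
 * tsc_khz == 2500000.  The "tsc-freq=..." shorthand handled later in
 * x86_cpu_parse_featurestr() ends up in this setter as well.
 */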
2902 /* Generic getter for "feature-words" and "filtered-features" properties */
2903 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
2904 const char *name, void *opaque,
2905 Error **errp)
2907 uint32_t *array = (uint32_t *)opaque;
2908 FeatureWord w;
2909 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
2910 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
2911 X86CPUFeatureWordInfoList *list = NULL;
2913 for (w = 0; w < FEATURE_WORDS; w++) {
2914 FeatureWordInfo *wi = &feature_word_info[w];
2915 X86CPUFeatureWordInfo *qwi = &word_infos[w];
2916 qwi->cpuid_input_eax = wi->cpuid_eax;
2917 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
2918 qwi->cpuid_input_ecx = wi->cpuid_ecx;
2919 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
2920 qwi->features = array[w];
2922 /* List will be in reverse order, but order shouldn't matter */
2923 list_entries[w].next = list;
2924 list_entries[w].value = &word_infos[w];
2925 list = &list_entries[w];
2928 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
2931 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
2932 void *opaque, Error **errp)
2934 X86CPU *cpu = X86_CPU(obj);
2935 int64_t value = cpu->hyperv_spinlock_attempts;
2937 visit_type_int(v, name, &value, errp);
2940 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
2941 void *opaque, Error **errp)
2943 const int64_t min = 0xFFF;
2944 const int64_t max = UINT_MAX;
2945 X86CPU *cpu = X86_CPU(obj);
2946 Error *err = NULL;
2947 int64_t value;
2949 visit_type_int(v, name, &value, &err);
2950 if (err) {
2951 error_propagate(errp, err);
2952 return;
2955 if (value < min || value > max) {
2956 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
2957 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
2958 object_get_typename(obj), name ? name : "null",
2959 value, min, max);
2960 return;
2962 cpu->hyperv_spinlock_attempts = value;
2965 static const PropertyInfo qdev_prop_spinlocks = {
2966 .name = "int",
2967 .get = x86_get_hv_spinlocks,
2968 .set = x86_set_hv_spinlocks,
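/*
 * Illustrative usage (not in the original source, assuming the usual
 * feature-string spelling): the retry count surfaces on the command line as
 *
 *   -cpu Skylake-Client,hv_spinlocks=0x1fff
 *
 * and anything below the 0xFFF minimum is rejected by the range check in
 * x86_set_hv_spinlocks() above.
 */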
2971 /* Convert all '_' in a feature string option name to '-', to make the feature
2972 * name conform to the QOM property naming rule, which uses '-' instead of '_'.
2974 static inline void feat2prop(char *s)
2976 while ((s = strchr(s, '_'))) {
2977 *s = '-';
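/* Editorial example (not in the original source): feat2prop() turns user
 * spellings such as "lahf_lm" or "tsc_deadline" into the QOM property names
 * "lahf-lm" and "tsc-deadline". */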
2981 /* Return the feature property name for a feature flag bit */
2982 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
2984 /* XSAVE components are automatically enabled by other features,
2985 * so return the original feature name instead
2987 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
2988 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
2990 if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
2991 x86_ext_save_areas[comp].bits) {
2992 w = x86_ext_save_areas[comp].feature;
2993 bitnr = ctz32(x86_ext_save_areas[comp].bits);
2997 assert(bitnr < 32);
2998 assert(w < FEATURE_WORDS);
2999 return feature_word_info[w].feat_names[bitnr];
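/*
 * Illustrative example (not in the original source), assuming the usual
 * XSAVE layout where state component 2 is the AVX (YMM) area: asked about
 * FEAT_XSAVE_COMP_LO bit 2, this helper reports "avx", the feature that
 * enables that component, rather than a raw component number.
 */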
3002 /* Compatibility hack to maintain the legacy +-feat semantics,
3003 * where +-feat overwrites any feature set by
3004 * "feat=on" or plain "feat", even if the latter is parsed after +-feat
3005 * (i.e. "-x2apic,x2apic=on" will result in x2apic being disabled)
3007 static GList *plus_features, *minus_features;
3009 static gint compare_string(gconstpointer a, gconstpointer b)
3011 return g_strcmp0(a, b);
3014 /* Parse "+feature,-feature,feature=foo" CPU feature string
3016 static void x86_cpu_parse_featurestr(const char *typename, char *features,
3017 Error **errp)
3019 char *featurestr; /* Single "key=value" string being parsed */
3020 static bool cpu_globals_initialized;
3021 bool ambiguous = false;
3023 if (cpu_globals_initialized) {
3024 return;
3026 cpu_globals_initialized = true;
3028 if (!features) {
3029 return;
3032 for (featurestr = strtok(features, ",");
3033 featurestr;
3034 featurestr = strtok(NULL, ",")) {
3035 const char *name;
3036 const char *val = NULL;
3037 char *eq = NULL;
3038 char num[32];
3039 GlobalProperty *prop;
3041 /* Compatibility syntax: */
3042 if (featurestr[0] == '+') {
3043 plus_features = g_list_append(plus_features,
3044 g_strdup(featurestr + 1));
3045 continue;
3046 } else if (featurestr[0] == '-') {
3047 minus_features = g_list_append(minus_features,
3048 g_strdup(featurestr + 1));
3049 continue;
3052 eq = strchr(featurestr, '=');
3053 if (eq) {
3054 *eq++ = 0;
3055 val = eq;
3056 } else {
3057 val = "on";
3060 feat2prop(featurestr);
3061 name = featurestr;
3063 if (g_list_find_custom(plus_features, name, compare_string)) {
3064 warn_report("Ambiguous CPU model string. "
3065 "Don't mix both \"+%s\" and \"%s=%s\"",
3066 name, name, val);
3067 ambiguous = true;
3069 if (g_list_find_custom(minus_features, name, compare_string)) {
3070 warn_report("Ambiguous CPU model string. "
3071 "Don't mix both \"-%s\" and \"%s=%s\"",
3072 name, name, val);
3073 ambiguous = true;
3076 /* Special case: */
3077 if (!strcmp(name, "tsc-freq")) {
3078 int ret;
3079 uint64_t tsc_freq;
3081 ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
3082 if (ret < 0 || tsc_freq > INT64_MAX) {
3083 error_setg(errp, "bad numerical value %s", val);
3084 return;
3086 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
3087 val = num;
3088 name = "tsc-frequency";
3091 prop = g_new0(typeof(*prop), 1);
3092 prop->driver = typename;
3093 prop->property = g_strdup(name);
3094 prop->value = g_strdup(val);
3095 prop->errp = &error_fatal;
3096 qdev_prop_register_global(prop);
3099 if (ambiguous) {
3100 warn_report("Compatibility of ambiguous CPU model "
3101 "strings won't be kept on future QEMU versions");
3105 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
3106 static int x86_cpu_filter_features(X86CPU *cpu);
3108 /* Check for missing features that may prevent the CPU class from
3109 * running with the current machine and accelerator.
3111 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
3112 strList **missing_feats)
3114 X86CPU *xc;
3115 FeatureWord w;
3116 Error *err = NULL;
3117 strList **next = missing_feats;
3119 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
3120 strList *new = g_new0(strList, 1);
3121 new->value = g_strdup("kvm");
3122 *missing_feats = new;
3123 return;
3126 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
3128 x86_cpu_expand_features(xc, &err);
3129 if (err) {
3130 /* Errors at x86_cpu_expand_features should never happen,
3131 * but in case they do, just report the model as not
3132 * runnable at all using the "type" property.
3134 strList *new = g_new0(strList, 1);
3135 new->value = g_strdup("type");
3136 *next = new;
3137 next = &new->next;
3140 x86_cpu_filter_features(xc);
3142 for (w = 0; w < FEATURE_WORDS; w++) {
3143 uint32_t filtered = xc->filtered_features[w];
3144 int i;
3145 for (i = 0; i < 32; i++) {
3146 if (filtered & (1UL << i)) {
3147 strList *new = g_new0(strList, 1);
3148 new->value = g_strdup(x86_cpu_feature_name(w, i));
3149 *next = new;
3150 next = &new->next;
3155 object_unref(OBJECT(xc));
3158 /* Print all cpuid feature names in featureset
3160 static void listflags(FILE *f, fprintf_function print, const char **featureset)
3162 int bit;
3163 bool first = true;
3165 for (bit = 0; bit < 32; bit++) {
3166 if (featureset[bit]) {
3167 print(f, "%s%s", first ? "" : " ", featureset[bit]);
3168 first = false;
3173 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
3174 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
3176 ObjectClass *class_a = (ObjectClass *)a;
3177 ObjectClass *class_b = (ObjectClass *)b;
3178 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
3179 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
3180 const char *name_a, *name_b;
3182 if (cc_a->ordering != cc_b->ordering) {
3183 return cc_a->ordering - cc_b->ordering;
3184 } else {
3185 name_a = object_class_get_name(class_a);
3186 name_b = object_class_get_name(class_b);
3187 return strcmp(name_a, name_b);
3191 static GSList *get_sorted_cpu_model_list(void)
3193 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
3194 list = g_slist_sort(list, x86_cpu_list_compare);
3195 return list;
3198 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
3200 ObjectClass *oc = data;
3201 X86CPUClass *cc = X86_CPU_CLASS(oc);
3202 CPUListState *s = user_data;
3203 char *name = x86_cpu_class_get_model_name(cc);
3204 const char *desc = cc->model_description;
3205 if (!desc && cc->cpu_def) {
3206 desc = cc->cpu_def->model_id;
3209 (*s->cpu_fprintf)(s->file, "x86 %16s %-48s\n",
3210 name, desc);
3211 g_free(name);
3214 /* list available CPU models and flags */
3215 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
3217 int i;
3218 CPUListState s = {
3219 .file = f,
3220 .cpu_fprintf = cpu_fprintf,
3222 GSList *list;
3224 (*cpu_fprintf)(f, "Available CPUs:\n");
3225 list = get_sorted_cpu_model_list();
3226 g_slist_foreach(list, x86_cpu_list_entry, &s);
3227 g_slist_free(list);
3229 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
3230 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
3231 FeatureWordInfo *fw = &feature_word_info[i];
3233 (*cpu_fprintf)(f, " ");
3234 listflags(f, cpu_fprintf, fw->feat_names);
3235 (*cpu_fprintf)(f, "\n");
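/*
 * Illustrative output shape (not in the original source), as printed by
 * "-cpu help" through the "x86 %16s %-48s\n" format above:
 *
 *   Available CPUs:
 *   x86              EPYC  AMD EPYC Processor
 *   x86           Haswell  Intel Core Processor (Haswell)
 *   ...
 *
 *   Recognized CPUID flags:
 *     fpu vme de pse tsc msr pae mce cx8 ...
 */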
3239 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
3241 ObjectClass *oc = data;
3242 X86CPUClass *cc = X86_CPU_CLASS(oc);
3243 CpuDefinitionInfoList **cpu_list = user_data;
3244 CpuDefinitionInfoList *entry;
3245 CpuDefinitionInfo *info;
3247 info = g_malloc0(sizeof(*info));
3248 info->name = x86_cpu_class_get_model_name(cc);
3249 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
3250 info->has_unavailable_features = true;
3251 info->q_typename = g_strdup(object_class_get_name(oc));
3252 info->migration_safe = cc->migration_safe;
3253 info->has_migration_safe = true;
3254 info->q_static = cc->static_model;
3256 entry = g_malloc0(sizeof(*entry));
3257 entry->value = info;
3258 entry->next = *cpu_list;
3259 *cpu_list = entry;
3262 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
3264 CpuDefinitionInfoList *cpu_list = NULL;
3265 GSList *list = get_sorted_cpu_model_list();
3266 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
3267 g_slist_free(list);
3268 return cpu_list;
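/*
 * Illustrative QMP exchange (not in the original source, shape only) built
 * on the two helpers above:
 *
 *   -> { "execute": "query-cpu-definitions" }
 *   <- { "return": [ { "name": "Haswell-IBRS",
 *                      "typename": "Haswell-IBRS-x86_64-cpu",
 *                      "static": false, "migration-safe": true,
 *                      "unavailable-features": [] }, ... ] }
 *
 * "unavailable-features" is filled in by
 * x86_cpu_class_check_missing_features() with names such as "avx512f"
 * whenever the current accelerator would filter a model's feature out.
 */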
3271 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
3272 bool migratable_only)
3274 FeatureWordInfo *wi = &feature_word_info[w];
3275 uint32_t r;
3277 if (kvm_enabled()) {
3278 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
3279 wi->cpuid_ecx,
3280 wi->cpuid_reg);
3281 } else if (hvf_enabled()) {
3282 r = hvf_get_supported_cpuid(wi->cpuid_eax,
3283 wi->cpuid_ecx,
3284 wi->cpuid_reg);
3285 } else if (tcg_enabled()) {
3286 r = wi->tcg_features;
3287 } else {
3288 return ~0;
3290 if (migratable_only) {
3291 r &= x86_cpu_get_migratable_flags(w);
3293 return r;
3296 static void x86_cpu_report_filtered_features(X86CPU *cpu)
3298 FeatureWord w;
3300 for (w = 0; w < FEATURE_WORDS; w++) {
3301 report_unavailable_features(w, cpu->filtered_features[w]);
3305 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
3307 PropValue *pv;
3308 for (pv = props; pv->prop; pv++) {
3309 if (!pv->value) {
3310 continue;
3312 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
3313 &error_abort);
3317 /* Load data from X86CPUDefinition into an X86CPU object
3319 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
3321 CPUX86State *env = &cpu->env;
3322 const char *vendor;
3323 char host_vendor[CPUID_VENDOR_SZ + 1];
3324 FeatureWord w;
3326 /* NOTE: any property set by this function should be returned by
3327 * x86_cpu_static_props(), so static expansion of
3328 * query-cpu-model-expansion is always complete.
3331 /* CPU models only set _minimum_ values for level/xlevel: */
3332 object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
3333 object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
3335 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
3336 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
3337 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
3338 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
3339 for (w = 0; w < FEATURE_WORDS; w++) {
3340 env->features[w] = def->features[w];
3343 /* Store cache information from the X86CPUDefinition if available */
3344 env->cache_info = def->cache_info;
3345 cpu->legacy_cache = def->cache_info ? 0 : 1;
3347 /* Special cases not set in the X86CPUDefinition structs: */
3348 /* TODO: in-kernel irqchip for hvf */
3349 if (kvm_enabled()) {
3350 if (!kvm_irqchip_in_kernel()) {
3351 x86_cpu_change_kvm_default("x2apic", "off");
3354 x86_cpu_apply_props(cpu, kvm_default_props);
3355 } else if (tcg_enabled()) {
3356 x86_cpu_apply_props(cpu, tcg_default_props);
3359 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
3361 /* sysenter isn't supported in compatibility mode on AMD,
3362 * syscall isn't supported in compatibility mode on Intel.
3363 * Normally we advertise the actual CPU vendor, but you can
3364 * override this using the 'vendor' property if you want to use
3365 * KVM's sysenter/syscall emulation in compatibility mode and
3366 * when doing cross-vendor migration.
3368 vendor = def->vendor;
3369 if (accel_uses_host_cpuid()) {
3370 uint32_t ebx = 0, ecx = 0, edx = 0;
3371 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
3372 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
3373 vendor = host_vendor;
3376 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
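/*
 * Illustrative example (not in the original source) of the vendor override
 * described in the comment above: with a host-CPUID accelerator the host
 * vendor normally wins, but something like
 *
 *   -cpu Opteron_G5,vendor=GenuineIntel
 *
 * forces the Intel vendor string so the guest gets Intel syscall/sysenter
 * semantics in compatibility mode, e.g. for cross-vendor migration.
 */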
3380 /* Return a QDict containing keys for all properties that can be included
3381 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
3382 * must be included in the dictionary.
3384 static QDict *x86_cpu_static_props(void)
3386 FeatureWord w;
3387 int i;
3388 static const char *props[] = {
3389 "min-level",
3390 "min-xlevel",
3391 "family",
3392 "model",
3393 "stepping",
3394 "model-id",
3395 "vendor",
3396 "lmce",
3397 NULL,
3399 static QDict *d;
3401 if (d) {
3402 return d;
3405 d = qdict_new();
3406 for (i = 0; props[i]; i++) {
3407 qdict_put_null(d, props[i]);
3410 for (w = 0; w < FEATURE_WORDS; w++) {
3411 FeatureWordInfo *fi = &feature_word_info[w];
3412 int bit;
3413 for (bit = 0; bit < 32; bit++) {
3414 if (!fi->feat_names[bit]) {
3415 continue;
3417 qdict_put_null(d, fi->feat_names[bit]);
3421 return d;
3424 /* Add an entry to @props dict, with the value for property. */
3425 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
3427 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
3428 &error_abort);
3430 qdict_put_obj(props, prop, value);
3433 /* Convert CPU model data from X86CPU object to a property dictionary
3434 * that can recreate exactly the same CPU model.
3436 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
3438 QDict *sprops = x86_cpu_static_props();
3439 const QDictEntry *e;
3441 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
3442 const char *prop = qdict_entry_key(e);
3443 x86_cpu_expand_prop(cpu, props, prop);
3447 /* Convert CPU model data from X86CPU object to a property dictionary
3448 * that can recreate exactly the same CPU model, including every
3449 * writeable QOM property.
3451 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
3453 ObjectPropertyIterator iter;
3454 ObjectProperty *prop;
3456 object_property_iter_init(&iter, OBJECT(cpu));
3457 while ((prop = object_property_iter_next(&iter))) {
3458 /* skip read-only or write-only properties */
3459 if (!prop->get || !prop->set) {
3460 continue;
3463 /* "hotplugged" is the only property that is configurable
3464 * on the command-line but will be set differently on CPUs
3465 * created using "-cpu ... -smp ..." and by CPUs created
3466 * on the fly by x86_cpu_from_model() for querying. Skip it.
3468 if (!strcmp(prop->name, "hotplugged")) {
3469 continue;
3471 x86_cpu_expand_prop(cpu, props, prop->name);
3475 static void object_apply_props(Object *obj, QDict *props, Error **errp)
3477 const QDictEntry *prop;
3478 Error *err = NULL;
3480 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
3481 object_property_set_qobject(obj, qdict_entry_value(prop),
3482 qdict_entry_key(prop), &err);
3483 if (err) {
3484 break;
3488 error_propagate(errp, err);
3491 /* Create X86CPU object according to model+props specification */
3492 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
3494 X86CPU *xc = NULL;
3495 X86CPUClass *xcc;
3496 Error *err = NULL;
3498 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
3499 if (xcc == NULL) {
3500 error_setg(&err, "CPU model '%s' not found", model);
3501 goto out;
3504 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
3505 if (props) {
3506 object_apply_props(OBJECT(xc), props, &err);
3507 if (err) {
3508 goto out;
3512 x86_cpu_expand_features(xc, &err);
3513 if (err) {
3514 goto out;
3517 out:
3518 if (err) {
3519 error_propagate(errp, err);
3520 object_unref(OBJECT(xc));
3521 xc = NULL;
3523 return xc;
3526 CpuModelExpansionInfo *
3527 arch_query_cpu_model_expansion(CpuModelExpansionType type,
3528 CpuModelInfo *model,
3529 Error **errp)
3531 X86CPU *xc = NULL;
3532 Error *err = NULL;
3533 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
3534 QDict *props = NULL;
3535 const char *base_name;
3537 xc = x86_cpu_from_model(model->name,
3538 model->has_props ?
3539 qobject_to(QDict, model->props) :
3540 NULL, &err);
3541 if (err) {
3542 goto out;
3545 props = qdict_new();
3547 switch (type) {
3548 case CPU_MODEL_EXPANSION_TYPE_STATIC:
3549 /* Static expansion will be based on "base" only */
3550 base_name = "base";
3551 x86_cpu_to_dict(xc, props);
3552 break;
3553 case CPU_MODEL_EXPANSION_TYPE_FULL:
3554 /* As we don't return every single property, full expansion needs
3555 * to keep the original model name+props, and add extra
3556 * properties on top of that.
3558 base_name = model->name;
3559 x86_cpu_to_dict_full(xc, props);
3560 break;
3561 default:
3562 error_setg(&err, "Unsupported expansion type");
3563 goto out;
3566 if (!props) {
3567 props = qdict_new();
3569 x86_cpu_to_dict(xc, props);
3571 ret->model = g_new0(CpuModelInfo, 1);
3572 ret->model->name = g_strdup(base_name);
3573 ret->model->props = QOBJECT(props);
3574 ret->model->has_props = true;
3576 out:
3577 object_unref(OBJECT(xc));
3578 if (err) {
3579 error_propagate(errp, err);
3580 qapi_free_CpuModelExpansionInfo(ret);
3581 ret = NULL;
3583 return ret;
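/* Illustrative QMP usage only (request shape per the QAPI schema; the exact
 * property list in the reply depends on x86_cpu_static_props() above):
 *   -> { "execute": "query-cpu-model-expansion",
 *        "arguments": { "type": "static", "model": { "name": "Haswell" } } }
 *   <- { "return": { "model": { "name": "base", "props": { ... } } } }
 */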
3586 static gchar *x86_gdb_arch_name(CPUState *cs)
3588 #ifdef TARGET_X86_64
3589 return g_strdup("i386:x86-64");
3590 #else
3591 return g_strdup("i386");
3592 #endif
3595 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
3597 X86CPUDefinition *cpudef = data;
3598 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3600 xcc->cpu_def = cpudef;
3601 xcc->migration_safe = true;
3604 static void x86_register_cpudef_type(X86CPUDefinition *def)
3606 char *typename = x86_cpu_type_name(def->name);
3607 TypeInfo ti = {
3608 .name = typename,
3609 .parent = TYPE_X86_CPU,
3610 .class_init = x86_cpu_cpudef_class_init,
3611 .class_data = def,
3614 /* AMD aliases are handled at runtime based on CPUID vendor, so
3615 * they shouldn't be set on the CPU model table.
3617 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
3618 /* catch mistakes instead of silently truncating model_id when too long */
3619 assert(def->model_id && strlen(def->model_id) <= 48);
3622 type_register(&ti);
3623 g_free(typename);
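/* For example, a definition named "Haswell" is registered as the QOM type
 * "Haswell-x86_64-cpu" on 64-bit targets (see x86_cpu_type_name()), which is
 * what "-cpu Haswell" resolves to via x86_cpu_class_by_name().
 */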
3626 #if !defined(CONFIG_USER_ONLY)
3628 void cpu_clear_apic_feature(CPUX86State *env)
3630 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
3633 #endif /* !CONFIG_USER_ONLY */
3635 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
3636 uint32_t *eax, uint32_t *ebx,
3637 uint32_t *ecx, uint32_t *edx)
3639 X86CPU *cpu = x86_env_get_cpu(env);
3640 CPUState *cs = CPU(cpu);
3641 uint32_t pkg_offset;
3642 uint32_t limit;
3643 uint32_t signature[3];
3645 /* Calculate & apply limits for different index ranges */
3646 if (index >= 0xC0000000) {
3647 limit = env->cpuid_xlevel2;
3648 } else if (index >= 0x80000000) {
3649 limit = env->cpuid_xlevel;
3650 } else if (index >= 0x40000000) {
3651 limit = 0x40000001;
3652 } else {
3653 limit = env->cpuid_level;
3656 if (index > limit) {
3657 /* Intel documentation states that invalid EAX input will
3658 * return the same information as EAX=cpuid_level
3659 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
3661 index = env->cpuid_level;
3664 switch(index) {
3665 case 0:
3666 *eax = env->cpuid_level;
3667 *ebx = env->cpuid_vendor1;
3668 *edx = env->cpuid_vendor2;
3669 *ecx = env->cpuid_vendor3;
3670 break;
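/* For example, the default Intel vendor string "GenuineIntel" is returned
 * as EBX=0x756E6547 ("Genu"), EDX=0x49656E69 ("ineI"), ECX=0x6C65746E
 * ("ntel").
 */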
3671 case 1:
3672 *eax = env->cpuid_version;
3673 *ebx = (cpu->apic_id << 24) |
3674 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
3675 *ecx = env->features[FEAT_1_ECX];
3676 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
3677 *ecx |= CPUID_EXT_OSXSAVE;
3679 *edx = env->features[FEAT_1_EDX];
3680 if (cs->nr_cores * cs->nr_threads > 1) {
3681 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
3682 *edx |= CPUID_HT;
3684 break;
3685 case 2:
3686 /* cache info: needed for Pentium Pro compatibility */
3687 if (cpu->cache_info_passthrough) {
3688 host_cpuid(index, 0, eax, ebx, ecx, edx);
3689 break;
3691 *eax = 1; /* Number of CPUID[EAX=2] calls required */
3692 *ebx = 0;
3693 if (!cpu->enable_l3_cache) {
3694 *ecx = 0;
3695 } else {
3696 if (env->cache_info && !cpu->legacy_cache) {
3697 *ecx = cpuid2_cache_descriptor(&env->cache_info->l3_cache);
3698 } else {
3699 *ecx = cpuid2_cache_descriptor(&legacy_l3_cache);
3702 if (env->cache_info && !cpu->legacy_cache) {
3703 *edx = (cpuid2_cache_descriptor(&env->cache_info->l1d_cache) << 16) |
3704 (cpuid2_cache_descriptor(&env->cache_info->l1i_cache) << 8) |
3705 (cpuid2_cache_descriptor(&env->cache_info->l2_cache));
3706 } else {
3707 *edx = (cpuid2_cache_descriptor(&legacy_l1d_cache) << 16) |
3708 (cpuid2_cache_descriptor(&legacy_l1i_cache) << 8) |
3709 (cpuid2_cache_descriptor(&legacy_l2_cache_cpuid2));
3711 break;
3712 case 4:
3713 /* cache info: needed for Core compatibility */
3714 if (cpu->cache_info_passthrough) {
3715 host_cpuid(index, count, eax, ebx, ecx, edx);
3716 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
3717 *eax &= ~0xFC000000;
3718 if ((*eax & 31) && cs->nr_cores > 1) {
3719 *eax |= (cs->nr_cores - 1) << 26;
3721 } else {
3722 *eax = 0;
3723 CPUCacheInfo *l1d, *l1i, *l2, *l3;
3724 if (env->cache_info && !cpu->legacy_cache) {
3725 l1d = &env->cache_info->l1d_cache;
3726 l1i = &env->cache_info->l1i_cache;
3727 l2 = &env->cache_info->l2_cache;
3728 l3 = &env->cache_info->l3_cache;
3729 } else {
3730 l1d = &legacy_l1d_cache;
3731 l1i = &legacy_l1i_cache;
3732 l2 = &legacy_l2_cache;
3733 l3 = &legacy_l3_cache;
3735 switch (count) {
3736 case 0: /* L1 dcache info */
3737 encode_cache_cpuid4(l1d, 1, cs->nr_cores,
3738 eax, ebx, ecx, edx);
3739 break;
3740 case 1: /* L1 icache info */
3741 encode_cache_cpuid4(l1i, 1, cs->nr_cores,
3742 eax, ebx, ecx, edx);
3743 break;
3744 case 2: /* L2 cache info */
3745 encode_cache_cpuid4(l2, cs->nr_threads, cs->nr_cores,
3746 eax, ebx, ecx, edx);
3747 break;
3748 case 3: /* L3 cache info */
3749 pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
3750 if (cpu->enable_l3_cache) {
3751 encode_cache_cpuid4(l3, (1 << pkg_offset), cs->nr_cores,
3752 eax, ebx, ecx, edx);
3753 break;
3755 /* fall through */
3756 default: /* end of info */
3757 *eax = *ebx = *ecx = *edx = 0;
3758 break;
3761 break;
3762 case 5:
3763 /* mwait info: needed for Core compatibility */
3764 *eax = 0; /* Smallest monitor-line size in bytes */
3765 *ebx = 0; /* Largest monitor-line size in bytes */
3766 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
3767 *edx = 0;
3768 break;
3769 case 6:
3770 /* Thermal and Power Leaf */
3771 *eax = env->features[FEAT_6_EAX];
3772 *ebx = 0;
3773 *ecx = 0;
3774 *edx = 0;
3775 break;
3776 case 7:
3777 /* Structured Extended Feature Flags Enumeration Leaf */
3778 if (count == 0) {
3779 *eax = 0; /* Maximum ECX value for sub-leaves */
3780 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
3781 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
3782 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
3783 *ecx |= CPUID_7_0_ECX_OSPKE;
3785 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
3786 } else {
3787 *eax = 0;
3788 *ebx = 0;
3789 *ecx = 0;
3790 *edx = 0;
3792 break;
3793 case 9:
3794 /* Direct Cache Access Information Leaf */
3795 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
3796 *ebx = 0;
3797 *ecx = 0;
3798 *edx = 0;
3799 break;
3800 case 0xA:
3801 /* Architectural Performance Monitoring Leaf */
3802 if (kvm_enabled() && cpu->enable_pmu) {
3803 KVMState *s = cs->kvm_state;
3805 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
3806 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
3807 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
3808 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
3809 } else if (hvf_enabled() && cpu->enable_pmu) {
3810 *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
3811 *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
3812 *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
3813 *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
3814 } else {
3815 *eax = 0;
3816 *ebx = 0;
3817 *ecx = 0;
3818 *edx = 0;
3820 break;
3821 case 0xB:
3822 /* Extended Topology Enumeration Leaf */
3823 if (!cpu->enable_cpuid_0xb) {
3824 *eax = *ebx = *ecx = *edx = 0;
3825 break;
3828 *ecx = count & 0xff;
3829 *edx = cpu->apic_id;
3831 switch (count) {
3832 case 0:
3833 *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
3834 *ebx = cs->nr_threads;
3835 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
3836 break;
3837 case 1:
3838 *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
3839 *ebx = cs->nr_cores * cs->nr_threads;
3840 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
3841 break;
3842 default:
3843 *eax = 0;
3844 *ebx = 0;
3845 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
3848 assert(!(*eax & ~0x1f));
3849 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
3850 break;
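/* Rough worked example, assuming "-smp 4,sockets=1,cores=2,threads=2":
 * subleaf 0 reports EAX=1 (bits to shift right for the core ID) and EBX=2
 * threads; subleaf 1 reports EAX=2 (bits to shift right for the package ID)
 * and EBX=4 logical processors; EDX holds the APIC ID in every subleaf.
 */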
3851 case 0xD: {
3852 /* Processor Extended State */
3853 *eax = 0;
3854 *ebx = 0;
3855 *ecx = 0;
3856 *edx = 0;
3857 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
3858 break;
3861 if (count == 0) {
3862 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
3863 *eax = env->features[FEAT_XSAVE_COMP_LO];
3864 *edx = env->features[FEAT_XSAVE_COMP_HI];
3865 *ebx = *ecx;
3866 } else if (count == 1) {
3867 *eax = env->features[FEAT_XSAVE];
3868 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
3869 if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
3870 const ExtSaveArea *esa = &x86_ext_save_areas[count];
3871 *eax = esa->size;
3872 *ebx = esa->offset;
3875 break;
3877 case 0x14: {
3878 /* Intel Processor Trace Enumeration */
3879 *eax = 0;
3880 *ebx = 0;
3881 *ecx = 0;
3882 *edx = 0;
3883 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
3884 !kvm_enabled()) {
3885 break;
3888 if (count == 0) {
3889 *eax = INTEL_PT_MAX_SUBLEAF;
3890 *ebx = INTEL_PT_MINIMAL_EBX;
3891 *ecx = INTEL_PT_MINIMAL_ECX;
3892 } else if (count == 1) {
3893 *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
3894 *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
3896 break;
3898 case 0x40000000:
3900 * CPUID code in kvm_arch_init_vcpu() ignores stuff
3901 * set here, but we restrict this to TCG nonetheless.
3903 if (tcg_enabled() && cpu->expose_tcg) {
3904 memcpy(signature, "TCGTCGTCGTCG", 12);
3905 *eax = 0x40000001;
3906 *ebx = signature[0];
3907 *ecx = signature[1];
3908 *edx = signature[2];
3909 } else {
3910 *eax = 0;
3911 *ebx = 0;
3912 *ecx = 0;
3913 *edx = 0;
3915 break;
3916 case 0x40000001:
3917 *eax = 0;
3918 *ebx = 0;
3919 *ecx = 0;
3920 *edx = 0;
3921 break;
3922 case 0x80000000:
3923 *eax = env->cpuid_xlevel;
3924 *ebx = env->cpuid_vendor1;
3925 *edx = env->cpuid_vendor2;
3926 *ecx = env->cpuid_vendor3;
3927 break;
3928 case 0x80000001:
3929 *eax = env->cpuid_version;
3930 *ebx = 0;
3931 *ecx = env->features[FEAT_8000_0001_ECX];
3932 *edx = env->features[FEAT_8000_0001_EDX];
3934 /* The Linux kernel checks for the CMPLegacy bit and
3935 * discards multiple thread information if it is set.
3936 * So don't set it here for Intel to make Linux guests happy.
3938 if (cs->nr_cores * cs->nr_threads > 1) {
3939 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
3940 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
3941 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
3942 *ecx |= 1 << 1; /* CmpLegacy bit */
3945 break;
3946 case 0x80000002:
3947 case 0x80000003:
3948 case 0x80000004:
3949 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
3950 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
3951 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
3952 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
3953 break;
3954 case 0x80000005:
3955 /* cache info (L1 cache) */
3956 if (cpu->cache_info_passthrough) {
3957 host_cpuid(index, 0, eax, ebx, ecx, edx);
3958 break;
3960 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
3961 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
3962 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
3963 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
3964 if (env->cache_info && !cpu->legacy_cache) {
3965 *ecx = encode_cache_cpuid80000005(&env->cache_info->l1d_cache);
3966 *edx = encode_cache_cpuid80000005(&env->cache_info->l1i_cache);
3967 } else {
3968 *ecx = encode_cache_cpuid80000005(&legacy_l1d_cache_amd);
3969 *edx = encode_cache_cpuid80000005(&legacy_l1i_cache_amd);
3971 break;
3972 case 0x80000006:
3973 /* cache info (L2 cache) */
3974 if (cpu->cache_info_passthrough) {
3975 host_cpuid(index, 0, eax, ebx, ecx, edx);
3976 break;
3978 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
3979 (L2_DTLB_2M_ENTRIES << 16) | \
3980 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
3981 (L2_ITLB_2M_ENTRIES);
3982 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
3983 (L2_DTLB_4K_ENTRIES << 16) | \
3984 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
3985 (L2_ITLB_4K_ENTRIES);
3986 if (env->cache_info && !cpu->legacy_cache) {
3987 encode_cache_cpuid80000006(&env->cache_info->l2_cache,
3988 cpu->enable_l3_cache ?
3989 &env->cache_info->l3_cache : NULL,
3990 ecx, edx);
3991 } else {
3992 encode_cache_cpuid80000006(&legacy_l2_cache_amd,
3993 cpu->enable_l3_cache ?
3994 &legacy_l3_cache : NULL,
3995 ecx, edx);
3997 break;
3998 case 0x80000007:
3999 *eax = 0;
4000 *ebx = 0;
4001 *ecx = 0;
4002 *edx = env->features[FEAT_8000_0007_EDX];
4003 break;
4004 case 0x80000008:
4005 /* virtual & phys address size in low 2 bytes. */
4006 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
4007 /* 64 bit processor */
4008 *eax = cpu->phys_bits; /* configurable physical bits */
4009 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
4010 *eax |= 0x00003900; /* 57 bits virtual */
4011 } else {
4012 *eax |= 0x00003000; /* 48 bits virtual */
4014 } else {
4015 *eax = cpu->phys_bits;
4017 *ebx = env->features[FEAT_8000_0008_EBX];
4018 *ecx = 0;
4019 *edx = 0;
4020 if (cs->nr_cores * cs->nr_threads > 1) {
4021 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
4023 break;
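/* For instance, phys_bits=40 on a long-mode CPU without LA57 encodes as
 * EAX=0x00003028 (48-bit virtual, 40-bit physical).
 */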
4024 case 0x8000000A:
4025 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
4026 *eax = 0x00000001; /* SVM Revision */
4027 *ebx = 0x00000010; /* nr of ASIDs */
4028 *ecx = 0;
4029 *edx = env->features[FEAT_SVM]; /* optional features */
4030 } else {
4031 *eax = 0;
4032 *ebx = 0;
4033 *ecx = 0;
4034 *edx = 0;
4036 break;
4037 case 0xC0000000:
4038 *eax = env->cpuid_xlevel2;
4039 *ebx = 0;
4040 *ecx = 0;
4041 *edx = 0;
4042 break;
4043 case 0xC0000001:
4044 /* Support for the CPUID instruction on VIA CPUs */
4045 *eax = env->cpuid_version;
4046 *ebx = 0;
4047 *ecx = 0;
4048 *edx = env->features[FEAT_C000_0001_EDX];
4049 break;
4050 case 0xC0000002:
4051 case 0xC0000003:
4052 case 0xC0000004:
4053 /* Reserved for future use; currently filled with zero */
4054 *eax = 0;
4055 *ebx = 0;
4056 *ecx = 0;
4057 *edx = 0;
4058 break;
4059 case 0x8000001F:
4060 *eax = sev_enabled() ? 0x2 : 0;
4061 *ebx = sev_get_cbit_position();
4062 *ebx |= sev_get_reduced_phys_bits() << 6;
4063 *ecx = 0;
4064 *edx = 0;
4065 break;
4066 default:
4067 /* reserved values: zero */
4068 *eax = 0;
4069 *ebx = 0;
4070 *ecx = 0;
4071 *edx = 0;
4072 break;
4076 /* CPUClass::reset() */
4077 static void x86_cpu_reset(CPUState *s)
4079 X86CPU *cpu = X86_CPU(s);
4080 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
4081 CPUX86State *env = &cpu->env;
4082 target_ulong cr4;
4083 uint64_t xcr0;
4084 int i;
4086 xcc->parent_reset(s);
4088 memset(env, 0, offsetof(CPUX86State, end_reset_fields));
4090 env->old_exception = -1;
4092 /* init to reset state */
4094 env->hflags2 |= HF2_GIF_MASK;
4096 cpu_x86_update_cr0(env, 0x60000010);
4097 env->a20_mask = ~0x0;
4098 env->smbase = 0x30000;
4099 env->msr_smi_count = 0;
4101 env->idt.limit = 0xffff;
4102 env->gdt.limit = 0xffff;
4103 env->ldt.limit = 0xffff;
4104 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
4105 env->tr.limit = 0xffff;
4106 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
4108 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
4109 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
4110 DESC_R_MASK | DESC_A_MASK);
4111 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
4112 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4113 DESC_A_MASK);
4114 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
4115 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4116 DESC_A_MASK);
4117 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
4118 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4119 DESC_A_MASK);
4120 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
4121 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4122 DESC_A_MASK);
4123 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
4124 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4125 DESC_A_MASK);
4127 env->eip = 0xfff0;
4128 env->regs[R_EDX] = env->cpuid_version;
4130 env->eflags = 0x2;
4132 /* FPU init */
4133 for (i = 0; i < 8; i++) {
4134 env->fptags[i] = 1;
4136 cpu_set_fpuc(env, 0x37f);
4138 env->mxcsr = 0x1f80;
4139 /* All units are in INIT state. */
4140 env->xstate_bv = 0;
4142 env->pat = 0x0007040600070406ULL;
4143 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
4145 memset(env->dr, 0, sizeof(env->dr));
4146 env->dr[6] = DR6_FIXED_1;
4147 env->dr[7] = DR7_FIXED_1;
4148 cpu_breakpoint_remove_all(s, BP_CPU);
4149 cpu_watchpoint_remove_all(s, BP_CPU);
4151 cr4 = 0;
4152 xcr0 = XSTATE_FP_MASK;
4154 #ifdef CONFIG_USER_ONLY
4155 /* Enable all the features for user-mode. */
4156 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
4157 xcr0 |= XSTATE_SSE_MASK;
4159 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
4160 const ExtSaveArea *esa = &x86_ext_save_areas[i];
4161 if (env->features[esa->feature] & esa->bits) {
4162 xcr0 |= 1ull << i;
4166 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
4167 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
4169 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
4170 cr4 |= CR4_FSGSBASE_MASK;
4172 #endif
4174 env->xcr0 = xcr0;
4175 cpu_x86_update_cr4(env, cr4);
4178 * SDM 11.11.5 requires:
4179 * - IA32_MTRR_DEF_TYPE MSR.E = 0
4180 * - IA32_MTRR_PHYSMASKn.V = 0
4181 * All other bits are undefined. For simplification, zero it all.
4183 env->mtrr_deftype = 0;
4184 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
4185 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
4187 env->interrupt_injected = -1;
4188 env->exception_injected = -1;
4189 env->nmi_injected = false;
4190 #if !defined(CONFIG_USER_ONLY)
4191 /* We hard-wire the BSP to the first CPU. */
4192 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
4194 s->halted = !cpu_is_bsp(cpu);
4196 if (kvm_enabled()) {
4197 kvm_arch_reset_vcpu(cpu);
4199 else if (hvf_enabled()) {
4200 hvf_reset_vcpu(s);
4202 #endif
4205 #ifndef CONFIG_USER_ONLY
4206 bool cpu_is_bsp(X86CPU *cpu)
4208 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
4211 /* TODO: remove me when reset over the QOM tree is implemented */
4212 static void x86_cpu_machine_reset_cb(void *opaque)
4214 X86CPU *cpu = opaque;
4215 cpu_reset(CPU(cpu));
4217 #endif
4219 static void mce_init(X86CPU *cpu)
4221 CPUX86State *cenv = &cpu->env;
4222 unsigned int bank;
4224 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
4225 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
4226 (CPUID_MCE | CPUID_MCA)) {
4227 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
4228 (cpu->enable_lmce ? MCG_LMCE_P : 0);
4229 cenv->mcg_ctl = ~(uint64_t)0;
4230 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
4231 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
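/* Note (layout assumed from the mce_banks definition in cpu.h): each bank
 * occupies four consecutive entries (MCi_CTL, MCi_STATUS, MCi_ADDR,
 * MCi_MISC), so "bank * 4" above indexes each bank's MCi_CTL register.
 */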
4236 #ifndef CONFIG_USER_ONLY
4237 APICCommonClass *apic_get_class(void)
4239 const char *apic_type = "apic";
4241 /* TODO: in-kernel irqchip for hvf */
4242 if (kvm_apic_in_kernel()) {
4243 apic_type = "kvm-apic";
4244 } else if (xen_enabled()) {
4245 apic_type = "xen-apic";
4248 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
4251 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
4253 APICCommonState *apic;
4254 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());
4256 cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));
4258 object_property_add_child(OBJECT(cpu), "lapic",
4259 OBJECT(cpu->apic_state), &error_abort);
4260 object_unref(OBJECT(cpu->apic_state));
4262 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
4263 /* TODO: convert to link<> */
4264 apic = APIC_COMMON(cpu->apic_state);
4265 apic->cpu = cpu;
4266 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
4269 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
4271 APICCommonState *apic;
4272 static bool apic_mmio_map_once;
4274 if (cpu->apic_state == NULL) {
4275 return;
4277 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
4278 errp);
4280 /* Map APIC MMIO area */
4281 apic = APIC_COMMON(cpu->apic_state);
4282 if (!apic_mmio_map_once) {
4283 memory_region_add_subregion_overlap(get_system_memory(),
4284 apic->apicbase &
4285 MSR_IA32_APICBASE_BASE,
4286 &apic->io_memory,
4287 0x1000);
4288 apic_mmio_map_once = true;
4292 static void x86_cpu_machine_done(Notifier *n, void *unused)
4294 X86CPU *cpu = container_of(n, X86CPU, machine_done);
4295 MemoryRegion *smram =
4296 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
4298 if (smram) {
4299 cpu->smram = g_new(MemoryRegion, 1);
4300 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
4301 smram, 0, 1ull << 32);
4302 memory_region_set_enabled(cpu->smram, true);
4303 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
4306 #else
4307 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
4310 #endif
4312 /* Note: Only safe for use on x86(-64) hosts */
4313 static uint32_t x86_host_phys_bits(void)
4315 uint32_t eax;
4316 uint32_t host_phys_bits;
4318 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
4319 if (eax >= 0x80000008) {
4320 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
4321 /* Note: According to AMD doc 25481 rev 2.34 they have a field
4322 * at 23:16 that can specify the maximum number of physical address
4323 * bits for the guest, overriding this value; but I've not seen
4324 * anything with that set.
4326 host_phys_bits = eax & 0xff;
4327 } else {
4328 /* It's an odd 64 bit machine that doesn't have the leaf for
4329 * physical address bits; fall back to 36, which matches most
4330 * older Intel CPUs.
4332 host_phys_bits = 36;
4335 return host_phys_bits;
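/* Example of the arithmetic above: a host whose CPUID[0x80000008].EAX is
 * 0x00003027 yields host_phys_bits = 0x27 = 39 (the virtual address width,
 * encoded in bits 15:8, is ignored here).
 */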
4338 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
4340 if (*min < value) {
4341 *min = value;
4345 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
4346 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
4348 CPUX86State *env = &cpu->env;
4349 FeatureWordInfo *fi = &feature_word_info[w];
4350 uint32_t eax = fi->cpuid_eax;
4351 uint32_t region = eax & 0xF0000000;
4353 if (!env->features[w]) {
4354 return;
4357 switch (region) {
4358 case 0x00000000:
4359 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
4360 break;
4361 case 0x80000000:
4362 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
4363 break;
4364 case 0xC0000000:
4365 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
4366 break;
4370 /* Calculate XSAVE components based on the configured CPU feature flags */
4371 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
4373 CPUX86State *env = &cpu->env;
4374 int i;
4375 uint64_t mask;
4377 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
4378 return;
4381 mask = 0;
4382 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
4383 const ExtSaveArea *esa = &x86_ext_save_areas[i];
4384 if (env->features[esa->feature] & esa->bits) {
4385 mask |= (1ULL << i);
4389 env->features[FEAT_XSAVE_COMP_LO] = mask;
4390 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
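/* For instance, a guest with x87, SSE and AVX enabled ends up with
 * mask = 0x7 (XSTATE bits 0, 1 and 2), i.e. FEAT_XSAVE_COMP_LO = 7 and
 * FEAT_XSAVE_COMP_HI = 0.
 */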
4393 /***** Steps involved in loading and filtering CPUID data
4395 * When initializing and realizing a CPU object, the steps
4396 * involved in setting up CPUID data are:
4398 * 1) Loading CPU model definition (X86CPUDefinition). This is
4399 * implemented by x86_cpu_load_def() and should be completely
4400 * transparent, as it is done automatically by instance_init.
4401 * No code should need to look at X86CPUDefinition structs
4402 * outside instance_init.
4404 * 2) CPU expansion. This is done by realize before CPUID
4405 * filtering, and will make sure host/accelerator data is
4406 * loaded for CPU models that depend on host capabilities
4407 * (e.g. "host"). Done by x86_cpu_expand_features().
4409 * 3) CPUID filtering. This initializes extra data related to
4410 * CPUID, and checks if the host supports all capabilities
4411 * required by the CPU. Runnability of a CPU model is
4412 * determined at this step. Done by x86_cpu_filter_features().
4414 * Some operations don't require all steps to be performed.
4415 * More precisely:
4417 * - CPU instance creation (instance_init) will run only CPU
4418 * model loading. CPU expansion can't run at instance_init-time
4419 * because host/accelerator data may be not available yet.
4420 * - CPU realization will perform both CPU model expansion and CPUID
4421 * filtering, and return an error in case one of them fails.
4422 * - query-cpu-definitions needs to run all 3 steps. It needs
4423 * to run CPUID filtering, as the 'unavailable-features'
4424 * field is set based on the filtering results.
4425 * - The query-cpu-model-expansion QMP command only needs to run
4426 * CPU model loading and CPU expansion. It should not filter
4427 * any CPUID data based on host capabilities.
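 *
 * As a rough illustration (not a definitive trace), a command line such as
 * "-cpu Haswell,-avx,enforce" goes through: x86_cpu_load_def() picking up the
 * Haswell definition at instance_init time, the "avx" bit property being
 * cleared during feature parsing, and realize then running
 * x86_cpu_expand_features() followed by x86_cpu_filter_features(); with
 * "enforce" set, any feature filtered out at the last step fails realize.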
4430 /* Expand CPU configuration data, based on configured features
4431 * and host/accelerator capabilities when appropriate.
4433 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
4435 CPUX86State *env = &cpu->env;
4436 FeatureWord w;
4437 GList *l;
4438 Error *local_err = NULL;
4440 /* TODO: cpu->max_features no longer overwrites features
4441 * set using QOM properties, so plus_features & minus_features
4442 * can now be converted to global properties
4443 * inside x86_cpu_parse_featurestr() too.
4445 if (cpu->max_features) {
4446 for (w = 0; w < FEATURE_WORDS; w++) {
4447 /* Override only features that weren't set explicitly
4448 * by the user.
4450 env->features[w] |=
4451 x86_cpu_get_supported_feature_word(w, cpu->migratable) &
4452 ~env->user_features[w] & \
4453 ~feature_word_info[w].no_autoenable_flags;
4457 for (l = plus_features; l; l = l->next) {
4458 const char *prop = l->data;
4459 object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
4460 if (local_err) {
4461 goto out;
4465 for (l = minus_features; l; l = l->next) {
4466 const char *prop = l->data;
4467 object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
4468 if (local_err) {
4469 goto out;
4473 if (!kvm_enabled() || !cpu->expose_kvm) {
4474 env->features[FEAT_KVM] = 0;
4477 x86_cpu_enable_xsave_components(cpu);
4479 /* CPUID[EAX=7,ECX=0].EBX always increases the level automatically: */
4480 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
4481 if (cpu->full_cpuid_auto_level) {
4482 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
4483 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
4484 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
4485 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
4486 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
4487 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
4488 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
4489 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
4490 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
4491 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
4492 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
4493 /* SVM requires CPUID[0x8000000A] */
4494 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
4495 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
4498 /* SEV requires CPUID[0x8000001F] */
4499 if (sev_enabled()) {
4500 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
4504 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
4505 if (env->cpuid_level == UINT32_MAX) {
4506 env->cpuid_level = env->cpuid_min_level;
4508 if (env->cpuid_xlevel == UINT32_MAX) {
4509 env->cpuid_xlevel = env->cpuid_min_xlevel;
4511 if (env->cpuid_xlevel2 == UINT32_MAX) {
4512 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
4515 out:
4516 if (local_err != NULL) {
4517 error_propagate(errp, local_err);
4522 * Finishes initialization of CPUID data, filters CPU feature
4523 * words based on host availability of each feature.
4525 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
4527 static int x86_cpu_filter_features(X86CPU *cpu)
4529 CPUX86State *env = &cpu->env;
4530 FeatureWord w;
4531 int rv = 0;
4533 for (w = 0; w < FEATURE_WORDS; w++) {
4534 uint32_t host_feat =
4535 x86_cpu_get_supported_feature_word(w, false);
4536 uint32_t requested_features = env->features[w];
4537 env->features[w] &= host_feat;
4538 cpu->filtered_features[w] = requested_features & ~env->features[w];
4539 if (cpu->filtered_features[w]) {
4540 rv = 1;
4544 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
4545 kvm_enabled()) {
4546 KVMState *s = CPU(cpu)->kvm_state;
4547 uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
4548 uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
4549 uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
4550 uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
4551 uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);
4553 if (!eax_0 ||
4554 ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
4555 ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
4556 ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
4557 ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
4558 INTEL_PT_ADDR_RANGES_NUM) ||
4559 ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
4560 (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
4561 (ecx_0 & INTEL_PT_IP_LIP)) {
4563 * Processor Trace capabilities aren't configurable, so if the
4564 * host can't emulate the capabilities we report on
4565 * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
4567 env->features[FEAT_7_0_EBX] &= ~CPUID_7_0_EBX_INTEL_PT;
4568 cpu->filtered_features[FEAT_7_0_EBX] |= CPUID_7_0_EBX_INTEL_PT;
4569 rv = 1;
4573 return rv;
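/* For example, "+intel-pt" is filtered out by the check above unless the KVM
 * host reports compatible CPUID[0x14] capabilities; with "enforce" set,
 * x86_cpu_realizefn() then refuses to start the CPU.
 */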
4576 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
4577 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
4578 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
4579 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
4580 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
4581 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
4582 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
4584 CPUState *cs = CPU(dev);
4585 X86CPU *cpu = X86_CPU(dev);
4586 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
4587 CPUX86State *env = &cpu->env;
4588 Error *local_err = NULL;
4589 static bool ht_warned;
4591 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
4592 char *name = x86_cpu_class_get_model_name(xcc);
4593 error_setg(&local_err, "CPU model '%s' requires KVM", name);
4594 g_free(name);
4595 goto out;
4598 if (cpu->apic_id == UNASSIGNED_APIC_ID) {
4599 error_setg(errp, "apic-id property was not initialized properly");
4600 return;
4603 x86_cpu_expand_features(cpu, &local_err);
4604 if (local_err) {
4605 goto out;
4608 if (x86_cpu_filter_features(cpu) &&
4609 (cpu->check_cpuid || cpu->enforce_cpuid)) {
4610 x86_cpu_report_filtered_features(cpu);
4611 if (cpu->enforce_cpuid) {
4612 error_setg(&local_err,
4613 accel_uses_host_cpuid() ?
4614 "Host doesn't support requested features" :
4615 "TCG doesn't support requested features");
4616 goto out;
4620 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
4621 * CPUID[1].EDX.
4623 if (IS_AMD_CPU(env)) {
4624 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
4625 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
4626 & CPUID_EXT2_AMD_ALIASES);
4629 /* For 64-bit systems, decide how many physical address bits to present.
4630 * Ideally this should be the same as the host; anything other than matching
4631 * the host can cause incorrect guest behaviour.
4632 * QEMU used to pick the magic value of 40 bits, which corresponds to
4633 * consumer AMD devices but nothing else.
4635 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
4636 if (accel_uses_host_cpuid()) {
4637 uint32_t host_phys_bits = x86_host_phys_bits();
4638 static bool warned;
4640 if (cpu->host_phys_bits) {
4641 /* The user asked for us to use the host physical bits */
4642 cpu->phys_bits = host_phys_bits;
4645 /* Print a warning if the user set it to a value that's not the
4646 * host value.
4648 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
4649 !warned) {
4650 warn_report("Host physical bits (%u)"
4651 " does not match phys-bits property (%u)",
4652 host_phys_bits, cpu->phys_bits);
4653 warned = true;
4656 if (cpu->phys_bits &&
4657 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
4658 cpu->phys_bits < 32)) {
4659 error_setg(errp, "phys-bits should be between 32 and %u "
4660 " (but is %u)",
4661 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
4662 return;
4664 } else {
4665 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
4666 error_setg(errp, "TCG only supports phys-bits=%u",
4667 TCG_PHYS_ADDR_BITS);
4668 return;
4671 /* 0 means it was not explicitly set by the user (or by machine
4672 * compat_props or by the host code above). In this case, the default
4673 * is the value used by TCG (40).
4675 if (cpu->phys_bits == 0) {
4676 cpu->phys_bits = TCG_PHYS_ADDR_BITS;
4678 } else {
4679 /* For 32-bit systems, don't honour a user-set value, but keep
4680 * phys_bits consistent with what we tell the guest.
4682 if (cpu->phys_bits != 0) {
4683 error_setg(errp, "phys-bits is not user-configurable in 32 bit");
4684 return;
4687 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
4688 cpu->phys_bits = 36;
4689 } else {
4690 cpu->phys_bits = 32;
4693 cpu_exec_realizefn(cs, &local_err);
4694 if (local_err != NULL) {
4695 error_propagate(errp, local_err);
4696 return;
4699 #ifndef CONFIG_USER_ONLY
4700 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
4702 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
4703 x86_cpu_apic_create(cpu, &local_err);
4704 if (local_err != NULL) {
4705 goto out;
4708 #endif
4710 mce_init(cpu);
4712 #ifndef CONFIG_USER_ONLY
4713 if (tcg_enabled()) {
4714 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
4715 cpu->cpu_as_root = g_new(MemoryRegion, 1);
4717 /* Outer container... */
4718 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
4719 memory_region_set_enabled(cpu->cpu_as_root, true);
4721 /* ... with two regions inside: normal system memory with low
4722 * priority, and...
4724 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
4725 get_system_memory(), 0, ~0ull);
4726 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
4727 memory_region_set_enabled(cpu->cpu_as_mem, true);
4729 cs->num_ases = 2;
4730 cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
4731 cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);
4733 /* ... SMRAM with higher priority, linked from /machine/smram. */
4734 cpu->machine_done.notify = x86_cpu_machine_done;
4735 qemu_add_machine_init_done_notifier(&cpu->machine_done);
4737 #endif
4739 qemu_init_vcpu(cs);
4741 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
4742 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
4743 * based on inputs (sockets, cores, threads), it is still better to give
4744 * users a warning.
4746 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
4747 * cs->nr_threads hasn't been populated yet and the check would be incorrect.
4749 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
4750 error_report("AMD CPU doesn't support hyperthreading. Please configure"
4751 " -smp options properly.");
4752 ht_warned = true;
4755 x86_cpu_apic_realize(cpu, &local_err);
4756 if (local_err != NULL) {
4757 goto out;
4759 cpu_reset(cs);
4761 xcc->parent_realize(dev, &local_err);
4763 out:
4764 if (local_err != NULL) {
4765 error_propagate(errp, local_err);
4766 return;
4770 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
4772 X86CPU *cpu = X86_CPU(dev);
4773 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
4774 Error *local_err = NULL;
4776 #ifndef CONFIG_USER_ONLY
4777 cpu_remove_sync(CPU(dev));
4778 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
4779 #endif
4781 if (cpu->apic_state) {
4782 object_unparent(OBJECT(cpu->apic_state));
4783 cpu->apic_state = NULL;
4786 xcc->parent_unrealize(dev, &local_err);
4787 if (local_err != NULL) {
4788 error_propagate(errp, local_err);
4789 return;
4793 typedef struct BitProperty {
4794 FeatureWord w;
4795 uint32_t mask;
4796 } BitProperty;
4798 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
4799 void *opaque, Error **errp)
4801 X86CPU *cpu = X86_CPU(obj);
4802 BitProperty *fp = opaque;
4803 uint32_t f = cpu->env.features[fp->w];
4804 bool value = (f & fp->mask) == fp->mask;
4805 visit_type_bool(v, name, &value, errp);
4808 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
4809 void *opaque, Error **errp)
4811 DeviceState *dev = DEVICE(obj);
4812 X86CPU *cpu = X86_CPU(obj);
4813 BitProperty *fp = opaque;
4814 Error *local_err = NULL;
4815 bool value;
4817 if (dev->realized) {
4818 qdev_prop_set_after_realize(dev, name, errp);
4819 return;
4822 visit_type_bool(v, name, &value, &local_err);
4823 if (local_err) {
4824 error_propagate(errp, local_err);
4825 return;
4828 if (value) {
4829 cpu->env.features[fp->w] |= fp->mask;
4830 } else {
4831 cpu->env.features[fp->w] &= ~fp->mask;
4833 cpu->env.user_features[fp->w] |= fp->mask;
4836 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
4837 void *opaque)
4839 BitProperty *prop = opaque;
4840 g_free(prop);
4843 /* Register a boolean property to get/set a single bit in a uint32_t field.
4845 * The same property name can be registered multiple times to make it affect
4846 * multiple bits in the same FeatureWord. In that case, the getter will return
4847 * true only if all bits are set.
4849 static void x86_cpu_register_bit_prop(X86CPU *cpu,
4850 const char *prop_name,
4851 FeatureWord w,
4852 int bitnr)
4854 BitProperty *fp;
4855 ObjectProperty *op;
4856 uint32_t mask = (1UL << bitnr);
4858 op = object_property_find(OBJECT(cpu), prop_name, NULL);
4859 if (op) {
4860 fp = op->opaque;
4861 assert(fp->w == w);
4862 fp->mask |= mask;
4863 } else {
4864 fp = g_new0(BitProperty, 1);
4865 fp->w = w;
4866 fp->mask = mask;
4867 object_property_add(OBJECT(cpu), prop_name, "bool",
4868 x86_cpu_get_bit_prop,
4869 x86_cpu_set_bit_prop,
4870 x86_cpu_release_bit_prop, fp, &error_abort);
4874 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
4875 FeatureWord w,
4876 int bitnr)
4878 FeatureWordInfo *fi = &feature_word_info[w];
4879 const char *name = fi->feat_names[bitnr];
4881 if (!name) {
4882 return;
4885 /* Property names should use "-" instead of "_".
4886 * Old names containing underscores are registered as aliases
4887 * using object_property_add_alias()
4889 assert(!strchr(name, '_'));
4890 /* aliases don't use "|" delimiters anymore, they are registered
4891 * manually using object_property_add_alias() */
4892 assert(!strchr(name, '|'));
4893 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
4896 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
4898 X86CPU *cpu = X86_CPU(cs);
4899 CPUX86State *env = &cpu->env;
4900 GuestPanicInformation *panic_info = NULL;
4902 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
4903 panic_info = g_malloc0(sizeof(GuestPanicInformation));
4905 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
4907 assert(HV_CRASH_PARAMS >= 5);
4908 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
4909 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
4910 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
4911 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
4912 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
4915 return panic_info;
4917 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
4918 const char *name, void *opaque,
4919 Error **errp)
4921 CPUState *cs = CPU(obj);
4922 GuestPanicInformation *panic_info;
4924 if (!cs->crash_occurred) {
4925 error_setg(errp, "No crash occurred");
4926 return;
4929 panic_info = x86_cpu_get_crash_info(cs);
4930 if (panic_info == NULL) {
4931 error_setg(errp, "No crash information");
4932 return;
4935 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
4936 errp);
4937 qapi_free_GuestPanicInformation(panic_info);
4940 static void x86_cpu_initfn(Object *obj)
4942 CPUState *cs = CPU(obj);
4943 X86CPU *cpu = X86_CPU(obj);
4944 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
4945 CPUX86State *env = &cpu->env;
4946 FeatureWord w;
4948 cs->env_ptr = env;
4950 object_property_add(obj, "family", "int",
4951 x86_cpuid_version_get_family,
4952 x86_cpuid_version_set_family, NULL, NULL, NULL);
4953 object_property_add(obj, "model", "int",
4954 x86_cpuid_version_get_model,
4955 x86_cpuid_version_set_model, NULL, NULL, NULL);
4956 object_property_add(obj, "stepping", "int",
4957 x86_cpuid_version_get_stepping,
4958 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
4959 object_property_add_str(obj, "vendor",
4960 x86_cpuid_get_vendor,
4961 x86_cpuid_set_vendor, NULL);
4962 object_property_add_str(obj, "model-id",
4963 x86_cpuid_get_model_id,
4964 x86_cpuid_set_model_id, NULL);
4965 object_property_add(obj, "tsc-frequency", "int",
4966 x86_cpuid_get_tsc_freq,
4967 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
4968 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
4969 x86_cpu_get_feature_words,
4970 NULL, NULL, (void *)env->features, NULL);
4971 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
4972 x86_cpu_get_feature_words,
4973 NULL, NULL, (void *)cpu->filtered_features, NULL);
4975 object_property_add(obj, "crash-information", "GuestPanicInformation",
4976 x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);
4978 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
4980 for (w = 0; w < FEATURE_WORDS; w++) {
4981 int bitnr;
4983 for (bitnr = 0; bitnr < 32; bitnr++) {
4984 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
4988 object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
4989 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
4990 object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
4991 object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
4992 object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
4993 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
4994 object_property_add_alias(obj, "i64", obj, "lm", &error_abort);
4996 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
4997 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
4998 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
4999 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
5000 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
5001 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
5002 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
5003 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
5004 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
5005 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
5006 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
5007 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
5008 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
5009 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
5010 object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
5011 object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
5012 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
5013 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
5014 object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
5015 object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
5016 object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);
5018 if (xcc->cpu_def) {
5019 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
5023 static int64_t x86_cpu_get_arch_id(CPUState *cs)
5025 X86CPU *cpu = X86_CPU(cs);
5027 return cpu->apic_id;
5030 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
5032 X86CPU *cpu = X86_CPU(cs);
5034 return cpu->env.cr[0] & CR0_PG_MASK;
5037 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
5039 X86CPU *cpu = X86_CPU(cs);
5041 cpu->env.eip = value;
5044 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
5046 X86CPU *cpu = X86_CPU(cs);
5048 cpu->env.eip = tb->pc - tb->cs_base;
5051 static bool x86_cpu_has_work(CPUState *cs)
5053 X86CPU *cpu = X86_CPU(cs);
5054 CPUX86State *env = &cpu->env;
5056 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
5057 CPU_INTERRUPT_POLL)) &&
5058 (env->eflags & IF_MASK)) ||
5059 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
5060 CPU_INTERRUPT_INIT |
5061 CPU_INTERRUPT_SIPI |
5062 CPU_INTERRUPT_MCE)) ||
5063 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
5064 !(env->hflags & HF_SMM_MASK));
5067 static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
5069 X86CPU *cpu = X86_CPU(cs);
5070 CPUX86State *env = &cpu->env;
5072 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
5073 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
5074 : bfd_mach_i386_i8086);
5075 info->print_insn = print_insn_i386;
5077 info->cap_arch = CS_ARCH_X86;
5078 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
5079 : env->hflags & HF_CS32_MASK ? CS_MODE_32
5080 : CS_MODE_16);
5081 info->cap_insn_unit = 1;
5082 info->cap_insn_split = 8;
5085 void x86_update_hflags(CPUX86State *env)
5087 uint32_t hflags;
5088 #define HFLAG_COPY_MASK \
5089 ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
5090 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
5091 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
5092 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
5094 hflags = env->hflags & HFLAG_COPY_MASK;
5095 hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
5096 hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
5097 hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
5098 (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
5099 hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
5101 if (env->cr[4] & CR4_OSFXSR_MASK) {
5102 hflags |= HF_OSFXSR_MASK;
5105 if (env->efer & MSR_EFER_LMA) {
5106 hflags |= HF_LMA_MASK;
5109 if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
5110 hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
5111 } else {
5112 hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
5113 (DESC_B_SHIFT - HF_CS32_SHIFT);
5114 hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
5115 (DESC_B_SHIFT - HF_SS32_SHIFT);
5116 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
5117 !(hflags & HF_CS32_MASK)) {
5118 hflags |= HF_ADDSEG_MASK;
5119 } else {
5120 hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
5121 env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
5124 env->hflags = hflags;
5127 static Property x86_cpu_properties[] = {
5128 #ifdef CONFIG_USER_ONLY
5129 /* apic_id = 0 by default for *-user, see commit 9886e834 */
5130 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
5131 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
5132 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
5133 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
5134 #else
5135 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
5136 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
5137 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
5138 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
5139 #endif
5140 DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
5141 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
5142 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
5143 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
5144 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
5145 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
5146 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
5147 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
5148 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
5149 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
5150 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
5151 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
5152 DEFINE_PROP_BOOL("hv-frequencies", X86CPU, hyperv_frequencies, false),
5153 DEFINE_PROP_BOOL("hv-reenlightenment", X86CPU, hyperv_reenlightenment, false),
5154 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
5155 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
5156 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
5157 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
5158 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
5159 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
5160 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
5161 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
5162 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
5163 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
5164 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
5165 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
5166 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
5167 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
5168 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
5169 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
5170 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
5171 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
5172 false),
5173 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
5174 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
5176 * legacy_cache's default depends on the CPU model being chosen. It is set
5177 * in x86_cpu_load_def() based on cache_info, which is initialized in
5178 * builtin_x86_defs.
5180 DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, false),
5183 * From "Requirements for Implementing the Microsoft
5184 * Hypervisor Interface":
5185 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
5187 * "Starting with Windows Server 2012 and Windows 8, if
5188 * CPUID.40000005.EAX contains a value of -1, Windows assumes that
5189 * the hypervisor imposes no specific limit to the number of VPs.
5190 * In this case, Windows Server 2012 guest VMs may use more than
5191 * 64 VPs, up to the maximum supported number of processors applicable
5192 * to the specific Windows version being used."
5194 DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
5195 DEFINE_PROP_END_OF_LIST()
5198 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
5200 X86CPUClass *xcc = X86_CPU_CLASS(oc);
5201 CPUClass *cc = CPU_CLASS(oc);
5202 DeviceClass *dc = DEVICE_CLASS(oc);
5204 device_class_set_parent_realize(dc, x86_cpu_realizefn,
5205 &xcc->parent_realize);
5206 device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
5207 &xcc->parent_unrealize);
5208 dc->props = x86_cpu_properties;
5210 xcc->parent_reset = cc->reset;
5211 cc->reset = x86_cpu_reset;
5212 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
5214 cc->class_by_name = x86_cpu_class_by_name;
5215 cc->parse_features = x86_cpu_parse_featurestr;
5216 cc->has_work = x86_cpu_has_work;
5217 #ifdef CONFIG_TCG
5218 cc->do_interrupt = x86_cpu_do_interrupt;
5219 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
5220 #endif
5221 cc->dump_state = x86_cpu_dump_state;
5222 cc->get_crash_info = x86_cpu_get_crash_info;
5223 cc->set_pc = x86_cpu_set_pc;
5224 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
5225 cc->gdb_read_register = x86_cpu_gdb_read_register;
5226 cc->gdb_write_register = x86_cpu_gdb_write_register;
5227 cc->get_arch_id = x86_cpu_get_arch_id;
5228 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
5229 #ifdef CONFIG_USER_ONLY
5230 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
5231 #else
5232 cc->asidx_from_attrs = x86_asidx_from_attrs;
5233 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
5234 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
5235 cc->write_elf64_note = x86_cpu_write_elf64_note;
5236 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
5237 cc->write_elf32_note = x86_cpu_write_elf32_note;
5238 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
5239 cc->vmsd = &vmstate_x86_cpu;
5240 #endif
5241 cc->gdb_arch_name = x86_gdb_arch_name;
5242 #ifdef TARGET_X86_64
5243 cc->gdb_core_xml_file = "i386-64bit.xml";
5244 cc->gdb_num_core_regs = 57;
5245 #else
5246 cc->gdb_core_xml_file = "i386-32bit.xml";
5247 cc->gdb_num_core_regs = 41;
5248 #endif
5249 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
5250 cc->debug_excp_handler = breakpoint_handler;
5251 #endif
5252 cc->cpu_exec_enter = x86_cpu_exec_enter;
5253 cc->cpu_exec_exit = x86_cpu_exec_exit;
5254 #ifdef CONFIG_TCG
5255 cc->tcg_initialize = tcg_x86_init;
5256 #endif
5257 cc->disas_set_info = x86_disas_set_info;
5259 dc->user_creatable = true;
5262 static const TypeInfo x86_cpu_type_info = {
5263 .name = TYPE_X86_CPU,
5264 .parent = TYPE_CPU,
5265 .instance_size = sizeof(X86CPU),
5266 .instance_init = x86_cpu_initfn,
5267 .abstract = true,
5268 .class_size = sizeof(X86CPUClass),
5269 .class_init = x86_cpu_common_class_init,
5273 /* "base" CPU model, used by query-cpu-model-expansion */
5274 static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
5276 X86CPUClass *xcc = X86_CPU_CLASS(oc);
5278 xcc->static_model = true;
5279 xcc->migration_safe = true;
5280 xcc->model_description = "base CPU model type with no features enabled";
5281 xcc->ordering = 8;
5284 static const TypeInfo x86_base_cpu_type_info = {
5285 .name = X86_CPU_TYPE_NAME("base"),
5286 .parent = TYPE_X86_CPU,
5287 .class_init = x86_cpu_base_class_init,
5290 static void x86_cpu_register_types(void)
5292 int i;
5294 type_register_static(&x86_cpu_type_info);
5295 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
5296 x86_register_cpudef_type(&builtin_x86_defs[i]);
5298 type_register_static(&max_x86_cpu_type_info);
5299 type_register_static(&x86_base_cpu_type_info);
5300 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
5301 type_register_static(&host_x86_cpu_type_info);
5302 #endif
5305 type_init(x86_cpu_register_types)