target/i386: Add the 'model-id' for Skylake -v3 CPU models
target/i386/cpu.c
1 /*
2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "qemu/units.h"
22 #include "qemu/cutils.h"
23 #include "qemu/bitops.h"
24 #include "qemu/qemu-print.h"
26 #include "cpu.h"
27 #include "exec/exec-all.h"
28 #include "sysemu/kvm.h"
29 #include "sysemu/reset.h"
30 #include "sysemu/hvf.h"
31 #include "sysemu/cpus.h"
32 #include "kvm_i386.h"
33 #include "sev_i386.h"
35 #include "qemu/error-report.h"
36 #include "qemu/module.h"
37 #include "qemu/option.h"
38 #include "qemu/config-file.h"
39 #include "qapi/error.h"
40 #include "qapi/qapi-visit-machine.h"
41 #include "qapi/qapi-visit-run-state.h"
42 #include "qapi/qmp/qdict.h"
43 #include "qapi/qmp/qerror.h"
44 #include "qapi/visitor.h"
45 #include "qom/qom-qobject.h"
46 #include "sysemu/arch_init.h"
47 #include "qapi/qapi-commands-machine-target.h"
49 #include "standard-headers/asm-x86/kvm_para.h"
51 #include "sysemu/sysemu.h"
52 #include "sysemu/tcg.h"
53 #include "hw/qdev-properties.h"
54 #include "hw/i386/topology.h"
55 #ifndef CONFIG_USER_ONLY
56 #include "exec/address-spaces.h"
57 #include "hw/xen/xen.h"
58 #include "hw/i386/apic_internal.h"
59 #include "hw/boards.h"
60 #endif
62 #include "disas/capstone.h"
64 /* Helpers for building CPUID[2] descriptors: */
66 struct CPUID2CacheDescriptorInfo {
67 enum CacheType type;
68 int level;
69 int size;
70 int line_size;
71 int associativity;
75 * Known CPUID 2 cache descriptors.
76 * From Intel SDM Volume 2A, CPUID instruction
78 struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
79 [0x06] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 8 * KiB,
80 .associativity = 4, .line_size = 32, },
81 [0x08] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 16 * KiB,
82 .associativity = 4, .line_size = 32, },
83 [0x09] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
84 .associativity = 4, .line_size = 64, },
85 [0x0A] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
86 .associativity = 2, .line_size = 32, },
87 [0x0C] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
88 .associativity = 4, .line_size = 32, },
89 [0x0D] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
90 .associativity = 4, .line_size = 64, },
91 [0x0E] = { .level = 1, .type = DATA_CACHE, .size = 24 * KiB,
92 .associativity = 6, .line_size = 64, },
93 [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
94 .associativity = 2, .line_size = 64, },
95 [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
96 .associativity = 8, .line_size = 64, },
 97 /* lines per sector is not supported by cpuid2_cache_descriptor(),
 98 * so descriptors 0x22, 0x23 are not included
100 [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
101 .associativity = 16, .line_size = 64, },
 102 /* lines per sector is not supported by cpuid2_cache_descriptor(),
 103 * so descriptors 0x25, 0x20 are not included
105 [0x2C] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
106 .associativity = 8, .line_size = 64, },
107 [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
108 .associativity = 8, .line_size = 64, },
109 [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
110 .associativity = 4, .line_size = 32, },
111 [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
112 .associativity = 4, .line_size = 32, },
113 [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
114 .associativity = 4, .line_size = 32, },
115 [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
116 .associativity = 4, .line_size = 32, },
117 [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
118 .associativity = 4, .line_size = 32, },
119 [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
120 .associativity = 4, .line_size = 64, },
121 [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
122 .associativity = 8, .line_size = 64, },
123 [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB,
124 .associativity = 12, .line_size = 64, },
125 /* Descriptor 0x49 depends on CPU family/model, so it is not included */
126 [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
127 .associativity = 12, .line_size = 64, },
128 [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
129 .associativity = 16, .line_size = 64, },
130 [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
131 .associativity = 12, .line_size = 64, },
132 [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB,
133 .associativity = 16, .line_size = 64, },
134 [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB,
135 .associativity = 24, .line_size = 64, },
136 [0x60] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
137 .associativity = 8, .line_size = 64, },
138 [0x66] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
139 .associativity = 4, .line_size = 64, },
140 [0x67] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
141 .associativity = 4, .line_size = 64, },
142 [0x68] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
143 .associativity = 4, .line_size = 64, },
144 [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
145 .associativity = 4, .line_size = 64, },
 146 /* lines per sector is not supported by cpuid2_cache_descriptor(),
 147 * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
149 [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
150 .associativity = 8, .line_size = 64, },
151 [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
152 .associativity = 2, .line_size = 64, },
153 [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
154 .associativity = 8, .line_size = 64, },
155 [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
156 .associativity = 8, .line_size = 32, },
157 [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
158 .associativity = 8, .line_size = 32, },
159 [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
160 .associativity = 8, .line_size = 32, },
161 [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
162 .associativity = 8, .line_size = 32, },
163 [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
164 .associativity = 4, .line_size = 64, },
165 [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
166 .associativity = 8, .line_size = 64, },
167 [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB,
168 .associativity = 4, .line_size = 64, },
169 [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
170 .associativity = 4, .line_size = 64, },
171 [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
172 .associativity = 4, .line_size = 64, },
173 [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
174 .associativity = 8, .line_size = 64, },
175 [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
176 .associativity = 8, .line_size = 64, },
177 [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
178 .associativity = 8, .line_size = 64, },
179 [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB,
180 .associativity = 12, .line_size = 64, },
181 [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB,
182 .associativity = 12, .line_size = 64, },
183 [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
184 .associativity = 12, .line_size = 64, },
185 [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
186 .associativity = 16, .line_size = 64, },
187 [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
188 .associativity = 16, .line_size = 64, },
189 [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
190 .associativity = 16, .line_size = 64, },
191 [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
192 .associativity = 24, .line_size = 64, },
193 [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB,
194 .associativity = 24, .line_size = 64, },
195 [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB,
196 .associativity = 24, .line_size = 64, },
200 * "CPUID leaf 2 does not report cache descriptor information,
201 * use CPUID leaf 4 to query cache parameters"
203 #define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF
206 * Return a CPUID 2 cache descriptor for a given cache.
207 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
209 static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
211 int i;
213 assert(cache->size > 0);
214 assert(cache->level > 0);
215 assert(cache->line_size > 0);
216 assert(cache->associativity > 0);
217 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
218 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
219 if (d->level == cache->level && d->type == cache->type &&
220 d->size == cache->size && d->line_size == cache->line_size &&
221 d->associativity == cache->associativity) {
222 return i;
226 return CACHE_DESCRIPTOR_UNAVAILABLE;
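/*
 * For illustration (not from the original source): the legacy_l2_cache_cpuid2
 * definition further below (unified, level 2, 2 MiB, 8-way, 64-byte lines)
 * matches entry 0x7D in the table above, so this lookup returns descriptor 0x7D.
 */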
229 /* CPUID Leaf 4 constants: */
231 /* EAX: */
232 #define CACHE_TYPE_D 1
233 #define CACHE_TYPE_I 2
234 #define CACHE_TYPE_UNIFIED 3
236 #define CACHE_LEVEL(l) (l << 5)
238 #define CACHE_SELF_INIT_LEVEL (1 << 8)
240 /* EDX: */
241 #define CACHE_NO_INVD_SHARING (1 << 0)
242 #define CACHE_INCLUSIVE (1 << 1)
243 #define CACHE_COMPLEX_IDX (1 << 2)
245 /* Encode CacheType for CPUID[4].EAX */
246 #define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \
247 ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \
248 ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
249 0 /* Invalid value */)
252 /* Encode cache info for CPUID[4] */
253 static void encode_cache_cpuid4(CPUCacheInfo *cache,
254 int num_apic_ids, int num_cores,
255 uint32_t *eax, uint32_t *ebx,
256 uint32_t *ecx, uint32_t *edx)
258 assert(cache->size == cache->line_size * cache->associativity *
259 cache->partitions * cache->sets);
261 assert(num_apic_ids > 0);
262 *eax = CACHE_TYPE(cache->type) |
263 CACHE_LEVEL(cache->level) |
264 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
265 ((num_cores - 1) << 26) |
266 ((num_apic_ids - 1) << 14);
268 assert(cache->line_size > 0);
269 assert(cache->partitions > 0);
270 assert(cache->associativity > 0);
271 /* We don't implement fully-associative caches */
272 assert(cache->associativity < cache->sets);
273 *ebx = (cache->line_size - 1) |
274 ((cache->partitions - 1) << 12) |
275 ((cache->associativity - 1) << 22);
277 assert(cache->sets > 0);
278 *ecx = cache->sets - 1;
280 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
281 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
282 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
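/*
 * Worked example, for illustration only: the legacy_l2_cache definition
 * below (unified, level 2, 4 MiB, 16-way, 64-byte lines, 4096 sets,
 * self-initializing, no_invd_sharing) with num_apic_ids = num_cores = 1
 * encodes as EAX = 0x00000143, EBX = 0x03C0003F, ECX = 0x00000FFF,
 * EDX = 0x00000001.
 */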
285 /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
286 static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
288 assert(cache->size % 1024 == 0);
289 assert(cache->lines_per_tag > 0);
290 assert(cache->associativity > 0);
291 assert(cache->line_size > 0);
292 return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
293 (cache->lines_per_tag << 8) | (cache->line_size);
296 #define ASSOC_FULL 0xFF
298 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
299 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
300 a == 2 ? 0x2 : \
301 a == 4 ? 0x4 : \
302 a == 8 ? 0x6 : \
303 a == 16 ? 0x8 : \
304 a == 32 ? 0xA : \
305 a == 48 ? 0xB : \
306 a == 64 ? 0xC : \
307 a == 96 ? 0xD : \
308 a == 128 ? 0xE : \
309 a == ASSOC_FULL ? 0xF : \
310 0 /* invalid value */)
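/*
 * For example, an 8-way cache encodes as 0x6 and a 16-way cache as 0x8;
 * associativities without a defined encoding (e.g. 3-way) fall through
 * to the invalid value 0.
 */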
313 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
314 * @l3 can be NULL.
316 static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
317 CPUCacheInfo *l3,
318 uint32_t *ecx, uint32_t *edx)
320 assert(l2->size % 1024 == 0);
321 assert(l2->associativity > 0);
322 assert(l2->lines_per_tag > 0);
323 assert(l2->line_size > 0);
324 *ecx = ((l2->size / 1024) << 16) |
325 (AMD_ENC_ASSOC(l2->associativity) << 12) |
326 (l2->lines_per_tag << 8) | (l2->line_size);
328 if (l3) {
329 assert(l3->size % (512 * 1024) == 0);
330 assert(l3->associativity > 0);
331 assert(l3->lines_per_tag > 0);
332 assert(l3->line_size > 0);
333 *edx = ((l3->size / (512 * 1024)) << 18) |
334 (AMD_ENC_ASSOC(l3->associativity) << 12) |
335 (l3->lines_per_tag << 8) | (l3->line_size);
336 } else {
337 *edx = 0;
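/*
 * Worked example, for illustration only: the legacy_l2_cache_amd and
 * legacy_l3_cache definitions below (512 KiB 16-way L2 and 16 MiB 16-way L3,
 * both with 64-byte lines and lines_per_tag = 1) encode as
 * ECX = 0x02008140 and EDX = 0x00808140.
 */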
342 * Definitions used for building CPUID Leaf 0x8000001D and 0x8000001E
343 * Please refer to the AMD64 Architecture Programmer’s Manual Volume 3.
 344 * Define the constants to build the CPU topology. Right now, the TOPOEXT
 345 * feature is enabled only on EPYC, so these constants are based on
 346 * EPYC-supported configurations. We may need to handle other cases if
 347 * these values change in the future.
349 /* Maximum core complexes in a node */
350 #define MAX_CCX 2
351 /* Maximum cores in a core complex */
352 #define MAX_CORES_IN_CCX 4
353 /* Maximum cores in a node */
354 #define MAX_CORES_IN_NODE 8
355 /* Maximum nodes in a socket */
356 #define MAX_NODES_PER_SOCKET 4
359 * Figure out the number of nodes required to build this config.
360 * Max cores in a node is 8
362 static int nodes_in_socket(int nr_cores)
364 int nodes;
366 nodes = DIV_ROUND_UP(nr_cores, MAX_CORES_IN_NODE);
368 /* Hardware does not support config with 3 nodes, return 4 in that case */
369 return (nodes == 3) ? 4 : nodes;
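/*
 * For example: 8 cores -> 1 node, 12 cores -> 2 nodes,
 * 24 cores -> 4 nodes (3 rounded up to 4), 32 cores -> 4 nodes.
 */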
 373 * Decide the number of cores in a core complex for the given nr_cores,
 374 * using the constants MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE and
 375 * MAX_NODES_PER_SOCKET, maintaining symmetry as much as possible.
 376 * The L3 cache is shared across all cores in a core complex, so this also
 377 * tells us how many cores share the L3 cache.
379 static int cores_in_core_complex(int nr_cores)
381 int nodes;
383 /* Check if we can fit all the cores in one core complex */
384 if (nr_cores <= MAX_CORES_IN_CCX) {
385 return nr_cores;
387 /* Get the number of nodes required to build this config */
388 nodes = nodes_in_socket(nr_cores);
 391 * Divide the cores across all the core complexes and
 392 * return the rounded-up value.
394 return DIV_ROUND_UP(nr_cores, nodes * MAX_CCX);
397 /* Encode cache info for CPUID[8000001D] */
398 static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs,
399 uint32_t *eax, uint32_t *ebx,
400 uint32_t *ecx, uint32_t *edx)
402 uint32_t l3_cores;
403 assert(cache->size == cache->line_size * cache->associativity *
404 cache->partitions * cache->sets);
406 *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) |
407 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0);
409 /* L3 is shared among multiple cores */
410 if (cache->level == 3) {
411 l3_cores = cores_in_core_complex(cs->nr_cores);
412 *eax |= ((l3_cores * cs->nr_threads) - 1) << 14;
413 } else {
414 *eax |= ((cs->nr_threads - 1) << 14);
417 assert(cache->line_size > 0);
418 assert(cache->partitions > 0);
419 assert(cache->associativity > 0);
420 /* We don't implement fully-associative caches */
421 assert(cache->associativity < cache->sets);
422 *ebx = (cache->line_size - 1) |
423 ((cache->partitions - 1) << 12) |
424 ((cache->associativity - 1) << 22);
426 assert(cache->sets > 0);
427 *ecx = cache->sets - 1;
429 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
430 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
431 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
434 /* Data structure to hold the configuration info for a given core index */
435 struct core_topology {
436 /* core complex id of the current core index */
437 int ccx_id;
439 * Adjusted core index for this core in the topology
440 * This can be 0,1,2,3 with max 4 cores in a core complex
442 int core_id;
443 /* Node id for this core index */
444 int node_id;
445 /* Number of nodes in this config */
446 int num_nodes;
 450 * Build a configuration that closely matches the EPYC hardware, using the
 451 * EPYC hardware configuration values (MAX_CCX, MAX_CORES_IN_CCX,
 452 * MAX_CORES_IN_NODE) for now. This could change in the future.
453 * nr_cores : Total number of cores in the config
454 * core_id : Core index of the current CPU
455 * topo : Data structure to hold all the config info for this core index
457 static void build_core_topology(int nr_cores, int core_id,
458 struct core_topology *topo)
460 int nodes, cores_in_ccx;
462 /* First get the number of nodes required */
463 nodes = nodes_in_socket(nr_cores);
465 cores_in_ccx = cores_in_core_complex(nr_cores);
467 topo->node_id = core_id / (cores_in_ccx * MAX_CCX);
468 topo->ccx_id = (core_id % (cores_in_ccx * MAX_CCX)) / cores_in_ccx;
469 topo->core_id = core_id % cores_in_ccx;
470 topo->num_nodes = nodes;
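/*
 * Worked example, for illustration only: with nr_cores = 12 we get
 * nodes = 2 and cores_in_ccx = 3, so core index 7 maps to node_id = 1,
 * ccx_id = 0 and core_id = 1 within its core complex.
 */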
473 /* Encode cache info for CPUID[8000001E] */
474 static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu,
475 uint32_t *eax, uint32_t *ebx,
476 uint32_t *ecx, uint32_t *edx)
478 struct core_topology topo = {0};
479 unsigned long nodes;
480 int shift;
482 build_core_topology(cs->nr_cores, cpu->core_id, &topo);
483 *eax = cpu->apic_id;
485 * CPUID_Fn8000001E_EBX
486 * 31:16 Reserved
487 * 15:8 Threads per core (The number of threads per core is
488 * Threads per core + 1)
489 * 7:0 Core id (see bit decoding below)
490 * SMT:
491 * 4:3 node id
492 * 2 Core complex id
493 * 1:0 Core id
494 * Non SMT:
495 * 5:4 node id
496 * 3 Core complex id
497 * 1:0 Core id
499 if (cs->nr_threads - 1) {
500 *ebx = ((cs->nr_threads - 1) << 8) | (topo.node_id << 3) |
501 (topo.ccx_id << 2) | topo.core_id;
502 } else {
503 *ebx = (topo.node_id << 4) | (topo.ccx_id << 3) | topo.core_id;
506 * CPUID_Fn8000001E_ECX
507 * 31:11 Reserved
508 * 10:8 Nodes per processor (Nodes per processor is number of nodes + 1)
509 * 7:0 Node id (see bit decoding below)
510 * 2 Socket id
511 * 1:0 Node id
513 if (topo.num_nodes <= 4) {
514 *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << 2) |
515 topo.node_id;
516 } else {
 518 * Node id fix up. Actual hardware supports up to 4 nodes. But with
 519 * more than 32 cores, we may end up with more than 4 nodes.
 520 * The node id is a combination of socket id and node id. The only
 521 * requirement here is that this number should be unique across the system.
 522 * Shift the socket id to accommodate more nodes. We don't expect both the
 523 * socket id and the node id to be large numbers at the same time. This is
 524 * not an ideal config, but we need to support it. The maximum number of
 525 * nodes we can have is 32 (255/8) with 8 cores per node and 255 max cores.
 526 * We only need 5 bits for nodes. Find the leftmost set bit to represent
 527 * the total number of nodes. find_last_bit() returns the last set bit
 528 * (0-based). Left-shift (+1) the socket id to make room for all the nodes.
530 nodes = topo.num_nodes - 1;
531 shift = find_last_bit(&nodes, 8);
532 *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << (shift + 1)) |
533 topo.node_id;
535 *edx = 0;
539 * Definitions of the hardcoded cache entries we expose:
540 * These are legacy cache values. If there is a need to change any
541 * of these values please use builtin_x86_defs
544 /* L1 data cache: */
545 static CPUCacheInfo legacy_l1d_cache = {
546 .type = DATA_CACHE,
547 .level = 1,
548 .size = 32 * KiB,
549 .self_init = 1,
550 .line_size = 64,
551 .associativity = 8,
552 .sets = 64,
553 .partitions = 1,
554 .no_invd_sharing = true,
557 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
558 static CPUCacheInfo legacy_l1d_cache_amd = {
559 .type = DATA_CACHE,
560 .level = 1,
561 .size = 64 * KiB,
562 .self_init = 1,
563 .line_size = 64,
564 .associativity = 2,
565 .sets = 512,
566 .partitions = 1,
567 .lines_per_tag = 1,
568 .no_invd_sharing = true,
571 /* L1 instruction cache: */
572 static CPUCacheInfo legacy_l1i_cache = {
573 .type = INSTRUCTION_CACHE,
574 .level = 1,
575 .size = 32 * KiB,
576 .self_init = 1,
577 .line_size = 64,
578 .associativity = 8,
579 .sets = 64,
580 .partitions = 1,
581 .no_invd_sharing = true,
584 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
585 static CPUCacheInfo legacy_l1i_cache_amd = {
586 .type = INSTRUCTION_CACHE,
587 .level = 1,
588 .size = 64 * KiB,
589 .self_init = 1,
590 .line_size = 64,
591 .associativity = 2,
592 .sets = 512,
593 .partitions = 1,
594 .lines_per_tag = 1,
595 .no_invd_sharing = true,
598 /* Level 2 unified cache: */
599 static CPUCacheInfo legacy_l2_cache = {
600 .type = UNIFIED_CACHE,
601 .level = 2,
602 .size = 4 * MiB,
603 .self_init = 1,
604 .line_size = 64,
605 .associativity = 16,
606 .sets = 4096,
607 .partitions = 1,
608 .no_invd_sharing = true,
611 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
612 static CPUCacheInfo legacy_l2_cache_cpuid2 = {
613 .type = UNIFIED_CACHE,
614 .level = 2,
615 .size = 2 * MiB,
616 .line_size = 64,
617 .associativity = 8,
621 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
622 static CPUCacheInfo legacy_l2_cache_amd = {
623 .type = UNIFIED_CACHE,
624 .level = 2,
625 .size = 512 * KiB,
626 .line_size = 64,
627 .lines_per_tag = 1,
628 .associativity = 16,
629 .sets = 512,
630 .partitions = 1,
633 /* Level 3 unified cache: */
634 static CPUCacheInfo legacy_l3_cache = {
635 .type = UNIFIED_CACHE,
636 .level = 3,
637 .size = 16 * MiB,
638 .line_size = 64,
639 .associativity = 16,
640 .sets = 16384,
641 .partitions = 1,
642 .lines_per_tag = 1,
643 .self_init = true,
644 .inclusive = true,
645 .complex_indexing = true,
648 /* TLB definitions: */
650 #define L1_DTLB_2M_ASSOC 1
651 #define L1_DTLB_2M_ENTRIES 255
652 #define L1_DTLB_4K_ASSOC 1
653 #define L1_DTLB_4K_ENTRIES 255
655 #define L1_ITLB_2M_ASSOC 1
656 #define L1_ITLB_2M_ENTRIES 255
657 #define L1_ITLB_4K_ASSOC 1
658 #define L1_ITLB_4K_ENTRIES 255
660 #define L2_DTLB_2M_ASSOC 0 /* disabled */
661 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
662 #define L2_DTLB_4K_ASSOC 4
663 #define L2_DTLB_4K_ENTRIES 512
665 #define L2_ITLB_2M_ASSOC 0 /* disabled */
666 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
667 #define L2_ITLB_4K_ASSOC 4
668 #define L2_ITLB_4K_ENTRIES 512
670 /* CPUID Leaf 0x14 constants: */
671 #define INTEL_PT_MAX_SUBLEAF 0x1
673 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
674 * MSR can be accessed;
675 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
676 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
677 * of Intel PT MSRs across warm reset;
678 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
680 #define INTEL_PT_MINIMAL_EBX 0xf
682 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
683 * IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
684 * accessed;
685 * bit[01]: ToPA tables can hold any number of output entries, up to the
686 * maximum allowed by the MaskOrTableOffset field of
687 * IA32_RTIT_OUTPUT_MASK_PTRS;
688 * bit[02]: Support Single-Range Output scheme;
690 #define INTEL_PT_MINIMAL_ECX 0x7
691 /* generated packets which contain IP payloads have LIP values */
692 #define INTEL_PT_IP_LIP (1 << 31)
693 #define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
694 #define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
695 #define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */
696 #define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */
697 #define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
699 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
700 uint32_t vendor2, uint32_t vendor3)
702 int i;
703 for (i = 0; i < 4; i++) {
704 dst[i] = vendor1 >> (8 * i);
705 dst[i + 4] = vendor2 >> (8 * i);
706 dst[i + 8] = vendor3 >> (8 * i);
708 dst[CPUID_VENDOR_SZ] = '\0';
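/*
 * For example, CPUID leaf 0 on an Intel host returns EBX/EDX/ECX values that,
 * combined in this order (as done by host_vendor_fms() below), spell out
 * "GenuineIntel"; on an AMD host they spell "AuthenticAMD".
 */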
711 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
712 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
713 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
714 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
715 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
716 CPUID_PSE36 | CPUID_FXSR)
717 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
718 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
719 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
720 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
721 CPUID_PAE | CPUID_SEP | CPUID_APIC)
723 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
724 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
725 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
726 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
727 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
728 /* partly implemented:
729 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
730 /* missing:
731 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
732 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
733 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
734 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
735 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
736 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR | \
737 CPUID_EXT_RDRAND)
738 /* missing:
739 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
740 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
741 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
742 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
743 CPUID_EXT_F16C */
745 #ifdef TARGET_X86_64
746 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
747 #else
748 #define TCG_EXT2_X86_64_FEATURES 0
749 #endif
751 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
752 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
753 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
754 TCG_EXT2_X86_64_FEATURES)
755 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
756 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
757 #define TCG_EXT4_FEATURES 0
758 #define TCG_SVM_FEATURES CPUID_SVM_NPT
759 #define TCG_KVM_FEATURES 0
760 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
761 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
762 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
763 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
764 CPUID_7_0_EBX_ERMS)
765 /* missing:
766 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
767 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
768 CPUID_7_0_EBX_RDSEED */
769 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \
770 /* CPUID_7_0_ECX_OSPKE is dynamic */ \
771 CPUID_7_0_ECX_LA57)
772 #define TCG_7_0_EDX_FEATURES 0
773 #define TCG_7_1_EAX_FEATURES 0
774 #define TCG_APM_FEATURES 0
775 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
776 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
777 /* missing:
778 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
780 typedef enum FeatureWordType {
781 CPUID_FEATURE_WORD,
782 MSR_FEATURE_WORD,
783 } FeatureWordType;
785 typedef struct FeatureWordInfo {
786 FeatureWordType type;
 787 /* feature flag names are taken from "Intel Processor Identification and
788 * the CPUID Instruction" and AMD's "CPUID Specification".
789 * In cases of disagreement between feature naming conventions,
790 * aliases may be added.
792 const char *feat_names[64];
793 union {
794 /* If type==CPUID_FEATURE_WORD */
795 struct {
796 uint32_t eax; /* Input EAX for CPUID */
797 bool needs_ecx; /* CPUID instruction uses ECX as input */
798 uint32_t ecx; /* Input ECX value for CPUID */
799 int reg; /* output register (R_* constant) */
800 } cpuid;
801 /* If type==MSR_FEATURE_WORD */
802 struct {
803 uint32_t index;
804 } msr;
806 uint64_t tcg_features; /* Feature flags supported by TCG */
807 uint64_t unmigratable_flags; /* Feature flags known to be unmigratable */
808 uint64_t migratable_flags; /* Feature flags known to be migratable */
809 /* Features that shouldn't be auto-enabled by "-cpu host" */
810 uint64_t no_autoenable_flags;
811 } FeatureWordInfo;
813 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
814 [FEAT_1_EDX] = {
815 .type = CPUID_FEATURE_WORD,
816 .feat_names = {
817 "fpu", "vme", "de", "pse",
818 "tsc", "msr", "pae", "mce",
819 "cx8", "apic", NULL, "sep",
820 "mtrr", "pge", "mca", "cmov",
821 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
822 NULL, "ds" /* Intel dts */, "acpi", "mmx",
823 "fxsr", "sse", "sse2", "ss",
824 "ht" /* Intel htt */, "tm", "ia64", "pbe",
826 .cpuid = {.eax = 1, .reg = R_EDX, },
827 .tcg_features = TCG_FEATURES,
829 [FEAT_1_ECX] = {
830 .type = CPUID_FEATURE_WORD,
831 .feat_names = {
832 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
833 "ds-cpl", "vmx", "smx", "est",
834 "tm2", "ssse3", "cid", NULL,
835 "fma", "cx16", "xtpr", "pdcm",
836 NULL, "pcid", "dca", "sse4.1",
837 "sse4.2", "x2apic", "movbe", "popcnt",
838 "tsc-deadline", "aes", "xsave", NULL /* osxsave */,
839 "avx", "f16c", "rdrand", "hypervisor",
841 .cpuid = { .eax = 1, .reg = R_ECX, },
842 .tcg_features = TCG_EXT_FEATURES,
 844 /* Feature names that are already defined in feature_name[] but are
 845 * set in CPUID[8000_0001].EDX on AMD CPUs don't have their names listed
 846 * in feat_names below. They are copied automatically to
 847 * features[FEAT_8000_0001_EDX] if and only if the CPU vendor is AMD.
849 [FEAT_8000_0001_EDX] = {
850 .type = CPUID_FEATURE_WORD,
851 .feat_names = {
852 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
853 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
854 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
855 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
856 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
857 "nx", NULL, "mmxext", NULL /* mmx */,
858 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
859 NULL, "lm", "3dnowext", "3dnow",
861 .cpuid = { .eax = 0x80000001, .reg = R_EDX, },
862 .tcg_features = TCG_EXT2_FEATURES,
864 [FEAT_8000_0001_ECX] = {
865 .type = CPUID_FEATURE_WORD,
866 .feat_names = {
867 "lahf-lm", "cmp-legacy", "svm", "extapic",
868 "cr8legacy", "abm", "sse4a", "misalignsse",
869 "3dnowprefetch", "osvw", "ibs", "xop",
870 "skinit", "wdt", NULL, "lwp",
871 "fma4", "tce", NULL, "nodeid-msr",
872 NULL, "tbm", "topoext", "perfctr-core",
873 "perfctr-nb", NULL, NULL, NULL,
874 NULL, NULL, NULL, NULL,
876 .cpuid = { .eax = 0x80000001, .reg = R_ECX, },
877 .tcg_features = TCG_EXT3_FEATURES,
879 * TOPOEXT is always allowed but can't be enabled blindly by
880 * "-cpu host", as it requires consistent cache topology info
881 * to be provided so it doesn't confuse guests.
883 .no_autoenable_flags = CPUID_EXT3_TOPOEXT,
885 [FEAT_C000_0001_EDX] = {
886 .type = CPUID_FEATURE_WORD,
887 .feat_names = {
888 NULL, NULL, "xstore", "xstore-en",
889 NULL, NULL, "xcrypt", "xcrypt-en",
890 "ace2", "ace2-en", "phe", "phe-en",
891 "pmm", "pmm-en", NULL, NULL,
892 NULL, NULL, NULL, NULL,
893 NULL, NULL, NULL, NULL,
894 NULL, NULL, NULL, NULL,
895 NULL, NULL, NULL, NULL,
897 .cpuid = { .eax = 0xC0000001, .reg = R_EDX, },
898 .tcg_features = TCG_EXT4_FEATURES,
900 [FEAT_KVM] = {
901 .type = CPUID_FEATURE_WORD,
902 .feat_names = {
903 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
904 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
905 NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi",
906 "kvm-poll-control", "kvm-pv-sched-yield", NULL, NULL,
907 NULL, NULL, NULL, NULL,
908 NULL, NULL, NULL, NULL,
909 "kvmclock-stable-bit", NULL, NULL, NULL,
910 NULL, NULL, NULL, NULL,
912 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EAX, },
913 .tcg_features = TCG_KVM_FEATURES,
915 [FEAT_KVM_HINTS] = {
916 .type = CPUID_FEATURE_WORD,
917 .feat_names = {
918 "kvm-hint-dedicated", NULL, NULL, NULL,
919 NULL, NULL, NULL, NULL,
920 NULL, NULL, NULL, NULL,
921 NULL, NULL, NULL, NULL,
922 NULL, NULL, NULL, NULL,
923 NULL, NULL, NULL, NULL,
924 NULL, NULL, NULL, NULL,
925 NULL, NULL, NULL, NULL,
927 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EDX, },
928 .tcg_features = TCG_KVM_FEATURES,
 930 * KVM hints aren't auto-enabled by -cpu host; they need to be
 931 * explicitly enabled on the command line.
933 .no_autoenable_flags = ~0U,
 936 * .feat_names are commented out for Hyper-V enlightenments because we
 937 * don't want to have two different ways of enabling them on the QEMU
 938 * command line. Some features (e.g. "hyperv_time", "hyperv_vapic", ...)
 939 * require enabling several feature bits simultaneously; exposing these
 940 * bits individually may just confuse guests.
942 [FEAT_HYPERV_EAX] = {
943 .type = CPUID_FEATURE_WORD,
944 .feat_names = {
945 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
946 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
947 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
948 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
949 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
950 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
951 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
952 NULL, NULL,
953 NULL, NULL, NULL, NULL,
954 NULL, NULL, NULL, NULL,
955 NULL, NULL, NULL, NULL,
956 NULL, NULL, NULL, NULL,
958 .cpuid = { .eax = 0x40000003, .reg = R_EAX, },
960 [FEAT_HYPERV_EBX] = {
961 .type = CPUID_FEATURE_WORD,
962 .feat_names = {
963 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
964 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
965 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
966 NULL /* hv_create_port */, NULL /* hv_connect_port */,
967 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
968 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
969 NULL, NULL,
970 NULL, NULL, NULL, NULL,
971 NULL, NULL, NULL, NULL,
972 NULL, NULL, NULL, NULL,
973 NULL, NULL, NULL, NULL,
975 .cpuid = { .eax = 0x40000003, .reg = R_EBX, },
977 [FEAT_HYPERV_EDX] = {
978 .type = CPUID_FEATURE_WORD,
979 .feat_names = {
980 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
981 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
982 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
983 NULL, NULL,
984 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
985 NULL, NULL, NULL, NULL,
986 NULL, NULL, NULL, NULL,
987 NULL, NULL, NULL, NULL,
988 NULL, NULL, NULL, NULL,
989 NULL, NULL, NULL, NULL,
991 .cpuid = { .eax = 0x40000003, .reg = R_EDX, },
993 [FEAT_HV_RECOMM_EAX] = {
994 .type = CPUID_FEATURE_WORD,
995 .feat_names = {
996 NULL /* hv_recommend_pv_as_switch */,
997 NULL /* hv_recommend_pv_tlbflush_local */,
998 NULL /* hv_recommend_pv_tlbflush_remote */,
999 NULL /* hv_recommend_msr_apic_access */,
1000 NULL /* hv_recommend_msr_reset */,
1001 NULL /* hv_recommend_relaxed_timing */,
1002 NULL /* hv_recommend_dma_remapping */,
1003 NULL /* hv_recommend_int_remapping */,
1004 NULL /* hv_recommend_x2apic_msrs */,
1005 NULL /* hv_recommend_autoeoi_deprecation */,
1006 NULL /* hv_recommend_pv_ipi */,
1007 NULL /* hv_recommend_ex_hypercalls */,
1008 NULL /* hv_hypervisor_is_nested */,
1009 NULL /* hv_recommend_int_mbec */,
1010 NULL /* hv_recommend_evmcs */,
1011 NULL,
1012 NULL, NULL, NULL, NULL,
1013 NULL, NULL, NULL, NULL,
1014 NULL, NULL, NULL, NULL,
1015 NULL, NULL, NULL, NULL,
1017 .cpuid = { .eax = 0x40000004, .reg = R_EAX, },
1019 [FEAT_HV_NESTED_EAX] = {
1020 .type = CPUID_FEATURE_WORD,
1021 .cpuid = { .eax = 0x4000000A, .reg = R_EAX, },
1023 [FEAT_SVM] = {
1024 .type = CPUID_FEATURE_WORD,
1025 .feat_names = {
1026 "npt", "lbrv", "svm-lock", "nrip-save",
1027 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
1028 NULL, NULL, "pause-filter", NULL,
1029 "pfthreshold", NULL, NULL, NULL,
1030 NULL, NULL, NULL, NULL,
1031 NULL, NULL, NULL, NULL,
1032 NULL, NULL, NULL, NULL,
1033 NULL, NULL, NULL, NULL,
1035 .cpuid = { .eax = 0x8000000A, .reg = R_EDX, },
1036 .tcg_features = TCG_SVM_FEATURES,
1038 [FEAT_7_0_EBX] = {
1039 .type = CPUID_FEATURE_WORD,
1040 .feat_names = {
1041 "fsgsbase", "tsc-adjust", NULL, "bmi1",
1042 "hle", "avx2", NULL, "smep",
1043 "bmi2", "erms", "invpcid", "rtm",
1044 NULL, NULL, "mpx", NULL,
1045 "avx512f", "avx512dq", "rdseed", "adx",
1046 "smap", "avx512ifma", "pcommit", "clflushopt",
1047 "clwb", "intel-pt", "avx512pf", "avx512er",
1048 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
1050 .cpuid = {
1051 .eax = 7,
1052 .needs_ecx = true, .ecx = 0,
1053 .reg = R_EBX,
1055 .tcg_features = TCG_7_0_EBX_FEATURES,
1057 [FEAT_7_0_ECX] = {
1058 .type = CPUID_FEATURE_WORD,
1059 .feat_names = {
1060 NULL, "avx512vbmi", "umip", "pku",
1061 NULL /* ospke */, "waitpkg", "avx512vbmi2", NULL,
1062 "gfni", "vaes", "vpclmulqdq", "avx512vnni",
1063 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
1064 "la57", NULL, NULL, NULL,
1065 NULL, NULL, "rdpid", NULL,
1066 NULL, "cldemote", NULL, "movdiri",
1067 "movdir64b", NULL, NULL, NULL,
1069 .cpuid = {
1070 .eax = 7,
1071 .needs_ecx = true, .ecx = 0,
1072 .reg = R_ECX,
1074 .tcg_features = TCG_7_0_ECX_FEATURES,
1076 [FEAT_7_0_EDX] = {
1077 .type = CPUID_FEATURE_WORD,
1078 .feat_names = {
1079 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
1080 NULL, NULL, NULL, NULL,
1081 NULL, NULL, "md-clear", NULL,
1082 NULL, NULL, NULL, NULL,
1083 NULL, NULL, NULL /* pconfig */, NULL,
1084 NULL, NULL, NULL, NULL,
1085 NULL, NULL, "spec-ctrl", "stibp",
1086 NULL, "arch-capabilities", "core-capability", "ssbd",
1088 .cpuid = {
1089 .eax = 7,
1090 .needs_ecx = true, .ecx = 0,
1091 .reg = R_EDX,
1093 .tcg_features = TCG_7_0_EDX_FEATURES,
1095 [FEAT_7_1_EAX] = {
1096 .type = CPUID_FEATURE_WORD,
1097 .feat_names = {
1098 NULL, NULL, NULL, NULL,
1099 NULL, "avx512-bf16", NULL, NULL,
1100 NULL, NULL, NULL, NULL,
1101 NULL, NULL, NULL, NULL,
1102 NULL, NULL, NULL, NULL,
1103 NULL, NULL, NULL, NULL,
1104 NULL, NULL, NULL, NULL,
1105 NULL, NULL, NULL, NULL,
1107 .cpuid = {
1108 .eax = 7,
1109 .needs_ecx = true, .ecx = 1,
1110 .reg = R_EAX,
1112 .tcg_features = TCG_7_1_EAX_FEATURES,
1114 [FEAT_8000_0007_EDX] = {
1115 .type = CPUID_FEATURE_WORD,
1116 .feat_names = {
1117 NULL, NULL, NULL, NULL,
1118 NULL, NULL, NULL, NULL,
1119 "invtsc", NULL, NULL, NULL,
1120 NULL, NULL, NULL, NULL,
1121 NULL, NULL, NULL, NULL,
1122 NULL, NULL, NULL, NULL,
1123 NULL, NULL, NULL, NULL,
1124 NULL, NULL, NULL, NULL,
1126 .cpuid = { .eax = 0x80000007, .reg = R_EDX, },
1127 .tcg_features = TCG_APM_FEATURES,
1128 .unmigratable_flags = CPUID_APM_INVTSC,
1130 [FEAT_8000_0008_EBX] = {
1131 .type = CPUID_FEATURE_WORD,
1132 .feat_names = {
1133 "clzero", NULL, "xsaveerptr", NULL,
1134 NULL, NULL, NULL, NULL,
1135 NULL, "wbnoinvd", NULL, NULL,
1136 "ibpb", NULL, NULL, NULL,
1137 NULL, NULL, NULL, NULL,
1138 NULL, NULL, NULL, NULL,
1139 "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL,
1140 NULL, NULL, NULL, NULL,
1142 .cpuid = { .eax = 0x80000008, .reg = R_EBX, },
1143 .tcg_features = 0,
1144 .unmigratable_flags = 0,
1146 [FEAT_XSAVE] = {
1147 .type = CPUID_FEATURE_WORD,
1148 .feat_names = {
1149 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
1150 NULL, NULL, NULL, NULL,
1151 NULL, NULL, NULL, NULL,
1152 NULL, NULL, NULL, NULL,
1153 NULL, NULL, NULL, NULL,
1154 NULL, NULL, NULL, NULL,
1155 NULL, NULL, NULL, NULL,
1156 NULL, NULL, NULL, NULL,
1158 .cpuid = {
1159 .eax = 0xd,
1160 .needs_ecx = true, .ecx = 1,
1161 .reg = R_EAX,
1163 .tcg_features = TCG_XSAVE_FEATURES,
1165 [FEAT_6_EAX] = {
1166 .type = CPUID_FEATURE_WORD,
1167 .feat_names = {
1168 NULL, NULL, "arat", NULL,
1169 NULL, NULL, NULL, NULL,
1170 NULL, NULL, NULL, NULL,
1171 NULL, NULL, NULL, NULL,
1172 NULL, NULL, NULL, NULL,
1173 NULL, NULL, NULL, NULL,
1174 NULL, NULL, NULL, NULL,
1175 NULL, NULL, NULL, NULL,
1177 .cpuid = { .eax = 6, .reg = R_EAX, },
1178 .tcg_features = TCG_6_EAX_FEATURES,
1180 [FEAT_XSAVE_COMP_LO] = {
1181 .type = CPUID_FEATURE_WORD,
1182 .cpuid = {
1183 .eax = 0xD,
1184 .needs_ecx = true, .ecx = 0,
1185 .reg = R_EAX,
1187 .tcg_features = ~0U,
1188 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
1189 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
1190 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
1191 XSTATE_PKRU_MASK,
1193 [FEAT_XSAVE_COMP_HI] = {
1194 .type = CPUID_FEATURE_WORD,
1195 .cpuid = {
1196 .eax = 0xD,
1197 .needs_ecx = true, .ecx = 0,
1198 .reg = R_EDX,
1200 .tcg_features = ~0U,
1202 /*Below are MSR exposed features*/
1203 [FEAT_ARCH_CAPABILITIES] = {
1204 .type = MSR_FEATURE_WORD,
1205 .feat_names = {
1206 "rdctl-no", "ibrs-all", "rsba", "skip-l1dfl-vmentry",
1207 "ssb-no", "mds-no", "pschange-mc-no", "tsx-ctrl",
1208 "taa-no", NULL, NULL, NULL,
1209 NULL, NULL, NULL, NULL,
1210 NULL, NULL, NULL, NULL,
1211 NULL, NULL, NULL, NULL,
1212 NULL, NULL, NULL, NULL,
1213 NULL, NULL, NULL, NULL,
1215 .msr = {
1216 .index = MSR_IA32_ARCH_CAPABILITIES,
1219 [FEAT_CORE_CAPABILITY] = {
1220 .type = MSR_FEATURE_WORD,
1221 .feat_names = {
1222 NULL, NULL, NULL, NULL,
1223 NULL, "split-lock-detect", NULL, NULL,
1224 NULL, NULL, NULL, NULL,
1225 NULL, NULL, NULL, NULL,
1226 NULL, NULL, NULL, NULL,
1227 NULL, NULL, NULL, NULL,
1228 NULL, NULL, NULL, NULL,
1229 NULL, NULL, NULL, NULL,
1231 .msr = {
1232 .index = MSR_IA32_CORE_CAPABILITY,
1236 [FEAT_VMX_PROCBASED_CTLS] = {
1237 .type = MSR_FEATURE_WORD,
1238 .feat_names = {
1239 NULL, NULL, "vmx-vintr-pending", "vmx-tsc-offset",
1240 NULL, NULL, NULL, "vmx-hlt-exit",
1241 NULL, "vmx-invlpg-exit", "vmx-mwait-exit", "vmx-rdpmc-exit",
1242 "vmx-rdtsc-exit", NULL, NULL, "vmx-cr3-load-noexit",
1243 "vmx-cr3-store-noexit", NULL, NULL, "vmx-cr8-load-exit",
1244 "vmx-cr8-store-exit", "vmx-flexpriority", "vmx-vnmi-pending", "vmx-movdr-exit",
1245 "vmx-io-exit", "vmx-io-bitmap", NULL, "vmx-mtf",
1246 "vmx-msr-bitmap", "vmx-monitor-exit", "vmx-pause-exit", "vmx-secondary-ctls",
1248 .msr = {
1249 .index = MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
1253 [FEAT_VMX_SECONDARY_CTLS] = {
1254 .type = MSR_FEATURE_WORD,
1255 .feat_names = {
1256 "vmx-apicv-xapic", "vmx-ept", "vmx-desc-exit", "vmx-rdtscp-exit",
1257 "vmx-apicv-x2apic", "vmx-vpid", "vmx-wbinvd-exit", "vmx-unrestricted-guest",
1258 "vmx-apicv-register", "vmx-apicv-vid", "vmx-ple", "vmx-rdrand-exit",
1259 "vmx-invpcid-exit", "vmx-vmfunc", "vmx-shadow-vmcs", "vmx-encls-exit",
1260 "vmx-rdseed-exit", "vmx-pml", NULL, NULL,
1261 "vmx-xsaves", NULL, NULL, NULL,
1262 NULL, NULL, NULL, NULL,
1263 NULL, NULL, NULL, NULL,
1265 .msr = {
1266 .index = MSR_IA32_VMX_PROCBASED_CTLS2,
1270 [FEAT_VMX_PINBASED_CTLS] = {
1271 .type = MSR_FEATURE_WORD,
1272 .feat_names = {
1273 "vmx-intr-exit", NULL, NULL, "vmx-nmi-exit",
1274 NULL, "vmx-vnmi", "vmx-preemption-timer", "vmx-posted-intr",
1275 NULL, NULL, NULL, NULL,
1276 NULL, NULL, NULL, NULL,
1277 NULL, NULL, NULL, NULL,
1278 NULL, NULL, NULL, NULL,
1279 NULL, NULL, NULL, NULL,
1280 NULL, NULL, NULL, NULL,
1282 .msr = {
1283 .index = MSR_IA32_VMX_TRUE_PINBASED_CTLS,
1287 [FEAT_VMX_EXIT_CTLS] = {
1288 .type = MSR_FEATURE_WORD,
1290 * VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE is copied from
1291 * the LM CPUID bit.
1293 .feat_names = {
1294 NULL, NULL, "vmx-exit-nosave-debugctl", NULL,
1295 NULL, NULL, NULL, NULL,
1296 NULL, NULL /* vmx-exit-host-addr-space-size */, NULL, NULL,
1297 "vmx-exit-load-perf-global-ctrl", NULL, NULL, "vmx-exit-ack-intr",
1298 NULL, NULL, "vmx-exit-save-pat", "vmx-exit-load-pat",
1299 "vmx-exit-save-efer", "vmx-exit-load-efer",
1300 "vmx-exit-save-preemption-timer", "vmx-exit-clear-bndcfgs",
1301 NULL, "vmx-exit-clear-rtit-ctl", NULL, NULL,
1302 NULL, NULL, NULL, NULL,
1304 .msr = {
1305 .index = MSR_IA32_VMX_TRUE_EXIT_CTLS,
1309 [FEAT_VMX_ENTRY_CTLS] = {
1310 .type = MSR_FEATURE_WORD,
1311 .feat_names = {
1312 NULL, NULL, "vmx-entry-noload-debugctl", NULL,
1313 NULL, NULL, NULL, NULL,
1314 NULL, "vmx-entry-ia32e-mode", NULL, NULL,
1315 NULL, "vmx-entry-load-perf-global-ctrl", "vmx-entry-load-pat", "vmx-entry-load-efer",
1316 "vmx-entry-load-bndcfgs", NULL, "vmx-entry-load-rtit-ctl", NULL,
1317 NULL, NULL, NULL, NULL,
1318 NULL, NULL, NULL, NULL,
1319 NULL, NULL, NULL, NULL,
1321 .msr = {
1322 .index = MSR_IA32_VMX_TRUE_ENTRY_CTLS,
1326 [FEAT_VMX_MISC] = {
1327 .type = MSR_FEATURE_WORD,
1328 .feat_names = {
1329 NULL, NULL, NULL, NULL,
1330 NULL, "vmx-store-lma", "vmx-activity-hlt", "vmx-activity-shutdown",
1331 "vmx-activity-wait-sipi", NULL, NULL, NULL,
1332 NULL, NULL, NULL, NULL,
1333 NULL, NULL, NULL, NULL,
1334 NULL, NULL, NULL, NULL,
1335 NULL, NULL, NULL, NULL,
1336 NULL, "vmx-vmwrite-vmexit-fields", "vmx-zero-len-inject", NULL,
1338 .msr = {
1339 .index = MSR_IA32_VMX_MISC,
1343 [FEAT_VMX_EPT_VPID_CAPS] = {
1344 .type = MSR_FEATURE_WORD,
1345 .feat_names = {
1346 "vmx-ept-execonly", NULL, NULL, NULL,
1347 NULL, NULL, "vmx-page-walk-4", "vmx-page-walk-5",
1348 NULL, NULL, NULL, NULL,
1349 NULL, NULL, NULL, NULL,
1350 "vmx-ept-2mb", "vmx-ept-1gb", NULL, NULL,
1351 "vmx-invept", "vmx-eptad", "vmx-ept-advanced-exitinfo", NULL,
1352 NULL, "vmx-invept-single-context", "vmx-invept-all-context", NULL,
1353 NULL, NULL, NULL, NULL,
1354 "vmx-invvpid", NULL, NULL, NULL,
1355 NULL, NULL, NULL, NULL,
1356 "vmx-invvpid-single-addr", "vmx-invept-single-context",
1357 "vmx-invvpid-all-context", "vmx-invept-single-context-noglobals",
1358 NULL, NULL, NULL, NULL,
1359 NULL, NULL, NULL, NULL,
1360 NULL, NULL, NULL, NULL,
1361 NULL, NULL, NULL, NULL,
1362 NULL, NULL, NULL, NULL,
1364 .msr = {
1365 .index = MSR_IA32_VMX_EPT_VPID_CAP,
1369 [FEAT_VMX_BASIC] = {
1370 .type = MSR_FEATURE_WORD,
1371 .feat_names = {
1372 [54] = "vmx-ins-outs",
1373 [55] = "vmx-true-ctls",
1375 .msr = {
1376 .index = MSR_IA32_VMX_BASIC,
1378 /* Just to be safe - we don't support setting the MSEG version field. */
1379 .no_autoenable_flags = MSR_VMX_BASIC_DUAL_MONITOR,
1382 [FEAT_VMX_VMFUNC] = {
1383 .type = MSR_FEATURE_WORD,
1384 .feat_names = {
1385 [0] = "vmx-eptp-switching",
1387 .msr = {
1388 .index = MSR_IA32_VMX_VMFUNC,
1394 typedef struct FeatureMask {
1395 FeatureWord index;
1396 uint64_t mask;
1397 } FeatureMask;
1399 typedef struct FeatureDep {
1400 FeatureMask from, to;
1401 } FeatureDep;
1403 static FeatureDep feature_dependencies[] = {
1405 .from = { FEAT_7_0_EDX, CPUID_7_0_EDX_ARCH_CAPABILITIES },
1406 .to = { FEAT_ARCH_CAPABILITIES, ~0ull },
1409 .from = { FEAT_7_0_EDX, CPUID_7_0_EDX_CORE_CAPABILITY },
1410 .to = { FEAT_CORE_CAPABILITY, ~0ull },
1413 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1414 .to = { FEAT_VMX_PROCBASED_CTLS, ~0ull },
1417 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1418 .to = { FEAT_VMX_PINBASED_CTLS, ~0ull },
1421 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1422 .to = { FEAT_VMX_EXIT_CTLS, ~0ull },
1425 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1426 .to = { FEAT_VMX_ENTRY_CTLS, ~0ull },
1429 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1430 .to = { FEAT_VMX_MISC, ~0ull },
1433 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1434 .to = { FEAT_VMX_BASIC, ~0ull },
1437 .from = { FEAT_8000_0001_EDX, CPUID_EXT2_LM },
1438 .to = { FEAT_VMX_ENTRY_CTLS, VMX_VM_ENTRY_IA32E_MODE },
1441 .from = { FEAT_VMX_PROCBASED_CTLS, VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS },
1442 .to = { FEAT_VMX_SECONDARY_CTLS, ~0ull },
1445 .from = { FEAT_XSAVE, CPUID_XSAVE_XSAVES },
1446 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_XSAVES },
1449 .from = { FEAT_1_ECX, CPUID_EXT_RDRAND },
1450 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDRAND_EXITING },
1453 .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_INVPCID },
1454 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_INVPCID },
1457 .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_RDSEED },
1458 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDSEED_EXITING },
1461 .from = { FEAT_8000_0001_EDX, CPUID_EXT2_RDTSCP },
1462 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDTSCP },
1465 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_EPT },
1466 .to = { FEAT_VMX_EPT_VPID_CAPS, 0xffffffffull },
1469 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_EPT },
1470 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST },
1473 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_VPID },
1474 .to = { FEAT_VMX_EPT_VPID_CAPS, 0xffffffffull << 32 },
1477 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_VMFUNC },
1478 .to = { FEAT_VMX_VMFUNC, ~0ull },
1482 typedef struct X86RegisterInfo32 {
1483 /* Name of register */
1484 const char *name;
1485 /* QAPI enum value register */
1486 X86CPURegister32 qapi_enum;
1487 } X86RegisterInfo32;
1489 #define REGISTER(reg) \
1490 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
1491 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
1492 REGISTER(EAX),
1493 REGISTER(ECX),
1494 REGISTER(EDX),
1495 REGISTER(EBX),
1496 REGISTER(ESP),
1497 REGISTER(EBP),
1498 REGISTER(ESI),
1499 REGISTER(EDI),
1501 #undef REGISTER
1503 typedef struct ExtSaveArea {
1504 uint32_t feature, bits;
1505 uint32_t offset, size;
1506 } ExtSaveArea;
1508 static const ExtSaveArea x86_ext_save_areas[] = {
1509 [XSTATE_FP_BIT] = {
1510 /* x87 FP state component is always enabled if XSAVE is supported */
1511 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
1512 /* x87 state is in the legacy region of the XSAVE area */
1513 .offset = 0,
1514 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
1516 [XSTATE_SSE_BIT] = {
1517 /* SSE state component is always enabled if XSAVE is supported */
1518 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
1519 /* SSE state is in the legacy region of the XSAVE area */
1520 .offset = 0,
1521 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
1523 [XSTATE_YMM_BIT] =
1524 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
1525 .offset = offsetof(X86XSaveArea, avx_state),
1526 .size = sizeof(XSaveAVX) },
1527 [XSTATE_BNDREGS_BIT] =
1528 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
1529 .offset = offsetof(X86XSaveArea, bndreg_state),
1530 .size = sizeof(XSaveBNDREG) },
1531 [XSTATE_BNDCSR_BIT] =
1532 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
1533 .offset = offsetof(X86XSaveArea, bndcsr_state),
1534 .size = sizeof(XSaveBNDCSR) },
1535 [XSTATE_OPMASK_BIT] =
1536 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1537 .offset = offsetof(X86XSaveArea, opmask_state),
1538 .size = sizeof(XSaveOpmask) },
1539 [XSTATE_ZMM_Hi256_BIT] =
1540 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1541 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
1542 .size = sizeof(XSaveZMM_Hi256) },
1543 [XSTATE_Hi16_ZMM_BIT] =
1544 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1545 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
1546 .size = sizeof(XSaveHi16_ZMM) },
1547 [XSTATE_PKRU_BIT] =
1548 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
1549 .offset = offsetof(X86XSaveArea, pkru_state),
1550 .size = sizeof(XSavePKRU) },
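/*
 * xsave_area_size() below returns the number of bytes an XSAVE area needs
 * in order to hold every state component enabled in @mask, i.e. the highest
 * (offset + size) among the enabled x86_ext_save_areas entries above.
 */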
1553 static uint32_t xsave_area_size(uint64_t mask)
1555 int i;
1556 uint64_t ret = 0;
1558 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
1559 const ExtSaveArea *esa = &x86_ext_save_areas[i];
1560 if ((mask >> i) & 1) {
1561 ret = MAX(ret, esa->offset + esa->size);
1564 return ret;
1567 static inline bool accel_uses_host_cpuid(void)
1569 return kvm_enabled() || hvf_enabled();
1572 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
1574 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
1575 cpu->env.features[FEAT_XSAVE_COMP_LO];
1578 const char *get_register_name_32(unsigned int reg)
1580 if (reg >= CPU_NB_REGS32) {
1581 return NULL;
1583 return x86_reg_info_32[reg].name;
1587 * Returns the set of feature flags that are supported and migratable by
1588 * QEMU, for a given FeatureWord.
1590 static uint64_t x86_cpu_get_migratable_flags(FeatureWord w)
1592 FeatureWordInfo *wi = &feature_word_info[w];
1593 uint64_t r = 0;
1594 int i;
1596 for (i = 0; i < 64; i++) {
1597 uint64_t f = 1ULL << i;
1599 /* If the feature name is known, it is implicitly considered migratable,
1600 * unless it is explicitly set in unmigratable_flags */
1601 if ((wi->migratable_flags & f) ||
1602 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
1603 r |= f;
1606 return r;
1609 void host_cpuid(uint32_t function, uint32_t count,
1610 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
1612 uint32_t vec[4];
1614 #ifdef __x86_64__
1615 asm volatile("cpuid"
1616 : "=a"(vec[0]), "=b"(vec[1]),
1617 "=c"(vec[2]), "=d"(vec[3])
1618 : "0"(function), "c"(count) : "cc");
1619 #elif defined(__i386__)
1620 asm volatile("pusha \n\t"
1621 "cpuid \n\t"
1622 "mov %%eax, 0(%2) \n\t"
1623 "mov %%ebx, 4(%2) \n\t"
1624 "mov %%ecx, 8(%2) \n\t"
1625 "mov %%edx, 12(%2) \n\t"
1626 "popa"
1627 : : "a"(function), "c"(count), "S"(vec)
1628 : "memory", "cc");
1629 #else
1630 abort();
1631 #endif
1633 if (eax)
1634 *eax = vec[0];
1635 if (ebx)
1636 *ebx = vec[1];
1637 if (ecx)
1638 *ecx = vec[2];
1639 if (edx)
1640 *edx = vec[3];
1643 void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
1645 uint32_t eax, ebx, ecx, edx;
1647 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1648 x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
1650 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1651 if (family) {
1652 *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1654 if (model) {
1655 *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1657 if (stepping) {
1658 *stepping = eax & 0x0F;
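/*
 * For example, a host reporting CPUID.1 EAX = 0x000506E3 (a Skylake client
 * signature) decodes with the extractions above to family 6, model 0x5E (94),
 * stepping 3.
 */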
1662 /* CPU class name definitions: */
1664 /* Return type name for a given CPU model name
1665 * Caller is responsible for freeing the returned string.
1667 static char *x86_cpu_type_name(const char *model_name)
1669 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
1672 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
1674 g_autofree char *typename = x86_cpu_type_name(cpu_model);
1675 return object_class_by_name(typename);
1678 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
1680 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
1681 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
1682 return g_strndup(class_name,
1683 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
1686 typedef struct PropValue {
1687 const char *prop, *value;
1688 } PropValue;
1690 typedef struct X86CPUVersionDefinition {
1691 X86CPUVersion version;
1692 const char *alias;
1693 PropValue *props;
1694 } X86CPUVersionDefinition;
1696 /* Base definition for a CPU model */
1697 typedef struct X86CPUDefinition {
1698 const char *name;
1699 uint32_t level;
1700 uint32_t xlevel;
1701 /* vendor is zero-terminated, 12 character ASCII string */
1702 char vendor[CPUID_VENDOR_SZ + 1];
1703 int family;
1704 int model;
1705 int stepping;
1706 FeatureWordArray features;
1707 const char *model_id;
1708 CPUCaches *cache_info;
1710 * Definitions for alternative versions of CPU model.
1711 * List is terminated by item with version == 0.
1712 * If NULL, version 1 will be registered automatically.
1714 const X86CPUVersionDefinition *versions;
1715 } X86CPUDefinition;
1717 /* Reference to a specific CPU model version */
1718 struct X86CPUModel {
1719 /* Base CPU definition */
1720 X86CPUDefinition *cpudef;
1721 /* CPU model version */
1722 X86CPUVersion version;
1724 * If true, this is an alias CPU model.
1725 * This matters only for "-cpu help" and query-cpu-definitions
1727 bool is_alias;
1730 /* Get full model name for CPU version */
1731 static char *x86_cpu_versioned_model_name(X86CPUDefinition *cpudef,
1732 X86CPUVersion version)
1734 assert(version > 0);
1735 return g_strdup_printf("%s-v%d", cpudef->name, (int)version);
1738 static const X86CPUVersionDefinition *x86_cpu_def_get_versions(X86CPUDefinition *def)
1740 /* When X86CPUDefinition::versions is NULL, we register only v1 */
1741 static const X86CPUVersionDefinition default_version_list[] = {
1742 { 1 },
1743 { /* end of list */ }
1746 return def->versions ?: default_version_list;
1749 static CPUCaches epyc_cache_info = {
1750 .l1d_cache = &(CPUCacheInfo) {
1751 .type = DATA_CACHE,
1752 .level = 1,
1753 .size = 32 * KiB,
1754 .line_size = 64,
1755 .associativity = 8,
1756 .partitions = 1,
1757 .sets = 64,
1758 .lines_per_tag = 1,
1759 .self_init = 1,
1760 .no_invd_sharing = true,
1762 .l1i_cache = &(CPUCacheInfo) {
1763 .type = INSTRUCTION_CACHE,
1764 .level = 1,
1765 .size = 64 * KiB,
1766 .line_size = 64,
1767 .associativity = 4,
1768 .partitions = 1,
1769 .sets = 256,
1770 .lines_per_tag = 1,
1771 .self_init = 1,
1772 .no_invd_sharing = true,
1774 .l2_cache = &(CPUCacheInfo) {
1775 .type = UNIFIED_CACHE,
1776 .level = 2,
1777 .size = 512 * KiB,
1778 .line_size = 64,
1779 .associativity = 8,
1780 .partitions = 1,
1781 .sets = 1024,
1782 .lines_per_tag = 1,
1784 .l3_cache = &(CPUCacheInfo) {
1785 .type = UNIFIED_CACHE,
1786 .level = 3,
1787 .size = 8 * MiB,
1788 .line_size = 64,
1789 .associativity = 16,
1790 .partitions = 1,
1791 .sets = 8192,
1792 .lines_per_tag = 1,
1793 .self_init = true,
1794 .inclusive = true,
1795 .complex_indexing = true,
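/*
 * Informational: each cache level above is internally consistent, i.e.
 * size == sets * associativity * line_size * partitions.  For instance the
 * L1D cache is 64 sets * 8 ways * 64 B = 32 KiB, and the L3 cache is
 * 8192 sets * 16 ways * 64 B = 8 MiB.
 */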
1799 /* The following VMX features are not supported by KVM and are left out of the
1800  * CPU definitions:
1802 * Dual-monitor support (all processors)
1803 * Entry to SMM
1804 * Deactivate dual-monitor treatment
1805 * Number of CR3-target values
1806 * Shutdown activity state
1807 * Wait-for-SIPI activity state
1808 * PAUSE-loop exiting (Westmere and newer)
1809 * EPT-violation #VE (Broadwell and newer)
1810 * Inject event with insn length=0 (Skylake and newer)
1811 * Conceal non-root operation from PT
1812 * Conceal VM exits from PT
1813 * Conceal VM entries from PT
1814 * Enable ENCLS exiting
1815 * Mode-based execute control (XS/XU)
1816  * TSC scaling (Skylake Server and newer)
1817 * GPA translation for PT (IceLake and newer)
1818 * User wait and pause
1819 * ENCLV exiting
1820 * Load IA32_RTIT_CTL
1821 * Clear IA32_RTIT_CTL
1822 * Advanced VM-exit information for EPT violations
1823 * Sub-page write permissions
1824 * PT in VMX operation
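/*
 * Each entry below is a named, versioned CPU model that can be selected
 * with "-cpu <name>" (or "-cpu <name>-vN" for a specific version).  Unlike
 * "-cpu host", these definitions aim to present a stable, migration-friendly
 * guest-visible feature set rather than mirroring the host CPU.
 */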
1827 static X86CPUDefinition builtin_x86_defs[] = {
1829 .name = "qemu64",
1830 .level = 0xd,
1831 .vendor = CPUID_VENDOR_AMD,
1832 .family = 6,
1833 .model = 6,
1834 .stepping = 3,
1835 .features[FEAT_1_EDX] =
1836 PPRO_FEATURES |
1837 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1838 CPUID_PSE36,
1839 .features[FEAT_1_ECX] =
1840 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1841 .features[FEAT_8000_0001_EDX] =
1842 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1843 .features[FEAT_8000_0001_ECX] =
1844 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
1845 .xlevel = 0x8000000A,
1846 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1849 .name = "phenom",
1850 .level = 5,
1851 .vendor = CPUID_VENDOR_AMD,
1852 .family = 16,
1853 .model = 2,
1854 .stepping = 3,
1855 /* Missing: CPUID_HT */
1856 .features[FEAT_1_EDX] =
1857 PPRO_FEATURES |
1858 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1859 CPUID_PSE36 | CPUID_VME,
1860 .features[FEAT_1_ECX] =
1861 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
1862 CPUID_EXT_POPCNT,
1863 .features[FEAT_8000_0001_EDX] =
1864 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
1865 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
1866 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
1867 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1868 CPUID_EXT3_CR8LEG,
1869 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1870 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
1871 .features[FEAT_8000_0001_ECX] =
1872 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
1873 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
1874 /* Missing: CPUID_SVM_LBRV */
1875 .features[FEAT_SVM] =
1876 CPUID_SVM_NPT,
1877 .xlevel = 0x8000001A,
1878 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
1881 .name = "core2duo",
1882 .level = 10,
1883 .vendor = CPUID_VENDOR_INTEL,
1884 .family = 6,
1885 .model = 15,
1886 .stepping = 11,
1887 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1888 .features[FEAT_1_EDX] =
1889 PPRO_FEATURES |
1890 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1891 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
1892 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
1893 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
1894 .features[FEAT_1_ECX] =
1895 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1896 CPUID_EXT_CX16,
1897 .features[FEAT_8000_0001_EDX] =
1898 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1899 .features[FEAT_8000_0001_ECX] =
1900 CPUID_EXT3_LAHF_LM,
1901 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS,
1902 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
1903 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
1904 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
1905 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
1906 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS,
1907 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
1908 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
1909 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
1910 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
1911 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
1912 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
1913 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
1914 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
1915 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
1916 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
1917 .features[FEAT_VMX_SECONDARY_CTLS] =
1918 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES,
1919 .xlevel = 0x80000008,
1920 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
1923 .name = "kvm64",
1924 .level = 0xd,
1925 .vendor = CPUID_VENDOR_INTEL,
1926 .family = 15,
1927 .model = 6,
1928 .stepping = 1,
1929 /* Missing: CPUID_HT */
1930 .features[FEAT_1_EDX] =
1931 PPRO_FEATURES | CPUID_VME |
1932 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1933 CPUID_PSE36,
1934 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
1935 .features[FEAT_1_ECX] =
1936 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1937 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
1938 .features[FEAT_8000_0001_EDX] =
1939 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1940 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1941 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
1942 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1943 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
1944 .features[FEAT_8000_0001_ECX] =
1946 /* VMX features from Cedar Mill/Prescott */
1947 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
1948 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
1949 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
1950 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
1951 VMX_PIN_BASED_NMI_EXITING,
1952 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
1953 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
1954 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
1955 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
1956 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
1957 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
1958 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
1959 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING,
1960 .xlevel = 0x80000008,
1961 .model_id = "Common KVM processor"
1964 .name = "qemu32",
1965 .level = 4,
1966 .vendor = CPUID_VENDOR_INTEL,
1967 .family = 6,
1968 .model = 6,
1969 .stepping = 3,
1970 .features[FEAT_1_EDX] =
1971 PPRO_FEATURES,
1972 .features[FEAT_1_ECX] =
1973 CPUID_EXT_SSE3,
1974 .xlevel = 0x80000004,
1975 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1978 .name = "kvm32",
1979 .level = 5,
1980 .vendor = CPUID_VENDOR_INTEL,
1981 .family = 15,
1982 .model = 6,
1983 .stepping = 1,
1984 .features[FEAT_1_EDX] =
1985 PPRO_FEATURES | CPUID_VME |
1986 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
1987 .features[FEAT_1_ECX] =
1988 CPUID_EXT_SSE3,
1989 .features[FEAT_8000_0001_ECX] =
1991 /* VMX features from Yonah */
1992 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
1993 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
1994 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
1995 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
1996 VMX_PIN_BASED_NMI_EXITING,
1997 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
1998 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
1999 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2000 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2001 VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING |
2002 VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING |
2003 VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_USE_MSR_BITMAPS,
2004 .xlevel = 0x80000008,
2005 .model_id = "Common 32-bit KVM processor"
2008 .name = "coreduo",
2009 .level = 10,
2010 .vendor = CPUID_VENDOR_INTEL,
2011 .family = 6,
2012 .model = 14,
2013 .stepping = 8,
2014 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
2015 .features[FEAT_1_EDX] =
2016 PPRO_FEATURES | CPUID_VME |
2017 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
2018 CPUID_SS,
2019 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
2020 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
2021 .features[FEAT_1_ECX] =
2022 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
2023 .features[FEAT_8000_0001_EDX] =
2024 CPUID_EXT2_NX,
2025 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
2026 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
2027 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
2028 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2029 VMX_PIN_BASED_NMI_EXITING,
2030 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2031 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2032 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2033 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2034 VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING |
2035 VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING |
2036 VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_USE_MSR_BITMAPS,
2037 .xlevel = 0x80000008,
2038 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
2041 .name = "486",
2042 .level = 1,
2043 .vendor = CPUID_VENDOR_INTEL,
2044 .family = 4,
2045 .model = 8,
2046 .stepping = 0,
2047 .features[FEAT_1_EDX] =
2048 I486_FEATURES,
2049 .xlevel = 0,
2050 .model_id = "",
2053 .name = "pentium",
2054 .level = 1,
2055 .vendor = CPUID_VENDOR_INTEL,
2056 .family = 5,
2057 .model = 4,
2058 .stepping = 3,
2059 .features[FEAT_1_EDX] =
2060 PENTIUM_FEATURES,
2061 .xlevel = 0,
2062 .model_id = "",
2065 .name = "pentium2",
2066 .level = 2,
2067 .vendor = CPUID_VENDOR_INTEL,
2068 .family = 6,
2069 .model = 5,
2070 .stepping = 2,
2071 .features[FEAT_1_EDX] =
2072 PENTIUM2_FEATURES,
2073 .xlevel = 0,
2074 .model_id = "",
2077 .name = "pentium3",
2078 .level = 3,
2079 .vendor = CPUID_VENDOR_INTEL,
2080 .family = 6,
2081 .model = 7,
2082 .stepping = 3,
2083 .features[FEAT_1_EDX] =
2084 PENTIUM3_FEATURES,
2085 .xlevel = 0,
2086 .model_id = "",
2089 .name = "athlon",
2090 .level = 2,
2091 .vendor = CPUID_VENDOR_AMD,
2092 .family = 6,
2093 .model = 2,
2094 .stepping = 3,
2095 .features[FEAT_1_EDX] =
2096 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
2097 CPUID_MCA,
2098 .features[FEAT_8000_0001_EDX] =
2099 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
2100 .xlevel = 0x80000008,
2101 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
2104 .name = "n270",
2105 .level = 10,
2106 .vendor = CPUID_VENDOR_INTEL,
2107 .family = 6,
2108 .model = 28,
2109 .stepping = 2,
2110 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
2111 .features[FEAT_1_EDX] =
2112 PPRO_FEATURES |
2113 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
2114 CPUID_ACPI | CPUID_SS,
2115         /* Some CPUs have no CPUID_SEP */
2116 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
2117 * CPUID_EXT_XTPR */
2118 .features[FEAT_1_ECX] =
2119 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
2120 CPUID_EXT_MOVBE,
2121 .features[FEAT_8000_0001_EDX] =
2122 CPUID_EXT2_NX,
2123 .features[FEAT_8000_0001_ECX] =
2124 CPUID_EXT3_LAHF_LM,
2125 .xlevel = 0x80000008,
2126 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
2129 .name = "Conroe",
2130 .level = 10,
2131 .vendor = CPUID_VENDOR_INTEL,
2132 .family = 6,
2133 .model = 15,
2134 .stepping = 3,
2135 .features[FEAT_1_EDX] =
2136 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2137 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2138 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2139 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2140 CPUID_DE | CPUID_FP87,
2141 .features[FEAT_1_ECX] =
2142 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
2143 .features[FEAT_8000_0001_EDX] =
2144 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2145 .features[FEAT_8000_0001_ECX] =
2146 CPUID_EXT3_LAHF_LM,
2147 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS,
2148 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
2149 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
2150 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
2151 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2152 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS,
2153 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2154 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2155 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2156 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2157 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2158 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2159 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2160 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2161 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2162 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2163 .features[FEAT_VMX_SECONDARY_CTLS] =
2164 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES,
2165 .xlevel = 0x80000008,
2166 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
2169 .name = "Penryn",
2170 .level = 10,
2171 .vendor = CPUID_VENDOR_INTEL,
2172 .family = 6,
2173 .model = 23,
2174 .stepping = 3,
2175 .features[FEAT_1_EDX] =
2176 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2177 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2178 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2179 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2180 CPUID_DE | CPUID_FP87,
2181 .features[FEAT_1_ECX] =
2182 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2183 CPUID_EXT_SSE3,
2184 .features[FEAT_8000_0001_EDX] =
2185 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2186 .features[FEAT_8000_0001_ECX] =
2187 CPUID_EXT3_LAHF_LM,
2188 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS,
2189 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2190 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
2191 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT |
2192 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
2193 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
2194 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2195 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS,
2196 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2197 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2198 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2199 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2200 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2201 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2202 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2203 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2204 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2205 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2206 .features[FEAT_VMX_SECONDARY_CTLS] =
2207 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2208 VMX_SECONDARY_EXEC_WBINVD_EXITING,
2209 .xlevel = 0x80000008,
2210 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
2213 .name = "Nehalem",
2214 .level = 11,
2215 .vendor = CPUID_VENDOR_INTEL,
2216 .family = 6,
2217 .model = 26,
2218 .stepping = 3,
2219 .features[FEAT_1_EDX] =
2220 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2221 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2222 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2223 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2224 CPUID_DE | CPUID_FP87,
2225 .features[FEAT_1_ECX] =
2226 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2227 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
2228 .features[FEAT_8000_0001_EDX] =
2229 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
2230 .features[FEAT_8000_0001_ECX] =
2231 CPUID_EXT3_LAHF_LM,
2232 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2233 MSR_VMX_BASIC_TRUE_CTLS,
2234 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2235 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2236 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2237 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2238 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2239 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2240 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2241 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2242 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2243 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS,
2244 .features[FEAT_VMX_EXIT_CTLS] =
2245 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2246 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2247 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2248 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2249 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2250 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
2251 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2252 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2253 VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
2254 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2255 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2256 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2257 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2258 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2259 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2260 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2261 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2262 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2263 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2264 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2265 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2266 .features[FEAT_VMX_SECONDARY_CTLS] =
2267 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2268 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2269 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2270 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2271 VMX_SECONDARY_EXEC_ENABLE_VPID,
2272 .xlevel = 0x80000008,
2273 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
2274 .versions = (X86CPUVersionDefinition[]) {
2275 { .version = 1 },
2277 .version = 2,
2278 .alias = "Nehalem-IBRS",
2279 .props = (PropValue[]) {
2280 { "spec-ctrl", "on" },
2281 { "model-id",
2282 "Intel Core i7 9xx (Nehalem Core i7, IBRS update)" },
2283 { /* end of list */ }
2286 { /* end of list */ }
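/*
 * Usage sketch (illustrative): "-cpu Nehalem-v2" selects version 2 of the
 * model above, i.e. the base Nehalem feature set plus spec-ctrl=on and the
 * IBRS model-id string; "Nehalem-IBRS" remains available as an alias for
 * the same version.
 */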
2290 .name = "Westmere",
2291 .level = 11,
2292 .vendor = CPUID_VENDOR_INTEL,
2293 .family = 6,
2294 .model = 44,
2295 .stepping = 1,
2296 .features[FEAT_1_EDX] =
2297 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2298 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2299 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2300 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2301 CPUID_DE | CPUID_FP87,
2302 .features[FEAT_1_ECX] =
2303 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
2304 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2305 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2306 .features[FEAT_8000_0001_EDX] =
2307 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
2308 .features[FEAT_8000_0001_ECX] =
2309 CPUID_EXT3_LAHF_LM,
2310 .features[FEAT_6_EAX] =
2311 CPUID_6_EAX_ARAT,
2312 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2313 MSR_VMX_BASIC_TRUE_CTLS,
2314 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2315 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2316 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2317 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2318 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2319 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2320 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2321 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2322 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2323 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS,
2324 .features[FEAT_VMX_EXIT_CTLS] =
2325 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2326 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2327 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2328 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2329 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2330 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2331 MSR_VMX_MISC_STORE_LMA,
2332 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2333 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2334 VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
2335 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2336 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2337 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2338 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2339 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2340 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2341 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2342 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2343 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2344 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2345 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2346 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2347 .features[FEAT_VMX_SECONDARY_CTLS] =
2348 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2349 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2350 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2351 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2352 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST,
2353 .xlevel = 0x80000008,
2354 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
2355 .versions = (X86CPUVersionDefinition[]) {
2356 { .version = 1 },
2358 .version = 2,
2359 .alias = "Westmere-IBRS",
2360 .props = (PropValue[]) {
2361 { "spec-ctrl", "on" },
2362 { "model-id",
2363 "Westmere E56xx/L56xx/X56xx (IBRS update)" },
2364 { /* end of list */ }
2367 { /* end of list */ }
2371 .name = "SandyBridge",
2372 .level = 0xd,
2373 .vendor = CPUID_VENDOR_INTEL,
2374 .family = 6,
2375 .model = 42,
2376 .stepping = 1,
2377 .features[FEAT_1_EDX] =
2378 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2379 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2380 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2381 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2382 CPUID_DE | CPUID_FP87,
2383 .features[FEAT_1_ECX] =
2384 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2385 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
2386 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2387 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2388 CPUID_EXT_SSE3,
2389 .features[FEAT_8000_0001_EDX] =
2390 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2391 CPUID_EXT2_SYSCALL,
2392 .features[FEAT_8000_0001_ECX] =
2393 CPUID_EXT3_LAHF_LM,
2394 .features[FEAT_XSAVE] =
2395 CPUID_XSAVE_XSAVEOPT,
2396 .features[FEAT_6_EAX] =
2397 CPUID_6_EAX_ARAT,
2398 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2399 MSR_VMX_BASIC_TRUE_CTLS,
2400 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2401 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2402 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2403 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2404 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2405 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2406 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2407 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2408 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2409 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS,
2410 .features[FEAT_VMX_EXIT_CTLS] =
2411 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2412 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2413 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2414 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2415 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2416 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2417 MSR_VMX_MISC_STORE_LMA,
2418 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2419 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2420 VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
2421 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2422 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2423 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2424 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2425 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2426 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2427 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2428 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2429 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2430 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2431 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2432 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2433 .features[FEAT_VMX_SECONDARY_CTLS] =
2434 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2435 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2436 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2437 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2438 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST,
2439 .xlevel = 0x80000008,
2440 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
2441 .versions = (X86CPUVersionDefinition[]) {
2442 { .version = 1 },
2444 .version = 2,
2445 .alias = "SandyBridge-IBRS",
2446 .props = (PropValue[]) {
2447 { "spec-ctrl", "on" },
2448 { "model-id",
2449 "Intel Xeon E312xx (Sandy Bridge, IBRS update)" },
2450 { /* end of list */ }
2453 { /* end of list */ }
2457 .name = "IvyBridge",
2458 .level = 0xd,
2459 .vendor = CPUID_VENDOR_INTEL,
2460 .family = 6,
2461 .model = 58,
2462 .stepping = 9,
2463 .features[FEAT_1_EDX] =
2464 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2465 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2466 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2467 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2468 CPUID_DE | CPUID_FP87,
2469 .features[FEAT_1_ECX] =
2470 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2471 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
2472 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2473 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2474 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2475 .features[FEAT_7_0_EBX] =
2476 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
2477 CPUID_7_0_EBX_ERMS,
2478 .features[FEAT_8000_0001_EDX] =
2479 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2480 CPUID_EXT2_SYSCALL,
2481 .features[FEAT_8000_0001_ECX] =
2482 CPUID_EXT3_LAHF_LM,
2483 .features[FEAT_XSAVE] =
2484 CPUID_XSAVE_XSAVEOPT,
2485 .features[FEAT_6_EAX] =
2486 CPUID_6_EAX_ARAT,
2487 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2488 MSR_VMX_BASIC_TRUE_CTLS,
2489 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2490 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2491 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2492 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2493 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2494 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2495 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2496 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2497 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2498 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS,
2499 .features[FEAT_VMX_EXIT_CTLS] =
2500 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2501 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2502 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2503 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2504 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2505 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2506 MSR_VMX_MISC_STORE_LMA,
2507 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2508 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2509 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
2510 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2511 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2512 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2513 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2514 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2515 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2516 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2517 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2518 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2519 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2520 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2521 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2522 .features[FEAT_VMX_SECONDARY_CTLS] =
2523 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2524 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2525 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2526 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2527 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
2528 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
2529 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2530 VMX_SECONDARY_EXEC_RDRAND_EXITING,
2531 .xlevel = 0x80000008,
2532 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
2533 .versions = (X86CPUVersionDefinition[]) {
2534 { .version = 1 },
2536 .version = 2,
2537 .alias = "IvyBridge-IBRS",
2538 .props = (PropValue[]) {
2539 { "spec-ctrl", "on" },
2540 { "model-id",
2541 "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)" },
2542 { /* end of list */ }
2545 { /* end of list */ }
2549 .name = "Haswell",
2550 .level = 0xd,
2551 .vendor = CPUID_VENDOR_INTEL,
2552 .family = 6,
2553 .model = 60,
2554 .stepping = 4,
2555 .features[FEAT_1_EDX] =
2556 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2557 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2558 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2559 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2560 CPUID_DE | CPUID_FP87,
2561 .features[FEAT_1_ECX] =
2562 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2563 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2564 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2565 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2566 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2567 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2568 .features[FEAT_8000_0001_EDX] =
2569 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2570 CPUID_EXT2_SYSCALL,
2571 .features[FEAT_8000_0001_ECX] =
2572 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
2573 .features[FEAT_7_0_EBX] =
2574 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2575 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2576 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2577 CPUID_7_0_EBX_RTM,
2578 .features[FEAT_XSAVE] =
2579 CPUID_XSAVE_XSAVEOPT,
2580 .features[FEAT_6_EAX] =
2581 CPUID_6_EAX_ARAT,
2582 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2583 MSR_VMX_BASIC_TRUE_CTLS,
2584 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2585 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2586 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2587 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2588 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2589 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2590 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2591 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2592 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2593 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
2594 .features[FEAT_VMX_EXIT_CTLS] =
2595 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2596 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2597 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2598 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2599 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2600 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2601 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
2602 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2603 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2604 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
2605 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2606 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2607 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2608 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2609 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2610 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2611 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2612 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2613 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2614 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2615 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2616 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2617 .features[FEAT_VMX_SECONDARY_CTLS] =
2618 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2619 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2620 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2621 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2622 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
2623 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
2624 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2625 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
2626 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS,
2627 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
2628 .xlevel = 0x80000008,
2629 .model_id = "Intel Core Processor (Haswell)",
2630 .versions = (X86CPUVersionDefinition[]) {
2631 { .version = 1 },
2633 .version = 2,
2634 .alias = "Haswell-noTSX",
2635 .props = (PropValue[]) {
2636 { "hle", "off" },
2637 { "rtm", "off" },
2638 { "stepping", "1" },
2639 { "model-id", "Intel Core Processor (Haswell, no TSX)", },
2640 { /* end of list */ }
2644 .version = 3,
2645 .alias = "Haswell-IBRS",
2646 .props = (PropValue[]) {
2647 /* Restore TSX features removed by -v2 above */
2648 { "hle", "on" },
2649 { "rtm", "on" },
2651 * Haswell and Haswell-IBRS had stepping=4 in
2652 * QEMU 4.0 and older
2654 { "stepping", "4" },
2655 { "spec-ctrl", "on" },
2656 { "model-id",
2657 "Intel Core Processor (Haswell, IBRS)" },
2658 { /* end of list */ }
2662 .version = 4,
2663 .alias = "Haswell-noTSX-IBRS",
2664 .props = (PropValue[]) {
2665 { "hle", "off" },
2666 { "rtm", "off" },
2667 /* spec-ctrl was already enabled by -v3 above */
2668 { "stepping", "1" },
2669 { "model-id",
2670 "Intel Core Processor (Haswell, no TSX, IBRS)" },
2671 { /* end of list */ }
2674 { /* end of list */ }
2678 .name = "Broadwell",
2679 .level = 0xd,
2680 .vendor = CPUID_VENDOR_INTEL,
2681 .family = 6,
2682 .model = 61,
2683 .stepping = 2,
2684 .features[FEAT_1_EDX] =
2685 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2686 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2687 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2688 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2689 CPUID_DE | CPUID_FP87,
2690 .features[FEAT_1_ECX] =
2691 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2692 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2693 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2694 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2695 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2696 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2697 .features[FEAT_8000_0001_EDX] =
2698 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2699 CPUID_EXT2_SYSCALL,
2700 .features[FEAT_8000_0001_ECX] =
2701 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2702 .features[FEAT_7_0_EBX] =
2703 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2704 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2705 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2706 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2707 CPUID_7_0_EBX_SMAP,
2708 .features[FEAT_XSAVE] =
2709 CPUID_XSAVE_XSAVEOPT,
2710 .features[FEAT_6_EAX] =
2711 CPUID_6_EAX_ARAT,
2712 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2713 MSR_VMX_BASIC_TRUE_CTLS,
2714 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2715 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2716 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2717 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2718 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2719 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2720 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2721 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2722 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2723 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
2724 .features[FEAT_VMX_EXIT_CTLS] =
2725 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2726 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2727 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2728 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2729 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2730 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2731 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
2732 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2733 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2734 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
2735 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2736 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2737 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2738 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2739 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2740 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2741 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2742 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2743 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2744 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2745 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2746 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2747 .features[FEAT_VMX_SECONDARY_CTLS] =
2748 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2749 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2750 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2751 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2752 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
2753 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
2754 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2755 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
2756 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
2757 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
2758 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
2759 .xlevel = 0x80000008,
2760 .model_id = "Intel Core Processor (Broadwell)",
2761 .versions = (X86CPUVersionDefinition[]) {
2762 { .version = 1 },
2764 .version = 2,
2765 .alias = "Broadwell-noTSX",
2766 .props = (PropValue[]) {
2767 { "hle", "off" },
2768 { "rtm", "off" },
2769 { "model-id", "Intel Core Processor (Broadwell, no TSX)", },
2770 { /* end of list */ }
2774 .version = 3,
2775 .alias = "Broadwell-IBRS",
2776 .props = (PropValue[]) {
2777 /* Restore TSX features removed by -v2 above */
2778 { "hle", "on" },
2779 { "rtm", "on" },
2780 { "spec-ctrl", "on" },
2781 { "model-id",
2782 "Intel Core Processor (Broadwell, IBRS)" },
2783 { /* end of list */ }
2787 .version = 4,
2788 .alias = "Broadwell-noTSX-IBRS",
2789 .props = (PropValue[]) {
2790 { "hle", "off" },
2791 { "rtm", "off" },
2792 /* spec-ctrl was already enabled by -v3 above */
2793 { "model-id",
2794 "Intel Core Processor (Broadwell, no TSX, IBRS)" },
2795 { /* end of list */ }
2798 { /* end of list */ }
2802 .name = "Skylake-Client",
2803 .level = 0xd,
2804 .vendor = CPUID_VENDOR_INTEL,
2805 .family = 6,
2806 .model = 94,
2807 .stepping = 3,
2808 .features[FEAT_1_EDX] =
2809 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2810 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2811 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2812 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2813 CPUID_DE | CPUID_FP87,
2814 .features[FEAT_1_ECX] =
2815 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2816 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2817 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2818 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2819 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2820 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2821 .features[FEAT_8000_0001_EDX] =
2822 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2823 CPUID_EXT2_SYSCALL,
2824 .features[FEAT_8000_0001_ECX] =
2825 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2826 .features[FEAT_7_0_EBX] =
2827 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2828 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2829 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2830 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2831 CPUID_7_0_EBX_SMAP,
2832 /* Missing: XSAVES (not supported by some Linux versions,
2833 * including v4.1 to v4.12).
2834 * KVM doesn't yet expose any XSAVES state save component,
2835 * and the only one defined in Skylake (processor tracing)
2836 * probably will block migration anyway.
2838 .features[FEAT_XSAVE] =
2839 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2840 CPUID_XSAVE_XGETBV1,
2841 .features[FEAT_6_EAX] =
2842 CPUID_6_EAX_ARAT,
2843 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
2844 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2845 MSR_VMX_BASIC_TRUE_CTLS,
2846 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2847 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2848 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2849 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2850 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2851 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2852 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2853 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2854 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2855 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
2856 .features[FEAT_VMX_EXIT_CTLS] =
2857 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2858 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2859 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2860 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2861 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2862 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2863 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
2864 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2865 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2866 VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
2867 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2868 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2869 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2870 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2871 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2872 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2873 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2874 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2875 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2876 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2877 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2878 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2879 .features[FEAT_VMX_SECONDARY_CTLS] =
2880 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2881 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2882 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2883 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
2884 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
2885 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
2886 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
2887 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
2888 .xlevel = 0x80000008,
2889 .model_id = "Intel Core Processor (Skylake)",
2890 .versions = (X86CPUVersionDefinition[]) {
2891 { .version = 1 },
2893 .version = 2,
2894 .alias = "Skylake-Client-IBRS",
2895 .props = (PropValue[]) {
2896 { "spec-ctrl", "on" },
2897 { "model-id",
2898 "Intel Core Processor (Skylake, IBRS)" },
2899 { /* end of list */ }
2903 .version = 3,
2904 .alias = "Skylake-Client-noTSX-IBRS",
2905 .props = (PropValue[]) {
2906 { "hle", "off" },
2907 { "rtm", "off" },
2908 { "model-id",
2909 "Intel Core Processor (Skylake, IBRS, no TSX)" },
2910 { /* end of list */ }
2913 { /* end of list */ }
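/*
 * Note: because version props accumulate, Skylake-Client-v3 keeps
 * spec-ctrl=on from -v2 and additionally turns off hle/rtm; its explicit
 * model-id makes the "no TSX" variant visible in the guest's CPUID brand
 * string.
 */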
2917 .name = "Skylake-Server",
2918 .level = 0xd,
2919 .vendor = CPUID_VENDOR_INTEL,
2920 .family = 6,
2921 .model = 85,
2922 .stepping = 4,
2923 .features[FEAT_1_EDX] =
2924 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2925 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2926 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2927 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2928 CPUID_DE | CPUID_FP87,
2929 .features[FEAT_1_ECX] =
2930 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2931 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2932 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2933 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2934 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2935 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2936 .features[FEAT_8000_0001_EDX] =
2937 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2938 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2939 .features[FEAT_8000_0001_ECX] =
2940 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2941 .features[FEAT_7_0_EBX] =
2942 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2943 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2944 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2945 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2946 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
2947 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2948 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2949 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2950 .features[FEAT_7_0_ECX] =
2951 CPUID_7_0_ECX_PKU,
2952 /* Missing: XSAVES (not supported by some Linux versions,
2953 * including v4.1 to v4.12).
2954 * KVM doesn't yet expose any XSAVES state save component,
2955 * and the only one defined in Skylake (processor tracing)
2956 * probably will block migration anyway.
2958 .features[FEAT_XSAVE] =
2959 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2960 CPUID_XSAVE_XGETBV1,
2961 .features[FEAT_6_EAX] =
2962 CPUID_6_EAX_ARAT,
2963 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
2964 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2965 MSR_VMX_BASIC_TRUE_CTLS,
2966 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2967 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2968 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2969 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2970 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2971 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2972 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2973 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2974 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2975 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
2976 .features[FEAT_VMX_EXIT_CTLS] =
2977 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2978 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2979 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2980 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2981 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2982 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2983 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
2984 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2985 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2986 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
2987 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2988 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2989 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2990 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2991 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2992 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2993 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2994 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2995 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2996 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2997 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2998 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2999 .features[FEAT_VMX_SECONDARY_CTLS] =
3000 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3001 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
3002 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
3003 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3004 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
3005 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
3006 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3007 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
3008 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
3009 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
3010 .xlevel = 0x80000008,
3011 .model_id = "Intel Xeon Processor (Skylake)",
3012 .versions = (X86CPUVersionDefinition[]) {
3013 { .version = 1 },
3015 .version = 2,
3016 .alias = "Skylake-Server-IBRS",
3017 .props = (PropValue[]) {
3018 /* clflushopt was not added to Skylake-Server-IBRS */
3019 /* TODO: add -v3 including clflushopt */
3020 { "clflushopt", "off" },
3021 { "spec-ctrl", "on" },
3022 { "model-id",
3023 "Intel Xeon Processor (Skylake, IBRS)" },
3024 { /* end of list */ }
3028 .version = 3,
3029 .alias = "Skylake-Server-noTSX-IBRS",
3030 .props = (PropValue[]) {
3031 { "hle", "off" },
3032 { "rtm", "off" },
3033 { "model-id",
3034 "Intel Xeon Processor (Skylake, IBRS, no TSX)" },
3035 { /* end of list */ }
3038 { /* end of list */ }
3042 .name = "Cascadelake-Server",
3043 .level = 0xd,
3044 .vendor = CPUID_VENDOR_INTEL,
3045 .family = 6,
3046 .model = 85,
3047 .stepping = 6,
3048 .features[FEAT_1_EDX] =
3049 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3050 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3051 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3052 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3053 CPUID_DE | CPUID_FP87,
3054 .features[FEAT_1_ECX] =
3055 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
3056 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
3057 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
3058 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
3059 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
3060 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
3061 .features[FEAT_8000_0001_EDX] =
3062 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
3063 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
3064 .features[FEAT_8000_0001_ECX] =
3065 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
3066 .features[FEAT_7_0_EBX] =
3067 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
3068 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
3069 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
3070 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
3071 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
3072 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
3073 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
3074 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
3075 .features[FEAT_7_0_ECX] =
3076 CPUID_7_0_ECX_PKU |
3077 CPUID_7_0_ECX_AVX512VNNI,
3078 .features[FEAT_7_0_EDX] =
3079 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
3080 /* Missing: XSAVES (not supported by some Linux versions,
3081 * including v4.1 to v4.12).
3082 * KVM doesn't yet expose any XSAVES state save component,
3083 * and the only one defined in Skylake (processor tracing)
3084 * probably will block migration anyway.
3086 .features[FEAT_XSAVE] =
3087 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3088 CPUID_XSAVE_XGETBV1,
3089 .features[FEAT_6_EAX] =
3090 CPUID_6_EAX_ARAT,
3091 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
3092 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
3093 MSR_VMX_BASIC_TRUE_CTLS,
3094 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
3095 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
3096 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
3097 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
3098 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
3099 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
3100 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
3101 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
3102 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
3103 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
3104 .features[FEAT_VMX_EXIT_CTLS] =
3105 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
3106 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
3107 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
3108 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
3109 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
3110 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
3111 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
3112 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
3113 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
3114 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
3115 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
3116 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
3117 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
3118 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
3119 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
3120 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
3121 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
3122 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
3123 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
3124 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
3125 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
3126 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
3127 .features[FEAT_VMX_SECONDARY_CTLS] =
3128 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3129 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
3130 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
3131 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3132 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
3133 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
3134 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3135 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
3136 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
3137 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
3138 .xlevel = 0x80000008,
3139 .model_id = "Intel Xeon Processor (Cascadelake)",
3140 .versions = (X86CPUVersionDefinition[]) {
3141 { .version = 1 },
3142 { .version = 2,
3143 .props = (PropValue[]) {
3144 { "arch-capabilities", "on" },
3145 { "rdctl-no", "on" },
3146 { "ibrs-all", "on" },
3147 { "skip-l1dfl-vmentry", "on" },
3148 { "mds-no", "on" },
3149 { /* end of list */ }
3152 { .version = 3,
3153 .alias = "Cascadelake-Server-noTSX",
3154 .props = (PropValue[]) {
3155 { "hle", "off" },
3156 { "rtm", "off" },
3157 { /* end of list */ }
3160 { /* end of list */ }
3164 .name = "Cooperlake",
3165 .level = 0xd,
3166 .vendor = CPUID_VENDOR_INTEL,
3167 .family = 6,
3168 .model = 85,
3169 .stepping = 10,
3170 .features[FEAT_1_EDX] =
3171 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3172 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3173 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3174 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3175 CPUID_DE | CPUID_FP87,
3176 .features[FEAT_1_ECX] =
3177 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
3178 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
3179 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
3180 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
3181 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
3182 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
3183 .features[FEAT_8000_0001_EDX] =
3184 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
3185 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
3186 .features[FEAT_8000_0001_ECX] =
3187 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
3188 .features[FEAT_7_0_EBX] =
3189 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
3190 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
3191 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
3192 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
3193 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
3194 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
3195 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
3196 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
3197 .features[FEAT_7_0_ECX] =
3198 CPUID_7_0_ECX_PKU |
3199 CPUID_7_0_ECX_AVX512VNNI,
3200 .features[FEAT_7_0_EDX] =
3201 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_STIBP |
3202 CPUID_7_0_EDX_SPEC_CTRL_SSBD | CPUID_7_0_EDX_ARCH_CAPABILITIES,
3203 .features[FEAT_ARCH_CAPABILITIES] =
3204 MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_IBRS_ALL |
3205 MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY | MSR_ARCH_CAP_MDS_NO |
3206 MSR_ARCH_CAP_PSCHANGE_MC_NO | MSR_ARCH_CAP_TAA_NO,
3207 .features[FEAT_7_1_EAX] =
3208 CPUID_7_1_EAX_AVX512_BF16,
3209 /*
3210 * Missing: XSAVES (not supported by some Linux versions,
3211 * including v4.1 to v4.12).
3212 * KVM doesn't yet expose any XSAVES state save component,
3213 * and the only one defined in Skylake (processor tracing)
3214 * probably will block migration anyway.
3215 */
3216 .features[FEAT_XSAVE] =
3217 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3218 CPUID_XSAVE_XGETBV1,
3219 .features[FEAT_6_EAX] =
3220 CPUID_6_EAX_ARAT,
3221 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
3222 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
3223 MSR_VMX_BASIC_TRUE_CTLS,
3224 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
3225 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
3226 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
3227 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
3228 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
3229 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
3230 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
3231 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
3232 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
3233 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
3234 .features[FEAT_VMX_EXIT_CTLS] =
3235 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
3236 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
3237 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
3238 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
3239 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
3240 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
3241 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
3242 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
3243 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
3244 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
3245 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
3246 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
3247 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
3248 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
3249 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
3250 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
3251 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
3252 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
3253 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
3254 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
3255 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
3256 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
3257 .features[FEAT_VMX_SECONDARY_CTLS] =
3258 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3259 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
3260 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
3261 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3262 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
3263 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
3264 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3265 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
3266 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
3267 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
3268 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
3269 .xlevel = 0x80000008,
3270 .model_id = "Intel Xeon Processor (Cooperlake)",
3273 .name = "Icelake-Client",
3274 .level = 0xd,
3275 .vendor = CPUID_VENDOR_INTEL,
3276 .family = 6,
3277 .model = 126,
3278 .stepping = 0,
3279 .features[FEAT_1_EDX] =
3280 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3281 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3282 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3283 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3284 CPUID_DE | CPUID_FP87,
3285 .features[FEAT_1_ECX] =
3286 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
3287 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
3288 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
3289 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
3290 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
3291 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
3292 .features[FEAT_8000_0001_EDX] =
3293 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
3294 CPUID_EXT2_SYSCALL,
3295 .features[FEAT_8000_0001_ECX] =
3296 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
3297 .features[FEAT_8000_0008_EBX] =
3298 CPUID_8000_0008_EBX_WBNOINVD,
3299 .features[FEAT_7_0_EBX] =
3300 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
3301 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
3302 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
3303 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
3304 CPUID_7_0_EBX_SMAP,
3305 .features[FEAT_7_0_ECX] =
3306 CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
3307 CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI |
3308 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
3309 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
3310 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
3311 .features[FEAT_7_0_EDX] =
3312 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
3313 /* Missing: XSAVES (not supported by some Linux versions,
3314 * including v4.1 to v4.12).
3315 * KVM doesn't yet expose any XSAVES state save component,
3316 * and the only one defined in Skylake (processor tracing)
3317 * probably will block migration anyway.
3318 */
3319 .features[FEAT_XSAVE] =
3320 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3321 CPUID_XSAVE_XGETBV1,
3322 .features[FEAT_6_EAX] =
3323 CPUID_6_EAX_ARAT,
3324 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
3325 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
3326 MSR_VMX_BASIC_TRUE_CTLS,
3327 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
3328 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
3329 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
3330 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
3331 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
3332 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
3333 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
3334 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
3335 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
3336 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
3337 .features[FEAT_VMX_EXIT_CTLS] =
3338 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
3339 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
3340 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
3341 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
3342 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
3343 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
3344 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
3345 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
3346 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
3347 VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
3348 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
3349 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
3350 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
3351 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
3352 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
3353 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
3354 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
3355 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
3356 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
3357 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
3358 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
3359 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
3360 .features[FEAT_VMX_SECONDARY_CTLS] =
3361 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3362 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
3363 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
3364 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
3365 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
3366 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
3367 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
3368 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
3369 .xlevel = 0x80000008,
3370 .model_id = "Intel Core Processor (Icelake)",
3371 .versions = (X86CPUVersionDefinition[]) {
3372 { .version = 1 },
3374 .version = 2,
3375 .alias = "Icelake-Client-noTSX",
3376 .props = (PropValue[]) {
3377 { "hle", "off" },
3378 { "rtm", "off" },
3379 { /* end of list */ }
3382 { /* end of list */ }
3386 .name = "Icelake-Server",
3387 .level = 0xd,
3388 .vendor = CPUID_VENDOR_INTEL,
3389 .family = 6,
3390 .model = 134,
3391 .stepping = 0,
3392 .features[FEAT_1_EDX] =
3393 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3394 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3395 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3396 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3397 CPUID_DE | CPUID_FP87,
3398 .features[FEAT_1_ECX] =
3399 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
3400 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
3401 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
3402 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
3403 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
3404 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
3405 .features[FEAT_8000_0001_EDX] =
3406 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
3407 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
3408 .features[FEAT_8000_0001_ECX] =
3409 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
3410 .features[FEAT_8000_0008_EBX] =
3411 CPUID_8000_0008_EBX_WBNOINVD,
3412 .features[FEAT_7_0_EBX] =
3413 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
3414 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
3415 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
3416 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
3417 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
3418 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
3419 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
3420 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
3421 .features[FEAT_7_0_ECX] =
3422 CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
3423 CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI |
3424 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
3425 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
3426 CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57,
3427 .features[FEAT_7_0_EDX] =
3428 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
3429 /* Missing: XSAVES (not supported by some Linux versions,
3430 * including v4.1 to v4.12).
3431 * KVM doesn't yet expose any XSAVES state save component,
3432 * and the only one defined in Skylake (processor tracing)
3433 * probably will block migration anyway.
3434 */
3435 .features[FEAT_XSAVE] =
3436 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3437 CPUID_XSAVE_XGETBV1,
3438 .features[FEAT_6_EAX] =
3439 CPUID_6_EAX_ARAT,
3440 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
3441 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
3442 MSR_VMX_BASIC_TRUE_CTLS,
3443 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
3444 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
3445 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
3446 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
3447 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
3448 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
3449 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
3450 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
3451 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
3452 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
3453 .features[FEAT_VMX_EXIT_CTLS] =
3454 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
3455 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
3456 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
3457 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
3458 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
3459 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
3460 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
3461 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
3462 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
3463 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
3464 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
3465 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
3466 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
3467 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
3468 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
3469 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
3470 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
3471 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
3472 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
3473 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
3474 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
3475 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
3476 .features[FEAT_VMX_SECONDARY_CTLS] =
3477 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3478 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
3479 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
3480 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3481 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
3482 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
3483 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3484 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
3485 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS,
3486 .xlevel = 0x80000008,
3487 .model_id = "Intel Xeon Processor (Icelake)",
3488 .versions = (X86CPUVersionDefinition[]) {
3489 { .version = 1 },
3491 .version = 2,
3492 .alias = "Icelake-Server-noTSX",
3493 .props = (PropValue[]) {
3494 { "hle", "off" },
3495 { "rtm", "off" },
3496 { /* end of list */ }
3499 { /* end of list */ }
3503 .name = "Denverton",
3504 .level = 21,
3505 .vendor = CPUID_VENDOR_INTEL,
3506 .family = 6,
3507 .model = 95,
3508 .stepping = 1,
3509 .features[FEAT_1_EDX] =
3510 CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC |
3511 CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC |
3512 CPUID_SEP | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV |
3513 CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | CPUID_MMX | CPUID_FXSR |
3514 CPUID_SSE | CPUID_SSE2,
3515 .features[FEAT_1_ECX] =
3516 CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR |
3517 CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | CPUID_EXT_SSE41 |
3518 CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE |
3519 CPUID_EXT_POPCNT | CPUID_EXT_TSC_DEADLINE_TIMER |
3520 CPUID_EXT_AES | CPUID_EXT_XSAVE | CPUID_EXT_RDRAND,
3521 .features[FEAT_8000_0001_EDX] =
3522 CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | CPUID_EXT2_PDPE1GB |
3523 CPUID_EXT2_RDTSCP | CPUID_EXT2_LM,
3524 .features[FEAT_8000_0001_ECX] =
3525 CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
3526 .features[FEAT_7_0_EBX] =
3527 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_ERMS |
3528 CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_SMAP |
3529 CPUID_7_0_EBX_CLFLUSHOPT | CPUID_7_0_EBX_SHA_NI,
3530 .features[FEAT_7_0_EDX] =
3531 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_ARCH_CAPABILITIES |
3532 CPUID_7_0_EDX_SPEC_CTRL_SSBD,
3533 /*
3534 * Missing: XSAVES (not supported by some Linux versions,
3535 * including v4.1 to v4.12).
3536 * KVM doesn't yet expose any XSAVES state save component,
3537 * and the only one defined in Skylake (processor tracing)
3538 * probably will block migration anyway.
3539 */
3540 .features[FEAT_XSAVE] =
3541 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | CPUID_XSAVE_XGETBV1,
3542 .features[FEAT_6_EAX] =
3543 CPUID_6_EAX_ARAT,
3544 .features[FEAT_ARCH_CAPABILITIES] =
3545 MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY,
3546 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
3547 MSR_VMX_BASIC_TRUE_CTLS,
3548 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
3549 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
3550 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
3551 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
3552 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
3553 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
3554 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
3555 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
3556 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
3557 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
3558 .features[FEAT_VMX_EXIT_CTLS] =
3559 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
3560 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
3561 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
3562 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
3563 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
3564 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
3565 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
3566 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
3567 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
3568 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
3569 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
3570 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
3571 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
3572 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
3573 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
3574 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
3575 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
3576 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
3577 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
3578 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
3579 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
3580 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
3581 .features[FEAT_VMX_SECONDARY_CTLS] =
3582 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3583 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
3584 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
3585 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3586 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
3587 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
3588 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3589 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
3590 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
3591 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
3592 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
3593 .xlevel = 0x80000008,
3594 .model_id = "Intel Atom Processor (Denverton)",
3597 .name = "Snowridge",
3598 .level = 27,
3599 .vendor = CPUID_VENDOR_INTEL,
3600 .family = 6,
3601 .model = 134,
3602 .stepping = 1,
3603 .features[FEAT_1_EDX] =
3604 /* missing: CPUID_PN CPUID_IA64 */
3605 /* missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
3606 CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE |
3607 CPUID_TSC | CPUID_MSR | CPUID_PAE | CPUID_MCE |
3608 CPUID_CX8 | CPUID_APIC | CPUID_SEP |
3609 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV |
3610 CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH |
3611 CPUID_MMX |
3612 CPUID_FXSR | CPUID_SSE | CPUID_SSE2,
3613 .features[FEAT_1_ECX] =
3614 CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR |
3615 CPUID_EXT_SSSE3 |
3616 CPUID_EXT_CX16 |
3617 CPUID_EXT_SSE41 |
3618 CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE |
3619 CPUID_EXT_POPCNT |
3620 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_AES | CPUID_EXT_XSAVE |
3621 CPUID_EXT_RDRAND,
3622 .features[FEAT_8000_0001_EDX] =
3623 CPUID_EXT2_SYSCALL |
3624 CPUID_EXT2_NX |
3625 CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
3626 CPUID_EXT2_LM,
3627 .features[FEAT_8000_0001_ECX] =
3628 CPUID_EXT3_LAHF_LM |
3629 CPUID_EXT3_3DNOWPREFETCH,
3630 .features[FEAT_7_0_EBX] =
3631 CPUID_7_0_EBX_FSGSBASE |
3632 CPUID_7_0_EBX_SMEP |
3633 CPUID_7_0_EBX_ERMS |
3634 CPUID_7_0_EBX_MPX | /* missing bits 13, 15 */
3635 CPUID_7_0_EBX_RDSEED |
3636 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
3637 CPUID_7_0_EBX_CLWB |
3638 CPUID_7_0_EBX_SHA_NI,
3639 .features[FEAT_7_0_ECX] =
3640 CPUID_7_0_ECX_UMIP |
3641 /* missing bit 5 */
3642 CPUID_7_0_ECX_GFNI |
3643 CPUID_7_0_ECX_MOVDIRI | CPUID_7_0_ECX_CLDEMOTE |
3644 CPUID_7_0_ECX_MOVDIR64B,
3645 .features[FEAT_7_0_EDX] =
3646 CPUID_7_0_EDX_SPEC_CTRL |
3647 CPUID_7_0_EDX_ARCH_CAPABILITIES | CPUID_7_0_EDX_SPEC_CTRL_SSBD |
3648 CPUID_7_0_EDX_CORE_CAPABILITY,
3649 .features[FEAT_CORE_CAPABILITY] =
3650 MSR_CORE_CAP_SPLIT_LOCK_DETECT,
3651 /*
3652 * Missing: XSAVES (not supported by some Linux versions,
3653 * including v4.1 to v4.12).
3654 * KVM doesn't yet expose any XSAVES state save component,
3655 * and the only one defined in Skylake (processor tracing)
3656 * probably will block migration anyway.
3657 */
3658 .features[FEAT_XSAVE] =
3659 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3660 CPUID_XSAVE_XGETBV1,
3661 .features[FEAT_6_EAX] =
3662 CPUID_6_EAX_ARAT,
3663 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
3664 MSR_VMX_BASIC_TRUE_CTLS,
3665 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
3666 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
3667 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
3668 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
3669 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
3670 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
3671 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
3672 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
3673 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
3674 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
3675 .features[FEAT_VMX_EXIT_CTLS] =
3676 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
3677 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
3678 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
3679 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
3680 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
3681 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
3682 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
3683 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
3684 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
3685 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
3686 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
3687 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
3688 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
3689 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
3690 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
3691 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
3692 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
3693 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
3694 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
3695 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
3696 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
3697 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
3698 .features[FEAT_VMX_SECONDARY_CTLS] =
3699 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3700 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
3701 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
3702 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3703 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
3704 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
3705 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3706 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
3707 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
3708 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
3709 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
3710 .xlevel = 0x80000008,
3711 .model_id = "Intel Atom Processor (SnowRidge)",
3712 .versions = (X86CPUVersionDefinition[]) {
3713 { .version = 1 },
3715 .version = 2,
3716 .props = (PropValue[]) {
3717 { "mpx", "off" },
3718 { "model-id", "Intel Atom Processor (Snowridge, no MPX)" },
3719 { /* end of list */ },
3722 { /* end of list */ },
3726 .name = "KnightsMill",
3727 .level = 0xd,
3728 .vendor = CPUID_VENDOR_INTEL,
3729 .family = 6,
3730 .model = 133,
3731 .stepping = 0,
3732 .features[FEAT_1_EDX] =
3733 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR |
3734 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV |
3735 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC |
3736 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC |
3737 CPUID_PSE | CPUID_DE | CPUID_FP87,
3738 .features[FEAT_1_ECX] =
3739 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
3740 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
3741 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
3742 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
3743 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
3744 CPUID_EXT_F16C | CPUID_EXT_RDRAND,
3745 .features[FEAT_8000_0001_EDX] =
3746 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
3747 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
3748 .features[FEAT_8000_0001_ECX] =
3749 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
3750 .features[FEAT_7_0_EBX] =
3751 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
3752 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS |
3753 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F |
3754 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF |
3755 CPUID_7_0_EBX_AVX512ER,
3756 .features[FEAT_7_0_ECX] =
3757 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
3758 .features[FEAT_7_0_EDX] =
3759 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS,
3760 .features[FEAT_XSAVE] =
3761 CPUID_XSAVE_XSAVEOPT,
3762 .features[FEAT_6_EAX] =
3763 CPUID_6_EAX_ARAT,
3764 .xlevel = 0x80000008,
3765 .model_id = "Intel Xeon Phi Processor (Knights Mill)",
3768 .name = "Opteron_G1",
3769 .level = 5,
3770 .vendor = CPUID_VENDOR_AMD,
3771 .family = 15,
3772 .model = 6,
3773 .stepping = 1,
3774 .features[FEAT_1_EDX] =
3775 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3776 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3777 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3778 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3779 CPUID_DE | CPUID_FP87,
3780 .features[FEAT_1_ECX] =
3781 CPUID_EXT_SSE3,
3782 .features[FEAT_8000_0001_EDX] =
3783 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
3784 .xlevel = 0x80000008,
3785 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
3788 .name = "Opteron_G2",
3789 .level = 5,
3790 .vendor = CPUID_VENDOR_AMD,
3791 .family = 15,
3792 .model = 6,
3793 .stepping = 1,
3794 .features[FEAT_1_EDX] =
3795 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3796 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3797 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3798 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3799 CPUID_DE | CPUID_FP87,
3800 .features[FEAT_1_ECX] =
3801 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
3802 .features[FEAT_8000_0001_EDX] =
3803 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
3804 .features[FEAT_8000_0001_ECX] =
3805 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
3806 .xlevel = 0x80000008,
3807 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
3810 .name = "Opteron_G3",
3811 .level = 5,
3812 .vendor = CPUID_VENDOR_AMD,
3813 .family = 16,
3814 .model = 2,
3815 .stepping = 3,
3816 .features[FEAT_1_EDX] =
3817 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3818 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3819 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3820 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3821 CPUID_DE | CPUID_FP87,
3822 .features[FEAT_1_ECX] =
3823 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
3824 CPUID_EXT_SSE3,
3825 .features[FEAT_8000_0001_EDX] =
3826 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL |
3827 CPUID_EXT2_RDTSCP,
3828 .features[FEAT_8000_0001_ECX] =
3829 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
3830 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
3831 .xlevel = 0x80000008,
3832 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
3835 .name = "Opteron_G4",
3836 .level = 0xd,
3837 .vendor = CPUID_VENDOR_AMD,
3838 .family = 21,
3839 .model = 1,
3840 .stepping = 2,
3841 .features[FEAT_1_EDX] =
3842 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3843 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3844 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3845 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3846 CPUID_DE | CPUID_FP87,
3847 .features[FEAT_1_ECX] =
3848 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
3849 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
3850 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
3851 CPUID_EXT_SSE3,
3852 .features[FEAT_8000_0001_EDX] =
3853 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
3854 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP,
3855 .features[FEAT_8000_0001_ECX] =
3856 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
3857 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
3858 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
3859 CPUID_EXT3_LAHF_LM,
3860 .features[FEAT_SVM] =
3861 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
3862 /* no xsaveopt! */
3863 .xlevel = 0x8000001A,
3864 .model_id = "AMD Opteron 62xx class CPU",
3867 .name = "Opteron_G5",
3868 .level = 0xd,
3869 .vendor = CPUID_VENDOR_AMD,
3870 .family = 21,
3871 .model = 2,
3872 .stepping = 0,
3873 .features[FEAT_1_EDX] =
3874 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3875 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3876 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3877 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3878 CPUID_DE | CPUID_FP87,
3879 .features[FEAT_1_ECX] =
3880 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
3881 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
3882 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
3883 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
3884 .features[FEAT_8000_0001_EDX] =
3885 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
3886 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP,
3887 .features[FEAT_8000_0001_ECX] =
3888 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
3889 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
3890 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
3891 CPUID_EXT3_LAHF_LM,
3892 .features[FEAT_SVM] =
3893 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
3894 /* no xsaveopt! */
3895 .xlevel = 0x8000001A,
3896 .model_id = "AMD Opteron 63xx class CPU",
3899 .name = "EPYC",
3900 .level = 0xd,
3901 .vendor = CPUID_VENDOR_AMD,
3902 .family = 23,
3903 .model = 1,
3904 .stepping = 2,
3905 .features[FEAT_1_EDX] =
3906 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
3907 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
3908 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
3909 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
3910 CPUID_VME | CPUID_FP87,
3911 .features[FEAT_1_ECX] =
3912 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
3913 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
3914 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
3915 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
3916 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
3917 .features[FEAT_8000_0001_EDX] =
3918 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
3919 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
3920 CPUID_EXT2_SYSCALL,
3921 .features[FEAT_8000_0001_ECX] =
3922 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
3923 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
3924 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
3925 CPUID_EXT3_TOPOEXT,
3926 .features[FEAT_7_0_EBX] =
3927 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
3928 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
3929 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
3930 CPUID_7_0_EBX_SHA_NI,
3931 /* Missing: XSAVES (not supported by some Linux versions,
3932 * including v4.1 to v4.12).
3933 * KVM doesn't yet expose any XSAVES state save component.
3934 */
3935 .features[FEAT_XSAVE] =
3936 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3937 CPUID_XSAVE_XGETBV1,
3938 .features[FEAT_6_EAX] =
3939 CPUID_6_EAX_ARAT,
3940 .features[FEAT_SVM] =
3941 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
3942 .xlevel = 0x8000001E,
3943 .model_id = "AMD EPYC Processor",
3944 .cache_info = &epyc_cache_info,
3945 .versions = (X86CPUVersionDefinition[]) {
3946 { .version = 1 },
3948 .version = 2,
3949 .alias = "EPYC-IBPB",
3950 .props = (PropValue[]) {
3951 { "ibpb", "on" },
3952 { "model-id",
3953 "AMD EPYC Processor (with IBPB)" },
3954 { /* end of list */ }
3957 { /* end of list */ }
3961 .name = "Dhyana",
3962 .level = 0xd,
3963 .vendor = CPUID_VENDOR_HYGON,
3964 .family = 24,
3965 .model = 0,
3966 .stepping = 1,
3967 .features[FEAT_1_EDX] =
3968 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
3969 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
3970 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
3971 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
3972 CPUID_VME | CPUID_FP87,
3973 .features[FEAT_1_ECX] =
3974 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
3975 CPUID_EXT_XSAVE | CPUID_EXT_POPCNT |
3976 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
3977 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
3978 CPUID_EXT_MONITOR | CPUID_EXT_SSE3,
3979 .features[FEAT_8000_0001_EDX] =
3980 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
3981 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
3982 CPUID_EXT2_SYSCALL,
3983 .features[FEAT_8000_0001_ECX] =
3984 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
3985 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
3986 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
3987 CPUID_EXT3_TOPOEXT,
3988 .features[FEAT_8000_0008_EBX] =
3989 CPUID_8000_0008_EBX_IBPB,
3990 .features[FEAT_7_0_EBX] =
3991 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
3992 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
3993 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT,
3994 /*
3995 * Missing: XSAVES (not supported by some Linux versions,
3996 * including v4.1 to v4.12).
3997 * KVM doesn't yet expose any XSAVES state save component.
3998 */
3999 .features[FEAT_XSAVE] =
4000 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
4001 CPUID_XSAVE_XGETBV1,
4002 .features[FEAT_6_EAX] =
4003 CPUID_6_EAX_ARAT,
4004 .features[FEAT_SVM] =
4005 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
4006 .xlevel = 0x8000001E,
4007 .model_id = "Hygon Dhyana Processor",
4008 .cache_info = &epyc_cache_info,
4012 /* KVM-specific features that are automatically added/removed
4013 * from all CPU models when KVM is enabled.
4014 */
4015 static PropValue kvm_default_props[] = {
4016 { "kvmclock", "on" },
4017 { "kvm-nopiodelay", "on" },
4018 { "kvm-asyncpf", "on" },
4019 { "kvm-steal-time", "on" },
4020 { "kvm-pv-eoi", "on" },
4021 { "kvmclock-stable-bit", "on" },
4022 { "x2apic", "on" },
4023 { "acpi", "off" },
4024 { "monitor", "off" },
4025 { "svm", "off" },
4026 { NULL, NULL },
4029 /* TCG-specific defaults that override all CPU models when using TCG
4030 */
4031 static PropValue tcg_default_props[] = {
4032 { "vme", "off" },
4033 { NULL, NULL },
4037 /*
4038 * We resolve CPU model aliases using -v1 when using "-machine
4039 * none", but this is just for compatibility while libvirt isn't
4040 * adapted to resolve CPU model versions before creating VMs.
4041 * See "Runnability guarantee of CPU models" at qemu-deprecated.texi.
4042 */
4043 X86CPUVersion default_cpu_version = 1;
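/* Select the version that CPU_VERSION_AUTO resolves to for versioned CPU
 * models (see x86_cpu_model_resolve_version() below).
 */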
4045 void x86_cpu_set_default_version(X86CPUVersion version)
4047 /* Translating CPU_VERSION_AUTO to CPU_VERSION_AUTO doesn't make sense */
4048 assert(version != CPU_VERSION_AUTO);
4049 default_cpu_version = version;
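/* Return the latest (highest) version defined for @model's CPU definition,
 * or 0 if it has no versioned definitions.
 */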
4052 static X86CPUVersion x86_cpu_model_last_version(const X86CPUModel *model)
4054 int v = 0;
4055 const X86CPUVersionDefinition *vdef =
4056 x86_cpu_def_get_versions(model->cpudef);
4057 while (vdef->version) {
4058 v = vdef->version;
4059 vdef++;
4061 return v;
4064 /* Return the actual version being used for a specific CPU model */
4065 static X86CPUVersion x86_cpu_model_resolve_version(const X86CPUModel *model)
4067 X86CPUVersion v = model->version;
4068 if (v == CPU_VERSION_AUTO) {
4069 v = default_cpu_version;
4071 if (v == CPU_VERSION_LATEST) {
4072 return x86_cpu_model_last_version(model);
4074 return v;
4077 void x86_cpu_change_kvm_default(const char *prop, const char *value)
4079 PropValue *pv;
4080 for (pv = kvm_default_props; pv->prop; pv++) {
4081 if (!strcmp(pv->prop, prop)) {
4082 pv->value = value;
4083 break;
4087 /* It is valid to call this function only for properties that
4088 * are already present in the kvm_default_props table.
4089 */
4090 assert(pv->prop);
4093 static uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
4094 bool migratable_only);
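/* Query KVM (when built in) for host support of local machine-check
 * exceptions (LMCE); returns false if the capability cannot be queried.
 */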
4096 static bool lmce_supported(void)
4098 uint64_t mce_cap = 0;
4100 #ifdef CONFIG_KVM
4101 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
4102 return false;
4104 #endif
4106 return !!(mce_cap & MCG_LMCE_P);
4109 #define CPUID_MODEL_ID_SZ 48
4111 /*
4112 * cpu_x86_fill_model_id:
4113 * Get CPUID model ID string from host CPU.
4114 *
4115 * @str should have at least CPUID_MODEL_ID_SZ bytes
4116 *
4117 * The function does NOT add a null terminator to the string
4118 * automatically.
4119 */
4120 static int cpu_x86_fill_model_id(char *str)
4122 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
4123 int i;
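/* CPUID leaves 0x80000002..0x80000004 each return 16 bytes of the
 * 48-byte brand string in EAX, EBX, ECX and EDX. */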
4125 for (i = 0; i < 3; i++) {
4126 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
4127 memcpy(str + i * 16 + 0, &eax, 4);
4128 memcpy(str + i * 16 + 4, &ebx, 4);
4129 memcpy(str + i * 16 + 8, &ecx, 4);
4130 memcpy(str + i * 16 + 12, &edx, 4);
4132 return 0;
4135 static Property max_x86_cpu_properties[] = {
4136 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
4137 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
4138 DEFINE_PROP_END_OF_LIST()
4141 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
4143 DeviceClass *dc = DEVICE_CLASS(oc);
4144 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4146 xcc->ordering = 9;
4148 xcc->model_description =
4149 "Enables all features supported by the accelerator in the current host";
4151 device_class_set_props(dc, max_x86_cpu_properties);
4154 static void max_x86_cpu_initfn(Object *obj)
4156 X86CPU *cpu = X86_CPU(obj);
4157 CPUX86State *env = &cpu->env;
4158 KVMState *s = kvm_state;
4160 /* We can't fill the features array here because we don't know yet if
4161 * "migratable" is true or false.
4163 cpu->max_features = true;
4165 if (accel_uses_host_cpuid()) {
4166 char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
4167 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
4168 int family, model, stepping;
4170 host_vendor_fms(vendor, &family, &model, &stepping);
4171 cpu_x86_fill_model_id(model_id);
4173 object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
4174 object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
4175 object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
4176 object_property_set_int(OBJECT(cpu), stepping, "stepping",
4177 &error_abort);
4178 object_property_set_str(OBJECT(cpu), model_id, "model-id",
4179 &error_abort);
4181 if (kvm_enabled()) {
4182 env->cpuid_min_level =
4183 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
4184 env->cpuid_min_xlevel =
4185 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
4186 env->cpuid_min_xlevel2 =
4187 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
4188 } else {
4189 env->cpuid_min_level =
4190 hvf_get_supported_cpuid(0x0, 0, R_EAX);
4191 env->cpuid_min_xlevel =
4192 hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
4193 env->cpuid_min_xlevel2 =
4194 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
4197 if (lmce_supported()) {
4198 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
4200 } else {
4201 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
4202 "vendor", &error_abort);
4203 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
4204 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
4205 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
4206 object_property_set_str(OBJECT(cpu),
4207 "QEMU TCG CPU version " QEMU_HW_VERSION,
4208 "model-id", &error_abort);
4211 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
4214 static const TypeInfo max_x86_cpu_type_info = {
4215 .name = X86_CPU_TYPE_NAME("max"),
4216 .parent = TYPE_X86_CPU,
4217 .instance_init = max_x86_cpu_initfn,
4218 .class_init = max_x86_cpu_class_init,
4221 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
4222 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
4224 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4226 xcc->host_cpuid_required = true;
4227 xcc->ordering = 8;
4229 #if defined(CONFIG_KVM)
4230 xcc->model_description =
4231 "KVM processor with all supported host features ";
4232 #elif defined(CONFIG_HVF)
4233 xcc->model_description =
4234 "HVF processor with all supported host features ";
4235 #endif
4238 static const TypeInfo host_x86_cpu_type_info = {
4239 .name = X86_CPU_TYPE_NAME("host"),
4240 .parent = X86_CPU_TYPE_NAME("max"),
4241 .class_init = host_x86_cpu_class_init,
4244 #endif
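/* Build a human-readable location string for a feature word, e.g.
 * "CPUID.07H:EBX" for a CPUID-based word or "MSR(10AH)" for an MSR-based
 * one; used below when warning about unavailable features.
 */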
4246 static char *feature_word_description(FeatureWordInfo *f, uint32_t bit)
4248 assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD);
4250 switch (f->type) {
4251 case CPUID_FEATURE_WORD:
4253 const char *reg = get_register_name_32(f->cpuid.reg);
4254 assert(reg);
4255 return g_strdup_printf("CPUID.%02XH:%s",
4256 f->cpuid.eax, reg);
4258 case MSR_FEATURE_WORD:
4259 return g_strdup_printf("MSR(%02XH)",
4260 f->msr.index);
4263 return NULL;
4266 static bool x86_cpu_have_filtered_features(X86CPU *cpu)
4268 FeatureWord w;
4270 for (w = 0; w < FEATURE_WORDS; w++) {
4271 if (cpu->filtered_features[w]) {
4272 return true;
4276 return false;
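/* Clear the bits in @mask from feature word @w (unless force_features is
 * set), record them in filtered_features, and warn about each removed bit
 * when @verbose_prefix is non-NULL.
 */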
4279 static void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask,
4280 const char *verbose_prefix)
4282 CPUX86State *env = &cpu->env;
4283 FeatureWordInfo *f = &feature_word_info[w];
4284 int i;
4286 if (!cpu->force_features) {
4287 env->features[w] &= ~mask;
4289 cpu->filtered_features[w] |= mask;
4291 if (!verbose_prefix) {
4292 return;
4295 for (i = 0; i < 64; ++i) {
4296 if ((1ULL << i) & mask) {
4297 g_autofree char *feat_word_str = feature_word_description(f, i);
4298 warn_report("%s: %s%s%s [bit %d]",
4299 verbose_prefix,
4300 feat_word_str,
4301 f->feat_names[i] ? "." : "",
4302 f->feat_names[i] ? f->feat_names[i] : "", i);
4307 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
4308 const char *name, void *opaque,
4309 Error **errp)
4311 X86CPU *cpu = X86_CPU(obj);
4312 CPUX86State *env = &cpu->env;
4313 int64_t value;
4315 value = (env->cpuid_version >> 8) & 0xf;
4316 if (value == 0xf) {
4317 value += (env->cpuid_version >> 20) & 0xff;
4319 visit_type_int(v, name, &value, errp);
4322 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
4323 const char *name, void *opaque,
4324 Error **errp)
4326 X86CPU *cpu = X86_CPU(obj);
4327 CPUX86State *env = &cpu->env;
4328 const int64_t min = 0;
4329 const int64_t max = 0xff + 0xf;
4330 Error *local_err = NULL;
4331 int64_t value;
4333 visit_type_int(v, name, &value, &local_err);
4334 if (local_err) {
4335 error_propagate(errp, local_err);
4336 return;
4338 if (value < min || value > max) {
4339 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
4340 name ? name : "null", value, min, max);
4341 return;
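/* CPUID family encoding: values up to 0x0f are stored directly in
 * bits 11:8; larger values set bits 11:8 to 0xf and store the excess
 * (family - 0x0f) in the extended-family field, bits 27:20.
 * For example, family 21 (0x15) is encoded as 0xf plus 0x6. */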
4344 env->cpuid_version &= ~0xff00f00;
4345 if (value > 0x0f) {
4346 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
4347 } else {
4348 env->cpuid_version |= value << 8;
4352 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
4353 const char *name, void *opaque,
4354 Error **errp)
4356 X86CPU *cpu = X86_CPU(obj);
4357 CPUX86State *env = &cpu->env;
4358 int64_t value;
4360 value = (env->cpuid_version >> 4) & 0xf;
4361 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
4362 visit_type_int(v, name, &value, errp);
4365 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
4366 const char *name, void *opaque,
4367 Error **errp)
4369 X86CPU *cpu = X86_CPU(obj);
4370 CPUX86State *env = &cpu->env;
4371 const int64_t min = 0;
4372 const int64_t max = 0xff;
4373 Error *local_err = NULL;
4374 int64_t value;
4376 visit_type_int(v, name, &value, &local_err);
4377 if (local_err) {
4378 error_propagate(errp, local_err);
4379 return;
4381 if (value < min || value > max) {
4382 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
4383 name ? name : "null", value, min, max);
4384 return;
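/* The low nibble of the model goes in bits 7:4 and the high nibble in the
 * extended-model field, bits 19:16; e.g. model 85 (0x55) stores 0x5 in
 * each field. */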
4387 env->cpuid_version &= ~0xf00f0;
4388 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
4391 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
4392 const char *name, void *opaque,
4393 Error **errp)
4395 X86CPU *cpu = X86_CPU(obj);
4396 CPUX86State *env = &cpu->env;
4397 int64_t value;
4399 value = env->cpuid_version & 0xf;
4400 visit_type_int(v, name, &value, errp);
4403 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
4404 const char *name, void *opaque,
4405 Error **errp)
4407 X86CPU *cpu = X86_CPU(obj);
4408 CPUX86State *env = &cpu->env;
4409 const int64_t min = 0;
4410 const int64_t max = 0xf;
4411 Error *local_err = NULL;
4412 int64_t value;
4414 visit_type_int(v, name, &value, &local_err);
4415 if (local_err) {
4416 error_propagate(errp, local_err);
4417 return;
4419 if (value < min || value > max) {
4420 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
4421 name ? name : "null", value, min, max);
4422 return;
4425 env->cpuid_version &= ~0xf;
4426 env->cpuid_version |= value & 0xf;
4429 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
4431 X86CPU *cpu = X86_CPU(obj);
4432 CPUX86State *env = &cpu->env;
4433 char *value;
4435 value = g_malloc(CPUID_VENDOR_SZ + 1);
4436 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
4437 env->cpuid_vendor3);
4438 return value;
4441 static void x86_cpuid_set_vendor(Object *obj, const char *value,
4442 Error **errp)
4444 X86CPU *cpu = X86_CPU(obj);
4445 CPUX86State *env = &cpu->env;
4446 int i;
4448 if (strlen(value) != CPUID_VENDOR_SZ) {
4449 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
4450 return;
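/* Split the 12-character vendor string into three little-endian 32-bit
 * words (4 characters each), as reported by CPUID leaf 0; e.g.
 * "GenuineIntel" becomes "Genu", "ineI", "ntel". */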
4453 env->cpuid_vendor1 = 0;
4454 env->cpuid_vendor2 = 0;
4455 env->cpuid_vendor3 = 0;
4456 for (i = 0; i < 4; i++) {
4457 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
4458 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
4459 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
4463 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
4465 X86CPU *cpu = X86_CPU(obj);
4466 CPUX86State *env = &cpu->env;
4467 char *value;
4468 int i;
4470 value = g_malloc(48 + 1);
4471 for (i = 0; i < 48; i++) {
4472 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
4474 value[48] = '\0';
4475 return value;
4478 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
4479 Error **errp)
4481 X86CPU *cpu = X86_CPU(obj);
4482 CPUX86State *env = &cpu->env;
4483 int c, len, i;
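/* The brand string is stored as twelve little-endian 32-bit words in
 * cpuid_model[], padded with NUL bytes, matching CPUID leaves
 * 0x80000002..0x80000004. */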
4485 if (model_id == NULL) {
4486 model_id = "";
4488 len = strlen(model_id);
4489 memset(env->cpuid_model, 0, 48);
4490 for (i = 0; i < 48; i++) {
4491 if (i >= len) {
4492 c = '\0';
4493 } else {
4494 c = (uint8_t)model_id[i];
4496 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
4500 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
4501 void *opaque, Error **errp)
4503 X86CPU *cpu = X86_CPU(obj);
4504 int64_t value;
4506 value = cpu->env.tsc_khz * 1000;
4507 visit_type_int(v, name, &value, errp);
4510 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
4511 void *opaque, Error **errp)
4513 X86CPU *cpu = X86_CPU(obj);
4514 const int64_t min = 0;
4515 const int64_t max = INT64_MAX;
4516 Error *local_err = NULL;
4517 int64_t value;
4519 visit_type_int(v, name, &value, &local_err);
4520 if (local_err) {
4521 error_propagate(errp, local_err);
4522 return;
4524 if (value < min || value > max) {
4525 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
4526 name ? name : "null", value, min, max);
4527 return;
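/* The property value is given in Hz; the CPU state keeps the TSC
 * frequency in kHz. */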
4530 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
4533 /* Generic getter for "feature-words" and "filtered-features" properties */
4534 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
4535 const char *name, void *opaque,
4536 Error **errp)
4538 uint64_t *array = (uint64_t *)opaque;
4539 FeatureWord w;
4540 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
4541 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
4542 X86CPUFeatureWordInfoList *list = NULL;
4544 for (w = 0; w < FEATURE_WORDS; w++) {
4545 FeatureWordInfo *wi = &feature_word_info[w];
4546 /*
4547 * We didn't have MSR features when "feature-words" was
4548 * introduced. Therefore, entries of other types are skipped here.
4549 */
4550 if (wi->type != CPUID_FEATURE_WORD) {
4551 continue;
4553 X86CPUFeatureWordInfo *qwi = &word_infos[w];
4554 qwi->cpuid_input_eax = wi->cpuid.eax;
4555 qwi->has_cpuid_input_ecx = wi->cpuid.needs_ecx;
4556 qwi->cpuid_input_ecx = wi->cpuid.ecx;
4557 qwi->cpuid_register = x86_reg_info_32[wi->cpuid.reg].qapi_enum;
4558 qwi->features = array[w];
4560 /* List will be in reverse order, but order shouldn't matter */
4561 list_entries[w].next = list;
4562 list_entries[w].value = &word_infos[w];
4563 list = &list_entries[w];
4566 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
4569 /* Convert all '_' in a feature string option name to '-', to make feature
4570 * name conform to QOM property naming rule, which uses '-' instead of '_'.
4571 */
4572 static inline void feat2prop(char *s)
4574 while ((s = strchr(s, '_'))) {
4575 *s = '-';
4579 /* Return the feature property name for a feature flag bit */
4580 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
4582 const char *name;
4583 /* XSAVE components are automatically enabled by other features,
4584 * so return the original feature name instead
4585 */
4586 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
4587 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
4589 if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
4590 x86_ext_save_areas[comp].bits) {
4591 w = x86_ext_save_areas[comp].feature;
4592 bitnr = ctz32(x86_ext_save_areas[comp].bits);
4596 assert(bitnr < 64);
4597 assert(w < FEATURE_WORDS);
4598 name = feature_word_info[w].feat_names[bitnr];
4599 assert(bitnr < 32 || !(name && feature_word_info[w].type == CPUID_FEATURE_WORD));
4600 return name;
4603 /* Compatibility hack to maintain the legacy +-feat semantics,
4604 * where +-feat overwrites any feature set by
4605 * feat=on|feat, even if the latter is parsed after +-feat
4606 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
4607 */
4608 static GList *plus_features, *minus_features;
4610 static gint compare_string(gconstpointer a, gconstpointer b)
4612 return g_strcmp0(a, b);
4615 /* Parse "+feature,-feature,feature=foo" CPU feature string
4616 */
4617 static void x86_cpu_parse_featurestr(const char *typename, char *features,
4618 Error **errp)
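/* A typical @features string, i.e. the part of the -cpu option after the
 * model name, might look like "+avx2,-hle,tsc-freq=2.5G" (illustrative
 * example). */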
4620 char *featurestr; /* Single 'key=value" string being parsed */
4621 static bool cpu_globals_initialized;
4622 bool ambiguous = false;
4624 if (cpu_globals_initialized) {
4625 return;
4627 cpu_globals_initialized = true;
4629 if (!features) {
4630 return;
4633 for (featurestr = strtok(features, ",");
4634 featurestr;
4635 featurestr = strtok(NULL, ",")) {
4636 const char *name;
4637 const char *val = NULL;
4638 char *eq = NULL;
4639 char num[32];
4640 GlobalProperty *prop;
4642 /* Compatibility syntax: */
4643 if (featurestr[0] == '+') {
4644 plus_features = g_list_append(plus_features,
4645 g_strdup(featurestr + 1));
4646 continue;
4647 } else if (featurestr[0] == '-') {
4648 minus_features = g_list_append(minus_features,
4649 g_strdup(featurestr + 1));
4650 continue;
4653 eq = strchr(featurestr, '=');
4654 if (eq) {
4655 *eq++ = 0;
4656 val = eq;
4657 } else {
4658 val = "on";
4661 feat2prop(featurestr);
4662 name = featurestr;
4664 if (g_list_find_custom(plus_features, name, compare_string)) {
4665 warn_report("Ambiguous CPU model string. "
4666 "Don't mix both \"+%s\" and \"%s=%s\"",
4667 name, name, val);
4668 ambiguous = true;
4670 if (g_list_find_custom(minus_features, name, compare_string)) {
4671 warn_report("Ambiguous CPU model string. "
4672 "Don't mix both \"-%s\" and \"%s=%s\"",
4673 name, name, val);
4674 ambiguous = true;
4677 /* Special case: */
4678 if (!strcmp(name, "tsc-freq")) {
4679 int ret;
4680 uint64_t tsc_freq;
4682 ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
4683 if (ret < 0 || tsc_freq > INT64_MAX) {
4684 error_setg(errp, "bad numerical value %s", val);
4685 return;
4687 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
4688 val = num;
4689 name = "tsc-frequency";
4692 prop = g_new0(typeof(*prop), 1);
4693 prop->driver = typename;
4694 prop->property = g_strdup(name);
4695 prop->value = g_strdup(val);
4696 qdev_prop_register_global(prop);
4699 if (ambiguous) {
4700 warn_report("Compatibility of ambiguous CPU model "
4701 "strings won't be kept on future QEMU versions");
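/*
 * Illustrative example (values not from the original source): parsing
 * "+avx2,-x2apic,tsc-freq=2.5G" above adds "avx2" to plus_features and
 * "x2apic" to minus_features, and registers a "tsc-frequency=2500000000"
 * global property for the CPU type.
 */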
4705 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
4706 static void x86_cpu_filter_features(X86CPU *cpu, bool verbose);
4708 /* Build a list with the names of all features in a feature word array */
4709 static void x86_cpu_list_feature_names(FeatureWordArray features,
4710 strList **feat_names)
4712 FeatureWord w;
4713 strList **next = feat_names;
4715 for (w = 0; w < FEATURE_WORDS; w++) {
4716 uint64_t filtered = features[w];
4717 int i;
4718 for (i = 0; i < 64; i++) {
4719 if (filtered & (1ULL << i)) {
4720 strList *new = g_new0(strList, 1);
4721 new->value = g_strdup(x86_cpu_feature_name(w, i));
4722 *next = new;
4723 next = &new->next;
4729 static void x86_cpu_get_unavailable_features(Object *obj, Visitor *v,
4730 const char *name, void *opaque,
4731 Error **errp)
4733 X86CPU *xc = X86_CPU(obj);
4734 strList *result = NULL;
4736 x86_cpu_list_feature_names(xc->filtered_features, &result);
4737 visit_type_strList(v, "unavailable-features", &result, errp);
4740 /* Check for missing features that may prevent the CPU class from
4741 * running using the current machine and accelerator.
4743 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
4744 strList **missing_feats)
4746 X86CPU *xc;
4747 Error *err = NULL;
4748 strList **next = missing_feats;
4750 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
4751 strList *new = g_new0(strList, 1);
4752 new->value = g_strdup("kvm");
4753 *missing_feats = new;
4754 return;
4757 xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc)));
4759 x86_cpu_expand_features(xc, &err);
4760 if (err) {
4761 /* Errors at x86_cpu_expand_features should never happen,
4762 * but in case they do, just report the model as not
4763 * runnable at all using the "type" property.
4765 strList *new = g_new0(strList, 1);
4766 new->value = g_strdup("type");
4767 *next = new;
4768 next = &new->next;
4771 x86_cpu_filter_features(xc, false);
4773 x86_cpu_list_feature_names(xc->filtered_features, next);
4775 object_unref(OBJECT(xc));
4778 /* Print all CPUID feature names in the feature set
4780 static void listflags(GList *features)
4782 size_t len = 0;
4783 GList *tmp;
4785 for (tmp = features; tmp; tmp = tmp->next) {
4786 const char *name = tmp->data;
4787 if ((len + strlen(name) + 1) >= 75) {
4788 qemu_printf("\n");
4789 len = 0;
4791 qemu_printf("%s%s", len == 0 ? " " : " ", name);
4792 len += strlen(name) + 1;
4794 qemu_printf("\n");
4797 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
4798 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
4800 ObjectClass *class_a = (ObjectClass *)a;
4801 ObjectClass *class_b = (ObjectClass *)b;
4802 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
4803 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
4804 int ret;
4806 if (cc_a->ordering != cc_b->ordering) {
4807 ret = cc_a->ordering - cc_b->ordering;
4808 } else {
4809 g_autofree char *name_a = x86_cpu_class_get_model_name(cc_a);
4810 g_autofree char *name_b = x86_cpu_class_get_model_name(cc_b);
4811 ret = strcmp(name_a, name_b);
4813 return ret;
4816 static GSList *get_sorted_cpu_model_list(void)
4818 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
4819 list = g_slist_sort(list, x86_cpu_list_compare);
4820 return list;
4823 static char *x86_cpu_class_get_model_id(X86CPUClass *xc)
4825 Object *obj = object_new_with_class(OBJECT_CLASS(xc));
4826 char *r = object_property_get_str(obj, "model-id", &error_abort);
4827 object_unref(obj);
4828 return r;
4831 static char *x86_cpu_class_get_alias_of(X86CPUClass *cc)
4833 X86CPUVersion version;
4835 if (!cc->model || !cc->model->is_alias) {
4836 return NULL;
4838 version = x86_cpu_model_resolve_version(cc->model);
4839 if (version <= 0) {
4840 return NULL;
4842 return x86_cpu_versioned_model_name(cc->model->cpudef, version);
4845 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
4847 ObjectClass *oc = data;
4848 X86CPUClass *cc = X86_CPU_CLASS(oc);
4849 g_autofree char *name = x86_cpu_class_get_model_name(cc);
4850 g_autofree char *desc = g_strdup(cc->model_description);
4851 g_autofree char *alias_of = x86_cpu_class_get_alias_of(cc);
4853 if (!desc && alias_of) {
4854 if (cc->model && cc->model->version == CPU_VERSION_AUTO) {
4855 desc = g_strdup("(alias configured by machine type)");
4856 } else {
4857 desc = g_strdup_printf("(alias of %s)", alias_of);
4860 if (!desc) {
4861 desc = x86_cpu_class_get_model_id(cc);
4864 qemu_printf("x86 %-20s %-48s\n", name, desc);
4867 /* list available CPU models and flags */
4868 void x86_cpu_list(void)
4870 int i, j;
4871 GSList *list;
4872 GList *names = NULL;
4874 qemu_printf("Available CPUs:\n");
4875 list = get_sorted_cpu_model_list();
4876 g_slist_foreach(list, x86_cpu_list_entry, NULL);
4877 g_slist_free(list);
4879 names = NULL;
4880 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
4881 FeatureWordInfo *fw = &feature_word_info[i];
4882 for (j = 0; j < 64; j++) {
4883 if (fw->feat_names[j]) {
4884 names = g_list_append(names, (gpointer)fw->feat_names[j]);
4889 names = g_list_sort(names, (GCompareFunc)strcmp);
4891 qemu_printf("\nRecognized CPUID flags:\n");
4892 listflags(names);
4893 qemu_printf("\n");
4894 g_list_free(names);
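/*
 * x86_cpu_list() is the target's cpu_list hook, typically reached via
 * "-cpu help".
 */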
4897 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
4899 ObjectClass *oc = data;
4900 X86CPUClass *cc = X86_CPU_CLASS(oc);
4901 CpuDefinitionInfoList **cpu_list = user_data;
4902 CpuDefinitionInfoList *entry;
4903 CpuDefinitionInfo *info;
4905 info = g_malloc0(sizeof(*info));
4906 info->name = x86_cpu_class_get_model_name(cc);
4907 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
4908 info->has_unavailable_features = true;
4909 info->q_typename = g_strdup(object_class_get_name(oc));
4910 info->migration_safe = cc->migration_safe;
4911 info->has_migration_safe = true;
4912 info->q_static = cc->static_model;
4914 * Old machine types won't report aliases, so that alias translation
4915 * doesn't break compatibility with previous QEMU versions.
4917 if (default_cpu_version != CPU_VERSION_LEGACY) {
4918 info->alias_of = x86_cpu_class_get_alias_of(cc);
4919 info->has_alias_of = !!info->alias_of;
4922 entry = g_malloc0(sizeof(*entry));
4923 entry->value = info;
4924 entry->next = *cpu_list;
4925 *cpu_list = entry;
4928 CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
4930 CpuDefinitionInfoList *cpu_list = NULL;
4931 GSList *list = get_sorted_cpu_model_list();
4932 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
4933 g_slist_free(list);
4934 return cpu_list;
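/*
 * Return the bits of feature word @w that the current accelerator
 * (KVM, HVF or TCG) can provide; when no accelerator check is possible,
 * report every bit as supported. With @migratable_only set,
 * non-migratable bits are masked out.
 */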
4937 static uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
4938 bool migratable_only)
4940 FeatureWordInfo *wi = &feature_word_info[w];
4941 uint64_t r = 0;
4943 if (kvm_enabled()) {
4944 switch (wi->type) {
4945 case CPUID_FEATURE_WORD:
4946 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid.eax,
4947 wi->cpuid.ecx,
4948 wi->cpuid.reg);
4949 break;
4950 case MSR_FEATURE_WORD:
4951 r = kvm_arch_get_supported_msr_feature(kvm_state,
4952 wi->msr.index);
4953 break;
4955 } else if (hvf_enabled()) {
4956 if (wi->type != CPUID_FEATURE_WORD) {
4957 return 0;
4959 r = hvf_get_supported_cpuid(wi->cpuid.eax,
4960 wi->cpuid.ecx,
4961 wi->cpuid.reg);
4962 } else if (tcg_enabled()) {
4963 r = wi->tcg_features;
4964 } else {
4965 return ~0;
4967 if (migratable_only) {
4968 r &= x86_cpu_get_migratable_flags(w);
4970 return r;
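/*
 * Apply a NULL-terminated array of property/value pairs to @cpu,
 * skipping entries whose value is NULL.
 */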
4973 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
4975 PropValue *pv;
4976 for (pv = props; pv->prop; pv++) {
4977 if (!pv->value) {
4978 continue;
4980 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
4981 &error_abort);
4985 /* Apply properties for the CPU model version specified in model */
4986 static void x86_cpu_apply_version_props(X86CPU *cpu, X86CPUModel *model)
4988 const X86CPUVersionDefinition *vdef;
4989 X86CPUVersion version = x86_cpu_model_resolve_version(model);
4991 if (version == CPU_VERSION_LEGACY) {
4992 return;
4995 for (vdef = x86_cpu_def_get_versions(model->cpudef); vdef->version; vdef++) {
4996 PropValue *p;
4998 for (p = vdef->props; p && p->prop; p++) {
4999 object_property_parse(OBJECT(cpu), p->value, p->prop,
5000 &error_abort);
5003 if (vdef->version == version) {
5004 break;
5009 * If we reached the end of the list, the version number was invalid
5011 assert(vdef->version == version);
5014 /* Load data from X86CPUDefinition into an X86CPU object
5016 static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model, Error **errp)
5018 X86CPUDefinition *def = model->cpudef;
5019 CPUX86State *env = &cpu->env;
5020 const char *vendor;
5021 char host_vendor[CPUID_VENDOR_SZ + 1];
5022 FeatureWord w;
5024 /* NOTE: any property set by this function should be returned by
5025 * x86_cpu_static_props(), so static expansion of
5026 * query-cpu-model-expansion is always complete.
5029 /* CPU models only set _minimum_ values for level/xlevel: */
5030 object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
5031 object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
5033 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
5034 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
5035 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
5036 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
5037 for (w = 0; w < FEATURE_WORDS; w++) {
5038 env->features[w] = def->features[w];
5041 /* legacy-cache defaults to 'off' if CPU model provides cache info */
5042 cpu->legacy_cache = !def->cache_info;
5044 /* Special cases not set in the X86CPUDefinition structs: */
5045 /* TODO: in-kernel irqchip for hvf */
5046 if (kvm_enabled()) {
5047 if (!kvm_irqchip_in_kernel()) {
5048 x86_cpu_change_kvm_default("x2apic", "off");
5051 x86_cpu_apply_props(cpu, kvm_default_props);
5052 } else if (tcg_enabled()) {
5053 x86_cpu_apply_props(cpu, tcg_default_props);
5056 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
5058 /* sysenter isn't supported in compatibility mode on AMD,
5059 * syscall isn't supported in compatibility mode on Intel.
5060 * Normally we advertise the actual CPU vendor, but you can
5061 * override this using the 'vendor' property if you want to use
5062 * KVM's sysenter/syscall emulation in compatibility mode and
5063 * when doing cross-vendor migration.
5065 vendor = def->vendor;
5066 if (accel_uses_host_cpuid()) {
5067 uint32_t ebx = 0, ecx = 0, edx = 0;
5068 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
5069 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
5070 vendor = host_vendor;
5073 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
5075 x86_cpu_apply_version_props(cpu, model);
5078 #ifndef CONFIG_USER_ONLY
5079 /* Return a QDict containing keys for all properties that can be included
5080 * in static expansion of CPU models. All properties set by x86_cpu_load_model()
5081 * must be included in the dictionary.
5083 static QDict *x86_cpu_static_props(void)
5085 FeatureWord w;
5086 int i;
5087 static const char *props[] = {
5088 "min-level",
5089 "min-xlevel",
5090 "family",
5091 "model",
5092 "stepping",
5093 "model-id",
5094 "vendor",
5095 "lmce",
5096 NULL,
5098 static QDict *d;
5100 if (d) {
5101 return d;
5104 d = qdict_new();
5105 for (i = 0; props[i]; i++) {
5106 qdict_put_null(d, props[i]);
5109 for (w = 0; w < FEATURE_WORDS; w++) {
5110 FeatureWordInfo *fi = &feature_word_info[w];
5111 int bit;
5112 for (bit = 0; bit < 64; bit++) {
5113 if (!fi->feat_names[bit]) {
5114 continue;
5116 qdict_put_null(d, fi->feat_names[bit]);
5120 return d;
5123 /* Add an entry to the @props dict, with the value of property @prop. */
5124 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
5126 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
5127 &error_abort);
5129 qdict_put_obj(props, prop, value);
5132 /* Convert CPU model data from X86CPU object to a property dictionary
5133 * that can recreate exactly the same CPU model.
5135 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
5137 QDict *sprops = x86_cpu_static_props();
5138 const QDictEntry *e;
5140 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
5141 const char *prop = qdict_entry_key(e);
5142 x86_cpu_expand_prop(cpu, props, prop);
5146 /* Convert CPU model data from X86CPU object to a property dictionary
5147 * that can recreate exactly the same CPU model, including every
5148 * writeable QOM property.
5150 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
5152 ObjectPropertyIterator iter;
5153 ObjectProperty *prop;
5155 object_property_iter_init(&iter, OBJECT(cpu));
5156 while ((prop = object_property_iter_next(&iter))) {
5157 /* skip read-only or write-only properties */
5158 if (!prop->get || !prop->set) {
5159 continue;
5162 /* "hotplugged" is the only property that is configurable
5163 * on the command-line but will be set differently on CPUs
5164 * created using "-cpu ... -smp ..." and by CPUs created
5165 * on the fly by x86_cpu_from_model() for querying. Skip it.
5167 if (!strcmp(prop->name, "hotplugged")) {
5168 continue;
5170 x86_cpu_expand_prop(cpu, props, prop->name);
5174 static void object_apply_props(Object *obj, QDict *props, Error **errp)
5176 const QDictEntry *prop;
5177 Error *err = NULL;
5179 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
5180 object_property_set_qobject(obj, qdict_entry_value(prop),
5181 qdict_entry_key(prop), &err);
5182 if (err) {
5183 break;
5187 error_propagate(errp, err);
5190 /* Create X86CPU object according to model+props specification */
5191 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
5193 X86CPU *xc = NULL;
5194 X86CPUClass *xcc;
5195 Error *err = NULL;
5197 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
5198 if (xcc == NULL) {
5199 error_setg(&err, "CPU model '%s' not found", model);
5200 goto out;
5203 xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc)));
5204 if (props) {
5205 object_apply_props(OBJECT(xc), props, &err);
5206 if (err) {
5207 goto out;
5211 x86_cpu_expand_features(xc, &err);
5212 if (err) {
5213 goto out;
5216 out:
5217 if (err) {
5218 error_propagate(errp, err);
5219 object_unref(OBJECT(xc));
5220 xc = NULL;
5222 return xc;
5225 CpuModelExpansionInfo *
5226 qmp_query_cpu_model_expansion(CpuModelExpansionType type,
5227 CpuModelInfo *model,
5228 Error **errp)
5230 X86CPU *xc = NULL;
5231 Error *err = NULL;
5232 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
5233 QDict *props = NULL;
5234 const char *base_name;
5236 xc = x86_cpu_from_model(model->name,
5237 model->has_props ?
5238 qobject_to(QDict, model->props) :
5239 NULL, &err);
5240 if (err) {
5241 goto out;
5244 props = qdict_new();
5245 ret->model = g_new0(CpuModelInfo, 1);
5246 ret->model->props = QOBJECT(props);
5247 ret->model->has_props = true;
5249 switch (type) {
5250 case CPU_MODEL_EXPANSION_TYPE_STATIC:
5251 /* Static expansion will be based on "base" only */
5252 base_name = "base";
5253 x86_cpu_to_dict(xc, props);
5254 break;
5255 case CPU_MODEL_EXPANSION_TYPE_FULL:
5256 /* As we don't return every single property, full expansion needs
5257 * to keep the original model name+props, and add extra
5258 * properties on top of that.
5260 base_name = model->name;
5261 x86_cpu_to_dict_full(xc, props);
5262 break;
5263 default:
5264 error_setg(&err, "Unsupported expansion type");
5265 goto out;
5268 x86_cpu_to_dict(xc, props);
5270 ret->model->name = g_strdup(base_name);
5272 out:
5273 object_unref(OBJECT(xc));
5274 if (err) {
5275 error_propagate(errp, err);
5276 qapi_free_CpuModelExpansionInfo(ret);
5277 ret = NULL;
5279 return ret;
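/*
 * Illustrative QMP usage (example values, not from the original source):
 *   { "execute": "query-cpu-model-expansion",
 *     "arguments": { "type": "static",
 *                    "model": { "name": "Skylake-Client" } } }
 * The reply carries a model named "base" plus the property dict built above.
 */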
5281 #endif /* !CONFIG_USER_ONLY */
5283 static gchar *x86_gdb_arch_name(CPUState *cs)
5285 #ifdef TARGET_X86_64
5286 return g_strdup("i386:x86-64");
5287 #else
5288 return g_strdup("i386");
5289 #endif
5292 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
5294 X86CPUModel *model = data;
5295 X86CPUClass *xcc = X86_CPU_CLASS(oc);
5297 xcc->model = model;
5298 xcc->migration_safe = true;
5301 static void x86_register_cpu_model_type(const char *name, X86CPUModel *model)
5303 g_autofree char *typename = x86_cpu_type_name(name);
5304 TypeInfo ti = {
5305 .name = typename,
5306 .parent = TYPE_X86_CPU,
5307 .class_init = x86_cpu_cpudef_class_init,
5308 .class_data = model,
5311 type_register(&ti);
5314 static void x86_register_cpudef_types(X86CPUDefinition *def)
5316 X86CPUModel *m;
5317 const X86CPUVersionDefinition *vdef;
5319 /* AMD aliases are handled at runtime based on CPUID vendor, so
5320 * they shouldn't be set on the CPU model table.
5322 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
5323 /* catch mistakes instead of silently truncating model_id when too long */
5324 assert(def->model_id && strlen(def->model_id) <= 48);
5326 /* Unversioned model: */
5327 m = g_new0(X86CPUModel, 1);
5328 m->cpudef = def;
5329 m->version = CPU_VERSION_AUTO;
5330 m->is_alias = true;
5331 x86_register_cpu_model_type(def->name, m);
5333 /* Versioned models: */
5335 for (vdef = x86_cpu_def_get_versions(def); vdef->version; vdef++) {
5336 X86CPUModel *m = g_new0(X86CPUModel, 1);
5337 g_autofree char *name =
5338 x86_cpu_versioned_model_name(def, vdef->version);
5339 m->cpudef = def;
5340 m->version = vdef->version;
5341 x86_register_cpu_model_type(name, m);
5343 if (vdef->alias) {
5344 X86CPUModel *am = g_new0(X86CPUModel, 1);
5345 am->cpudef = def;
5346 am->version = vdef->version;
5347 am->is_alias = true;
5348 x86_register_cpu_model_type(vdef->alias, am);
5354 #if !defined(CONFIG_USER_ONLY)
5356 void cpu_clear_apic_feature(CPUX86State *env)
5358 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
5361 #endif /* !CONFIG_USER_ONLY */
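/*
 * Compute CPUID leaf @index / sub-leaf @count for @env and return the
 * register values through @eax..@edx. Out-of-range leaves are clamped to
 * the highest valid leaf of the corresponding range, mirroring the
 * documented hardware behaviour.
 */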
5363 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
5364 uint32_t *eax, uint32_t *ebx,
5365 uint32_t *ecx, uint32_t *edx)
5367 X86CPU *cpu = env_archcpu(env);
5368 CPUState *cs = env_cpu(env);
5369 uint32_t die_offset;
5370 uint32_t limit;
5371 uint32_t signature[3];
5373 /* Calculate & apply limits for different index ranges */
5374 if (index >= 0xC0000000) {
5375 limit = env->cpuid_xlevel2;
5376 } else if (index >= 0x80000000) {
5377 limit = env->cpuid_xlevel;
5378 } else if (index >= 0x40000000) {
5379 limit = 0x40000001;
5380 } else {
5381 limit = env->cpuid_level;
5384 if (index > limit) {
5385 /* Intel documentation states that invalid EAX input will
5386 * return the same information as EAX=cpuid_level
5387 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
5389 index = env->cpuid_level;
5392 switch(index) {
5393 case 0:
5394 *eax = env->cpuid_level;
5395 *ebx = env->cpuid_vendor1;
5396 *edx = env->cpuid_vendor2;
5397 *ecx = env->cpuid_vendor3;
5398 break;
5399 case 1:
5400 *eax = env->cpuid_version;
5401 *ebx = (cpu->apic_id << 24) |
5402 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
5403 *ecx = env->features[FEAT_1_ECX];
5404 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
5405 *ecx |= CPUID_EXT_OSXSAVE;
5407 *edx = env->features[FEAT_1_EDX];
5408 if (cs->nr_cores * cs->nr_threads > 1) {
5409 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
5410 *edx |= CPUID_HT;
5412 break;
5413 case 2:
5414 /* cache info: needed for Pentium Pro compatibility */
5415 if (cpu->cache_info_passthrough) {
5416 host_cpuid(index, 0, eax, ebx, ecx, edx);
5417 break;
5419 *eax = 1; /* Number of CPUID[EAX=2] calls required */
5420 *ebx = 0;
5421 if (!cpu->enable_l3_cache) {
5422 *ecx = 0;
5423 } else {
5424 *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache);
5426 *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) |
5427 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) |
5428 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
5429 break;
5430 case 4:
5431 /* cache info: needed for Core compatibility */
5432 if (cpu->cache_info_passthrough) {
5433 host_cpuid(index, count, eax, ebx, ecx, edx);
5434 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
5435 *eax &= ~0xFC000000;
5436 if ((*eax & 31) && cs->nr_cores > 1) {
5437 *eax |= (cs->nr_cores - 1) << 26;
5439 } else {
5440 *eax = 0;
5441 switch (count) {
5442 case 0: /* L1 dcache info */
5443 encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
5444 1, cs->nr_cores,
5445 eax, ebx, ecx, edx);
5446 break;
5447 case 1: /* L1 icache info */
5448 encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
5449 1, cs->nr_cores,
5450 eax, ebx, ecx, edx);
5451 break;
5452 case 2: /* L2 cache info */
5453 encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
5454 cs->nr_threads, cs->nr_cores,
5455 eax, ebx, ecx, edx);
5456 break;
5457 case 3: /* L3 cache info */
5458 die_offset = apicid_die_offset(env->nr_dies,
5459 cs->nr_cores, cs->nr_threads);
5460 if (cpu->enable_l3_cache) {
5461 encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
5462 (1 << die_offset), cs->nr_cores,
5463 eax, ebx, ecx, edx);
5464 break;
5466 /* fall through */
5467 default: /* end of info */
5468 *eax = *ebx = *ecx = *edx = 0;
5469 break;
5472 break;
5473 case 5:
5474 /* MONITOR/MWAIT Leaf */
5475 *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */
5476 *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */
5477 *ecx = cpu->mwait.ecx; /* flags */
5478 *edx = cpu->mwait.edx; /* mwait substates */
5479 break;
5480 case 6:
5481 /* Thermal and Power Leaf */
5482 *eax = env->features[FEAT_6_EAX];
5483 *ebx = 0;
5484 *ecx = 0;
5485 *edx = 0;
5486 break;
5487 case 7:
5488 /* Structured Extended Feature Flags Enumeration Leaf */
5489 if (count == 0) {
5490 /* Maximum ECX value for sub-leaves */
5491 *eax = env->cpuid_level_func7;
5492 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
5493 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
5494 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
5495 *ecx |= CPUID_7_0_ECX_OSPKE;
5497 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
5498 } else if (count == 1) {
5499 *eax = env->features[FEAT_7_1_EAX];
5500 *ebx = 0;
5501 *ecx = 0;
5502 *edx = 0;
5503 } else {
5504 *eax = 0;
5505 *ebx = 0;
5506 *ecx = 0;
5507 *edx = 0;
5509 break;
5510 case 9:
5511 /* Direct Cache Access Information Leaf */
5512 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
5513 *ebx = 0;
5514 *ecx = 0;
5515 *edx = 0;
5516 break;
5517 case 0xA:
5518 /* Architectural Performance Monitoring Leaf */
5519 if (kvm_enabled() && cpu->enable_pmu) {
5520 KVMState *s = cs->kvm_state;
5522 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
5523 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
5524 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
5525 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
5526 } else if (hvf_enabled() && cpu->enable_pmu) {
5527 *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
5528 *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
5529 *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
5530 *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
5531 } else {
5532 *eax = 0;
5533 *ebx = 0;
5534 *ecx = 0;
5535 *edx = 0;
5537 break;
5538 case 0xB:
5539 /* Extended Topology Enumeration Leaf */
5540 if (!cpu->enable_cpuid_0xb) {
5541 *eax = *ebx = *ecx = *edx = 0;
5542 break;
5545 *ecx = count & 0xff;
5546 *edx = cpu->apic_id;
5548 switch (count) {
5549 case 0:
5550 *eax = apicid_core_offset(env->nr_dies,
5551 cs->nr_cores, cs->nr_threads);
5552 *ebx = cs->nr_threads;
5553 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
5554 break;
5555 case 1:
5556 *eax = apicid_pkg_offset(env->nr_dies,
5557 cs->nr_cores, cs->nr_threads);
5558 *ebx = cs->nr_cores * cs->nr_threads;
5559 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
5560 break;
5561 default:
5562 *eax = 0;
5563 *ebx = 0;
5564 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
5567 assert(!(*eax & ~0x1f));
5568 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
5569 break;
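/*
 * Illustrative example (values not from the original source): with
 * -smp sockets=1,dies=1,cores=4,threads=2, sub-leaf 0 above reports
 * EAX=1 (SMT shift) and EBX=2, and sub-leaf 1 reports EAX=3
 * (package shift) and EBX=8.
 */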
5570 case 0x1F:
5571 /* V2 Extended Topology Enumeration Leaf */
5572 if (env->nr_dies < 2) {
5573 *eax = *ebx = *ecx = *edx = 0;
5574 break;
5577 *ecx = count & 0xff;
5578 *edx = cpu->apic_id;
5579 switch (count) {
5580 case 0:
5581 *eax = apicid_core_offset(env->nr_dies, cs->nr_cores,
5582 cs->nr_threads);
5583 *ebx = cs->nr_threads;
5584 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
5585 break;
5586 case 1:
5587 *eax = apicid_die_offset(env->nr_dies, cs->nr_cores,
5588 cs->nr_threads);
5589 *ebx = cs->nr_cores * cs->nr_threads;
5590 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
5591 break;
5592 case 2:
5593 *eax = apicid_pkg_offset(env->nr_dies, cs->nr_cores,
5594 cs->nr_threads);
5595 *ebx = env->nr_dies * cs->nr_cores * cs->nr_threads;
5596 *ecx |= CPUID_TOPOLOGY_LEVEL_DIE;
5597 break;
5598 default:
5599 *eax = 0;
5600 *ebx = 0;
5601 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
5603 assert(!(*eax & ~0x1f));
5604 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
5605 break;
5606 case 0xD: {
5607 /* Processor Extended State */
5608 *eax = 0;
5609 *ebx = 0;
5610 *ecx = 0;
5611 *edx = 0;
5612 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
5613 break;
5616 if (count == 0) {
5617 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
5618 *eax = env->features[FEAT_XSAVE_COMP_LO];
5619 *edx = env->features[FEAT_XSAVE_COMP_HI];
5621 * The initial value of xcr0 and ebx == 0. On a host without KVM
5622 * commit 412a3c41 (e.g., CentOS 6), the value of ebx stays 0
5623 * even though the guest updates xcr0; this crashes some legacy
5624 * guests (e.g., CentOS 6). So set ebx == ecx to work around it.
5626 *ebx = kvm_enabled() ? *ecx : xsave_area_size(env->xcr0);
5627 } else if (count == 1) {
5628 *eax = env->features[FEAT_XSAVE];
5629 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
5630 if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
5631 const ExtSaveArea *esa = &x86_ext_save_areas[count];
5632 *eax = esa->size;
5633 *ebx = esa->offset;
5636 break;
5638 case 0x14: {
5639 /* Intel Processor Trace Enumeration */
5640 *eax = 0;
5641 *ebx = 0;
5642 *ecx = 0;
5643 *edx = 0;
5644 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
5645 !kvm_enabled()) {
5646 break;
5649 if (count == 0) {
5650 *eax = INTEL_PT_MAX_SUBLEAF;
5651 *ebx = INTEL_PT_MINIMAL_EBX;
5652 *ecx = INTEL_PT_MINIMAL_ECX;
5653 } else if (count == 1) {
5654 *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
5655 *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
5657 break;
5659 case 0x40000000:
5661 * The CPUID code in kvm_arch_init_vcpu() ignores anything
5662 * set here, but we restrict this to TCG nonetheless.
5664 if (tcg_enabled() && cpu->expose_tcg) {
5665 memcpy(signature, "TCGTCGTCGTCG", 12);
5666 *eax = 0x40000001;
5667 *ebx = signature[0];
5668 *ecx = signature[1];
5669 *edx = signature[2];
5670 } else {
5671 *eax = 0;
5672 *ebx = 0;
5673 *ecx = 0;
5674 *edx = 0;
5676 break;
5677 case 0x40000001:
5678 *eax = 0;
5679 *ebx = 0;
5680 *ecx = 0;
5681 *edx = 0;
5682 break;
5683 case 0x80000000:
5684 *eax = env->cpuid_xlevel;
5685 *ebx = env->cpuid_vendor1;
5686 *edx = env->cpuid_vendor2;
5687 *ecx = env->cpuid_vendor3;
5688 break;
5689 case 0x80000001:
5690 *eax = env->cpuid_version;
5691 *ebx = 0;
5692 *ecx = env->features[FEAT_8000_0001_ECX];
5693 *edx = env->features[FEAT_8000_0001_EDX];
5695 /* The Linux kernel checks for the CMPLegacy bit and
5696 * discards multiple thread information if it is set.
5697 * So don't set it here for Intel to make Linux guests happy.
5699 if (cs->nr_cores * cs->nr_threads > 1) {
5700 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
5701 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
5702 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
5703 *ecx |= 1 << 1; /* CmpLegacy bit */
5706 break;
5707 case 0x80000002:
5708 case 0x80000003:
5709 case 0x80000004:
5710 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
5711 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
5712 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
5713 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
5714 break;
5715 case 0x80000005:
5716 /* cache info (L1 cache) */
5717 if (cpu->cache_info_passthrough) {
5718 host_cpuid(index, 0, eax, ebx, ecx, edx);
5719 break;
5721 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
5722 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
5723 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
5724 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
5725 *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache);
5726 *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache);
5727 break;
5728 case 0x80000006:
5729 /* cache info (L2 cache) */
5730 if (cpu->cache_info_passthrough) {
5731 host_cpuid(index, 0, eax, ebx, ecx, edx);
5732 break;
5734 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
5735 (L2_DTLB_2M_ENTRIES << 16) | \
5736 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
5737 (L2_ITLB_2M_ENTRIES);
5738 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
5739 (L2_DTLB_4K_ENTRIES << 16) | \
5740 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
5741 (L2_ITLB_4K_ENTRIES);
5742 encode_cache_cpuid80000006(env->cache_info_amd.l2_cache,
5743 cpu->enable_l3_cache ?
5744 env->cache_info_amd.l3_cache : NULL,
5745 ecx, edx);
5746 break;
5747 case 0x80000007:
5748 *eax = 0;
5749 *ebx = 0;
5750 *ecx = 0;
5751 *edx = env->features[FEAT_8000_0007_EDX];
5752 break;
5753 case 0x80000008:
5754 /* virtual & phys address size in low 2 bytes. */
5755 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
5756 /* 64 bit processor */
5757 *eax = cpu->phys_bits; /* configurable physical bits */
5758 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
5759 *eax |= 0x00003900; /* 57 bits virtual */
5760 } else {
5761 *eax |= 0x00003000; /* 48 bits virtual */
5763 } else {
5764 *eax = cpu->phys_bits;
5766 *ebx = env->features[FEAT_8000_0008_EBX];
5767 *ecx = 0;
5768 *edx = 0;
5769 if (cs->nr_cores * cs->nr_threads > 1) {
5770 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
5772 break;
5773 case 0x8000000A:
5774 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
5775 *eax = 0x00000001; /* SVM Revision */
5776 *ebx = 0x00000010; /* nr of ASIDs */
5777 *ecx = 0;
5778 *edx = env->features[FEAT_SVM]; /* optional features */
5779 } else {
5780 *eax = 0;
5781 *ebx = 0;
5782 *ecx = 0;
5783 *edx = 0;
5785 break;
5786 case 0x8000001D:
5787 *eax = 0;
5788 if (cpu->cache_info_passthrough) {
5789 host_cpuid(index, count, eax, ebx, ecx, edx);
5790 break;
5792 switch (count) {
5793 case 0: /* L1 dcache info */
5794 encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, cs,
5795 eax, ebx, ecx, edx);
5796 break;
5797 case 1: /* L1 icache info */
5798 encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, cs,
5799 eax, ebx, ecx, edx);
5800 break;
5801 case 2: /* L2 cache info */
5802 encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, cs,
5803 eax, ebx, ecx, edx);
5804 break;
5805 case 3: /* L3 cache info */
5806 encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, cs,
5807 eax, ebx, ecx, edx);
5808 break;
5809 default: /* end of info */
5810 *eax = *ebx = *ecx = *edx = 0;
5811 break;
5813 break;
5814 case 0x8000001E:
5815 assert(cpu->core_id <= 255);
5816 encode_topo_cpuid8000001e(cs, cpu,
5817 eax, ebx, ecx, edx);
5818 break;
5819 case 0xC0000000:
5820 *eax = env->cpuid_xlevel2;
5821 *ebx = 0;
5822 *ecx = 0;
5823 *edx = 0;
5824 break;
5825 case 0xC0000001:
5826 /* Support for VIA CPU's CPUID instruction */
5827 *eax = env->cpuid_version;
5828 *ebx = 0;
5829 *ecx = 0;
5830 *edx = env->features[FEAT_C000_0001_EDX];
5831 break;
5832 case 0xC0000002:
5833 case 0xC0000003:
5834 case 0xC0000004:
5835 /* Reserved for future use, currently filled with zero */
5836 *eax = 0;
5837 *ebx = 0;
5838 *ecx = 0;
5839 *edx = 0;
5840 break;
5841 case 0x8000001F:
5842 *eax = sev_enabled() ? 0x2 : 0;
5843 *ebx = sev_get_cbit_position();
5844 *ebx |= sev_get_reduced_phys_bits() << 6;
5845 *ecx = 0;
5846 *edx = 0;
5847 break;
5848 default:
5849 /* reserved values: zero */
5850 *eax = 0;
5851 *ebx = 0;
5852 *ecx = 0;
5853 *edx = 0;
5854 break;
5858 /* CPUClass::reset() */
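/*
 * Bring the CPU to its architectural power-on/RESET state: real mode with
 * CS:IP = F000:FFF0, FPU and MXCSR at their reset defaults, MTRRs cleared,
 * and (outside user mode) the BSP flag wired to CPU 0.
 */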
5859 static void x86_cpu_reset(CPUState *s)
5861 X86CPU *cpu = X86_CPU(s);
5862 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
5863 CPUX86State *env = &cpu->env;
5864 target_ulong cr4;
5865 uint64_t xcr0;
5866 int i;
5868 xcc->parent_reset(s);
5870 memset(env, 0, offsetof(CPUX86State, end_reset_fields));
5872 env->old_exception = -1;
5874 /* init to reset state */
5876 env->hflags2 |= HF2_GIF_MASK;
5878 cpu_x86_update_cr0(env, 0x60000010);
5879 env->a20_mask = ~0x0;
5880 env->smbase = 0x30000;
5881 env->msr_smi_count = 0;
5883 env->idt.limit = 0xffff;
5884 env->gdt.limit = 0xffff;
5885 env->ldt.limit = 0xffff;
5886 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
5887 env->tr.limit = 0xffff;
5888 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
5890 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
5891 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
5892 DESC_R_MASK | DESC_A_MASK);
5893 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
5894 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
5895 DESC_A_MASK);
5896 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
5897 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
5898 DESC_A_MASK);
5899 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
5900 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
5901 DESC_A_MASK);
5902 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
5903 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
5904 DESC_A_MASK);
5905 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
5906 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
5907 DESC_A_MASK);
5909 env->eip = 0xfff0;
5910 env->regs[R_EDX] = env->cpuid_version;
5912 env->eflags = 0x2;
5914 /* FPU init */
5915 for (i = 0; i < 8; i++) {
5916 env->fptags[i] = 1;
5918 cpu_set_fpuc(env, 0x37f);
5920 env->mxcsr = 0x1f80;
5921 /* All units are in INIT state. */
5922 env->xstate_bv = 0;
5924 env->pat = 0x0007040600070406ULL;
5925 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
5926 if (env->features[FEAT_1_ECX] & CPUID_EXT_MONITOR) {
5927 env->msr_ia32_misc_enable |= MSR_IA32_MISC_ENABLE_MWAIT;
5930 memset(env->dr, 0, sizeof(env->dr));
5931 env->dr[6] = DR6_FIXED_1;
5932 env->dr[7] = DR7_FIXED_1;
5933 cpu_breakpoint_remove_all(s, BP_CPU);
5934 cpu_watchpoint_remove_all(s, BP_CPU);
5936 cr4 = 0;
5937 xcr0 = XSTATE_FP_MASK;
5939 #ifdef CONFIG_USER_ONLY
5940 /* Enable all the features for user-mode. */
5941 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
5942 xcr0 |= XSTATE_SSE_MASK;
5944 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
5945 const ExtSaveArea *esa = &x86_ext_save_areas[i];
5946 if (env->features[esa->feature] & esa->bits) {
5947 xcr0 |= 1ull << i;
5951 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
5952 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
5954 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
5955 cr4 |= CR4_FSGSBASE_MASK;
5957 #endif
5959 env->xcr0 = xcr0;
5960 cpu_x86_update_cr4(env, cr4);
5963 * SDM 11.11.5 requires:
5964 * - IA32_MTRR_DEF_TYPE MSR.E = 0
5965 * - IA32_MTRR_PHYSMASKn.V = 0
5966 * All other bits are undefined. For simplification, zero it all.
5968 env->mtrr_deftype = 0;
5969 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
5970 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
5972 env->interrupt_injected = -1;
5973 env->exception_nr = -1;
5974 env->exception_pending = 0;
5975 env->exception_injected = 0;
5976 env->exception_has_payload = false;
5977 env->exception_payload = 0;
5978 env->nmi_injected = false;
5979 #if !defined(CONFIG_USER_ONLY)
5980 /* We hard-wire the BSP to the first CPU. */
5981 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
5983 s->halted = !cpu_is_bsp(cpu);
5985 if (kvm_enabled()) {
5986 kvm_arch_reset_vcpu(cpu);
5988 else if (hvf_enabled()) {
5989 hvf_reset_vcpu(s);
5991 #endif
5994 #ifndef CONFIG_USER_ONLY
5995 bool cpu_is_bsp(X86CPU *cpu)
5997 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
6000 /* TODO: remove me, when reset over QOM tree is implemented */
6001 static void x86_cpu_machine_reset_cb(void *opaque)
6003 X86CPU *cpu = opaque;
6004 cpu_reset(CPU(cpu));
6006 #endif
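/*
 * Initialize the Machine Check Architecture state: for CPUs with
 * family >= 6 that advertise both MCE and MCA, expose MCE_BANKS_DEF banks
 * and, when enable_lmce is set, the LMCE capability.
 */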
6008 static void mce_init(X86CPU *cpu)
6010 CPUX86State *cenv = &cpu->env;
6011 unsigned int bank;
6013 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
6014 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
6015 (CPUID_MCE | CPUID_MCA)) {
6016 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
6017 (cpu->enable_lmce ? MCG_LMCE_P : 0);
6018 cenv->mcg_ctl = ~(uint64_t)0;
6019 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
6020 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
6025 #ifndef CONFIG_USER_ONLY
6026 APICCommonClass *apic_get_class(void)
6028 const char *apic_type = "apic";
6030 /* TODO: in-kernel irqchip for hvf */
6031 if (kvm_apic_in_kernel()) {
6032 apic_type = "kvm-apic";
6033 } else if (xen_enabled()) {
6034 apic_type = "xen-apic";
6037 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
6040 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
6042 APICCommonState *apic;
6043 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());
6045 cpu->apic_state = DEVICE(object_new_with_class(apic_class));
6047 object_property_add_child(OBJECT(cpu), "lapic",
6048 OBJECT(cpu->apic_state), &error_abort);
6049 object_unref(OBJECT(cpu->apic_state));
6051 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
6052 /* TODO: convert to link<> */
6053 apic = APIC_COMMON(cpu->apic_state);
6054 apic->cpu = cpu;
6055 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
6058 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
6060 APICCommonState *apic;
6061 static bool apic_mmio_map_once;
6063 if (cpu->apic_state == NULL) {
6064 return;
6066 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
6067 errp);
6069 /* Map APIC MMIO area */
6070 apic = APIC_COMMON(cpu->apic_state);
6071 if (!apic_mmio_map_once) {
6072 memory_region_add_subregion_overlap(get_system_memory(),
6073 apic->apicbase &
6074 MSR_IA32_APICBASE_BASE,
6075 &apic->io_memory,
6076 0x1000);
6077 apic_mmio_map_once = true;
6081 static void x86_cpu_machine_done(Notifier *n, void *unused)
6083 X86CPU *cpu = container_of(n, X86CPU, machine_done);
6084 MemoryRegion *smram =
6085 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
6087 if (smram) {
6088 cpu->smram = g_new(MemoryRegion, 1);
6089 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
6090 smram, 0, 1ull << 32);
6091 memory_region_set_enabled(cpu->smram, true);
6092 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
6095 #else
6096 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
6099 #endif
6101 /* Note: Only safe for use on x86(-64) hosts */
6102 static uint32_t x86_host_phys_bits(void)
6104 uint32_t eax;
6105 uint32_t host_phys_bits;
6107 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
6108 if (eax >= 0x80000008) {
6109 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
6110 /* Note: According to AMD doc 25481 rev 2.34 there is a field
6111 * at bits 23:16 that can specify the maximum physical address bits
6112 * for the guest, which can override this value; but I've not seen
6113 * anything with that set.
6115 host_phys_bits = eax & 0xff;
6116 } else {
6117 /* It's an odd 64-bit machine that doesn't have the leaf for
6118 * physical address bits; fall back to 36, which matches most
6119 * older Intel CPUs.
6121 host_phys_bits = 36;
6124 return host_phys_bits;
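/* Raise *min to at least @value. */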
6127 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
6129 if (*min < value) {
6130 *min = value;
6134 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
6135 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
6137 CPUX86State *env = &cpu->env;
6138 FeatureWordInfo *fi = &feature_word_info[w];
6139 uint32_t eax = fi->cpuid.eax;
6140 uint32_t region = eax & 0xF0000000;
6142 assert(feature_word_info[w].type == CPUID_FEATURE_WORD);
6143 if (!env->features[w]) {
6144 return;
6147 switch (region) {
6148 case 0x00000000:
6149 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
6150 break;
6151 case 0x80000000:
6152 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
6153 break;
6154 case 0xC0000000:
6155 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
6156 break;
6159 if (eax == 7) {
6160 x86_cpu_adjust_level(cpu, &env->cpuid_min_level_func7,
6161 fi->cpuid.ecx);
6165 /* Calculate XSAVE components based on the configured CPU feature flags */
6166 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
6168 CPUX86State *env = &cpu->env;
6169 int i;
6170 uint64_t mask;
6172 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
6173 return;
6176 mask = 0;
6177 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
6178 const ExtSaveArea *esa = &x86_ext_save_areas[i];
6179 if (env->features[esa->feature] & esa->bits) {
6180 mask |= (1ULL << i);
6184 env->features[FEAT_XSAVE_COMP_LO] = mask;
6185 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
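/*
 * Illustrative example (values not from the original source): with x87,
 * SSE and AVX enabled, the loop above yields mask == 0x7, i.e.
 * XSTATE_FP_MASK | XSTATE_SSE_MASK | XSTATE_YMM_MASK.
 */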
6188 /***** Steps involved in loading and filtering CPUID data
6190 * When initializing and realizing a CPU object, the steps
6191 * involved in setting up CPUID data are:
6193 * 1) Loading CPU model definition (X86CPUDefinition). This is
6194 * implemented by x86_cpu_load_model() and should be completely
6195 * transparent, as it is done automatically by instance_init.
6196 * No code should need to look at X86CPUDefinition structs
6197 * outside instance_init.
6199 * 2) CPU expansion. This is done by realize before CPUID
6200 * filtering, and will make sure host/accelerator data is
6201 * loaded for CPU models that depend on host capabilities
6202 * (e.g. "host"). Done by x86_cpu_expand_features().
6204 * 3) CPUID filtering. This initializes extra data related to
6205 * CPUID, and checks if the host supports all capabilities
6206 * required by the CPU. Runnability of a CPU model is
6207 * determined at this step. Done by x86_cpu_filter_features().
6209 * Some operations don't require all steps to be performed.
6210 * More precisely:
6212 * - CPU instance creation (instance_init) will run only CPU
6213 * model loading. CPU expansion can't run at instance_init time
6214 * because host/accelerator data may not be available yet.
6215 * - CPU realization will perform both CPU model expansion and CPUID
6216 * filtering, and return an error in case one of them fails.
6217 * - query-cpu-definitions needs to run all 3 steps. It needs
6218 * to run CPUID filtering, as the 'unavailable-features'
6219 * field is set based on the filtering results.
6220 * - The query-cpu-model-expansion QMP command only needs to run
6221 * CPU model loading and CPU expansion. It should not filter
6222 * any CPUID data based on host capabilities.
6225 /* Expand CPU configuration data, based on configured features
6226 * and host/accelerator capabilities when appropriate.
6228 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
6230 CPUX86State *env = &cpu->env;
6231 FeatureWord w;
6232 int i;
6233 GList *l;
6234 Error *local_err = NULL;
6236 for (l = plus_features; l; l = l->next) {
6237 const char *prop = l->data;
6238 object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
6239 if (local_err) {
6240 goto out;
6244 for (l = minus_features; l; l = l->next) {
6245 const char *prop = l->data;
6246 object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
6247 if (local_err) {
6248 goto out;
6252 /* TODO: Now cpu->max_features doesn't overwrite features
6253 * set using QOM properties, and we can convert
6254 * plus_features & minus_features to global properties
6255 * inside x86_cpu_parse_featurestr() too.
6257 if (cpu->max_features) {
6258 for (w = 0; w < FEATURE_WORDS; w++) {
6259 /* Override only features that weren't set explicitly
6260 * by the user.
6262 env->features[w] |=
6263 x86_cpu_get_supported_feature_word(w, cpu->migratable) &
6264 ~env->user_features[w] & \
6265 ~feature_word_info[w].no_autoenable_flags;
6269 for (i = 0; i < ARRAY_SIZE(feature_dependencies); i++) {
6270 FeatureDep *d = &feature_dependencies[i];
6271 if (!(env->features[d->from.index] & d->from.mask)) {
6272 uint64_t unavailable_features = env->features[d->to.index] & d->to.mask;
6274 /* Not an error unless the dependent feature was added explicitly. */
6275 mark_unavailable_features(cpu, d->to.index,
6276 unavailable_features & env->user_features[d->to.index],
6277 "This feature depends on other features that were not requested");
6279 env->user_features[d->to.index] |= unavailable_features;
6280 env->features[d->to.index] &= ~unavailable_features;
6284 if (!kvm_enabled() || !cpu->expose_kvm) {
6285 env->features[FEAT_KVM] = 0;
6288 x86_cpu_enable_xsave_components(cpu);
6290 /* CPUID[EAX=7,ECX=0].EBX always increases the level automatically: */
6291 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
6292 if (cpu->full_cpuid_auto_level) {
6293 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
6294 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
6295 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
6296 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
6297 x86_cpu_adjust_feat_level(cpu, FEAT_7_1_EAX);
6298 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
6299 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
6300 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
6301 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
6302 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
6303 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
6304 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
6306 /* Intel Processor Trace requires CPUID[0x14] */
6307 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
6308 kvm_enabled() && cpu->intel_pt_auto_level) {
6309 x86_cpu_adjust_level(cpu, &cpu->env.cpuid_min_level, 0x14);
6312 /* CPU topology with multi-dies support requires CPUID[0x1F] */
6313 if (env->nr_dies > 1) {
6314 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x1F);
6317 /* SVM requires CPUID[0x8000000A] */
6318 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
6319 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
6322 /* SEV requires CPUID[0x8000001F] */
6323 if (sev_enabled()) {
6324 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
6328 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
6329 if (env->cpuid_level_func7 == UINT32_MAX) {
6330 env->cpuid_level_func7 = env->cpuid_min_level_func7;
6332 if (env->cpuid_level == UINT32_MAX) {
6333 env->cpuid_level = env->cpuid_min_level;
6335 if (env->cpuid_xlevel == UINT32_MAX) {
6336 env->cpuid_xlevel = env->cpuid_min_xlevel;
6338 if (env->cpuid_xlevel2 == UINT32_MAX) {
6339 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
6342 out:
6343 if (local_err != NULL) {
6344 error_propagate(errp, local_err);
6349 * Finishes initialization of CPUID data, filters CPU feature
6350 * words based on host availability of each feature.
6352 * Unsupported flags are marked as filtered/unavailable on the CPU.
6354 static void x86_cpu_filter_features(X86CPU *cpu, bool verbose)
6356 CPUX86State *env = &cpu->env;
6357 FeatureWord w;
6358 const char *prefix = NULL;
6360 if (verbose) {
6361 prefix = accel_uses_host_cpuid()
6362 ? "host doesn't support requested feature"
6363 : "TCG doesn't support requested feature";
6366 for (w = 0; w < FEATURE_WORDS; w++) {
6367 uint64_t host_feat =
6368 x86_cpu_get_supported_feature_word(w, false);
6369 uint64_t requested_features = env->features[w];
6370 uint64_t unavailable_features = requested_features & ~host_feat;
6371 mark_unavailable_features(cpu, w, unavailable_features, prefix);
6374 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
6375 kvm_enabled()) {
6376 KVMState *s = CPU(cpu)->kvm_state;
6377 uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
6378 uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
6379 uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
6380 uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
6381 uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);
6383 if (!eax_0 ||
6384 ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
6385 ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
6386 ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
6387 ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
6388 INTEL_PT_ADDR_RANGES_NUM) ||
6389 ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
6390 (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
6391 (ecx_0 & INTEL_PT_IP_LIP)) {
6393 * Processor Trace capabilities aren't configurable, so if the
6394 * host can't emulate the capabilities we report on
6395 * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
6397 mark_unavailable_features(cpu, FEAT_7_0_EBX, CPUID_7_0_EBX_INTEL_PT, prefix);
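/*
 * Realize an X86CPU: expand and filter the feature set, validate
 * phys-bits, set up cache info, create the APIC and (for TCG) the
 * per-CPU address spaces, then chain to the parent realize.
 */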
6402 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
6404 CPUState *cs = CPU(dev);
6405 X86CPU *cpu = X86_CPU(dev);
6406 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
6407 CPUX86State *env = &cpu->env;
6408 Error *local_err = NULL;
6409 static bool ht_warned;
6411 if (xcc->host_cpuid_required) {
6412 if (!accel_uses_host_cpuid()) {
6413 g_autofree char *name = x86_cpu_class_get_model_name(xcc);
6414 error_setg(&local_err, "CPU model '%s' requires KVM", name);
6415 goto out;
6418 if (enable_cpu_pm) {
6419 host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx,
6420 &cpu->mwait.ecx, &cpu->mwait.edx);
6421 env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR;
6423 if (kvm_enabled() && cpu->ucode_rev == 0) {
6424 cpu->ucode_rev = kvm_arch_get_supported_msr_feature(kvm_state,
6425 MSR_IA32_UCODE_REV);
6429 if (cpu->ucode_rev == 0) {
6430 /* The default is the same as KVM's. */
6431 if (IS_AMD_CPU(env)) {
6432 cpu->ucode_rev = 0x01000065;
6433 } else {
6434 cpu->ucode_rev = 0x100000000ULL;
6438 /* mwait extended info: needed for Core compatibility */
6439 /* We always wake on interrupt even if the host does not have the capability */
6440 cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
6442 if (cpu->apic_id == UNASSIGNED_APIC_ID) {
6443 error_setg(errp, "apic-id property was not initialized properly");
6444 return;
6447 x86_cpu_expand_features(cpu, &local_err);
6448 if (local_err) {
6449 goto out;
6452 x86_cpu_filter_features(cpu, cpu->check_cpuid || cpu->enforce_cpuid);
6454 if (cpu->enforce_cpuid && x86_cpu_have_filtered_features(cpu)) {
6455 error_setg(&local_err,
6456 accel_uses_host_cpuid() ?
6457 "Host doesn't support requested features" :
6458 "TCG doesn't support requested features");
6459 goto out;
6462 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
6463 * CPUID[1].EDX.
6465 if (IS_AMD_CPU(env)) {
6466 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
6467 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
6468 & CPUID_EXT2_AMD_ALIASES);
6471 /* For 64-bit systems think about the number of physical bits to present.
6472 * Ideally this should be the same as the host; anything other than matching
6473 * the host can cause incorrect guest behaviour.
6474 * QEMU used to pick the magic value of 40 bits, which corresponds to
6475 * consumer AMD devices but little else.
6477 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
6478 if (accel_uses_host_cpuid()) {
6479 uint32_t host_phys_bits = x86_host_phys_bits();
6480 static bool warned;
6482 /* Print a warning if the user set it to a value that's not the
6483 * host value.
6485 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
6486 !warned) {
6487 warn_report("Host physical bits (%u)"
6488 " does not match phys-bits property (%u)",
6489 host_phys_bits, cpu->phys_bits);
6490 warned = true;
6493 if (cpu->host_phys_bits) {
6494 /* The user asked for us to use the host physical bits */
6495 cpu->phys_bits = host_phys_bits;
6496 if (cpu->host_phys_bits_limit &&
6497 cpu->phys_bits > cpu->host_phys_bits_limit) {
6498 cpu->phys_bits = cpu->host_phys_bits_limit;
6502 if (cpu->phys_bits &&
6503 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
6504 cpu->phys_bits < 32)) {
6505 error_setg(errp, "phys-bits should be between 32 and %u "
6506 " (but is %u)",
6507 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
6508 return;
6510 } else {
6511 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
6512 error_setg(errp, "TCG only supports phys-bits=%u",
6513 TCG_PHYS_ADDR_BITS);
6514 return;
6517 /* 0 means it was not explicitly set by the user (or by machine
6518 * compat_props or by the host code above). In this case, the default
6519 * is the value used by TCG (40).
6521 if (cpu->phys_bits == 0) {
6522 cpu->phys_bits = TCG_PHYS_ADDR_BITS;
6524 } else {
6525 /* For 32-bit systems don't use the user-set value, but keep
6526 * phys_bits consistent with what we tell the guest.
6528 if (cpu->phys_bits != 0) {
6529 error_setg(errp, "phys-bits is not user-configurable in 32 bit");
6530 return;
6533 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
6534 cpu->phys_bits = 36;
6535 } else {
6536 cpu->phys_bits = 32;
6540 /* Cache information initialization */
6541 if (!cpu->legacy_cache) {
6542 if (!xcc->model || !xcc->model->cpudef->cache_info) {
6543 g_autofree char *name = x86_cpu_class_get_model_name(xcc);
6544 error_setg(errp,
6545 "CPU model '%s' doesn't support legacy-cache=off", name);
6546 return;
6548 env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
6549 *xcc->model->cpudef->cache_info;
6550 } else {
6551 /* Build legacy cache information */
6552 env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
6553 env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
6554 env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
6555 env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;
6557 env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
6558 env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
6559 env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
6560 env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;
6562 env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
6563 env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
6564 env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
6565 env->cache_info_amd.l3_cache = &legacy_l3_cache;
6569 cpu_exec_realizefn(cs, &local_err);
6570 if (local_err != NULL) {
6571 error_propagate(errp, local_err);
6572 return;
6575 #ifndef CONFIG_USER_ONLY
6576 MachineState *ms = MACHINE(qdev_get_machine());
6577 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
6579 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || ms->smp.cpus > 1) {
6580 x86_cpu_apic_create(cpu, &local_err);
6581 if (local_err != NULL) {
6582 goto out;
6585 #endif
6587 mce_init(cpu);
6589 #ifndef CONFIG_USER_ONLY
6590 if (tcg_enabled()) {
6591 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
6592 cpu->cpu_as_root = g_new(MemoryRegion, 1);
6594 /* Outer container... */
6595 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
6596 memory_region_set_enabled(cpu->cpu_as_root, true);
6598 /* ... with two regions inside: normal system memory with low
6599 * priority, and...
6601 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
6602 get_system_memory(), 0, ~0ull);
6603 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
6604 memory_region_set_enabled(cpu->cpu_as_mem, true);
6606 cs->num_ases = 2;
6607 cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
6608 cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);
6610 /* ... SMRAM with higher priority, linked from /machine/smram. */
6611 cpu->machine_done.notify = x86_cpu_machine_done;
6612 qemu_add_machine_init_done_notifier(&cpu->machine_done);
6614 #endif
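/*
 * Resulting layout (TCG softmmu only): address space 0 ("cpu-memory") is
 * plain system memory, while address space 1 ("cpu-smm") is cpu_as_root,
 * into which the machine-done notifier later maps /machine/smram at higher
 * priority so that SMM code sees SMRAM overlaid on top of normal RAM.
 */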
6616 qemu_init_vcpu(cs);
6619 * Most Intel and certain AMD CPUs support hyperthreading, but some AMD
6620 * families do not. Even though QEMU compensates by adjusting
6621 * CPUID_0000_0001_EBX and CPUID_8000_0008_ECX based on the requested
6622 * topology (sockets, cores, threads), it is still better to give users a warning.
6624 * NOTE: the following code has to run after qemu_init_vcpu(). Otherwise
6625 * cs->nr_threads has not been populated yet and the check is incorrect.
6627 if (IS_AMD_CPU(env) &&
6628 !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) &&
6629 cs->nr_threads > 1 && !ht_warned) {
6630 warn_report("This family of AMD CPU doesn't support "
6631 "hyperthreading(%d)",
6632 cs->nr_threads);
6633 error_printf("Please configure -smp options properly"
6634 " or try enabling topoext feature.\n");
6635 ht_warned = true;
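/*
 * Illustrative example: "-cpu Opteron_G5 -smp 4,sockets=1,cores=2,threads=2"
 * triggers this warning because the model lacks TOPOEXT, while an EPYC
 * model (which exposes CPUID_EXT3_TOPOEXT) or a threads=1 topology does not.
 */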
6638 x86_cpu_apic_realize(cpu, &local_err);
6639 if (local_err != NULL) {
6640 goto out;
6642 cpu_reset(cs);
6644 xcc->parent_realize(dev, &local_err);
6646 out:
6647 if (local_err != NULL) {
6648 error_propagate(errp, local_err);
6649 return;
6653 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
6655 X86CPU *cpu = X86_CPU(dev);
6656 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
6657 Error *local_err = NULL;
6659 #ifndef CONFIG_USER_ONLY
6660 cpu_remove_sync(CPU(dev));
6661 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
6662 #endif
6664 if (cpu->apic_state) {
6665 object_unparent(OBJECT(cpu->apic_state));
6666 cpu->apic_state = NULL;
6669 xcc->parent_unrealize(dev, &local_err);
6670 if (local_err != NULL) {
6671 error_propagate(errp, local_err);
6672 return;
6676 typedef struct BitProperty {
6677 FeatureWord w;
6678 uint64_t mask;
6679 } BitProperty;
6681 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
6682 void *opaque, Error **errp)
6684 X86CPU *cpu = X86_CPU(obj);
6685 BitProperty *fp = opaque;
6686 uint64_t f = cpu->env.features[fp->w];
6687 bool value = (f & fp->mask) == fp->mask;
6688 visit_type_bool(v, name, &value, errp);
6691 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
6692 void *opaque, Error **errp)
6694 DeviceState *dev = DEVICE(obj);
6695 X86CPU *cpu = X86_CPU(obj);
6696 BitProperty *fp = opaque;
6697 Error *local_err = NULL;
6698 bool value;
6700 if (dev->realized) {
6701 qdev_prop_set_after_realize(dev, name, errp);
6702 return;
6705 visit_type_bool(v, name, &value, &local_err);
6706 if (local_err) {
6707 error_propagate(errp, local_err);
6708 return;
6711 if (value) {
6712 cpu->env.features[fp->w] |= fp->mask;
6713 } else {
6714 cpu->env.features[fp->w] &= ~fp->mask;
6716 cpu->env.user_features[fp->w] |= fp->mask;
6719 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
6720 void *opaque)
6722 BitProperty *prop = opaque;
6723 g_free(prop);
6726 /* Register a boolean property to get/set a single bit in a uint64_t field.
6728 * The same property name can be registered multiple times to make it affect
6729 * multiple bits in the same FeatureWord. In that case, the getter will return
6730 * true only if all bits are set.
6732 static void x86_cpu_register_bit_prop(X86CPU *cpu,
6733 const char *prop_name,
6734 FeatureWord w,
6735 int bitnr)
6737 BitProperty *fp;
6738 ObjectProperty *op;
6739 uint64_t mask = (1ULL << bitnr);
6741 op = object_property_find(OBJECT(cpu), prop_name, NULL);
6742 if (op) {
6743 fp = op->opaque;
6744 assert(fp->w == w);
6745 fp->mask |= mask;
6746 } else {
6747 fp = g_new0(BitProperty, 1);
6748 fp->w = w;
6749 fp->mask = mask;
6750 object_property_add(OBJECT(cpu), prop_name, "bool",
6751 x86_cpu_get_bit_prop,
6752 x86_cpu_set_bit_prop,
6753 x86_cpu_release_bit_prop, fp, &error_abort);
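/*
 * Example (illustrative): "-cpu qemu64,avx2=on" (or the legacy "+avx2"
 * syntax) sets the "avx2" bool property registered here, which ORs the
 * corresponding mask into env->features[FEAT_7_0_EBX] and records it in
 * user_features so later filtering knows the bit was explicitly requested.
 */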
6757 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
6758 FeatureWord w,
6759 int bitnr)
6761 FeatureWordInfo *fi = &feature_word_info[w];
6762 const char *name = fi->feat_names[bitnr];
6764 if (!name) {
6765 return;
6768 /* Property names should use "-" instead of "_".
6769 * Old names containing underscores are registered as aliases
6770 * using object_property_add_alias()
6772 assert(!strchr(name, '_'));
6773 /* aliases don't use "|" delimiters anymore; they are registered
6774 * manually using object_property_add_alias() */
6775 assert(!strchr(name, '|'));
6776 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
6779 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
6781 X86CPU *cpu = X86_CPU(cs);
6782 CPUX86State *env = &cpu->env;
6783 GuestPanicInformation *panic_info = NULL;
6785 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
6786 panic_info = g_malloc0(sizeof(GuestPanicInformation));
6788 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
6790 assert(HV_CRASH_PARAMS >= 5);
6791 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
6792 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
6793 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
6794 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
6795 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
6798 return panic_info;
6800 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
6801 const char *name, void *opaque,
6802 Error **errp)
6804 CPUState *cs = CPU(obj);
6805 GuestPanicInformation *panic_info;
6807 if (!cs->crash_occurred) {
6808 error_setg(errp, "No crash occured");
6809 return;
6812 panic_info = x86_cpu_get_crash_info(cs);
6813 if (panic_info == NULL) {
6814 error_setg(errp, "No crash information");
6815 return;
6818 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
6819 errp);
6820 qapi_free_GuestPanicInformation(panic_info);
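/*
 * Usage sketch (the QOM path is an assumption and depends on the machine
 * configuration): with hv-crash=on, the panic details can be read back over
 * QMP after a guest crash with something like
 *
 *   { "execute": "qom-get",
 *     "arguments": { "path": "/machine/unattached/device[0]",
 *                    "property": "crash-information" } }
 */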
6823 static void x86_cpu_initfn(Object *obj)
6825 X86CPU *cpu = X86_CPU(obj);
6826 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
6827 CPUX86State *env = &cpu->env;
6828 FeatureWord w;
6830 env->nr_dies = 1;
6831 cpu_set_cpustate_pointers(cpu);
6833 object_property_add(obj, "family", "int",
6834 x86_cpuid_version_get_family,
6835 x86_cpuid_version_set_family, NULL, NULL, NULL);
6836 object_property_add(obj, "model", "int",
6837 x86_cpuid_version_get_model,
6838 x86_cpuid_version_set_model, NULL, NULL, NULL);
6839 object_property_add(obj, "stepping", "int",
6840 x86_cpuid_version_get_stepping,
6841 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
6842 object_property_add_str(obj, "vendor",
6843 x86_cpuid_get_vendor,
6844 x86_cpuid_set_vendor, NULL);
6845 object_property_add_str(obj, "model-id",
6846 x86_cpuid_get_model_id,
6847 x86_cpuid_set_model_id, NULL);
6848 object_property_add(obj, "tsc-frequency", "int",
6849 x86_cpuid_get_tsc_freq,
6850 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
6851 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
6852 x86_cpu_get_feature_words,
6853 NULL, NULL, (void *)env->features, NULL);
6854 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
6855 x86_cpu_get_feature_words,
6856 NULL, NULL, (void *)cpu->filtered_features, NULL);
6858 * The "unavailable-features" property has the same semantics as
6859 * CpuDefinitionInfo.unavailable-features on the "query-cpu-definitions"
6860 * QMP command: they list the features that would have prevented the
6861 * CPU from running if the "enforce" flag was set.
6863 object_property_add(obj, "unavailable-features", "strList",
6864 x86_cpu_get_unavailable_features,
6865 NULL, NULL, NULL, &error_abort);
6867 object_property_add(obj, "crash-information", "GuestPanicInformation",
6868 x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);
6870 for (w = 0; w < FEATURE_WORDS; w++) {
6871 int bitnr;
6873 for (bitnr = 0; bitnr < 64; bitnr++) {
6874 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
6878 object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
6879 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
6880 object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
6881 object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
6882 object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
6883 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
6884 object_property_add_alias(obj, "i64", obj, "lm", &error_abort);
6886 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
6887 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
6888 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
6889 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
6890 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
6891 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
6892 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
6893 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
6894 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
6895 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
6896 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
6897 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
6898 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
6899 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
6900 object_property_add_alias(obj, "kvm_poll_control", obj, "kvm-poll-control",
6901 &error_abort);
6902 object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
6903 object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
6904 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
6905 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
6906 object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
6907 object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
6908 object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);
6910 if (xcc->model) {
6911 x86_cpu_load_model(cpu, xcc->model, &error_abort);
6915 static int64_t x86_cpu_get_arch_id(CPUState *cs)
6917 X86CPU *cpu = X86_CPU(cs);
6919 return cpu->apic_id;
6922 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
6924 X86CPU *cpu = X86_CPU(cs);
6926 return cpu->env.cr[0] & CR0_PG_MASK;
6929 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
6931 X86CPU *cpu = X86_CPU(cs);
6933 cpu->env.eip = value;
6936 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
6938 X86CPU *cpu = X86_CPU(cs);
6940 cpu->env.eip = tb->pc - tb->cs_base;
6943 int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
6945 X86CPU *cpu = X86_CPU(cs);
6946 CPUX86State *env = &cpu->env;
6948 #if !defined(CONFIG_USER_ONLY)
6949 if (interrupt_request & CPU_INTERRUPT_POLL) {
6950 return CPU_INTERRUPT_POLL;
6952 #endif
6953 if (interrupt_request & CPU_INTERRUPT_SIPI) {
6954 return CPU_INTERRUPT_SIPI;
6957 if (env->hflags2 & HF2_GIF_MASK) {
6958 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
6959 !(env->hflags & HF_SMM_MASK)) {
6960 return CPU_INTERRUPT_SMI;
6961 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
6962 !(env->hflags2 & HF2_NMI_MASK)) {
6963 return CPU_INTERRUPT_NMI;
6964 } else if (interrupt_request & CPU_INTERRUPT_MCE) {
6965 return CPU_INTERRUPT_MCE;
6966 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
6967 (((env->hflags2 & HF2_VINTR_MASK) &&
6968 (env->hflags2 & HF2_HIF_MASK)) ||
6969 (!(env->hflags2 & HF2_VINTR_MASK) &&
6970 (env->eflags & IF_MASK &&
6971 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
6972 return CPU_INTERRUPT_HARD;
6973 #if !defined(CONFIG_USER_ONLY)
6974 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
6975 (env->eflags & IF_MASK) &&
6976 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
6977 return CPU_INTERRUPT_VIRQ;
6978 #endif
6982 return 0;
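/*
 * The checks above encode the effective priority: POLL, then SIPI, then
 * (when GIF is set) SMI, NMI, MCE, external interrupts and finally virtual
 * interrupts (VIRQ).
 */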
6985 static bool x86_cpu_has_work(CPUState *cs)
6987 return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
6990 static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
6992 X86CPU *cpu = X86_CPU(cs);
6993 CPUX86State *env = &cpu->env;
6995 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
6996 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
6997 : bfd_mach_i386_i8086);
6998 info->print_insn = print_insn_i386;
7000 info->cap_arch = CS_ARCH_X86;
7001 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
7002 : env->hflags & HF_CS32_MASK ? CS_MODE_32
7003 : CS_MODE_16);
7004 info->cap_insn_unit = 1;
7005 info->cap_insn_split = 8;
7008 void x86_update_hflags(CPUX86State *env)
7010 uint32_t hflags;
7011 #define HFLAG_COPY_MASK \
7012 ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
7013 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
7014 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
7015 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
7017 hflags = env->hflags & HFLAG_COPY_MASK;
7018 hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
7019 hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
7020 hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
7021 (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
7022 hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
7024 if (env->cr[4] & CR4_OSFXSR_MASK) {
7025 hflags |= HF_OSFXSR_MASK;
7028 if (env->efer & MSR_EFER_LMA) {
7029 hflags |= HF_LMA_MASK;
7032 if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
7033 hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
7034 } else {
7035 hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
7036 (DESC_B_SHIFT - HF_CS32_SHIFT);
7037 hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
7038 (DESC_B_SHIFT - HF_SS32_SHIFT);
7039 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
7040 !(hflags & HF_CS32_MASK)) {
7041 hflags |= HF_ADDSEG_MASK;
7042 } else {
7043 hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
7044 env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
7047 env->hflags = hflags;
7050 static Property x86_cpu_properties[] = {
7051 #ifdef CONFIG_USER_ONLY
7052 /* apic_id = 0 by default for *-user, see commit 9886e834 */
7053 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
7054 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
7055 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
7056 DEFINE_PROP_INT32("die-id", X86CPU, die_id, 0),
7057 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
7058 #else
7059 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
7060 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
7061 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
7062 DEFINE_PROP_INT32("die-id", X86CPU, die_id, -1),
7063 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
7064 #endif
7065 DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
7066 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
7068 DEFINE_PROP_UINT32("hv-spinlocks", X86CPU, hyperv_spinlock_attempts,
7069 HYPERV_SPINLOCK_NEVER_RETRY),
7070 DEFINE_PROP_BIT64("hv-relaxed", X86CPU, hyperv_features,
7071 HYPERV_FEAT_RELAXED, 0),
7072 DEFINE_PROP_BIT64("hv-vapic", X86CPU, hyperv_features,
7073 HYPERV_FEAT_VAPIC, 0),
7074 DEFINE_PROP_BIT64("hv-time", X86CPU, hyperv_features,
7075 HYPERV_FEAT_TIME, 0),
7076 DEFINE_PROP_BIT64("hv-crash", X86CPU, hyperv_features,
7077 HYPERV_FEAT_CRASH, 0),
7078 DEFINE_PROP_BIT64("hv-reset", X86CPU, hyperv_features,
7079 HYPERV_FEAT_RESET, 0),
7080 DEFINE_PROP_BIT64("hv-vpindex", X86CPU, hyperv_features,
7081 HYPERV_FEAT_VPINDEX, 0),
7082 DEFINE_PROP_BIT64("hv-runtime", X86CPU, hyperv_features,
7083 HYPERV_FEAT_RUNTIME, 0),
7084 DEFINE_PROP_BIT64("hv-synic", X86CPU, hyperv_features,
7085 HYPERV_FEAT_SYNIC, 0),
7086 DEFINE_PROP_BIT64("hv-stimer", X86CPU, hyperv_features,
7087 HYPERV_FEAT_STIMER, 0),
7088 DEFINE_PROP_BIT64("hv-frequencies", X86CPU, hyperv_features,
7089 HYPERV_FEAT_FREQUENCIES, 0),
7090 DEFINE_PROP_BIT64("hv-reenlightenment", X86CPU, hyperv_features,
7091 HYPERV_FEAT_REENLIGHTENMENT, 0),
7092 DEFINE_PROP_BIT64("hv-tlbflush", X86CPU, hyperv_features,
7093 HYPERV_FEAT_TLBFLUSH, 0),
7094 DEFINE_PROP_BIT64("hv-evmcs", X86CPU, hyperv_features,
7095 HYPERV_FEAT_EVMCS, 0),
7096 DEFINE_PROP_BIT64("hv-ipi", X86CPU, hyperv_features,
7097 HYPERV_FEAT_IPI, 0),
7098 DEFINE_PROP_BIT64("hv-stimer-direct", X86CPU, hyperv_features,
7099 HYPERV_FEAT_STIMER_DIRECT, 0),
7100 DEFINE_PROP_ON_OFF_AUTO("hv-no-nonarch-coresharing", X86CPU,
7101 hyperv_no_nonarch_cs, ON_OFF_AUTO_OFF),
7102 DEFINE_PROP_BOOL("hv-passthrough", X86CPU, hyperv_passthrough, false),
7104 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
7105 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
7106 DEFINE_PROP_BOOL("x-force-features", X86CPU, force_features, false),
7107 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
7108 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
7109 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
7110 DEFINE_PROP_UINT8("host-phys-bits-limit", X86CPU, host_phys_bits_limit, 0),
7111 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
7112 DEFINE_PROP_UINT32("level-func7", X86CPU, env.cpuid_level_func7,
7113 UINT32_MAX),
7114 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
7115 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
7116 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
7117 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
7118 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
7119 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
7120 DEFINE_PROP_UINT64("ucode-rev", X86CPU, ucode_rev, 0),
7121 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
7122 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
7123 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
7124 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
7125 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
7126 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
7127 false),
7128 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
7129 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
7130 DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count,
7131 true),
7133 * legacy_cache defaults to true unless the CPU model provides its
7134 * own cache information (see x86_cpu_load_model()).
7136 DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true),
7139 * From "Requirements for Implementing the Microsoft
7140 * Hypervisor Interface":
7141 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
7143 * "Starting with Windows Server 2012 and Windows 8, if
7144 * CPUID.40000005.EAX contains a value of -1, Windows assumes that
7145 * the hypervisor imposes no specific limit to the number of VPs.
7146 * In this case, Windows Server 2012 guest VMs may use more than
7147 * 64 VPs, up to the maximum supported number of processors applicable
7148 * to the specific Windows version being used."
7150 DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
7151 DEFINE_PROP_BOOL("x-hv-synic-kvm-only", X86CPU, hyperv_synic_kvm_only,
7152 false),
7153 DEFINE_PROP_BOOL("x-intel-pt-auto-level", X86CPU, intel_pt_auto_level,
7154 true),
7155 DEFINE_PROP_END_OF_LIST()
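/*
 * Illustrative use of these properties on the command line, e.g. for a
 * Windows guest:
 *
 *   -cpu Skylake-Client,hv-relaxed=on,hv-vapic=on,hv-spinlocks=0x1fff,hv-time=on
 *
 * Each hv-* flag toggles one HYPERV_FEAT_* bit in hyperv_features, while
 * plain booleans such as "check", "enforce" and "legacy-cache" map directly
 * onto the X86CPU fields listed above.
 */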
7158 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
7160 X86CPUClass *xcc = X86_CPU_CLASS(oc);
7161 CPUClass *cc = CPU_CLASS(oc);
7162 DeviceClass *dc = DEVICE_CLASS(oc);
7164 device_class_set_parent_realize(dc, x86_cpu_realizefn,
7165 &xcc->parent_realize);
7166 device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
7167 &xcc->parent_unrealize);
7168 device_class_set_props(dc, x86_cpu_properties);
7170 cpu_class_set_parent_reset(cc, x86_cpu_reset, &xcc->parent_reset);
7171 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
7173 cc->class_by_name = x86_cpu_class_by_name;
7174 cc->parse_features = x86_cpu_parse_featurestr;
7175 cc->has_work = x86_cpu_has_work;
7176 #ifdef CONFIG_TCG
7177 cc->do_interrupt = x86_cpu_do_interrupt;
7178 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
7179 #endif
7180 cc->dump_state = x86_cpu_dump_state;
7181 cc->get_crash_info = x86_cpu_get_crash_info;
7182 cc->set_pc = x86_cpu_set_pc;
7183 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
7184 cc->gdb_read_register = x86_cpu_gdb_read_register;
7185 cc->gdb_write_register = x86_cpu_gdb_write_register;
7186 cc->get_arch_id = x86_cpu_get_arch_id;
7187 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
7188 #ifndef CONFIG_USER_ONLY
7189 cc->asidx_from_attrs = x86_asidx_from_attrs;
7190 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
7191 cc->get_phys_page_attrs_debug = x86_cpu_get_phys_page_attrs_debug;
7192 cc->write_elf64_note = x86_cpu_write_elf64_note;
7193 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
7194 cc->write_elf32_note = x86_cpu_write_elf32_note;
7195 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
7196 cc->vmsd = &vmstate_x86_cpu;
7197 #endif
7198 cc->gdb_arch_name = x86_gdb_arch_name;
7199 #ifdef TARGET_X86_64
7200 cc->gdb_core_xml_file = "i386-64bit.xml";
7201 cc->gdb_num_core_regs = 66;
7202 #else
7203 cc->gdb_core_xml_file = "i386-32bit.xml";
7204 cc->gdb_num_core_regs = 50;
7205 #endif
7206 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
7207 cc->debug_excp_handler = breakpoint_handler;
7208 #endif
7209 cc->cpu_exec_enter = x86_cpu_exec_enter;
7210 cc->cpu_exec_exit = x86_cpu_exec_exit;
7211 #ifdef CONFIG_TCG
7212 cc->tcg_initialize = tcg_x86_init;
7213 cc->tlb_fill = x86_cpu_tlb_fill;
7214 #endif
7215 cc->disas_set_info = x86_disas_set_info;
7217 dc->user_creatable = true;
7220 static const TypeInfo x86_cpu_type_info = {
7221 .name = TYPE_X86_CPU,
7222 .parent = TYPE_CPU,
7223 .instance_size = sizeof(X86CPU),
7224 .instance_init = x86_cpu_initfn,
7225 .abstract = true,
7226 .class_size = sizeof(X86CPUClass),
7227 .class_init = x86_cpu_common_class_init,
7231 /* "base" CPU model, used by query-cpu-model-expansion */
7232 static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
7234 X86CPUClass *xcc = X86_CPU_CLASS(oc);
7236 xcc->static_model = true;
7237 xcc->migration_safe = true;
7238 xcc->model_description = "base CPU model type with no features enabled";
7239 xcc->ordering = 8;
7242 static const TypeInfo x86_base_cpu_type_info = {
7243 .name = X86_CPU_TYPE_NAME("base"),
7244 .parent = TYPE_X86_CPU,
7245 .class_init = x86_cpu_base_class_init,
7248 static void x86_cpu_register_types(void)
7250 int i;
7252 type_register_static(&x86_cpu_type_info);
7253 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
7254 x86_register_cpudef_types(&builtin_x86_defs[i]);
7256 type_register_static(&max_x86_cpu_type_info);
7257 type_register_static(&x86_base_cpu_type_info);
7258 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
7259 type_register_static(&host_x86_cpu_type_info);
7260 #endif
7263 type_init(x86_cpu_register_types)