target/i386/cpu.c
blob 31556b7ec4c76d073b462fc894cde25dc68f596f
1 /*
2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "qemu/units.h"
22 #include "qemu/cutils.h"
23 #include "qemu/bitops.h"
24 #include "qemu/qemu-print.h"
26 #include "cpu.h"
27 #include "exec/exec-all.h"
28 #include "sysemu/kvm.h"
29 #include "sysemu/reset.h"
30 #include "sysemu/hvf.h"
31 #include "sysemu/cpus.h"
32 #include "kvm_i386.h"
33 #include "sev_i386.h"
35 #include "qemu/error-report.h"
36 #include "qemu/module.h"
37 #include "qemu/option.h"
38 #include "qemu/config-file.h"
39 #include "qapi/error.h"
40 #include "qapi/qapi-visit-machine.h"
41 #include "qapi/qapi-visit-run-state.h"
42 #include "qapi/qmp/qdict.h"
43 #include "qapi/qmp/qerror.h"
44 #include "qapi/visitor.h"
45 #include "qom/qom-qobject.h"
46 #include "sysemu/arch_init.h"
47 #include "qapi/qapi-commands-machine-target.h"
49 #include "standard-headers/asm-x86/kvm_para.h"
51 #include "sysemu/sysemu.h"
52 #include "sysemu/tcg.h"
53 #include "hw/qdev-properties.h"
54 #include "hw/i386/topology.h"
55 #ifndef CONFIG_USER_ONLY
56 #include "exec/address-spaces.h"
57 #include "hw/xen/xen.h"
58 #include "hw/i386/apic_internal.h"
59 #include "hw/boards.h"
60 #endif
62 #include "disas/capstone.h"
64 /* Helpers for building CPUID[2] descriptors: */
66 struct CPUID2CacheDescriptorInfo {
67 enum CacheType type;
68 int level;
69 int size;
70 int line_size;
71 int associativity;
75 * Known CPUID 2 cache descriptors.
76 * From Intel SDM Volume 2A, CPUID instruction
78 struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
79 [0x06] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 8 * KiB,
80 .associativity = 4, .line_size = 32, },
81 [0x08] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 16 * KiB,
82 .associativity = 4, .line_size = 32, },
83 [0x09] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
84 .associativity = 4, .line_size = 64, },
85 [0x0A] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
86 .associativity = 2, .line_size = 32, },
87 [0x0C] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
88 .associativity = 4, .line_size = 32, },
89 [0x0D] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
90 .associativity = 4, .line_size = 64, },
91 [0x0E] = { .level = 1, .type = DATA_CACHE, .size = 24 * KiB,
92 .associativity = 6, .line_size = 64, },
93 [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
94 .associativity = 2, .line_size = 64, },
95 [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
96 .associativity = 8, .line_size = 64, },
97 /* lines per sector is not supported by cpuid2_cache_descriptor(),
98 * so descriptors 0x22, 0x23 are not included
100 [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
101 .associativity = 16, .line_size = 64, },
102 /* lines per sector is not supported by cpuid2_cache_descriptor(),
103 * so descriptors 0x25, 0x29 are not included
105 [0x2C] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
106 .associativity = 8, .line_size = 64, },
107 [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
108 .associativity = 8, .line_size = 64, },
109 [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
110 .associativity = 4, .line_size = 32, },
111 [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
112 .associativity = 4, .line_size = 32, },
113 [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
114 .associativity = 4, .line_size = 32, },
115 [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
116 .associativity = 4, .line_size = 32, },
117 [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
118 .associativity = 4, .line_size = 32, },
119 [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
120 .associativity = 4, .line_size = 64, },
121 [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
122 .associativity = 8, .line_size = 64, },
123 [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB,
124 .associativity = 12, .line_size = 64, },
125 /* Descriptor 0x49 depends on CPU family/model, so it is not included */
126 [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
127 .associativity = 12, .line_size = 64, },
128 [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
129 .associativity = 16, .line_size = 64, },
130 [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
131 .associativity = 12, .line_size = 64, },
132 [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB,
133 .associativity = 16, .line_size = 64, },
134 [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB,
135 .associativity = 24, .line_size = 64, },
136 [0x60] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
137 .associativity = 8, .line_size = 64, },
138 [0x66] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
139 .associativity = 4, .line_size = 64, },
140 [0x67] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
141 .associativity = 4, .line_size = 64, },
142 [0x68] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
143 .associativity = 4, .line_size = 64, },
144 [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
145 .associativity = 4, .line_size = 64, },
146 /* lines per sector is not supported by cpuid2_cache_descriptor(),
147 * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
149 [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
150 .associativity = 8, .line_size = 64, },
151 [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
152 .associativity = 2, .line_size = 64, },
153 [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
154 .associativity = 8, .line_size = 64, },
155 [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
156 .associativity = 8, .line_size = 32, },
157 [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
158 .associativity = 8, .line_size = 32, },
159 [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
160 .associativity = 8, .line_size = 32, },
161 [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
162 .associativity = 8, .line_size = 32, },
163 [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
164 .associativity = 4, .line_size = 64, },
165 [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
166 .associativity = 8, .line_size = 64, },
167 [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB,
168 .associativity = 4, .line_size = 64, },
169 [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
170 .associativity = 4, .line_size = 64, },
171 [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
172 .associativity = 4, .line_size = 64, },
173 [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
174 .associativity = 8, .line_size = 64, },
175 [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
176 .associativity = 8, .line_size = 64, },
177 [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
178 .associativity = 8, .line_size = 64, },
179 [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB,
180 .associativity = 12, .line_size = 64, },
181 [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB,
182 .associativity = 12, .line_size = 64, },
183 [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
184 .associativity = 12, .line_size = 64, },
185 [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
186 .associativity = 16, .line_size = 64, },
187 [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
188 .associativity = 16, .line_size = 64, },
189 [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
190 .associativity = 16, .line_size = 64, },
191 [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
192 .associativity = 24, .line_size = 64, },
193 [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB,
194 .associativity = 24, .line_size = 64, },
195 [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB,
196 .associativity = 24, .line_size = 64, },
200 * "CPUID leaf 2 does not report cache descriptor information,
201 * use CPUID leaf 4 to query cache parameters"
203 #define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF
206 * Return a CPUID 2 cache descriptor for a given cache.
207 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
209 static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
211 int i;
213 assert(cache->size > 0);
214 assert(cache->level > 0);
215 assert(cache->line_size > 0);
216 assert(cache->associativity > 0);
217 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
218 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
219 if (d->level == cache->level && d->type == cache->type &&
220 d->size == cache->size && d->line_size == cache->line_size &&
221 d->associativity == cache->associativity) {
222 return i;
226 return CACHE_DESCRIPTOR_UNAVAILABLE;
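/*
 * Illustrative sketch added for this rewrite (not part of the original file):
 * how the descriptor lookup above behaves for a common cache shape.  A
 * 32 KiB, 8-way, 64-byte-line L1 data cache matches table entry 0x2C, while
 * an unknown geometry falls back to CACHE_DESCRIPTOR_UNAVAILABLE.  The
 * helper name is hypothetical.
 */
static void __attribute__((unused)) cpuid2_cache_descriptor_example(void)
{
    CPUCacheInfo l1d = {
        .type = DATA_CACHE,
        .level = 1,
        .size = 32 * KiB,
        .line_size = 64,
        .associativity = 8,
    };

    assert(cpuid2_cache_descriptor(&l1d) == 0x2C);
}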
229 /* CPUID Leaf 4 constants: */
231 /* EAX: */
232 #define CACHE_TYPE_D 1
233 #define CACHE_TYPE_I 2
234 #define CACHE_TYPE_UNIFIED 3
236 #define CACHE_LEVEL(l) (l << 5)
238 #define CACHE_SELF_INIT_LEVEL (1 << 8)
240 /* EDX: */
241 #define CACHE_NO_INVD_SHARING (1 << 0)
242 #define CACHE_INCLUSIVE (1 << 1)
243 #define CACHE_COMPLEX_IDX (1 << 2)
245 /* Encode CacheType for CPUID[4].EAX */
246 #define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \
247 ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \
248 ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
249 0 /* Invalid value */)
252 /* Encode cache info for CPUID[4] */
253 static void encode_cache_cpuid4(CPUCacheInfo *cache,
254 int num_apic_ids, int num_cores,
255 uint32_t *eax, uint32_t *ebx,
256 uint32_t *ecx, uint32_t *edx)
258 assert(cache->size == cache->line_size * cache->associativity *
259 cache->partitions * cache->sets);
261 assert(num_apic_ids > 0);
262 *eax = CACHE_TYPE(cache->type) |
263 CACHE_LEVEL(cache->level) |
264 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
265 ((num_cores - 1) << 26) |
266 ((num_apic_ids - 1) << 14);
268 assert(cache->line_size > 0);
269 assert(cache->partitions > 0);
270 assert(cache->associativity > 0);
271 /* We don't implement fully-associative caches */
272 assert(cache->associativity < cache->sets);
273 *ebx = (cache->line_size - 1) |
274 ((cache->partitions - 1) << 12) |
275 ((cache->associativity - 1) << 22);
277 assert(cache->sets > 0);
278 *ecx = cache->sets - 1;
280 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
281 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
282 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
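/*
 * Illustrative sketch (not in the original source): the CPUID[4] register
 * layout produced above for a 32 KiB, 8-way, 64-byte-line, 64-set L1 data
 * cache on a single-core, single-thread topology.  The helper name and the
 * expected constants are derived from the bit layout in encode_cache_cpuid4()
 * and are assumptions of this sketch.
 */
static void __attribute__((unused)) encode_cache_cpuid4_example(void)
{
    CPUCacheInfo l1d = {
        .type = DATA_CACHE,
        .level = 1,
        .size = 32 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 64,
        .self_init = 1,
    };
    uint32_t eax, ebx, ecx, edx;

    encode_cache_cpuid4(&l1d, 1, 1, &eax, &ebx, &ecx, &edx);
    assert(eax == 0x121);      /* data cache, level 1, self-initializing */
    assert(ebx == 0x01C0003F); /* (8-1) << 22 | (1-1) << 12 | (64-1) */
    assert(ecx == 63);         /* sets - 1 */
}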
285 /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
286 static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
288 assert(cache->size % 1024 == 0);
289 assert(cache->lines_per_tag > 0);
290 assert(cache->associativity > 0);
291 assert(cache->line_size > 0);
292 return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
293 (cache->lines_per_tag << 8) | (cache->line_size);
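/*
 * Illustrative sketch (not in the original source): the packed
 * CPUID[0x80000005] value for a hypothetical 64 KiB, 2-way, 64-byte-line
 * L1 cache with one line per tag.  The helper name is hypothetical.
 */
static void __attribute__((unused)) encode_cache_cpuid80000005_example(void)
{
    CPUCacheInfo l1d = {
        .type = DATA_CACHE,
        .level = 1,
        .size = 64 * KiB,
        .line_size = 64,
        .associativity = 2,
        .lines_per_tag = 1,
    };

    /* 64 (KiB) << 24 | 2 << 16 | 1 << 8 | 64 */
    assert(encode_cache_cpuid80000005(&l1d) == 0x40020140);
}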
296 #define ASSOC_FULL 0xFF
298 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
299 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
300 a == 2 ? 0x2 : \
301 a == 4 ? 0x4 : \
302 a == 8 ? 0x6 : \
303 a == 16 ? 0x8 : \
304 a == 32 ? 0xA : \
305 a == 48 ? 0xB : \
306 a == 64 ? 0xC : \
307 a == 96 ? 0xD : \
308 a == 128 ? 0xE : \
309 a == ASSOC_FULL ? 0xF : \
310 0 /* invalid value */)
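/*
 * Illustrative compile-time checks added for this rewrite (not part of the
 * original file): a few sample values of the AMD associativity encoding
 * defined above.
 */
QEMU_BUILD_BUG_ON(AMD_ENC_ASSOC(8) != 0x6);
QEMU_BUILD_BUG_ON(AMD_ENC_ASSOC(16) != 0x8);
QEMU_BUILD_BUG_ON(AMD_ENC_ASSOC(ASSOC_FULL) != 0xF);
QEMU_BUILD_BUG_ON(AMD_ENC_ASSOC(3) != 0);   /* unsupported value */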
313 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
314 * @l3 can be NULL.
316 static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
317 CPUCacheInfo *l3,
318 uint32_t *ecx, uint32_t *edx)
320 assert(l2->size % 1024 == 0);
321 assert(l2->associativity > 0);
322 assert(l2->lines_per_tag > 0);
323 assert(l2->line_size > 0);
324 *ecx = ((l2->size / 1024) << 16) |
325 (AMD_ENC_ASSOC(l2->associativity) << 12) |
326 (l2->lines_per_tag << 8) | (l2->line_size);
328 if (l3) {
329 assert(l3->size % (512 * 1024) == 0);
330 assert(l3->associativity > 0);
331 assert(l3->lines_per_tag > 0);
332 assert(l3->line_size > 0);
333 *edx = ((l3->size / (512 * 1024)) << 18) |
334 (AMD_ENC_ASSOC(l3->associativity) << 12) |
335 (l3->lines_per_tag << 8) | (l3->line_size);
336 } else {
337 *edx = 0;
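/*
 * Illustrative sketch (not in the original source): CPUID[0x80000006].ECX
 * for a hypothetical 512 KiB, 16-way L2 with no L3 described.  The helper
 * name is hypothetical.
 */
static void __attribute__((unused)) encode_cache_cpuid80000006_example(void)
{
    CPUCacheInfo l2 = {
        .type = UNIFIED_CACHE,
        .level = 2,
        .size = 512 * KiB,
        .line_size = 64,
        .associativity = 16,
        .lines_per_tag = 1,
    };
    uint32_t ecx, edx;

    encode_cache_cpuid80000006(&l2, NULL, &ecx, &edx);
    assert(ecx == 0x02008140); /* 512 << 16 | 0x8 << 12 | 1 << 8 | 64 */
    assert(edx == 0);          /* no L3 */
}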
342 * Definitions used for building CPUID Leaf 0x8000001D and 0x8000001E
343 * Please refer to the AMD64 Architecture Programmer’s Manual Volume 3.
344 * Define the constants to build the CPU topology. Right now, the TOPOEXT
345 * feature is enabled only on EPYC, so these constants are based on
346 * EPYC-supported configurations. We may need to handle the case where
347 * these values change in the future.
349 /* Maximum core complexes in a node */
350 #define MAX_CCX 2
351 /* Maximum cores in a core complex */
352 #define MAX_CORES_IN_CCX 4
353 /* Maximum cores in a node */
354 #define MAX_CORES_IN_NODE 8
355 /* Maximum nodes in a socket */
356 #define MAX_NODES_PER_SOCKET 4
359 * Figure out the number of nodes required to build this config.
360 * Max cores in a node is 8
362 static int nodes_in_socket(int nr_cores)
364 int nodes;
366 nodes = DIV_ROUND_UP(nr_cores, MAX_CORES_IN_NODE);
368 /* Hardware does not support config with 3 nodes, return 4 in that case */
369 return (nodes == 3) ? 4 : nodes;
373 * Decide the number of cores in a core complex with the given nr_cores, using
374 * the constants MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE and
375 * MAX_NODES_PER_SOCKET, and maintaining symmetry as much as possible.
376 * The L3 cache is shared across all cores in a core complex, so this also
377 * tells us how many cores are sharing the L3 cache.
379 static int cores_in_core_complex(int nr_cores)
381 int nodes;
383 /* Check if we can fit all the cores in one core complex */
384 if (nr_cores <= MAX_CORES_IN_CCX) {
385 return nr_cores;
387 /* Get the number of nodes required to build this config */
388 nodes = nodes_in_socket(nr_cores);
391 * Divide the cores across all the core complexes and
392 * return the rounded-up value.
394 return DIV_ROUND_UP(nr_cores, nodes * MAX_CCX);
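/*
 * Illustrative sketch (not in the original source): a few worked values for
 * the two helpers above, showing how cores are spread over nodes and core
 * complexes.  The helper name is hypothetical.
 */
static void __attribute__((unused)) epyc_core_complex_example(void)
{
    /* 8 cores: one node, two core complexes of 4 cores each */
    assert(nodes_in_socket(8) == 1);
    assert(cores_in_core_complex(8) == 4);

    /* 24 cores: 3 nodes rounded up to 4, 3 cores per core complex */
    assert(nodes_in_socket(24) == 4);
    assert(cores_in_core_complex(24) == 3);

    /* 32 cores: 4 nodes, 4 cores per core complex */
    assert(nodes_in_socket(32) == 4);
    assert(cores_in_core_complex(32) == 4);
}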
397 /* Encode cache info for CPUID[8000001D] */
398 static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs,
399 uint32_t *eax, uint32_t *ebx,
400 uint32_t *ecx, uint32_t *edx)
402 uint32_t l3_cores;
403 assert(cache->size == cache->line_size * cache->associativity *
404 cache->partitions * cache->sets);
406 *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) |
407 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0);
409 /* L3 is shared among multiple cores */
410 if (cache->level == 3) {
411 l3_cores = cores_in_core_complex(cs->nr_cores);
412 *eax |= ((l3_cores * cs->nr_threads) - 1) << 14;
413 } else {
414 *eax |= ((cs->nr_threads - 1) << 14);
417 assert(cache->line_size > 0);
418 assert(cache->partitions > 0);
419 assert(cache->associativity > 0);
420 /* We don't implement fully-associative caches */
421 assert(cache->associativity < cache->sets);
422 *ebx = (cache->line_size - 1) |
423 ((cache->partitions - 1) << 12) |
424 ((cache->associativity - 1) << 22);
426 assert(cache->sets > 0);
427 *ecx = cache->sets - 1;
429 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
430 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
431 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
434 /* Data structure to hold the configuration info for a given core index */
435 struct core_topology {
436 /* core complex id of the current core index */
437 int ccx_id;
439 * Adjusted core index for this core in the topology
440 * This can be 0,1,2,3 with max 4 cores in a core complex
442 int core_id;
443 /* Node id for this core index */
444 int node_id;
445 /* Number of nodes in this config */
446 int num_nodes;
450 * Build the configuration to closely match the EPYC hardware, using the EPYC
451 * hardware configuration values (MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE)
452 * for now. This could change in the future.
453 * nr_cores : Total number of cores in the config
454 * core_id : Core index of the current CPU
455 * topo : Data structure to hold all the config info for this core index
457 static void build_core_topology(int nr_cores, int core_id,
458 struct core_topology *topo)
460 int nodes, cores_in_ccx;
462 /* First get the number of nodes required */
463 nodes = nodes_in_socket(nr_cores);
465 cores_in_ccx = cores_in_core_complex(nr_cores);
467 topo->node_id = core_id / (cores_in_ccx * MAX_CCX);
468 topo->ccx_id = (core_id % (cores_in_ccx * MAX_CCX)) / cores_in_ccx;
469 topo->core_id = core_id % cores_in_ccx;
470 topo->num_nodes = nodes;
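/*
 * Illustrative sketch (not in the original source): how a flat core index
 * is decomposed by build_core_topology().  With 16 cores (2 nodes, 4 cores
 * per core complex), core index 10 lands in node 1, core complex 0, as
 * core 2 of that complex.  The helper name is hypothetical.
 */
static void __attribute__((unused)) build_core_topology_example(void)
{
    struct core_topology topo = {0};

    build_core_topology(16, 10, &topo);
    assert(topo.num_nodes == 2);
    assert(topo.node_id == 1);
    assert(topo.ccx_id == 0);
    assert(topo.core_id == 2);
}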
473 /* Encode cache info for CPUID[8000001E] */
474 static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu,
475 uint32_t *eax, uint32_t *ebx,
476 uint32_t *ecx, uint32_t *edx)
478 struct core_topology topo = {0};
479 unsigned long nodes;
480 int shift;
482 build_core_topology(cs->nr_cores, cpu->core_id, &topo);
483 *eax = cpu->apic_id;
485 * CPUID_Fn8000001E_EBX
486 * 31:16 Reserved
487 * 15:8 Threads per core (The number of threads per core is
488 * Threads per core + 1)
489 * 7:0 Core id (see bit decoding below)
490 * SMT:
491 * 4:3 node id
492 * 2 Core complex id
493 * 1:0 Core id
494 * Non SMT:
495 * 5:4 node id
496 * 3 Core complex id
497 * 1:0 Core id
499 if (cs->nr_threads - 1) {
500 *ebx = ((cs->nr_threads - 1) << 8) | (topo.node_id << 3) |
501 (topo.ccx_id << 2) | topo.core_id;
502 } else {
503 *ebx = (topo.node_id << 4) | (topo.ccx_id << 3) | topo.core_id;
506 * CPUID_Fn8000001E_ECX
507 * 31:11 Reserved
508 * 10:8 Nodes per processor (Nodes per processor is number of nodes + 1)
509 * 7:0 Node id (see bit decoding below)
510 * 2 Socket id
511 * 1:0 Node id
513 if (topo.num_nodes <= 4) {
514 *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << 2) |
515 topo.node_id;
516 } else {
518 * Node id fix-up. Actual hardware supports up to 4 nodes, but with
519 * more than 32 cores we may end up with more than 4 nodes.
520 * The node id is a combination of socket id and node id. The only
521 * requirement here is that this number is unique across the system.
522 * Shift the socket id to accommodate more nodes. We don't expect both
523 * socket id and node id to be big numbers at the same time. This is not
524 * an ideal config, but we need to support it. The maximum number of nodes
525 * we can have is 32 (255/8) with 8 cores per node and 255 max cores. We
526 * only need 5 bits for nodes. Find the leftmost set bit to represent the
527 * total number of nodes. find_last_bit returns the last set bit (0-based).
528 * Left-shift (+1) the socket id to represent all the nodes.
530 nodes = topo.num_nodes - 1;
531 shift = find_last_bit(&nodes, 8);
532 *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << (shift + 1)) |
533 topo.node_id;
535 *edx = 0;
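/*
 * Illustrative sketch (not in the original source): the node-id fix-up
 * arithmetic used in the >4-node branch above, for a hypothetical 8-node
 * configuration.  nodes = 7 has its highest set bit at position 2, so the
 * socket id is shifted left by 3 to sit above the node id.  The helper
 * name and the sample socket/node ids are assumptions of this sketch.
 */
static void __attribute__((unused)) node_id_fixup_example(void)
{
    unsigned long nodes = 8 - 1;
    int shift = find_last_bit(&nodes, 8);
    uint32_t ecx = ((8 - 1) << 8) | (1 << (shift + 1)) | 5;

    assert(shift == 2);
    assert(ecx == 0x70D); /* nodes-1 in [10:8], socket id 1 above bit 2, node id 5 */
}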
539 * Definitions of the hardcoded cache entries we expose:
540 * These are legacy cache values. If there is a need to change any
541 * of these values, please use builtin_x86_defs.
544 /* L1 data cache: */
545 static CPUCacheInfo legacy_l1d_cache = {
546 .type = DATA_CACHE,
547 .level = 1,
548 .size = 32 * KiB,
549 .self_init = 1,
550 .line_size = 64,
551 .associativity = 8,
552 .sets = 64,
553 .partitions = 1,
554 .no_invd_sharing = true,
557 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
558 static CPUCacheInfo legacy_l1d_cache_amd = {
559 .type = DATA_CACHE,
560 .level = 1,
561 .size = 64 * KiB,
562 .self_init = 1,
563 .line_size = 64,
564 .associativity = 2,
565 .sets = 512,
566 .partitions = 1,
567 .lines_per_tag = 1,
568 .no_invd_sharing = true,
571 /* L1 instruction cache: */
572 static CPUCacheInfo legacy_l1i_cache = {
573 .type = INSTRUCTION_CACHE,
574 .level = 1,
575 .size = 32 * KiB,
576 .self_init = 1,
577 .line_size = 64,
578 .associativity = 8,
579 .sets = 64,
580 .partitions = 1,
581 .no_invd_sharing = true,
584 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
585 static CPUCacheInfo legacy_l1i_cache_amd = {
586 .type = INSTRUCTION_CACHE,
587 .level = 1,
588 .size = 64 * KiB,
589 .self_init = 1,
590 .line_size = 64,
591 .associativity = 2,
592 .sets = 512,
593 .partitions = 1,
594 .lines_per_tag = 1,
595 .no_invd_sharing = true,
598 /* Level 2 unified cache: */
599 static CPUCacheInfo legacy_l2_cache = {
600 .type = UNIFIED_CACHE,
601 .level = 2,
602 .size = 4 * MiB,
603 .self_init = 1,
604 .line_size = 64,
605 .associativity = 16,
606 .sets = 4096,
607 .partitions = 1,
608 .no_invd_sharing = true,
611 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
612 static CPUCacheInfo legacy_l2_cache_cpuid2 = {
613 .type = UNIFIED_CACHE,
614 .level = 2,
615 .size = 2 * MiB,
616 .line_size = 64,
617 .associativity = 8,
621 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
622 static CPUCacheInfo legacy_l2_cache_amd = {
623 .type = UNIFIED_CACHE,
624 .level = 2,
625 .size = 512 * KiB,
626 .line_size = 64,
627 .lines_per_tag = 1,
628 .associativity = 16,
629 .sets = 512,
630 .partitions = 1,
633 /* Level 3 unified cache: */
634 static CPUCacheInfo legacy_l3_cache = {
635 .type = UNIFIED_CACHE,
636 .level = 3,
637 .size = 16 * MiB,
638 .line_size = 64,
639 .associativity = 16,
640 .sets = 16384,
641 .partitions = 1,
642 .lines_per_tag = 1,
643 .self_init = true,
644 .inclusive = true,
645 .complex_indexing = true,
648 /* TLB definitions: */
650 #define L1_DTLB_2M_ASSOC 1
651 #define L1_DTLB_2M_ENTRIES 255
652 #define L1_DTLB_4K_ASSOC 1
653 #define L1_DTLB_4K_ENTRIES 255
655 #define L1_ITLB_2M_ASSOC 1
656 #define L1_ITLB_2M_ENTRIES 255
657 #define L1_ITLB_4K_ASSOC 1
658 #define L1_ITLB_4K_ENTRIES 255
660 #define L2_DTLB_2M_ASSOC 0 /* disabled */
661 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
662 #define L2_DTLB_4K_ASSOC 4
663 #define L2_DTLB_4K_ENTRIES 512
665 #define L2_ITLB_2M_ASSOC 0 /* disabled */
666 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
667 #define L2_ITLB_4K_ASSOC 4
668 #define L2_ITLB_4K_ENTRIES 512
670 /* CPUID Leaf 0x14 constants: */
671 #define INTEL_PT_MAX_SUBLEAF 0x1
673 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
674 * MSR can be accessed;
675 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
676 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
677 * of Intel PT MSRs across warm reset;
678 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
680 #define INTEL_PT_MINIMAL_EBX 0xf
682 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
683 * IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
684 * accessed;
685 * bit[01]: ToPA tables can hold any number of output entries, up to the
686 * maximum allowed by the MaskOrTableOffset field of
687 * IA32_RTIT_OUTPUT_MASK_PTRS;
688 * bit[02]: Support Single-Range Output scheme;
690 #define INTEL_PT_MINIMAL_ECX 0x7
691 /* generated packets which contain IP payloads have LIP values */
692 #define INTEL_PT_IP_LIP (1 << 31)
693 #define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
694 #define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
695 #define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */
696 #define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */
697 #define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
699 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
700 uint32_t vendor2, uint32_t vendor3)
702 int i;
703 for (i = 0; i < 4; i++) {
704 dst[i] = vendor1 >> (8 * i);
705 dst[i + 4] = vendor2 >> (8 * i);
706 dst[i + 8] = vendor3 >> (8 * i);
708 dst[CPUID_VENDOR_SZ] = '\0';
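/*
 * Illustrative sketch (not in the original source): decoding the classic
 * "GenuineIntel" vendor string from its CPUID.0H register encoding
 * (EBX, EDX, ECX).  The helper name is hypothetical.
 */
static void __attribute__((unused)) vendor_words2str_example(void)
{
    char vendor[CPUID_VENDOR_SZ + 1];

    x86_cpu_vendor_words2str(vendor, 0x756e6547, 0x49656e69, 0x6c65746e);
    assert(strcmp(vendor, "GenuineIntel") == 0);
}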
711 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
712 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
713 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
714 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
715 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
716 CPUID_PSE36 | CPUID_FXSR)
717 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
718 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
719 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
720 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
721 CPUID_PAE | CPUID_SEP | CPUID_APIC)
723 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
724 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
725 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
726 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
727 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
728 /* partly implemented:
729 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
730 /* missing:
731 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
732 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
733 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
734 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
735 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
736 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR | \
737 CPUID_EXT_RDRAND)
738 /* missing:
739 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
740 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
741 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
742 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
743 CPUID_EXT_F16C */
745 #ifdef TARGET_X86_64
746 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
747 #else
748 #define TCG_EXT2_X86_64_FEATURES 0
749 #endif
751 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
752 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
753 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
754 TCG_EXT2_X86_64_FEATURES)
755 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
756 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
757 #define TCG_EXT4_FEATURES 0
758 #define TCG_SVM_FEATURES CPUID_SVM_NPT
759 #define TCG_KVM_FEATURES 0
760 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
761 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
762 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
763 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
764 CPUID_7_0_EBX_ERMS)
765 /* missing:
766 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
767 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
768 CPUID_7_0_EBX_RDSEED */
769 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \
770 /* CPUID_7_0_ECX_OSPKE is dynamic */ \
771 CPUID_7_0_ECX_LA57)
772 #define TCG_7_0_EDX_FEATURES 0
773 #define TCG_7_1_EAX_FEATURES 0
774 #define TCG_APM_FEATURES 0
775 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
776 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
777 /* missing:
778 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
780 typedef enum FeatureWordType {
781 CPUID_FEATURE_WORD,
782 MSR_FEATURE_WORD,
783 } FeatureWordType;
785 typedef struct FeatureWordInfo {
786 FeatureWordType type;
787 /* Feature flag names are taken from "Intel Processor Identification and
788 * the CPUID Instruction" and AMD's "CPUID Specification".
789 * In cases of disagreement between feature naming conventions,
790 * aliases may be added.
792 const char *feat_names[64];
793 union {
794 /* If type==CPUID_FEATURE_WORD */
795 struct {
796 uint32_t eax; /* Input EAX for CPUID */
797 bool needs_ecx; /* CPUID instruction uses ECX as input */
798 uint32_t ecx; /* Input ECX value for CPUID */
799 int reg; /* output register (R_* constant) */
800 } cpuid;
801 /* If type==MSR_FEATURE_WORD */
802 struct {
803 uint32_t index;
804 } msr;
806 uint64_t tcg_features; /* Feature flags supported by TCG */
807 uint64_t unmigratable_flags; /* Feature flags known to be unmigratable */
808 uint64_t migratable_flags; /* Feature flags known to be migratable */
809 /* Features that shouldn't be auto-enabled by "-cpu host" */
810 uint64_t no_autoenable_flags;
811 } FeatureWordInfo;
813 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
814 [FEAT_1_EDX] = {
815 .type = CPUID_FEATURE_WORD,
816 .feat_names = {
817 "fpu", "vme", "de", "pse",
818 "tsc", "msr", "pae", "mce",
819 "cx8", "apic", NULL, "sep",
820 "mtrr", "pge", "mca", "cmov",
821 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
822 NULL, "ds" /* Intel dts */, "acpi", "mmx",
823 "fxsr", "sse", "sse2", "ss",
824 "ht" /* Intel htt */, "tm", "ia64", "pbe",
826 .cpuid = {.eax = 1, .reg = R_EDX, },
827 .tcg_features = TCG_FEATURES,
829 [FEAT_1_ECX] = {
830 .type = CPUID_FEATURE_WORD,
831 .feat_names = {
832 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
833 "ds-cpl", "vmx", "smx", "est",
834 "tm2", "ssse3", "cid", NULL,
835 "fma", "cx16", "xtpr", "pdcm",
836 NULL, "pcid", "dca", "sse4.1",
837 "sse4.2", "x2apic", "movbe", "popcnt",
838 "tsc-deadline", "aes", "xsave", NULL /* osxsave */,
839 "avx", "f16c", "rdrand", "hypervisor",
841 .cpuid = { .eax = 1, .reg = R_ECX, },
842 .tcg_features = TCG_EXT_FEATURES,
844 /* Feature names that are already defined in feature_name[] but
845 * are set in CPUID[8000_0001].EDX on AMD CPUs don't have their
846 * names listed in feat_names below. They are copied automatically
847 * to features[FEAT_8000_0001_EDX] if and only if the CPU vendor is AMD.
849 [FEAT_8000_0001_EDX] = {
850 .type = CPUID_FEATURE_WORD,
851 .feat_names = {
852 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
853 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
854 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
855 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
856 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
857 "nx", NULL, "mmxext", NULL /* mmx */,
858 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
859 NULL, "lm", "3dnowext", "3dnow",
861 .cpuid = { .eax = 0x80000001, .reg = R_EDX, },
862 .tcg_features = TCG_EXT2_FEATURES,
864 [FEAT_8000_0001_ECX] = {
865 .type = CPUID_FEATURE_WORD,
866 .feat_names = {
867 "lahf-lm", "cmp-legacy", "svm", "extapic",
868 "cr8legacy", "abm", "sse4a", "misalignsse",
869 "3dnowprefetch", "osvw", "ibs", "xop",
870 "skinit", "wdt", NULL, "lwp",
871 "fma4", "tce", NULL, "nodeid-msr",
872 NULL, "tbm", "topoext", "perfctr-core",
873 "perfctr-nb", NULL, NULL, NULL,
874 NULL, NULL, NULL, NULL,
876 .cpuid = { .eax = 0x80000001, .reg = R_ECX, },
877 .tcg_features = TCG_EXT3_FEATURES,
879 * TOPOEXT is always allowed but can't be enabled blindly by
880 * "-cpu host", as it requires consistent cache topology info
881 * to be provided so it doesn't confuse guests.
883 .no_autoenable_flags = CPUID_EXT3_TOPOEXT,
885 [FEAT_C000_0001_EDX] = {
886 .type = CPUID_FEATURE_WORD,
887 .feat_names = {
888 NULL, NULL, "xstore", "xstore-en",
889 NULL, NULL, "xcrypt", "xcrypt-en",
890 "ace2", "ace2-en", "phe", "phe-en",
891 "pmm", "pmm-en", NULL, NULL,
892 NULL, NULL, NULL, NULL,
893 NULL, NULL, NULL, NULL,
894 NULL, NULL, NULL, NULL,
895 NULL, NULL, NULL, NULL,
897 .cpuid = { .eax = 0xC0000001, .reg = R_EDX, },
898 .tcg_features = TCG_EXT4_FEATURES,
900 [FEAT_KVM] = {
901 .type = CPUID_FEATURE_WORD,
902 .feat_names = {
903 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
904 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
905 NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi",
906 "kvm-poll-control", "kvm-pv-sched-yield", NULL, NULL,
907 NULL, NULL, NULL, NULL,
908 NULL, NULL, NULL, NULL,
909 "kvmclock-stable-bit", NULL, NULL, NULL,
910 NULL, NULL, NULL, NULL,
912 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EAX, },
913 .tcg_features = TCG_KVM_FEATURES,
915 [FEAT_KVM_HINTS] = {
916 .type = CPUID_FEATURE_WORD,
917 .feat_names = {
918 "kvm-hint-dedicated", NULL, NULL, NULL,
919 NULL, NULL, NULL, NULL,
920 NULL, NULL, NULL, NULL,
921 NULL, NULL, NULL, NULL,
922 NULL, NULL, NULL, NULL,
923 NULL, NULL, NULL, NULL,
924 NULL, NULL, NULL, NULL,
925 NULL, NULL, NULL, NULL,
927 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EDX, },
928 .tcg_features = TCG_KVM_FEATURES,
930 * KVM hints aren't auto-enabled by -cpu host, they need to be
931 * explicitly enabled in the command-line.
933 .no_autoenable_flags = ~0U,
936 * .feat_names are commented out for Hyper-V enlightenments because we
937 * don't want to have two different ways for enabling them on QEMU command
938 * line. Some features (e.g. "hyperv_time", "hyperv_vapic", ...) require
939 * enabling several feature bits simultaneously, exposing these bits
940 * individually may just confuse guests.
942 [FEAT_HYPERV_EAX] = {
943 .type = CPUID_FEATURE_WORD,
944 .feat_names = {
945 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
946 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
947 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
948 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
949 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
950 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
951 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
952 NULL, NULL,
953 NULL, NULL, NULL, NULL,
954 NULL, NULL, NULL, NULL,
955 NULL, NULL, NULL, NULL,
956 NULL, NULL, NULL, NULL,
958 .cpuid = { .eax = 0x40000003, .reg = R_EAX, },
960 [FEAT_HYPERV_EBX] = {
961 .type = CPUID_FEATURE_WORD,
962 .feat_names = {
963 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
964 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
965 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
966 NULL /* hv_create_port */, NULL /* hv_connect_port */,
967 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
968 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
969 NULL, NULL,
970 NULL, NULL, NULL, NULL,
971 NULL, NULL, NULL, NULL,
972 NULL, NULL, NULL, NULL,
973 NULL, NULL, NULL, NULL,
975 .cpuid = { .eax = 0x40000003, .reg = R_EBX, },
977 [FEAT_HYPERV_EDX] = {
978 .type = CPUID_FEATURE_WORD,
979 .feat_names = {
980 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
981 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
982 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
983 NULL, NULL,
984 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
985 NULL, NULL, NULL, NULL,
986 NULL, NULL, NULL, NULL,
987 NULL, NULL, NULL, NULL,
988 NULL, NULL, NULL, NULL,
989 NULL, NULL, NULL, NULL,
991 .cpuid = { .eax = 0x40000003, .reg = R_EDX, },
993 [FEAT_HV_RECOMM_EAX] = {
994 .type = CPUID_FEATURE_WORD,
995 .feat_names = {
996 NULL /* hv_recommend_pv_as_switch */,
997 NULL /* hv_recommend_pv_tlbflush_local */,
998 NULL /* hv_recommend_pv_tlbflush_remote */,
999 NULL /* hv_recommend_msr_apic_access */,
1000 NULL /* hv_recommend_msr_reset */,
1001 NULL /* hv_recommend_relaxed_timing */,
1002 NULL /* hv_recommend_dma_remapping */,
1003 NULL /* hv_recommend_int_remapping */,
1004 NULL /* hv_recommend_x2apic_msrs */,
1005 NULL /* hv_recommend_autoeoi_deprecation */,
1006 NULL /* hv_recommend_pv_ipi */,
1007 NULL /* hv_recommend_ex_hypercalls */,
1008 NULL /* hv_hypervisor_is_nested */,
1009 NULL /* hv_recommend_int_mbec */,
1010 NULL /* hv_recommend_evmcs */,
1011 NULL,
1012 NULL, NULL, NULL, NULL,
1013 NULL, NULL, NULL, NULL,
1014 NULL, NULL, NULL, NULL,
1015 NULL, NULL, NULL, NULL,
1017 .cpuid = { .eax = 0x40000004, .reg = R_EAX, },
1019 [FEAT_HV_NESTED_EAX] = {
1020 .type = CPUID_FEATURE_WORD,
1021 .cpuid = { .eax = 0x4000000A, .reg = R_EAX, },
1023 [FEAT_SVM] = {
1024 .type = CPUID_FEATURE_WORD,
1025 .feat_names = {
1026 "npt", "lbrv", "svm-lock", "nrip-save",
1027 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
1028 NULL, NULL, "pause-filter", NULL,
1029 "pfthreshold", NULL, NULL, NULL,
1030 NULL, NULL, NULL, NULL,
1031 NULL, NULL, NULL, NULL,
1032 NULL, NULL, NULL, NULL,
1033 NULL, NULL, NULL, NULL,
1035 .cpuid = { .eax = 0x8000000A, .reg = R_EDX, },
1036 .tcg_features = TCG_SVM_FEATURES,
1038 [FEAT_7_0_EBX] = {
1039 .type = CPUID_FEATURE_WORD,
1040 .feat_names = {
1041 "fsgsbase", "tsc-adjust", NULL, "bmi1",
1042 "hle", "avx2", NULL, "smep",
1043 "bmi2", "erms", "invpcid", "rtm",
1044 NULL, NULL, "mpx", NULL,
1045 "avx512f", "avx512dq", "rdseed", "adx",
1046 "smap", "avx512ifma", "pcommit", "clflushopt",
1047 "clwb", "intel-pt", "avx512pf", "avx512er",
1048 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
1050 .cpuid = {
1051 .eax = 7,
1052 .needs_ecx = true, .ecx = 0,
1053 .reg = R_EBX,
1055 .tcg_features = TCG_7_0_EBX_FEATURES,
1057 [FEAT_7_0_ECX] = {
1058 .type = CPUID_FEATURE_WORD,
1059 .feat_names = {
1060 NULL, "avx512vbmi", "umip", "pku",
1061 NULL /* ospke */, "waitpkg", "avx512vbmi2", NULL,
1062 "gfni", "vaes", "vpclmulqdq", "avx512vnni",
1063 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
1064 "la57", NULL, NULL, NULL,
1065 NULL, NULL, "rdpid", NULL,
1066 NULL, "cldemote", NULL, "movdiri",
1067 "movdir64b", NULL, NULL, NULL,
1069 .cpuid = {
1070 .eax = 7,
1071 .needs_ecx = true, .ecx = 0,
1072 .reg = R_ECX,
1074 .tcg_features = TCG_7_0_ECX_FEATURES,
1076 [FEAT_7_0_EDX] = {
1077 .type = CPUID_FEATURE_WORD,
1078 .feat_names = {
1079 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
1080 NULL, NULL, NULL, NULL,
1081 NULL, NULL, "md-clear", NULL,
1082 NULL, NULL, NULL, NULL,
1083 NULL, NULL, NULL /* pconfig */, NULL,
1084 NULL, NULL, NULL, NULL,
1085 NULL, NULL, "spec-ctrl", "stibp",
1086 NULL, "arch-capabilities", "core-capability", "ssbd",
1088 .cpuid = {
1089 .eax = 7,
1090 .needs_ecx = true, .ecx = 0,
1091 .reg = R_EDX,
1093 .tcg_features = TCG_7_0_EDX_FEATURES,
1095 [FEAT_7_1_EAX] = {
1096 .type = CPUID_FEATURE_WORD,
1097 .feat_names = {
1098 NULL, NULL, NULL, NULL,
1099 NULL, "avx512-bf16", NULL, NULL,
1100 NULL, NULL, NULL, NULL,
1101 NULL, NULL, NULL, NULL,
1102 NULL, NULL, NULL, NULL,
1103 NULL, NULL, NULL, NULL,
1104 NULL, NULL, NULL, NULL,
1105 NULL, NULL, NULL, NULL,
1107 .cpuid = {
1108 .eax = 7,
1109 .needs_ecx = true, .ecx = 1,
1110 .reg = R_EAX,
1112 .tcg_features = TCG_7_1_EAX_FEATURES,
1114 [FEAT_8000_0007_EDX] = {
1115 .type = CPUID_FEATURE_WORD,
1116 .feat_names = {
1117 NULL, NULL, NULL, NULL,
1118 NULL, NULL, NULL, NULL,
1119 "invtsc", NULL, NULL, NULL,
1120 NULL, NULL, NULL, NULL,
1121 NULL, NULL, NULL, NULL,
1122 NULL, NULL, NULL, NULL,
1123 NULL, NULL, NULL, NULL,
1124 NULL, NULL, NULL, NULL,
1126 .cpuid = { .eax = 0x80000007, .reg = R_EDX, },
1127 .tcg_features = TCG_APM_FEATURES,
1128 .unmigratable_flags = CPUID_APM_INVTSC,
1130 [FEAT_8000_0008_EBX] = {
1131 .type = CPUID_FEATURE_WORD,
1132 .feat_names = {
1133 "clzero", NULL, "xsaveerptr", NULL,
1134 NULL, NULL, NULL, NULL,
1135 NULL, "wbnoinvd", NULL, NULL,
1136 "ibpb", NULL, NULL, NULL,
1137 NULL, NULL, NULL, NULL,
1138 NULL, NULL, NULL, NULL,
1139 "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL,
1140 NULL, NULL, NULL, NULL,
1142 .cpuid = { .eax = 0x80000008, .reg = R_EBX, },
1143 .tcg_features = 0,
1144 .unmigratable_flags = 0,
1146 [FEAT_XSAVE] = {
1147 .type = CPUID_FEATURE_WORD,
1148 .feat_names = {
1149 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
1150 NULL, NULL, NULL, NULL,
1151 NULL, NULL, NULL, NULL,
1152 NULL, NULL, NULL, NULL,
1153 NULL, NULL, NULL, NULL,
1154 NULL, NULL, NULL, NULL,
1155 NULL, NULL, NULL, NULL,
1156 NULL, NULL, NULL, NULL,
1158 .cpuid = {
1159 .eax = 0xd,
1160 .needs_ecx = true, .ecx = 1,
1161 .reg = R_EAX,
1163 .tcg_features = TCG_XSAVE_FEATURES,
1165 [FEAT_6_EAX] = {
1166 .type = CPUID_FEATURE_WORD,
1167 .feat_names = {
1168 NULL, NULL, "arat", NULL,
1169 NULL, NULL, NULL, NULL,
1170 NULL, NULL, NULL, NULL,
1171 NULL, NULL, NULL, NULL,
1172 NULL, NULL, NULL, NULL,
1173 NULL, NULL, NULL, NULL,
1174 NULL, NULL, NULL, NULL,
1175 NULL, NULL, NULL, NULL,
1177 .cpuid = { .eax = 6, .reg = R_EAX, },
1178 .tcg_features = TCG_6_EAX_FEATURES,
1180 [FEAT_XSAVE_COMP_LO] = {
1181 .type = CPUID_FEATURE_WORD,
1182 .cpuid = {
1183 .eax = 0xD,
1184 .needs_ecx = true, .ecx = 0,
1185 .reg = R_EAX,
1187 .tcg_features = ~0U,
1188 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
1189 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
1190 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
1191 XSTATE_PKRU_MASK,
1193 [FEAT_XSAVE_COMP_HI] = {
1194 .type = CPUID_FEATURE_WORD,
1195 .cpuid = {
1196 .eax = 0xD,
1197 .needs_ecx = true, .ecx = 0,
1198 .reg = R_EDX,
1200 .tcg_features = ~0U,
1202 /* Below are MSR exposed features */
1203 [FEAT_ARCH_CAPABILITIES] = {
1204 .type = MSR_FEATURE_WORD,
1205 .feat_names = {
1206 "rdctl-no", "ibrs-all", "rsba", "skip-l1dfl-vmentry",
1207 "ssb-no", "mds-no", "pschange-mc-no", "tsx-ctrl",
1208 "taa-no", NULL, NULL, NULL,
1209 NULL, NULL, NULL, NULL,
1210 NULL, NULL, NULL, NULL,
1211 NULL, NULL, NULL, NULL,
1212 NULL, NULL, NULL, NULL,
1213 NULL, NULL, NULL, NULL,
1215 .msr = {
1216 .index = MSR_IA32_ARCH_CAPABILITIES,
1219 [FEAT_CORE_CAPABILITY] = {
1220 .type = MSR_FEATURE_WORD,
1221 .feat_names = {
1222 NULL, NULL, NULL, NULL,
1223 NULL, "split-lock-detect", NULL, NULL,
1224 NULL, NULL, NULL, NULL,
1225 NULL, NULL, NULL, NULL,
1226 NULL, NULL, NULL, NULL,
1227 NULL, NULL, NULL, NULL,
1228 NULL, NULL, NULL, NULL,
1229 NULL, NULL, NULL, NULL,
1231 .msr = {
1232 .index = MSR_IA32_CORE_CAPABILITY,
1236 [FEAT_VMX_PROCBASED_CTLS] = {
1237 .type = MSR_FEATURE_WORD,
1238 .feat_names = {
1239 NULL, NULL, "vmx-vintr-pending", "vmx-tsc-offset",
1240 NULL, NULL, NULL, "vmx-hlt-exit",
1241 NULL, "vmx-invlpg-exit", "vmx-mwait-exit", "vmx-rdpmc-exit",
1242 "vmx-rdtsc-exit", NULL, NULL, "vmx-cr3-load-noexit",
1243 "vmx-cr3-store-noexit", NULL, NULL, "vmx-cr8-load-exit",
1244 "vmx-cr8-store-exit", "vmx-flexpriority", "vmx-vnmi-pending", "vmx-movdr-exit",
1245 "vmx-io-exit", "vmx-io-bitmap", NULL, "vmx-mtf",
1246 "vmx-msr-bitmap", "vmx-monitor-exit", "vmx-pause-exit", "vmx-secondary-ctls",
1248 .msr = {
1249 .index = MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
1253 [FEAT_VMX_SECONDARY_CTLS] = {
1254 .type = MSR_FEATURE_WORD,
1255 .feat_names = {
1256 "vmx-apicv-xapic", "vmx-ept", "vmx-desc-exit", "vmx-rdtscp-exit",
1257 "vmx-apicv-x2apic", "vmx-vpid", "vmx-wbinvd-exit", "vmx-unrestricted-guest",
1258 "vmx-apicv-register", "vmx-apicv-vid", "vmx-ple", "vmx-rdrand-exit",
1259 "vmx-invpcid-exit", "vmx-vmfunc", "vmx-shadow-vmcs", "vmx-encls-exit",
1260 "vmx-rdseed-exit", "vmx-pml", NULL, NULL,
1261 "vmx-xsaves", NULL, NULL, NULL,
1262 NULL, NULL, NULL, NULL,
1263 NULL, NULL, NULL, NULL,
1265 .msr = {
1266 .index = MSR_IA32_VMX_PROCBASED_CTLS2,
1270 [FEAT_VMX_PINBASED_CTLS] = {
1271 .type = MSR_FEATURE_WORD,
1272 .feat_names = {
1273 "vmx-intr-exit", NULL, NULL, "vmx-nmi-exit",
1274 NULL, "vmx-vnmi", "vmx-preemption-timer", "vmx-posted-intr",
1275 NULL, NULL, NULL, NULL,
1276 NULL, NULL, NULL, NULL,
1277 NULL, NULL, NULL, NULL,
1278 NULL, NULL, NULL, NULL,
1279 NULL, NULL, NULL, NULL,
1280 NULL, NULL, NULL, NULL,
1282 .msr = {
1283 .index = MSR_IA32_VMX_TRUE_PINBASED_CTLS,
1287 [FEAT_VMX_EXIT_CTLS] = {
1288 .type = MSR_FEATURE_WORD,
1290 * VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE is copied from
1291 * the LM CPUID bit.
1293 .feat_names = {
1294 NULL, NULL, "vmx-exit-nosave-debugctl", NULL,
1295 NULL, NULL, NULL, NULL,
1296 NULL, NULL /* vmx-exit-host-addr-space-size */, NULL, NULL,
1297 "vmx-exit-load-perf-global-ctrl", NULL, NULL, "vmx-exit-ack-intr",
1298 NULL, NULL, "vmx-exit-save-pat", "vmx-exit-load-pat",
1299 "vmx-exit-save-efer", "vmx-exit-load-efer",
1300 "vmx-exit-save-preemption-timer", "vmx-exit-clear-bndcfgs",
1301 NULL, "vmx-exit-clear-rtit-ctl", NULL, NULL,
1302 NULL, NULL, NULL, NULL,
1304 .msr = {
1305 .index = MSR_IA32_VMX_TRUE_EXIT_CTLS,
1309 [FEAT_VMX_ENTRY_CTLS] = {
1310 .type = MSR_FEATURE_WORD,
1311 .feat_names = {
1312 NULL, NULL, "vmx-entry-noload-debugctl", NULL,
1313 NULL, NULL, NULL, NULL,
1314 NULL, "vmx-entry-ia32e-mode", NULL, NULL,
1315 NULL, "vmx-entry-load-perf-global-ctrl", "vmx-entry-load-pat", "vmx-entry-load-efer",
1316 "vmx-entry-load-bndcfgs", NULL, "vmx-entry-load-rtit-ctl", NULL,
1317 NULL, NULL, NULL, NULL,
1318 NULL, NULL, NULL, NULL,
1319 NULL, NULL, NULL, NULL,
1321 .msr = {
1322 .index = MSR_IA32_VMX_TRUE_ENTRY_CTLS,
1326 [FEAT_VMX_MISC] = {
1327 .type = MSR_FEATURE_WORD,
1328 .feat_names = {
1329 NULL, NULL, NULL, NULL,
1330 NULL, "vmx-store-lma", "vmx-activity-hlt", "vmx-activity-shutdown",
1331 "vmx-activity-wait-sipi", NULL, NULL, NULL,
1332 NULL, NULL, NULL, NULL,
1333 NULL, NULL, NULL, NULL,
1334 NULL, NULL, NULL, NULL,
1335 NULL, NULL, NULL, NULL,
1336 NULL, "vmx-vmwrite-vmexit-fields", "vmx-zero-len-inject", NULL,
1338 .msr = {
1339 .index = MSR_IA32_VMX_MISC,
1343 [FEAT_VMX_EPT_VPID_CAPS] = {
1344 .type = MSR_FEATURE_WORD,
1345 .feat_names = {
1346 "vmx-ept-execonly", NULL, NULL, NULL,
1347 NULL, NULL, "vmx-page-walk-4", "vmx-page-walk-5",
1348 NULL, NULL, NULL, NULL,
1349 NULL, NULL, NULL, NULL,
1350 "vmx-ept-2mb", "vmx-ept-1gb", NULL, NULL,
1351 "vmx-invept", "vmx-eptad", "vmx-ept-advanced-exitinfo", NULL,
1352 NULL, "vmx-invept-single-context", "vmx-invept-all-context", NULL,
1353 NULL, NULL, NULL, NULL,
1354 "vmx-invvpid", NULL, NULL, NULL,
1355 NULL, NULL, NULL, NULL,
1356 "vmx-invvpid-single-addr", "vmx-invept-single-context",
1357 "vmx-invvpid-all-context", "vmx-invept-single-context-noglobals",
1358 NULL, NULL, NULL, NULL,
1359 NULL, NULL, NULL, NULL,
1360 NULL, NULL, NULL, NULL,
1361 NULL, NULL, NULL, NULL,
1362 NULL, NULL, NULL, NULL,
1364 .msr = {
1365 .index = MSR_IA32_VMX_EPT_VPID_CAP,
1369 [FEAT_VMX_BASIC] = {
1370 .type = MSR_FEATURE_WORD,
1371 .feat_names = {
1372 [54] = "vmx-ins-outs",
1373 [55] = "vmx-true-ctls",
1375 .msr = {
1376 .index = MSR_IA32_VMX_BASIC,
1378 /* Just to be safe - we don't support setting the MSEG version field. */
1379 .no_autoenable_flags = MSR_VMX_BASIC_DUAL_MONITOR,
1382 [FEAT_VMX_VMFUNC] = {
1383 .type = MSR_FEATURE_WORD,
1384 .feat_names = {
1385 [0] = "vmx-eptp-switching",
1387 .msr = {
1388 .index = MSR_IA32_VMX_VMFUNC,
1394 typedef struct FeatureMask {
1395 FeatureWord index;
1396 uint64_t mask;
1397 } FeatureMask;
1399 typedef struct FeatureDep {
1400 FeatureMask from, to;
1401 } FeatureDep;
1403 static FeatureDep feature_dependencies[] = {
1405 .from = { FEAT_7_0_EDX, CPUID_7_0_EDX_ARCH_CAPABILITIES },
1406 .to = { FEAT_ARCH_CAPABILITIES, ~0ull },
1409 .from = { FEAT_7_0_EDX, CPUID_7_0_EDX_CORE_CAPABILITY },
1410 .to = { FEAT_CORE_CAPABILITY, ~0ull },
1413 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1414 .to = { FEAT_VMX_PROCBASED_CTLS, ~0ull },
1417 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1418 .to = { FEAT_VMX_PINBASED_CTLS, ~0ull },
1421 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1422 .to = { FEAT_VMX_EXIT_CTLS, ~0ull },
1425 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1426 .to = { FEAT_VMX_ENTRY_CTLS, ~0ull },
1429 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1430 .to = { FEAT_VMX_MISC, ~0ull },
1433 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1434 .to = { FEAT_VMX_BASIC, ~0ull },
1437 .from = { FEAT_8000_0001_EDX, CPUID_EXT2_LM },
1438 .to = { FEAT_VMX_ENTRY_CTLS, VMX_VM_ENTRY_IA32E_MODE },
1441 .from = { FEAT_VMX_PROCBASED_CTLS, VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS },
1442 .to = { FEAT_VMX_SECONDARY_CTLS, ~0ull },
1445 .from = { FEAT_XSAVE, CPUID_XSAVE_XSAVES },
1446 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_XSAVES },
1449 .from = { FEAT_1_ECX, CPUID_EXT_RDRAND },
1450 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDRAND_EXITING },
1453 .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_INVPCID },
1454 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_INVPCID },
1457 .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_RDSEED },
1458 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDSEED_EXITING },
1461 .from = { FEAT_8000_0001_EDX, CPUID_EXT2_RDTSCP },
1462 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDTSCP },
1465 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_EPT },
1466 .to = { FEAT_VMX_EPT_VPID_CAPS, 0xffffffffull },
1469 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_EPT },
1470 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST },
1473 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_VPID },
1474 .to = { FEAT_VMX_EPT_VPID_CAPS, 0xffffffffull << 32 },
1477 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_VMFUNC },
1478 .to = { FEAT_VMX_VMFUNC, ~0ull },
1482 typedef struct X86RegisterInfo32 {
1483 /* Name of register */
1484 const char *name;
1485 /* QAPI enum value register */
1486 X86CPURegister32 qapi_enum;
1487 } X86RegisterInfo32;
1489 #define REGISTER(reg) \
1490 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
1491 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
1492 REGISTER(EAX),
1493 REGISTER(ECX),
1494 REGISTER(EDX),
1495 REGISTER(EBX),
1496 REGISTER(ESP),
1497 REGISTER(EBP),
1498 REGISTER(ESI),
1499 REGISTER(EDI),
1501 #undef REGISTER
1503 typedef struct ExtSaveArea {
1504 uint32_t feature, bits;
1505 uint32_t offset, size;
1506 } ExtSaveArea;
1508 static const ExtSaveArea x86_ext_save_areas[] = {
1509 [XSTATE_FP_BIT] = {
1510 /* x87 FP state component is always enabled if XSAVE is supported */
1511 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
1512 /* x87 state is in the legacy region of the XSAVE area */
1513 .offset = 0,
1514 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
1516 [XSTATE_SSE_BIT] = {
1517 /* SSE state component is always enabled if XSAVE is supported */
1518 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
1519 /* SSE state is in the legacy region of the XSAVE area */
1520 .offset = 0,
1521 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
1523 [XSTATE_YMM_BIT] =
1524 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
1525 .offset = offsetof(X86XSaveArea, avx_state),
1526 .size = sizeof(XSaveAVX) },
1527 [XSTATE_BNDREGS_BIT] =
1528 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
1529 .offset = offsetof(X86XSaveArea, bndreg_state),
1530 .size = sizeof(XSaveBNDREG) },
1531 [XSTATE_BNDCSR_BIT] =
1532 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
1533 .offset = offsetof(X86XSaveArea, bndcsr_state),
1534 .size = sizeof(XSaveBNDCSR) },
1535 [XSTATE_OPMASK_BIT] =
1536 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1537 .offset = offsetof(X86XSaveArea, opmask_state),
1538 .size = sizeof(XSaveOpmask) },
1539 [XSTATE_ZMM_Hi256_BIT] =
1540 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1541 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
1542 .size = sizeof(XSaveZMM_Hi256) },
1543 [XSTATE_Hi16_ZMM_BIT] =
1544 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1545 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
1546 .size = sizeof(XSaveHi16_ZMM) },
1547 [XSTATE_PKRU_BIT] =
1548 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
1549 .offset = offsetof(X86XSaveArea, pkru_state),
1550 .size = sizeof(XSavePKRU) },
1553 static uint32_t xsave_area_size(uint64_t mask)
1555 int i;
1556 uint64_t ret = 0;
1558 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
1559 const ExtSaveArea *esa = &x86_ext_save_areas[i];
1560 if ((mask >> i) & 1) {
1561 ret = MAX(ret, esa->offset + esa->size);
1564 return ret;
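/*
 * Illustrative sketch (not in the original source): for a component mask
 * covering only the legacy x87 and SSE states, the computed XSAVE area
 * size is just the legacy region plus the XSAVE header, since both
 * components live at offset 0.  The helper name is hypothetical.
 */
static void __attribute__((unused)) xsave_area_size_example(void)
{
    uint32_t legacy = xsave_area_size(XSTATE_FP_MASK | XSTATE_SSE_MASK);

    assert(legacy == sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader));
}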
1567 static inline bool accel_uses_host_cpuid(void)
1569 return kvm_enabled() || hvf_enabled();
1572 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
1574 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
1575 cpu->env.features[FEAT_XSAVE_COMP_LO];
1578 const char *get_register_name_32(unsigned int reg)
1580 if (reg >= CPU_NB_REGS32) {
1581 return NULL;
1583 return x86_reg_info_32[reg].name;
1587 * Returns the set of feature flags that are supported and migratable by
1588 * QEMU, for a given FeatureWord.
1590 static uint64_t x86_cpu_get_migratable_flags(FeatureWord w)
1592 FeatureWordInfo *wi = &feature_word_info[w];
1593 uint64_t r = 0;
1594 int i;
1596 for (i = 0; i < 64; i++) {
1597 uint64_t f = 1ULL << i;
1599 /* If the feature name is known, it is implicitly considered migratable,
1600 * unless it is explicitly set in unmigratable_flags */
1601 if ((wi->migratable_flags & f) ||
1602 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
1603 r |= f;
1606 return r;
1609 void host_cpuid(uint32_t function, uint32_t count,
1610 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
1612 uint32_t vec[4];
1614 #ifdef __x86_64__
1615 asm volatile("cpuid"
1616 : "=a"(vec[0]), "=b"(vec[1]),
1617 "=c"(vec[2]), "=d"(vec[3])
1618 : "0"(function), "c"(count) : "cc");
1619 #elif defined(__i386__)
1620 asm volatile("pusha \n\t"
1621 "cpuid \n\t"
1622 "mov %%eax, 0(%2) \n\t"
1623 "mov %%ebx, 4(%2) \n\t"
1624 "mov %%ecx, 8(%2) \n\t"
1625 "mov %%edx, 12(%2) \n\t"
1626 "popa"
1627 : : "a"(function), "c"(count), "S"(vec)
1628 : "memory", "cc");
1629 #else
1630 abort();
1631 #endif
1633 if (eax)
1634 *eax = vec[0];
1635 if (ebx)
1636 *ebx = vec[1];
1637 if (ecx)
1638 *ecx = vec[2];
1639 if (edx)
1640 *edx = vec[3];
1643 void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
1645 uint32_t eax, ebx, ecx, edx;
1647 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1648 x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
1650 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1651 if (family) {
1652 *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1654 if (model) {
1655 *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1657 if (stepping) {
1658 *stepping = eax & 0x0F;
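/*
 * Illustrative sketch (not in the original source): the family/model/
 * stepping decode above applied to a fixed CPUID.01H.EAX signature.
 * 0x000506E3 is used as a sample value (family 6, model 0x5E, stepping 3);
 * the helper name is hypothetical.
 */
static void __attribute__((unused)) fms_decode_example(void)
{
    uint32_t eax = 0x000506E3;
    int family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    int model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    int stepping = eax & 0x0F;

    assert(family == 6 && model == 0x5E && stepping == 3);
}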
1662 /* CPU class name definitions: */
1664 /* Return type name for a given CPU model name
1665 * Caller is responsible for freeing the returned string.
1667 static char *x86_cpu_type_name(const char *model_name)
1669 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
1672 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
1674 g_autofree char *typename = x86_cpu_type_name(cpu_model);
1675 return object_class_by_name(typename);
1678 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
1680 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
1681 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
1682 return g_strndup(class_name,
1683 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
1686 typedef struct PropValue {
1687 const char *prop, *value;
1688 } PropValue;
1690 typedef struct X86CPUVersionDefinition {
1691 X86CPUVersion version;
1692 const char *alias;
1693 PropValue *props;
1694 } X86CPUVersionDefinition;
1696 /* Base definition for a CPU model */
1697 typedef struct X86CPUDefinition {
1698 const char *name;
1699 uint32_t level;
1700 uint32_t xlevel;
1701 /* vendor is zero-terminated, 12 character ASCII string */
1702 char vendor[CPUID_VENDOR_SZ + 1];
1703 int family;
1704 int model;
1705 int stepping;
1706 FeatureWordArray features;
1707 const char *model_id;
1708 CPUCaches *cache_info;
1710 * Definitions for alternative versions of CPU model.
1711 * List is terminated by item with version == 0.
1712 * If NULL, version 1 will be registered automatically.
1714 const X86CPUVersionDefinition *versions;
1715 } X86CPUDefinition;
1717 /* Reference to a specific CPU model version */
1718 struct X86CPUModel {
1719 /* Base CPU definition */
1720 X86CPUDefinition *cpudef;
1721 /* CPU model version */
1722 X86CPUVersion version;
1724 * If true, this is an alias CPU model.
1725 * This matters only for "-cpu help" and query-cpu-definitions
1727 bool is_alias;
1730 /* Get full model name for CPU version */
1731 static char *x86_cpu_versioned_model_name(X86CPUDefinition *cpudef,
1732 X86CPUVersion version)
1734 assert(version > 0);
1735 return g_strdup_printf("%s-v%d", cpudef->name, (int)version);
1738 static const X86CPUVersionDefinition *x86_cpu_def_get_versions(X86CPUDefinition *def)
1740 /* When X86CPUDefinition::versions is NULL, we register only v1 */
1741 static const X86CPUVersionDefinition default_version_list[] = {
1742 { 1 },
1743 { /* end of list */ }
1746 return def->versions ?: default_version_list;
1749 static CPUCaches epyc_cache_info = {
1750 .l1d_cache = &(CPUCacheInfo) {
1751 .type = DATA_CACHE,
1752 .level = 1,
1753 .size = 32 * KiB,
1754 .line_size = 64,
1755 .associativity = 8,
1756 .partitions = 1,
1757 .sets = 64,
1758 .lines_per_tag = 1,
1759 .self_init = 1,
1760 .no_invd_sharing = true,
1762 .l1i_cache = &(CPUCacheInfo) {
1763 .type = INSTRUCTION_CACHE,
1764 .level = 1,
1765 .size = 64 * KiB,
1766 .line_size = 64,
1767 .associativity = 4,
1768 .partitions = 1,
1769 .sets = 256,
1770 .lines_per_tag = 1,
1771 .self_init = 1,
1772 .no_invd_sharing = true,
1773 },
1774 .l2_cache = &(CPUCacheInfo) {
1775 .type = UNIFIED_CACHE,
1776 .level = 2,
1777 .size = 512 * KiB,
1778 .line_size = 64,
1779 .associativity = 8,
1780 .partitions = 1,
1781 .sets = 1024,
1782 .lines_per_tag = 1,
1783 },
1784 .l3_cache = &(CPUCacheInfo) {
1785 .type = UNIFIED_CACHE,
1786 .level = 3,
1787 .size = 8 * MiB,
1788 .line_size = 64,
1789 .associativity = 16,
1790 .partitions = 1,
1791 .sets = 8192,
1792 .lines_per_tag = 1,
1793 .self_init = true,
1794 .inclusive = true,
1795 .complex_indexing = true,
1796 },
1797 };
1799 /* The following VMX features are not supported by KVM and are left out in the
1800 * CPU definitions:
1802 * Dual-monitor support (all processors)
1803 * Entry to SMM
1804 * Deactivate dual-monitor treatment
1805 * Number of CR3-target values
1806 * Shutdown activity state
1807 * Wait-for-SIPI activity state
1808 * PAUSE-loop exiting (Westmere and newer)
1809 * EPT-violation #VE (Broadwell and newer)
1810 * Inject event with insn length=0 (Skylake and newer)
1811 * Conceal non-root operation from PT
1812 * Conceal VM exits from PT
1813 * Conceal VM entries from PT
1814 * Enable ENCLS exiting
1815 * Mode-based execute control (XS/XU)
1816 * TSC scaling (Skylake Server and newer)
1817 * GPA translation for PT (IceLake and newer)
1818 * User wait and pause
1819 * ENCLV exiting
1820 * Load IA32_RTIT_CTL
1821 * Clear IA32_RTIT_CTL
1822 * Advanced VM-exit information for EPT violations
1823 * Sub-page write permissions
1824 * PT in VMX operation
1825 */
1827 static X86CPUDefinition builtin_x86_defs[] = {
1829 .name = "qemu64",
1830 .level = 0xd,
1831 .vendor = CPUID_VENDOR_AMD,
1832 .family = 6,
1833 .model = 6,
1834 .stepping = 3,
1835 .features[FEAT_1_EDX] =
1836 PPRO_FEATURES |
1837 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1838 CPUID_PSE36,
1839 .features[FEAT_1_ECX] =
1840 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1841 .features[FEAT_8000_0001_EDX] =
1842 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1843 .features[FEAT_8000_0001_ECX] =
1844 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
1845 .xlevel = 0x8000000A,
1846 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1849 .name = "phenom",
1850 .level = 5,
1851 .vendor = CPUID_VENDOR_AMD,
1852 .family = 16,
1853 .model = 2,
1854 .stepping = 3,
1855 /* Missing: CPUID_HT */
1856 .features[FEAT_1_EDX] =
1857 PPRO_FEATURES |
1858 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1859 CPUID_PSE36 | CPUID_VME,
1860 .features[FEAT_1_ECX] =
1861 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
1862 CPUID_EXT_POPCNT,
1863 .features[FEAT_8000_0001_EDX] =
1864 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
1865 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
1866 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
1867 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1868 CPUID_EXT3_CR8LEG,
1869 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1870 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
1871 .features[FEAT_8000_0001_ECX] =
1872 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
1873 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
1874 /* Missing: CPUID_SVM_LBRV */
1875 .features[FEAT_SVM] =
1876 CPUID_SVM_NPT,
1877 .xlevel = 0x8000001A,
1878 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
1881 .name = "core2duo",
1882 .level = 10,
1883 .vendor = CPUID_VENDOR_INTEL,
1884 .family = 6,
1885 .model = 15,
1886 .stepping = 11,
1887 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1888 .features[FEAT_1_EDX] =
1889 PPRO_FEATURES |
1890 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1891 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
1892 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
1893 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
1894 .features[FEAT_1_ECX] =
1895 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1896 CPUID_EXT_CX16,
1897 .features[FEAT_8000_0001_EDX] =
1898 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1899 .features[FEAT_8000_0001_ECX] =
1900 CPUID_EXT3_LAHF_LM,
1901 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS,
1902 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
1903 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
1904 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
1905 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
1906 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS,
1907 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
1908 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
1909 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
1910 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
1911 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
1912 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
1913 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
1914 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
1915 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
1916 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
1917 .features[FEAT_VMX_SECONDARY_CTLS] =
1918 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES,
1919 .xlevel = 0x80000008,
1920 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
1923 .name = "kvm64",
1924 .level = 0xd,
1925 .vendor = CPUID_VENDOR_INTEL,
1926 .family = 15,
1927 .model = 6,
1928 .stepping = 1,
1929 /* Missing: CPUID_HT */
1930 .features[FEAT_1_EDX] =
1931 PPRO_FEATURES | CPUID_VME |
1932 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1933 CPUID_PSE36,
1934 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
1935 .features[FEAT_1_ECX] =
1936 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1937 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
1938 .features[FEAT_8000_0001_EDX] =
1939 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1940 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1941 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
1942 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1943 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
1944 .features[FEAT_8000_0001_ECX] =
1945 0,
1946 /* VMX features from Cedar Mill/Prescott */
1947 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
1948 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
1949 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
1950 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
1951 VMX_PIN_BASED_NMI_EXITING,
1952 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
1953 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
1954 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
1955 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
1956 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
1957 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
1958 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
1959 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING,
1960 .xlevel = 0x80000008,
1961 .model_id = "Common KVM processor"
1964 .name = "qemu32",
1965 .level = 4,
1966 .vendor = CPUID_VENDOR_INTEL,
1967 .family = 6,
1968 .model = 6,
1969 .stepping = 3,
1970 .features[FEAT_1_EDX] =
1971 PPRO_FEATURES,
1972 .features[FEAT_1_ECX] =
1973 CPUID_EXT_SSE3,
1974 .xlevel = 0x80000004,
1975 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1978 .name = "kvm32",
1979 .level = 5,
1980 .vendor = CPUID_VENDOR_INTEL,
1981 .family = 15,
1982 .model = 6,
1983 .stepping = 1,
1984 .features[FEAT_1_EDX] =
1985 PPRO_FEATURES | CPUID_VME |
1986 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
1987 .features[FEAT_1_ECX] =
1988 CPUID_EXT_SSE3,
1989 .features[FEAT_8000_0001_ECX] =
1990 0,
1991 /* VMX features from Yonah */
1992 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
1993 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
1994 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
1995 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
1996 VMX_PIN_BASED_NMI_EXITING,
1997 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
1998 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
1999 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2000 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2001 VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING |
2002 VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING |
2003 VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_USE_MSR_BITMAPS,
2004 .xlevel = 0x80000008,
2005 .model_id = "Common 32-bit KVM processor"
2008 .name = "coreduo",
2009 .level = 10,
2010 .vendor = CPUID_VENDOR_INTEL,
2011 .family = 6,
2012 .model = 14,
2013 .stepping = 8,
2014 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
2015 .features[FEAT_1_EDX] =
2016 PPRO_FEATURES | CPUID_VME |
2017 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
2018 CPUID_SS,
2019 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
2020 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
2021 .features[FEAT_1_ECX] =
2022 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
2023 .features[FEAT_8000_0001_EDX] =
2024 CPUID_EXT2_NX,
2025 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
2026 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
2027 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
2028 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2029 VMX_PIN_BASED_NMI_EXITING,
2030 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2031 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2032 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2033 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2034 VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING |
2035 VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING |
2036 VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_USE_MSR_BITMAPS,
2037 .xlevel = 0x80000008,
2038 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
2041 .name = "486",
2042 .level = 1,
2043 .vendor = CPUID_VENDOR_INTEL,
2044 .family = 4,
2045 .model = 8,
2046 .stepping = 0,
2047 .features[FEAT_1_EDX] =
2048 I486_FEATURES,
2049 .xlevel = 0,
2050 .model_id = "",
2053 .name = "pentium",
2054 .level = 1,
2055 .vendor = CPUID_VENDOR_INTEL,
2056 .family = 5,
2057 .model = 4,
2058 .stepping = 3,
2059 .features[FEAT_1_EDX] =
2060 PENTIUM_FEATURES,
2061 .xlevel = 0,
2062 .model_id = "",
2065 .name = "pentium2",
2066 .level = 2,
2067 .vendor = CPUID_VENDOR_INTEL,
2068 .family = 6,
2069 .model = 5,
2070 .stepping = 2,
2071 .features[FEAT_1_EDX] =
2072 PENTIUM2_FEATURES,
2073 .xlevel = 0,
2074 .model_id = "",
2077 .name = "pentium3",
2078 .level = 3,
2079 .vendor = CPUID_VENDOR_INTEL,
2080 .family = 6,
2081 .model = 7,
2082 .stepping = 3,
2083 .features[FEAT_1_EDX] =
2084 PENTIUM3_FEATURES,
2085 .xlevel = 0,
2086 .model_id = "",
2089 .name = "athlon",
2090 .level = 2,
2091 .vendor = CPUID_VENDOR_AMD,
2092 .family = 6,
2093 .model = 2,
2094 .stepping = 3,
2095 .features[FEAT_1_EDX] =
2096 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
2097 CPUID_MCA,
2098 .features[FEAT_8000_0001_EDX] =
2099 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
2100 .xlevel = 0x80000008,
2101 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
2104 .name = "n270",
2105 .level = 10,
2106 .vendor = CPUID_VENDOR_INTEL,
2107 .family = 6,
2108 .model = 28,
2109 .stepping = 2,
2110 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
2111 .features[FEAT_1_EDX] =
2112 PPRO_FEATURES |
2113 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
2114 CPUID_ACPI | CPUID_SS,
2115 /* Some CPUs got no CPUID_SEP */
2116 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
2117 * CPUID_EXT_XTPR */
2118 .features[FEAT_1_ECX] =
2119 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
2120 CPUID_EXT_MOVBE,
2121 .features[FEAT_8000_0001_EDX] =
2122 CPUID_EXT2_NX,
2123 .features[FEAT_8000_0001_ECX] =
2124 CPUID_EXT3_LAHF_LM,
2125 .xlevel = 0x80000008,
2126 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
2129 .name = "Conroe",
2130 .level = 10,
2131 .vendor = CPUID_VENDOR_INTEL,
2132 .family = 6,
2133 .model = 15,
2134 .stepping = 3,
2135 .features[FEAT_1_EDX] =
2136 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2137 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2138 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2139 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2140 CPUID_DE | CPUID_FP87,
2141 .features[FEAT_1_ECX] =
2142 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
2143 .features[FEAT_8000_0001_EDX] =
2144 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2145 .features[FEAT_8000_0001_ECX] =
2146 CPUID_EXT3_LAHF_LM,
2147 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS,
2148 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
2149 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
2150 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
2151 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2152 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS,
2153 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2154 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2155 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2156 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2157 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2158 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2159 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2160 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2161 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2162 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2163 .features[FEAT_VMX_SECONDARY_CTLS] =
2164 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES,
2165 .xlevel = 0x80000008,
2166 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
2169 .name = "Penryn",
2170 .level = 10,
2171 .vendor = CPUID_VENDOR_INTEL,
2172 .family = 6,
2173 .model = 23,
2174 .stepping = 3,
2175 .features[FEAT_1_EDX] =
2176 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2177 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2178 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2179 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2180 CPUID_DE | CPUID_FP87,
2181 .features[FEAT_1_ECX] =
2182 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2183 CPUID_EXT_SSE3,
2184 .features[FEAT_8000_0001_EDX] =
2185 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2186 .features[FEAT_8000_0001_ECX] =
2187 CPUID_EXT3_LAHF_LM,
2188 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS,
2189 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2190 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
2191 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT |
2192 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
2193 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
2194 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2195 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS,
2196 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2197 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2198 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2199 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2200 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2201 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2202 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2203 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2204 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2205 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2206 .features[FEAT_VMX_SECONDARY_CTLS] =
2207 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2208 VMX_SECONDARY_EXEC_WBINVD_EXITING,
2209 .xlevel = 0x80000008,
2210 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
2213 .name = "Nehalem",
2214 .level = 11,
2215 .vendor = CPUID_VENDOR_INTEL,
2216 .family = 6,
2217 .model = 26,
2218 .stepping = 3,
2219 .features[FEAT_1_EDX] =
2220 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2221 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2222 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2223 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2224 CPUID_DE | CPUID_FP87,
2225 .features[FEAT_1_ECX] =
2226 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2227 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
2228 .features[FEAT_8000_0001_EDX] =
2229 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
2230 .features[FEAT_8000_0001_ECX] =
2231 CPUID_EXT3_LAHF_LM,
2232 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2233 MSR_VMX_BASIC_TRUE_CTLS,
2234 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2235 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2236 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2237 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2238 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2239 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2240 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2241 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2242 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2243 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS,
2244 .features[FEAT_VMX_EXIT_CTLS] =
2245 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2246 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2247 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2248 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2249 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2250 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
2251 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2252 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2253 VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
2254 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2255 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2256 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2257 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2258 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2259 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2260 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2261 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2262 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2263 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2264 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2265 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2266 .features[FEAT_VMX_SECONDARY_CTLS] =
2267 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2268 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2269 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2270 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2271 VMX_SECONDARY_EXEC_ENABLE_VPID,
2272 .xlevel = 0x80000008,
2273 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
2274 .versions = (X86CPUVersionDefinition[]) {
2275 { .version = 1 },
2277 .version = 2,
2278 .alias = "Nehalem-IBRS",
2279 .props = (PropValue[]) {
2280 { "spec-ctrl", "on" },
2281 { "model-id",
2282 "Intel Core i7 9xx (Nehalem Core i7, IBRS update)" },
2283 { /* end of list */ }
2286 { /* end of list */ }
2290 .name = "Westmere",
2291 .level = 11,
2292 .vendor = CPUID_VENDOR_INTEL,
2293 .family = 6,
2294 .model = 44,
2295 .stepping = 1,
2296 .features[FEAT_1_EDX] =
2297 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2298 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2299 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2300 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2301 CPUID_DE | CPUID_FP87,
2302 .features[FEAT_1_ECX] =
2303 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
2304 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2305 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2306 .features[FEAT_8000_0001_EDX] =
2307 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
2308 .features[FEAT_8000_0001_ECX] =
2309 CPUID_EXT3_LAHF_LM,
2310 .features[FEAT_6_EAX] =
2311 CPUID_6_EAX_ARAT,
2312 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2313 MSR_VMX_BASIC_TRUE_CTLS,
2314 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2315 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2316 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2317 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2318 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2319 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2320 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2321 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2322 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2323 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS,
2324 .features[FEAT_VMX_EXIT_CTLS] =
2325 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2326 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2327 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2328 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2329 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2330 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2331 MSR_VMX_MISC_STORE_LMA,
2332 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2333 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2334 VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
2335 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2336 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2337 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2338 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2339 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2340 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2341 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2342 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2343 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2344 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2345 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2346 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2347 .features[FEAT_VMX_SECONDARY_CTLS] =
2348 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2349 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2350 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2351 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2352 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST,
2353 .xlevel = 0x80000008,
2354 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
2355 .versions = (X86CPUVersionDefinition[]) {
2356 { .version = 1 },
2358 .version = 2,
2359 .alias = "Westmere-IBRS",
2360 .props = (PropValue[]) {
2361 { "spec-ctrl", "on" },
2362 { "model-id",
2363 "Westmere E56xx/L56xx/X56xx (IBRS update)" },
2364 { /* end of list */ }
2367 { /* end of list */ }
2371 .name = "SandyBridge",
2372 .level = 0xd,
2373 .vendor = CPUID_VENDOR_INTEL,
2374 .family = 6,
2375 .model = 42,
2376 .stepping = 1,
2377 .features[FEAT_1_EDX] =
2378 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2379 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2380 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2381 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2382 CPUID_DE | CPUID_FP87,
2383 .features[FEAT_1_ECX] =
2384 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2385 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
2386 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2387 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2388 CPUID_EXT_SSE3,
2389 .features[FEAT_8000_0001_EDX] =
2390 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2391 CPUID_EXT2_SYSCALL,
2392 .features[FEAT_8000_0001_ECX] =
2393 CPUID_EXT3_LAHF_LM,
2394 .features[FEAT_XSAVE] =
2395 CPUID_XSAVE_XSAVEOPT,
2396 .features[FEAT_6_EAX] =
2397 CPUID_6_EAX_ARAT,
2398 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2399 MSR_VMX_BASIC_TRUE_CTLS,
2400 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2401 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2402 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2403 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2404 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2405 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2406 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2407 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2408 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2409 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS,
2410 .features[FEAT_VMX_EXIT_CTLS] =
2411 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2412 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2413 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2414 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2415 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2416 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2417 MSR_VMX_MISC_STORE_LMA,
2418 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2419 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2420 VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
2421 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2422 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2423 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2424 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2425 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2426 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2427 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2428 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2429 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2430 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2431 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2432 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2433 .features[FEAT_VMX_SECONDARY_CTLS] =
2434 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2435 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2436 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2437 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2438 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST,
2439 .xlevel = 0x80000008,
2440 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
2441 .versions = (X86CPUVersionDefinition[]) {
2442 { .version = 1 },
2444 .version = 2,
2445 .alias = "SandyBridge-IBRS",
2446 .props = (PropValue[]) {
2447 { "spec-ctrl", "on" },
2448 { "model-id",
2449 "Intel Xeon E312xx (Sandy Bridge, IBRS update)" },
2450 { /* end of list */ }
2453 { /* end of list */ }
2457 .name = "IvyBridge",
2458 .level = 0xd,
2459 .vendor = CPUID_VENDOR_INTEL,
2460 .family = 6,
2461 .model = 58,
2462 .stepping = 9,
2463 .features[FEAT_1_EDX] =
2464 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2465 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2466 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2467 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2468 CPUID_DE | CPUID_FP87,
2469 .features[FEAT_1_ECX] =
2470 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2471 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
2472 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2473 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2474 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2475 .features[FEAT_7_0_EBX] =
2476 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
2477 CPUID_7_0_EBX_ERMS,
2478 .features[FEAT_8000_0001_EDX] =
2479 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2480 CPUID_EXT2_SYSCALL,
2481 .features[FEAT_8000_0001_ECX] =
2482 CPUID_EXT3_LAHF_LM,
2483 .features[FEAT_XSAVE] =
2484 CPUID_XSAVE_XSAVEOPT,
2485 .features[FEAT_6_EAX] =
2486 CPUID_6_EAX_ARAT,
2487 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2488 MSR_VMX_BASIC_TRUE_CTLS,
2489 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2490 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2491 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2492 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2493 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2494 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2495 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2496 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2497 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2498 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS,
2499 .features[FEAT_VMX_EXIT_CTLS] =
2500 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2501 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2502 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2503 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2504 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2505 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2506 MSR_VMX_MISC_STORE_LMA,
2507 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2508 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2509 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
2510 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2511 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2512 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2513 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2514 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2515 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2516 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2517 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2518 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2519 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2520 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2521 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2522 .features[FEAT_VMX_SECONDARY_CTLS] =
2523 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2524 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2525 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2526 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2527 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
2528 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
2529 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2530 VMX_SECONDARY_EXEC_RDRAND_EXITING,
2531 .xlevel = 0x80000008,
2532 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
2533 .versions = (X86CPUVersionDefinition[]) {
2534 { .version = 1 },
2536 .version = 2,
2537 .alias = "IvyBridge-IBRS",
2538 .props = (PropValue[]) {
2539 { "spec-ctrl", "on" },
2540 { "model-id",
2541 "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)" },
2542 { /* end of list */ }
2545 { /* end of list */ }
2549 .name = "Haswell",
2550 .level = 0xd,
2551 .vendor = CPUID_VENDOR_INTEL,
2552 .family = 6,
2553 .model = 60,
2554 .stepping = 4,
2555 .features[FEAT_1_EDX] =
2556 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2557 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2558 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2559 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2560 CPUID_DE | CPUID_FP87,
2561 .features[FEAT_1_ECX] =
2562 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2563 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2564 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2565 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2566 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2567 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2568 .features[FEAT_8000_0001_EDX] =
2569 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2570 CPUID_EXT2_SYSCALL,
2571 .features[FEAT_8000_0001_ECX] =
2572 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
2573 .features[FEAT_7_0_EBX] =
2574 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2575 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2576 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2577 CPUID_7_0_EBX_RTM,
2578 .features[FEAT_XSAVE] =
2579 CPUID_XSAVE_XSAVEOPT,
2580 .features[FEAT_6_EAX] =
2581 CPUID_6_EAX_ARAT,
2582 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2583 MSR_VMX_BASIC_TRUE_CTLS,
2584 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2585 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2586 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2587 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2588 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2589 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2590 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2591 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2592 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2593 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
2594 .features[FEAT_VMX_EXIT_CTLS] =
2595 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2596 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2597 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2598 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2599 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2600 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2601 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
2602 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2603 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2604 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
2605 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2606 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2607 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2608 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2609 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2610 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2611 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2612 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2613 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2614 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2615 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2616 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2617 .features[FEAT_VMX_SECONDARY_CTLS] =
2618 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2619 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2620 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2621 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2622 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
2623 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
2624 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2625 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
2626 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS,
2627 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
2628 .xlevel = 0x80000008,
2629 .model_id = "Intel Core Processor (Haswell)",
2630 .versions = (X86CPUVersionDefinition[]) {
2631 { .version = 1 },
2633 .version = 2,
2634 .alias = "Haswell-noTSX",
2635 .props = (PropValue[]) {
2636 { "hle", "off" },
2637 { "rtm", "off" },
2638 { "stepping", "1" },
2639 { "model-id", "Intel Core Processor (Haswell, no TSX)", },
2640 { /* end of list */ }
2644 .version = 3,
2645 .alias = "Haswell-IBRS",
2646 .props = (PropValue[]) {
2647 /* Restore TSX features removed by -v2 above */
2648 { "hle", "on" },
2649 { "rtm", "on" },
2651 * Haswell and Haswell-IBRS had stepping=4 in
2652 * QEMU 4.0 and older
2654 { "stepping", "4" },
2655 { "spec-ctrl", "on" },
2656 { "model-id",
2657 "Intel Core Processor (Haswell, IBRS)" },
2658 { /* end of list */ }
2662 .version = 4,
2663 .alias = "Haswell-noTSX-IBRS",
2664 .props = (PropValue[]) {
2665 { "hle", "off" },
2666 { "rtm", "off" },
2667 /* spec-ctrl was already enabled by -v3 above */
2668 { "stepping", "1" },
2669 { "model-id",
2670 "Intel Core Processor (Haswell, no TSX, IBRS)" },
2671 { /* end of list */ }
2674 { /* end of list */ }
2678 .name = "Broadwell",
2679 .level = 0xd,
2680 .vendor = CPUID_VENDOR_INTEL,
2681 .family = 6,
2682 .model = 61,
2683 .stepping = 2,
2684 .features[FEAT_1_EDX] =
2685 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2686 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2687 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2688 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2689 CPUID_DE | CPUID_FP87,
2690 .features[FEAT_1_ECX] =
2691 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2692 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2693 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2694 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2695 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2696 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2697 .features[FEAT_8000_0001_EDX] =
2698 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2699 CPUID_EXT2_SYSCALL,
2700 .features[FEAT_8000_0001_ECX] =
2701 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2702 .features[FEAT_7_0_EBX] =
2703 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2704 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2705 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2706 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2707 CPUID_7_0_EBX_SMAP,
2708 .features[FEAT_XSAVE] =
2709 CPUID_XSAVE_XSAVEOPT,
2710 .features[FEAT_6_EAX] =
2711 CPUID_6_EAX_ARAT,
2712 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2713 MSR_VMX_BASIC_TRUE_CTLS,
2714 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2715 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2716 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2717 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2718 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2719 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2720 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2721 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2722 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2723 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
2724 .features[FEAT_VMX_EXIT_CTLS] =
2725 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2726 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2727 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2728 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2729 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2730 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2731 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
2732 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2733 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2734 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
2735 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2736 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2737 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2738 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2739 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2740 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2741 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2742 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2743 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2744 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2745 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2746 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2747 .features[FEAT_VMX_SECONDARY_CTLS] =
2748 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2749 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2750 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2751 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2752 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
2753 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
2754 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2755 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
2756 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
2757 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
2758 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
2759 .xlevel = 0x80000008,
2760 .model_id = "Intel Core Processor (Broadwell)",
2761 .versions = (X86CPUVersionDefinition[]) {
2762 { .version = 1 },
2764 .version = 2,
2765 .alias = "Broadwell-noTSX",
2766 .props = (PropValue[]) {
2767 { "hle", "off" },
2768 { "rtm", "off" },
2769 { "model-id", "Intel Core Processor (Broadwell, no TSX)", },
2770 { /* end of list */ }
2774 .version = 3,
2775 .alias = "Broadwell-IBRS",
2776 .props = (PropValue[]) {
2777 /* Restore TSX features removed by -v2 above */
2778 { "hle", "on" },
2779 { "rtm", "on" },
2780 { "spec-ctrl", "on" },
2781 { "model-id",
2782 "Intel Core Processor (Broadwell, IBRS)" },
2783 { /* end of list */ }
2787 .version = 4,
2788 .alias = "Broadwell-noTSX-IBRS",
2789 .props = (PropValue[]) {
2790 { "hle", "off" },
2791 { "rtm", "off" },
2792 /* spec-ctrl was already enabled by -v3 above */
2793 { "model-id",
2794 "Intel Core Processor (Broadwell, no TSX, IBRS)" },
2795 { /* end of list */ }
2798 { /* end of list */ }
2802 .name = "Skylake-Client",
2803 .level = 0xd,
2804 .vendor = CPUID_VENDOR_INTEL,
2805 .family = 6,
2806 .model = 94,
2807 .stepping = 3,
2808 .features[FEAT_1_EDX] =
2809 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2810 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2811 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2812 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2813 CPUID_DE | CPUID_FP87,
2814 .features[FEAT_1_ECX] =
2815 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2816 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2817 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2818 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2819 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2820 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2821 .features[FEAT_8000_0001_EDX] =
2822 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2823 CPUID_EXT2_SYSCALL,
2824 .features[FEAT_8000_0001_ECX] =
2825 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2826 .features[FEAT_7_0_EBX] =
2827 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2828 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2829 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2830 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2831 CPUID_7_0_EBX_SMAP,
2832 /* Missing: XSAVES (not supported by some Linux versions,
2833 * including v4.1 to v4.12).
2834 * KVM doesn't yet expose any XSAVES state save component,
2835 * and the only one defined in Skylake (processor tracing)
2836 * probably will block migration anyway.
2838 .features[FEAT_XSAVE] =
2839 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2840 CPUID_XSAVE_XGETBV1,
2841 .features[FEAT_6_EAX] =
2842 CPUID_6_EAX_ARAT,
2843 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
2844 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2845 MSR_VMX_BASIC_TRUE_CTLS,
2846 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2847 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2848 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2849 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2850 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2851 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2852 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2853 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2854 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2855 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
2856 .features[FEAT_VMX_EXIT_CTLS] =
2857 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2858 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2859 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2860 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2861 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2862 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2863 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
2864 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2865 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2866 VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
2867 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2868 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2869 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2870 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2871 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2872 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2873 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2874 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2875 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2876 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2877 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2878 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2879 .features[FEAT_VMX_SECONDARY_CTLS] =
2880 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2881 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2882 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2883 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
2884 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
2885 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
2886 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
2887 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
2888 .xlevel = 0x80000008,
2889 .model_id = "Intel Core Processor (Skylake)",
2890 .versions = (X86CPUVersionDefinition[]) {
2891 { .version = 1 },
2893 .version = 2,
2894 .alias = "Skylake-Client-IBRS",
2895 .props = (PropValue[]) {
2896 { "spec-ctrl", "on" },
2897 { "model-id",
2898 "Intel Core Processor (Skylake, IBRS)" },
2899 { /* end of list */ }
2903 .version = 3,
2904 .alias = "Skylake-Client-noTSX-IBRS",
2905 .props = (PropValue[]) {
2906 { "hle", "off" },
2907 { "rtm", "off" },
2908 { /* end of list */ }
2911 { /* end of list */ }
2915 .name = "Skylake-Server",
2916 .level = 0xd,
2917 .vendor = CPUID_VENDOR_INTEL,
2918 .family = 6,
2919 .model = 85,
2920 .stepping = 4,
2921 .features[FEAT_1_EDX] =
2922 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2923 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2924 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2925 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2926 CPUID_DE | CPUID_FP87,
2927 .features[FEAT_1_ECX] =
2928 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2929 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2930 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2931 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2932 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2933 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2934 .features[FEAT_8000_0001_EDX] =
2935 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2936 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2937 .features[FEAT_8000_0001_ECX] =
2938 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2939 .features[FEAT_7_0_EBX] =
2940 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2941 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2942 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2943 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2944 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
2945 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2946 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2947 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2948 .features[FEAT_7_0_ECX] =
2949 CPUID_7_0_ECX_PKU,
2950 /* Missing: XSAVES (not supported by some Linux versions,
2951 * including v4.1 to v4.12).
2952 * KVM doesn't yet expose any XSAVES state save component,
2953 * and the only one defined in Skylake (processor tracing)
2954 * probably will block migration anyway.
2956 .features[FEAT_XSAVE] =
2957 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2958 CPUID_XSAVE_XGETBV1,
2959 .features[FEAT_6_EAX] =
2960 CPUID_6_EAX_ARAT,
2961 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
2962 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2963 MSR_VMX_BASIC_TRUE_CTLS,
2964 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2965 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2966 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2967 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2968 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2969 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2970 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2971 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2972 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2973 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
2974 .features[FEAT_VMX_EXIT_CTLS] =
2975 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2976 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2977 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2978 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2979 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2980 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2981 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
2982 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2983 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2984 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
2985 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2986 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2987 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2988 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2989 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2990 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2991 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2992 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2993 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2994 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2995 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2996 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2997 .features[FEAT_VMX_SECONDARY_CTLS] =
2998 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2999 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
3000 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
3001 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3002 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
3003 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
3004 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3005 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
3006 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
3007 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
3008 .xlevel = 0x80000008,
3009 .model_id = "Intel Xeon Processor (Skylake)",
3010 .versions = (X86CPUVersionDefinition[]) {
3011 { .version = 1 },
3013 .version = 2,
3014 .alias = "Skylake-Server-IBRS",
3015 .props = (PropValue[]) {
3016 /* clflushopt was not added to Skylake-Server-IBRS */
3017 /* TODO: add -v3 including clflushopt */
3018 { "clflushopt", "off" },
3019 { "spec-ctrl", "on" },
3020 { "model-id",
3021 "Intel Xeon Processor (Skylake, IBRS)" },
3022 { /* end of list */ }
3026 .version = 3,
3027 .alias = "Skylake-Server-noTSX-IBRS",
3028 .props = (PropValue[]) {
3029 { "hle", "off" },
3030 { "rtm", "off" },
3031 { /* end of list */ }
3034 { /* end of list */ }
3038 .name = "Cascadelake-Server",
3039 .level = 0xd,
3040 .vendor = CPUID_VENDOR_INTEL,
3041 .family = 6,
3042 .model = 85,
3043 .stepping = 6,
3044 .features[FEAT_1_EDX] =
3045 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3046 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3047 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3048 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3049 CPUID_DE | CPUID_FP87,
3050 .features[FEAT_1_ECX] =
3051 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
3052 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
3053 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
3054 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
3055 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
3056 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
3057 .features[FEAT_8000_0001_EDX] =
3058 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
3059 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
3060 .features[FEAT_8000_0001_ECX] =
3061 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
3062 .features[FEAT_7_0_EBX] =
3063 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
3064 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
3065 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
3066 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
3067 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
3068 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
3069 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
3070 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
3071 .features[FEAT_7_0_ECX] =
3072 CPUID_7_0_ECX_PKU |
3073 CPUID_7_0_ECX_AVX512VNNI,
3074 .features[FEAT_7_0_EDX] =
3075 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
3076 /* Missing: XSAVES (not supported by some Linux versions,
3077 * including v4.1 to v4.12).
3078 * KVM doesn't yet expose any XSAVES state save component,
3079 * and the only one defined in Skylake (processor tracing)
3080 * probably will block migration anyway.
3082 .features[FEAT_XSAVE] =
3083 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3084 CPUID_XSAVE_XGETBV1,
3085 .features[FEAT_6_EAX] =
3086 CPUID_6_EAX_ARAT,
3087 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
3088 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
3089 MSR_VMX_BASIC_TRUE_CTLS,
3090 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
3091 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
3092 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
3093 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
3094 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
3095 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
3096 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
3097 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
3098 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
3099 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
3100 .features[FEAT_VMX_EXIT_CTLS] =
3101 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
3102 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
3103 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
3104 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
3105 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
3106 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
3107 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
3108 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
3109 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
3110 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
3111 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
3112 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
3113 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
3114 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
3115 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
3116 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
3117 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
3118 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
3119 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
3120 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
3121 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
3122 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
3123 .features[FEAT_VMX_SECONDARY_CTLS] =
3124 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3125 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
3126 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
3127 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3128 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
3129 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
3130 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3131 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
3132 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
3133 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
3134 .xlevel = 0x80000008,
3135 .model_id = "Intel Xeon Processor (Cascadelake)",
3136 .versions = (X86CPUVersionDefinition[]) {
3137 { .version = 1 },
3138 { .version = 2,
3139 .props = (PropValue[]) {
3140 { "arch-capabilities", "on" },
3141 { "rdctl-no", "on" },
3142 { "ibrs-all", "on" },
3143 { "skip-l1dfl-vmentry", "on" },
3144 { "mds-no", "on" },
3145 { /* end of list */ }
3148 { .version = 3,
3149 .alias = "Cascadelake-Server-noTSX",
3150 .props = (PropValue[]) {
3151 { "hle", "off" },
3152 { "rtm", "off" },
3153 { /* end of list */ }
3156 { /* end of list */ }
3160 .name = "Cooperlake",
3161 .level = 0xd,
3162 .vendor = CPUID_VENDOR_INTEL,
3163 .family = 6,
3164 .model = 85,
3165 .stepping = 10,
3166 .features[FEAT_1_EDX] =
3167 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3168 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3169 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3170 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3171 CPUID_DE | CPUID_FP87,
3172 .features[FEAT_1_ECX] =
3173 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
3174 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
3175 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
3176 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
3177 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
3178 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
3179 .features[FEAT_8000_0001_EDX] =
3180 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
3181 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
3182 .features[FEAT_8000_0001_ECX] =
3183 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
3184 .features[FEAT_7_0_EBX] =
3185 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
3186 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
3187 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
3188 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
3189 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
3190 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
3191 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
3192 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
3193 .features[FEAT_7_0_ECX] =
3194 CPUID_7_0_ECX_PKU |
3195 CPUID_7_0_ECX_AVX512VNNI,
3196 .features[FEAT_7_0_EDX] =
3197 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_STIBP |
3198 CPUID_7_0_EDX_SPEC_CTRL_SSBD | CPUID_7_0_EDX_ARCH_CAPABILITIES,
3199 .features[FEAT_ARCH_CAPABILITIES] =
3200 MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_IBRS_ALL |
3201 MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY | MSR_ARCH_CAP_MDS_NO,
3202 .features[FEAT_7_1_EAX] =
3203 CPUID_7_1_EAX_AVX512_BF16,
3205 * Missing: XSAVES (not supported by some Linux versions,
3206 * including v4.1 to v4.12).
3207 * KVM doesn't yet expose any XSAVES state save component,
3208 * and the only one defined in Skylake (processor tracing)
3209 * probably will block migration anyway.
3211 .features[FEAT_XSAVE] =
3212 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3213 CPUID_XSAVE_XGETBV1,
3214 .features[FEAT_6_EAX] =
3215 CPUID_6_EAX_ARAT,
3216 .xlevel = 0x80000008,
3217 .model_id = "Intel Xeon Processor (Cooperlake)",
3220 .name = "Icelake-Client",
3221 .level = 0xd,
3222 .vendor = CPUID_VENDOR_INTEL,
3223 .family = 6,
3224 .model = 126,
3225 .stepping = 0,
3226 .features[FEAT_1_EDX] =
3227 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3228 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3229 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3230 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3231 CPUID_DE | CPUID_FP87,
3232 .features[FEAT_1_ECX] =
3233 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
3234 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
3235 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
3236 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
3237 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
3238 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
3239 .features[FEAT_8000_0001_EDX] =
3240 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
3241 CPUID_EXT2_SYSCALL,
3242 .features[FEAT_8000_0001_ECX] =
3243 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
3244 .features[FEAT_8000_0008_EBX] =
3245 CPUID_8000_0008_EBX_WBNOINVD,
3246 .features[FEAT_7_0_EBX] =
3247 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
3248 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
3249 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
3250 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
3251 CPUID_7_0_EBX_SMAP,
3252 .features[FEAT_7_0_ECX] =
3253 CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
3254 CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI |
3255 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
3256 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
3257 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
3258 .features[FEAT_7_0_EDX] =
3259 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
3260 /* Missing: XSAVES (not supported by some Linux versions,
3261 * including v4.1 to v4.12).
3262 * KVM doesn't yet expose any XSAVES state save component,
3263 * and the only one defined in Skylake (processor tracing)
3264 * probably will block migration anyway.
3266 .features[FEAT_XSAVE] =
3267 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3268 CPUID_XSAVE_XGETBV1,
3269 .features[FEAT_6_EAX] =
3270 CPUID_6_EAX_ARAT,
3271 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
3272 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
3273 MSR_VMX_BASIC_TRUE_CTLS,
3274 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
3275 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
3276 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
3277 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
3278 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
3279 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
3280 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
3281 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
3282 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
3283 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
3284 .features[FEAT_VMX_EXIT_CTLS] =
3285 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
3286 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
3287 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
3288 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
3289 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
3290 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
3291 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
3292 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
3293 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
3294 VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
3295 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
3296 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
3297 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
3298 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
3299 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
3300 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
3301 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
3302 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
3303 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
3304 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
3305 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
3306 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
3307 .features[FEAT_VMX_SECONDARY_CTLS] =
3308 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3309 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
3310 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
3311 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
3312 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
3313 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
3314 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
3315 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
3316 .xlevel = 0x80000008,
3317 .model_id = "Intel Core Processor (Icelake)",
3318 .versions = (X86CPUVersionDefinition[]) {
3319 { .version = 1 },
3321 .version = 2,
3322 .alias = "Icelake-Client-noTSX",
3323 .props = (PropValue[]) {
3324 { "hle", "off" },
3325 { "rtm", "off" },
3326 { /* end of list */ }
3329 { /* end of list */ }
3333 .name = "Icelake-Server",
3334 .level = 0xd,
3335 .vendor = CPUID_VENDOR_INTEL,
3336 .family = 6,
3337 .model = 134,
3338 .stepping = 0,
3339 .features[FEAT_1_EDX] =
3340 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3341 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3342 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3343 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3344 CPUID_DE | CPUID_FP87,
3345 .features[FEAT_1_ECX] =
3346 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
3347 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
3348 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
3349 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
3350 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
3351 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
3352 .features[FEAT_8000_0001_EDX] =
3353 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
3354 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
3355 .features[FEAT_8000_0001_ECX] =
3356 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
3357 .features[FEAT_8000_0008_EBX] =
3358 CPUID_8000_0008_EBX_WBNOINVD,
3359 .features[FEAT_7_0_EBX] =
3360 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
3361 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
3362 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
3363 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
3364 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
3365 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
3366 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
3367 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
3368 .features[FEAT_7_0_ECX] =
3369 CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
3370 CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI |
3371 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
3372 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
3373 CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57,
3374 .features[FEAT_7_0_EDX] =
3375 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
3376 /* Missing: XSAVES (not supported by some Linux versions,
3377 * including v4.1 to v4.12).
3378 * KVM doesn't yet expose any XSAVES state save component,
3379 * and the only one defined in Skylake (processor tracing)
3380 * probably will block migration anyway.
3382 .features[FEAT_XSAVE] =
3383 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3384 CPUID_XSAVE_XGETBV1,
3385 .features[FEAT_6_EAX] =
3386 CPUID_6_EAX_ARAT,
3387 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
3388 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
3389 MSR_VMX_BASIC_TRUE_CTLS,
3390 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
3391 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
3392 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
3393 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
3394 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
3395 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
3396 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
3397 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
3398 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
3399 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
3400 .features[FEAT_VMX_EXIT_CTLS] =
3401 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
3402 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
3403 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
3404 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
3405 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
3406 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
3407 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
3408 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
3409 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
3410 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
3411 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
3412 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
3413 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
3414 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
3415 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
3416 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
3417 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
3418 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
3419 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
3420 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
3421 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
3422 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
3423 .features[FEAT_VMX_SECONDARY_CTLS] =
3424 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3425 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
3426 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
3427 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3428 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
3429 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
3430 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3431 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
3432 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS,
3433 .xlevel = 0x80000008,
3434 .model_id = "Intel Xeon Processor (Icelake)",
3435 .versions = (X86CPUVersionDefinition[]) {
3436 { .version = 1 },
3438 .version = 2,
3439 .alias = "Icelake-Server-noTSX",
3440 .props = (PropValue[]) {
3441 { "hle", "off" },
3442 { "rtm", "off" },
3443 { /* end of list */ }
3446 { /* end of list */ }
3450 .name = "Denverton",
3451 .level = 21,
3452 .vendor = CPUID_VENDOR_INTEL,
3453 .family = 6,
3454 .model = 95,
3455 .stepping = 1,
3456 .features[FEAT_1_EDX] =
3457 CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC |
3458 CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC |
3459 CPUID_SEP | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV |
3460 CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | CPUID_MMX | CPUID_FXSR |
3461 CPUID_SSE | CPUID_SSE2,
3462 .features[FEAT_1_ECX] =
3463 CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR |
3464 CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | CPUID_EXT_SSE41 |
3465 CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE |
3466 CPUID_EXT_POPCNT | CPUID_EXT_TSC_DEADLINE_TIMER |
3467 CPUID_EXT_AES | CPUID_EXT_XSAVE | CPUID_EXT_RDRAND,
3468 .features[FEAT_8000_0001_EDX] =
3469 CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | CPUID_EXT2_PDPE1GB |
3470 CPUID_EXT2_RDTSCP | CPUID_EXT2_LM,
3471 .features[FEAT_8000_0001_ECX] =
3472 CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
3473 .features[FEAT_7_0_EBX] =
3474 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_ERMS |
3475 CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_SMAP |
3476 CPUID_7_0_EBX_CLFLUSHOPT | CPUID_7_0_EBX_SHA_NI,
3477 .features[FEAT_7_0_EDX] =
3478 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_ARCH_CAPABILITIES |
3479 CPUID_7_0_EDX_SPEC_CTRL_SSBD,
3481 * Missing: XSAVES (not supported by some Linux versions,
3482 * including v4.1 to v4.12).
3483 * KVM doesn't yet expose any XSAVES state save component,
3484 * and the only one defined in Skylake (processor tracing)
3485 * probably will block migration anyway.
3487 .features[FEAT_XSAVE] =
3488 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | CPUID_XSAVE_XGETBV1,
3489 .features[FEAT_6_EAX] =
3490 CPUID_6_EAX_ARAT,
3491 .features[FEAT_ARCH_CAPABILITIES] =
3492 MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY,
3493 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
3494 MSR_VMX_BASIC_TRUE_CTLS,
3495 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
3496 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
3497 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
3498 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
3499 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
3500 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
3501 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
3502 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
3503 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
3504 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
3505 .features[FEAT_VMX_EXIT_CTLS] =
3506 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
3507 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
3508 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
3509 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
3510 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
3511 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
3512 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
3513 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
3514 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
3515 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
3516 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
3517 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
3518 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
3519 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
3520 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
3521 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
3522 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
3523 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
3524 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
3525 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
3526 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
3527 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
3528 .features[FEAT_VMX_SECONDARY_CTLS] =
3529 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3530 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
3531 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
3532 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3533 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
3534 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
3535 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3536 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
3537 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
3538 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
3539 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
3540 .xlevel = 0x80000008,
3541 .model_id = "Intel Atom Processor (Denverton)",
3544 .name = "Snowridge",
3545 .level = 27,
3546 .vendor = CPUID_VENDOR_INTEL,
3547 .family = 6,
3548 .model = 134,
3549 .stepping = 1,
3550 .features[FEAT_1_EDX] =
3551 /* missing: CPUID_PN CPUID_IA64 */
3552 /* missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
3553 CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE |
3554 CPUID_TSC | CPUID_MSR | CPUID_PAE | CPUID_MCE |
3555 CPUID_CX8 | CPUID_APIC | CPUID_SEP |
3556 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV |
3557 CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH |
3558 CPUID_MMX |
3559 CPUID_FXSR | CPUID_SSE | CPUID_SSE2,
3560 .features[FEAT_1_ECX] =
3561 CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR |
3562 CPUID_EXT_SSSE3 |
3563 CPUID_EXT_CX16 |
3564 CPUID_EXT_SSE41 |
3565 CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE |
3566 CPUID_EXT_POPCNT |
3567 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_AES | CPUID_EXT_XSAVE |
3568 CPUID_EXT_RDRAND,
3569 .features[FEAT_8000_0001_EDX] =
3570 CPUID_EXT2_SYSCALL |
3571 CPUID_EXT2_NX |
3572 CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
3573 CPUID_EXT2_LM,
3574 .features[FEAT_8000_0001_ECX] =
3575 CPUID_EXT3_LAHF_LM |
3576 CPUID_EXT3_3DNOWPREFETCH,
3577 .features[FEAT_7_0_EBX] =
3578 CPUID_7_0_EBX_FSGSBASE |
3579 CPUID_7_0_EBX_SMEP |
3580 CPUID_7_0_EBX_ERMS |
3581 CPUID_7_0_EBX_MPX | /* missing bits 13, 15 */
3582 CPUID_7_0_EBX_RDSEED |
3583 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
3584 CPUID_7_0_EBX_CLWB |
3585 CPUID_7_0_EBX_SHA_NI,
3586 .features[FEAT_7_0_ECX] =
3587 CPUID_7_0_ECX_UMIP |
3588 /* missing bit 5 */
3589 CPUID_7_0_ECX_GFNI |
3590 CPUID_7_0_ECX_MOVDIRI | CPUID_7_0_ECX_CLDEMOTE |
3591 CPUID_7_0_ECX_MOVDIR64B,
3592 .features[FEAT_7_0_EDX] =
3593 CPUID_7_0_EDX_SPEC_CTRL |
3594 CPUID_7_0_EDX_ARCH_CAPABILITIES | CPUID_7_0_EDX_SPEC_CTRL_SSBD |
3595 CPUID_7_0_EDX_CORE_CAPABILITY,
3596 .features[FEAT_CORE_CAPABILITY] =
3597 MSR_CORE_CAP_SPLIT_LOCK_DETECT,
3599 * Missing: XSAVES (not supported by some Linux versions,
3600 * including v4.1 to v4.12).
3601 * KVM doesn't yet expose any XSAVES state save component,
3602 * and the only one defined in Skylake (processor tracing)
3603 * probably will block migration anyway.
3605 .features[FEAT_XSAVE] =
3606 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3607 CPUID_XSAVE_XGETBV1,
3608 .features[FEAT_6_EAX] =
3609 CPUID_6_EAX_ARAT,
3610 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
3611 MSR_VMX_BASIC_TRUE_CTLS,
3612 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
3613 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
3614 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
3615 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
3616 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
3617 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
3618 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
3619 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
3620 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
3621 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
3622 .features[FEAT_VMX_EXIT_CTLS] =
3623 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
3624 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
3625 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
3626 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
3627 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
3628 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
3629 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
3630 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
3631 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
3632 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
3633 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
3634 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
3635 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
3636 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
3637 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
3638 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
3639 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
3640 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
3641 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
3642 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
3643 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
3644 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
3645 .features[FEAT_VMX_SECONDARY_CTLS] =
3646 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3647 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
3648 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
3649 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3650 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
3651 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
3652 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3653 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
3654 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
3655 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
3656 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
3657 .xlevel = 0x80000008,
3658 .model_id = "Intel Atom Processor (SnowRidge)",
3659 .versions = (X86CPUVersionDefinition[]) {
3660 { .version = 1 },
3662 .version = 2,
3663 .props = (PropValue[]) {
3664 { "mpx", "off" },
3665 { "model-id", "Intel Atom Processor (Snowridge, no MPX)" },
3666 { /* end of list */ },
3669 { /* end of list */ },
3673 .name = "KnightsMill",
3674 .level = 0xd,
3675 .vendor = CPUID_VENDOR_INTEL,
3676 .family = 6,
3677 .model = 133,
3678 .stepping = 0,
3679 .features[FEAT_1_EDX] =
3680 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR |
3681 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV |
3682 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC |
3683 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC |
3684 CPUID_PSE | CPUID_DE | CPUID_FP87,
3685 .features[FEAT_1_ECX] =
3686 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
3687 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
3688 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
3689 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
3690 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
3691 CPUID_EXT_F16C | CPUID_EXT_RDRAND,
3692 .features[FEAT_8000_0001_EDX] =
3693 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
3694 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
3695 .features[FEAT_8000_0001_ECX] =
3696 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
3697 .features[FEAT_7_0_EBX] =
3698 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
3699 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS |
3700 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F |
3701 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF |
3702 CPUID_7_0_EBX_AVX512ER,
3703 .features[FEAT_7_0_ECX] =
3704 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
3705 .features[FEAT_7_0_EDX] =
3706 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS,
3707 .features[FEAT_XSAVE] =
3708 CPUID_XSAVE_XSAVEOPT,
3709 .features[FEAT_6_EAX] =
3710 CPUID_6_EAX_ARAT,
3711 .xlevel = 0x80000008,
3712 .model_id = "Intel Xeon Phi Processor (Knights Mill)",
3715 .name = "Opteron_G1",
3716 .level = 5,
3717 .vendor = CPUID_VENDOR_AMD,
3718 .family = 15,
3719 .model = 6,
3720 .stepping = 1,
3721 .features[FEAT_1_EDX] =
3722 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3723 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3724 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3725 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3726 CPUID_DE | CPUID_FP87,
3727 .features[FEAT_1_ECX] =
3728 CPUID_EXT_SSE3,
3729 .features[FEAT_8000_0001_EDX] =
3730 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
3731 .xlevel = 0x80000008,
3732 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
3735 .name = "Opteron_G2",
3736 .level = 5,
3737 .vendor = CPUID_VENDOR_AMD,
3738 .family = 15,
3739 .model = 6,
3740 .stepping = 1,
3741 .features[FEAT_1_EDX] =
3742 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3743 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3744 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3745 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3746 CPUID_DE | CPUID_FP87,
3747 .features[FEAT_1_ECX] =
3748 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
3749 .features[FEAT_8000_0001_EDX] =
3750 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
3751 .features[FEAT_8000_0001_ECX] =
3752 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
3753 .xlevel = 0x80000008,
3754 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
3757 .name = "Opteron_G3",
3758 .level = 5,
3759 .vendor = CPUID_VENDOR_AMD,
3760 .family = 16,
3761 .model = 2,
3762 .stepping = 3,
3763 .features[FEAT_1_EDX] =
3764 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3765 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3766 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3767 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3768 CPUID_DE | CPUID_FP87,
3769 .features[FEAT_1_ECX] =
3770 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
3771 CPUID_EXT_SSE3,
3772 .features[FEAT_8000_0001_EDX] =
3773 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL |
3774 CPUID_EXT2_RDTSCP,
3775 .features[FEAT_8000_0001_ECX] =
3776 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
3777 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
3778 .xlevel = 0x80000008,
3779 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
3782 .name = "Opteron_G4",
3783 .level = 0xd,
3784 .vendor = CPUID_VENDOR_AMD,
3785 .family = 21,
3786 .model = 1,
3787 .stepping = 2,
3788 .features[FEAT_1_EDX] =
3789 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3790 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3791 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3792 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3793 CPUID_DE | CPUID_FP87,
3794 .features[FEAT_1_ECX] =
3795 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
3796 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
3797 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
3798 CPUID_EXT_SSE3,
3799 .features[FEAT_8000_0001_EDX] =
3800 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
3801 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP,
3802 .features[FEAT_8000_0001_ECX] =
3803 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
3804 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
3805 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
3806 CPUID_EXT3_LAHF_LM,
3807 .features[FEAT_SVM] =
3808 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
3809 /* no xsaveopt! */
3810 .xlevel = 0x8000001A,
3811 .model_id = "AMD Opteron 62xx class CPU",
3814 .name = "Opteron_G5",
3815 .level = 0xd,
3816 .vendor = CPUID_VENDOR_AMD,
3817 .family = 21,
3818 .model = 2,
3819 .stepping = 0,
3820 .features[FEAT_1_EDX] =
3821 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3822 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3823 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3824 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3825 CPUID_DE | CPUID_FP87,
3826 .features[FEAT_1_ECX] =
3827 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
3828 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
3829 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
3830 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
3831 .features[FEAT_8000_0001_EDX] =
3832 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
3833 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP,
3834 .features[FEAT_8000_0001_ECX] =
3835 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
3836 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
3837 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
3838 CPUID_EXT3_LAHF_LM,
3839 .features[FEAT_SVM] =
3840 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
3841 /* no xsaveopt! */
3842 .xlevel = 0x8000001A,
3843 .model_id = "AMD Opteron 63xx class CPU",
3846 .name = "EPYC",
3847 .level = 0xd,
3848 .vendor = CPUID_VENDOR_AMD,
3849 .family = 23,
3850 .model = 1,
3851 .stepping = 2,
3852 .features[FEAT_1_EDX] =
3853 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
3854 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
3855 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
3856 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
3857 CPUID_VME | CPUID_FP87,
3858 .features[FEAT_1_ECX] =
3859 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
3860 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
3861 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
3862 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
3863 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
3864 .features[FEAT_8000_0001_EDX] =
3865 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
3866 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
3867 CPUID_EXT2_SYSCALL,
3868 .features[FEAT_8000_0001_ECX] =
3869 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
3870 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
3871 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
3872 CPUID_EXT3_TOPOEXT,
3873 .features[FEAT_7_0_EBX] =
3874 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
3875 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
3876 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
3877 CPUID_7_0_EBX_SHA_NI,
3878 /* Missing: XSAVES (not supported by some Linux versions,
3879 * including v4.1 to v4.12).
3880 * KVM doesn't yet expose any XSAVES state save component.
3882 .features[FEAT_XSAVE] =
3883 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3884 CPUID_XSAVE_XGETBV1,
3885 .features[FEAT_6_EAX] =
3886 CPUID_6_EAX_ARAT,
3887 .features[FEAT_SVM] =
3888 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
3889 .xlevel = 0x8000001E,
3890 .model_id = "AMD EPYC Processor",
3891 .cache_info = &epyc_cache_info,
3892 .versions = (X86CPUVersionDefinition[]) {
3893 { .version = 1 },
3895 .version = 2,
3896 .alias = "EPYC-IBPB",
3897 .props = (PropValue[]) {
3898 { "ibpb", "on" },
3899 { "model-id",
3900 "AMD EPYC Processor (with IBPB)" },
3901 { /* end of list */ }
3904 { /* end of list */ }
3908 .name = "Dhyana",
3909 .level = 0xd,
3910 .vendor = CPUID_VENDOR_HYGON,
3911 .family = 24,
3912 .model = 0,
3913 .stepping = 1,
3914 .features[FEAT_1_EDX] =
3915 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
3916 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
3917 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
3918 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
3919 CPUID_VME | CPUID_FP87,
3920 .features[FEAT_1_ECX] =
3921 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
3922 CPUID_EXT_XSAVE | CPUID_EXT_POPCNT |
3923 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
3924 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
3925 CPUID_EXT_MONITOR | CPUID_EXT_SSE3,
3926 .features[FEAT_8000_0001_EDX] =
3927 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
3928 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
3929 CPUID_EXT2_SYSCALL,
3930 .features[FEAT_8000_0001_ECX] =
3931 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
3932 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
3933 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
3934 CPUID_EXT3_TOPOEXT,
3935 .features[FEAT_8000_0008_EBX] =
3936 CPUID_8000_0008_EBX_IBPB,
3937 .features[FEAT_7_0_EBX] =
3938 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
3939 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
3940 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT,
3942 * Missing: XSAVES (not supported by some Linux versions,
3943 * including v4.1 to v4.12).
3944 * KVM doesn't yet expose any XSAVES state save component.
3946 .features[FEAT_XSAVE] =
3947 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3948 CPUID_XSAVE_XGETBV1,
3949 .features[FEAT_6_EAX] =
3950 CPUID_6_EAX_ARAT,
3951 .features[FEAT_SVM] =
3952 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
3953 .xlevel = 0x8000001E,
3954 .model_id = "Hygon Dhyana Processor",
3955 .cache_info = &epyc_cache_info,
3959 /* KVM-specific features that are automatically added/removed
3960 * from all CPU models when KVM is enabled.
3962 static PropValue kvm_default_props[] = {
3963 { "kvmclock", "on" },
3964 { "kvm-nopiodelay", "on" },
3965 { "kvm-asyncpf", "on" },
3966 { "kvm-steal-time", "on" },
3967 { "kvm-pv-eoi", "on" },
3968 { "kvmclock-stable-bit", "on" },
3969 { "x2apic", "on" },
3970 { "acpi", "off" },
3971 { "monitor", "off" },
3972 { "svm", "off" },
3973 { NULL, NULL },
3976 /* TCG-specific defaults that override all CPU models when using TCG
3978 static PropValue tcg_default_props[] = {
3979 { "vme", "off" },
3980 { NULL, NULL },
3985 * We resolve CPU model aliases using -v1 when using "-machine
3986 * none", but this is just for compatibility while libvirt isn't
3987 * adapted to resolve CPU model versions before creating VMs.
3988 * See "Runnability guarantee of CPU models" at qemu-deprecated.texi.
3990 X86CPUVersion default_cpu_version = 1;
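/* Set the CPU model version used to resolve CPU_VERSION_AUTO */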
3992 void x86_cpu_set_default_version(X86CPUVersion version)
3994 /* Translating CPU_VERSION_AUTO to CPU_VERSION_AUTO doesn't make sense */
3995 assert(version != CPU_VERSION_AUTO);
3996 default_cpu_version = version;
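/* Return the latest (last defined) version of a CPU model */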
3999 static X86CPUVersion x86_cpu_model_last_version(const X86CPUModel *model)
4001 int v = 0;
4002 const X86CPUVersionDefinition *vdef =
4003 x86_cpu_def_get_versions(model->cpudef);
4004 while (vdef->version) {
4005 v = vdef->version;
4006 vdef++;
4008 return v;
4011 /* Return the actual version being used for a specific CPU model */
4012 static X86CPUVersion x86_cpu_model_resolve_version(const X86CPUModel *model)
4014 X86CPUVersion v = model->version;
4015 if (v == CPU_VERSION_AUTO) {
4016 v = default_cpu_version;
4018 if (v == CPU_VERSION_LATEST) {
4019 return x86_cpu_model_last_version(model);
4021 return v;
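/* Change the default value of a KVM-specific property (see kvm_default_props) */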
4024 void x86_cpu_change_kvm_default(const char *prop, const char *value)
4026 PropValue *pv;
4027 for (pv = kvm_default_props; pv->prop; pv++) {
4028 if (!strcmp(pv->prop, prop)) {
4029 pv->value = value;
4030 break;
4034 /* It is valid to call this function only for properties that
4035 * are already present in the kvm_default_props table.
4037 assert(pv->prop);
4040 static uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
4041 bool migratable_only);
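/* Check whether the KVM MCE capabilities on this host include LMCE */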
4043 static bool lmce_supported(void)
4045 uint64_t mce_cap = 0;
4047 #ifdef CONFIG_KVM
4048 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
4049 return false;
4051 #endif
4053 return !!(mce_cap & MCG_LMCE_P);
4056 #define CPUID_MODEL_ID_SZ 48
4059 * cpu_x86_fill_model_id:
4060 * Get CPUID model ID string from host CPU.
4062 * @str should have at least CPUID_MODEL_ID_SZ bytes
4064 * The function does NOT add a null terminator to the string
4065 * automatically.
4067 static int cpu_x86_fill_model_id(char *str)
4069 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
4070 int i;
4072 for (i = 0; i < 3; i++) {
4073 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
4074 memcpy(str + i * 16 + 0, &eax, 4);
4075 memcpy(str + i * 16 + 4, &ebx, 4);
4076 memcpy(str + i * 16 + 8, &ecx, 4);
4077 memcpy(str + i * 16 + 12, &edx, 4);
4079 return 0;
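/* QOM properties specific to the "max" CPU model */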
4082 static Property max_x86_cpu_properties[] = {
4083 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
4084 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
4085 DEFINE_PROP_END_OF_LIST()
4088 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
4090 DeviceClass *dc = DEVICE_CLASS(oc);
4091 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4093 xcc->ordering = 9;
4095 xcc->model_description =
4096 "Enables all features supported by the accelerator in the current host";
4098 dc->props = max_x86_cpu_properties;
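/* Instance init for "max": with a host-CPUID accelerator (KVM/HVF), take
 * vendor/family/model/stepping, model-id and the minimum CPUID levels from
 * the host; under TCG fall back to a generic QEMU identity.
 */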
4101 static void max_x86_cpu_initfn(Object *obj)
4103 X86CPU *cpu = X86_CPU(obj);
4104 CPUX86State *env = &cpu->env;
4105 KVMState *s = kvm_state;
4107 /* We can't fill the features array here because we don't know yet if
4108 * "migratable" is true or false.
4110 cpu->max_features = true;
4112 if (accel_uses_host_cpuid()) {
4113 char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
4114 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
4115 int family, model, stepping;
4117 host_vendor_fms(vendor, &family, &model, &stepping);
4118 cpu_x86_fill_model_id(model_id);
4120 object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
4121 object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
4122 object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
4123 object_property_set_int(OBJECT(cpu), stepping, "stepping",
4124 &error_abort);
4125 object_property_set_str(OBJECT(cpu), model_id, "model-id",
4126 &error_abort);
4128 if (kvm_enabled()) {
4129 env->cpuid_min_level =
4130 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
4131 env->cpuid_min_xlevel =
4132 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
4133 env->cpuid_min_xlevel2 =
4134 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
4135 } else {
4136 env->cpuid_min_level =
4137 hvf_get_supported_cpuid(0x0, 0, R_EAX);
4138 env->cpuid_min_xlevel =
4139 hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
4140 env->cpuid_min_xlevel2 =
4141 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
4144 if (lmce_supported()) {
4145 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
4147 } else {
4148 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
4149 "vendor", &error_abort);
4150 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
4151 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
4152 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
4153 object_property_set_str(OBJECT(cpu),
4154 "QEMU TCG CPU version " QEMU_HW_VERSION,
4155 "model-id", &error_abort);
4158 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
4161 static const TypeInfo max_x86_cpu_type_info = {
4162 .name = X86_CPU_TYPE_NAME("max"),
4163 .parent = TYPE_X86_CPU,
4164 .instance_init = max_x86_cpu_initfn,
4165 .class_init = max_x86_cpu_class_init,
4168 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
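/* The "host" CPU model: all host features supported by the accelerator;
 * it requires host CPUID, so it is only built with KVM or HVF.
 */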
4169 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
4171 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4173 xcc->host_cpuid_required = true;
4174 xcc->ordering = 8;
4176 #if defined(CONFIG_KVM)
4177 xcc->model_description =
4178 "KVM processor with all supported host features";
4179 #elif defined(CONFIG_HVF)
4180 xcc->model_description =
4181 "HVF processor with all supported host features";
4182 #endif
4185 static const TypeInfo host_x86_cpu_type_info = {
4186 .name = X86_CPU_TYPE_NAME("host"),
4187 .parent = X86_CPU_TYPE_NAME("max"),
4188 .class_init = host_x86_cpu_class_init,
4191 #endif
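/* Describe where a feature word lives (CPUID leaf and register, or MSR
 * index); returns an allocated string.
 */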
4193 static char *feature_word_description(FeatureWordInfo *f, uint32_t bit)
4195 assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD);
4197 switch (f->type) {
4198 case CPUID_FEATURE_WORD:
4200 const char *reg = get_register_name_32(f->cpuid.reg);
4201 assert(reg);
4202 return g_strdup_printf("CPUID.%02XH:%s",
4203 f->cpuid.eax, reg);
4205 case MSR_FEATURE_WORD:
4206 return g_strdup_printf("MSR(%02XH)",
4207 f->msr.index);
4210 return NULL;
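/* Return true if any feature bits were filtered out for this CPU */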
4213 static bool x86_cpu_have_filtered_features(X86CPU *cpu)
4215 FeatureWord w;
4217 for (w = 0; w < FEATURE_WORDS; w++) {
4218 if (cpu->filtered_features[w]) {
4219 return true;
4223 return false;
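/* Clear the bits in @mask from feature word @w (unless force_features is
 * set), record them as filtered, and warn about each bit if
 * @verbose_prefix is given.
 */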
4226 static void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask,
4227 const char *verbose_prefix)
4229 CPUX86State *env = &cpu->env;
4230 FeatureWordInfo *f = &feature_word_info[w];
4231 int i;
4233 if (!cpu->force_features) {
4234 env->features[w] &= ~mask;
4236 cpu->filtered_features[w] |= mask;
4238 if (!verbose_prefix) {
4239 return;
4242 for (i = 0; i < 64; ++i) {
4243 if ((1ULL << i) & mask) {
4244 g_autofree char *feat_word_str = feature_word_description(f, i);
4245 warn_report("%s: %s%s%s [bit %d]",
4246 verbose_prefix,
4247 feat_word_str,
4248 f->feat_names[i] ? "." : "",
4249 f->feat_names[i] ? f->feat_names[i] : "", i);
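/* Getters/setters for the "family", "model" and "stepping" properties,
 * packed into the base and extended fields of env->cpuid_version.
 */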
4254 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
4255 const char *name, void *opaque,
4256 Error **errp)
4258 X86CPU *cpu = X86_CPU(obj);
4259 CPUX86State *env = &cpu->env;
4260 int64_t value;
4262 value = (env->cpuid_version >> 8) & 0xf;
4263 if (value == 0xf) {
4264 value += (env->cpuid_version >> 20) & 0xff;
4266 visit_type_int(v, name, &value, errp);
4269 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
4270 const char *name, void *opaque,
4271 Error **errp)
4273 X86CPU *cpu = X86_CPU(obj);
4274 CPUX86State *env = &cpu->env;
4275 const int64_t min = 0;
4276 const int64_t max = 0xff + 0xf;
4277 Error *local_err = NULL;
4278 int64_t value;
4280 visit_type_int(v, name, &value, &local_err);
4281 if (local_err) {
4282 error_propagate(errp, local_err);
4283 return;
4285 if (value < min || value > max) {
4286 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
4287 name ? name : "null", value, min, max);
4288 return;
4291 env->cpuid_version &= ~0xff00f00;
4292 if (value > 0x0f) {
4293 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
4294 } else {
4295 env->cpuid_version |= value << 8;
4299 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
4300 const char *name, void *opaque,
4301 Error **errp)
4303 X86CPU *cpu = X86_CPU(obj);
4304 CPUX86State *env = &cpu->env;
4305 int64_t value;
4307 value = (env->cpuid_version >> 4) & 0xf;
4308 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
4309 visit_type_int(v, name, &value, errp);
4312 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
4313 const char *name, void *opaque,
4314 Error **errp)
4316 X86CPU *cpu = X86_CPU(obj);
4317 CPUX86State *env = &cpu->env;
4318 const int64_t min = 0;
4319 const int64_t max = 0xff;
4320 Error *local_err = NULL;
4321 int64_t value;
4323 visit_type_int(v, name, &value, &local_err);
4324 if (local_err) {
4325 error_propagate(errp, local_err);
4326 return;
4328 if (value < min || value > max) {
4329 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
4330 name ? name : "null", value, min, max);
4331 return;
4334 env->cpuid_version &= ~0xf00f0;
4335 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
4338 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
4339 const char *name, void *opaque,
4340 Error **errp)
4342 X86CPU *cpu = X86_CPU(obj);
4343 CPUX86State *env = &cpu->env;
4344 int64_t value;
4346 value = env->cpuid_version & 0xf;
4347 visit_type_int(v, name, &value, errp);
4350 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
4351 const char *name, void *opaque,
4352 Error **errp)
4354 X86CPU *cpu = X86_CPU(obj);
4355 CPUX86State *env = &cpu->env;
4356 const int64_t min = 0;
4357 const int64_t max = 0xf;
4358 Error *local_err = NULL;
4359 int64_t value;
4361 visit_type_int(v, name, &value, &local_err);
4362 if (local_err) {
4363 error_propagate(errp, local_err);
4364 return;
4366 if (value < min || value > max) {
4367 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
4368 name ? name : "null", value, min, max);
4369 return;
4372 env->cpuid_version &= ~0xf;
4373 env->cpuid_version |= value & 0xf;
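/* "vendor" property: the 12-character CPUID vendor string, packed four
 * bytes at a time into env->cpuid_vendor1/2/3.
 */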
4376 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
4378 X86CPU *cpu = X86_CPU(obj);
4379 CPUX86State *env = &cpu->env;
4380 char *value;
4382 value = g_malloc(CPUID_VENDOR_SZ + 1);
4383 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
4384 env->cpuid_vendor3);
4385 return value;
4388 static void x86_cpuid_set_vendor(Object *obj, const char *value,
4389 Error **errp)
4391 X86CPU *cpu = X86_CPU(obj);
4392 CPUX86State *env = &cpu->env;
4393 int i;
4395 if (strlen(value) != CPUID_VENDOR_SZ) {
4396 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
4397 return;
4400 env->cpuid_vendor1 = 0;
4401 env->cpuid_vendor2 = 0;
4402 env->cpuid_vendor3 = 0;
4403 for (i = 0; i < 4; i++) {
4404 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
4405 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
4406 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
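/* "model-id" property: the 48-character brand string kept in
 * env->cpuid_model[] (CPUID leaves 0x80000002..0x80000004).
 */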
4410 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
4412 X86CPU *cpu = X86_CPU(obj);
4413 CPUX86State *env = &cpu->env;
4414 char *value;
4415 int i;
4417 value = g_malloc(48 + 1);
4418 for (i = 0; i < 48; i++) {
4419 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
4421 value[48] = '\0';
4422 return value;
4425 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
4426 Error **errp)
4428 X86CPU *cpu = X86_CPU(obj);
4429 CPUX86State *env = &cpu->env;
4430 int c, len, i;
4432 if (model_id == NULL) {
4433 model_id = "";
4435 len = strlen(model_id);
4436 memset(env->cpuid_model, 0, 48);
4437 for (i = 0; i < 48; i++) {
4438 if (i >= len) {
4439 c = '\0';
4440 } else {
4441 c = (uint8_t)model_id[i];
4443 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
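/* "tsc-frequency" property, exposed in Hz but stored as env->tsc_khz */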
4447 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
4448 void *opaque, Error **errp)
4450 X86CPU *cpu = X86_CPU(obj);
4451 int64_t value;
4453 value = cpu->env.tsc_khz * 1000;
4454 visit_type_int(v, name, &value, errp);
4457 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
4458 void *opaque, Error **errp)
4460 X86CPU *cpu = X86_CPU(obj);
4461 const int64_t min = 0;
4462 const int64_t max = INT64_MAX;
4463 Error *local_err = NULL;
4464 int64_t value;
4466 visit_type_int(v, name, &value, &local_err);
4467 if (local_err) {
4468 error_propagate(errp, local_err);
4469 return;
4471 if (value < min || value > max) {
4472 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
4473 name ? name : "null", value, min, max);
4474 return;
4477 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
4480 /* Generic getter for "feature-words" and "filtered-features" properties */
4481 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
4482 const char *name, void *opaque,
4483 Error **errp)
4485 uint64_t *array = (uint64_t *)opaque;
4486 FeatureWord w;
4487 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
4488 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
4489 X86CPUFeatureWordInfoList *list = NULL;
4491 for (w = 0; w < FEATURE_WORDS; w++) {
4492 FeatureWordInfo *wi = &feature_word_info[w];
4494 * We didn't have MSR features when "feature-words" was
4495 * introduced, so entries of other types are skipped here.
4497 if (wi->type != CPUID_FEATURE_WORD) {
4498 continue;
4500 X86CPUFeatureWordInfo *qwi = &word_infos[w];
4501 qwi->cpuid_input_eax = wi->cpuid.eax;
4502 qwi->has_cpuid_input_ecx = wi->cpuid.needs_ecx;
4503 qwi->cpuid_input_ecx = wi->cpuid.ecx;
4504 qwi->cpuid_register = x86_reg_info_32[wi->cpuid.reg].qapi_enum;
4505 qwi->features = array[w];
4507 /* List will be in reverse order, but order shouldn't matter */
4508 list_entries[w].next = list;
4509 list_entries[w].value = &word_infos[w];
4510 list = &list_entries[w];
4513 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
4516 /* Convert all '_' in a feature string option name to '-', to make feature
4517 * name conform to QOM property naming rule, which uses '-' instead of '_'.
4519 static inline void feat2prop(char *s)
4521 while ((s = strchr(s, '_'))) {
4522 *s = '-';
4526 /* Return the feature property name for a feature flag bit */
4527 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
4529 const char *name;
4530 /* XSAVE components are automatically enabled by other features,
4531 * so return the original feature name instead
4533 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
4534 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
4536 if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
4537 x86_ext_save_areas[comp].bits) {
4538 w = x86_ext_save_areas[comp].feature;
4539 bitnr = ctz32(x86_ext_save_areas[comp].bits);
4543 assert(bitnr < 64);
4544 assert(w < FEATURE_WORDS);
4545 name = feature_word_info[w].feat_names[bitnr];
4546 assert(bitnr < 32 || !(name && feature_word_info[w].type == CPUID_FEATURE_WORD));
4547 return name;
4550 /* Compatibility hack to maintain the legacy +-feat semantics,
4551 * where +-feat overwrites any feature set by
4552 * feat=on|feat even if the latter is parsed after +-feat
4553 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
4555 static GList *plus_features, *minus_features;
4557 static gint compare_string(gconstpointer a, gconstpointer b)
4559 return g_strcmp0(a, b);
4562 /* Parse "+feature,-feature,feature=foo" CPU feature string
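 * (e.g. "+avx2" or "-hle" using the legacy syntax above, "pmu=on", or
 * "tsc-freq=2G", which accepts metric suffixes); everything that is not a
 * +/- flag is registered as a global property for the CPU type.
 */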
4564 static void x86_cpu_parse_featurestr(const char *typename, char *features,
4565 Error **errp)
4567 char *featurestr; /* Single 'key=value" string being parsed */
4568 static bool cpu_globals_initialized;
4569 bool ambiguous = false;
4571 if (cpu_globals_initialized) {
4572 return;
4574 cpu_globals_initialized = true;
4576 if (!features) {
4577 return;
4580 for (featurestr = strtok(features, ",");
4581 featurestr;
4582 featurestr = strtok(NULL, ",")) {
4583 const char *name;
4584 const char *val = NULL;
4585 char *eq = NULL;
4586 char num[32];
4587 GlobalProperty *prop;
4589 /* Compatibility syntax: */
4590 if (featurestr[0] == '+') {
4591 plus_features = g_list_append(plus_features,
4592 g_strdup(featurestr + 1));
4593 continue;
4594 } else if (featurestr[0] == '-') {
4595 minus_features = g_list_append(minus_features,
4596 g_strdup(featurestr + 1));
4597 continue;
4600 eq = strchr(featurestr, '=');
4601 if (eq) {
4602 *eq++ = 0;
4603 val = eq;
4604 } else {
4605 val = "on";
4608 feat2prop(featurestr);
4609 name = featurestr;
4611 if (g_list_find_custom(plus_features, name, compare_string)) {
4612 warn_report("Ambiguous CPU model string. "
4613 "Don't mix both \"+%s\" and \"%s=%s\"",
4614 name, name, val);
4615 ambiguous = true;
4617 if (g_list_find_custom(minus_features, name, compare_string)) {
4618 warn_report("Ambiguous CPU model string. "
4619 "Don't mix both \"-%s\" and \"%s=%s\"",
4620 name, name, val);
4621 ambiguous = true;
4624 /* Special case: */
4625 if (!strcmp(name, "tsc-freq")) {
4626 int ret;
4627 uint64_t tsc_freq;
4629 ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
4630 if (ret < 0 || tsc_freq > INT64_MAX) {
4631 error_setg(errp, "bad numerical value %s", val);
4632 return;
4634 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
4635 val = num;
4636 name = "tsc-frequency";
4639 prop = g_new0(typeof(*prop), 1);
4640 prop->driver = typename;
4641 prop->property = g_strdup(name);
4642 prop->value = g_strdup(val);
4643 qdev_prop_register_global(prop);
4646 if (ambiguous) {
4647 warn_report("Compatibility of ambiguous CPU model "
4648 "strings won't be kept on future QEMU versions");
4652 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
4653 static void x86_cpu_filter_features(X86CPU *cpu, bool verbose);
4655 /* Build a list with the name of all features on a feature word array */
4656 static void x86_cpu_list_feature_names(FeatureWordArray features,
4657 strList **feat_names)
4659 FeatureWord w;
4660 strList **next = feat_names;
4662 for (w = 0; w < FEATURE_WORDS; w++) {
4663 uint64_t filtered = features[w];
4664 int i;
4665 for (i = 0; i < 64; i++) {
4666 if (filtered & (1ULL << i)) {
4667 strList *new = g_new0(strList, 1);
4668 new->value = g_strdup(x86_cpu_feature_name(w, i));
4669 *next = new;
4670 next = &new->next;
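/* Getter for "unavailable-features": names of the features that were
 * filtered out for this CPU instance.
 */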
4676 static void x86_cpu_get_unavailable_features(Object *obj, Visitor *v,
4677 const char *name, void *opaque,
4678 Error **errp)
4680 X86CPU *xc = X86_CPU(obj);
4681 strList *result = NULL;
4683 x86_cpu_list_feature_names(xc->filtered_features, &result);
4684 visit_type_strList(v, "unavailable-features", &result, errp);
4687 /* Check for missing features that may prevent the CPU class from
4688 * running using the current machine and accelerator.
4690 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
4691 strList **missing_feats)
4693 X86CPU *xc;
4694 Error *err = NULL;
4695 strList **next = missing_feats;
4697 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
4698 strList *new = g_new0(strList, 1);
4699 new->value = g_strdup("kvm");
4700 *missing_feats = new;
4701 return;
4704 xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc)));
4706 x86_cpu_expand_features(xc, &err);
4707 if (err) {
4708 /* Errors at x86_cpu_expand_features should never happen,
4709 * but in case they do, just report the model as not
4710 * runnable at all using the "type" property.
4712 strList *new = g_new0(strList, 1);
4713 new->value = g_strdup("type");
4714 *next = new;
4715 next = &new->next;
4718 x86_cpu_filter_features(xc, false);
4720 x86_cpu_list_feature_names(xc->filtered_features, next);
4722 object_unref(OBJECT(xc));
4725 /* Print all CPUID feature names in the feature set
4727 static void listflags(GList *features)
4729 size_t len = 0;
4730 GList *tmp;
4732 for (tmp = features; tmp; tmp = tmp->next) {
4733 const char *name = tmp->data;
4734 if ((len + strlen(name) + 1) >= 75) {
4735 qemu_printf("\n");
4736 len = 0;
4738 qemu_printf("%s%s", len == 0 ? " " : " ", name);
4739 len += strlen(name) + 1;
4741 qemu_printf("\n");
4744 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
4745 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
4747 ObjectClass *class_a = (ObjectClass *)a;
4748 ObjectClass *class_b = (ObjectClass *)b;
4749 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
4750 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
4751 int ret;
4753 if (cc_a->ordering != cc_b->ordering) {
4754 ret = cc_a->ordering - cc_b->ordering;
4755 } else {
4756 g_autofree char *name_a = x86_cpu_class_get_model_name(cc_a);
4757 g_autofree char *name_b = x86_cpu_class_get_model_name(cc_b);
4758 ret = strcmp(name_a, name_b);
4760 return ret;
4763 static GSList *get_sorted_cpu_model_list(void)
4765 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
4766 list = g_slist_sort(list, x86_cpu_list_compare);
4767 return list;
4770 static char *x86_cpu_class_get_model_id(X86CPUClass *xc)
4772 Object *obj = object_new_with_class(OBJECT_CLASS(xc));
4773 char *r = object_property_get_str(obj, "model-id", &error_abort);
4774 object_unref(obj);
4775 return r;
4778 static char *x86_cpu_class_get_alias_of(X86CPUClass *cc)
4780 X86CPUVersion version;
4782 if (!cc->model || !cc->model->is_alias) {
4783 return NULL;
4785 version = x86_cpu_model_resolve_version(cc->model);
4786 if (version <= 0) {
4787 return NULL;
4789 return x86_cpu_versioned_model_name(cc->model->cpudef, version);
4792 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
4794 ObjectClass *oc = data;
4795 X86CPUClass *cc = X86_CPU_CLASS(oc);
4796 g_autofree char *name = x86_cpu_class_get_model_name(cc);
4797 g_autofree char *desc = g_strdup(cc->model_description);
4798 g_autofree char *alias_of = x86_cpu_class_get_alias_of(cc);
4800 if (!desc && alias_of) {
4801 if (cc->model && cc->model->version == CPU_VERSION_AUTO) {
4802 desc = g_strdup("(alias configured by machine type)");
4803 } else {
4804 desc = g_strdup_printf("(alias of %s)", alias_of);
4807 if (!desc) {
4808 desc = x86_cpu_class_get_model_id(cc);
4811 qemu_printf("x86 %-20s %-48s\n", name, desc);
4814 /* list available CPU models and flags */
4815 void x86_cpu_list(void)
4817 int i, j;
4818 GSList *list;
4819 GList *names = NULL;
4821 qemu_printf("Available CPUs:\n");
4822 list = get_sorted_cpu_model_list();
4823 g_slist_foreach(list, x86_cpu_list_entry, NULL);
4824 g_slist_free(list);
4826 names = NULL;
4827 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
4828 FeatureWordInfo *fw = &feature_word_info[i];
4829 for (j = 0; j < 64; j++) {
4830 if (fw->feat_names[j]) {
4831 names = g_list_append(names, (gpointer)fw->feat_names[j]);
4836 names = g_list_sort(names, (GCompareFunc)strcmp);
4838 qemu_printf("\nRecognized CPUID flags:\n");
4839 listflags(names);
4840 qemu_printf("\n");
4841 g_list_free(names);
4844 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
4846 ObjectClass *oc = data;
4847 X86CPUClass *cc = X86_CPU_CLASS(oc);
4848 CpuDefinitionInfoList **cpu_list = user_data;
4849 CpuDefinitionInfoList *entry;
4850 CpuDefinitionInfo *info;
4852 info = g_malloc0(sizeof(*info));
4853 info->name = x86_cpu_class_get_model_name(cc);
4854 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
4855 info->has_unavailable_features = true;
4856 info->q_typename = g_strdup(object_class_get_name(oc));
4857 info->migration_safe = cc->migration_safe;
4858 info->has_migration_safe = true;
4859 info->q_static = cc->static_model;
4861 * Old machine types won't report aliases, so that alias translation
4862 * doesn't break compatibility with previous QEMU versions.
4864 if (default_cpu_version != CPU_VERSION_LEGACY) {
4865 info->alias_of = x86_cpu_class_get_alias_of(cc);
4866 info->has_alias_of = !!info->alias_of;
4869 entry = g_malloc0(sizeof(*entry));
4870 entry->value = info;
4871 entry->next = *cpu_list;
4872 *cpu_list = entry;
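/*
 * Illustrative QMP usage (a sketch; the response is abbreviated):
 *   -> { "execute": "query-cpu-definitions" }
 *   <- { "return": [ { "name": "qemu64", "static": false,
 *                      "migration-safe": true, ... }, ... ] }
 */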
4875 CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
4877 CpuDefinitionInfoList *cpu_list = NULL;
4878 GSList *list = get_sorted_cpu_model_list();
4879 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
4880 g_slist_free(list);
4881 return cpu_list;
4884 static uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
4885 bool migratable_only)
4887 FeatureWordInfo *wi = &feature_word_info[w];
4888 uint64_t r = 0;
4890 if (kvm_enabled()) {
4891 switch (wi->type) {
4892 case CPUID_FEATURE_WORD:
4893 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid.eax,
4894 wi->cpuid.ecx,
4895 wi->cpuid.reg);
4896 break;
4897 case MSR_FEATURE_WORD:
4898 r = kvm_arch_get_supported_msr_feature(kvm_state,
4899 wi->msr.index);
4900 break;
4902 } else if (hvf_enabled()) {
4903 if (wi->type != CPUID_FEATURE_WORD) {
4904 return 0;
4906 r = hvf_get_supported_cpuid(wi->cpuid.eax,
4907 wi->cpuid.ecx,
4908 wi->cpuid.reg);
4909 } else if (tcg_enabled()) {
4910 r = wi->tcg_features;
4911 } else {
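/* No accelerator-specific data available (e.g. under qtest): report all bits as supported */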
4912 return ~0;
4914 if (migratable_only) {
4915 r &= x86_cpu_get_migratable_flags(w);
4917 return r;
4920 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
4922 PropValue *pv;
4923 for (pv = props; pv->prop; pv++) {
4924 if (!pv->value) {
4925 continue;
4927 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
4928 &error_abort);
4932 /* Apply properties for the CPU model version specified in model */
4933 static void x86_cpu_apply_version_props(X86CPU *cpu, X86CPUModel *model)
4935 const X86CPUVersionDefinition *vdef;
4936 X86CPUVersion version = x86_cpu_model_resolve_version(model);
4938 if (version == CPU_VERSION_LEGACY) {
4939 return;
4942 for (vdef = x86_cpu_def_get_versions(model->cpudef); vdef->version; vdef++) {
4943 PropValue *p;
4945 for (p = vdef->props; p && p->prop; p++) {
4946 object_property_parse(OBJECT(cpu), p->value, p->prop,
4947 &error_abort);
4950 if (vdef->version == version) {
4951 break;
4956 * If we reached the end of the list, the version number was invalid
4958 assert(vdef->version == version);
4961 /* Load data from X86CPUDefinition into a X86CPU object
4963 static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model, Error **errp)
4965 X86CPUDefinition *def = model->cpudef;
4966 CPUX86State *env = &cpu->env;
4967 const char *vendor;
4968 char host_vendor[CPUID_VENDOR_SZ + 1];
4969 FeatureWord w;
4971 /* NOTE: any property set by this function should be returned by
4972 * x86_cpu_static_props(), so static expansion of
4973 * query-cpu-model-expansion is always complete.
4976 /* CPU models only set _minimum_ values for level/xlevel: */
4977 object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
4978 object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
4980 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
4981 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
4982 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
4983 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
4984 for (w = 0; w < FEATURE_WORDS; w++) {
4985 env->features[w] = def->features[w];
4988 /* legacy-cache defaults to 'off' if CPU model provides cache info */
4989 cpu->legacy_cache = !def->cache_info;
4991 /* Special cases not set in the X86CPUDefinition structs: */
4992 /* TODO: in-kernel irqchip for hvf */
4993 if (kvm_enabled()) {
4994 if (!kvm_irqchip_in_kernel()) {
4995 x86_cpu_change_kvm_default("x2apic", "off");
4998 x86_cpu_apply_props(cpu, kvm_default_props);
4999 } else if (tcg_enabled()) {
5000 x86_cpu_apply_props(cpu, tcg_default_props);
5003 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
5005 /* sysenter isn't supported in compatibility mode on AMD,
5006 * syscall isn't supported in compatibility mode on Intel.
5007 * Normally we advertise the actual CPU vendor, but you can
5008 * override this using the 'vendor' property if you want to use
5009 * KVM's sysenter/syscall emulation in compatibility mode and
5010 * when doing cross vendor migration
5012 vendor = def->vendor;
5013 if (accel_uses_host_cpuid()) {
5014 uint32_t ebx = 0, ecx = 0, edx = 0;
5015 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
5016 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
5017 vendor = host_vendor;
5020 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
5022 x86_cpu_apply_version_props(cpu, model);
5025 #ifndef CONFIG_USER_ONLY
5026 /* Return a QDict containing keys for all properties that can be included
5027 * in static expansion of CPU models. All properties set by x86_cpu_load_model()
5028 * must be included in the dictionary.
5030 static QDict *x86_cpu_static_props(void)
5032 FeatureWord w;
5033 int i;
5034 static const char *props[] = {
5035 "min-level",
5036 "min-xlevel",
5037 "family",
5038 "model",
5039 "stepping",
5040 "model-id",
5041 "vendor",
5042 "lmce",
5043 NULL,
5045 static QDict *d;
5047 if (d) {
5048 return d;
5051 d = qdict_new();
5052 for (i = 0; props[i]; i++) {
5053 qdict_put_null(d, props[i]);
5056 for (w = 0; w < FEATURE_WORDS; w++) {
5057 FeatureWordInfo *fi = &feature_word_info[w];
5058 int bit;
5059 for (bit = 0; bit < 64; bit++) {
5060 if (!fi->feat_names[bit]) {
5061 continue;
5063 qdict_put_null(d, fi->feat_names[bit]);
5067 return d;
5070 /* Add an entry to @props dict, with the value for property. */
5071 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
5073 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
5074 &error_abort);
5076 qdict_put_obj(props, prop, value);
5079 /* Convert CPU model data from X86CPU object to a property dictionary
5080 * that can recreate exactly the same CPU model.
5082 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
5084 QDict *sprops = x86_cpu_static_props();
5085 const QDictEntry *e;
5087 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
5088 const char *prop = qdict_entry_key(e);
5089 x86_cpu_expand_prop(cpu, props, prop);
5093 /* Convert CPU model data from X86CPU object to a property dictionary
5094 * that can recreate exactly the same CPU model, including every
5095 * writeable QOM property.
5097 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
5099 ObjectPropertyIterator iter;
5100 ObjectProperty *prop;
5102 object_property_iter_init(&iter, OBJECT(cpu));
5103 while ((prop = object_property_iter_next(&iter))) {
5104 /* skip read-only or write-only properties */
5105 if (!prop->get || !prop->set) {
5106 continue;
5109 /* "hotplugged" is the only property that is configurable
5110 * on the command-line but will be set differently on CPUs
5111 * created using "-cpu ... -smp ..." and by CPUs created
5112 * on the fly by x86_cpu_from_model() for querying. Skip it.
5114 if (!strcmp(prop->name, "hotplugged")) {
5115 continue;
5117 x86_cpu_expand_prop(cpu, props, prop->name);
5121 static void object_apply_props(Object *obj, QDict *props, Error **errp)
5123 const QDictEntry *prop;
5124 Error *err = NULL;
5126 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
5127 object_property_set_qobject(obj, qdict_entry_value(prop),
5128 qdict_entry_key(prop), &err);
5129 if (err) {
5130 break;
5134 error_propagate(errp, err);
5137 /* Create X86CPU object according to model+props specification */
5138 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
5140 X86CPU *xc = NULL;
5141 X86CPUClass *xcc;
5142 Error *err = NULL;
5144 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
5145 if (xcc == NULL) {
5146 error_setg(&err, "CPU model '%s' not found", model);
5147 goto out;
5150 xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc)));
5151 if (props) {
5152 object_apply_props(OBJECT(xc), props, &err);
5153 if (err) {
5154 goto out;
5158 x86_cpu_expand_features(xc, &err);
5159 if (err) {
5160 goto out;
5163 out:
5164 if (err) {
5165 error_propagate(errp, err);
5166 object_unref(OBJECT(xc));
5167 xc = NULL;
5169 return xc;
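/*
 * Illustrative QMP usage (a sketch; properties abbreviated):
 *   -> { "execute": "query-cpu-model-expansion",
 *        "arguments": { "type": "static",
 *                       "model": { "name": "Skylake-Client" } } }
 *   <- { "return": { "model": { "name": "base",
 *                               "props": { "avx": true, ... } } } }
 */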
5172 CpuModelExpansionInfo *
5173 qmp_query_cpu_model_expansion(CpuModelExpansionType type,
5174 CpuModelInfo *model,
5175 Error **errp)
5177 X86CPU *xc = NULL;
5178 Error *err = NULL;
5179 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
5180 QDict *props = NULL;
5181 const char *base_name;
5183 xc = x86_cpu_from_model(model->name,
5184 model->has_props ?
5185 qobject_to(QDict, model->props) :
5186 NULL, &err);
5187 if (err) {
5188 goto out;
5191 props = qdict_new();
5192 ret->model = g_new0(CpuModelInfo, 1);
5193 ret->model->props = QOBJECT(props);
5194 ret->model->has_props = true;
5196 switch (type) {
5197 case CPU_MODEL_EXPANSION_TYPE_STATIC:
5198 /* Static expansion will be based on "base" only */
5199 base_name = "base";
5200 x86_cpu_to_dict(xc, props);
5201 break;
5202 case CPU_MODEL_EXPANSION_TYPE_FULL:
5203 /* As we don't return every single property, full expansion needs
5204 * to keep the original model name+props, and add extra
5205 * properties on top of that.
5207 base_name = model->name;
5208 x86_cpu_to_dict_full(xc, props);
5209 break;
5210 default:
5211 error_setg(&err, "Unsupported expansion type");
5212 goto out;
5215 x86_cpu_to_dict(xc, props);
5217 ret->model->name = g_strdup(base_name);
5219 out:
5220 object_unref(OBJECT(xc));
5221 if (err) {
5222 error_propagate(errp, err);
5223 qapi_free_CpuModelExpansionInfo(ret);
5224 ret = NULL;
5226 return ret;
5228 #endif /* !CONFIG_USER_ONLY */
5230 static gchar *x86_gdb_arch_name(CPUState *cs)
5232 #ifdef TARGET_X86_64
5233 return g_strdup("i386:x86-64");
5234 #else
5235 return g_strdup("i386");
5236 #endif
5239 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
5241 X86CPUModel *model = data;
5242 X86CPUClass *xcc = X86_CPU_CLASS(oc);
5244 xcc->model = model;
5245 xcc->migration_safe = true;
5248 static void x86_register_cpu_model_type(const char *name, X86CPUModel *model)
5250 g_autofree char *typename = x86_cpu_type_name(name);
5251 TypeInfo ti = {
5252 .name = typename,
5253 .parent = TYPE_X86_CPU,
5254 .class_init = x86_cpu_cpudef_class_init,
5255 .class_data = model,
5258 type_register(&ti);
5261 static void x86_register_cpudef_types(X86CPUDefinition *def)
5263 X86CPUModel *m;
5264 const X86CPUVersionDefinition *vdef;
5266 /* AMD aliases are handled at runtime based on CPUID vendor, so
5267 * they shouldn't be set on the CPU model table.
5269 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
5270 /* catch mistakes instead of silently truncating model_id when too long */
5271 assert(def->model_id && strlen(def->model_id) <= 48);
5273 /* Unversioned model: */
5274 m = g_new0(X86CPUModel, 1);
5275 m->cpudef = def;
5276 m->version = CPU_VERSION_AUTO;
5277 m->is_alias = true;
5278 x86_register_cpu_model_type(def->name, m);
5280 /* Versioned models: */
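/*
 * For a definition named "Skylake-Client", for example, this registers
 * "Skylake-Client-v1", "Skylake-Client-v2", ... plus any version aliases
 * listed in the version table.
 */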
5282 for (vdef = x86_cpu_def_get_versions(def); vdef->version; vdef++) {
5283 X86CPUModel *m = g_new0(X86CPUModel, 1);
5284 g_autofree char *name =
5285 x86_cpu_versioned_model_name(def, vdef->version);
5286 m->cpudef = def;
5287 m->version = vdef->version;
5288 x86_register_cpu_model_type(name, m);
5290 if (vdef->alias) {
5291 X86CPUModel *am = g_new0(X86CPUModel, 1);
5292 am->cpudef = def;
5293 am->version = vdef->version;
5294 am->is_alias = true;
5295 x86_register_cpu_model_type(vdef->alias, am);
5301 #if !defined(CONFIG_USER_ONLY)
5303 void cpu_clear_apic_feature(CPUX86State *env)
5305 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
5308 #endif /* !CONFIG_USER_ONLY */
5310 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
5311 uint32_t *eax, uint32_t *ebx,
5312 uint32_t *ecx, uint32_t *edx)
5314 X86CPU *cpu = env_archcpu(env);
5315 CPUState *cs = env_cpu(env);
5316 uint32_t die_offset;
5317 uint32_t limit;
5318 uint32_t signature[3];
5320 /* Calculate & apply limits for different index ranges */
5321 if (index >= 0xC0000000) {
5322 limit = env->cpuid_xlevel2;
5323 } else if (index >= 0x80000000) {
5324 limit = env->cpuid_xlevel;
5325 } else if (index >= 0x40000000) {
5326 limit = 0x40000001;
5327 } else {
5328 limit = env->cpuid_level;
5331 if (index > limit) {
5332 /* Intel documentation states that invalid EAX input will
5333 * return the same information as EAX=cpuid_level
5334 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
5336 index = env->cpuid_level;
5339 switch(index) {
5340 case 0:
5341 *eax = env->cpuid_level;
5342 *ebx = env->cpuid_vendor1;
5343 *edx = env->cpuid_vendor2;
5344 *ecx = env->cpuid_vendor3;
5345 break;
5346 case 1:
5347 *eax = env->cpuid_version;
5348 *ebx = (cpu->apic_id << 24) |
5349 8 << 8; /* CLFLUSH size in quad words (8 * 8 = 64 bytes), Linux wants it. */
5350 *ecx = env->features[FEAT_1_ECX];
5351 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
5352 *ecx |= CPUID_EXT_OSXSAVE;
5354 *edx = env->features[FEAT_1_EDX];
5355 if (cs->nr_cores * cs->nr_threads > 1) {
5356 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
5357 *edx |= CPUID_HT;
5359 break;
5360 case 2:
5361 /* cache info: needed for Pentium Pro compatibility */
5362 if (cpu->cache_info_passthrough) {
5363 host_cpuid(index, 0, eax, ebx, ecx, edx);
5364 break;
5366 *eax = 1; /* Number of CPUID[EAX=2] calls required */
5367 *ebx = 0;
5368 if (!cpu->enable_l3_cache) {
5369 *ecx = 0;
5370 } else {
5371 *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache);
5373 *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) |
5374 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) |
5375 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
5376 break;
5377 case 4:
5378 /* cache info: needed for Core compatibility */
5379 if (cpu->cache_info_passthrough) {
5380 host_cpuid(index, count, eax, ebx, ecx, edx);
5381 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
5382 *eax &= ~0xFC000000;
5383 if ((*eax & 31) && cs->nr_cores > 1) {
5384 *eax |= (cs->nr_cores - 1) << 26;
5386 } else {
5387 *eax = 0;
5388 switch (count) {
5389 case 0: /* L1 dcache info */
5390 encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
5391 1, cs->nr_cores,
5392 eax, ebx, ecx, edx);
5393 break;
5394 case 1: /* L1 icache info */
5395 encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
5396 1, cs->nr_cores,
5397 eax, ebx, ecx, edx);
5398 break;
5399 case 2: /* L2 cache info */
5400 encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
5401 cs->nr_threads, cs->nr_cores,
5402 eax, ebx, ecx, edx);
5403 break;
5404 case 3: /* L3 cache info */
5405 die_offset = apicid_die_offset(env->nr_dies,
5406 cs->nr_cores, cs->nr_threads);
5407 if (cpu->enable_l3_cache) {
5408 encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
5409 (1 << die_offset), cs->nr_cores,
5410 eax, ebx, ecx, edx);
5411 break;
5413 /* fall through */
5414 default: /* end of info */
5415 *eax = *ebx = *ecx = *edx = 0;
5416 break;
5419 break;
5420 case 5:
5421 /* MONITOR/MWAIT Leaf */
5422 *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */
5423 *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */
5424 *ecx = cpu->mwait.ecx; /* flags */
5425 *edx = cpu->mwait.edx; /* mwait substates */
5426 break;
5427 case 6:
5428 /* Thermal and Power Leaf */
5429 *eax = env->features[FEAT_6_EAX];
5430 *ebx = 0;
5431 *ecx = 0;
5432 *edx = 0;
5433 break;
5434 case 7:
5435 /* Structured Extended Feature Flags Enumeration Leaf */
5436 if (count == 0) {
5437 /* Maximum ECX value for sub-leaves */
5438 *eax = env->cpuid_level_func7;
5439 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
5440 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
5441 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
5442 *ecx |= CPUID_7_0_ECX_OSPKE;
5444 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
5445 } else if (count == 1) {
5446 *eax = env->features[FEAT_7_1_EAX];
5447 *ebx = 0;
5448 *ecx = 0;
5449 *edx = 0;
5450 } else {
5451 *eax = 0;
5452 *ebx = 0;
5453 *ecx = 0;
5454 *edx = 0;
5456 break;
5457 case 9:
5458 /* Direct Cache Access Information Leaf */
5459 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
5460 *ebx = 0;
5461 *ecx = 0;
5462 *edx = 0;
5463 break;
5464 case 0xA:
5465 /* Architectural Performance Monitoring Leaf */
5466 if (kvm_enabled() && cpu->enable_pmu) {
5467 KVMState *s = cs->kvm_state;
5469 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
5470 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
5471 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
5472 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
5473 } else if (hvf_enabled() && cpu->enable_pmu) {
5474 *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
5475 *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
5476 *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
5477 *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
5478 } else {
5479 *eax = 0;
5480 *ebx = 0;
5481 *ecx = 0;
5482 *edx = 0;
5484 break;
5485 case 0xB:
5486 /* Extended Topology Enumeration Leaf */
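/*
 * Worked example (illustrative): with nr_threads=2 and nr_cores=4,
 * sub-leaf 0 returns EAX=1 (APIC ID bits to shift out to reach the next
 * level) and EBX=2, while sub-leaf 1 returns EAX=3 and EBX=8.
 */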
5487 if (!cpu->enable_cpuid_0xb) {
5488 *eax = *ebx = *ecx = *edx = 0;
5489 break;
5492 *ecx = count & 0xff;
5493 *edx = cpu->apic_id;
5495 switch (count) {
5496 case 0:
5497 *eax = apicid_core_offset(env->nr_dies,
5498 cs->nr_cores, cs->nr_threads);
5499 *ebx = cs->nr_threads;
5500 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
5501 break;
5502 case 1:
5503 *eax = apicid_pkg_offset(env->nr_dies,
5504 cs->nr_cores, cs->nr_threads);
5505 *ebx = cs->nr_cores * cs->nr_threads;
5506 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
5507 break;
5508 default:
5509 *eax = 0;
5510 *ebx = 0;
5511 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
5514 assert(!(*eax & ~0x1f));
5515 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
5516 break;
5517 case 0x1F:
5518 /* V2 Extended Topology Enumeration Leaf */
5519 if (env->nr_dies < 2) {
5520 *eax = *ebx = *ecx = *edx = 0;
5521 break;
5524 *ecx = count & 0xff;
5525 *edx = cpu->apic_id;
5526 switch (count) {
5527 case 0:
5528 *eax = apicid_core_offset(env->nr_dies, cs->nr_cores,
5529 cs->nr_threads);
5530 *ebx = cs->nr_threads;
5531 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
5532 break;
5533 case 1:
5534 *eax = apicid_die_offset(env->nr_dies, cs->nr_cores,
5535 cs->nr_threads);
5536 *ebx = cs->nr_cores * cs->nr_threads;
5537 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
5538 break;
5539 case 2:
5540 *eax = apicid_pkg_offset(env->nr_dies, cs->nr_cores,
5541 cs->nr_threads);
5542 *ebx = env->nr_dies * cs->nr_cores * cs->nr_threads;
5543 *ecx |= CPUID_TOPOLOGY_LEVEL_DIE;
5544 break;
5545 default:
5546 *eax = 0;
5547 *ebx = 0;
5548 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
5550 assert(!(*eax & ~0x1f));
5551 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
5552 break;
5553 case 0xD: {
5554 /* Processor Extended State */
5555 *eax = 0;
5556 *ebx = 0;
5557 *ecx = 0;
5558 *edx = 0;
5559 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
5560 break;
5563 if (count == 0) {
5564 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
5565 *eax = env->features[FEAT_XSAVE_COMP_LO];
5566 *edx = env->features[FEAT_XSAVE_COMP_HI];
5568 * The initial values of xcr0 and ebx are 0. On hosts without KVM
5569 * commit 412a3c41 (e.g., CentOS 6), ebx always stays 0 even after
5570 * the guest updates xcr0, which crashes some legacy guests
5571 * (e.g., CentOS 6). So set ebx == ecx to work around it.
5573 *ebx = kvm_enabled() ? *ecx : xsave_area_size(env->xcr0);
5574 } else if (count == 1) {
5575 *eax = env->features[FEAT_XSAVE];
5576 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
5577 if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
5578 const ExtSaveArea *esa = &x86_ext_save_areas[count];
5579 *eax = esa->size;
5580 *ebx = esa->offset;
5583 break;
5585 case 0x14: {
5586 /* Intel Processor Trace Enumeration */
5587 *eax = 0;
5588 *ebx = 0;
5589 *ecx = 0;
5590 *edx = 0;
5591 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
5592 !kvm_enabled()) {
5593 break;
5596 if (count == 0) {
5597 *eax = INTEL_PT_MAX_SUBLEAF;
5598 *ebx = INTEL_PT_MINIMAL_EBX;
5599 *ecx = INTEL_PT_MINIMAL_ECX;
5600 } else if (count == 1) {
5601 *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
5602 *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
5604 break;
5606 case 0x40000000:
5608 * The CPUID code in kvm_arch_init_vcpu() ignores values
5609 * set here, but we restrict this to TCG nonetheless.
5611 if (tcg_enabled() && cpu->expose_tcg) {
5612 memcpy(signature, "TCGTCGTCGTCG", 12);
5613 *eax = 0x40000001;
5614 *ebx = signature[0];
5615 *ecx = signature[1];
5616 *edx = signature[2];
5617 } else {
5618 *eax = 0;
5619 *ebx = 0;
5620 *ecx = 0;
5621 *edx = 0;
5623 break;
5624 case 0x40000001:
5625 *eax = 0;
5626 *ebx = 0;
5627 *ecx = 0;
5628 *edx = 0;
5629 break;
5630 case 0x80000000:
5631 *eax = env->cpuid_xlevel;
5632 *ebx = env->cpuid_vendor1;
5633 *edx = env->cpuid_vendor2;
5634 *ecx = env->cpuid_vendor3;
5635 break;
5636 case 0x80000001:
5637 *eax = env->cpuid_version;
5638 *ebx = 0;
5639 *ecx = env->features[FEAT_8000_0001_ECX];
5640 *edx = env->features[FEAT_8000_0001_EDX];
5642 /* The Linux kernel checks for the CMPLegacy bit and
5643 * discards multiple thread information if it is set.
5644 * So don't set it here for Intel to make Linux guests happy.
5646 if (cs->nr_cores * cs->nr_threads > 1) {
5647 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
5648 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
5649 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
5650 *ecx |= 1 << 1; /* CmpLegacy bit */
5653 break;
5654 case 0x80000002:
5655 case 0x80000003:
5656 case 0x80000004:
5657 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
5658 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
5659 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
5660 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
5661 break;
5662 case 0x80000005:
5663 /* cache info (L1 cache) */
5664 if (cpu->cache_info_passthrough) {
5665 host_cpuid(index, 0, eax, ebx, ecx, edx);
5666 break;
5668 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
5669 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
5670 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
5671 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
5672 *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache);
5673 *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache);
5674 break;
5675 case 0x80000006:
5676 /* cache info (L2 cache) */
5677 if (cpu->cache_info_passthrough) {
5678 host_cpuid(index, 0, eax, ebx, ecx, edx);
5679 break;
5681 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
5682 (L2_DTLB_2M_ENTRIES << 16) | \
5683 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
5684 (L2_ITLB_2M_ENTRIES);
5685 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
5686 (L2_DTLB_4K_ENTRIES << 16) | \
5687 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
5688 (L2_ITLB_4K_ENTRIES);
5689 encode_cache_cpuid80000006(env->cache_info_amd.l2_cache,
5690 cpu->enable_l3_cache ?
5691 env->cache_info_amd.l3_cache : NULL,
5692 ecx, edx);
5693 break;
5694 case 0x80000007:
5695 *eax = 0;
5696 *ebx = 0;
5697 *ecx = 0;
5698 *edx = env->features[FEAT_8000_0007_EDX];
5699 break;
5700 case 0x80000008:
5701 /* virtual & phys address size in low 2 bytes. */
5702 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
5703 /* 64 bit processor */
5704 *eax = cpu->phys_bits; /* configurable physical bits */
5705 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
5706 *eax |= 0x00003900; /* 57 bits virtual */
5707 } else {
5708 *eax |= 0x00003000; /* 48 bits virtual */
5710 } else {
5711 *eax = cpu->phys_bits;
5713 *ebx = env->features[FEAT_8000_0008_EBX];
5714 *ecx = 0;
5715 *edx = 0;
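/* ECX[7:0]: number of logical processors in the package minus 1 (AMD "NC" field) */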
5716 if (cs->nr_cores * cs->nr_threads > 1) {
5717 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
5719 break;
5720 case 0x8000000A:
5721 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
5722 *eax = 0x00000001; /* SVM Revision */
5723 *ebx = 0x00000010; /* nr of ASIDs */
5724 *ecx = 0;
5725 *edx = env->features[FEAT_SVM]; /* optional features */
5726 } else {
5727 *eax = 0;
5728 *ebx = 0;
5729 *ecx = 0;
5730 *edx = 0;
5732 break;
5733 case 0x8000001D:
5734 *eax = 0;
5735 if (cpu->cache_info_passthrough) {
5736 host_cpuid(index, count, eax, ebx, ecx, edx);
5737 break;
5739 switch (count) {
5740 case 0: /* L1 dcache info */
5741 encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, cs,
5742 eax, ebx, ecx, edx);
5743 break;
5744 case 1: /* L1 icache info */
5745 encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, cs,
5746 eax, ebx, ecx, edx);
5747 break;
5748 case 2: /* L2 cache info */
5749 encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, cs,
5750 eax, ebx, ecx, edx);
5751 break;
5752 case 3: /* L3 cache info */
5753 encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, cs,
5754 eax, ebx, ecx, edx);
5755 break;
5756 default: /* end of info */
5757 *eax = *ebx = *ecx = *edx = 0;
5758 break;
5760 break;
5761 case 0x8000001E:
5762 assert(cpu->core_id <= 255);
5763 encode_topo_cpuid8000001e(cs, cpu,
5764 eax, ebx, ecx, edx);
5765 break;
5766 case 0xC0000000:
5767 *eax = env->cpuid_xlevel2;
5768 *ebx = 0;
5769 *ecx = 0;
5770 *edx = 0;
5771 break;
5772 case 0xC0000001:
5773 /* Support for VIA CPU's CPUID instruction */
5774 *eax = env->cpuid_version;
5775 *ebx = 0;
5776 *ecx = 0;
5777 *edx = env->features[FEAT_C000_0001_EDX];
5778 break;
5779 case 0xC0000002:
5780 case 0xC0000003:
5781 case 0xC0000004:
5782 /* Reserved for the future, and now filled with zero */
5783 *eax = 0;
5784 *ebx = 0;
5785 *ecx = 0;
5786 *edx = 0;
5787 break;
5788 case 0x8000001F:
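/*
 * AMD SEV leaf: EAX bit 1 advertises SEV; EBX[5:0] is the C-bit position
 * and EBX[11:6] the number of reduced physical address bits.
 */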
5789 *eax = sev_enabled() ? 0x2 : 0;
5790 *ebx = sev_get_cbit_position();
5791 *ebx |= sev_get_reduced_phys_bits() << 6;
5792 *ecx = 0;
5793 *edx = 0;
5794 break;
5795 default:
5796 /* reserved values: zero */
5797 *eax = 0;
5798 *ebx = 0;
5799 *ecx = 0;
5800 *edx = 0;
5801 break;
5805 /* CPUClass::reset() */
5806 static void x86_cpu_reset(CPUState *s)
5808 X86CPU *cpu = X86_CPU(s);
5809 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
5810 CPUX86State *env = &cpu->env;
5811 target_ulong cr4;
5812 uint64_t xcr0;
5813 int i;
5815 xcc->parent_reset(s);
5817 memset(env, 0, offsetof(CPUX86State, end_reset_fields));
5819 env->old_exception = -1;
5821 /* init to reset state */
5823 env->hflags2 |= HF2_GIF_MASK;
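/* CR0 reset value 0x60000010: CD, NW and ET set; paging and protection disabled */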
5825 cpu_x86_update_cr0(env, 0x60000010);
5826 env->a20_mask = ~0x0;
5827 env->smbase = 0x30000;
5828 env->msr_smi_count = 0;
5830 env->idt.limit = 0xffff;
5831 env->gdt.limit = 0xffff;
5832 env->ldt.limit = 0xffff;
5833 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
5834 env->tr.limit = 0xffff;
5835 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
5837 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
5838 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
5839 DESC_R_MASK | DESC_A_MASK);
5840 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
5841 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
5842 DESC_A_MASK);
5843 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
5844 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
5845 DESC_A_MASK);
5846 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
5847 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
5848 DESC_A_MASK);
5849 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
5850 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
5851 DESC_A_MASK);
5852 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
5853 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
5854 DESC_A_MASK);
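/* Execution starts at the reset vector: CS base 0xFFFF0000 + EIP 0xFFF0 = 0xFFFFFFF0 */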
5856 env->eip = 0xfff0;
5857 env->regs[R_EDX] = env->cpuid_version;
5859 env->eflags = 0x2;
5861 /* FPU init */
5862 for (i = 0; i < 8; i++) {
5863 env->fptags[i] = 1;
5865 cpu_set_fpuc(env, 0x37f);
5867 env->mxcsr = 0x1f80;
5868 /* All units are in INIT state. */
5869 env->xstate_bv = 0;
5871 env->pat = 0x0007040600070406ULL;
5872 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
5873 if (env->features[FEAT_1_ECX] & CPUID_EXT_MONITOR) {
5874 env->msr_ia32_misc_enable |= MSR_IA32_MISC_ENABLE_MWAIT;
5877 memset(env->dr, 0, sizeof(env->dr));
5878 env->dr[6] = DR6_FIXED_1;
5879 env->dr[7] = DR7_FIXED_1;
5880 cpu_breakpoint_remove_all(s, BP_CPU);
5881 cpu_watchpoint_remove_all(s, BP_CPU);
5883 cr4 = 0;
5884 xcr0 = XSTATE_FP_MASK;
5886 #ifdef CONFIG_USER_ONLY
5887 /* Enable all the features for user-mode. */
5888 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
5889 xcr0 |= XSTATE_SSE_MASK;
5891 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
5892 const ExtSaveArea *esa = &x86_ext_save_areas[i];
5893 if (env->features[esa->feature] & esa->bits) {
5894 xcr0 |= 1ull << i;
5898 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
5899 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
5901 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
5902 cr4 |= CR4_FSGSBASE_MASK;
5904 #endif
5906 env->xcr0 = xcr0;
5907 cpu_x86_update_cr4(env, cr4);
5910 * SDM 11.11.5 requires:
5911 * - IA32_MTRR_DEF_TYPE MSR.E = 0
5912 * - IA32_MTRR_PHYSMASKn.V = 0
5913 * All other bits are undefined. For simplification, zero it all.
5915 env->mtrr_deftype = 0;
5916 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
5917 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
5919 env->interrupt_injected = -1;
5920 env->exception_nr = -1;
5921 env->exception_pending = 0;
5922 env->exception_injected = 0;
5923 env->exception_has_payload = false;
5924 env->exception_payload = 0;
5925 env->nmi_injected = false;
5926 #if !defined(CONFIG_USER_ONLY)
5927 /* We hard-wire the BSP to the first CPU. */
5928 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
5930 s->halted = !cpu_is_bsp(cpu);
5932 if (kvm_enabled()) {
5933 kvm_arch_reset_vcpu(cpu);
5935 else if (hvf_enabled()) {
5936 hvf_reset_vcpu(s);
5938 #endif
5941 #ifndef CONFIG_USER_ONLY
5942 bool cpu_is_bsp(X86CPU *cpu)
5944 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
5947 /* TODO: remove me, when reset over QOM tree is implemented */
5948 static void x86_cpu_machine_reset_cb(void *opaque)
5950 X86CPU *cpu = opaque;
5951 cpu_reset(CPU(cpu));
5953 #endif
5955 static void mce_init(X86CPU *cpu)
5957 CPUX86State *cenv = &cpu->env;
5958 unsigned int bank;
5960 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
5961 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
5962 (CPUID_MCE | CPUID_MCA)) {
5963 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
5964 (cpu->enable_lmce ? MCG_LMCE_P : 0);
5965 cenv->mcg_ctl = ~(uint64_t)0;
5966 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
5967 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
5972 #ifndef CONFIG_USER_ONLY
5973 APICCommonClass *apic_get_class(void)
5975 const char *apic_type = "apic";
5977 /* TODO: in-kernel irqchip for hvf */
5978 if (kvm_apic_in_kernel()) {
5979 apic_type = "kvm-apic";
5980 } else if (xen_enabled()) {
5981 apic_type = "xen-apic";
5984 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
5987 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
5989 APICCommonState *apic;
5990 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());
5992 cpu->apic_state = DEVICE(object_new_with_class(apic_class));
5994 object_property_add_child(OBJECT(cpu), "lapic",
5995 OBJECT(cpu->apic_state), &error_abort);
5996 object_unref(OBJECT(cpu->apic_state));
5998 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
5999 /* TODO: convert to link<> */
6000 apic = APIC_COMMON(cpu->apic_state);
6001 apic->cpu = cpu;
6002 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
6005 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
6007 APICCommonState *apic;
6008 static bool apic_mmio_map_once;
6010 if (cpu->apic_state == NULL) {
6011 return;
6013 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
6014 errp);
6016 /* Map APIC MMIO area */
6017 apic = APIC_COMMON(cpu->apic_state);
6018 if (!apic_mmio_map_once) {
6019 memory_region_add_subregion_overlap(get_system_memory(),
6020 apic->apicbase &
6021 MSR_IA32_APICBASE_BASE,
6022 &apic->io_memory,
6023 0x1000);
6024 apic_mmio_map_once = true;
6028 static void x86_cpu_machine_done(Notifier *n, void *unused)
6030 X86CPU *cpu = container_of(n, X86CPU, machine_done);
6031 MemoryRegion *smram =
6032 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
6034 if (smram) {
6035 cpu->smram = g_new(MemoryRegion, 1);
6036 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
6037 smram, 0, 1ull << 32);
6038 memory_region_set_enabled(cpu->smram, true);
6039 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
6042 #else
6043 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
6046 #endif
6048 /* Note: Only safe for use on x86(-64) hosts */
6049 static uint32_t x86_host_phys_bits(void)
6051 uint32_t eax;
6052 uint32_t host_phys_bits;
6054 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
6055 if (eax >= 0x80000008) {
6056 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
6057 /* Note: According to AMD doc 25481 rev 2.34, there is a field
6058 * at bits 23:16 that can specify the maximum physical address bits
6059 * for the guest and can override this value; but I've not seen
6060 * anything with that set.
6062 host_phys_bits = eax & 0xff;
6063 } else {
6064 /* It's an odd 64-bit machine that doesn't have the leaf for
6065 * physical address bits; fall back to 36, which matches most
6066 * older Intel CPUs.
6068 host_phys_bits = 36;
6071 return host_phys_bits;
6074 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
6076 if (*min < value) {
6077 *min = value;
6081 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
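/* (e.g. enabling any feature in a CPUID[EAX=7] word raises cpuid_min_level to at least 7) */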
6082 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
6084 CPUX86State *env = &cpu->env;
6085 FeatureWordInfo *fi = &feature_word_info[w];
6086 uint32_t eax = fi->cpuid.eax;
6087 uint32_t region = eax & 0xF0000000;
6089 assert(feature_word_info[w].type == CPUID_FEATURE_WORD);
6090 if (!env->features[w]) {
6091 return;
6094 switch (region) {
6095 case 0x00000000:
6096 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
6097 break;
6098 case 0x80000000:
6099 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
6100 break;
6101 case 0xC0000000:
6102 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
6103 break;
6106 if (eax == 7) {
6107 x86_cpu_adjust_level(cpu, &env->cpuid_min_level_func7,
6108 fi->cpuid.ecx);
6112 /* Calculate XSAVE components based on the configured CPU feature flags */
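/* (e.g. if the "avx" flag is set, the YMM state component, bit 2, is included in the mask) */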
6113 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
6115 CPUX86State *env = &cpu->env;
6116 int i;
6117 uint64_t mask;
6119 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
6120 return;
6123 mask = 0;
6124 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
6125 const ExtSaveArea *esa = &x86_ext_save_areas[i];
6126 if (env->features[esa->feature] & esa->bits) {
6127 mask |= (1ULL << i);
6131 env->features[FEAT_XSAVE_COMP_LO] = mask;
6132 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
6135 /***** Steps involved on loading and filtering CPUID data
6137 * When initializing and realizing a CPU object, the steps
6138 * involved in setting up CPUID data are:
6140 * 1) Loading CPU model definition (X86CPUDefinition). This is
6141 * implemented by x86_cpu_load_model() and should be completely
6142 * transparent, as it is done automatically by instance_init.
6143 * No code should need to look at X86CPUDefinition structs
6144 * outside instance_init.
6146 * 2) CPU expansion. This is done by realize before CPUID
6147 * filtering, and will make sure host/accelerator data is
6148 * loaded for CPU models that depend on host capabilities
6149 * (e.g. "host"). Done by x86_cpu_expand_features().
6151 * 3) CPUID filtering. This initializes extra data related to
6152 * CPUID, and checks if the host supports all capabilities
6153 * required by the CPU. Runnability of a CPU model is
6154 * determined at this step. Done by x86_cpu_filter_features().
6156 * Some operations don't require all steps to be performed.
6157 * More precisely:
6159 * - CPU instance creation (instance_init) will run only CPU
6160 * model loading. CPU expansion can't run at instance_init-time
6161 * because host/accelerator data may be not available yet.
6162 * - CPU realization will perform both CPU model expansion and CPUID
6163 * filtering, and return an error in case one of them fails.
6164 * - query-cpu-definitions needs to run all 3 steps. It needs
6165 * to run CPUID filtering, as the 'unavailable-features'
6166 * field is set based on the filtering results.
6167 * - The query-cpu-model-expansion QMP command only needs to run
6168 * CPU model loading and CPU expansion. It should not filter
6169 * any CPUID data based on host capabilities.
6172 /* Expand CPU configuration data, based on configured features
6173 * and host/accelerator capabilities when appropriate.
6175 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
6177 CPUX86State *env = &cpu->env;
6178 FeatureWord w;
6179 int i;
6180 GList *l;
6181 Error *local_err = NULL;
6183 for (l = plus_features; l; l = l->next) {
6184 const char *prop = l->data;
6185 object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
6186 if (local_err) {
6187 goto out;
6191 for (l = minus_features; l; l = l->next) {
6192 const char *prop = l->data;
6193 object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
6194 if (local_err) {
6195 goto out;
6199 /* TODO: Now that cpu->max_features doesn't overwrite features
6200 * set using QOM properties, we can convert
6201 * plus_features & minus_features to global properties
6202 * inside x86_cpu_parse_featurestr() too.
6204 if (cpu->max_features) {
6205 for (w = 0; w < FEATURE_WORDS; w++) {
6206 /* Override only features that weren't set explicitly
6207 * by the user.
6209 env->features[w] |=
6210 x86_cpu_get_supported_feature_word(w, cpu->migratable) &
6211 ~env->user_features[w] & \
6212 ~feature_word_info[w].no_autoenable_flags;
6216 for (i = 0; i < ARRAY_SIZE(feature_dependencies); i++) {
6217 FeatureDep *d = &feature_dependencies[i];
6218 if (!(env->features[d->from.index] & d->from.mask)) {
6219 uint64_t unavailable_features = env->features[d->to.index] & d->to.mask;
6221 /* Not an error unless the dependent feature was added explicitly. */
6222 mark_unavailable_features(cpu, d->to.index,
6223 unavailable_features & env->user_features[d->to.index],
6224 "This feature depends on other features that were not requested");
6226 env->user_features[d->to.index] |= unavailable_features;
6227 env->features[d->to.index] &= ~unavailable_features;
6231 if (!kvm_enabled() || !cpu->expose_kvm) {
6232 env->features[FEAT_KVM] = 0;
6235 x86_cpu_enable_xsave_components(cpu);
6237 /* CPUID[EAX=7,ECX=0].EBX always raises the level automatically: */
6238 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
6239 if (cpu->full_cpuid_auto_level) {
6240 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
6241 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
6242 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
6243 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
6244 x86_cpu_adjust_feat_level(cpu, FEAT_7_1_EAX);
6245 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
6246 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
6247 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
6248 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
6249 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
6250 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
6251 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
6253 /* Intel Processor Trace requires CPUID[0x14] */
6254 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
6255 kvm_enabled() && cpu->intel_pt_auto_level) {
6256 x86_cpu_adjust_level(cpu, &cpu->env.cpuid_min_level, 0x14);
6259 /* CPU topology with multi-dies support requires CPUID[0x1F] */
6260 if (env->nr_dies > 1) {
6261 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x1F);
6264 /* SVM requires CPUID[0x8000000A] */
6265 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
6266 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
6269 /* SEV requires CPUID[0x8000001F] */
6270 if (sev_enabled()) {
6271 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
6275 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
6276 if (env->cpuid_level_func7 == UINT32_MAX) {
6277 env->cpuid_level_func7 = env->cpuid_min_level_func7;
6279 if (env->cpuid_level == UINT32_MAX) {
6280 env->cpuid_level = env->cpuid_min_level;
6282 if (env->cpuid_xlevel == UINT32_MAX) {
6283 env->cpuid_xlevel = env->cpuid_min_xlevel;
6285 if (env->cpuid_xlevel2 == UINT32_MAX) {
6286 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
6289 out:
6290 if (local_err != NULL) {
6291 error_propagate(errp, local_err);
6296 * Finishes initialization of CPUID data, filtering CPU feature
6297 * words based on host availability of each feature.
6299 * Unsupported features are recorded in cpu->filtered_features.
6301 static void x86_cpu_filter_features(X86CPU *cpu, bool verbose)
6303 CPUX86State *env = &cpu->env;
6304 FeatureWord w;
6305 const char *prefix = NULL;
6307 if (verbose) {
6308 prefix = accel_uses_host_cpuid()
6309 ? "host doesn't support requested feature"
6310 : "TCG doesn't support requested feature";
6313 for (w = 0; w < FEATURE_WORDS; w++) {
6314 uint64_t host_feat =
6315 x86_cpu_get_supported_feature_word(w, false);
6316 uint64_t requested_features = env->features[w];
6317 uint64_t unavailable_features = requested_features & ~host_feat;
6318 mark_unavailable_features(cpu, w, unavailable_features, prefix);
6321 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
6322 kvm_enabled()) {
6323 KVMState *s = CPU(cpu)->kvm_state;
6324 uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
6325 uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
6326 uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
6327 uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
6328 uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);
6330 if (!eax_0 ||
6331 ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
6332 ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
6333 ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
6334 ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
6335 INTEL_PT_ADDR_RANGES_NUM) ||
6336 ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
6337 (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
6338 (ecx_0 & INTEL_PT_IP_LIP)) {
6340 * Processor Trace capabilities aren't configurable, so if the
6341 * host can't emulate the capabilities we report on
6342 * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
6344 mark_unavailable_features(cpu, FEAT_7_0_EBX, CPUID_7_0_EBX_INTEL_PT, prefix);
6349 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
6351 CPUState *cs = CPU(dev);
6352 X86CPU *cpu = X86_CPU(dev);
6353 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
6354 CPUX86State *env = &cpu->env;
6355 Error *local_err = NULL;
6356 static bool ht_warned;
6358 if (xcc->host_cpuid_required) {
6359 if (!accel_uses_host_cpuid()) {
6360 g_autofree char *name = x86_cpu_class_get_model_name(xcc);
6361 error_setg(&local_err, "CPU model '%s' requires KVM", name);
6362 goto out;
6365 if (enable_cpu_pm) {
6366 host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx,
6367 &cpu->mwait.ecx, &cpu->mwait.edx);
6368 env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR;
6372 /* mwait extended info: needed for Core compatibility */
6373 /* We always wake on interrupt even if host does not have the capability */
6374 cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
6376 if (cpu->apic_id == UNASSIGNED_APIC_ID) {
6377 error_setg(errp, "apic-id property was not initialized properly");
6378 return;
6381 x86_cpu_expand_features(cpu, &local_err);
6382 if (local_err) {
6383 goto out;
6386 x86_cpu_filter_features(cpu, cpu->check_cpuid || cpu->enforce_cpuid);
6388 if (cpu->enforce_cpuid && x86_cpu_have_filtered_features(cpu)) {
6389 error_setg(&local_err,
6390 accel_uses_host_cpuid() ?
6391 "Host doesn't support requested features" :
6392 "TCG doesn't support requested features");
6393 goto out;
6396 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
6397 * CPUID[1].EDX.
6399 if (IS_AMD_CPU(env)) {
6400 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
6401 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
6402 & CPUID_EXT2_AMD_ALIASES);
6405 /* For 64-bit systems, think about the number of physical bits to present.
6406 * Ideally this should be the same as the host; anything other than matching
6407 * the host can cause incorrect guest behaviour.
6408 * QEMU used to pick the magic value of 40 bits, which corresponds to
6409 * consumer AMD devices but nothing else.
6411 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
6412 if (accel_uses_host_cpuid()) {
6413 uint32_t host_phys_bits = x86_host_phys_bits();
6414 static bool warned;
6416 /* Print a warning if the user set it to a value that's not the
6417 * host value.
6419 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
6420 !warned) {
6421 warn_report("Host physical bits (%u)"
6422 " does not match phys-bits property (%u)",
6423 host_phys_bits, cpu->phys_bits);
6424 warned = true;
6427 if (cpu->host_phys_bits) {
6428 /* The user asked for us to use the host physical bits */
6429 cpu->phys_bits = host_phys_bits;
6430 if (cpu->host_phys_bits_limit &&
6431 cpu->phys_bits > cpu->host_phys_bits_limit) {
6432 cpu->phys_bits = cpu->host_phys_bits_limit;
6436 if (cpu->phys_bits &&
6437 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
6438 cpu->phys_bits < 32)) {
6439 error_setg(errp, "phys-bits should be between 32 and %u "
6440 " (but is %u)",
6441 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
6442 return;
6444 } else {
6445 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
6446 error_setg(errp, "TCG only supports phys-bits=%u",
6447 TCG_PHYS_ADDR_BITS);
6448 return;
6451 /* 0 means it was not explicitly set by the user (or by machine
6452 * compat_props or by the host code above). In this case, the default
6453 * is the value used by TCG (40).
6455 if (cpu->phys_bits == 0) {
6456 cpu->phys_bits = TCG_PHYS_ADDR_BITS;
6458 } else {
6459 /* For 32-bit systems, don't use the user-set value, but keep
6460 * phys_bits consistent with what we tell the guest.
6462 if (cpu->phys_bits != 0) {
6463 error_setg(errp, "phys-bits is not user-configurable in 32 bit");
6464 return;
6467 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
6468 cpu->phys_bits = 36;
6469 } else {
6470 cpu->phys_bits = 32;
6474 /* Cache information initialization */
6475 if (!cpu->legacy_cache) {
6476 if (!xcc->model || !xcc->model->cpudef->cache_info) {
6477 g_autofree char *name = x86_cpu_class_get_model_name(xcc);
6478 error_setg(errp,
6479 "CPU model '%s' doesn't support legacy-cache=off", name);
6480 return;
6482 env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
6483 *xcc->model->cpudef->cache_info;
6484 } else {
6485 /* Build legacy cache information */
6486 env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
6487 env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
6488 env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
6489 env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;
6491 env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
6492 env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
6493 env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
6494 env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;
6496 env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
6497 env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
6498 env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
6499 env->cache_info_amd.l3_cache = &legacy_l3_cache;
6503 cpu_exec_realizefn(cs, &local_err);
6504 if (local_err != NULL) {
6505 error_propagate(errp, local_err);
6506 return;
6509 #ifndef CONFIG_USER_ONLY
6510 MachineState *ms = MACHINE(qdev_get_machine());
6511 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
6513 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || ms->smp.cpus > 1) {
6514 x86_cpu_apic_create(cpu, &local_err);
6515 if (local_err != NULL) {
6516 goto out;
6519 #endif
6521 mce_init(cpu);
6523 #ifndef CONFIG_USER_ONLY
6524 if (tcg_enabled()) {
6525 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
6526 cpu->cpu_as_root = g_new(MemoryRegion, 1);
6528 /* Outer container... */
6529 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
6530 memory_region_set_enabled(cpu->cpu_as_root, true);
6532 /* ... with two regions inside: normal system memory with low
6533 * priority, and...
6535 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
6536 get_system_memory(), 0, ~0ull);
6537 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
6538 memory_region_set_enabled(cpu->cpu_as_mem, true);
6540 cs->num_ases = 2;
6541 cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
6542 cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);
6544 /* ... SMRAM with higher priority, linked from /machine/smram. */
6545 cpu->machine_done.notify = x86_cpu_machine_done;
6546 qemu_add_machine_init_done_notifier(&cpu->machine_done);
6548 #endif
6550 qemu_init_vcpu(cs);
6553 * Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU
6554 * fixes this issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
6555 * based on inputs (sockets,cores,threads), it is still better to give
6556 * users a warning.
6558 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
6559 * cs->nr_threads hasn't been populated yet and the check is incorrect.
6561 if (IS_AMD_CPU(env) &&
6562 !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) &&
6563 cs->nr_threads > 1 && !ht_warned) {
6564 warn_report("This family of AMD CPU doesn't support "
6565 "hyperthreading(%d)",
6566 cs->nr_threads);
6567 error_printf("Please configure -smp options properly"
6568 " or try enabling topoext feature.\n");
6569 ht_warned = true;
6572 x86_cpu_apic_realize(cpu, &local_err);
6573 if (local_err != NULL) {
6574 goto out;
6576 cpu_reset(cs);
6578 xcc->parent_realize(dev, &local_err);
6580 out:
6581 if (local_err != NULL) {
6582 error_propagate(errp, local_err);
6583 return;
6587 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
6589 X86CPU *cpu = X86_CPU(dev);
6590 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
6591 Error *local_err = NULL;
6593 #ifndef CONFIG_USER_ONLY
6594 cpu_remove_sync(CPU(dev));
6595 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
6596 #endif
6598 if (cpu->apic_state) {
6599 object_unparent(OBJECT(cpu->apic_state));
6600 cpu->apic_state = NULL;
6603 xcc->parent_unrealize(dev, &local_err);
6604 if (local_err != NULL) {
6605 error_propagate(errp, local_err);
6606 return;
6610 typedef struct BitProperty {
6611 FeatureWord w;
6612 uint64_t mask;
6613 } BitProperty;
6615 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
6616 void *opaque, Error **errp)
6618 X86CPU *cpu = X86_CPU(obj);
6619 BitProperty *fp = opaque;
6620 uint64_t f = cpu->env.features[fp->w];
6621 bool value = (f & fp->mask) == fp->mask;
6622 visit_type_bool(v, name, &value, errp);
6625 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
6626 void *opaque, Error **errp)
6628 DeviceState *dev = DEVICE(obj);
6629 X86CPU *cpu = X86_CPU(obj);
6630 BitProperty *fp = opaque;
6631 Error *local_err = NULL;
6632 bool value;
6634 if (dev->realized) {
6635 qdev_prop_set_after_realize(dev, name, errp);
6636 return;
6639 visit_type_bool(v, name, &value, &local_err);
6640 if (local_err) {
6641 error_propagate(errp, local_err);
6642 return;
6645 if (value) {
6646 cpu->env.features[fp->w] |= fp->mask;
6647 } else {
6648 cpu->env.features[fp->w] &= ~fp->mask;
6650 cpu->env.user_features[fp->w] |= fp->mask;
6653 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
6654 void *opaque)
6656 BitProperty *prop = opaque;
6657 g_free(prop);
6660 /* Register a boolean property to get/set a single bit in a uint64_t field.
6662 * The same property name can be registered multiple times to make it affect
6663 * multiple bits in the same FeatureWord. In that case, the getter will return
6664 * true only if all bits are set.
6666 static void x86_cpu_register_bit_prop(X86CPU *cpu,
6667 const char *prop_name,
6668 FeatureWord w,
6669 int bitnr)
6671 BitProperty *fp;
6672 ObjectProperty *op;
6673 uint64_t mask = (1ULL << bitnr);
6675 op = object_property_find(OBJECT(cpu), prop_name, NULL);
6676 if (op) {
6677 fp = op->opaque;
6678 assert(fp->w == w);
6679 fp->mask |= mask;
6680 } else {
6681 fp = g_new0(BitProperty, 1);
6682 fp->w = w;
6683 fp->mask = mask;
6684 object_property_add(OBJECT(cpu), prop_name, "bool",
6685 x86_cpu_get_bit_prop,
6686 x86_cpu_set_bit_prop,
6687 x86_cpu_release_bit_prop, fp, &error_abort);
6688 }
6689 }
6691 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
6692 FeatureWord w,
6693 int bitnr)
6694 {
6695 FeatureWordInfo *fi = &feature_word_info[w];
6696 const char *name = fi->feat_names[bitnr];
6698 if (!name) {
6699 return;
6700 }
6702 /* Property names should use "-" instead of "_".
6703 * Old names containing underscores are registered as aliases
6704 * using object_property_add_alias()
6705 */
6706 assert(!strchr(name, '_'));
6707 /* aliases don't use "|" delimiters anymore, they are registered
6708 * manually using object_property_add_alias() */
6709 assert(!strchr(name, '|'));
6710 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
6711 }
6713 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
6714 {
6715 X86CPU *cpu = X86_CPU(cs);
6716 CPUX86State *env = &cpu->env;
6717 GuestPanicInformation *panic_info = NULL;
6719 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
6720 panic_info = g_malloc0(sizeof(GuestPanicInformation));
6722 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
6724 assert(HV_CRASH_PARAMS >= 5);
6725 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
6726 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
6727 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
6728 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
6729 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
6730 }
6732 return panic_info;
6733 }
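/*
 * QOM getter behind the "crash-information" property added in
 * x86_cpu_initfn(). Illustrative QMP use (the object path is an example
 * only), after a Hyper-V crash MSR write has been recorded for the CPU:
 *   { "execute": "qom-get",
 *     "arguments": { "path": "/machine/unattached/device[0]",
 *                    "property": "crash-information" } }
 */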
6734 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
6735 const char *name, void *opaque,
6736 Error **errp)
6737 {
6738 CPUState *cs = CPU(obj);
6739 GuestPanicInformation *panic_info;
6741 if (!cs->crash_occurred) {
6742 error_setg(errp, "No crash occurred");
6743 return;
6744 }
6746 panic_info = x86_cpu_get_crash_info(cs);
6747 if (panic_info == NULL) {
6748 error_setg(errp, "No crash information");
6749 return;
6750 }
6752 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
6753 errp);
6754 qapi_free_GuestPanicInformation(panic_info);
6755 }
6757 static void x86_cpu_initfn(Object *obj)
6758 {
6759 X86CPU *cpu = X86_CPU(obj);
6760 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
6761 CPUX86State *env = &cpu->env;
6762 FeatureWord w;
6764 env->nr_dies = 1;
6765 cpu_set_cpustate_pointers(cpu);
6767 object_property_add(obj, "family", "int",
6768 x86_cpuid_version_get_family,
6769 x86_cpuid_version_set_family, NULL, NULL, NULL);
6770 object_property_add(obj, "model", "int",
6771 x86_cpuid_version_get_model,
6772 x86_cpuid_version_set_model, NULL, NULL, NULL);
6773 object_property_add(obj, "stepping", "int",
6774 x86_cpuid_version_get_stepping,
6775 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
6776 object_property_add_str(obj, "vendor",
6777 x86_cpuid_get_vendor,
6778 x86_cpuid_set_vendor, NULL);
6779 object_property_add_str(obj, "model-id",
6780 x86_cpuid_get_model_id,
6781 x86_cpuid_set_model_id, NULL);
6782 object_property_add(obj, "tsc-frequency", "int",
6783 x86_cpuid_get_tsc_freq,
6784 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
6785 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
6786 x86_cpu_get_feature_words,
6787 NULL, NULL, (void *)env->features, NULL);
6788 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
6789 x86_cpu_get_feature_words,
6790 NULL, NULL, (void *)cpu->filtered_features, NULL);
6791 /*
6792 * The "unavailable-features" property has the same semantics as
6793 * CpuDefinitionInfo.unavailable-features on the "query-cpu-definitions"
6794 * QMP command: they list the features that would have prevented the
6795 * CPU from running if the "enforce" flag was set.
6796 */
6797 object_property_add(obj, "unavailable-features", "strList",
6798 x86_cpu_get_unavailable_features,
6799 NULL, NULL, NULL, &error_abort);
6801 object_property_add(obj, "crash-information", "GuestPanicInformation",
6802 x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);
6804 for (w = 0; w < FEATURE_WORDS; w++) {
6805 int bitnr;
6807 for (bitnr = 0; bitnr < 64; bitnr++) {
6808 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
6809 }
6810 }
6812 object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
6813 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
6814 object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
6815 object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
6816 object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
6817 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
6818 object_property_add_alias(obj, "i64", obj, "lm", &error_abort);
6820 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
6821 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
6822 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
6823 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
6824 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
6825 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
6826 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
6827 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
6828 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
6829 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
6830 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
6831 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
6832 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
6833 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
6834 object_property_add_alias(obj, "kvm_poll_control", obj, "kvm-poll-control",
6835 &error_abort);
6836 object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
6837 object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
6838 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
6839 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
6840 object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
6841 object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
6842 object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);
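/*
 * Illustrative effect of the aliases above (example flags, not from this
 * file): "-cpu qemu64,+sse4_1,+lahf_lm" and "-cpu qemu64,+sse4.1,+lahf-lm"
 * enable the same feature bits.
 */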
6844 if (xcc->model) {
6845 x86_cpu_load_model(cpu, xcc->model, &error_abort);
6846 }
6847 }
6849 static int64_t x86_cpu_get_arch_id(CPUState *cs)
6850 {
6851 X86CPU *cpu = X86_CPU(cs);
6853 return cpu->apic_id;
6854 }
6856 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
6857 {
6858 X86CPU *cpu = X86_CPU(cs);
6860 return cpu->env.cr[0] & CR0_PG_MASK;
6861 }
6863 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
6864 {
6865 X86CPU *cpu = X86_CPU(cs);
6867 cpu->env.eip = value;
6868 }
6870 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
6871 {
6872 X86CPU *cpu = X86_CPU(cs);
6874 cpu->env.eip = tb->pc - tb->cs_base;
6875 }
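/*
 * Descriptive note (not in the original source): the function below returns
 * the highest-priority interrupt type that could be serviced for the given
 * interrupt_request, or 0 if none can be delivered right now. The checks
 * roughly follow hardware priority: POLL and SIPI first, then (only while
 * GIF is set) SMI, NMI, MCE, and finally maskable/virtual interrupts gated
 * on EFLAGS.IF and interrupt inhibition.
 */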
6877 int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
6878 {
6879 X86CPU *cpu = X86_CPU(cs);
6880 CPUX86State *env = &cpu->env;
6882 #if !defined(CONFIG_USER_ONLY)
6883 if (interrupt_request & CPU_INTERRUPT_POLL) {
6884 return CPU_INTERRUPT_POLL;
6885 }
6886 #endif
6887 if (interrupt_request & CPU_INTERRUPT_SIPI) {
6888 return CPU_INTERRUPT_SIPI;
6889 }
6891 if (env->hflags2 & HF2_GIF_MASK) {
6892 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
6893 !(env->hflags & HF_SMM_MASK)) {
6894 return CPU_INTERRUPT_SMI;
6895 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
6896 !(env->hflags2 & HF2_NMI_MASK)) {
6897 return CPU_INTERRUPT_NMI;
6898 } else if (interrupt_request & CPU_INTERRUPT_MCE) {
6899 return CPU_INTERRUPT_MCE;
6900 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
6901 (((env->hflags2 & HF2_VINTR_MASK) &&
6902 (env->hflags2 & HF2_HIF_MASK)) ||
6903 (!(env->hflags2 & HF2_VINTR_MASK) &&
6904 (env->eflags & IF_MASK &&
6905 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
6906 return CPU_INTERRUPT_HARD;
6907 #if !defined(CONFIG_USER_ONLY)
6908 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
6909 (env->eflags & IF_MASK) &&
6910 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
6911 return CPU_INTERRUPT_VIRQ;
6912 #endif
6913 }
6914 }
6916 return 0;
6917 }
6919 static bool x86_cpu_has_work(CPUState *cs)
6920 {
6921 return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
6922 }
6924 static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
6925 {
6926 X86CPU *cpu = X86_CPU(cs);
6927 CPUX86State *env = &cpu->env;
6929 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
6930 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
6931 : bfd_mach_i386_i8086);
6932 info->print_insn = print_insn_i386;
6934 info->cap_arch = CS_ARCH_X86;
6935 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
6936 : env->hflags & HF_CS32_MASK ? CS_MODE_32
6937 : CS_MODE_16);
6938 info->cap_insn_unit = 1;
6939 info->cap_insn_split = 8;
6940 }
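/*
 * Descriptive note (not in the original source): the function below
 * recomputes the derived env->hflags bits (CPL, CS/SS size, LMA, ADDSEG, ...)
 * from the architectural register state, e.g. after an accelerator syncs
 * segment and control registers back into CPUX86State.
 */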
6942 void x86_update_hflags(CPUX86State *env)
6943 {
6944 uint32_t hflags;
6945 #define HFLAG_COPY_MASK \
6946 ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
6947 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
6948 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
6949 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
6951 hflags = env->hflags & HFLAG_COPY_MASK;
6952 hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
6953 hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
6954 hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
6955 (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
6956 hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
6958 if (env->cr[4] & CR4_OSFXSR_MASK) {
6959 hflags |= HF_OSFXSR_MASK;
6960 }
6962 if (env->efer & MSR_EFER_LMA) {
6963 hflags |= HF_LMA_MASK;
6964 }
6966 if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
6967 hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
6968 } else {
6969 hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
6970 (DESC_B_SHIFT - HF_CS32_SHIFT);
6971 hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
6972 (DESC_B_SHIFT - HF_SS32_SHIFT);
6973 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
6974 !(hflags & HF_CS32_MASK)) {
6975 hflags |= HF_ADDSEG_MASK;
6976 } else {
6977 hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
6978 env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
6979 }
6980 }
6981 env->hflags = hflags;
6982 }
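/*
 * Most of the properties below map directly to -cpu options. Illustrative
 * command line (model and values are examples only):
 *   -cpu Skylake-Client,hv-relaxed=on,phys-bits=46
 */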
6984 static Property x86_cpu_properties[] = {
6985 #ifdef CONFIG_USER_ONLY
6986 /* apic_id = 0 by default for *-user, see commit 9886e834 */
6987 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
6988 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
6989 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
6990 DEFINE_PROP_INT32("die-id", X86CPU, die_id, 0),
6991 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
6992 #else
6993 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
6994 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
6995 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
6996 DEFINE_PROP_INT32("die-id", X86CPU, die_id, -1),
6997 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
6998 #endif
6999 DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
7000 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
7002 DEFINE_PROP_UINT32("hv-spinlocks", X86CPU, hyperv_spinlock_attempts,
7003 HYPERV_SPINLOCK_NEVER_RETRY),
7004 DEFINE_PROP_BIT64("hv-relaxed", X86CPU, hyperv_features,
7005 HYPERV_FEAT_RELAXED, 0),
7006 DEFINE_PROP_BIT64("hv-vapic", X86CPU, hyperv_features,
7007 HYPERV_FEAT_VAPIC, 0),
7008 DEFINE_PROP_BIT64("hv-time", X86CPU, hyperv_features,
7009 HYPERV_FEAT_TIME, 0),
7010 DEFINE_PROP_BIT64("hv-crash", X86CPU, hyperv_features,
7011 HYPERV_FEAT_CRASH, 0),
7012 DEFINE_PROP_BIT64("hv-reset", X86CPU, hyperv_features,
7013 HYPERV_FEAT_RESET, 0),
7014 DEFINE_PROP_BIT64("hv-vpindex", X86CPU, hyperv_features,
7015 HYPERV_FEAT_VPINDEX, 0),
7016 DEFINE_PROP_BIT64("hv-runtime", X86CPU, hyperv_features,
7017 HYPERV_FEAT_RUNTIME, 0),
7018 DEFINE_PROP_BIT64("hv-synic", X86CPU, hyperv_features,
7019 HYPERV_FEAT_SYNIC, 0),
7020 DEFINE_PROP_BIT64("hv-stimer", X86CPU, hyperv_features,
7021 HYPERV_FEAT_STIMER, 0),
7022 DEFINE_PROP_BIT64("hv-frequencies", X86CPU, hyperv_features,
7023 HYPERV_FEAT_FREQUENCIES, 0),
7024 DEFINE_PROP_BIT64("hv-reenlightenment", X86CPU, hyperv_features,
7025 HYPERV_FEAT_REENLIGHTENMENT, 0),
7026 DEFINE_PROP_BIT64("hv-tlbflush", X86CPU, hyperv_features,
7027 HYPERV_FEAT_TLBFLUSH, 0),
7028 DEFINE_PROP_BIT64("hv-evmcs", X86CPU, hyperv_features,
7029 HYPERV_FEAT_EVMCS, 0),
7030 DEFINE_PROP_BIT64("hv-ipi", X86CPU, hyperv_features,
7031 HYPERV_FEAT_IPI, 0),
7032 DEFINE_PROP_BIT64("hv-stimer-direct", X86CPU, hyperv_features,
7033 HYPERV_FEAT_STIMER_DIRECT, 0),
7034 DEFINE_PROP_ON_OFF_AUTO("hv-no-nonarch-coresharing", X86CPU,
7035 hyperv_no_nonarch_cs, ON_OFF_AUTO_OFF),
7036 DEFINE_PROP_BOOL("hv-passthrough", X86CPU, hyperv_passthrough, false),
7038 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
7039 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
7040 DEFINE_PROP_BOOL("x-force-features", X86CPU, force_features, false),
7041 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
7042 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
7043 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
7044 DEFINE_PROP_UINT8("host-phys-bits-limit", X86CPU, host_phys_bits_limit, 0),
7045 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
7046 DEFINE_PROP_UINT32("level-func7", X86CPU, env.cpuid_level_func7,
7047 UINT32_MAX),
7048 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
7049 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
7050 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
7051 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
7052 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
7053 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
7054 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
7055 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
7056 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
7057 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
7058 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
7059 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
7060 false),
7061 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
7062 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
7063 DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count,
7064 true),
7065 /*
7066 * legacy_cache defaults to true unless the CPU model provides its
7067 * own cache information (see x86_cpu_load_model()).
7068 */
7069 DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true),
7071 /*
7072 * From "Requirements for Implementing the Microsoft
7073 * Hypervisor Interface":
7074 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
7076 * "Starting with Windows Server 2012 and Windows 8, if
7077 * CPUID.40000005.EAX contains a value of -1, Windows assumes that
7078 * the hypervisor imposes no specific limit to the number of VPs.
7079 * In this case, Windows Server 2012 guest VMs may use more than
7080 * 64 VPs, up to the maximum supported number of processors applicable
7081 * to the specific Windows version being used."
7082 */
7083 DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
7084 DEFINE_PROP_BOOL("x-hv-synic-kvm-only", X86CPU, hyperv_synic_kvm_only,
7085 false),
7086 DEFINE_PROP_BOOL("x-intel-pt-auto-level", X86CPU, intel_pt_auto_level,
7087 true),
7088 DEFINE_PROP_END_OF_LIST()
7089 };
7091 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
7092 {
7093 X86CPUClass *xcc = X86_CPU_CLASS(oc);
7094 CPUClass *cc = CPU_CLASS(oc);
7095 DeviceClass *dc = DEVICE_CLASS(oc);
7097 device_class_set_parent_realize(dc, x86_cpu_realizefn,
7098 &xcc->parent_realize);
7099 device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
7100 &xcc->parent_unrealize);
7101 dc->props = x86_cpu_properties;
7103 xcc->parent_reset = cc->reset;
7104 cc->reset = x86_cpu_reset;
7105 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
7107 cc->class_by_name = x86_cpu_class_by_name;
7108 cc->parse_features = x86_cpu_parse_featurestr;
7109 cc->has_work = x86_cpu_has_work;
7110 #ifdef CONFIG_TCG
7111 cc->do_interrupt = x86_cpu_do_interrupt;
7112 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
7113 #endif
7114 cc->dump_state = x86_cpu_dump_state;
7115 cc->get_crash_info = x86_cpu_get_crash_info;
7116 cc->set_pc = x86_cpu_set_pc;
7117 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
7118 cc->gdb_read_register = x86_cpu_gdb_read_register;
7119 cc->gdb_write_register = x86_cpu_gdb_write_register;
7120 cc->get_arch_id = x86_cpu_get_arch_id;
7121 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
7122 #ifndef CONFIG_USER_ONLY
7123 cc->asidx_from_attrs = x86_asidx_from_attrs;
7124 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
7125 cc->get_phys_page_attrs_debug = x86_cpu_get_phys_page_attrs_debug;
7126 cc->write_elf64_note = x86_cpu_write_elf64_note;
7127 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
7128 cc->write_elf32_note = x86_cpu_write_elf32_note;
7129 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
7130 cc->vmsd = &vmstate_x86_cpu;
7131 #endif
7132 cc->gdb_arch_name = x86_gdb_arch_name;
7133 #ifdef TARGET_X86_64
7134 cc->gdb_core_xml_file = "i386-64bit.xml";
7135 cc->gdb_num_core_regs = 66;
7136 #else
7137 cc->gdb_core_xml_file = "i386-32bit.xml";
7138 cc->gdb_num_core_regs = 50;
7139 #endif
7140 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
7141 cc->debug_excp_handler = breakpoint_handler;
7142 #endif
7143 cc->cpu_exec_enter = x86_cpu_exec_enter;
7144 cc->cpu_exec_exit = x86_cpu_exec_exit;
7145 #ifdef CONFIG_TCG
7146 cc->tcg_initialize = tcg_x86_init;
7147 cc->tlb_fill = x86_cpu_tlb_fill;
7148 #endif
7149 cc->disas_set_info = x86_disas_set_info;
7151 dc->user_creatable = true;
7152 }
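/*
 * TYPE_X86_CPU is abstract; the concrete CPU model types (plus "max", "base"
 * and, when available, "host") are registered from x86_cpu_register_types()
 * at the end of this file.
 */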
7154 static const TypeInfo x86_cpu_type_info = {
7155 .name = TYPE_X86_CPU,
7156 .parent = TYPE_CPU,
7157 .instance_size = sizeof(X86CPU),
7158 .instance_init = x86_cpu_initfn,
7159 .abstract = true,
7160 .class_size = sizeof(X86CPUClass),
7161 .class_init = x86_cpu_common_class_init,
7162 };
7165 /* "base" CPU model, used by query-cpu-model-expansion */
7166 static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
7167 {
7168 X86CPUClass *xcc = X86_CPU_CLASS(oc);
7170 xcc->static_model = true;
7171 xcc->migration_safe = true;
7172 xcc->model_description = "base CPU model type with no features enabled";
7173 xcc->ordering = 8;
7174 }
7176 static const TypeInfo x86_base_cpu_type_info = {
7177 .name = X86_CPU_TYPE_NAME("base"),
7178 .parent = TYPE_X86_CPU,
7179 .class_init = x86_cpu_base_class_init,
7180 };
7182 static void x86_cpu_register_types(void)
7183 {
7184 int i;
7186 type_register_static(&x86_cpu_type_info);
7187 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
7188 x86_register_cpudef_types(&builtin_x86_defs[i]);
7189 }
7190 type_register_static(&max_x86_cpu_type_info);
7191 type_register_static(&x86_base_cpu_type_info);
7192 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
7193 type_register_static(&host_x86_cpu_type_info);
7194 #endif
7195 }
7197 type_init(x86_cpu_register_types)