s390x/tcg: Fix VECTOR SHIFT RIGHT ARITHMETIC BY BYTE
[qemu/ar7.git] / target / i386 / cpu.c
blob47200b40c1e1a2295d3b9a6e0a553bb4b0d3a030
1 /*
2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "qemu/units.h"
22 #include "qemu/cutils.h"
23 #include "qemu/bitops.h"
24 #include "qemu/qemu-print.h"
26 #include "cpu.h"
27 #include "exec/exec-all.h"
28 #include "sysemu/kvm.h"
29 #include "sysemu/reset.h"
30 #include "sysemu/hvf.h"
31 #include "sysemu/cpus.h"
32 #include "kvm_i386.h"
33 #include "sev_i386.h"
35 #include "qemu/error-report.h"
36 #include "qemu/module.h"
37 #include "qemu/option.h"
38 #include "qemu/config-file.h"
39 #include "qapi/error.h"
40 #include "qapi/qapi-visit-machine.h"
41 #include "qapi/qapi-visit-run-state.h"
42 #include "qapi/qmp/qdict.h"
43 #include "qapi/qmp/qerror.h"
44 #include "qapi/visitor.h"
45 #include "qom/qom-qobject.h"
46 #include "sysemu/arch_init.h"
47 #include "qapi/qapi-commands-machine-target.h"
49 #include "standard-headers/asm-x86/kvm_para.h"
51 #include "sysemu/sysemu.h"
52 #include "sysemu/tcg.h"
53 #include "hw/qdev-properties.h"
54 #include "hw/i386/topology.h"
55 #ifndef CONFIG_USER_ONLY
56 #include "exec/address-spaces.h"
57 #include "hw/xen/xen.h"
58 #include "hw/i386/apic_internal.h"
59 #include "hw/boards.h"
60 #endif
62 #include "disas/capstone.h"
64 /* Helpers for building CPUID[2] descriptors: */
66 struct CPUID2CacheDescriptorInfo {
67 enum CacheType type;
68 int level;
69 int size;
70 int line_size;
71 int associativity;
75 * Known CPUID 2 cache descriptors.
76 * From Intel SDM Volume 2A, CPUID instruction
78 struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
79 [0x06] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 8 * KiB,
80 .associativity = 4, .line_size = 32, },
81 [0x08] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 16 * KiB,
82 .associativity = 4, .line_size = 32, },
83 [0x09] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
84 .associativity = 4, .line_size = 64, },
85 [0x0A] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
86 .associativity = 2, .line_size = 32, },
87 [0x0C] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
88 .associativity = 4, .line_size = 32, },
89 [0x0D] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
90 .associativity = 4, .line_size = 64, },
91 [0x0E] = { .level = 1, .type = DATA_CACHE, .size = 24 * KiB,
92 .associativity = 6, .line_size = 64, },
93 [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
94 .associativity = 2, .line_size = 64, },
95 [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
96 .associativity = 8, .line_size = 64, },
97 /* lines per sector is not supported cpuid2_cache_descriptor(),
98 * so descriptors 0x22, 0x23 are not included
100 [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
101 .associativity = 16, .line_size = 64, },
102 /* lines per sector is not supported cpuid2_cache_descriptor(),
103 * so descriptors 0x25, 0x20 are not included
105 [0x2C] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
106 .associativity = 8, .line_size = 64, },
107 [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
108 .associativity = 8, .line_size = 64, },
109 [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
110 .associativity = 4, .line_size = 32, },
111 [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
112 .associativity = 4, .line_size = 32, },
113 [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
114 .associativity = 4, .line_size = 32, },
115 [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
116 .associativity = 4, .line_size = 32, },
117 [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
118 .associativity = 4, .line_size = 32, },
119 [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
120 .associativity = 4, .line_size = 64, },
121 [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
122 .associativity = 8, .line_size = 64, },
123 [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB,
124 .associativity = 12, .line_size = 64, },
125 /* Descriptor 0x49 depends on CPU family/model, so it is not included */
126 [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
127 .associativity = 12, .line_size = 64, },
128 [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
129 .associativity = 16, .line_size = 64, },
130 [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
131 .associativity = 12, .line_size = 64, },
132 [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB,
133 .associativity = 16, .line_size = 64, },
134 [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB,
135 .associativity = 24, .line_size = 64, },
136 [0x60] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
137 .associativity = 8, .line_size = 64, },
138 [0x66] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
139 .associativity = 4, .line_size = 64, },
140 [0x67] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
141 .associativity = 4, .line_size = 64, },
142 [0x68] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
143 .associativity = 4, .line_size = 64, },
144 [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
145 .associativity = 4, .line_size = 64, },
146 /* lines per sector is not supported cpuid2_cache_descriptor(),
147 * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
149 [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
150 .associativity = 8, .line_size = 64, },
151 [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
152 .associativity = 2, .line_size = 64, },
153 [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
154 .associativity = 8, .line_size = 64, },
155 [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
156 .associativity = 8, .line_size = 32, },
157 [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
158 .associativity = 8, .line_size = 32, },
159 [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
160 .associativity = 8, .line_size = 32, },
161 [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
162 .associativity = 8, .line_size = 32, },
163 [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
164 .associativity = 4, .line_size = 64, },
165 [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
166 .associativity = 8, .line_size = 64, },
167 [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB,
168 .associativity = 4, .line_size = 64, },
169 [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
170 .associativity = 4, .line_size = 64, },
171 [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
172 .associativity = 4, .line_size = 64, },
173 [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
174 .associativity = 8, .line_size = 64, },
175 [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
176 .associativity = 8, .line_size = 64, },
177 [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
178 .associativity = 8, .line_size = 64, },
179 [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB,
180 .associativity = 12, .line_size = 64, },
181 [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB,
182 .associativity = 12, .line_size = 64, },
183 [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
184 .associativity = 12, .line_size = 64, },
185 [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
186 .associativity = 16, .line_size = 64, },
187 [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
188 .associativity = 16, .line_size = 64, },
189 [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
190 .associativity = 16, .line_size = 64, },
191 [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
192 .associativity = 24, .line_size = 64, },
193 [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB,
194 .associativity = 24, .line_size = 64, },
195 [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB,
196 .associativity = 24, .line_size = 64, },
200 * "CPUID leaf 2 does not report cache descriptor information,
201 * use CPUID leaf 4 to query cache parameters"
203 #define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF
206 * Return a CPUID 2 cache descriptor for a given cache.
207 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
209 static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
211 int i;
213 assert(cache->size > 0);
214 assert(cache->level > 0);
215 assert(cache->line_size > 0);
216 assert(cache->associativity > 0);
217 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
218 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
219 if (d->level == cache->level && d->type == cache->type &&
220 d->size == cache->size && d->line_size == cache->line_size &&
221 d->associativity == cache->associativity) {
222 return i;
226 return CACHE_DESCRIPTOR_UNAVAILABLE;
/* CPUID Leaf 4 constants: */

/* EAX: */
#define CACHE_TYPE_D    1
#define CACHE_TYPE_I    2
#define CACHE_TYPE_UNIFIED   3
/* Cache level occupies EAX bits 7:5; parenthesize the argument so an
 * expression argument can't change operator precedence. */
#define CACHE_LEVEL(l)        ((l) << 5)

#define CACHE_SELF_INIT_LEVEL (1 << 8)

/* EDX: */
#define CACHE_NO_INVD_SHARING (1 << 0)
#define CACHE_INCLUSIVE       (1 << 1)
#define CACHE_COMPLEX_IDX     (1 << 2)

/* Encode CacheType for CPUID[4].EAX */
#define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \
                       ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \
                       ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
                       0 /* Invalid value */)
252 /* Encode cache info for CPUID[4] */
253 static void encode_cache_cpuid4(CPUCacheInfo *cache,
254 int num_apic_ids, int num_cores,
255 uint32_t *eax, uint32_t *ebx,
256 uint32_t *ecx, uint32_t *edx)
258 assert(cache->size == cache->line_size * cache->associativity *
259 cache->partitions * cache->sets);
261 assert(num_apic_ids > 0);
262 *eax = CACHE_TYPE(cache->type) |
263 CACHE_LEVEL(cache->level) |
264 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
265 ((num_cores - 1) << 26) |
266 ((num_apic_ids - 1) << 14);
268 assert(cache->line_size > 0);
269 assert(cache->partitions > 0);
270 assert(cache->associativity > 0);
271 /* We don't implement fully-associative caches */
272 assert(cache->associativity < cache->sets);
273 *ebx = (cache->line_size - 1) |
274 ((cache->partitions - 1) << 12) |
275 ((cache->associativity - 1) << 22);
277 assert(cache->sets > 0);
278 *ecx = cache->sets - 1;
280 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
281 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
282 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
285 /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
286 static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
288 assert(cache->size % 1024 == 0);
289 assert(cache->lines_per_tag > 0);
290 assert(cache->associativity > 0);
291 assert(cache->line_size > 0);
292 return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
293 (cache->lines_per_tag << 8) | (cache->line_size);
#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006:
 * each argument use is parenthesized so expression arguments are safe. */
#define AMD_ENC_ASSOC(a) ((a) <=   1 ? (a)  : \
                          (a) ==   2 ? 0x2 : \
                          (a) ==   4 ? 0x4 : \
                          (a) ==   8 ? 0x6 : \
                          (a) ==  16 ? 0x8 : \
                          (a) ==  32 ? 0xA : \
                          (a) ==  48 ? 0xB : \
                          (a) ==  64 ? 0xC : \
                          (a) ==  96 ? 0xD : \
                          (a) == 128 ? 0xE : \
                          (a) == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
313 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
314 * @l3 can be NULL.
316 static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
317 CPUCacheInfo *l3,
318 uint32_t *ecx, uint32_t *edx)
320 assert(l2->size % 1024 == 0);
321 assert(l2->associativity > 0);
322 assert(l2->lines_per_tag > 0);
323 assert(l2->line_size > 0);
324 *ecx = ((l2->size / 1024) << 16) |
325 (AMD_ENC_ASSOC(l2->associativity) << 12) |
326 (l2->lines_per_tag << 8) | (l2->line_size);
328 if (l3) {
329 assert(l3->size % (512 * 1024) == 0);
330 assert(l3->associativity > 0);
331 assert(l3->lines_per_tag > 0);
332 assert(l3->line_size > 0);
333 *edx = ((l3->size / (512 * 1024)) << 18) |
334 (AMD_ENC_ASSOC(l3->associativity) << 12) |
335 (l3->lines_per_tag << 8) | (l3->line_size);
336 } else {
337 *edx = 0;
/*
 * Definitions used for building CPUID Leaf 0x8000001D and 0x8000001E
 * Please refer to the AMD64 Architecture Programmer's Manual Volume 3.
 * Define the constants to build the cpu topology. Right now, TOPOEXT
 * feature is enabled only on EPYC. So, these constants are based on
 * EPYC supported configurations. We may need to handle the cases if
 * these values change in future.
 */
/* Maximum core complexes in a node */
#define MAX_CCX 2
/* Maximum cores in a core complex */
#define MAX_CORES_IN_CCX 4
/* Maximum cores in a node */
#define MAX_CORES_IN_NODE 8
/* Maximum nodes in a socket */
#define MAX_NODES_PER_SOCKET 4
359 * Figure out the number of nodes required to build this config.
360 * Max cores in a node is 8
362 static int nodes_in_socket(int nr_cores)
364 int nodes;
366 nodes = DIV_ROUND_UP(nr_cores, MAX_CORES_IN_NODE);
368 /* Hardware does not support config with 3 nodes, return 4 in that case */
369 return (nodes == 3) ? 4 : nodes;
373 * Decide the number of cores in a core complex with the given nr_cores using
374 * following set constants MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE and
375 * MAX_NODES_PER_SOCKET. Maintain symmetry as much as possible
376 * L3 cache is shared across all cores in a core complex. So, this will also
377 * tell us how many cores are sharing the L3 cache.
379 static int cores_in_core_complex(int nr_cores)
381 int nodes;
383 /* Check if we can fit all the cores in one core complex */
384 if (nr_cores <= MAX_CORES_IN_CCX) {
385 return nr_cores;
387 /* Get the number of nodes required to build this config */
388 nodes = nodes_in_socket(nr_cores);
391 * Divide the cores accros all the core complexes
392 * Return rounded up value
394 return DIV_ROUND_UP(nr_cores, nodes * MAX_CCX);
397 /* Encode cache info for CPUID[8000001D] */
398 static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs,
399 uint32_t *eax, uint32_t *ebx,
400 uint32_t *ecx, uint32_t *edx)
402 uint32_t l3_cores;
403 assert(cache->size == cache->line_size * cache->associativity *
404 cache->partitions * cache->sets);
406 *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) |
407 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0);
409 /* L3 is shared among multiple cores */
410 if (cache->level == 3) {
411 l3_cores = cores_in_core_complex(cs->nr_cores);
412 *eax |= ((l3_cores * cs->nr_threads) - 1) << 14;
413 } else {
414 *eax |= ((cs->nr_threads - 1) << 14);
417 assert(cache->line_size > 0);
418 assert(cache->partitions > 0);
419 assert(cache->associativity > 0);
420 /* We don't implement fully-associative caches */
421 assert(cache->associativity < cache->sets);
422 *ebx = (cache->line_size - 1) |
423 ((cache->partitions - 1) << 12) |
424 ((cache->associativity - 1) << 22);
426 assert(cache->sets > 0);
427 *ecx = cache->sets - 1;
429 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
430 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
431 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
/* Data structure to hold the configuration info for a given core index */
struct core_topology {
    /* core complex id of the current core index */
    int ccx_id;
    /*
     * Adjusted core index for this core in the topology
     * This can be 0,1,2,3 with max 4 cores in a core complex
     */
    int core_id;
    /* Node id for this core index */
    int node_id;
    /* Number of nodes in this config */
    int num_nodes;
};
450 * Build the configuration closely match the EPYC hardware. Using the EPYC
451 * hardware configuration values (MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE)
452 * right now. This could change in future.
453 * nr_cores : Total number of cores in the config
454 * core_id : Core index of the current CPU
455 * topo : Data structure to hold all the config info for this core index
457 static void build_core_topology(int nr_cores, int core_id,
458 struct core_topology *topo)
460 int nodes, cores_in_ccx;
462 /* First get the number of nodes required */
463 nodes = nodes_in_socket(nr_cores);
465 cores_in_ccx = cores_in_core_complex(nr_cores);
467 topo->node_id = core_id / (cores_in_ccx * MAX_CCX);
468 topo->ccx_id = (core_id % (cores_in_ccx * MAX_CCX)) / cores_in_ccx;
469 topo->core_id = core_id % cores_in_ccx;
470 topo->num_nodes = nodes;
473 /* Encode cache info for CPUID[8000001E] */
474 static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu,
475 uint32_t *eax, uint32_t *ebx,
476 uint32_t *ecx, uint32_t *edx)
478 struct core_topology topo = {0};
479 unsigned long nodes;
480 int shift;
482 build_core_topology(cs->nr_cores, cpu->core_id, &topo);
483 *eax = cpu->apic_id;
485 * CPUID_Fn8000001E_EBX
486 * 31:16 Reserved
487 * 15:8 Threads per core (The number of threads per core is
488 * Threads per core + 1)
489 * 7:0 Core id (see bit decoding below)
490 * SMT:
491 * 4:3 node id
492 * 2 Core complex id
493 * 1:0 Core id
494 * Non SMT:
495 * 5:4 node id
496 * 3 Core complex id
497 * 1:0 Core id
499 if (cs->nr_threads - 1) {
500 *ebx = ((cs->nr_threads - 1) << 8) | (topo.node_id << 3) |
501 (topo.ccx_id << 2) | topo.core_id;
502 } else {
503 *ebx = (topo.node_id << 4) | (topo.ccx_id << 3) | topo.core_id;
506 * CPUID_Fn8000001E_ECX
507 * 31:11 Reserved
508 * 10:8 Nodes per processor (Nodes per processor is number of nodes + 1)
509 * 7:0 Node id (see bit decoding below)
510 * 2 Socket id
511 * 1:0 Node id
513 if (topo.num_nodes <= 4) {
514 *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << 2) |
515 topo.node_id;
516 } else {
518 * Node id fix up. Actual hardware supports up to 4 nodes. But with
519 * more than 32 cores, we may end up with more than 4 nodes.
520 * Node id is a combination of socket id and node id. Only requirement
521 * here is that this number should be unique accross the system.
522 * Shift the socket id to accommodate more nodes. We dont expect both
523 * socket id and node id to be big number at the same time. This is not
524 * an ideal config but we need to to support it. Max nodes we can have
525 * is 32 (255/8) with 8 cores per node and 255 max cores. We only need
526 * 5 bits for nodes. Find the left most set bit to represent the total
527 * number of nodes. find_last_bit returns last set bit(0 based). Left
528 * shift(+1) the socket id to represent all the nodes.
530 nodes = topo.num_nodes - 1;
531 shift = find_last_bit(&nodes, 8);
532 *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << (shift + 1)) |
533 topo.node_id;
535 *edx = 0;
539 * Definitions of the hardcoded cache entries we expose:
540 * These are legacy cache values. If there is a need to change any
541 * of these values please use builtin_x86_defs
544 /* L1 data cache: */
545 static CPUCacheInfo legacy_l1d_cache = {
546 .type = DATA_CACHE,
547 .level = 1,
548 .size = 32 * KiB,
549 .self_init = 1,
550 .line_size = 64,
551 .associativity = 8,
552 .sets = 64,
553 .partitions = 1,
554 .no_invd_sharing = true,
557 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
558 static CPUCacheInfo legacy_l1d_cache_amd = {
559 .type = DATA_CACHE,
560 .level = 1,
561 .size = 64 * KiB,
562 .self_init = 1,
563 .line_size = 64,
564 .associativity = 2,
565 .sets = 512,
566 .partitions = 1,
567 .lines_per_tag = 1,
568 .no_invd_sharing = true,
571 /* L1 instruction cache: */
572 static CPUCacheInfo legacy_l1i_cache = {
573 .type = INSTRUCTION_CACHE,
574 .level = 1,
575 .size = 32 * KiB,
576 .self_init = 1,
577 .line_size = 64,
578 .associativity = 8,
579 .sets = 64,
580 .partitions = 1,
581 .no_invd_sharing = true,
584 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
585 static CPUCacheInfo legacy_l1i_cache_amd = {
586 .type = INSTRUCTION_CACHE,
587 .level = 1,
588 .size = 64 * KiB,
589 .self_init = 1,
590 .line_size = 64,
591 .associativity = 2,
592 .sets = 512,
593 .partitions = 1,
594 .lines_per_tag = 1,
595 .no_invd_sharing = true,
598 /* Level 2 unified cache: */
599 static CPUCacheInfo legacy_l2_cache = {
600 .type = UNIFIED_CACHE,
601 .level = 2,
602 .size = 4 * MiB,
603 .self_init = 1,
604 .line_size = 64,
605 .associativity = 16,
606 .sets = 4096,
607 .partitions = 1,
608 .no_invd_sharing = true,
611 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
612 static CPUCacheInfo legacy_l2_cache_cpuid2 = {
613 .type = UNIFIED_CACHE,
614 .level = 2,
615 .size = 2 * MiB,
616 .line_size = 64,
617 .associativity = 8,
621 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
622 static CPUCacheInfo legacy_l2_cache_amd = {
623 .type = UNIFIED_CACHE,
624 .level = 2,
625 .size = 512 * KiB,
626 .line_size = 64,
627 .lines_per_tag = 1,
628 .associativity = 16,
629 .sets = 512,
630 .partitions = 1,
633 /* Level 3 unified cache: */
634 static CPUCacheInfo legacy_l3_cache = {
635 .type = UNIFIED_CACHE,
636 .level = 3,
637 .size = 16 * MiB,
638 .line_size = 64,
639 .associativity = 16,
640 .sets = 16384,
641 .partitions = 1,
642 .lines_per_tag = 1,
643 .self_init = true,
644 .inclusive = true,
645 .complex_indexing = true,
648 /* TLB definitions: */
650 #define L1_DTLB_2M_ASSOC 1
651 #define L1_DTLB_2M_ENTRIES 255
652 #define L1_DTLB_4K_ASSOC 1
653 #define L1_DTLB_4K_ENTRIES 255
655 #define L1_ITLB_2M_ASSOC 1
656 #define L1_ITLB_2M_ENTRIES 255
657 #define L1_ITLB_4K_ASSOC 1
658 #define L1_ITLB_4K_ENTRIES 255
660 #define L2_DTLB_2M_ASSOC 0 /* disabled */
661 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
662 #define L2_DTLB_4K_ASSOC 4
663 #define L2_DTLB_4K_ENTRIES 512
665 #define L2_ITLB_2M_ASSOC 0 /* disabled */
666 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
667 #define L2_ITLB_4K_ASSOC 4
668 #define L2_ITLB_4K_ENTRIES 512
/* CPUID Leaf 0x14 constants: */
#define INTEL_PT_MAX_SUBLEAF     0x1
/*
 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
 *          MSR can be accessed;
 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
 *          of Intel PT MSRs across warm reset;
 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
 */
#define INTEL_PT_MINIMAL_EBX     0xf
/*
 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
 *          IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
 *          accessed;
 * bit[01]: ToPA tables can hold any number of output entries, up to the
 *          maximum allowed by the MaskOrTableOffset field of
 *          IA32_RTIT_OUTPUT_MASK_PTRS;
 * bit[02]: Support Single-Range Output scheme;
 */
#define INTEL_PT_MINIMAL_ECX     0x7
/* generated packets which contain IP payloads have LIP values */
/* Use an unsigned constant: (1 << 31) would shift into the sign bit (UB). */
#define INTEL_PT_IP_LIP          (1u << 31)
#define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
#define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
#define INTEL_PT_MTC_BITMAP      (0x0249 << 16) /* Support ART(0,3,6,9) */
#define INTEL_PT_CYCLE_BITMAP    0x1fff         /* Support 0,2^(0~11) */
#define INTEL_PT_PSB_BITMAP      (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
699 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
700 uint32_t vendor2, uint32_t vendor3)
702 int i;
703 for (i = 0; i < 4; i++) {
704 dst[i] = vendor1 >> (8 * i);
705 dst[i + 4] = vendor2 >> (8 * i);
706 dst[i + 8] = vendor3 >> (8 * i);
708 dst[CPUID_VENDOR_SZ] = '\0';
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR | \
          CPUID_EXT_RDRAND)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
          CPUID_EXT_F16C */

#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES CPUID_SVM_NPT
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT |            \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
          CPUID_7_0_EBX_ERMS)
          /* missing:
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \
          /* CPUID_7_0_ECX_OSPKE is dynamic */ \
          CPUID_7_0_ECX_LA57)
#define TCG_7_0_EDX_FEATURES 0
#define TCG_7_1_EAX_FEATURES 0
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
/* Whether a feature word is backed by a CPUID leaf or by an MSR. */
typedef enum FeatureWordType {
   CPUID_FEATURE_WORD,
   MSR_FEATURE_WORD,
} FeatureWordType;
785 typedef struct FeatureWordInfo {
786 FeatureWordType type;
787 /* feature flags names are taken from "Intel Processor Identification and
788 * the CPUID Instruction" and AMD's "CPUID Specification".
789 * In cases of disagreement between feature naming conventions,
790 * aliases may be added.
792 const char *feat_names[64];
793 union {
794 /* If type==CPUID_FEATURE_WORD */
795 struct {
796 uint32_t eax; /* Input EAX for CPUID */
797 bool needs_ecx; /* CPUID instruction uses ECX as input */
798 uint32_t ecx; /* Input ECX value for CPUID */
799 int reg; /* output register (R_* constant) */
800 } cpuid;
801 /* If type==MSR_FEATURE_WORD */
802 struct {
803 uint32_t index;
804 } msr;
806 uint64_t tcg_features; /* Feature flags supported by TCG */
807 uint64_t unmigratable_flags; /* Feature flags known to be unmigratable */
808 uint64_t migratable_flags; /* Feature flags known to be migratable */
809 /* Features that shouldn't be auto-enabled by "-cpu host" */
810 uint64_t no_autoenable_flags;
811 } FeatureWordInfo;
813 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
814 [FEAT_1_EDX] = {
815 .type = CPUID_FEATURE_WORD,
816 .feat_names = {
817 "fpu", "vme", "de", "pse",
818 "tsc", "msr", "pae", "mce",
819 "cx8", "apic", NULL, "sep",
820 "mtrr", "pge", "mca", "cmov",
821 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
822 NULL, "ds" /* Intel dts */, "acpi", "mmx",
823 "fxsr", "sse", "sse2", "ss",
824 "ht" /* Intel htt */, "tm", "ia64", "pbe",
826 .cpuid = {.eax = 1, .reg = R_EDX, },
827 .tcg_features = TCG_FEATURES,
829 [FEAT_1_ECX] = {
830 .type = CPUID_FEATURE_WORD,
831 .feat_names = {
832 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
833 "ds-cpl", "vmx", "smx", "est",
834 "tm2", "ssse3", "cid", NULL,
835 "fma", "cx16", "xtpr", "pdcm",
836 NULL, "pcid", "dca", "sse4.1",
837 "sse4.2", "x2apic", "movbe", "popcnt",
838 "tsc-deadline", "aes", "xsave", NULL /* osxsave */,
839 "avx", "f16c", "rdrand", "hypervisor",
841 .cpuid = { .eax = 1, .reg = R_ECX, },
842 .tcg_features = TCG_EXT_FEATURES,
844 /* Feature names that are already defined on feature_name[] but
845 * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
846 * names on feat_names below. They are copied automatically
847 * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
849 [FEAT_8000_0001_EDX] = {
850 .type = CPUID_FEATURE_WORD,
851 .feat_names = {
852 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
853 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
854 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
855 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
856 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
857 "nx", NULL, "mmxext", NULL /* mmx */,
858 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
859 NULL, "lm", "3dnowext", "3dnow",
861 .cpuid = { .eax = 0x80000001, .reg = R_EDX, },
862 .tcg_features = TCG_EXT2_FEATURES,
864 [FEAT_8000_0001_ECX] = {
865 .type = CPUID_FEATURE_WORD,
866 .feat_names = {
867 "lahf-lm", "cmp-legacy", "svm", "extapic",
868 "cr8legacy", "abm", "sse4a", "misalignsse",
869 "3dnowprefetch", "osvw", "ibs", "xop",
870 "skinit", "wdt", NULL, "lwp",
871 "fma4", "tce", NULL, "nodeid-msr",
872 NULL, "tbm", "topoext", "perfctr-core",
873 "perfctr-nb", NULL, NULL, NULL,
874 NULL, NULL, NULL, NULL,
876 .cpuid = { .eax = 0x80000001, .reg = R_ECX, },
877 .tcg_features = TCG_EXT3_FEATURES,
879 * TOPOEXT is always allowed but can't be enabled blindly by
880 * "-cpu host", as it requires consistent cache topology info
881 * to be provided so it doesn't confuse guests.
883 .no_autoenable_flags = CPUID_EXT3_TOPOEXT,
885 [FEAT_C000_0001_EDX] = {
886 .type = CPUID_FEATURE_WORD,
887 .feat_names = {
888 NULL, NULL, "xstore", "xstore-en",
889 NULL, NULL, "xcrypt", "xcrypt-en",
890 "ace2", "ace2-en", "phe", "phe-en",
891 "pmm", "pmm-en", NULL, NULL,
892 NULL, NULL, NULL, NULL,
893 NULL, NULL, NULL, NULL,
894 NULL, NULL, NULL, NULL,
895 NULL, NULL, NULL, NULL,
897 .cpuid = { .eax = 0xC0000001, .reg = R_EDX, },
898 .tcg_features = TCG_EXT4_FEATURES,
900 [FEAT_KVM] = {
901 .type = CPUID_FEATURE_WORD,
902 .feat_names = {
903 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
904 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
905 NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi",
906 "kvm-poll-control", "kvm-pv-sched-yield", NULL, NULL,
907 NULL, NULL, NULL, NULL,
908 NULL, NULL, NULL, NULL,
909 "kvmclock-stable-bit", NULL, NULL, NULL,
910 NULL, NULL, NULL, NULL,
912 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EAX, },
913 .tcg_features = TCG_KVM_FEATURES,
915 [FEAT_KVM_HINTS] = {
916 .type = CPUID_FEATURE_WORD,
917 .feat_names = {
918 "kvm-hint-dedicated", NULL, NULL, NULL,
919 NULL, NULL, NULL, NULL,
920 NULL, NULL, NULL, NULL,
921 NULL, NULL, NULL, NULL,
922 NULL, NULL, NULL, NULL,
923 NULL, NULL, NULL, NULL,
924 NULL, NULL, NULL, NULL,
925 NULL, NULL, NULL, NULL,
927 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EDX, },
928 .tcg_features = TCG_KVM_FEATURES,
930 * KVM hints aren't auto-enabled by -cpu host, they need to be
931 * explicitly enabled in the command-line.
933 .no_autoenable_flags = ~0U,
936 * .feat_names are commented out for Hyper-V enlightenments because we
937 * don't want to have two different ways for enabling them on QEMU command
938 * line. Some features (e.g. "hyperv_time", "hyperv_vapic", ...) require
939 * enabling several feature bits simultaneously, exposing these bits
940 * individually may just confuse guests.
942 [FEAT_HYPERV_EAX] = {
943 .type = CPUID_FEATURE_WORD,
944 .feat_names = {
945 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
946 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
947 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
948 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
949 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
950 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
951 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
952 NULL, NULL,
953 NULL, NULL, NULL, NULL,
954 NULL, NULL, NULL, NULL,
955 NULL, NULL, NULL, NULL,
956 NULL, NULL, NULL, NULL,
958 .cpuid = { .eax = 0x40000003, .reg = R_EAX, },
960 [FEAT_HYPERV_EBX] = {
961 .type = CPUID_FEATURE_WORD,
962 .feat_names = {
963 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
964 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
965 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
966 NULL /* hv_create_port */, NULL /* hv_connect_port */,
967 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
968 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
969 NULL, NULL,
970 NULL, NULL, NULL, NULL,
971 NULL, NULL, NULL, NULL,
972 NULL, NULL, NULL, NULL,
973 NULL, NULL, NULL, NULL,
975 .cpuid = { .eax = 0x40000003, .reg = R_EBX, },
977 [FEAT_HYPERV_EDX] = {
978 .type = CPUID_FEATURE_WORD,
979 .feat_names = {
980 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
981 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
982 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
983 NULL, NULL,
984 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
985 NULL, NULL, NULL, NULL,
986 NULL, NULL, NULL, NULL,
987 NULL, NULL, NULL, NULL,
988 NULL, NULL, NULL, NULL,
989 NULL, NULL, NULL, NULL,
991 .cpuid = { .eax = 0x40000003, .reg = R_EDX, },
993 [FEAT_HV_RECOMM_EAX] = {
994 .type = CPUID_FEATURE_WORD,
995 .feat_names = {
996 NULL /* hv_recommend_pv_as_switch */,
997 NULL /* hv_recommend_pv_tlbflush_local */,
998 NULL /* hv_recommend_pv_tlbflush_remote */,
999 NULL /* hv_recommend_msr_apic_access */,
1000 NULL /* hv_recommend_msr_reset */,
1001 NULL /* hv_recommend_relaxed_timing */,
1002 NULL /* hv_recommend_dma_remapping */,
1003 NULL /* hv_recommend_int_remapping */,
1004 NULL /* hv_recommend_x2apic_msrs */,
1005 NULL /* hv_recommend_autoeoi_deprecation */,
1006 NULL /* hv_recommend_pv_ipi */,
1007 NULL /* hv_recommend_ex_hypercalls */,
1008 NULL /* hv_hypervisor_is_nested */,
1009 NULL /* hv_recommend_int_mbec */,
1010 NULL /* hv_recommend_evmcs */,
1011 NULL,
1012 NULL, NULL, NULL, NULL,
1013 NULL, NULL, NULL, NULL,
1014 NULL, NULL, NULL, NULL,
1015 NULL, NULL, NULL, NULL,
1017 .cpuid = { .eax = 0x40000004, .reg = R_EAX, },
1019 [FEAT_HV_NESTED_EAX] = {
1020 .type = CPUID_FEATURE_WORD,
1021 .cpuid = { .eax = 0x4000000A, .reg = R_EAX, },
1023 [FEAT_SVM] = {
1024 .type = CPUID_FEATURE_WORD,
1025 .feat_names = {
1026 "npt", "lbrv", "svm-lock", "nrip-save",
1027 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
1028 NULL, NULL, "pause-filter", NULL,
1029 "pfthreshold", NULL, NULL, NULL,
1030 NULL, NULL, NULL, NULL,
1031 NULL, NULL, NULL, NULL,
1032 NULL, NULL, NULL, NULL,
1033 NULL, NULL, NULL, NULL,
1035 .cpuid = { .eax = 0x8000000A, .reg = R_EDX, },
1036 .tcg_features = TCG_SVM_FEATURES,
1038 [FEAT_7_0_EBX] = {
1039 .type = CPUID_FEATURE_WORD,
1040 .feat_names = {
1041 "fsgsbase", "tsc-adjust", NULL, "bmi1",
1042 "hle", "avx2", NULL, "smep",
1043 "bmi2", "erms", "invpcid", "rtm",
1044 NULL, NULL, "mpx", NULL,
1045 "avx512f", "avx512dq", "rdseed", "adx",
1046 "smap", "avx512ifma", "pcommit", "clflushopt",
1047 "clwb", "intel-pt", "avx512pf", "avx512er",
1048 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
1050 .cpuid = {
1051 .eax = 7,
1052 .needs_ecx = true, .ecx = 0,
1053 .reg = R_EBX,
1055 .tcg_features = TCG_7_0_EBX_FEATURES,
1057 [FEAT_7_0_ECX] = {
1058 .type = CPUID_FEATURE_WORD,
1059 .feat_names = {
1060 NULL, "avx512vbmi", "umip", "pku",
1061 NULL /* ospke */, NULL, "avx512vbmi2", NULL,
1062 "gfni", "vaes", "vpclmulqdq", "avx512vnni",
1063 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
1064 "la57", NULL, NULL, NULL,
1065 NULL, NULL, "rdpid", NULL,
1066 NULL, "cldemote", NULL, "movdiri",
1067 "movdir64b", NULL, NULL, NULL,
1069 .cpuid = {
1070 .eax = 7,
1071 .needs_ecx = true, .ecx = 0,
1072 .reg = R_ECX,
1074 .tcg_features = TCG_7_0_ECX_FEATURES,
1076 [FEAT_7_0_EDX] = {
1077 .type = CPUID_FEATURE_WORD,
1078 .feat_names = {
1079 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
1080 NULL, NULL, NULL, NULL,
1081 NULL, NULL, "md-clear", NULL,
1082 NULL, NULL, NULL, NULL,
1083 NULL, NULL, NULL /* pconfig */, NULL,
1084 NULL, NULL, NULL, NULL,
1085 NULL, NULL, "spec-ctrl", "stibp",
1086 NULL, "arch-capabilities", "core-capability", "ssbd",
1088 .cpuid = {
1089 .eax = 7,
1090 .needs_ecx = true, .ecx = 0,
1091 .reg = R_EDX,
1093 .tcg_features = TCG_7_0_EDX_FEATURES,
1095 [FEAT_7_1_EAX] = {
1096 .type = CPUID_FEATURE_WORD,
1097 .feat_names = {
1098 NULL, NULL, NULL, NULL,
1099 NULL, "avx512-bf16", NULL, NULL,
1100 NULL, NULL, NULL, NULL,
1101 NULL, NULL, NULL, NULL,
1102 NULL, NULL, NULL, NULL,
1103 NULL, NULL, NULL, NULL,
1104 NULL, NULL, NULL, NULL,
1105 NULL, NULL, NULL, NULL,
1107 .cpuid = {
1108 .eax = 7,
1109 .needs_ecx = true, .ecx = 1,
1110 .reg = R_EAX,
1112 .tcg_features = TCG_7_1_EAX_FEATURES,
1114 [FEAT_8000_0007_EDX] = {
1115 .type = CPUID_FEATURE_WORD,
1116 .feat_names = {
1117 NULL, NULL, NULL, NULL,
1118 NULL, NULL, NULL, NULL,
1119 "invtsc", NULL, NULL, NULL,
1120 NULL, NULL, NULL, NULL,
1121 NULL, NULL, NULL, NULL,
1122 NULL, NULL, NULL, NULL,
1123 NULL, NULL, NULL, NULL,
1124 NULL, NULL, NULL, NULL,
1126 .cpuid = { .eax = 0x80000007, .reg = R_EDX, },
1127 .tcg_features = TCG_APM_FEATURES,
1128 .unmigratable_flags = CPUID_APM_INVTSC,
1130 [FEAT_8000_0008_EBX] = {
1131 .type = CPUID_FEATURE_WORD,
1132 .feat_names = {
1133 "clzero", NULL, "xsaveerptr", NULL,
1134 NULL, NULL, NULL, NULL,
1135 NULL, "wbnoinvd", NULL, NULL,
1136 "ibpb", NULL, NULL, NULL,
1137 NULL, NULL, NULL, NULL,
1138 NULL, NULL, NULL, NULL,
1139 "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL,
1140 NULL, NULL, NULL, NULL,
1142 .cpuid = { .eax = 0x80000008, .reg = R_EBX, },
1143 .tcg_features = 0,
1144 .unmigratable_flags = 0,
1146 [FEAT_XSAVE] = {
1147 .type = CPUID_FEATURE_WORD,
1148 .feat_names = {
1149 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
1150 NULL, NULL, NULL, NULL,
1151 NULL, NULL, NULL, NULL,
1152 NULL, NULL, NULL, NULL,
1153 NULL, NULL, NULL, NULL,
1154 NULL, NULL, NULL, NULL,
1155 NULL, NULL, NULL, NULL,
1156 NULL, NULL, NULL, NULL,
1158 .cpuid = {
1159 .eax = 0xd,
1160 .needs_ecx = true, .ecx = 1,
1161 .reg = R_EAX,
1163 .tcg_features = TCG_XSAVE_FEATURES,
1165 [FEAT_6_EAX] = {
1166 .type = CPUID_FEATURE_WORD,
1167 .feat_names = {
1168 NULL, NULL, "arat", NULL,
1169 NULL, NULL, NULL, NULL,
1170 NULL, NULL, NULL, NULL,
1171 NULL, NULL, NULL, NULL,
1172 NULL, NULL, NULL, NULL,
1173 NULL, NULL, NULL, NULL,
1174 NULL, NULL, NULL, NULL,
1175 NULL, NULL, NULL, NULL,
1177 .cpuid = { .eax = 6, .reg = R_EAX, },
1178 .tcg_features = TCG_6_EAX_FEATURES,
1180 [FEAT_XSAVE_COMP_LO] = {
1181 .type = CPUID_FEATURE_WORD,
1182 .cpuid = {
1183 .eax = 0xD,
1184 .needs_ecx = true, .ecx = 0,
1185 .reg = R_EAX,
1187 .tcg_features = ~0U,
1188 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
1189 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
1190 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
1191 XSTATE_PKRU_MASK,
1193 [FEAT_XSAVE_COMP_HI] = {
1194 .type = CPUID_FEATURE_WORD,
1195 .cpuid = {
1196 .eax = 0xD,
1197 .needs_ecx = true, .ecx = 0,
1198 .reg = R_EDX,
1200 .tcg_features = ~0U,
1202 /*Below are MSR exposed features*/
1203 [FEAT_ARCH_CAPABILITIES] = {
1204 .type = MSR_FEATURE_WORD,
1205 .feat_names = {
1206 "rdctl-no", "ibrs-all", "rsba", "skip-l1dfl-vmentry",
1207 "ssb-no", "mds-no", NULL, NULL,
1208 NULL, NULL, NULL, NULL,
1209 NULL, NULL, NULL, NULL,
1210 NULL, NULL, NULL, NULL,
1211 NULL, NULL, NULL, NULL,
1212 NULL, NULL, NULL, NULL,
1213 NULL, NULL, NULL, NULL,
1215 .msr = {
1216 .index = MSR_IA32_ARCH_CAPABILITIES,
1219 [FEAT_CORE_CAPABILITY] = {
1220 .type = MSR_FEATURE_WORD,
1221 .feat_names = {
1222 NULL, NULL, NULL, NULL,
1223 NULL, "split-lock-detect", NULL, NULL,
1224 NULL, NULL, NULL, NULL,
1225 NULL, NULL, NULL, NULL,
1226 NULL, NULL, NULL, NULL,
1227 NULL, NULL, NULL, NULL,
1228 NULL, NULL, NULL, NULL,
1229 NULL, NULL, NULL, NULL,
1231 .msr = {
1232 .index = MSR_IA32_CORE_CAPABILITY,
1236 [FEAT_VMX_PROCBASED_CTLS] = {
1237 .type = MSR_FEATURE_WORD,
1238 .feat_names = {
1239 NULL, NULL, "vmx-vintr-pending", "vmx-tsc-offset",
1240 NULL, NULL, NULL, "vmx-hlt-exit",
1241 NULL, "vmx-invlpg-exit", "vmx-mwait-exit", "vmx-rdpmc-exit",
1242 "vmx-rdtsc-exit", NULL, NULL, "vmx-cr3-load-noexit",
1243 "vmx-cr3-store-noexit", NULL, NULL, "vmx-cr8-load-exit",
1244 "vmx-cr8-store-exit", "vmx-flexpriority", "vmx-vnmi-pending", "vmx-movdr-exit",
1245 "vmx-io-exit", "vmx-io-bitmap", NULL, "vmx-mtf",
1246 "vmx-msr-bitmap", "vmx-monitor-exit", "vmx-pause-exit", "vmx-secondary-ctls",
1248 .msr = {
1249 .index = MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
1253 [FEAT_VMX_SECONDARY_CTLS] = {
1254 .type = MSR_FEATURE_WORD,
1255 .feat_names = {
1256 "vmx-apicv-xapic", "vmx-ept", "vmx-desc-exit", "vmx-rdtscp-exit",
1257 "vmx-apicv-x2apic", "vmx-vpid", "vmx-wbinvd-exit", "vmx-unrestricted-guest",
1258 "vmx-apicv-register", "vmx-apicv-vid", "vmx-ple", "vmx-rdrand-exit",
1259 "vmx-invpcid-exit", "vmx-vmfunc", "vmx-shadow-vmcs", "vmx-encls-exit",
1260 "vmx-rdseed-exit", "vmx-pml", NULL, NULL,
1261 "vmx-xsaves", NULL, NULL, NULL,
1262 NULL, NULL, NULL, NULL,
1263 NULL, NULL, NULL, NULL,
1265 .msr = {
1266 .index = MSR_IA32_VMX_PROCBASED_CTLS2,
1270 [FEAT_VMX_PINBASED_CTLS] = {
1271 .type = MSR_FEATURE_WORD,
1272 .feat_names = {
1273 "vmx-intr-exit", NULL, NULL, "vmx-nmi-exit",
1274 NULL, "vmx-vnmi", "vmx-preemption-timer", "vmx-posted-intr",
1275 NULL, NULL, NULL, NULL,
1276 NULL, NULL, NULL, NULL,
1277 NULL, NULL, NULL, NULL,
1278 NULL, NULL, NULL, NULL,
1279 NULL, NULL, NULL, NULL,
1280 NULL, NULL, NULL, NULL,
1282 .msr = {
1283 .index = MSR_IA32_VMX_TRUE_PINBASED_CTLS,
1287 [FEAT_VMX_EXIT_CTLS] = {
1288 .type = MSR_FEATURE_WORD,
1290 * VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE is copied from
1291 * the LM CPUID bit.
1293 .feat_names = {
1294 NULL, NULL, "vmx-exit-nosave-debugctl", NULL,
1295 NULL, NULL, NULL, NULL,
1296 NULL, NULL /* vmx-exit-host-addr-space-size */, NULL, NULL,
1297 "vmx-exit-load-perf-global-ctrl", NULL, NULL, "vmx-exit-ack-intr",
1298 NULL, NULL, "vmx-exit-save-pat", "vmx-exit-load-pat",
1299 "vmx-exit-save-efer", "vmx-exit-load-efer",
1300 "vmx-exit-save-preemption-timer", "vmx-exit-clear-bndcfgs",
1301 NULL, "vmx-exit-clear-rtit-ctl", NULL, NULL,
1302 NULL, NULL, NULL, NULL,
1304 .msr = {
1305 .index = MSR_IA32_VMX_TRUE_EXIT_CTLS,
1309 [FEAT_VMX_ENTRY_CTLS] = {
1310 .type = MSR_FEATURE_WORD,
1311 .feat_names = {
1312 NULL, NULL, "vmx-entry-noload-debugctl", NULL,
1313 NULL, NULL, NULL, NULL,
1314 NULL, "vmx-entry-ia32e-mode", NULL, NULL,
1315 NULL, "vmx-entry-load-perf-global-ctrl", "vmx-entry-load-pat", "vmx-entry-load-efer",
1316 "vmx-entry-load-bndcfgs", NULL, "vmx-entry-load-rtit-ctl", NULL,
1317 NULL, NULL, NULL, NULL,
1318 NULL, NULL, NULL, NULL,
1319 NULL, NULL, NULL, NULL,
1321 .msr = {
1322 .index = MSR_IA32_VMX_TRUE_ENTRY_CTLS,
1326 [FEAT_VMX_MISC] = {
1327 .type = MSR_FEATURE_WORD,
1328 .feat_names = {
1329 NULL, NULL, NULL, NULL,
1330 NULL, "vmx-store-lma", "vmx-activity-hlt", "vmx-activity-shutdown",
1331 "vmx-activity-wait-sipi", NULL, NULL, NULL,
1332 NULL, NULL, NULL, NULL,
1333 NULL, NULL, NULL, NULL,
1334 NULL, NULL, NULL, NULL,
1335 NULL, NULL, NULL, NULL,
1336 NULL, "vmx-vmwrite-vmexit-fields", "vmx-zero-len-inject", NULL,
1338 .msr = {
1339 .index = MSR_IA32_VMX_MISC,
1343 [FEAT_VMX_EPT_VPID_CAPS] = {
1344 .type = MSR_FEATURE_WORD,
1345 .feat_names = {
1346 "vmx-ept-execonly", NULL, NULL, NULL,
1347 NULL, NULL, "vmx-page-walk-4", "vmx-page-walk-5",
1348 NULL, NULL, NULL, NULL,
1349 NULL, NULL, NULL, NULL,
1350 "vmx-ept-2mb", "vmx-ept-1gb", NULL, NULL,
1351 "vmx-invept", "vmx-eptad", "vmx-ept-advanced-exitinfo", NULL,
1352 NULL, "vmx-invept-single-context", "vmx-invept-all-context", NULL,
1353 NULL, NULL, NULL, NULL,
1354 "vmx-invvpid", NULL, NULL, NULL,
1355 NULL, NULL, NULL, NULL,
1356 "vmx-invvpid-single-addr", "vmx-invept-single-context",
1357 "vmx-invvpid-all-context", "vmx-invept-single-context-noglobals",
1358 NULL, NULL, NULL, NULL,
1359 NULL, NULL, NULL, NULL,
1360 NULL, NULL, NULL, NULL,
1361 NULL, NULL, NULL, NULL,
1362 NULL, NULL, NULL, NULL,
1364 .msr = {
1365 .index = MSR_IA32_VMX_EPT_VPID_CAP,
1369 [FEAT_VMX_BASIC] = {
1370 .type = MSR_FEATURE_WORD,
1371 .feat_names = {
1372 [54] = "vmx-ins-outs",
1373 [55] = "vmx-true-ctls",
1375 .msr = {
1376 .index = MSR_IA32_VMX_BASIC,
1378 /* Just to be safe - we don't support setting the MSEG version field. */
1379 .no_autoenable_flags = MSR_VMX_BASIC_DUAL_MONITOR,
1382 [FEAT_VMX_VMFUNC] = {
1383 .type = MSR_FEATURE_WORD,
1384 .feat_names = {
1385 [0] = "vmx-eptp-switching",
1387 .msr = {
1388 .index = MSR_IA32_VMX_VMFUNC,
1394 typedef struct FeatureMask {
1395 FeatureWord index;
1396 uint64_t mask;
1397 } FeatureMask;
1399 typedef struct FeatureDep {
1400 FeatureMask from, to;
1401 } FeatureDep;
1403 static FeatureDep feature_dependencies[] = {
1405 .from = { FEAT_7_0_EDX, CPUID_7_0_EDX_ARCH_CAPABILITIES },
1406 .to = { FEAT_ARCH_CAPABILITIES, ~0ull },
1409 .from = { FEAT_7_0_EDX, CPUID_7_0_EDX_CORE_CAPABILITY },
1410 .to = { FEAT_CORE_CAPABILITY, ~0ull },
1413 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1414 .to = { FEAT_VMX_PROCBASED_CTLS, ~0ull },
1417 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1418 .to = { FEAT_VMX_PINBASED_CTLS, ~0ull },
1421 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1422 .to = { FEAT_VMX_EXIT_CTLS, ~0ull },
1425 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1426 .to = { FEAT_VMX_ENTRY_CTLS, ~0ull },
1429 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1430 .to = { FEAT_VMX_MISC, ~0ull },
1433 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1434 .to = { FEAT_VMX_BASIC, ~0ull },
1437 .from = { FEAT_8000_0001_EDX, CPUID_EXT2_LM },
1438 .to = { FEAT_VMX_ENTRY_CTLS, VMX_VM_ENTRY_IA32E_MODE },
1441 .from = { FEAT_VMX_PROCBASED_CTLS, VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS },
1442 .to = { FEAT_VMX_SECONDARY_CTLS, ~0ull },
1445 .from = { FEAT_XSAVE, CPUID_XSAVE_XSAVES },
1446 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_XSAVES },
1449 .from = { FEAT_1_ECX, CPUID_EXT_RDRAND },
1450 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDRAND_EXITING },
1453 .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_INVPCID },
1454 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_INVPCID },
1457 .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_RDSEED },
1458 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDSEED_EXITING },
1461 .from = { FEAT_8000_0001_EDX, CPUID_EXT2_RDTSCP },
1462 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDTSCP },
1465 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_EPT },
1466 .to = { FEAT_VMX_EPT_VPID_CAPS, 0xffffffffull },
1469 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_EPT },
1470 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST },
1473 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_VPID },
1474 .to = { FEAT_VMX_EPT_VPID_CAPS, 0xffffffffull << 32 },
1477 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_VMFUNC },
1478 .to = { FEAT_VMX_VMFUNC, ~0ull },
1482 typedef struct X86RegisterInfo32 {
1483 /* Name of register */
1484 const char *name;
1485 /* QAPI enum value register */
1486 X86CPURegister32 qapi_enum;
1487 } X86RegisterInfo32;
1489 #define REGISTER(reg) \
1490 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
1491 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
1492 REGISTER(EAX),
1493 REGISTER(ECX),
1494 REGISTER(EDX),
1495 REGISTER(EBX),
1496 REGISTER(ESP),
1497 REGISTER(EBP),
1498 REGISTER(ESI),
1499 REGISTER(EDI),
1501 #undef REGISTER
/*
 * One XSAVE extended state component: the CPUID feature word/bits that
 * advertise it, and its offset/size within the XSAVE area.
 */
typedef struct ExtSaveArea {
    uint32_t feature, bits;
    uint32_t offset, size;
} ExtSaveArea;
1508 static const ExtSaveArea x86_ext_save_areas[] = {
1509 [XSTATE_FP_BIT] = {
1510 /* x87 FP state component is always enabled if XSAVE is supported */
1511 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
1512 /* x87 state is in the legacy region of the XSAVE area */
1513 .offset = 0,
1514 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
1516 [XSTATE_SSE_BIT] = {
1517 /* SSE state component is always enabled if XSAVE is supported */
1518 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
1519 /* SSE state is in the legacy region of the XSAVE area */
1520 .offset = 0,
1521 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
1523 [XSTATE_YMM_BIT] =
1524 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
1525 .offset = offsetof(X86XSaveArea, avx_state),
1526 .size = sizeof(XSaveAVX) },
1527 [XSTATE_BNDREGS_BIT] =
1528 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
1529 .offset = offsetof(X86XSaveArea, bndreg_state),
1530 .size = sizeof(XSaveBNDREG) },
1531 [XSTATE_BNDCSR_BIT] =
1532 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
1533 .offset = offsetof(X86XSaveArea, bndcsr_state),
1534 .size = sizeof(XSaveBNDCSR) },
1535 [XSTATE_OPMASK_BIT] =
1536 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1537 .offset = offsetof(X86XSaveArea, opmask_state),
1538 .size = sizeof(XSaveOpmask) },
1539 [XSTATE_ZMM_Hi256_BIT] =
1540 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1541 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
1542 .size = sizeof(XSaveZMM_Hi256) },
1543 [XSTATE_Hi16_ZMM_BIT] =
1544 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1545 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
1546 .size = sizeof(XSaveHi16_ZMM) },
1547 [XSTATE_PKRU_BIT] =
1548 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
1549 .offset = offsetof(X86XSaveArea, pkru_state),
1550 .size = sizeof(XSavePKRU) },
1553 static uint32_t xsave_area_size(uint64_t mask)
1555 int i;
1556 uint64_t ret = 0;
1558 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
1559 const ExtSaveArea *esa = &x86_ext_save_areas[i];
1560 if ((mask >> i) & 1) {
1561 ret = MAX(ret, esa->offset + esa->size);
1564 return ret;
/* True when the accelerator (KVM or HVF) derives CPUID from the host CPU. */
static inline bool accel_uses_host_cpuid(void)
{
    return kvm_enabled() || hvf_enabled();
}
1572 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
1574 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
1575 cpu->env.features[FEAT_XSAVE_COMP_LO];
1578 const char *get_register_name_32(unsigned int reg)
1580 if (reg >= CPU_NB_REGS32) {
1581 return NULL;
1583 return x86_reg_info_32[reg].name;
1587 * Returns the set of feature flags that are supported and migratable by
1588 * QEMU, for a given FeatureWord.
1590 static uint64_t x86_cpu_get_migratable_flags(FeatureWord w)
1592 FeatureWordInfo *wi = &feature_word_info[w];
1593 uint64_t r = 0;
1594 int i;
1596 for (i = 0; i < 64; i++) {
1597 uint64_t f = 1ULL << i;
1599 /* If the feature name is known, it is implicitly considered migratable,
1600 * unless it is explicitly set in unmigratable_flags */
1601 if ((wi->migratable_flags & f) ||
1602 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
1603 r |= f;
1606 return r;
1609 void host_cpuid(uint32_t function, uint32_t count,
1610 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
1612 uint32_t vec[4];
1614 #ifdef __x86_64__
1615 asm volatile("cpuid"
1616 : "=a"(vec[0]), "=b"(vec[1]),
1617 "=c"(vec[2]), "=d"(vec[3])
1618 : "0"(function), "c"(count) : "cc");
1619 #elif defined(__i386__)
1620 asm volatile("pusha \n\t"
1621 "cpuid \n\t"
1622 "mov %%eax, 0(%2) \n\t"
1623 "mov %%ebx, 4(%2) \n\t"
1624 "mov %%ecx, 8(%2) \n\t"
1625 "mov %%edx, 12(%2) \n\t"
1626 "popa"
1627 : : "a"(function), "c"(count), "S"(vec)
1628 : "memory", "cc");
1629 #else
1630 abort();
1631 #endif
1633 if (eax)
1634 *eax = vec[0];
1635 if (ebx)
1636 *ebx = vec[1];
1637 if (ecx)
1638 *ecx = vec[2];
1639 if (edx)
1640 *edx = vec[3];
/*
 * Query the host CPU's vendor string and family/model/stepping via CPUID
 * leaves 0 and 1.  @vendor must hold at least CPUID_VENDOR_SZ + 1 bytes;
 * the other outputs may be NULL when not wanted.
 */
void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
{
    uint32_t eax, ebx, ecx, edx;

    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    if (family) {
        /* Displayed family = base family + extended family */
        *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    }
    if (model) {
        /* Displayed model = (extended model << 4) | base model */
        *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    }
    if (stepping) {
        *stepping = eax & 0x0F;
    }
}
/* CPU class name definitions: */

/* Return type name for a given CPU model name
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}
1672 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
1674 ObjectClass *oc;
1675 char *typename = x86_cpu_type_name(cpu_model);
1676 oc = object_class_by_name(typename);
1677 g_free(typename);
1678 return oc;
1681 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
1683 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
1684 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
1685 return g_strndup(class_name,
1686 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
1689 typedef struct PropValue {
1690 const char *prop, *value;
1691 } PropValue;
1693 typedef struct X86CPUVersionDefinition {
1694 X86CPUVersion version;
1695 const char *alias;
1696 PropValue *props;
1697 } X86CPUVersionDefinition;
1699 /* Base definition for a CPU model */
1700 typedef struct X86CPUDefinition {
1701 const char *name;
1702 uint32_t level;
1703 uint32_t xlevel;
1704 /* vendor is zero-terminated, 12 character ASCII string */
1705 char vendor[CPUID_VENDOR_SZ + 1];
1706 int family;
1707 int model;
1708 int stepping;
1709 FeatureWordArray features;
1710 const char *model_id;
1711 CPUCaches *cache_info;
1713 * Definitions for alternative versions of CPU model.
1714 * List is terminated by item with version == 0.
1715 * If NULL, version 1 will be registered automatically.
1717 const X86CPUVersionDefinition *versions;
1718 } X86CPUDefinition;
1720 /* Reference to a specific CPU model version */
1721 struct X86CPUModel {
1722 /* Base CPU definition */
1723 X86CPUDefinition *cpudef;
1724 /* CPU model version */
1725 X86CPUVersion version;
1727 * If true, this is an alias CPU model.
1728 * This matters only for "-cpu help" and query-cpu-definitions
1730 bool is_alias;
1733 /* Get full model name for CPU version */
1734 static char *x86_cpu_versioned_model_name(X86CPUDefinition *cpudef,
1735 X86CPUVersion version)
1737 assert(version > 0);
1738 return g_strdup_printf("%s-v%d", cpudef->name, (int)version);
1741 static const X86CPUVersionDefinition *x86_cpu_def_get_versions(X86CPUDefinition *def)
1743 /* When X86CPUDefinition::versions is NULL, we register only v1 */
1744 static const X86CPUVersionDefinition default_version_list[] = {
1745 { 1 },
1746 { /* end of list */ }
1749 return def->versions ?: default_version_list;
1752 static CPUCaches epyc_cache_info = {
1753 .l1d_cache = &(CPUCacheInfo) {
1754 .type = DATA_CACHE,
1755 .level = 1,
1756 .size = 32 * KiB,
1757 .line_size = 64,
1758 .associativity = 8,
1759 .partitions = 1,
1760 .sets = 64,
1761 .lines_per_tag = 1,
1762 .self_init = 1,
1763 .no_invd_sharing = true,
1765 .l1i_cache = &(CPUCacheInfo) {
1766 .type = INSTRUCTION_CACHE,
1767 .level = 1,
1768 .size = 64 * KiB,
1769 .line_size = 64,
1770 .associativity = 4,
1771 .partitions = 1,
1772 .sets = 256,
1773 .lines_per_tag = 1,
1774 .self_init = 1,
1775 .no_invd_sharing = true,
1777 .l2_cache = &(CPUCacheInfo) {
1778 .type = UNIFIED_CACHE,
1779 .level = 2,
1780 .size = 512 * KiB,
1781 .line_size = 64,
1782 .associativity = 8,
1783 .partitions = 1,
1784 .sets = 1024,
1785 .lines_per_tag = 1,
1787 .l3_cache = &(CPUCacheInfo) {
1788 .type = UNIFIED_CACHE,
1789 .level = 3,
1790 .size = 8 * MiB,
1791 .line_size = 64,
1792 .associativity = 16,
1793 .partitions = 1,
1794 .sets = 8192,
1795 .lines_per_tag = 1,
1796 .self_init = true,
1797 .inclusive = true,
1798 .complex_indexing = true,
1802 static X86CPUDefinition builtin_x86_defs[] = {
1804 .name = "qemu64",
1805 .level = 0xd,
1806 .vendor = CPUID_VENDOR_AMD,
1807 .family = 6,
1808 .model = 6,
1809 .stepping = 3,
1810 .features[FEAT_1_EDX] =
1811 PPRO_FEATURES |
1812 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1813 CPUID_PSE36,
1814 .features[FEAT_1_ECX] =
1815 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1816 .features[FEAT_8000_0001_EDX] =
1817 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1818 .features[FEAT_8000_0001_ECX] =
1819 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
1820 .xlevel = 0x8000000A,
1821 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1824 .name = "phenom",
1825 .level = 5,
1826 .vendor = CPUID_VENDOR_AMD,
1827 .family = 16,
1828 .model = 2,
1829 .stepping = 3,
1830 /* Missing: CPUID_HT */
1831 .features[FEAT_1_EDX] =
1832 PPRO_FEATURES |
1833 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1834 CPUID_PSE36 | CPUID_VME,
1835 .features[FEAT_1_ECX] =
1836 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
1837 CPUID_EXT_POPCNT,
1838 .features[FEAT_8000_0001_EDX] =
1839 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
1840 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
1841 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
1842 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1843 CPUID_EXT3_CR8LEG,
1844 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1845 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
1846 .features[FEAT_8000_0001_ECX] =
1847 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
1848 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
1849 /* Missing: CPUID_SVM_LBRV */
1850 .features[FEAT_SVM] =
1851 CPUID_SVM_NPT,
1852 .xlevel = 0x8000001A,
1853 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
1856 .name = "core2duo",
1857 .level = 10,
1858 .vendor = CPUID_VENDOR_INTEL,
1859 .family = 6,
1860 .model = 15,
1861 .stepping = 11,
1862 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1863 .features[FEAT_1_EDX] =
1864 PPRO_FEATURES |
1865 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1866 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
1867 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
1868 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
1869 .features[FEAT_1_ECX] =
1870 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1871 CPUID_EXT_CX16,
1872 .features[FEAT_8000_0001_EDX] =
1873 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1874 .features[FEAT_8000_0001_ECX] =
1875 CPUID_EXT3_LAHF_LM,
1876 .xlevel = 0x80000008,
1877 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
1880 .name = "kvm64",
1881 .level = 0xd,
1882 .vendor = CPUID_VENDOR_INTEL,
1883 .family = 15,
1884 .model = 6,
1885 .stepping = 1,
1886 /* Missing: CPUID_HT */
1887 .features[FEAT_1_EDX] =
1888 PPRO_FEATURES | CPUID_VME |
1889 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1890 CPUID_PSE36,
1891 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
1892 .features[FEAT_1_ECX] =
1893 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1894 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
1895 .features[FEAT_8000_0001_EDX] =
1896 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1897 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1898 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
1899 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1900 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
1901 .features[FEAT_8000_0001_ECX] =
1903 .xlevel = 0x80000008,
1904 .model_id = "Common KVM processor"
1907 .name = "qemu32",
1908 .level = 4,
1909 .vendor = CPUID_VENDOR_INTEL,
1910 .family = 6,
1911 .model = 6,
1912 .stepping = 3,
1913 .features[FEAT_1_EDX] =
1914 PPRO_FEATURES,
1915 .features[FEAT_1_ECX] =
1916 CPUID_EXT_SSE3,
1917 .xlevel = 0x80000004,
1918 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1921 .name = "kvm32",
1922 .level = 5,
1923 .vendor = CPUID_VENDOR_INTEL,
1924 .family = 15,
1925 .model = 6,
1926 .stepping = 1,
1927 .features[FEAT_1_EDX] =
1928 PPRO_FEATURES | CPUID_VME |
1929 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
1930 .features[FEAT_1_ECX] =
1931 CPUID_EXT_SSE3,
1932 .features[FEAT_8000_0001_ECX] =
1934 .xlevel = 0x80000008,
1935 .model_id = "Common 32-bit KVM processor"
1938 .name = "coreduo",
1939 .level = 10,
1940 .vendor = CPUID_VENDOR_INTEL,
1941 .family = 6,
1942 .model = 14,
1943 .stepping = 8,
1944 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1945 .features[FEAT_1_EDX] =
1946 PPRO_FEATURES | CPUID_VME |
1947 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
1948 CPUID_SS,
1949 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
1950 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
1951 .features[FEAT_1_ECX] =
1952 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
1953 .features[FEAT_8000_0001_EDX] =
1954 CPUID_EXT2_NX,
1955 .xlevel = 0x80000008,
1956 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
1959 .name = "486",
1960 .level = 1,
1961 .vendor = CPUID_VENDOR_INTEL,
1962 .family = 4,
1963 .model = 8,
1964 .stepping = 0,
1965 .features[FEAT_1_EDX] =
1966 I486_FEATURES,
1967 .xlevel = 0,
1968 .model_id = "",
1971 .name = "pentium",
1972 .level = 1,
1973 .vendor = CPUID_VENDOR_INTEL,
1974 .family = 5,
1975 .model = 4,
1976 .stepping = 3,
1977 .features[FEAT_1_EDX] =
1978 PENTIUM_FEATURES,
1979 .xlevel = 0,
1980 .model_id = "",
1983 .name = "pentium2",
1984 .level = 2,
1985 .vendor = CPUID_VENDOR_INTEL,
1986 .family = 6,
1987 .model = 5,
1988 .stepping = 2,
1989 .features[FEAT_1_EDX] =
1990 PENTIUM2_FEATURES,
1991 .xlevel = 0,
1992 .model_id = "",
1995 .name = "pentium3",
1996 .level = 3,
1997 .vendor = CPUID_VENDOR_INTEL,
1998 .family = 6,
1999 .model = 7,
2000 .stepping = 3,
2001 .features[FEAT_1_EDX] =
2002 PENTIUM3_FEATURES,
2003 .xlevel = 0,
2004 .model_id = "",
2007 .name = "athlon",
2008 .level = 2,
2009 .vendor = CPUID_VENDOR_AMD,
2010 .family = 6,
2011 .model = 2,
2012 .stepping = 3,
2013 .features[FEAT_1_EDX] =
2014 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
2015 CPUID_MCA,
2016 .features[FEAT_8000_0001_EDX] =
2017 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
2018 .xlevel = 0x80000008,
2019 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
2022 .name = "n270",
2023 .level = 10,
2024 .vendor = CPUID_VENDOR_INTEL,
2025 .family = 6,
2026 .model = 28,
2027 .stepping = 2,
2028 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
2029 .features[FEAT_1_EDX] =
2030 PPRO_FEATURES |
2031 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
2032 CPUID_ACPI | CPUID_SS,
2033 /* Some CPUs lack CPUID_SEP */
2034 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
2035 * CPUID_EXT_XTPR */
2036 .features[FEAT_1_ECX] =
2037 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
2038 CPUID_EXT_MOVBE,
2039 .features[FEAT_8000_0001_EDX] =
2040 CPUID_EXT2_NX,
2041 .features[FEAT_8000_0001_ECX] =
2042 CPUID_EXT3_LAHF_LM,
2043 .xlevel = 0x80000008,
2044 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
2047 .name = "Conroe",
2048 .level = 10,
2049 .vendor = CPUID_VENDOR_INTEL,
2050 .family = 6,
2051 .model = 15,
2052 .stepping = 3,
2053 .features[FEAT_1_EDX] =
2054 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2055 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2056 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2057 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2058 CPUID_DE | CPUID_FP87,
2059 .features[FEAT_1_ECX] =
2060 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
2061 .features[FEAT_8000_0001_EDX] =
2062 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2063 .features[FEAT_8000_0001_ECX] =
2064 CPUID_EXT3_LAHF_LM,
2065 .xlevel = 0x80000008,
2066 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
2069 .name = "Penryn",
2070 .level = 10,
2071 .vendor = CPUID_VENDOR_INTEL,
2072 .family = 6,
2073 .model = 23,
2074 .stepping = 3,
2075 .features[FEAT_1_EDX] =
2076 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2077 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2078 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2079 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2080 CPUID_DE | CPUID_FP87,
2081 .features[FEAT_1_ECX] =
2082 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2083 CPUID_EXT_SSE3,
2084 .features[FEAT_8000_0001_EDX] =
2085 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2086 .features[FEAT_8000_0001_ECX] =
2087 CPUID_EXT3_LAHF_LM,
2088 .xlevel = 0x80000008,
2089 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
2092 .name = "Nehalem",
2093 .level = 11,
2094 .vendor = CPUID_VENDOR_INTEL,
2095 .family = 6,
2096 .model = 26,
2097 .stepping = 3,
2098 .features[FEAT_1_EDX] =
2099 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2100 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2101 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2102 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2103 CPUID_DE | CPUID_FP87,
2104 .features[FEAT_1_ECX] =
2105 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2106 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
2107 .features[FEAT_8000_0001_EDX] =
2108 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
2109 .features[FEAT_8000_0001_ECX] =
2110 CPUID_EXT3_LAHF_LM,
2111 .xlevel = 0x80000008,
2112 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
2113 .versions = (X86CPUVersionDefinition[]) {
2114 { .version = 1 },
2116 .version = 2,
2117 .alias = "Nehalem-IBRS",
2118 .props = (PropValue[]) {
2119 { "spec-ctrl", "on" },
2120 { "model-id",
2121 "Intel Core i7 9xx (Nehalem Core i7, IBRS update)" },
2122 { /* end of list */ }
2125 { /* end of list */ }
2129 .name = "Westmere",
2130 .level = 11,
2131 .vendor = CPUID_VENDOR_INTEL,
2132 .family = 6,
2133 .model = 44,
2134 .stepping = 1,
2135 .features[FEAT_1_EDX] =
2136 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2137 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2138 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2139 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2140 CPUID_DE | CPUID_FP87,
2141 .features[FEAT_1_ECX] =
2142 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
2143 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2144 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2145 .features[FEAT_8000_0001_EDX] =
2146 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
2147 .features[FEAT_8000_0001_ECX] =
2148 CPUID_EXT3_LAHF_LM,
2149 .features[FEAT_6_EAX] =
2150 CPUID_6_EAX_ARAT,
2151 .xlevel = 0x80000008,
2152 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
2153 .versions = (X86CPUVersionDefinition[]) {
2154 { .version = 1 },
2156 .version = 2,
2157 .alias = "Westmere-IBRS",
2158 .props = (PropValue[]) {
2159 { "spec-ctrl", "on" },
2160 { "model-id",
2161 "Westmere E56xx/L56xx/X56xx (IBRS update)" },
2162 { /* end of list */ }
2165 { /* end of list */ }
2169 .name = "SandyBridge",
2170 .level = 0xd,
2171 .vendor = CPUID_VENDOR_INTEL,
2172 .family = 6,
2173 .model = 42,
2174 .stepping = 1,
2175 .features[FEAT_1_EDX] =
2176 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2177 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2178 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2179 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2180 CPUID_DE | CPUID_FP87,
2181 .features[FEAT_1_ECX] =
2182 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2183 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
2184 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2185 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2186 CPUID_EXT_SSE3,
2187 .features[FEAT_8000_0001_EDX] =
2188 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2189 CPUID_EXT2_SYSCALL,
2190 .features[FEAT_8000_0001_ECX] =
2191 CPUID_EXT3_LAHF_LM,
2192 .features[FEAT_XSAVE] =
2193 CPUID_XSAVE_XSAVEOPT,
2194 .features[FEAT_6_EAX] =
2195 CPUID_6_EAX_ARAT,
2196 .xlevel = 0x80000008,
2197 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
2198 .versions = (X86CPUVersionDefinition[]) {
2199 { .version = 1 },
2201 .version = 2,
2202 .alias = "SandyBridge-IBRS",
2203 .props = (PropValue[]) {
2204 { "spec-ctrl", "on" },
2205 { "model-id",
2206 "Intel Xeon E312xx (Sandy Bridge, IBRS update)" },
2207 { /* end of list */ }
2210 { /* end of list */ }
2214 .name = "IvyBridge",
2215 .level = 0xd,
2216 .vendor = CPUID_VENDOR_INTEL,
2217 .family = 6,
2218 .model = 58,
2219 .stepping = 9,
2220 .features[FEAT_1_EDX] =
2221 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2222 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2223 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2224 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2225 CPUID_DE | CPUID_FP87,
2226 .features[FEAT_1_ECX] =
2227 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2228 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
2229 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2230 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2231 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2232 .features[FEAT_7_0_EBX] =
2233 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
2234 CPUID_7_0_EBX_ERMS,
2235 .features[FEAT_8000_0001_EDX] =
2236 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2237 CPUID_EXT2_SYSCALL,
2238 .features[FEAT_8000_0001_ECX] =
2239 CPUID_EXT3_LAHF_LM,
2240 .features[FEAT_XSAVE] =
2241 CPUID_XSAVE_XSAVEOPT,
2242 .features[FEAT_6_EAX] =
2243 CPUID_6_EAX_ARAT,
2244 .xlevel = 0x80000008,
2245 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
2246 .versions = (X86CPUVersionDefinition[]) {
2247 { .version = 1 },
2249 .version = 2,
2250 .alias = "IvyBridge-IBRS",
2251 .props = (PropValue[]) {
2252 { "spec-ctrl", "on" },
2253 { "model-id",
2254 "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)" },
2255 { /* end of list */ }
2258 { /* end of list */ }
2262 .name = "Haswell",
2263 .level = 0xd,
2264 .vendor = CPUID_VENDOR_INTEL,
2265 .family = 6,
2266 .model = 60,
2267 .stepping = 4,
2268 .features[FEAT_1_EDX] =
2269 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2270 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2271 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2272 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2273 CPUID_DE | CPUID_FP87,
2274 .features[FEAT_1_ECX] =
2275 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2276 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2277 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2278 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2279 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2280 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2281 .features[FEAT_8000_0001_EDX] =
2282 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2283 CPUID_EXT2_SYSCALL,
2284 .features[FEAT_8000_0001_ECX] =
2285 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
2286 .features[FEAT_7_0_EBX] =
2287 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2288 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2289 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2290 CPUID_7_0_EBX_RTM,
2291 .features[FEAT_XSAVE] =
2292 CPUID_XSAVE_XSAVEOPT,
2293 .features[FEAT_6_EAX] =
2294 CPUID_6_EAX_ARAT,
2295 .xlevel = 0x80000008,
2296 .model_id = "Intel Core Processor (Haswell)",
2297 .versions = (X86CPUVersionDefinition[]) {
2298 { .version = 1 },
2300 .version = 2,
2301 .alias = "Haswell-noTSX",
2302 .props = (PropValue[]) {
2303 { "hle", "off" },
2304 { "rtm", "off" },
2305 { "stepping", "1" },
2306 { "model-id", "Intel Core Processor (Haswell, no TSX)", },
2307 { /* end of list */ }
2311 .version = 3,
2312 .alias = "Haswell-IBRS",
2313 .props = (PropValue[]) {
2314 /* Restore TSX features removed by -v2 above */
2315 { "hle", "on" },
2316 { "rtm", "on" },
2318 * Haswell and Haswell-IBRS had stepping=4 in
2319 * QEMU 4.0 and older
2321 { "stepping", "4" },
2322 { "spec-ctrl", "on" },
2323 { "model-id",
2324 "Intel Core Processor (Haswell, IBRS)" },
2325 { /* end of list */ }
2329 .version = 4,
2330 .alias = "Haswell-noTSX-IBRS",
2331 .props = (PropValue[]) {
2332 { "hle", "off" },
2333 { "rtm", "off" },
2334 /* spec-ctrl was already enabled by -v3 above */
2335 { "stepping", "1" },
2336 { "model-id",
2337 "Intel Core Processor (Haswell, no TSX, IBRS)" },
2338 { /* end of list */ }
2341 { /* end of list */ }
2345 .name = "Broadwell",
2346 .level = 0xd,
2347 .vendor = CPUID_VENDOR_INTEL,
2348 .family = 6,
2349 .model = 61,
2350 .stepping = 2,
2351 .features[FEAT_1_EDX] =
2352 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2353 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2354 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2355 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2356 CPUID_DE | CPUID_FP87,
2357 .features[FEAT_1_ECX] =
2358 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2359 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2360 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2361 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2362 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2363 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2364 .features[FEAT_8000_0001_EDX] =
2365 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2366 CPUID_EXT2_SYSCALL,
2367 .features[FEAT_8000_0001_ECX] =
2368 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2369 .features[FEAT_7_0_EBX] =
2370 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2371 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2372 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2373 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2374 CPUID_7_0_EBX_SMAP,
2375 .features[FEAT_XSAVE] =
2376 CPUID_XSAVE_XSAVEOPT,
2377 .features[FEAT_6_EAX] =
2378 CPUID_6_EAX_ARAT,
2379 .xlevel = 0x80000008,
2380 .model_id = "Intel Core Processor (Broadwell)",
2381 .versions = (X86CPUVersionDefinition[]) {
2382 { .version = 1 },
2384 .version = 2,
2385 .alias = "Broadwell-noTSX",
2386 .props = (PropValue[]) {
2387 { "hle", "off" },
2388 { "rtm", "off" },
2389 { "model-id", "Intel Core Processor (Broadwell, no TSX)", },
2390 { /* end of list */ }
2394 .version = 3,
2395 .alias = "Broadwell-IBRS",
2396 .props = (PropValue[]) {
2397 /* Restore TSX features removed by -v2 above */
2398 { "hle", "on" },
2399 { "rtm", "on" },
2400 { "spec-ctrl", "on" },
2401 { "model-id",
2402 "Intel Core Processor (Broadwell, IBRS)" },
2403 { /* end of list */ }
2407 .version = 4,
2408 .alias = "Broadwell-noTSX-IBRS",
2409 .props = (PropValue[]) {
2410 { "hle", "off" },
2411 { "rtm", "off" },
2412 /* spec-ctrl was already enabled by -v3 above */
2413 { "model-id",
2414 "Intel Core Processor (Broadwell, no TSX, IBRS)" },
2415 { /* end of list */ }
2418 { /* end of list */ }
2422 .name = "Skylake-Client",
2423 .level = 0xd,
2424 .vendor = CPUID_VENDOR_INTEL,
2425 .family = 6,
2426 .model = 94,
2427 .stepping = 3,
2428 .features[FEAT_1_EDX] =
2429 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2430 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2431 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2432 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2433 CPUID_DE | CPUID_FP87,
2434 .features[FEAT_1_ECX] =
2435 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2436 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2437 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2438 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2439 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2440 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2441 .features[FEAT_8000_0001_EDX] =
2442 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2443 CPUID_EXT2_SYSCALL,
2444 .features[FEAT_8000_0001_ECX] =
2445 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2446 .features[FEAT_7_0_EBX] =
2447 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2448 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2449 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2450 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2451 CPUID_7_0_EBX_SMAP,
2452 /* Missing: XSAVES (not supported by some Linux versions,
2453 * including v4.1 to v4.12).
2454 * KVM doesn't yet expose any XSAVES state save component,
2455 * and the only one defined in Skylake (processor tracing)
2456 * probably will block migration anyway.
2458 .features[FEAT_XSAVE] =
2459 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2460 CPUID_XSAVE_XGETBV1,
2461 .features[FEAT_6_EAX] =
2462 CPUID_6_EAX_ARAT,
2463 .xlevel = 0x80000008,
2464 .model_id = "Intel Core Processor (Skylake)",
2465 .versions = (X86CPUVersionDefinition[]) {
2466 { .version = 1 },
2468 .version = 2,
2469 .alias = "Skylake-Client-IBRS",
2470 .props = (PropValue[]) {
2471 { "spec-ctrl", "on" },
2472 { "model-id",
2473 "Intel Core Processor (Skylake, IBRS)" },
2474 { /* end of list */ }
2477 { /* end of list */ }
2481 .name = "Skylake-Server",
2482 .level = 0xd,
2483 .vendor = CPUID_VENDOR_INTEL,
2484 .family = 6,
2485 .model = 85,
2486 .stepping = 4,
2487 .features[FEAT_1_EDX] =
2488 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2489 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2490 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2491 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2492 CPUID_DE | CPUID_FP87,
2493 .features[FEAT_1_ECX] =
2494 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2495 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2496 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2497 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2498 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2499 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2500 .features[FEAT_8000_0001_EDX] =
2501 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2502 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2503 .features[FEAT_8000_0001_ECX] =
2504 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2505 .features[FEAT_7_0_EBX] =
2506 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2507 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2508 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2509 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2510 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
2511 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2512 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2513 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2514 .features[FEAT_7_0_ECX] =
2515 CPUID_7_0_ECX_PKU,
2516 /* Missing: XSAVES (not supported by some Linux versions,
2517 * including v4.1 to v4.12).
2518 * KVM doesn't yet expose any XSAVES state save component,
2519 * and the only one defined in Skylake (processor tracing)
2520 * probably will block migration anyway.
2522 .features[FEAT_XSAVE] =
2523 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2524 CPUID_XSAVE_XGETBV1,
2525 .features[FEAT_6_EAX] =
2526 CPUID_6_EAX_ARAT,
2527 .xlevel = 0x80000008,
2528 .model_id = "Intel Xeon Processor (Skylake)",
2529 .versions = (X86CPUVersionDefinition[]) {
2530 { .version = 1 },
2532 .version = 2,
2533 .alias = "Skylake-Server-IBRS",
2534 .props = (PropValue[]) {
2535 /* clflushopt was not added to Skylake-Server-IBRS */
2536 /* TODO: add -v3 including clflushopt */
2537 { "clflushopt", "off" },
2538 { "spec-ctrl", "on" },
2539 { "model-id",
2540 "Intel Xeon Processor (Skylake, IBRS)" },
2541 { /* end of list */ }
2544 { /* end of list */ }
2548 .name = "Cascadelake-Server",
2549 .level = 0xd,
2550 .vendor = CPUID_VENDOR_INTEL,
2551 .family = 6,
2552 .model = 85,
2553 .stepping = 6,
2554 .features[FEAT_1_EDX] =
2555 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2556 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2557 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2558 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2559 CPUID_DE | CPUID_FP87,
2560 .features[FEAT_1_ECX] =
2561 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2562 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2563 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2564 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2565 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2566 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2567 .features[FEAT_8000_0001_EDX] =
2568 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2569 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2570 .features[FEAT_8000_0001_ECX] =
2571 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2572 .features[FEAT_7_0_EBX] =
2573 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2574 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2575 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2576 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2577 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
2578 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2579 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2580 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2581 .features[FEAT_7_0_ECX] =
2582 CPUID_7_0_ECX_PKU |
2583 CPUID_7_0_ECX_AVX512VNNI,
2584 .features[FEAT_7_0_EDX] =
2585 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
2586 /* Missing: XSAVES (not supported by some Linux versions,
2587 * including v4.1 to v4.12).
2588 * KVM doesn't yet expose any XSAVES state save component,
2589 * and the only one defined in Skylake (processor tracing)
2590 * probably will block migration anyway.
2592 .features[FEAT_XSAVE] =
2593 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2594 CPUID_XSAVE_XGETBV1,
2595 .features[FEAT_6_EAX] =
2596 CPUID_6_EAX_ARAT,
2597 .xlevel = 0x80000008,
2598 .model_id = "Intel Xeon Processor (Cascadelake)",
2599 .versions = (X86CPUVersionDefinition[]) {
2600 { .version = 1 },
2601 { .version = 2,
2602 .props = (PropValue[]) {
2603 { "arch-capabilities", "on" },
2604 { "rdctl-no", "on" },
2605 { "ibrs-all", "on" },
2606 { "skip-l1dfl-vmentry", "on" },
2607 { "mds-no", "on" },
2608 { /* end of list */ }
2611 { /* end of list */ }
2615 .name = "Icelake-Client",
2616 .level = 0xd,
2617 .vendor = CPUID_VENDOR_INTEL,
2618 .family = 6,
2619 .model = 126,
2620 .stepping = 0,
2621 .features[FEAT_1_EDX] =
2622 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2623 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2624 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2625 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2626 CPUID_DE | CPUID_FP87,
2627 .features[FEAT_1_ECX] =
2628 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2629 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2630 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2631 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2632 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2633 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2634 .features[FEAT_8000_0001_EDX] =
2635 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2636 CPUID_EXT2_SYSCALL,
2637 .features[FEAT_8000_0001_ECX] =
2638 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2639 .features[FEAT_8000_0008_EBX] =
2640 CPUID_8000_0008_EBX_WBNOINVD,
2641 .features[FEAT_7_0_EBX] =
2642 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2643 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2644 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2645 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2646 CPUID_7_0_EBX_SMAP,
2647 .features[FEAT_7_0_ECX] =
2648 CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
2649 CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI |
2650 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
2651 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
2652 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
2653 .features[FEAT_7_0_EDX] =
2654 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
2655 /* Missing: XSAVES (not supported by some Linux versions,
2656 * including v4.1 to v4.12).
2657 * KVM doesn't yet expose any XSAVES state save component,
2658 * and the only one defined in Skylake (processor tracing)
2659 * probably will block migration anyway.
2661 .features[FEAT_XSAVE] =
2662 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2663 CPUID_XSAVE_XGETBV1,
2664 .features[FEAT_6_EAX] =
2665 CPUID_6_EAX_ARAT,
2666 .xlevel = 0x80000008,
2667 .model_id = "Intel Core Processor (Icelake)",
2670 .name = "Icelake-Server",
2671 .level = 0xd,
2672 .vendor = CPUID_VENDOR_INTEL,
2673 .family = 6,
2674 .model = 134,
2675 .stepping = 0,
2676 .features[FEAT_1_EDX] =
2677 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2678 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2679 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2680 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2681 CPUID_DE | CPUID_FP87,
2682 .features[FEAT_1_ECX] =
2683 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2684 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2685 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2686 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2687 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2688 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2689 .features[FEAT_8000_0001_EDX] =
2690 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2691 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2692 .features[FEAT_8000_0001_ECX] =
2693 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2694 .features[FEAT_8000_0008_EBX] =
2695 CPUID_8000_0008_EBX_WBNOINVD,
2696 .features[FEAT_7_0_EBX] =
2697 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2698 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2699 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2700 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2701 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
2702 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2703 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2704 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2705 .features[FEAT_7_0_ECX] =
2706 CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
2707 CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI |
2708 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
2709 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
2710 CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57,
2711 .features[FEAT_7_0_EDX] =
2712 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
2713 /* Missing: XSAVES (not supported by some Linux versions,
2714 * including v4.1 to v4.12).
2715 * KVM doesn't yet expose any XSAVES state save component,
2716 * and the only one defined in Skylake (processor tracing)
2717 * probably will block migration anyway.
2719 .features[FEAT_XSAVE] =
2720 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2721 CPUID_XSAVE_XGETBV1,
2722 .features[FEAT_6_EAX] =
2723 CPUID_6_EAX_ARAT,
2724 .xlevel = 0x80000008,
2725 .model_id = "Intel Xeon Processor (Icelake)",
2728 .name = "Snowridge",
2729 .level = 27,
2730 .vendor = CPUID_VENDOR_INTEL,
2731 .family = 6,
2732 .model = 134,
2733 .stepping = 1,
2734 .features[FEAT_1_EDX] =
2735 /* missing: CPUID_PN CPUID_IA64 */
2736 /* missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
2737 CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE |
2738 CPUID_TSC | CPUID_MSR | CPUID_PAE | CPUID_MCE |
2739 CPUID_CX8 | CPUID_APIC | CPUID_SEP |
2740 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV |
2741 CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH |
2742 CPUID_MMX |
2743 CPUID_FXSR | CPUID_SSE | CPUID_SSE2,
2744 .features[FEAT_1_ECX] =
2745 CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR |
2746 CPUID_EXT_SSSE3 |
2747 CPUID_EXT_CX16 |
2748 CPUID_EXT_SSE41 |
2749 CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE |
2750 CPUID_EXT_POPCNT |
2751 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_AES | CPUID_EXT_XSAVE |
2752 CPUID_EXT_RDRAND,
2753 .features[FEAT_8000_0001_EDX] =
2754 CPUID_EXT2_SYSCALL |
2755 CPUID_EXT2_NX |
2756 CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2757 CPUID_EXT2_LM,
2758 .features[FEAT_8000_0001_ECX] =
2759 CPUID_EXT3_LAHF_LM |
2760 CPUID_EXT3_3DNOWPREFETCH,
2761 .features[FEAT_7_0_EBX] =
2762 CPUID_7_0_EBX_FSGSBASE |
2763 CPUID_7_0_EBX_SMEP |
2764 CPUID_7_0_EBX_ERMS |
2765 CPUID_7_0_EBX_MPX | /* missing bits 13, 15 */
2766 CPUID_7_0_EBX_RDSEED |
2767 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2768 CPUID_7_0_EBX_CLWB |
2769 CPUID_7_0_EBX_SHA_NI,
2770 .features[FEAT_7_0_ECX] =
2771 CPUID_7_0_ECX_UMIP |
2772 /* missing bit 5 */
2773 CPUID_7_0_ECX_GFNI |
2774 CPUID_7_0_ECX_MOVDIRI | CPUID_7_0_ECX_CLDEMOTE |
2775 CPUID_7_0_ECX_MOVDIR64B,
2776 .features[FEAT_7_0_EDX] =
2777 CPUID_7_0_EDX_SPEC_CTRL |
2778 CPUID_7_0_EDX_ARCH_CAPABILITIES | CPUID_7_0_EDX_SPEC_CTRL_SSBD |
2779 CPUID_7_0_EDX_CORE_CAPABILITY,
2780 .features[FEAT_CORE_CAPABILITY] =
2781 MSR_CORE_CAP_SPLIT_LOCK_DETECT,
2783 * Missing: XSAVES (not supported by some Linux versions,
2784 * including v4.1 to v4.12).
2785 * KVM doesn't yet expose any XSAVES state save component,
2786 * and the only one defined in Skylake (processor tracing)
2787 * probably will block migration anyway.
2789 .features[FEAT_XSAVE] =
2790 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2791 CPUID_XSAVE_XGETBV1,
2792 .features[FEAT_6_EAX] =
2793 CPUID_6_EAX_ARAT,
2794 .xlevel = 0x80000008,
2795 .model_id = "Intel Atom Processor (SnowRidge)",
2796 .versions = (X86CPUVersionDefinition[]) {
2797 { .version = 1 },
2799 .version = 2,
2800 .props = (PropValue[]) {
2801 { "mpx", "off" },
2802 { "model-id", "Intel Atom Processor (Snowridge, no MPX)" },
2803 { /* end of list */ },
2806 { /* end of list */ },
2810 .name = "KnightsMill",
2811 .level = 0xd,
2812 .vendor = CPUID_VENDOR_INTEL,
2813 .family = 6,
2814 .model = 133,
2815 .stepping = 0,
2816 .features[FEAT_1_EDX] =
2817 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR |
2818 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV |
2819 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC |
2820 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC |
2821 CPUID_PSE | CPUID_DE | CPUID_FP87,
2822 .features[FEAT_1_ECX] =
2823 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2824 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2825 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2826 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2827 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2828 CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2829 .features[FEAT_8000_0001_EDX] =
2830 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2831 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2832 .features[FEAT_8000_0001_ECX] =
2833 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2834 .features[FEAT_7_0_EBX] =
2835 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2836 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS |
2837 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F |
2838 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF |
2839 CPUID_7_0_EBX_AVX512ER,
2840 .features[FEAT_7_0_ECX] =
2841 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
2842 .features[FEAT_7_0_EDX] =
2843 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS,
2844 .features[FEAT_XSAVE] =
2845 CPUID_XSAVE_XSAVEOPT,
2846 .features[FEAT_6_EAX] =
2847 CPUID_6_EAX_ARAT,
2848 .xlevel = 0x80000008,
2849 .model_id = "Intel Xeon Phi Processor (Knights Mill)",
2852 .name = "Opteron_G1",
2853 .level = 5,
2854 .vendor = CPUID_VENDOR_AMD,
2855 .family = 15,
2856 .model = 6,
2857 .stepping = 1,
2858 .features[FEAT_1_EDX] =
2859 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2860 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2861 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2862 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2863 CPUID_DE | CPUID_FP87,
2864 .features[FEAT_1_ECX] =
2865 CPUID_EXT_SSE3,
2866 .features[FEAT_8000_0001_EDX] =
2867 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2868 .xlevel = 0x80000008,
2869 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
2872 .name = "Opteron_G2",
2873 .level = 5,
2874 .vendor = CPUID_VENDOR_AMD,
2875 .family = 15,
2876 .model = 6,
2877 .stepping = 1,
2878 .features[FEAT_1_EDX] =
2879 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2880 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2881 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2882 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2883 CPUID_DE | CPUID_FP87,
2884 .features[FEAT_1_ECX] =
2885 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
2886 .features[FEAT_8000_0001_EDX] =
2887 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2888 .features[FEAT_8000_0001_ECX] =
2889 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2890 .xlevel = 0x80000008,
2891 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
2894 .name = "Opteron_G3",
2895 .level = 5,
2896 .vendor = CPUID_VENDOR_AMD,
2897 .family = 16,
2898 .model = 2,
2899 .stepping = 3,
2900 .features[FEAT_1_EDX] =
2901 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2902 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2903 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2904 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2905 CPUID_DE | CPUID_FP87,
2906 .features[FEAT_1_ECX] =
2907 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
2908 CPUID_EXT_SSE3,
2909 .features[FEAT_8000_0001_EDX] =
2910 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL |
2911 CPUID_EXT2_RDTSCP,
2912 .features[FEAT_8000_0001_ECX] =
2913 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
2914 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2915 .xlevel = 0x80000008,
2916 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
2919 .name = "Opteron_G4",
2920 .level = 0xd,
2921 .vendor = CPUID_VENDOR_AMD,
2922 .family = 21,
2923 .model = 1,
2924 .stepping = 2,
2925 .features[FEAT_1_EDX] =
2926 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2927 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2928 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2929 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2930 CPUID_DE | CPUID_FP87,
2931 .features[FEAT_1_ECX] =
2932 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2933 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2934 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2935 CPUID_EXT_SSE3,
2936 .features[FEAT_8000_0001_EDX] =
2937 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2938 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP,
2939 .features[FEAT_8000_0001_ECX] =
2940 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2941 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2942 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2943 CPUID_EXT3_LAHF_LM,
2944 .features[FEAT_SVM] =
2945 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
2946 /* no xsaveopt! */
2947 .xlevel = 0x8000001A,
2948 .model_id = "AMD Opteron 62xx class CPU",
2951 .name = "Opteron_G5",
2952 .level = 0xd,
2953 .vendor = CPUID_VENDOR_AMD,
2954 .family = 21,
2955 .model = 2,
2956 .stepping = 0,
2957 .features[FEAT_1_EDX] =
2958 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2959 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2960 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2961 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2962 CPUID_DE | CPUID_FP87,
2963 .features[FEAT_1_ECX] =
2964 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
2965 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
2966 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
2967 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2968 .features[FEAT_8000_0001_EDX] =
2969 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2970 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP,
2971 .features[FEAT_8000_0001_ECX] =
2972 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2973 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2974 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2975 CPUID_EXT3_LAHF_LM,
2976 .features[FEAT_SVM] =
2977 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
2978 /* no xsaveopt! */
2979 .xlevel = 0x8000001A,
2980 .model_id = "AMD Opteron 63xx class CPU",
2983 .name = "EPYC",
2984 .level = 0xd,
2985 .vendor = CPUID_VENDOR_AMD,
2986 .family = 23,
2987 .model = 1,
2988 .stepping = 2,
2989 .features[FEAT_1_EDX] =
2990 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2991 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2992 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2993 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2994 CPUID_VME | CPUID_FP87,
2995 .features[FEAT_1_ECX] =
2996 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2997 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2998 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2999 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
3000 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
3001 .features[FEAT_8000_0001_EDX] =
3002 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
3003 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
3004 CPUID_EXT2_SYSCALL,
3005 .features[FEAT_8000_0001_ECX] =
3006 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
3007 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
3008 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
3009 CPUID_EXT3_TOPOEXT,
3010 .features[FEAT_7_0_EBX] =
3011 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
3012 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
3013 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
3014 CPUID_7_0_EBX_SHA_NI,
3015 /* Missing: XSAVES (not supported by some Linux versions,
3016 * including v4.1 to v4.12).
3017 * KVM doesn't yet expose any XSAVES state save component.
3019 .features[FEAT_XSAVE] =
3020 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3021 CPUID_XSAVE_XGETBV1,
3022 .features[FEAT_6_EAX] =
3023 CPUID_6_EAX_ARAT,
3024 .features[FEAT_SVM] =
3025 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
3026 .xlevel = 0x8000001E,
3027 .model_id = "AMD EPYC Processor",
3028 .cache_info = &epyc_cache_info,
3029 .versions = (X86CPUVersionDefinition[]) {
3030 { .version = 1 },
3032 .version = 2,
3033 .alias = "EPYC-IBPB",
3034 .props = (PropValue[]) {
3035 { "ibpb", "on" },
3036 { "model-id",
3037 "AMD EPYC Processor (with IBPB)" },
3038 { /* end of list */ }
3041 { /* end of list */ }
3045 .name = "Dhyana",
3046 .level = 0xd,
3047 .vendor = CPUID_VENDOR_HYGON,
3048 .family = 24,
3049 .model = 0,
3050 .stepping = 1,
3051 .features[FEAT_1_EDX] =
3052 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
3053 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
3054 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
3055 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
3056 CPUID_VME | CPUID_FP87,
3057 .features[FEAT_1_ECX] =
3058 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
3059 CPUID_EXT_XSAVE | CPUID_EXT_POPCNT |
3060 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
3061 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
3062 CPUID_EXT_MONITOR | CPUID_EXT_SSE3,
3063 .features[FEAT_8000_0001_EDX] =
3064 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
3065 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
3066 CPUID_EXT2_SYSCALL,
3067 .features[FEAT_8000_0001_ECX] =
3068 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
3069 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
3070 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
3071 CPUID_EXT3_TOPOEXT,
3072 .features[FEAT_8000_0008_EBX] =
3073 CPUID_8000_0008_EBX_IBPB,
3074 .features[FEAT_7_0_EBX] =
3075 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
3076 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
3077 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT,
3079 * Missing: XSAVES (not supported by some Linux versions,
3080 * including v4.1 to v4.12).
3081 * KVM doesn't yet expose any XSAVES state save component.
3083 .features[FEAT_XSAVE] =
3084 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3085 CPUID_XSAVE_XGETBV1,
3086 .features[FEAT_6_EAX] =
3087 CPUID_6_EAX_ARAT,
3088 .features[FEAT_SVM] =
3089 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
3090 .xlevel = 0x8000001E,
3091 .model_id = "Hygon Dhyana Processor",
3092 .cache_info = &epyc_cache_info,
/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 *
 * Entries can be overridden at run time via
 * x86_cpu_change_kvm_default(); the list is terminated by a
 * { NULL, NULL } sentinel.
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "x2apic", "on" },
    { "acpi", "off" },
    { "monitor", "off" },
    { "svm", "off" },
    { NULL, NULL },
};
/* TCG-specific defaults that override all CPU models when using TCG
 */
static PropValue tcg_default_props[] = {
    { "vme", "off" },
    { NULL, NULL },   /* sentinel */
};
/* Machine-wide default CPU model version; resolved when a model uses
 * CPU_VERSION_AUTO (see x86_cpu_model_resolve_version()). */
X86CPUVersion default_cpu_version = CPU_VERSION_LATEST;
/* Set the default CPU model version used to resolve CPU_VERSION_AUTO. */
void x86_cpu_set_default_version(X86CPUVersion version)
{
    /* Translating CPU_VERSION_AUTO to CPU_VERSION_AUTO doesn't make sense */
    assert(version != CPU_VERSION_AUTO);
    default_cpu_version = version;
}
/* Return the highest version number defined for @model's CPU definition,
 * or 0 if the definition has no version list.
 */
static X86CPUVersion x86_cpu_model_last_version(const X86CPUModel *model)
{
    int v = 0;
    const X86CPUVersionDefinition *vdef =
        x86_cpu_def_get_versions(model->cpudef);
    /* The version array is terminated by an entry with version == 0 */
    while (vdef->version) {
        v = vdef->version;
        vdef++;
    }
    return v;
}
/* Return the actual version being used for a specific CPU model */
static X86CPUVersion x86_cpu_model_resolve_version(const X86CPUModel *model)
{
    X86CPUVersion v = model->version;
    /* AUTO defers to the machine-type default... */
    if (v == CPU_VERSION_AUTO) {
        v = default_cpu_version;
    }
    /* ...which may itself be LATEST: pick the newest defined version */
    if (v == CPU_VERSION_LATEST) {
        return x86_cpu_model_last_version(model);
    }
    return v;
}
/* Override the default value of @prop in the kvm_default_props table.
 * @prop must already be present in the table (asserted below).
 */
void x86_cpu_change_kvm_default(const char *prop, const char *value)
{
    PropValue *pv;
    for (pv = kvm_default_props; pv->prop; pv++) {
        if (!strcmp(pv->prop, prop)) {
            pv->value = value;
            break;
        }
    }

    /* It is valid to call this function only for properties that
     * are already present in the kvm_default_props table.
     */
    assert(pv->prop);
}
3171 static uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
3172 bool migratable_only);
/* Query whether KVM supports Local Machine Check Exception (LMCE).
 * Without CONFIG_KVM, mce_cap stays 0 and the function returns false.
 */
static bool lmce_supported(void)
{
    uint64_t mce_cap = 0;

#ifdef CONFIG_KVM
    if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
        return false;
    }
#endif

    return !!(mce_cap & MCG_LMCE_P);
}
3187 #define CPUID_MODEL_ID_SZ 48
/*
 * cpu_x86_fill_model_id:
 * Get CPUID model ID string from host CPU.
 *
 * @str should have at least CPUID_MODEL_ID_SZ bytes
 *
 * The function does NOT add a null terminator to the string
 * automatically.
 *
 * Reads CPUID leaves 0x80000002..0x80000004 and stores the raw
 * EAX/EBX/ECX/EDX register bytes consecutively into @str.
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t regs[4] = { 0, 0, 0, 0 };
    int leaf;

    for (leaf = 0; leaf < 3; leaf++) {
        host_cpuid(0x80000002 + leaf, 0,
                   &regs[0], &regs[1], &regs[2], &regs[3]);
        /* regs[] is contiguous EAX..EDX: one 16-byte copy per leaf */
        memcpy(str + leaf * 16, regs, sizeof(regs));
    }
    return 0;
}
/* QOM properties specific to the "max" CPU model */
static Property max_x86_cpu_properties[] = {
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};
/* Class init for the "max" CPU model: description, property list, and a
 * high ordering value so it sorts after the named models in listings.
 */
static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->ordering = 9;

    xcc->model_description =
        "Enables all features supported by the accelerator in the current host";

    dc->props = max_x86_cpu_properties;
}
/* Instance init for the "max" CPU model.
 *
 * With a host-CPUID-capable accelerator (KVM/HVF) the vendor, family,
 * model, stepping and model-id are copied from the host, and the
 * minimum CPUID levels are taken from the accelerator.  Under TCG a
 * fixed AMD-flavored identity is used instead.
 */
static void max_x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    KVMState *s = kvm_state;

    /* We can't fill the features array here because we don't know yet if
     * "migratable" is true or false.
     */
    cpu->max_features = true;

    if (accel_uses_host_cpuid()) {
        char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
        char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
        int family, model, stepping;

        host_vendor_fms(vendor, &family, &model, &stepping);
        cpu_x86_fill_model_id(model_id);

        object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
        object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
        object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
        object_property_set_int(OBJECT(cpu), stepping, "stepping",
                                &error_abort);
        object_property_set_str(OBJECT(cpu), model_id, "model-id",
                                &error_abort);

        if (kvm_enabled()) {
            env->cpuid_min_level =
                kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
            env->cpuid_min_xlevel =
                kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
            env->cpuid_min_xlevel2 =
                kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
        } else {
            /* accel_uses_host_cpuid() && !kvm_enabled() implies HVF here */
            env->cpuid_min_level =
                hvf_get_supported_cpuid(0x0, 0, R_EAX);
            env->cpuid_min_xlevel =
                hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
            env->cpuid_min_xlevel2 =
                hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
        }

        if (lmce_supported()) {
            object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
        }
    } else {
        /* TCG: synthetic identity, not tied to the host CPU */
        object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
                                "vendor", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
        object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
        object_property_set_str(OBJECT(cpu),
                                "QEMU TCG CPU version " QEMU_HW_VERSION,
                                "model-id", &error_abort);
    }

    object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}
/* QOM type registration for the "max" CPU model */
static const TypeInfo max_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("max"),
    .parent = TYPE_X86_CPU,
    .instance_init = max_x86_cpu_initfn,
    .class_init = max_x86_cpu_class_init,
};
#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
/* Class init for the "host" CPU model: like "max" (its QOM parent) but
 * requires an accelerator that exposes the host's CPUID.
 */
static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->host_cpuid_required = true;
    xcc->ordering = 8;

#if defined(CONFIG_KVM)
    xcc->model_description =
        "KVM processor with all supported host features ";
#elif defined(CONFIG_HVF)
    xcc->model_description =
        "HVF processor with all supported host features ";
#endif
}
/* "host" inherits instance init from "max"; only the class differs */
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = X86_CPU_TYPE_NAME("max"),
    .class_init = host_x86_cpu_class_init,
};

#endif
/* Return a newly allocated human-readable description of feature word @f
 * ("CPUID.<leaf>H:<reg>" or "MSR(<index>H)").  Caller must g_free() it.
 * @bit is currently unused here; callers pass the bit for context.
 */
static char *feature_word_description(FeatureWordInfo *f, uint32_t bit)
{
    assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD);

    switch (f->type) {
    case CPUID_FEATURE_WORD:
        {
            const char *reg = get_register_name_32(f->cpuid.reg);
            assert(reg);
            return g_strdup_printf("CPUID.%02XH:%s",
                                   f->cpuid.eax, reg);
        }
    case MSR_FEATURE_WORD:
        return g_strdup_printf("MSR(%02XH)",
                               f->msr.index);
    }

    /* unreachable: both enum values handled above */
    return NULL;
}
/* Return true if any feature bit was filtered out on any feature word */
static bool x86_cpu_have_filtered_features(X86CPU *cpu)
{
    FeatureWord w;

    for (w = 0; w < FEATURE_WORDS; w++) {
        if (cpu->filtered_features[w]) {
            return true;
        }
    }

    return false;
}
/* Record bits in @mask of feature word @w as unavailable on this CPU.
 *
 * Unless "force_features" is set, the bits are also cleared from the
 * effective feature word.  If @verbose_prefix is non-NULL, one warning
 * per filtered bit is printed, prefixed with that string.
 */
static void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask,
                                      const char *verbose_prefix)
{
    CPUX86State *env = &cpu->env;
    FeatureWordInfo *f = &feature_word_info[w];
    int i;
    char *feat_word_str;

    if (!cpu->force_features) {
        env->features[w] &= ~mask;
    }
    cpu->filtered_features[w] |= mask;

    if (!verbose_prefix) {
        return;
    }

    for (i = 0; i < 64; ++i) {
        if ((1ULL << i) & mask) {
            feat_word_str = feature_word_description(f, i);
            warn_report("%s: %s%s%s [bit %d]",
                        verbose_prefix,
                        feat_word_str,
                        f->feat_names[i] ? "." : "",
                        f->feat_names[i] ? f->feat_names[i] : "", i);
            g_free(feat_word_str);
        }
    }
}
/* QOM getter for "family": decode base family (CPUID[1].EAX bits 11:8),
 * adding the extended family field (bits 27:20) when base family == 0xf.
 */
static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
                                         const char *name, void *opaque,
                                         Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = (env->cpuid_version >> 8) & 0xf;
    if (value == 0xf) {
        value += (env->cpuid_version >> 20) & 0xff;
    }
    visit_type_int(v, name, &value, errp);
}
/* QOM setter for "family": encode into CPUID[1].EAX.  Values above 0x0f
 * are split into base family 0xf plus the extended family field.
 * Valid range is 0 .. 0xff + 0xf (base + extended maximum).
 */
static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
                                         const char *name, void *opaque,
                                         Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff + 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    /* clear base family (11:8) and extended family (27:20) fields */
    env->cpuid_version &= ~0xff00f00;
    if (value > 0x0f) {
        env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
    } else {
        env->cpuid_version |= value << 8;
    }
}
/* QOM getter for "model": base model (CPUID[1].EAX bits 7:4) in the low
 * nibble, extended model (bits 19:16) in the high nibble.
 */
static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = (env->cpuid_version >> 4) & 0xf;
    value |= ((env->cpuid_version >> 16) & 0xf) << 4;
    visit_type_int(v, name, &value, errp);
}
/* QOM setter for "model": split the 8-bit value into the base model
 * (bits 7:4 of CPUID[1].EAX) and extended model (bits 19:16) fields.
 */
static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    /* clear base model (7:4) and extended model (19:16) fields */
    env->cpuid_version &= ~0xf00f0;
    env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
}
/* QOM getter for "stepping": CPUID[1].EAX bits 3:0 */
static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
                                           const char *name, void *opaque,
                                           Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = env->cpuid_version & 0xf;
    visit_type_int(v, name, &value, errp);
}
/* QOM setter for "stepping": store into CPUID[1].EAX bits 3:0 (range 0-0xf) */
static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
                                           const char *name, void *opaque,
                                           Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    env->cpuid_version &= ~0xf;
    env->cpuid_version |= value & 0xf;
}
/* QOM getter for "vendor": build the 12-char vendor string from the
 * three CPUID[0] register words.  Caller must g_free() the result.
 */
static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    char *value;

    value = g_malloc(CPUID_VENDOR_SZ + 1);
    x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
                             env->cpuid_vendor3);
    return value;
}
/* QOM setter for "vendor": pack an exactly-12-character string into the
 * three little-endian CPUID vendor register words.
 */
static void x86_cpuid_set_vendor(Object *obj, const char *value,
                                 Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int i;

    if (strlen(value) != CPUID_VENDOR_SZ) {
        error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
        return;
    }

    env->cpuid_vendor1 = 0;
    env->cpuid_vendor2 = 0;
    env->cpuid_vendor3 = 0;
    for (i = 0; i < 4; i++) {
        /* 4 bytes per register word, least significant byte first */
        env->cpuid_vendor1 |= ((uint8_t)value[i    ]) << (8 * i);
        env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
        env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
    }
}
3543 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
3545 X86CPU *cpu = X86_CPU(obj);
3546 CPUX86State *env = &cpu->env;
3547 char *value;
3548 int i;
3550 value = g_malloc(48 + 1);
3551 for (i = 0; i < 48; i++) {
3552 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
3554 value[48] = '\0';
3555 return value;
3558 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
3559 Error **errp)
3561 X86CPU *cpu = X86_CPU(obj);
3562 CPUX86State *env = &cpu->env;
3563 int c, len, i;
3565 if (model_id == NULL) {
3566 model_id = "";
3568 len = strlen(model_id);
3569 memset(env->cpuid_model, 0, 48);
3570 for (i = 0; i < 48; i++) {
3571 if (i >= len) {
3572 c = '\0';
3573 } else {
3574 c = (uint8_t)model_id[i];
3576 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
/* QOM getter for "tsc-frequency": report tsc_khz in Hz */
static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    int64_t value;

    value = cpu->env.tsc_khz * 1000;
    visit_type_int(v, name, &value, errp);
}
/* QOM setter for "tsc-frequency": accept Hz, store kHz in both tsc_khz
 * and user_tsc_khz (the latter records that the user set it explicitly).
 */
static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    const int64_t min = 0;
    const int64_t max = INT64_MAX;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
}
/* Generic getter for "feature-words" and "filtered-features" properties */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    uint64_t *array = (uint64_t *)opaque;
    FeatureWord w;
    /* stack-allocated list nodes; they only need to outlive the visit below */
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        /*
         * We didn't have MSR features when "feature-words" was
         * introduced. Therefore skipped other type entries.
         */
        if (wi->type != CPUID_FEATURE_WORD) {
            continue;
        }
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        qwi->cpuid_input_eax = wi->cpuid.eax;
        qwi->has_cpuid_input_ecx = wi->cpuid.needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid.ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid.reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
}
/* Convert all '_' in a feature string option name to '-', to make feature
 * name conform to QOM property naming rule, which uses '-' instead of '_'.
 */
static inline void feat2prop(char *s)
{
    char *p;

    for (p = s; *p != '\0'; p++) {
        if (*p == '_') {
            *p = '-';
        }
    }
}
/* Return the feature property name for a feature flag bit */
static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
{
    const char *name;
    /* XSAVE components are automatically enabled by other features,
     * so return the original feature name instead
     */
    if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
        /* map the bit back to its xsave component index */
        int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;

        if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
            x86_ext_save_areas[comp].bits) {
            w = x86_ext_save_areas[comp].feature;
            bitnr = ctz32(x86_ext_save_areas[comp].bits);
        }
    }

    assert(bitnr < 64);
    assert(w < FEATURE_WORDS);
    name = feature_word_info[w].feat_names[bitnr];
    /* CPUID feature words only have names for their low 32 bits */
    assert(bitnr < 32 || !(name && feature_word_info[w].type == CPUID_FEATURE_WORD));
    return name;
}
/* Compatibily hack to maintain legacy +-feat semantic,
 * where +-feat overwrites any feature set by
 * feat=on|feat even if the later is parsed after +-feat
 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
 */
/* Lists of g_strdup()'d feature names collected while parsing "+feat"/"-feat" */
static GList *plus_features, *minus_features;
/* GCompareFunc wrapper around g_strcmp0 for g_list_find_custom() */
static gint compare_string(gconstpointer a, gconstpointer b)
{
    return g_strcmp0(a, b);
}
/* Parse "+feature,-feature,feature=foo" CPU feature string
 *
 * Registers one global property per "feature=value" pair; "+feat"/"-feat"
 * entries are collected in plus_features/minus_features for later
 * application.  Only the first call does anything (guarded by
 * cpu_globals_initialized), so the feature string is process-global.
 * NOTE: strtok() mutates @features and keeps static state; safe here only
 * because this runs once during single-threaded startup.
 */
static void x86_cpu_parse_featurestr(const char *typename, char *features,
                                     Error **errp)
{
    char *featurestr; /* Single 'key=value" string being parsed */
    static bool cpu_globals_initialized;
    bool ambiguous = false;

    if (cpu_globals_initialized) {
        return;
    }
    cpu_globals_initialized = true;

    if (!features) {
        return;
    }

    for (featurestr = strtok(features, ",");
         featurestr;
         featurestr = strtok(NULL, ",")) {
        const char *name;
        const char *val = NULL;
        char *eq = NULL;
        char num[32];
        GlobalProperty *prop;

        /* Compatibility syntax: */
        if (featurestr[0] == '+') {
            plus_features = g_list_append(plus_features,
                                          g_strdup(featurestr + 1));
            continue;
        } else if (featurestr[0] == '-') {
            minus_features = g_list_append(minus_features,
                                           g_strdup(featurestr + 1));
            continue;
        }

        eq = strchr(featurestr, '=');
        if (eq) {
            *eq++ = 0;
            val = eq;
        } else {
            val = "on";     /* bare "feat" means "feat=on" */
        }

        feat2prop(featurestr);
        name = featurestr;

        /* warn when the same feature appears both as +/-feat and feat=val */
        if (g_list_find_custom(plus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"+%s\" and \"%s=%s\"",
                        name, name, val);
            ambiguous = true;
        }
        if (g_list_find_custom(minus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"-%s\" and \"%s=%s\"",
                        name, name, val);
            ambiguous = true;
        }

        /* Special case: */
        if (!strcmp(name, "tsc-freq")) {
            int ret;
            uint64_t tsc_freq;

            ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
            if (ret < 0 || tsc_freq > INT64_MAX) {
                error_setg(errp, "bad numerical value %s", val);
                return;
            }
            snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
            val = num;
            name = "tsc-frequency";
        }

        prop = g_new0(typeof(*prop), 1);
        prop->driver = typename;
        prop->property = g_strdup(name);
        prop->value = g_strdup(val);
        qdev_prop_register_global(prop);
    }

    if (ambiguous) {
        warn_report("Compatibility of ambiguous CPU model "
                    "strings won't be kept on future QEMU versions");
    }
}
3785 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
3786 static void x86_cpu_filter_features(X86CPU *cpu, bool verbose);
/* Build a list with the name of all features on a feature word array */
static void x86_cpu_list_feature_names(FeatureWordArray features,
                                       strList **feat_names)
{
    FeatureWord w;
    strList **next = feat_names;

    for (w = 0; w < FEATURE_WORDS; w++) {
        uint64_t filtered = features[w];
        int i;
        for (i = 0; i < 64; i++) {
            if (filtered & (1ULL << i)) {
                /* appended in ascending word/bit order */
                strList *new = g_new0(strList, 1);
                new->value = g_strdup(x86_cpu_feature_name(w, i));
                *next = new;
                next = &new->next;
            }
        }
    }
}
/* QOM getter for "unavailable-features": names of all filtered-out bits */
static void x86_cpu_get_unavailable_features(Object *obj, Visitor *v,
                                             const char *name, void *opaque,
                                             Error **errp)
{
    X86CPU *xc = X86_CPU(obj);
    strList *result = NULL;

    x86_cpu_list_feature_names(xc->filtered_features, &result);
    visit_type_strList(v, "unavailable-features", &result, errp);
}
/* Check for missing features that may prevent the CPU class from
 * running using the current machine and accelerator.
 *
 * Builds a strList of missing feature names: "kvm" when host CPUID is
 * required but unavailable, "type" when feature expansion itself fails,
 * otherwise the individual filtered feature names.
 */
static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
                                                 strList **missing_feats)
{
    X86CPU *xc;
    Error *err = NULL;
    strList **next = missing_feats;

    if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
        strList *new = g_new0(strList, 1);
        new->value = g_strdup("kvm");
        *missing_feats = new;
        return;
    }

    /* instantiate a throwaway CPU object just to expand/filter features */
    xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));

    x86_cpu_expand_features(xc, &err);
    if (err) {
        /* Errors at x86_cpu_expand_features should never happen,
         * but in case it does, just report the model as not
         * runnable at all using the "type" property.
         */
        strList *new = g_new0(strList, 1);
        new->value = g_strdup("type");
        *next = new;
        next = &new->next;
    }

    x86_cpu_filter_features(xc, false);

    x86_cpu_list_feature_names(xc->filtered_features, next);

    object_unref(OBJECT(xc));
}
/* Print all cpuid feature names in featureset
 *
 * Names are wrapped to keep lines under ~75 columns.
 */
static void listflags(GList *features)
{
    size_t len = 0;
    GList *tmp;

    for (tmp = features; tmp; tmp = tmp->next) {
        const char *name = tmp->data;
        if ((len + strlen(name) + 1) >= 75) {
            qemu_printf("\n");
            len = 0;
        }
        /* leading space indents a fresh line; otherwise separate with one */
        qemu_printf("%s%s", len == 0 ? "  " : " ", name);
        len += strlen(name) + 1;
    }
    qemu_printf("\n");
}
/* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
    X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
    char *name_a, *name_b;
    int ret;

    /* ordering is the primary key, model name the tiebreaker */
    if (cc_a->ordering != cc_b->ordering) {
        ret = cc_a->ordering - cc_b->ordering;
    } else {
        name_a = x86_cpu_class_get_model_name(cc_a);
        name_b = x86_cpu_class_get_model_name(cc_b);
        ret = strcmp(name_a, name_b);
        g_free(name_a);
        g_free(name_b);
    }
    return ret;
}
/* Return all TYPE_X86_CPU classes sorted per x86_cpu_list_compare().
 * Caller must g_slist_free() the result.
 */
static GSList *get_sorted_cpu_model_list(void)
{
    GSList *list = object_class_get_list(TYPE_X86_CPU, false);
    list = g_slist_sort(list, x86_cpu_list_compare);
    return list;
}
/* Return the "model-id" property of a CPU class by instantiating a
 * temporary object.  Caller must g_free() the result.
 */
static char *x86_cpu_class_get_model_id(X86CPUClass *xc)
{
    Object *obj = object_new(object_class_get_name(OBJECT_CLASS(xc)));
    char *r = object_property_get_str(obj, "model-id", &error_abort);
    object_unref(obj);
    return r;
}
/* If @cc is an alias model, return the versioned model name it resolves
 * to (caller g_free()s); otherwise return NULL.
 */
static char *x86_cpu_class_get_alias_of(X86CPUClass *cc)
{
    X86CPUVersion version;

    if (!cc->model || !cc->model->is_alias) {
        return NULL;
    }
    version = x86_cpu_model_resolve_version(cc->model);
    if (version <= 0) {
        return NULL;
    }
    return x86_cpu_versioned_model_name(cc->model->cpudef, version);
}
/* Print one CPU model line for x86_cpu_list(): name plus a description,
 * alias note, or model-id (in that order of preference).
 */
static void x86_cpu_list_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    X86CPUClass *cc = X86_CPU_CLASS(oc);
    char *name = x86_cpu_class_get_model_name(cc);
    char *desc = g_strdup(cc->model_description);
    char *alias_of = x86_cpu_class_get_alias_of(cc);

    if (!desc && alias_of) {
        if (cc->model && cc->model->version == CPU_VERSION_AUTO) {
            desc = g_strdup("(alias configured by machine type)");
        } else {
            desc = g_strdup_printf("(alias of %s)", alias_of);
        }
    }
    if (!desc) {
        desc = x86_cpu_class_get_model_id(cc);
    }

    qemu_printf("x86 %-20s %-48s\n", name, desc);
    g_free(name);
    g_free(desc);
    g_free(alias_of);
}
/* list available CPU models and flags */
void x86_cpu_list(void)
{
    int i, j;
    GSList *list;
    GList *names = NULL;

    qemu_printf("Available CPUs:\n");
    list = get_sorted_cpu_model_list();
    g_slist_foreach(list, x86_cpu_list_entry, NULL);
    g_slist_free(list);

    /* collect every named feature bit across all feature words */
    names = NULL;
    for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
        FeatureWordInfo *fw = &feature_word_info[i];
        for (j = 0; j < 64; j++) {
            if (fw->feat_names[j]) {
                names = g_list_append(names, (gpointer)fw->feat_names[j]);
            }
        }
    }

    names = g_list_sort(names, (GCompareFunc)strcmp);

    qemu_printf("\nRecognized CPUID flags:\n");
    listflags(names);
    qemu_printf("\n");
    g_list_free(names);
}
/* Build one CpuDefinitionInfo entry for qmp_query_cpu_definitions() and
 * prepend it to the list passed via @user_data.
 */
static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    X86CPUClass *cc = X86_CPU_CLASS(oc);
    CpuDefinitionInfoList **cpu_list = user_data;
    CpuDefinitionInfoList *entry;
    CpuDefinitionInfo *info;

    info = g_malloc0(sizeof(*info));
    info->name = x86_cpu_class_get_model_name(cc);
    x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
    info->has_unavailable_features = true;
    info->q_typename = g_strdup(object_class_get_name(oc));
    info->migration_safe = cc->migration_safe;
    info->has_migration_safe = true;
    info->q_static = cc->static_model;
    /*
     * Old machine types won't report aliases, so that alias translation
     * doesn't break compatibility with previous QEMU versions.
     */
    if (default_cpu_version != CPU_VERSION_LEGACY) {
        info->alias_of = x86_cpu_class_get_alias_of(cc);
        info->has_alias_of = !!info->alias_of;
    }

    entry = g_malloc0(sizeof(*entry));
    entry->value = info;
    entry->next = *cpu_list;
    *cpu_list = entry;
}
/* QMP handler for query-cpu-definitions: list all x86 CPU models */
CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    GSList *list = get_sorted_cpu_model_list();
    g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
    g_slist_free(list);
    return cpu_list;
}
/* Return the feature bits of word @w supported by the current
 * accelerator (KVM, HVF, or TCG).  With no accelerator, all bits are
 * reported as supported.  If @migratable_only, mask out bits that
 * cannot be migrated.
 */
static uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint64_t r = 0;

    if (kvm_enabled()) {
        switch (wi->type) {
        case CPUID_FEATURE_WORD:
            r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid.eax,
                                             wi->cpuid.ecx,
                                             wi->cpuid.reg);
            break;
        case MSR_FEATURE_WORD:
            r = kvm_arch_get_supported_msr_feature(kvm_state,
                                                   wi->msr.index);
            break;
        }
    } else if (hvf_enabled()) {
        /* HVF has no MSR feature words */
        if (wi->type != CPUID_FEATURE_WORD) {
            return 0;
        }
        r = hvf_get_supported_cpuid(wi->cpuid.eax,
                                    wi->cpuid.ecx,
                                    wi->cpuid.reg);
    } else if (tcg_enabled()) {
        r = wi->tcg_features;
    } else {
        return ~0;
    }
    if (migratable_only) {
        r &= x86_cpu_get_migratable_flags(w);
    }
    return r;
}
4059 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
4061 PropValue *pv;
4062 for (pv = props; pv->prop; pv++) {
4063 if (!pv->value) {
4064 continue;
4066 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
4067 &error_abort);
/* Apply properties for the CPU model version specified in model */
static void x86_cpu_apply_version_props(X86CPU *cpu, X86CPUModel *model)
{
    const X86CPUVersionDefinition *vdef;
    X86CPUVersion version = x86_cpu_model_resolve_version(model);

    /* Legacy version: keep the unversioned property set untouched. */
    if (version == CPU_VERSION_LEGACY) {
        return;
    }

    /*
     * Property sets are cumulative: apply every version's properties
     * in order, stopping after the requested version has been applied.
     */
    for (vdef = x86_cpu_def_get_versions(model->cpudef); vdef->version; vdef++) {
        PropValue *p;

        for (p = vdef->props; p && p->prop; p++) {
            object_property_parse(OBJECT(cpu), p->value, p->prop,
                                  &error_abort);
        }

        if (vdef->version == version) {
            break;
        }
    }

    /*
     * If we reached the end of the list, version number was invalid
     */
    assert(vdef->version == version);
}
/* Load data from X86CPUDefinition into a X86CPU object.
 *
 * Copies level/xlevel minimums, family/model/stepping, model-id, the raw
 * feature words and cache info from the model definition, then applies the
 * accelerator-specific default properties and the version-specific
 * properties for @model.  Errors from the property setters accumulate in
 * @errp.
 */
static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model, Error **errp)
{
    X86CPUDefinition *def = model->cpudef;
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    /*NOTE: any property set by this function should be returned by
     * x86_cpu_static_props(), so static expansion of
     * query-cpu-model-expansion is always complete.
     */

    /* CPU models only set _minimum_ values for level/xlevel: */
    object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
    object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);

    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* legacy-cache defaults to 'off' if CPU model provides cache info */
    cpu->legacy_cache = !def->cache_info;

    /* Special cases not set in the X86CPUDefinition structs: */
    /* TODO: in-kernel irqchip for hvf */
    if (kvm_enabled()) {
        if (!kvm_irqchip_in_kernel()) {
            /* userspace irqchip cannot provide x2apic */
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    } else if (tcg_enabled()) {
        x86_cpu_apply_props(cpu, tcg_default_props);
    }

    /* We always advertise that we are running under a hypervisor. */
    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (accel_uses_host_cpuid()) {
        /* Host-passthrough accelerators report the real host vendor. */
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }
    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);

    x86_cpu_apply_version_props(cpu, model);
}
4164 #ifndef CONFIG_USER_ONLY
/* Return a QDict containing keys for all properties that can be included
 * in static expansion of CPU models. All properties set by x86_cpu_load_model()
 * must be included in the dictionary.
 *
 * The dict maps property names to null values (only the key set matters).
 * The result is built once and memoized in a function-local static.
 */
static QDict *x86_cpu_static_props(void)
{
    FeatureWord w;
    int i;
    /* Fixed, non-feature properties set by x86_cpu_load_model(). */
    static const char *props[] = {
        "min-level",
        "min-xlevel",
        "family",
        "model",
        "stepping",
        "model-id",
        "vendor",
        "lmce",
        NULL,
    };
    static QDict *d;

    /* Return the cached dict on every call after the first. */
    if (d) {
        return d;
    }

    d = qdict_new();
    for (i = 0; props[i]; i++) {
        qdict_put_null(d, props[i]);
    }

    /* Add every named feature flag from all feature words. */
    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *fi = &feature_word_info[w];
        int bit;
        for (bit = 0; bit < 64; bit++) {
            if (!fi->feat_names[bit]) {
                continue;
            }
            qdict_put_null(d, fi->feat_names[bit]);
        }
    }

    return d;
}
4209 /* Add an entry to @props dict, with the value for property. */
4210 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
4212 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
4213 &error_abort);
4215 qdict_put_obj(props, prop, value);
4218 /* Convert CPU model data from X86CPU object to a property dictionary
4219 * that can recreate exactly the same CPU model.
4221 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
4223 QDict *sprops = x86_cpu_static_props();
4224 const QDictEntry *e;
4226 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
4227 const char *prop = qdict_entry_key(e);
4228 x86_cpu_expand_prop(cpu, props, prop);
4232 /* Convert CPU model data from X86CPU object to a property dictionary
4233 * that can recreate exactly the same CPU model, including every
4234 * writeable QOM property.
4236 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
4238 ObjectPropertyIterator iter;
4239 ObjectProperty *prop;
4241 object_property_iter_init(&iter, OBJECT(cpu));
4242 while ((prop = object_property_iter_next(&iter))) {
4243 /* skip read-only or write-only properties */
4244 if (!prop->get || !prop->set) {
4245 continue;
4248 /* "hotplugged" is the only property that is configurable
4249 * on the command-line but will be set differently on CPUs
4250 * created using "-cpu ... -smp ..." and by CPUs created
4251 * on the fly by x86_cpu_from_model() for querying. Skip it.
4253 if (!strcmp(prop->name, "hotplugged")) {
4254 continue;
4256 x86_cpu_expand_prop(cpu, props, prop->name);
4260 static void object_apply_props(Object *obj, QDict *props, Error **errp)
4262 const QDictEntry *prop;
4263 Error *err = NULL;
4265 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
4266 object_property_set_qobject(obj, qdict_entry_value(prop),
4267 qdict_entry_key(prop), &err);
4268 if (err) {
4269 break;
4273 error_propagate(errp, err);
/* Create X86CPU object according to model+props specification.
 *
 * Looks up the CPU class for @model, instantiates it, applies the optional
 * @props dict, and expands the feature set.  On any failure the partially
 * constructed object is unreferenced and NULL is returned with @errp set.
 * On success the caller owns the returned reference.
 */
static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
{
    X86CPU *xc = NULL;
    X86CPUClass *xcc;
    Error *err = NULL;

    xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
    if (xcc == NULL) {
        error_setg(&err, "CPU model '%s' not found", model);
        goto out;
    }

    xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
    if (props) {
        object_apply_props(OBJECT(xc), props, &err);
        if (err) {
            goto out;
        }
    }

    x86_cpu_expand_features(xc, &err);
    if (err) {
        goto out;
    }

out:
    if (err) {
        error_propagate(errp, err);
        /* object_unref(NULL) is safe when class lookup failed above. */
        object_unref(OBJECT(xc));
        xc = NULL;
    }
    return xc;
}
/*
 * QMP handler: expand a CPU model (name + optional props) into a full
 * property dictionary.
 *
 * STATIC expansion reports only the statically-expandable properties,
 * relative to the "base" model.  FULL expansion reports every writeable
 * QOM property on top of the original model name.  A temporary X86CPU
 * instance is created for the query and released before returning.
 */
CpuModelExpansionInfo *
qmp_query_cpu_model_expansion(CpuModelExpansionType type,
                              CpuModelInfo *model,
                              Error **errp)
{
    X86CPU *xc = NULL;
    Error *err = NULL;
    CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
    QDict *props = NULL;
    const char *base_name;

    xc = x86_cpu_from_model(model->name,
                            model->has_props ?
                                qobject_to(QDict, model->props) :
                                NULL, &err);
    if (err) {
        goto out;
    }

    /* ret->model takes ownership of props via QOBJECT(). */
    props = qdict_new();
    ret->model = g_new0(CpuModelInfo, 1);
    ret->model->props = QOBJECT(props);
    ret->model->has_props = true;

    switch (type) {
    case CPU_MODEL_EXPANSION_TYPE_STATIC:
        /* Static expansion will be based on "base" only */
        base_name = "base";
        x86_cpu_to_dict(xc, props);
        break;
    case CPU_MODEL_EXPANSION_TYPE_FULL:
        /* As we don't return every single property, full expansion needs
         * to keep the original model name+props, and add extra
         * properties on top of that.
         */
        base_name = model->name;
        x86_cpu_to_dict_full(xc, props);
        break;
    default:
        error_setg(&err, "Unsupported expansion type");
        goto out;
    }

    /*
     * NOTE(review): this second x86_cpu_to_dict() call looks redundant for
     * the STATIC case (it was already called in the switch above) and only
     * re-writes a subset of keys with identical values in the FULL case.
     * Appears harmless, but worth confirming against upstream history.
     */
    x86_cpu_to_dict(xc, props);

    ret->model->name = g_strdup(base_name);

out:
    /* object_unref(NULL) is safe if model lookup failed. */
    object_unref(OBJECT(xc));
    if (err) {
        error_propagate(errp, err);
        qapi_free_CpuModelExpansionInfo(ret);
        ret = NULL;
    }
    return ret;
}
4367 #endif /* !CONFIG_USER_ONLY */
4369 static gchar *x86_gdb_arch_name(CPUState *cs)
4371 #ifdef TARGET_X86_64
4372 return g_strdup("i386:x86-64");
4373 #else
4374 return g_strdup("i386");
4375 #endif
4378 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
4380 X86CPUModel *model = data;
4381 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4383 xcc->model = model;
4384 xcc->migration_safe = true;
4387 static void x86_register_cpu_model_type(const char *name, X86CPUModel *model)
4389 char *typename = x86_cpu_type_name(name);
4390 TypeInfo ti = {
4391 .name = typename,
4392 .parent = TYPE_X86_CPU,
4393 .class_init = x86_cpu_cpudef_class_init,
4394 .class_data = model,
4397 type_register(&ti);
4398 g_free(typename);
/*
 * Register all QOM types for one CPU model definition: the unversioned
 * name (an alias resolved at runtime), one type per version, and any
 * per-version alias names.  The X86CPUModel allocations are intentionally
 * never freed: they live as long as the registered types.
 */
static void x86_register_cpudef_types(X86CPUDefinition *def)
{
    X86CPUModel *m;
    const X86CPUVersionDefinition *vdef;
    char *name;

    /* AMD aliases are handled at runtime based on CPUID vendor, so
     * they shouldn't be set on the CPU model table.
     */
    assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
    /* catch mistakes instead of silently truncating model_id when too long */
    assert(def->model_id && strlen(def->model_id) <= 48);

    /* Unversioned model: */
    m = g_new0(X86CPUModel, 1);
    m->cpudef = def;
    m->version = CPU_VERSION_AUTO;
    m->is_alias = true;
    x86_register_cpu_model_type(def->name, m);

    /* Versioned models: */

    for (vdef = x86_cpu_def_get_versions(def); vdef->version; vdef++) {
        X86CPUModel *m = g_new0(X86CPUModel, 1);
        m->cpudef = def;
        m->version = vdef->version;
        name = x86_cpu_versioned_model_name(def, vdef->version);
        x86_register_cpu_model_type(name, m);
        g_free(name);

        /* Optional extra alias name for this specific version. */
        if (vdef->alias) {
            X86CPUModel *am = g_new0(X86CPUModel, 1);
            am->cpudef = def;
            am->version = vdef->version;
            am->is_alias = true;
            x86_register_cpu_model_type(vdef->alias, am);
        }
    }
}
4442 #if !defined(CONFIG_USER_ONLY)
/* Clear the CPUID_APIC bit from the FEAT_1_EDX feature word of @env. */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}
4449 #endif /* !CONFIG_USER_ONLY */
/*
 * cpu_x86_cpuid: compute the guest-visible CPUID result for leaf @index,
 * sub-leaf @count, storing the four output registers in *eax..*edx.
 *
 * Results are synthesized from the configured feature words and topology
 * in @env/@cpu; some cache leaves are passed through from the host when
 * cache_info_passthrough is set, and some leaves query KVM/HVF directly.
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);
    uint32_t die_offset;
    uint32_t limit;
    uint32_t signature[3];

    /* Calculate & apply limits for different index ranges */
    if (index >= 0xC0000000) {
        /* VIA/Centaur range */
        limit = env->cpuid_xlevel2;
    } else if (index >= 0x80000000) {
        /* extended range */
        limit = env->cpuid_xlevel;
    } else if (index >= 0x40000000) {
        /* hypervisor range */
        limit = 0x40000001;
    } else {
        /* basic range */
        limit = env->cpuid_level;
    }

    if (index > limit) {
        /* Intel documentation states that invalid EAX input will
         * return the same information as EAX=cpuid_level
         * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
         */
        index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* Vendor string and maximum basic leaf. */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        /* Version, feature flags, APIC ID, logical processor count. */
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
            /* OSXSAVE mirrors the guest's CR4.OSXSAVE setting. */
            *ecx |= CPUID_EXT_OSXSAVE;
        }
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= CPUID_HT;
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        if (!cpu->enable_l3_cache) {
            *ecx = 0;
        } else {
            *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache);
        }
        *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) |
               (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) <<  8) |
               (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            /* QEMU gives out its own APIC IDs, never pass down bits 31..26.  */
            *eax &= ~0xFC000000;
            if ((*eax & 31) && cs->nr_cores > 1) {
                *eax |= (cs->nr_cores - 1) << 26;
            }
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
                                    1, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 1: /* L1 icache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
                                    1, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 2: /* L2 cache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
                                    cs->nr_threads, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 3: /* L3 cache info */
                die_offset = apicid_die_offset(env->nr_dies,
                                        cs->nr_cores, cs->nr_threads);
                if (cpu->enable_l3_cache) {
                    encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
                                        (1 << die_offset), cs->nr_cores,
                                        eax, ebx, ecx, edx);
                    break;
                }
                /* fall through */
            default: /* end of info */
                *eax = *ebx = *ecx = *edx = 0;
                break;
            }
        }
        break;
    case 5:
        /* MONITOR/MWAIT Leaf */
        *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */
        *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */
        *ecx = cpu->mwait.ecx; /* flags */
        *edx = cpu->mwait.edx; /* mwait substates */
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            /* Maximum ECX value for sub-leaves */
            *eax = env->cpuid_level_func7;
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                /* OSPKE mirrors the guest's CR4.PKE setting. */
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
        } else if (count == 1) {
            *eax = env->features[FEAT_7_1_EAX];
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else if (hvf_enabled() && cpu->enable_pmu) {
            *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
            *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
            *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
            *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
        } else {
            /* No PMU reporting without an accelerator-backed PMU. */
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xB:
        /* Extended Topology Enumeration Leaf */
        if (!cpu->enable_cpuid_0xb) {
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;

        switch (count) {
        case 0:
            *eax = apicid_core_offset(env->nr_dies,
                                      cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_pkg_offset(env->nr_dies,
                                     cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }

        /* APIC ID shift must fit in the architectural 5-bit field. */
        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0x1F:
        /* V2 Extended Topology Enumeration Leaf */
        if (env->nr_dies < 2) {
            /* Leaf 0x1F is only exposed for multi-die configurations. */
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;
        switch (count) {
        case 0:
            *eax = apicid_core_offset(env->nr_dies, cs->nr_cores,
                                      cs->nr_threads);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_die_offset(env->nr_dies, cs->nr_cores,
                                     cs->nr_threads);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        case 2:
            *eax = apicid_pkg_offset(env->nr_dies, cs->nr_cores,
                                     cs->nr_threads);
            *ebx = env->nr_dies * cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_DIE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }
        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0xD: {
        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }

        if (count == 0) {
            *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
            *eax = env->features[FEAT_XSAVE_COMP_LO];
            *edx = env->features[FEAT_XSAVE_COMP_HI];
            /*
             * The initial value of xcr0 and ebx == 0, On host without kvm
             * commit 412a3c41(e.g., CentOS 6), the ebx's value always == 0
             * even through guest update xcr0, this will crash some legacy guest
             * (e.g., CentOS 6), So set ebx == ecx to workaroud it.
             */
            *ebx = kvm_enabled() ? *ecx : xsave_area_size(env->xcr0);
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            /* Per-component size/offset sub-leaves. */
            if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
                const ExtSaveArea *esa = &x86_ext_save_areas[count];
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x14: {
        /* Intel Processor Trace Enumeration */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
            !kvm_enabled()) {
            break;
        }

        if (count == 0) {
            *eax = INTEL_PT_MAX_SUBLEAF;
            *ebx = INTEL_PT_MINIMAL_EBX;
            *ecx = INTEL_PT_MINIMAL_ECX;
        } else if (count == 1) {
            *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
            *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
        }
        break;
    }
    case 0x40000000:
        /*
         * CPUID code in kvm_arch_init_vcpu() ignores stuff
         * set here, but we restrict to TCG none the less.
         */
        if (tcg_enabled() && cpu->expose_tcg) {
            memcpy(signature, "TCGTCGTCGTCG", 12);
            *eax = 0x40000001;
            *ebx = signature[0];
            *ecx = signature[1];
            *edx = signature[2];
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0x40000001:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        /* Maximum extended leaf + vendor string. */
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Processor brand string, 16 bytes per leaf. */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
        *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache);
        *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        encode_cache_cpuid80000006(env->cache_info_amd.l2_cache,
                                   cpu->enable_l3_cache ?
                                   env->cache_info_amd.l3_cache : NULL,
                                   ecx, edx);
        break;
    case 0x80000007:
        /* Advanced Power Management Information. */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor */
            *eax = cpu->phys_bits; /* configurable physical bits */
            if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
                *eax |= 0x00003900; /* 57 bits virtual */
            } else {
                *eax |= 0x00003000; /* 48 bits virtual */
            }
        } else {
            *eax = cpu->phys_bits;
        }
        *ebx = env->features[FEAT_8000_0008_EBX];
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            /* NC: number of physical threads minus 1. */
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        /* SVM revision and feature identification. */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0x8000001D:
        /* AMD cache topology (analogous to Intel leaf 4). */
        *eax = 0;
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            break;
        }
        switch (count) {
        case 0: /* L1 dcache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 1: /* L1 icache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 2: /* L2 cache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 3: /* L3 cache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        default: /* end of info */
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }
        break;
    case 0x8000001E:
        /* AMD processor topology (core id / node id). */
        assert(cpu->core_id <= 255);
        encode_topo_cpuid8000001e(cs, cpu,
                                  eax, ebx, ecx, edx);
        break;
    case 0xC0000000:
        /* Maximum VIA/Centaur leaf. */
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x8000001F:
        /* AMD SEV: enablement flag, C-bit position and phys-bit reduction. */
        *eax = sev_enabled() ? 0x2 : 0;
        *ebx = sev_get_cbit_position();
        *ebx |= sev_get_reduced_phys_bits() << 6;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
/* CPUClass::reset()
 *
 * Bring the vCPU to architectural power-on/reset state: real mode with
 * CS base 0xffff0000 and EIP 0xfff0 (i.e. execution resumes at the reset
 * vector 0xfffffff0), default FPU/SSE control words, cleared MTRRs and
 * debug registers, and no pending exception/interrupt state.
 */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    /* Zero everything up to (not including) the non-reset fields. */
    memset(env, 0, offsetof(CPUX86State, end_reset_fields));

    env->old_exception = -1;

    /* init to reset state */

    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;
    env->msr_smi_count = 0;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state.  */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
    if (env->features[FEAT_1_ECX] & CPUID_EXT_MONITOR) {
        env->msr_ia32_misc_enable |= MSR_IA32_MISC_ENABLE_MWAIT;
    }

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode. */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

    env->interrupt_injected = -1;
    env->exception_nr = -1;
    env->exception_pending = 0;
    env->exception_injected = 0;
    env->exception_has_payload = false;
    env->exception_payload = 0;
    env->nmi_injected = false;
#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    /* Application processors start halted, waiting for INIT/SIPI. */
    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
    else if (hvf_enabled()) {
        hvf_reset_vcpu(s);
    }
#endif
}
5082 #ifndef CONFIG_USER_ONLY
5083 bool cpu_is_bsp(X86CPU *cpu)
5085 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
5088 /* TODO: remove me, when reset over QOM tree is implemented */
5089 static void x86_cpu_machine_reset_cb(void *opaque)
5091 X86CPU *cpu = opaque;
5092 cpu_reset(CPU(cpu));
5094 #endif
/*
 * Initialize machine-check state: if the CPU is family >= 6 and
 * advertises both MCE and MCA, enable the default MCE capabilities
 * (plus LMCE when configured), set MCG_CTL to all-ones, and enable
 * all error-reporting banks via their MCi_CTL registers.
 */
static void mce_init(X86CPU *cpu)
{
    CPUX86State *cenv = &cpu->env;
    unsigned int bank;

    if (((cenv->cpuid_version >> 8) & 0xf) >= 6
        && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
            (CPUID_MCE | CPUID_MCA)) {
        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
                        (cpu->enable_lmce ? MCG_LMCE_P : 0);
        cenv->mcg_ctl = ~(uint64_t)0;
        for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
            /* each bank occupies 4 slots; slot 0 is MCi_CTL */
            cenv->mce_banks[bank * 4] = ~(uint64_t)0;
        }
    }
}
5113 #ifndef CONFIG_USER_ONLY
5114 APICCommonClass *apic_get_class(void)
5116 const char *apic_type = "apic";
5118 /* TODO: in-kernel irqchip for hvf */
5119 if (kvm_apic_in_kernel()) {
5120 apic_type = "kvm-apic";
5121 } else if (xen_enabled()) {
5122 apic_type = "xen-apic";
5125 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
/*
 * Create the local APIC device for @cpu and attach it as the "lapic"
 * QOM child.  The child property holds the reference, so the initial
 * reference from object_new() is dropped right after attaching.
 */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());

    cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));
    object_property_add_child(OBJECT(cpu), "lapic",
                              OBJECT(cpu->apic_state), &error_abort);
    /* the child property now owns the reference */
    object_unref(OBJECT(cpu->apic_state));

    qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}
/*
 * Realize the CPU's local APIC (if one was created) and map the APIC
 * MMIO region.  The mapping is global and shared by all CPUs, so it is
 * performed only once, guarded by a function-local static flag.
 */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    static bool apic_mmio_map_once;

    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
    }
}
/*
 * Machine-done notifier: if the board created a "/machine/smram" region,
 * alias the low 4GiB of it into this CPU's address space root so SMM
 * code can access SMRAM.
 */
static void x86_cpu_machine_done(Notifier *n, void *unused)
{
    X86CPU *cpu = container_of(n, X86CPU, machine_done);
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    if (smram) {
        cpu->smram = g_new(MemoryRegion, 1);
        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
                                 smram, 0, 1ull << 32);
        memory_region_set_enabled(cpu->smram, true);
        /* priority 1: the alias sits above the regular RAM mapping */
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
    }
}
5183 #else
/* User-only builds have no APIC device; nothing to realize. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
5187 #endif
5189 /* Note: Only safe for use on x86(-64) hosts */
5190 static uint32_t x86_host_phys_bits(void)
5192 uint32_t eax;
5193 uint32_t host_phys_bits;
5195 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
5196 if (eax >= 0x80000008) {
5197 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
5198 /* Note: According to AMD doc 25481 rev 2.34 they have a field
5199 * at 23:16 that can specify a maximum physical address bits for
5200 * the guest that can override this value; but I've not seen
5201 * anything with that set.
5203 host_phys_bits = eax & 0xff;
5204 } else {
5205 /* It's an odd 64 bit machine that doesn't have the leaf for
5206 * physical address bits; fall back to 36 that's most older
5207 * Intel.
5209 host_phys_bits = 36;
5212 return host_phys_bits;
5215 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
5217 if (*min < value) {
5218 *min = value;
5222 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
5223 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
5225 CPUX86State *env = &cpu->env;
5226 FeatureWordInfo *fi = &feature_word_info[w];
5227 uint32_t eax = fi->cpuid.eax;
5228 uint32_t region = eax & 0xF0000000;
5230 assert(feature_word_info[w].type == CPUID_FEATURE_WORD);
5231 if (!env->features[w]) {
5232 return;
5235 switch (region) {
5236 case 0x00000000:
5237 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
5238 break;
5239 case 0x80000000:
5240 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
5241 break;
5242 case 0xC0000000:
5243 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
5244 break;
5247 if (eax == 7) {
5248 x86_cpu_adjust_level(cpu, &env->cpuid_min_level_func7,
5249 fi->cpuid.ecx);
5253 /* Calculate XSAVE components based on the configured CPU feature flags */
5254 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
5256 CPUX86State *env = &cpu->env;
5257 int i;
5258 uint64_t mask;
5260 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
5261 return;
5264 mask = 0;
5265 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
5266 const ExtSaveArea *esa = &x86_ext_save_areas[i];
5267 if (env->features[esa->feature] & esa->bits) {
5268 mask |= (1ULL << i);
5272 env->features[FEAT_XSAVE_COMP_LO] = mask;
5273 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
5276 /***** Steps involved on loading and filtering CPUID data
5278 * When initializing and realizing a CPU object, the steps
5279 * involved in setting up CPUID data are:
5281 * 1) Loading CPU model definition (X86CPUDefinition). This is
5282 * implemented by x86_cpu_load_model() and should be completely
5283 * transparent, as it is done automatically by instance_init.
5284 * No code should need to look at X86CPUDefinition structs
5285 * outside instance_init.
5287 * 2) CPU expansion. This is done by realize before CPUID
5288 * filtering, and will make sure host/accelerator data is
5289 * loaded for CPU models that depend on host capabilities
5290 * (e.g. "host"). Done by x86_cpu_expand_features().
5292 * 3) CPUID filtering. This initializes extra data related to
5293 * CPUID, and checks if the host supports all capabilities
5294 * required by the CPU. Runnability of a CPU model is
5295 * determined at this step. Done by x86_cpu_filter_features().
5297 * Some operations don't require all steps to be performed.
5298 * More precisely:
5300 * - CPU instance creation (instance_init) will run only CPU
5301 * model loading. CPU expansion can't run at instance_init-time
5302 * because host/accelerator data may be not available yet.
5303 * - CPU realization will perform both CPU model expansion and CPUID
5304 * filtering, and return an error in case one of them fails.
5305 * - query-cpu-definitions needs to run all 3 steps. It needs
5306 * to run CPUID filtering, as the 'unavailable-features'
5307 * field is set based on the filtering results.
5308 * - The query-cpu-model-expansion QMP command only needs to run
5309 * CPU model loading and CPU expansion. It should not filter
5310 * any CPUID data based on host capabilities.
5313 /* Expand CPU configuration data, based on configured features
5314 * and host/accelerator capabilities when appropriate.
5316 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
5318 CPUX86State *env = &cpu->env;
5319 FeatureWord w;
5320 int i;
5321 GList *l;
5322 Error *local_err = NULL;
5324 for (l = plus_features; l; l = l->next) {
5325 const char *prop = l->data;
5326 object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
5327 if (local_err) {
5328 goto out;
5332 for (l = minus_features; l; l = l->next) {
5333 const char *prop = l->data;
5334 object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
5335 if (local_err) {
5336 goto out;
5340 /*TODO: Now cpu->max_features doesn't overwrite features
5341 * set using QOM properties, and we can convert
5342 * plus_features & minus_features to global properties
5343 * inside x86_cpu_parse_featurestr() too.
5345 if (cpu->max_features) {
5346 for (w = 0; w < FEATURE_WORDS; w++) {
5347 /* Override only features that weren't set explicitly
5348 * by the user.
5350 env->features[w] |=
5351 x86_cpu_get_supported_feature_word(w, cpu->migratable) &
5352 ~env->user_features[w] & \
5353 ~feature_word_info[w].no_autoenable_flags;
5357 for (i = 0; i < ARRAY_SIZE(feature_dependencies); i++) {
5358 FeatureDep *d = &feature_dependencies[i];
5359 if (!(env->features[d->from.index] & d->from.mask)) {
5360 uint64_t unavailable_features = env->features[d->to.index] & d->to.mask;
5362 /* Not an error unless the dependent feature was added explicitly. */
5363 mark_unavailable_features(cpu, d->to.index,
5364 unavailable_features & env->user_features[d->to.index],
5365 "This feature depends on other features that were not requested");
5367 env->user_features[d->to.index] |= unavailable_features;
5368 env->features[d->to.index] &= ~unavailable_features;
5372 if (!kvm_enabled() || !cpu->expose_kvm) {
5373 env->features[FEAT_KVM] = 0;
5376 x86_cpu_enable_xsave_components(cpu);
5378 /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
5379 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
5380 if (cpu->full_cpuid_auto_level) {
5381 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
5382 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
5383 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
5384 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
5385 x86_cpu_adjust_feat_level(cpu, FEAT_7_1_EAX);
5386 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
5387 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
5388 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
5389 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
5390 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
5391 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
5392 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
5394 /* Intel Processor Trace requires CPUID[0x14] */
5395 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
5396 kvm_enabled() && cpu->intel_pt_auto_level) {
5397 x86_cpu_adjust_level(cpu, &cpu->env.cpuid_min_level, 0x14);
5400 /* CPU topology with multi-dies support requires CPUID[0x1F] */
5401 if (env->nr_dies > 1) {
5402 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x1F);
5405 /* SVM requires CPUID[0x8000000A] */
5406 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
5407 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
5410 /* SEV requires CPUID[0x8000001F] */
5411 if (sev_enabled()) {
5412 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
5416 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
5417 if (env->cpuid_level_func7 == UINT32_MAX) {
5418 env->cpuid_level_func7 = env->cpuid_min_level_func7;
5420 if (env->cpuid_level == UINT32_MAX) {
5421 env->cpuid_level = env->cpuid_min_level;
5423 if (env->cpuid_xlevel == UINT32_MAX) {
5424 env->cpuid_xlevel = env->cpuid_min_xlevel;
5426 if (env->cpuid_xlevel2 == UINT32_MAX) {
5427 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
5430 out:
5431 if (local_err != NULL) {
5432 error_propagate(errp, local_err);
5437 * Finishes initialization of CPUID data, filters CPU feature
5438 * words based on host availability of each feature.
5440 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
5442 static void x86_cpu_filter_features(X86CPU *cpu, bool verbose)
5444 CPUX86State *env = &cpu->env;
5445 FeatureWord w;
5446 const char *prefix = NULL;
5448 if (verbose) {
5449 prefix = accel_uses_host_cpuid()
5450 ? "host doesn't support requested feature"
5451 : "TCG doesn't support requested feature";
5454 for (w = 0; w < FEATURE_WORDS; w++) {
5455 uint64_t host_feat =
5456 x86_cpu_get_supported_feature_word(w, false);
5457 uint64_t requested_features = env->features[w];
5458 uint64_t unavailable_features = requested_features & ~host_feat;
5459 mark_unavailable_features(cpu, w, unavailable_features, prefix);
5462 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
5463 kvm_enabled()) {
5464 KVMState *s = CPU(cpu)->kvm_state;
5465 uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
5466 uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
5467 uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
5468 uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
5469 uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);
5471 if (!eax_0 ||
5472 ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
5473 ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
5474 ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
5475 ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
5476 INTEL_PT_ADDR_RANGES_NUM) ||
5477 ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
5478 (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
5479 (ecx_0 & INTEL_PT_IP_LIP)) {
5481 * Processor Trace capabilities aren't configurable, so if the
5482 * host can't emulate the capabilities we report on
5483 * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
5485 mark_unavailable_features(cpu, FEAT_7_0_EBX, CPUID_7_0_EBX_INTEL_PT, prefix);
5490 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
5492 CPUState *cs = CPU(dev);
5493 X86CPU *cpu = X86_CPU(dev);
5494 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
5495 CPUX86State *env = &cpu->env;
5496 Error *local_err = NULL;
5497 static bool ht_warned;
5499 if (xcc->host_cpuid_required) {
5500 if (!accel_uses_host_cpuid()) {
5501 char *name = x86_cpu_class_get_model_name(xcc);
5502 error_setg(&local_err, "CPU model '%s' requires KVM", name);
5503 g_free(name);
5504 goto out;
5507 if (enable_cpu_pm) {
5508 host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx,
5509 &cpu->mwait.ecx, &cpu->mwait.edx);
5510 env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR;
5514 /* mwait extended info: needed for Core compatibility */
5515 /* We always wake on interrupt even if host does not have the capability */
5516 cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
5518 if (cpu->apic_id == UNASSIGNED_APIC_ID) {
5519 error_setg(errp, "apic-id property was not initialized properly");
5520 return;
5523 x86_cpu_expand_features(cpu, &local_err);
5524 if (local_err) {
5525 goto out;
5528 x86_cpu_filter_features(cpu, cpu->check_cpuid || cpu->enforce_cpuid);
5530 if (cpu->enforce_cpuid && x86_cpu_have_filtered_features(cpu)) {
5531 error_setg(&local_err,
5532 accel_uses_host_cpuid() ?
5533 "Host doesn't support requested features" :
5534 "TCG doesn't support requested features");
5535 goto out;
5538 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
5539 * CPUID[1].EDX.
5541 if (IS_AMD_CPU(env)) {
5542 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
5543 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
5544 & CPUID_EXT2_AMD_ALIASES);
5547 /* For 64bit systems think about the number of physical bits to present.
5548 * ideally this should be the same as the host; anything other than matching
5549 * the host can cause incorrect guest behaviour.
5550 * QEMU used to pick the magic value of 40 bits that corresponds to
5551 * consumer AMD devices but nothing else.
5553 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
5554 if (accel_uses_host_cpuid()) {
5555 uint32_t host_phys_bits = x86_host_phys_bits();
5556 static bool warned;
5558 /* Print a warning if the user set it to a value that's not the
5559 * host value.
5561 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
5562 !warned) {
5563 warn_report("Host physical bits (%u)"
5564 " does not match phys-bits property (%u)",
5565 host_phys_bits, cpu->phys_bits);
5566 warned = true;
5569 if (cpu->host_phys_bits) {
5570 /* The user asked for us to use the host physical bits */
5571 cpu->phys_bits = host_phys_bits;
5572 if (cpu->host_phys_bits_limit &&
5573 cpu->phys_bits > cpu->host_phys_bits_limit) {
5574 cpu->phys_bits = cpu->host_phys_bits_limit;
5578 if (cpu->phys_bits &&
5579 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
5580 cpu->phys_bits < 32)) {
5581 error_setg(errp, "phys-bits should be between 32 and %u "
5582 " (but is %u)",
5583 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
5584 return;
5586 } else {
5587 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
5588 error_setg(errp, "TCG only supports phys-bits=%u",
5589 TCG_PHYS_ADDR_BITS);
5590 return;
5593 /* 0 means it was not explicitly set by the user (or by machine
5594 * compat_props or by the host code above). In this case, the default
5595 * is the value used by TCG (40).
5597 if (cpu->phys_bits == 0) {
5598 cpu->phys_bits = TCG_PHYS_ADDR_BITS;
5600 } else {
5601 /* For 32 bit systems don't use the user set value, but keep
5602 * phys_bits consistent with what we tell the guest.
5604 if (cpu->phys_bits != 0) {
5605 error_setg(errp, "phys-bits is not user-configurable in 32 bit");
5606 return;
5609 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
5610 cpu->phys_bits = 36;
5611 } else {
5612 cpu->phys_bits = 32;
5616 /* Cache information initialization */
5617 if (!cpu->legacy_cache) {
5618 if (!xcc->model || !xcc->model->cpudef->cache_info) {
5619 char *name = x86_cpu_class_get_model_name(xcc);
5620 error_setg(errp,
5621 "CPU model '%s' doesn't support legacy-cache=off", name);
5622 g_free(name);
5623 return;
5625 env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
5626 *xcc->model->cpudef->cache_info;
5627 } else {
5628 /* Build legacy cache information */
5629 env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
5630 env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
5631 env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
5632 env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;
5634 env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
5635 env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
5636 env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
5637 env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;
5639 env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
5640 env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
5641 env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
5642 env->cache_info_amd.l3_cache = &legacy_l3_cache;
5646 cpu_exec_realizefn(cs, &local_err);
5647 if (local_err != NULL) {
5648 error_propagate(errp, local_err);
5649 return;
5652 #ifndef CONFIG_USER_ONLY
5653 MachineState *ms = MACHINE(qdev_get_machine());
5654 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
5656 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || ms->smp.cpus > 1) {
5657 x86_cpu_apic_create(cpu, &local_err);
5658 if (local_err != NULL) {
5659 goto out;
5662 #endif
5664 mce_init(cpu);
5666 #ifndef CONFIG_USER_ONLY
5667 if (tcg_enabled()) {
5668 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
5669 cpu->cpu_as_root = g_new(MemoryRegion, 1);
5671 /* Outer container... */
5672 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
5673 memory_region_set_enabled(cpu->cpu_as_root, true);
5675 /* ... with two regions inside: normal system memory with low
5676 * priority, and...
5678 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
5679 get_system_memory(), 0, ~0ull);
5680 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
5681 memory_region_set_enabled(cpu->cpu_as_mem, true);
5683 cs->num_ases = 2;
5684 cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
5685 cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);
5687 /* ... SMRAM with higher priority, linked from /machine/smram. */
5688 cpu->machine_done.notify = x86_cpu_machine_done;
5689 qemu_add_machine_init_done_notifier(&cpu->machine_done);
5691 #endif
5693 qemu_init_vcpu(cs);
5696 * Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU
5697 * fixes this issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
5698 * based on inputs (sockets,cores,threads), it is still better to give
5699 * users a warning.
5701 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
5702 * cs->nr_threads hasn't be populated yet and the checking is incorrect.
5704 if (IS_AMD_CPU(env) &&
5705 !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) &&
5706 cs->nr_threads > 1 && !ht_warned) {
5707 warn_report("This family of AMD CPU doesn't support "
5708 "hyperthreading(%d)",
5709 cs->nr_threads);
5710 error_printf("Please configure -smp options properly"
5711 " or try enabling topoext feature.\n");
5712 ht_warned = true;
5715 x86_cpu_apic_realize(cpu, &local_err);
5716 if (local_err != NULL) {
5717 goto out;
5719 cpu_reset(cs);
5721 xcc->parent_realize(dev, &local_err);
5723 out:
5724 if (local_err != NULL) {
5725 error_propagate(errp, local_err);
5726 return;
5730 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
5732 X86CPU *cpu = X86_CPU(dev);
5733 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
5734 Error *local_err = NULL;
5736 #ifndef CONFIG_USER_ONLY
5737 cpu_remove_sync(CPU(dev));
5738 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
5739 #endif
5741 if (cpu->apic_state) {
5742 object_unparent(OBJECT(cpu->apic_state));
5743 cpu->apic_state = NULL;
5746 xcc->parent_unrealize(dev, &local_err);
5747 if (local_err != NULL) {
5748 error_propagate(errp, local_err);
5749 return;
/*
 * Per-property state for a feature-flag QOM property: the feature word
 * it belongs to and the bit(s) it controls within that word.
 */
typedef struct BitProperty {
    FeatureWord w;      /* which env->features[] word */
    uint64_t mask;      /* bit(s) controlled by this property */
} BitProperty;
5758 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
5759 void *opaque, Error **errp)
5761 X86CPU *cpu = X86_CPU(obj);
5762 BitProperty *fp = opaque;
5763 uint64_t f = cpu->env.features[fp->w];
5764 bool value = (f & fp->mask) == fp->mask;
5765 visit_type_bool(v, name, &value, errp);
5768 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
5769 void *opaque, Error **errp)
5771 DeviceState *dev = DEVICE(obj);
5772 X86CPU *cpu = X86_CPU(obj);
5773 BitProperty *fp = opaque;
5774 Error *local_err = NULL;
5775 bool value;
5777 if (dev->realized) {
5778 qdev_prop_set_after_realize(dev, name, errp);
5779 return;
5782 visit_type_bool(v, name, &value, &local_err);
5783 if (local_err) {
5784 error_propagate(errp, local_err);
5785 return;
5788 if (value) {
5789 cpu->env.features[fp->w] |= fp->mask;
5790 } else {
5791 cpu->env.features[fp->w] &= ~fp->mask;
5793 cpu->env.user_features[fp->w] |= fp->mask;
5796 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
5797 void *opaque)
5799 BitProperty *prop = opaque;
5800 g_free(prop);
5803 /* Register a boolean property to get/set a single bit in a uint32_t field.
5805 * The same property name can be registered multiple times to make it affect
5806 * multiple bits in the same FeatureWord. In that case, the getter will return
5807 * true only if all bits are set.
5809 static void x86_cpu_register_bit_prop(X86CPU *cpu,
5810 const char *prop_name,
5811 FeatureWord w,
5812 int bitnr)
5814 BitProperty *fp;
5815 ObjectProperty *op;
5816 uint64_t mask = (1ULL << bitnr);
5818 op = object_property_find(OBJECT(cpu), prop_name, NULL);
5819 if (op) {
5820 fp = op->opaque;
5821 assert(fp->w == w);
5822 fp->mask |= mask;
5823 } else {
5824 fp = g_new0(BitProperty, 1);
5825 fp->w = w;
5826 fp->mask = mask;
5827 object_property_add(OBJECT(cpu), prop_name, "bool",
5828 x86_cpu_get_bit_prop,
5829 x86_cpu_set_bit_prop,
5830 x86_cpu_release_bit_prop, fp, &error_abort);
5834 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
5835 FeatureWord w,
5836 int bitnr)
5838 FeatureWordInfo *fi = &feature_word_info[w];
5839 const char *name = fi->feat_names[bitnr];
5841 if (!name) {
5842 return;
5845 /* Property names should use "-" instead of "_".
5846 * Old names containing underscores are registered as aliases
5847 * using object_property_add_alias()
5849 assert(!strchr(name, '_'));
5850 /* aliases don't use "|" delimiters anymore, they are registered
5851 * manually using object_property_add_alias() */
5852 assert(!strchr(name, '|'));
5853 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
5856 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
5858 X86CPU *cpu = X86_CPU(cs);
5859 CPUX86State *env = &cpu->env;
5860 GuestPanicInformation *panic_info = NULL;
5862 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
5863 panic_info = g_malloc0(sizeof(GuestPanicInformation));
5865 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
5867 assert(HV_CRASH_PARAMS >= 5);
5868 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
5869 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
5870 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
5871 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
5872 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
5875 return panic_info;
5877 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
5878 const char *name, void *opaque,
5879 Error **errp)
5881 CPUState *cs = CPU(obj);
5882 GuestPanicInformation *panic_info;
5884 if (!cs->crash_occurred) {
5885 error_setg(errp, "No crash occured");
5886 return;
5889 panic_info = x86_cpu_get_crash_info(cs);
5890 if (panic_info == NULL) {
5891 error_setg(errp, "No crash information");
5892 return;
5895 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
5896 errp);
5897 qapi_free_GuestPanicInformation(panic_info);
5900 static void x86_cpu_initfn(Object *obj)
5902 X86CPU *cpu = X86_CPU(obj);
5903 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
5904 CPUX86State *env = &cpu->env;
5905 FeatureWord w;
5907 env->nr_dies = 1;
5908 cpu_set_cpustate_pointers(cpu);
5910 object_property_add(obj, "family", "int",
5911 x86_cpuid_version_get_family,
5912 x86_cpuid_version_set_family, NULL, NULL, NULL);
5913 object_property_add(obj, "model", "int",
5914 x86_cpuid_version_get_model,
5915 x86_cpuid_version_set_model, NULL, NULL, NULL);
5916 object_property_add(obj, "stepping", "int",
5917 x86_cpuid_version_get_stepping,
5918 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
5919 object_property_add_str(obj, "vendor",
5920 x86_cpuid_get_vendor,
5921 x86_cpuid_set_vendor, NULL);
5922 object_property_add_str(obj, "model-id",
5923 x86_cpuid_get_model_id,
5924 x86_cpuid_set_model_id, NULL);
5925 object_property_add(obj, "tsc-frequency", "int",
5926 x86_cpuid_get_tsc_freq,
5927 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
5928 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
5929 x86_cpu_get_feature_words,
5930 NULL, NULL, (void *)env->features, NULL);
5931 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
5932 x86_cpu_get_feature_words,
5933 NULL, NULL, (void *)cpu->filtered_features, NULL);
5935 * The "unavailable-features" property has the same semantics as
5936 * CpuDefinitionInfo.unavailable-features on the "query-cpu-definitions"
5937 * QMP command: they list the features that would have prevented the
5938 * CPU from running if the "enforce" flag was set.
5940 object_property_add(obj, "unavailable-features", "strList",
5941 x86_cpu_get_unavailable_features,
5942 NULL, NULL, NULL, &error_abort);
5944 object_property_add(obj, "crash-information", "GuestPanicInformation",
5945 x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);
5947 for (w = 0; w < FEATURE_WORDS; w++) {
5948 int bitnr;
5950 for (bitnr = 0; bitnr < 64; bitnr++) {
5951 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
5955 object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
5956 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
5957 object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
5958 object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
5959 object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
5960 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
5961 object_property_add_alias(obj, "i64", obj, "lm", &error_abort);
5963 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
5964 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
5965 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
5966 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
5967 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
5968 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
5969 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
5970 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
5971 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
5972 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
5973 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
5974 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
5975 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
5976 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
5977 object_property_add_alias(obj, "kvm_poll_control", obj, "kvm-poll-control",
5978 &error_abort);
5979 object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
5980 object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
5981 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
5982 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
5983 object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
5984 object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
5985 object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);
5987 if (xcc->model) {
5988 x86_cpu_load_model(cpu, xcc->model, &error_abort);
5992 static int64_t x86_cpu_get_arch_id(CPUState *cs)
5994 X86CPU *cpu = X86_CPU(cs);
5996 return cpu->apic_id;
5999 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
6001 X86CPU *cpu = X86_CPU(cs);
6003 return cpu->env.cr[0] & CR0_PG_MASK;
6006 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
6008 X86CPU *cpu = X86_CPU(cs);
6010 cpu->env.eip = value;
6013 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
6015 X86CPU *cpu = X86_CPU(cs);
6017 cpu->env.eip = tb->pc - tb->cs_base;
6020 int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
6022 X86CPU *cpu = X86_CPU(cs);
6023 CPUX86State *env = &cpu->env;
6025 #if !defined(CONFIG_USER_ONLY)
6026 if (interrupt_request & CPU_INTERRUPT_POLL) {
6027 return CPU_INTERRUPT_POLL;
6029 #endif
6030 if (interrupt_request & CPU_INTERRUPT_SIPI) {
6031 return CPU_INTERRUPT_SIPI;
6034 if (env->hflags2 & HF2_GIF_MASK) {
6035 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
6036 !(env->hflags & HF_SMM_MASK)) {
6037 return CPU_INTERRUPT_SMI;
6038 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
6039 !(env->hflags2 & HF2_NMI_MASK)) {
6040 return CPU_INTERRUPT_NMI;
6041 } else if (interrupt_request & CPU_INTERRUPT_MCE) {
6042 return CPU_INTERRUPT_MCE;
6043 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
6044 (((env->hflags2 & HF2_VINTR_MASK) &&
6045 (env->hflags2 & HF2_HIF_MASK)) ||
6046 (!(env->hflags2 & HF2_VINTR_MASK) &&
6047 (env->eflags & IF_MASK &&
6048 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
6049 return CPU_INTERRUPT_HARD;
6050 #if !defined(CONFIG_USER_ONLY)
6051 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
6052 (env->eflags & IF_MASK) &&
6053 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
6054 return CPU_INTERRUPT_VIRQ;
6055 #endif
6059 return 0;
6062 static bool x86_cpu_has_work(CPUState *cs)
6064 return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
6067 static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
6069 X86CPU *cpu = X86_CPU(cs);
6070 CPUX86State *env = &cpu->env;
6072 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
6073 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
6074 : bfd_mach_i386_i8086);
6075 info->print_insn = print_insn_i386;
6077 info->cap_arch = CS_ARCH_X86;
6078 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
6079 : env->hflags & HF_CS32_MASK ? CS_MODE_32
6080 : CS_MODE_16);
6081 info->cap_insn_unit = 1;
6082 info->cap_insn_split = 8;
6085 void x86_update_hflags(CPUX86State *env)
6087 uint32_t hflags;
6088 #define HFLAG_COPY_MASK \
6089 ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
6090 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
6091 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
6092 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
6094 hflags = env->hflags & HFLAG_COPY_MASK;
6095 hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
6096 hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
6097 hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
6098 (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
6099 hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
6101 if (env->cr[4] & CR4_OSFXSR_MASK) {
6102 hflags |= HF_OSFXSR_MASK;
6105 if (env->efer & MSR_EFER_LMA) {
6106 hflags |= HF_LMA_MASK;
6109 if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
6110 hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
6111 } else {
6112 hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
6113 (DESC_B_SHIFT - HF_CS32_SHIFT);
6114 hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
6115 (DESC_B_SHIFT - HF_SS32_SHIFT);
6116 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
6117 !(hflags & HF_CS32_MASK)) {
6118 hflags |= HF_ADDSEG_MASK;
6119 } else {
6120 hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
6121 env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
6124 env->hflags = hflags;
6127 static Property x86_cpu_properties[] = {
6128 #ifdef CONFIG_USER_ONLY
6129 /* apic_id = 0 by default for *-user, see commit 9886e834 */
6130 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
6131 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
6132 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
6133 DEFINE_PROP_INT32("die-id", X86CPU, die_id, 0),
6134 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
6135 #else
6136 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
6137 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
6138 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
6139 DEFINE_PROP_INT32("die-id", X86CPU, die_id, -1),
6140 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
6141 #endif
6142 DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
6143 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
6145 DEFINE_PROP_UINT32("hv-spinlocks", X86CPU, hyperv_spinlock_attempts,
6146 HYPERV_SPINLOCK_NEVER_RETRY),
6147 DEFINE_PROP_BIT64("hv-relaxed", X86CPU, hyperv_features,
6148 HYPERV_FEAT_RELAXED, 0),
6149 DEFINE_PROP_BIT64("hv-vapic", X86CPU, hyperv_features,
6150 HYPERV_FEAT_VAPIC, 0),
6151 DEFINE_PROP_BIT64("hv-time", X86CPU, hyperv_features,
6152 HYPERV_FEAT_TIME, 0),
6153 DEFINE_PROP_BIT64("hv-crash", X86CPU, hyperv_features,
6154 HYPERV_FEAT_CRASH, 0),
6155 DEFINE_PROP_BIT64("hv-reset", X86CPU, hyperv_features,
6156 HYPERV_FEAT_RESET, 0),
6157 DEFINE_PROP_BIT64("hv-vpindex", X86CPU, hyperv_features,
6158 HYPERV_FEAT_VPINDEX, 0),
6159 DEFINE_PROP_BIT64("hv-runtime", X86CPU, hyperv_features,
6160 HYPERV_FEAT_RUNTIME, 0),
6161 DEFINE_PROP_BIT64("hv-synic", X86CPU, hyperv_features,
6162 HYPERV_FEAT_SYNIC, 0),
6163 DEFINE_PROP_BIT64("hv-stimer", X86CPU, hyperv_features,
6164 HYPERV_FEAT_STIMER, 0),
6165 DEFINE_PROP_BIT64("hv-frequencies", X86CPU, hyperv_features,
6166 HYPERV_FEAT_FREQUENCIES, 0),
6167 DEFINE_PROP_BIT64("hv-reenlightenment", X86CPU, hyperv_features,
6168 HYPERV_FEAT_REENLIGHTENMENT, 0),
6169 DEFINE_PROP_BIT64("hv-tlbflush", X86CPU, hyperv_features,
6170 HYPERV_FEAT_TLBFLUSH, 0),
6171 DEFINE_PROP_BIT64("hv-evmcs", X86CPU, hyperv_features,
6172 HYPERV_FEAT_EVMCS, 0),
6173 DEFINE_PROP_BIT64("hv-ipi", X86CPU, hyperv_features,
6174 HYPERV_FEAT_IPI, 0),
6175 DEFINE_PROP_BIT64("hv-stimer-direct", X86CPU, hyperv_features,
6176 HYPERV_FEAT_STIMER_DIRECT, 0),
6177 DEFINE_PROP_BOOL("hv-passthrough", X86CPU, hyperv_passthrough, false),
6179 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
6180 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
6181 DEFINE_PROP_BOOL("x-force-features", X86CPU, force_features, false),
6182 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
6183 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
6184 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
6185 DEFINE_PROP_UINT8("host-phys-bits-limit", X86CPU, host_phys_bits_limit, 0),
6186 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
6187 DEFINE_PROP_UINT32("level-func7", X86CPU, env.cpuid_level_func7,
6188 UINT32_MAX),
6189 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
6190 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
6191 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
6192 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
6193 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
6194 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
6195 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
6196 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
6197 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
6198 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
6199 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
6200 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
6201 false),
6202 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
6203 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
6204 DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count,
6205 true),
 * legacy_cache defaults to true unless the CPU model provides its
6208 * own cache information (see x86_cpu_load_def()).
6210 DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true),
6213 * From "Requirements for Implementing the Microsoft
6214 * Hypervisor Interface":
6215 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
6217 * "Starting with Windows Server 2012 and Windows 8, if
6218 * CPUID.40000005.EAX contains a value of -1, Windows assumes that
6219 * the hypervisor imposes no specific limit to the number of VPs.
6220 * In this case, Windows Server 2012 guest VMs may use more than
6221 * 64 VPs, up to the maximum supported number of processors applicable
6222 * to the specific Windows version being used."
6224 DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
6225 DEFINE_PROP_BOOL("x-hv-synic-kvm-only", X86CPU, hyperv_synic_kvm_only,
6226 false),
6227 DEFINE_PROP_BOOL("x-intel-pt-auto-level", X86CPU, intel_pt_auto_level,
6228 true),
6229 DEFINE_PROP_END_OF_LIST()
6232 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
6234 X86CPUClass *xcc = X86_CPU_CLASS(oc);
6235 CPUClass *cc = CPU_CLASS(oc);
6236 DeviceClass *dc = DEVICE_CLASS(oc);
6238 device_class_set_parent_realize(dc, x86_cpu_realizefn,
6239 &xcc->parent_realize);
6240 device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
6241 &xcc->parent_unrealize);
6242 dc->props = x86_cpu_properties;
6244 xcc->parent_reset = cc->reset;
6245 cc->reset = x86_cpu_reset;
6246 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
6248 cc->class_by_name = x86_cpu_class_by_name;
6249 cc->parse_features = x86_cpu_parse_featurestr;
6250 cc->has_work = x86_cpu_has_work;
6251 #ifdef CONFIG_TCG
6252 cc->do_interrupt = x86_cpu_do_interrupt;
6253 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
6254 #endif
6255 cc->dump_state = x86_cpu_dump_state;
6256 cc->get_crash_info = x86_cpu_get_crash_info;
6257 cc->set_pc = x86_cpu_set_pc;
6258 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
6259 cc->gdb_read_register = x86_cpu_gdb_read_register;
6260 cc->gdb_write_register = x86_cpu_gdb_write_register;
6261 cc->get_arch_id = x86_cpu_get_arch_id;
6262 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
6263 #ifndef CONFIG_USER_ONLY
6264 cc->asidx_from_attrs = x86_asidx_from_attrs;
6265 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
6266 cc->get_phys_page_attrs_debug = x86_cpu_get_phys_page_attrs_debug;
6267 cc->write_elf64_note = x86_cpu_write_elf64_note;
6268 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
6269 cc->write_elf32_note = x86_cpu_write_elf32_note;
6270 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
6271 cc->vmsd = &vmstate_x86_cpu;
6272 #endif
6273 cc->gdb_arch_name = x86_gdb_arch_name;
6274 #ifdef TARGET_X86_64
6275 cc->gdb_core_xml_file = "i386-64bit.xml";
6276 cc->gdb_num_core_regs = 66;
6277 #else
6278 cc->gdb_core_xml_file = "i386-32bit.xml";
6279 cc->gdb_num_core_regs = 50;
6280 #endif
6281 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
6282 cc->debug_excp_handler = breakpoint_handler;
6283 #endif
6284 cc->cpu_exec_enter = x86_cpu_exec_enter;
6285 cc->cpu_exec_exit = x86_cpu_exec_exit;
6286 #ifdef CONFIG_TCG
6287 cc->tcg_initialize = tcg_x86_init;
6288 cc->tlb_fill = x86_cpu_tlb_fill;
6289 #endif
6290 cc->disas_set_info = x86_disas_set_info;
6292 dc->user_creatable = true;
6295 static const TypeInfo x86_cpu_type_info = {
6296 .name = TYPE_X86_CPU,
6297 .parent = TYPE_CPU,
6298 .instance_size = sizeof(X86CPU),
6299 .instance_init = x86_cpu_initfn,
6300 .abstract = true,
6301 .class_size = sizeof(X86CPUClass),
6302 .class_init = x86_cpu_common_class_init,
6306 /* "base" CPU model, used by query-cpu-model-expansion */
6307 static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
6309 X86CPUClass *xcc = X86_CPU_CLASS(oc);
6311 xcc->static_model = true;
6312 xcc->migration_safe = true;
6313 xcc->model_description = "base CPU model type with no features enabled";
6314 xcc->ordering = 8;
6317 static const TypeInfo x86_base_cpu_type_info = {
6318 .name = X86_CPU_TYPE_NAME("base"),
6319 .parent = TYPE_X86_CPU,
6320 .class_init = x86_cpu_base_class_init,
6323 static void x86_cpu_register_types(void)
6325 int i;
6327 type_register_static(&x86_cpu_type_info);
6328 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
6329 x86_register_cpudef_types(&builtin_x86_defs[i]);
6331 type_register_static(&max_x86_cpu_type_info);
6332 type_register_static(&x86_base_cpu_type_info);
6333 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
6334 type_register_static(&host_x86_cpu_type_info);
6335 #endif
6338 type_init(x86_cpu_register_types)