s390x/mmu: Drop debug logging from MMU code
[qemu/kevin.git] / target / i386 / cpu.c
blob44f1bbdcac76fb906f6e12c9bf29589e82b9192e
1 /*
2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "qemu/units.h"
22 #include "qemu/cutils.h"
23 #include "qemu/bitops.h"
24 #include "qemu/qemu-print.h"
26 #include "cpu.h"
27 #include "exec/exec-all.h"
28 #include "sysemu/kvm.h"
29 #include "sysemu/reset.h"
30 #include "sysemu/hvf.h"
31 #include "sysemu/cpus.h"
32 #include "kvm_i386.h"
33 #include "sev_i386.h"
35 #include "qemu/error-report.h"
36 #include "qemu/module.h"
37 #include "qemu/option.h"
38 #include "qemu/config-file.h"
39 #include "qapi/error.h"
40 #include "qapi/qapi-visit-machine.h"
41 #include "qapi/qapi-visit-run-state.h"
42 #include "qapi/qmp/qdict.h"
43 #include "qapi/qmp/qerror.h"
44 #include "qapi/visitor.h"
45 #include "qom/qom-qobject.h"
46 #include "sysemu/arch_init.h"
47 #include "qapi/qapi-commands-machine-target.h"
49 #include "standard-headers/asm-x86/kvm_para.h"
51 #include "sysemu/sysemu.h"
52 #include "sysemu/tcg.h"
53 #include "hw/qdev-properties.h"
54 #include "hw/i386/topology.h"
55 #ifndef CONFIG_USER_ONLY
56 #include "exec/address-spaces.h"
57 #include "hw/xen/xen.h"
58 #include "hw/i386/apic_internal.h"
59 #include "hw/boards.h"
60 #endif
62 #include "disas/capstone.h"
64 /* Helpers for building CPUID[2] descriptors: */
66 struct CPUID2CacheDescriptorInfo {
67 enum CacheType type;
68 int level;
69 int size;
70 int line_size;
71 int associativity;
75 * Known CPUID 2 cache descriptors.
76 * From Intel SDM Volume 2A, CPUID instruction
78 struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
79 [0x06] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 8 * KiB,
80 .associativity = 4, .line_size = 32, },
81 [0x08] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 16 * KiB,
82 .associativity = 4, .line_size = 32, },
83 [0x09] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
84 .associativity = 4, .line_size = 64, },
85 [0x0A] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
86 .associativity = 2, .line_size = 32, },
87 [0x0C] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
88 .associativity = 4, .line_size = 32, },
89 [0x0D] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
90 .associativity = 4, .line_size = 64, },
91 [0x0E] = { .level = 1, .type = DATA_CACHE, .size = 24 * KiB,
92 .associativity = 6, .line_size = 64, },
93 [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
94 .associativity = 2, .line_size = 64, },
95 [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
96 .associativity = 8, .line_size = 64, },
97 /* lines per sector is not supported cpuid2_cache_descriptor(),
98 * so descriptors 0x22, 0x23 are not included
100 [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
101 .associativity = 16, .line_size = 64, },
102 /* lines per sector is not supported cpuid2_cache_descriptor(),
103 * so descriptors 0x25, 0x20 are not included
105 [0x2C] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
106 .associativity = 8, .line_size = 64, },
107 [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
108 .associativity = 8, .line_size = 64, },
109 [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
110 .associativity = 4, .line_size = 32, },
111 [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
112 .associativity = 4, .line_size = 32, },
113 [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
114 .associativity = 4, .line_size = 32, },
115 [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
116 .associativity = 4, .line_size = 32, },
117 [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
118 .associativity = 4, .line_size = 32, },
119 [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
120 .associativity = 4, .line_size = 64, },
121 [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
122 .associativity = 8, .line_size = 64, },
123 [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB,
124 .associativity = 12, .line_size = 64, },
125 /* Descriptor 0x49 depends on CPU family/model, so it is not included */
126 [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
127 .associativity = 12, .line_size = 64, },
128 [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
129 .associativity = 16, .line_size = 64, },
130 [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
131 .associativity = 12, .line_size = 64, },
132 [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB,
133 .associativity = 16, .line_size = 64, },
134 [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB,
135 .associativity = 24, .line_size = 64, },
136 [0x60] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
137 .associativity = 8, .line_size = 64, },
138 [0x66] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
139 .associativity = 4, .line_size = 64, },
140 [0x67] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
141 .associativity = 4, .line_size = 64, },
142 [0x68] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
143 .associativity = 4, .line_size = 64, },
144 [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
145 .associativity = 4, .line_size = 64, },
146 /* lines per sector is not supported cpuid2_cache_descriptor(),
147 * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
149 [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
150 .associativity = 8, .line_size = 64, },
151 [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
152 .associativity = 2, .line_size = 64, },
153 [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
154 .associativity = 8, .line_size = 64, },
155 [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
156 .associativity = 8, .line_size = 32, },
157 [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
158 .associativity = 8, .line_size = 32, },
159 [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
160 .associativity = 8, .line_size = 32, },
161 [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
162 .associativity = 8, .line_size = 32, },
163 [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
164 .associativity = 4, .line_size = 64, },
165 [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
166 .associativity = 8, .line_size = 64, },
167 [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB,
168 .associativity = 4, .line_size = 64, },
169 [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
170 .associativity = 4, .line_size = 64, },
171 [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
172 .associativity = 4, .line_size = 64, },
173 [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
174 .associativity = 8, .line_size = 64, },
175 [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
176 .associativity = 8, .line_size = 64, },
177 [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
178 .associativity = 8, .line_size = 64, },
179 [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB,
180 .associativity = 12, .line_size = 64, },
181 [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB,
182 .associativity = 12, .line_size = 64, },
183 [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
184 .associativity = 12, .line_size = 64, },
185 [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
186 .associativity = 16, .line_size = 64, },
187 [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
188 .associativity = 16, .line_size = 64, },
189 [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
190 .associativity = 16, .line_size = 64, },
191 [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
192 .associativity = 24, .line_size = 64, },
193 [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB,
194 .associativity = 24, .line_size = 64, },
195 [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB,
196 .associativity = 24, .line_size = 64, },
200 * "CPUID leaf 2 does not report cache descriptor information,
201 * use CPUID leaf 4 to query cache parameters"
203 #define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF
206 * Return a CPUID 2 cache descriptor for a given cache.
207 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
209 static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
211 int i;
213 assert(cache->size > 0);
214 assert(cache->level > 0);
215 assert(cache->line_size > 0);
216 assert(cache->associativity > 0);
217 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
218 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
219 if (d->level == cache->level && d->type == cache->type &&
220 d->size == cache->size && d->line_size == cache->line_size &&
221 d->associativity == cache->associativity) {
222 return i;
226 return CACHE_DESCRIPTOR_UNAVAILABLE;
/* CPUID Leaf 4 constants: */

/* EAX: */
#define CACHE_TYPE_D    1
#define CACHE_TYPE_I    2
#define CACHE_TYPE_UNIFIED   3
/*
 * Fix: parenthesize the macro argument.  The previous definition
 * ((l << 5) without parentheses around l) expanded incorrectly for
 * expression arguments, e.g. CACHE_LEVEL(x + 1).
 */
#define CACHE_LEVEL(l)        ((l) << 5)

#define CACHE_SELF_INIT_LEVEL (1 << 8)

/* EDX: */
#define CACHE_NO_INVD_SHARING (1 << 0)
#define CACHE_INCLUSIVE       (1 << 1)
#define CACHE_COMPLEX_IDX     (1 << 2)

/* Encode CacheType for CPUID[4].EAX */
#define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \
                       ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \
                       ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
                       0 /* Invalid value */)
252 /* Encode cache info for CPUID[4] */
253 static void encode_cache_cpuid4(CPUCacheInfo *cache,
254 int num_apic_ids, int num_cores,
255 uint32_t *eax, uint32_t *ebx,
256 uint32_t *ecx, uint32_t *edx)
258 assert(cache->size == cache->line_size * cache->associativity *
259 cache->partitions * cache->sets);
261 assert(num_apic_ids > 0);
262 *eax = CACHE_TYPE(cache->type) |
263 CACHE_LEVEL(cache->level) |
264 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
265 ((num_cores - 1) << 26) |
266 ((num_apic_ids - 1) << 14);
268 assert(cache->line_size > 0);
269 assert(cache->partitions > 0);
270 assert(cache->associativity > 0);
271 /* We don't implement fully-associative caches */
272 assert(cache->associativity < cache->sets);
273 *ebx = (cache->line_size - 1) |
274 ((cache->partitions - 1) << 12) |
275 ((cache->associativity - 1) << 22);
277 assert(cache->sets > 0);
278 *ecx = cache->sets - 1;
280 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
281 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
282 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
285 /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
286 static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
288 assert(cache->size % 1024 == 0);
289 assert(cache->lines_per_tag > 0);
290 assert(cache->associativity > 0);
291 assert(cache->line_size > 0);
292 return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
293 (cache->lines_per_tag << 8) | (cache->line_size);
#define ASSOC_FULL 0xFF

/*
 * AMD associativity encoding used on CPUID Leaf 0x80000006
 * (AMD64 Architecture Programmer's Manual Vol. 3, CPUID Fn8000_0006).
 * Fix: the argument is now fully parenthesized; the previous definition
 * used bare `a`, so an expression argument (e.g. a ternary) misparsed.
 */
#define AMD_ENC_ASSOC(a) ((a) <=   1 ? (a)  : \
                          (a) ==   2 ? 0x2  : \
                          (a) ==   4 ? 0x4  : \
                          (a) ==   8 ? 0x6  : \
                          (a) ==  16 ? 0x8  : \
                          (a) ==  32 ? 0xA  : \
                          (a) ==  48 ? 0xB  : \
                          (a) ==  64 ? 0xC  : \
                          (a) ==  96 ? 0xD  : \
                          (a) == 128 ? 0xE  : \
                          (a) == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
313 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
314 * @l3 can be NULL.
316 static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
317 CPUCacheInfo *l3,
318 uint32_t *ecx, uint32_t *edx)
320 assert(l2->size % 1024 == 0);
321 assert(l2->associativity > 0);
322 assert(l2->lines_per_tag > 0);
323 assert(l2->line_size > 0);
324 *ecx = ((l2->size / 1024) << 16) |
325 (AMD_ENC_ASSOC(l2->associativity) << 12) |
326 (l2->lines_per_tag << 8) | (l2->line_size);
328 if (l3) {
329 assert(l3->size % (512 * 1024) == 0);
330 assert(l3->associativity > 0);
331 assert(l3->lines_per_tag > 0);
332 assert(l3->line_size > 0);
333 *edx = ((l3->size / (512 * 1024)) << 18) |
334 (AMD_ENC_ASSOC(l3->associativity) << 12) |
335 (l3->lines_per_tag << 8) | (l3->line_size);
336 } else {
337 *edx = 0;
/*
 * Definitions used for building CPUID Leaf 0x8000001D and 0x8000001E
 * Please refer to the AMD64 Architecture Programmer's Manual Volume 3.
 * Define the constants to build the cpu topology. Right now, TOPOEXT
 * feature is enabled only on EPYC. So, these constants are based on
 * EPYC supported configurations. We may need to handle the cases if
 * these values change in future.
 */
/* Maximum core complexes in a node */
#define MAX_CCX 2
/* Maximum cores in a core complex */
#define MAX_CORES_IN_CCX 4
/* Maximum cores in a node */
#define MAX_CORES_IN_NODE 8
/* Maximum nodes in a socket */
#define MAX_NODES_PER_SOCKET 4
359 * Figure out the number of nodes required to build this config.
360 * Max cores in a node is 8
362 static int nodes_in_socket(int nr_cores)
364 int nodes;
366 nodes = DIV_ROUND_UP(nr_cores, MAX_CORES_IN_NODE);
368 /* Hardware does not support config with 3 nodes, return 4 in that case */
369 return (nodes == 3) ? 4 : nodes;
373 * Decide the number of cores in a core complex with the given nr_cores using
374 * following set constants MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE and
375 * MAX_NODES_PER_SOCKET. Maintain symmetry as much as possible
376 * L3 cache is shared across all cores in a core complex. So, this will also
377 * tell us how many cores are sharing the L3 cache.
379 static int cores_in_core_complex(int nr_cores)
381 int nodes;
383 /* Check if we can fit all the cores in one core complex */
384 if (nr_cores <= MAX_CORES_IN_CCX) {
385 return nr_cores;
387 /* Get the number of nodes required to build this config */
388 nodes = nodes_in_socket(nr_cores);
391 * Divide the cores accros all the core complexes
392 * Return rounded up value
394 return DIV_ROUND_UP(nr_cores, nodes * MAX_CCX);
397 /* Encode cache info for CPUID[8000001D] */
398 static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs,
399 uint32_t *eax, uint32_t *ebx,
400 uint32_t *ecx, uint32_t *edx)
402 uint32_t l3_cores;
403 assert(cache->size == cache->line_size * cache->associativity *
404 cache->partitions * cache->sets);
406 *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) |
407 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0);
409 /* L3 is shared among multiple cores */
410 if (cache->level == 3) {
411 l3_cores = cores_in_core_complex(cs->nr_cores);
412 *eax |= ((l3_cores * cs->nr_threads) - 1) << 14;
413 } else {
414 *eax |= ((cs->nr_threads - 1) << 14);
417 assert(cache->line_size > 0);
418 assert(cache->partitions > 0);
419 assert(cache->associativity > 0);
420 /* We don't implement fully-associative caches */
421 assert(cache->associativity < cache->sets);
422 *ebx = (cache->line_size - 1) |
423 ((cache->partitions - 1) << 12) |
424 ((cache->associativity - 1) << 22);
426 assert(cache->sets > 0);
427 *ecx = cache->sets - 1;
429 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
430 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
431 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
/* Data structure to hold the configuration info for a given core index */
struct core_topology {
    /* core complex id of the current core index */
    int ccx_id;
    /*
     * Adjusted core index for this core in the topology.
     * This can be 0,1,2,3 with max 4 cores in a core complex.
     */
    int core_id;
    /* Node id for this core index */
    int node_id;
    /* Number of nodes in this config */
    int num_nodes;
};
450 * Build the configuration closely match the EPYC hardware. Using the EPYC
451 * hardware configuration values (MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE)
452 * right now. This could change in future.
453 * nr_cores : Total number of cores in the config
454 * core_id : Core index of the current CPU
455 * topo : Data structure to hold all the config info for this core index
457 static void build_core_topology(int nr_cores, int core_id,
458 struct core_topology *topo)
460 int nodes, cores_in_ccx;
462 /* First get the number of nodes required */
463 nodes = nodes_in_socket(nr_cores);
465 cores_in_ccx = cores_in_core_complex(nr_cores);
467 topo->node_id = core_id / (cores_in_ccx * MAX_CCX);
468 topo->ccx_id = (core_id % (cores_in_ccx * MAX_CCX)) / cores_in_ccx;
469 topo->core_id = core_id % cores_in_ccx;
470 topo->num_nodes = nodes;
473 /* Encode cache info for CPUID[8000001E] */
474 static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu,
475 uint32_t *eax, uint32_t *ebx,
476 uint32_t *ecx, uint32_t *edx)
478 struct core_topology topo = {0};
479 unsigned long nodes;
480 int shift;
482 build_core_topology(cs->nr_cores, cpu->core_id, &topo);
483 *eax = cpu->apic_id;
485 * CPUID_Fn8000001E_EBX
486 * 31:16 Reserved
487 * 15:8 Threads per core (The number of threads per core is
488 * Threads per core + 1)
489 * 7:0 Core id (see bit decoding below)
490 * SMT:
491 * 4:3 node id
492 * 2 Core complex id
493 * 1:0 Core id
494 * Non SMT:
495 * 5:4 node id
496 * 3 Core complex id
497 * 1:0 Core id
499 if (cs->nr_threads - 1) {
500 *ebx = ((cs->nr_threads - 1) << 8) | (topo.node_id << 3) |
501 (topo.ccx_id << 2) | topo.core_id;
502 } else {
503 *ebx = (topo.node_id << 4) | (topo.ccx_id << 3) | topo.core_id;
506 * CPUID_Fn8000001E_ECX
507 * 31:11 Reserved
508 * 10:8 Nodes per processor (Nodes per processor is number of nodes + 1)
509 * 7:0 Node id (see bit decoding below)
510 * 2 Socket id
511 * 1:0 Node id
513 if (topo.num_nodes <= 4) {
514 *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << 2) |
515 topo.node_id;
516 } else {
518 * Node id fix up. Actual hardware supports up to 4 nodes. But with
519 * more than 32 cores, we may end up with more than 4 nodes.
520 * Node id is a combination of socket id and node id. Only requirement
521 * here is that this number should be unique accross the system.
522 * Shift the socket id to accommodate more nodes. We dont expect both
523 * socket id and node id to be big number at the same time. This is not
524 * an ideal config but we need to to support it. Max nodes we can have
525 * is 32 (255/8) with 8 cores per node and 255 max cores. We only need
526 * 5 bits for nodes. Find the left most set bit to represent the total
527 * number of nodes. find_last_bit returns last set bit(0 based). Left
528 * shift(+1) the socket id to represent all the nodes.
530 nodes = topo.num_nodes - 1;
531 shift = find_last_bit(&nodes, 8);
532 *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << (shift + 1)) |
533 topo.node_id;
535 *edx = 0;
539 * Definitions of the hardcoded cache entries we expose:
540 * These are legacy cache values. If there is a need to change any
541 * of these values please use builtin_x86_defs
544 /* L1 data cache: */
545 static CPUCacheInfo legacy_l1d_cache = {
546 .type = DATA_CACHE,
547 .level = 1,
548 .size = 32 * KiB,
549 .self_init = 1,
550 .line_size = 64,
551 .associativity = 8,
552 .sets = 64,
553 .partitions = 1,
554 .no_invd_sharing = true,
557 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
558 static CPUCacheInfo legacy_l1d_cache_amd = {
559 .type = DATA_CACHE,
560 .level = 1,
561 .size = 64 * KiB,
562 .self_init = 1,
563 .line_size = 64,
564 .associativity = 2,
565 .sets = 512,
566 .partitions = 1,
567 .lines_per_tag = 1,
568 .no_invd_sharing = true,
571 /* L1 instruction cache: */
572 static CPUCacheInfo legacy_l1i_cache = {
573 .type = INSTRUCTION_CACHE,
574 .level = 1,
575 .size = 32 * KiB,
576 .self_init = 1,
577 .line_size = 64,
578 .associativity = 8,
579 .sets = 64,
580 .partitions = 1,
581 .no_invd_sharing = true,
584 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
585 static CPUCacheInfo legacy_l1i_cache_amd = {
586 .type = INSTRUCTION_CACHE,
587 .level = 1,
588 .size = 64 * KiB,
589 .self_init = 1,
590 .line_size = 64,
591 .associativity = 2,
592 .sets = 512,
593 .partitions = 1,
594 .lines_per_tag = 1,
595 .no_invd_sharing = true,
598 /* Level 2 unified cache: */
599 static CPUCacheInfo legacy_l2_cache = {
600 .type = UNIFIED_CACHE,
601 .level = 2,
602 .size = 4 * MiB,
603 .self_init = 1,
604 .line_size = 64,
605 .associativity = 16,
606 .sets = 4096,
607 .partitions = 1,
608 .no_invd_sharing = true,
611 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
612 static CPUCacheInfo legacy_l2_cache_cpuid2 = {
613 .type = UNIFIED_CACHE,
614 .level = 2,
615 .size = 2 * MiB,
616 .line_size = 64,
617 .associativity = 8,
621 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
622 static CPUCacheInfo legacy_l2_cache_amd = {
623 .type = UNIFIED_CACHE,
624 .level = 2,
625 .size = 512 * KiB,
626 .line_size = 64,
627 .lines_per_tag = 1,
628 .associativity = 16,
629 .sets = 512,
630 .partitions = 1,
633 /* Level 3 unified cache: */
634 static CPUCacheInfo legacy_l3_cache = {
635 .type = UNIFIED_CACHE,
636 .level = 3,
637 .size = 16 * MiB,
638 .line_size = 64,
639 .associativity = 16,
640 .sets = 16384,
641 .partitions = 1,
642 .lines_per_tag = 1,
643 .self_init = true,
644 .inclusive = true,
645 .complex_indexing = true,
/* TLB definitions: */

#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512

/* CPUID Leaf 0x14 constants: */
#define INTEL_PT_MAX_SUBLEAF     0x1
/*
 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
 *          MSR can be accessed;
 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
 *          of Intel PT MSRs across warm reset;
 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
 */
#define INTEL_PT_MINIMAL_EBX     0xf
/*
 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
 *          IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
 *          accessed;
 * bit[01]: ToPA tables can hold any number of output entries, up to the
 *          maximum allowed by the MaskOrTableOffset field of
 *          IA32_RTIT_OUTPUT_MASK_PTRS;
 * bit[02]: Support Single-Range Output scheme;
 */
#define INTEL_PT_MINIMAL_ECX     0x7
/*
 * Generated packets which contain IP payloads have LIP values.
 * Fix: use an unsigned literal; (1 << 31) left-shifts into the sign bit
 * of a signed int, which is undefined behavior in C.
 */
#define INTEL_PT_IP_LIP          (1u << 31)
#define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
#define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
#define INTEL_PT_MTC_BITMAP      (0x0249 << 16) /* Support ART(0,3,6,9) */
#define INTEL_PT_CYCLE_BITMAP    0x1fff         /* Support 0,2^(0~11) */
#define INTEL_PT_PSB_BITMAP      (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
699 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
700 uint32_t vendor2, uint32_t vendor3)
702 int i;
703 for (i = 0; i < 4; i++) {
704 dst[i] = vendor1 >> (8 * i);
705 dst[i + 4] = vendor2 >> (8 * i);
706 dst[i + 8] = vendor3 >> (8 * i);
708 dst[CPUID_VENDOR_SZ] = '\0';
/* CPUID feature sets for the classic CPU models: */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

/* Feature bits the TCG backend can emulate, per feature word: */
#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */   \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR | \
          CPUID_EXT_RDRAND)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
          CPUID_EXT_F16C */

#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES CPUID_SVM_NPT
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT |            \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
          CPUID_7_0_EBX_ERMS)
          /* missing:
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \
          /* CPUID_7_0_ECX_OSPKE is dynamic */ \
          CPUID_7_0_ECX_LA57)
#define TCG_7_0_EDX_FEATURES 0
#define TCG_7_1_EAX_FEATURES 0
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
/* How a feature word is read from the host/guest: CPUID leaf or MSR. */
typedef enum FeatureWordType {
    CPUID_FEATURE_WORD,
    MSR_FEATURE_WORD,
} FeatureWordType;

/* Static metadata describing one feature word and its flag names. */
typedef struct FeatureWordInfo {
    FeatureWordType type;
    /*
     * Feature flag names are taken from "Intel Processor Identification
     * and the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[64];
    union {
        /* If type == CPUID_FEATURE_WORD */
        struct {
            uint32_t eax;   /* Input EAX for CPUID */
            bool needs_ecx; /* CPUID instruction uses ECX as input */
            uint32_t ecx;   /* Input ECX value for CPUID */
            int reg;        /* output register (R_* constant) */
        } cpuid;
        /* If type == MSR_FEATURE_WORD */
        struct {
            uint32_t index;
        } msr;
    };
    uint64_t tcg_features;       /* Feature flags supported by TCG */
    uint64_t unmigratable_flags; /* Feature flags known to be unmigratable */
    uint64_t migratable_flags;   /* Feature flags known to be migratable */
    /* Features that shouldn't be auto-enabled by "-cpu host" */
    uint64_t no_autoenable_flags;
} FeatureWordInfo;
813 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
814 [FEAT_1_EDX] = {
815 .type = CPUID_FEATURE_WORD,
816 .feat_names = {
817 "fpu", "vme", "de", "pse",
818 "tsc", "msr", "pae", "mce",
819 "cx8", "apic", NULL, "sep",
820 "mtrr", "pge", "mca", "cmov",
821 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
822 NULL, "ds" /* Intel dts */, "acpi", "mmx",
823 "fxsr", "sse", "sse2", "ss",
824 "ht" /* Intel htt */, "tm", "ia64", "pbe",
826 .cpuid = {.eax = 1, .reg = R_EDX, },
827 .tcg_features = TCG_FEATURES,
829 [FEAT_1_ECX] = {
830 .type = CPUID_FEATURE_WORD,
831 .feat_names = {
832 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
833 "ds-cpl", "vmx", "smx", "est",
834 "tm2", "ssse3", "cid", NULL,
835 "fma", "cx16", "xtpr", "pdcm",
836 NULL, "pcid", "dca", "sse4.1",
837 "sse4.2", "x2apic", "movbe", "popcnt",
838 "tsc-deadline", "aes", "xsave", NULL /* osxsave */,
839 "avx", "f16c", "rdrand", "hypervisor",
841 .cpuid = { .eax = 1, .reg = R_ECX, },
842 .tcg_features = TCG_EXT_FEATURES,
844 /* Feature names that are already defined on feature_name[] but
845 * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
846 * names on feat_names below. They are copied automatically
847 * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
849 [FEAT_8000_0001_EDX] = {
850 .type = CPUID_FEATURE_WORD,
851 .feat_names = {
852 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
853 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
854 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
855 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
856 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
857 "nx", NULL, "mmxext", NULL /* mmx */,
858 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
859 NULL, "lm", "3dnowext", "3dnow",
861 .cpuid = { .eax = 0x80000001, .reg = R_EDX, },
862 .tcg_features = TCG_EXT2_FEATURES,
864 [FEAT_8000_0001_ECX] = {
865 .type = CPUID_FEATURE_WORD,
866 .feat_names = {
867 "lahf-lm", "cmp-legacy", "svm", "extapic",
868 "cr8legacy", "abm", "sse4a", "misalignsse",
869 "3dnowprefetch", "osvw", "ibs", "xop",
870 "skinit", "wdt", NULL, "lwp",
871 "fma4", "tce", NULL, "nodeid-msr",
872 NULL, "tbm", "topoext", "perfctr-core",
873 "perfctr-nb", NULL, NULL, NULL,
874 NULL, NULL, NULL, NULL,
876 .cpuid = { .eax = 0x80000001, .reg = R_ECX, },
877 .tcg_features = TCG_EXT3_FEATURES,
879 * TOPOEXT is always allowed but can't be enabled blindly by
880 * "-cpu host", as it requires consistent cache topology info
881 * to be provided so it doesn't confuse guests.
883 .no_autoenable_flags = CPUID_EXT3_TOPOEXT,
885 [FEAT_C000_0001_EDX] = {
886 .type = CPUID_FEATURE_WORD,
887 .feat_names = {
888 NULL, NULL, "xstore", "xstore-en",
889 NULL, NULL, "xcrypt", "xcrypt-en",
890 "ace2", "ace2-en", "phe", "phe-en",
891 "pmm", "pmm-en", NULL, NULL,
892 NULL, NULL, NULL, NULL,
893 NULL, NULL, NULL, NULL,
894 NULL, NULL, NULL, NULL,
895 NULL, NULL, NULL, NULL,
897 .cpuid = { .eax = 0xC0000001, .reg = R_EDX, },
898 .tcg_features = TCG_EXT4_FEATURES,
900 [FEAT_KVM] = {
901 .type = CPUID_FEATURE_WORD,
902 .feat_names = {
903 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
904 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
905 NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi",
906 "kvm-poll-control", "kvm-pv-sched-yield", NULL, NULL,
907 NULL, NULL, NULL, NULL,
908 NULL, NULL, NULL, NULL,
909 "kvmclock-stable-bit", NULL, NULL, NULL,
910 NULL, NULL, NULL, NULL,
912 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EAX, },
913 .tcg_features = TCG_KVM_FEATURES,
915 [FEAT_KVM_HINTS] = {
916 .type = CPUID_FEATURE_WORD,
917 .feat_names = {
918 "kvm-hint-dedicated", NULL, NULL, NULL,
919 NULL, NULL, NULL, NULL,
920 NULL, NULL, NULL, NULL,
921 NULL, NULL, NULL, NULL,
922 NULL, NULL, NULL, NULL,
923 NULL, NULL, NULL, NULL,
924 NULL, NULL, NULL, NULL,
925 NULL, NULL, NULL, NULL,
927 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EDX, },
928 .tcg_features = TCG_KVM_FEATURES,
930 * KVM hints aren't auto-enabled by -cpu host, they need to be
931 * explicitly enabled in the command-line.
933 .no_autoenable_flags = ~0U,
936 * .feat_names are commented out for Hyper-V enlightenments because we
937 * don't want to have two different ways for enabling them on QEMU command
938 * line. Some features (e.g. "hyperv_time", "hyperv_vapic", ...) require
939 * enabling several feature bits simultaneously, exposing these bits
940 * individually may just confuse guests.
942 [FEAT_HYPERV_EAX] = {
943 .type = CPUID_FEATURE_WORD,
944 .feat_names = {
945 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
946 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
947 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
948 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
949 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
950 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
951 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
952 NULL, NULL,
953 NULL, NULL, NULL, NULL,
954 NULL, NULL, NULL, NULL,
955 NULL, NULL, NULL, NULL,
956 NULL, NULL, NULL, NULL,
958 .cpuid = { .eax = 0x40000003, .reg = R_EAX, },
960 [FEAT_HYPERV_EBX] = {
961 .type = CPUID_FEATURE_WORD,
962 .feat_names = {
963 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
964 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
965 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
966 NULL /* hv_create_port */, NULL /* hv_connect_port */,
967 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
968 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
969 NULL, NULL,
970 NULL, NULL, NULL, NULL,
971 NULL, NULL, NULL, NULL,
972 NULL, NULL, NULL, NULL,
973 NULL, NULL, NULL, NULL,
975 .cpuid = { .eax = 0x40000003, .reg = R_EBX, },
977 [FEAT_HYPERV_EDX] = {
978 .type = CPUID_FEATURE_WORD,
979 .feat_names = {
980 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
981 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
982 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
983 NULL, NULL,
984 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
985 NULL, NULL, NULL, NULL,
986 NULL, NULL, NULL, NULL,
987 NULL, NULL, NULL, NULL,
988 NULL, NULL, NULL, NULL,
989 NULL, NULL, NULL, NULL,
991 .cpuid = { .eax = 0x40000003, .reg = R_EDX, },
993 [FEAT_HV_RECOMM_EAX] = {
994 .type = CPUID_FEATURE_WORD,
995 .feat_names = {
996 NULL /* hv_recommend_pv_as_switch */,
997 NULL /* hv_recommend_pv_tlbflush_local */,
998 NULL /* hv_recommend_pv_tlbflush_remote */,
999 NULL /* hv_recommend_msr_apic_access */,
1000 NULL /* hv_recommend_msr_reset */,
1001 NULL /* hv_recommend_relaxed_timing */,
1002 NULL /* hv_recommend_dma_remapping */,
1003 NULL /* hv_recommend_int_remapping */,
1004 NULL /* hv_recommend_x2apic_msrs */,
1005 NULL /* hv_recommend_autoeoi_deprecation */,
1006 NULL /* hv_recommend_pv_ipi */,
1007 NULL /* hv_recommend_ex_hypercalls */,
1008 NULL /* hv_hypervisor_is_nested */,
1009 NULL /* hv_recommend_int_mbec */,
1010 NULL /* hv_recommend_evmcs */,
1011 NULL,
1012 NULL, NULL, NULL, NULL,
1013 NULL, NULL, NULL, NULL,
1014 NULL, NULL, NULL, NULL,
1015 NULL, NULL, NULL, NULL,
1017 .cpuid = { .eax = 0x40000004, .reg = R_EAX, },
1019 [FEAT_HV_NESTED_EAX] = {
1020 .type = CPUID_FEATURE_WORD,
1021 .cpuid = { .eax = 0x4000000A, .reg = R_EAX, },
1023 [FEAT_SVM] = {
1024 .type = CPUID_FEATURE_WORD,
1025 .feat_names = {
1026 "npt", "lbrv", "svm-lock", "nrip-save",
1027 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
1028 NULL, NULL, "pause-filter", NULL,
1029 "pfthreshold", NULL, NULL, NULL,
1030 NULL, NULL, NULL, NULL,
1031 NULL, NULL, NULL, NULL,
1032 NULL, NULL, NULL, NULL,
1033 NULL, NULL, NULL, NULL,
1035 .cpuid = { .eax = 0x8000000A, .reg = R_EDX, },
1036 .tcg_features = TCG_SVM_FEATURES,
1038 [FEAT_7_0_EBX] = {
1039 .type = CPUID_FEATURE_WORD,
1040 .feat_names = {
1041 "fsgsbase", "tsc-adjust", NULL, "bmi1",
1042 "hle", "avx2", NULL, "smep",
1043 "bmi2", "erms", "invpcid", "rtm",
1044 NULL, NULL, "mpx", NULL,
1045 "avx512f", "avx512dq", "rdseed", "adx",
1046 "smap", "avx512ifma", "pcommit", "clflushopt",
1047 "clwb", "intel-pt", "avx512pf", "avx512er",
1048 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
1050 .cpuid = {
1051 .eax = 7,
1052 .needs_ecx = true, .ecx = 0,
1053 .reg = R_EBX,
1055 .tcg_features = TCG_7_0_EBX_FEATURES,
1057 [FEAT_7_0_ECX] = {
1058 .type = CPUID_FEATURE_WORD,
1059 .feat_names = {
1060 NULL, "avx512vbmi", "umip", "pku",
1061 NULL /* ospke */, NULL, "avx512vbmi2", NULL,
1062 "gfni", "vaes", "vpclmulqdq", "avx512vnni",
1063 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
1064 "la57", NULL, NULL, NULL,
1065 NULL, NULL, "rdpid", NULL,
1066 NULL, "cldemote", NULL, "movdiri",
1067 "movdir64b", NULL, NULL, NULL,
1069 .cpuid = {
1070 .eax = 7,
1071 .needs_ecx = true, .ecx = 0,
1072 .reg = R_ECX,
1074 .tcg_features = TCG_7_0_ECX_FEATURES,
1076 [FEAT_7_0_EDX] = {
1077 .type = CPUID_FEATURE_WORD,
1078 .feat_names = {
1079 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
1080 NULL, NULL, NULL, NULL,
1081 NULL, NULL, "md-clear", NULL,
1082 NULL, NULL, NULL, NULL,
1083 NULL, NULL, NULL /* pconfig */, NULL,
1084 NULL, NULL, NULL, NULL,
1085 NULL, NULL, "spec-ctrl", "stibp",
1086 NULL, "arch-capabilities", "core-capability", "ssbd",
1088 .cpuid = {
1089 .eax = 7,
1090 .needs_ecx = true, .ecx = 0,
1091 .reg = R_EDX,
1093 .tcg_features = TCG_7_0_EDX_FEATURES,
1095 [FEAT_7_1_EAX] = {
1096 .type = CPUID_FEATURE_WORD,
1097 .feat_names = {
1098 NULL, NULL, NULL, NULL,
1099 NULL, "avx512-bf16", NULL, NULL,
1100 NULL, NULL, NULL, NULL,
1101 NULL, NULL, NULL, NULL,
1102 NULL, NULL, NULL, NULL,
1103 NULL, NULL, NULL, NULL,
1104 NULL, NULL, NULL, NULL,
1105 NULL, NULL, NULL, NULL,
1107 .cpuid = {
1108 .eax = 7,
1109 .needs_ecx = true, .ecx = 1,
1110 .reg = R_EAX,
1112 .tcg_features = TCG_7_1_EAX_FEATURES,
1114 [FEAT_8000_0007_EDX] = {
1115 .type = CPUID_FEATURE_WORD,
1116 .feat_names = {
1117 NULL, NULL, NULL, NULL,
1118 NULL, NULL, NULL, NULL,
1119 "invtsc", NULL, NULL, NULL,
1120 NULL, NULL, NULL, NULL,
1121 NULL, NULL, NULL, NULL,
1122 NULL, NULL, NULL, NULL,
1123 NULL, NULL, NULL, NULL,
1124 NULL, NULL, NULL, NULL,
1126 .cpuid = { .eax = 0x80000007, .reg = R_EDX, },
1127 .tcg_features = TCG_APM_FEATURES,
1128 .unmigratable_flags = CPUID_APM_INVTSC,
1130 [FEAT_8000_0008_EBX] = {
1131 .type = CPUID_FEATURE_WORD,
1132 .feat_names = {
1133 "clzero", NULL, "xsaveerptr", NULL,
1134 NULL, NULL, NULL, NULL,
1135 NULL, "wbnoinvd", NULL, NULL,
1136 "ibpb", NULL, NULL, NULL,
1137 NULL, NULL, NULL, NULL,
1138 NULL, NULL, NULL, NULL,
1139 "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL,
1140 NULL, NULL, NULL, NULL,
1142 .cpuid = { .eax = 0x80000008, .reg = R_EBX, },
1143 .tcg_features = 0,
1144 .unmigratable_flags = 0,
1146 [FEAT_XSAVE] = {
1147 .type = CPUID_FEATURE_WORD,
1148 .feat_names = {
1149 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
1150 NULL, NULL, NULL, NULL,
1151 NULL, NULL, NULL, NULL,
1152 NULL, NULL, NULL, NULL,
1153 NULL, NULL, NULL, NULL,
1154 NULL, NULL, NULL, NULL,
1155 NULL, NULL, NULL, NULL,
1156 NULL, NULL, NULL, NULL,
1158 .cpuid = {
1159 .eax = 0xd,
1160 .needs_ecx = true, .ecx = 1,
1161 .reg = R_EAX,
1163 .tcg_features = TCG_XSAVE_FEATURES,
1165 [FEAT_6_EAX] = {
1166 .type = CPUID_FEATURE_WORD,
1167 .feat_names = {
1168 NULL, NULL, "arat", NULL,
1169 NULL, NULL, NULL, NULL,
1170 NULL, NULL, NULL, NULL,
1171 NULL, NULL, NULL, NULL,
1172 NULL, NULL, NULL, NULL,
1173 NULL, NULL, NULL, NULL,
1174 NULL, NULL, NULL, NULL,
1175 NULL, NULL, NULL, NULL,
1177 .cpuid = { .eax = 6, .reg = R_EAX, },
1178 .tcg_features = TCG_6_EAX_FEATURES,
1180 [FEAT_XSAVE_COMP_LO] = {
1181 .type = CPUID_FEATURE_WORD,
1182 .cpuid = {
1183 .eax = 0xD,
1184 .needs_ecx = true, .ecx = 0,
1185 .reg = R_EAX,
1187 .tcg_features = ~0U,
1188 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
1189 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
1190 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
1191 XSTATE_PKRU_MASK,
1193 [FEAT_XSAVE_COMP_HI] = {
1194 .type = CPUID_FEATURE_WORD,
1195 .cpuid = {
1196 .eax = 0xD,
1197 .needs_ecx = true, .ecx = 0,
1198 .reg = R_EDX,
1200 .tcg_features = ~0U,
1202 /*Below are MSR exposed features*/
1203 [FEAT_ARCH_CAPABILITIES] = {
1204 .type = MSR_FEATURE_WORD,
1205 .feat_names = {
1206 "rdctl-no", "ibrs-all", "rsba", "skip-l1dfl-vmentry",
1207 "ssb-no", "mds-no", NULL, NULL,
1208 NULL, NULL, NULL, NULL,
1209 NULL, NULL, NULL, NULL,
1210 NULL, NULL, NULL, NULL,
1211 NULL, NULL, NULL, NULL,
1212 NULL, NULL, NULL, NULL,
1213 NULL, NULL, NULL, NULL,
1215 .msr = {
1216 .index = MSR_IA32_ARCH_CAPABILITIES,
1219 [FEAT_CORE_CAPABILITY] = {
1220 .type = MSR_FEATURE_WORD,
1221 .feat_names = {
1222 NULL, NULL, NULL, NULL,
1223 NULL, "split-lock-detect", NULL, NULL,
1224 NULL, NULL, NULL, NULL,
1225 NULL, NULL, NULL, NULL,
1226 NULL, NULL, NULL, NULL,
1227 NULL, NULL, NULL, NULL,
1228 NULL, NULL, NULL, NULL,
1229 NULL, NULL, NULL, NULL,
1231 .msr = {
1232 .index = MSR_IA32_CORE_CAPABILITY,
1236 [FEAT_VMX_PROCBASED_CTLS] = {
1237 .type = MSR_FEATURE_WORD,
1238 .feat_names = {
1239 NULL, NULL, "vmx-vintr-pending", "vmx-tsc-offset",
1240 NULL, NULL, NULL, "vmx-hlt-exit",
1241 NULL, "vmx-invlpg-exit", "vmx-mwait-exit", "vmx-rdpmc-exit",
1242 "vmx-rdtsc-exit", NULL, NULL, "vmx-cr3-load-noexit",
1243 "vmx-cr3-store-noexit", NULL, NULL, "vmx-cr8-load-exit",
1244 "vmx-cr8-store-exit", "vmx-flexpriority", "vmx-vnmi-pending", "vmx-movdr-exit",
1245 "vmx-io-exit", "vmx-io-bitmap", NULL, "vmx-mtf",
1246 "vmx-msr-bitmap", "vmx-monitor-exit", "vmx-pause-exit", "vmx-secondary-ctls",
1248 .msr = {
1249 .index = MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
1253 [FEAT_VMX_SECONDARY_CTLS] = {
1254 .type = MSR_FEATURE_WORD,
1255 .feat_names = {
1256 "vmx-apicv-xapic", "vmx-ept", "vmx-desc-exit", "vmx-rdtscp-exit",
1257 "vmx-apicv-x2apic", "vmx-vpid", "vmx-wbinvd-exit", "vmx-unrestricted-guest",
1258 "vmx-apicv-register", "vmx-apicv-vid", "vmx-ple", "vmx-rdrand-exit",
1259 "vmx-invpcid-exit", "vmx-vmfunc", "vmx-shadow-vmcs", "vmx-encls-exit",
1260 "vmx-rdseed-exit", "vmx-pml", NULL, NULL,
1261 "vmx-xsaves", NULL, NULL, NULL,
1262 NULL, NULL, NULL, NULL,
1263 NULL, NULL, NULL, NULL,
1265 .msr = {
1266 .index = MSR_IA32_VMX_PROCBASED_CTLS2,
1270 [FEAT_VMX_PINBASED_CTLS] = {
1271 .type = MSR_FEATURE_WORD,
1272 .feat_names = {
1273 "vmx-intr-exit", NULL, NULL, "vmx-nmi-exit",
1274 NULL, "vmx-vnmi", "vmx-preemption-timer", "vmx-posted-intr",
1275 NULL, NULL, NULL, NULL,
1276 NULL, NULL, NULL, NULL,
1277 NULL, NULL, NULL, NULL,
1278 NULL, NULL, NULL, NULL,
1279 NULL, NULL, NULL, NULL,
1280 NULL, NULL, NULL, NULL,
1282 .msr = {
1283 .index = MSR_IA32_VMX_TRUE_PINBASED_CTLS,
1287 [FEAT_VMX_EXIT_CTLS] = {
1288 .type = MSR_FEATURE_WORD,
1290 * VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE is copied from
1291 * the LM CPUID bit.
1293 .feat_names = {
1294 NULL, NULL, "vmx-exit-nosave-debugctl", NULL,
1295 NULL, NULL, NULL, NULL,
1296 NULL, NULL /* vmx-exit-host-addr-space-size */, NULL, NULL,
1297 "vmx-exit-load-perf-global-ctrl", NULL, NULL, "vmx-exit-ack-intr",
1298 NULL, NULL, "vmx-exit-save-pat", "vmx-exit-load-pat",
1299 "vmx-exit-save-efer", "vmx-exit-load-efer",
1300 "vmx-exit-save-preemption-timer", "vmx-exit-clear-bndcfgs",
1301 NULL, "vmx-exit-clear-rtit-ctl", NULL, NULL,
1302 NULL, NULL, NULL, NULL,
1304 .msr = {
1305 .index = MSR_IA32_VMX_TRUE_EXIT_CTLS,
1309 [FEAT_VMX_ENTRY_CTLS] = {
1310 .type = MSR_FEATURE_WORD,
1311 .feat_names = {
1312 NULL, NULL, "vmx-entry-noload-debugctl", NULL,
1313 NULL, NULL, NULL, NULL,
1314 NULL, "vmx-entry-ia32e-mode", NULL, NULL,
1315 NULL, "vmx-entry-load-perf-global-ctrl", "vmx-entry-load-pat", "vmx-entry-load-efer",
1316 "vmx-entry-load-bndcfgs", NULL, "vmx-entry-load-rtit-ctl", NULL,
1317 NULL, NULL, NULL, NULL,
1318 NULL, NULL, NULL, NULL,
1319 NULL, NULL, NULL, NULL,
1321 .msr = {
1322 .index = MSR_IA32_VMX_TRUE_ENTRY_CTLS,
1326 [FEAT_VMX_MISC] = {
1327 .type = MSR_FEATURE_WORD,
1328 .feat_names = {
1329 NULL, NULL, NULL, NULL,
1330 NULL, "vmx-store-lma", "vmx-activity-hlt", "vmx-activity-shutdown",
1331 "vmx-activity-wait-sipi", NULL, NULL, NULL,
1332 NULL, NULL, NULL, NULL,
1333 NULL, NULL, NULL, NULL,
1334 NULL, NULL, NULL, NULL,
1335 NULL, NULL, NULL, NULL,
1336 NULL, "vmx-vmwrite-vmexit-fields", "vmx-zero-len-inject", NULL,
1338 .msr = {
1339 .index = MSR_IA32_VMX_MISC,
1343 [FEAT_VMX_EPT_VPID_CAPS] = {
1344 .type = MSR_FEATURE_WORD,
1345 .feat_names = {
1346 "vmx-ept-execonly", NULL, NULL, NULL,
1347 NULL, NULL, "vmx-page-walk-4", "vmx-page-walk-5",
1348 NULL, NULL, NULL, NULL,
1349 NULL, NULL, NULL, NULL,
1350 "vmx-ept-2mb", "vmx-ept-1gb", NULL, NULL,
1351 "vmx-invept", "vmx-eptad", "vmx-ept-advanced-exitinfo", NULL,
1352 NULL, "vmx-invept-single-context", "vmx-invept-all-context", NULL,
1353 NULL, NULL, NULL, NULL,
1354 "vmx-invvpid", NULL, NULL, NULL,
1355 NULL, NULL, NULL, NULL,
1356 "vmx-invvpid-single-addr", "vmx-invept-single-context",
1357 "vmx-invvpid-all-context", "vmx-invept-single-context-noglobals",
1358 NULL, NULL, NULL, NULL,
1359 NULL, NULL, NULL, NULL,
1360 NULL, NULL, NULL, NULL,
1361 NULL, NULL, NULL, NULL,
1362 NULL, NULL, NULL, NULL,
1364 .msr = {
1365 .index = MSR_IA32_VMX_EPT_VPID_CAP,
1369 [FEAT_VMX_BASIC] = {
1370 .type = MSR_FEATURE_WORD,
1371 .feat_names = {
1372 [54] = "vmx-ins-outs",
1373 [55] = "vmx-true-ctls",
1375 .msr = {
1376 .index = MSR_IA32_VMX_BASIC,
1378 /* Just to be safe - we don't support setting the MSEG version field. */
1379 .no_autoenable_flags = MSR_VMX_BASIC_DUAL_MONITOR,
1382 [FEAT_VMX_VMFUNC] = {
1383 .type = MSR_FEATURE_WORD,
1384 .feat_names = {
1385 [0] = "vmx-eptp-switching",
1387 .msr = {
1388 .index = MSR_IA32_VMX_VMFUNC,
1394 typedef struct FeatureMask {
1395 FeatureWord index;
1396 uint64_t mask;
1397 } FeatureMask;
1399 typedef struct FeatureDep {
1400 FeatureMask from, to;
1401 } FeatureDep;
1403 static FeatureDep feature_dependencies[] = {
1405 .from = { FEAT_7_0_EDX, CPUID_7_0_EDX_ARCH_CAPABILITIES },
1406 .to = { FEAT_ARCH_CAPABILITIES, ~0ull },
1409 .from = { FEAT_7_0_EDX, CPUID_7_0_EDX_CORE_CAPABILITY },
1410 .to = { FEAT_CORE_CAPABILITY, ~0ull },
1413 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1414 .to = { FEAT_VMX_PROCBASED_CTLS, ~0ull },
1417 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1418 .to = { FEAT_VMX_PINBASED_CTLS, ~0ull },
1421 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1422 .to = { FEAT_VMX_EXIT_CTLS, ~0ull },
1425 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1426 .to = { FEAT_VMX_ENTRY_CTLS, ~0ull },
1429 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1430 .to = { FEAT_VMX_MISC, ~0ull },
1433 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1434 .to = { FEAT_VMX_BASIC, ~0ull },
1437 .from = { FEAT_8000_0001_EDX, CPUID_EXT2_LM },
1438 .to = { FEAT_VMX_ENTRY_CTLS, VMX_VM_ENTRY_IA32E_MODE },
1441 .from = { FEAT_VMX_PROCBASED_CTLS, VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS },
1442 .to = { FEAT_VMX_SECONDARY_CTLS, ~0ull },
1445 .from = { FEAT_XSAVE, CPUID_XSAVE_XSAVES },
1446 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_XSAVES },
1449 .from = { FEAT_1_ECX, CPUID_EXT_RDRAND },
1450 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDRAND_EXITING },
1453 .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_INVPCID },
1454 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_INVPCID },
1457 .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_RDSEED },
1458 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDSEED_EXITING },
1461 .from = { FEAT_8000_0001_EDX, CPUID_EXT2_RDTSCP },
1462 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDTSCP },
1465 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_EPT },
1466 .to = { FEAT_VMX_EPT_VPID_CAPS, 0xffffffffull },
1469 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_EPT },
1470 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST },
1473 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_VPID },
1474 .to = { FEAT_VMX_EPT_VPID_CAPS, 0xffffffffull << 32 },
1477 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_VMFUNC },
1478 .to = { FEAT_VMX_VMFUNC, ~0ull },
1482 typedef struct X86RegisterInfo32 {
1483 /* Name of register */
1484 const char *name;
1485 /* QAPI enum value register */
1486 X86CPURegister32 qapi_enum;
1487 } X86RegisterInfo32;
1489 #define REGISTER(reg) \
1490 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
1491 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
1492 REGISTER(EAX),
1493 REGISTER(ECX),
1494 REGISTER(EDX),
1495 REGISTER(EBX),
1496 REGISTER(ESP),
1497 REGISTER(EBP),
1498 REGISTER(ESI),
1499 REGISTER(EDI),
1501 #undef REGISTER
/*
 * Descriptor for one XSAVE state component: the CPUID feature word/bits
 * that advertise it, and its offset/size within the XSAVE area.
 */
typedef struct ExtSaveArea {
    uint32_t feature, bits;   /* feature word index and required bits */
    uint32_t offset, size;    /* component location in the XSAVE area */
} ExtSaveArea;
1508 static const ExtSaveArea x86_ext_save_areas[] = {
1509 [XSTATE_FP_BIT] = {
1510 /* x87 FP state component is always enabled if XSAVE is supported */
1511 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
1512 /* x87 state is in the legacy region of the XSAVE area */
1513 .offset = 0,
1514 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
1516 [XSTATE_SSE_BIT] = {
1517 /* SSE state component is always enabled if XSAVE is supported */
1518 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
1519 /* SSE state is in the legacy region of the XSAVE area */
1520 .offset = 0,
1521 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
1523 [XSTATE_YMM_BIT] =
1524 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
1525 .offset = offsetof(X86XSaveArea, avx_state),
1526 .size = sizeof(XSaveAVX) },
1527 [XSTATE_BNDREGS_BIT] =
1528 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
1529 .offset = offsetof(X86XSaveArea, bndreg_state),
1530 .size = sizeof(XSaveBNDREG) },
1531 [XSTATE_BNDCSR_BIT] =
1532 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
1533 .offset = offsetof(X86XSaveArea, bndcsr_state),
1534 .size = sizeof(XSaveBNDCSR) },
1535 [XSTATE_OPMASK_BIT] =
1536 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1537 .offset = offsetof(X86XSaveArea, opmask_state),
1538 .size = sizeof(XSaveOpmask) },
1539 [XSTATE_ZMM_Hi256_BIT] =
1540 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1541 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
1542 .size = sizeof(XSaveZMM_Hi256) },
1543 [XSTATE_Hi16_ZMM_BIT] =
1544 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1545 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
1546 .size = sizeof(XSaveHi16_ZMM) },
1547 [XSTATE_PKRU_BIT] =
1548 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
1549 .offset = offsetof(X86XSaveArea, pkru_state),
1550 .size = sizeof(XSavePKRU) },
1553 static uint32_t xsave_area_size(uint64_t mask)
1555 int i;
1556 uint64_t ret = 0;
1558 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
1559 const ExtSaveArea *esa = &x86_ext_save_areas[i];
1560 if ((mask >> i) & 1) {
1561 ret = MAX(ret, esa->offset + esa->size);
1564 return ret;
/* True if the current accelerator (KVM or HVF) exposes the host's CPUID. */
static inline bool accel_uses_host_cpuid(void)
{
    return kvm_enabled() || hvf_enabled();
}
1572 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
1574 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
1575 cpu->env.features[FEAT_XSAVE_COMP_LO];
1578 const char *get_register_name_32(unsigned int reg)
1580 if (reg >= CPU_NB_REGS32) {
1581 return NULL;
1583 return x86_reg_info_32[reg].name;
1587 * Returns the set of feature flags that are supported and migratable by
1588 * QEMU, for a given FeatureWord.
1590 static uint64_t x86_cpu_get_migratable_flags(FeatureWord w)
1592 FeatureWordInfo *wi = &feature_word_info[w];
1593 uint64_t r = 0;
1594 int i;
1596 for (i = 0; i < 64; i++) {
1597 uint64_t f = 1ULL << i;
1599 /* If the feature name is known, it is implicitly considered migratable,
1600 * unless it is explicitly set in unmigratable_flags */
1601 if ((wi->migratable_flags & f) ||
1602 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
1603 r |= f;
1606 return r;
/*
 * Execute the CPUID instruction on the host for leaf @function /
 * subleaf @count and store the results through any non-NULL output
 * pointers.  Aborts on non-x86 hosts.
 */
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    __asm__ __volatile__("cpuid"
                         : "=a"(vec[0]), "=b"(vec[1]),
                           "=c"(vec[2]), "=d"(vec[3])
                         : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    /*
     * 32-bit: %ebx may be the PIC register, so save/restore everything
     * with pusha/popa and move the results out through %esi.
     */
    __asm__ __volatile__("pusha \n\t"
                         "cpuid \n\t"
                         "mov %%eax, 0(%2) \n\t"
                         "mov %%ebx, 4(%2) \n\t"
                         "mov %%ecx, 8(%2) \n\t"
                         "mov %%edx, 12(%2) \n\t"
                         "popa"
                         : : "a"(function), "c"(count), "S"(vec)
                         : "memory", "cc");
#else
    abort();
#endif

    if (eax) {
        *eax = vec[0];
    }
    if (ebx) {
        *ebx = vec[1];
    }
    if (ecx) {
        *ecx = vec[2];
    }
    if (edx) {
        *edx = vec[3];
    }
}
/*
 * Query the host CPU's vendor string and family/model/stepping via
 * CPUID leaves 0 and 1.  @vendor must hold at least CPUID_VENDOR_SZ + 1
 * bytes; @family/@model/@stepping may each be NULL if not wanted.
 */
void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
{
    uint32_t eax, ebx, ecx, edx;

    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    if (family) {
        /* base family + extended family */
        *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    }
    if (model) {
        /* base model | (extended model << 4) */
        *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    }
    if (stepping) {
        *stepping = eax & 0x0F;
    }
}
/* CPU class name definitions: */

/*
 * Return the QOM type name for a given CPU model name.
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}
1672 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
1674 ObjectClass *oc;
1675 char *typename = x86_cpu_type_name(cpu_model);
1676 oc = object_class_by_name(typename);
1677 g_free(typename);
1678 return oc;
1681 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
1683 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
1684 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
1685 return g_strndup(class_name,
1686 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
/* A (property name, value) pair, used for lists of property settings. */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;
1693 typedef struct X86CPUVersionDefinition {
1694 X86CPUVersion version;
1695 const char *alias;
1696 PropValue *props;
1697 } X86CPUVersionDefinition;
1699 /* Base definition for a CPU model */
1700 typedef struct X86CPUDefinition {
1701 const char *name;
1702 uint32_t level;
1703 uint32_t xlevel;
1704 /* vendor is zero-terminated, 12 character ASCII string */
1705 char vendor[CPUID_VENDOR_SZ + 1];
1706 int family;
1707 int model;
1708 int stepping;
1709 FeatureWordArray features;
1710 const char *model_id;
1711 CPUCaches *cache_info;
1713 * Definitions for alternative versions of CPU model.
1714 * List is terminated by item with version == 0.
1715 * If NULL, version 1 will be registered automatically.
1717 const X86CPUVersionDefinition *versions;
1718 } X86CPUDefinition;
1720 /* Reference to a specific CPU model version */
1721 struct X86CPUModel {
1722 /* Base CPU definition */
1723 X86CPUDefinition *cpudef;
1724 /* CPU model version */
1725 X86CPUVersion version;
1727 * If true, this is an alias CPU model.
1728 * This matters only for "-cpu help" and query-cpu-definitions
1730 bool is_alias;
1733 /* Get full model name for CPU version */
1734 static char *x86_cpu_versioned_model_name(X86CPUDefinition *cpudef,
1735 X86CPUVersion version)
1737 assert(version > 0);
1738 return g_strdup_printf("%s-v%d", cpudef->name, (int)version);
1741 static const X86CPUVersionDefinition *x86_cpu_def_get_versions(X86CPUDefinition *def)
1743 /* When X86CPUDefinition::versions is NULL, we register only v1 */
1744 static const X86CPUVersionDefinition default_version_list[] = {
1745 { 1 },
1746 { /* end of list */ }
1749 return def->versions ?: default_version_list;
1752 static CPUCaches epyc_cache_info = {
1753 .l1d_cache = &(CPUCacheInfo) {
1754 .type = DATA_CACHE,
1755 .level = 1,
1756 .size = 32 * KiB,
1757 .line_size = 64,
1758 .associativity = 8,
1759 .partitions = 1,
1760 .sets = 64,
1761 .lines_per_tag = 1,
1762 .self_init = 1,
1763 .no_invd_sharing = true,
1765 .l1i_cache = &(CPUCacheInfo) {
1766 .type = INSTRUCTION_CACHE,
1767 .level = 1,
1768 .size = 64 * KiB,
1769 .line_size = 64,
1770 .associativity = 4,
1771 .partitions = 1,
1772 .sets = 256,
1773 .lines_per_tag = 1,
1774 .self_init = 1,
1775 .no_invd_sharing = true,
1777 .l2_cache = &(CPUCacheInfo) {
1778 .type = UNIFIED_CACHE,
1779 .level = 2,
1780 .size = 512 * KiB,
1781 .line_size = 64,
1782 .associativity = 8,
1783 .partitions = 1,
1784 .sets = 1024,
1785 .lines_per_tag = 1,
1787 .l3_cache = &(CPUCacheInfo) {
1788 .type = UNIFIED_CACHE,
1789 .level = 3,
1790 .size = 8 * MiB,
1791 .line_size = 64,
1792 .associativity = 16,
1793 .partitions = 1,
1794 .sets = 8192,
1795 .lines_per_tag = 1,
1796 .self_init = true,
1797 .inclusive = true,
1798 .complex_indexing = true,
1802 static X86CPUDefinition builtin_x86_defs[] = {
1804 .name = "qemu64",
1805 .level = 0xd,
1806 .vendor = CPUID_VENDOR_AMD,
1807 .family = 6,
1808 .model = 6,
1809 .stepping = 3,
1810 .features[FEAT_1_EDX] =
1811 PPRO_FEATURES |
1812 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1813 CPUID_PSE36,
1814 .features[FEAT_1_ECX] =
1815 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1816 .features[FEAT_8000_0001_EDX] =
1817 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1818 .features[FEAT_8000_0001_ECX] =
1819 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
1820 .xlevel = 0x8000000A,
1821 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1824 .name = "phenom",
1825 .level = 5,
1826 .vendor = CPUID_VENDOR_AMD,
1827 .family = 16,
1828 .model = 2,
1829 .stepping = 3,
1830 /* Missing: CPUID_HT */
1831 .features[FEAT_1_EDX] =
1832 PPRO_FEATURES |
1833 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1834 CPUID_PSE36 | CPUID_VME,
1835 .features[FEAT_1_ECX] =
1836 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
1837 CPUID_EXT_POPCNT,
1838 .features[FEAT_8000_0001_EDX] =
1839 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
1840 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
1841 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
1842 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1843 CPUID_EXT3_CR8LEG,
1844 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1845 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
1846 .features[FEAT_8000_0001_ECX] =
1847 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
1848 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
1849 /* Missing: CPUID_SVM_LBRV */
1850 .features[FEAT_SVM] =
1851 CPUID_SVM_NPT,
1852 .xlevel = 0x8000001A,
1853 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
1856 .name = "core2duo",
1857 .level = 10,
1858 .vendor = CPUID_VENDOR_INTEL,
1859 .family = 6,
1860 .model = 15,
1861 .stepping = 11,
1862 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1863 .features[FEAT_1_EDX] =
1864 PPRO_FEATURES |
1865 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1866 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
1867 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
1868 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
1869 .features[FEAT_1_ECX] =
1870 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1871 CPUID_EXT_CX16,
1872 .features[FEAT_8000_0001_EDX] =
1873 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1874 .features[FEAT_8000_0001_ECX] =
1875 CPUID_EXT3_LAHF_LM,
1876 .xlevel = 0x80000008,
1877 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
1880 .name = "kvm64",
1881 .level = 0xd,
1882 .vendor = CPUID_VENDOR_INTEL,
1883 .family = 15,
1884 .model = 6,
1885 .stepping = 1,
1886 /* Missing: CPUID_HT */
1887 .features[FEAT_1_EDX] =
1888 PPRO_FEATURES | CPUID_VME |
1889 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1890 CPUID_PSE36,
1891 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
1892 .features[FEAT_1_ECX] =
1893 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1894 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
1895 .features[FEAT_8000_0001_EDX] =
1896 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1897 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1898 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
1899 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1900 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
1901 .features[FEAT_8000_0001_ECX] =
1903 .xlevel = 0x80000008,
1904 .model_id = "Common KVM processor"
1907 .name = "qemu32",
1908 .level = 4,
1909 .vendor = CPUID_VENDOR_INTEL,
1910 .family = 6,
1911 .model = 6,
1912 .stepping = 3,
1913 .features[FEAT_1_EDX] =
1914 PPRO_FEATURES,
1915 .features[FEAT_1_ECX] =
1916 CPUID_EXT_SSE3,
1917 .xlevel = 0x80000004,
1918 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1921 .name = "kvm32",
1922 .level = 5,
1923 .vendor = CPUID_VENDOR_INTEL,
1924 .family = 15,
1925 .model = 6,
1926 .stepping = 1,
1927 .features[FEAT_1_EDX] =
1928 PPRO_FEATURES | CPUID_VME |
1929 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
1930 .features[FEAT_1_ECX] =
1931 CPUID_EXT_SSE3,
1932 .features[FEAT_8000_0001_ECX] =
1934 .xlevel = 0x80000008,
1935 .model_id = "Common 32-bit KVM processor"
1938 .name = "coreduo",
1939 .level = 10,
1940 .vendor = CPUID_VENDOR_INTEL,
1941 .family = 6,
1942 .model = 14,
1943 .stepping = 8,
1944 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1945 .features[FEAT_1_EDX] =
1946 PPRO_FEATURES | CPUID_VME |
1947 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
1948 CPUID_SS,
1949 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
1950 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
1951 .features[FEAT_1_ECX] =
1952 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
1953 .features[FEAT_8000_0001_EDX] =
1954 CPUID_EXT2_NX,
1955 .xlevel = 0x80000008,
1956 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
1959 .name = "486",
1960 .level = 1,
1961 .vendor = CPUID_VENDOR_INTEL,
1962 .family = 4,
1963 .model = 8,
1964 .stepping = 0,
1965 .features[FEAT_1_EDX] =
1966 I486_FEATURES,
1967 .xlevel = 0,
1968 .model_id = "",
1971 .name = "pentium",
1972 .level = 1,
1973 .vendor = CPUID_VENDOR_INTEL,
1974 .family = 5,
1975 .model = 4,
1976 .stepping = 3,
1977 .features[FEAT_1_EDX] =
1978 PENTIUM_FEATURES,
1979 .xlevel = 0,
1980 .model_id = "",
1983 .name = "pentium2",
1984 .level = 2,
1985 .vendor = CPUID_VENDOR_INTEL,
1986 .family = 6,
1987 .model = 5,
1988 .stepping = 2,
1989 .features[FEAT_1_EDX] =
1990 PENTIUM2_FEATURES,
1991 .xlevel = 0,
1992 .model_id = "",
1995 .name = "pentium3",
1996 .level = 3,
1997 .vendor = CPUID_VENDOR_INTEL,
1998 .family = 6,
1999 .model = 7,
2000 .stepping = 3,
2001 .features[FEAT_1_EDX] =
2002 PENTIUM3_FEATURES,
2003 .xlevel = 0,
2004 .model_id = "",
2007 .name = "athlon",
2008 .level = 2,
2009 .vendor = CPUID_VENDOR_AMD,
2010 .family = 6,
2011 .model = 2,
2012 .stepping = 3,
2013 .features[FEAT_1_EDX] =
2014 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
2015 CPUID_MCA,
2016 .features[FEAT_8000_0001_EDX] =
2017 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
2018 .xlevel = 0x80000008,
2019 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
/* "n270": Intel Atom N270 (family 6, model 28). Adds SSSE3/MOVBE on
 * top of the PPRO base; the inline "Missing:" notes list real-hardware
 * bits deliberately not exposed here. */
2022 .name = "n270",
2023 .level = 10,
2024 .vendor = CPUID_VENDOR_INTEL,
2025 .family = 6,
2026 .model = 28,
2027 .stepping = 2,
2028 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
2029 .features[FEAT_1_EDX] =
2030 PPRO_FEATURES |
2031 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
2032 CPUID_ACPI | CPUID_SS,
2033 /* Some CPUs got no CPUID_SEP */
2034 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
2035 * CPUID_EXT_XTPR */
2036 .features[FEAT_1_ECX] =
2037 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
2038 CPUID_EXT_MOVBE,
2039 .features[FEAT_8000_0001_EDX] =
2040 CPUID_EXT2_NX,
2041 .features[FEAT_8000_0001_ECX] =
2042 CPUID_EXT3_LAHF_LM,
2043 .xlevel = 0x80000008,
2044 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
/* "Conroe": Core 2 (Merom/Conroe) class, family 6 model 15.
 * 64-bit capable (LM/NX/SYSCALL) with SSE3/SSSE3 but no SSE4. */
2047 .name = "Conroe",
2048 .level = 10,
2049 .vendor = CPUID_VENDOR_INTEL,
2050 .family = 6,
2051 .model = 15,
2052 .stepping = 3,
2053 .features[FEAT_1_EDX] =
2054 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2055 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2056 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2057 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2058 CPUID_DE | CPUID_FP87,
2059 .features[FEAT_1_ECX] =
2060 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
2061 .features[FEAT_8000_0001_EDX] =
2062 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2063 .features[FEAT_8000_0001_ECX] =
2064 CPUID_EXT3_LAHF_LM,
2065 .xlevel = 0x80000008,
2066 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
/* "Penryn": Core 2 refresh, family 6 model 23; Conroe plus
 * SSE4.1 and CX16. */
2069 .name = "Penryn",
2070 .level = 10,
2071 .vendor = CPUID_VENDOR_INTEL,
2072 .family = 6,
2073 .model = 23,
2074 .stepping = 3,
2075 .features[FEAT_1_EDX] =
2076 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2077 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2078 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2079 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2080 CPUID_DE | CPUID_FP87,
2081 .features[FEAT_1_ECX] =
2082 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2083 CPUID_EXT_SSE3,
2084 .features[FEAT_8000_0001_EDX] =
2085 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2086 .features[FEAT_8000_0001_ECX] =
2087 CPUID_EXT3_LAHF_LM,
2088 .xlevel = 0x80000008,
2089 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
/* "Nehalem": first Core i7 class (family 6, model 26); adds SSE4.2
 * and POPCNT. Versioned: -v2 / alias "Nehalem-IBRS" turns on
 * spec-ctrl (IBRS) and renames the model-id accordingly. */
2092 .name = "Nehalem",
2093 .level = 11,
2094 .vendor = CPUID_VENDOR_INTEL,
2095 .family = 6,
2096 .model = 26,
2097 .stepping = 3,
2098 .features[FEAT_1_EDX] =
2099 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2100 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2101 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2102 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2103 CPUID_DE | CPUID_FP87,
2104 .features[FEAT_1_ECX] =
2105 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2106 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
2107 .features[FEAT_8000_0001_EDX] =
2108 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
2109 .features[FEAT_8000_0001_ECX] =
2110 CPUID_EXT3_LAHF_LM,
2111 .xlevel = 0x80000008,
2112 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
2113 .versions = (X86CPUVersionDefinition[]) {
2114 { .version = 1 },
2116 .version = 2,
2117 .alias = "Nehalem-IBRS",
2118 .props = (PropValue[]) {
2119 { "spec-ctrl", "on" },
2120 { "model-id",
2121 "Intel Core i7 9xx (Nehalem Core i7, IBRS update)" },
2122 { /* end of list */ }
2125 { /* end of list */ }
/* "Westmere": Nehalem die-shrink (family 6, model 44); adds AES-NI
 * and PCLMULQDQ, plus ARAT in leaf 6. -v2 / "Westmere-IBRS" enables
 * spec-ctrl. */
2129 .name = "Westmere",
2130 .level = 11,
2131 .vendor = CPUID_VENDOR_INTEL,
2132 .family = 6,
2133 .model = 44,
2134 .stepping = 1,
2135 .features[FEAT_1_EDX] =
2136 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2137 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2138 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2139 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2140 CPUID_DE | CPUID_FP87,
2141 .features[FEAT_1_ECX] =
2142 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
2143 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2144 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2145 .features[FEAT_8000_0001_EDX] =
2146 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
2147 .features[FEAT_8000_0001_ECX] =
2148 CPUID_EXT3_LAHF_LM,
2149 .features[FEAT_6_EAX] =
2150 CPUID_6_EAX_ARAT,
2151 .xlevel = 0x80000008,
2152 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
2153 .versions = (X86CPUVersionDefinition[]) {
2154 { .version = 1 },
2156 .version = 2,
2157 .alias = "Westmere-IBRS",
2158 .props = (PropValue[]) {
2159 { "spec-ctrl", "on" },
2160 { "model-id",
2161 "Westmere E56xx/L56xx/X56xx (IBRS update)" },
2162 { /* end of list */ }
2165 { /* end of list */ }
/* "SandyBridge": family 6, model 42; first model here with AVX,
 * XSAVE (+XSAVEOPT), x2APIC and the TSC-deadline timer.
 * -v2 / "SandyBridge-IBRS" enables spec-ctrl. */
2169 .name = "SandyBridge",
2170 .level = 0xd,
2171 .vendor = CPUID_VENDOR_INTEL,
2172 .family = 6,
2173 .model = 42,
2174 .stepping = 1,
2175 .features[FEAT_1_EDX] =
2176 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2177 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2178 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2179 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2180 CPUID_DE | CPUID_FP87,
2181 .features[FEAT_1_ECX] =
2182 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2183 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
2184 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2185 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2186 CPUID_EXT_SSE3,
2187 .features[FEAT_8000_0001_EDX] =
2188 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2189 CPUID_EXT2_SYSCALL,
2190 .features[FEAT_8000_0001_ECX] =
2191 CPUID_EXT3_LAHF_LM,
2192 .features[FEAT_XSAVE] =
2193 CPUID_XSAVE_XSAVEOPT,
2194 .features[FEAT_6_EAX] =
2195 CPUID_6_EAX_ARAT,
2196 .xlevel = 0x80000008,
2197 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
2198 .versions = (X86CPUVersionDefinition[]) {
2199 { .version = 1 },
2201 .version = 2,
2202 .alias = "SandyBridge-IBRS",
2203 .props = (PropValue[]) {
2204 { "spec-ctrl", "on" },
2205 { "model-id",
2206 "Intel Xeon E312xx (Sandy Bridge, IBRS update)" },
2207 { /* end of list */ }
2210 { /* end of list */ }
/* "IvyBridge": family 6, model 58; SandyBridge plus F16C, RDRAND,
 * and the first leaf-7 EBX bits (FSGSBASE/SMEP/ERMS).
 * -v2 / "IvyBridge-IBRS" enables spec-ctrl. */
2214 .name = "IvyBridge",
2215 .level = 0xd,
2216 .vendor = CPUID_VENDOR_INTEL,
2217 .family = 6,
2218 .model = 58,
2219 .stepping = 9,
2220 .features[FEAT_1_EDX] =
2221 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2222 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2223 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2224 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2225 CPUID_DE | CPUID_FP87,
2226 .features[FEAT_1_ECX] =
2227 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2228 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
2229 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2230 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2231 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2232 .features[FEAT_7_0_EBX] =
2233 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
2234 CPUID_7_0_EBX_ERMS,
2235 .features[FEAT_8000_0001_EDX] =
2236 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2237 CPUID_EXT2_SYSCALL,
2238 .features[FEAT_8000_0001_ECX] =
2239 CPUID_EXT3_LAHF_LM,
2240 .features[FEAT_XSAVE] =
2241 CPUID_XSAVE_XSAVEOPT,
2242 .features[FEAT_6_EAX] =
2243 CPUID_6_EAX_ARAT,
2244 .xlevel = 0x80000008,
2245 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
2246 .versions = (X86CPUVersionDefinition[]) {
2247 { .version = 1 },
2249 .version = 2,
2250 .alias = "IvyBridge-IBRS",
2251 .props = (PropValue[]) {
2252 { "spec-ctrl", "on" },
2253 { "model-id",
2254 "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)" },
2255 { /* end of list */ }
2258 { /* end of list */ }
/* "Haswell": family 6, model 60; adds AVX2, BMI1/2, FMA, MOVBE, PCID,
 * INVPCID and TSX (HLE/RTM). Four versions: -v2 "Haswell-noTSX"
 * (TSX off, stepping 1), -v3 "Haswell-IBRS" (TSX restored, spec-ctrl
 * on), -v4 "Haswell-noTSX-IBRS" (TSX off again, keeps spec-ctrl). */
2262 .name = "Haswell",
2263 .level = 0xd,
2264 .vendor = CPUID_VENDOR_INTEL,
2265 .family = 6,
2266 .model = 60,
2267 .stepping = 4,
2268 .features[FEAT_1_EDX] =
2269 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2270 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2271 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2272 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2273 CPUID_DE | CPUID_FP87,
2274 .features[FEAT_1_ECX] =
2275 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2276 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2277 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2278 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2279 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2280 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2281 .features[FEAT_8000_0001_EDX] =
2282 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2283 CPUID_EXT2_SYSCALL,
2284 .features[FEAT_8000_0001_ECX] =
2285 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
2286 .features[FEAT_7_0_EBX] =
2287 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2288 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2289 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2290 CPUID_7_0_EBX_RTM,
2291 .features[FEAT_XSAVE] =
2292 CPUID_XSAVE_XSAVEOPT,
2293 .features[FEAT_6_EAX] =
2294 CPUID_6_EAX_ARAT,
2295 .xlevel = 0x80000008,
2296 .model_id = "Intel Core Processor (Haswell)",
2297 .versions = (X86CPUVersionDefinition[]) {
2298 { .version = 1 },
2300 .version = 2,
2301 .alias = "Haswell-noTSX",
2302 .props = (PropValue[]) {
2303 { "hle", "off" },
2304 { "rtm", "off" },
2305 { "stepping", "1" },
2306 { "model-id", "Intel Core Processor (Haswell, no TSX)", },
2307 { /* end of list */ }
2311 .version = 3,
2312 .alias = "Haswell-IBRS",
2313 .props = (PropValue[]) {
2314 /* Restore TSX features removed by -v2 above */
2315 { "hle", "on" },
2316 { "rtm", "on" },
2318 * Haswell and Haswell-IBRS had stepping=4 in
2319 * QEMU 4.0 and older
2321 { "stepping", "4" },
2322 { "spec-ctrl", "on" },
2323 { "model-id",
2324 "Intel Core Processor (Haswell, IBRS)" },
2325 { /* end of list */ }
2329 .version = 4,
2330 .alias = "Haswell-noTSX-IBRS",
2331 .props = (PropValue[]) {
2332 { "hle", "off" },
2333 { "rtm", "off" },
2334 /* spec-ctrl was already enabled by -v3 above */
2335 { "stepping", "1" },
2336 { "model-id",
2337 "Intel Core Processor (Haswell, no TSX, IBRS)" },
2338 { /* end of list */ }
2341 { /* end of list */ }
/* "Broadwell": family 6, model 61; Haswell plus RDSEED, ADX, SMAP and
 * 3DNOWPREFETCH. Same four-way versioning pattern as Haswell:
 * -v2 noTSX, -v3 IBRS (TSX restored), -v4 noTSX-IBRS. */
2345 .name = "Broadwell",
2346 .level = 0xd,
2347 .vendor = CPUID_VENDOR_INTEL,
2348 .family = 6,
2349 .model = 61,
2350 .stepping = 2,
2351 .features[FEAT_1_EDX] =
2352 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2353 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2354 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2355 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2356 CPUID_DE | CPUID_FP87,
2357 .features[FEAT_1_ECX] =
2358 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2359 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2360 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2361 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2362 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2363 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2364 .features[FEAT_8000_0001_EDX] =
2365 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2366 CPUID_EXT2_SYSCALL,
2367 .features[FEAT_8000_0001_ECX] =
2368 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2369 .features[FEAT_7_0_EBX] =
2370 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2371 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2372 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2373 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2374 CPUID_7_0_EBX_SMAP,
2375 .features[FEAT_XSAVE] =
2376 CPUID_XSAVE_XSAVEOPT,
2377 .features[FEAT_6_EAX] =
2378 CPUID_6_EAX_ARAT,
2379 .xlevel = 0x80000008,
2380 .model_id = "Intel Core Processor (Broadwell)",
2381 .versions = (X86CPUVersionDefinition[]) {
2382 { .version = 1 },
2384 .version = 2,
2385 .alias = "Broadwell-noTSX",
2386 .props = (PropValue[]) {
2387 { "hle", "off" },
2388 { "rtm", "off" },
2389 { "model-id", "Intel Core Processor (Broadwell, no TSX)", },
2390 { /* end of list */ }
2394 .version = 3,
2395 .alias = "Broadwell-IBRS",
2396 .props = (PropValue[]) {
2397 /* Restore TSX features removed by -v2 above */
2398 { "hle", "on" },
2399 { "rtm", "on" },
2400 { "spec-ctrl", "on" },
2401 { "model-id",
2402 "Intel Core Processor (Broadwell, IBRS)" },
2403 { /* end of list */ }
2407 .version = 4,
2408 .alias = "Broadwell-noTSX-IBRS",
2409 .props = (PropValue[]) {
2410 { "hle", "off" },
2411 { "rtm", "off" },
2412 /* spec-ctrl was already enabled by -v3 above */
2413 { "model-id",
2414 "Intel Core Processor (Broadwell, no TSX, IBRS)" },
2415 { /* end of list */ }
2418 { /* end of list */ }
/* "Skylake-Client": family 6, model 94; Broadwell feature set plus
 * XSAVEC/XGETBV1 state-save features (XSAVES deliberately left out,
 * see inline note). -v2 / "Skylake-Client-IBRS" enables spec-ctrl. */
2422 .name = "Skylake-Client",
2423 .level = 0xd,
2424 .vendor = CPUID_VENDOR_INTEL,
2425 .family = 6,
2426 .model = 94,
2427 .stepping = 3,
2428 .features[FEAT_1_EDX] =
2429 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2430 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2431 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2432 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2433 CPUID_DE | CPUID_FP87,
2434 .features[FEAT_1_ECX] =
2435 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2436 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2437 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2438 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2439 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2440 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2441 .features[FEAT_8000_0001_EDX] =
2442 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2443 CPUID_EXT2_SYSCALL,
2444 .features[FEAT_8000_0001_ECX] =
2445 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2446 .features[FEAT_7_0_EBX] =
2447 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2448 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2449 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2450 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2451 CPUID_7_0_EBX_SMAP,
2452 /* Missing: XSAVES (not supported by some Linux versions,
2453 * including v4.1 to v4.12).
2454 * KVM doesn't yet expose any XSAVES state save component,
2455 * and the only one defined in Skylake (processor tracing)
2456 * probably will block migration anyway.
2458 .features[FEAT_XSAVE] =
2459 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2460 CPUID_XSAVE_XGETBV1,
2461 .features[FEAT_6_EAX] =
2462 CPUID_6_EAX_ARAT,
2463 .xlevel = 0x80000008,
2464 .model_id = "Intel Core Processor (Skylake)",
2465 .versions = (X86CPUVersionDefinition[]) {
2466 { .version = 1 },
2468 .version = 2,
2469 .alias = "Skylake-Client-IBRS",
2470 .props = (PropValue[]) {
2471 { "spec-ctrl", "on" },
2472 { "model-id",
2473 "Intel Core Processor (Skylake, IBRS)" },
2474 { /* end of list */ }
2477 { /* end of list */ }
/* "Skylake-Server": family 6, model 85 (shared with Cascadelake);
 * client set plus 1GB pages (PDPE1GB), CLWB/CLFLUSHOPT, the AVX-512
 * F/DQ/BW/CD/VL group, and PKU. -v2 / "Skylake-Server-IBRS" adds
 * spec-ctrl but turns clflushopt off to match what that version
 * originally shipped with (see inline TODO). */
2481 .name = "Skylake-Server",
2482 .level = 0xd,
2483 .vendor = CPUID_VENDOR_INTEL,
2484 .family = 6,
2485 .model = 85,
2486 .stepping = 4,
2487 .features[FEAT_1_EDX] =
2488 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2489 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2490 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2491 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2492 CPUID_DE | CPUID_FP87,
2493 .features[FEAT_1_ECX] =
2494 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2495 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2496 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2497 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2498 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2499 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2500 .features[FEAT_8000_0001_EDX] =
2501 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2502 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2503 .features[FEAT_8000_0001_ECX] =
2504 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2505 .features[FEAT_7_0_EBX] =
2506 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2507 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2508 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2509 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2510 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
2511 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2512 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2513 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2514 .features[FEAT_7_0_ECX] =
2515 CPUID_7_0_ECX_PKU,
2516 /* Missing: XSAVES (not supported by some Linux versions,
2517 * including v4.1 to v4.12).
2518 * KVM doesn't yet expose any XSAVES state save component,
2519 * and the only one defined in Skylake (processor tracing)
2520 * probably will block migration anyway.
2522 .features[FEAT_XSAVE] =
2523 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2524 CPUID_XSAVE_XGETBV1,
2525 .features[FEAT_6_EAX] =
2526 CPUID_6_EAX_ARAT,
2527 .xlevel = 0x80000008,
2528 .model_id = "Intel Xeon Processor (Skylake)",
2529 .versions = (X86CPUVersionDefinition[]) {
2530 { .version = 1 },
2532 .version = 2,
2533 .alias = "Skylake-Server-IBRS",
2534 .props = (PropValue[]) {
2535 /* clflushopt was not added to Skylake-Server-IBRS */
2536 /* TODO: add -v3 including clflushopt */
2537 { "clflushopt", "off" },
2538 { "spec-ctrl", "on" },
2539 { "model-id",
2540 "Intel Xeon Processor (Skylake, IBRS)" },
2541 { /* end of list */ }
2544 { /* end of list */ }
/* "Cascadelake-Server": family 6, model 85, stepping 6; Skylake-Server
 * plus AVX512VNNI and hardware speculation mitigations (SPEC_CTRL,
 * SSBD in leaf-7 EDX). -v2 additionally exposes the
 * IA32_ARCH_CAPABILITIES bits (rdctl-no, ibrs-all,
 * skip-l1dfl-vmentry, mds-no). */
2548 .name = "Cascadelake-Server",
2549 .level = 0xd,
2550 .vendor = CPUID_VENDOR_INTEL,
2551 .family = 6,
2552 .model = 85,
2553 .stepping = 6,
2554 .features[FEAT_1_EDX] =
2555 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2556 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2557 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2558 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2559 CPUID_DE | CPUID_FP87,
2560 .features[FEAT_1_ECX] =
2561 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2562 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2563 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2564 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2565 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2566 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2567 .features[FEAT_8000_0001_EDX] =
2568 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2569 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2570 .features[FEAT_8000_0001_ECX] =
2571 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2572 .features[FEAT_7_0_EBX] =
2573 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2574 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2575 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2576 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2577 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
2578 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2579 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2580 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2581 .features[FEAT_7_0_ECX] =
2582 CPUID_7_0_ECX_PKU |
2583 CPUID_7_0_ECX_AVX512VNNI,
2584 .features[FEAT_7_0_EDX] =
2585 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
2586 /* Missing: XSAVES (not supported by some Linux versions,
2587 * including v4.1 to v4.12).
2588 * KVM doesn't yet expose any XSAVES state save component,
2589 * and the only one defined in Skylake (processor tracing)
2590 * probably will block migration anyway.
2592 .features[FEAT_XSAVE] =
2593 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2594 CPUID_XSAVE_XGETBV1,
2595 .features[FEAT_6_EAX] =
2596 CPUID_6_EAX_ARAT,
2597 .xlevel = 0x80000008,
2598 .model_id = "Intel Xeon Processor (Cascadelake)",
2599 .versions = (X86CPUVersionDefinition[]) {
2600 { .version = 1 },
2601 { .version = 2,
2602 .props = (PropValue[]) {
2603 { "arch-capabilities", "on" },
2604 { "rdctl-no", "on" },
2605 { "ibrs-all", "on" },
2606 { "skip-l1dfl-vmentry", "on" },
2607 { "mds-no", "on" },
2608 { /* end of list */ }
2611 { /* end of list */ }
/* "Icelake-Client": family 6, model 126; adds WBNOINVD and the
 * leaf-7 ECX VBMI/VBMI2/GFNI/VAES/VPCLMULQDQ/VNNI/BITALG/VPOPCNTDQ
 * group plus UMIP/PKU, with SPEC_CTRL+SSBD mitigations. Note no
 * AVX-512 EBX bits here, unlike the -Server variant below. */
2615 .name = "Icelake-Client",
2616 .level = 0xd,
2617 .vendor = CPUID_VENDOR_INTEL,
2618 .family = 6,
2619 .model = 126,
2620 .stepping = 0,
2621 .features[FEAT_1_EDX] =
2622 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2623 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2624 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2625 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2626 CPUID_DE | CPUID_FP87,
2627 .features[FEAT_1_ECX] =
2628 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2629 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2630 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2631 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2632 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2633 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2634 .features[FEAT_8000_0001_EDX] =
2635 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2636 CPUID_EXT2_SYSCALL,
2637 .features[FEAT_8000_0001_ECX] =
2638 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2639 .features[FEAT_8000_0008_EBX] =
2640 CPUID_8000_0008_EBX_WBNOINVD,
2641 .features[FEAT_7_0_EBX] =
2642 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2643 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2644 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2645 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2646 CPUID_7_0_EBX_SMAP,
2647 .features[FEAT_7_0_ECX] =
2648 CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
2649 CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI |
2650 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
2651 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
2652 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
2653 .features[FEAT_7_0_EDX] =
2654 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
2655 /* Missing: XSAVES (not supported by some Linux versions,
2656 * including v4.1 to v4.12).
2657 * KVM doesn't yet expose any XSAVES state save component,
2658 * and the only one defined in Skylake (processor tracing)
2659 * probably will block migration anyway.
2661 .features[FEAT_XSAVE] =
2662 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2663 CPUID_XSAVE_XGETBV1,
2664 .features[FEAT_6_EAX] =
2665 CPUID_6_EAX_ARAT,
2666 .xlevel = 0x80000008,
2667 .model_id = "Intel Core Processor (Icelake)",
/* "Icelake-Server": Icelake-Client set plus PDPE1GB, CLWB/CLFLUSHOPT,
 * the AVX-512 F/DQ/BW/CD/VL group, and LA57 (5-level paging).
 * NOTE(review): .model = 134 here is the same value the Snowridge
 * entry below uses — confirm this is the intended CPUID model number
 * for Icelake-SP rather than a copy of the Atom/Snowridge value. */
2670 .name = "Icelake-Server",
2671 .level = 0xd,
2672 .vendor = CPUID_VENDOR_INTEL,
2673 .family = 6,
2674 .model = 134,
2675 .stepping = 0,
2676 .features[FEAT_1_EDX] =
2677 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2678 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2679 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2680 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2681 CPUID_DE | CPUID_FP87,
2682 .features[FEAT_1_ECX] =
2683 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2684 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2685 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2686 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2687 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2688 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2689 .features[FEAT_8000_0001_EDX] =
2690 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2691 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2692 .features[FEAT_8000_0001_ECX] =
2693 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2694 .features[FEAT_8000_0008_EBX] =
2695 CPUID_8000_0008_EBX_WBNOINVD,
2696 .features[FEAT_7_0_EBX] =
2697 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2698 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2699 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2700 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2701 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
2702 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2703 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2704 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2705 .features[FEAT_7_0_ECX] =
2706 CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
2707 CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI |
2708 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
2709 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
2710 CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57,
2711 .features[FEAT_7_0_EDX] =
2712 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
2713 /* Missing: XSAVES (not supported by some Linux versions,
2714 * including v4.1 to v4.12).
2715 * KVM doesn't yet expose any XSAVES state save component,
2716 * and the only one defined in Skylake (processor tracing)
2717 * probably will block migration anyway.
2719 .features[FEAT_XSAVE] =
2720 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2721 CPUID_XSAVE_XGETBV1,
2722 .features[FEAT_6_EAX] =
2723 CPUID_6_EAX_ARAT,
2724 .xlevel = 0x80000008,
2725 .model_id = "Intel Xeon Processor (Icelake)",
/* "Snowridge": Atom server SoC model (family 6, model 134). No AVX2/
 * AVX-512, but exposes MPX, SHA-NI, GFNI, MOVDIRI/MOVDIR64B,
 * CLDEMOTE, and the MSR_CORE_CAP split-lock-detect capability via
 * CPUID_7_0_EDX_CORE_CAPABILITY. The inline "missing" notes mark
 * real-hardware bits intentionally left out. */
2728 .name = "Snowridge",
2729 .level = 27,
2730 .vendor = CPUID_VENDOR_INTEL,
2731 .family = 6,
2732 .model = 134,
2733 .stepping = 1,
2734 .features[FEAT_1_EDX] =
2735 /* missing: CPUID_PN CPUID_IA64 */
2736 /* missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
2737 CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE |
2738 CPUID_TSC | CPUID_MSR | CPUID_PAE | CPUID_MCE |
2739 CPUID_CX8 | CPUID_APIC | CPUID_SEP |
2740 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV |
2741 CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH |
2742 CPUID_MMX |
2743 CPUID_FXSR | CPUID_SSE | CPUID_SSE2,
2744 .features[FEAT_1_ECX] =
2745 CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR |
2746 CPUID_EXT_SSSE3 |
2747 CPUID_EXT_CX16 |
2748 CPUID_EXT_SSE41 |
2749 CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE |
2750 CPUID_EXT_POPCNT |
2751 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_AES | CPUID_EXT_XSAVE |
2752 CPUID_EXT_RDRAND,
2753 .features[FEAT_8000_0001_EDX] =
2754 CPUID_EXT2_SYSCALL |
2755 CPUID_EXT2_NX |
2756 CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2757 CPUID_EXT2_LM,
2758 .features[FEAT_8000_0001_ECX] =
2759 CPUID_EXT3_LAHF_LM |
2760 CPUID_EXT3_3DNOWPREFETCH,
2761 .features[FEAT_7_0_EBX] =
2762 CPUID_7_0_EBX_FSGSBASE |
2763 CPUID_7_0_EBX_SMEP |
2764 CPUID_7_0_EBX_ERMS |
2765 CPUID_7_0_EBX_MPX | /* missing bits 13, 15 */
2766 CPUID_7_0_EBX_RDSEED |
2767 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2768 CPUID_7_0_EBX_CLWB |
2769 CPUID_7_0_EBX_SHA_NI,
2770 .features[FEAT_7_0_ECX] =
2771 CPUID_7_0_ECX_UMIP |
2772 /* missing bit 5 */
2773 CPUID_7_0_ECX_GFNI |
2774 CPUID_7_0_ECX_MOVDIRI | CPUID_7_0_ECX_CLDEMOTE |
2775 CPUID_7_0_ECX_MOVDIR64B,
2776 .features[FEAT_7_0_EDX] =
2777 CPUID_7_0_EDX_SPEC_CTRL |
2778 CPUID_7_0_EDX_ARCH_CAPABILITIES | CPUID_7_0_EDX_SPEC_CTRL_SSBD |
2779 CPUID_7_0_EDX_CORE_CAPABILITY,
2780 .features[FEAT_CORE_CAPABILITY] =
2781 MSR_CORE_CAP_SPLIT_LOCK_DETECT,
2783 * Missing: XSAVES (not supported by some Linux versions,
2784 * including v4.1 to v4.12).
2785 * KVM doesn't yet expose any XSAVES state save component,
2786 * and the only one defined in Skylake (processor tracing)
2787 * probably will block migration anyway.
2789 .features[FEAT_XSAVE] =
2790 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2791 CPUID_XSAVE_XGETBV1,
2792 .features[FEAT_6_EAX] =
2793 CPUID_6_EAX_ARAT,
2794 .xlevel = 0x80000008,
2795 .model_id = "Intel Atom Processor (SnowRidge)",
/* "KnightsMill": Xeon Phi (family 6, model 133); AVX-512
 * F/CD/PF/ER plus the Phi-only 4VNNIW/4FMAPS leaf-7 EDX bits and
 * VPOPCNTDQ. No BMI-era TSX or SMAP bits. */
2798 .name = "KnightsMill",
2799 .level = 0xd,
2800 .vendor = CPUID_VENDOR_INTEL,
2801 .family = 6,
2802 .model = 133,
2803 .stepping = 0,
2804 .features[FEAT_1_EDX] =
2805 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR |
2806 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV |
2807 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC |
2808 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC |
2809 CPUID_PSE | CPUID_DE | CPUID_FP87,
2810 .features[FEAT_1_ECX] =
2811 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2812 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2813 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2814 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2815 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2816 CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2817 .features[FEAT_8000_0001_EDX] =
2818 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2819 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2820 .features[FEAT_8000_0001_ECX] =
2821 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2822 .features[FEAT_7_0_EBX] =
2823 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2824 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS |
2825 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F |
2826 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF |
2827 CPUID_7_0_EBX_AVX512ER,
2828 .features[FEAT_7_0_ECX] =
2829 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
2830 .features[FEAT_7_0_EDX] =
2831 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS,
2832 .features[FEAT_XSAVE] =
2833 CPUID_XSAVE_XSAVEOPT,
2834 .features[FEAT_6_EAX] =
2835 CPUID_6_EAX_ARAT,
2836 .xlevel = 0x80000008,
2837 .model_id = "Intel Xeon Phi Processor (Knights Mill)",
/* "Opteron_G1": first-generation AMD Opteron (family 15 / K8);
 * 64-bit with SSE3 only, no SVM. */
2840 .name = "Opteron_G1",
2841 .level = 5,
2842 .vendor = CPUID_VENDOR_AMD,
2843 .family = 15,
2844 .model = 6,
2845 .stepping = 1,
2846 .features[FEAT_1_EDX] =
2847 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2848 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2849 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2850 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2851 CPUID_DE | CPUID_FP87,
2852 .features[FEAT_1_ECX] =
2853 CPUID_EXT_SSE3,
2854 .features[FEAT_8000_0001_EDX] =
2855 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2856 .xlevel = 0x80000008,
2857 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
/* "Opteron_G2": second-generation Opteron; G1 plus CX16 and SVM
 * (hardware virtualization) with LAHF_LM. */
2860 .name = "Opteron_G2",
2861 .level = 5,
2862 .vendor = CPUID_VENDOR_AMD,
2863 .family = 15,
2864 .model = 6,
2865 .stepping = 1,
2866 .features[FEAT_1_EDX] =
2867 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2868 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2869 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2870 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2871 CPUID_DE | CPUID_FP87,
2872 .features[FEAT_1_ECX] =
2873 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
2874 .features[FEAT_8000_0001_EDX] =
2875 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2876 .features[FEAT_8000_0001_ECX] =
2877 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2878 .xlevel = 0x80000008,
2879 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
/* "Opteron_G3": family 16 (K10); G2 plus POPCNT, MONITOR, RDTSCP,
 * SSE4A, ABM and misaligned-SSE. */
2882 .name = "Opteron_G3",
2883 .level = 5,
2884 .vendor = CPUID_VENDOR_AMD,
2885 .family = 16,
2886 .model = 2,
2887 .stepping = 3,
2888 .features[FEAT_1_EDX] =
2889 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2890 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2891 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2892 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2893 CPUID_DE | CPUID_FP87,
2894 .features[FEAT_1_ECX] =
2895 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
2896 CPUID_EXT_SSE3,
2897 .features[FEAT_8000_0001_EDX] =
2898 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL |
2899 CPUID_EXT2_RDTSCP,
2900 .features[FEAT_8000_0001_ECX] =
2901 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
2902 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2903 .xlevel = 0x80000008,
2904 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
/* "Opteron_G4": Bulldozer class (family 21, model 1); adds AVX,
 * XSAVE (but explicitly no XSAVEOPT), AES/PCLMUL, SSE4.1/4.2,
 * XOP/FMA4 and nested-paging SVM (NPT, NRIPSAVE). xlevel raised to
 * 0x8000001A for the extended-performance leaf range. */
2907 .name = "Opteron_G4",
2908 .level = 0xd,
2909 .vendor = CPUID_VENDOR_AMD,
2910 .family = 21,
2911 .model = 1,
2912 .stepping = 2,
2913 .features[FEAT_1_EDX] =
2914 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2915 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2916 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2917 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2918 CPUID_DE | CPUID_FP87,
2919 .features[FEAT_1_ECX] =
2920 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2921 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2922 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2923 CPUID_EXT_SSE3,
2924 .features[FEAT_8000_0001_EDX] =
2925 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2926 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP,
2927 .features[FEAT_8000_0001_ECX] =
2928 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2929 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2930 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2931 CPUID_EXT3_LAHF_LM,
2932 .features[FEAT_SVM] =
2933 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
2934 /* no xsaveopt! */
2935 .xlevel = 0x8000001A,
2936 .model_id = "AMD Opteron 62xx class CPU",
2939 .name = "Opteron_G5",
2940 .level = 0xd,
2941 .vendor = CPUID_VENDOR_AMD,
2942 .family = 21,
2943 .model = 2,
2944 .stepping = 0,
2945 .features[FEAT_1_EDX] =
2946 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2947 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2948 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2949 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2950 CPUID_DE | CPUID_FP87,
2951 .features[FEAT_1_ECX] =
2952 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
2953 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
2954 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
2955 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2956 .features[FEAT_8000_0001_EDX] =
2957 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2958 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP,
2959 .features[FEAT_8000_0001_ECX] =
2960 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2961 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2962 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2963 CPUID_EXT3_LAHF_LM,
2964 .features[FEAT_SVM] =
2965 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
2966 /* no xsaveopt! */
2967 .xlevel = 0x8000001A,
2968 .model_id = "AMD Opteron 63xx class CPU",
2971 .name = "EPYC",
2972 .level = 0xd,
2973 .vendor = CPUID_VENDOR_AMD,
2974 .family = 23,
2975 .model = 1,
2976 .stepping = 2,
2977 .features[FEAT_1_EDX] =
2978 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2979 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2980 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2981 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2982 CPUID_VME | CPUID_FP87,
2983 .features[FEAT_1_ECX] =
2984 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2985 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2986 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2987 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2988 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2989 .features[FEAT_8000_0001_EDX] =
2990 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2991 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2992 CPUID_EXT2_SYSCALL,
2993 .features[FEAT_8000_0001_ECX] =
2994 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2995 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2996 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
2997 CPUID_EXT3_TOPOEXT,
2998 .features[FEAT_7_0_EBX] =
2999 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
3000 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
3001 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
3002 CPUID_7_0_EBX_SHA_NI,
3003 /* Missing: XSAVES (not supported by some Linux versions,
3004 * including v4.1 to v4.12).
3005 * KVM doesn't yet expose any XSAVES state save component.
3007 .features[FEAT_XSAVE] =
3008 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3009 CPUID_XSAVE_XGETBV1,
3010 .features[FEAT_6_EAX] =
3011 CPUID_6_EAX_ARAT,
3012 .features[FEAT_SVM] =
3013 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
3014 .xlevel = 0x8000001E,
3015 .model_id = "AMD EPYC Processor",
3016 .cache_info = &epyc_cache_info,
3017 .versions = (X86CPUVersionDefinition[]) {
3018 { .version = 1 },
3020 .version = 2,
3021 .alias = "EPYC-IBPB",
3022 .props = (PropValue[]) {
3023 { "ibpb", "on" },
3024 { "model-id",
3025 "AMD EPYC Processor (with IBPB)" },
3026 { /* end of list */ }
3029 { /* end of list */ }
3033 .name = "Dhyana",
3034 .level = 0xd,
3035 .vendor = CPUID_VENDOR_HYGON,
3036 .family = 24,
3037 .model = 0,
3038 .stepping = 1,
3039 .features[FEAT_1_EDX] =
3040 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
3041 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
3042 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
3043 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
3044 CPUID_VME | CPUID_FP87,
3045 .features[FEAT_1_ECX] =
3046 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
3047 CPUID_EXT_XSAVE | CPUID_EXT_POPCNT |
3048 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
3049 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
3050 CPUID_EXT_MONITOR | CPUID_EXT_SSE3,
3051 .features[FEAT_8000_0001_EDX] =
3052 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
3053 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
3054 CPUID_EXT2_SYSCALL,
3055 .features[FEAT_8000_0001_ECX] =
3056 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
3057 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
3058 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
3059 CPUID_EXT3_TOPOEXT,
3060 .features[FEAT_8000_0008_EBX] =
3061 CPUID_8000_0008_EBX_IBPB,
3062 .features[FEAT_7_0_EBX] =
3063 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
3064 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
3065 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT,
3067 * Missing: XSAVES (not supported by some Linux versions,
3068 * including v4.1 to v4.12).
3069 * KVM doesn't yet expose any XSAVES state save component.
3071 .features[FEAT_XSAVE] =
3072 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3073 CPUID_XSAVE_XGETBV1,
3074 .features[FEAT_6_EAX] =
3075 CPUID_6_EAX_ARAT,
3076 .features[FEAT_SVM] =
3077 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
3078 .xlevel = 0x8000001E,
3079 .model_id = "Hygon Dhyana Processor",
3080 .cache_info = &epyc_cache_info,
3084 /* KVM-specific features that are automatically added/removed
3085 * from all CPU models when KVM is enabled.
3087 static PropValue kvm_default_props[] = {
3088 { "kvmclock", "on" },
3089 { "kvm-nopiodelay", "on" },
3090 { "kvm-asyncpf", "on" },
3091 { "kvm-steal-time", "on" },
3092 { "kvm-pv-eoi", "on" },
3093 { "kvmclock-stable-bit", "on" },
3094 { "x2apic", "on" },
3095 { "acpi", "off" },
3096 { "monitor", "off" },
3097 { "svm", "off" },
3098 { NULL, NULL },
3101 /* TCG-specific defaults that override all CPU models when using TCG
3103 static PropValue tcg_default_props[] = {
3104 { "vme", "off" },
3105 { NULL, NULL },
3109 X86CPUVersion default_cpu_version = CPU_VERSION_LATEST;
3111 void x86_cpu_set_default_version(X86CPUVersion version)
3113 /* Translating CPU_VERSION_AUTO to CPU_VERSION_AUTO doesn't make sense */
3114 assert(version != CPU_VERSION_AUTO);
3115 default_cpu_version = version;
3118 static X86CPUVersion x86_cpu_model_last_version(const X86CPUModel *model)
3120 int v = 0;
3121 const X86CPUVersionDefinition *vdef =
3122 x86_cpu_def_get_versions(model->cpudef);
3123 while (vdef->version) {
3124 v = vdef->version;
3125 vdef++;
3127 return v;
3130 /* Return the actual version being used for a specific CPU model */
3131 static X86CPUVersion x86_cpu_model_resolve_version(const X86CPUModel *model)
3133 X86CPUVersion v = model->version;
3134 if (v == CPU_VERSION_AUTO) {
3135 v = default_cpu_version;
3137 if (v == CPU_VERSION_LATEST) {
3138 return x86_cpu_model_last_version(model);
3140 return v;
3143 void x86_cpu_change_kvm_default(const char *prop, const char *value)
3145 PropValue *pv;
3146 for (pv = kvm_default_props; pv->prop; pv++) {
3147 if (!strcmp(pv->prop, prop)) {
3148 pv->value = value;
3149 break;
3153 /* It is valid to call this function only for properties that
3154 * are already present in the kvm_default_props table.
3156 assert(pv->prop);
3159 static uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
3160 bool migratable_only);
3162 static bool lmce_supported(void)
3164 uint64_t mce_cap = 0;
3166 #ifdef CONFIG_KVM
3167 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
3168 return false;
3170 #endif
3172 return !!(mce_cap & MCG_LMCE_P);
#define CPUID_MODEL_ID_SZ 48

/*
 * cpu_x86_fill_model_id:
 * Get CPUID model ID string from host CPU.
 *
 * @str should have at least CPUID_MODEL_ID_SZ bytes
 *
 * The function does NOT add a null terminator to the string
 * automatically.
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
    int i;

    /* CPUID leaves 0x80000002..0x80000004 each yield 16 bytes of the name */
    for (i = 0; i < 3; i++) {
        host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
        memcpy(&str[i * 16 + 0], &eax, 4);
        memcpy(&str[i * 16 + 4], &ebx, 4);
        memcpy(&str[i * 16 + 8], &ecx, 4);
        memcpy(&str[i * 16 + 12], &edx, 4);
    }
    return 0;
}
3201 static Property max_x86_cpu_properties[] = {
3202 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
3203 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
3204 DEFINE_PROP_END_OF_LIST()
3207 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
3209 DeviceClass *dc = DEVICE_CLASS(oc);
3210 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3212 xcc->ordering = 9;
3214 xcc->model_description =
3215 "Enables all features supported by the accelerator in the current host";
3217 dc->props = max_x86_cpu_properties;
3220 static void max_x86_cpu_initfn(Object *obj)
3222 X86CPU *cpu = X86_CPU(obj);
3223 CPUX86State *env = &cpu->env;
3224 KVMState *s = kvm_state;
3226 /* We can't fill the features array here because we don't know yet if
3227 * "migratable" is true or false.
3229 cpu->max_features = true;
3231 if (accel_uses_host_cpuid()) {
3232 char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
3233 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
3234 int family, model, stepping;
3236 host_vendor_fms(vendor, &family, &model, &stepping);
3237 cpu_x86_fill_model_id(model_id);
3239 object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
3240 object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
3241 object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
3242 object_property_set_int(OBJECT(cpu), stepping, "stepping",
3243 &error_abort);
3244 object_property_set_str(OBJECT(cpu), model_id, "model-id",
3245 &error_abort);
3247 if (kvm_enabled()) {
3248 env->cpuid_min_level =
3249 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
3250 env->cpuid_min_xlevel =
3251 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
3252 env->cpuid_min_xlevel2 =
3253 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
3254 } else {
3255 env->cpuid_min_level =
3256 hvf_get_supported_cpuid(0x0, 0, R_EAX);
3257 env->cpuid_min_xlevel =
3258 hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
3259 env->cpuid_min_xlevel2 =
3260 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
3263 if (lmce_supported()) {
3264 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
3266 } else {
3267 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
3268 "vendor", &error_abort);
3269 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
3270 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
3271 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
3272 object_property_set_str(OBJECT(cpu),
3273 "QEMU TCG CPU version " QEMU_HW_VERSION,
3274 "model-id", &error_abort);
3277 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
3280 static const TypeInfo max_x86_cpu_type_info = {
3281 .name = X86_CPU_TYPE_NAME("max"),
3282 .parent = TYPE_X86_CPU,
3283 .instance_init = max_x86_cpu_initfn,
3284 .class_init = max_x86_cpu_class_init,
#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
/*
 * Class init for the "host" CPU model; only built when an accelerator
 * with host CPUID access is compiled in.
 */
static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->host_cpuid_required = true;
    /* Sort "host" between the named models and "max" in -cpu help output */
    xcc->ordering = 8;

#if defined(CONFIG_KVM)
    xcc->model_description =
        "KVM processor with all supported host features ";
#elif defined(CONFIG_HVF)
    xcc->model_description =
        "HVF processor with all supported host features ";
#endif
}

/* "host" inherits its instance_init (host identification) from "max". */
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = X86_CPU_TYPE_NAME("max"),
    .class_init = host_x86_cpu_class_init,
};

#endif
3312 static char *feature_word_description(FeatureWordInfo *f, uint32_t bit)
3314 assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD);
3316 switch (f->type) {
3317 case CPUID_FEATURE_WORD:
3319 const char *reg = get_register_name_32(f->cpuid.reg);
3320 assert(reg);
3321 return g_strdup_printf("CPUID.%02XH:%s",
3322 f->cpuid.eax, reg);
3324 case MSR_FEATURE_WORD:
3325 return g_strdup_printf("MSR(%02XH)",
3326 f->msr.index);
3329 return NULL;
3332 static bool x86_cpu_have_filtered_features(X86CPU *cpu)
3334 FeatureWord w;
3336 for (w = 0; w < FEATURE_WORDS; w++) {
3337 if (cpu->filtered_features[w]) {
3338 return true;
3342 return false;
3345 static void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask,
3346 const char *verbose_prefix)
3348 CPUX86State *env = &cpu->env;
3349 FeatureWordInfo *f = &feature_word_info[w];
3350 int i;
3351 char *feat_word_str;
3353 if (!cpu->force_features) {
3354 env->features[w] &= ~mask;
3356 cpu->filtered_features[w] |= mask;
3358 if (!verbose_prefix) {
3359 return;
3362 for (i = 0; i < 64; ++i) {
3363 if ((1ULL << i) & mask) {
3364 feat_word_str = feature_word_description(f, i);
3365 warn_report("%s: %s%s%s [bit %d]",
3366 verbose_prefix,
3367 feat_word_str,
3368 f->feat_names[i] ? "." : "",
3369 f->feat_names[i] ? f->feat_names[i] : "", i);
3370 g_free(feat_word_str);
3375 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
3376 const char *name, void *opaque,
3377 Error **errp)
3379 X86CPU *cpu = X86_CPU(obj);
3380 CPUX86State *env = &cpu->env;
3381 int64_t value;
3383 value = (env->cpuid_version >> 8) & 0xf;
3384 if (value == 0xf) {
3385 value += (env->cpuid_version >> 20) & 0xff;
3387 visit_type_int(v, name, &value, errp);
3390 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
3391 const char *name, void *opaque,
3392 Error **errp)
3394 X86CPU *cpu = X86_CPU(obj);
3395 CPUX86State *env = &cpu->env;
3396 const int64_t min = 0;
3397 const int64_t max = 0xff + 0xf;
3398 Error *local_err = NULL;
3399 int64_t value;
3401 visit_type_int(v, name, &value, &local_err);
3402 if (local_err) {
3403 error_propagate(errp, local_err);
3404 return;
3406 if (value < min || value > max) {
3407 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3408 name ? name : "null", value, min, max);
3409 return;
3412 env->cpuid_version &= ~0xff00f00;
3413 if (value > 0x0f) {
3414 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
3415 } else {
3416 env->cpuid_version |= value << 8;
3420 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
3421 const char *name, void *opaque,
3422 Error **errp)
3424 X86CPU *cpu = X86_CPU(obj);
3425 CPUX86State *env = &cpu->env;
3426 int64_t value;
3428 value = (env->cpuid_version >> 4) & 0xf;
3429 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
3430 visit_type_int(v, name, &value, errp);
3433 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
3434 const char *name, void *opaque,
3435 Error **errp)
3437 X86CPU *cpu = X86_CPU(obj);
3438 CPUX86State *env = &cpu->env;
3439 const int64_t min = 0;
3440 const int64_t max = 0xff;
3441 Error *local_err = NULL;
3442 int64_t value;
3444 visit_type_int(v, name, &value, &local_err);
3445 if (local_err) {
3446 error_propagate(errp, local_err);
3447 return;
3449 if (value < min || value > max) {
3450 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3451 name ? name : "null", value, min, max);
3452 return;
3455 env->cpuid_version &= ~0xf00f0;
3456 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
3459 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
3460 const char *name, void *opaque,
3461 Error **errp)
3463 X86CPU *cpu = X86_CPU(obj);
3464 CPUX86State *env = &cpu->env;
3465 int64_t value;
3467 value = env->cpuid_version & 0xf;
3468 visit_type_int(v, name, &value, errp);
3471 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
3472 const char *name, void *opaque,
3473 Error **errp)
3475 X86CPU *cpu = X86_CPU(obj);
3476 CPUX86State *env = &cpu->env;
3477 const int64_t min = 0;
3478 const int64_t max = 0xf;
3479 Error *local_err = NULL;
3480 int64_t value;
3482 visit_type_int(v, name, &value, &local_err);
3483 if (local_err) {
3484 error_propagate(errp, local_err);
3485 return;
3487 if (value < min || value > max) {
3488 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3489 name ? name : "null", value, min, max);
3490 return;
3493 env->cpuid_version &= ~0xf;
3494 env->cpuid_version |= value & 0xf;
3497 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
3499 X86CPU *cpu = X86_CPU(obj);
3500 CPUX86State *env = &cpu->env;
3501 char *value;
3503 value = g_malloc(CPUID_VENDOR_SZ + 1);
3504 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
3505 env->cpuid_vendor3);
3506 return value;
3509 static void x86_cpuid_set_vendor(Object *obj, const char *value,
3510 Error **errp)
3512 X86CPU *cpu = X86_CPU(obj);
3513 CPUX86State *env = &cpu->env;
3514 int i;
3516 if (strlen(value) != CPUID_VENDOR_SZ) {
3517 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
3518 return;
3521 env->cpuid_vendor1 = 0;
3522 env->cpuid_vendor2 = 0;
3523 env->cpuid_vendor3 = 0;
3524 for (i = 0; i < 4; i++) {
3525 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
3526 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
3527 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
3531 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
3533 X86CPU *cpu = X86_CPU(obj);
3534 CPUX86State *env = &cpu->env;
3535 char *value;
3536 int i;
3538 value = g_malloc(48 + 1);
3539 for (i = 0; i < 48; i++) {
3540 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
3542 value[48] = '\0';
3543 return value;
3546 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
3547 Error **errp)
3549 X86CPU *cpu = X86_CPU(obj);
3550 CPUX86State *env = &cpu->env;
3551 int c, len, i;
3553 if (model_id == NULL) {
3554 model_id = "";
3556 len = strlen(model_id);
3557 memset(env->cpuid_model, 0, 48);
3558 for (i = 0; i < 48; i++) {
3559 if (i >= len) {
3560 c = '\0';
3561 } else {
3562 c = (uint8_t)model_id[i];
3564 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
3568 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
3569 void *opaque, Error **errp)
3571 X86CPU *cpu = X86_CPU(obj);
3572 int64_t value;
3574 value = cpu->env.tsc_khz * 1000;
3575 visit_type_int(v, name, &value, errp);
3578 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
3579 void *opaque, Error **errp)
3581 X86CPU *cpu = X86_CPU(obj);
3582 const int64_t min = 0;
3583 const int64_t max = INT64_MAX;
3584 Error *local_err = NULL;
3585 int64_t value;
3587 visit_type_int(v, name, &value, &local_err);
3588 if (local_err) {
3589 error_propagate(errp, local_err);
3590 return;
3592 if (value < min || value > max) {
3593 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3594 name ? name : "null", value, min, max);
3595 return;
3598 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
3601 /* Generic getter for "feature-words" and "filtered-features" properties */
3602 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
3603 const char *name, void *opaque,
3604 Error **errp)
3606 uint64_t *array = (uint64_t *)opaque;
3607 FeatureWord w;
3608 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
3609 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
3610 X86CPUFeatureWordInfoList *list = NULL;
3612 for (w = 0; w < FEATURE_WORDS; w++) {
3613 FeatureWordInfo *wi = &feature_word_info[w];
3615 * We didn't have MSR features when "feature-words" was
3616 * introduced. Therefore skipped other type entries.
3618 if (wi->type != CPUID_FEATURE_WORD) {
3619 continue;
3621 X86CPUFeatureWordInfo *qwi = &word_infos[w];
3622 qwi->cpuid_input_eax = wi->cpuid.eax;
3623 qwi->has_cpuid_input_ecx = wi->cpuid.needs_ecx;
3624 qwi->cpuid_input_ecx = wi->cpuid.ecx;
3625 qwi->cpuid_register = x86_reg_info_32[wi->cpuid.reg].qapi_enum;
3626 qwi->features = array[w];
3628 /* List will be in reverse order, but order shouldn't matter */
3629 list_entries[w].next = list;
3630 list_entries[w].value = &word_infos[w];
3631 list = &list_entries[w];
3634 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
/*
 * Convert all '_' in a feature string option name to '-', to make the
 * feature name conform to the QOM property naming rule, which uses '-'
 * instead of '_'.
 */
static inline void feat2prop(char *s)
{
    for (char *p = strchr(s, '_'); p; p = strchr(p, '_')) {
        *p = '-';
    }
}
3647 /* Return the feature property name for a feature flag bit */
3648 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
3650 const char *name;
3651 /* XSAVE components are automatically enabled by other features,
3652 * so return the original feature name instead
3654 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
3655 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
3657 if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
3658 x86_ext_save_areas[comp].bits) {
3659 w = x86_ext_save_areas[comp].feature;
3660 bitnr = ctz32(x86_ext_save_areas[comp].bits);
3664 assert(bitnr < 64);
3665 assert(w < FEATURE_WORDS);
3666 name = feature_word_info[w].feat_names[bitnr];
3667 assert(bitnr < 32 || !(name && feature_word_info[w].type == CPUID_FEATURE_WORD));
3668 return name;
3671 /* Compatibily hack to maintain legacy +-feat semantic,
3672 * where +-feat overwrites any feature set by
3673 * feat=on|feat even if the later is parsed after +-feat
3674 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
3676 static GList *plus_features, *minus_features;
3678 static gint compare_string(gconstpointer a, gconstpointer b)
3680 return g_strcmp0(a, b);
3683 /* Parse "+feature,-feature,feature=foo" CPU feature string
3685 static void x86_cpu_parse_featurestr(const char *typename, char *features,
3686 Error **errp)
3688 char *featurestr; /* Single 'key=value" string being parsed */
3689 static bool cpu_globals_initialized;
3690 bool ambiguous = false;
3692 if (cpu_globals_initialized) {
3693 return;
3695 cpu_globals_initialized = true;
3697 if (!features) {
3698 return;
3701 for (featurestr = strtok(features, ",");
3702 featurestr;
3703 featurestr = strtok(NULL, ",")) {
3704 const char *name;
3705 const char *val = NULL;
3706 char *eq = NULL;
3707 char num[32];
3708 GlobalProperty *prop;
3710 /* Compatibility syntax: */
3711 if (featurestr[0] == '+') {
3712 plus_features = g_list_append(plus_features,
3713 g_strdup(featurestr + 1));
3714 continue;
3715 } else if (featurestr[0] == '-') {
3716 minus_features = g_list_append(minus_features,
3717 g_strdup(featurestr + 1));
3718 continue;
3721 eq = strchr(featurestr, '=');
3722 if (eq) {
3723 *eq++ = 0;
3724 val = eq;
3725 } else {
3726 val = "on";
3729 feat2prop(featurestr);
3730 name = featurestr;
3732 if (g_list_find_custom(plus_features, name, compare_string)) {
3733 warn_report("Ambiguous CPU model string. "
3734 "Don't mix both \"+%s\" and \"%s=%s\"",
3735 name, name, val);
3736 ambiguous = true;
3738 if (g_list_find_custom(minus_features, name, compare_string)) {
3739 warn_report("Ambiguous CPU model string. "
3740 "Don't mix both \"-%s\" and \"%s=%s\"",
3741 name, name, val);
3742 ambiguous = true;
3745 /* Special case: */
3746 if (!strcmp(name, "tsc-freq")) {
3747 int ret;
3748 uint64_t tsc_freq;
3750 ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
3751 if (ret < 0 || tsc_freq > INT64_MAX) {
3752 error_setg(errp, "bad numerical value %s", val);
3753 return;
3755 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
3756 val = num;
3757 name = "tsc-frequency";
3760 prop = g_new0(typeof(*prop), 1);
3761 prop->driver = typename;
3762 prop->property = g_strdup(name);
3763 prop->value = g_strdup(val);
3764 qdev_prop_register_global(prop);
3767 if (ambiguous) {
3768 warn_report("Compatibility of ambiguous CPU model "
3769 "strings won't be kept on future QEMU versions");
3773 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
3774 static void x86_cpu_filter_features(X86CPU *cpu, bool verbose);
3776 /* Build a list with the name of all features on a feature word array */
3777 static void x86_cpu_list_feature_names(FeatureWordArray features,
3778 strList **feat_names)
3780 FeatureWord w;
3781 strList **next = feat_names;
3783 for (w = 0; w < FEATURE_WORDS; w++) {
3784 uint64_t filtered = features[w];
3785 int i;
3786 for (i = 0; i < 64; i++) {
3787 if (filtered & (1ULL << i)) {
3788 strList *new = g_new0(strList, 1);
3789 new->value = g_strdup(x86_cpu_feature_name(w, i));
3790 *next = new;
3791 next = &new->next;
3797 static void x86_cpu_get_unavailable_features(Object *obj, Visitor *v,
3798 const char *name, void *opaque,
3799 Error **errp)
3801 X86CPU *xc = X86_CPU(obj);
3802 strList *result = NULL;
3804 x86_cpu_list_feature_names(xc->filtered_features, &result);
3805 visit_type_strList(v, "unavailable-features", &result, errp);
3808 /* Check for missing features that may prevent the CPU class from
3809 * running using the current machine and accelerator.
3811 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
3812 strList **missing_feats)
3814 X86CPU *xc;
3815 Error *err = NULL;
3816 strList **next = missing_feats;
3818 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
3819 strList *new = g_new0(strList, 1);
3820 new->value = g_strdup("kvm");
3821 *missing_feats = new;
3822 return;
3825 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
3827 x86_cpu_expand_features(xc, &err);
3828 if (err) {
3829 /* Errors at x86_cpu_expand_features should never happen,
3830 * but in case it does, just report the model as not
3831 * runnable at all using the "type" property.
3833 strList *new = g_new0(strList, 1);
3834 new->value = g_strdup("type");
3835 *next = new;
3836 next = &new->next;
3839 x86_cpu_filter_features(xc, false);
3841 x86_cpu_list_feature_names(xc->filtered_features, next);
3843 object_unref(OBJECT(xc));
3846 /* Print all cpuid feature names in featureset
3848 static void listflags(GList *features)
3850 size_t len = 0;
3851 GList *tmp;
3853 for (tmp = features; tmp; tmp = tmp->next) {
3854 const char *name = tmp->data;
3855 if ((len + strlen(name) + 1) >= 75) {
3856 qemu_printf("\n");
3857 len = 0;
3859 qemu_printf("%s%s", len == 0 ? " " : " ", name);
3860 len += strlen(name) + 1;
3862 qemu_printf("\n");
3865 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
3866 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
3868 ObjectClass *class_a = (ObjectClass *)a;
3869 ObjectClass *class_b = (ObjectClass *)b;
3870 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
3871 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
3872 char *name_a, *name_b;
3873 int ret;
3875 if (cc_a->ordering != cc_b->ordering) {
3876 ret = cc_a->ordering - cc_b->ordering;
3877 } else {
3878 name_a = x86_cpu_class_get_model_name(cc_a);
3879 name_b = x86_cpu_class_get_model_name(cc_b);
3880 ret = strcmp(name_a, name_b);
3881 g_free(name_a);
3882 g_free(name_b);
3884 return ret;
3887 static GSList *get_sorted_cpu_model_list(void)
3889 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
3890 list = g_slist_sort(list, x86_cpu_list_compare);
3891 return list;
3894 static char *x86_cpu_class_get_model_id(X86CPUClass *xc)
3896 Object *obj = object_new(object_class_get_name(OBJECT_CLASS(xc)));
3897 char *r = object_property_get_str(obj, "model-id", &error_abort);
3898 object_unref(obj);
3899 return r;
3902 static char *x86_cpu_class_get_alias_of(X86CPUClass *cc)
3904 X86CPUVersion version;
3906 if (!cc->model || !cc->model->is_alias) {
3907 return NULL;
3909 version = x86_cpu_model_resolve_version(cc->model);
3910 if (version <= 0) {
3911 return NULL;
3913 return x86_cpu_versioned_model_name(cc->model->cpudef, version);
3916 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
3918 ObjectClass *oc = data;
3919 X86CPUClass *cc = X86_CPU_CLASS(oc);
3920 char *name = x86_cpu_class_get_model_name(cc);
3921 char *desc = g_strdup(cc->model_description);
3922 char *alias_of = x86_cpu_class_get_alias_of(cc);
3924 if (!desc && alias_of) {
3925 if (cc->model && cc->model->version == CPU_VERSION_AUTO) {
3926 desc = g_strdup("(alias configured by machine type)");
3927 } else {
3928 desc = g_strdup_printf("(alias of %s)", alias_of);
3931 if (!desc) {
3932 desc = x86_cpu_class_get_model_id(cc);
3935 qemu_printf("x86 %-20s %-48s\n", name, desc);
3936 g_free(name);
3937 g_free(desc);
3938 g_free(alias_of);
3941 /* list available CPU models and flags */
3942 void x86_cpu_list(void)
3944 int i, j;
3945 GSList *list;
3946 GList *names = NULL;
3948 qemu_printf("Available CPUs:\n");
3949 list = get_sorted_cpu_model_list();
3950 g_slist_foreach(list, x86_cpu_list_entry, NULL);
3951 g_slist_free(list);
3953 names = NULL;
3954 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
3955 FeatureWordInfo *fw = &feature_word_info[i];
3956 for (j = 0; j < 64; j++) {
3957 if (fw->feat_names[j]) {
3958 names = g_list_append(names, (gpointer)fw->feat_names[j]);
3963 names = g_list_sort(names, (GCompareFunc)strcmp);
3965 qemu_printf("\nRecognized CPUID flags:\n");
3966 listflags(names);
3967 qemu_printf("\n");
3968 g_list_free(names);
3971 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
3973 ObjectClass *oc = data;
3974 X86CPUClass *cc = X86_CPU_CLASS(oc);
3975 CpuDefinitionInfoList **cpu_list = user_data;
3976 CpuDefinitionInfoList *entry;
3977 CpuDefinitionInfo *info;
3979 info = g_malloc0(sizeof(*info));
3980 info->name = x86_cpu_class_get_model_name(cc);
3981 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
3982 info->has_unavailable_features = true;
3983 info->q_typename = g_strdup(object_class_get_name(oc));
3984 info->migration_safe = cc->migration_safe;
3985 info->has_migration_safe = true;
3986 info->q_static = cc->static_model;
3988 * Old machine types won't report aliases, so that alias translation
3989 * doesn't break compatibility with previous QEMU versions.
3991 if (default_cpu_version != CPU_VERSION_LEGACY) {
3992 info->alias_of = x86_cpu_class_get_alias_of(cc);
3993 info->has_alias_of = !!info->alias_of;
3996 entry = g_malloc0(sizeof(*entry));
3997 entry->value = info;
3998 entry->next = *cpu_list;
3999 *cpu_list = entry;
4002 CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
4004 CpuDefinitionInfoList *cpu_list = NULL;
4005 GSList *list = get_sorted_cpu_model_list();
4006 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
4007 g_slist_free(list);
4008 return cpu_list;
4011 static uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
4012 bool migratable_only)
4014 FeatureWordInfo *wi = &feature_word_info[w];
4015 uint64_t r = 0;
4017 if (kvm_enabled()) {
4018 switch (wi->type) {
4019 case CPUID_FEATURE_WORD:
4020 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid.eax,
4021 wi->cpuid.ecx,
4022 wi->cpuid.reg);
4023 break;
4024 case MSR_FEATURE_WORD:
4025 r = kvm_arch_get_supported_msr_feature(kvm_state,
4026 wi->msr.index);
4027 break;
4029 } else if (hvf_enabled()) {
4030 if (wi->type != CPUID_FEATURE_WORD) {
4031 return 0;
4033 r = hvf_get_supported_cpuid(wi->cpuid.eax,
4034 wi->cpuid.ecx,
4035 wi->cpuid.reg);
4036 } else if (tcg_enabled()) {
4037 r = wi->tcg_features;
4038 } else {
4039 return ~0;
4041 if (migratable_only) {
4042 r &= x86_cpu_get_migratable_flags(w);
4044 return r;
4047 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
4049 PropValue *pv;
4050 for (pv = props; pv->prop; pv++) {
4051 if (!pv->value) {
4052 continue;
4054 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
4055 &error_abort);
/* Apply properties for the CPU model version specified in model */
static void x86_cpu_apply_version_props(X86CPU *cpu, X86CPUModel *model)
{
    const X86CPUVersionDefinition *vdef;
    X86CPUVersion version = x86_cpu_model_resolve_version(model);

    /* Legacy (pre-versioning) models get no version adjustments at all. */
    if (version == CPU_VERSION_LEGACY) {
        return;
    }

    /*
     * Walk the version list in order, applying each version's property
     * overrides, stopping after the requested version.  Versions are
     * cumulative: the props of every version up to and including the
     * target one are applied.
     */
    for (vdef = x86_cpu_def_get_versions(model->cpudef); vdef->version; vdef++) {
        PropValue *p;

        for (p = vdef->props; p && p->prop; p++) {
            object_property_parse(OBJECT(cpu), p->value, p->prop,
                                  &error_abort);
        }

        if (vdef->version == version) {
            break;
        }
    }

    /*
     * If we reached the end of the list, version number was invalid
     */
    assert(vdef->version == version);
}
/* Load data from X86CPUDefinition into a X86CPU object
 *
 * Copies the model table entry (@model->cpudef) into @cpu via QOM
 * properties, then applies accelerator defaults and version-specific
 * property overrides.  Errors from individual property sets are
 * reported through @errp.
 */
static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model, Error **errp)
{
    X86CPUDefinition *def = model->cpudef;
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    /*NOTE: any property set by this function should be returned by
     * x86_cpu_static_props(), so static expansion of
     * query-cpu-model-expansion is always complete.
     */

    /* CPU models only set _minimum_ values for level/xlevel: */
    object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
    object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);

    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    /* Feature words are copied directly, not via properties. */
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* legacy-cache defaults to 'off' if CPU model provides cache info */
    cpu->legacy_cache = !def->cache_info;

    /* Special cases not set in the X86CPUDefinition structs: */
    /* TODO: in-kernel irqchip for hvf */
    if (kvm_enabled()) {
        if (!kvm_irqchip_in_kernel()) {
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    } else if (tcg_enabled()) {
        x86_cpu_apply_props(cpu, tcg_default_props);
    }

    /* A guest always sees itself running under a hypervisor. */
    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (accel_uses_host_cpuid()) {
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);

    /* Version-specific overrides go last so they win over model defaults. */
    x86_cpu_apply_version_props(cpu, model);
}
4152 #ifndef CONFIG_USER_ONLY
4153 /* Return a QDict containing keys for all properties that can be included
4154 * in static expansion of CPU models. All properties set by x86_cpu_load_model()
4155 * must be included in the dictionary.
4157 static QDict *x86_cpu_static_props(void)
4159 FeatureWord w;
4160 int i;
4161 static const char *props[] = {
4162 "min-level",
4163 "min-xlevel",
4164 "family",
4165 "model",
4166 "stepping",
4167 "model-id",
4168 "vendor",
4169 "lmce",
4170 NULL,
4172 static QDict *d;
4174 if (d) {
4175 return d;
4178 d = qdict_new();
4179 for (i = 0; props[i]; i++) {
4180 qdict_put_null(d, props[i]);
4183 for (w = 0; w < FEATURE_WORDS; w++) {
4184 FeatureWordInfo *fi = &feature_word_info[w];
4185 int bit;
4186 for (bit = 0; bit < 64; bit++) {
4187 if (!fi->feat_names[bit]) {
4188 continue;
4190 qdict_put_null(d, fi->feat_names[bit]);
4194 return d;
4197 /* Add an entry to @props dict, with the value for property. */
4198 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
4200 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
4201 &error_abort);
4203 qdict_put_obj(props, prop, value);
4206 /* Convert CPU model data from X86CPU object to a property dictionary
4207 * that can recreate exactly the same CPU model.
4209 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
4211 QDict *sprops = x86_cpu_static_props();
4212 const QDictEntry *e;
4214 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
4215 const char *prop = qdict_entry_key(e);
4216 x86_cpu_expand_prop(cpu, props, prop);
4220 /* Convert CPU model data from X86CPU object to a property dictionary
4221 * that can recreate exactly the same CPU model, including every
4222 * writeable QOM property.
4224 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
4226 ObjectPropertyIterator iter;
4227 ObjectProperty *prop;
4229 object_property_iter_init(&iter, OBJECT(cpu));
4230 while ((prop = object_property_iter_next(&iter))) {
4231 /* skip read-only or write-only properties */
4232 if (!prop->get || !prop->set) {
4233 continue;
4236 /* "hotplugged" is the only property that is configurable
4237 * on the command-line but will be set differently on CPUs
4238 * created using "-cpu ... -smp ..." and by CPUs created
4239 * on the fly by x86_cpu_from_model() for querying. Skip it.
4241 if (!strcmp(prop->name, "hotplugged")) {
4242 continue;
4244 x86_cpu_expand_prop(cpu, props, prop->name);
4248 static void object_apply_props(Object *obj, QDict *props, Error **errp)
4250 const QDictEntry *prop;
4251 Error *err = NULL;
4253 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
4254 object_property_set_qobject(obj, qdict_entry_value(prop),
4255 qdict_entry_key(prop), &err);
4256 if (err) {
4257 break;
4261 error_propagate(errp, err);
/* Create X86CPU object according to model+props specification
 *
 * Looks up the QOM class for @model, instantiates it, applies @props
 * (if any) and expands features.  Returns a new reference on success,
 * or NULL with @errp set on failure (any partially-built object is
 * released).
 */
static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
{
    X86CPU *xc = NULL;
    X86CPUClass *xcc;
    Error *err = NULL;

    xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
    if (xcc == NULL) {
        error_setg(&err, "CPU model '%s' not found", model);
        goto out;
    }

    xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
    if (props) {
        object_apply_props(OBJECT(xc), props, &err);
        if (err) {
            goto out;
        }
    }

    x86_cpu_expand_features(xc, &err);
    if (err) {
        goto out;
    }

out:
    /* On any error, free the half-constructed CPU and report NULL. */
    if (err) {
        error_propagate(errp, err);
        object_unref(OBJECT(xc));
        xc = NULL;
    }
    return xc;
}
4299 CpuModelExpansionInfo *
4300 qmp_query_cpu_model_expansion(CpuModelExpansionType type,
4301 CpuModelInfo *model,
4302 Error **errp)
4304 X86CPU *xc = NULL;
4305 Error *err = NULL;
4306 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
4307 QDict *props = NULL;
4308 const char *base_name;
4310 xc = x86_cpu_from_model(model->name,
4311 model->has_props ?
4312 qobject_to(QDict, model->props) :
4313 NULL, &err);
4314 if (err) {
4315 goto out;
4318 props = qdict_new();
4319 ret->model = g_new0(CpuModelInfo, 1);
4320 ret->model->props = QOBJECT(props);
4321 ret->model->has_props = true;
4323 switch (type) {
4324 case CPU_MODEL_EXPANSION_TYPE_STATIC:
4325 /* Static expansion will be based on "base" only */
4326 base_name = "base";
4327 x86_cpu_to_dict(xc, props);
4328 break;
4329 case CPU_MODEL_EXPANSION_TYPE_FULL:
4330 /* As we don't return every single property, full expansion needs
4331 * to keep the original model name+props, and add extra
4332 * properties on top of that.
4334 base_name = model->name;
4335 x86_cpu_to_dict_full(xc, props);
4336 break;
4337 default:
4338 error_setg(&err, "Unsupported expansion type");
4339 goto out;
4342 x86_cpu_to_dict(xc, props);
4344 ret->model->name = g_strdup(base_name);
4346 out:
4347 object_unref(OBJECT(xc));
4348 if (err) {
4349 error_propagate(errp, err);
4350 qapi_free_CpuModelExpansionInfo(ret);
4351 ret = NULL;
4353 return ret;
4355 #endif /* !CONFIG_USER_ONLY */
4357 static gchar *x86_gdb_arch_name(CPUState *cs)
4359 #ifdef TARGET_X86_64
4360 return g_strdup("i386:x86-64");
4361 #else
4362 return g_strdup("i386");
4363 #endif
/* Class init for model-specific subclasses of TYPE_X86_CPU;
 * @data is the X86CPUModel the new class represents. */
static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
{
    X86CPUModel *model = data;
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->model = model;
    /* Named CPU models from the table are migration-safe. */
    xcc->migration_safe = true;
}
/* Register a QOM type for CPU model @name backed by @model.
 * @model is stored as class_data and must outlive the type (it is
 * never copied or freed here). */
static void x86_register_cpu_model_type(const char *name, X86CPUModel *model)
{
    char *typename = x86_cpu_type_name(name);
    TypeInfo ti = {
        .name = typename,
        .parent = TYPE_X86_CPU,
        .class_init = x86_cpu_cpudef_class_init,
        .class_data = model,
    };

    type_register(&ti);
    g_free(typename);
}
4389 static void x86_register_cpudef_types(X86CPUDefinition *def)
4391 X86CPUModel *m;
4392 const X86CPUVersionDefinition *vdef;
4393 char *name;
4395 /* AMD aliases are handled at runtime based on CPUID vendor, so
4396 * they shouldn't be set on the CPU model table.
4398 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
4399 /* catch mistakes instead of silently truncating model_id when too long */
4400 assert(def->model_id && strlen(def->model_id) <= 48);
4402 /* Unversioned model: */
4403 m = g_new0(X86CPUModel, 1);
4404 m->cpudef = def;
4405 m->version = CPU_VERSION_AUTO;
4406 m->is_alias = true;
4407 x86_register_cpu_model_type(def->name, m);
4409 /* Versioned models: */
4411 for (vdef = x86_cpu_def_get_versions(def); vdef->version; vdef++) {
4412 X86CPUModel *m = g_new0(X86CPUModel, 1);
4413 m->cpudef = def;
4414 m->version = vdef->version;
4415 name = x86_cpu_versioned_model_name(def, vdef->version);
4416 x86_register_cpu_model_type(name, m);
4417 g_free(name);
4419 if (vdef->alias) {
4420 X86CPUModel *am = g_new0(X86CPUModel, 1);
4421 am->cpudef = def;
4422 am->version = vdef->version;
4423 am->is_alias = true;
4424 x86_register_cpu_model_type(vdef->alias, am);
4430 #if !defined(CONFIG_USER_ONLY)
/* Remove the CPUID APIC feature bit from the configured feature set. */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}
4437 #endif /* !CONFIG_USER_ONLY */
/*
 * Compute the guest-visible CPUID result for leaf @index, sub-leaf
 * @count, from the feature words and level limits configured in @env.
 * All four output registers are always written.
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);
    uint32_t die_offset;
    uint32_t limit;
    uint32_t signature[3];

    /* Calculate & apply limits for different index ranges */
    if (index >= 0xC0000000) {
        limit = env->cpuid_xlevel2;
    } else if (index >= 0x80000000) {
        limit = env->cpuid_xlevel;
    } else if (index >= 0x40000000) {
        limit = 0x40000001;
    } else {
        limit = env->cpuid_level;
    }

    if (index > limit) {
        /* Intel documentation states that invalid EAX input will
         * return the same information as EAX=cpuid_level
         * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
         */
        index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* Vendor string is returned in EBX:EDX:ECX order. */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        /* Version/feature information. */
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        /* OSXSAVE reflects current CR4, not static configuration. */
        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
            *ecx |= CPUID_EXT_OSXSAVE;
        }
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= CPUID_HT;
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        if (!cpu->enable_l3_cache) {
            *ecx = 0;
        } else {
            *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache);
        }
        *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) |
               (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) <<  8) |
               (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            /* QEMU gives out its own APIC IDs, never pass down bits 31..26.  */
            *eax &= ~0xFC000000;
            if ((*eax & 31) && cs->nr_cores > 1) {
                *eax |= (cs->nr_cores - 1) << 26;
            }
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
                                    1, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 1: /* L1 icache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
                                    1, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 2: /* L2 cache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
                                    cs->nr_threads, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 3: /* L3 cache info */
                die_offset = apicid_die_offset(env->nr_dies,
                                        cs->nr_cores, cs->nr_threads);
                if (cpu->enable_l3_cache) {
                    encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
                                        (1 << die_offset), cs->nr_cores,
                                        eax, ebx, ecx, edx);
                    break;
                }
                /* fall through */
            default: /* end of info */
                *eax = *ebx = *ecx = *edx = 0;
                break;
            }
        }
        break;
    case 5:
        /* MONITOR/MWAIT Leaf */
        *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */
        *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */
        *ecx = cpu->mwait.ecx; /* flags */
        *edx = cpu->mwait.edx; /* mwait substates */
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            /* Maximum ECX value for sub-leaves */
            *eax = env->cpuid_level_func7;
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            /* OSPKE reflects current CR4.PKE, not static configuration. */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
        } else if (count == 1) {
            *eax = env->features[FEAT_7_1_EAX];
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else if (hvf_enabled() && cpu->enable_pmu) {
            *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
            *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
            *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
            *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xB:
        /* Extended Topology Enumeration Leaf */
        if (!cpu->enable_cpuid_0xb) {
                *eax = *ebx = *ecx = *edx = 0;
                break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;

        switch (count) {
        case 0:
            *eax = apicid_core_offset(env->nr_dies,
                                      cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_pkg_offset(env->nr_dies,
                                     cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }

        /* Shift widths above 31 would be invalid APIC ID encodings. */
        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0x1F:
        /* V2 Extended Topology Enumeration Leaf */
        if (env->nr_dies < 2) {
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;
        switch (count) {
        case 0:
            *eax = apicid_core_offset(env->nr_dies, cs->nr_cores,
                                                    cs->nr_threads);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_die_offset(env->nr_dies, cs->nr_cores,
                                                   cs->nr_threads);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        case 2:
            *eax = apicid_pkg_offset(env->nr_dies, cs->nr_cores,
                                                   cs->nr_threads);
            *ebx = env->nr_dies * cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_DIE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }
        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0xD: {
        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }

        if (count == 0) {
            *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
            *eax = env->features[FEAT_XSAVE_COMP_LO];
            *edx = env->features[FEAT_XSAVE_COMP_HI];
            *ebx = xsave_area_size(env->xcr0);
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            /* Per-component sub-leaves: size and offset of each area. */
            if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
                const ExtSaveArea *esa = &x86_ext_save_areas[count];
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x14: {
        /* Intel Processor Trace Enumeration */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
            !kvm_enabled()) {
            break;
        }

        if (count == 0) {
            *eax = INTEL_PT_MAX_SUBLEAF;
            *ebx = INTEL_PT_MINIMAL_EBX;
            *ecx = INTEL_PT_MINIMAL_ECX;
        } else if (count == 1) {
            *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
            *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
        }
        break;
    }
    case 0x40000000:
        /*
         * CPUID code in kvm_arch_init_vcpu() ignores stuff
         * set here, but we restrict to TCG none the less.
         */
        if (tcg_enabled() && cpu->expose_tcg) {
            memcpy(signature, "TCGTCGTCGTCG", 12);
            *eax = 0x40000001;
            *ebx = signature[0];
            *ecx = signature[1];
            *edx = signature[2];
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0x40000001:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        /* Extended function root: max extended leaf + vendor string. */
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Processor brand string, 16 bytes per leaf. */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
        *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache);
        *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        encode_cache_cpuid80000006(env->cache_info_amd.l2_cache,
                                   cpu->enable_l3_cache ?
                                   env->cache_info_amd.l3_cache : NULL,
                                   ecx, edx);
        break;
    case 0x80000007:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor */
            *eax = cpu->phys_bits; /* configurable physical bits */
            if  (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
                *eax |= 0x00003900; /* 57 bits virtual */
            } else {
                *eax |= 0x00003000; /* 48 bits virtual */
            }
        } else {
            *eax = cpu->phys_bits;
        }
        *ebx = env->features[FEAT_8000_0008_EBX];
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0x8000001D:
        /* AMD cache topology, one sub-leaf per cache level. */
        *eax = 0;
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            break;
        }
        switch (count) {
        case 0: /* L1 dcache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 1: /* L1 icache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 2: /* L2 cache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 3: /* L3 cache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        default: /* end of info */
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }
        break;
    case 0x8000001E:
        assert(cpu->core_id <= 255);
        encode_topo_cpuid8000001e(cs, cpu,
                                  eax, ebx, ecx, edx);
        break;
    case 0xC0000000:
        /* VIA/Centaur extended leaf root. */
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x8000001F:
        /* AMD memory encryption (SEV) capabilities. */
        *eax = sev_enabled() ? 0x2 : 0;
        *ebx = sev_get_cbit_position();
        *ebx |= sev_get_reduced_phys_bits() << 6;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
/* CPUClass::reset()
 *
 * Bring the CPU to its architectural power-on state: real mode at the
 * reset vector, FPU/SSE/XSAVE state initialized, MTRRs and debug
 * registers cleared, and pending interrupt/exception state discarded.
 */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    /* Only fields up to end_reset_fields are cleared on reset. */
    memset(env, 0, offsetof(CPUX86State, end_reset_fields));

    env->old_exception = -1;

    /* init to reset state */

    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;
    env->msr_smi_count = 0;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* CS base 0xffff0000 + IP 0xfff0 = the canonical reset vector. */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state.  */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
    if (env->features[FEAT_1_ECX] & CPUID_EXT_MONITOR) {
        env->msr_ia32_misc_enable |= MSR_IA32_MISC_ENABLE_MWAIT;
    }

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode. */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

    /* Discard any pending interrupt/exception injection state. */
    env->interrupt_injected = -1;
    env->exception_nr = -1;
    env->exception_pending = 0;
    env->exception_injected = 0;
    env->exception_has_payload = false;
    env->exception_payload = 0;
    env->nmi_injected = false;
#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    /* Application processors start halted, waiting for SIPI. */
    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
    else if (hvf_enabled()) {
        hvf_reset_vcpu(s);
    }
#endif
}
5064 #ifndef CONFIG_USER_ONLY
5065 bool cpu_is_bsp(X86CPU *cpu)
5067 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
/* TODO: remove me, when reset over QOM tree is implemented */
/* Machine-reset callback: reset the CPU registered as @opaque. */
static void x86_cpu_machine_reset_cb(void *opaque)
{
    X86CPU *cpu = opaque;
    cpu_reset(CPU(cpu));
}
5076 #endif
5078 static void mce_init(X86CPU *cpu)
5080 CPUX86State *cenv = &cpu->env;
5081 unsigned int bank;
5083 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
5084 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
5085 (CPUID_MCE | CPUID_MCA)) {
5086 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
5087 (cpu->enable_lmce ? MCG_LMCE_P : 0);
5088 cenv->mcg_ctl = ~(uint64_t)0;
5089 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
5090 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
5095 #ifndef CONFIG_USER_ONLY
5096 APICCommonClass *apic_get_class(void)
5098 const char *apic_type = "apic";
5100 /* TODO: in-kernel irqchip for hvf */
5101 if (kvm_apic_in_kernel()) {
5102 apic_type = "kvm-apic";
5103 } else if (xen_enabled()) {
5104 apic_type = "xen-apic";
5107 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
/* Create the local APIC device for @cpu and attach it as a QOM child. */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());

    cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));

    object_property_add_child(OBJECT(cpu), "lapic",
                              OBJECT(cpu->apic_state), &error_abort);
    /* Drop the creation reference; the "lapic" child property holds one. */
    object_unref(OBJECT(cpu->apic_state));

    qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}
/* Realize @cpu's APIC device (if any) and map the APIC MMIO window.
 * The MMIO region is mapped only once, by the first CPU realized. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    static bool apic_mmio_map_once;

    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
     }
}
/* machine-done notifier: if the board created a "/machine/smram" region,
 * alias it into this CPU's address space as the per-CPU SMRAM view. */
static void x86_cpu_machine_done(Notifier *n, void *unused)
{
    X86CPU *cpu = container_of(n, X86CPU, machine_done);
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    if (smram) {
        cpu->smram = g_new(MemoryRegion, 1);
        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
                                 smram, 0, 1ull << 32);
        memory_region_set_enabled(cpu->smram, true);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
    }
}
5165 #else
/* User-mode build: there is no APIC device, so realization is a no-op. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
5169 #endif
5171 /* Note: Only safe for use on x86(-64) hosts */
5172 static uint32_t x86_host_phys_bits(void)
5174 uint32_t eax;
5175 uint32_t host_phys_bits;
5177 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
5178 if (eax >= 0x80000008) {
5179 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
5180 /* Note: According to AMD doc 25481 rev 2.34 they have a field
5181 * at 23:16 that can specify a maximum physical address bits for
5182 * the guest that can override this value; but I've not seen
5183 * anything with that set.
5185 host_phys_bits = eax & 0xff;
5186 } else {
5187 /* It's an odd 64 bit machine that doesn't have the leaf for
5188 * physical address bits; fall back to 36 that's most older
5189 * Intel.
5191 host_phys_bits = 36;
5194 return host_phys_bits;
5197 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
5199 if (*min < value) {
5200 *min = value;
/* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
{
    CPUX86State *env = &cpu->env;
    FeatureWordInfo *fi = &feature_word_info[w];
    uint32_t eax = fi->cpuid.eax;
    uint32_t region = eax & 0xF0000000;

    assert(feature_word_info[w].type == CPUID_FEATURE_WORD);
    /* If no bit of the word is enabled, no CPUID level is needed for it. */
    if (!env->features[w]) {
        return;
    }

    /* Bump the minimum level of whichever CPUID range the leaf lives in. */
    switch (region) {
    case 0x00000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
        break;
    case 0x80000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
        break;
    case 0xC0000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
        break;
    }

    /* Leaf 7 additionally tracks its maximum sub-leaf (ECX). */
    if (eax == 7) {
        x86_cpu_adjust_level(cpu, &env->cpuid_min_level_func7,
                             fi->cpuid.ecx);
    }
}
/* Calculate XSAVE components based on the configured CPU feature flags */
static void x86_cpu_enable_xsave_components(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int i;
    uint64_t mask;

    if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
        /* Without XSAVE itself, advertise no components at all. */
        return;
    }

    /* Enable each extended save area whose controlling feature bit is set. */
    mask = 0;
    for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            mask |= (1ULL << i);
        }
    }

    /* Supported XCR0 bits, split into the CPUID[0xD] low/high words. */
    env->features[FEAT_XSAVE_COMP_LO] = mask;
    env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
}
5258 /***** Steps involved on loading and filtering CPUID data
5260 * When initializing and realizing a CPU object, the steps
5261 * involved in setting up CPUID data are:
5263 * 1) Loading CPU model definition (X86CPUDefinition). This is
5264 * implemented by x86_cpu_load_model() and should be completely
5265 * transparent, as it is done automatically by instance_init.
5266 * No code should need to look at X86CPUDefinition structs
5267 * outside instance_init.
5269 * 2) CPU expansion. This is done by realize before CPUID
5270 * filtering, and will make sure host/accelerator data is
5271 * loaded for CPU models that depend on host capabilities
5272 * (e.g. "host"). Done by x86_cpu_expand_features().
5274 * 3) CPUID filtering. This initializes extra data related to
5275 * CPUID, and checks if the host supports all capabilities
5276 * required by the CPU. Runnability of a CPU model is
5277 * determined at this step. Done by x86_cpu_filter_features().
5279 * Some operations don't require all steps to be performed.
5280 * More precisely:
5282 * - CPU instance creation (instance_init) will run only CPU
5283 * model loading. CPU expansion can't run at instance_init-time
5284 * because host/accelerator data may be not available yet.
5285 * - CPU realization will perform both CPU model expansion and CPUID
5286 * filtering, and return an error in case one of them fails.
5287 * - query-cpu-definitions needs to run all 3 steps. It needs
5288 * to run CPUID filtering, as the 'unavailable-features'
5289 * field is set based on the filtering results.
5290 * - The query-cpu-model-expansion QMP command only needs to run
5291 * CPU model loading and CPU expansion. It should not filter
5292 * any CPUID data based on host capabilities.
/* Expand CPU configuration data, based on configured features
 * and host/accelerator capabilities when appropriate.
 */
static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    int i;
    GList *l;
    Error *local_err = NULL;

    /* Apply "+feature" command-line flags as QOM boolean properties. */
    for (l = plus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /* ... and "-feature" flags, which clear the corresponding bits. */
    for (l = minus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /*TODO: Now cpu->max_features doesn't overwrite features
     * set using QOM properties, and we can convert
     * plus_features & minus_features to global properties
     * inside x86_cpu_parse_featurestr() too.
     */
    if (cpu->max_features) {
        for (w = 0; w < FEATURE_WORDS; w++) {
            /* Override only features that weren't set explicitly
             * by the user.
             */
            env->features[w] |=
                x86_cpu_get_supported_feature_word(w, cpu->migratable) &
                ~env->user_features[w] & \
                ~feature_word_info[w].no_autoenable_flags;
        }
    }

    /* Drop features whose prerequisites are not enabled. */
    for (i = 0; i < ARRAY_SIZE(feature_dependencies); i++) {
        FeatureDep *d = &feature_dependencies[i];
        if (!(env->features[d->from.index] & d->from.mask)) {
            uint64_t unavailable_features = env->features[d->to.index] & d->to.mask;

            /* Not an error unless the dependent feature was added explicitly.  */
            mark_unavailable_features(cpu, d->to.index,
                unavailable_features & env->user_features[d->to.index],
                "This feature depends on other features that were not requested");

            env->user_features[d->to.index] |= unavailable_features;
            env->features[d->to.index] &= ~unavailable_features;
        }
    }

    if (!kvm_enabled() || !cpu->expose_kvm) {
        env->features[FEAT_KVM] = 0;
    }

    x86_cpu_enable_xsave_components(cpu);

    /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
    x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
    if (cpu->full_cpuid_auto_level) {
        x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
        x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_7_1_EAX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
        x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
        x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);

        /* Intel Processor Trace requires CPUID[0x14] */
        if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
            kvm_enabled() && cpu->intel_pt_auto_level) {
            x86_cpu_adjust_level(cpu, &cpu->env.cpuid_min_level, 0x14);
        }

        /* CPU topology with multi-dies support requires CPUID[0x1F] */
        if (env->nr_dies > 1) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x1F);
        }

        /* SVM requires CPUID[0x8000000A] */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
        }

        /* SEV requires CPUID[0x8000001F] */
        if (sev_enabled()) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
        }
    }

    /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
    if (env->cpuid_level_func7 == UINT32_MAX) {
        env->cpuid_level_func7 = env->cpuid_min_level_func7;
    }
    if (env->cpuid_level == UINT32_MAX) {
        env->cpuid_level = env->cpuid_min_level;
    }
    if (env->cpuid_xlevel == UINT32_MAX) {
        env->cpuid_xlevel = env->cpuid_min_xlevel;
    }
    if (env->cpuid_xlevel2 == UINT32_MAX) {
        env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
    }

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
}
/*
 * Finishes initialization of CPUID data, filters CPU feature
 * words based on host availability of each feature.
 *
 * Unsupported requested features are cleared from env->features and
 * recorded via mark_unavailable_features(); warnings are printed only
 * when @verbose is set.  (The old "Returns: 0 ..." note was stale: the
 * function returns void.)
 */
static void x86_cpu_filter_features(X86CPU *cpu, bool verbose)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    const char *prefix = NULL;

    if (verbose) {
        prefix = accel_uses_host_cpuid()
                 ? "host doesn't support requested feature"
                 : "TCG doesn't support requested feature";
    }

    /* Drop every requested bit the accelerator cannot provide. */
    for (w = 0; w < FEATURE_WORDS; w++) {
        uint64_t host_feat =
            x86_cpu_get_supported_feature_word(w, false);
        uint64_t requested_features = env->features[w];
        uint64_t unavailable_features = requested_features & ~host_feat;
        mark_unavailable_features(cpu, w, unavailable_features, prefix);
    }

    /* Extra check for Intel PT: the host's CPUID[0x14] capabilities must
     * cover everything cpu_x86_cpuid() will report to the guest. */
    if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
        kvm_enabled()) {
        KVMState *s = CPU(cpu)->kvm_state;
        uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
        uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
        uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
        uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
        uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);

        if (!eax_0 ||
           ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
           ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
           ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
           ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
                                           INTEL_PT_ADDR_RANGES_NUM) ||
           ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
                (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
           (ecx_0 & INTEL_PT_IP_LIP)) {
            /*
             * Processor Trace capabilities aren't configurable, so if the
             * host can't emulate the capabilities we report on
             * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
             */
            mark_unavailable_features(cpu, FEAT_7_0_EBX, CPUID_7_0_EBX_INTEL_PT, prefix);
        }
    }
}
/*
 * realize handler for TYPE_X86_CPU: expands and filters CPUID features,
 * settles phys-bits and cache info, creates the APIC and (for TCG) the
 * SMM address space, then starts the vCPU.  Errors are reported via @errp.
 */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;

    /* Models like "host" only make sense with a host-CPUID accelerator. */
    if (xcc->host_cpuid_required) {
        if (!accel_uses_host_cpuid()) {
            char *name = x86_cpu_class_get_model_name(xcc);
            error_setg(&local_err, "CPU model '%s' requires KVM", name);
            g_free(name);
            goto out;
        }

        if (enable_cpu_pm) {
            host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx,
                       &cpu->mwait.ecx, &cpu->mwait.edx);
            env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR;
        }
    }

    /* mwait extended info: needed for Core compatibility */
    /* We always wake on interrupt even if host does not have the capability */
    cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;

    if (cpu->apic_id == UNASSIGNED_APIC_ID) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    x86_cpu_expand_features(cpu, &local_err);
    if (local_err) {
        goto out;
    }

    x86_cpu_filter_features(cpu, cpu->check_cpuid || cpu->enforce_cpuid);

    if (cpu->enforce_cpuid && x86_cpu_have_filtered_features(cpu)) {
        error_setg(&local_err,
                   accel_uses_host_cpuid() ?
                       "Host doesn't support requested features" :
                       "TCG doesn't support requested features");
        goto out;
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
           & CPUID_EXT2_AMD_ALIASES);
    }

    /* For 64bit systems think about the number of physical bits to present.
     * ideally this should be the same as the host; anything other than matching
     * the host can cause incorrect guest behaviour.
     * QEMU used to pick the magic value of 40 bits that corresponds to
     * consumer AMD devices but nothing else.
     */
    if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
        if (accel_uses_host_cpuid()) {
            uint32_t host_phys_bits = x86_host_phys_bits();
            static bool warned;

            /* Print a warning if the user set it to a value that's not the
             * host value.
             */
            if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
                !warned) {
                warn_report("Host physical bits (%u)"
                            " does not match phys-bits property (%u)",
                            host_phys_bits, cpu->phys_bits);
                warned = true;
            }

            if (cpu->host_phys_bits) {
                /* The user asked for us to use the host physical bits */
                cpu->phys_bits = host_phys_bits;
                if (cpu->host_phys_bits_limit &&
                    cpu->phys_bits > cpu->host_phys_bits_limit) {
                    cpu->phys_bits = cpu->host_phys_bits_limit;
                }
            }

            if (cpu->phys_bits &&
                (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
                cpu->phys_bits < 32)) {
                error_setg(errp, "phys-bits should be between 32 and %u "
                                 " (but is %u)",
                                 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
                return;
            }
        } else {
            /* TCG always emulates exactly TCG_PHYS_ADDR_BITS. */
            if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
                error_setg(errp, "TCG only supports phys-bits=%u",
                                  TCG_PHYS_ADDR_BITS);
                return;
            }
        }
        /* 0 means it was not explicitly set by the user (or by machine
         * compat_props or by the host code above). In this case, the default
         * is the value used by TCG (40).
         */
        if (cpu->phys_bits == 0) {
            cpu->phys_bits = TCG_PHYS_ADDR_BITS;
        }
    } else {
        /* For 32 bit systems don't use the user set value, but keep
         * phys_bits consistent with what we tell the guest.
         */
        if (cpu->phys_bits != 0) {
            error_setg(errp, "phys-bits is not user-configurable in 32 bit");
            return;
        }

        if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
            cpu->phys_bits = 36;
        } else {
            cpu->phys_bits = 32;
        }
    }

    /* Cache information initialization */
    if (!cpu->legacy_cache) {
        if (!xcc->model || !xcc->model->cpudef->cache_info) {
            char *name = x86_cpu_class_get_model_name(xcc);
            error_setg(errp,
                       "CPU model '%s' doesn't support legacy-cache=off", name);
            g_free(name);
            return;
        }
        env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
            *xcc->model->cpudef->cache_info;
    } else {
        /* Build legacy cache information */
        env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
        env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;

        env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
        env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;

        env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
        env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
        env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
        env->cache_info_amd.l3_cache = &legacy_l3_cache;
    }

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

#ifndef CONFIG_USER_ONLY
    MachineState *ms = MACHINE(qdev_get_machine());
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || ms->smp.cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);

        cs->num_ases = 2;
        cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
        cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);

        /* ... SMRAM with higher priority, linked from /machine/smram.  */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /*
     * Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU
     * fixes this issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to give
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't be populated yet and the checking is incorrect.
     */
    if (IS_AMD_CPU(env) &&
        !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) &&
        cs->nr_threads > 1 && !ht_warned) {
        warn_report("This family of AMD CPU doesn't support "
                    "hyperthreading(%d)",
                    cs->nr_threads);
        error_printf("Please configure -smp options properly"
                     " or try enabling topoext feature.\n");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
/*
 * unrealize handler: tear down what x86_cpu_realizefn() set up —
 * remove the vCPU, unregister the reset callback, destroy the APIC,
 * then chain to the parent class unrealize.
 */
static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
{
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu_remove_sync(CPU(dev));
    qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
#endif

    if (cpu->apic_state) {
        object_unparent(OBJECT(cpu->apic_state));
        cpu->apic_state = NULL;
    }

    xcc->parent_unrealize(dev, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
/* Opaque state for one feature-bit QOM property: the feature word it
 * lives in and the bit mask (possibly several bits) it controls. */
typedef struct BitProperty {
    FeatureWord w;
    uint64_t mask;
} BitProperty;
/* QOM getter for a feature-bit property. */
static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    BitProperty *fp = opaque;
    uint64_t f = cpu->env.features[fp->w];
    /* Report true only if *all* bits covered by this property are set. */
    bool value = (f & fp->mask) == fp->mask;
    visit_type_bool(v, name, &value, errp);
}
/* QOM setter for a feature-bit property. */
static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    DeviceState *dev = DEVICE(obj);
    X86CPU *cpu = X86_CPU(obj);
    BitProperty *fp = opaque;
    Error *local_err = NULL;
    bool value;

    /* Feature bits are frozen once the CPU is realized. */
    if (dev->realized) {
        qdev_prop_set_after_realize(dev, name, errp);
        return;
    }

    visit_type_bool(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (value) {
        cpu->env.features[fp->w] |= fp->mask;
    } else {
        cpu->env.features[fp->w] &= ~fp->mask;
    }
    /* Record the explicit user choice so x86_cpu_expand_features() does
     * not auto-override these bits later. */
    cpu->env.user_features[fp->w] |= fp->mask;
}
5778 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
5779 void *opaque)
5781 BitProperty *prop = opaque;
5782 g_free(prop);
/* Register a boolean property to get/set a single bit in a uint32_t field.
 *
 * The same property name can be registered multiple times to make it affect
 * multiple bits in the same FeatureWord. In that case, the getter will return
 * true only if all bits are set.
 */
static void x86_cpu_register_bit_prop(X86CPU *cpu,
                                      const char *prop_name,
                                      FeatureWord w,
                                      int bitnr)
{
    BitProperty *fp;
    ObjectProperty *op;
    uint64_t mask = (1ULL << bitnr);

    op = object_property_find(OBJECT(cpu), prop_name, NULL);
    if (op) {
        /* Name already registered: just widen the existing bit mask. */
        fp = op->opaque;
        assert(fp->w == w);
        fp->mask |= mask;
    } else {
        /* fp is freed by x86_cpu_release_bit_prop() when the property
         * is deleted. */
        fp = g_new0(BitProperty, 1);
        fp->w = w;
        fp->mask = mask;
        object_property_add(OBJECT(cpu), prop_name, "bool",
                            x86_cpu_get_bit_prop,
                            x86_cpu_set_bit_prop,
                            x86_cpu_release_bit_prop, fp, &error_abort);
    }
}
/* Register the QOM property for one named feature bit; unnamed bits are
 * skipped. */
static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
                                               FeatureWord w,
                                               int bitnr)
{
    FeatureWordInfo *fi = &feature_word_info[w];
    const char *name = fi->feat_names[bitnr];

    if (!name) {
        return;
    }

    /* Property names should use "-" instead of "_".
     * Old names containing underscores are registered as aliases
     * using object_property_add_alias()
     */
    assert(!strchr(name, '_'));
    /* aliases don't use "|" delimiters anymore, they are registered
     * manually using object_property_add_alias() */
    assert(!strchr(name, '|'));
    x86_cpu_register_bit_prop(cpu, name, w, bitnr);
}
/*
 * Build a GuestPanicInformation from the Hyper-V crash MSRs, or return
 * NULL when the crash MSRs are not exposed.  The caller owns (and must
 * free) the returned structure.
 */
static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    GuestPanicInformation *panic_info = NULL;

    if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
        panic_info = g_malloc0(sizeof(GuestPanicInformation));

        panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;

        /* The five Hyper-V crash parameter MSRs map to arg1..arg5. */
        assert(HV_CRASH_PARAMS >= 5);
        panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
        panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
        panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
        panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
        panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
    }

    return panic_info;
}
5859 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
5860 const char *name, void *opaque,
5861 Error **errp)
5863 CPUState *cs = CPU(obj);
5864 GuestPanicInformation *panic_info;
5866 if (!cs->crash_occurred) {
5867 error_setg(errp, "No crash occured");
5868 return;
5871 panic_info = x86_cpu_get_crash_info(cs);
5872 if (panic_info == NULL) {
5873 error_setg(errp, "No crash information");
5874 return;
5877 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
5878 errp);
5879 qapi_free_GuestPanicInformation(panic_info);
/*
 * instance_init for TYPE_X86_CPU: registers all QOM properties (versioning,
 * feature words, per-bit feature flags and their legacy aliases) and loads
 * the class's CPU model definition.
 */
static void x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    env->nr_dies = 1;
    cpu_set_cpustate_pointers(cpu);

    /* CPUID version/identification properties. */
    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);
    /*
     * The "unavailable-features" property has the same semantics as
     * CpuDefinitionInfo.unavailable-features on the "query-cpu-definitions"
     * QMP command: they list the features that would have prevented the
     * CPU from running if the "enforce" flag was set.
     */
    object_property_add(obj, "unavailable-features", "strList",
                        x86_cpu_get_unavailable_features,
                        NULL, NULL, NULL, &error_abort);

    object_property_add(obj, "crash-information", "GuestPanicInformation",
                        x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);

    /* One boolean property per named bit of every feature word. */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 64; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    /* Aliases for compatibility with older property spellings. */
    object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
    object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
    object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
    object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
    object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "i64", obj, "lm", &error_abort);

    object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
    object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
    object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
    object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
    object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
    object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
    object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
    object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
    object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
    object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
    object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
    object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
    object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
    object_property_add_alias(obj, "kvm_poll_control", obj, "kvm-poll-control",
                              &error_abort);
    object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
    object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
    object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
    object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
    object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
    object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);

    if (xcc->model) {
        x86_cpu_load_model(cpu, xcc->model, &error_abort);
    }
}
5974 static int64_t x86_cpu_get_arch_id(CPUState *cs)
5976 X86CPU *cpu = X86_CPU(cs);
5978 return cpu->apic_id;
5981 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
5983 X86CPU *cpu = X86_CPU(cs);
5985 return cpu->env.cr[0] & CR0_PG_MASK;
5988 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
5990 X86CPU *cpu = X86_CPU(cs);
5992 cpu->env.eip = value;
5995 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
5997 X86CPU *cpu = X86_CPU(cs);
5999 cpu->env.eip = tb->pc - tb->cs_base;
/*
 * Return the highest-priority interrupt in @interrupt_request that is
 * currently deliverable, or 0 if none is.  Priority order follows the
 * checks below: POLL, SIPI, then (GIF permitting) SMI, NMI, MCE,
 * HARD and VIRQ.
 */
int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if !defined(CONFIG_USER_ONLY)
    if (interrupt_request & CPU_INTERRUPT_POLL) {
        return CPU_INTERRUPT_POLL;
    }
#endif
    if (interrupt_request & CPU_INTERRUPT_SIPI) {
        return CPU_INTERRUPT_SIPI;
    }

    /* Everything below is gated on the SVM global interrupt flag. */
    if (env->hflags2 & HF2_GIF_MASK) {
        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
            !(env->hflags & HF_SMM_MASK)) {
            return CPU_INTERRUPT_SMI;
        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                   !(env->hflags2 & HF2_NMI_MASK)) {
            return CPU_INTERRUPT_NMI;
        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
            return CPU_INTERRUPT_MCE;
        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                   (((env->hflags2 & HF2_VINTR_MASK) &&
                     (env->hflags2 & HF2_HIF_MASK)) ||
                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                     (env->eflags & IF_MASK &&
                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
            return CPU_INTERRUPT_HARD;
#if !defined(CONFIG_USER_ONLY)
        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                   (env->eflags & IF_MASK) &&
                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
            return CPU_INTERRUPT_VIRQ;
#endif
        }
    }

    return 0;
}
6044 static bool x86_cpu_has_work(CPUState *cs)
6046 return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
/*
 * Configure the disassembler for the CPU's current code-segment mode
 * (64-bit, 32-bit protected, or 16-bit), for both the BFD and Capstone
 * backends.
 */
static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
                  : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
                  : bfd_mach_i386_i8086);
    info->print_insn = print_insn_i386;

    /* Capstone settings mirror the BFD mode selection above. */
    info->cap_arch = CS_ARCH_X86;
    info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
                      : env->hflags & HF_CS32_MASK ? CS_MODE_32
                      : CS_MODE_16);
    info->cap_insn_unit = 1;
    info->cap_insn_split = 8;
}
/*
 * Recompute the derived bits of env->hflags from the current segment
 * descriptors, control registers, EFLAGS and EFER.  Bits outside
 * HFLAG_COPY_MASK are preserved from the existing hflags value.
 */
void x86_update_hflags(CPUX86State *env)
{
    uint32_t hflags;
#define HFLAG_COPY_MASK \
    ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
       HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
       HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
       HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    hflags = env->hflags & HFLAG_COPY_MASK;
    /* Current privilege level comes from the SS descriptor's DPL field. */
    hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
                (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        hflags |= HF_OSFXSR_MASK;
    }

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        /* 64-bit code segment: force 32-bit stack/code plus CS64. */
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        /* Derive CS32/SS32 from the descriptors' default-size (B) bit. */
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            /* ADDSEG is needed only when some segment base is non-zero. */
            hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = hflags;
}
6109 static Property x86_cpu_properties[] = {
6110 #ifdef CONFIG_USER_ONLY
6111 /* apic_id = 0 by default for *-user, see commit 9886e834 */
6112 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
6113 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
6114 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
6115 DEFINE_PROP_INT32("die-id", X86CPU, die_id, 0),
6116 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
6117 #else
6118 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
6119 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
6120 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
6121 DEFINE_PROP_INT32("die-id", X86CPU, die_id, -1),
6122 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
6123 #endif
6124 DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
6125 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
6127 DEFINE_PROP_UINT32("hv-spinlocks", X86CPU, hyperv_spinlock_attempts,
6128 HYPERV_SPINLOCK_NEVER_RETRY),
6129 DEFINE_PROP_BIT64("hv-relaxed", X86CPU, hyperv_features,
6130 HYPERV_FEAT_RELAXED, 0),
6131 DEFINE_PROP_BIT64("hv-vapic", X86CPU, hyperv_features,
6132 HYPERV_FEAT_VAPIC, 0),
6133 DEFINE_PROP_BIT64("hv-time", X86CPU, hyperv_features,
6134 HYPERV_FEAT_TIME, 0),
6135 DEFINE_PROP_BIT64("hv-crash", X86CPU, hyperv_features,
6136 HYPERV_FEAT_CRASH, 0),
6137 DEFINE_PROP_BIT64("hv-reset", X86CPU, hyperv_features,
6138 HYPERV_FEAT_RESET, 0),
6139 DEFINE_PROP_BIT64("hv-vpindex", X86CPU, hyperv_features,
6140 HYPERV_FEAT_VPINDEX, 0),
6141 DEFINE_PROP_BIT64("hv-runtime", X86CPU, hyperv_features,
6142 HYPERV_FEAT_RUNTIME, 0),
6143 DEFINE_PROP_BIT64("hv-synic", X86CPU, hyperv_features,
6144 HYPERV_FEAT_SYNIC, 0),
6145 DEFINE_PROP_BIT64("hv-stimer", X86CPU, hyperv_features,
6146 HYPERV_FEAT_STIMER, 0),
6147 DEFINE_PROP_BIT64("hv-frequencies", X86CPU, hyperv_features,
6148 HYPERV_FEAT_FREQUENCIES, 0),
6149 DEFINE_PROP_BIT64("hv-reenlightenment", X86CPU, hyperv_features,
6150 HYPERV_FEAT_REENLIGHTENMENT, 0),
6151 DEFINE_PROP_BIT64("hv-tlbflush", X86CPU, hyperv_features,
6152 HYPERV_FEAT_TLBFLUSH, 0),
6153 DEFINE_PROP_BIT64("hv-evmcs", X86CPU, hyperv_features,
6154 HYPERV_FEAT_EVMCS, 0),
6155 DEFINE_PROP_BIT64("hv-ipi", X86CPU, hyperv_features,
6156 HYPERV_FEAT_IPI, 0),
6157 DEFINE_PROP_BIT64("hv-stimer-direct", X86CPU, hyperv_features,
6158 HYPERV_FEAT_STIMER_DIRECT, 0),
6159 DEFINE_PROP_BOOL("hv-passthrough", X86CPU, hyperv_passthrough, false),
6161 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
6162 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
6163 DEFINE_PROP_BOOL("x-force-features", X86CPU, force_features, false),
6164 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
6165 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
6166 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
6167 DEFINE_PROP_UINT8("host-phys-bits-limit", X86CPU, host_phys_bits_limit, 0),
6168 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
6169 DEFINE_PROP_UINT32("level-func7", X86CPU, env.cpuid_level_func7,
6170 UINT32_MAX),
6171 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
6172 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
6173 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
6174 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
6175 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
6176 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
6177 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
6178 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
6179 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
6180 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
6181 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
6182 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
6183 false),
6184 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
6185 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
6186 DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count,
6187 true),
6189      * legacy_cache defaults to true unless the CPU model provides its
6190 * own cache information (see x86_cpu_load_def()).
6192 DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true),
6195 * From "Requirements for Implementing the Microsoft
6196 * Hypervisor Interface":
6197 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
6199 * "Starting with Windows Server 2012 and Windows 8, if
6200 * CPUID.40000005.EAX contains a value of -1, Windows assumes that
6201 * the hypervisor imposes no specific limit to the number of VPs.
6202 * In this case, Windows Server 2012 guest VMs may use more than
6203 * 64 VPs, up to the maximum supported number of processors applicable
6204 * to the specific Windows version being used."
6206 DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
6207 DEFINE_PROP_BOOL("x-hv-synic-kvm-only", X86CPU, hyperv_synic_kvm_only,
6208 false),
6209 DEFINE_PROP_BOOL("x-intel-pt-auto-level", X86CPU, intel_pt_auto_level,
6210 true),
6211 DEFINE_PROP_END_OF_LIST()
6214 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
6216 X86CPUClass *xcc = X86_CPU_CLASS(oc);
6217 CPUClass *cc = CPU_CLASS(oc);
6218 DeviceClass *dc = DEVICE_CLASS(oc);
6220 device_class_set_parent_realize(dc, x86_cpu_realizefn,
6221 &xcc->parent_realize);
6222 device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
6223 &xcc->parent_unrealize);
6224 dc->props = x86_cpu_properties;
6226 xcc->parent_reset = cc->reset;
6227 cc->reset = x86_cpu_reset;
6228 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
6230 cc->class_by_name = x86_cpu_class_by_name;
6231 cc->parse_features = x86_cpu_parse_featurestr;
6232 cc->has_work = x86_cpu_has_work;
6233 #ifdef CONFIG_TCG
6234 cc->do_interrupt = x86_cpu_do_interrupt;
6235 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
6236 #endif
6237 cc->dump_state = x86_cpu_dump_state;
6238 cc->get_crash_info = x86_cpu_get_crash_info;
6239 cc->set_pc = x86_cpu_set_pc;
6240 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
6241 cc->gdb_read_register = x86_cpu_gdb_read_register;
6242 cc->gdb_write_register = x86_cpu_gdb_write_register;
6243 cc->get_arch_id = x86_cpu_get_arch_id;
6244 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
6245 #ifndef CONFIG_USER_ONLY
6246 cc->asidx_from_attrs = x86_asidx_from_attrs;
6247 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
6248 cc->get_phys_page_attrs_debug = x86_cpu_get_phys_page_attrs_debug;
6249 cc->write_elf64_note = x86_cpu_write_elf64_note;
6250 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
6251 cc->write_elf32_note = x86_cpu_write_elf32_note;
6252 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
6253 cc->vmsd = &vmstate_x86_cpu;
6254 #endif
6255 cc->gdb_arch_name = x86_gdb_arch_name;
6256 #ifdef TARGET_X86_64
6257 cc->gdb_core_xml_file = "i386-64bit.xml";
6258 cc->gdb_num_core_regs = 66;
6259 #else
6260 cc->gdb_core_xml_file = "i386-32bit.xml";
6261 cc->gdb_num_core_regs = 50;
6262 #endif
6263 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
6264 cc->debug_excp_handler = breakpoint_handler;
6265 #endif
6266 cc->cpu_exec_enter = x86_cpu_exec_enter;
6267 cc->cpu_exec_exit = x86_cpu_exec_exit;
6268 #ifdef CONFIG_TCG
6269 cc->tcg_initialize = tcg_x86_init;
6270 cc->tlb_fill = x86_cpu_tlb_fill;
6271 #endif
6272 cc->disas_set_info = x86_disas_set_info;
6274 dc->user_creatable = true;
6277 static const TypeInfo x86_cpu_type_info = {
6278 .name = TYPE_X86_CPU,
6279 .parent = TYPE_CPU,
6280 .instance_size = sizeof(X86CPU),
6281 .instance_init = x86_cpu_initfn,
6282 .abstract = true,
6283 .class_size = sizeof(X86CPUClass),
6284 .class_init = x86_cpu_common_class_init,
6288 /* "base" CPU model, used by query-cpu-model-expansion */
6289 static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
6291 X86CPUClass *xcc = X86_CPU_CLASS(oc);
6293 xcc->static_model = true;
6294 xcc->migration_safe = true;
6295 xcc->model_description = "base CPU model type with no features enabled";
6296 xcc->ordering = 8;
6299 static const TypeInfo x86_base_cpu_type_info = {
6300 .name = X86_CPU_TYPE_NAME("base"),
6301 .parent = TYPE_X86_CPU,
6302 .class_init = x86_cpu_base_class_init,
6305 static void x86_cpu_register_types(void)
6307 int i;
6309 type_register_static(&x86_cpu_type_info);
6310 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
6311 x86_register_cpudef_types(&builtin_x86_defs[i]);
6313 type_register_static(&max_x86_cpu_type_info);
6314 type_register_static(&x86_base_cpu_type_info);
6315 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
6316 type_register_static(&host_x86_cpu_type_info);
6317 #endif
6320 type_init(x86_cpu_register_types)