i386: Don't print warning if phys-bits was set automatically
[qemu/ar7.git] / target / i386 / cpu.c
1 /*
2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
20 #include "qemu/osdep.h"
21 #include "qemu/units.h"
22 #include "qemu/cutils.h"
23 #include "qemu/bitops.h"
24 #include "qemu/qemu-print.h"
26 #include "cpu.h"
27 #include "exec/exec-all.h"
28 #include "sysemu/kvm.h"
29 #include "sysemu/hvf.h"
30 #include "sysemu/cpus.h"
31 #include "kvm_i386.h"
32 #include "sev_i386.h"
34 #include "qemu/error-report.h"
35 #include "qemu/module.h"
36 #include "qemu/option.h"
37 #include "qemu/config-file.h"
38 #include "qapi/error.h"
39 #include "qapi/qapi-visit-machine.h"
40 #include "qapi/qapi-visit-run-state.h"
41 #include "qapi/qmp/qdict.h"
42 #include "qapi/qmp/qerror.h"
43 #include "qapi/visitor.h"
44 #include "qom/qom-qobject.h"
45 #include "sysemu/arch_init.h"
46 #include "qapi/qapi-commands-machine-target.h"
48 #include "standard-headers/asm-x86/kvm_para.h"
50 #include "sysemu/sysemu.h"
51 #include "sysemu/tcg.h"
52 #include "hw/qdev-properties.h"
53 #include "hw/i386/topology.h"
54 #ifndef CONFIG_USER_ONLY
55 #include "exec/address-spaces.h"
56 #include "hw/hw.h"
57 #include "hw/xen/xen.h"
58 #include "hw/i386/apic_internal.h"
59 #include "hw/boards.h"
60 #endif
62 #include "disas/capstone.h"
64 /* Helpers for building CPUID[2] descriptors: */
66 struct CPUID2CacheDescriptorInfo {
67 enum CacheType type;
68 int level;
69 int size;
70 int line_size;
71 int associativity;
75 * Known CPUID 2 cache descriptors.
76 * From Intel SDM Volume 2A, CPUID instruction
78 struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
79 [0x06] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 8 * KiB,
80 .associativity = 4, .line_size = 32, },
81 [0x08] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 16 * KiB,
82 .associativity = 4, .line_size = 32, },
83 [0x09] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
84 .associativity = 4, .line_size = 64, },
85 [0x0A] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
86 .associativity = 2, .line_size = 32, },
87 [0x0C] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
88 .associativity = 4, .line_size = 32, },
89 [0x0D] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
90 .associativity = 4, .line_size = 64, },
91 [0x0E] = { .level = 1, .type = DATA_CACHE, .size = 24 * KiB,
92 .associativity = 6, .line_size = 64, },
93 [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
94 .associativity = 2, .line_size = 64, },
95 [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
96 .associativity = 8, .line_size = 64, },
97 /* lines per sector is not supported by cpuid2_cache_descriptor(),
98 * so descriptors 0x22, 0x23 are not included
99 */
100 [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
101 .associativity = 16, .line_size = 64, },
102 /* lines per sector is not supported by cpuid2_cache_descriptor(),
103 * so descriptors 0x25, 0x20 are not included
104 */
105 [0x2C] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
106 .associativity = 8, .line_size = 64, },
107 [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
108 .associativity = 8, .line_size = 64, },
109 [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
110 .associativity = 4, .line_size = 32, },
111 [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
112 .associativity = 4, .line_size = 32, },
113 [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
114 .associativity = 4, .line_size = 32, },
115 [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
116 .associativity = 4, .line_size = 32, },
117 [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
118 .associativity = 4, .line_size = 32, },
119 [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
120 .associativity = 4, .line_size = 64, },
121 [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
122 .associativity = 8, .line_size = 64, },
123 [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB,
124 .associativity = 12, .line_size = 64, },
125 /* Descriptor 0x49 depends on CPU family/model, so it is not included */
126 [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
127 .associativity = 12, .line_size = 64, },
128 [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
129 .associativity = 16, .line_size = 64, },
130 [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
131 .associativity = 12, .line_size = 64, },
132 [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB,
133 .associativity = 16, .line_size = 64, },
134 [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB,
135 .associativity = 24, .line_size = 64, },
136 [0x60] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
137 .associativity = 8, .line_size = 64, },
138 [0x66] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
139 .associativity = 4, .line_size = 64, },
140 [0x67] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
141 .associativity = 4, .line_size = 64, },
142 [0x68] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
143 .associativity = 4, .line_size = 64, },
144 [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
145 .associativity = 4, .line_size = 64, },
146 /* lines per sector is not supported by cpuid2_cache_descriptor(),
147 * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
148 */
149 [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
150 .associativity = 8, .line_size = 64, },
151 [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
152 .associativity = 2, .line_size = 64, },
153 [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
154 .associativity = 8, .line_size = 64, },
155 [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
156 .associativity = 8, .line_size = 32, },
157 [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
158 .associativity = 8, .line_size = 32, },
159 [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
160 .associativity = 8, .line_size = 32, },
161 [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
162 .associativity = 8, .line_size = 32, },
163 [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
164 .associativity = 4, .line_size = 64, },
165 [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
166 .associativity = 8, .line_size = 64, },
167 [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB,
168 .associativity = 4, .line_size = 64, },
169 [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
170 .associativity = 4, .line_size = 64, },
171 [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
172 .associativity = 4, .line_size = 64, },
173 [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
174 .associativity = 8, .line_size = 64, },
175 [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
176 .associativity = 8, .line_size = 64, },
177 [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
178 .associativity = 8, .line_size = 64, },
179 [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB,
180 .associativity = 12, .line_size = 64, },
181 [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB,
182 .associativity = 12, .line_size = 64, },
183 [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
184 .associativity = 12, .line_size = 64, },
185 [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
186 .associativity = 16, .line_size = 64, },
187 [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
188 .associativity = 16, .line_size = 64, },
189 [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
190 .associativity = 16, .line_size = 64, },
191 [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
192 .associativity = 24, .line_size = 64, },
193 [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB,
194 .associativity = 24, .line_size = 64, },
195 [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB,
196 .associativity = 24, .line_size = 64, },
200 * "CPUID leaf 2 does not report cache descriptor information,
201 * use CPUID leaf 4 to query cache parameters"
203 #define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF
206 * Return a CPUID 2 cache descriptor for a given cache.
207 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
209 static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
211 int i;
213 assert(cache->size > 0);
214 assert(cache->level > 0);
215 assert(cache->line_size > 0);
216 assert(cache->associativity > 0);
217 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
218 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
219 if (d->level == cache->level && d->type == cache->type &&
220 d->size == cache->size && d->line_size == cache->line_size &&
221 d->associativity == cache->associativity) {
222 return i;
226 return CACHE_DESCRIPTOR_UNAVAILABLE;
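/*
 * Worked example (illustrative, based on the table above): a level-1 data
 * cache of 32 KiB, 8-way, with 64-byte lines matches descriptor 0x2C, so
 * cpuid2_cache_descriptor() returns 0x2C for it.  A geometry with no entry
 * in cpuid2_cache_descriptors[] (e.g. a 48 KiB, 12-way L1d) falls through
 * to CACHE_DESCRIPTOR_UNAVAILABLE (0xFF).
 */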
229 /* CPUID Leaf 4 constants: */
231 /* EAX: */
232 #define CACHE_TYPE_D 1
233 #define CACHE_TYPE_I 2
234 #define CACHE_TYPE_UNIFIED 3
236 #define CACHE_LEVEL(l) (l << 5)
238 #define CACHE_SELF_INIT_LEVEL (1 << 8)
240 /* EDX: */
241 #define CACHE_NO_INVD_SHARING (1 << 0)
242 #define CACHE_INCLUSIVE (1 << 1)
243 #define CACHE_COMPLEX_IDX (1 << 2)
245 /* Encode CacheType for CPUID[4].EAX */
246 #define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \
247 ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \
248 ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
249 0 /* Invalid value */)
252 /* Encode cache info for CPUID[4] */
253 static void encode_cache_cpuid4(CPUCacheInfo *cache,
254 int num_apic_ids, int num_cores,
255 uint32_t *eax, uint32_t *ebx,
256 uint32_t *ecx, uint32_t *edx)
258 assert(cache->size == cache->line_size * cache->associativity *
259 cache->partitions * cache->sets);
261 assert(num_apic_ids > 0);
262 *eax = CACHE_TYPE(cache->type) |
263 CACHE_LEVEL(cache->level) |
264 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
265 ((num_cores - 1) << 26) |
266 ((num_apic_ids - 1) << 14);
268 assert(cache->line_size > 0);
269 assert(cache->partitions > 0);
270 assert(cache->associativity > 0);
271 /* We don't implement fully-associative caches */
272 assert(cache->associativity < cache->sets);
273 *ebx = (cache->line_size - 1) |
274 ((cache->partitions - 1) << 12) |
275 ((cache->associativity - 1) << 22);
277 assert(cache->sets > 0);
278 *ecx = cache->sets - 1;
280 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
281 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
282 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
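/*
 * Worked example (illustrative, not from the original source): encoding a
 * hypothetical self-initializing 4 MiB unified L2 (64-byte lines, 16-way,
 * 1 partition, 4096 sets) with num_cores = 1 and num_apic_ids = 1 yields
 *   EAX = 0x00000143  (unified | level 2 | self-init)
 *   EBX = 0x03C0003F  (line_size - 1 = 63, associativity - 1 = 15)
 *   ECX = 0x00000FFF  (sets - 1 = 4095)
 * EDX then depends only on the no_invd_sharing/inclusive/complex_indexing
 * flags.
 */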
285 /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
286 static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
288 assert(cache->size % 1024 == 0);
289 assert(cache->lines_per_tag > 0);
290 assert(cache->associativity > 0);
291 assert(cache->line_size > 0);
292 return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
293 (cache->lines_per_tag << 8) | (cache->line_size);
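/*
 * Worked example (illustrative): the AMD-style legacy L1 values used later
 * in this file (64 KiB, 2-way, 64-byte lines, 1 line per tag) encode as
 *   (64 << 24) | (2 << 16) | (1 << 8) | 64 = 0x40020140.
 */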
296 #define ASSOC_FULL 0xFF
298 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
299 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
300 a == 2 ? 0x2 : \
301 a == 4 ? 0x4 : \
302 a == 8 ? 0x6 : \
303 a == 16 ? 0x8 : \
304 a == 32 ? 0xA : \
305 a == 48 ? 0xB : \
306 a == 64 ? 0xC : \
307 a == 96 ? 0xD : \
308 a == 128 ? 0xE : \
309 a == ASSOC_FULL ? 0xF : \
310 0 /* invalid value */)
313 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
314 * @l3 can be NULL.
316 static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
317 CPUCacheInfo *l3,
318 uint32_t *ecx, uint32_t *edx)
320 assert(l2->size % 1024 == 0);
321 assert(l2->associativity > 0);
322 assert(l2->lines_per_tag > 0);
323 assert(l2->line_size > 0);
324 *ecx = ((l2->size / 1024) << 16) |
325 (AMD_ENC_ASSOC(l2->associativity) << 12) |
326 (l2->lines_per_tag << 8) | (l2->line_size);
328 if (l3) {
329 assert(l3->size % (512 * 1024) == 0);
330 assert(l3->associativity > 0);
331 assert(l3->lines_per_tag > 0);
332 assert(l3->line_size > 0);
333 *edx = ((l3->size / (512 * 1024)) << 18) |
334 (AMD_ENC_ASSOC(l3->associativity) << 12) |
335 (l3->lines_per_tag << 8) | (l3->line_size);
336 } else {
337 *edx = 0;
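/*
 * Worked example (illustrative): a 512 KiB, 16-way L2 with 64-byte lines and
 * 1 line per tag gives ECX = (512 << 16) | (0x8 << 12) | (1 << 8) | 64
 * = 0x02008140, 0x8 being AMD_ENC_ASSOC(16); an 8 MiB, 16-way L3 gives
 * EDX = (16 << 18) | (0x8 << 12) | (1 << 8) | 64 = 0x00408140.
 */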
342 * Definitions used for building CPUID Leaf 0x8000001D and 0x8000001E
343 * Please refer to the AMD64 Architecture Programmer’s Manual Volume 3.
344 * Define the constants to build the cpu topology. Right now, the TOPOEXT
345 * feature is enabled only on EPYC, so these constants are based on
346 * EPYC-supported configurations. We may need to handle the cases where
347 * these values change in the future.
348 */
349 /* Maximum core complexes in a node */
350 #define MAX_CCX 2
351 /* Maximum cores in a core complex */
352 #define MAX_CORES_IN_CCX 4
353 /* Maximum cores in a node */
354 #define MAX_CORES_IN_NODE 8
355 /* Maximum nodes in a socket */
356 #define MAX_NODES_PER_SOCKET 4
359 * Figure out the number of nodes required to build this config.
360 * Max cores in a node is 8
362 static int nodes_in_socket(int nr_cores)
364 int nodes;
366 nodes = DIV_ROUND_UP(nr_cores, MAX_CORES_IN_NODE);
368 /* Hardware does not support config with 3 nodes, return 4 in that case */
369 return (nodes == 3) ? 4 : nodes;
373 * Decide the number of cores in a core complex with the given nr_cores, using
374 * the following constants: MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE and
375 * MAX_NODES_PER_SOCKET. Maintain symmetry as much as possible.
376 * The L3 cache is shared across all cores in a core complex, so this also
377 * tells us how many cores are sharing the L3 cache.
378 */
379 static int cores_in_core_complex(int nr_cores)
381 int nodes;
383 /* Check if we can fit all the cores in one core complex */
384 if (nr_cores <= MAX_CORES_IN_CCX) {
385 return nr_cores;
387 /* Get the number of nodes required to build this config */
388 nodes = nodes_in_socket(nr_cores);
391 * Divide the cores across all the core complexes.
392 * Return the rounded-up value.
393 */
394 return DIV_ROUND_UP(nr_cores, nodes * MAX_CCX);
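/*
 * Worked examples (illustrative): with nr_cores = 12, nodes_in_socket()
 * returns DIV_ROUND_UP(12, 8) = 2 and cores_in_core_complex() returns
 * DIV_ROUND_UP(12, 2 * MAX_CCX) = 3.  With nr_cores = 24, the node count
 * rounds up from 3 to 4 (3-node configs are not supported) and each core
 * complex gets DIV_ROUND_UP(24, 4 * MAX_CCX) = 3 cores.
 */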
397 /* Encode cache info for CPUID[8000001D] */
398 static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs,
399 uint32_t *eax, uint32_t *ebx,
400 uint32_t *ecx, uint32_t *edx)
402 uint32_t l3_cores;
403 assert(cache->size == cache->line_size * cache->associativity *
404 cache->partitions * cache->sets);
406 *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) |
407 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0);
409 /* L3 is shared among multiple cores */
410 if (cache->level == 3) {
411 l3_cores = cores_in_core_complex(cs->nr_cores);
412 *eax |= ((l3_cores * cs->nr_threads) - 1) << 14;
413 } else {
414 *eax |= ((cs->nr_threads - 1) << 14);
417 assert(cache->line_size > 0);
418 assert(cache->partitions > 0);
419 assert(cache->associativity > 0);
420 /* We don't implement fully-associative caches */
421 assert(cache->associativity < cache->sets);
422 *ebx = (cache->line_size - 1) |
423 ((cache->partitions - 1) << 12) |
424 ((cache->associativity - 1) << 22);
426 assert(cache->sets > 0);
427 *ecx = cache->sets - 1;
429 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
430 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
431 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
434 /* Data structure to hold the configuration info for a given core index */
435 struct core_topology {
436 /* core complex id of the current core index */
437 int ccx_id;
439 * Adjusted core index for this core in the topology
440 * This can be 0,1,2,3 with max 4 cores in a core complex
442 int core_id;
443 /* Node id for this core index */
444 int node_id;
445 /* Number of nodes in this config */
446 int num_nodes;
450 * Build a configuration that closely matches the EPYC hardware. We use the
451 * EPYC hardware configuration values (MAX_CCX, MAX_CORES_IN_CCX,
452 * MAX_CORES_IN_NODE) for now. This could change in the future.
453 * nr_cores : Total number of cores in the config
454 * core_id : Core index of the current CPU
455 * topo : Data structure to hold all the config info for this core index
456 */
457 static void build_core_topology(int nr_cores, int core_id,
458 struct core_topology *topo)
460 int nodes, cores_in_ccx;
462 /* First get the number of nodes required */
463 nodes = nodes_in_socket(nr_cores);
465 cores_in_ccx = cores_in_core_complex(nr_cores);
467 topo->node_id = core_id / (cores_in_ccx * MAX_CCX);
468 topo->ccx_id = (core_id % (cores_in_ccx * MAX_CCX)) / cores_in_ccx;
469 topo->core_id = core_id % cores_in_ccx;
470 topo->num_nodes = nodes;
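/*
 * Worked example (illustrative): with nr_cores = 12 and core_id = 7 we get
 * nodes = 2 and cores_in_ccx = 3, so node_id = 7 / 6 = 1,
 * ccx_id = (7 % 6) / 3 = 0 and core_id = 7 % 3 = 1.
 */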
473 /* Encode cache info for CPUID[8000001E] */
474 static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu,
475 uint32_t *eax, uint32_t *ebx,
476 uint32_t *ecx, uint32_t *edx)
478 struct core_topology topo = {0};
479 unsigned long nodes;
480 int shift;
482 build_core_topology(cs->nr_cores, cpu->core_id, &topo);
483 *eax = cpu->apic_id;
485 * CPUID_Fn8000001E_EBX
486 * 31:16 Reserved
487 * 15:8 Threads per core (The number of threads per core is
488 * Threads per core + 1)
489 * 7:0 Core id (see bit decoding below)
490 * SMT:
491 * 4:3 node id
492 * 2 Core complex id
493 * 1:0 Core id
494 * Non SMT:
495 * 5:4 node id
496 * 3 Core complex id
497 * 1:0 Core id
499 if (cs->nr_threads - 1) {
500 *ebx = ((cs->nr_threads - 1) << 8) | (topo.node_id << 3) |
501 (topo.ccx_id << 2) | topo.core_id;
502 } else {
503 *ebx = (topo.node_id << 4) | (topo.ccx_id << 3) | topo.core_id;
506 * CPUID_Fn8000001E_ECX
507 * 31:11 Reserved
508 * 10:8 Nodes per processor (Nodes per processor is number of nodes + 1)
509 * 7:0 Node id (see bit decoding below)
510 * 2 Socket id
511 * 1:0 Node id
513 if (topo.num_nodes <= 4) {
514 *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << 2) |
515 topo.node_id;
516 } else {
518 * Node id fix-up. Actual hardware supports up to 4 nodes, but with
519 * more than 32 cores we may end up with more than 4 nodes.
520 * The node id is a combination of socket id and node id. The only
521 * requirement here is that this number is unique across the system.
522 * Shift the socket id to accommodate more nodes. We don't expect both
523 * the socket id and the node id to be big numbers at the same time. This
524 * is not an ideal config but we need to support it. The maximum number of
525 * nodes we can have is 32 (255/8) with 8 cores per node and 255 max cores,
526 * so we only need 5 bits for nodes. Find the leftmost set bit to represent
527 * the total number of nodes. find_last_bit returns the last set bit
528 * (0-based). Left-shift (+1) the socket id to represent all the nodes.
529 */
530 nodes = topo.num_nodes - 1;
531 shift = find_last_bit(&nodes, 8);
532 *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << (shift + 1)) |
533 topo.node_id;
535 *edx = 0;
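/*
 * Worked example (illustrative): a single-socket guest with nr_cores = 16,
 * nr_threads = 2, socket_id = 0 and core_id = 10 produces topo = { node_id 1,
 * ccx_id 0, core_id 2, num_nodes 2 }, so EBX = (1 << 8) | (1 << 3) | 2 = 0x10A
 * and ECX = (1 << 8) | 1 = 0x101; EAX is simply the APIC ID.
 */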
539 * Definitions of the hardcoded cache entries we expose:
540 * These are legacy cache values. If there is a need to change any
541 * of these values please use builtin_x86_defs
544 /* L1 data cache: */
545 static CPUCacheInfo legacy_l1d_cache = {
546 .type = DATA_CACHE,
547 .level = 1,
548 .size = 32 * KiB,
549 .self_init = 1,
550 .line_size = 64,
551 .associativity = 8,
552 .sets = 64,
553 .partitions = 1,
554 .no_invd_sharing = true,
557 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
558 static CPUCacheInfo legacy_l1d_cache_amd = {
559 .type = DATA_CACHE,
560 .level = 1,
561 .size = 64 * KiB,
562 .self_init = 1,
563 .line_size = 64,
564 .associativity = 2,
565 .sets = 512,
566 .partitions = 1,
567 .lines_per_tag = 1,
568 .no_invd_sharing = true,
571 /* L1 instruction cache: */
572 static CPUCacheInfo legacy_l1i_cache = {
573 .type = INSTRUCTION_CACHE,
574 .level = 1,
575 .size = 32 * KiB,
576 .self_init = 1,
577 .line_size = 64,
578 .associativity = 8,
579 .sets = 64,
580 .partitions = 1,
581 .no_invd_sharing = true,
584 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
585 static CPUCacheInfo legacy_l1i_cache_amd = {
586 .type = INSTRUCTION_CACHE,
587 .level = 1,
588 .size = 64 * KiB,
589 .self_init = 1,
590 .line_size = 64,
591 .associativity = 2,
592 .sets = 512,
593 .partitions = 1,
594 .lines_per_tag = 1,
595 .no_invd_sharing = true,
598 /* Level 2 unified cache: */
599 static CPUCacheInfo legacy_l2_cache = {
600 .type = UNIFIED_CACHE,
601 .level = 2,
602 .size = 4 * MiB,
603 .self_init = 1,
604 .line_size = 64,
605 .associativity = 16,
606 .sets = 4096,
607 .partitions = 1,
608 .no_invd_sharing = true,
611 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
612 static CPUCacheInfo legacy_l2_cache_cpuid2 = {
613 .type = UNIFIED_CACHE,
614 .level = 2,
615 .size = 2 * MiB,
616 .line_size = 64,
617 .associativity = 8,
621 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
622 static CPUCacheInfo legacy_l2_cache_amd = {
623 .type = UNIFIED_CACHE,
624 .level = 2,
625 .size = 512 * KiB,
626 .line_size = 64,
627 .lines_per_tag = 1,
628 .associativity = 16,
629 .sets = 512,
630 .partitions = 1,
633 /* Level 3 unified cache: */
634 static CPUCacheInfo legacy_l3_cache = {
635 .type = UNIFIED_CACHE,
636 .level = 3,
637 .size = 16 * MiB,
638 .line_size = 64,
639 .associativity = 16,
640 .sets = 16384,
641 .partitions = 1,
642 .lines_per_tag = 1,
643 .self_init = true,
644 .inclusive = true,
645 .complex_indexing = true,
648 /* TLB definitions: */
650 #define L1_DTLB_2M_ASSOC 1
651 #define L1_DTLB_2M_ENTRIES 255
652 #define L1_DTLB_4K_ASSOC 1
653 #define L1_DTLB_4K_ENTRIES 255
655 #define L1_ITLB_2M_ASSOC 1
656 #define L1_ITLB_2M_ENTRIES 255
657 #define L1_ITLB_4K_ASSOC 1
658 #define L1_ITLB_4K_ENTRIES 255
660 #define L2_DTLB_2M_ASSOC 0 /* disabled */
661 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
662 #define L2_DTLB_4K_ASSOC 4
663 #define L2_DTLB_4K_ENTRIES 512
665 #define L2_ITLB_2M_ASSOC 0 /* disabled */
666 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
667 #define L2_ITLB_4K_ASSOC 4
668 #define L2_ITLB_4K_ENTRIES 512
670 /* CPUID Leaf 0x14 constants: */
671 #define INTEL_PT_MAX_SUBLEAF 0x1
673 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
674 * MSR can be accessed;
675 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
676 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
677 * of Intel PT MSRs across warm reset;
678 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
680 #define INTEL_PT_MINIMAL_EBX 0xf
682 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
683 * IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
684 * accessed;
685 * bit[01]: ToPA tables can hold any number of output entries, up to the
686 * maximum allowed by the MaskOrTableOffset field of
687 * IA32_RTIT_OUTPUT_MASK_PTRS;
688 * bit[02]: Support Single-Range Output scheme;
690 #define INTEL_PT_MINIMAL_ECX 0x7
691 /* generated packets which contain IP payloads have LIP values */
692 #define INTEL_PT_IP_LIP (1 << 31)
693 #define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
694 #define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
695 #define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */
696 #define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */
697 #define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
699 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
700 uint32_t vendor2, uint32_t vendor3)
702 int i;
703 for (i = 0; i < 4; i++) {
704 dst[i] = vendor1 >> (8 * i);
705 dst[i + 4] = vendor2 >> (8 * i);
706 dst[i + 8] = vendor3 >> (8 * i);
708 dst[CPUID_VENDOR_SZ] = '\0';
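/*
 * Worked example (illustrative): for a GenuineIntel host, CPUID(0) returns
 * EBX = 0x756e6547 ("Genu"), EDX = 0x49656e69 ("ineI") and
 * ECX = 0x6c65746e ("ntel"), so calling this with (ebx, edx, ecx) writes
 * the string "GenuineIntel".
 */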
711 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
712 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
713 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
714 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
715 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
716 CPUID_PSE36 | CPUID_FXSR)
717 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
718 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
719 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
720 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
721 CPUID_PAE | CPUID_SEP | CPUID_APIC)
723 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
724 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
725 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
726 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
727 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
728 /* partly implemented:
729 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
730 /* missing:
731 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
732 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
733 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
734 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
735 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
736 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR | \
737 CPUID_EXT_RDRAND)
738 /* missing:
739 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
740 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
741 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
742 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
743 CPUID_EXT_F16C */
745 #ifdef TARGET_X86_64
746 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
747 #else
748 #define TCG_EXT2_X86_64_FEATURES 0
749 #endif
751 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
752 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
753 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
754 TCG_EXT2_X86_64_FEATURES)
755 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
756 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
757 #define TCG_EXT4_FEATURES 0
758 #define TCG_SVM_FEATURES CPUID_SVM_NPT
759 #define TCG_KVM_FEATURES 0
760 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
761 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
762 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
763 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
764 CPUID_7_0_EBX_ERMS)
765 /* missing:
766 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
767 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
768 CPUID_7_0_EBX_RDSEED */
769 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \
770 /* CPUID_7_0_ECX_OSPKE is dynamic */ \
771 CPUID_7_0_ECX_LA57)
772 #define TCG_7_0_EDX_FEATURES 0
773 #define TCG_APM_FEATURES 0
774 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
775 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
776 /* missing:
777 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
779 typedef enum FeatureWordType {
780 CPUID_FEATURE_WORD,
781 MSR_FEATURE_WORD,
782 } FeatureWordType;
784 typedef struct FeatureWordInfo {
785 FeatureWordType type;
786 /* Feature flag names are taken from "Intel Processor Identification and
787 * the CPUID Instruction" and AMD's "CPUID Specification".
788 * In cases of disagreement between feature naming conventions,
789 * aliases may be added.
790 */
791 const char *feat_names[32];
792 union {
793 /* If type==CPUID_FEATURE_WORD */
794 struct {
795 uint32_t eax; /* Input EAX for CPUID */
796 bool needs_ecx; /* CPUID instruction uses ECX as input */
797 uint32_t ecx; /* Input ECX value for CPUID */
798 int reg; /* output register (R_* constant) */
799 } cpuid;
800 /* If type==MSR_FEATURE_WORD */
801 struct {
802 uint32_t index;
803 struct { /*CPUID that enumerate this MSR*/
804 FeatureWord cpuid_class;
805 uint32_t cpuid_flag;
806 } cpuid_dep;
807 } msr;
809 uint32_t tcg_features; /* Feature flags supported by TCG */
810 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
811 uint32_t migratable_flags; /* Feature flags known to be migratable */
812 /* Features that shouldn't be auto-enabled by "-cpu host" */
813 uint32_t no_autoenable_flags;
814 } FeatureWordInfo;
816 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
817 [FEAT_1_EDX] = {
818 .type = CPUID_FEATURE_WORD,
819 .feat_names = {
820 "fpu", "vme", "de", "pse",
821 "tsc", "msr", "pae", "mce",
822 "cx8", "apic", NULL, "sep",
823 "mtrr", "pge", "mca", "cmov",
824 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
825 NULL, "ds" /* Intel dts */, "acpi", "mmx",
826 "fxsr", "sse", "sse2", "ss",
827 "ht" /* Intel htt */, "tm", "ia64", "pbe",
829 .cpuid = {.eax = 1, .reg = R_EDX, },
830 .tcg_features = TCG_FEATURES,
832 [FEAT_1_ECX] = {
833 .type = CPUID_FEATURE_WORD,
834 .feat_names = {
835 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
836 "ds-cpl", "vmx", "smx", "est",
837 "tm2", "ssse3", "cid", NULL,
838 "fma", "cx16", "xtpr", "pdcm",
839 NULL, "pcid", "dca", "sse4.1",
840 "sse4.2", "x2apic", "movbe", "popcnt",
841 "tsc-deadline", "aes", "xsave", NULL /* osxsave */,
842 "avx", "f16c", "rdrand", "hypervisor",
844 .cpuid = { .eax = 1, .reg = R_ECX, },
845 .tcg_features = TCG_EXT_FEATURES,
847 /* Feature names that are already defined in feature_name[] but
848 * are set in CPUID[8000_0001].EDX on AMD CPUs don't have their
849 * names in feat_names below. They are copied automatically
850 * to features[FEAT_8000_0001_EDX] if and only if the CPU vendor is AMD.
851 */
852 [FEAT_8000_0001_EDX] = {
853 .type = CPUID_FEATURE_WORD,
854 .feat_names = {
855 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
856 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
857 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
858 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
859 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
860 "nx", NULL, "mmxext", NULL /* mmx */,
861 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
862 NULL, "lm", "3dnowext", "3dnow",
864 .cpuid = { .eax = 0x80000001, .reg = R_EDX, },
865 .tcg_features = TCG_EXT2_FEATURES,
867 [FEAT_8000_0001_ECX] = {
868 .type = CPUID_FEATURE_WORD,
869 .feat_names = {
870 "lahf-lm", "cmp-legacy", "svm", "extapic",
871 "cr8legacy", "abm", "sse4a", "misalignsse",
872 "3dnowprefetch", "osvw", "ibs", "xop",
873 "skinit", "wdt", NULL, "lwp",
874 "fma4", "tce", NULL, "nodeid-msr",
875 NULL, "tbm", "topoext", "perfctr-core",
876 "perfctr-nb", NULL, NULL, NULL,
877 NULL, NULL, NULL, NULL,
879 .cpuid = { .eax = 0x80000001, .reg = R_ECX, },
880 .tcg_features = TCG_EXT3_FEATURES,
882 * TOPOEXT is always allowed but can't be enabled blindly by
883 * "-cpu host", as it requires consistent cache topology info
884 * to be provided so it doesn't confuse guests.
886 .no_autoenable_flags = CPUID_EXT3_TOPOEXT,
888 [FEAT_C000_0001_EDX] = {
889 .type = CPUID_FEATURE_WORD,
890 .feat_names = {
891 NULL, NULL, "xstore", "xstore-en",
892 NULL, NULL, "xcrypt", "xcrypt-en",
893 "ace2", "ace2-en", "phe", "phe-en",
894 "pmm", "pmm-en", NULL, NULL,
895 NULL, NULL, NULL, NULL,
896 NULL, NULL, NULL, NULL,
897 NULL, NULL, NULL, NULL,
898 NULL, NULL, NULL, NULL,
900 .cpuid = { .eax = 0xC0000001, .reg = R_EDX, },
901 .tcg_features = TCG_EXT4_FEATURES,
903 [FEAT_KVM] = {
904 .type = CPUID_FEATURE_WORD,
905 .feat_names = {
906 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
907 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
908 NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi",
909 NULL, NULL, NULL, NULL,
910 NULL, NULL, NULL, NULL,
911 NULL, NULL, NULL, NULL,
912 "kvmclock-stable-bit", NULL, NULL, NULL,
913 NULL, NULL, NULL, NULL,
915 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EAX, },
916 .tcg_features = TCG_KVM_FEATURES,
918 [FEAT_KVM_HINTS] = {
919 .type = CPUID_FEATURE_WORD,
920 .feat_names = {
921 "kvm-hint-dedicated", NULL, NULL, NULL,
922 NULL, NULL, NULL, NULL,
923 NULL, NULL, NULL, NULL,
924 NULL, NULL, NULL, NULL,
925 NULL, NULL, NULL, NULL,
926 NULL, NULL, NULL, NULL,
927 NULL, NULL, NULL, NULL,
928 NULL, NULL, NULL, NULL,
930 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EDX, },
931 .tcg_features = TCG_KVM_FEATURES,
933 * KVM hints aren't auto-enabled by -cpu host; they need to be
934 * enabled explicitly on the command line.
935 */
936 .no_autoenable_flags = ~0U,
939 * .feat_names are commented out for Hyper-V enlightenments because we
940 * don't want to have two different ways for enabling them on QEMU command
941 * line. Some features (e.g. "hyperv_time", "hyperv_vapic", ...) require
942 * enabling several feature bits simultaneously, exposing these bits
943 * individually may just confuse guests.
945 [FEAT_HYPERV_EAX] = {
946 .type = CPUID_FEATURE_WORD,
947 .feat_names = {
948 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
949 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
950 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
951 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
952 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
953 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
954 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
955 NULL, NULL,
956 NULL, NULL, NULL, NULL,
957 NULL, NULL, NULL, NULL,
958 NULL, NULL, NULL, NULL,
959 NULL, NULL, NULL, NULL,
961 .cpuid = { .eax = 0x40000003, .reg = R_EAX, },
963 [FEAT_HYPERV_EBX] = {
964 .type = CPUID_FEATURE_WORD,
965 .feat_names = {
966 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
967 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
968 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
969 NULL /* hv_create_port */, NULL /* hv_connect_port */,
970 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
971 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
972 NULL, NULL,
973 NULL, NULL, NULL, NULL,
974 NULL, NULL, NULL, NULL,
975 NULL, NULL, NULL, NULL,
976 NULL, NULL, NULL, NULL,
978 .cpuid = { .eax = 0x40000003, .reg = R_EBX, },
980 [FEAT_HYPERV_EDX] = {
981 .type = CPUID_FEATURE_WORD,
982 .feat_names = {
983 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
984 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
985 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
986 NULL, NULL,
987 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
988 NULL, NULL, NULL, NULL,
989 NULL, NULL, NULL, NULL,
990 NULL, NULL, NULL, NULL,
991 NULL, NULL, NULL, NULL,
992 NULL, NULL, NULL, NULL,
994 .cpuid = { .eax = 0x40000003, .reg = R_EDX, },
996 [FEAT_HV_RECOMM_EAX] = {
997 .type = CPUID_FEATURE_WORD,
998 .feat_names = {
999 NULL /* hv_recommend_pv_as_switch */,
1000 NULL /* hv_recommend_pv_tlbflush_local */,
1001 NULL /* hv_recommend_pv_tlbflush_remote */,
1002 NULL /* hv_recommend_msr_apic_access */,
1003 NULL /* hv_recommend_msr_reset */,
1004 NULL /* hv_recommend_relaxed_timing */,
1005 NULL /* hv_recommend_dma_remapping */,
1006 NULL /* hv_recommend_int_remapping */,
1007 NULL /* hv_recommend_x2apic_msrs */,
1008 NULL /* hv_recommend_autoeoi_deprecation */,
1009 NULL /* hv_recommend_pv_ipi */,
1010 NULL /* hv_recommend_ex_hypercalls */,
1011 NULL /* hv_hypervisor_is_nested */,
1012 NULL /* hv_recommend_int_mbec */,
1013 NULL /* hv_recommend_evmcs */,
1014 NULL,
1015 NULL, NULL, NULL, NULL,
1016 NULL, NULL, NULL, NULL,
1017 NULL, NULL, NULL, NULL,
1018 NULL, NULL, NULL, NULL,
1020 .cpuid = { .eax = 0x40000004, .reg = R_EAX, },
1022 [FEAT_HV_NESTED_EAX] = {
1023 .type = CPUID_FEATURE_WORD,
1024 .cpuid = { .eax = 0x4000000A, .reg = R_EAX, },
1026 [FEAT_SVM] = {
1027 .type = CPUID_FEATURE_WORD,
1028 .feat_names = {
1029 "npt", "lbrv", "svm-lock", "nrip-save",
1030 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
1031 NULL, NULL, "pause-filter", NULL,
1032 "pfthreshold", NULL, NULL, NULL,
1033 NULL, NULL, NULL, NULL,
1034 NULL, NULL, NULL, NULL,
1035 NULL, NULL, NULL, NULL,
1036 NULL, NULL, NULL, NULL,
1038 .cpuid = { .eax = 0x8000000A, .reg = R_EDX, },
1039 .tcg_features = TCG_SVM_FEATURES,
1041 [FEAT_7_0_EBX] = {
1042 .type = CPUID_FEATURE_WORD,
1043 .feat_names = {
1044 "fsgsbase", "tsc-adjust", NULL, "bmi1",
1045 "hle", "avx2", NULL, "smep",
1046 "bmi2", "erms", "invpcid", "rtm",
1047 NULL, NULL, "mpx", NULL,
1048 "avx512f", "avx512dq", "rdseed", "adx",
1049 "smap", "avx512ifma", "pcommit", "clflushopt",
1050 "clwb", "intel-pt", "avx512pf", "avx512er",
1051 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
1053 .cpuid = {
1054 .eax = 7,
1055 .needs_ecx = true, .ecx = 0,
1056 .reg = R_EBX,
1058 .tcg_features = TCG_7_0_EBX_FEATURES,
1060 [FEAT_7_0_ECX] = {
1061 .type = CPUID_FEATURE_WORD,
1062 .feat_names = {
1063 NULL, "avx512vbmi", "umip", "pku",
1064 NULL /* ospke */, NULL, "avx512vbmi2", NULL,
1065 "gfni", "vaes", "vpclmulqdq", "avx512vnni",
1066 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
1067 "la57", NULL, NULL, NULL,
1068 NULL, NULL, "rdpid", NULL,
1069 NULL, "cldemote", NULL, "movdiri",
1070 "movdir64b", NULL, NULL, NULL,
1072 .cpuid = {
1073 .eax = 7,
1074 .needs_ecx = true, .ecx = 0,
1075 .reg = R_ECX,
1077 .tcg_features = TCG_7_0_ECX_FEATURES,
1079 [FEAT_7_0_EDX] = {
1080 .type = CPUID_FEATURE_WORD,
1081 .feat_names = {
1082 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
1083 NULL, NULL, NULL, NULL,
1084 NULL, NULL, "md-clear", NULL,
1085 NULL, NULL, NULL, NULL,
1086 NULL, NULL, NULL, NULL,
1087 NULL, NULL, NULL, NULL,
1088 NULL, NULL, "spec-ctrl", "stibp",
1089 NULL, "arch-capabilities", "core-capability", "ssbd",
1091 .cpuid = {
1092 .eax = 7,
1093 .needs_ecx = true, .ecx = 0,
1094 .reg = R_EDX,
1096 .tcg_features = TCG_7_0_EDX_FEATURES,
1098 [FEAT_8000_0007_EDX] = {
1099 .type = CPUID_FEATURE_WORD,
1100 .feat_names = {
1101 NULL, NULL, NULL, NULL,
1102 NULL, NULL, NULL, NULL,
1103 "invtsc", NULL, NULL, NULL,
1104 NULL, NULL, NULL, NULL,
1105 NULL, NULL, NULL, NULL,
1106 NULL, NULL, NULL, NULL,
1107 NULL, NULL, NULL, NULL,
1108 NULL, NULL, NULL, NULL,
1110 .cpuid = { .eax = 0x80000007, .reg = R_EDX, },
1111 .tcg_features = TCG_APM_FEATURES,
1112 .unmigratable_flags = CPUID_APM_INVTSC,
1114 [FEAT_8000_0008_EBX] = {
1115 .type = CPUID_FEATURE_WORD,
1116 .feat_names = {
1117 NULL, NULL, NULL, NULL,
1118 NULL, NULL, NULL, NULL,
1119 NULL, "wbnoinvd", NULL, NULL,
1120 "ibpb", NULL, NULL, NULL,
1121 NULL, NULL, NULL, NULL,
1122 NULL, NULL, NULL, NULL,
1123 "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL,
1124 NULL, NULL, NULL, NULL,
1126 .cpuid = { .eax = 0x80000008, .reg = R_EBX, },
1127 .tcg_features = 0,
1128 .unmigratable_flags = 0,
1130 [FEAT_XSAVE] = {
1131 .type = CPUID_FEATURE_WORD,
1132 .feat_names = {
1133 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
1134 NULL, NULL, NULL, NULL,
1135 NULL, NULL, NULL, NULL,
1136 NULL, NULL, NULL, NULL,
1137 NULL, NULL, NULL, NULL,
1138 NULL, NULL, NULL, NULL,
1139 NULL, NULL, NULL, NULL,
1140 NULL, NULL, NULL, NULL,
1142 .cpuid = {
1143 .eax = 0xd,
1144 .needs_ecx = true, .ecx = 1,
1145 .reg = R_EAX,
1147 .tcg_features = TCG_XSAVE_FEATURES,
1149 [FEAT_6_EAX] = {
1150 .type = CPUID_FEATURE_WORD,
1151 .feat_names = {
1152 NULL, NULL, "arat", NULL,
1153 NULL, NULL, NULL, NULL,
1154 NULL, NULL, NULL, NULL,
1155 NULL, NULL, NULL, NULL,
1156 NULL, NULL, NULL, NULL,
1157 NULL, NULL, NULL, NULL,
1158 NULL, NULL, NULL, NULL,
1159 NULL, NULL, NULL, NULL,
1161 .cpuid = { .eax = 6, .reg = R_EAX, },
1162 .tcg_features = TCG_6_EAX_FEATURES,
1164 [FEAT_XSAVE_COMP_LO] = {
1165 .type = CPUID_FEATURE_WORD,
1166 .cpuid = {
1167 .eax = 0xD,
1168 .needs_ecx = true, .ecx = 0,
1169 .reg = R_EAX,
1171 .tcg_features = ~0U,
1172 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
1173 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
1174 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
1175 XSTATE_PKRU_MASK,
1177 [FEAT_XSAVE_COMP_HI] = {
1178 .type = CPUID_FEATURE_WORD,
1179 .cpuid = {
1180 .eax = 0xD,
1181 .needs_ecx = true, .ecx = 0,
1182 .reg = R_EDX,
1184 .tcg_features = ~0U,
1186 /* Below are MSR-exposed features */
1187 [FEAT_ARCH_CAPABILITIES] = {
1188 .type = MSR_FEATURE_WORD,
1189 .feat_names = {
1190 "rdctl-no", "ibrs-all", "rsba", "skip-l1dfl-vmentry",
1191 "ssb-no", "mds-no", NULL, NULL,
1192 NULL, NULL, NULL, NULL,
1193 NULL, NULL, NULL, NULL,
1194 NULL, NULL, NULL, NULL,
1195 NULL, NULL, NULL, NULL,
1196 NULL, NULL, NULL, NULL,
1197 NULL, NULL, NULL, NULL,
1199 .msr = {
1200 .index = MSR_IA32_ARCH_CAPABILITIES,
1201 .cpuid_dep = {
1202 FEAT_7_0_EDX,
1203 CPUID_7_0_EDX_ARCH_CAPABILITIES
1207 [FEAT_CORE_CAPABILITY] = {
1208 .type = MSR_FEATURE_WORD,
1209 .feat_names = {
1210 NULL, NULL, NULL, NULL,
1211 NULL, "split-lock-detect", NULL, NULL,
1212 NULL, NULL, NULL, NULL,
1213 NULL, NULL, NULL, NULL,
1214 NULL, NULL, NULL, NULL,
1215 NULL, NULL, NULL, NULL,
1216 NULL, NULL, NULL, NULL,
1217 NULL, NULL, NULL, NULL,
1219 .msr = {
1220 .index = MSR_IA32_CORE_CAPABILITY,
1221 .cpuid_dep = {
1222 FEAT_7_0_EDX,
1223 CPUID_7_0_EDX_CORE_CAPABILITY,
1229 typedef struct X86RegisterInfo32 {
1230 /* Name of register */
1231 const char *name;
1232 /* QAPI enum value register */
1233 X86CPURegister32 qapi_enum;
1234 } X86RegisterInfo32;
1236 #define REGISTER(reg) \
1237 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
1238 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
1239 REGISTER(EAX),
1240 REGISTER(ECX),
1241 REGISTER(EDX),
1242 REGISTER(EBX),
1243 REGISTER(ESP),
1244 REGISTER(EBP),
1245 REGISTER(ESI),
1246 REGISTER(EDI),
1248 #undef REGISTER
1250 typedef struct ExtSaveArea {
1251 uint32_t feature, bits;
1252 uint32_t offset, size;
1253 } ExtSaveArea;
1255 static const ExtSaveArea x86_ext_save_areas[] = {
1256 [XSTATE_FP_BIT] = {
1257 /* x87 FP state component is always enabled if XSAVE is supported */
1258 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
1259 /* x87 state is in the legacy region of the XSAVE area */
1260 .offset = 0,
1261 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
1263 [XSTATE_SSE_BIT] = {
1264 /* SSE state component is always enabled if XSAVE is supported */
1265 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
1266 /* SSE state is in the legacy region of the XSAVE area */
1267 .offset = 0,
1268 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
1270 [XSTATE_YMM_BIT] =
1271 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
1272 .offset = offsetof(X86XSaveArea, avx_state),
1273 .size = sizeof(XSaveAVX) },
1274 [XSTATE_BNDREGS_BIT] =
1275 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
1276 .offset = offsetof(X86XSaveArea, bndreg_state),
1277 .size = sizeof(XSaveBNDREG) },
1278 [XSTATE_BNDCSR_BIT] =
1279 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
1280 .offset = offsetof(X86XSaveArea, bndcsr_state),
1281 .size = sizeof(XSaveBNDCSR) },
1282 [XSTATE_OPMASK_BIT] =
1283 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1284 .offset = offsetof(X86XSaveArea, opmask_state),
1285 .size = sizeof(XSaveOpmask) },
1286 [XSTATE_ZMM_Hi256_BIT] =
1287 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1288 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
1289 .size = sizeof(XSaveZMM_Hi256) },
1290 [XSTATE_Hi16_ZMM_BIT] =
1291 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1292 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
1293 .size = sizeof(XSaveHi16_ZMM) },
1294 [XSTATE_PKRU_BIT] =
1295 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
1296 .offset = offsetof(X86XSaveArea, pkru_state),
1297 .size = sizeof(XSavePKRU) },
1300 static uint32_t xsave_area_size(uint64_t mask)
1302 int i;
1303 uint64_t ret = 0;
1305 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
1306 const ExtSaveArea *esa = &x86_ext_save_areas[i];
1307 if ((mask >> i) & 1) {
1308 ret = MAX(ret, esa->offset + esa->size);
1311 return ret;
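/*
 * Illustrative example (sizes assume the standard XSAVE layout used by
 * X86XSaveArea): with mask = XSTATE_FP_MASK | XSTATE_SSE_MASK this returns
 * sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader) (576 bytes); adding
 * XSTATE_YMM_MASK extends it to offsetof(X86XSaveArea, avx_state) +
 * sizeof(XSaveAVX), i.e. 832 bytes.
 */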
1314 static inline bool accel_uses_host_cpuid(void)
1316 return kvm_enabled() || hvf_enabled();
1319 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
1321 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
1322 cpu->env.features[FEAT_XSAVE_COMP_LO];
1325 const char *get_register_name_32(unsigned int reg)
1327 if (reg >= CPU_NB_REGS32) {
1328 return NULL;
1330 return x86_reg_info_32[reg].name;
1334 * Returns the set of feature flags that are supported and migratable by
1335 * QEMU, for a given FeatureWord.
1337 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
1339 FeatureWordInfo *wi = &feature_word_info[w];
1340 uint32_t r = 0;
1341 int i;
1343 for (i = 0; i < 32; i++) {
1344 uint32_t f = 1U << i;
1346 /* If the feature name is known, it is implicitly considered migratable,
1347 * unless it is explicitly set in unmigratable_flags */
1348 if ((wi->migratable_flags & f) ||
1349 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
1350 r |= f;
1353 return r;
1356 void host_cpuid(uint32_t function, uint32_t count,
1357 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
1359 uint32_t vec[4];
1361 #ifdef __x86_64__
1362 asm volatile("cpuid"
1363 : "=a"(vec[0]), "=b"(vec[1]),
1364 "=c"(vec[2]), "=d"(vec[3])
1365 : "0"(function), "c"(count) : "cc");
1366 #elif defined(__i386__)
1367 asm volatile("pusha \n\t"
1368 "cpuid \n\t"
1369 "mov %%eax, 0(%2) \n\t"
1370 "mov %%ebx, 4(%2) \n\t"
1371 "mov %%ecx, 8(%2) \n\t"
1372 "mov %%edx, 12(%2) \n\t"
1373 "popa"
1374 : : "a"(function), "c"(count), "S"(vec)
1375 : "memory", "cc");
1376 #else
1377 abort();
1378 #endif
1380 if (eax)
1381 *eax = vec[0];
1382 if (ebx)
1383 *ebx = vec[1];
1384 if (ecx)
1385 *ecx = vec[2];
1386 if (edx)
1387 *edx = vec[3];
1390 void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
1392 uint32_t eax, ebx, ecx, edx;
1394 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1395 x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
1397 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1398 if (family) {
1399 *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1401 if (model) {
1402 *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1404 if (stepping) {
1405 *stepping = eax & 0x0F;
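/*
 * Worked example (illustrative): a host CPUID(1).EAX of 0x000306C3 decodes
 * to family = 0x6 + 0x0 = 6, model = 0xC | (0x3 << 4) = 0x3C (60) and
 * stepping = 3, i.e. the usual family/model/stepping reported for a
 * Haswell-class part.
 */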
1409 /* CPU class name definitions: */
1411 /* Return type name for a given CPU model name
1412 * Caller is responsible for freeing the returned string.
1414 static char *x86_cpu_type_name(const char *model_name)
1416 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
1419 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
1421 ObjectClass *oc;
1422 char *typename = x86_cpu_type_name(cpu_model);
1423 oc = object_class_by_name(typename);
1424 g_free(typename);
1425 return oc;
1428 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
1430 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
1431 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
1432 return g_strndup(class_name,
1433 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
1436 struct X86CPUDefinition {
1437 const char *name;
1438 uint32_t level;
1439 uint32_t xlevel;
1440 /* vendor is zero-terminated, 12 character ASCII string */
1441 char vendor[CPUID_VENDOR_SZ + 1];
1442 int family;
1443 int model;
1444 int stepping;
1445 FeatureWordArray features;
1446 const char *model_id;
1447 CPUCaches *cache_info;
1450 static CPUCaches epyc_cache_info = {
1451 .l1d_cache = &(CPUCacheInfo) {
1452 .type = DATA_CACHE,
1453 .level = 1,
1454 .size = 32 * KiB,
1455 .line_size = 64,
1456 .associativity = 8,
1457 .partitions = 1,
1458 .sets = 64,
1459 .lines_per_tag = 1,
1460 .self_init = 1,
1461 .no_invd_sharing = true,
1463 .l1i_cache = &(CPUCacheInfo) {
1464 .type = INSTRUCTION_CACHE,
1465 .level = 1,
1466 .size = 64 * KiB,
1467 .line_size = 64,
1468 .associativity = 4,
1469 .partitions = 1,
1470 .sets = 256,
1471 .lines_per_tag = 1,
1472 .self_init = 1,
1473 .no_invd_sharing = true,
1475 .l2_cache = &(CPUCacheInfo) {
1476 .type = UNIFIED_CACHE,
1477 .level = 2,
1478 .size = 512 * KiB,
1479 .line_size = 64,
1480 .associativity = 8,
1481 .partitions = 1,
1482 .sets = 1024,
1483 .lines_per_tag = 1,
1485 .l3_cache = &(CPUCacheInfo) {
1486 .type = UNIFIED_CACHE,
1487 .level = 3,
1488 .size = 8 * MiB,
1489 .line_size = 64,
1490 .associativity = 16,
1491 .partitions = 1,
1492 .sets = 8192,
1493 .lines_per_tag = 1,
1494 .self_init = true,
1495 .inclusive = true,
1496 .complex_indexing = true,
1500 static X86CPUDefinition builtin_x86_defs[] = {
1502 .name = "qemu64",
1503 .level = 0xd,
1504 .vendor = CPUID_VENDOR_AMD,
1505 .family = 6,
1506 .model = 6,
1507 .stepping = 3,
1508 .features[FEAT_1_EDX] =
1509 PPRO_FEATURES |
1510 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1511 CPUID_PSE36,
1512 .features[FEAT_1_ECX] =
1513 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1514 .features[FEAT_8000_0001_EDX] =
1515 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1516 .features[FEAT_8000_0001_ECX] =
1517 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
1518 .xlevel = 0x8000000A,
1519 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1522 .name = "phenom",
1523 .level = 5,
1524 .vendor = CPUID_VENDOR_AMD,
1525 .family = 16,
1526 .model = 2,
1527 .stepping = 3,
1528 /* Missing: CPUID_HT */
1529 .features[FEAT_1_EDX] =
1530 PPRO_FEATURES |
1531 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1532 CPUID_PSE36 | CPUID_VME,
1533 .features[FEAT_1_ECX] =
1534 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
1535 CPUID_EXT_POPCNT,
1536 .features[FEAT_8000_0001_EDX] =
1537 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
1538 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
1539 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
1540 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1541 CPUID_EXT3_CR8LEG,
1542 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1543 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
1544 .features[FEAT_8000_0001_ECX] =
1545 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
1546 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
1547 /* Missing: CPUID_SVM_LBRV */
1548 .features[FEAT_SVM] =
1549 CPUID_SVM_NPT,
1550 .xlevel = 0x8000001A,
1551 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
1554 .name = "core2duo",
1555 .level = 10,
1556 .vendor = CPUID_VENDOR_INTEL,
1557 .family = 6,
1558 .model = 15,
1559 .stepping = 11,
1560 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1561 .features[FEAT_1_EDX] =
1562 PPRO_FEATURES |
1563 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1564 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
1565 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
1566 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
1567 .features[FEAT_1_ECX] =
1568 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1569 CPUID_EXT_CX16,
1570 .features[FEAT_8000_0001_EDX] =
1571 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1572 .features[FEAT_8000_0001_ECX] =
1573 CPUID_EXT3_LAHF_LM,
1574 .xlevel = 0x80000008,
1575 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
1578 .name = "kvm64",
1579 .level = 0xd,
1580 .vendor = CPUID_VENDOR_INTEL,
1581 .family = 15,
1582 .model = 6,
1583 .stepping = 1,
1584 /* Missing: CPUID_HT */
1585 .features[FEAT_1_EDX] =
1586 PPRO_FEATURES | CPUID_VME |
1587 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1588 CPUID_PSE36,
1589 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
1590 .features[FEAT_1_ECX] =
1591 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1592 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
1593 .features[FEAT_8000_0001_EDX] =
1594 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1595 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1596 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
1597 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1598 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
1599 .features[FEAT_8000_0001_ECX] =
1601 .xlevel = 0x80000008,
1602 .model_id = "Common KVM processor"
1605 .name = "qemu32",
1606 .level = 4,
1607 .vendor = CPUID_VENDOR_INTEL,
1608 .family = 6,
1609 .model = 6,
1610 .stepping = 3,
1611 .features[FEAT_1_EDX] =
1612 PPRO_FEATURES,
1613 .features[FEAT_1_ECX] =
1614 CPUID_EXT_SSE3,
1615 .xlevel = 0x80000004,
1616 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1619 .name = "kvm32",
1620 .level = 5,
1621 .vendor = CPUID_VENDOR_INTEL,
1622 .family = 15,
1623 .model = 6,
1624 .stepping = 1,
1625 .features[FEAT_1_EDX] =
1626 PPRO_FEATURES | CPUID_VME |
1627 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
1628 .features[FEAT_1_ECX] =
1629 CPUID_EXT_SSE3,
1630 .features[FEAT_8000_0001_ECX] =
1632 .xlevel = 0x80000008,
1633 .model_id = "Common 32-bit KVM processor"
1636 .name = "coreduo",
1637 .level = 10,
1638 .vendor = CPUID_VENDOR_INTEL,
1639 .family = 6,
1640 .model = 14,
1641 .stepping = 8,
1642 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1643 .features[FEAT_1_EDX] =
1644 PPRO_FEATURES | CPUID_VME |
1645 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
1646 CPUID_SS,
1647 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
1648 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
1649 .features[FEAT_1_ECX] =
1650 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
1651 .features[FEAT_8000_0001_EDX] =
1652 CPUID_EXT2_NX,
1653 .xlevel = 0x80000008,
1654 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
1657 .name = "486",
1658 .level = 1,
1659 .vendor = CPUID_VENDOR_INTEL,
1660 .family = 4,
1661 .model = 8,
1662 .stepping = 0,
1663 .features[FEAT_1_EDX] =
1664 I486_FEATURES,
1665 .xlevel = 0,
1666 .model_id = "",
1669 .name = "pentium",
1670 .level = 1,
1671 .vendor = CPUID_VENDOR_INTEL,
1672 .family = 5,
1673 .model = 4,
1674 .stepping = 3,
1675 .features[FEAT_1_EDX] =
1676 PENTIUM_FEATURES,
1677 .xlevel = 0,
1678 .model_id = "",
1681 .name = "pentium2",
1682 .level = 2,
1683 .vendor = CPUID_VENDOR_INTEL,
1684 .family = 6,
1685 .model = 5,
1686 .stepping = 2,
1687 .features[FEAT_1_EDX] =
1688 PENTIUM2_FEATURES,
1689 .xlevel = 0,
1690 .model_id = "",
1693 .name = "pentium3",
1694 .level = 3,
1695 .vendor = CPUID_VENDOR_INTEL,
1696 .family = 6,
1697 .model = 7,
1698 .stepping = 3,
1699 .features[FEAT_1_EDX] =
1700 PENTIUM3_FEATURES,
1701 .xlevel = 0,
1702 .model_id = "",
1705 .name = "athlon",
1706 .level = 2,
1707 .vendor = CPUID_VENDOR_AMD,
1708 .family = 6,
1709 .model = 2,
1710 .stepping = 3,
1711 .features[FEAT_1_EDX] =
1712 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
1713 CPUID_MCA,
1714 .features[FEAT_8000_0001_EDX] =
1715 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
1716 .xlevel = 0x80000008,
1717 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1720 .name = "n270",
1721 .level = 10,
1722 .vendor = CPUID_VENDOR_INTEL,
1723 .family = 6,
1724 .model = 28,
1725 .stepping = 2,
1726 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1727 .features[FEAT_1_EDX] =
1728 PPRO_FEATURES |
1729 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
1730 CPUID_ACPI | CPUID_SS,
1731 /* Some CPUs got no CPUID_SEP */
1732 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
1733 * CPUID_EXT_XTPR */
1734 .features[FEAT_1_ECX] =
1735 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1736 CPUID_EXT_MOVBE,
1737 .features[FEAT_8000_0001_EDX] =
1738 CPUID_EXT2_NX,
1739 .features[FEAT_8000_0001_ECX] =
1740 CPUID_EXT3_LAHF_LM,
1741 .xlevel = 0x80000008,
1742 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
1745 .name = "Conroe",
1746 .level = 10,
1747 .vendor = CPUID_VENDOR_INTEL,
1748 .family = 6,
1749 .model = 15,
1750 .stepping = 3,
1751 .features[FEAT_1_EDX] =
1752 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1753 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1754 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1755 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1756 CPUID_DE | CPUID_FP87,
1757 .features[FEAT_1_ECX] =
1758 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1759 .features[FEAT_8000_0001_EDX] =
1760 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1761 .features[FEAT_8000_0001_ECX] =
1762 CPUID_EXT3_LAHF_LM,
1763 .xlevel = 0x80000008,
1764 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1767 .name = "Penryn",
1768 .level = 10,
1769 .vendor = CPUID_VENDOR_INTEL,
1770 .family = 6,
1771 .model = 23,
1772 .stepping = 3,
1773 .features[FEAT_1_EDX] =
1774 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1775 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1776 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1777 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1778 CPUID_DE | CPUID_FP87,
1779 .features[FEAT_1_ECX] =
1780 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1781 CPUID_EXT_SSE3,
1782 .features[FEAT_8000_0001_EDX] =
1783 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1784 .features[FEAT_8000_0001_ECX] =
1785 CPUID_EXT3_LAHF_LM,
1786 .xlevel = 0x80000008,
1787 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1790 .name = "Nehalem",
1791 .level = 11,
1792 .vendor = CPUID_VENDOR_INTEL,
1793 .family = 6,
1794 .model = 26,
1795 .stepping = 3,
1796 .features[FEAT_1_EDX] =
1797 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1798 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1799 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1800 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1801 CPUID_DE | CPUID_FP87,
1802 .features[FEAT_1_ECX] =
1803 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1804 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1805 .features[FEAT_8000_0001_EDX] =
1806 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1807 .features[FEAT_8000_0001_ECX] =
1808 CPUID_EXT3_LAHF_LM,
1809 .xlevel = 0x80000008,
1810 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1813 .name = "Nehalem-IBRS",
1814 .level = 11,
1815 .vendor = CPUID_VENDOR_INTEL,
1816 .family = 6,
1817 .model = 26,
1818 .stepping = 3,
1819 .features[FEAT_1_EDX] =
1820 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1821 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1822 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1823 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1824 CPUID_DE | CPUID_FP87,
1825 .features[FEAT_1_ECX] =
1826 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1827 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1828 .features[FEAT_7_0_EDX] =
1829 CPUID_7_0_EDX_SPEC_CTRL,
1830 .features[FEAT_8000_0001_EDX] =
1831 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1832 .features[FEAT_8000_0001_ECX] =
1833 CPUID_EXT3_LAHF_LM,
1834 .xlevel = 0x80000008,
1835 .model_id = "Intel Core i7 9xx (Nehalem Core i7, IBRS update)",
1838 .name = "Westmere",
1839 .level = 11,
1840 .vendor = CPUID_VENDOR_INTEL,
1841 .family = 6,
1842 .model = 44,
1843 .stepping = 1,
1844 .features[FEAT_1_EDX] =
1845 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1846 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1847 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1848 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1849 CPUID_DE | CPUID_FP87,
1850 .features[FEAT_1_ECX] =
1851 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1852 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1853 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1854 .features[FEAT_8000_0001_EDX] =
1855 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1856 .features[FEAT_8000_0001_ECX] =
1857 CPUID_EXT3_LAHF_LM,
1858 .features[FEAT_6_EAX] =
1859 CPUID_6_EAX_ARAT,
1860 .xlevel = 0x80000008,
1861 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1864 .name = "Westmere-IBRS",
1865 .level = 11,
1866 .vendor = CPUID_VENDOR_INTEL,
1867 .family = 6,
1868 .model = 44,
1869 .stepping = 1,
1870 .features[FEAT_1_EDX] =
1871 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1872 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1873 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1874 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1875 CPUID_DE | CPUID_FP87,
1876 .features[FEAT_1_ECX] =
1877 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1878 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1879 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1880 .features[FEAT_8000_0001_EDX] =
1881 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1882 .features[FEAT_8000_0001_ECX] =
1883 CPUID_EXT3_LAHF_LM,
1884 .features[FEAT_7_0_EDX] =
1885 CPUID_7_0_EDX_SPEC_CTRL,
1886 .features[FEAT_6_EAX] =
1887 CPUID_6_EAX_ARAT,
1888 .xlevel = 0x80000008,
1889 .model_id = "Westmere E56xx/L56xx/X56xx (IBRS update)",
1892 .name = "SandyBridge",
1893 .level = 0xd,
1894 .vendor = CPUID_VENDOR_INTEL,
1895 .family = 6,
1896 .model = 42,
1897 .stepping = 1,
1898 .features[FEAT_1_EDX] =
1899 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1900 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1901 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1902 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1903 CPUID_DE | CPUID_FP87,
1904 .features[FEAT_1_ECX] =
1905 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1906 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1907 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1908 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1909 CPUID_EXT_SSE3,
1910 .features[FEAT_8000_0001_EDX] =
1911 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1912 CPUID_EXT2_SYSCALL,
1913 .features[FEAT_8000_0001_ECX] =
1914 CPUID_EXT3_LAHF_LM,
1915 .features[FEAT_XSAVE] =
1916 CPUID_XSAVE_XSAVEOPT,
1917 .features[FEAT_6_EAX] =
1918 CPUID_6_EAX_ARAT,
1919 .xlevel = 0x80000008,
1920 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1923 .name = "SandyBridge-IBRS",
1924 .level = 0xd,
1925 .vendor = CPUID_VENDOR_INTEL,
1926 .family = 6,
1927 .model = 42,
1928 .stepping = 1,
1929 .features[FEAT_1_EDX] =
1930 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1931 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1932 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1933 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1934 CPUID_DE | CPUID_FP87,
1935 .features[FEAT_1_ECX] =
1936 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1937 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1938 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1939 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1940 CPUID_EXT_SSE3,
1941 .features[FEAT_8000_0001_EDX] =
1942 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1943 CPUID_EXT2_SYSCALL,
1944 .features[FEAT_8000_0001_ECX] =
1945 CPUID_EXT3_LAHF_LM,
1946 .features[FEAT_7_0_EDX] =
1947 CPUID_7_0_EDX_SPEC_CTRL,
1948 .features[FEAT_XSAVE] =
1949 CPUID_XSAVE_XSAVEOPT,
1950 .features[FEAT_6_EAX] =
1951 CPUID_6_EAX_ARAT,
1952 .xlevel = 0x80000008,
1953 .model_id = "Intel Xeon E312xx (Sandy Bridge, IBRS update)",
1956 .name = "IvyBridge",
1957 .level = 0xd,
1958 .vendor = CPUID_VENDOR_INTEL,
1959 .family = 6,
1960 .model = 58,
1961 .stepping = 9,
1962 .features[FEAT_1_EDX] =
1963 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1964 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1965 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1966 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1967 CPUID_DE | CPUID_FP87,
1968 .features[FEAT_1_ECX] =
1969 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1970 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1971 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1972 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1973 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1974 .features[FEAT_7_0_EBX] =
1975 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1976 CPUID_7_0_EBX_ERMS,
1977 .features[FEAT_8000_0001_EDX] =
1978 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1979 CPUID_EXT2_SYSCALL,
1980 .features[FEAT_8000_0001_ECX] =
1981 CPUID_EXT3_LAHF_LM,
1982 .features[FEAT_XSAVE] =
1983 CPUID_XSAVE_XSAVEOPT,
1984 .features[FEAT_6_EAX] =
1985 CPUID_6_EAX_ARAT,
1986 .xlevel = 0x80000008,
1987 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1990 .name = "IvyBridge-IBRS",
1991 .level = 0xd,
1992 .vendor = CPUID_VENDOR_INTEL,
1993 .family = 6,
1994 .model = 58,
1995 .stepping = 9,
1996 .features[FEAT_1_EDX] =
1997 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1998 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1999 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2000 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2001 CPUID_DE | CPUID_FP87,
2002 .features[FEAT_1_ECX] =
2003 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2004 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
2005 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2006 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2007 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2008 .features[FEAT_7_0_EBX] =
2009 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
2010 CPUID_7_0_EBX_ERMS,
2011 .features[FEAT_8000_0001_EDX] =
2012 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2013 CPUID_EXT2_SYSCALL,
2014 .features[FEAT_8000_0001_ECX] =
2015 CPUID_EXT3_LAHF_LM,
2016 .features[FEAT_7_0_EDX] =
2017 CPUID_7_0_EDX_SPEC_CTRL,
2018 .features[FEAT_XSAVE] =
2019 CPUID_XSAVE_XSAVEOPT,
2020 .features[FEAT_6_EAX] =
2021 CPUID_6_EAX_ARAT,
2022 .xlevel = 0x80000008,
2023 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)",
2026 .name = "Haswell-noTSX",
2027 .level = 0xd,
2028 .vendor = CPUID_VENDOR_INTEL,
2029 .family = 6,
2030 .model = 60,
2031 .stepping = 1,
2032 .features[FEAT_1_EDX] =
2033 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2034 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2035 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2036 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2037 CPUID_DE | CPUID_FP87,
2038 .features[FEAT_1_ECX] =
2039 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2040 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2041 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2042 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2043 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2044 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2045 .features[FEAT_8000_0001_EDX] =
2046 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2047 CPUID_EXT2_SYSCALL,
2048 .features[FEAT_8000_0001_ECX] =
2049 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
2050 .features[FEAT_7_0_EBX] =
2051 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2052 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2053 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
2054 .features[FEAT_XSAVE] =
2055 CPUID_XSAVE_XSAVEOPT,
2056 .features[FEAT_6_EAX] =
2057 CPUID_6_EAX_ARAT,
2058 .xlevel = 0x80000008,
2059 .model_id = "Intel Core Processor (Haswell, no TSX)",
2062 .name = "Haswell-noTSX-IBRS",
2063 .level = 0xd,
2064 .vendor = CPUID_VENDOR_INTEL,
2065 .family = 6,
2066 .model = 60,
2067 .stepping = 1,
2068 .features[FEAT_1_EDX] =
2069 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2070 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2071 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2072 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2073 CPUID_DE | CPUID_FP87,
2074 .features[FEAT_1_ECX] =
2075 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2076 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2077 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2078 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2079 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2080 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2081 .features[FEAT_8000_0001_EDX] =
2082 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2083 CPUID_EXT2_SYSCALL,
2084 .features[FEAT_8000_0001_ECX] =
2085 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
2086 .features[FEAT_7_0_EDX] =
2087 CPUID_7_0_EDX_SPEC_CTRL,
2088 .features[FEAT_7_0_EBX] =
2089 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2090 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2091 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
2092 .features[FEAT_XSAVE] =
2093 CPUID_XSAVE_XSAVEOPT,
2094 .features[FEAT_6_EAX] =
2095 CPUID_6_EAX_ARAT,
2096 .xlevel = 0x80000008,
2097 .model_id = "Intel Core Processor (Haswell, no TSX, IBRS)",
2100 .name = "Haswell",
2101 .level = 0xd,
2102 .vendor = CPUID_VENDOR_INTEL,
2103 .family = 6,
2104 .model = 60,
2105 .stepping = 4,
2106 .features[FEAT_1_EDX] =
2107 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2108 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2109 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2110 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2111 CPUID_DE | CPUID_FP87,
2112 .features[FEAT_1_ECX] =
2113 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2114 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2115 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2116 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2117 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2118 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2119 .features[FEAT_8000_0001_EDX] =
2120 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2121 CPUID_EXT2_SYSCALL,
2122 .features[FEAT_8000_0001_ECX] =
2123 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
2124 .features[FEAT_7_0_EBX] =
2125 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2126 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2127 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2128 CPUID_7_0_EBX_RTM,
2129 .features[FEAT_XSAVE] =
2130 CPUID_XSAVE_XSAVEOPT,
2131 .features[FEAT_6_EAX] =
2132 CPUID_6_EAX_ARAT,
2133 .xlevel = 0x80000008,
2134 .model_id = "Intel Core Processor (Haswell)",
2137 .name = "Haswell-IBRS",
2138 .level = 0xd,
2139 .vendor = CPUID_VENDOR_INTEL,
2140 .family = 6,
2141 .model = 60,
2142 .stepping = 4,
2143 .features[FEAT_1_EDX] =
2144 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2145 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2146 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2147 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2148 CPUID_DE | CPUID_FP87,
2149 .features[FEAT_1_ECX] =
2150 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2151 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2152 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2153 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2154 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2155 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2156 .features[FEAT_8000_0001_EDX] =
2157 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2158 CPUID_EXT2_SYSCALL,
2159 .features[FEAT_8000_0001_ECX] =
2160 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
2161 .features[FEAT_7_0_EDX] =
2162 CPUID_7_0_EDX_SPEC_CTRL,
2163 .features[FEAT_7_0_EBX] =
2164 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2165 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2166 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2167 CPUID_7_0_EBX_RTM,
2168 .features[FEAT_XSAVE] =
2169 CPUID_XSAVE_XSAVEOPT,
2170 .features[FEAT_6_EAX] =
2171 CPUID_6_EAX_ARAT,
2172 .xlevel = 0x80000008,
2173 .model_id = "Intel Core Processor (Haswell, IBRS)",
2176 .name = "Broadwell-noTSX",
2177 .level = 0xd,
2178 .vendor = CPUID_VENDOR_INTEL,
2179 .family = 6,
2180 .model = 61,
2181 .stepping = 2,
2182 .features[FEAT_1_EDX] =
2183 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2184 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2185 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2186 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2187 CPUID_DE | CPUID_FP87,
2188 .features[FEAT_1_ECX] =
2189 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2190 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2191 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2192 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2193 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2194 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2195 .features[FEAT_8000_0001_EDX] =
2196 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2197 CPUID_EXT2_SYSCALL,
2198 .features[FEAT_8000_0001_ECX] =
2199 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2200 .features[FEAT_7_0_EBX] =
2201 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2202 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2203 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2204 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2205 CPUID_7_0_EBX_SMAP,
2206 .features[FEAT_XSAVE] =
2207 CPUID_XSAVE_XSAVEOPT,
2208 .features[FEAT_6_EAX] =
2209 CPUID_6_EAX_ARAT,
2210 .xlevel = 0x80000008,
2211 .model_id = "Intel Core Processor (Broadwell, no TSX)",
2214 .name = "Broadwell-noTSX-IBRS",
2215 .level = 0xd,
2216 .vendor = CPUID_VENDOR_INTEL,
2217 .family = 6,
2218 .model = 61,
2219 .stepping = 2,
2220 .features[FEAT_1_EDX] =
2221 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2222 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2223 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2224 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2225 CPUID_DE | CPUID_FP87,
2226 .features[FEAT_1_ECX] =
2227 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2228 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2229 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2230 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2231 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2232 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2233 .features[FEAT_8000_0001_EDX] =
2234 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2235 CPUID_EXT2_SYSCALL,
2236 .features[FEAT_8000_0001_ECX] =
2237 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2238 .features[FEAT_7_0_EDX] =
2239 CPUID_7_0_EDX_SPEC_CTRL,
2240 .features[FEAT_7_0_EBX] =
2241 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2242 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2243 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2244 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2245 CPUID_7_0_EBX_SMAP,
2246 .features[FEAT_XSAVE] =
2247 CPUID_XSAVE_XSAVEOPT,
2248 .features[FEAT_6_EAX] =
2249 CPUID_6_EAX_ARAT,
2250 .xlevel = 0x80000008,
2251 .model_id = "Intel Core Processor (Broadwell, no TSX, IBRS)",
2254 .name = "Broadwell",
2255 .level = 0xd,
2256 .vendor = CPUID_VENDOR_INTEL,
2257 .family = 6,
2258 .model = 61,
2259 .stepping = 2,
2260 .features[FEAT_1_EDX] =
2261 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2262 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2263 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2264 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2265 CPUID_DE | CPUID_FP87,
2266 .features[FEAT_1_ECX] =
2267 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2268 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2269 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2270 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2271 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2272 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2273 .features[FEAT_8000_0001_EDX] =
2274 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2275 CPUID_EXT2_SYSCALL,
2276 .features[FEAT_8000_0001_ECX] =
2277 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2278 .features[FEAT_7_0_EBX] =
2279 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2280 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2281 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2282 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2283 CPUID_7_0_EBX_SMAP,
2284 .features[FEAT_XSAVE] =
2285 CPUID_XSAVE_XSAVEOPT,
2286 .features[FEAT_6_EAX] =
2287 CPUID_6_EAX_ARAT,
2288 .xlevel = 0x80000008,
2289 .model_id = "Intel Core Processor (Broadwell)",
2292 .name = "Broadwell-IBRS",
2293 .level = 0xd,
2294 .vendor = CPUID_VENDOR_INTEL,
2295 .family = 6,
2296 .model = 61,
2297 .stepping = 2,
2298 .features[FEAT_1_EDX] =
2299 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2300 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2301 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2302 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2303 CPUID_DE | CPUID_FP87,
2304 .features[FEAT_1_ECX] =
2305 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2306 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2307 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2308 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2309 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2310 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2311 .features[FEAT_8000_0001_EDX] =
2312 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2313 CPUID_EXT2_SYSCALL,
2314 .features[FEAT_8000_0001_ECX] =
2315 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2316 .features[FEAT_7_0_EDX] =
2317 CPUID_7_0_EDX_SPEC_CTRL,
2318 .features[FEAT_7_0_EBX] =
2319 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2320 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2321 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2322 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2323 CPUID_7_0_EBX_SMAP,
2324 .features[FEAT_XSAVE] =
2325 CPUID_XSAVE_XSAVEOPT,
2326 .features[FEAT_6_EAX] =
2327 CPUID_6_EAX_ARAT,
2328 .xlevel = 0x80000008,
2329 .model_id = "Intel Core Processor (Broadwell, IBRS)",
2332 .name = "Skylake-Client",
2333 .level = 0xd,
2334 .vendor = CPUID_VENDOR_INTEL,
2335 .family = 6,
2336 .model = 94,
2337 .stepping = 3,
2338 .features[FEAT_1_EDX] =
2339 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2340 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2341 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2342 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2343 CPUID_DE | CPUID_FP87,
2344 .features[FEAT_1_ECX] =
2345 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2346 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2347 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2348 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2349 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2350 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2351 .features[FEAT_8000_0001_EDX] =
2352 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2353 CPUID_EXT2_SYSCALL,
2354 .features[FEAT_8000_0001_ECX] =
2355 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2356 .features[FEAT_7_0_EBX] =
2357 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2358 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2359 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2360 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2361 CPUID_7_0_EBX_SMAP,
2362 /* Missing: XSAVES (not supported by some Linux versions,
2363 * including v4.1 to v4.12).
2364 * KVM doesn't yet expose any XSAVES state save component,
2365 * and the only one defined in Skylake (processor tracing)
2366          * probably will block migration anyway.
2367          */
2368 .features[FEAT_XSAVE] =
2369 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2370 CPUID_XSAVE_XGETBV1,
2371 .features[FEAT_6_EAX] =
2372 CPUID_6_EAX_ARAT,
2373 .xlevel = 0x80000008,
2374 .model_id = "Intel Core Processor (Skylake)",
2377 .name = "Skylake-Client-IBRS",
2378 .level = 0xd,
2379 .vendor = CPUID_VENDOR_INTEL,
2380 .family = 6,
2381 .model = 94,
2382 .stepping = 3,
2383 .features[FEAT_1_EDX] =
2384 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2385 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2386 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2387 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2388 CPUID_DE | CPUID_FP87,
2389 .features[FEAT_1_ECX] =
2390 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2391 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2392 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2393 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2394 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2395 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2396 .features[FEAT_8000_0001_EDX] =
2397 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2398 CPUID_EXT2_SYSCALL,
2399 .features[FEAT_8000_0001_ECX] =
2400 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2401 .features[FEAT_7_0_EDX] =
2402 CPUID_7_0_EDX_SPEC_CTRL,
2403 .features[FEAT_7_0_EBX] =
2404 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2405 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2406 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2407 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2408 CPUID_7_0_EBX_SMAP,
2409 /* Missing: XSAVES (not supported by some Linux versions,
2410 * including v4.1 to v4.12).
2411 * KVM doesn't yet expose any XSAVES state save component,
2412 * and the only one defined in Skylake (processor tracing)
2413          * probably will block migration anyway.
2414          */
2415 .features[FEAT_XSAVE] =
2416 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2417 CPUID_XSAVE_XGETBV1,
2418 .features[FEAT_6_EAX] =
2419 CPUID_6_EAX_ARAT,
2420 .xlevel = 0x80000008,
2421 .model_id = "Intel Core Processor (Skylake, IBRS)",
2424 .name = "Skylake-Server",
2425 .level = 0xd,
2426 .vendor = CPUID_VENDOR_INTEL,
2427 .family = 6,
2428 .model = 85,
2429 .stepping = 4,
2430 .features[FEAT_1_EDX] =
2431 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2432 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2433 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2434 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2435 CPUID_DE | CPUID_FP87,
2436 .features[FEAT_1_ECX] =
2437 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2438 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2439 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2440 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2441 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2442 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2443 .features[FEAT_8000_0001_EDX] =
2444 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2445 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2446 .features[FEAT_8000_0001_ECX] =
2447 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2448 .features[FEAT_7_0_EBX] =
2449 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2450 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2451 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2452 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2453 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
2454 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2455 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2456 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2457 .features[FEAT_7_0_ECX] =
2458 CPUID_7_0_ECX_PKU,
2459 /* Missing: XSAVES (not supported by some Linux versions,
2460 * including v4.1 to v4.12).
2461 * KVM doesn't yet expose any XSAVES state save component,
2462 * and the only one defined in Skylake (processor tracing)
2463          * probably will block migration anyway.
2464          */
2465 .features[FEAT_XSAVE] =
2466 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2467 CPUID_XSAVE_XGETBV1,
2468 .features[FEAT_6_EAX] =
2469 CPUID_6_EAX_ARAT,
2470 .xlevel = 0x80000008,
2471 .model_id = "Intel Xeon Processor (Skylake)",
2474 .name = "Skylake-Server-IBRS",
2475 .level = 0xd,
2476 .vendor = CPUID_VENDOR_INTEL,
2477 .family = 6,
2478 .model = 85,
2479 .stepping = 4,
2480 .features[FEAT_1_EDX] =
2481 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2482 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2483 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2484 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2485 CPUID_DE | CPUID_FP87,
2486 .features[FEAT_1_ECX] =
2487 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2488 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2489 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2490 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2491 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2492 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2493 .features[FEAT_8000_0001_EDX] =
2494 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2495 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2496 .features[FEAT_8000_0001_ECX] =
2497 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2498 .features[FEAT_7_0_EDX] =
2499 CPUID_7_0_EDX_SPEC_CTRL,
2500 .features[FEAT_7_0_EBX] =
2501 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2502 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2503 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2504 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2505 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
2506 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2507 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2508 CPUID_7_0_EBX_AVX512VL,
2509 .features[FEAT_7_0_ECX] =
2510 CPUID_7_0_ECX_PKU,
2511 /* Missing: XSAVES (not supported by some Linux versions,
2512 * including v4.1 to v4.12).
2513 * KVM doesn't yet expose any XSAVES state save component,
2514 * and the only one defined in Skylake (processor tracing)
2515          * probably will block migration anyway.
2516          */
2517 .features[FEAT_XSAVE] =
2518 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2519 CPUID_XSAVE_XGETBV1,
2520 .features[FEAT_6_EAX] =
2521 CPUID_6_EAX_ARAT,
2522 .xlevel = 0x80000008,
2523 .model_id = "Intel Xeon Processor (Skylake, IBRS)",
2526 .name = "Cascadelake-Server",
2527 .level = 0xd,
2528 .vendor = CPUID_VENDOR_INTEL,
2529 .family = 6,
2530 .model = 85,
2531 .stepping = 6,
2532 .features[FEAT_1_EDX] =
2533 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2534 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2535 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2536 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2537 CPUID_DE | CPUID_FP87,
2538 .features[FEAT_1_ECX] =
2539 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2540 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2541 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2542 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2543 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2544 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2545 .features[FEAT_8000_0001_EDX] =
2546 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2547 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2548 .features[FEAT_8000_0001_ECX] =
2549 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2550 .features[FEAT_7_0_EBX] =
2551 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2552 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2553 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2554 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2555 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
2556 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2557 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2558 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2559 .features[FEAT_7_0_ECX] =
2560 CPUID_7_0_ECX_PKU |
2561 CPUID_7_0_ECX_AVX512VNNI,
2562 .features[FEAT_7_0_EDX] =
2563 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
2564 /* Missing: XSAVES (not supported by some Linux versions,
2565 * including v4.1 to v4.12).
2566 * KVM doesn't yet expose any XSAVES state save component,
2567 * and the only one defined in Skylake (processor tracing)
2568          * probably will block migration anyway.
2569          */
2570 .features[FEAT_XSAVE] =
2571 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2572 CPUID_XSAVE_XGETBV1,
2573 .features[FEAT_6_EAX] =
2574 CPUID_6_EAX_ARAT,
2575 .xlevel = 0x80000008,
2576 .model_id = "Intel Xeon Processor (Cascadelake)",
2579 .name = "Icelake-Client",
2580 .level = 0xd,
2581 .vendor = CPUID_VENDOR_INTEL,
2582 .family = 6,
2583 .model = 126,
2584 .stepping = 0,
2585 .features[FEAT_1_EDX] =
2586 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2587 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2588 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2589 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2590 CPUID_DE | CPUID_FP87,
2591 .features[FEAT_1_ECX] =
2592 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2593 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2594 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2595 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2596 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2597 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2598 .features[FEAT_8000_0001_EDX] =
2599 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2600 CPUID_EXT2_SYSCALL,
2601 .features[FEAT_8000_0001_ECX] =
2602 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2603 .features[FEAT_8000_0008_EBX] =
2604 CPUID_8000_0008_EBX_WBNOINVD,
2605 .features[FEAT_7_0_EBX] =
2606 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2607 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2608 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2609 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2610 CPUID_7_0_EBX_SMAP,
2611 .features[FEAT_7_0_ECX] =
2612 CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
2613 CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI |
2614 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
2615 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
2616 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
2617 .features[FEAT_7_0_EDX] =
2618 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
2619 /* Missing: XSAVES (not supported by some Linux versions,
2620 * including v4.1 to v4.12).
2621 * KVM doesn't yet expose any XSAVES state save component,
2622 * and the only one defined in Skylake (processor tracing)
2623          * probably will block migration anyway.
2624          */
2625 .features[FEAT_XSAVE] =
2626 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2627 CPUID_XSAVE_XGETBV1,
2628 .features[FEAT_6_EAX] =
2629 CPUID_6_EAX_ARAT,
2630 .xlevel = 0x80000008,
2631 .model_id = "Intel Core Processor (Icelake)",
2634 .name = "Icelake-Server",
2635 .level = 0xd,
2636 .vendor = CPUID_VENDOR_INTEL,
2637 .family = 6,
2638 .model = 134,
2639 .stepping = 0,
2640 .features[FEAT_1_EDX] =
2641 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2642 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2643 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2644 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2645 CPUID_DE | CPUID_FP87,
2646 .features[FEAT_1_ECX] =
2647 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2648 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2649 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2650 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2651 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2652 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2653 .features[FEAT_8000_0001_EDX] =
2654 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2655 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2656 .features[FEAT_8000_0001_ECX] =
2657 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2658 .features[FEAT_8000_0008_EBX] =
2659 CPUID_8000_0008_EBX_WBNOINVD,
2660 .features[FEAT_7_0_EBX] =
2661 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2662 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2663 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2664 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2665 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
2666 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2667 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2668 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2669 .features[FEAT_7_0_ECX] =
2670 CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
2671 CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI |
2672 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
2673 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
2674 CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57,
2675 .features[FEAT_7_0_EDX] =
2676 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
2677 /* Missing: XSAVES (not supported by some Linux versions,
2678 * including v4.1 to v4.12).
2679 * KVM doesn't yet expose any XSAVES state save component,
2680 * and the only one defined in Skylake (processor tracing)
2681          * probably will block migration anyway.
2682          */
2683 .features[FEAT_XSAVE] =
2684 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2685 CPUID_XSAVE_XGETBV1,
2686 .features[FEAT_6_EAX] =
2687 CPUID_6_EAX_ARAT,
2688 .xlevel = 0x80000008,
2689 .model_id = "Intel Xeon Processor (Icelake)",
2692 .name = "KnightsMill",
2693 .level = 0xd,
2694 .vendor = CPUID_VENDOR_INTEL,
2695 .family = 6,
2696 .model = 133,
2697 .stepping = 0,
2698 .features[FEAT_1_EDX] =
2699 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR |
2700 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV |
2701 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC |
2702 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC |
2703 CPUID_PSE | CPUID_DE | CPUID_FP87,
2704 .features[FEAT_1_ECX] =
2705 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2706 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2707 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2708 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2709 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2710 CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2711 .features[FEAT_8000_0001_EDX] =
2712 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2713 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2714 .features[FEAT_8000_0001_ECX] =
2715 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2716 .features[FEAT_7_0_EBX] =
2717 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2718 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS |
2719 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F |
2720 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF |
2721 CPUID_7_0_EBX_AVX512ER,
2722 .features[FEAT_7_0_ECX] =
2723 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
2724 .features[FEAT_7_0_EDX] =
2725 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS,
2726 .features[FEAT_XSAVE] =
2727 CPUID_XSAVE_XSAVEOPT,
2728 .features[FEAT_6_EAX] =
2729 CPUID_6_EAX_ARAT,
2730 .xlevel = 0x80000008,
2731 .model_id = "Intel Xeon Phi Processor (Knights Mill)",
2734 .name = "Opteron_G1",
2735 .level = 5,
2736 .vendor = CPUID_VENDOR_AMD,
2737 .family = 15,
2738 .model = 6,
2739 .stepping = 1,
2740 .features[FEAT_1_EDX] =
2741 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2742 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2743 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2744 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2745 CPUID_DE | CPUID_FP87,
2746 .features[FEAT_1_ECX] =
2747 CPUID_EXT_SSE3,
2748 .features[FEAT_8000_0001_EDX] =
2749 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2750 .xlevel = 0x80000008,
2751 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
2754 .name = "Opteron_G2",
2755 .level = 5,
2756 .vendor = CPUID_VENDOR_AMD,
2757 .family = 15,
2758 .model = 6,
2759 .stepping = 1,
2760 .features[FEAT_1_EDX] =
2761 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2762 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2763 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2764 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2765 CPUID_DE | CPUID_FP87,
2766 .features[FEAT_1_ECX] =
2767 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
2768 .features[FEAT_8000_0001_EDX] =
2769 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2770 .features[FEAT_8000_0001_ECX] =
2771 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2772 .xlevel = 0x80000008,
2773 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
2776 .name = "Opteron_G3",
2777 .level = 5,
2778 .vendor = CPUID_VENDOR_AMD,
2779 .family = 16,
2780 .model = 2,
2781 .stepping = 3,
2782 .features[FEAT_1_EDX] =
2783 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2784 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2785 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2786 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2787 CPUID_DE | CPUID_FP87,
2788 .features[FEAT_1_ECX] =
2789 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
2790 CPUID_EXT_SSE3,
2791 .features[FEAT_8000_0001_EDX] =
2792 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL |
2793 CPUID_EXT2_RDTSCP,
2794 .features[FEAT_8000_0001_ECX] =
2795 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
2796 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2797 .xlevel = 0x80000008,
2798 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
2801 .name = "Opteron_G4",
2802 .level = 0xd,
2803 .vendor = CPUID_VENDOR_AMD,
2804 .family = 21,
2805 .model = 1,
2806 .stepping = 2,
2807 .features[FEAT_1_EDX] =
2808 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2809 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2810 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2811 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2812 CPUID_DE | CPUID_FP87,
2813 .features[FEAT_1_ECX] =
2814 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2815 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2816 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2817 CPUID_EXT_SSE3,
2818 .features[FEAT_8000_0001_EDX] =
2819 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2820 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP,
2821 .features[FEAT_8000_0001_ECX] =
2822 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2823 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2824 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2825 CPUID_EXT3_LAHF_LM,
2826 .features[FEAT_SVM] =
2827 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
2828 /* no xsaveopt! */
2829 .xlevel = 0x8000001A,
2830 .model_id = "AMD Opteron 62xx class CPU",
2833 .name = "Opteron_G5",
2834 .level = 0xd,
2835 .vendor = CPUID_VENDOR_AMD,
2836 .family = 21,
2837 .model = 2,
2838 .stepping = 0,
2839 .features[FEAT_1_EDX] =
2840 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2841 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2842 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2843 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2844 CPUID_DE | CPUID_FP87,
2845 .features[FEAT_1_ECX] =
2846 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
2847 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
2848 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
2849 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2850 .features[FEAT_8000_0001_EDX] =
2851 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2852 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP,
2853 .features[FEAT_8000_0001_ECX] =
2854 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2855 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2856 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2857 CPUID_EXT3_LAHF_LM,
2858 .features[FEAT_SVM] =
2859 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
2860 /* no xsaveopt! */
2861 .xlevel = 0x8000001A,
2862 .model_id = "AMD Opteron 63xx class CPU",
2865 .name = "EPYC",
2866 .level = 0xd,
2867 .vendor = CPUID_VENDOR_AMD,
2868 .family = 23,
2869 .model = 1,
2870 .stepping = 2,
2871 .features[FEAT_1_EDX] =
2872 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2873 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2874 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2875 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2876 CPUID_VME | CPUID_FP87,
2877 .features[FEAT_1_ECX] =
2878 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2879 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2880 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2881 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2882 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2883 .features[FEAT_8000_0001_EDX] =
2884 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2885 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2886 CPUID_EXT2_SYSCALL,
2887 .features[FEAT_8000_0001_ECX] =
2888 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2889 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2890 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
2891 CPUID_EXT3_TOPOEXT,
2892 .features[FEAT_7_0_EBX] =
2893 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2894 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2895 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2896 CPUID_7_0_EBX_SHA_NI,
2897 /* Missing: XSAVES (not supported by some Linux versions,
2898 * including v4.1 to v4.12).
2899          * KVM doesn't yet expose any XSAVES state save component.
2900          */
2901 .features[FEAT_XSAVE] =
2902 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2903 CPUID_XSAVE_XGETBV1,
2904 .features[FEAT_6_EAX] =
2905 CPUID_6_EAX_ARAT,
2906 .features[FEAT_SVM] =
2907 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
2908 .xlevel = 0x8000001E,
2909 .model_id = "AMD EPYC Processor",
2910 .cache_info = &epyc_cache_info,
2913 .name = "EPYC-IBPB",
2914 .level = 0xd,
2915 .vendor = CPUID_VENDOR_AMD,
2916 .family = 23,
2917 .model = 1,
2918 .stepping = 2,
2919 .features[FEAT_1_EDX] =
2920 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2921 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2922 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2923 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2924 CPUID_VME | CPUID_FP87,
2925 .features[FEAT_1_ECX] =
2926 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2927 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2928 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2929 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2930 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2931 .features[FEAT_8000_0001_EDX] =
2932 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2933 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2934 CPUID_EXT2_SYSCALL,
2935 .features[FEAT_8000_0001_ECX] =
2936 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2937 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2938 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
2939 CPUID_EXT3_TOPOEXT,
2940 .features[FEAT_8000_0008_EBX] =
2941 CPUID_8000_0008_EBX_IBPB,
2942 .features[FEAT_7_0_EBX] =
2943 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2944 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2945 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2946 CPUID_7_0_EBX_SHA_NI,
2947 /* Missing: XSAVES (not supported by some Linux versions,
2948 * including v4.1 to v4.12).
2949          * KVM doesn't yet expose any XSAVES state save component.
2950          */
2951 .features[FEAT_XSAVE] =
2952 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2953 CPUID_XSAVE_XGETBV1,
2954 .features[FEAT_6_EAX] =
2955 CPUID_6_EAX_ARAT,
2956 .features[FEAT_SVM] =
2957 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
2958 .xlevel = 0x8000001E,
2959 .model_id = "AMD EPYC Processor (with IBPB)",
2960 .cache_info = &epyc_cache_info,
2963 .name = "Dhyana",
2964 .level = 0xd,
2965 .vendor = CPUID_VENDOR_HYGON,
2966 .family = 24,
2967 .model = 0,
2968 .stepping = 1,
2969 .features[FEAT_1_EDX] =
2970 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2971 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2972 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2973 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2974 CPUID_VME | CPUID_FP87,
2975 .features[FEAT_1_ECX] =
2976 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2977 CPUID_EXT_XSAVE | CPUID_EXT_POPCNT |
2978 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2979 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2980 CPUID_EXT_MONITOR | CPUID_EXT_SSE3,
2981 .features[FEAT_8000_0001_EDX] =
2982 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2983 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2984 CPUID_EXT2_SYSCALL,
2985 .features[FEAT_8000_0001_ECX] =
2986 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2987 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2988 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
2989 CPUID_EXT3_TOPOEXT,
2990 .features[FEAT_8000_0008_EBX] =
2991 CPUID_8000_0008_EBX_IBPB,
2992 .features[FEAT_7_0_EBX] =
2993 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2994 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2995 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT,
2996         /*
2997          * Missing: XSAVES (not supported by some Linux versions,
2998          * including v4.1 to v4.12).
2999          * KVM doesn't yet expose any XSAVES state save component.
3000          */
3001 .features[FEAT_XSAVE] =
3002 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3003 CPUID_XSAVE_XGETBV1,
3004 .features[FEAT_6_EAX] =
3005 CPUID_6_EAX_ARAT,
3006 .features[FEAT_SVM] =
3007 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
3008 .xlevel = 0x8000001E,
3009 .model_id = "Hygon Dhyana Processor",
3010 .cache_info = &epyc_cache_info,
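/*
 * Note on the definitions above: the "-IBRS" variants are essentially their
 * base model plus CPUID_7_0_EDX_SPEC_CTRL, and the "-noTSX" variants drop
 * HLE/RTM from FEAT_7_0_EBX.  EPYC, EPYC-IBPB and Dhyana share the same
 * epyc_cache_info cache topology.
 */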
3014 typedef struct PropValue {
3015 const char *prop, *value;
3016 } PropValue;
3018 /* KVM-specific features that are automatically added/removed
3019  * from all CPU models when KVM is enabled.
3020  */
3021 static PropValue kvm_default_props[] = {
3022 { "kvmclock", "on" },
3023 { "kvm-nopiodelay", "on" },
3024 { "kvm-asyncpf", "on" },
3025 { "kvm-steal-time", "on" },
3026 { "kvm-pv-eoi", "on" },
3027 { "kvmclock-stable-bit", "on" },
3028 { "x2apic", "on" },
3029 { "acpi", "off" },
3030 { "monitor", "off" },
3031 { "svm", "off" },
3032 { NULL, NULL },
3035 /* TCG-specific defaults that override all CPU models when using TCG
3036  */
3037 static PropValue tcg_default_props[] = {
3038 { "vme", "off" },
3039 { NULL, NULL },
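/*
 * x86_cpu_change_kvm_default:
 * Override the value that kvm_default_props above will apply for @prop.
 * Only valid for properties already present in that table (see the assert
 * below).
 */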
3043 void x86_cpu_change_kvm_default(const char *prop, const char *value)
3045 PropValue *pv;
3046 for (pv = kvm_default_props; pv->prop; pv++) {
3047 if (!strcmp(pv->prop, prop)) {
3048 pv->value = value;
3049 break;
3053 /* It is valid to call this function only for properties that
3054      * are already present in the kvm_default_props table.
3055      */
3056 assert(pv->prop);
3059 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
3060 bool migratable_only);
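/*
 * lmce_supported:
 * True when KVM reports the MCG_LMCE_P bit among its supported MCE
 * capabilities; always false when built without CONFIG_KVM.
 */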
3062 static bool lmce_supported(void)
3064 uint64_t mce_cap = 0;
3066 #ifdef CONFIG_KVM
3067 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
3068 return false;
3070 #endif
3072 return !!(mce_cap & MCG_LMCE_P);
3075 #define CPUID_MODEL_ID_SZ 48
3077 /*
3078  * cpu_x86_fill_model_id:
3079 * Get CPUID model ID string from host CPU.
3081 * @str should have at least CPUID_MODEL_ID_SZ bytes
3083 * The function does NOT add a null terminator to the string
3084  * automatically.
3085  */
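/*
 * Note: callers in this file pass a zero-initialized buffer of
 * CPUID_MODEL_ID_SZ + 1 bytes (see max_x86_cpu_initfn() below), so the
 * resulting model ID string ends up NUL-terminated anyway.
 */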
3086 static int cpu_x86_fill_model_id(char *str)
3088 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
3089 int i;
3091 for (i = 0; i < 3; i++) {
3092 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
3093 memcpy(str + i * 16 + 0, &eax, 4);
3094 memcpy(str + i * 16 + 4, &ebx, 4);
3095 memcpy(str + i * 16 + 8, &ecx, 4);
3096 memcpy(str + i * 16 + 12, &edx, 4);
3098 return 0;
3101 static Property max_x86_cpu_properties[] = {
3102 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
3103 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
3104 DEFINE_PROP_END_OF_LIST()
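/*
 * Properties and class init for the "max" CPU model.  "migratable"
 * (default on) is meant to restrict the enabled features to ones that can
 * be live-migrated; "host-cache-info" (default off) passes the host cache
 * topology through to the guest.
 */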
3107 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
3109 DeviceClass *dc = DEVICE_CLASS(oc);
3110 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3112 xcc->ordering = 9;
3114 xcc->model_description =
3115 "Enables all features supported by the accelerator in the current host";
3117 dc->props = max_x86_cpu_properties;
3120 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
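/*
 * Instance init for the "max" model: when the accelerator uses host CPUID
 * (KVM/HVF), copy the host's vendor/family/model/stepping and model-id and
 * pick up its minimum CPUID levels; otherwise (TCG) fall back to a generic
 * "QEMU TCG CPU" definition with an AMD vendor ID.
 */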
3122 static void max_x86_cpu_initfn(Object *obj)
3124 X86CPU *cpu = X86_CPU(obj);
3125 CPUX86State *env = &cpu->env;
3126 KVMState *s = kvm_state;
3128 /* We can't fill the features array here because we don't know yet if
3129 * "migratable" is true or false.
3131 cpu->max_features = true;
3133 if (accel_uses_host_cpuid()) {
3134 char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
3135 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
3136 int family, model, stepping;
3137 X86CPUDefinition host_cpudef = { };
3138 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
3140 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
3141 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
3143 host_vendor_fms(vendor, &family, &model, &stepping);
3145 cpu_x86_fill_model_id(model_id);
3147 object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
3148 object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
3149 object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
3150 object_property_set_int(OBJECT(cpu), stepping, "stepping",
3151 &error_abort);
3152 object_property_set_str(OBJECT(cpu), model_id, "model-id",
3153 &error_abort);
3155 if (kvm_enabled()) {
3156 env->cpuid_min_level =
3157 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
3158 env->cpuid_min_xlevel =
3159 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
3160 env->cpuid_min_xlevel2 =
3161 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
3162 } else {
3163 env->cpuid_min_level =
3164 hvf_get_supported_cpuid(0x0, 0, R_EAX);
3165 env->cpuid_min_xlevel =
3166 hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
3167 env->cpuid_min_xlevel2 =
3168 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
3171 if (lmce_supported()) {
3172 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
3174 } else {
3175 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
3176 "vendor", &error_abort);
3177 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
3178 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
3179 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
3180 object_property_set_str(OBJECT(cpu),
3181 "QEMU TCG CPU version " QEMU_HW_VERSION,
3182 "model-id", &error_abort);
3185 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
3188 static const TypeInfo max_x86_cpu_type_info = {
3189 .name = X86_CPU_TYPE_NAME("max"),
3190 .parent = TYPE_X86_CPU,
3191 .instance_init = max_x86_cpu_initfn,
3192 .class_init = max_x86_cpu_class_init,
3195 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
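/*
 * The "host" CPU model is the same as its parent "max", but is only
 * available with accelerators that expose host CPUID (host_cpuid_required).
 */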
3196 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
3198 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3200 xcc->host_cpuid_required = true;
3201 xcc->ordering = 8;
3203 #if defined(CONFIG_KVM)
3204 xcc->model_description =
3205 "KVM processor with all supported host features ";
3206 #elif defined(CONFIG_HVF)
3207 xcc->model_description =
3208 "HVF processor with all supported host features ";
3209 #endif
3212 static const TypeInfo host_x86_cpu_type_info = {
3213 .name = X86_CPU_TYPE_NAME("host"),
3214 .parent = X86_CPU_TYPE_NAME("max"),
3215 .class_init = host_x86_cpu_class_init,
3218 #endif
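/*
 * feature_word_description:
 * Human-readable location of a feature word, e.g. "CPUID.07H:EBX" for a
 * CPUID-based word or "MSR(10AH)" for an MSR-based word; used by the
 * warning messages below.
 */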
3220 static char *feature_word_description(FeatureWordInfo *f, uint32_t bit)
3222 assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD);
3224 switch (f->type) {
3225 case CPUID_FEATURE_WORD:
3227 const char *reg = get_register_name_32(f->cpuid.reg);
3228 assert(reg);
3229 return g_strdup_printf("CPUID.%02XH:%s",
3230 f->cpuid.eax, reg);
3232 case MSR_FEATURE_WORD:
3233 return g_strdup_printf("MSR(%02XH)",
3234 f->msr.index);
3237 return NULL;
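/*
 * report_unavailable_features:
 * Warn, for each bit set in @mask, that the current accelerator (host
 * CPUID-based or TCG) cannot provide that feature of feature word @w.
 */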
3240 static void report_unavailable_features(FeatureWord w, uint32_t mask)
3242 FeatureWordInfo *f = &feature_word_info[w];
3243 int i;
3244 char *feat_word_str;
3246 for (i = 0; i < 32; ++i) {
3247 if ((1UL << i) & mask) {
3248 feat_word_str = feature_word_description(f, i);
3249 warn_report("%s doesn't support requested feature: %s%s%s [bit %d]",
3250 accel_uses_host_cpuid() ? "host" : "TCG",
3251 feat_word_str,
3252 f->feat_names[i] ? "." : "",
3253 f->feat_names[i] ? f->feat_names[i] : "", i);
3254 g_free(feat_word_str);
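/*
 * The property accessors below operate on the CPUID version word
 * (CPUID[1].EAX): stepping is bits 3:0, model is bits 7:4 extended by bits
 * 19:16, and family is bits 11:8 extended by bits 27:20 when the base
 * family field is 0xf.  For example, Opteron_G4's family of 21 is encoded
 * as base family 0xf plus extended family 6 (0x00600f00 before the model
 * and stepping bits are merged in).
 */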
3259 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
3260 const char *name, void *opaque,
3261 Error **errp)
3263 X86CPU *cpu = X86_CPU(obj);
3264 CPUX86State *env = &cpu->env;
3265 int64_t value;
3267 value = (env->cpuid_version >> 8) & 0xf;
3268 if (value == 0xf) {
3269 value += (env->cpuid_version >> 20) & 0xff;
3271 visit_type_int(v, name, &value, errp);
3274 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
3275 const char *name, void *opaque,
3276 Error **errp)
3278 X86CPU *cpu = X86_CPU(obj);
3279 CPUX86State *env = &cpu->env;
3280 const int64_t min = 0;
3281 const int64_t max = 0xff + 0xf;
3282 Error *local_err = NULL;
3283 int64_t value;
3285 visit_type_int(v, name, &value, &local_err);
3286 if (local_err) {
3287 error_propagate(errp, local_err);
3288 return;
3290 if (value < min || value > max) {
3291 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3292 name ? name : "null", value, min, max);
3293 return;
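/* CPUID.01H:EAX keeps the family in bits 11:8; families above 0x0f are
 * encoded as a base family of 0xf plus an extended family in bits 27:20. */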
3296 env->cpuid_version &= ~0xff00f00;
3297 if (value > 0x0f) {
3298 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
3299 } else {
3300 env->cpuid_version |= value << 8;
3304 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
3305 const char *name, void *opaque,
3306 Error **errp)
3308 X86CPU *cpu = X86_CPU(obj);
3309 CPUX86State *env = &cpu->env;
3310 int64_t value;
3312 value = (env->cpuid_version >> 4) & 0xf;
3313 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
3314 visit_type_int(v, name, &value, errp);
3317 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
3318 const char *name, void *opaque,
3319 Error **errp)
3321 X86CPU *cpu = X86_CPU(obj);
3322 CPUX86State *env = &cpu->env;
3323 const int64_t min = 0;
3324 const int64_t max = 0xff;
3325 Error *local_err = NULL;
3326 int64_t value;
3328 visit_type_int(v, name, &value, &local_err);
3329 if (local_err) {
3330 error_propagate(errp, local_err);
3331 return;
3333 if (value < min || value > max) {
3334 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3335 name ? name : "null", value, min, max);
3336 return;
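/* The model number is split between bits 7:4 (model) and bits 19:16
 * (extended model) of CPUID.01H:EAX. */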
3339 env->cpuid_version &= ~0xf00f0;
3340 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
3343 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
3344 const char *name, void *opaque,
3345 Error **errp)
3347 X86CPU *cpu = X86_CPU(obj);
3348 CPUX86State *env = &cpu->env;
3349 int64_t value;
3351 value = env->cpuid_version & 0xf;
3352 visit_type_int(v, name, &value, errp);
3355 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
3356 const char *name, void *opaque,
3357 Error **errp)
3359 X86CPU *cpu = X86_CPU(obj);
3360 CPUX86State *env = &cpu->env;
3361 const int64_t min = 0;
3362 const int64_t max = 0xf;
3363 Error *local_err = NULL;
3364 int64_t value;
3366 visit_type_int(v, name, &value, &local_err);
3367 if (local_err) {
3368 error_propagate(errp, local_err);
3369 return;
3371 if (value < min || value > max) {
3372 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3373 name ? name : "null", value, min, max);
3374 return;
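/* The stepping occupies bits 3:0 of CPUID.01H:EAX. */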
3377 env->cpuid_version &= ~0xf;
3378 env->cpuid_version |= value & 0xf;
3381 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
3383 X86CPU *cpu = X86_CPU(obj);
3384 CPUX86State *env = &cpu->env;
3385 char *value;
3387 value = g_malloc(CPUID_VENDOR_SZ + 1);
3388 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
3389 env->cpuid_vendor3);
3390 return value;
3393 static void x86_cpuid_set_vendor(Object *obj, const char *value,
3394 Error **errp)
3396 X86CPU *cpu = X86_CPU(obj);
3397 CPUX86State *env = &cpu->env;
3398 int i;
3400 if (strlen(value) != CPUID_VENDOR_SZ) {
3401 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
3402 return;
3405 env->cpuid_vendor1 = 0;
3406 env->cpuid_vendor2 = 0;
3407 env->cpuid_vendor3 = 0;
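/* Pack the 12-character vendor string into the three CPUID.00H registers,
 * four bytes each in little-endian order: EBX, EDX, ECX. */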
3408 for (i = 0; i < 4; i++) {
3409 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
3410 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
3411 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
3415 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
3417 X86CPU *cpu = X86_CPU(obj);
3418 CPUX86State *env = &cpu->env;
3419 char *value;
3420 int i;
3422 value = g_malloc(48 + 1);
3423 for (i = 0; i < 48; i++) {
3424 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
3426 value[48] = '\0';
3427 return value;
3430 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
3431 Error **errp)
3433 X86CPU *cpu = X86_CPU(obj);
3434 CPUX86State *env = &cpu->env;
3435 int c, len, i;
3437 if (model_id == NULL) {
3438 model_id = "";
3440 len = strlen(model_id);
3441 memset(env->cpuid_model, 0, 48);
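/* The model-id string is reported by CPUID leaves 0x80000002..0x80000004,
 * 16 bytes per leaf, packed four characters per 32-bit word and NUL-padded. */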
3442 for (i = 0; i < 48; i++) {
3443 if (i >= len) {
3444 c = '\0';
3445 } else {
3446 c = (uint8_t)model_id[i];
3448 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
3452 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
3453 void *opaque, Error **errp)
3455 X86CPU *cpu = X86_CPU(obj);
3456 int64_t value;
3458 value = cpu->env.tsc_khz * 1000;
3459 visit_type_int(v, name, &value, errp);
3462 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
3463 void *opaque, Error **errp)
3465 X86CPU *cpu = X86_CPU(obj);
3466 const int64_t min = 0;
3467 const int64_t max = INT64_MAX;
3468 Error *local_err = NULL;
3469 int64_t value;
3471 visit_type_int(v, name, &value, &local_err);
3472 if (local_err) {
3473 error_propagate(errp, local_err);
3474 return;
3476 if (value < min || value > max) {
3477 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3478 name ? name : "null", value, min, max);
3479 return;
3482 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
3485 /* Generic getter for "feature-words" and "filtered-features" properties */
3486 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
3487 const char *name, void *opaque,
3488 Error **errp)
3490 uint32_t *array = (uint32_t *)opaque;
3491 FeatureWord w;
3492 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
3493 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
3494 X86CPUFeatureWordInfoList *list = NULL;
3496 for (w = 0; w < FEATURE_WORDS; w++) {
3497 FeatureWordInfo *wi = &feature_word_info[w];
3499 * We didn't have MSR features when "feature-words" was
3500 * introduced. Therefore, skip entries of other types.
3502 if (wi->type != CPUID_FEATURE_WORD) {
3503 continue;
3505 X86CPUFeatureWordInfo *qwi = &word_infos[w];
3506 qwi->cpuid_input_eax = wi->cpuid.eax;
3507 qwi->has_cpuid_input_ecx = wi->cpuid.needs_ecx;
3508 qwi->cpuid_input_ecx = wi->cpuid.ecx;
3509 qwi->cpuid_register = x86_reg_info_32[wi->cpuid.reg].qapi_enum;
3510 qwi->features = array[w];
3512 /* List will be in reverse order, but order shouldn't matter */
3513 list_entries[w].next = list;
3514 list_entries[w].value = &word_infos[w];
3515 list = &list_entries[w];
3518 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
3521 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
3522 void *opaque, Error **errp)
3524 X86CPU *cpu = X86_CPU(obj);
3525 int64_t value = cpu->hyperv_spinlock_attempts;
3527 visit_type_int(v, name, &value, errp);
3530 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
3531 void *opaque, Error **errp)
3533 const int64_t min = 0xFFF;
3534 const int64_t max = UINT_MAX;
3535 X86CPU *cpu = X86_CPU(obj);
3536 Error *err = NULL;
3537 int64_t value;
3539 visit_type_int(v, name, &value, &err);
3540 if (err) {
3541 error_propagate(errp, err);
3542 return;
3545 if (value < min || value > max) {
3546 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
3547 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
3548 object_get_typename(obj), name ? name : "null",
3549 value, min, max);
3550 return;
3552 cpu->hyperv_spinlock_attempts = value;
3555 static const PropertyInfo qdev_prop_spinlocks = {
3556 .name = "int",
3557 .get = x86_get_hv_spinlocks,
3558 .set = x86_set_hv_spinlocks,
3561 /* Convert all '_' in a feature string option name to '-', to make the feature
3562 * name conform to the QOM property naming rule, which uses '-' instead of '_'.
3564 static inline void feat2prop(char *s)
3566 while ((s = strchr(s, '_'))) {
3567 *s = '-';
3571 /* Return the feature property name for a feature flag bit */
3572 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
3574 /* XSAVE components are automatically enabled by other features,
3575 * so return the original feature name instead
3577 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
3578 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
3580 if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
3581 x86_ext_save_areas[comp].bits) {
3582 w = x86_ext_save_areas[comp].feature;
3583 bitnr = ctz32(x86_ext_save_areas[comp].bits);
3587 assert(bitnr < 32);
3588 assert(w < FEATURE_WORDS);
3589 return feature_word_info[w].feat_names[bitnr];
3592 /* Compatibility hack to maintain the legacy +feat/-feat semantics,
3593 * where +feat/-feat overrides any feature set with "feat=on" or plain
3594 * "feat", even if the latter is parsed after +feat/-feat
3595 * (i.e. "-x2apic,x2apic=on" results in x2apic being disabled)
3597 static GList *plus_features, *minus_features;
3599 static gint compare_string(gconstpointer a, gconstpointer b)
3601 return g_strcmp0(a, b);
3604 /* Parse "+feature,-feature,feature=foo" CPU feature string
3606 static void x86_cpu_parse_featurestr(const char *typename, char *features,
3607 Error **errp)
3609 char *featurestr; /* Single "key=value" string being parsed */
3610 static bool cpu_globals_initialized;
3611 bool ambiguous = false;
3613 if (cpu_globals_initialized) {
3614 return;
3616 cpu_globals_initialized = true;
3618 if (!features) {
3619 return;
3622 for (featurestr = strtok(features, ",");
3623 featurestr;
3624 featurestr = strtok(NULL, ",")) {
3625 const char *name;
3626 const char *val = NULL;
3627 char *eq = NULL;
3628 char num[32];
3629 GlobalProperty *prop;
3631 /* Compatibility syntax: */
3632 if (featurestr[0] == '+') {
3633 plus_features = g_list_append(plus_features,
3634 g_strdup(featurestr + 1));
3635 continue;
3636 } else if (featurestr[0] == '-') {
3637 minus_features = g_list_append(minus_features,
3638 g_strdup(featurestr + 1));
3639 continue;
3642 eq = strchr(featurestr, '=');
3643 if (eq) {
3644 *eq++ = 0;
3645 val = eq;
3646 } else {
3647 val = "on";
3650 feat2prop(featurestr);
3651 name = featurestr;
3653 if (g_list_find_custom(plus_features, name, compare_string)) {
3654 warn_report("Ambiguous CPU model string. "
3655 "Don't mix both \"+%s\" and \"%s=%s\"",
3656 name, name, val);
3657 ambiguous = true;
3659 if (g_list_find_custom(minus_features, name, compare_string)) {
3660 warn_report("Ambiguous CPU model string. "
3661 "Don't mix both \"-%s\" and \"%s=%s\"",
3662 name, name, val);
3663 ambiguous = true;
3666 /* Special case: */
3667 if (!strcmp(name, "tsc-freq")) {
3668 int ret;
3669 uint64_t tsc_freq;
3671 ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
3672 if (ret < 0 || tsc_freq > INT64_MAX) {
3673 error_setg(errp, "bad numerical value %s", val);
3674 return;
3676 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
3677 val = num;
3678 name = "tsc-frequency";
3681 prop = g_new0(typeof(*prop), 1);
3682 prop->driver = typename;
3683 prop->property = g_strdup(name);
3684 prop->value = g_strdup(val);
3685 qdev_prop_register_global(prop);
3688 if (ambiguous) {
3689 warn_report("Compatibility of ambiguous CPU model "
3690 "strings won't be kept on future QEMU versions");
3694 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
3695 static int x86_cpu_filter_features(X86CPU *cpu);
3697 /* Build a list with the name of all features on a feature word array */
3698 static void x86_cpu_list_feature_names(FeatureWordArray features,
3699 strList **feat_names)
3701 FeatureWord w;
3702 strList **next = feat_names;
3704 for (w = 0; w < FEATURE_WORDS; w++) {
3705 uint32_t filtered = features[w];
3706 int i;
3707 for (i = 0; i < 32; i++) {
3708 if (filtered & (1UL << i)) {
3709 strList *new = g_new0(strList, 1);
3710 new->value = g_strdup(x86_cpu_feature_name(w, i));
3711 *next = new;
3712 next = &new->next;
3718 static void x86_cpu_get_unavailable_features(Object *obj, Visitor *v,
3719 const char *name, void *opaque,
3720 Error **errp)
3722 X86CPU *xc = X86_CPU(obj);
3723 strList *result = NULL;
3725 x86_cpu_list_feature_names(xc->filtered_features, &result);
3726 visit_type_strList(v, "unavailable-features", &result, errp);
3729 /* Check for missing features that may prevent the CPU class from
3730 * running using the current machine and accelerator.
3732 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
3733 strList **missing_feats)
3735 X86CPU *xc;
3736 Error *err = NULL;
3737 strList **next = missing_feats;
3739 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
3740 strList *new = g_new0(strList, 1);
3741 new->value = g_strdup("kvm");
3742 *missing_feats = new;
3743 return;
3746 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
3748 x86_cpu_expand_features(xc, &err);
3749 if (err) {
3750 /* Errors at x86_cpu_expand_features should never happen,
3751 * but in case they do, just report the model as not
3752 * runnable at all using the "type" property.
3754 strList *new = g_new0(strList, 1);
3755 new->value = g_strdup("type");
3756 *next = new;
3757 next = &new->next;
3760 x86_cpu_filter_features(xc);
3762 x86_cpu_list_feature_names(xc->filtered_features, next);
3764 object_unref(OBJECT(xc));
3767 /* Print all CPUID feature names in the given feature list
3769 static void listflags(GList *features)
3771 size_t len = 0;
3772 GList *tmp;
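/* Print the names separated by spaces, wrapping at roughly 75 columns. */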
3774 for (tmp = features; tmp; tmp = tmp->next) {
3775 const char *name = tmp->data;
3776 if ((len + strlen(name) + 1) >= 75) {
3777 qemu_printf("\n");
3778 len = 0;
3780 qemu_printf("%s%s", len == 0 ? " " : " ", name);
3781 len += strlen(name) + 1;
3783 qemu_printf("\n");
3786 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
3787 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
3789 ObjectClass *class_a = (ObjectClass *)a;
3790 ObjectClass *class_b = (ObjectClass *)b;
3791 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
3792 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
3793 char *name_a, *name_b;
3794 int ret;
3796 if (cc_a->ordering != cc_b->ordering) {
3797 ret = cc_a->ordering - cc_b->ordering;
3798 } else {
3799 name_a = x86_cpu_class_get_model_name(cc_a);
3800 name_b = x86_cpu_class_get_model_name(cc_b);
3801 ret = strcmp(name_a, name_b);
3802 g_free(name_a);
3803 g_free(name_b);
3805 return ret;
3808 static GSList *get_sorted_cpu_model_list(void)
3810 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
3811 list = g_slist_sort(list, x86_cpu_list_compare);
3812 return list;
3815 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
3817 ObjectClass *oc = data;
3818 X86CPUClass *cc = X86_CPU_CLASS(oc);
3819 char *name = x86_cpu_class_get_model_name(cc);
3820 const char *desc = cc->model_description;
3821 if (!desc && cc->cpu_def) {
3822 desc = cc->cpu_def->model_id;
3825 qemu_printf("x86 %-20s %-48s\n", name, desc);
3826 g_free(name);
3829 /* list available CPU models and flags */
3830 void x86_cpu_list(void)
3832 int i, j;
3833 GSList *list;
3834 GList *names = NULL;
3836 qemu_printf("Available CPUs:\n");
3837 list = get_sorted_cpu_model_list();
3838 g_slist_foreach(list, x86_cpu_list_entry, NULL);
3839 g_slist_free(list);
3841 names = NULL;
3842 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
3843 FeatureWordInfo *fw = &feature_word_info[i];
3844 for (j = 0; j < 32; j++) {
3845 if (fw->feat_names[j]) {
3846 names = g_list_append(names, (gpointer)fw->feat_names[j]);
3851 names = g_list_sort(names, (GCompareFunc)strcmp);
3853 qemu_printf("\nRecognized CPUID flags:\n");
3854 listflags(names);
3855 qemu_printf("\n");
3856 g_list_free(names);
3859 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
3861 ObjectClass *oc = data;
3862 X86CPUClass *cc = X86_CPU_CLASS(oc);
3863 CpuDefinitionInfoList **cpu_list = user_data;
3864 CpuDefinitionInfoList *entry;
3865 CpuDefinitionInfo *info;
3867 info = g_malloc0(sizeof(*info));
3868 info->name = x86_cpu_class_get_model_name(cc);
3869 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
3870 info->has_unavailable_features = true;
3871 info->q_typename = g_strdup(object_class_get_name(oc));
3872 info->migration_safe = cc->migration_safe;
3873 info->has_migration_safe = true;
3874 info->q_static = cc->static_model;
3876 entry = g_malloc0(sizeof(*entry));
3877 entry->value = info;
3878 entry->next = *cpu_list;
3879 *cpu_list = entry;
3882 CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
3884 CpuDefinitionInfoList *cpu_list = NULL;
3885 GSList *list = get_sorted_cpu_model_list();
3886 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
3887 g_slist_free(list);
3888 return cpu_list;
3891 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
3892 bool migratable_only)
3894 FeatureWordInfo *wi = &feature_word_info[w];
3895 uint32_t r = 0;
3897 if (kvm_enabled()) {
3898 switch (wi->type) {
3899 case CPUID_FEATURE_WORD:
3900 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid.eax,
3901 wi->cpuid.ecx,
3902 wi->cpuid.reg);
3903 break;
3904 case MSR_FEATURE_WORD:
3905 r = kvm_arch_get_supported_msr_feature(kvm_state,
3906 wi->msr.index);
3907 break;
3909 } else if (hvf_enabled()) {
3910 if (wi->type != CPUID_FEATURE_WORD) {
3911 return 0;
3913 r = hvf_get_supported_cpuid(wi->cpuid.eax,
3914 wi->cpuid.ecx,
3915 wi->cpuid.reg);
3916 } else if (tcg_enabled()) {
3917 r = wi->tcg_features;
3918 } else {
3919 return ~0;
3921 if (migratable_only) {
3922 r &= x86_cpu_get_migratable_flags(w);
3924 return r;
3927 static void x86_cpu_report_filtered_features(X86CPU *cpu)
3929 FeatureWord w;
3931 for (w = 0; w < FEATURE_WORDS; w++) {
3932 report_unavailable_features(w, cpu->filtered_features[w]);
3936 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
3938 PropValue *pv;
3939 for (pv = props; pv->prop; pv++) {
3940 if (!pv->value) {
3941 continue;
3943 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
3944 &error_abort);
3948 /* Load data from X86CPUDefinition into a X86CPU object
3950 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
3952 CPUX86State *env = &cpu->env;
3953 const char *vendor;
3954 char host_vendor[CPUID_VENDOR_SZ + 1];
3955 FeatureWord w;
3957 /* NOTE: any property set by this function should be returned by
3958 * x86_cpu_static_props(), so static expansion of
3959 * query-cpu-model-expansion is always complete.
3962 /* CPU models only set _minimum_ values for level/xlevel: */
3963 object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
3964 object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
3966 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
3967 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
3968 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
3969 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
3970 for (w = 0; w < FEATURE_WORDS; w++) {
3971 env->features[w] = def->features[w];
3974 /* legacy-cache defaults to 'off' if CPU model provides cache info */
3975 cpu->legacy_cache = !def->cache_info;
3977 /* Special cases not set in the X86CPUDefinition structs: */
3978 /* TODO: in-kernel irqchip for hvf */
3979 if (kvm_enabled()) {
3980 if (!kvm_irqchip_in_kernel()) {
3981 x86_cpu_change_kvm_default("x2apic", "off");
3984 x86_cpu_apply_props(cpu, kvm_default_props);
3985 } else if (tcg_enabled()) {
3986 x86_cpu_apply_props(cpu, tcg_default_props);
3989 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
3991 /* sysenter isn't supported in compatibility mode on AMD,
3992 * syscall isn't supported in compatibility mode on Intel.
3993 * Normally we advertise the actual CPU vendor, but you can
3994 * override this using the 'vendor' property if you want to use
3995 * KVM's sysenter/syscall emulation in compatibility mode and
3996 * when doing cross vendor migration
3998 vendor = def->vendor;
3999 if (accel_uses_host_cpuid()) {
4000 uint32_t ebx = 0, ecx = 0, edx = 0;
4001 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
4002 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
4003 vendor = host_vendor;
4006 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
4010 #ifndef CONFIG_USER_ONLY
4011 /* Return a QDict containing keys for all properties that can be included
4012 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
4013 * must be included in the dictionary.
4015 static QDict *x86_cpu_static_props(void)
4017 FeatureWord w;
4018 int i;
4019 static const char *props[] = {
4020 "min-level",
4021 "min-xlevel",
4022 "family",
4023 "model",
4024 "stepping",
4025 "model-id",
4026 "vendor",
4027 "lmce",
4028 NULL,
4030 static QDict *d;
4032 if (d) {
4033 return d;
4036 d = qdict_new();
4037 for (i = 0; props[i]; i++) {
4038 qdict_put_null(d, props[i]);
4041 for (w = 0; w < FEATURE_WORDS; w++) {
4042 FeatureWordInfo *fi = &feature_word_info[w];
4043 int bit;
4044 for (bit = 0; bit < 32; bit++) {
4045 if (!fi->feat_names[bit]) {
4046 continue;
4048 qdict_put_null(d, fi->feat_names[bit]);
4052 return d;
4055 /* Add an entry to @props dict, with the value for property. */
4056 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
4058 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
4059 &error_abort);
4061 qdict_put_obj(props, prop, value);
4064 /* Convert CPU model data from X86CPU object to a property dictionary
4065 * that can recreate exactly the same CPU model.
4067 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
4069 QDict *sprops = x86_cpu_static_props();
4070 const QDictEntry *e;
4072 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
4073 const char *prop = qdict_entry_key(e);
4074 x86_cpu_expand_prop(cpu, props, prop);
4078 /* Convert CPU model data from X86CPU object to a property dictionary
4079 * that can recreate exactly the same CPU model, including every
4080 * writeable QOM property.
4082 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
4084 ObjectPropertyIterator iter;
4085 ObjectProperty *prop;
4087 object_property_iter_init(&iter, OBJECT(cpu));
4088 while ((prop = object_property_iter_next(&iter))) {
4089 /* skip read-only or write-only properties */
4090 if (!prop->get || !prop->set) {
4091 continue;
4094 /* "hotplugged" is the only property that is configurable
4095 * on the command-line but will be set differently on CPUs
4096 * created using "-cpu ... -smp ..." and by CPUs created
4097 * on the fly by x86_cpu_from_model() for querying. Skip it.
4099 if (!strcmp(prop->name, "hotplugged")) {
4100 continue;
4102 x86_cpu_expand_prop(cpu, props, prop->name);
4106 static void object_apply_props(Object *obj, QDict *props, Error **errp)
4108 const QDictEntry *prop;
4109 Error *err = NULL;
4111 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
4112 object_property_set_qobject(obj, qdict_entry_value(prop),
4113 qdict_entry_key(prop), &err);
4114 if (err) {
4115 break;
4119 error_propagate(errp, err);
4122 /* Create X86CPU object according to model+props specification */
4123 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
4125 X86CPU *xc = NULL;
4126 X86CPUClass *xcc;
4127 Error *err = NULL;
4129 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
4130 if (xcc == NULL) {
4131 error_setg(&err, "CPU model '%s' not found", model);
4132 goto out;
4135 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
4136 if (props) {
4137 object_apply_props(OBJECT(xc), props, &err);
4138 if (err) {
4139 goto out;
4143 x86_cpu_expand_features(xc, &err);
4144 if (err) {
4145 goto out;
4148 out:
4149 if (err) {
4150 error_propagate(errp, err);
4151 object_unref(OBJECT(xc));
4152 xc = NULL;
4154 return xc;
4157 CpuModelExpansionInfo *
4158 qmp_query_cpu_model_expansion(CpuModelExpansionType type,
4159 CpuModelInfo *model,
4160 Error **errp)
4162 X86CPU *xc = NULL;
4163 Error *err = NULL;
4164 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
4165 QDict *props = NULL;
4166 const char *base_name;
4168 xc = x86_cpu_from_model(model->name,
4169 model->has_props ?
4170 qobject_to(QDict, model->props) :
4171 NULL, &err);
4172 if (err) {
4173 goto out;
4176 props = qdict_new();
4177 ret->model = g_new0(CpuModelInfo, 1);
4178 ret->model->props = QOBJECT(props);
4179 ret->model->has_props = true;
4181 switch (type) {
4182 case CPU_MODEL_EXPANSION_TYPE_STATIC:
4183 /* Static expansion will be based on "base" only */
4184 base_name = "base";
4185 x86_cpu_to_dict(xc, props);
4186 break;
4187 case CPU_MODEL_EXPANSION_TYPE_FULL:
4188 /* As we don't return every single property, full expansion needs
4189 * to keep the original model name+props, and add extra
4190 * properties on top of that.
4192 base_name = model->name;
4193 x86_cpu_to_dict_full(xc, props);
4194 break;
4195 default:
4196 error_setg(&err, "Unsupported expansion type");
4197 goto out;
4200 x86_cpu_to_dict(xc, props);
4202 ret->model->name = g_strdup(base_name);
4204 out:
4205 object_unref(OBJECT(xc));
4206 if (err) {
4207 error_propagate(errp, err);
4208 qapi_free_CpuModelExpansionInfo(ret);
4209 ret = NULL;
4211 return ret;
4213 #endif /* !CONFIG_USER_ONLY */
4215 static gchar *x86_gdb_arch_name(CPUState *cs)
4217 #ifdef TARGET_X86_64
4218 return g_strdup("i386:x86-64");
4219 #else
4220 return g_strdup("i386");
4221 #endif
4224 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
4226 X86CPUDefinition *cpudef = data;
4227 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4229 xcc->cpu_def = cpudef;
4230 xcc->migration_safe = true;
4233 static void x86_register_cpudef_type(X86CPUDefinition *def)
4235 char *typename = x86_cpu_type_name(def->name);
4236 TypeInfo ti = {
4237 .name = typename,
4238 .parent = TYPE_X86_CPU,
4239 .class_init = x86_cpu_cpudef_class_init,
4240 .class_data = def,
4243 /* AMD aliases are handled at runtime based on CPUID vendor, so
4244 * they shouldn't be set on the CPU model table.
4246 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
4247 /* catch mistakes instead of silently truncating model_id when too long */
4248 assert(def->model_id && strlen(def->model_id) <= 48);
4251 type_register(&ti);
4252 g_free(typename);
4255 #if !defined(CONFIG_USER_ONLY)
4257 void cpu_clear_apic_feature(CPUX86State *env)
4259 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
4262 #endif /* !CONFIG_USER_ONLY */
4264 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
4265 uint32_t *eax, uint32_t *ebx,
4266 uint32_t *ecx, uint32_t *edx)
4268 X86CPU *cpu = env_archcpu(env);
4269 CPUState *cs = env_cpu(env);
4270 uint32_t die_offset;
4271 uint32_t limit;
4272 uint32_t signature[3];
4274 /* Calculate & apply limits for different index ranges */
4275 if (index >= 0xC0000000) {
4276 limit = env->cpuid_xlevel2;
4277 } else if (index >= 0x80000000) {
4278 limit = env->cpuid_xlevel;
4279 } else if (index >= 0x40000000) {
4280 limit = 0x40000001;
4281 } else {
4282 limit = env->cpuid_level;
4285 if (index > limit) {
4286 /* Intel documentation states that invalid EAX input will
4287 * return the same information as EAX=cpuid_level
4288 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
4290 index = env->cpuid_level;
4293 switch(index) {
4294 case 0:
4295 *eax = env->cpuid_level;
4296 *ebx = env->cpuid_vendor1;
4297 *edx = env->cpuid_vendor2;
4298 *ecx = env->cpuid_vendor3;
4299 break;
4300 case 1:
4301 *eax = env->cpuid_version;
4302 *ebx = (cpu->apic_id << 24) |
4303 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
4304 *ecx = env->features[FEAT_1_ECX];
4305 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
4306 *ecx |= CPUID_EXT_OSXSAVE;
4308 *edx = env->features[FEAT_1_EDX];
4309 if (cs->nr_cores * cs->nr_threads > 1) {
4310 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
4311 *edx |= CPUID_HT;
4313 break;
4314 case 2:
4315 /* cache info: needed for Pentium Pro compatibility */
4316 if (cpu->cache_info_passthrough) {
4317 host_cpuid(index, 0, eax, ebx, ecx, edx);
4318 break;
4320 *eax = 1; /* Number of CPUID[EAX=2] calls required */
4321 *ebx = 0;
4322 if (!cpu->enable_l3_cache) {
4323 *ecx = 0;
4324 } else {
4325 *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache);
4327 *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) |
4328 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) |
4329 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
4330 break;
4331 case 4:
4332 /* cache info: needed for Core compatibility */
4333 if (cpu->cache_info_passthrough) {
4334 host_cpuid(index, count, eax, ebx, ecx, edx);
4335 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
4336 *eax &= ~0xFC000000;
4337 if ((*eax & 31) && cs->nr_cores > 1) {
4338 *eax |= (cs->nr_cores - 1) << 26;
4340 } else {
4341 *eax = 0;
4342 switch (count) {
4343 case 0: /* L1 dcache info */
4344 encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
4345 1, cs->nr_cores,
4346 eax, ebx, ecx, edx);
4347 break;
4348 case 1: /* L1 icache info */
4349 encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
4350 1, cs->nr_cores,
4351 eax, ebx, ecx, edx);
4352 break;
4353 case 2: /* L2 cache info */
4354 encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
4355 cs->nr_threads, cs->nr_cores,
4356 eax, ebx, ecx, edx);
4357 break;
4358 case 3: /* L3 cache info */
4359 die_offset = apicid_die_offset(env->nr_dies,
4360 cs->nr_cores, cs->nr_threads);
4361 if (cpu->enable_l3_cache) {
4362 encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
4363 (1 << die_offset), cs->nr_cores,
4364 eax, ebx, ecx, edx);
4365 break;
4367 /* fall through */
4368 default: /* end of info */
4369 *eax = *ebx = *ecx = *edx = 0;
4370 break;
4373 break;
4374 case 5:
4375 /* MONITOR/MWAIT Leaf */
4376 *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */
4377 *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */
4378 *ecx = cpu->mwait.ecx; /* flags */
4379 *edx = cpu->mwait.edx; /* mwait substates */
4380 break;
4381 case 6:
4382 /* Thermal and Power Leaf */
4383 *eax = env->features[FEAT_6_EAX];
4384 *ebx = 0;
4385 *ecx = 0;
4386 *edx = 0;
4387 break;
4388 case 7:
4389 /* Structured Extended Feature Flags Enumeration Leaf */
4390 if (count == 0) {
4391 *eax = 0; /* Maximum ECX value for sub-leaves */
4392 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
4393 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
4394 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
4395 *ecx |= CPUID_7_0_ECX_OSPKE;
4397 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
4398 } else {
4399 *eax = 0;
4400 *ebx = 0;
4401 *ecx = 0;
4402 *edx = 0;
4404 break;
4405 case 9:
4406 /* Direct Cache Access Information Leaf */
4407 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
4408 *ebx = 0;
4409 *ecx = 0;
4410 *edx = 0;
4411 break;
4412 case 0xA:
4413 /* Architectural Performance Monitoring Leaf */
4414 if (kvm_enabled() && cpu->enable_pmu) {
4415 KVMState *s = cs->kvm_state;
4417 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
4418 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
4419 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
4420 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
4421 } else if (hvf_enabled() && cpu->enable_pmu) {
4422 *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
4423 *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
4424 *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
4425 *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
4426 } else {
4427 *eax = 0;
4428 *ebx = 0;
4429 *ecx = 0;
4430 *edx = 0;
4432 break;
4433 case 0xB:
4434 /* Extended Topology Enumeration Leaf */
4435 if (!cpu->enable_cpuid_0xb) {
4436 *eax = *ebx = *ecx = *edx = 0;
4437 break;
4440 *ecx = count & 0xff;
4441 *edx = cpu->apic_id;
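/* For each sub-leaf: EAX[4:0] is the APIC ID shift to the next topology
 * level, EBX[15:0] the number of logical processors at this level, and
 * ECX[15:8] the level type. */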
4443 switch (count) {
4444 case 0:
4445 *eax = apicid_core_offset(env->nr_dies,
4446 cs->nr_cores, cs->nr_threads);
4447 *ebx = cs->nr_threads;
4448 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
4449 break;
4450 case 1:
4451 *eax = apicid_pkg_offset(env->nr_dies,
4452 cs->nr_cores, cs->nr_threads);
4453 *ebx = cs->nr_cores * cs->nr_threads;
4454 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
4455 break;
4456 default:
4457 *eax = 0;
4458 *ebx = 0;
4459 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
4462 assert(!(*eax & ~0x1f));
4463 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
4464 break;
4465 case 0xD: {
4466 /* Processor Extended State */
4467 *eax = 0;
4468 *ebx = 0;
4469 *ecx = 0;
4470 *edx = 0;
4471 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
4472 break;
4475 if (count == 0) {
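/* Sub-leaf 0: EAX/EDX report the supported XSAVE component bitmap, ECX the
 * save area size for all supported components, and EBX the size for the
 * components currently enabled in XCR0. */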
4476 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
4477 *eax = env->features[FEAT_XSAVE_COMP_LO];
4478 *edx = env->features[FEAT_XSAVE_COMP_HI];
4479 *ebx = xsave_area_size(env->xcr0);
4480 } else if (count == 1) {
4481 *eax = env->features[FEAT_XSAVE];
4482 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
4483 if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
4484 const ExtSaveArea *esa = &x86_ext_save_areas[count];
4485 *eax = esa->size;
4486 *ebx = esa->offset;
4489 break;
4491 case 0x14: {
4492 /* Intel Processor Trace Enumeration */
4493 *eax = 0;
4494 *ebx = 0;
4495 *ecx = 0;
4496 *edx = 0;
4497 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
4498 !kvm_enabled()) {
4499 break;
4502 if (count == 0) {
4503 *eax = INTEL_PT_MAX_SUBLEAF;
4504 *ebx = INTEL_PT_MINIMAL_EBX;
4505 *ecx = INTEL_PT_MINIMAL_ECX;
4506 } else if (count == 1) {
4507 *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
4508 *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
4510 break;
4512 case 0x40000000:
4514 * CPUID code in kvm_arch_init_vcpu() ignores stuff
4515 * set here, but we restrict it to TCG nonetheless.
4517 if (tcg_enabled() && cpu->expose_tcg) {
4518 memcpy(signature, "TCGTCGTCGTCG", 12);
4519 *eax = 0x40000001;
4520 *ebx = signature[0];
4521 *ecx = signature[1];
4522 *edx = signature[2];
4523 } else {
4524 *eax = 0;
4525 *ebx = 0;
4526 *ecx = 0;
4527 *edx = 0;
4529 break;
4530 case 0x40000001:
4531 *eax = 0;
4532 *ebx = 0;
4533 *ecx = 0;
4534 *edx = 0;
4535 break;
4536 case 0x80000000:
4537 *eax = env->cpuid_xlevel;
4538 *ebx = env->cpuid_vendor1;
4539 *edx = env->cpuid_vendor2;
4540 *ecx = env->cpuid_vendor3;
4541 break;
4542 case 0x80000001:
4543 *eax = env->cpuid_version;
4544 *ebx = 0;
4545 *ecx = env->features[FEAT_8000_0001_ECX];
4546 *edx = env->features[FEAT_8000_0001_EDX];
4548 /* The Linux kernel checks for the CMPLegacy bit and
4549 * discards multiple thread information if it is set.
4550 * So don't set it here for Intel to make Linux guests happy.
4552 if (cs->nr_cores * cs->nr_threads > 1) {
4553 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
4554 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
4555 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
4556 *ecx |= 1 << 1; /* CmpLegacy bit */
4559 break;
4560 case 0x80000002:
4561 case 0x80000003:
4562 case 0x80000004:
4563 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
4564 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
4565 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
4566 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
4567 break;
4568 case 0x80000005:
4569 /* cache info (L1 cache) */
4570 if (cpu->cache_info_passthrough) {
4571 host_cpuid(index, 0, eax, ebx, ecx, edx);
4572 break;
4574 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
4575 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
4576 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
4577 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
4578 *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache);
4579 *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache);
4580 break;
4581 case 0x80000006:
4582 /* cache info (L2 cache) */
4583 if (cpu->cache_info_passthrough) {
4584 host_cpuid(index, 0, eax, ebx, ecx, edx);
4585 break;
4587 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
4588 (L2_DTLB_2M_ENTRIES << 16) | \
4589 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
4590 (L2_ITLB_2M_ENTRIES);
4591 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
4592 (L2_DTLB_4K_ENTRIES << 16) | \
4593 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
4594 (L2_ITLB_4K_ENTRIES);
4595 encode_cache_cpuid80000006(env->cache_info_amd.l2_cache,
4596 cpu->enable_l3_cache ?
4597 env->cache_info_amd.l3_cache : NULL,
4598 ecx, edx);
4599 break;
4600 case 0x80000007:
4601 *eax = 0;
4602 *ebx = 0;
4603 *ecx = 0;
4604 *edx = env->features[FEAT_8000_0007_EDX];
4605 break;
4606 case 0x80000008:
4607 /* virtual & phys address size in low 2 bytes. */
4608 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
4609 /* 64 bit processor */
4610 *eax = cpu->phys_bits; /* configurable physical bits */
4611 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
4612 *eax |= 0x00003900; /* 57 bits virtual */
4613 } else {
4614 *eax |= 0x00003000; /* 48 bits virtual */
4616 } else {
4617 *eax = cpu->phys_bits;
4619 *ebx = env->features[FEAT_8000_0008_EBX];
4620 *ecx = 0;
4621 *edx = 0;
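/* ECX[7:0]: number of logical processors in the package, minus one. */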
4622 if (cs->nr_cores * cs->nr_threads > 1) {
4623 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
4625 break;
4626 case 0x8000000A:
4627 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
4628 *eax = 0x00000001; /* SVM Revision */
4629 *ebx = 0x00000010; /* nr of ASIDs */
4630 *ecx = 0;
4631 *edx = env->features[FEAT_SVM]; /* optional features */
4632 } else {
4633 *eax = 0;
4634 *ebx = 0;
4635 *ecx = 0;
4636 *edx = 0;
4638 break;
4639 case 0x8000001D:
4640 *eax = 0;
4641 if (cpu->cache_info_passthrough) {
4642 host_cpuid(index, count, eax, ebx, ecx, edx);
4643 break;
4645 switch (count) {
4646 case 0: /* L1 dcache info */
4647 encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, cs,
4648 eax, ebx, ecx, edx);
4649 break;
4650 case 1: /* L1 icache info */
4651 encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, cs,
4652 eax, ebx, ecx, edx);
4653 break;
4654 case 2: /* L2 cache info */
4655 encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, cs,
4656 eax, ebx, ecx, edx);
4657 break;
4658 case 3: /* L3 cache info */
4659 encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, cs,
4660 eax, ebx, ecx, edx);
4661 break;
4662 default: /* end of info */
4663 *eax = *ebx = *ecx = *edx = 0;
4664 break;
4666 break;
4667 case 0x8000001E:
4668 assert(cpu->core_id <= 255);
4669 encode_topo_cpuid8000001e(cs, cpu,
4670 eax, ebx, ecx, edx);
4671 break;
4672 case 0xC0000000:
4673 *eax = env->cpuid_xlevel2;
4674 *ebx = 0;
4675 *ecx = 0;
4676 *edx = 0;
4677 break;
4678 case 0xC0000001:
4679 /* Support for VIA CPU's CPUID instruction */
4680 *eax = env->cpuid_version;
4681 *ebx = 0;
4682 *ecx = 0;
4683 *edx = env->features[FEAT_C000_0001_EDX];
4684 break;
4685 case 0xC0000002:
4686 case 0xC0000003:
4687 case 0xC0000004:
4688 /* Reserved for future use; currently filled with zero */
4689 *eax = 0;
4690 *ebx = 0;
4691 *ecx = 0;
4692 *edx = 0;
4693 break;
4694 case 0x8000001F:
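/* AMD memory encryption leaf: EAX bit 1 advertises SEV, EBX[5:0] the C-bit
 * position, and EBX[11:6] the number of physical address bits reduced when
 * SEV is enabled. */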
4695 *eax = sev_enabled() ? 0x2 : 0;
4696 *ebx = sev_get_cbit_position();
4697 *ebx |= sev_get_reduced_phys_bits() << 6;
4698 *ecx = 0;
4699 *edx = 0;
4700 break;
4701 default:
4702 /* reserved values: zero */
4703 *eax = 0;
4704 *ebx = 0;
4705 *ecx = 0;
4706 *edx = 0;
4707 break;
4711 /* CPUClass::reset() */
4712 static void x86_cpu_reset(CPUState *s)
4714 X86CPU *cpu = X86_CPU(s);
4715 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
4716 CPUX86State *env = &cpu->env;
4717 target_ulong cr4;
4718 uint64_t xcr0;
4719 int i;
4721 xcc->parent_reset(s);
4723 memset(env, 0, offsetof(CPUX86State, end_reset_fields));
4725 env->old_exception = -1;
4727 /* init to reset state */
4729 env->hflags2 |= HF2_GIF_MASK;
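/* CR0 reset value: CD, NW and ET set; paging and protection disabled. */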
4731 cpu_x86_update_cr0(env, 0x60000010);
4732 env->a20_mask = ~0x0;
4733 env->smbase = 0x30000;
4734 env->msr_smi_count = 0;
4736 env->idt.limit = 0xffff;
4737 env->gdt.limit = 0xffff;
4738 env->ldt.limit = 0xffff;
4739 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
4740 env->tr.limit = 0xffff;
4741 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
4743 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
4744 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
4745 DESC_R_MASK | DESC_A_MASK);
4746 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
4747 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4748 DESC_A_MASK);
4749 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
4750 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4751 DESC_A_MASK);
4752 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
4753 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4754 DESC_A_MASK);
4755 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
4756 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4757 DESC_A_MASK);
4758 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
4759 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4760 DESC_A_MASK);
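/* Execution starts at the architectural reset vector: CS base 0xFFFF0000
 * plus EIP 0xFFF0, i.e. physical address 0xFFFFFFF0. */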
4762 env->eip = 0xfff0;
4763 env->regs[R_EDX] = env->cpuid_version;
4765 env->eflags = 0x2;
4767 /* FPU init */
4768 for (i = 0; i < 8; i++) {
4769 env->fptags[i] = 1;
4771 cpu_set_fpuc(env, 0x37f);
4773 env->mxcsr = 0x1f80;
4774 /* All units are in INIT state. */
4775 env->xstate_bv = 0;
4777 env->pat = 0x0007040600070406ULL;
4778 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
4779 if (env->features[FEAT_1_ECX] & CPUID_EXT_MONITOR) {
4780 env->msr_ia32_misc_enable |= MSR_IA32_MISC_ENABLE_MWAIT;
4783 memset(env->dr, 0, sizeof(env->dr));
4784 env->dr[6] = DR6_FIXED_1;
4785 env->dr[7] = DR7_FIXED_1;
4786 cpu_breakpoint_remove_all(s, BP_CPU);
4787 cpu_watchpoint_remove_all(s, BP_CPU);
4789 cr4 = 0;
4790 xcr0 = XSTATE_FP_MASK;
4792 #ifdef CONFIG_USER_ONLY
4793 /* Enable all the features for user-mode. */
4794 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
4795 xcr0 |= XSTATE_SSE_MASK;
4797 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
4798 const ExtSaveArea *esa = &x86_ext_save_areas[i];
4799 if (env->features[esa->feature] & esa->bits) {
4800 xcr0 |= 1ull << i;
4804 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
4805 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
4807 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
4808 cr4 |= CR4_FSGSBASE_MASK;
4810 #endif
4812 env->xcr0 = xcr0;
4813 cpu_x86_update_cr4(env, cr4);
4816 * SDM 11.11.5 requires:
4817 * - IA32_MTRR_DEF_TYPE MSR.E = 0
4818 * - IA32_MTRR_PHYSMASKn.V = 0
4819 * All other bits are undefined. For simplification, zero it all.
4821 env->mtrr_deftype = 0;
4822 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
4823 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
4825 env->interrupt_injected = -1;
4826 env->exception_nr = -1;
4827 env->exception_pending = 0;
4828 env->exception_injected = 0;
4829 env->exception_has_payload = false;
4830 env->exception_payload = 0;
4831 env->nmi_injected = false;
4832 #if !defined(CONFIG_USER_ONLY)
4833 /* We hard-wire the BSP to the first CPU. */
4834 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
4836 s->halted = !cpu_is_bsp(cpu);
4838 if (kvm_enabled()) {
4839 kvm_arch_reset_vcpu(cpu);
4841 else if (hvf_enabled()) {
4842 hvf_reset_vcpu(s);
4844 #endif
4847 #ifndef CONFIG_USER_ONLY
4848 bool cpu_is_bsp(X86CPU *cpu)
4850 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
4853 /* TODO: remove me, when reset over QOM tree is implemented */
4854 static void x86_cpu_machine_reset_cb(void *opaque)
4856 X86CPU *cpu = opaque;
4857 cpu_reset(CPU(cpu));
4859 #endif
4861 static void mce_init(X86CPU *cpu)
4863 CPUX86State *cenv = &cpu->env;
4864 unsigned int bank;
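/* Expose MCE/MCA only for family >= 6 models that advertise both feature
 * bits; every bank reports the default capability/control mask. */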
4866 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
4867 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
4868 (CPUID_MCE | CPUID_MCA)) {
4869 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
4870 (cpu->enable_lmce ? MCG_LMCE_P : 0);
4871 cenv->mcg_ctl = ~(uint64_t)0;
4872 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
4873 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
4878 #ifndef CONFIG_USER_ONLY
4879 APICCommonClass *apic_get_class(void)
4881 const char *apic_type = "apic";
4883 /* TODO: in-kernel irqchip for hvf */
4884 if (kvm_apic_in_kernel()) {
4885 apic_type = "kvm-apic";
4886 } else if (xen_enabled()) {
4887 apic_type = "xen-apic";
4890 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
4893 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
4895 APICCommonState *apic;
4896 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());
4898 cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));
4900 object_property_add_child(OBJECT(cpu), "lapic",
4901 OBJECT(cpu->apic_state), &error_abort);
4902 object_unref(OBJECT(cpu->apic_state));
4904 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
4905 /* TODO: convert to link<> */
4906 apic = APIC_COMMON(cpu->apic_state);
4907 apic->cpu = cpu;
4908 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
4911 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
4913 APICCommonState *apic;
4914 static bool apic_mmio_map_once;
4916 if (cpu->apic_state == NULL) {
4917 return;
4919 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
4920 errp);
4922 /* Map APIC MMIO area */
4923 apic = APIC_COMMON(cpu->apic_state);
4924 if (!apic_mmio_map_once) {
4925 memory_region_add_subregion_overlap(get_system_memory(),
4926 apic->apicbase &
4927 MSR_IA32_APICBASE_BASE,
4928 &apic->io_memory,
4929 0x1000);
4930 apic_mmio_map_once = true;
4934 static void x86_cpu_machine_done(Notifier *n, void *unused)
4936 X86CPU *cpu = container_of(n, X86CPU, machine_done);
4937 MemoryRegion *smram =
4938 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
4940 if (smram) {
4941 cpu->smram = g_new(MemoryRegion, 1);
4942 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
4943 smram, 0, 1ull << 32);
4944 memory_region_set_enabled(cpu->smram, true);
4945 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
4948 #else
4949 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
4952 #endif
4954 /* Note: Only safe for use on x86(-64) hosts */
4955 static uint32_t x86_host_phys_bits(void)
4957 uint32_t eax;
4958 uint32_t host_phys_bits;
4960 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
4961 if (eax >= 0x80000008) {
4962 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
4963 /* Note: According to AMD doc 25481 rev 2.34 there is a field
4964 * at 23:16 that can specify the maximum physical address bits for
4965 * the guest, overriding this value; but I've not seen
4966 * anything with that set.
4968 host_phys_bits = eax & 0xff;
4969 } else {
4970 /* It's an odd 64-bit machine that doesn't have the leaf for
4971 * physical address bits; fall back to 36, which is what most
4972 * older Intel CPUs use.
4974 host_phys_bits = 36;
4977 return host_phys_bits;
4980 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
4982 if (*min < value) {
4983 *min = value;
4987 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
4988 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
4990 CPUX86State *env = &cpu->env;
4991 FeatureWordInfo *fi = &feature_word_info[w];
4992 uint32_t eax = fi->cpuid.eax;
4993 uint32_t region = eax & 0xF0000000;
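/* The top nibble of the leaf number selects which maximum-level field
 * (level, xlevel or xlevel2) must be raised to cover it. */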
4995 assert(feature_word_info[w].type == CPUID_FEATURE_WORD);
4996 if (!env->features[w]) {
4997 return;
5000 switch (region) {
5001 case 0x00000000:
5002 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
5003 break;
5004 case 0x80000000:
5005 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
5006 break;
5007 case 0xC0000000:
5008 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
5009 break;
5013 /* Calculate XSAVE components based on the configured CPU feature flags */
5014 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
5016 CPUX86State *env = &cpu->env;
5017 int i;
5018 uint64_t mask;
5020 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
5021 return;
5024 mask = 0;
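/* Collect every XSAVE state component whose controlling feature bit is set;
 * the resulting mask becomes CPUID.0DH:EAX/EDX (FEAT_XSAVE_COMP_LO/HI). */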
5025 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
5026 const ExtSaveArea *esa = &x86_ext_save_areas[i];
5027 if (env->features[esa->feature] & esa->bits) {
5028 mask |= (1ULL << i);
5032 env->features[FEAT_XSAVE_COMP_LO] = mask;
5033 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
5036 /***** Steps involved in loading and filtering CPUID data
5038 * When initializing and realizing a CPU object, the steps
5039 * involved in setting up CPUID data are:
5041 * 1) Loading CPU model definition (X86CPUDefinition). This is
5042 * implemented by x86_cpu_load_def() and should be completely
5043 * transparent, as it is done automatically by instance_init.
5044 * No code should need to look at X86CPUDefinition structs
5045 * outside instance_init.
5047 * 2) CPU expansion. This is done by realize before CPUID
5048 * filtering, and will make sure host/accelerator data is
5049 * loaded for CPU models that depend on host capabilities
5050 * (e.g. "host"). Done by x86_cpu_expand_features().
5052 * 3) CPUID filtering. This initializes extra data related to
5053 * CPUID, and checks if the host supports all capabilities
5054 * required by the CPU. Runnability of a CPU model is
5055 * determined at this step. Done by x86_cpu_filter_features().
5057 * Some operations don't require all steps to be performed.
5058 * More precisely:
5060 * - CPU instance creation (instance_init) will run only CPU
5061 * model loading. CPU expansion can't run at instance_init-time
5062 * because host/accelerator data may not be available yet.
5063 * - CPU realization will perform both CPU model expansion and CPUID
5064 * filtering, and return an error in case one of them fails.
5065 * - query-cpu-definitions needs to run all 3 steps. It needs
5066 * to run CPUID filtering, as the 'unavailable-features'
5067 * field is set based on the filtering results.
5068 * - The query-cpu-model-expansion QMP command only needs to run
5069 * CPU model loading and CPU expansion. It should not filter
5070 * any CPUID data based on host capabilities.
5073 /* Expand CPU configuration data, based on configured features
5074 * and host/accelerator capabilities when appropriate.
5076 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
5078 CPUX86State *env = &cpu->env;
5079 FeatureWord w;
5080 GList *l;
5081 Error *local_err = NULL;
5083 /* TODO: Now cpu->max_features doesn't overwrite features
5084 * set using QOM properties, and we can convert
5085 * plus_features & minus_features to global properties
5086 * inside x86_cpu_parse_featurestr() too.
5088 if (cpu->max_features) {
5089 for (w = 0; w < FEATURE_WORDS; w++) {
5090 /* Override only features that weren't set explicitly
5091 * by the user.
5093 env->features[w] |=
5094 x86_cpu_get_supported_feature_word(w, cpu->migratable) &
5095 ~env->user_features[w] & \
5096 ~feature_word_info[w].no_autoenable_flags;
5100 for (l = plus_features; l; l = l->next) {
5101 const char *prop = l->data;
5102 object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
5103 if (local_err) {
5104 goto out;
5108 for (l = minus_features; l; l = l->next) {
5109 const char *prop = l->data;
5110 object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
5111 if (local_err) {
5112 goto out;
5116 if (!kvm_enabled() || !cpu->expose_kvm) {
5117 env->features[FEAT_KVM] = 0;
5120 x86_cpu_enable_xsave_components(cpu);
5122 /* CPUID[EAX=7,ECX=0].EBX has always increased the level automatically: */
5123 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
5124 if (cpu->full_cpuid_auto_level) {
5125 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
5126 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
5127 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
5128 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
5129 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
5130 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
5131 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
5132 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
5133 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
5134 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
5135 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
5137 /* Intel Processor Trace requires CPUID[0x14] */
5138 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
5139 kvm_enabled() && cpu->intel_pt_auto_level) {
5140 x86_cpu_adjust_level(cpu, &cpu->env.cpuid_min_level, 0x14);
5143 /* SVM requires CPUID[0x8000000A] */
5144 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
5145 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
5148 /* SEV requires CPUID[0x8000001F] */
5149 if (sev_enabled()) {
5150 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
5154 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
5155 if (env->cpuid_level == UINT32_MAX) {
5156 env->cpuid_level = env->cpuid_min_level;
5158 if (env->cpuid_xlevel == UINT32_MAX) {
5159 env->cpuid_xlevel = env->cpuid_min_xlevel;
5161 if (env->cpuid_xlevel2 == UINT32_MAX) {
5162 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
5165 out:
5166 if (local_err != NULL) {
5167 error_propagate(errp, local_err);
5172 * Finishes initialization of CPUID data, filters CPU feature
5173 * words based on host availability of each feature.
5175 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
5177 static int x86_cpu_filter_features(X86CPU *cpu)
5179 CPUX86State *env = &cpu->env;
5180 FeatureWord w;
5181 int rv = 0;
5183 for (w = 0; w < FEATURE_WORDS; w++) {
5184 uint32_t host_feat =
5185 x86_cpu_get_supported_feature_word(w, false);
5186 uint32_t requested_features = env->features[w];
5187 env->features[w] &= host_feat;
5188 cpu->filtered_features[w] = requested_features & ~env->features[w];
5189 if (cpu->filtered_features[w]) {
5190 rv = 1;
5194 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
5195 kvm_enabled()) {
5196 KVMState *s = CPU(cpu)->kvm_state;
5197 uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
5198 uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
5199 uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
5200 uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
5201 uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);
5203 if (!eax_0 ||
5204 ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
5205 ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
5206 ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
5207 ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
5208 INTEL_PT_ADDR_RANGES_NUM) ||
5209 ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
5210 (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
5211 (ecx_0 & INTEL_PT_IP_LIP)) {
5213 * Processor Trace capabilities aren't configurable, so if the
5214 * host can't emulate the capabilities we report on
5215 * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
5217 env->features[FEAT_7_0_EBX] &= ~CPUID_7_0_EBX_INTEL_PT;
5218 cpu->filtered_features[FEAT_7_0_EBX] |= CPUID_7_0_EBX_INTEL_PT;
5219 rv = 1;
5223 return rv;
5226 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
5228 CPUState *cs = CPU(dev);
5229 X86CPU *cpu = X86_CPU(dev);
5230 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
5231 CPUX86State *env = &cpu->env;
5232 Error *local_err = NULL;
5233 static bool ht_warned;
5235 if (xcc->host_cpuid_required) {
5236 if (!accel_uses_host_cpuid()) {
5237 char *name = x86_cpu_class_get_model_name(xcc);
5238 error_setg(&local_err, "CPU model '%s' requires KVM", name);
5239 g_free(name);
5240 goto out;
5243 if (enable_cpu_pm) {
5244 host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx,
5245 &cpu->mwait.ecx, &cpu->mwait.edx);
5246 env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR;
5250 /* mwait extended info: needed for Core compatibility */
5251 /* We always wake on interrupt even if host does not have the capability */
5252 cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
5254 if (cpu->apic_id == UNASSIGNED_APIC_ID) {
5255 error_setg(errp, "apic-id property was not initialized properly");
5256 return;
5259 x86_cpu_expand_features(cpu, &local_err);
5260 if (local_err) {
5261 goto out;
5264 if (x86_cpu_filter_features(cpu) &&
5265 (cpu->check_cpuid || cpu->enforce_cpuid)) {
5266 x86_cpu_report_filtered_features(cpu);
5267 if (cpu->enforce_cpuid) {
5268 error_setg(&local_err,
5269 accel_uses_host_cpuid() ?
5270 "Host doesn't support requested features" :
5271 "TCG doesn't support requested features");
5272 goto out;
5276 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
5277 * CPUID[1].EDX.
5279 if (IS_AMD_CPU(env)) {
5280 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
5281 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
5282 & CPUID_EXT2_AMD_ALIASES);
5285 /* For 64-bit systems, think about the number of physical bits to present.
5286 * Ideally this should be the same as the host; anything other than matching
5287 * the host can cause incorrect guest behaviour.
5288 * QEMU used to pick the magic value of 40 bits, which corresponds to
5289 * consumer AMD devices but nothing else.
5291 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
5292 if (accel_uses_host_cpuid()) {
5293 uint32_t host_phys_bits = x86_host_phys_bits();
5294 static bool warned;
5296 /* Print a warning if the user set it to a value that's not the
5297 * host value.
5299 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
5300 !warned) {
5301 warn_report("Host physical bits (%u)"
5302 " does not match phys-bits property (%u)",
5303 host_phys_bits, cpu->phys_bits);
5304 warned = true;
5307 if (cpu->host_phys_bits) {
5308 /* The user asked for us to use the host physical bits */
5309 cpu->phys_bits = host_phys_bits;
5310 if (cpu->host_phys_bits_limit &&
5311 cpu->phys_bits > cpu->host_phys_bits_limit) {
5312 cpu->phys_bits = cpu->host_phys_bits_limit;
5316 if (cpu->phys_bits &&
5317 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
5318 cpu->phys_bits < 32)) {
5319 error_setg(errp, "phys-bits should be between 32 and %u "
5320 " (but is %u)",
5321 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
5322 return;
5324 } else {
5325 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
5326 error_setg(errp, "TCG only supports phys-bits=%u",
5327 TCG_PHYS_ADDR_BITS);
5328 return;
5331 /* 0 means it was not explicitly set by the user (or by machine
5332 * compat_props or by the host code above). In this case, the default
5333 * is the value used by TCG (40).
5334 */
5335 if (cpu->phys_bits == 0) {
5336 cpu->phys_bits = TCG_PHYS_ADDR_BITS;
5338 } else {
5339 /* For 32-bit systems, don't use the user-set value, but keep
5340 * phys_bits consistent with what we tell the guest.
5341 */
5342 if (cpu->phys_bits != 0) {
5343 error_setg(errp, "phys-bits is not user-configurable in 32 bit");
5344 return;
5347 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
5348 cpu->phys_bits = 36;
5349 } else {
5350 cpu->phys_bits = 32;
5354 /* Cache information initialization */
5355 if (!cpu->legacy_cache) {
5356 if (!xcc->cpu_def || !xcc->cpu_def->cache_info) {
5357 char *name = x86_cpu_class_get_model_name(xcc);
5358 error_setg(errp,
5359 "CPU model '%s' doesn't support legacy-cache=off", name);
5360 g_free(name);
5361 return;
5363 env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
5364 *xcc->cpu_def->cache_info;
5365 } else {
5366 /* Build legacy cache information */
5367 env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
5368 env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
5369 env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
5370 env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;
5372 env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
5373 env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
5374 env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
5375 env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;
5377 env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
5378 env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
5379 env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
5380 env->cache_info_amd.l3_cache = &legacy_l3_cache;
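/*
 * These three tables feed different CPUID leaves: cache_info_cpuid2 backs
 * the leaf 2 descriptors, cache_info_cpuid4 backs the deterministic leaf 4
 * data, and cache_info_amd backs the AMD leaves 0x80000005/0x80000006 (and
 * 0x8000001D when topoext is enabled).
 */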
5384 cpu_exec_realizefn(cs, &local_err);
5385 if (local_err != NULL) {
5386 error_propagate(errp, local_err);
5387 return;
5390 #ifndef CONFIG_USER_ONLY
5391 MachineState *ms = MACHINE(qdev_get_machine());
5392 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
5394 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || ms->smp.cpus > 1) {
5395 x86_cpu_apic_create(cpu, &local_err);
5396 if (local_err != NULL) {
5397 goto out;
5400 #endif
5402 mce_init(cpu);
5404 #ifndef CONFIG_USER_ONLY
5405 if (tcg_enabled()) {
5406 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
5407 cpu->cpu_as_root = g_new(MemoryRegion, 1);
5409 /* Outer container... */
5410 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
5411 memory_region_set_enabled(cpu->cpu_as_root, true);
5413 /* ... with two regions inside: normal system memory with low
5414 * priority, and...
5415 */
5416 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
5417 get_system_memory(), 0, ~0ull);
5418 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
5419 memory_region_set_enabled(cpu->cpu_as_mem, true);
5421 cs->num_ases = 2;
5422 cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
5423 cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);
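/*
 * Address space 0 ("cpu-memory") is the regular view backed by cs->memory;
 * address space 1 ("cpu-smm") is rooted at cpu_as_root so that SMRAM can be
 * overlaid on it by the machine-done notifier below. x86_asidx_from_attrs()
 * selects index 1 for accesses performed in SMM.
 */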
5425 /* ... SMRAM with higher priority, linked from /machine/smram. */
5426 cpu->machine_done.notify = x86_cpu_machine_done;
5427 qemu_add_machine_init_done_notifier(&cpu->machine_done);
5429 #endif
5431 qemu_init_vcpu(cs);
5433 /*
5434 * Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU
5435 * adjusts CPUID_0000_0001_EBX and CPUID_8000_0008_ECX based on the
5436 * topology inputs (sockets,cores,threads), it is still better to give
5437 * users a warning.
5438 *
5439 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
5440 * cs->nr_threads hasn't been populated yet and the checking is incorrect.
5441 */
5442 if (IS_AMD_CPU(env) &&
5443 !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) &&
5444 cs->nr_threads > 1 && !ht_warned) {
5445 warn_report("This family of AMD CPU doesn't support "
5446 "hyperthreading(%d)",
5447 cs->nr_threads);
5448 error_printf("Please configure -smp options properly"
5449 " or try enabling topoext feature.\n");
5450 ht_warned = true;
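/*
 * Illustrative only: the warning can be avoided either by using one thread
 * per core in -smp, or by exposing the topology extension, e.g.
 * "-cpu EPYC,topoext=on -smp 8,sockets=1,cores=4,threads=2".
 */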
5453 x86_cpu_apic_realize(cpu, &local_err);
5454 if (local_err != NULL) {
5455 goto out;
5457 cpu_reset(cs);
5459 xcc->parent_realize(dev, &local_err);
5461 out:
5462 if (local_err != NULL) {
5463 error_propagate(errp, local_err);
5464 return;
5468 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
5470 X86CPU *cpu = X86_CPU(dev);
5471 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
5472 Error *local_err = NULL;
5474 #ifndef CONFIG_USER_ONLY
5475 cpu_remove_sync(CPU(dev));
5476 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
5477 #endif
5479 if (cpu->apic_state) {
5480 object_unparent(OBJECT(cpu->apic_state));
5481 cpu->apic_state = NULL;
5484 xcc->parent_unrealize(dev, &local_err);
5485 if (local_err != NULL) {
5486 error_propagate(errp, local_err);
5487 return;
5491 typedef struct BitProperty {
5492 FeatureWord w;
5493 uint32_t mask;
5494 } BitProperty;
5496 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
5497 void *opaque, Error **errp)
5499 X86CPU *cpu = X86_CPU(obj);
5500 BitProperty *fp = opaque;
5501 uint32_t f = cpu->env.features[fp->w];
5502 bool value = (f & fp->mask) == fp->mask;
5503 visit_type_bool(v, name, &value, errp);
5506 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
5507 void *opaque, Error **errp)
5509 DeviceState *dev = DEVICE(obj);
5510 X86CPU *cpu = X86_CPU(obj);
5511 BitProperty *fp = opaque;
5512 Error *local_err = NULL;
5513 bool value;
5515 if (dev->realized) {
5516 qdev_prop_set_after_realize(dev, name, errp);
5517 return;
5520 visit_type_bool(v, name, &value, &local_err);
5521 if (local_err) {
5522 error_propagate(errp, local_err);
5523 return;
5526 if (value) {
5527 cpu->env.features[fp->w] |= fp->mask;
5528 } else {
5529 cpu->env.features[fp->w] &= ~fp->mask;
5531 cpu->env.user_features[fp->w] |= fp->mask;
5534 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
5535 void *opaque)
5537 BitProperty *prop = opaque;
5538 g_free(prop);
5541 /* Register a boolean property to get/set a single bit in a uint32_t field.
5543 * The same property name can be registered multiple times to make it affect
5544 * multiple bits in the same FeatureWord. In that case, the getter will return
5545 * true only if all bits are set.
5546 */
5547 static void x86_cpu_register_bit_prop(X86CPU *cpu,
5548 const char *prop_name,
5549 FeatureWord w,
5550 int bitnr)
5552 BitProperty *fp;
5553 ObjectProperty *op;
5554 uint32_t mask = (1UL << bitnr);
5556 op = object_property_find(OBJECT(cpu), prop_name, NULL);
5557 if (op) {
5558 fp = op->opaque;
5559 assert(fp->w == w);
5560 fp->mask |= mask;
5561 } else {
5562 fp = g_new0(BitProperty, 1);
5563 fp->w = w;
5564 fp->mask = mask;
5565 object_property_add(OBJECT(cpu), prop_name, "bool",
5566 x86_cpu_get_bit_prop,
5567 x86_cpu_set_bit_prop,
5568 x86_cpu_release_bit_prop, fp, &error_abort);
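/*
 * The net effect is that every named CPUID feature bit doubles as a boolean
 * QOM property, so e.g. "-cpu qemu64,avx2=on" lands in x86_cpu_set_bit_prop(),
 * which sets the bit in env->features[] and records it in user_features[] so
 * that later filtering knows the user asked for it explicitly.
 */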
5572 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
5573 FeatureWord w,
5574 int bitnr)
5576 FeatureWordInfo *fi = &feature_word_info[w];
5577 const char *name = fi->feat_names[bitnr];
5579 if (!name) {
5580 return;
5583 /* Property names should use "-" instead of "_".
5584 * Old names containing underscores are registered as aliases
5585 * using object_property_add_alias()
5586 */
5587 assert(!strchr(name, '_'));
5588 /* aliases don't use "|" delimiters anymore; they are registered
5589 * manually using object_property_add_alias() */
5590 assert(!strchr(name, '|'));
5591 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
5594 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
5596 X86CPU *cpu = X86_CPU(cs);
5597 CPUX86State *env = &cpu->env;
5598 GuestPanicInformation *panic_info = NULL;
5600 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
5601 panic_info = g_malloc0(sizeof(GuestPanicInformation));
5603 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
5605 assert(HV_CRASH_PARAMS >= 5);
5606 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
5607 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
5608 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
5609 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
5610 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
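/*
 * The five values come from the Hyper-V crash MSRs (HV_X64_MSR_CRASH_P0..P4)
 * written by a Windows guest on a bugcheck; by convention P0 holds the
 * bugcheck code.
 */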
5613 return panic_info;
5615 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
5616 const char *name, void *opaque,
5617 Error **errp)
5619 CPUState *cs = CPU(obj);
5620 GuestPanicInformation *panic_info;
5622 if (!cs->crash_occurred) {
5623 error_setg(errp, "No crash occurred");
5624 return;
5627 panic_info = x86_cpu_get_crash_info(cs);
5628 if (panic_info == NULL) {
5629 error_setg(errp, "No crash information");
5630 return;
5633 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
5634 errp);
5635 qapi_free_GuestPanicInformation(panic_info);
5638 static void x86_cpu_initfn(Object *obj)
5640 X86CPU *cpu = X86_CPU(obj);
5641 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
5642 CPUX86State *env = &cpu->env;
5643 FeatureWord w;
5645 env->nr_dies = 1;
5646 cpu_set_cpustate_pointers(cpu);
5648 object_property_add(obj, "family", "int",
5649 x86_cpuid_version_get_family,
5650 x86_cpuid_version_set_family, NULL, NULL, NULL);
5651 object_property_add(obj, "model", "int",
5652 x86_cpuid_version_get_model,
5653 x86_cpuid_version_set_model, NULL, NULL, NULL);
5654 object_property_add(obj, "stepping", "int",
5655 x86_cpuid_version_get_stepping,
5656 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
5657 object_property_add_str(obj, "vendor",
5658 x86_cpuid_get_vendor,
5659 x86_cpuid_set_vendor, NULL);
5660 object_property_add_str(obj, "model-id",
5661 x86_cpuid_get_model_id,
5662 x86_cpuid_set_model_id, NULL);
5663 object_property_add(obj, "tsc-frequency", "int",
5664 x86_cpuid_get_tsc_freq,
5665 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
5666 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
5667 x86_cpu_get_feature_words,
5668 NULL, NULL, (void *)env->features, NULL);
5669 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
5670 x86_cpu_get_feature_words,
5671 NULL, NULL, (void *)cpu->filtered_features, NULL);
5672 /*
5673 * The "unavailable-features" property has the same semantics as
5674 * CpuDefinitionInfo.unavailable-features on the "query-cpu-definitions"
5675 * QMP command: they list the features that would have prevented the
5676 * CPU from running if the "enforce" flag was set.
5677 */
5678 object_property_add(obj, "unavailable-features", "strList",
5679 x86_cpu_get_unavailable_features,
5680 NULL, NULL, NULL, &error_abort);
5682 object_property_add(obj, "crash-information", "GuestPanicInformation",
5683 x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);
5685 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
5687 for (w = 0; w < FEATURE_WORDS; w++) {
5688 int bitnr;
5690 for (bitnr = 0; bitnr < 32; bitnr++) {
5691 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
5695 object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
5696 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
5697 object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
5698 object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
5699 object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
5700 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
5701 object_property_add_alias(obj, "i64", obj, "lm", &error_abort);
5703 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
5704 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
5705 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
5706 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
5707 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
5708 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
5709 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
5710 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
5711 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
5712 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
5713 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
5714 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
5715 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
5716 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
5717 object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
5718 object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
5719 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
5720 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
5721 object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
5722 object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
5723 object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);
5725 if (xcc->cpu_def) {
5726 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
5730 static int64_t x86_cpu_get_arch_id(CPUState *cs)
5732 X86CPU *cpu = X86_CPU(cs);
5734 return cpu->apic_id;
5737 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
5739 X86CPU *cpu = X86_CPU(cs);
5741 return cpu->env.cr[0] & CR0_PG_MASK;
5744 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
5746 X86CPU *cpu = X86_CPU(cs);
5748 cpu->env.eip = value;
5751 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
5753 X86CPU *cpu = X86_CPU(cs);
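/* tb->pc holds the linear address of the TB; subtracting the CS base
 * recovers the architectural EIP. */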
5755 cpu->env.eip = tb->pc - tb->cs_base;
5758 int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
5760 X86CPU *cpu = X86_CPU(cs);
5761 CPUX86State *env = &cpu->env;
5763 #if !defined(CONFIG_USER_ONLY)
5764 if (interrupt_request & CPU_INTERRUPT_POLL) {
5765 return CPU_INTERRUPT_POLL;
5767 #endif
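/*
 * The remaining checks are ordered by priority: SIPI first, then (only
 * while GIF is set) SMI, NMI, machine checks, external interrupts and
 * finally virtual interrupts.
 */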
5768 if (interrupt_request & CPU_INTERRUPT_SIPI) {
5769 return CPU_INTERRUPT_SIPI;
5772 if (env->hflags2 & HF2_GIF_MASK) {
5773 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
5774 !(env->hflags & HF_SMM_MASK)) {
5775 return CPU_INTERRUPT_SMI;
5776 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
5777 !(env->hflags2 & HF2_NMI_MASK)) {
5778 return CPU_INTERRUPT_NMI;
5779 } else if (interrupt_request & CPU_INTERRUPT_MCE) {
5780 return CPU_INTERRUPT_MCE;
5781 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
5782 (((env->hflags2 & HF2_VINTR_MASK) &&
5783 (env->hflags2 & HF2_HIF_MASK)) ||
5784 (!(env->hflags2 & HF2_VINTR_MASK) &&
5785 (env->eflags & IF_MASK &&
5786 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
5787 return CPU_INTERRUPT_HARD;
5788 #if !defined(CONFIG_USER_ONLY)
5789 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
5790 (env->eflags & IF_MASK) &&
5791 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
5792 return CPU_INTERRUPT_VIRQ;
5793 #endif
5797 return 0;
5800 static bool x86_cpu_has_work(CPUState *cs)
5802 return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
5805 static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
5807 X86CPU *cpu = X86_CPU(cs);
5808 CPUX86State *env = &cpu->env;
5810 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
5811 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
5812 : bfd_mach_i386_i8086);
5813 info->print_insn = print_insn_i386;
5815 info->cap_arch = CS_ARCH_X86;
5816 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
5817 : env->hflags & HF_CS32_MASK ? CS_MODE_32
5818 : CS_MODE_16);
5819 info->cap_insn_unit = 1;
5820 info->cap_insn_split = 8;
5823 void x86_update_hflags(CPUX86State *env)
5825 uint32_t hflags;
5826 #define HFLAG_COPY_MASK \
5827 ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
5828 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
5829 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
5830 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
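/*
 * The bits masked out by HFLAG_COPY_MASK are recomputed below from the
 * segment and control registers. For example, in 64-bit long mode with a
 * long code segment (CR0.PE=1, EFER.LMA=1, CS.L=1) the result includes
 * HF_LMA_MASK, HF_CS32_MASK, HF_SS32_MASK and HF_CS64_MASK, while
 * HF_ADDSEG_MASK stays clear.
 */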
5832 hflags = env->hflags & HFLAG_COPY_MASK;
5833 hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
5834 hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
5835 hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
5836 (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
5837 hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
5839 if (env->cr[4] & CR4_OSFXSR_MASK) {
5840 hflags |= HF_OSFXSR_MASK;
5843 if (env->efer & MSR_EFER_LMA) {
5844 hflags |= HF_LMA_MASK;
5847 if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
5848 hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
5849 } else {
5850 hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
5851 (DESC_B_SHIFT - HF_CS32_SHIFT);
5852 hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
5853 (DESC_B_SHIFT - HF_SS32_SHIFT);
5854 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
5855 !(hflags & HF_CS32_MASK)) {
5856 hflags |= HF_ADDSEG_MASK;
5857 } else {
5858 hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
5859 env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
5862 env->hflags = hflags;
5865 static Property x86_cpu_properties[] = {
5866 #ifdef CONFIG_USER_ONLY
5867 /* apic_id = 0 by default for *-user, see commit 9886e834 */
5868 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
5869 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
5870 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
5871 DEFINE_PROP_INT32("die-id", X86CPU, die_id, 0),
5872 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
5873 #else
5874 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
5875 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
5876 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
5877 DEFINE_PROP_INT32("die-id", X86CPU, die_id, -1),
5878 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
5879 #endif
5880 DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
5881 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
5883 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
5884 DEFINE_PROP_BIT64("hv-relaxed", X86CPU, hyperv_features,
5885 HYPERV_FEAT_RELAXED, 0),
5886 DEFINE_PROP_BIT64("hv-vapic", X86CPU, hyperv_features,
5887 HYPERV_FEAT_VAPIC, 0),
5888 DEFINE_PROP_BIT64("hv-time", X86CPU, hyperv_features,
5889 HYPERV_FEAT_TIME, 0),
5890 DEFINE_PROP_BIT64("hv-crash", X86CPU, hyperv_features,
5891 HYPERV_FEAT_CRASH, 0),
5892 DEFINE_PROP_BIT64("hv-reset", X86CPU, hyperv_features,
5893 HYPERV_FEAT_RESET, 0),
5894 DEFINE_PROP_BIT64("hv-vpindex", X86CPU, hyperv_features,
5895 HYPERV_FEAT_VPINDEX, 0),
5896 DEFINE_PROP_BIT64("hv-runtime", X86CPU, hyperv_features,
5897 HYPERV_FEAT_RUNTIME, 0),
5898 DEFINE_PROP_BIT64("hv-synic", X86CPU, hyperv_features,
5899 HYPERV_FEAT_SYNIC, 0),
5900 DEFINE_PROP_BIT64("hv-stimer", X86CPU, hyperv_features,
5901 HYPERV_FEAT_STIMER, 0),
5902 DEFINE_PROP_BIT64("hv-frequencies", X86CPU, hyperv_features,
5903 HYPERV_FEAT_FREQUENCIES, 0),
5904 DEFINE_PROP_BIT64("hv-reenlightenment", X86CPU, hyperv_features,
5905 HYPERV_FEAT_REENLIGHTENMENT, 0),
5906 DEFINE_PROP_BIT64("hv-tlbflush", X86CPU, hyperv_features,
5907 HYPERV_FEAT_TLBFLUSH, 0),
5908 DEFINE_PROP_BIT64("hv-evmcs", X86CPU, hyperv_features,
5909 HYPERV_FEAT_EVMCS, 0),
5910 DEFINE_PROP_BIT64("hv-ipi", X86CPU, hyperv_features,
5911 HYPERV_FEAT_IPI, 0),
5912 DEFINE_PROP_BIT64("hv-stimer-direct", X86CPU, hyperv_features,
5913 HYPERV_FEAT_STIMER_DIRECT, 0),
5914 DEFINE_PROP_BOOL("hv-passthrough", X86CPU, hyperv_passthrough, false),
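/*
 * Illustrative usage: a Windows guest typically enables a handful of these
 * together, e.g. "-cpu host,hv-relaxed,hv-vapic,hv-spinlocks=0x1fff,hv-time",
 * or uses hv-passthrough to mirror whatever enlightenments the host KVM
 * exposes.
 */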
5916 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
5917 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
5918 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
5919 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
5920 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
5921 DEFINE_PROP_UINT8("host-phys-bits-limit", X86CPU, host_phys_bits_limit, 0),
5922 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
5923 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
5924 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
5925 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
5926 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
5927 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
5928 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
5929 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
5930 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
5931 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
5932 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
5933 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
5934 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
5935 false),
5936 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
5937 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
5938 DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count,
5939 true),
5940 /*
5941 * legacy_cache defaults to true unless the CPU model provides its
5942 * own cache information (see x86_cpu_load_def()).
5943 */
5944 DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true),
5946 /*
5947 * From "Requirements for Implementing the Microsoft
5948 * Hypervisor Interface":
5949 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
5951 * "Starting with Windows Server 2012 and Windows 8, if
5952 * CPUID.40000005.EAX contains a value of -1, Windows assumes that
5953 * the hypervisor imposes no specific limit to the number of VPs.
5954 * In this case, Windows Server 2012 guest VMs may use more than
5955 * 64 VPs, up to the maximum supported number of processors applicable
5956 * to the specific Windows version being used."
5957 */
5958 DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
5959 DEFINE_PROP_BOOL("x-hv-synic-kvm-only", X86CPU, hyperv_synic_kvm_only,
5960 false),
5961 DEFINE_PROP_BOOL("x-intel-pt-auto-level", X86CPU, intel_pt_auto_level,
5962 true),
5963 DEFINE_PROP_END_OF_LIST()
5966 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
5968 X86CPUClass *xcc = X86_CPU_CLASS(oc);
5969 CPUClass *cc = CPU_CLASS(oc);
5970 DeviceClass *dc = DEVICE_CLASS(oc);
5972 device_class_set_parent_realize(dc, x86_cpu_realizefn,
5973 &xcc->parent_realize);
5974 device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
5975 &xcc->parent_unrealize);
5976 dc->props = x86_cpu_properties;
5978 xcc->parent_reset = cc->reset;
5979 cc->reset = x86_cpu_reset;
5980 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
5982 cc->class_by_name = x86_cpu_class_by_name;
5983 cc->parse_features = x86_cpu_parse_featurestr;
5984 cc->has_work = x86_cpu_has_work;
5985 #ifdef CONFIG_TCG
5986 cc->do_interrupt = x86_cpu_do_interrupt;
5987 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
5988 #endif
5989 cc->dump_state = x86_cpu_dump_state;
5990 cc->get_crash_info = x86_cpu_get_crash_info;
5991 cc->set_pc = x86_cpu_set_pc;
5992 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
5993 cc->gdb_read_register = x86_cpu_gdb_read_register;
5994 cc->gdb_write_register = x86_cpu_gdb_write_register;
5995 cc->get_arch_id = x86_cpu_get_arch_id;
5996 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
5997 #ifndef CONFIG_USER_ONLY
5998 cc->asidx_from_attrs = x86_asidx_from_attrs;
5999 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
6000 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
6001 cc->write_elf64_note = x86_cpu_write_elf64_note;
6002 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
6003 cc->write_elf32_note = x86_cpu_write_elf32_note;
6004 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
6005 cc->vmsd = &vmstate_x86_cpu;
6006 #endif
6007 cc->gdb_arch_name = x86_gdb_arch_name;
6008 #ifdef TARGET_X86_64
6009 cc->gdb_core_xml_file = "i386-64bit.xml";
6010 cc->gdb_num_core_regs = 66;
6011 #else
6012 cc->gdb_core_xml_file = "i386-32bit.xml";
6013 cc->gdb_num_core_regs = 50;
6014 #endif
6015 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
6016 cc->debug_excp_handler = breakpoint_handler;
6017 #endif
6018 cc->cpu_exec_enter = x86_cpu_exec_enter;
6019 cc->cpu_exec_exit = x86_cpu_exec_exit;
6020 #ifdef CONFIG_TCG
6021 cc->tcg_initialize = tcg_x86_init;
6022 cc->tlb_fill = x86_cpu_tlb_fill;
6023 #endif
6024 cc->disas_set_info = x86_disas_set_info;
6026 dc->user_creatable = true;
6029 static const TypeInfo x86_cpu_type_info = {
6030 .name = TYPE_X86_CPU,
6031 .parent = TYPE_CPU,
6032 .instance_size = sizeof(X86CPU),
6033 .instance_init = x86_cpu_initfn,
6034 .abstract = true,
6035 .class_size = sizeof(X86CPUClass),
6036 .class_init = x86_cpu_common_class_init,
6040 /* "base" CPU model, used by query-cpu-model-expansion */
6041 static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
6043 X86CPUClass *xcc = X86_CPU_CLASS(oc);
6045 xcc->static_model = true;
6046 xcc->migration_safe = true;
6047 xcc->model_description = "base CPU model type with no features enabled";
6048 xcc->ordering = 8;
6051 static const TypeInfo x86_base_cpu_type_info = {
6052 .name = X86_CPU_TYPE_NAME("base"),
6053 .parent = TYPE_X86_CPU,
6054 .class_init = x86_cpu_base_class_init,
6057 static void x86_cpu_register_types(void)
6059 int i;
6061 type_register_static(&x86_cpu_type_info);
6062 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
6063 x86_register_cpudef_type(&builtin_x86_defs[i]);
6065 type_register_static(&max_x86_cpu_type_info);
6066 type_register_static(&x86_base_cpu_type_info);
6067 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
6068 type_register_static(&host_x86_cpu_type_info);
6069 #endif
6072 type_init(x86_cpu_register_types)