cpus hw target: Use warn_report() & friends to report warnings
[qemu/ar7.git] / target / i386 / cpu.c
blob9d4217afba7e75c66b5a8af0962d66c213c9244f
1 /*
2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "qemu/units.h"
22 #include "qemu/cutils.h"
23 #include "qemu/bitops.h"
25 #include "cpu.h"
26 #include "exec/exec-all.h"
27 #include "sysemu/kvm.h"
28 #include "sysemu/hvf.h"
29 #include "sysemu/cpus.h"
30 #include "kvm_i386.h"
31 #include "sev_i386.h"
33 #include "qemu/error-report.h"
34 #include "qemu/option.h"
35 #include "qemu/config-file.h"
36 #include "qapi/error.h"
37 #include "qapi/qapi-visit-misc.h"
38 #include "qapi/qapi-visit-run-state.h"
39 #include "qapi/qmp/qdict.h"
40 #include "qapi/qmp/qerror.h"
41 #include "qapi/visitor.h"
42 #include "qom/qom-qobject.h"
43 #include "sysemu/arch_init.h"
45 #include "standard-headers/asm-x86/kvm_para.h"
47 #include "sysemu/sysemu.h"
48 #include "hw/qdev-properties.h"
49 #include "hw/i386/topology.h"
50 #ifndef CONFIG_USER_ONLY
51 #include "exec/address-spaces.h"
52 #include "hw/hw.h"
53 #include "hw/xen/xen.h"
54 #include "hw/i386/apic_internal.h"
55 #endif
57 #include "disas/capstone.h"
59 /* Helpers for building CPUID[2] descriptors: */
61 struct CPUID2CacheDescriptorInfo {
62 enum CacheType type;
63 int level;
64 int size;
65 int line_size;
66 int associativity;
70 * Known CPUID 2 cache descriptors.
71 * From Intel SDM Volume 2A, CPUID instruction
73 struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
74 [0x06] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 8 * KiB,
75 .associativity = 4, .line_size = 32, },
76 [0x08] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 16 * KiB,
77 .associativity = 4, .line_size = 32, },
78 [0x09] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
79 .associativity = 4, .line_size = 64, },
80 [0x0A] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
81 .associativity = 2, .line_size = 32, },
82 [0x0C] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
83 .associativity = 4, .line_size = 32, },
84 [0x0D] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
85 .associativity = 4, .line_size = 64, },
86 [0x0E] = { .level = 1, .type = DATA_CACHE, .size = 24 * KiB,
87 .associativity = 6, .line_size = 64, },
88 [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
89 .associativity = 2, .line_size = 64, },
90 [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
91 .associativity = 8, .line_size = 64, },
92 /* lines per sector is not supported cpuid2_cache_descriptor(),
93 * so descriptors 0x22, 0x23 are not included
95 [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
96 .associativity = 16, .line_size = 64, },
97 /* lines per sector is not supported cpuid2_cache_descriptor(),
98 * so descriptors 0x25, 0x20 are not included
100 [0x2C] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
101 .associativity = 8, .line_size = 64, },
102 [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
103 .associativity = 8, .line_size = 64, },
104 [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
105 .associativity = 4, .line_size = 32, },
106 [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
107 .associativity = 4, .line_size = 32, },
108 [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
109 .associativity = 4, .line_size = 32, },
110 [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
111 .associativity = 4, .line_size = 32, },
112 [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
113 .associativity = 4, .line_size = 32, },
114 [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
115 .associativity = 4, .line_size = 64, },
116 [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
117 .associativity = 8, .line_size = 64, },
118 [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB,
119 .associativity = 12, .line_size = 64, },
120 /* Descriptor 0x49 depends on CPU family/model, so it is not included */
121 [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
122 .associativity = 12, .line_size = 64, },
123 [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
124 .associativity = 16, .line_size = 64, },
125 [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
126 .associativity = 12, .line_size = 64, },
127 [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB,
128 .associativity = 16, .line_size = 64, },
129 [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB,
130 .associativity = 24, .line_size = 64, },
131 [0x60] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
132 .associativity = 8, .line_size = 64, },
133 [0x66] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
134 .associativity = 4, .line_size = 64, },
135 [0x67] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
136 .associativity = 4, .line_size = 64, },
137 [0x68] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
138 .associativity = 4, .line_size = 64, },
139 [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
140 .associativity = 4, .line_size = 64, },
141 /* lines per sector is not supported cpuid2_cache_descriptor(),
142 * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
144 [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
145 .associativity = 8, .line_size = 64, },
146 [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
147 .associativity = 2, .line_size = 64, },
148 [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
149 .associativity = 8, .line_size = 64, },
150 [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
151 .associativity = 8, .line_size = 32, },
152 [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
153 .associativity = 8, .line_size = 32, },
154 [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
155 .associativity = 8, .line_size = 32, },
156 [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
157 .associativity = 8, .line_size = 32, },
158 [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
159 .associativity = 4, .line_size = 64, },
160 [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
161 .associativity = 8, .line_size = 64, },
162 [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB,
163 .associativity = 4, .line_size = 64, },
164 [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
165 .associativity = 4, .line_size = 64, },
166 [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
167 .associativity = 4, .line_size = 64, },
168 [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
169 .associativity = 8, .line_size = 64, },
170 [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
171 .associativity = 8, .line_size = 64, },
172 [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
173 .associativity = 8, .line_size = 64, },
174 [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB,
175 .associativity = 12, .line_size = 64, },
176 [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB,
177 .associativity = 12, .line_size = 64, },
178 [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
179 .associativity = 12, .line_size = 64, },
180 [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
181 .associativity = 16, .line_size = 64, },
182 [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
183 .associativity = 16, .line_size = 64, },
184 [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
185 .associativity = 16, .line_size = 64, },
186 [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
187 .associativity = 24, .line_size = 64, },
188 [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB,
189 .associativity = 24, .line_size = 64, },
190 [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB,
191 .associativity = 24, .line_size = 64, },
/*
 * From the Intel SDM, CPUID leaf 2 documentation:
 * "CPUID leaf 2 does not report cache descriptor information,
 * use CPUID leaf 4 to query cache parameters"
 */
#define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF
201 * Return a CPUID 2 cache descriptor for a given cache.
202 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
204 static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
206 int i;
208 assert(cache->size > 0);
209 assert(cache->level > 0);
210 assert(cache->line_size > 0);
211 assert(cache->associativity > 0);
212 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
213 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
214 if (d->level == cache->level && d->type == cache->type &&
215 d->size == cache->size && d->line_size == cache->line_size &&
216 d->associativity == cache->associativity) {
217 return i;
221 return CACHE_DESCRIPTOR_UNAVAILABLE;
/* CPUID Leaf 4 constants: */

/* EAX: */
#define CACHE_TYPE_D    1
#define CACHE_TYPE_I    2
#define CACHE_TYPE_UNIFIED   3
/* Macro argument parenthesized to avoid precedence surprises */
#define CACHE_LEVEL(l)        ((l) << 5)
#define CACHE_SELF_INIT_LEVEL (1 << 8)

/* EDX: */
#define CACHE_NO_INVD_SHARING (1 << 0)
#define CACHE_INCLUSIVE       (1 << 1)
#define CACHE_COMPLEX_IDX     (1 << 2)

/* Encode CacheType for CPUID[4].EAX */
#define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \
                       ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \
                       ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
                       0 /* Invalid value */)
247 /* Encode cache info for CPUID[4] */
248 static void encode_cache_cpuid4(CPUCacheInfo *cache,
249 int num_apic_ids, int num_cores,
250 uint32_t *eax, uint32_t *ebx,
251 uint32_t *ecx, uint32_t *edx)
253 assert(cache->size == cache->line_size * cache->associativity *
254 cache->partitions * cache->sets);
256 assert(num_apic_ids > 0);
257 *eax = CACHE_TYPE(cache->type) |
258 CACHE_LEVEL(cache->level) |
259 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
260 ((num_cores - 1) << 26) |
261 ((num_apic_ids - 1) << 14);
263 assert(cache->line_size > 0);
264 assert(cache->partitions > 0);
265 assert(cache->associativity > 0);
266 /* We don't implement fully-associative caches */
267 assert(cache->associativity < cache->sets);
268 *ebx = (cache->line_size - 1) |
269 ((cache->partitions - 1) << 12) |
270 ((cache->associativity - 1) << 22);
272 assert(cache->sets > 0);
273 *ecx = cache->sets - 1;
275 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
276 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
277 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
280 /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
281 static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
283 assert(cache->size % 1024 == 0);
284 assert(cache->lines_per_tag > 0);
285 assert(cache->associativity > 0);
286 assert(cache->line_size > 0);
287 return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
288 (cache->lines_per_tag << 8) | (cache->line_size);
#define ASSOC_FULL 0xFF

/*
 * AMD associativity encoding used on CPUID Leaf 0x80000006:
 * values not in the table (and not fully-associative) encode as 0 (invalid).
 * Argument is parenthesized; note it is evaluated multiple times, so
 * callers must not pass expressions with side effects.
 */
#define AMD_ENC_ASSOC(a) ((a) <=    1 ? (a)  : \
                          (a) ==    2 ? 0x2  : \
                          (a) ==    4 ? 0x4  : \
                          (a) ==    8 ? 0x6  : \
                          (a) ==   16 ? 0x8  : \
                          (a) ==   32 ? 0xA  : \
                          (a) ==   48 ? 0xB  : \
                          (a) ==   64 ? 0xC  : \
                          (a) ==   96 ? 0xD  : \
                          (a) ==  128 ? 0xE  : \
                          (a) == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
308 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
309 * @l3 can be NULL.
311 static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
312 CPUCacheInfo *l3,
313 uint32_t *ecx, uint32_t *edx)
315 assert(l2->size % 1024 == 0);
316 assert(l2->associativity > 0);
317 assert(l2->lines_per_tag > 0);
318 assert(l2->line_size > 0);
319 *ecx = ((l2->size / 1024) << 16) |
320 (AMD_ENC_ASSOC(l2->associativity) << 12) |
321 (l2->lines_per_tag << 8) | (l2->line_size);
323 if (l3) {
324 assert(l3->size % (512 * 1024) == 0);
325 assert(l3->associativity > 0);
326 assert(l3->lines_per_tag > 0);
327 assert(l3->line_size > 0);
328 *edx = ((l3->size / (512 * 1024)) << 18) |
329 (AMD_ENC_ASSOC(l3->associativity) << 12) |
330 (l3->lines_per_tag << 8) | (l3->line_size);
331 } else {
332 *edx = 0;
/*
 * Definitions used for building CPUID Leaf 0x8000001D and 0x8000001E
 * Please refer to the AMD64 Architecture Programmer's Manual Volume 3.
 * Define the constants to build the cpu topology. Right now, TOPOEXT
 * feature is enabled only on EPYC. So, these constants are based on
 * EPYC supported configurations. We may need to handle the cases if
 * these values change in future.
 */
/* Maximum core complexes in a node */
#define MAX_CCX 2
/* Maximum cores in a core complex */
#define MAX_CORES_IN_CCX 4
/* Maximum cores in a node */
#define MAX_CORES_IN_NODE 8
/* Maximum nodes in a socket */
#define MAX_NODES_PER_SOCKET 4
354 * Figure out the number of nodes required to build this config.
355 * Max cores in a node is 8
357 static int nodes_in_socket(int nr_cores)
359 int nodes;
361 nodes = DIV_ROUND_UP(nr_cores, MAX_CORES_IN_NODE);
363 /* Hardware does not support config with 3 nodes, return 4 in that case */
364 return (nodes == 3) ? 4 : nodes;
368 * Decide the number of cores in a core complex with the given nr_cores using
369 * following set constants MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE and
370 * MAX_NODES_PER_SOCKET. Maintain symmetry as much as possible
371 * L3 cache is shared across all cores in a core complex. So, this will also
372 * tell us how many cores are sharing the L3 cache.
374 static int cores_in_core_complex(int nr_cores)
376 int nodes;
378 /* Check if we can fit all the cores in one core complex */
379 if (nr_cores <= MAX_CORES_IN_CCX) {
380 return nr_cores;
382 /* Get the number of nodes required to build this config */
383 nodes = nodes_in_socket(nr_cores);
386 * Divide the cores accros all the core complexes
387 * Return rounded up value
389 return DIV_ROUND_UP(nr_cores, nodes * MAX_CCX);
392 /* Encode cache info for CPUID[8000001D] */
393 static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs,
394 uint32_t *eax, uint32_t *ebx,
395 uint32_t *ecx, uint32_t *edx)
397 uint32_t l3_cores;
398 assert(cache->size == cache->line_size * cache->associativity *
399 cache->partitions * cache->sets);
401 *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) |
402 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0);
404 /* L3 is shared among multiple cores */
405 if (cache->level == 3) {
406 l3_cores = cores_in_core_complex(cs->nr_cores);
407 *eax |= ((l3_cores * cs->nr_threads) - 1) << 14;
408 } else {
409 *eax |= ((cs->nr_threads - 1) << 14);
412 assert(cache->line_size > 0);
413 assert(cache->partitions > 0);
414 assert(cache->associativity > 0);
415 /* We don't implement fully-associative caches */
416 assert(cache->associativity < cache->sets);
417 *ebx = (cache->line_size - 1) |
418 ((cache->partitions - 1) << 12) |
419 ((cache->associativity - 1) << 22);
421 assert(cache->sets > 0);
422 *ecx = cache->sets - 1;
424 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
425 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
426 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
/* Data structure to hold the configuration info for a given core index */
struct core_topology {
    /* core complex id of the current core index */
    int ccx_id;
    /*
     * Adjusted core index for this core in the topology
     * This can be 0,1,2,3 with max 4 cores in a core complex
     */
    int core_id;
    /* Node id for this core index */
    int node_id;
    /* Number of nodes in this config */
    int num_nodes;
};
445 * Build the configuration closely match the EPYC hardware. Using the EPYC
446 * hardware configuration values (MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE)
447 * right now. This could change in future.
448 * nr_cores : Total number of cores in the config
449 * core_id : Core index of the current CPU
450 * topo : Data structure to hold all the config info for this core index
452 static void build_core_topology(int nr_cores, int core_id,
453 struct core_topology *topo)
455 int nodes, cores_in_ccx;
457 /* First get the number of nodes required */
458 nodes = nodes_in_socket(nr_cores);
460 cores_in_ccx = cores_in_core_complex(nr_cores);
462 topo->node_id = core_id / (cores_in_ccx * MAX_CCX);
463 topo->ccx_id = (core_id % (cores_in_ccx * MAX_CCX)) / cores_in_ccx;
464 topo->core_id = core_id % cores_in_ccx;
465 topo->num_nodes = nodes;
468 /* Encode cache info for CPUID[8000001E] */
469 static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu,
470 uint32_t *eax, uint32_t *ebx,
471 uint32_t *ecx, uint32_t *edx)
473 struct core_topology topo = {0};
474 unsigned long nodes;
475 int shift;
477 build_core_topology(cs->nr_cores, cpu->core_id, &topo);
478 *eax = cpu->apic_id;
480 * CPUID_Fn8000001E_EBX
481 * 31:16 Reserved
482 * 15:8 Threads per core (The number of threads per core is
483 * Threads per core + 1)
484 * 7:0 Core id (see bit decoding below)
485 * SMT:
486 * 4:3 node id
487 * 2 Core complex id
488 * 1:0 Core id
489 * Non SMT:
490 * 5:4 node id
491 * 3 Core complex id
492 * 1:0 Core id
494 if (cs->nr_threads - 1) {
495 *ebx = ((cs->nr_threads - 1) << 8) | (topo.node_id << 3) |
496 (topo.ccx_id << 2) | topo.core_id;
497 } else {
498 *ebx = (topo.node_id << 4) | (topo.ccx_id << 3) | topo.core_id;
501 * CPUID_Fn8000001E_ECX
502 * 31:11 Reserved
503 * 10:8 Nodes per processor (Nodes per processor is number of nodes + 1)
504 * 7:0 Node id (see bit decoding below)
505 * 2 Socket id
506 * 1:0 Node id
508 if (topo.num_nodes <= 4) {
509 *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << 2) |
510 topo.node_id;
511 } else {
513 * Node id fix up. Actual hardware supports up to 4 nodes. But with
514 * more than 32 cores, we may end up with more than 4 nodes.
515 * Node id is a combination of socket id and node id. Only requirement
516 * here is that this number should be unique accross the system.
517 * Shift the socket id to accommodate more nodes. We dont expect both
518 * socket id and node id to be big number at the same time. This is not
519 * an ideal config but we need to to support it. Max nodes we can have
520 * is 32 (255/8) with 8 cores per node and 255 max cores. We only need
521 * 5 bits for nodes. Find the left most set bit to represent the total
522 * number of nodes. find_last_bit returns last set bit(0 based). Left
523 * shift(+1) the socket id to represent all the nodes.
525 nodes = topo.num_nodes - 1;
526 shift = find_last_bit(&nodes, 8);
527 *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << (shift + 1)) |
528 topo.node_id;
530 *edx = 0;
534 * Definitions of the hardcoded cache entries we expose:
535 * These are legacy cache values. If there is a need to change any
536 * of these values please use builtin_x86_defs
539 /* L1 data cache: */
540 static CPUCacheInfo legacy_l1d_cache = {
541 .type = DATA_CACHE,
542 .level = 1,
543 .size = 32 * KiB,
544 .self_init = 1,
545 .line_size = 64,
546 .associativity = 8,
547 .sets = 64,
548 .partitions = 1,
549 .no_invd_sharing = true,
552 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
553 static CPUCacheInfo legacy_l1d_cache_amd = {
554 .type = DATA_CACHE,
555 .level = 1,
556 .size = 64 * KiB,
557 .self_init = 1,
558 .line_size = 64,
559 .associativity = 2,
560 .sets = 512,
561 .partitions = 1,
562 .lines_per_tag = 1,
563 .no_invd_sharing = true,
566 /* L1 instruction cache: */
567 static CPUCacheInfo legacy_l1i_cache = {
568 .type = INSTRUCTION_CACHE,
569 .level = 1,
570 .size = 32 * KiB,
571 .self_init = 1,
572 .line_size = 64,
573 .associativity = 8,
574 .sets = 64,
575 .partitions = 1,
576 .no_invd_sharing = true,
579 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
580 static CPUCacheInfo legacy_l1i_cache_amd = {
581 .type = INSTRUCTION_CACHE,
582 .level = 1,
583 .size = 64 * KiB,
584 .self_init = 1,
585 .line_size = 64,
586 .associativity = 2,
587 .sets = 512,
588 .partitions = 1,
589 .lines_per_tag = 1,
590 .no_invd_sharing = true,
593 /* Level 2 unified cache: */
594 static CPUCacheInfo legacy_l2_cache = {
595 .type = UNIFIED_CACHE,
596 .level = 2,
597 .size = 4 * MiB,
598 .self_init = 1,
599 .line_size = 64,
600 .associativity = 16,
601 .sets = 4096,
602 .partitions = 1,
603 .no_invd_sharing = true,
606 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
607 static CPUCacheInfo legacy_l2_cache_cpuid2 = {
608 .type = UNIFIED_CACHE,
609 .level = 2,
610 .size = 2 * MiB,
611 .line_size = 64,
612 .associativity = 8,
616 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
617 static CPUCacheInfo legacy_l2_cache_amd = {
618 .type = UNIFIED_CACHE,
619 .level = 2,
620 .size = 512 * KiB,
621 .line_size = 64,
622 .lines_per_tag = 1,
623 .associativity = 16,
624 .sets = 512,
625 .partitions = 1,
628 /* Level 3 unified cache: */
629 static CPUCacheInfo legacy_l3_cache = {
630 .type = UNIFIED_CACHE,
631 .level = 3,
632 .size = 16 * MiB,
633 .line_size = 64,
634 .associativity = 16,
635 .sets = 16384,
636 .partitions = 1,
637 .lines_per_tag = 1,
638 .self_init = true,
639 .inclusive = true,
640 .complex_indexing = true,
/* TLB definitions: */

#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512
/* CPUID Leaf 0x14 constants: */
#define INTEL_PT_MAX_SUBLEAF     0x1
/*
 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
 *          MSR can be accessed;
 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
 *          of Intel PT MSRs across warm reset;
 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
 */
#define INTEL_PT_MINIMAL_EBX     0xf
/*
 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
 *          IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
 *          accessed;
 * bit[01]: ToPA tables can hold any number of output entries, up to the
 *          maximum allowed by the MaskOrTableOffset field of
 *          IA32_RTIT_OUTPUT_MASK_PTRS;
 * bit[02]: Support Single-Range Output scheme;
 */
#define INTEL_PT_MINIMAL_ECX     0x7
/* generated packets which contain IP payloads have LIP values */
#define INTEL_PT_IP_LIP          (1u << 31) /* unsigned: 1 << 31 would overflow int */
#define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
#define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
#define INTEL_PT_MTC_BITMAP      (0x0249 << 16) /* Support ART(0,3,6,9) */
#define INTEL_PT_CYCLE_BITMAP    0x1fff         /* Support 0,2^(0~11) */
#define INTEL_PT_PSB_BITMAP      (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
694 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
695 uint32_t vendor2, uint32_t vendor3)
697 int i;
698 for (i = 0; i < 4; i++) {
699 dst[i] = vendor1 >> (8 * i);
700 dst[i + 4] = vendor2 >> (8 * i);
701 dst[i + 8] = vendor3 >> (8 * i);
703 dst[CPUID_VENDOR_SZ] = '\0';
/* CPUID feature sets for the built-in CPU models and for TCG emulation */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */   \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
          CPUID_EXT_F16C, CPUID_EXT_RDRAND */

#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES CPUID_SVM_NPT
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT |            \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
          CPUID_7_0_EBX_ERMS)
          /* missing:
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \
          /* CPUID_7_0_ECX_OSPKE is dynamic */ \
          CPUID_7_0_ECX_LA57)
#define TCG_7_0_EDX_FEATURES 0
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
/*
 * Per-feature-word metadata: flag names, the CPUID leaf/register the word
 * comes from, and policy masks (TCG support, migratability, autoenable).
 */
typedef struct FeatureWordInfo {
    /* feature flags names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[32];
    uint32_t cpuid_eax;   /* Input EAX for CPUID */
    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;   /* Input ECX value for CPUID */
    int cpuid_reg;        /* output register (R_* constant) */
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
    uint32_t migratable_flags; /* Feature flags known to be migratable */
    /* Features that shouldn't be auto-enabled by "-cpu host" */
    uint32_t no_autoenable_flags;
} FeatureWordInfo;
791 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
792 [FEAT_1_EDX] = {
793 .feat_names = {
794 "fpu", "vme", "de", "pse",
795 "tsc", "msr", "pae", "mce",
796 "cx8", "apic", NULL, "sep",
797 "mtrr", "pge", "mca", "cmov",
798 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
799 NULL, "ds" /* Intel dts */, "acpi", "mmx",
800 "fxsr", "sse", "sse2", "ss",
801 "ht" /* Intel htt */, "tm", "ia64", "pbe",
803 .cpuid_eax = 1, .cpuid_reg = R_EDX,
804 .tcg_features = TCG_FEATURES,
806 [FEAT_1_ECX] = {
807 .feat_names = {
808 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
809 "ds-cpl", "vmx", "smx", "est",
810 "tm2", "ssse3", "cid", NULL,
811 "fma", "cx16", "xtpr", "pdcm",
812 NULL, "pcid", "dca", "sse4.1",
813 "sse4.2", "x2apic", "movbe", "popcnt",
814 "tsc-deadline", "aes", "xsave", NULL /* osxsave */,
815 "avx", "f16c", "rdrand", "hypervisor",
817 .cpuid_eax = 1, .cpuid_reg = R_ECX,
818 .tcg_features = TCG_EXT_FEATURES,
820 /* Feature names that are already defined on feature_name[] but
821 * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
822 * names on feat_names below. They are copied automatically
823 * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
825 [FEAT_8000_0001_EDX] = {
826 .feat_names = {
827 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
828 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
829 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
830 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
831 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
832 "nx", NULL, "mmxext", NULL /* mmx */,
833 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
834 NULL, "lm", "3dnowext", "3dnow",
836 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
837 .tcg_features = TCG_EXT2_FEATURES,
839 [FEAT_8000_0001_ECX] = {
840 .feat_names = {
841 "lahf-lm", "cmp-legacy", "svm", "extapic",
842 "cr8legacy", "abm", "sse4a", "misalignsse",
843 "3dnowprefetch", "osvw", "ibs", "xop",
844 "skinit", "wdt", NULL, "lwp",
845 "fma4", "tce", NULL, "nodeid-msr",
846 NULL, "tbm", "topoext", "perfctr-core",
847 "perfctr-nb", NULL, NULL, NULL,
848 NULL, NULL, NULL, NULL,
850 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
851 .tcg_features = TCG_EXT3_FEATURES,
853 * TOPOEXT is always allowed but can't be enabled blindly by
854 * "-cpu host", as it requires consistent cache topology info
855 * to be provided so it doesn't confuse guests.
857 .no_autoenable_flags = CPUID_EXT3_TOPOEXT,
859 [FEAT_C000_0001_EDX] = {
860 .feat_names = {
861 NULL, NULL, "xstore", "xstore-en",
862 NULL, NULL, "xcrypt", "xcrypt-en",
863 "ace2", "ace2-en", "phe", "phe-en",
864 "pmm", "pmm-en", NULL, NULL,
865 NULL, NULL, NULL, NULL,
866 NULL, NULL, NULL, NULL,
867 NULL, NULL, NULL, NULL,
868 NULL, NULL, NULL, NULL,
870 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
871 .tcg_features = TCG_EXT4_FEATURES,
873 [FEAT_KVM] = {
874 .feat_names = {
875 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
876 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
877 NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi",
878 NULL, NULL, NULL, NULL,
879 NULL, NULL, NULL, NULL,
880 NULL, NULL, NULL, NULL,
881 "kvmclock-stable-bit", NULL, NULL, NULL,
882 NULL, NULL, NULL, NULL,
884 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
885 .tcg_features = TCG_KVM_FEATURES,
887 [FEAT_KVM_HINTS] = {
888 .feat_names = {
889 "kvm-hint-dedicated", NULL, NULL, NULL,
890 NULL, NULL, NULL, NULL,
891 NULL, NULL, NULL, NULL,
892 NULL, NULL, NULL, NULL,
893 NULL, NULL, NULL, NULL,
894 NULL, NULL, NULL, NULL,
895 NULL, NULL, NULL, NULL,
896 NULL, NULL, NULL, NULL,
898 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EDX,
899 .tcg_features = TCG_KVM_FEATURES,
901 * KVM hints aren't auto-enabled by -cpu host, they need to be
902 * explicitly enabled in the command-line.
904 .no_autoenable_flags = ~0U,
906 [FEAT_HYPERV_EAX] = {
907 .feat_names = {
908 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
909 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
910 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
911 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
912 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
913 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
914 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
915 NULL, NULL,
916 NULL, NULL, NULL, NULL,
917 NULL, NULL, NULL, NULL,
918 NULL, NULL, NULL, NULL,
919 NULL, NULL, NULL, NULL,
921 .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
923 [FEAT_HYPERV_EBX] = {
924 .feat_names = {
925 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
926 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
927 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
928 NULL /* hv_create_port */, NULL /* hv_connect_port */,
929 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
930 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
931 NULL, NULL,
932 NULL, NULL, NULL, NULL,
933 NULL, NULL, NULL, NULL,
934 NULL, NULL, NULL, NULL,
935 NULL, NULL, NULL, NULL,
937 .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
939 [FEAT_HYPERV_EDX] = {
940 .feat_names = {
941 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
942 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
943 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
944 NULL, NULL,
945 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
946 NULL, NULL, NULL, NULL,
947 NULL, NULL, NULL, NULL,
948 NULL, NULL, NULL, NULL,
949 NULL, NULL, NULL, NULL,
950 NULL, NULL, NULL, NULL,
952 .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
954 [FEAT_SVM] = {
955 .feat_names = {
956 "npt", "lbrv", "svm-lock", "nrip-save",
957 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
958 NULL, NULL, "pause-filter", NULL,
959 "pfthreshold", NULL, NULL, NULL,
960 NULL, NULL, NULL, NULL,
961 NULL, NULL, NULL, NULL,
962 NULL, NULL, NULL, NULL,
963 NULL, NULL, NULL, NULL,
965 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
966 .tcg_features = TCG_SVM_FEATURES,
968 [FEAT_7_0_EBX] = {
969 .feat_names = {
970 "fsgsbase", "tsc-adjust", NULL, "bmi1",
971 "hle", "avx2", NULL, "smep",
972 "bmi2", "erms", "invpcid", "rtm",
973 NULL, NULL, "mpx", NULL,
974 "avx512f", "avx512dq", "rdseed", "adx",
975 "smap", "avx512ifma", "pcommit", "clflushopt",
976 "clwb", "intel-pt", "avx512pf", "avx512er",
977 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
979 .cpuid_eax = 7,
980 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
981 .cpuid_reg = R_EBX,
982 .tcg_features = TCG_7_0_EBX_FEATURES,
984 [FEAT_7_0_ECX] = {
985 .feat_names = {
986 NULL, "avx512vbmi", "umip", "pku",
987 NULL /* ospke */, NULL, "avx512vbmi2", NULL,
988 "gfni", "vaes", "vpclmulqdq", "avx512vnni",
989 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
990 "la57", NULL, NULL, NULL,
991 NULL, NULL, "rdpid", NULL,
992 NULL, "cldemote", NULL, NULL,
993 NULL, NULL, NULL, NULL,
995 .cpuid_eax = 7,
996 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
997 .cpuid_reg = R_ECX,
998 .tcg_features = TCG_7_0_ECX_FEATURES,
1000 [FEAT_7_0_EDX] = {
1001 .feat_names = {
1002 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
1003 NULL, NULL, NULL, NULL,
1004 NULL, NULL, NULL, NULL,
1005 NULL, NULL, NULL, NULL,
1006 NULL, NULL, "pconfig", NULL,
1007 NULL, NULL, NULL, NULL,
1008 NULL, NULL, "spec-ctrl", NULL,
1009 NULL, "arch-capabilities", NULL, "ssbd",
1011 .cpuid_eax = 7,
1012 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
1013 .cpuid_reg = R_EDX,
1014 .tcg_features = TCG_7_0_EDX_FEATURES,
1015 .unmigratable_flags = CPUID_7_0_EDX_ARCH_CAPABILITIES,
1017 [FEAT_8000_0007_EDX] = {
1018 .feat_names = {
1019 NULL, NULL, NULL, NULL,
1020 NULL, NULL, NULL, NULL,
1021 "invtsc", NULL, NULL, NULL,
1022 NULL, NULL, NULL, NULL,
1023 NULL, NULL, NULL, NULL,
1024 NULL, NULL, NULL, NULL,
1025 NULL, NULL, NULL, NULL,
1026 NULL, NULL, NULL, NULL,
1028 .cpuid_eax = 0x80000007,
1029 .cpuid_reg = R_EDX,
1030 .tcg_features = TCG_APM_FEATURES,
1031 .unmigratable_flags = CPUID_APM_INVTSC,
1033 [FEAT_8000_0008_EBX] = {
1034 .feat_names = {
1035 NULL, NULL, NULL, NULL,
1036 NULL, NULL, NULL, NULL,
1037 NULL, "wbnoinvd", NULL, NULL,
1038 "ibpb", NULL, NULL, NULL,
1039 NULL, NULL, NULL, NULL,
1040 NULL, NULL, NULL, NULL,
1041 "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL,
1042 NULL, NULL, NULL, NULL,
1044 .cpuid_eax = 0x80000008,
1045 .cpuid_reg = R_EBX,
1046 .tcg_features = 0,
1047 .unmigratable_flags = 0,
1049 [FEAT_XSAVE] = {
1050 .feat_names = {
1051 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
1052 NULL, NULL, NULL, NULL,
1053 NULL, NULL, NULL, NULL,
1054 NULL, NULL, NULL, NULL,
1055 NULL, NULL, NULL, NULL,
1056 NULL, NULL, NULL, NULL,
1057 NULL, NULL, NULL, NULL,
1058 NULL, NULL, NULL, NULL,
1060 .cpuid_eax = 0xd,
1061 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
1062 .cpuid_reg = R_EAX,
1063 .tcg_features = TCG_XSAVE_FEATURES,
1065 [FEAT_6_EAX] = {
1066 .feat_names = {
1067 NULL, NULL, "arat", NULL,
1068 NULL, NULL, NULL, NULL,
1069 NULL, NULL, NULL, NULL,
1070 NULL, NULL, NULL, NULL,
1071 NULL, NULL, NULL, NULL,
1072 NULL, NULL, NULL, NULL,
1073 NULL, NULL, NULL, NULL,
1074 NULL, NULL, NULL, NULL,
1076 .cpuid_eax = 6, .cpuid_reg = R_EAX,
1077 .tcg_features = TCG_6_EAX_FEATURES,
1079 [FEAT_XSAVE_COMP_LO] = {
1080 .cpuid_eax = 0xD,
1081 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
1082 .cpuid_reg = R_EAX,
1083 .tcg_features = ~0U,
1084 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
1085 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
1086 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
1087 XSTATE_PKRU_MASK,
1089 [FEAT_XSAVE_COMP_HI] = {
1090 .cpuid_eax = 0xD,
1091 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
1092 .cpuid_reg = R_EDX,
1093 .tcg_features = ~0U,
/*
 * Mapping between a 32-bit x86 register index (R_EAX..R_EDI) and the
 * information needed to expose it through QAPI.
 */
typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;
/*
 * Table mapping register indexes to their names and QAPI enum values.
 * The REGISTER() helper keeps the R_* index, the name string and the
 * X86_CPU_REGISTER32_* constant in sync for each entry.
 */
#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER
/*
 * Describes one XSAVE state component: the CPUID feature word/bits that
 * make it available, and its offset/size within the XSAVE area.
 */
typedef struct ExtSaveArea {
    /* Feature word index and bit mask gating this component */
    uint32_t feature, bits;
    /* Location of the component inside the XSAVE area, in bytes */
    uint32_t offset, size;
} ExtSaveArea;
/*
 * Table of XSAVE state components, indexed by XSTATE_*_BIT.  Each entry
 * records the CPUID feature bit that enables the component and where the
 * component lives inside the XSAVE area.
 */
static const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_FP_BIT] = {
        /* x87 FP state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* x87 state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_SSE_BIT] = {
        /* SSE state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* SSE state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};
1168 static uint32_t xsave_area_size(uint64_t mask)
1170 int i;
1171 uint64_t ret = 0;
1173 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
1174 const ExtSaveArea *esa = &x86_ext_save_areas[i];
1175 if ((mask >> i) & 1) {
1176 ret = MAX(ret, esa->offset + esa->size);
1179 return ret;
/*
 * True when the active accelerator (KVM or HVF) derives guest CPUID data
 * from the host CPU.
 */
static inline bool accel_uses_host_cpuid(void)
{
    return kvm_enabled() || hvf_enabled();
}
1187 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
1189 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
1190 cpu->env.features[FEAT_XSAVE_COMP_LO];
1193 const char *get_register_name_32(unsigned int reg)
1195 if (reg >= CPU_NB_REGS32) {
1196 return NULL;
1198 return x86_reg_info_32[reg].name;
1202 * Returns the set of feature flags that are supported and migratable by
1203 * QEMU, for a given FeatureWord.
1205 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
1207 FeatureWordInfo *wi = &feature_word_info[w];
1208 uint32_t r = 0;
1209 int i;
1211 for (i = 0; i < 32; i++) {
1212 uint32_t f = 1U << i;
1214 /* If the feature name is known, it is implicitly considered migratable,
1215 * unless it is explicitly set in unmigratable_flags */
1216 if ((wi->migratable_flags & f) ||
1217 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
1218 r |= f;
1221 return r;
/*
 * Execute the CPUID instruction on the host CPU.
 *
 * @function and @count are loaded into EAX and ECX before CPUID runs.
 * Each of @eax/@ebx/@ecx/@edx may be NULL if the caller does not need
 * that output register.  Aborts when compiled for a non-x86 host.
 */
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    /* NOTE(review): pusha/popa with stores through %esi instead of output
     * operands — presumably because EBX cannot be listed as clobbered on
     * some 32-bit (PIC) builds; confirm before simplifying. */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#else
    abort();
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
}
/*
 * Read the host CPU's vendor string and family/model/stepping via CPUID.
 *
 * @vendor receives the 12-character vendor string (filled in by
 * x86_cpu_vendor_words2str from the leaf-0 EBX/EDX/ECX words).  Any of
 * @family/@model/@stepping may be NULL.
 */
void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
{
    uint32_t eax, ebx, ecx, edx;

    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    if (family) {
        /* base family (bits 11:8) plus extended family (bits 27:20) */
        *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    }
    if (model) {
        /* base model (bits 7:4), with extended model (19:16) above it */
        *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    }
    if (stepping) {
        *stepping = eax & 0x0F;
    }
}
1277 /* CPU class name definitions: */
/* Return type name for a given CPU model name
 * (model name plus the X86 CPU type suffix, via X86_CPU_TYPE_NAME).
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}
1287 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
1289 ObjectClass *oc;
1290 char *typename = x86_cpu_type_name(cpu_model);
1291 oc = object_class_by_name(typename);
1292 g_free(typename);
1293 return oc;
1296 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
1298 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
1299 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
1300 return g_strndup(class_name,
1301 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
/* Definition of one built-in CPU model (see builtin_x86_defs) */
struct X86CPUDefinition {
    /* Model name as used on the command line, e.g. "Haswell" */
    const char *name;
    /* Maximum standard CPUID level */
    uint32_t level;
    /* Maximum extended CPUID level (0x8000xxxx range) */
    uint32_t xlevel;
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    /* Initial value for every feature word */
    FeatureWordArray features;
    /* Human-readable model-id string */
    const char *model_id;
    /* Optional cache topology; NULL selects the default cache info */
    CPUCaches *cache_info;
};
/* Cache topology of the AMD EPYC models: 32K L1D, 64K L1I, 512K L2, 8M L3 */
static CPUCaches epyc_cache_info = {
    .l1d_cache = &(CPUCacheInfo) {
        .type = DATA_CACHE,
        .level = 1,
        .size = 32 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 64,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l1i_cache = &(CPUCacheInfo) {
        .type = INSTRUCTION_CACHE,
        .level = 1,
        .size = 64 * KiB,
        .line_size = 64,
        .associativity = 4,
        .partitions = 1,
        .sets = 256,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l2_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 2,
        .size = 512 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 1024,
        .lines_per_tag = 1,
    },
    .l3_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 3,
        .size = 8 * MiB,
        .line_size = 64,
        .associativity = 16,
        .partitions = 1,
        .sets = 8192,
        .lines_per_tag = 1,
        .self_init = true,
        .inclusive = true,
        .complex_indexing = true,
    },
};
1368 static X86CPUDefinition builtin_x86_defs[] = {
1370 .name = "qemu64",
1371 .level = 0xd,
1372 .vendor = CPUID_VENDOR_AMD,
1373 .family = 6,
1374 .model = 6,
1375 .stepping = 3,
1376 .features[FEAT_1_EDX] =
1377 PPRO_FEATURES |
1378 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1379 CPUID_PSE36,
1380 .features[FEAT_1_ECX] =
1381 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1382 .features[FEAT_8000_0001_EDX] =
1383 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1384 .features[FEAT_8000_0001_ECX] =
1385 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
1386 .xlevel = 0x8000000A,
1387 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1390 .name = "phenom",
1391 .level = 5,
1392 .vendor = CPUID_VENDOR_AMD,
1393 .family = 16,
1394 .model = 2,
1395 .stepping = 3,
1396 /* Missing: CPUID_HT */
1397 .features[FEAT_1_EDX] =
1398 PPRO_FEATURES |
1399 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1400 CPUID_PSE36 | CPUID_VME,
1401 .features[FEAT_1_ECX] =
1402 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
1403 CPUID_EXT_POPCNT,
1404 .features[FEAT_8000_0001_EDX] =
1405 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
1406 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
1407 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
1408 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1409 CPUID_EXT3_CR8LEG,
1410 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1411 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
1412 .features[FEAT_8000_0001_ECX] =
1413 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
1414 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
1415 /* Missing: CPUID_SVM_LBRV */
1416 .features[FEAT_SVM] =
1417 CPUID_SVM_NPT,
1418 .xlevel = 0x8000001A,
1419 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
1422 .name = "core2duo",
1423 .level = 10,
1424 .vendor = CPUID_VENDOR_INTEL,
1425 .family = 6,
1426 .model = 15,
1427 .stepping = 11,
1428 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1429 .features[FEAT_1_EDX] =
1430 PPRO_FEATURES |
1431 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1432 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
1433 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
1434 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
1435 .features[FEAT_1_ECX] =
1436 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1437 CPUID_EXT_CX16,
1438 .features[FEAT_8000_0001_EDX] =
1439 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1440 .features[FEAT_8000_0001_ECX] =
1441 CPUID_EXT3_LAHF_LM,
1442 .xlevel = 0x80000008,
1443 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
1446 .name = "kvm64",
1447 .level = 0xd,
1448 .vendor = CPUID_VENDOR_INTEL,
1449 .family = 15,
1450 .model = 6,
1451 .stepping = 1,
1452 /* Missing: CPUID_HT */
1453 .features[FEAT_1_EDX] =
1454 PPRO_FEATURES | CPUID_VME |
1455 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1456 CPUID_PSE36,
1457 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
1458 .features[FEAT_1_ECX] =
1459 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1460 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
1461 .features[FEAT_8000_0001_EDX] =
1462 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1463 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1464 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
1465 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1466 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
1467 .features[FEAT_8000_0001_ECX] =
1469 .xlevel = 0x80000008,
1470 .model_id = "Common KVM processor"
1473 .name = "qemu32",
1474 .level = 4,
1475 .vendor = CPUID_VENDOR_INTEL,
1476 .family = 6,
1477 .model = 6,
1478 .stepping = 3,
1479 .features[FEAT_1_EDX] =
1480 PPRO_FEATURES,
1481 .features[FEAT_1_ECX] =
1482 CPUID_EXT_SSE3,
1483 .xlevel = 0x80000004,
1484 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1487 .name = "kvm32",
1488 .level = 5,
1489 .vendor = CPUID_VENDOR_INTEL,
1490 .family = 15,
1491 .model = 6,
1492 .stepping = 1,
1493 .features[FEAT_1_EDX] =
1494 PPRO_FEATURES | CPUID_VME |
1495 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
1496 .features[FEAT_1_ECX] =
1497 CPUID_EXT_SSE3,
1498 .features[FEAT_8000_0001_ECX] =
1500 .xlevel = 0x80000008,
1501 .model_id = "Common 32-bit KVM processor"
1504 .name = "coreduo",
1505 .level = 10,
1506 .vendor = CPUID_VENDOR_INTEL,
1507 .family = 6,
1508 .model = 14,
1509 .stepping = 8,
1510 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1511 .features[FEAT_1_EDX] =
1512 PPRO_FEATURES | CPUID_VME |
1513 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
1514 CPUID_SS,
1515 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
1516 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
1517 .features[FEAT_1_ECX] =
1518 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
1519 .features[FEAT_8000_0001_EDX] =
1520 CPUID_EXT2_NX,
1521 .xlevel = 0x80000008,
1522 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
1525 .name = "486",
1526 .level = 1,
1527 .vendor = CPUID_VENDOR_INTEL,
1528 .family = 4,
1529 .model = 8,
1530 .stepping = 0,
1531 .features[FEAT_1_EDX] =
1532 I486_FEATURES,
1533 .xlevel = 0,
1534 .model_id = "",
1537 .name = "pentium",
1538 .level = 1,
1539 .vendor = CPUID_VENDOR_INTEL,
1540 .family = 5,
1541 .model = 4,
1542 .stepping = 3,
1543 .features[FEAT_1_EDX] =
1544 PENTIUM_FEATURES,
1545 .xlevel = 0,
1546 .model_id = "",
1549 .name = "pentium2",
1550 .level = 2,
1551 .vendor = CPUID_VENDOR_INTEL,
1552 .family = 6,
1553 .model = 5,
1554 .stepping = 2,
1555 .features[FEAT_1_EDX] =
1556 PENTIUM2_FEATURES,
1557 .xlevel = 0,
1558 .model_id = "",
1561 .name = "pentium3",
1562 .level = 3,
1563 .vendor = CPUID_VENDOR_INTEL,
1564 .family = 6,
1565 .model = 7,
1566 .stepping = 3,
1567 .features[FEAT_1_EDX] =
1568 PENTIUM3_FEATURES,
1569 .xlevel = 0,
1570 .model_id = "",
1573 .name = "athlon",
1574 .level = 2,
1575 .vendor = CPUID_VENDOR_AMD,
1576 .family = 6,
1577 .model = 2,
1578 .stepping = 3,
1579 .features[FEAT_1_EDX] =
1580 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
1581 CPUID_MCA,
1582 .features[FEAT_8000_0001_EDX] =
1583 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
1584 .xlevel = 0x80000008,
1585 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1588 .name = "n270",
1589 .level = 10,
1590 .vendor = CPUID_VENDOR_INTEL,
1591 .family = 6,
1592 .model = 28,
1593 .stepping = 2,
1594 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1595 .features[FEAT_1_EDX] =
1596 PPRO_FEATURES |
1597 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
1598 CPUID_ACPI | CPUID_SS,
1599 /* Some CPUs got no CPUID_SEP */
1600 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
1601 * CPUID_EXT_XTPR */
1602 .features[FEAT_1_ECX] =
1603 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1604 CPUID_EXT_MOVBE,
1605 .features[FEAT_8000_0001_EDX] =
1606 CPUID_EXT2_NX,
1607 .features[FEAT_8000_0001_ECX] =
1608 CPUID_EXT3_LAHF_LM,
1609 .xlevel = 0x80000008,
1610 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
1613 .name = "Conroe",
1614 .level = 10,
1615 .vendor = CPUID_VENDOR_INTEL,
1616 .family = 6,
1617 .model = 15,
1618 .stepping = 3,
1619 .features[FEAT_1_EDX] =
1620 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1621 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1622 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1623 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1624 CPUID_DE | CPUID_FP87,
1625 .features[FEAT_1_ECX] =
1626 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1627 .features[FEAT_8000_0001_EDX] =
1628 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1629 .features[FEAT_8000_0001_ECX] =
1630 CPUID_EXT3_LAHF_LM,
1631 .xlevel = 0x80000008,
1632 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1635 .name = "Penryn",
1636 .level = 10,
1637 .vendor = CPUID_VENDOR_INTEL,
1638 .family = 6,
1639 .model = 23,
1640 .stepping = 3,
1641 .features[FEAT_1_EDX] =
1642 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1643 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1644 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1645 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1646 CPUID_DE | CPUID_FP87,
1647 .features[FEAT_1_ECX] =
1648 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1649 CPUID_EXT_SSE3,
1650 .features[FEAT_8000_0001_EDX] =
1651 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1652 .features[FEAT_8000_0001_ECX] =
1653 CPUID_EXT3_LAHF_LM,
1654 .xlevel = 0x80000008,
1655 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1658 .name = "Nehalem",
1659 .level = 11,
1660 .vendor = CPUID_VENDOR_INTEL,
1661 .family = 6,
1662 .model = 26,
1663 .stepping = 3,
1664 .features[FEAT_1_EDX] =
1665 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1666 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1667 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1668 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1669 CPUID_DE | CPUID_FP87,
1670 .features[FEAT_1_ECX] =
1671 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1672 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1673 .features[FEAT_8000_0001_EDX] =
1674 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1675 .features[FEAT_8000_0001_ECX] =
1676 CPUID_EXT3_LAHF_LM,
1677 .xlevel = 0x80000008,
1678 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1681 .name = "Nehalem-IBRS",
1682 .level = 11,
1683 .vendor = CPUID_VENDOR_INTEL,
1684 .family = 6,
1685 .model = 26,
1686 .stepping = 3,
1687 .features[FEAT_1_EDX] =
1688 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1689 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1690 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1691 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1692 CPUID_DE | CPUID_FP87,
1693 .features[FEAT_1_ECX] =
1694 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1695 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1696 .features[FEAT_7_0_EDX] =
1697 CPUID_7_0_EDX_SPEC_CTRL,
1698 .features[FEAT_8000_0001_EDX] =
1699 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1700 .features[FEAT_8000_0001_ECX] =
1701 CPUID_EXT3_LAHF_LM,
1702 .xlevel = 0x80000008,
1703 .model_id = "Intel Core i7 9xx (Nehalem Core i7, IBRS update)",
1706 .name = "Westmere",
1707 .level = 11,
1708 .vendor = CPUID_VENDOR_INTEL,
1709 .family = 6,
1710 .model = 44,
1711 .stepping = 1,
1712 .features[FEAT_1_EDX] =
1713 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1714 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1715 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1716 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1717 CPUID_DE | CPUID_FP87,
1718 .features[FEAT_1_ECX] =
1719 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1720 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1721 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1722 .features[FEAT_8000_0001_EDX] =
1723 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1724 .features[FEAT_8000_0001_ECX] =
1725 CPUID_EXT3_LAHF_LM,
1726 .features[FEAT_6_EAX] =
1727 CPUID_6_EAX_ARAT,
1728 .xlevel = 0x80000008,
1729 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1732 .name = "Westmere-IBRS",
1733 .level = 11,
1734 .vendor = CPUID_VENDOR_INTEL,
1735 .family = 6,
1736 .model = 44,
1737 .stepping = 1,
1738 .features[FEAT_1_EDX] =
1739 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1740 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1741 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1742 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1743 CPUID_DE | CPUID_FP87,
1744 .features[FEAT_1_ECX] =
1745 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1746 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1747 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1748 .features[FEAT_8000_0001_EDX] =
1749 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1750 .features[FEAT_8000_0001_ECX] =
1751 CPUID_EXT3_LAHF_LM,
1752 .features[FEAT_7_0_EDX] =
1753 CPUID_7_0_EDX_SPEC_CTRL,
1754 .features[FEAT_6_EAX] =
1755 CPUID_6_EAX_ARAT,
1756 .xlevel = 0x80000008,
1757 .model_id = "Westmere E56xx/L56xx/X56xx (IBRS update)",
1760 .name = "SandyBridge",
1761 .level = 0xd,
1762 .vendor = CPUID_VENDOR_INTEL,
1763 .family = 6,
1764 .model = 42,
1765 .stepping = 1,
1766 .features[FEAT_1_EDX] =
1767 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1768 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1769 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1770 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1771 CPUID_DE | CPUID_FP87,
1772 .features[FEAT_1_ECX] =
1773 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1774 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1775 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1776 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1777 CPUID_EXT_SSE3,
1778 .features[FEAT_8000_0001_EDX] =
1779 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1780 CPUID_EXT2_SYSCALL,
1781 .features[FEAT_8000_0001_ECX] =
1782 CPUID_EXT3_LAHF_LM,
1783 .features[FEAT_XSAVE] =
1784 CPUID_XSAVE_XSAVEOPT,
1785 .features[FEAT_6_EAX] =
1786 CPUID_6_EAX_ARAT,
1787 .xlevel = 0x80000008,
1788 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1791 .name = "SandyBridge-IBRS",
1792 .level = 0xd,
1793 .vendor = CPUID_VENDOR_INTEL,
1794 .family = 6,
1795 .model = 42,
1796 .stepping = 1,
1797 .features[FEAT_1_EDX] =
1798 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1799 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1800 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1801 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1802 CPUID_DE | CPUID_FP87,
1803 .features[FEAT_1_ECX] =
1804 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1805 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1806 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1807 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1808 CPUID_EXT_SSE3,
1809 .features[FEAT_8000_0001_EDX] =
1810 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1811 CPUID_EXT2_SYSCALL,
1812 .features[FEAT_8000_0001_ECX] =
1813 CPUID_EXT3_LAHF_LM,
1814 .features[FEAT_7_0_EDX] =
1815 CPUID_7_0_EDX_SPEC_CTRL,
1816 .features[FEAT_XSAVE] =
1817 CPUID_XSAVE_XSAVEOPT,
1818 .features[FEAT_6_EAX] =
1819 CPUID_6_EAX_ARAT,
1820 .xlevel = 0x80000008,
1821 .model_id = "Intel Xeon E312xx (Sandy Bridge, IBRS update)",
1824 .name = "IvyBridge",
1825 .level = 0xd,
1826 .vendor = CPUID_VENDOR_INTEL,
1827 .family = 6,
1828 .model = 58,
1829 .stepping = 9,
1830 .features[FEAT_1_EDX] =
1831 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1832 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1833 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1834 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1835 CPUID_DE | CPUID_FP87,
1836 .features[FEAT_1_ECX] =
1837 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1838 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1839 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1840 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1841 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1842 .features[FEAT_7_0_EBX] =
1843 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1844 CPUID_7_0_EBX_ERMS,
1845 .features[FEAT_8000_0001_EDX] =
1846 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1847 CPUID_EXT2_SYSCALL,
1848 .features[FEAT_8000_0001_ECX] =
1849 CPUID_EXT3_LAHF_LM,
1850 .features[FEAT_XSAVE] =
1851 CPUID_XSAVE_XSAVEOPT,
1852 .features[FEAT_6_EAX] =
1853 CPUID_6_EAX_ARAT,
1854 .xlevel = 0x80000008,
1855 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1858 .name = "IvyBridge-IBRS",
1859 .level = 0xd,
1860 .vendor = CPUID_VENDOR_INTEL,
1861 .family = 6,
1862 .model = 58,
1863 .stepping = 9,
1864 .features[FEAT_1_EDX] =
1865 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1866 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1867 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1868 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1869 CPUID_DE | CPUID_FP87,
1870 .features[FEAT_1_ECX] =
1871 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1872 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1873 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1874 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1875 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1876 .features[FEAT_7_0_EBX] =
1877 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1878 CPUID_7_0_EBX_ERMS,
1879 .features[FEAT_8000_0001_EDX] =
1880 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1881 CPUID_EXT2_SYSCALL,
1882 .features[FEAT_8000_0001_ECX] =
1883 CPUID_EXT3_LAHF_LM,
1884 .features[FEAT_7_0_EDX] =
1885 CPUID_7_0_EDX_SPEC_CTRL,
1886 .features[FEAT_XSAVE] =
1887 CPUID_XSAVE_XSAVEOPT,
1888 .features[FEAT_6_EAX] =
1889 CPUID_6_EAX_ARAT,
1890 .xlevel = 0x80000008,
1891 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)",
1894 .name = "Haswell-noTSX",
1895 .level = 0xd,
1896 .vendor = CPUID_VENDOR_INTEL,
1897 .family = 6,
1898 .model = 60,
1899 .stepping = 1,
1900 .features[FEAT_1_EDX] =
1901 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1902 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1903 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1904 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1905 CPUID_DE | CPUID_FP87,
1906 .features[FEAT_1_ECX] =
1907 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1908 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1909 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1910 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1911 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1912 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1913 .features[FEAT_8000_0001_EDX] =
1914 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1915 CPUID_EXT2_SYSCALL,
1916 .features[FEAT_8000_0001_ECX] =
1917 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1918 .features[FEAT_7_0_EBX] =
1919 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1920 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1921 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1922 .features[FEAT_XSAVE] =
1923 CPUID_XSAVE_XSAVEOPT,
1924 .features[FEAT_6_EAX] =
1925 CPUID_6_EAX_ARAT,
1926 .xlevel = 0x80000008,
1927 .model_id = "Intel Core Processor (Haswell, no TSX)",
1930 .name = "Haswell-noTSX-IBRS",
1931 .level = 0xd,
1932 .vendor = CPUID_VENDOR_INTEL,
1933 .family = 6,
1934 .model = 60,
1935 .stepping = 1,
1936 .features[FEAT_1_EDX] =
1937 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1938 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1939 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1940 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1941 CPUID_DE | CPUID_FP87,
1942 .features[FEAT_1_ECX] =
1943 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1944 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1945 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1946 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1947 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1948 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1949 .features[FEAT_8000_0001_EDX] =
1950 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1951 CPUID_EXT2_SYSCALL,
1952 .features[FEAT_8000_0001_ECX] =
1953 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1954 .features[FEAT_7_0_EDX] =
1955 CPUID_7_0_EDX_SPEC_CTRL,
1956 .features[FEAT_7_0_EBX] =
1957 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1958 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1959 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1960 .features[FEAT_XSAVE] =
1961 CPUID_XSAVE_XSAVEOPT,
1962 .features[FEAT_6_EAX] =
1963 CPUID_6_EAX_ARAT,
1964 .xlevel = 0x80000008,
1965 .model_id = "Intel Core Processor (Haswell, no TSX, IBRS)",
1968 .name = "Haswell",
1969 .level = 0xd,
1970 .vendor = CPUID_VENDOR_INTEL,
1971 .family = 6,
1972 .model = 60,
1973 .stepping = 4,
1974 .features[FEAT_1_EDX] =
1975 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1976 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1977 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1978 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1979 CPUID_DE | CPUID_FP87,
1980 .features[FEAT_1_ECX] =
1981 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1982 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1983 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1984 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1985 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1986 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1987 .features[FEAT_8000_0001_EDX] =
1988 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1989 CPUID_EXT2_SYSCALL,
1990 .features[FEAT_8000_0001_ECX] =
1991 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1992 .features[FEAT_7_0_EBX] =
1993 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1994 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1995 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1996 CPUID_7_0_EBX_RTM,
1997 .features[FEAT_XSAVE] =
1998 CPUID_XSAVE_XSAVEOPT,
1999 .features[FEAT_6_EAX] =
2000 CPUID_6_EAX_ARAT,
2001 .xlevel = 0x80000008,
2002 .model_id = "Intel Core Processor (Haswell)",
2005 .name = "Haswell-IBRS",
2006 .level = 0xd,
2007 .vendor = CPUID_VENDOR_INTEL,
2008 .family = 6,
2009 .model = 60,
2010 .stepping = 4,
2011 .features[FEAT_1_EDX] =
2012 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2013 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2014 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2015 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2016 CPUID_DE | CPUID_FP87,
2017 .features[FEAT_1_ECX] =
2018 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2019 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2020 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2021 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2022 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2023 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2024 .features[FEAT_8000_0001_EDX] =
2025 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2026 CPUID_EXT2_SYSCALL,
2027 .features[FEAT_8000_0001_ECX] =
2028 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
2029 .features[FEAT_7_0_EDX] =
2030 CPUID_7_0_EDX_SPEC_CTRL,
2031 .features[FEAT_7_0_EBX] =
2032 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2033 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2034 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2035 CPUID_7_0_EBX_RTM,
2036 .features[FEAT_XSAVE] =
2037 CPUID_XSAVE_XSAVEOPT,
2038 .features[FEAT_6_EAX] =
2039 CPUID_6_EAX_ARAT,
2040 .xlevel = 0x80000008,
2041 .model_id = "Intel Core Processor (Haswell, IBRS)",
2044 .name = "Broadwell-noTSX",
2045 .level = 0xd,
2046 .vendor = CPUID_VENDOR_INTEL,
2047 .family = 6,
2048 .model = 61,
2049 .stepping = 2,
2050 .features[FEAT_1_EDX] =
2051 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2052 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2053 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2054 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2055 CPUID_DE | CPUID_FP87,
2056 .features[FEAT_1_ECX] =
2057 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2058 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2059 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2060 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2061 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2062 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2063 .features[FEAT_8000_0001_EDX] =
2064 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2065 CPUID_EXT2_SYSCALL,
2066 .features[FEAT_8000_0001_ECX] =
2067 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2068 .features[FEAT_7_0_EBX] =
2069 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2070 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2071 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2072 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2073 CPUID_7_0_EBX_SMAP,
2074 .features[FEAT_XSAVE] =
2075 CPUID_XSAVE_XSAVEOPT,
2076 .features[FEAT_6_EAX] =
2077 CPUID_6_EAX_ARAT,
2078 .xlevel = 0x80000008,
2079 .model_id = "Intel Core Processor (Broadwell, no TSX)",
2082 .name = "Broadwell-noTSX-IBRS",
2083 .level = 0xd,
2084 .vendor = CPUID_VENDOR_INTEL,
2085 .family = 6,
2086 .model = 61,
2087 .stepping = 2,
2088 .features[FEAT_1_EDX] =
2089 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2090 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2091 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2092 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2093 CPUID_DE | CPUID_FP87,
2094 .features[FEAT_1_ECX] =
2095 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2096 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2097 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2098 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2099 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2100 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2101 .features[FEAT_8000_0001_EDX] =
2102 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2103 CPUID_EXT2_SYSCALL,
2104 .features[FEAT_8000_0001_ECX] =
2105 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2106 .features[FEAT_7_0_EDX] =
2107 CPUID_7_0_EDX_SPEC_CTRL,
2108 .features[FEAT_7_0_EBX] =
2109 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2110 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2111 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2112 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2113 CPUID_7_0_EBX_SMAP,
2114 .features[FEAT_XSAVE] =
2115 CPUID_XSAVE_XSAVEOPT,
2116 .features[FEAT_6_EAX] =
2117 CPUID_6_EAX_ARAT,
2118 .xlevel = 0x80000008,
2119 .model_id = "Intel Core Processor (Broadwell, no TSX, IBRS)",
2122 .name = "Broadwell",
2123 .level = 0xd,
2124 .vendor = CPUID_VENDOR_INTEL,
2125 .family = 6,
2126 .model = 61,
2127 .stepping = 2,
2128 .features[FEAT_1_EDX] =
2129 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2130 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2131 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2132 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2133 CPUID_DE | CPUID_FP87,
2134 .features[FEAT_1_ECX] =
2135 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2136 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2137 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2138 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2139 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2140 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2141 .features[FEAT_8000_0001_EDX] =
2142 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2143 CPUID_EXT2_SYSCALL,
2144 .features[FEAT_8000_0001_ECX] =
2145 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2146 .features[FEAT_7_0_EBX] =
2147 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2148 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2149 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2150 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2151 CPUID_7_0_EBX_SMAP,
2152 .features[FEAT_XSAVE] =
2153 CPUID_XSAVE_XSAVEOPT,
2154 .features[FEAT_6_EAX] =
2155 CPUID_6_EAX_ARAT,
2156 .xlevel = 0x80000008,
2157 .model_id = "Intel Core Processor (Broadwell)",
2160 .name = "Broadwell-IBRS",
2161 .level = 0xd,
2162 .vendor = CPUID_VENDOR_INTEL,
2163 .family = 6,
2164 .model = 61,
2165 .stepping = 2,
2166 .features[FEAT_1_EDX] =
2167 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2168 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2169 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2170 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2171 CPUID_DE | CPUID_FP87,
2172 .features[FEAT_1_ECX] =
2173 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2174 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2175 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2176 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2177 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2178 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2179 .features[FEAT_8000_0001_EDX] =
2180 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2181 CPUID_EXT2_SYSCALL,
2182 .features[FEAT_8000_0001_ECX] =
2183 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2184 .features[FEAT_7_0_EDX] =
2185 CPUID_7_0_EDX_SPEC_CTRL,
2186 .features[FEAT_7_0_EBX] =
2187 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2188 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2189 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2190 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2191 CPUID_7_0_EBX_SMAP,
2192 .features[FEAT_XSAVE] =
2193 CPUID_XSAVE_XSAVEOPT,
2194 .features[FEAT_6_EAX] =
2195 CPUID_6_EAX_ARAT,
2196 .xlevel = 0x80000008,
2197 .model_id = "Intel Core Processor (Broadwell, IBRS)",
2200 .name = "Skylake-Client",
2201 .level = 0xd,
2202 .vendor = CPUID_VENDOR_INTEL,
2203 .family = 6,
2204 .model = 94,
2205 .stepping = 3,
2206 .features[FEAT_1_EDX] =
2207 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2208 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2209 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2210 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2211 CPUID_DE | CPUID_FP87,
2212 .features[FEAT_1_ECX] =
2213 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2214 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2215 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2216 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2217 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2218 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2219 .features[FEAT_8000_0001_EDX] =
2220 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2221 CPUID_EXT2_SYSCALL,
2222 .features[FEAT_8000_0001_ECX] =
2223 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2224 .features[FEAT_7_0_EBX] =
2225 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2226 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2227 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2228 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2229 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
2230 /* Missing: XSAVES (not supported by some Linux versions,
2231 * including v4.1 to v4.12).
2232 * KVM doesn't yet expose any XSAVES state save component,
2233 * and the only one defined in Skylake (processor tracing)
2234 * probably will block migration anyway.
2236 .features[FEAT_XSAVE] =
2237 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2238 CPUID_XSAVE_XGETBV1,
2239 .features[FEAT_6_EAX] =
2240 CPUID_6_EAX_ARAT,
2241 .xlevel = 0x80000008,
2242 .model_id = "Intel Core Processor (Skylake)",
2245 .name = "Skylake-Client-IBRS",
2246 .level = 0xd,
2247 .vendor = CPUID_VENDOR_INTEL,
2248 .family = 6,
2249 .model = 94,
2250 .stepping = 3,
2251 .features[FEAT_1_EDX] =
2252 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2253 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2254 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2255 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2256 CPUID_DE | CPUID_FP87,
2257 .features[FEAT_1_ECX] =
2258 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2259 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2260 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2261 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2262 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2263 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2264 .features[FEAT_8000_0001_EDX] =
2265 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2266 CPUID_EXT2_SYSCALL,
2267 .features[FEAT_8000_0001_ECX] =
2268 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2269 .features[FEAT_7_0_EDX] =
2270 CPUID_7_0_EDX_SPEC_CTRL,
2271 .features[FEAT_7_0_EBX] =
2272 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2273 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2274 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2275 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2276 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
2277 /* Missing: XSAVES (not supported by some Linux versions,
2278 * including v4.1 to v4.12).
2279 * KVM doesn't yet expose any XSAVES state save component,
2280 * and the only one defined in Skylake (processor tracing)
2281 * probably will block migration anyway.
2283 .features[FEAT_XSAVE] =
2284 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2285 CPUID_XSAVE_XGETBV1,
2286 .features[FEAT_6_EAX] =
2287 CPUID_6_EAX_ARAT,
2288 .xlevel = 0x80000008,
2289 .model_id = "Intel Core Processor (Skylake, IBRS)",
2292 .name = "Skylake-Server",
2293 .level = 0xd,
2294 .vendor = CPUID_VENDOR_INTEL,
2295 .family = 6,
2296 .model = 85,
2297 .stepping = 4,
2298 .features[FEAT_1_EDX] =
2299 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2300 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2301 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2302 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2303 CPUID_DE | CPUID_FP87,
2304 .features[FEAT_1_ECX] =
2305 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2306 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2307 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2308 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2309 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2310 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2311 .features[FEAT_8000_0001_EDX] =
2312 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2313 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2314 .features[FEAT_8000_0001_ECX] =
2315 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2316 .features[FEAT_7_0_EBX] =
2317 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2318 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2319 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2320 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2321 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
2322 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2323 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2324 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2325 /* Missing: XSAVES (not supported by some Linux versions,
2326 * including v4.1 to v4.12).
2327 * KVM doesn't yet expose any XSAVES state save component,
2328 * and the only one defined in Skylake (processor tracing)
2329 * probably will block migration anyway.
2331 .features[FEAT_XSAVE] =
2332 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2333 CPUID_XSAVE_XGETBV1,
2334 .features[FEAT_6_EAX] =
2335 CPUID_6_EAX_ARAT,
2336 .xlevel = 0x80000008,
2337 .model_id = "Intel Xeon Processor (Skylake)",
2340 .name = "Skylake-Server-IBRS",
2341 .level = 0xd,
2342 .vendor = CPUID_VENDOR_INTEL,
2343 .family = 6,
2344 .model = 85,
2345 .stepping = 4,
2346 .features[FEAT_1_EDX] =
2347 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2348 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2349 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2350 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2351 CPUID_DE | CPUID_FP87,
2352 .features[FEAT_1_ECX] =
2353 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2354 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2355 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2356 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2357 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2358 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2359 .features[FEAT_8000_0001_EDX] =
2360 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2361 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2362 .features[FEAT_8000_0001_ECX] =
2363 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2364 .features[FEAT_7_0_EDX] =
2365 CPUID_7_0_EDX_SPEC_CTRL,
2366 .features[FEAT_7_0_EBX] =
2367 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2368 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2369 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2370 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2371 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
2372 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2373 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2374 CPUID_7_0_EBX_AVX512VL,
2375 /* Missing: XSAVES (not supported by some Linux versions,
2376 * including v4.1 to v4.12).
2377 * KVM doesn't yet expose any XSAVES state save component,
2378 * and the only one defined in Skylake (processor tracing)
2379 * probably will block migration anyway.
2381 .features[FEAT_XSAVE] =
2382 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2383 CPUID_XSAVE_XGETBV1,
2384 .features[FEAT_6_EAX] =
2385 CPUID_6_EAX_ARAT,
2386 .xlevel = 0x80000008,
2387 .model_id = "Intel Xeon Processor (Skylake, IBRS)",
2390 .name = "Icelake-Client",
2391 .level = 0xd,
2392 .vendor = CPUID_VENDOR_INTEL,
2393 .family = 6,
2394 .model = 126,
2395 .stepping = 0,
2396 .features[FEAT_1_EDX] =
2397 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2398 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2399 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2400 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2401 CPUID_DE | CPUID_FP87,
2402 .features[FEAT_1_ECX] =
2403 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2404 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2405 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2406 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2407 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2408 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2409 .features[FEAT_8000_0001_EDX] =
2410 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2411 CPUID_EXT2_SYSCALL,
2412 .features[FEAT_8000_0001_ECX] =
2413 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2414 .features[FEAT_8000_0008_EBX] =
2415 CPUID_8000_0008_EBX_WBNOINVD,
2416 .features[FEAT_7_0_EBX] =
2417 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2418 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2419 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2420 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2421 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_INTEL_PT,
2422 .features[FEAT_7_0_ECX] =
2423 CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
2424 CPUID_7_0_ECX_OSPKE | CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI |
2425 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
2426 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
2427 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
2428 .features[FEAT_7_0_EDX] =
2429 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
2430 /* Missing: XSAVES (not supported by some Linux versions,
2431 * including v4.1 to v4.12).
2432 * KVM doesn't yet expose any XSAVES state save component,
2433 * and the only one defined in Skylake (processor tracing)
2434 * probably will block migration anyway.
2436 .features[FEAT_XSAVE] =
2437 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2438 CPUID_XSAVE_XGETBV1,
2439 .features[FEAT_6_EAX] =
2440 CPUID_6_EAX_ARAT,
2441 .xlevel = 0x80000008,
2442 .model_id = "Intel Core Processor (Icelake)",
2445 .name = "Icelake-Server",
2446 .level = 0xd,
2447 .vendor = CPUID_VENDOR_INTEL,
2448 .family = 6,
2449 .model = 134,
2450 .stepping = 0,
2451 .features[FEAT_1_EDX] =
2452 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2453 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2454 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2455 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2456 CPUID_DE | CPUID_FP87,
2457 .features[FEAT_1_ECX] =
2458 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2459 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2460 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2461 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2462 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2463 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2464 .features[FEAT_8000_0001_EDX] =
2465 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2466 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2467 .features[FEAT_8000_0001_ECX] =
2468 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2469 .features[FEAT_8000_0008_EBX] =
2470 CPUID_8000_0008_EBX_WBNOINVD,
2471 .features[FEAT_7_0_EBX] =
2472 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2473 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2474 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2475 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2476 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
2477 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2478 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2479 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT |
2480 CPUID_7_0_EBX_INTEL_PT,
2481 .features[FEAT_7_0_ECX] =
2482 CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
2483 CPUID_7_0_ECX_OSPKE | CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI |
2484 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
2485 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
2486 CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57,
2487 .features[FEAT_7_0_EDX] =
2488 CPUID_7_0_EDX_PCONFIG | CPUID_7_0_EDX_SPEC_CTRL |
2489 CPUID_7_0_EDX_SPEC_CTRL_SSBD,
2490 /* Missing: XSAVES (not supported by some Linux versions,
2491 * including v4.1 to v4.12).
2492 * KVM doesn't yet expose any XSAVES state save component,
2493 * and the only one defined in Skylake (processor tracing)
2494 * probably will block migration anyway.
2496 .features[FEAT_XSAVE] =
2497 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2498 CPUID_XSAVE_XGETBV1,
2499 .features[FEAT_6_EAX] =
2500 CPUID_6_EAX_ARAT,
2501 .xlevel = 0x80000008,
2502 .model_id = "Intel Xeon Processor (Icelake)",
2505 .name = "KnightsMill",
2506 .level = 0xd,
2507 .vendor = CPUID_VENDOR_INTEL,
2508 .family = 6,
2509 .model = 133,
2510 .stepping = 0,
2511 .features[FEAT_1_EDX] =
2512 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR |
2513 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV |
2514 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC |
2515 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC |
2516 CPUID_PSE | CPUID_DE | CPUID_FP87,
2517 .features[FEAT_1_ECX] =
2518 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2519 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2520 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2521 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2522 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2523 CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2524 .features[FEAT_8000_0001_EDX] =
2525 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2526 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2527 .features[FEAT_8000_0001_ECX] =
2528 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2529 .features[FEAT_7_0_EBX] =
2530 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2531 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS |
2532 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F |
2533 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF |
2534 CPUID_7_0_EBX_AVX512ER,
2535 .features[FEAT_7_0_ECX] =
2536 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
2537 .features[FEAT_7_0_EDX] =
2538 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS,
2539 .features[FEAT_XSAVE] =
2540 CPUID_XSAVE_XSAVEOPT,
2541 .features[FEAT_6_EAX] =
2542 CPUID_6_EAX_ARAT,
2543 .xlevel = 0x80000008,
2544 .model_id = "Intel Xeon Phi Processor (Knights Mill)",
2547 .name = "Opteron_G1",
2548 .level = 5,
2549 .vendor = CPUID_VENDOR_AMD,
2550 .family = 15,
2551 .model = 6,
2552 .stepping = 1,
2553 .features[FEAT_1_EDX] =
2554 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2555 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2556 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2557 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2558 CPUID_DE | CPUID_FP87,
2559 .features[FEAT_1_ECX] =
2560 CPUID_EXT_SSE3,
2561 .features[FEAT_8000_0001_EDX] =
2562 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2563 .xlevel = 0x80000008,
2564 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
2567 .name = "Opteron_G2",
2568 .level = 5,
2569 .vendor = CPUID_VENDOR_AMD,
2570 .family = 15,
2571 .model = 6,
2572 .stepping = 1,
2573 .features[FEAT_1_EDX] =
2574 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2575 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2576 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2577 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2578 CPUID_DE | CPUID_FP87,
2579 .features[FEAT_1_ECX] =
2580 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
2581 /* Missing: CPUID_EXT2_RDTSCP */
2582 .features[FEAT_8000_0001_EDX] =
2583 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2584 .features[FEAT_8000_0001_ECX] =
2585 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2586 .xlevel = 0x80000008,
2587 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
2590 .name = "Opteron_G3",
2591 .level = 5,
2592 .vendor = CPUID_VENDOR_AMD,
2593 .family = 16,
2594 .model = 2,
2595 .stepping = 3,
2596 .features[FEAT_1_EDX] =
2597 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2598 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2599 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2600 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2601 CPUID_DE | CPUID_FP87,
2602 .features[FEAT_1_ECX] =
2603 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
2604 CPUID_EXT_SSE3,
2605 /* Missing: CPUID_EXT2_RDTSCP */
2606 .features[FEAT_8000_0001_EDX] =
2607 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2608 .features[FEAT_8000_0001_ECX] =
2609 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
2610 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2611 .xlevel = 0x80000008,
2612 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
2615 .name = "Opteron_G4",
2616 .level = 0xd,
2617 .vendor = CPUID_VENDOR_AMD,
2618 .family = 21,
2619 .model = 1,
2620 .stepping = 2,
2621 .features[FEAT_1_EDX] =
2622 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2623 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2624 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2625 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2626 CPUID_DE | CPUID_FP87,
2627 .features[FEAT_1_ECX] =
2628 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2629 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2630 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2631 CPUID_EXT_SSE3,
2632 /* Missing: CPUID_EXT2_RDTSCP */
2633 .features[FEAT_8000_0001_EDX] =
2634 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2635 CPUID_EXT2_SYSCALL,
2636 .features[FEAT_8000_0001_ECX] =
2637 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2638 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2639 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2640 CPUID_EXT3_LAHF_LM,
2641 /* no xsaveopt! */
2642 .xlevel = 0x8000001A,
2643 .model_id = "AMD Opteron 62xx class CPU",
2646 .name = "Opteron_G5",
2647 .level = 0xd,
2648 .vendor = CPUID_VENDOR_AMD,
2649 .family = 21,
2650 .model = 2,
2651 .stepping = 0,
2652 .features[FEAT_1_EDX] =
2653 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2654 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2655 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2656 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2657 CPUID_DE | CPUID_FP87,
2658 .features[FEAT_1_ECX] =
2659 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
2660 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
2661 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
2662 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2663 /* Missing: CPUID_EXT2_RDTSCP */
2664 .features[FEAT_8000_0001_EDX] =
2665 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2666 CPUID_EXT2_SYSCALL,
2667 .features[FEAT_8000_0001_ECX] =
2668 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2669 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2670 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2671 CPUID_EXT3_LAHF_LM,
2672 /* no xsaveopt! */
2673 .xlevel = 0x8000001A,
2674 .model_id = "AMD Opteron 63xx class CPU",
2677 .name = "EPYC",
2678 .level = 0xd,
2679 .vendor = CPUID_VENDOR_AMD,
2680 .family = 23,
2681 .model = 1,
2682 .stepping = 2,
2683 .features[FEAT_1_EDX] =
2684 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2685 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2686 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2687 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2688 CPUID_VME | CPUID_FP87,
2689 .features[FEAT_1_ECX] =
2690 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2691 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2692 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2693 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2694 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2695 .features[FEAT_8000_0001_EDX] =
2696 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2697 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2698 CPUID_EXT2_SYSCALL,
2699 .features[FEAT_8000_0001_ECX] =
2700 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2701 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2702 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
2703 CPUID_EXT3_TOPOEXT,
2704 .features[FEAT_7_0_EBX] =
2705 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2706 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2707 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2708 CPUID_7_0_EBX_SHA_NI,
2709 /* Missing: XSAVES (not supported by some Linux versions,
2710 * including v4.1 to v4.12).
2711 * KVM doesn't yet expose any XSAVES state save component.
2713 .features[FEAT_XSAVE] =
2714 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2715 CPUID_XSAVE_XGETBV1,
2716 .features[FEAT_6_EAX] =
2717 CPUID_6_EAX_ARAT,
2718 .xlevel = 0x8000001E,
2719 .model_id = "AMD EPYC Processor",
2720 .cache_info = &epyc_cache_info,
2723 .name = "EPYC-IBPB",
2724 .level = 0xd,
2725 .vendor = CPUID_VENDOR_AMD,
2726 .family = 23,
2727 .model = 1,
2728 .stepping = 2,
2729 .features[FEAT_1_EDX] =
2730 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2731 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2732 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2733 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2734 CPUID_VME | CPUID_FP87,
2735 .features[FEAT_1_ECX] =
2736 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2737 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2738 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2739 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2740 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2741 .features[FEAT_8000_0001_EDX] =
2742 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2743 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2744 CPUID_EXT2_SYSCALL,
2745 .features[FEAT_8000_0001_ECX] =
2746 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2747 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2748 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
2749 CPUID_EXT3_TOPOEXT,
2750 .features[FEAT_8000_0008_EBX] =
2751 CPUID_8000_0008_EBX_IBPB,
2752 .features[FEAT_7_0_EBX] =
2753 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2754 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2755 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2756 CPUID_7_0_EBX_SHA_NI,
2757 /* Missing: XSAVES (not supported by some Linux versions,
2758 * including v4.1 to v4.12).
2759 * KVM doesn't yet expose any XSAVES state save component.
2761 .features[FEAT_XSAVE] =
2762 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2763 CPUID_XSAVE_XGETBV1,
2764 .features[FEAT_6_EAX] =
2765 CPUID_6_EAX_ARAT,
2766 .xlevel = 0x8000001E,
2767 .model_id = "AMD EPYC Processor (with IBPB)",
2768 .cache_info = &epyc_cache_info,
/* A (property name, value) pair used for accelerator-specific defaults */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;
/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 * A NULL value means the property is removed from the defaults
 * (see x86_cpu_change_kvm_default); the list is NULL-terminated.
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "x2apic", "on" },
    { "acpi", "off" },
    { "monitor", "off" },
    { "svm", "off" },
    { NULL, NULL },
/* TCG-specific defaults that override all CPU models when using TCG.
 * NULL-terminated, same format as kvm_default_props.
 */
static PropValue tcg_default_props[] = {
    { "vme", "off" },
    { NULL, NULL },
2801 void x86_cpu_change_kvm_default(const char *prop, const char *value)
2803 PropValue *pv;
2804 for (pv = kvm_default_props; pv->prop; pv++) {
2805 if (!strcmp(pv->prop, prop)) {
2806 pv->value = value;
2807 break;
2811 /* It is valid to call this function only for properties that
2812 * are already present in the kvm_default_props table.
2814 assert(pv->prop);
2817 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2818 bool migratable_only);
2820 static bool lmce_supported(void)
2822 uint64_t mce_cap = 0;
2824 #ifdef CONFIG_KVM
2825 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
2826 return false;
2828 #endif
2830 return !!(mce_cap & MCG_LMCE_P);
2833 #define CPUID_MODEL_ID_SZ 48
/*
 * cpu_x86_fill_model_id:
 * Get CPUID model ID string from host CPU.
 *
 * @str should have at least CPUID_MODEL_ID_SZ bytes
 *
 * The function does NOT add a null terminator to the string
 * automatically.
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t regs[4] = { 0, 0, 0, 0 };
    int leaf;

    /* Leaves 0x80000002..0x80000004 each provide 16 bytes of the ID,
     * laid out as EAX, EBX, ECX, EDX in memory order.
     */
    for (leaf = 0; leaf < 3; leaf++) {
        host_cpuid(0x80000002 + leaf, 0,
                   &regs[0], &regs[1], &regs[2], &regs[3]);
        memcpy(str + leaf * 16, regs, sizeof(regs));
    }
    return 0;
}
/* Extra qdev properties of the "max" CPU model */
static Property max_x86_cpu_properties[] = {
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
2865 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
2867 DeviceClass *dc = DEVICE_CLASS(oc);
2868 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2870 xcc->ordering = 9;
2872 xcc->model_description =
2873 "Enables all features supported by the accelerator in the current host";
2875 dc->props = max_x86_cpu_properties;
2878 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
2880 static void max_x86_cpu_initfn(Object *obj)
2882 X86CPU *cpu = X86_CPU(obj);
2883 CPUX86State *env = &cpu->env;
2884 KVMState *s = kvm_state;
2886 /* We can't fill the features array here because we don't know yet if
2887 * "migratable" is true or false.
2889 cpu->max_features = true;
2891 if (accel_uses_host_cpuid()) {
2892 char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
2893 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
2894 int family, model, stepping;
2895 X86CPUDefinition host_cpudef = { };
2896 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
2898 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
2899 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
2901 host_vendor_fms(vendor, &family, &model, &stepping);
2903 cpu_x86_fill_model_id(model_id);
2905 object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
2906 object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
2907 object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
2908 object_property_set_int(OBJECT(cpu), stepping, "stepping",
2909 &error_abort);
2910 object_property_set_str(OBJECT(cpu), model_id, "model-id",
2911 &error_abort);
2913 if (kvm_enabled()) {
2914 env->cpuid_min_level =
2915 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
2916 env->cpuid_min_xlevel =
2917 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
2918 env->cpuid_min_xlevel2 =
2919 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
2920 } else {
2921 env->cpuid_min_level =
2922 hvf_get_supported_cpuid(0x0, 0, R_EAX);
2923 env->cpuid_min_xlevel =
2924 hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
2925 env->cpuid_min_xlevel2 =
2926 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
2929 if (lmce_supported()) {
2930 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
2932 } else {
2933 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
2934 "vendor", &error_abort);
2935 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
2936 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
2937 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
2938 object_property_set_str(OBJECT(cpu),
2939 "QEMU TCG CPU version " QEMU_HW_VERSION,
2940 "model-id", &error_abort);
2943 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
/* QOM type registration for the "max" CPU model */
static const TypeInfo max_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("max"),
    .parent = TYPE_X86_CPU,
    .instance_init = max_x86_cpu_initfn,
    .class_init = max_x86_cpu_class_init,
2953 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
2954 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
2956 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2958 xcc->host_cpuid_required = true;
2959 xcc->ordering = 8;
2961 #if defined(CONFIG_KVM)
2962 xcc->model_description =
2963 "KVM processor with all supported host features ";
2964 #elif defined(CONFIG_HVF)
2965 xcc->model_description =
2966 "HVF processor with all supported host features ";
2967 #endif
/* QOM type registration for the "host" CPU model; inherits from "max" */
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = X86_CPU_TYPE_NAME("max"),
    .class_init = host_x86_cpu_class_init,

#endif
2978 static void report_unavailable_features(FeatureWord w, uint32_t mask)
2980 FeatureWordInfo *f = &feature_word_info[w];
2981 int i;
2983 for (i = 0; i < 32; ++i) {
2984 if ((1UL << i) & mask) {
2985 const char *reg = get_register_name_32(f->cpuid_reg);
2986 assert(reg);
2987 warn_report("%s doesn't support requested feature: "
2988 "CPUID.%02XH:%s%s%s [bit %d]",
2989 accel_uses_host_cpuid() ? "host" : "TCG",
2990 f->cpuid_eax, reg,
2991 f->feat_names[i] ? "." : "",
2992 f->feat_names[i] ? f->feat_names[i] : "", i);
2997 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
2998 const char *name, void *opaque,
2999 Error **errp)
3001 X86CPU *cpu = X86_CPU(obj);
3002 CPUX86State *env = &cpu->env;
3003 int64_t value;
3005 value = (env->cpuid_version >> 8) & 0xf;
3006 if (value == 0xf) {
3007 value += (env->cpuid_version >> 20) & 0xff;
3009 visit_type_int(v, name, &value, errp);
3012 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
3013 const char *name, void *opaque,
3014 Error **errp)
3016 X86CPU *cpu = X86_CPU(obj);
3017 CPUX86State *env = &cpu->env;
3018 const int64_t min = 0;
3019 const int64_t max = 0xff + 0xf;
3020 Error *local_err = NULL;
3021 int64_t value;
3023 visit_type_int(v, name, &value, &local_err);
3024 if (local_err) {
3025 error_propagate(errp, local_err);
3026 return;
3028 if (value < min || value > max) {
3029 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3030 name ? name : "null", value, min, max);
3031 return;
3034 env->cpuid_version &= ~0xff00f00;
3035 if (value > 0x0f) {
3036 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
3037 } else {
3038 env->cpuid_version |= value << 8;
3042 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
3043 const char *name, void *opaque,
3044 Error **errp)
3046 X86CPU *cpu = X86_CPU(obj);
3047 CPUX86State *env = &cpu->env;
3048 int64_t value;
3050 value = (env->cpuid_version >> 4) & 0xf;
3051 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
3052 visit_type_int(v, name, &value, errp);
3055 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
3056 const char *name, void *opaque,
3057 Error **errp)
3059 X86CPU *cpu = X86_CPU(obj);
3060 CPUX86State *env = &cpu->env;
3061 const int64_t min = 0;
3062 const int64_t max = 0xff;
3063 Error *local_err = NULL;
3064 int64_t value;
3066 visit_type_int(v, name, &value, &local_err);
3067 if (local_err) {
3068 error_propagate(errp, local_err);
3069 return;
3071 if (value < min || value > max) {
3072 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3073 name ? name : "null", value, min, max);
3074 return;
3077 env->cpuid_version &= ~0xf00f0;
3078 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
3081 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
3082 const char *name, void *opaque,
3083 Error **errp)
3085 X86CPU *cpu = X86_CPU(obj);
3086 CPUX86State *env = &cpu->env;
3087 int64_t value;
3089 value = env->cpuid_version & 0xf;
3090 visit_type_int(v, name, &value, errp);
3093 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
3094 const char *name, void *opaque,
3095 Error **errp)
3097 X86CPU *cpu = X86_CPU(obj);
3098 CPUX86State *env = &cpu->env;
3099 const int64_t min = 0;
3100 const int64_t max = 0xf;
3101 Error *local_err = NULL;
3102 int64_t value;
3104 visit_type_int(v, name, &value, &local_err);
3105 if (local_err) {
3106 error_propagate(errp, local_err);
3107 return;
3109 if (value < min || value > max) {
3110 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3111 name ? name : "null", value, min, max);
3112 return;
3115 env->cpuid_version &= ~0xf;
3116 env->cpuid_version |= value & 0xf;
3119 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
3121 X86CPU *cpu = X86_CPU(obj);
3122 CPUX86State *env = &cpu->env;
3123 char *value;
3125 value = g_malloc(CPUID_VENDOR_SZ + 1);
3126 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
3127 env->cpuid_vendor3);
3128 return value;
3131 static void x86_cpuid_set_vendor(Object *obj, const char *value,
3132 Error **errp)
3134 X86CPU *cpu = X86_CPU(obj);
3135 CPUX86State *env = &cpu->env;
3136 int i;
3138 if (strlen(value) != CPUID_VENDOR_SZ) {
3139 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
3140 return;
3143 env->cpuid_vendor1 = 0;
3144 env->cpuid_vendor2 = 0;
3145 env->cpuid_vendor3 = 0;
3146 for (i = 0; i < 4; i++) {
3147 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
3148 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
3149 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
3153 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
3155 X86CPU *cpu = X86_CPU(obj);
3156 CPUX86State *env = &cpu->env;
3157 char *value;
3158 int i;
3160 value = g_malloc(48 + 1);
3161 for (i = 0; i < 48; i++) {
3162 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
3164 value[48] = '\0';
3165 return value;
3168 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
3169 Error **errp)
3171 X86CPU *cpu = X86_CPU(obj);
3172 CPUX86State *env = &cpu->env;
3173 int c, len, i;
3175 if (model_id == NULL) {
3176 model_id = "";
3178 len = strlen(model_id);
3179 memset(env->cpuid_model, 0, 48);
3180 for (i = 0; i < 48; i++) {
3181 if (i >= len) {
3182 c = '\0';
3183 } else {
3184 c = (uint8_t)model_id[i];
3186 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
3190 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
3191 void *opaque, Error **errp)
3193 X86CPU *cpu = X86_CPU(obj);
3194 int64_t value;
3196 value = cpu->env.tsc_khz * 1000;
3197 visit_type_int(v, name, &value, errp);
3200 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
3201 void *opaque, Error **errp)
3203 X86CPU *cpu = X86_CPU(obj);
3204 const int64_t min = 0;
3205 const int64_t max = INT64_MAX;
3206 Error *local_err = NULL;
3207 int64_t value;
3209 visit_type_int(v, name, &value, &local_err);
3210 if (local_err) {
3211 error_propagate(errp, local_err);
3212 return;
3214 if (value < min || value > max) {
3215 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3216 name ? name : "null", value, min, max);
3217 return;
3220 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
3223 /* Generic getter for "feature-words" and "filtered-features" properties */
3224 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
3225 const char *name, void *opaque,
3226 Error **errp)
3228 uint32_t *array = (uint32_t *)opaque;
3229 FeatureWord w;
3230 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
3231 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
3232 X86CPUFeatureWordInfoList *list = NULL;
3234 for (w = 0; w < FEATURE_WORDS; w++) {
3235 FeatureWordInfo *wi = &feature_word_info[w];
3236 X86CPUFeatureWordInfo *qwi = &word_infos[w];
3237 qwi->cpuid_input_eax = wi->cpuid_eax;
3238 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
3239 qwi->cpuid_input_ecx = wi->cpuid_ecx;
3240 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
3241 qwi->features = array[w];
3243 /* List will be in reverse order, but order shouldn't matter */
3244 list_entries[w].next = list;
3245 list_entries[w].value = &word_infos[w];
3246 list = &list_entries[w];
3249 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
3252 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
3253 void *opaque, Error **errp)
3255 X86CPU *cpu = X86_CPU(obj);
3256 int64_t value = cpu->hyperv_spinlock_attempts;
3258 visit_type_int(v, name, &value, errp);
3261 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
3262 void *opaque, Error **errp)
3264 const int64_t min = 0xFFF;
3265 const int64_t max = UINT_MAX;
3266 X86CPU *cpu = X86_CPU(obj);
3267 Error *err = NULL;
3268 int64_t value;
3270 visit_type_int(v, name, &value, &err);
3271 if (err) {
3272 error_propagate(errp, err);
3273 return;
3276 if (value < min || value > max) {
3277 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
3278 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
3279 object_get_typename(obj), name ? name : "null",
3280 value, min, max);
3281 return;
3283 cpu->hyperv_spinlock_attempts = value;
/* qdev property type exposing hv-spinlocks as an integer property */
static const PropertyInfo qdev_prop_spinlocks = {
    .name = "int",
    .get = x86_get_hv_spinlocks,
    .set = x86_set_hv_spinlocks,
/* Convert all '_' in a feature string option name to '-', to make feature
 * name conform to QOM property naming rule, which uses '-' instead of '_'.
 */
static inline void feat2prop(char *s)
{
    char *p;

    for (p = strchr(s, '_'); p; p = strchr(p, '_')) {
        *p = '-';
    }
}
3302 /* Return the feature property name for a feature flag bit */
3303 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
3305 /* XSAVE components are automatically enabled by other features,
3306 * so return the original feature name instead
3308 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
3309 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
3311 if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
3312 x86_ext_save_areas[comp].bits) {
3313 w = x86_ext_save_areas[comp].feature;
3314 bitnr = ctz32(x86_ext_save_areas[comp].bits);
3318 assert(bitnr < 32);
3319 assert(w < FEATURE_WORDS);
3320 return feature_word_info[w].feat_names[bitnr];
/* Compatibility hack to maintain legacy +-feat semantics,
 * where +-feat overwrites any feature set by
 * feat=on|feat even if the latter is parsed after +-feat
 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
 */
3328 static GList *plus_features, *minus_features;
3330 static gint compare_string(gconstpointer a, gconstpointer b)
3332 return g_strcmp0(a, b);
3335 /* Parse "+feature,-feature,feature=foo" CPU feature string
3337 static void x86_cpu_parse_featurestr(const char *typename, char *features,
3338 Error **errp)
3340 char *featurestr; /* Single 'key=value" string being parsed */
3341 static bool cpu_globals_initialized;
3342 bool ambiguous = false;
3344 if (cpu_globals_initialized) {
3345 return;
3347 cpu_globals_initialized = true;
3349 if (!features) {
3350 return;
3353 for (featurestr = strtok(features, ",");
3354 featurestr;
3355 featurestr = strtok(NULL, ",")) {
3356 const char *name;
3357 const char *val = NULL;
3358 char *eq = NULL;
3359 char num[32];
3360 GlobalProperty *prop;
3362 /* Compatibility syntax: */
3363 if (featurestr[0] == '+') {
3364 plus_features = g_list_append(plus_features,
3365 g_strdup(featurestr + 1));
3366 continue;
3367 } else if (featurestr[0] == '-') {
3368 minus_features = g_list_append(minus_features,
3369 g_strdup(featurestr + 1));
3370 continue;
3373 eq = strchr(featurestr, '=');
3374 if (eq) {
3375 *eq++ = 0;
3376 val = eq;
3377 } else {
3378 val = "on";
3381 feat2prop(featurestr);
3382 name = featurestr;
3384 if (g_list_find_custom(plus_features, name, compare_string)) {
3385 warn_report("Ambiguous CPU model string. "
3386 "Don't mix both \"+%s\" and \"%s=%s\"",
3387 name, name, val);
3388 ambiguous = true;
3390 if (g_list_find_custom(minus_features, name, compare_string)) {
3391 warn_report("Ambiguous CPU model string. "
3392 "Don't mix both \"-%s\" and \"%s=%s\"",
3393 name, name, val);
3394 ambiguous = true;
3397 /* Special case: */
3398 if (!strcmp(name, "tsc-freq")) {
3399 int ret;
3400 uint64_t tsc_freq;
3402 ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
3403 if (ret < 0 || tsc_freq > INT64_MAX) {
3404 error_setg(errp, "bad numerical value %s", val);
3405 return;
3407 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
3408 val = num;
3409 name = "tsc-frequency";
3412 prop = g_new0(typeof(*prop), 1);
3413 prop->driver = typename;
3414 prop->property = g_strdup(name);
3415 prop->value = g_strdup(val);
3416 prop->errp = &error_fatal;
3417 qdev_prop_register_global(prop);
3420 if (ambiguous) {
3421 warn_report("Compatibility of ambiguous CPU model "
3422 "strings won't be kept on future QEMU versions");
3426 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
3427 static int x86_cpu_filter_features(X86CPU *cpu);
3429 /* Check for missing features that may prevent the CPU class from
3430 * running using the current machine and accelerator.
3432 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
3433 strList **missing_feats)
3435 X86CPU *xc;
3436 FeatureWord w;
3437 Error *err = NULL;
3438 strList **next = missing_feats;
3440 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
3441 strList *new = g_new0(strList, 1);
3442 new->value = g_strdup("kvm");
3443 *missing_feats = new;
3444 return;
3447 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
3449 x86_cpu_expand_features(xc, &err);
3450 if (err) {
3451 /* Errors at x86_cpu_expand_features should never happen,
3452 * but in case it does, just report the model as not
3453 * runnable at all using the "type" property.
3455 strList *new = g_new0(strList, 1);
3456 new->value = g_strdup("type");
3457 *next = new;
3458 next = &new->next;
3461 x86_cpu_filter_features(xc);
3463 for (w = 0; w < FEATURE_WORDS; w++) {
3464 uint32_t filtered = xc->filtered_features[w];
3465 int i;
3466 for (i = 0; i < 32; i++) {
3467 if (filtered & (1UL << i)) {
3468 strList *new = g_new0(strList, 1);
3469 new->value = g_strdup(x86_cpu_feature_name(w, i));
3470 *next = new;
3471 next = &new->next;
3476 object_unref(OBJECT(xc));
3479 /* Print all cpuid feature names in featureset
3481 static void listflags(FILE *f, fprintf_function print, GList *features)
3483 size_t len = 0;
3484 GList *tmp;
3486 for (tmp = features; tmp; tmp = tmp->next) {
3487 const char *name = tmp->data;
3488 if ((len + strlen(name) + 1) >= 75) {
3489 print(f, "\n");
3490 len = 0;
3492 print(f, "%s%s", len == 0 ? " " : " ", name);
3493 len += strlen(name) + 1;
3495 print(f, "\n");
3498 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
3499 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
3501 ObjectClass *class_a = (ObjectClass *)a;
3502 ObjectClass *class_b = (ObjectClass *)b;
3503 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
3504 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
3505 char *name_a, *name_b;
3506 int ret;
3508 if (cc_a->ordering != cc_b->ordering) {
3509 ret = cc_a->ordering - cc_b->ordering;
3510 } else {
3511 name_a = x86_cpu_class_get_model_name(cc_a);
3512 name_b = x86_cpu_class_get_model_name(cc_b);
3513 ret = strcmp(name_a, name_b);
3514 g_free(name_a);
3515 g_free(name_b);
3517 return ret;
3520 static GSList *get_sorted_cpu_model_list(void)
3522 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
3523 list = g_slist_sort(list, x86_cpu_list_compare);
3524 return list;
3527 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
3529 ObjectClass *oc = data;
3530 X86CPUClass *cc = X86_CPU_CLASS(oc);
3531 CPUListState *s = user_data;
3532 char *name = x86_cpu_class_get_model_name(cc);
3533 const char *desc = cc->model_description;
3534 if (!desc && cc->cpu_def) {
3535 desc = cc->cpu_def->model_id;
3538 (*s->cpu_fprintf)(s->file, "x86 %-20s %-48s\n",
3539 name, desc);
3540 g_free(name);
3543 /* list available CPU models and flags */
3544 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
3546 int i, j;
3547 CPUListState s = {
3548 .file = f,
3549 .cpu_fprintf = cpu_fprintf,
3551 GSList *list;
3552 GList *names = NULL;
3554 (*cpu_fprintf)(f, "Available CPUs:\n");
3555 list = get_sorted_cpu_model_list();
3556 g_slist_foreach(list, x86_cpu_list_entry, &s);
3557 g_slist_free(list);
3559 names = NULL;
3560 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
3561 FeatureWordInfo *fw = &feature_word_info[i];
3562 for (j = 0; j < 32; j++) {
3563 if (fw->feat_names[j]) {
3564 names = g_list_append(names, (gpointer)fw->feat_names[j]);
3569 names = g_list_sort(names, (GCompareFunc)strcmp);
3571 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
3572 listflags(f, cpu_fprintf, names);
3573 (*cpu_fprintf)(f, "\n");
3574 g_list_free(names);
3577 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
3579 ObjectClass *oc = data;
3580 X86CPUClass *cc = X86_CPU_CLASS(oc);
3581 CpuDefinitionInfoList **cpu_list = user_data;
3582 CpuDefinitionInfoList *entry;
3583 CpuDefinitionInfo *info;
3585 info = g_malloc0(sizeof(*info));
3586 info->name = x86_cpu_class_get_model_name(cc);
3587 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
3588 info->has_unavailable_features = true;
3589 info->q_typename = g_strdup(object_class_get_name(oc));
3590 info->migration_safe = cc->migration_safe;
3591 info->has_migration_safe = true;
3592 info->q_static = cc->static_model;
3594 entry = g_malloc0(sizeof(*entry));
3595 entry->value = info;
3596 entry->next = *cpu_list;
3597 *cpu_list = entry;
3600 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
3602 CpuDefinitionInfoList *cpu_list = NULL;
3603 GSList *list = get_sorted_cpu_model_list();
3604 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
3605 g_slist_free(list);
3606 return cpu_list;
3609 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
3610 bool migratable_only)
3612 FeatureWordInfo *wi = &feature_word_info[w];
3613 uint32_t r;
3615 if (kvm_enabled()) {
3616 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
3617 wi->cpuid_ecx,
3618 wi->cpuid_reg);
3619 } else if (hvf_enabled()) {
3620 r = hvf_get_supported_cpuid(wi->cpuid_eax,
3621 wi->cpuid_ecx,
3622 wi->cpuid_reg);
3623 } else if (tcg_enabled()) {
3624 r = wi->tcg_features;
3625 } else {
3626 return ~0;
3628 if (migratable_only) {
3629 r &= x86_cpu_get_migratable_flags(w);
3631 return r;
3634 static void x86_cpu_report_filtered_features(X86CPU *cpu)
3636 FeatureWord w;
3638 for (w = 0; w < FEATURE_WORDS; w++) {
3639 report_unavailable_features(w, cpu->filtered_features[w]);
3643 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
3645 PropValue *pv;
3646 for (pv = props; pv->prop; pv++) {
3647 if (!pv->value) {
3648 continue;
3650 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
3651 &error_abort);
3655 /* Load data from X86CPUDefinition into a X86CPU object
3657 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
3659 CPUX86State *env = &cpu->env;
3660 const char *vendor;
3661 char host_vendor[CPUID_VENDOR_SZ + 1];
3662 FeatureWord w;
3664 /*NOTE: any property set by this function should be returned by
3665 * x86_cpu_static_props(), so static expansion of
3666 * query-cpu-model-expansion is always complete.
3669 /* CPU models only set _minimum_ values for level/xlevel: */
3670 object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
3671 object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
3673 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
3674 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
3675 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
3676 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
3677 for (w = 0; w < FEATURE_WORDS; w++) {
3678 env->features[w] = def->features[w];
3681 /* legacy-cache defaults to 'off' if CPU model provides cache info */
3682 cpu->legacy_cache = !def->cache_info;
3684 /* Special cases not set in the X86CPUDefinition structs: */
3685 /* TODO: in-kernel irqchip for hvf */
3686 if (kvm_enabled()) {
3687 if (!kvm_irqchip_in_kernel()) {
3688 x86_cpu_change_kvm_default("x2apic", "off");
3691 x86_cpu_apply_props(cpu, kvm_default_props);
3692 } else if (tcg_enabled()) {
3693 x86_cpu_apply_props(cpu, tcg_default_props);
3696 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
3698 /* sysenter isn't supported in compatibility mode on AMD,
3699 * syscall isn't supported in compatibility mode on Intel.
3700 * Normally we advertise the actual CPU vendor, but you can
3701 * override this using the 'vendor' property if you want to use
3702 * KVM's sysenter/syscall emulation in compatibility mode and
3703 * when doing cross vendor migration
3705 vendor = def->vendor;
3706 if (accel_uses_host_cpuid()) {
3707 uint32_t ebx = 0, ecx = 0, edx = 0;
3708 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
3709 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
3710 vendor = host_vendor;
3713 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
3717 /* Return a QDict containing keys for all properties that can be included
3718 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
3719 * must be included in the dictionary.
3721 static QDict *x86_cpu_static_props(void)
3723 FeatureWord w;
3724 int i;
3725 static const char *props[] = {
3726 "min-level",
3727 "min-xlevel",
3728 "family",
3729 "model",
3730 "stepping",
3731 "model-id",
3732 "vendor",
3733 "lmce",
3734 NULL,
3736 static QDict *d;
3738 if (d) {
3739 return d;
3742 d = qdict_new();
3743 for (i = 0; props[i]; i++) {
3744 qdict_put_null(d, props[i]);
3747 for (w = 0; w < FEATURE_WORDS; w++) {
3748 FeatureWordInfo *fi = &feature_word_info[w];
3749 int bit;
3750 for (bit = 0; bit < 32; bit++) {
3751 if (!fi->feat_names[bit]) {
3752 continue;
3754 qdict_put_null(d, fi->feat_names[bit]);
3758 return d;
3761 /* Add an entry to @props dict, with the value for property. */
3762 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
3764 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
3765 &error_abort);
3767 qdict_put_obj(props, prop, value);
3770 /* Convert CPU model data from X86CPU object to a property dictionary
3771 * that can recreate exactly the same CPU model.
3773 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
3775 QDict *sprops = x86_cpu_static_props();
3776 const QDictEntry *e;
3778 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
3779 const char *prop = qdict_entry_key(e);
3780 x86_cpu_expand_prop(cpu, props, prop);
3784 /* Convert CPU model data from X86CPU object to a property dictionary
3785 * that can recreate exactly the same CPU model, including every
3786 * writeable QOM property.
3788 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
3790 ObjectPropertyIterator iter;
3791 ObjectProperty *prop;
3793 object_property_iter_init(&iter, OBJECT(cpu));
3794 while ((prop = object_property_iter_next(&iter))) {
3795 /* skip read-only or write-only properties */
3796 if (!prop->get || !prop->set) {
3797 continue;
3800 /* "hotplugged" is the only property that is configurable
3801 * on the command-line but will be set differently on CPUs
3802 * created using "-cpu ... -smp ..." and by CPUs created
3803 * on the fly by x86_cpu_from_model() for querying. Skip it.
3805 if (!strcmp(prop->name, "hotplugged")) {
3806 continue;
3808 x86_cpu_expand_prop(cpu, props, prop->name);
3812 static void object_apply_props(Object *obj, QDict *props, Error **errp)
3814 const QDictEntry *prop;
3815 Error *err = NULL;
3817 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
3818 object_property_set_qobject(obj, qdict_entry_value(prop),
3819 qdict_entry_key(prop), &err);
3820 if (err) {
3821 break;
3825 error_propagate(errp, err);
/* Create X86CPU object according to model+props specification
 *
 * @model: CPU model name (must name a registered TYPE_X86_CPU subclass)
 * @props: optional property dictionary applied to the new object
 * @errp: error out-parameter
 *
 * Returns a new X86CPU with its features fully expanded, or NULL on
 * failure (the partially-built object is unreferenced in that case).
 * The caller owns the returned reference.
 */
static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
{
    X86CPU *xc = NULL;
    X86CPUClass *xcc;
    Error *err = NULL;

    xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
    if (xcc == NULL) {
        error_setg(&err, "CPU model '%s' not found", model);
        goto out;
    }

    xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
    if (props) {
        object_apply_props(OBJECT(xc), props, &err);
        if (err) {
            goto out;
        }
    }

    /* Pull in host/accelerator data and resolve implied features */
    x86_cpu_expand_features(xc, &err);
    if (err) {
        goto out;
    }

out:
    if (err) {
        error_propagate(errp, err);
        /* object_unref(NULL) is a no-op, so this is safe pre-creation too */
        object_unref(OBJECT(xc));
        xc = NULL;
    }
    return xc;
}
3863 CpuModelExpansionInfo *
3864 arch_query_cpu_model_expansion(CpuModelExpansionType type,
3865 CpuModelInfo *model,
3866 Error **errp)
3868 X86CPU *xc = NULL;
3869 Error *err = NULL;
3870 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
3871 QDict *props = NULL;
3872 const char *base_name;
3874 xc = x86_cpu_from_model(model->name,
3875 model->has_props ?
3876 qobject_to(QDict, model->props) :
3877 NULL, &err);
3878 if (err) {
3879 goto out;
3882 props = qdict_new();
3883 ret->model = g_new0(CpuModelInfo, 1);
3884 ret->model->props = QOBJECT(props);
3885 ret->model->has_props = true;
3887 switch (type) {
3888 case CPU_MODEL_EXPANSION_TYPE_STATIC:
3889 /* Static expansion will be based on "base" only */
3890 base_name = "base";
3891 x86_cpu_to_dict(xc, props);
3892 break;
3893 case CPU_MODEL_EXPANSION_TYPE_FULL:
3894 /* As we don't return every single property, full expansion needs
3895 * to keep the original model name+props, and add extra
3896 * properties on top of that.
3898 base_name = model->name;
3899 x86_cpu_to_dict_full(xc, props);
3900 break;
3901 default:
3902 error_setg(&err, "Unsupportted expansion type");
3903 goto out;
3906 x86_cpu_to_dict(xc, props);
3908 ret->model->name = g_strdup(base_name);
3910 out:
3911 object_unref(OBJECT(xc));
3912 if (err) {
3913 error_propagate(errp, err);
3914 qapi_free_CpuModelExpansionInfo(ret);
3915 ret = NULL;
3917 return ret;
3920 static gchar *x86_gdb_arch_name(CPUState *cs)
3922 #ifdef TARGET_X86_64
3923 return g_strdup("i386:x86-64");
3924 #else
3925 return g_strdup("i386");
3926 #endif
/* Class init for model-specific subclasses of TYPE_X86_CPU.
 *
 * @data carries the X86CPUDefinition the model was registered with;
 * builtin table models are flagged migration-safe.
 */
static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
{
    X86CPUDefinition *cpudef = data;
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->cpu_def = cpudef;
    xcc->migration_safe = true;
}
/* Register one builtin CPU model definition as a QOM type named
 * "<model>-x86_64-cpu" (see x86_cpu_type_name()).
 */
static void x86_register_cpudef_type(X86CPUDefinition *def)
{
    char *typename = x86_cpu_type_name(def->name);
    TypeInfo ti = {
        .name = typename,
        .parent = TYPE_X86_CPU,
        .class_init = x86_cpu_cpudef_class_init,
        .class_data = def,
    };

    /* AMD aliases are handled at runtime based on CPUID vendor, so
     * they shouldn't be set on the CPU model table.
     */
    assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
    /* catch mistakes instead of silently truncating model_id when too long */
    assert(def->model_id && strlen(def->model_id) <= 48);

    type_register(&ti);
    g_free(typename);
}
3960 #if !defined(CONFIG_USER_ONLY)
/* Drop the APIC bit from CPUID[1].EDX, for boards/configs where no
 * local APIC is available to the guest.
 */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}
3967 #endif /* !CONFIG_USER_ONLY */
/* Emulate the CPUID instruction for the vCPU described by @env.
 *
 * @index: requested leaf (the EAX input); clamped per-range (basic,
 *         hypervisor 0x40000000, extended 0x80000000, Centaur
 *         0xC0000000) to the vCPU's advertised maximum leaf
 * @count: requested subleaf (the ECX input); meaningful for leaves
 *         such as 4, 7, 0xB, 0xD, 0x14 and 0x8000001D
 * @eax/@ebx/@ecx/@edx: output registers, always written
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint32_t pkg_offset;
    uint32_t limit;
    uint32_t signature[3];

    /* Calculate & apply limits for different index ranges */
    if (index >= 0xC0000000) {
        limit = env->cpuid_xlevel2;
    } else if (index >= 0x80000000) {
        limit = env->cpuid_xlevel;
    } else if (index >= 0x40000000) {
        limit = 0x40000001;
    } else {
        limit = env->cpuid_level;
    }

    if (index > limit) {
        /* Intel documentation states that invalid EAX input will
         * return the same information as EAX=cpuid_level
         * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
         */
        index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* Vendor string in the architectural EBX/EDX/ECX order */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        /* OSXSAVE mirrors CR4.OSXSAVE, so it is recomputed here */
        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
            *ecx |= CPUID_EXT_OSXSAVE;
        }
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= CPUID_HT;
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        if (!cpu->enable_l3_cache) {
            *ecx = 0;
        } else {
            *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache);
        }
        *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) |
               (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) |
               (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
            *eax &= ~0xFC000000;
            if ((*eax & 31) && cs->nr_cores > 1) {
                *eax |= (cs->nr_cores - 1) << 26;
            }
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
                                    1, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 1: /* L1 icache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
                                    1, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 2: /* L2 cache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
                                    cs->nr_threads, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 3: /* L3 cache info */
                pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
                if (cpu->enable_l3_cache) {
                    encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
                                        (1 << pkg_offset), cs->nr_cores,
                                        eax, ebx, ecx, edx);
                    break;
                }
                /* fall through */
            default: /* end of info */
                *eax = *ebx = *ecx = *edx = 0;
                break;
            }
        }
        break;
    case 5:
        /* MONITOR/MWAIT Leaf */
        *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */
        *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */
        *ecx = cpu->mwait.ecx; /* flags */
        *edx = cpu->mwait.edx; /* mwait substates */
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            /* OSPKE mirrors CR4.PKE, so it is recomputed here */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else if (hvf_enabled() && cpu->enable_pmu) {
            *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
            *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
            *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
            *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xB:
        /* Extended Topology Enumeration Leaf */
        if (!cpu->enable_cpuid_0xb) {
                *eax = *ebx = *ecx = *edx = 0;
                break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;

        switch (count) {
        case 0:
            *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }

        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0xD: {
        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }

        if (count == 0) {
            *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
            *eax = env->features[FEAT_XSAVE_COMP_LO];
            *edx = env->features[FEAT_XSAVE_COMP_HI];
            *ebx = *ecx;
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            /* Per-component size/offset sub-leaves */
            if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
                const ExtSaveArea *esa = &x86_ext_save_areas[count];
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x14: {
        /* Intel Processor Trace Enumeration */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
            !kvm_enabled()) {
            break;
        }

        if (count == 0) {
            *eax = INTEL_PT_MAX_SUBLEAF;
            *ebx = INTEL_PT_MINIMAL_EBX;
            *ecx = INTEL_PT_MINIMAL_ECX;
        } else if (count == 1) {
            *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
            *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
        }
        break;
    }
    case 0x40000000:
        /*
         * CPUID code in kvm_arch_init_vcpu() ignores stuff
         * set here, but we restrict to TCG none the less.
         */
        if (tcg_enabled() && cpu->expose_tcg) {
            memcpy(signature, "TCGTCGTCGTCG", 12);
            *eax = 0x40000001;
            *ebx = signature[0];
            *ecx = signature[1];
            *edx = signature[2];
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0x40000001:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Model name string, 16 bytes per leaf across three leaves */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
        *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache);
        *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        encode_cache_cpuid80000006(env->cache_info_amd.l2_cache,
                                   cpu->enable_l3_cache ?
                                   env->cache_info_amd.l3_cache : NULL,
                                   ecx, edx);
        break;
    case 0x80000007:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor */
            *eax = cpu->phys_bits; /* configurable physical bits */
            if  (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
                *eax |= 0x00003900; /* 57 bits virtual */
            } else {
                *eax |= 0x00003000; /* 48 bits virtual */
            }
        } else {
            *eax = cpu->phys_bits;
        }
        *ebx = env->features[FEAT_8000_0008_EBX];
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0x8000001D:
        /* AMD cache topology (analogue of leaf 4) */
        *eax = 0;
        switch (count) {
        case 0: /* L1 dcache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 1: /* L1 icache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 2: /* L2 cache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 3: /* L3 cache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        default: /* end of info */
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }
        break;
    case 0x8000001E:
        assert(cpu->core_id <= 255);
        encode_topo_cpuid8000001e(cs, cpu,
                                  eax, ebx, ecx, edx);
        break;
    case 0xC0000000:
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x8000001F:
        /* AMD Secure Encrypted Virtualization (SEV) leaf */
        *eax = sev_enabled() ? 0x2 : 0;
        *ebx = sev_get_cbit_position();
        *ebx |= sev_get_reduced_phys_bits() << 6;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
/* CPUClass::reset()
 *
 * Bring the vCPU to the architectural power-on/RESET state: real mode,
 * CS:IP = F000:FFF0, flat 64KiB segments, FPU/SSE/XSAVE units in their
 * INIT state. Everything up to CPUX86State.end_reset_fields is zeroed
 * first, then the non-zero architectural defaults are applied.
 */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    memset(env, 0, offsetof(CPUX86State, end_reset_fields));

    env->old_exception = -1;

    /* init to reset state */

    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;
    env->msr_smi_count = 0;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* CS base 0xffff0000 + IP 0xfff0 = the architectural reset vector */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state.  */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode. */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

    env->interrupt_injected = -1;
    env->exception_injected = -1;
    env->nmi_injected = false;
#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    /* APs start halted and wait for INIT/SIPI from the BSP */
    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
    else if (hvf_enabled()) {
        hvf_reset_vcpu(s);
    }
#endif
}
4538 #ifndef CONFIG_USER_ONLY
/* Return true if @cpu is the bootstrap processor, as reported by the
 * BSP bit of its APIC base MSR.
 */
bool cpu_is_bsp(X86CPU *cpu)
{
    return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
}
/* TODO: remove me, when reset over QOM tree is implemented
 *
 * Machine-reset callback: resets one CPU (registered per-CPU).
 */
static void x86_cpu_machine_reset_cb(void *opaque)
{
    X86CPU *cpu = opaque;
    cpu_reset(CPU(cpu));
}
4550 #endif
4552 static void mce_init(X86CPU *cpu)
4554 CPUX86State *cenv = &cpu->env;
4555 unsigned int bank;
4557 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
4558 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
4559 (CPUID_MCE | CPUID_MCA)) {
4560 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
4561 (cpu->enable_lmce ? MCG_LMCE_P : 0);
4562 cenv->mcg_ctl = ~(uint64_t)0;
4563 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
4564 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
4569 #ifndef CONFIG_USER_ONLY
/* Select the APIC implementation matching the current accelerator:
 * the in-kernel KVM APIC, the Xen APIC, or QEMU's userspace "apic".
 */
APICCommonClass *apic_get_class(void)
{
    const char *apic_type = "apic";

    /* TODO: in-kernel irqchip for hvf */
    if (kvm_apic_in_kernel()) {
        apic_type = "kvm-apic";
    } else if (xen_enabled()) {
        apic_type = "xen-apic";
    }

    return APIC_COMMON_CLASS(object_class_by_name(apic_type));
}
/* Create the local APIC device for @cpu and attach it as the "lapic"
 * QOM child. The child link holds the only long-lived reference.
 */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());

    cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));

    object_property_add_child(OBJECT(cpu), "lapic",
                              OBJECT(cpu->apic_state), &error_abort);
    /* drop the object_new() reference; the child property keeps it alive */
    object_unref(OBJECT(cpu->apic_state));

    qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}
/* Realize @cpu's local APIC (if one was created) and, on the first
 * call only, map the APIC MMIO window into system memory — the window
 * is shared by all CPUs, hence the function-local static guard.
 */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    static bool apic_mmio_map_once;

    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
    }
}
/* machine-done notifier: if the machine exposes a /machine/smram
 * region, alias the low 4GiB of it into this CPU's address space with
 * elevated priority so SMM accesses hit SMRAM.
 */
static void x86_cpu_machine_done(Notifier *n, void *unused)
{
    X86CPU *cpu = container_of(n, X86CPU, machine_done);
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    if (smram) {
        cpu->smram = g_new(MemoryRegion, 1);
        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
                                 smram, 0, 1ull << 32);
        memory_region_set_enabled(cpu->smram, true);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
    }
}
4639 #else
/* User-mode emulation has no APIC device; realize is a no-op. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
4643 #endif
4645 /* Note: Only safe for use on x86(-64) hosts */
4646 static uint32_t x86_host_phys_bits(void)
4648 uint32_t eax;
4649 uint32_t host_phys_bits;
4651 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
4652 if (eax >= 0x80000008) {
4653 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
4654 /* Note: According to AMD doc 25481 rev 2.34 they have a field
4655 * at 23:16 that can specify a maximum physical address bits for
4656 * the guest that can override this value; but I've not seen
4657 * anything with that set.
4659 host_phys_bits = eax & 0xff;
4660 } else {
4661 /* It's an odd 64 bit machine that doesn't have the leaf for
4662 * physical address bits; fall back to 36 that's most older
4663 * Intel.
4665 host_phys_bits = 36;
4668 return host_phys_bits;
4671 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
4673 if (*min < value) {
4674 *min = value;
/* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate
 *
 * If any bit of feature word @w is enabled, make sure the minimum CPUID
 * level for the leaf range that word lives in (basic / 0x80000000 /
 * 0xC0000000) covers the word's defining leaf.
 */
static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
{
    CPUX86State *env = &cpu->env;
    FeatureWordInfo *fi = &feature_word_info[w];
    uint32_t eax = fi->cpuid_eax;
    uint32_t region = eax & 0xF0000000;

    /* Nothing enabled in this word: no level bump needed */
    if (!env->features[w]) {
        return;
    }

    switch (region) {
    case 0x00000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
        break;
    case 0x80000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
        break;
    case 0xC0000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
        break;
    }
}
/* Calculate XSAVE components based on the configured CPU feature flags
 *
 * Builds the FEAT_XSAVE_COMP_{LO,HI} bitmap from every extended save
 * area whose controlling feature bit is enabled. No-op when the CPU
 * does not advertise XSAVE at all.
 */
static void x86_cpu_enable_xsave_components(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int i;
    uint64_t mask;

    if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
        return;
    }

    mask = 0;
    for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            mask |= (1ULL << i);
        }
    }

    env->features[FEAT_XSAVE_COMP_LO] = mask;
    env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
}
4726 /***** Steps involved on loading and filtering CPUID data
4728 * When initializing and realizing a CPU object, the steps
4729 * involved in setting up CPUID data are:
4731 * 1) Loading CPU model definition (X86CPUDefinition). This is
4732 * implemented by x86_cpu_load_def() and should be completely
4733 * transparent, as it is done automatically by instance_init.
4734 * No code should need to look at X86CPUDefinition structs
4735 * outside instance_init.
4737 * 2) CPU expansion. This is done by realize before CPUID
4738 * filtering, and will make sure host/accelerator data is
4739 * loaded for CPU models that depend on host capabilities
4740 * (e.g. "host"). Done by x86_cpu_expand_features().
4742 * 3) CPUID filtering. This initializes extra data related to
4743 * CPUID, and checks if the host supports all capabilities
4744 * required by the CPU. Runnability of a CPU model is
4745 * determined at this step. Done by x86_cpu_filter_features().
4747 * Some operations don't require all steps to be performed.
4748 * More precisely:
4750 * - CPU instance creation (instance_init) will run only CPU
4751 * model loading. CPU expansion can't run at instance_init-time
4752 * because host/accelerator data may be not available yet.
4753 * - CPU realization will perform both CPU model expansion and CPUID
4754 * filtering, and return an error in case one of them fails.
4755 * - query-cpu-definitions needs to run all 3 steps. It needs
4756 * to run CPUID filtering, as the 'unavailable-features'
4757 * field is set based on the filtering results.
4758 * - The query-cpu-model-expansion QMP command only needs to run
4759 * CPU model loading and CPU expansion. It should not filter
4760 * any CPUID data based on host capabilities.
/* Expand CPU configuration data, based on configured features
 * and host/accelerator capabilities when appropriate.
 *
 * Applies (in order): "max"-mode host feature defaults, the global
 * +feature/-feature command-line lists, KVM paravirt gating, XSAVE
 * component calculation, and automatic CPUID level adjustment.
 * Errors from property setters are propagated through @errp.
 */
static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    GList *l;
    Error *local_err = NULL;

    /*TODO: Now cpu->max_features doesn't overwrite features
     * set using QOM properties, and we can convert
     * plus_features & minus_features to global properties
     * inside x86_cpu_parse_featurestr() too.
     */
    if (cpu->max_features) {
        for (w = 0; w < FEATURE_WORDS; w++) {
            /* Override only features that weren't set explicitly
             * by the user.
             */
            env->features[w] |=
                x86_cpu_get_supported_feature_word(w, cpu->migratable) &
                ~env->user_features[w] & \
                ~feature_word_info[w].no_autoenable_flags;
        }
    }

    for (l = plus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    for (l = minus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /* KVM paravirt features only make sense with KVM and expose-kvm on */
    if (!kvm_enabled() || !cpu->expose_kvm) {
        env->features[FEAT_KVM] = 0;
    }

    x86_cpu_enable_xsave_components(cpu);

    /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
    x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
    if (cpu->full_cpuid_auto_level) {
        x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
        x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
        x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
        x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
        /* SVM requires CPUID[0x8000000A] */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
        }

        /* SEV requires CPUID[0x8000001F] */
        if (sev_enabled()) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
        }
    }

    /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
    if (env->cpuid_level == UINT32_MAX) {
        env->cpuid_level = env->cpuid_min_level;
    }
    if (env->cpuid_xlevel == UINT32_MAX) {
        env->cpuid_xlevel = env->cpuid_min_xlevel;
    }
    if (env->cpuid_xlevel2 == UINT32_MAX) {
        env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
    }

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
}
/*
 * Finishes initialization of CPUID data, filters CPU feature
 * words based on host availability of each feature.
 *
 * Dropped bits are recorded in cpu->filtered_features so they can be
 * reported to the user (check/enforce modes). Intel PT gets an extra
 * all-or-nothing check because its capabilities are not configurable.
 *
 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
 */
static int x86_cpu_filter_features(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    int rv = 0;

    for (w = 0; w < FEATURE_WORDS; w++) {
        uint32_t host_feat =
            x86_cpu_get_supported_feature_word(w, false);
        uint32_t requested_features = env->features[w];
        env->features[w] &= host_feat;
        cpu->filtered_features[w] = requested_features & ~env->features[w];
        if (cpu->filtered_features[w]) {
            rv = 1;
        }
    }

    if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
        kvm_enabled()) {
        KVMState *s = CPU(cpu)->kvm_state;
        uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
        uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
        uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
        uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
        uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);

        if (!eax_0 ||
           ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
           ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
           ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
           ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
                                           INTEL_PT_ADDR_RANGES_NUM) ||
           ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
                (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
           (ecx_0 & INTEL_PT_IP_LIP)) {
            /*
             * Processor Trace capabilities aren't configurable, so if the
             * host can't emulate the capabilities we report on
             * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
             */
            env->features[FEAT_7_0_EBX] &= ~CPUID_7_0_EBX_INTEL_PT;
            cpu->filtered_features[FEAT_7_0_EBX] |= CPUID_7_0_EBX_INTEL_PT;
            rv = 1;
        }
    }

    return rv;
}
4909 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
4910 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
4911 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
4912 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
4913 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
4914 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
/*
 * Realize method for TYPE_X86_CPU.
 *
 * Expands and filters the requested CPUID feature set, decides the guest
 * physical address width, builds cache information, creates the APIC and
 * starts the vCPU.  On failure an error is set in @errp and the device is
 * left unrealized.  Note the strict ordering below: feature expansion must
 * precede filtering, and the hyperthreading warning must come after
 * qemu_init_vcpu() (see the NOTE near the end).
 */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;  /* emit the no-topoext warning only once */

    if (xcc->host_cpuid_required) {
        /* "host"-style models can only run under a host-CPUID accelerator */
        if (!accel_uses_host_cpuid()) {
            char *name = x86_cpu_class_get_model_name(xcc);
            error_setg(&local_err, "CPU model '%s' requires KVM", name);
            g_free(name);
            goto out;
        }

        if (enable_cpu_pm) {
            /* Expose the host's MONITOR/MWAIT leaf (CPUID[5]) to the guest */
            host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx,
                       &cpu->mwait.ecx, &cpu->mwait.edx);
            env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR;
        }
    }

    /* mwait extended info: needed for Core compatibility */
    /* We always wake on interrupt even if host does not have the capability */
    cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;

    if (cpu->apic_id == UNASSIGNED_APIC_ID) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    x86_cpu_expand_features(cpu, &local_err);
    if (local_err) {
        goto out;
    }

    /* Drop features the accelerator can't provide; with "enforce" any
     * filtered feature is a hard error rather than just a warning. */
    if (x86_cpu_filter_features(cpu) &&
        (cpu->check_cpuid || cpu->enforce_cpuid)) {
        x86_cpu_report_filtered_features(cpu);
        if (cpu->enforce_cpuid) {
            error_setg(&local_err,
                       accel_uses_host_cpuid() ?
                       "Host doesn't support requested features" :
                       "TCG doesn't support requested features");
            goto out;
        }
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
            & CPUID_EXT2_AMD_ALIASES);
    }

    /* For 64bit systems think about the number of physical bits to present.
     * ideally this should be the same as the host; anything other than matching
     * the host can cause incorrect guest behaviour.
     * QEMU used to pick the magic value of 40 bits that corresponds to
     * consumer AMD devices but nothing else.
     */
    if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
        if (accel_uses_host_cpuid()) {
            uint32_t host_phys_bits = x86_host_phys_bits();
            static bool warned;  /* warn about the mismatch only once */

            if (cpu->host_phys_bits) {
                /* The user asked for us to use the host physical bits */
                cpu->phys_bits = host_phys_bits;
            }

            /* Print a warning if the user set it to a value that's not the
             * host value.
             */
            if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
                !warned) {
                warn_report("Host physical bits (%u)"
                            " does not match phys-bits property (%u)",
                            host_phys_bits, cpu->phys_bits);
                warned = true;
            }

            if (cpu->phys_bits &&
                (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
                cpu->phys_bits < 32)) {
                error_setg(errp, "phys-bits should be between 32 and %u "
                                 " (but is %u)",
                                 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
                return;
            }
        } else {
            /* TCG emulates exactly TCG_PHYS_ADDR_BITS; anything else fails */
            if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
                error_setg(errp, "TCG only supports phys-bits=%u",
                                  TCG_PHYS_ADDR_BITS);
                return;
            }
        }
        /* 0 means it was not explicitly set by the user (or by machine
         * compat_props or by the host code above). In this case, the default
         * is the value used by TCG (40).
         */
        if (cpu->phys_bits == 0) {
            cpu->phys_bits = TCG_PHYS_ADDR_BITS;
        }
    } else {
        /* For 32 bit systems don't use the user set value, but keep
         * phys_bits consistent with what we tell the guest.
         */
        if (cpu->phys_bits != 0) {
            error_setg(errp, "phys-bits is not user-configurable in 32 bit");
            return;
        }

        /* PSE36 extends 32-bit paging to 36 physical address bits */
        if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
            cpu->phys_bits = 36;
        } else {
            cpu->phys_bits = 32;
        }
    }

    /* Cache information initialization */
    if (!cpu->legacy_cache) {
        if (!xcc->cpu_def || !xcc->cpu_def->cache_info) {
            char *name = x86_cpu_class_get_model_name(xcc);
            error_setg(errp,
                       "CPU model '%s' doesn't support legacy-cache=off", name);
            g_free(name);
            return;
        }
        env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
            *xcc->cpu_def->cache_info;
    } else {
        /* Build legacy cache information */
        env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
        env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;

        env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
        env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;

        env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
        env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
        env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
        env->cache_info_amd.l3_cache = &legacy_l3_cache;
    }

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    /* Create the APIC when the guest sees one, or whenever SMP is in use */
    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        /* TCG needs a second address space for SMM (SMRAM overlay) */
        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);

        cs->num_ases = 2;
        cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
        cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);

        /* ... SMRAM with higher priority, linked from /machine/smram. */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /*
     * Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU
     * fixes this issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to give
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't be populated yet and the checking is incorrect.
     */
    if (IS_AMD_CPU(env) &&
        !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) &&
        cs->nr_threads > 1 && !ht_warned) {
        warn_report("This family of AMD CPU doesn't support "
                    "hyperthreading(%d)",
                    cs->nr_threads);
        error_printf("Please configure -smp options properly"
                     " or try enabling topoext feature.\n");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
5152 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
5154 X86CPU *cpu = X86_CPU(dev);
5155 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
5156 Error *local_err = NULL;
5158 #ifndef CONFIG_USER_ONLY
5159 cpu_remove_sync(CPU(dev));
5160 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
5161 #endif
5163 if (cpu->apic_state) {
5164 object_unparent(OBJECT(cpu->apic_state));
5165 cpu->apic_state = NULL;
5168 xcc->parent_unrealize(dev, &local_err);
5169 if (local_err != NULL) {
5170 error_propagate(errp, local_err);
5171 return;
/* Opaque data for one feature-flag QOM property: selects one or more bits
 * (mask) inside a single feature word (w) of env->features[]. */
typedef struct BitProperty {
    FeatureWord w;      /* which feature word the property controls */
    uint32_t mask;      /* bit(s) within that word; may cover several bits */
} BitProperty;
5180 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
5181 void *opaque, Error **errp)
5183 X86CPU *cpu = X86_CPU(obj);
5184 BitProperty *fp = opaque;
5185 uint32_t f = cpu->env.features[fp->w];
5186 bool value = (f & fp->mask) == fp->mask;
5187 visit_type_bool(v, name, &value, errp);
5190 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
5191 void *opaque, Error **errp)
5193 DeviceState *dev = DEVICE(obj);
5194 X86CPU *cpu = X86_CPU(obj);
5195 BitProperty *fp = opaque;
5196 Error *local_err = NULL;
5197 bool value;
5199 if (dev->realized) {
5200 qdev_prop_set_after_realize(dev, name, errp);
5201 return;
5204 visit_type_bool(v, name, &value, &local_err);
5205 if (local_err) {
5206 error_propagate(errp, local_err);
5207 return;
5210 if (value) {
5211 cpu->env.features[fp->w] |= fp->mask;
5212 } else {
5213 cpu->env.features[fp->w] &= ~fp->mask;
5215 cpu->env.user_features[fp->w] |= fp->mask;
/* QOM release hook: frees the BitProperty allocated by
 * x86_cpu_register_bit_prop() when the property is removed. */
static void x86_cpu_release_bit_prop(Object *obj, const char *name,
                                     void *opaque)
{
    BitProperty *prop = opaque;
    g_free(prop);
}
5225 /* Register a boolean property to get/set a single bit in a uint32_t field.
5227 * The same property name can be registered multiple times to make it affect
5228 * multiple bits in the same FeatureWord. In that case, the getter will return
5229 * true only if all bits are set.
5231 static void x86_cpu_register_bit_prop(X86CPU *cpu,
5232 const char *prop_name,
5233 FeatureWord w,
5234 int bitnr)
5236 BitProperty *fp;
5237 ObjectProperty *op;
5238 uint32_t mask = (1UL << bitnr);
5240 op = object_property_find(OBJECT(cpu), prop_name, NULL);
5241 if (op) {
5242 fp = op->opaque;
5243 assert(fp->w == w);
5244 fp->mask |= mask;
5245 } else {
5246 fp = g_new0(BitProperty, 1);
5247 fp->w = w;
5248 fp->mask = mask;
5249 object_property_add(OBJECT(cpu), prop_name, "bool",
5250 x86_cpu_get_bit_prop,
5251 x86_cpu_set_bit_prop,
5252 x86_cpu_release_bit_prop, fp, &error_abort);
5256 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
5257 FeatureWord w,
5258 int bitnr)
5260 FeatureWordInfo *fi = &feature_word_info[w];
5261 const char *name = fi->feat_names[bitnr];
5263 if (!name) {
5264 return;
5267 /* Property names should use "-" instead of "_".
5268 * Old names containing underscores are registered as aliases
5269 * using object_property_add_alias()
5271 assert(!strchr(name, '_'));
5272 /* aliases don't use "|" delimiters anymore, they are registered
5273 * manually using object_property_add_alias() */
5274 assert(!strchr(name, '|'));
5275 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
5278 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
5280 X86CPU *cpu = X86_CPU(cs);
5281 CPUX86State *env = &cpu->env;
5282 GuestPanicInformation *panic_info = NULL;
5284 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
5285 panic_info = g_malloc0(sizeof(GuestPanicInformation));
5287 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
5289 assert(HV_CRASH_PARAMS >= 5);
5290 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
5291 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
5292 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
5293 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
5294 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
5297 return panic_info;
5299 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
5300 const char *name, void *opaque,
5301 Error **errp)
5303 CPUState *cs = CPU(obj);
5304 GuestPanicInformation *panic_info;
5306 if (!cs->crash_occurred) {
5307 error_setg(errp, "No crash occured");
5308 return;
5311 panic_info = x86_cpu_get_crash_info(cs);
5312 if (panic_info == NULL) {
5313 error_setg(errp, "No crash information");
5314 return;
5317 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
5318 errp);
5319 qapi_free_GuestPanicInformation(panic_info);
/*
 * Instance-init for TYPE_X86_CPU: registers the per-instance QOM
 * properties (CPUID version fields, vendor/model strings, feature-word
 * introspection, crash info), one boolean property per named feature bit,
 * the underscore/legacy-name aliases, and finally loads the class's CPU
 * model definition, if any.
 */
static void x86_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    cs->env_ptr = env;

    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    /* Read-only views over the feature words (requested vs. filtered) */
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);

    object_property_add(obj, "crash-information", "GuestPanicInformation",
                        x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;

    /* One boolean property per named bit of every feature word */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    /* Alternative/legacy spellings kept for command-line compatibility */
    object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
    object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
    object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
    object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
    object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "i64", obj, "lm", &error_abort);

    /* Underscore variants of the canonical dash-separated names */
    object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
    object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
    object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
    object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
    object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
    object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
    object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
    object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
    object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
    object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
    object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
    object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
    object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
    object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
    object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
    object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
    object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
    object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
    object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);

    if (xcc->cpu_def) {
        x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
    }
}
5405 static int64_t x86_cpu_get_arch_id(CPUState *cs)
5407 X86CPU *cpu = X86_CPU(cs);
5409 return cpu->apic_id;
5412 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
5414 X86CPU *cpu = X86_CPU(cs);
5416 return cpu->env.cr[0] & CR0_PG_MASK;
5419 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
5421 X86CPU *cpu = X86_CPU(cs);
5423 cpu->env.eip = value;
5426 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
5428 X86CPU *cpu = X86_CPU(cs);
5430 cpu->env.eip = tb->pc - tb->cs_base;
/*
 * Return the highest-priority deliverable interrupt from
 * @interrupt_request, or 0 if none can be taken right now.
 *
 * The chain below encodes x86 delivery priority: POLL, then SIPI, and —
 * only while the global interrupt flag (GIF, relevant under SVM) is set —
 * SMI, NMI, MCE, external (HARD) and finally virtual (VIRQ) interrupts,
 * each gated by the corresponding masking state in hflags/hflags2/eflags.
 * Do not reorder the tests.
 */
int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if !defined(CONFIG_USER_ONLY)
    if (interrupt_request & CPU_INTERRUPT_POLL) {
        return CPU_INTERRUPT_POLL;
    }
#endif
    if (interrupt_request & CPU_INTERRUPT_SIPI) {
        return CPU_INTERRUPT_SIPI;
    }

    if (env->hflags2 & HF2_GIF_MASK) {
        /* SMI is blocked while already in SMM */
        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
            !(env->hflags & HF_SMM_MASK)) {
            return CPU_INTERRUPT_SMI;
        /* NMI is blocked while a previous NMI is being handled */
        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                   !(env->hflags2 & HF2_NMI_MASK)) {
            return CPU_INTERRUPT_NMI;
        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
            return CPU_INTERRUPT_MCE;
        /* External interrupts honour V_INTR/HIF under SVM, otherwise
         * plain EFLAGS.IF plus the one-insn interrupt shadow */
        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                   (((env->hflags2 & HF2_VINTR_MASK) &&
                     (env->hflags2 & HF2_HIF_MASK)) ||
                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                     (env->eflags & IF_MASK &&
                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
            return CPU_INTERRUPT_HARD;
#if !defined(CONFIG_USER_ONLY)
        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                   (env->eflags & IF_MASK) &&
                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
            return CPU_INTERRUPT_VIRQ;
#endif
        }
    }

    return 0;
}
5475 static bool x86_cpu_has_work(CPUState *cs)
5477 return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
5480 static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
5482 X86CPU *cpu = X86_CPU(cs);
5483 CPUX86State *env = &cpu->env;
5485 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
5486 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
5487 : bfd_mach_i386_i8086);
5488 info->print_insn = print_insn_i386;
5490 info->cap_arch = CS_ARCH_X86;
5491 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
5492 : env->hflags & HF_CS32_MASK ? CS_MODE_32
5493 : CS_MODE_16);
5494 info->cap_insn_unit = 1;
5495 info->cap_insn_split = 8;
/*
 * Recompute the derived env->hflags bits from the architectural state
 * (CR0, CR4, EFER, EFLAGS and the segment descriptors).  Bits outside
 * HFLAG_COPY_MASK are preserved from the current hflags; everything the
 * mask covers is rebuilt from scratch.
 */
void x86_update_hflags(CPUX86State *env)
{
    uint32_t hflags;
#define HFLAG_COPY_MASK \
    ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
       HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
       HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
       HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    hflags = env->hflags & HFLAG_COPY_MASK;
    /* CPL is the DPL field of the current stack segment */
    hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    /* CR0.MP/EM/TS shift into place together (same relative layout) */
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
                (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        hflags |= HF_OSFXSR_MASK;
    }

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        /* 64-bit code segment: long mode implies 32-bit-capable CS/SS */
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        /* Derive CS/SS operand size from the descriptor B bits */
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            /* Real mode, vm86 or 16-bit code always add segment bases */
            hflags |= HF_ADDSEG_MASK;
        } else {
            /* Only add segment bases when some base is non-zero */
            hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = hflags;
}
/* qdev properties of TYPE_X86_CPU (topology ids, Hyper-V enlightenments,
 * CPUID level overrides and compatibility knobs). */
static Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
#else
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    /* Hyper-V enlightenments, all off by default */
    { .name  = "hv-spinlocks", .info  = &qdev_prop_spinlocks },
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
    DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
    DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
    DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
    DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
    DEFINE_PROP_BOOL("hv-frequencies", X86CPU, hyperv_frequencies, false),
    DEFINE_PROP_BOOL("hv-reenlightenment", X86CPU, hyperv_reenlightenment, false),
    DEFINE_PROP_BOOL("hv-tlbflush", X86CPU, hyperv_tlbflush, false),
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
    DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
    DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
    /* UINT32_MAX means "not overridden"; see x86_cpu_expand_features() —
     * TODO confirm against that function (outside this chunk) */
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
    DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
    DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
    DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
    DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
    DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
    DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
                     false),
    DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
    DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
    DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count,
                     true),
    /*
     * legacy_cache defaults to true unless the CPU model provides its
     * own cache information (see x86_cpu_load_def()).
     */
    DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true),

    /*
     * From "Requirements for Implementing the Microsoft
     * Hypervisor Interface":
     * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
     *
     * "Starting with Windows Server 2012 and Windows 8, if
     * CPUID.40000005.EAX contains a value of -1, Windows assumes that
     * the hypervisor imposes no specific limit to the number of VPs.
     * In this case, Windows Server 2012 guest VMs may use more than
     * 64 VPs, up to the maximum supported number of processors applicable
     * to the specific Windows version being used."
     */
    DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
    DEFINE_PROP_END_OF_LIST()
};
/*
 * Class-init shared by every x86 CPU model: wires the realize/unrealize
 * and reset chain, installs the qdev property list and fills in the
 * CPUClass virtual-method table (debug, gdbstub, ELF notes, TCG hooks).
 */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    /* Chain realize/unrealize so x86 hooks run around the parent's */
    device_class_set_parent_realize(dc, x86_cpu_realizefn,
                                    &xcc->parent_realize);
    device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
                                      &xcc->parent_unrealize);
    dc->props = x86_cpu_properties;

    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
#ifdef CONFIG_TCG
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
#endif
    cc->dump_state = x86_cpu_dump_state;
    cc->get_crash_info = x86_cpu_get_crash_info;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
    cc->asidx_from_attrs = x86_asidx_from_attrs;
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_arch_name = x86_gdb_arch_name;
#ifdef TARGET_X86_64
    cc->gdb_core_xml_file = "i386-64bit.xml";
    cc->gdb_num_core_regs = 57;
#else
    cc->gdb_core_xml_file = "i386-32bit.xml";
    cc->gdb_num_core_regs = 41;
#endif
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;
#ifdef CONFIG_TCG
    cc->tcg_initialize = tcg_x86_init;
#endif
    cc->disas_set_info = x86_disas_set_info;

    /* CPUs may be hot-plugged via -device / device_add */
    dc->user_creatable = true;
}
/* Abstract QOM base type for every x86 CPU model; concrete models derive
 * from it and are registered in x86_cpu_register_types(). */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,   /* never instantiated directly */
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};
/* "base" CPU model, used by query-cpu-model-expansion */
static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    /* Static and migration-safe: a fixed, empty feature baseline */
    xcc->static_model = true;
    xcc->migration_safe = true;
    xcc->model_description = "base CPU model type with no features enabled";
    /* NOTE(review): ordering presumably controls the position in CPU model
     * listings — confirm against x86_cpu_list_entry (outside this chunk) */
    xcc->ordering = 8;
}
/* QOM type for the "base" CPU model (see x86_cpu_base_class_init above) */
static const TypeInfo x86_base_cpu_type_info = {
        .name = X86_CPU_TYPE_NAME("base"),
        .parent = TYPE_X86_CPU,
        .class_init = x86_cpu_base_class_init,
};
5705 static void x86_cpu_register_types(void)
5707 int i;
5709 type_register_static(&x86_cpu_type_info);
5710 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
5711 x86_register_cpudef_type(&builtin_x86_defs[i]);
5713 type_register_static(&max_x86_cpu_type_info);
5714 type_register_static(&x86_base_cpu_type_info);
5715 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
5716 type_register_static(&host_x86_cpu_type_info);
5717 #endif
5720 type_init(x86_cpu_register_types)