/*
 * i386 CPUID helper functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/cutils.h"
#include "qemu/bitops.h"

#include "exec/exec-all.h"
#include "sysemu/kvm.h"
#include "sysemu/hvf.h"
#include "sysemu/cpus.h"

#include "qemu/error-report.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "qapi/error.h"
#include "qapi/qapi-visit-misc.h"
#include "qapi/qapi-visit-run-state.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qerror.h"
#include "qapi/visitor.h"
#include "qom/qom-qobject.h"
#include "sysemu/arch_init.h"

#include "standard-headers/asm-x86/kvm_para.h"

#include "sysemu/sysemu.h"
#include "hw/qdev-properties.h"
#include "hw/i386/topology.h"
#ifndef CONFIG_USER_ONLY
#include "exec/address-spaces.h"
#include "hw/xen/xen.h"
#include "hw/i386/apic_internal.h"
#endif

#include "disas/capstone.h"
/* Helpers for building CPUID[2] descriptors: */

struct CPUID2CacheDescriptorInfo {
    enum CacheType type;
    int level;
    int size;
    int line_size;
    int associativity;
};

/*
 * Known CPUID 2 cache descriptors.
 * From Intel SDM Volume 2A, CPUID instruction
 */
struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
    [0x06] = { .level = 1, .type = INSTRUCTION_CACHE, .size =   8 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x08] = { .level = 1, .type = INSTRUCTION_CACHE, .size =  16 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x09] = { .level = 1, .type = INSTRUCTION_CACHE, .size =  32 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x0A] = { .level = 1, .type = DATA_CACHE,        .size =   8 * KiB,
               .associativity = 2,  .line_size = 32, },
    [0x0C] = { .level = 1, .type = DATA_CACHE,        .size =  16 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x0D] = { .level = 1, .type = DATA_CACHE,        .size =  16 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x0E] = { .level = 1, .type = DATA_CACHE,        .size =  24 * KiB,
               .associativity = 6,  .line_size = 64, },
    [0x1D] = { .level = 2, .type = UNIFIED_CACHE,     .size = 128 * KiB,
               .associativity = 2,  .line_size = 64, },
    [0x21] = { .level = 2, .type = UNIFIED_CACHE,     .size = 256 * KiB,
               .associativity = 8,  .line_size = 64, },
    /* lines per sector is not supported cpuid2_cache_descriptor(),
     * so descriptors 0x22, 0x23 are not included
     */
    [0x24] = { .level = 2, .type = UNIFIED_CACHE,     .size =   1 * MiB,
               .associativity = 16, .line_size = 64, },
    /* lines per sector is not supported cpuid2_cache_descriptor(),
     * so descriptors 0x25, 0x29 are not included
     */
    [0x2C] = { .level = 1, .type = DATA_CACHE,        .size =  32 * KiB,
               .associativity = 8,  .line_size = 64, },
    [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size =  32 * KiB,
               .associativity = 8,  .line_size = 64, },
    [0x41] = { .level = 2, .type = UNIFIED_CACHE,     .size = 128 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x42] = { .level = 2, .type = UNIFIED_CACHE,     .size = 256 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x43] = { .level = 2, .type = UNIFIED_CACHE,     .size = 512 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x44] = { .level = 2, .type = UNIFIED_CACHE,     .size =   1 * MiB,
               .associativity = 4,  .line_size = 32, },
    [0x45] = { .level = 2, .type = UNIFIED_CACHE,     .size =   2 * MiB,
               .associativity = 4,  .line_size = 32, },
    [0x46] = { .level = 3, .type = UNIFIED_CACHE,     .size =   4 * MiB,
               .associativity = 4,  .line_size = 64, },
    [0x47] = { .level = 3, .type = UNIFIED_CACHE,     .size =   8 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0x48] = { .level = 2, .type = UNIFIED_CACHE,     .size =   3 * MiB,
               .associativity = 12, .line_size = 64, },
    /* Descriptor 0x49 depends on CPU family/model, so it is not included */
    [0x4A] = { .level = 3, .type = UNIFIED_CACHE,     .size =   6 * MiB,
               .associativity = 12, .line_size = 64, },
    [0x4B] = { .level = 3, .type = UNIFIED_CACHE,     .size =   8 * MiB,
               .associativity = 16, .line_size = 64, },
    [0x4C] = { .level = 3, .type = UNIFIED_CACHE,     .size =  12 * MiB,
               .associativity = 12, .line_size = 64, },
    [0x4D] = { .level = 3, .type = UNIFIED_CACHE,     .size =  16 * MiB,
               .associativity = 16, .line_size = 64, },
    [0x4E] = { .level = 2, .type = UNIFIED_CACHE,     .size =   6 * MiB,
               .associativity = 24, .line_size = 64, },
    [0x60] = { .level = 1, .type = DATA_CACHE,        .size =  16 * KiB,
               .associativity = 8,  .line_size = 64, },
    [0x66] = { .level = 1, .type = DATA_CACHE,        .size =   8 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x67] = { .level = 1, .type = DATA_CACHE,        .size =  16 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x68] = { .level = 1, .type = DATA_CACHE,        .size =  32 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x78] = { .level = 2, .type = UNIFIED_CACHE,     .size =   1 * MiB,
               .associativity = 4,  .line_size = 64, },
    /* lines per sector is not supported cpuid2_cache_descriptor(),
     * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
     */
    [0x7D] = { .level = 2, .type = UNIFIED_CACHE,     .size =   2 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0x7F] = { .level = 2, .type = UNIFIED_CACHE,     .size = 512 * KiB,
               .associativity = 2,  .line_size = 64, },
    [0x80] = { .level = 2, .type = UNIFIED_CACHE,     .size = 512 * KiB,
               .associativity = 8,  .line_size = 64, },
    [0x82] = { .level = 2, .type = UNIFIED_CACHE,     .size = 256 * KiB,
               .associativity = 8,  .line_size = 32, },
    [0x83] = { .level = 2, .type = UNIFIED_CACHE,     .size = 512 * KiB,
               .associativity = 8,  .line_size = 32, },
    [0x84] = { .level = 2, .type = UNIFIED_CACHE,     .size =   1 * MiB,
               .associativity = 8,  .line_size = 32, },
    [0x85] = { .level = 2, .type = UNIFIED_CACHE,     .size =   2 * MiB,
               .associativity = 8,  .line_size = 32, },
    [0x86] = { .level = 2, .type = UNIFIED_CACHE,     .size = 512 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x87] = { .level = 2, .type = UNIFIED_CACHE,     .size =   1 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0xD0] = { .level = 3, .type = UNIFIED_CACHE,     .size = 512 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0xD1] = { .level = 3, .type = UNIFIED_CACHE,     .size =   1 * MiB,
               .associativity = 4,  .line_size = 64, },
    [0xD2] = { .level = 3, .type = UNIFIED_CACHE,     .size =   2 * MiB,
               .associativity = 4,  .line_size = 64, },
    [0xD6] = { .level = 3, .type = UNIFIED_CACHE,     .size =   1 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0xD7] = { .level = 3, .type = UNIFIED_CACHE,     .size =   2 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0xD8] = { .level = 3, .type = UNIFIED_CACHE,     .size =   4 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0xDC] = { .level = 3, .type = UNIFIED_CACHE,     .size = 1.5 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xDD] = { .level = 3, .type = UNIFIED_CACHE,     .size =   3 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xDE] = { .level = 3, .type = UNIFIED_CACHE,     .size =   6 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xE2] = { .level = 3, .type = UNIFIED_CACHE,     .size =   2 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xE3] = { .level = 3, .type = UNIFIED_CACHE,     .size =   4 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xE4] = { .level = 3, .type = UNIFIED_CACHE,     .size =   8 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xEA] = { .level = 3, .type = UNIFIED_CACHE,     .size =  12 * MiB,
               .associativity = 24, .line_size = 64, },
    [0xEB] = { .level = 3, .type = UNIFIED_CACHE,     .size =  18 * MiB,
               .associativity = 24, .line_size = 64, },
    [0xEC] = { .level = 3, .type = UNIFIED_CACHE,     .size =  24 * MiB,
               .associativity = 24, .line_size = 64, },
};
/*
 * "CPUID leaf 2 does not report cache descriptor information,
 * use CPUID leaf 4 to query cache parameters"
 */
#define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF
/*
 * Return a CPUID 2 cache descriptor for a given cache.
 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
 */
static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
{
    int i;

    assert(cache->size > 0);
    assert(cache->level > 0);
    assert(cache->line_size > 0);
    assert(cache->associativity > 0);
    for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
        struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
        if (d->level == cache->level && d->type == cache->type &&
            d->size == cache->size && d->line_size == cache->line_size &&
            d->associativity == cache->associativity) {
            return i;
        }
    }

    return CACHE_DESCRIPTOR_UNAVAILABLE;
}
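
/*
 * Worked example (illustrative only, not part of the lookup table above):
 * a 32 KiB, 8-way, level-1 data cache with 64-byte lines matches the entry
 * at index 0x2C, so cpuid2_cache_descriptor() would return 0x2C:
 *
 *     CPUCacheInfo l1d = {
 *         .type = DATA_CACHE, .level = 1, .size = 32 * KiB,
 *         .associativity = 8, .line_size = 64,
 *     };
 *     uint8_t desc = cpuid2_cache_descriptor(&l1d);   // desc == 0x2C
 *
 * Any geometry without an exact table match (e.g. a 48 KiB L1) falls back
 * to CACHE_DESCRIPTOR_UNAVAILABLE (0xFF).
 */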
/* CPUID Leaf 4 constants: */

/* EAX: */
#define CACHE_TYPE_D    1
#define CACHE_TYPE_I    2
#define CACHE_TYPE_UNIFIED   3
#define CACHE_LEVEL(l)        (l << 5)
#define CACHE_SELF_INIT_LEVEL (1 << 8)

/* EDX: */
#define CACHE_NO_INVD_SHARING (1 << 0)
#define CACHE_INCLUSIVE       (1 << 1)
#define CACHE_COMPLEX_IDX     (1 << 2)

/* Encode CacheType for CPUID[4].EAX */
#define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \
                       ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \
                       ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
                       0 /* Invalid value */)
/* Encode cache info for CPUID[4] */
static void encode_cache_cpuid4(CPUCacheInfo *cache,
                                int num_apic_ids, int num_cores,
                                uint32_t *eax, uint32_t *ebx,
                                uint32_t *ecx, uint32_t *edx)
{
    assert(cache->size == cache->line_size * cache->associativity *
                          cache->partitions * cache->sets);

    assert(num_apic_ids > 0);
    *eax = CACHE_TYPE(cache->type) |
           CACHE_LEVEL(cache->level) |
           (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
           ((num_cores - 1) << 26) |
           ((num_apic_ids - 1) << 14);

    assert(cache->line_size > 0);
    assert(cache->partitions > 0);
    assert(cache->associativity > 0);
    /* We don't implement fully-associative caches */
    assert(cache->associativity < cache->sets);
    *ebx = (cache->line_size - 1) |
           ((cache->partitions - 1) << 12) |
           ((cache->associativity - 1) << 22);

    assert(cache->sets > 0);
    *ecx = cache->sets - 1;

    *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
           (cache->inclusive ? CACHE_INCLUSIVE : 0) |
           (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
}
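
/*
 * Example of the CPUID[4] register packing above (illustrative values):
 * for a cache with 64-byte lines, 1 partition and 8-way associativity,
 *   EBX = (64 - 1) | ((1 - 1) << 12) | ((8 - 1) << 22) = 0x01C0003F,
 * and a cache with 64 sets reports ECX = 64 - 1 = 63.  Guests recover the
 * total size as line_size * associativity * partitions * sets, which is
 * exactly the identity asserted at the top of encode_cache_cpuid4().
 */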
/* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
{
    assert(cache->size % 1024 == 0);
    assert(cache->lines_per_tag > 0);
    assert(cache->associativity > 0);
    assert(cache->line_size > 0);
    return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
           (cache->lines_per_tag << 8) | (cache->line_size);
}
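
/*
 * Worked example for the 0x80000005 packing (illustrative numbers only):
 * a 64 KiB, 2-way L1 cache with 1 line per tag and 64-byte lines encodes as
 *   (64 << 24) | (2 << 16) | (1 << 8) | 64 = 0x40020140,
 * i.e. size in KiB, associativity, lines/tag and line size each occupy one
 * byte of the returned register.
 */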
#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006: */
#define AMD_ENC_ASSOC(a) (a <=   1 ? a   : \
                          a ==   2 ? 0x2 : \
                          a ==   4 ? 0x4 : \
                          a ==   8 ? 0x6 : \
                          a ==  16 ? 0x8 : \
                          a ==  32 ? 0xA : \
                          a ==  48 ? 0xB : \
                          a ==  64 ? 0xC : \
                          a ==  96 ? 0xD : \
                          a == 128 ? 0xE : \
                          a == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
/*
 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
 * @l3 can be NULL.
 */
static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
                                       CPUCacheInfo *l3,
                                       uint32_t *ecx, uint32_t *edx)
{
    assert(l2->size % 1024 == 0);
    assert(l2->associativity > 0);
    assert(l2->lines_per_tag > 0);
    assert(l2->line_size > 0);
    *ecx = ((l2->size / 1024) << 16) |
           (AMD_ENC_ASSOC(l2->associativity) << 12) |
           (l2->lines_per_tag << 8) | (l2->line_size);

    if (l3) {
        assert(l3->size % (512 * 1024) == 0);
        assert(l3->associativity > 0);
        assert(l3->lines_per_tag > 0);
        assert(l3->line_size > 0);
        *edx = ((l3->size / (512 * 1024)) << 18) |
               (AMD_ENC_ASSOC(l3->associativity) << 12) |
               (l3->lines_per_tag << 8) | (l3->line_size);
    } else {
        *edx = 0;
    }
}
/*
 * Definitions used for building CPUID Leaf 0x8000001D and 0x8000001E
 * Please refer to the AMD64 Architecture Programmer’s Manual Volume 3.
 * Define the constants to build the cpu topology. Right now, TOPOEXT
 * feature is enabled only on EPYC. So, these constants are based on
 * EPYC supported configurations. We may need to handle the cases if
 * these values change in future.
 */
/* Maximum core complexes in a node */
#define MAX_CCX 2
/* Maximum cores in a core complex */
#define MAX_CORES_IN_CCX 4
/* Maximum cores in a node */
#define MAX_CORES_IN_NODE 8
/* Maximum nodes in a socket */
#define MAX_NODES_PER_SOCKET 4
/*
 * Figure out the number of nodes required to build this config.
 * Max cores in a node is 8
 */
static int nodes_in_socket(int nr_cores)
{
    int nodes;

    nodes = DIV_ROUND_UP(nr_cores, MAX_CORES_IN_NODE);

    /* Hardware does not support config with 3 nodes, return 4 in that case */
    return (nodes == 3) ? 4 : nodes;
}
/*
 * Decide the number of cores in a core complex with the given nr_cores using
 * the following constants: MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE and
 * MAX_NODES_PER_SOCKET. Maintain symmetry as much as possible.
 * L3 cache is shared across all cores in a core complex. So, this will also
 * tell us how many cores are sharing the L3 cache.
 */
static int cores_in_core_complex(int nr_cores)
{
    int nodes;

    /* Check if we can fit all the cores in one core complex */
    if (nr_cores <= MAX_CORES_IN_CCX) {
        return nr_cores;
    }
    /* Get the number of nodes required to build this config */
    nodes = nodes_in_socket(nr_cores);

    /*
     * Divide the cores across all the core complexes
     * Return rounded up value
     */
    return DIV_ROUND_UP(nr_cores, nodes * MAX_CCX);
}
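
/*
 * Example of the sizing logic above (hypothetical guest config): with
 * nr_cores = 12, nodes_in_socket() gives DIV_ROUND_UP(12, 8) = 2 nodes,
 * and cores_in_core_complex() returns DIV_ROUND_UP(12, 2 * MAX_CCX) = 3,
 * i.e. four core complexes of 3 cores each, so 3 cores share each L3.
 */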
/* Encode cache info for CPUID[8000001D] */
static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs,
                                       uint32_t *eax, uint32_t *ebx,
                                       uint32_t *ecx, uint32_t *edx)
{
    uint32_t l3_cores;

    assert(cache->size == cache->line_size * cache->associativity *
                          cache->partitions * cache->sets);

    *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) |
               (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0);

    /* L3 is shared among multiple cores */
    if (cache->level == 3) {
        l3_cores = cores_in_core_complex(cs->nr_cores);
        *eax |= ((l3_cores * cs->nr_threads) - 1) << 14;
    } else {
        *eax |= ((cs->nr_threads - 1) << 14);
    }

    assert(cache->line_size > 0);
    assert(cache->partitions > 0);
    assert(cache->associativity > 0);
    /* We don't implement fully-associative caches */
    assert(cache->associativity < cache->sets);
    *ebx = (cache->line_size - 1) |
           ((cache->partitions - 1) << 12) |
           ((cache->associativity - 1) << 22);

    assert(cache->sets > 0);
    *ecx = cache->sets - 1;

    *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
           (cache->inclusive ? CACHE_INCLUSIVE : 0) |
           (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
}
/* Data structure to hold the configuration info for a given core index */
struct core_topology {
    /* core complex id of the current core index */
    int ccx_id;
    /*
     * Adjusted core index for this core in the topology
     * This can be 0,1,2,3 with max 4 cores in a core complex
     */
    int core_id;
    /* Node id for this core index */
    int node_id;
    /* Number of nodes in this config */
    int num_nodes;
};
/*
 * Build the configuration closely matching the EPYC hardware. Using the EPYC
 * hardware configuration values (MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE)
 * right now. This could change in future.
 * nr_cores : Total number of cores in the config
 * core_id : Core index of the current CPU
 * topo : Data structure to hold all the config info for this core index
 */
static void build_core_topology(int nr_cores, int core_id,
                                struct core_topology *topo)
{
    int nodes, cores_in_ccx;

    /* First get the number of nodes required */
    nodes = nodes_in_socket(nr_cores);

    cores_in_ccx = cores_in_core_complex(nr_cores);

    topo->node_id = core_id / (cores_in_ccx * MAX_CCX);
    topo->ccx_id = (core_id % (cores_in_ccx * MAX_CCX)) / cores_in_ccx;
    topo->core_id = core_id % cores_in_ccx;
    topo->num_nodes = nodes;
}
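
/*
 * Illustrative result (hypothetical values): with nr_cores = 12 the helpers
 * above give cores_in_ccx = 3, so core_id = 7 maps to
 *   node_id = 7 / (3 * 2) = 1, ccx_id = (7 % 6) / 3 = 0, core_id = 7 % 3 = 1,
 * i.e. the second core of the first core complex on node 1.
 */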
/* Encode topology info for CPUID[8000001E] */
static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu,
                                      uint32_t *eax, uint32_t *ebx,
                                      uint32_t *ecx, uint32_t *edx)
{
    struct core_topology topo = {0};
    unsigned long nodes;
    int shift;

    build_core_topology(cs->nr_cores, cpu->core_id, &topo);
    *eax = cpu->apic_id;
    /*
     * CPUID_Fn8000001E_EBX
     * 31:16 Reserved
     * 15:8  Threads per core (The number of threads per core is
     *       Threads per core + 1)
     *  7:0  Core id (see bit decoding below)
     *       SMT:
     *           4:3 node id
     *             2 Core complex id
     *           1:0 Core id
     *       Non SMT:
     *           5:4 node id
     *             3 Core complex id
     *           1:0 Core id
     */
    if (cs->nr_threads - 1) {
        *ebx = ((cs->nr_threads - 1) << 8) | (topo.node_id << 3) |
                (topo.ccx_id << 2) | topo.core_id;
    } else {
        *ebx = (topo.node_id << 4) | (topo.ccx_id << 3) | topo.core_id;
    }
    /*
     * CPUID_Fn8000001E_ECX
     * 31:11 Reserved
     * 10:8  Nodes per processor (Nodes per processor is number of nodes + 1)
     *  7:0  Node id (see bit decoding below)
     *         2  Socket id
     *       1:0  Node id
     */
    if (topo.num_nodes <= 4) {
        *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << 2) |
                topo.node_id;
    } else {
        /*
         * Node id fix up. Actual hardware supports up to 4 nodes. But with
         * more than 32 cores, we may end up with more than 4 nodes.
         * Node id is a combination of socket id and node id. Only requirement
         * here is that this number should be unique across the system.
         * Shift the socket id to accommodate more nodes. We don't expect both
         * socket id and node id to be big numbers at the same time. This is
         * not an ideal config but we need to support it. Max nodes we can have
         * is 32 (255/8) with 8 cores per node and 255 max cores. We only need
         * 5 bits for nodes. Find the left most set bit to represent the total
         * number of nodes. find_last_bit returns last set bit(0 based). Left
         * shift(+1) the socket id to represent all the nodes.
         */
        nodes = topo.num_nodes - 1;
        shift = find_last_bit(&nodes, 8);
        *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << (shift + 1)) |
                topo.node_id;
    }
    *edx = 0;
}
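
/*
 * Illustrative CPUID_Fn8000001E values (hypothetical 2-thread, 12-core
 * guest): for the core computed in the previous example
 * (node_id = 1, ccx_id = 0, core_id = 1) with nr_threads = 2,
 *   EBX = ((2 - 1) << 8) | (1 << 3) | (0 << 2) | 1 = 0x109,
 * and with num_nodes = 2 on socket 0,
 *   ECX = ((2 - 1) << 8) | (0 << 2) | 1 = 0x101.
 */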
534 * Definitions of the hardcoded cache entries we expose:
535 * These are legacy cache values. If there is a need to change any
536 * of these values please use builtin_x86_defs
540 static CPUCacheInfo legacy_l1d_cache
= {
549 .no_invd_sharing
= true,
552 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
553 static CPUCacheInfo legacy_l1d_cache_amd
= {
563 .no_invd_sharing
= true,
566 /* L1 instruction cache: */
567 static CPUCacheInfo legacy_l1i_cache
= {
568 .type
= INSTRUCTION_CACHE
,
576 .no_invd_sharing
= true,
579 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
580 static CPUCacheInfo legacy_l1i_cache_amd
= {
581 .type
= INSTRUCTION_CACHE
,
590 .no_invd_sharing
= true,
593 /* Level 2 unified cache: */
594 static CPUCacheInfo legacy_l2_cache
= {
595 .type
= UNIFIED_CACHE
,
603 .no_invd_sharing
= true,
606 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
607 static CPUCacheInfo legacy_l2_cache_cpuid2
= {
608 .type
= UNIFIED_CACHE
,
616 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
617 static CPUCacheInfo legacy_l2_cache_amd
= {
618 .type
= UNIFIED_CACHE
,
628 /* Level 3 unified cache: */
629 static CPUCacheInfo legacy_l3_cache
= {
630 .type
= UNIFIED_CACHE
,
640 .complex_indexing
= true,
/* TLB definitions: */

#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512

/* CPUID Leaf 0x14 constants: */
#define INTEL_PT_MAX_SUBLEAF     0x1
/*
 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
 *          MSR can be accessed;
 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
 *          of Intel PT MSRs across warm reset;
 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
 */
#define INTEL_PT_MINIMAL_EBX     0xf
/*
 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
 *          IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
 *          accessed;
 * bit[01]: ToPA tables can hold any number of output entries, up to the
 *          maximum allowed by the MaskOrTableOffset field of
 *          IA32_RTIT_OUTPUT_MASK_PTRS;
 * bit[02]: Support Single-Range Output scheme;
 */
#define INTEL_PT_MINIMAL_ECX     0x7
/* generated packets which contain IP payloads have LIP values */
#define INTEL_PT_IP_LIP          (1 << 31)
#define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
#define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
#define INTEL_PT_MTC_BITMAP      (0x0249 << 16) /* Support ART(0,3,6,9) */
#define INTEL_PT_CYCLE_BITMAP    0x1fff         /* Support 0,2^(0~11) */
#define INTEL_PT_PSB_BITMAP      (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
                                     uint32_t vendor2, uint32_t vendor3)
{
    int i;
    for (i = 0; i < 4; i++) {
        dst[i] = vendor1 >> (8 * i);
        dst[i + 4] = vendor2 >> (8 * i);
        dst[i + 8] = vendor3 >> (8 * i);
    }
    dst[CPUID_VENDOR_SZ] = '\0';
}
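
/*
 * Example (well-known CPUID values, shown for illustration): for leaf 0
 * on an Intel CPU, EBX/EDX/ECX hold 0x756e6547/0x49656e69/0x6c65746e, and
 * x86_cpu_vendor_words2str() assembles them byte by byte, least significant
 * first, into the 12-character string "GenuineIntel" plus a terminating NUL.
 */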
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */   \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
          CPUID_EXT_F16C, CPUID_EXT_RDRAND */

#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES CPUID_SVM_NPT
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT |            \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
          CPUID_7_0_EBX_ERMS)
          /* missing:
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \
          /* CPUID_7_0_ECX_OSPKE is dynamic */ \
          CPUID_7_0_ECX_LA57)
#define TCG_7_0_EDX_FEATURES 0
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
typedef enum FeatureWordType {
   CPUID_FEATURE_WORD,
   MSR_FEATURE_WORD,
} FeatureWordType;

typedef struct FeatureWordInfo {
    FeatureWordType type;
    /* feature flags names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[32];
    union {
        /* If type==CPUID_FEATURE_WORD */
        struct {
            uint32_t eax;   /* Input EAX for CPUID */
            bool needs_ecx; /* CPUID instruction uses ECX as input */
            uint32_t ecx;   /* Input ECX value for CPUID */
            int reg;        /* output register (R_* constant) */
        } cpuid;
        /* If type==MSR_FEATURE_WORD */
        struct {
            uint32_t index;
            struct {   /*CPUID that enumerate this MSR*/
                FeatureWord cpuid_class;
                uint32_t    cpuid_flag;
            } cpuid_dep;
        } msr;
    };
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
    uint32_t migratable_flags; /* Feature flags known to be migratable */
    /* Features that shouldn't be auto-enabled by "-cpu host" */
    uint32_t no_autoenable_flags;
} FeatureWordInfo;
810 static FeatureWordInfo feature_word_info
[FEATURE_WORDS
] = {
812 .type
= CPUID_FEATURE_WORD
,
814 "fpu", "vme", "de", "pse",
815 "tsc", "msr", "pae", "mce",
816 "cx8", "apic", NULL
, "sep",
817 "mtrr", "pge", "mca", "cmov",
818 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
819 NULL
, "ds" /* Intel dts */, "acpi", "mmx",
820 "fxsr", "sse", "sse2", "ss",
821 "ht" /* Intel htt */, "tm", "ia64", "pbe",
823 .cpuid
= {.eax
= 1, .reg
= R_EDX
, },
824 .tcg_features
= TCG_FEATURES
,
827 .type
= CPUID_FEATURE_WORD
,
829 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
830 "ds-cpl", "vmx", "smx", "est",
831 "tm2", "ssse3", "cid", NULL
,
832 "fma", "cx16", "xtpr", "pdcm",
833 NULL
, "pcid", "dca", "sse4.1",
834 "sse4.2", "x2apic", "movbe", "popcnt",
835 "tsc-deadline", "aes", "xsave", NULL
/* osxsave */,
836 "avx", "f16c", "rdrand", "hypervisor",
838 .cpuid
= { .eax
= 1, .reg
= R_ECX
, },
839 .tcg_features
= TCG_EXT_FEATURES
,
841 /* Feature names that are already defined on feature_name[] but
842 * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
843 * names on feat_names below. They are copied automatically
844 * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
846 [FEAT_8000_0001_EDX
] = {
847 .type
= CPUID_FEATURE_WORD
,
849 NULL
/* fpu */, NULL
/* vme */, NULL
/* de */, NULL
/* pse */,
850 NULL
/* tsc */, NULL
/* msr */, NULL
/* pae */, NULL
/* mce */,
851 NULL
/* cx8 */, NULL
/* apic */, NULL
, "syscall",
852 NULL
/* mtrr */, NULL
/* pge */, NULL
/* mca */, NULL
/* cmov */,
853 NULL
/* pat */, NULL
/* pse36 */, NULL
, NULL
/* Linux mp */,
854 "nx", NULL
, "mmxext", NULL
/* mmx */,
855 NULL
/* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
856 NULL
, "lm", "3dnowext", "3dnow",
858 .cpuid
= { .eax
= 0x80000001, .reg
= R_EDX
, },
859 .tcg_features
= TCG_EXT2_FEATURES
,
861 [FEAT_8000_0001_ECX
] = {
862 .type
= CPUID_FEATURE_WORD
,
864 "lahf-lm", "cmp-legacy", "svm", "extapic",
865 "cr8legacy", "abm", "sse4a", "misalignsse",
866 "3dnowprefetch", "osvw", "ibs", "xop",
867 "skinit", "wdt", NULL
, "lwp",
868 "fma4", "tce", NULL
, "nodeid-msr",
869 NULL
, "tbm", "topoext", "perfctr-core",
870 "perfctr-nb", NULL
, NULL
, NULL
,
871 NULL
, NULL
, NULL
, NULL
,
873 .cpuid
= { .eax
= 0x80000001, .reg
= R_ECX
, },
874 .tcg_features
= TCG_EXT3_FEATURES
,
876 * TOPOEXT is always allowed but can't be enabled blindly by
877 * "-cpu host", as it requires consistent cache topology info
878 * to be provided so it doesn't confuse guests.
880 .no_autoenable_flags
= CPUID_EXT3_TOPOEXT
,
882 [FEAT_C000_0001_EDX
] = {
883 .type
= CPUID_FEATURE_WORD
,
885 NULL
, NULL
, "xstore", "xstore-en",
886 NULL
, NULL
, "xcrypt", "xcrypt-en",
887 "ace2", "ace2-en", "phe", "phe-en",
888 "pmm", "pmm-en", NULL
, NULL
,
889 NULL
, NULL
, NULL
, NULL
,
890 NULL
, NULL
, NULL
, NULL
,
891 NULL
, NULL
, NULL
, NULL
,
892 NULL
, NULL
, NULL
, NULL
,
894 .cpuid
= { .eax
= 0xC0000001, .reg
= R_EDX
, },
895 .tcg_features
= TCG_EXT4_FEATURES
,
898 .type
= CPUID_FEATURE_WORD
,
900 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
901 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
902 NULL
, "kvm-pv-tlb-flush", NULL
, "kvm-pv-ipi",
903 NULL
, NULL
, NULL
, NULL
,
904 NULL
, NULL
, NULL
, NULL
,
905 NULL
, NULL
, NULL
, NULL
,
906 "kvmclock-stable-bit", NULL
, NULL
, NULL
,
907 NULL
, NULL
, NULL
, NULL
,
909 .cpuid
= { .eax
= KVM_CPUID_FEATURES
, .reg
= R_EAX
, },
910 .tcg_features
= TCG_KVM_FEATURES
,
913 .type
= CPUID_FEATURE_WORD
,
915 "kvm-hint-dedicated", NULL
, NULL
, NULL
,
916 NULL
, NULL
, NULL
, NULL
,
917 NULL
, NULL
, NULL
, NULL
,
918 NULL
, NULL
, NULL
, NULL
,
919 NULL
, NULL
, NULL
, NULL
,
920 NULL
, NULL
, NULL
, NULL
,
921 NULL
, NULL
, NULL
, NULL
,
922 NULL
, NULL
, NULL
, NULL
,
924 .cpuid
= { .eax
= KVM_CPUID_FEATURES
, .reg
= R_EDX
, },
925 .tcg_features
= TCG_KVM_FEATURES
,
927 * KVM hints aren't auto-enabled by -cpu host, they need to be
928 * explicitly enabled in the command-line.
930 .no_autoenable_flags
= ~0U,
932 [FEAT_HYPERV_EAX
] = {
933 .type
= CPUID_FEATURE_WORD
,
935 NULL
/* hv_msr_vp_runtime_access */, NULL
/* hv_msr_time_refcount_access */,
936 NULL
/* hv_msr_synic_access */, NULL
/* hv_msr_stimer_access */,
937 NULL
/* hv_msr_apic_access */, NULL
/* hv_msr_hypercall_access */,
938 NULL
/* hv_vpindex_access */, NULL
/* hv_msr_reset_access */,
939 NULL
/* hv_msr_stats_access */, NULL
/* hv_reftsc_access */,
940 NULL
/* hv_msr_idle_access */, NULL
/* hv_msr_frequency_access */,
941 NULL
/* hv_msr_debug_access */, NULL
/* hv_msr_reenlightenment_access */,
943 NULL
, NULL
, NULL
, NULL
,
944 NULL
, NULL
, NULL
, NULL
,
945 NULL
, NULL
, NULL
, NULL
,
946 NULL
, NULL
, NULL
, NULL
,
948 .cpuid
= { .eax
= 0x40000003, .reg
= R_EAX
, },
950 [FEAT_HYPERV_EBX
] = {
951 .type
= CPUID_FEATURE_WORD
,
953 NULL
/* hv_create_partitions */, NULL
/* hv_access_partition_id */,
954 NULL
/* hv_access_memory_pool */, NULL
/* hv_adjust_message_buffers */,
955 NULL
/* hv_post_messages */, NULL
/* hv_signal_events */,
956 NULL
/* hv_create_port */, NULL
/* hv_connect_port */,
957 NULL
/* hv_access_stats */, NULL
, NULL
, NULL
/* hv_debugging */,
958 NULL
/* hv_cpu_power_management */, NULL
/* hv_configure_profiler */,
960 NULL
, NULL
, NULL
, NULL
,
961 NULL
, NULL
, NULL
, NULL
,
962 NULL
, NULL
, NULL
, NULL
,
963 NULL
, NULL
, NULL
, NULL
,
965 .cpuid
= { .eax
= 0x40000003, .reg
= R_EBX
, },
967 [FEAT_HYPERV_EDX
] = {
968 .type
= CPUID_FEATURE_WORD
,
970 NULL
/* hv_mwait */, NULL
/* hv_guest_debugging */,
971 NULL
/* hv_perf_monitor */, NULL
/* hv_cpu_dynamic_part */,
972 NULL
/* hv_hypercall_params_xmm */, NULL
/* hv_guest_idle_state */,
974 NULL
, NULL
, NULL
/* hv_guest_crash_msr */, NULL
,
975 NULL
, NULL
, NULL
, NULL
,
976 NULL
, NULL
, NULL
, NULL
,
977 NULL
, NULL
, NULL
, NULL
,
978 NULL
, NULL
, NULL
, NULL
,
979 NULL
, NULL
, NULL
, NULL
,
981 .cpuid
= { .eax
= 0x40000003, .reg
= R_EDX
, },
984 .type
= CPUID_FEATURE_WORD
,
986 "npt", "lbrv", "svm-lock", "nrip-save",
987 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
988 NULL
, NULL
, "pause-filter", NULL
,
989 "pfthreshold", NULL
, NULL
, NULL
,
990 NULL
, NULL
, NULL
, NULL
,
991 NULL
, NULL
, NULL
, NULL
,
992 NULL
, NULL
, NULL
, NULL
,
993 NULL
, NULL
, NULL
, NULL
,
995 .cpuid
= { .eax
= 0x8000000A, .reg
= R_EDX
, },
996 .tcg_features
= TCG_SVM_FEATURES
,
999 .type
= CPUID_FEATURE_WORD
,
1001 "fsgsbase", "tsc-adjust", NULL
, "bmi1",
1002 "hle", "avx2", NULL
, "smep",
1003 "bmi2", "erms", "invpcid", "rtm",
1004 NULL
, NULL
, "mpx", NULL
,
1005 "avx512f", "avx512dq", "rdseed", "adx",
1006 "smap", "avx512ifma", "pcommit", "clflushopt",
1007 "clwb", "intel-pt", "avx512pf", "avx512er",
1008 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
1012 .needs_ecx
= true, .ecx
= 0,
1015 .tcg_features
= TCG_7_0_EBX_FEATURES
,
1018 .type
= CPUID_FEATURE_WORD
,
1020 NULL
, "avx512vbmi", "umip", "pku",
1021 NULL
/* ospke */, NULL
, "avx512vbmi2", NULL
,
1022 "gfni", "vaes", "vpclmulqdq", "avx512vnni",
1023 "avx512bitalg", NULL
, "avx512-vpopcntdq", NULL
,
1024 "la57", NULL
, NULL
, NULL
,
1025 NULL
, NULL
, "rdpid", NULL
,
1026 NULL
, "cldemote", NULL
, NULL
,
1027 NULL
, NULL
, NULL
, NULL
,
1031 .needs_ecx
= true, .ecx
= 0,
1034 .tcg_features
= TCG_7_0_ECX_FEATURES
,
1037 .type
= CPUID_FEATURE_WORD
,
1039 NULL
, NULL
, "avx512-4vnniw", "avx512-4fmaps",
1040 NULL
, NULL
, NULL
, NULL
,
1041 NULL
, NULL
, NULL
, NULL
,
1042 NULL
, NULL
, NULL
, NULL
,
1043 NULL
, NULL
, "pconfig", NULL
,
1044 NULL
, NULL
, NULL
, NULL
,
1045 NULL
, NULL
, "spec-ctrl", NULL
,
1046 NULL
, "arch-capabilities", NULL
, "ssbd",
1050 .needs_ecx
= true, .ecx
= 0,
1053 .tcg_features
= TCG_7_0_EDX_FEATURES
,
1054 .unmigratable_flags
= CPUID_7_0_EDX_ARCH_CAPABILITIES
,
1056 [FEAT_8000_0007_EDX
] = {
1057 .type
= CPUID_FEATURE_WORD
,
1059 NULL
, NULL
, NULL
, NULL
,
1060 NULL
, NULL
, NULL
, NULL
,
1061 "invtsc", NULL
, NULL
, NULL
,
1062 NULL
, NULL
, NULL
, NULL
,
1063 NULL
, NULL
, NULL
, NULL
,
1064 NULL
, NULL
, NULL
, NULL
,
1065 NULL
, NULL
, NULL
, NULL
,
1066 NULL
, NULL
, NULL
, NULL
,
1068 .cpuid
= { .eax
= 0x80000007, .reg
= R_EDX
, },
1069 .tcg_features
= TCG_APM_FEATURES
,
1070 .unmigratable_flags
= CPUID_APM_INVTSC
,
1072 [FEAT_8000_0008_EBX
] = {
1073 .type
= CPUID_FEATURE_WORD
,
1075 NULL
, NULL
, NULL
, NULL
,
1076 NULL
, NULL
, NULL
, NULL
,
1077 NULL
, "wbnoinvd", NULL
, NULL
,
1078 "ibpb", NULL
, NULL
, NULL
,
1079 NULL
, NULL
, NULL
, NULL
,
1080 NULL
, NULL
, NULL
, NULL
,
1081 "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL
,
1082 NULL
, NULL
, NULL
, NULL
,
1084 .cpuid
= { .eax
= 0x80000008, .reg
= R_EBX
, },
1086 .unmigratable_flags
= 0,
1089 .type
= CPUID_FEATURE_WORD
,
1091 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
1092 NULL
, NULL
, NULL
, NULL
,
1093 NULL
, NULL
, NULL
, NULL
,
1094 NULL
, NULL
, NULL
, NULL
,
1095 NULL
, NULL
, NULL
, NULL
,
1096 NULL
, NULL
, NULL
, NULL
,
1097 NULL
, NULL
, NULL
, NULL
,
1098 NULL
, NULL
, NULL
, NULL
,
1102 .needs_ecx
= true, .ecx
= 1,
1105 .tcg_features
= TCG_XSAVE_FEATURES
,
1108 .type
= CPUID_FEATURE_WORD
,
1110 NULL
, NULL
, "arat", NULL
,
1111 NULL
, NULL
, NULL
, NULL
,
1112 NULL
, NULL
, NULL
, NULL
,
1113 NULL
, NULL
, NULL
, NULL
,
1114 NULL
, NULL
, NULL
, NULL
,
1115 NULL
, NULL
, NULL
, NULL
,
1116 NULL
, NULL
, NULL
, NULL
,
1117 NULL
, NULL
, NULL
, NULL
,
1119 .cpuid
= { .eax
= 6, .reg
= R_EAX
, },
1120 .tcg_features
= TCG_6_EAX_FEATURES
,
1122 [FEAT_XSAVE_COMP_LO
] = {
1123 .type
= CPUID_FEATURE_WORD
,
1126 .needs_ecx
= true, .ecx
= 0,
1129 .tcg_features
= ~0U,
1130 .migratable_flags
= XSTATE_FP_MASK
| XSTATE_SSE_MASK
|
1131 XSTATE_YMM_MASK
| XSTATE_BNDREGS_MASK
| XSTATE_BNDCSR_MASK
|
1132 XSTATE_OPMASK_MASK
| XSTATE_ZMM_Hi256_MASK
| XSTATE_Hi16_ZMM_MASK
|
1135 [FEAT_XSAVE_COMP_HI
] = {
1136 .type
= CPUID_FEATURE_WORD
,
1139 .needs_ecx
= true, .ecx
= 0,
1142 .tcg_features
= ~0U,
1144 /*Below are MSR exposed features*/
1145 [FEAT_ARCH_CAPABILITIES
] = {
1146 .type
= MSR_FEATURE_WORD
,
1148 "rdctl-no", "ibrs-all", "rsba", "skip-l1dfl-vmentry",
1149 "ssb-no", NULL
, NULL
, NULL
,
1150 NULL
, NULL
, NULL
, NULL
,
1151 NULL
, NULL
, NULL
, NULL
,
1152 NULL
, NULL
, NULL
, NULL
,
1153 NULL
, NULL
, NULL
, NULL
,
1154 NULL
, NULL
, NULL
, NULL
,
1155 NULL
, NULL
, NULL
, NULL
,
1158 .index
= MSR_IA32_ARCH_CAPABILITIES
,
1161 CPUID_7_0_EDX_ARCH_CAPABILITIES
1167 typedef struct X86RegisterInfo32
{
1168 /* Name of register */
1170 /* QAPI enum value register */
1171 X86CPURegister32 qapi_enum
;
1172 } X86RegisterInfo32
;
1174 #define REGISTER(reg) \
1175 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
1176 static const X86RegisterInfo32 x86_reg_info_32
[CPU_NB_REGS32
] = {
1188 typedef struct ExtSaveArea
{
1189 uint32_t feature
, bits
;
1190 uint32_t offset
, size
;
1193 static const ExtSaveArea x86_ext_save_areas
[] = {
1195 /* x87 FP state component is always enabled if XSAVE is supported */
1196 .feature
= FEAT_1_ECX
, .bits
= CPUID_EXT_XSAVE
,
1197 /* x87 state is in the legacy region of the XSAVE area */
1199 .size
= sizeof(X86LegacyXSaveArea
) + sizeof(X86XSaveHeader
),
1201 [XSTATE_SSE_BIT
] = {
1202 /* SSE state component is always enabled if XSAVE is supported */
1203 .feature
= FEAT_1_ECX
, .bits
= CPUID_EXT_XSAVE
,
1204 /* SSE state is in the legacy region of the XSAVE area */
1206 .size
= sizeof(X86LegacyXSaveArea
) + sizeof(X86XSaveHeader
),
1209 { .feature
= FEAT_1_ECX
, .bits
= CPUID_EXT_AVX
,
1210 .offset
= offsetof(X86XSaveArea
, avx_state
),
1211 .size
= sizeof(XSaveAVX
) },
1212 [XSTATE_BNDREGS_BIT
] =
1213 { .feature
= FEAT_7_0_EBX
, .bits
= CPUID_7_0_EBX_MPX
,
1214 .offset
= offsetof(X86XSaveArea
, bndreg_state
),
1215 .size
= sizeof(XSaveBNDREG
) },
1216 [XSTATE_BNDCSR_BIT
] =
1217 { .feature
= FEAT_7_0_EBX
, .bits
= CPUID_7_0_EBX_MPX
,
1218 .offset
= offsetof(X86XSaveArea
, bndcsr_state
),
1219 .size
= sizeof(XSaveBNDCSR
) },
1220 [XSTATE_OPMASK_BIT
] =
1221 { .feature
= FEAT_7_0_EBX
, .bits
= CPUID_7_0_EBX_AVX512F
,
1222 .offset
= offsetof(X86XSaveArea
, opmask_state
),
1223 .size
= sizeof(XSaveOpmask
) },
1224 [XSTATE_ZMM_Hi256_BIT
] =
1225 { .feature
= FEAT_7_0_EBX
, .bits
= CPUID_7_0_EBX_AVX512F
,
1226 .offset
= offsetof(X86XSaveArea
, zmm_hi256_state
),
1227 .size
= sizeof(XSaveZMM_Hi256
) },
1228 [XSTATE_Hi16_ZMM_BIT
] =
1229 { .feature
= FEAT_7_0_EBX
, .bits
= CPUID_7_0_EBX_AVX512F
,
1230 .offset
= offsetof(X86XSaveArea
, hi16_zmm_state
),
1231 .size
= sizeof(XSaveHi16_ZMM
) },
1233 { .feature
= FEAT_7_0_ECX
, .bits
= CPUID_7_0_ECX_PKU
,
1234 .offset
= offsetof(X86XSaveArea
, pkru_state
),
1235 .size
= sizeof(XSavePKRU
) },
static uint32_t xsave_area_size(uint64_t mask)
{
    int i;
    uint64_t ret = 0;

    for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if ((mask >> i) & 1) {
            ret = MAX(ret, esa->offset + esa->size);
        }
    }
    return ret;
}
static inline bool accel_uses_host_cpuid(void)
{
    return kvm_enabled() || hvf_enabled();
}

static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
{
    return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
           cpu->env.features[FEAT_XSAVE_COMP_LO];
}
const char *get_register_name_32(unsigned int reg)
{
    if (reg >= CPU_NB_REGS32) {
        return NULL;
    }
    return x86_reg_info_32[reg].name;
}

/*
 * Returns the set of feature flags that are supported and migratable by
 * QEMU, for a given FeatureWord.
 */
static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t r = 0;
    int i;

    for (i = 0; i < 32; i++) {
        uint32_t f = 1U << i;

        /* If the feature name is known, it is implicitly considered migratable,
         * unless it is explicitly set in unmigratable_flags */
        if ((wi->migratable_flags & f) ||
            (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
            r |= f;
        }
    }
    return r;
}
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#else
    abort();
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
}
void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
{
    uint32_t eax, ebx, ecx, edx;

    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    if (family) {
        *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    }
    if (model) {
        *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    }
    if (stepping) {
        *stepping = eax & 0x0F;
    }
}
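
/*
 * Worked example of the family/model math above (hypothetical leaf-1 EAX
 * value 0x000506E3, a Skylake-class signature): family is
 * ((0x000506E3 >> 8) & 0xF) + ((0x000506E3 >> 20) & 0xFF) = 6 + 0 = 6,
 * model is ((0x000506E3 >> 4) & 0xF) | ((0x000506E3 & 0xF0000) >> 12)
 * = 0xE | 0x50 = 0x5E (94), and the stepping is 0x3.
 */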
/* CPU class name definitions: */

/* Return type name for a given CPU model name
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}

static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename = x86_cpu_type_name(cpu_model);
    oc = object_class_by_name(typename);
    g_free(typename);
    return oc;
}

static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
{
    const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
    assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
    return g_strndup(class_name,
                     strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
}
1374 struct X86CPUDefinition
{
1378 /* vendor is zero-terminated, 12 character ASCII string */
1379 char vendor
[CPUID_VENDOR_SZ
+ 1];
1383 FeatureWordArray features
;
1384 const char *model_id
;
1385 CPUCaches
*cache_info
;
1388 static CPUCaches epyc_cache_info
= {
1389 .l1d_cache
= &(CPUCacheInfo
) {
1399 .no_invd_sharing
= true,
1401 .l1i_cache
= &(CPUCacheInfo
) {
1402 .type
= INSTRUCTION_CACHE
,
1411 .no_invd_sharing
= true,
1413 .l2_cache
= &(CPUCacheInfo
) {
1414 .type
= UNIFIED_CACHE
,
1423 .l3_cache
= &(CPUCacheInfo
) {
1424 .type
= UNIFIED_CACHE
,
1428 .associativity
= 16,
1434 .complex_indexing
= true,
1438 static X86CPUDefinition builtin_x86_defs
[] = {
1442 .vendor
= CPUID_VENDOR_AMD
,
1446 .features
[FEAT_1_EDX
] =
1448 CPUID_MTRR
| CPUID_CLFLUSH
| CPUID_MCA
|
1450 .features
[FEAT_1_ECX
] =
1451 CPUID_EXT_SSE3
| CPUID_EXT_CX16
,
1452 .features
[FEAT_8000_0001_EDX
] =
1453 CPUID_EXT2_LM
| CPUID_EXT2_SYSCALL
| CPUID_EXT2_NX
,
1454 .features
[FEAT_8000_0001_ECX
] =
1455 CPUID_EXT3_LAHF_LM
| CPUID_EXT3_SVM
,
1456 .xlevel
= 0x8000000A,
1457 .model_id
= "QEMU Virtual CPU version " QEMU_HW_VERSION
,
1462 .vendor
= CPUID_VENDOR_AMD
,
1466 /* Missing: CPUID_HT */
1467 .features
[FEAT_1_EDX
] =
1469 CPUID_MTRR
| CPUID_CLFLUSH
| CPUID_MCA
|
1470 CPUID_PSE36
| CPUID_VME
,
1471 .features
[FEAT_1_ECX
] =
1472 CPUID_EXT_SSE3
| CPUID_EXT_MONITOR
| CPUID_EXT_CX16
|
1474 .features
[FEAT_8000_0001_EDX
] =
1475 CPUID_EXT2_LM
| CPUID_EXT2_SYSCALL
| CPUID_EXT2_NX
|
1476 CPUID_EXT2_3DNOW
| CPUID_EXT2_3DNOWEXT
| CPUID_EXT2_MMXEXT
|
1477 CPUID_EXT2_FFXSR
| CPUID_EXT2_PDPE1GB
| CPUID_EXT2_RDTSCP
,
1478 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1480 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1481 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
1482 .features
[FEAT_8000_0001_ECX
] =
1483 CPUID_EXT3_LAHF_LM
| CPUID_EXT3_SVM
|
1484 CPUID_EXT3_ABM
| CPUID_EXT3_SSE4A
,
1485 /* Missing: CPUID_SVM_LBRV */
1486 .features
[FEAT_SVM
] =
1488 .xlevel
= 0x8000001A,
1489 .model_id
= "AMD Phenom(tm) 9550 Quad-Core Processor"
1494 .vendor
= CPUID_VENDOR_INTEL
,
1498 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1499 .features
[FEAT_1_EDX
] =
1501 CPUID_MTRR
| CPUID_CLFLUSH
| CPUID_MCA
|
1502 CPUID_PSE36
| CPUID_VME
| CPUID_ACPI
| CPUID_SS
,
1503 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
1504 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
1505 .features
[FEAT_1_ECX
] =
1506 CPUID_EXT_SSE3
| CPUID_EXT_MONITOR
| CPUID_EXT_SSSE3
|
1508 .features
[FEAT_8000_0001_EDX
] =
1509 CPUID_EXT2_LM
| CPUID_EXT2_SYSCALL
| CPUID_EXT2_NX
,
1510 .features
[FEAT_8000_0001_ECX
] =
1512 .xlevel
= 0x80000008,
1513 .model_id
= "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
1518 .vendor
= CPUID_VENDOR_INTEL
,
1522 /* Missing: CPUID_HT */
1523 .features
[FEAT_1_EDX
] =
1524 PPRO_FEATURES
| CPUID_VME
|
1525 CPUID_MTRR
| CPUID_CLFLUSH
| CPUID_MCA
|
1527 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
1528 .features
[FEAT_1_ECX
] =
1529 CPUID_EXT_SSE3
| CPUID_EXT_CX16
,
1530 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
1531 .features
[FEAT_8000_0001_EDX
] =
1532 CPUID_EXT2_LM
| CPUID_EXT2_SYSCALL
| CPUID_EXT2_NX
,
1533 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1534 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
1535 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1536 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
1537 .features
[FEAT_8000_0001_ECX
] =
1539 .xlevel
= 0x80000008,
1540 .model_id
= "Common KVM processor"
1545 .vendor
= CPUID_VENDOR_INTEL
,
1549 .features
[FEAT_1_EDX
] =
1551 .features
[FEAT_1_ECX
] =
1553 .xlevel
= 0x80000004,
1554 .model_id
= "QEMU Virtual CPU version " QEMU_HW_VERSION
,
1559 .vendor
= CPUID_VENDOR_INTEL
,
1563 .features
[FEAT_1_EDX
] =
1564 PPRO_FEATURES
| CPUID_VME
|
1565 CPUID_MTRR
| CPUID_CLFLUSH
| CPUID_MCA
| CPUID_PSE36
,
1566 .features
[FEAT_1_ECX
] =
1568 .features
[FEAT_8000_0001_ECX
] =
1570 .xlevel
= 0x80000008,
1571 .model_id
= "Common 32-bit KVM processor"
1576 .vendor
= CPUID_VENDOR_INTEL
,
1580 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1581 .features
[FEAT_1_EDX
] =
1582 PPRO_FEATURES
| CPUID_VME
|
1583 CPUID_MTRR
| CPUID_CLFLUSH
| CPUID_MCA
| CPUID_ACPI
|
1585 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
1586 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
1587 .features
[FEAT_1_ECX
] =
1588 CPUID_EXT_SSE3
| CPUID_EXT_MONITOR
,
1589 .features
[FEAT_8000_0001_EDX
] =
1591 .xlevel
= 0x80000008,
1592 .model_id
= "Genuine Intel(R) CPU T2600 @ 2.16GHz",
1597 .vendor
= CPUID_VENDOR_INTEL
,
1601 .features
[FEAT_1_EDX
] =
1609 .vendor
= CPUID_VENDOR_INTEL
,
1613 .features
[FEAT_1_EDX
] =
1621 .vendor
= CPUID_VENDOR_INTEL
,
1625 .features
[FEAT_1_EDX
] =
1633 .vendor
= CPUID_VENDOR_INTEL
,
1637 .features
[FEAT_1_EDX
] =
1645 .vendor
= CPUID_VENDOR_AMD
,
1649 .features
[FEAT_1_EDX
] =
1650 PPRO_FEATURES
| CPUID_PSE36
| CPUID_VME
| CPUID_MTRR
|
1652 .features
[FEAT_8000_0001_EDX
] =
1653 CPUID_EXT2_MMXEXT
| CPUID_EXT2_3DNOW
| CPUID_EXT2_3DNOWEXT
,
1654 .xlevel
= 0x80000008,
1655 .model_id
= "QEMU Virtual CPU version " QEMU_HW_VERSION
,
1660 .vendor
= CPUID_VENDOR_INTEL
,
1664 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1665 .features
[FEAT_1_EDX
] =
1667 CPUID_MTRR
| CPUID_CLFLUSH
| CPUID_MCA
| CPUID_VME
|
1668 CPUID_ACPI
| CPUID_SS
,
1669 /* Some CPUs got no CPUID_SEP */
1670 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
1672 .features
[FEAT_1_ECX
] =
1673 CPUID_EXT_SSE3
| CPUID_EXT_MONITOR
| CPUID_EXT_SSSE3
|
1675 .features
[FEAT_8000_0001_EDX
] =
1677 .features
[FEAT_8000_0001_ECX
] =
1679 .xlevel
= 0x80000008,
1680 .model_id
= "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
1685 .vendor
= CPUID_VENDOR_INTEL
,
1689 .features
[FEAT_1_EDX
] =
1690 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
1691 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
1692 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
1693 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
1694 CPUID_DE
| CPUID_FP87
,
1695 .features
[FEAT_1_ECX
] =
1696 CPUID_EXT_SSSE3
| CPUID_EXT_SSE3
,
1697 .features
[FEAT_8000_0001_EDX
] =
1698 CPUID_EXT2_LM
| CPUID_EXT2_NX
| CPUID_EXT2_SYSCALL
,
1699 .features
[FEAT_8000_0001_ECX
] =
1701 .xlevel
= 0x80000008,
1702 .model_id
= "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1707 .vendor
= CPUID_VENDOR_INTEL
,
1711 .features
[FEAT_1_EDX
] =
1712 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
1713 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
1714 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
1715 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
1716 CPUID_DE
| CPUID_FP87
,
1717 .features
[FEAT_1_ECX
] =
1718 CPUID_EXT_SSE41
| CPUID_EXT_CX16
| CPUID_EXT_SSSE3
|
1720 .features
[FEAT_8000_0001_EDX
] =
1721 CPUID_EXT2_LM
| CPUID_EXT2_NX
| CPUID_EXT2_SYSCALL
,
1722 .features
[FEAT_8000_0001_ECX
] =
1724 .xlevel
= 0x80000008,
1725 .model_id
= "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1730 .vendor
= CPUID_VENDOR_INTEL
,
1734 .features
[FEAT_1_EDX
] =
1735 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
1736 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
1737 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
1738 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
1739 CPUID_DE
| CPUID_FP87
,
1740 .features
[FEAT_1_ECX
] =
1741 CPUID_EXT_POPCNT
| CPUID_EXT_SSE42
| CPUID_EXT_SSE41
|
1742 CPUID_EXT_CX16
| CPUID_EXT_SSSE3
| CPUID_EXT_SSE3
,
1743 .features
[FEAT_8000_0001_EDX
] =
1744 CPUID_EXT2_LM
| CPUID_EXT2_SYSCALL
| CPUID_EXT2_NX
,
1745 .features
[FEAT_8000_0001_ECX
] =
1747 .xlevel
= 0x80000008,
1748 .model_id
= "Intel Core i7 9xx (Nehalem Class Core i7)",
1751 .name
= "Nehalem-IBRS",
1753 .vendor
= CPUID_VENDOR_INTEL
,
1757 .features
[FEAT_1_EDX
] =
1758 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
1759 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
1760 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
1761 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
1762 CPUID_DE
| CPUID_FP87
,
1763 .features
[FEAT_1_ECX
] =
1764 CPUID_EXT_POPCNT
| CPUID_EXT_SSE42
| CPUID_EXT_SSE41
|
1765 CPUID_EXT_CX16
| CPUID_EXT_SSSE3
| CPUID_EXT_SSE3
,
1766 .features
[FEAT_7_0_EDX
] =
1767 CPUID_7_0_EDX_SPEC_CTRL
,
1768 .features
[FEAT_8000_0001_EDX
] =
1769 CPUID_EXT2_LM
| CPUID_EXT2_SYSCALL
| CPUID_EXT2_NX
,
1770 .features
[FEAT_8000_0001_ECX
] =
1772 .xlevel
= 0x80000008,
1773 .model_id
= "Intel Core i7 9xx (Nehalem Core i7, IBRS update)",
1778 .vendor
= CPUID_VENDOR_INTEL
,
1782 .features
[FEAT_1_EDX
] =
1783 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
1784 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
1785 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
1786 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
1787 CPUID_DE
| CPUID_FP87
,
1788 .features
[FEAT_1_ECX
] =
1789 CPUID_EXT_AES
| CPUID_EXT_POPCNT
| CPUID_EXT_SSE42
|
1790 CPUID_EXT_SSE41
| CPUID_EXT_CX16
| CPUID_EXT_SSSE3
|
1791 CPUID_EXT_PCLMULQDQ
| CPUID_EXT_SSE3
,
1792 .features
[FEAT_8000_0001_EDX
] =
1793 CPUID_EXT2_LM
| CPUID_EXT2_SYSCALL
| CPUID_EXT2_NX
,
1794 .features
[FEAT_8000_0001_ECX
] =
1796 .features
[FEAT_6_EAX
] =
1798 .xlevel
= 0x80000008,
1799 .model_id
= "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1802 .name
= "Westmere-IBRS",
1804 .vendor
= CPUID_VENDOR_INTEL
,
1808 .features
[FEAT_1_EDX
] =
1809 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
1810 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
1811 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
1812 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
1813 CPUID_DE
| CPUID_FP87
,
1814 .features
[FEAT_1_ECX
] =
1815 CPUID_EXT_AES
| CPUID_EXT_POPCNT
| CPUID_EXT_SSE42
|
1816 CPUID_EXT_SSE41
| CPUID_EXT_CX16
| CPUID_EXT_SSSE3
|
1817 CPUID_EXT_PCLMULQDQ
| CPUID_EXT_SSE3
,
1818 .features
[FEAT_8000_0001_EDX
] =
1819 CPUID_EXT2_LM
| CPUID_EXT2_SYSCALL
| CPUID_EXT2_NX
,
1820 .features
[FEAT_8000_0001_ECX
] =
1822 .features
[FEAT_7_0_EDX
] =
1823 CPUID_7_0_EDX_SPEC_CTRL
,
1824 .features
[FEAT_6_EAX
] =
1826 .xlevel
= 0x80000008,
1827 .model_id
= "Westmere E56xx/L56xx/X56xx (IBRS update)",
1830 .name
= "SandyBridge",
1832 .vendor
= CPUID_VENDOR_INTEL
,
1836 .features
[FEAT_1_EDX
] =
1837 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
1838 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
1839 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
1840 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
1841 CPUID_DE
| CPUID_FP87
,
1842 .features
[FEAT_1_ECX
] =
1843 CPUID_EXT_AVX
| CPUID_EXT_XSAVE
| CPUID_EXT_AES
|
1844 CPUID_EXT_TSC_DEADLINE_TIMER
| CPUID_EXT_POPCNT
|
1845 CPUID_EXT_X2APIC
| CPUID_EXT_SSE42
| CPUID_EXT_SSE41
|
1846 CPUID_EXT_CX16
| CPUID_EXT_SSSE3
| CPUID_EXT_PCLMULQDQ
|
1848 .features
[FEAT_8000_0001_EDX
] =
1849 CPUID_EXT2_LM
| CPUID_EXT2_RDTSCP
| CPUID_EXT2_NX
|
1851 .features
[FEAT_8000_0001_ECX
] =
1853 .features
[FEAT_XSAVE
] =
1854 CPUID_XSAVE_XSAVEOPT
,
1855 .features
[FEAT_6_EAX
] =
1857 .xlevel
= 0x80000008,
1858 .model_id
= "Intel Xeon E312xx (Sandy Bridge)",
1861 .name
= "SandyBridge-IBRS",
1863 .vendor
= CPUID_VENDOR_INTEL
,
1867 .features
[FEAT_1_EDX
] =
1868 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
1869 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
1870 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
1871 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
1872 CPUID_DE
| CPUID_FP87
,
1873 .features
[FEAT_1_ECX
] =
1874 CPUID_EXT_AVX
| CPUID_EXT_XSAVE
| CPUID_EXT_AES
|
1875 CPUID_EXT_TSC_DEADLINE_TIMER
| CPUID_EXT_POPCNT
|
1876 CPUID_EXT_X2APIC
| CPUID_EXT_SSE42
| CPUID_EXT_SSE41
|
1877 CPUID_EXT_CX16
| CPUID_EXT_SSSE3
| CPUID_EXT_PCLMULQDQ
|
1879 .features
[FEAT_8000_0001_EDX
] =
1880 CPUID_EXT2_LM
| CPUID_EXT2_RDTSCP
| CPUID_EXT2_NX
|
1882 .features
[FEAT_8000_0001_ECX
] =
1884 .features
[FEAT_7_0_EDX
] =
1885 CPUID_7_0_EDX_SPEC_CTRL
,
1886 .features
[FEAT_XSAVE
] =
1887 CPUID_XSAVE_XSAVEOPT
,
1888 .features
[FEAT_6_EAX
] =
1890 .xlevel
= 0x80000008,
1891 .model_id
= "Intel Xeon E312xx (Sandy Bridge, IBRS update)",
1894 .name
= "IvyBridge",
1896 .vendor
= CPUID_VENDOR_INTEL
,
1900 .features
[FEAT_1_EDX
] =
1901 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
1902 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
1903 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
1904 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
1905 CPUID_DE
| CPUID_FP87
,
1906 .features
[FEAT_1_ECX
] =
1907 CPUID_EXT_AVX
| CPUID_EXT_XSAVE
| CPUID_EXT_AES
|
1908 CPUID_EXT_TSC_DEADLINE_TIMER
| CPUID_EXT_POPCNT
|
1909 CPUID_EXT_X2APIC
| CPUID_EXT_SSE42
| CPUID_EXT_SSE41
|
1910 CPUID_EXT_CX16
| CPUID_EXT_SSSE3
| CPUID_EXT_PCLMULQDQ
|
1911 CPUID_EXT_SSE3
| CPUID_EXT_F16C
| CPUID_EXT_RDRAND
,
1912 .features
[FEAT_7_0_EBX
] =
1913 CPUID_7_0_EBX_FSGSBASE
| CPUID_7_0_EBX_SMEP
|
1915 .features
[FEAT_8000_0001_EDX
] =
1916 CPUID_EXT2_LM
| CPUID_EXT2_RDTSCP
| CPUID_EXT2_NX
|
1918 .features
[FEAT_8000_0001_ECX
] =
1920 .features
[FEAT_XSAVE
] =
1921 CPUID_XSAVE_XSAVEOPT
,
1922 .features
[FEAT_6_EAX
] =
1924 .xlevel
= 0x80000008,
1925 .model_id
= "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1928 .name
= "IvyBridge-IBRS",
1930 .vendor
= CPUID_VENDOR_INTEL
,
1934 .features
[FEAT_1_EDX
] =
1935 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
1936 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
1937 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
1938 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
1939 CPUID_DE
| CPUID_FP87
,
1940 .features
[FEAT_1_ECX
] =
1941 CPUID_EXT_AVX
| CPUID_EXT_XSAVE
| CPUID_EXT_AES
|
1942 CPUID_EXT_TSC_DEADLINE_TIMER
| CPUID_EXT_POPCNT
|
1943 CPUID_EXT_X2APIC
| CPUID_EXT_SSE42
| CPUID_EXT_SSE41
|
1944 CPUID_EXT_CX16
| CPUID_EXT_SSSE3
| CPUID_EXT_PCLMULQDQ
|
1945 CPUID_EXT_SSE3
| CPUID_EXT_F16C
| CPUID_EXT_RDRAND
,
1946 .features
[FEAT_7_0_EBX
] =
1947 CPUID_7_0_EBX_FSGSBASE
| CPUID_7_0_EBX_SMEP
|
1949 .features
[FEAT_8000_0001_EDX
] =
1950 CPUID_EXT2_LM
| CPUID_EXT2_RDTSCP
| CPUID_EXT2_NX
|
1952 .features
[FEAT_8000_0001_ECX
] =
1954 .features
[FEAT_7_0_EDX
] =
1955 CPUID_7_0_EDX_SPEC_CTRL
,
1956 .features
[FEAT_XSAVE
] =
1957 CPUID_XSAVE_XSAVEOPT
,
1958 .features
[FEAT_6_EAX
] =
1960 .xlevel
= 0x80000008,
1961 .model_id
= "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)",
    {
        .name = "Haswell-noTSX",
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Haswell, no TSX)",
    },
    {
        .name = "Haswell-noTSX-IBRS",
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Haswell, no TSX, IBRS)",
    },
    {
        .name = "Haswell",
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Haswell)",
    },
    {
        .name = "Haswell-IBRS",
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Haswell, IBRS)",
    },
    {
        .name = "Broadwell-noTSX",
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Broadwell, no TSX)",
    },
    {
        .name = "Broadwell-noTSX-IBRS",
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Broadwell, no TSX, IBRS)",
    },
    {
        .name = "Broadwell",
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Broadwell)",
    },
    {
        .name = "Broadwell-IBRS",
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Broadwell, IBRS)",
    },
    {
        .name = "Skylake-Client",
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
        /* Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component,
         * and the only one defined in Skylake (processor tracing)
         * probably will block migration anyway.
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Skylake)",
    },
    {
        .name = "Skylake-Client-IBRS",
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
        /* Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component,
         * and the only one defined in Skylake (processor tracing)
         * probably will block migration anyway.
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Skylake, IBRS)",
    },
    {
        .name = "Skylake-Server",
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
            CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
            CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
            CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
            CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
        .features[FEAT_7_0_ECX] =
            CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE,
        /* Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component,
         * and the only one defined in Skylake (processor tracing)
         * probably will block migration anyway.
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon Processor (Skylake)",
    },
    {
        .name = "Skylake-Server-IBRS",
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
            CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
            CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
            CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
            CPUID_7_0_EBX_AVX512VL,
        .features[FEAT_7_0_ECX] =
            CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE,
        /* Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component,
         * and the only one defined in Skylake (processor tracing)
         * probably will block migration anyway.
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon Processor (Skylake, IBRS)",
    },
    {
        .name = "Cascadelake-Server",
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
            CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
            CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
            CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
            CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT |
            CPUID_7_0_EBX_INTEL_PT,
        .features[FEAT_7_0_ECX] =
            CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE |
            CPUID_7_0_ECX_AVX512VNNI,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
        /* Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component,
         * and the only one defined in Skylake (processor tracing)
         * probably will block migration anyway.
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon Processor (Cascadelake)",
    },
    {
        .name = "Icelake-Client",
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_8000_0008_EBX] =
            CPUID_8000_0008_EBX_WBNOINVD,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_INTEL_PT,
        .features[FEAT_7_0_ECX] =
            CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
            CPUID_7_0_ECX_OSPKE | CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI |
            CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
            CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
            CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
        /* Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component,
         * and the only one defined in Skylake (processor tracing)
         * probably will block migration anyway.
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Icelake)",
    },
    {
        .name = "Icelake-Server",
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
            CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_8000_0008_EBX] =
            CPUID_8000_0008_EBX_WBNOINVD,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
            CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
            CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
            CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT |
            CPUID_7_0_EBX_INTEL_PT,
        .features[FEAT_7_0_ECX] =
            CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
            CPUID_7_0_ECX_OSPKE | CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI |
            CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
            CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
            CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_PCONFIG | CPUID_7_0_EDX_SPEC_CTRL |
            CPUID_7_0_EDX_SPEC_CTRL_SSBD,
        /* Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component,
         * and the only one defined in Skylake (processor tracing)
         * probably will block migration anyway.
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon Processor (Icelake)",
    },
    {
        .name = "KnightsMill",
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR |
            CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV |
            CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC |
            CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC |
            CPUID_PSE | CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
            CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
            CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS |
            CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F |
            CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF |
            CPUID_7_0_EBX_AVX512ER,
        .features[FEAT_7_0_ECX] =
            CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon Phi Processor (Knights Mill)",
    },
    {
        .name = "Opteron_G1",
        .vendor = CPUID_VENDOR_AMD,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .xlevel = 0x80000008,
        .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
    },
    {
        .name = "Opteron_G2",
        .vendor = CPUID_VENDOR_AMD,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_CX16 | CPUID_EXT_SSE3,
        /* Missing: CPUID_EXT2_RDTSCP */
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
        .xlevel = 0x80000008,
        .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
    },
    {
        .name = "Opteron_G3",
        .vendor = CPUID_VENDOR_AMD,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
            CPUID_EXT_SSE3,
        /* Missing: CPUID_EXT2_RDTSCP */
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
            CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
        .xlevel = 0x80000008,
        .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
    },
    {
        .name = "Opteron_G4",
        .vendor = CPUID_VENDOR_AMD,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
            CPUID_EXT_SSE3,
        /* Missing: CPUID_EXT2_RDTSCP */
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
            CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
            CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
            CPUID_EXT3_LAHF_LM,
        .xlevel = 0x8000001A,
        .model_id = "AMD Opteron 62xx class CPU",
    },
    {
        .name = "Opteron_G5",
        .vendor = CPUID_VENDOR_AMD,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
            CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
            CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
        /* Missing: CPUID_EXT2_RDTSCP */
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
            CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
            CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
            CPUID_EXT3_LAHF_LM,
        .xlevel = 0x8000001A,
        .model_id = "AMD Opteron 63xx class CPU",
    },
    {
        .name = "EPYC",
        .vendor = CPUID_VENDOR_AMD,
        .features[FEAT_1_EDX] =
            CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
            CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
            CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
            CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
            CPUID_VME | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
            CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
            CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
            CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
            CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
            CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
            CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
            CPUID_EXT3_TOPOEXT,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
            CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
            CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
            CPUID_7_0_EBX_SHA_NI,
        /* Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component.
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x8000001E,
        .model_id = "AMD EPYC Processor",
        .cache_info = &epyc_cache_info,
    },
    {
        .name = "EPYC-IBPB",
        .vendor = CPUID_VENDOR_AMD,
        .features[FEAT_1_EDX] =
            CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
            CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
            CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
            CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
            CPUID_VME | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
            CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
            CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
            CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
            CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
            CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
            CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
            CPUID_EXT3_TOPOEXT,
        .features[FEAT_8000_0008_EBX] =
            CPUID_8000_0008_EBX_IBPB,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
            CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
            CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
            CPUID_7_0_EBX_SHA_NI,
        /* Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component.
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x8000001E,
        .model_id = "AMD EPYC Processor (with IBPB)",
        .cache_info = &epyc_cache_info,
    },
};
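/*
 * Rough usage note: each entry's .name above is the model string accepted on
 * the command line (e.g. "-cpu EPYC-IBPB"), while .model_id is the brand
 * string the guest reads back from CPUID leaves 0x80000002..0x80000004.
 */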

typedef struct PropValue {
    const char *prop, *value;
} PropValue;

/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "monitor", "off" },
    { NULL, NULL },
};

/* TCG-specific defaults that override all CPU models when using TCG
 */
static PropValue tcg_default_props[] = {
    { NULL, NULL },
};
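/*
 * These default tables are consumed by x86_cpu_apply_props() in
 * x86_cpu_load_def() further below: under KVM every model implicitly gets
 * the kvm_default_props settings (kvmclock=on, ..., monitor=off) unless a
 * caller flips an entry via x86_cpu_change_kvm_default(), e.g. x2apic is
 * forced "off" when the in-kernel irqchip is disabled.
 */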

void x86_cpu_change_kvm_default(const char *prop, const char *value)
{
    PropValue *pv;

    for (pv = kvm_default_props; pv->prop; pv++) {
        if (!strcmp(pv->prop, prop)) {
            pv->value = value;
            break;
        }
    }

    /* It is valid to call this function only for properties that
     * are already present in the kvm_default_props table.
     */
    assert(pv->prop);
}

static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only);

static bool lmce_supported(void)
{
    uint64_t mce_cap = 0;

#ifdef CONFIG_KVM
    if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
        return false;
    }
#endif

    return !!(mce_cap & MCG_LMCE_P);
}
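/*
 * MCG_LMCE_P is the "local MCE supported" bit in the MCE capability value
 * returned by KVM_X86_GET_MCE_CAP_SUPPORTED; lmce_supported() is what gates
 * enabling the "lmce" property in max_x86_cpu_initfn() below.
 */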

#define CPUID_MODEL_ID_SZ 48

/**
 * cpu_x86_fill_model_id:
 * Get CPUID model ID string from host CPU.
 *
 * @str should have at least CPUID_MODEL_ID_SZ bytes
 *
 * The function does NOT add a null terminator to the string.
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
    int i;

    for (i = 0; i < 3; i++) {
        host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
        memcpy(str + i * 16 +  0, &eax, 4);
        memcpy(str + i * 16 +  4, &ebx, 4);
        memcpy(str + i * 16 +  8, &ecx, 4);
        memcpy(str + i * 16 + 12, &edx, 4);
    }
    return 0;
}
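/*
 * Layout reminder: CPUID leaves 0x80000002..0x80000004 each return 16 bytes
 * of the processor brand string in EAX/EBX/ECX/EDX, so the three iterations
 * above fill exactly CPUID_MODEL_ID_SZ (48) bytes of @str.
 */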

static Property max_x86_cpu_properties[] = {
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};

static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->model_description =
        "Enables all features supported by the accelerator in the current host";

    dc->props = max_x86_cpu_properties;
}

static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);

static void max_x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    KVMState *s = kvm_state;

    /* We can't fill the features array here because we don't know yet if
     * "migratable" is true or false.
     */
    cpu->max_features = true;

    if (accel_uses_host_cpuid()) {
        char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
        char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
        int family, model, stepping;
        X86CPUDefinition host_cpudef = { };
        uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

        host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);

        host_vendor_fms(vendor, &family, &model, &stepping);

        cpu_x86_fill_model_id(model_id);

        object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
        object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
        object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
        object_property_set_int(OBJECT(cpu), stepping, "stepping",
                                &error_abort);
        object_property_set_str(OBJECT(cpu), model_id, "model-id",
                                &error_abort);

        if (kvm_enabled()) {
            env->cpuid_min_level =
                kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
            env->cpuid_min_xlevel =
                kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
            env->cpuid_min_xlevel2 =
                kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
        } else {
            env->cpuid_min_level =
                hvf_get_supported_cpuid(0x0, 0, R_EAX);
            env->cpuid_min_xlevel =
                hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
            env->cpuid_min_xlevel2 =
                hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
        }

        if (lmce_supported()) {
            object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
        }
    } else {
        object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
                                "vendor", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
        object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
        object_property_set_str(OBJECT(cpu),
                                "QEMU TCG CPU version " QEMU_HW_VERSION,
                                "model-id", &error_abort);
    }

    object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}

static const TypeInfo max_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("max"),
    .parent = TYPE_X86_CPU,
    .instance_init = max_x86_cpu_initfn,
    .class_init = max_x86_cpu_class_init,
};

#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->host_cpuid_required = true;

#if defined(CONFIG_KVM)
    xcc->model_description =
        "KVM processor with all supported host features ";
#elif defined(CONFIG_HVF)
    xcc->model_description =
        "HVF processor with all supported host features ";
#endif
}

static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = X86_CPU_TYPE_NAME("max"),
    .class_init = host_x86_cpu_class_init,
};

#endif

static char *feature_word_description(FeatureWordInfo *f, uint32_t bit)
{
    assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD);

    switch (f->type) {
    case CPUID_FEATURE_WORD:
        {
            const char *reg = get_register_name_32(f->cpuid.reg);
            assert(reg);
            return g_strdup_printf("CPUID.%02XH:%s",
                                   f->cpuid.eax, reg);
        }
    case MSR_FEATURE_WORD:
        return g_strdup_printf("MSR(%02XH)",
                               f->msr.index);
    }

    return NULL;
}

static void report_unavailable_features(FeatureWord w, uint32_t mask)
{
    FeatureWordInfo *f = &feature_word_info[w];
    int i;
    char *feat_word_str;

    for (i = 0; i < 32; ++i) {
        if ((1UL << i) & mask) {
            feat_word_str = feature_word_description(f, i);
            warn_report("%s doesn't support requested feature: %s%s%s [bit %d]",
                        accel_uses_host_cpuid() ? "host" : "TCG",
                        feat_word_str,
                        f->feat_names[i] ? "." : "",
                        f->feat_names[i] ? f->feat_names[i] : "", i);
            g_free(feat_word_str);
        }
    }
}

static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
                                         const char *name, void *opaque,
                                         Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = (env->cpuid_version >> 8) & 0xf;
    if (value == 0xf) {
        value += (env->cpuid_version >> 20) & 0xff;
    }
    visit_type_int(v, name, &value, errp);
}

static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
                                         const char *name, void *opaque,
                                         Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff + 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    env->cpuid_version &= ~0xff00f00;
    if (value > 0x0f) {
        env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
    } else {
        env->cpuid_version |= value << 8;
    }
}

static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = (env->cpuid_version >> 4) & 0xf;
    value |= ((env->cpuid_version >> 16) & 0xf) << 4;
    visit_type_int(v, name, &value, errp);
}

static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    env->cpuid_version &= ~0xf00f0;
    env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
}

static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
                                           const char *name, void *opaque,
                                           Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = env->cpuid_version & 0xf;
    visit_type_int(v, name, &value, errp);
}

static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
                                           const char *name, void *opaque,
                                           Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    env->cpuid_version &= ~0xf;
    env->cpuid_version |= value & 0xf;
}
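/*
 * The bit twiddling in the family/model/stepping accessors above follows the
 * CPUID[1].EAX layout: stepping in bits 3:0, model in bits 7:4, family in
 * bits 11:8, extended model in bits 19:16 and extended family in bits 27:20.
 * The family setter spills values above 0xf into the extended family field,
 * and the model setter splits its value across the model and extended-model
 * nibbles.
 */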

static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    char *value;

    value = g_malloc(CPUID_VENDOR_SZ + 1);
    x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
                             env->cpuid_vendor3);
    return value;
}

static void x86_cpuid_set_vendor(Object *obj, const char *value,
                                 Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int i;

    if (strlen(value) != CPUID_VENDOR_SZ) {
        error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
        return;
    }

    env->cpuid_vendor1 = 0;
    env->cpuid_vendor2 = 0;
    env->cpuid_vendor3 = 0;
    for (i = 0; i < 4; i++) {
        env->cpuid_vendor1 |= ((uint8_t)value[i]) << (8 * i);
        env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
        env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
    }
}
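/*
 * The 12-character vendor string is stored the same way CPUID leaf 0 reports
 * it: four bytes each in cpuid_vendor1/2/3, corresponding to EBX, EDX and
 * ECX, so "GenuineIntel" packs as EBX="Genu", EDX="ineI", ECX="ntel".
 */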

static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    char *value;
    int i;

    value = g_malloc(48 + 1);
    for (i = 0; i < 48; i++) {
        value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
    }
    value[48] = '\0';
    return value;
}

static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
                                   Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int c, len, i;

    if (model_id == NULL) {
        model_id = "";
    }
    len = strlen(model_id);
    memset(env->cpuid_model, 0, 48);
    for (i = 0; i < 48; i++) {
        if (i >= len) {
            c = '\0';
        } else {
            c = (uint8_t)model_id[i];
        }
        env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
    }
}

static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    int64_t value;

    value = cpu->env.tsc_khz * 1000;
    visit_type_int(v, name, &value, errp);
}

static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    const int64_t min = 0;
    const int64_t max = INT64_MAX;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
}
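/*
 * The "tsc-frequency" property is expressed in Hz on the QOM side but stored
 * in kHz internally, hence the *1000 and /1000 conversions in the getter and
 * setter above.
 */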

/* Generic getter for "feature-words" and "filtered-features" properties */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        /*
         * We didn't have MSR features when "feature-words" was
         * introduced. Therefore skipped other type entries.
         */
        if (wi->type != CPUID_FEATURE_WORD) {
            continue;
        }
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        qwi->cpuid_input_eax = wi->cpuid.eax;
        qwi->has_cpuid_input_ecx = wi->cpuid.needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid.ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid.reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
}

static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    int64_t value = cpu->hyperv_spinlock_attempts;

    visit_type_int(v, name, &value, errp);
}

static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const int64_t min = 0xFFF;
    const int64_t max = UINT_MAX;
    X86CPU *cpu = X86_CPU(obj);
    Error *err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    if (value < min || value > max) {
        error_setg(errp, "Property %s.%s doesn't take value %" PRId64
                   " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
                   object_get_typename(obj), name ? name : "null",
                   value, min, max);
        return;
    }
    cpu->hyperv_spinlock_attempts = value;
}

static const PropertyInfo qdev_prop_spinlocks = {
    .get = x86_get_hv_spinlocks,
    .set = x86_set_hv_spinlocks,
};

/* Convert all '_' in a feature string option name to '-', to make feature
 * name conform to QOM property naming rule, which uses '-' instead of '_'.
 */
static inline void feat2prop(char *s)
{
    while ((s = strchr(s, '_'))) {
        *s = '-';
    }
}
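/* e.g. feat2prop() turns the legacy spelling "tsc_freq" into "tsc-freq",
 * which x86_cpu_parse_featurestr() below then maps to "tsc-frequency".
 */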

/* Return the feature property name for a feature flag bit */
static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
{
    /* XSAVE components are automatically enabled by other features,
     * so return the original feature name instead
     */
    if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
        int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;

        if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
            x86_ext_save_areas[comp].bits) {
            w = x86_ext_save_areas[comp].feature;
            bitnr = ctz32(x86_ext_save_areas[comp].bits);
        }
    }

    assert(w < FEATURE_WORDS);
    return feature_word_info[w].feat_names[bitnr];
}

/* Compatibility hack to maintain legacy +-feat semantic,
 * where +-feat overwrites any feature set by
 * feat=on|feat even if the latter is parsed after +-feat
 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
 */
static GList *plus_features, *minus_features;

static gint compare_string(gconstpointer a, gconstpointer b)
{
    return g_strcmp0(a, b);
}

/* Parse "+feature,-feature,feature=foo" CPU feature string
 */
static void x86_cpu_parse_featurestr(const char *typename, char *features,
                                     Error **errp)
{
    char *featurestr; /* Single 'key=value" string being parsed */
    static bool cpu_globals_initialized;
    bool ambiguous = false;

    if (cpu_globals_initialized) {
        return;
    }
    cpu_globals_initialized = true;

    if (!features) {
        return;
    }

    for (featurestr = strtok(features, ",");
         featurestr;
         featurestr = strtok(NULL, ",")) {
        const char *name;
        const char *val = NULL;
        char *eq = NULL;
        char num[32];
        GlobalProperty *prop;

        /* Compatibility syntax: */
        if (featurestr[0] == '+') {
            plus_features = g_list_append(plus_features,
                                          g_strdup(featurestr + 1));
            continue;
        } else if (featurestr[0] == '-') {
            minus_features = g_list_append(minus_features,
                                           g_strdup(featurestr + 1));
            continue;
        }

        eq = strchr(featurestr, '=');
        if (eq) {
            *eq++ = 0;
            val = eq;
        } else {
            val = "on";
        }

        feat2prop(featurestr);
        name = featurestr;

        if (g_list_find_custom(plus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"+%s\" and \"%s=%s\"",
                        name, name, val);
            ambiguous = true;
        }
        if (g_list_find_custom(minus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"-%s\" and \"%s=%s\"",
                        name, name, val);
            ambiguous = true;
        }

        /* Special case: */
        if (!strcmp(name, "tsc-freq")) {
            int ret;
            uint64_t tsc_freq;

            ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
            if (ret < 0 || tsc_freq > INT64_MAX) {
                error_setg(errp, "bad numerical value %s", val);
                return;
            }
            snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
            val = num;
            name = "tsc-frequency";
        }

        prop = g_new0(typeof(*prop), 1);
        prop->driver = typename;
        prop->property = g_strdup(name);
        prop->value = g_strdup(val);
        prop->errp = &error_fatal;
        qdev_prop_register_global(prop);
    }

    if (ambiguous) {
        warn_report("Compatibility of ambiguous CPU model "
                    "strings won't be kept on future QEMU versions");
    }
}

static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
static int x86_cpu_filter_features(X86CPU *cpu);

/* Check for missing features that may prevent the CPU class from
 * running using the current machine and accelerator.
 */
static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
                                                 strList **missing_feats)
{
    X86CPU *xc;
    FeatureWord w;
    Error *err = NULL;
    strList **next = missing_feats;

    if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
        strList *new = g_new0(strList, 1);
        new->value = g_strdup("kvm");
        *missing_feats = new;
        return;
    }

    xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));

    x86_cpu_expand_features(xc, &err);
    if (err) {
        /* Errors at x86_cpu_expand_features should never happen,
         * but in case it does, just report the model as not
         * runnable at all using the "type" property.
         */
        strList *new = g_new0(strList, 1);
        new->value = g_strdup("type");
        *next = new;
        next = &new->next;
    }

    x86_cpu_filter_features(xc);

    for (w = 0; w < FEATURE_WORDS; w++) {
        uint32_t filtered = xc->filtered_features[w];
        int i;
        for (i = 0; i < 32; i++) {
            if (filtered & (1UL << i)) {
                strList *new = g_new0(strList, 1);
                new->value = g_strdup(x86_cpu_feature_name(w, i));
                *next = new;
                next = &new->next;
            }
        }
    }

    object_unref(OBJECT(xc));
}

/* Print all cpuid feature names in featureset
 */
static void listflags(FILE *f, fprintf_function print, GList *features)
{
    size_t len = 0;
    GList *tmp;

    for (tmp = features; tmp; tmp = tmp->next) {
        const char *name = tmp->data;
        if ((len + strlen(name) + 1) >= 75) {
            print(f, "\n");
            len = 0;
        }
        print(f, "%s%s", len == 0 ? "  " : " ", name);
        len += strlen(name) + 1;
    }
    print(f, "\n");
}

/* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
    X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
    char *name_a, *name_b;
    int ret;

    if (cc_a->ordering != cc_b->ordering) {
        ret = cc_a->ordering - cc_b->ordering;
    } else {
        name_a = x86_cpu_class_get_model_name(cc_a);
        name_b = x86_cpu_class_get_model_name(cc_b);
        ret = strcmp(name_a, name_b);
        g_free(name_a);
        g_free(name_b);
    }
    return ret;
}

static GSList *get_sorted_cpu_model_list(void)
{
    GSList *list = object_class_get_list(TYPE_X86_CPU, false);
    list = g_slist_sort(list, x86_cpu_list_compare);
    return list;
}

static void x86_cpu_list_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    X86CPUClass *cc = X86_CPU_CLASS(oc);
    CPUListState *s = user_data;
    char *name = x86_cpu_class_get_model_name(cc);
    const char *desc = cc->model_description;
    if (!desc && cc->cpu_def) {
        desc = cc->cpu_def->model_id;
    }

    (*s->cpu_fprintf)(s->file, "x86 %-20s %-48s\n",
                      name, desc);
    g_free(name);
}

/* list available CPU models and flags */
void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    int i, j;
    CPUListState s = {
        .file = f,
        .cpu_fprintf = cpu_fprintf,
    };
    GSList *list;
    GList *names = NULL;

    (*cpu_fprintf)(f, "Available CPUs:\n");
    list = get_sorted_cpu_model_list();
    g_slist_foreach(list, x86_cpu_list_entry, &s);
    g_slist_free(list);

    for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
        FeatureWordInfo *fw = &feature_word_info[i];
        for (j = 0; j < 32; j++) {
            if (fw->feat_names[j]) {
                names = g_list_append(names, (gpointer)fw->feat_names[j]);
            }
        }
    }

    names = g_list_sort(names, (GCompareFunc)strcmp);

    (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
    listflags(f, cpu_fprintf, names);
    (*cpu_fprintf)(f, "\n");
    g_list_free(names);
}

static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    X86CPUClass *cc = X86_CPU_CLASS(oc);
    CpuDefinitionInfoList **cpu_list = user_data;
    CpuDefinitionInfoList *entry;
    CpuDefinitionInfo *info;

    info = g_malloc0(sizeof(*info));
    info->name = x86_cpu_class_get_model_name(cc);
    x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
    info->has_unavailable_features = true;
    info->q_typename = g_strdup(object_class_get_name(oc));
    info->migration_safe = cc->migration_safe;
    info->has_migration_safe = true;
    info->q_static = cc->static_model;

    entry = g_malloc0(sizeof(*entry));
    entry->value = info;
    entry->next = *cpu_list;
    *cpu_list = entry;
}

CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    GSList *list = get_sorted_cpu_model_list();
    g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
    g_slist_free(list);
    return cpu_list;
}
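/*
 * arch_query_cpu_definitions() backs the QMP "query-cpu-definitions"
 * command: each entry reports the model name, whether it is migration-safe
 * and static, and which requested features would be unavailable on the
 * current host and accelerator.
 */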
static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t r = 0;

    if (kvm_enabled()) {
        switch (wi->type) {
        case CPUID_FEATURE_WORD:
            r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid.eax,
                                                        wi->cpuid.ecx,
                                                        wi->cpuid.reg);
            break;
        case MSR_FEATURE_WORD:
            r = kvm_arch_get_supported_msr_feature(kvm_state,
                        wi->msr.index);
            break;
        }
    } else if (hvf_enabled()) {
        if (wi->type != CPUID_FEATURE_WORD) {
            return 0;
        }
        r = hvf_get_supported_cpuid(wi->cpuid.eax,
                                    wi->cpuid.ecx,
                                    wi->cpuid.reg);
    } else if (tcg_enabled()) {
        r = wi->tcg_features;
    } else {
        return ~0;
    }
    if (migratable_only) {
        r &= x86_cpu_get_migratable_flags(w);
    }
    return r;
}
static void x86_cpu_report_filtered_features(X86CPU *cpu)
{
    FeatureWord w;

    for (w = 0; w < FEATURE_WORDS; w++) {
        report_unavailable_features(w, cpu->filtered_features[w]);
    }
}

static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
{
    PropValue *pv;
    for (pv = props; pv->prop; pv++) {
        if (!pv->value) {
            continue;
        }
        object_property_parse(OBJECT(cpu), pv->value, pv->prop,
                              &error_abort);
    }
}
/* Load data from X86CPUDefinition into a X86CPU object
 */
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    /*NOTE: any property set by this function should be returned by
     * x86_cpu_static_props(), so static expansion of
     * query-cpu-model-expansion is always complete.
     */

    /* CPU models only set _minimum_ values for level/xlevel: */
    object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
    object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);

    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* legacy-cache defaults to 'off' if CPU model provides cache info */
    cpu->legacy_cache = !def->cache_info;

    /* Special cases not set in the X86CPUDefinition structs: */
    /* TODO: in-kernel irqchip for hvf */
    if (kvm_enabled()) {
        if (!kvm_irqchip_in_kernel()) {
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    } else if (tcg_enabled()) {
        x86_cpu_apply_props(cpu, tcg_default_props);
    }

    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (accel_uses_host_cpuid()) {
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
}
/* Return a QDict containing keys for all properties that can be included
 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
 * must be included in the dictionary.
 */
static QDict *x86_cpu_static_props(void)
{
    FeatureWord w;
    int i;
    static const char *props[] = {
        "min-level",
        "min-xlevel",
        "family",
        "model",
        "stepping",
        "model-id",
        "vendor",
        "lmce",
        NULL,
    };
    static QDict *d;

    if (d) {
        return d;
    }

    d = qdict_new();
    for (i = 0; props[i]; i++) {
        qdict_put_null(d, props[i]);
    }

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *fi = &feature_word_info[w];
        int bit;
        for (bit = 0; bit < 32; bit++) {
            if (!fi->feat_names[bit]) {
                continue;
            }
            qdict_put_null(d, fi->feat_names[bit]);
        }
    }

    return d;
}
/* Add an entry to @props dict, with the value for property. */
static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
{
    QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
                                                 &error_abort);

    qdict_put_obj(props, prop, value);
}

/* Convert CPU model data from X86CPU object to a property dictionary
 * that can recreate exactly the same CPU model.
 */
static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
{
    QDict *sprops = x86_cpu_static_props();
    const QDictEntry *e;

    for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
        const char *prop = qdict_entry_key(e);
        x86_cpu_expand_prop(cpu, props, prop);
    }
}
/* Convert CPU model data from X86CPU object to a property dictionary
 * that can recreate exactly the same CPU model, including every
 * writeable QOM property.
 */
static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
{
    ObjectPropertyIterator iter;
    ObjectProperty *prop;

    object_property_iter_init(&iter, OBJECT(cpu));
    while ((prop = object_property_iter_next(&iter))) {
        /* skip read-only or write-only properties */
        if (!prop->get || !prop->set) {
            continue;
        }

        /* "hotplugged" is the only property that is configurable
         * on the command-line but will be set differently on CPUs
         * created using "-cpu ... -smp ..." and by CPUs created
         * on the fly by x86_cpu_from_model() for querying. Skip it.
         */
        if (!strcmp(prop->name, "hotplugged")) {
            continue;
        }
        x86_cpu_expand_prop(cpu, props, prop->name);
    }
}
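/* Set every property listed in @props (a QDict of property name -> value)
 * on @obj, stopping at the first error and propagating it to @errp. */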
static void object_apply_props(Object *obj, QDict *props, Error **errp)
{
    const QDictEntry *prop;
    Error *err = NULL;

    for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
        object_property_set_qobject(obj, qdict_entry_value(prop),
                                    qdict_entry_key(prop), &err);
        if (err) {
            break;
        }
    }

    error_propagate(errp, err);
}
/* Create X86CPU object according to model+props specification */
static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
{
    X86CPU *xc = NULL;
    X86CPUClass *xcc;
    Error *err = NULL;

    xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
    if (xcc == NULL) {
        error_setg(&err, "CPU model '%s' not found", model);
        goto out;
    }

    xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
    if (props) {
        object_apply_props(OBJECT(xc), props, &err);
        if (err) {
            goto out;
        }
    }

    x86_cpu_expand_features(xc, &err);
    if (err) {
        goto out;
    }

out:
    if (err) {
        error_propagate(errp, err);
        object_unref(OBJECT(xc));
        xc = NULL;
    }
    return xc;
}
CpuModelExpansionInfo *
arch_query_cpu_model_expansion(CpuModelExpansionType type,
                               CpuModelInfo *model,
                               Error **errp)
{
    X86CPU *xc = NULL;
    Error *err = NULL;
    CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
    QDict *props = NULL;
    const char *base_name;

    xc = x86_cpu_from_model(model->name,
                            model->has_props ?
                                qobject_to(QDict, model->props) :
                                NULL, &err);
    if (err) {
        goto out;
    }

    props = qdict_new();
    ret->model = g_new0(CpuModelInfo, 1);
    ret->model->props = QOBJECT(props);
    ret->model->has_props = true;

    switch (type) {
    case CPU_MODEL_EXPANSION_TYPE_STATIC:
        /* Static expansion will be based on "base" only */
        base_name = "base";
        x86_cpu_to_dict(xc, props);
        break;
    case CPU_MODEL_EXPANSION_TYPE_FULL:
        /* As we don't return every single property, full expansion needs
         * to keep the original model name+props, and add extra
         * properties on top of that.
         */
        base_name = model->name;
        x86_cpu_to_dict_full(xc, props);
        break;
    default:
        error_setg(&err, "Unsupported expansion type");
        goto out;
    }

    x86_cpu_to_dict(xc, props);

    ret->model->name = g_strdup(base_name);

out:
    object_unref(OBJECT(xc));
    if (err) {
        error_propagate(errp, err);
        qapi_free_CpuModelExpansionInfo(ret);
        ret = NULL;
    }
    return ret;
}
static gchar *x86_gdb_arch_name(CPUState *cs)
{
#ifdef TARGET_X86_64
    return g_strdup("i386:x86-64");
#else
    return g_strdup("i386");
#endif
}

static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
{
    X86CPUDefinition *cpudef = data;
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->cpu_def = cpudef;
    xcc->migration_safe = true;
}

static void x86_register_cpudef_type(X86CPUDefinition *def)
{
    char *typename = x86_cpu_type_name(def->name);
    TypeInfo ti = {
        .name = typename,
        .parent = TYPE_X86_CPU,
        .class_init = x86_cpu_cpudef_class_init,
        .class_data = def,
    };

    /* AMD aliases are handled at runtime based on CPUID vendor, so
     * they shouldn't be set on the CPU model table.
     */
    assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
    /* catch mistakes instead of silently truncating model_id when too long */
    assert(def->model_id && strlen(def->model_id) <= 48);

    type_register(&ti);
    g_free(typename);
}
#if !defined(CONFIG_USER_ONLY)

void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}

#endif /* !CONFIG_USER_ONLY */
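/* Fill @eax/@ebx/@ecx/@edx with the guest-visible contents of CPUID leaf
 * @index, sub-leaf @count, as configured for @env. */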
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint32_t pkg_offset;
    uint32_t limit;
    uint32_t signature[3];

    /* Calculate & apply limits for different index ranges */
    if (index >= 0xC0000000) {
        limit = env->cpuid_xlevel2;
    } else if (index >= 0x80000000) {
        limit = env->cpuid_xlevel;
    } else if (index >= 0x40000000) {
        limit = 0x40000001;
    } else {
        limit = env->cpuid_level;
    }

    if (index > limit) {
        /* Intel documentation states that invalid EAX input will
         * return the same information as EAX=cpuid_level
         * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
         */
        index = env->cpuid_level;
    }

    switch(index) {
    case 0:
4166 *eax
= env
->cpuid_level
;
4167 *ebx
= env
->cpuid_vendor1
;
4168 *edx
= env
->cpuid_vendor2
;
4169 *ecx
= env
->cpuid_vendor3
;
4172 *eax
= env
->cpuid_version
;
4173 *ebx
= (cpu
->apic_id
<< 24) |
4174 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
4175 *ecx
= env
->features
[FEAT_1_ECX
];
4176 if ((*ecx
& CPUID_EXT_XSAVE
) && (env
->cr
[4] & CR4_OSXSAVE_MASK
)) {
4177 *ecx
|= CPUID_EXT_OSXSAVE
;
4179 *edx
= env
->features
[FEAT_1_EDX
];
4180 if (cs
->nr_cores
* cs
->nr_threads
> 1) {
4181 *ebx
|= (cs
->nr_cores
* cs
->nr_threads
) << 16;
4186 /* cache info: needed for Pentium Pro compatibility */
4187 if (cpu
->cache_info_passthrough
) {
4188 host_cpuid(index
, 0, eax
, ebx
, ecx
, edx
);
4191 *eax
= 1; /* Number of CPUID[EAX=2] calls required */
4193 if (!cpu
->enable_l3_cache
) {
4196 *ecx
= cpuid2_cache_descriptor(env
->cache_info_cpuid2
.l3_cache
);
4198 *edx
= (cpuid2_cache_descriptor(env
->cache_info_cpuid2
.l1d_cache
) << 16) |
4199 (cpuid2_cache_descriptor(env
->cache_info_cpuid2
.l1i_cache
) << 8) |
4200 (cpuid2_cache_descriptor(env
->cache_info_cpuid2
.l2_cache
));
4203 /* cache info: needed for Core compatibility */
4204 if (cpu
->cache_info_passthrough
) {
4205 host_cpuid(index
, count
, eax
, ebx
, ecx
, edx
);
4206 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
4207 *eax
&= ~0xFC000000;
4208 if ((*eax
& 31) && cs
->nr_cores
> 1) {
4209 *eax
|= (cs
->nr_cores
- 1) << 26;
4214 case 0: /* L1 dcache info */
4215 encode_cache_cpuid4(env
->cache_info_cpuid4
.l1d_cache
,
4217 eax
, ebx
, ecx
, edx
);
4219 case 1: /* L1 icache info */
4220 encode_cache_cpuid4(env
->cache_info_cpuid4
.l1i_cache
,
4222 eax
, ebx
, ecx
, edx
);
4224 case 2: /* L2 cache info */
4225 encode_cache_cpuid4(env
->cache_info_cpuid4
.l2_cache
,
4226 cs
->nr_threads
, cs
->nr_cores
,
4227 eax
, ebx
, ecx
, edx
);
4229 case 3: /* L3 cache info */
4230 pkg_offset
= apicid_pkg_offset(cs
->nr_cores
, cs
->nr_threads
);
4231 if (cpu
->enable_l3_cache
) {
4232 encode_cache_cpuid4(env
->cache_info_cpuid4
.l3_cache
,
4233 (1 << pkg_offset
), cs
->nr_cores
,
4234 eax
, ebx
, ecx
, edx
);
4238 default: /* end of info */
4239 *eax
= *ebx
= *ecx
= *edx
= 0;
4245 /* MONITOR/MWAIT Leaf */
4246 *eax
= cpu
->mwait
.eax
; /* Smallest monitor-line size in bytes */
4247 *ebx
= cpu
->mwait
.ebx
; /* Largest monitor-line size in bytes */
4248 *ecx
= cpu
->mwait
.ecx
; /* flags */
4249 *edx
= cpu
->mwait
.edx
; /* mwait substates */
4252 /* Thermal and Power Leaf */
4253 *eax
= env
->features
[FEAT_6_EAX
];
4259 /* Structured Extended Feature Flags Enumeration Leaf */
4261 *eax
= 0; /* Maximum ECX value for sub-leaves */
4262 *ebx
= env
->features
[FEAT_7_0_EBX
]; /* Feature flags */
4263 *ecx
= env
->features
[FEAT_7_0_ECX
]; /* Feature flags */
4264 if ((*ecx
& CPUID_7_0_ECX_PKU
) && env
->cr
[4] & CR4_PKE_MASK
) {
4265 *ecx
|= CPUID_7_0_ECX_OSPKE
;
4267 *edx
= env
->features
[FEAT_7_0_EDX
]; /* Feature flags */
4276 /* Direct Cache Access Information Leaf */
4277 *eax
= 0; /* Bits 0-31 in DCA_CAP MSR */
4283 /* Architectural Performance Monitoring Leaf */
4284 if (kvm_enabled() && cpu
->enable_pmu
) {
4285 KVMState
*s
= cs
->kvm_state
;
4287 *eax
= kvm_arch_get_supported_cpuid(s
, 0xA, count
, R_EAX
);
4288 *ebx
= kvm_arch_get_supported_cpuid(s
, 0xA, count
, R_EBX
);
4289 *ecx
= kvm_arch_get_supported_cpuid(s
, 0xA, count
, R_ECX
);
4290 *edx
= kvm_arch_get_supported_cpuid(s
, 0xA, count
, R_EDX
);
4291 } else if (hvf_enabled() && cpu
->enable_pmu
) {
4292 *eax
= hvf_get_supported_cpuid(0xA, count
, R_EAX
);
4293 *ebx
= hvf_get_supported_cpuid(0xA, count
, R_EBX
);
4294 *ecx
= hvf_get_supported_cpuid(0xA, count
, R_ECX
);
4295 *edx
= hvf_get_supported_cpuid(0xA, count
, R_EDX
);
4304 /* Extended Topology Enumeration Leaf */
4305 if (!cpu
->enable_cpuid_0xb
) {
4306 *eax
= *ebx
= *ecx
= *edx
= 0;
4310 *ecx
= count
& 0xff;
4311 *edx
= cpu
->apic_id
;
4315 *eax
= apicid_core_offset(cs
->nr_cores
, cs
->nr_threads
);
4316 *ebx
= cs
->nr_threads
;
4317 *ecx
|= CPUID_TOPOLOGY_LEVEL_SMT
;
4320 *eax
= apicid_pkg_offset(cs
->nr_cores
, cs
->nr_threads
);
4321 *ebx
= cs
->nr_cores
* cs
->nr_threads
;
4322 *ecx
|= CPUID_TOPOLOGY_LEVEL_CORE
;
4327 *ecx
|= CPUID_TOPOLOGY_LEVEL_INVALID
;
4330 assert(!(*eax
& ~0x1f));
4331 *ebx
&= 0xffff; /* The count doesn't need to be reliable. */
4334 /* Processor Extended State */
4339 if (!(env
->features
[FEAT_1_ECX
] & CPUID_EXT_XSAVE
)) {
4344 *ecx
= xsave_area_size(x86_cpu_xsave_components(cpu
));
4345 *eax
= env
->features
[FEAT_XSAVE_COMP_LO
];
4346 *edx
= env
->features
[FEAT_XSAVE_COMP_HI
];
4347 *ebx
= xsave_area_size(env
->xcr0
);
4348 } else if (count
== 1) {
4349 *eax
= env
->features
[FEAT_XSAVE
];
4350 } else if (count
< ARRAY_SIZE(x86_ext_save_areas
)) {
4351 if ((x86_cpu_xsave_components(cpu
) >> count
) & 1) {
4352 const ExtSaveArea
*esa
= &x86_ext_save_areas
[count
];
4360 /* Intel Processor Trace Enumeration */
4365 if (!(env
->features
[FEAT_7_0_EBX
] & CPUID_7_0_EBX_INTEL_PT
) ||
4371 *eax
= INTEL_PT_MAX_SUBLEAF
;
4372 *ebx
= INTEL_PT_MINIMAL_EBX
;
4373 *ecx
= INTEL_PT_MINIMAL_ECX
;
4374 } else if (count
== 1) {
4375 *eax
= INTEL_PT_MTC_BITMAP
| INTEL_PT_ADDR_RANGES_NUM
;
4376 *ebx
= INTEL_PT_PSB_BITMAP
| INTEL_PT_CYCLE_BITMAP
;
         * CPUID code in kvm_arch_init_vcpu() ignores stuff
         * set here, but we restrict to TCG nonetheless.
4385 if (tcg_enabled() && cpu
->expose_tcg
) {
4386 memcpy(signature
, "TCGTCGTCGTCG", 12);
4388 *ebx
= signature
[0];
4389 *ecx
= signature
[1];
4390 *edx
= signature
[2];
4405 *eax
= env
->cpuid_xlevel
;
4406 *ebx
= env
->cpuid_vendor1
;
4407 *edx
= env
->cpuid_vendor2
;
4408 *ecx
= env
->cpuid_vendor3
;
4411 *eax
= env
->cpuid_version
;
4413 *ecx
= env
->features
[FEAT_8000_0001_ECX
];
4414 *edx
= env
->features
[FEAT_8000_0001_EDX
];
4416 /* The Linux kernel checks for the CMPLegacy bit and
4417 * discards multiple thread information if it is set.
4418 * So don't set it here for Intel to make Linux guests happy.
4420 if (cs
->nr_cores
* cs
->nr_threads
> 1) {
4421 if (env
->cpuid_vendor1
!= CPUID_VENDOR_INTEL_1
||
4422 env
->cpuid_vendor2
!= CPUID_VENDOR_INTEL_2
||
4423 env
->cpuid_vendor3
!= CPUID_VENDOR_INTEL_3
) {
4424 *ecx
|= 1 << 1; /* CmpLegacy bit */
4431 *eax
= env
->cpuid_model
[(index
- 0x80000002) * 4 + 0];
4432 *ebx
= env
->cpuid_model
[(index
- 0x80000002) * 4 + 1];
4433 *ecx
= env
->cpuid_model
[(index
- 0x80000002) * 4 + 2];
4434 *edx
= env
->cpuid_model
[(index
- 0x80000002) * 4 + 3];
4437 /* cache info (L1 cache) */
4438 if (cpu
->cache_info_passthrough
) {
4439 host_cpuid(index
, 0, eax
, ebx
, ecx
, edx
);
4442 *eax
= (L1_DTLB_2M_ASSOC
<< 24) | (L1_DTLB_2M_ENTRIES
<< 16) | \
4443 (L1_ITLB_2M_ASSOC
<< 8) | (L1_ITLB_2M_ENTRIES
);
4444 *ebx
= (L1_DTLB_4K_ASSOC
<< 24) | (L1_DTLB_4K_ENTRIES
<< 16) | \
4445 (L1_ITLB_4K_ASSOC
<< 8) | (L1_ITLB_4K_ENTRIES
);
4446 *ecx
= encode_cache_cpuid80000005(env
->cache_info_amd
.l1d_cache
);
4447 *edx
= encode_cache_cpuid80000005(env
->cache_info_amd
.l1i_cache
);
4450 /* cache info (L2 cache) */
4451 if (cpu
->cache_info_passthrough
) {
4452 host_cpuid(index
, 0, eax
, ebx
, ecx
, edx
);
4455 *eax
= (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC
) << 28) | \
4456 (L2_DTLB_2M_ENTRIES
<< 16) | \
4457 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC
) << 12) | \
4458 (L2_ITLB_2M_ENTRIES
);
4459 *ebx
= (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC
) << 28) | \
4460 (L2_DTLB_4K_ENTRIES
<< 16) | \
4461 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC
) << 12) | \
4462 (L2_ITLB_4K_ENTRIES
);
4463 encode_cache_cpuid80000006(env
->cache_info_amd
.l2_cache
,
4464 cpu
->enable_l3_cache
?
4465 env
->cache_info_amd
.l3_cache
: NULL
,
4472 *edx
= env
->features
[FEAT_8000_0007_EDX
];
4475 /* virtual & phys address size in low 2 bytes. */
4476 if (env
->features
[FEAT_8000_0001_EDX
] & CPUID_EXT2_LM
) {
4477 /* 64 bit processor */
4478 *eax
= cpu
->phys_bits
; /* configurable physical bits */
4479 if (env
->features
[FEAT_7_0_ECX
] & CPUID_7_0_ECX_LA57
) {
4480 *eax
|= 0x00003900; /* 57 bits virtual */
4482 *eax
|= 0x00003000; /* 48 bits virtual */
4485 *eax
= cpu
->phys_bits
;
4487 *ebx
= env
->features
[FEAT_8000_0008_EBX
];
4490 if (cs
->nr_cores
* cs
->nr_threads
> 1) {
4491 *ecx
|= (cs
->nr_cores
* cs
->nr_threads
) - 1;
4495 if (env
->features
[FEAT_8000_0001_ECX
] & CPUID_EXT3_SVM
) {
4496 *eax
= 0x00000001; /* SVM Revision */
4497 *ebx
= 0x00000010; /* nr of ASIDs */
4499 *edx
= env
->features
[FEAT_SVM
]; /* optional features */
4510 case 0: /* L1 dcache info */
4511 encode_cache_cpuid8000001d(env
->cache_info_amd
.l1d_cache
, cs
,
4512 eax
, ebx
, ecx
, edx
);
4514 case 1: /* L1 icache info */
4515 encode_cache_cpuid8000001d(env
->cache_info_amd
.l1i_cache
, cs
,
4516 eax
, ebx
, ecx
, edx
);
4518 case 2: /* L2 cache info */
4519 encode_cache_cpuid8000001d(env
->cache_info_amd
.l2_cache
, cs
,
4520 eax
, ebx
, ecx
, edx
);
4522 case 3: /* L3 cache info */
4523 encode_cache_cpuid8000001d(env
->cache_info_amd
.l3_cache
, cs
,
4524 eax
, ebx
, ecx
, edx
);
4526 default: /* end of info */
4527 *eax
= *ebx
= *ecx
= *edx
= 0;
4532 assert(cpu
->core_id
<= 255);
4533 encode_topo_cpuid8000001e(cs
, cpu
,
4534 eax
, ebx
, ecx
, edx
);
4537 *eax
= env
->cpuid_xlevel2
;
4543 /* Support for VIA CPU's CPUID instruction */
4544 *eax
= env
->cpuid_version
;
4547 *edx
= env
->features
[FEAT_C000_0001_EDX
];
4552 /* Reserved for the future, and now filled with zero */
4559 *eax
= sev_enabled() ? 0x2 : 0;
4560 *ebx
= sev_get_cbit_position();
4561 *ebx
|= sev_get_reduced_phys_bits() << 6;
4566 /* reserved values: zero */
4575 /* CPUClass::reset() */
4576 static void x86_cpu_reset(CPUState
*s
)
4578 X86CPU
*cpu
= X86_CPU(s
);
4579 X86CPUClass
*xcc
= X86_CPU_GET_CLASS(cpu
);
4580 CPUX86State
*env
= &cpu
->env
;
4585 xcc
->parent_reset(s
);
4587 memset(env
, 0, offsetof(CPUX86State
, end_reset_fields
));
4589 env
->old_exception
= -1;
4591 /* init to reset state */
4593 env
->hflags2
|= HF2_GIF_MASK
;
4595 cpu_x86_update_cr0(env
, 0x60000010);
4596 env
->a20_mask
= ~0x0;
4597 env
->smbase
= 0x30000;
4598 env
->msr_smi_count
= 0;
4600 env
->idt
.limit
= 0xffff;
4601 env
->gdt
.limit
= 0xffff;
4602 env
->ldt
.limit
= 0xffff;
4603 env
->ldt
.flags
= DESC_P_MASK
| (2 << DESC_TYPE_SHIFT
);
4604 env
->tr
.limit
= 0xffff;
4605 env
->tr
.flags
= DESC_P_MASK
| (11 << DESC_TYPE_SHIFT
);
4607 cpu_x86_load_seg_cache(env
, R_CS
, 0xf000, 0xffff0000, 0xffff,
4608 DESC_P_MASK
| DESC_S_MASK
| DESC_CS_MASK
|
4609 DESC_R_MASK
| DESC_A_MASK
);
4610 cpu_x86_load_seg_cache(env
, R_DS
, 0, 0, 0xffff,
4611 DESC_P_MASK
| DESC_S_MASK
| DESC_W_MASK
|
4613 cpu_x86_load_seg_cache(env
, R_ES
, 0, 0, 0xffff,
4614 DESC_P_MASK
| DESC_S_MASK
| DESC_W_MASK
|
4616 cpu_x86_load_seg_cache(env
, R_SS
, 0, 0, 0xffff,
4617 DESC_P_MASK
| DESC_S_MASK
| DESC_W_MASK
|
4619 cpu_x86_load_seg_cache(env
, R_FS
, 0, 0, 0xffff,
4620 DESC_P_MASK
| DESC_S_MASK
| DESC_W_MASK
|
4622 cpu_x86_load_seg_cache(env
, R_GS
, 0, 0, 0xffff,
4623 DESC_P_MASK
| DESC_S_MASK
| DESC_W_MASK
|
4627 env
->regs
[R_EDX
] = env
->cpuid_version
;
4632 for (i
= 0; i
< 8; i
++) {
4635 cpu_set_fpuc(env
, 0x37f);
4637 env
->mxcsr
= 0x1f80;
4638 /* All units are in INIT state. */
4641 env
->pat
= 0x0007040600070406ULL
;
4642 env
->msr_ia32_misc_enable
= MSR_IA32_MISC_ENABLE_DEFAULT
;
4644 memset(env
->dr
, 0, sizeof(env
->dr
));
4645 env
->dr
[6] = DR6_FIXED_1
;
4646 env
->dr
[7] = DR7_FIXED_1
;
4647 cpu_breakpoint_remove_all(s
, BP_CPU
);
4648 cpu_watchpoint_remove_all(s
, BP_CPU
);
4651 xcr0
= XSTATE_FP_MASK
;
4653 #ifdef CONFIG_USER_ONLY
4654 /* Enable all the features for user-mode. */
4655 if (env
->features
[FEAT_1_EDX
] & CPUID_SSE
) {
4656 xcr0
|= XSTATE_SSE_MASK
;
4658 for (i
= 2; i
< ARRAY_SIZE(x86_ext_save_areas
); i
++) {
4659 const ExtSaveArea
*esa
= &x86_ext_save_areas
[i
];
4660 if (env
->features
[esa
->feature
] & esa
->bits
) {
4665 if (env
->features
[FEAT_1_ECX
] & CPUID_EXT_XSAVE
) {
4666 cr4
|= CR4_OSFXSR_MASK
| CR4_OSXSAVE_MASK
;
4668 if (env
->features
[FEAT_7_0_EBX
] & CPUID_7_0_EBX_FSGSBASE
) {
4669 cr4
|= CR4_FSGSBASE_MASK
;
4674 cpu_x86_update_cr4(env
, cr4
);
4677 * SDM 11.11.5 requires:
4678 * - IA32_MTRR_DEF_TYPE MSR.E = 0
4679 * - IA32_MTRR_PHYSMASKn.V = 0
4680 * All other bits are undefined. For simplification, zero it all.
4682 env
->mtrr_deftype
= 0;
4683 memset(env
->mtrr_var
, 0, sizeof(env
->mtrr_var
));
4684 memset(env
->mtrr_fixed
, 0, sizeof(env
->mtrr_fixed
));
4686 env
->interrupt_injected
= -1;
4687 env
->exception_injected
= -1;
4688 env
->nmi_injected
= false;
4689 #if !defined(CONFIG_USER_ONLY)
4690 /* We hard-wire the BSP to the first CPU. */
4691 apic_designate_bsp(cpu
->apic_state
, s
->cpu_index
== 0);
4693 s
->halted
= !cpu_is_bsp(cpu
);
4695 if (kvm_enabled()) {
4696 kvm_arch_reset_vcpu(cpu
);
4698 else if (hvf_enabled()) {
#ifndef CONFIG_USER_ONLY
bool cpu_is_bsp(X86CPU *cpu)
{
    return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
}

/* TODO: remove me, when reset over QOM tree is implemented */
static void x86_cpu_machine_reset_cb(void *opaque)
{
    X86CPU *cpu = opaque;
    cpu_reset(CPU(cpu));
}
#endif

static void mce_init(X86CPU *cpu)
{
    CPUX86State *cenv = &cpu->env;
    unsigned int bank;

    if (((cenv->cpuid_version >> 8) & 0xf) >= 6
        && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
            (CPUID_MCE | CPUID_MCA)) {
        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
                        (cpu->enable_lmce ? MCG_LMCE_P : 0);
        cenv->mcg_ctl = ~(uint64_t)0;
        for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
            cenv->mce_banks[bank * 4] = ~(uint64_t)0;
        }
    }
}
4735 #ifndef CONFIG_USER_ONLY
4736 APICCommonClass
*apic_get_class(void)
4738 const char *apic_type
= "apic";
4740 /* TODO: in-kernel irqchip for hvf */
4741 if (kvm_apic_in_kernel()) {
4742 apic_type
= "kvm-apic";
4743 } else if (xen_enabled()) {
4744 apic_type
= "xen-apic";
4747 return APIC_COMMON_CLASS(object_class_by_name(apic_type
));
4750 static void x86_cpu_apic_create(X86CPU
*cpu
, Error
**errp
)
4752 APICCommonState
*apic
;
4753 ObjectClass
*apic_class
= OBJECT_CLASS(apic_get_class());
4755 cpu
->apic_state
= DEVICE(object_new(object_class_get_name(apic_class
)));
4757 object_property_add_child(OBJECT(cpu
), "lapic",
4758 OBJECT(cpu
->apic_state
), &error_abort
);
4759 object_unref(OBJECT(cpu
->apic_state
));
4761 qdev_prop_set_uint32(cpu
->apic_state
, "id", cpu
->apic_id
);
4762 /* TODO: convert to link<> */
4763 apic
= APIC_COMMON(cpu
->apic_state
);
4765 apic
->apicbase
= APIC_DEFAULT_ADDRESS
| MSR_IA32_APICBASE_ENABLE
;
4768 static void x86_cpu_apic_realize(X86CPU
*cpu
, Error
**errp
)
4770 APICCommonState
*apic
;
4771 static bool apic_mmio_map_once
;
4773 if (cpu
->apic_state
== NULL
) {
4776 object_property_set_bool(OBJECT(cpu
->apic_state
), true, "realized",
4779 /* Map APIC MMIO area */
4780 apic
= APIC_COMMON(cpu
->apic_state
);
4781 if (!apic_mmio_map_once
) {
4782 memory_region_add_subregion_overlap(get_system_memory(),
4784 MSR_IA32_APICBASE_BASE
,
4787 apic_mmio_map_once
= true;
4791 static void x86_cpu_machine_done(Notifier
*n
, void *unused
)
4793 X86CPU
*cpu
= container_of(n
, X86CPU
, machine_done
);
4794 MemoryRegion
*smram
=
4795 (MemoryRegion
*) object_resolve_path("/machine/smram", NULL
);
4798 cpu
->smram
= g_new(MemoryRegion
, 1);
4799 memory_region_init_alias(cpu
->smram
, OBJECT(cpu
), "smram",
4800 smram
, 0, 1ull << 32);
4801 memory_region_set_enabled(cpu
->smram
, true);
4802 memory_region_add_subregion_overlap(cpu
->cpu_as_root
, 0, cpu
->smram
, 1);
4806 static void x86_cpu_apic_realize(X86CPU
*cpu
, Error
**errp
)
/* Note: Only safe for use on x86(-64) hosts */
static uint32_t x86_host_phys_bits(void)
{
    uint32_t eax;
    uint32_t host_phys_bits;

    host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
    if (eax >= 0x80000008) {
        host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
        /* Note: According to AMD doc 25481 rev 2.34 they have a field
         * at 23:16 that can specify the maximum physical address bits for
         * the guest that can override this value; but I've not seen
         * anything with that set.
         */
        host_phys_bits = eax & 0xff;
    } else {
        /* It's an odd 64 bit machine that doesn't have the leaf for
         * physical address bits; fall back to 36, which is what most
         * older Intel CPUs provide.
         */
        host_phys_bits = 36;
    }

    return host_phys_bits;
}
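/* Raise *min to at least @value (used to bump the cpuid_min_* fields). */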
static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
{
    if (*min < value) {
        *min = value;
    }
}
4844 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
4845 static void x86_cpu_adjust_feat_level(X86CPU
*cpu
, FeatureWord w
)
4847 CPUX86State
*env
= &cpu
->env
;
4848 FeatureWordInfo
*fi
= &feature_word_info
[w
];
4849 uint32_t eax
= fi
->cpuid
.eax
;
4850 uint32_t region
= eax
& 0xF0000000;
4852 assert(feature_word_info
[w
].type
== CPUID_FEATURE_WORD
);
4853 if (!env
->features
[w
]) {
4859 x86_cpu_adjust_level(cpu
, &env
->cpuid_min_level
, eax
);
4862 x86_cpu_adjust_level(cpu
, &env
->cpuid_min_xlevel
, eax
);
4865 x86_cpu_adjust_level(cpu
, &env
->cpuid_min_xlevel2
, eax
);
4870 /* Calculate XSAVE components based on the configured CPU feature flags */
4871 static void x86_cpu_enable_xsave_components(X86CPU
*cpu
)
4873 CPUX86State
*env
= &cpu
->env
;
4877 if (!(env
->features
[FEAT_1_ECX
] & CPUID_EXT_XSAVE
)) {
4882 for (i
= 0; i
< ARRAY_SIZE(x86_ext_save_areas
); i
++) {
4883 const ExtSaveArea
*esa
= &x86_ext_save_areas
[i
];
4884 if (env
->features
[esa
->feature
] & esa
->bits
) {
4885 mask
|= (1ULL << i
);
4889 env
->features
[FEAT_XSAVE_COMP_LO
] = mask
;
4890 env
->features
[FEAT_XSAVE_COMP_HI
] = mask
>> 32;
/***** Steps involved on loading and filtering CPUID data
 *
 * When initializing and realizing a CPU object, the steps
 * involved in setting up CPUID data are:
 *
 * 1) Loading CPU model definition (X86CPUDefinition). This is
 *    implemented by x86_cpu_load_def() and should be completely
 *    transparent, as it is done automatically by instance_init.
 *    No code should need to look at X86CPUDefinition structs
 *    outside instance_init.
 *
 * 2) CPU expansion. This is done by realize before CPUID
 *    filtering, and will make sure host/accelerator data is
 *    loaded for CPU models that depend on host capabilities
 *    (e.g. "host"). Done by x86_cpu_expand_features().
 *
 * 3) CPUID filtering. This initializes extra data related to
 *    CPUID, and checks if the host supports all capabilities
 *    required by the CPU. Runnability of a CPU model is
 *    determined at this step. Done by x86_cpu_filter_features().
 *
 * Some operations don't require all steps to be performed.
 * More precisely:
 *
 * - CPU instance creation (instance_init) will run only CPU
 *   model loading. CPU expansion can't run at instance_init-time
 *   because host/accelerator data may not be available yet.
 * - CPU realization will perform both CPU model expansion and CPUID
 *   filtering, and return an error in case one of them fails.
 * - query-cpu-definitions needs to run all 3 steps. It needs
 *   to run CPUID filtering, as the 'unavailable-features'
 *   field is set based on the filtering results.
 * - The query-cpu-model-expansion QMP command only needs to run
 *   CPU model loading and CPU expansion. It should not filter
 *   any CPUID data based on host capabilities.
 */
4930 /* Expand CPU configuration data, based on configured features
4931 * and host/accelerator capabilities when appropriate.
4933 static void x86_cpu_expand_features(X86CPU
*cpu
, Error
**errp
)
4935 CPUX86State
*env
= &cpu
->env
;
4938 Error
*local_err
= NULL
;
4940 /*TODO: Now cpu->max_features doesn't overwrite features
4941 * set using QOM properties, and we can convert
4942 * plus_features & minus_features to global properties
4943 * inside x86_cpu_parse_featurestr() too.
4945 if (cpu
->max_features
) {
4946 for (w
= 0; w
< FEATURE_WORDS
; w
++) {
4947 /* Override only features that weren't set explicitly
4951 x86_cpu_get_supported_feature_word(w
, cpu
->migratable
) &
4952 ~env
->user_features
[w
] & \
4953 ~feature_word_info
[w
].no_autoenable_flags
;
4957 for (l
= plus_features
; l
; l
= l
->next
) {
4958 const char *prop
= l
->data
;
4959 object_property_set_bool(OBJECT(cpu
), true, prop
, &local_err
);
4965 for (l
= minus_features
; l
; l
= l
->next
) {
4966 const char *prop
= l
->data
;
4967 object_property_set_bool(OBJECT(cpu
), false, prop
, &local_err
);
4973 if (!kvm_enabled() || !cpu
->expose_kvm
) {
4974 env
->features
[FEAT_KVM
] = 0;
4977 x86_cpu_enable_xsave_components(cpu
);
4979 /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
4980 x86_cpu_adjust_feat_level(cpu
, FEAT_7_0_EBX
);
4981 if (cpu
->full_cpuid_auto_level
) {
4982 x86_cpu_adjust_feat_level(cpu
, FEAT_1_EDX
);
4983 x86_cpu_adjust_feat_level(cpu
, FEAT_1_ECX
);
4984 x86_cpu_adjust_feat_level(cpu
, FEAT_6_EAX
);
4985 x86_cpu_adjust_feat_level(cpu
, FEAT_7_0_ECX
);
4986 x86_cpu_adjust_feat_level(cpu
, FEAT_8000_0001_EDX
);
4987 x86_cpu_adjust_feat_level(cpu
, FEAT_8000_0001_ECX
);
4988 x86_cpu_adjust_feat_level(cpu
, FEAT_8000_0007_EDX
);
4989 x86_cpu_adjust_feat_level(cpu
, FEAT_8000_0008_EBX
);
4990 x86_cpu_adjust_feat_level(cpu
, FEAT_C000_0001_EDX
);
4991 x86_cpu_adjust_feat_level(cpu
, FEAT_SVM
);
4992 x86_cpu_adjust_feat_level(cpu
, FEAT_XSAVE
);
4993 /* SVM requires CPUID[0x8000000A] */
4994 if (env
->features
[FEAT_8000_0001_ECX
] & CPUID_EXT3_SVM
) {
4995 x86_cpu_adjust_level(cpu
, &env
->cpuid_min_xlevel
, 0x8000000A);
4998 /* SEV requires CPUID[0x8000001F] */
4999 if (sev_enabled()) {
5000 x86_cpu_adjust_level(cpu
, &env
->cpuid_min_xlevel
, 0x8000001F);
5004 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
5005 if (env
->cpuid_level
== UINT32_MAX
) {
5006 env
->cpuid_level
= env
->cpuid_min_level
;
5008 if (env
->cpuid_xlevel
== UINT32_MAX
) {
5009 env
->cpuid_xlevel
= env
->cpuid_min_xlevel
;
5011 if (env
->cpuid_xlevel2
== UINT32_MAX
) {
5012 env
->cpuid_xlevel2
= env
->cpuid_min_xlevel2
;
5016 if (local_err
!= NULL
) {
5017 error_propagate(errp
, local_err
);
5022 * Finishes initialization of CPUID data, filters CPU feature
5023 * words based on host availability of each feature.
5025 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
5027 static int x86_cpu_filter_features(X86CPU
*cpu
)
5029 CPUX86State
*env
= &cpu
->env
;
5033 for (w
= 0; w
< FEATURE_WORDS
; w
++) {
5034 uint32_t host_feat
=
5035 x86_cpu_get_supported_feature_word(w
, false);
5036 uint32_t requested_features
= env
->features
[w
];
5037 env
->features
[w
] &= host_feat
;
5038 cpu
->filtered_features
[w
] = requested_features
& ~env
->features
[w
];
5039 if (cpu
->filtered_features
[w
]) {
5044 if ((env
->features
[FEAT_7_0_EBX
] & CPUID_7_0_EBX_INTEL_PT
) &&
5046 KVMState
*s
= CPU(cpu
)->kvm_state
;
5047 uint32_t eax_0
= kvm_arch_get_supported_cpuid(s
, 0x14, 0, R_EAX
);
5048 uint32_t ebx_0
= kvm_arch_get_supported_cpuid(s
, 0x14, 0, R_EBX
);
5049 uint32_t ecx_0
= kvm_arch_get_supported_cpuid(s
, 0x14, 0, R_ECX
);
5050 uint32_t eax_1
= kvm_arch_get_supported_cpuid(s
, 0x14, 1, R_EAX
);
5051 uint32_t ebx_1
= kvm_arch_get_supported_cpuid(s
, 0x14, 1, R_EBX
);
5054 ((ebx_0
& INTEL_PT_MINIMAL_EBX
) != INTEL_PT_MINIMAL_EBX
) ||
5055 ((ecx_0
& INTEL_PT_MINIMAL_ECX
) != INTEL_PT_MINIMAL_ECX
) ||
5056 ((eax_1
& INTEL_PT_MTC_BITMAP
) != INTEL_PT_MTC_BITMAP
) ||
5057 ((eax_1
& INTEL_PT_ADDR_RANGES_NUM_MASK
) <
5058 INTEL_PT_ADDR_RANGES_NUM
) ||
5059 ((ebx_1
& (INTEL_PT_PSB_BITMAP
| INTEL_PT_CYCLE_BITMAP
)) !=
5060 (INTEL_PT_PSB_BITMAP
| INTEL_PT_CYCLE_BITMAP
)) ||
5061 (ecx_0
& INTEL_PT_IP_LIP
)) {
5063 * Processor Trace capabilities aren't configurable, so if the
5064 * host can't emulate the capabilities we report on
5065 * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
5067 env
->features
[FEAT_7_0_EBX
] &= ~CPUID_7_0_EBX_INTEL_PT
;
5068 cpu
->filtered_features
[FEAT_7_0_EBX
] |= CPUID_7_0_EBX_INTEL_PT
;
5076 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
5077 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
5078 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
5079 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
5080 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
5081 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
5082 static void x86_cpu_realizefn(DeviceState
*dev
, Error
**errp
)
5084 CPUState
*cs
= CPU(dev
);
5085 X86CPU
*cpu
= X86_CPU(dev
);
5086 X86CPUClass
*xcc
= X86_CPU_GET_CLASS(dev
);
5087 CPUX86State
*env
= &cpu
->env
;
5088 Error
*local_err
= NULL
;
5089 static bool ht_warned
;
5091 if (xcc
->host_cpuid_required
) {
5092 if (!accel_uses_host_cpuid()) {
5093 char *name
= x86_cpu_class_get_model_name(xcc
);
5094 error_setg(&local_err
, "CPU model '%s' requires KVM", name
);
5099 if (enable_cpu_pm
) {
5100 host_cpuid(5, 0, &cpu
->mwait
.eax
, &cpu
->mwait
.ebx
,
5101 &cpu
->mwait
.ecx
, &cpu
->mwait
.edx
);
5102 env
->features
[FEAT_1_ECX
] |= CPUID_EXT_MONITOR
;
5106 /* mwait extended info: needed for Core compatibility */
5107 /* We always wake on interrupt even if host does not have the capability */
5108 cpu
->mwait
.ecx
|= CPUID_MWAIT_EMX
| CPUID_MWAIT_IBE
;
5110 if (cpu
->apic_id
== UNASSIGNED_APIC_ID
) {
5111 error_setg(errp
, "apic-id property was not initialized properly");
5115 x86_cpu_expand_features(cpu
, &local_err
);
5120 if (x86_cpu_filter_features(cpu
) &&
5121 (cpu
->check_cpuid
|| cpu
->enforce_cpuid
)) {
5122 x86_cpu_report_filtered_features(cpu
);
5123 if (cpu
->enforce_cpuid
) {
5124 error_setg(&local_err
,
5125 accel_uses_host_cpuid() ?
5126 "Host doesn't support requested features" :
5127 "TCG doesn't support requested features");
5132 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
5135 if (IS_AMD_CPU(env
)) {
5136 env
->features
[FEAT_8000_0001_EDX
] &= ~CPUID_EXT2_AMD_ALIASES
;
5137 env
->features
[FEAT_8000_0001_EDX
] |= (env
->features
[FEAT_1_EDX
]
5138 & CPUID_EXT2_AMD_ALIASES
);
    /* For 64-bit systems think about the number of physical bits to present.
     * Ideally this should be the same as the host; anything other than matching
     * the host can cause incorrect guest behaviour.
     * QEMU used to pick the magic value of 40 bits that corresponds to
     * consumer AMD devices but nothing else.
     */
5147 if (env
->features
[FEAT_8000_0001_EDX
] & CPUID_EXT2_LM
) {
5148 if (accel_uses_host_cpuid()) {
5149 uint32_t host_phys_bits
= x86_host_phys_bits();
5152 if (cpu
->host_phys_bits
) {
5153 /* The user asked for us to use the host physical bits */
5154 cpu
->phys_bits
= host_phys_bits
;
5157 /* Print a warning if the user set it to a value that's not the
5160 if (cpu
->phys_bits
!= host_phys_bits
&& cpu
->phys_bits
!= 0 &&
                warn_report("Host physical bits (%u)"
                            " do not match phys-bits property (%u)",
                            host_phys_bits, cpu->phys_bits);
5168 if (cpu
->phys_bits
&&
5169 (cpu
->phys_bits
> TARGET_PHYS_ADDR_SPACE_BITS
||
5170 cpu
->phys_bits
< 32)) {
5171 error_setg(errp
, "phys-bits should be between 32 and %u "
5173 TARGET_PHYS_ADDR_SPACE_BITS
, cpu
->phys_bits
);
5177 if (cpu
->phys_bits
&& cpu
->phys_bits
!= TCG_PHYS_ADDR_BITS
) {
5178 error_setg(errp
, "TCG only supports phys-bits=%u",
5179 TCG_PHYS_ADDR_BITS
);
5183 /* 0 means it was not explicitly set by the user (or by machine
5184 * compat_props or by the host code above). In this case, the default
5185 * is the value used by TCG (40).
5187 if (cpu
->phys_bits
== 0) {
5188 cpu
->phys_bits
= TCG_PHYS_ADDR_BITS
;
5191 /* For 32 bit systems don't use the user set value, but keep
5192 * phys_bits consistent with what we tell the guest.
5194 if (cpu
->phys_bits
!= 0) {
5195 error_setg(errp
, "phys-bits is not user-configurable in 32 bit");
5199 if (env
->features
[FEAT_1_EDX
] & CPUID_PSE36
) {
5200 cpu
->phys_bits
= 36;
5202 cpu
->phys_bits
= 32;
5206 /* Cache information initialization */
5207 if (!cpu
->legacy_cache
) {
5208 if (!xcc
->cpu_def
|| !xcc
->cpu_def
->cache_info
) {
5209 char *name
= x86_cpu_class_get_model_name(xcc
);
5211 "CPU model '%s' doesn't support legacy-cache=off", name
);
5215 env
->cache_info_cpuid2
= env
->cache_info_cpuid4
= env
->cache_info_amd
=
5216 *xcc
->cpu_def
->cache_info
;
5218 /* Build legacy cache information */
5219 env
->cache_info_cpuid2
.l1d_cache
= &legacy_l1d_cache
;
5220 env
->cache_info_cpuid2
.l1i_cache
= &legacy_l1i_cache
;
5221 env
->cache_info_cpuid2
.l2_cache
= &legacy_l2_cache_cpuid2
;
5222 env
->cache_info_cpuid2
.l3_cache
= &legacy_l3_cache
;
5224 env
->cache_info_cpuid4
.l1d_cache
= &legacy_l1d_cache
;
5225 env
->cache_info_cpuid4
.l1i_cache
= &legacy_l1i_cache
;
5226 env
->cache_info_cpuid4
.l2_cache
= &legacy_l2_cache
;
5227 env
->cache_info_cpuid4
.l3_cache
= &legacy_l3_cache
;
5229 env
->cache_info_amd
.l1d_cache
= &legacy_l1d_cache_amd
;
5230 env
->cache_info_amd
.l1i_cache
= &legacy_l1i_cache_amd
;
5231 env
->cache_info_amd
.l2_cache
= &legacy_l2_cache_amd
;
5232 env
->cache_info_amd
.l3_cache
= &legacy_l3_cache
;
5236 cpu_exec_realizefn(cs
, &local_err
);
5237 if (local_err
!= NULL
) {
5238 error_propagate(errp
, local_err
);
5242 #ifndef CONFIG_USER_ONLY
5243 qemu_register_reset(x86_cpu_machine_reset_cb
, cpu
);
5245 if (cpu
->env
.features
[FEAT_1_EDX
] & CPUID_APIC
|| smp_cpus
> 1) {
5246 x86_cpu_apic_create(cpu
, &local_err
);
5247 if (local_err
!= NULL
) {
5255 #ifndef CONFIG_USER_ONLY
5256 if (tcg_enabled()) {
5257 cpu
->cpu_as_mem
= g_new(MemoryRegion
, 1);
5258 cpu
->cpu_as_root
= g_new(MemoryRegion
, 1);
5260 /* Outer container... */
5261 memory_region_init(cpu
->cpu_as_root
, OBJECT(cpu
), "memory", ~0ull);
5262 memory_region_set_enabled(cpu
->cpu_as_root
, true);
5264 /* ... with two regions inside: normal system memory with low
5267 memory_region_init_alias(cpu
->cpu_as_mem
, OBJECT(cpu
), "memory",
5268 get_system_memory(), 0, ~0ull);
5269 memory_region_add_subregion_overlap(cpu
->cpu_as_root
, 0, cpu
->cpu_as_mem
, 0);
5270 memory_region_set_enabled(cpu
->cpu_as_mem
, true);
5273 cpu_address_space_init(cs
, 0, "cpu-memory", cs
->memory
);
5274 cpu_address_space_init(cs
, 1, "cpu-smm", cpu
->cpu_as_root
);
5276 /* ... SMRAM with higher priority, linked from /machine/smram. */
5277 cpu
->machine_done
.notify
= x86_cpu_machine_done
;
5278 qemu_add_machine_init_done_notifier(&cpu
->machine_done
);
     * Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU
     * fixes this issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets, cores, threads), it is still better to give
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't been populated yet and the checking is incorrect.
5293 if (IS_AMD_CPU(env
) &&
5294 !(env
->features
[FEAT_8000_0001_ECX
] & CPUID_EXT3_TOPOEXT
) &&
5295 cs
->nr_threads
> 1 && !ht_warned
) {
        warn_report("This family of AMD CPU doesn't support "
                    "hyperthreading (%d)",
                    cs->nr_threads);
        error_printf("Please configure -smp options properly"
                     " or try enabling topoext feature.\n");
5304 x86_cpu_apic_realize(cpu
, &local_err
);
5305 if (local_err
!= NULL
) {
5310 xcc
->parent_realize(dev
, &local_err
);
5313 if (local_err
!= NULL
) {
5314 error_propagate(errp
, local_err
);
static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
{
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu_remove_sync(CPU(dev));
    qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
#endif

    if (cpu->apic_state) {
        object_unparent(OBJECT(cpu->apic_state));
        cpu->apic_state = NULL;
    }

    xcc->parent_unrealize(dev, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
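/* BitProperty maps a boolean QOM property onto one or more bits of a CPU
 * feature word; see x86_cpu_register_bit_prop() below. */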
5342 typedef struct BitProperty
{
5347 static void x86_cpu_get_bit_prop(Object
*obj
, Visitor
*v
, const char *name
,
5348 void *opaque
, Error
**errp
)
5350 X86CPU
*cpu
= X86_CPU(obj
);
5351 BitProperty
*fp
= opaque
;
5352 uint32_t f
= cpu
->env
.features
[fp
->w
];
5353 bool value
= (f
& fp
->mask
) == fp
->mask
;
5354 visit_type_bool(v
, name
, &value
, errp
);
5357 static void x86_cpu_set_bit_prop(Object
*obj
, Visitor
*v
, const char *name
,
5358 void *opaque
, Error
**errp
)
5360 DeviceState
*dev
= DEVICE(obj
);
5361 X86CPU
*cpu
= X86_CPU(obj
);
5362 BitProperty
*fp
= opaque
;
5363 Error
*local_err
= NULL
;
5366 if (dev
->realized
) {
5367 qdev_prop_set_after_realize(dev
, name
, errp
);
5371 visit_type_bool(v
, name
, &value
, &local_err
);
5373 error_propagate(errp
, local_err
);
5378 cpu
->env
.features
[fp
->w
] |= fp
->mask
;
5380 cpu
->env
.features
[fp
->w
] &= ~fp
->mask
;
5382 cpu
->env
.user_features
[fp
->w
] |= fp
->mask
;
5385 static void x86_cpu_release_bit_prop(Object
*obj
, const char *name
,
5388 BitProperty
*prop
= opaque
;
5392 /* Register a boolean property to get/set a single bit in a uint32_t field.
5394 * The same property name can be registered multiple times to make it affect
5395 * multiple bits in the same FeatureWord. In that case, the getter will return
5396 * true only if all bits are set.
5398 static void x86_cpu_register_bit_prop(X86CPU
*cpu
,
5399 const char *prop_name
,
5405 uint32_t mask
= (1UL << bitnr
);
5407 op
= object_property_find(OBJECT(cpu
), prop_name
, NULL
);
5413 fp
= g_new0(BitProperty
, 1);
5416 object_property_add(OBJECT(cpu
), prop_name
, "bool",
5417 x86_cpu_get_bit_prop
,
5418 x86_cpu_set_bit_prop
,
5419 x86_cpu_release_bit_prop
, fp
, &error_abort
);
5423 static void x86_cpu_register_feature_bit_props(X86CPU
*cpu
,
5427 FeatureWordInfo
*fi
= &feature_word_info
[w
];
5428 const char *name
= fi
->feat_names
[bitnr
];
5434 /* Property names should use "-" instead of "_".
5435 * Old names containing underscores are registered as aliases
5436 * using object_property_add_alias()
5438 assert(!strchr(name
, '_'));
5439 /* aliases don't use "|" delimiters anymore, they are registered
5440 * manually using object_property_add_alias() */
5441 assert(!strchr(name
, '|'));
5442 x86_cpu_register_bit_prop(cpu
, name
, w
, bitnr
);
5445 static GuestPanicInformation
*x86_cpu_get_crash_info(CPUState
*cs
)
5447 X86CPU
*cpu
= X86_CPU(cs
);
5448 CPUX86State
*env
= &cpu
->env
;
5449 GuestPanicInformation
*panic_info
= NULL
;
5451 if (env
->features
[FEAT_HYPERV_EDX
] & HV_GUEST_CRASH_MSR_AVAILABLE
) {
5452 panic_info
= g_malloc0(sizeof(GuestPanicInformation
));
5454 panic_info
->type
= GUEST_PANIC_INFORMATION_TYPE_HYPER_V
;
5456 assert(HV_CRASH_PARAMS
>= 5);
5457 panic_info
->u
.hyper_v
.arg1
= env
->msr_hv_crash_params
[0];
5458 panic_info
->u
.hyper_v
.arg2
= env
->msr_hv_crash_params
[1];
5459 panic_info
->u
.hyper_v
.arg3
= env
->msr_hv_crash_params
[2];
5460 panic_info
->u
.hyper_v
.arg4
= env
->msr_hv_crash_params
[3];
5461 panic_info
->u
.hyper_v
.arg5
= env
->msr_hv_crash_params
[4];
5466 static void x86_cpu_get_crash_info_qom(Object
*obj
, Visitor
*v
,
5467 const char *name
, void *opaque
,
5470 CPUState
*cs
= CPU(obj
);
5471 GuestPanicInformation
*panic_info
;
5473 if (!cs
->crash_occurred
) {
        error_setg(errp, "No crash occurred");
5478 panic_info
= x86_cpu_get_crash_info(cs
);
5479 if (panic_info
== NULL
) {
5480 error_setg(errp
, "No crash information");
5484 visit_type_GuestPanicInformation(v
, "crash-information", &panic_info
,
5486 qapi_free_GuestPanicInformation(panic_info
);
5489 static void x86_cpu_initfn(Object
*obj
)
5491 CPUState
*cs
= CPU(obj
);
5492 X86CPU
*cpu
= X86_CPU(obj
);
5493 X86CPUClass
*xcc
= X86_CPU_GET_CLASS(obj
);
5494 CPUX86State
*env
= &cpu
->env
;
5499 object_property_add(obj
, "family", "int",
5500 x86_cpuid_version_get_family
,
5501 x86_cpuid_version_set_family
, NULL
, NULL
, NULL
);
5502 object_property_add(obj
, "model", "int",
5503 x86_cpuid_version_get_model
,
5504 x86_cpuid_version_set_model
, NULL
, NULL
, NULL
);
5505 object_property_add(obj
, "stepping", "int",
5506 x86_cpuid_version_get_stepping
,
5507 x86_cpuid_version_set_stepping
, NULL
, NULL
, NULL
);
5508 object_property_add_str(obj
, "vendor",
5509 x86_cpuid_get_vendor
,
5510 x86_cpuid_set_vendor
, NULL
);
5511 object_property_add_str(obj
, "model-id",
5512 x86_cpuid_get_model_id
,
5513 x86_cpuid_set_model_id
, NULL
);
5514 object_property_add(obj
, "tsc-frequency", "int",
5515 x86_cpuid_get_tsc_freq
,
5516 x86_cpuid_set_tsc_freq
, NULL
, NULL
, NULL
);
5517 object_property_add(obj
, "feature-words", "X86CPUFeatureWordInfo",
5518 x86_cpu_get_feature_words
,
5519 NULL
, NULL
, (void *)env
->features
, NULL
);
5520 object_property_add(obj
, "filtered-features", "X86CPUFeatureWordInfo",
5521 x86_cpu_get_feature_words
,
5522 NULL
, NULL
, (void *)cpu
->filtered_features
, NULL
);
5524 object_property_add(obj
, "crash-information", "GuestPanicInformation",
5525 x86_cpu_get_crash_info_qom
, NULL
, NULL
, NULL
, NULL
);
5527 cpu
->hyperv_spinlock_attempts
= HYPERV_SPINLOCK_NEVER_RETRY
;
5529 for (w
= 0; w
< FEATURE_WORDS
; w
++) {
5532 for (bitnr
= 0; bitnr
< 32; bitnr
++) {
5533 x86_cpu_register_feature_bit_props(cpu
, w
, bitnr
);
5537 object_property_add_alias(obj
, "sse3", obj
, "pni", &error_abort
);
5538 object_property_add_alias(obj
, "pclmuldq", obj
, "pclmulqdq", &error_abort
);
5539 object_property_add_alias(obj
, "sse4-1", obj
, "sse4.1", &error_abort
);
5540 object_property_add_alias(obj
, "sse4-2", obj
, "sse4.2", &error_abort
);
5541 object_property_add_alias(obj
, "xd", obj
, "nx", &error_abort
);
5542 object_property_add_alias(obj
, "ffxsr", obj
, "fxsr-opt", &error_abort
);
5543 object_property_add_alias(obj
, "i64", obj
, "lm", &error_abort
);
5545 object_property_add_alias(obj
, "ds_cpl", obj
, "ds-cpl", &error_abort
);
5546 object_property_add_alias(obj
, "tsc_adjust", obj
, "tsc-adjust", &error_abort
);
5547 object_property_add_alias(obj
, "fxsr_opt", obj
, "fxsr-opt", &error_abort
);
5548 object_property_add_alias(obj
, "lahf_lm", obj
, "lahf-lm", &error_abort
);
5549 object_property_add_alias(obj
, "cmp_legacy", obj
, "cmp-legacy", &error_abort
);
5550 object_property_add_alias(obj
, "nodeid_msr", obj
, "nodeid-msr", &error_abort
);
5551 object_property_add_alias(obj
, "perfctr_core", obj
, "perfctr-core", &error_abort
);
5552 object_property_add_alias(obj
, "perfctr_nb", obj
, "perfctr-nb", &error_abort
);
5553 object_property_add_alias(obj
, "kvm_nopiodelay", obj
, "kvm-nopiodelay", &error_abort
);
5554 object_property_add_alias(obj
, "kvm_mmu", obj
, "kvm-mmu", &error_abort
);
5555 object_property_add_alias(obj
, "kvm_asyncpf", obj
, "kvm-asyncpf", &error_abort
);
5556 object_property_add_alias(obj
, "kvm_steal_time", obj
, "kvm-steal-time", &error_abort
);
5557 object_property_add_alias(obj
, "kvm_pv_eoi", obj
, "kvm-pv-eoi", &error_abort
);
5558 object_property_add_alias(obj
, "kvm_pv_unhalt", obj
, "kvm-pv-unhalt", &error_abort
);
5559 object_property_add_alias(obj
, "svm_lock", obj
, "svm-lock", &error_abort
);
5560 object_property_add_alias(obj
, "nrip_save", obj
, "nrip-save", &error_abort
);
5561 object_property_add_alias(obj
, "tsc_scale", obj
, "tsc-scale", &error_abort
);
5562 object_property_add_alias(obj
, "vmcb_clean", obj
, "vmcb-clean", &error_abort
);
5563 object_property_add_alias(obj
, "pause_filter", obj
, "pause-filter", &error_abort
);
5564 object_property_add_alias(obj
, "sse4_1", obj
, "sse4.1", &error_abort
);
5565 object_property_add_alias(obj
, "sse4_2", obj
, "sse4.2", &error_abort
);
5568 x86_cpu_load_def(cpu
, xcc
->cpu_def
, &error_abort
);
5572 static int64_t x86_cpu_get_arch_id(CPUState
*cs
)
5574 X86CPU
*cpu
= X86_CPU(cs
);
5576 return cpu
->apic_id
;
5579 static bool x86_cpu_get_paging_enabled(const CPUState
*cs
)
5581 X86CPU
*cpu
= X86_CPU(cs
);
5583 return cpu
->env
.cr
[0] & CR0_PG_MASK
;
5586 static void x86_cpu_set_pc(CPUState
*cs
, vaddr value
)
5588 X86CPU
*cpu
= X86_CPU(cs
);
5590 cpu
->env
.eip
= value
;
5593 static void x86_cpu_synchronize_from_tb(CPUState
*cs
, TranslationBlock
*tb
)
5595 X86CPU
*cpu
= X86_CPU(cs
);
5597 cpu
->env
.eip
= tb
->pc
- tb
->cs_base
;
5600 int x86_cpu_pending_interrupt(CPUState
*cs
, int interrupt_request
)
5602 X86CPU
*cpu
= X86_CPU(cs
);
5603 CPUX86State
*env
= &cpu
->env
;
5605 #if !defined(CONFIG_USER_ONLY)
5606 if (interrupt_request
& CPU_INTERRUPT_POLL
) {
5607 return CPU_INTERRUPT_POLL
;
5610 if (interrupt_request
& CPU_INTERRUPT_SIPI
) {
5611 return CPU_INTERRUPT_SIPI
;
5614 if (env
->hflags2
& HF2_GIF_MASK
) {
5615 if ((interrupt_request
& CPU_INTERRUPT_SMI
) &&
5616 !(env
->hflags
& HF_SMM_MASK
)) {
5617 return CPU_INTERRUPT_SMI
;
5618 } else if ((interrupt_request
& CPU_INTERRUPT_NMI
) &&
5619 !(env
->hflags2
& HF2_NMI_MASK
)) {
5620 return CPU_INTERRUPT_NMI
;
5621 } else if (interrupt_request
& CPU_INTERRUPT_MCE
) {
5622 return CPU_INTERRUPT_MCE
;
5623 } else if ((interrupt_request
& CPU_INTERRUPT_HARD
) &&
5624 (((env
->hflags2
& HF2_VINTR_MASK
) &&
5625 (env
->hflags2
& HF2_HIF_MASK
)) ||
5626 (!(env
->hflags2
& HF2_VINTR_MASK
) &&
5627 (env
->eflags
& IF_MASK
&&
5628 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
))))) {
5629 return CPU_INTERRUPT_HARD
;
5630 #if !defined(CONFIG_USER_ONLY)
5631 } else if ((interrupt_request
& CPU_INTERRUPT_VIRQ
) &&
5632 (env
->eflags
& IF_MASK
) &&
5633 !(env
->hflags
& HF_INHIBIT_IRQ_MASK
)) {
5634 return CPU_INTERRUPT_VIRQ
;
5642 static bool x86_cpu_has_work(CPUState
*cs
)
5644 return x86_cpu_pending_interrupt(cs
, cs
->interrupt_request
) != 0;
5647 static void x86_disas_set_info(CPUState
*cs
, disassemble_info
*info
)
5649 X86CPU
*cpu
= X86_CPU(cs
);
5650 CPUX86State
*env
= &cpu
->env
;
5652 info
->mach
= (env
->hflags
& HF_CS64_MASK
? bfd_mach_x86_64
5653 : env
->hflags
& HF_CS32_MASK
? bfd_mach_i386_i386
5654 : bfd_mach_i386_i8086
);
5655 info
->print_insn
= print_insn_i386
;
5657 info
->cap_arch
= CS_ARCH_X86
;
5658 info
->cap_mode
= (env
->hflags
& HF_CS64_MASK
? CS_MODE_64
5659 : env
->hflags
& HF_CS32_MASK
? CS_MODE_32
5661 info
->cap_insn_unit
= 1;
5662 info
->cap_insn_split
= 8;
5665 void x86_update_hflags(CPUX86State
*env
)
5668 #define HFLAG_COPY_MASK \
5669 ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
5670 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
5671 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
5672 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
5674 hflags
= env
->hflags
& HFLAG_COPY_MASK
;
5675 hflags
|= (env
->segs
[R_SS
].flags
>> DESC_DPL_SHIFT
) & HF_CPL_MASK
;
5676 hflags
|= (env
->cr
[0] & CR0_PE_MASK
) << (HF_PE_SHIFT
- CR0_PE_SHIFT
);
5677 hflags
|= (env
->cr
[0] << (HF_MP_SHIFT
- CR0_MP_SHIFT
)) &
5678 (HF_MP_MASK
| HF_EM_MASK
| HF_TS_MASK
);
5679 hflags
|= (env
->eflags
& (HF_TF_MASK
| HF_VM_MASK
| HF_IOPL_MASK
));
5681 if (env
->cr
[4] & CR4_OSFXSR_MASK
) {
5682 hflags
|= HF_OSFXSR_MASK
;
5685 if (env
->efer
& MSR_EFER_LMA
) {
5686 hflags
|= HF_LMA_MASK
;
5689 if ((hflags
& HF_LMA_MASK
) && (env
->segs
[R_CS
].flags
& DESC_L_MASK
)) {
5690 hflags
|= HF_CS32_MASK
| HF_SS32_MASK
| HF_CS64_MASK
;
5692 hflags
|= (env
->segs
[R_CS
].flags
& DESC_B_MASK
) >>
5693 (DESC_B_SHIFT
- HF_CS32_SHIFT
);
5694 hflags
|= (env
->segs
[R_SS
].flags
& DESC_B_MASK
) >>
5695 (DESC_B_SHIFT
- HF_SS32_SHIFT
);
5696 if (!(env
->cr
[0] & CR0_PE_MASK
) || (env
->eflags
& VM_MASK
) ||
5697 !(hflags
& HF_CS32_MASK
)) {
5698 hflags
|= HF_ADDSEG_MASK
;
5700 hflags
|= ((env
->segs
[R_DS
].base
| env
->segs
[R_ES
].base
|
5701 env
->segs
[R_SS
].base
) != 0) << HF_ADDSEG_SHIFT
;
5704 env
->hflags
= hflags
;
5707 static Property x86_cpu_properties
[] = {
5708 #ifdef CONFIG_USER_ONLY
5709 /* apic_id = 0 by default for *-user, see commit 9886e834 */
5710 DEFINE_PROP_UINT32("apic-id", X86CPU
, apic_id
, 0),
5711 DEFINE_PROP_INT32("thread-id", X86CPU
, thread_id
, 0),
5712 DEFINE_PROP_INT32("core-id", X86CPU
, core_id
, 0),
5713 DEFINE_PROP_INT32("socket-id", X86CPU
, socket_id
, 0),
5715 DEFINE_PROP_UINT32("apic-id", X86CPU
, apic_id
, UNASSIGNED_APIC_ID
),
5716 DEFINE_PROP_INT32("thread-id", X86CPU
, thread_id
, -1),
5717 DEFINE_PROP_INT32("core-id", X86CPU
, core_id
, -1),
5718 DEFINE_PROP_INT32("socket-id", X86CPU
, socket_id
, -1),
5720 DEFINE_PROP_INT32("node-id", X86CPU
, node_id
, CPU_UNSET_NUMA_NODE_ID
),
5721 DEFINE_PROP_BOOL("pmu", X86CPU
, enable_pmu
, false),
5722 { .name
= "hv-spinlocks", .info
= &qdev_prop_spinlocks
},
5723 DEFINE_PROP_BOOL("hv-relaxed", X86CPU
, hyperv_relaxed_timing
, false),
5724 DEFINE_PROP_BOOL("hv-vapic", X86CPU
, hyperv_vapic
, false),
5725 DEFINE_PROP_BOOL("hv-time", X86CPU
, hyperv_time
, false),
5726 DEFINE_PROP_BOOL("hv-crash", X86CPU
, hyperv_crash
, false),
5727 DEFINE_PROP_BOOL("hv-reset", X86CPU
, hyperv_reset
, false),
5728 DEFINE_PROP_BOOL("hv-vpindex", X86CPU
, hyperv_vpindex
, false),
5729 DEFINE_PROP_BOOL("hv-runtime", X86CPU
, hyperv_runtime
, false),
5730 DEFINE_PROP_BOOL("hv-synic", X86CPU
, hyperv_synic
, false),
5731 DEFINE_PROP_BOOL("hv-stimer", X86CPU
, hyperv_stimer
, false),
5732 DEFINE_PROP_BOOL("hv-frequencies", X86CPU
, hyperv_frequencies
, false),
5733 DEFINE_PROP_BOOL("hv-reenlightenment", X86CPU
, hyperv_reenlightenment
, false),
5734 DEFINE_PROP_BOOL("hv-tlbflush", X86CPU
, hyperv_tlbflush
, false),
5735 DEFINE_PROP_BOOL("hv-evmcs", X86CPU
, hyperv_evmcs
, false),
5736 DEFINE_PROP_BOOL("hv-ipi", X86CPU
, hyperv_ipi
, false),
5737 DEFINE_PROP_BOOL("check", X86CPU
, check_cpuid
, true),
5738 DEFINE_PROP_BOOL("enforce", X86CPU
, enforce_cpuid
, false),
5739 DEFINE_PROP_BOOL("kvm", X86CPU
, expose_kvm
, true),
5740 DEFINE_PROP_UINT32("phys-bits", X86CPU
, phys_bits
, 0),
5741 DEFINE_PROP_BOOL("host-phys-bits", X86CPU
, host_phys_bits
, false),
5742 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU
, fill_mtrr_mask
, true),
5743 DEFINE_PROP_UINT32("level", X86CPU
, env
.cpuid_level
, UINT32_MAX
),
5744 DEFINE_PROP_UINT32("xlevel", X86CPU
, env
.cpuid_xlevel
, UINT32_MAX
),
5745 DEFINE_PROP_UINT32("xlevel2", X86CPU
, env
.cpuid_xlevel2
, UINT32_MAX
),
5746 DEFINE_PROP_UINT32("min-level", X86CPU
, env
.cpuid_min_level
, 0),
5747 DEFINE_PROP_UINT32("min-xlevel", X86CPU
, env
.cpuid_min_xlevel
, 0),
5748 DEFINE_PROP_UINT32("min-xlevel2", X86CPU
, env
.cpuid_min_xlevel2
, 0),
5749 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU
, full_cpuid_auto_level
, true),
5750 DEFINE_PROP_STRING("hv-vendor-id", X86CPU
, hyperv_vendor_id
),
5751 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU
, enable_cpuid_0xb
, true),
5752 DEFINE_PROP_BOOL("lmce", X86CPU
, enable_lmce
, false),
5753 DEFINE_PROP_BOOL("l3-cache", X86CPU
, enable_l3_cache
, true),
5754 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU
, kvm_no_smi_migration
,
5756 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU
, vmware_cpuid_freq
, true),
5757 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU
, expose_tcg
, true),
5758 DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU
, migrate_smi_count
,
     * legacy_cache defaults to true unless the CPU model provides its
     * own cache information (see x86_cpu_load_def()).
5764 DEFINE_PROP_BOOL("legacy-cache", X86CPU
, legacy_cache
, true),
5767 * From "Requirements for Implementing the Microsoft
5768 * Hypervisor Interface":
5769 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
5771 * "Starting with Windows Server 2012 and Windows 8, if
5772 * CPUID.40000005.EAX contains a value of -1, Windows assumes that
5773 * the hypervisor imposes no specific limit to the number of VPs.
5774 * In this case, Windows Server 2012 guest VMs may use more than
5775 * 64 VPs, up to the maximum supported number of processors applicable
5776 * to the specific Windows version being used."
5778 DEFINE_PROP_INT32("x-hv-max-vps", X86CPU
, hv_max_vps
, -1),
5779 DEFINE_PROP_BOOL("x-hv-synic-kvm-only", X86CPU
, hyperv_synic_kvm_only
,
5781 DEFINE_PROP_END_OF_LIST()
5784 static void x86_cpu_common_class_init(ObjectClass
*oc
, void *data
)
5786 X86CPUClass
*xcc
= X86_CPU_CLASS(oc
);
5787 CPUClass
*cc
= CPU_CLASS(oc
);
5788 DeviceClass
*dc
= DEVICE_CLASS(oc
);
5790 device_class_set_parent_realize(dc
, x86_cpu_realizefn
,
5791 &xcc
->parent_realize
);
5792 device_class_set_parent_unrealize(dc
, x86_cpu_unrealizefn
,
5793 &xcc
->parent_unrealize
);
5794 dc
->props
= x86_cpu_properties
;
5796 xcc
->parent_reset
= cc
->reset
;
5797 cc
->reset
= x86_cpu_reset
;
5798 cc
->reset_dump_flags
= CPU_DUMP_FPU
| CPU_DUMP_CCOP
;
5800 cc
->class_by_name
= x86_cpu_class_by_name
;
5801 cc
->parse_features
= x86_cpu_parse_featurestr
;
5802 cc
->has_work
= x86_cpu_has_work
;
5804 cc
->do_interrupt
= x86_cpu_do_interrupt
;
5805 cc
->cpu_exec_interrupt
= x86_cpu_exec_interrupt
;
5807 cc
->dump_state
= x86_cpu_dump_state
;
5808 cc
->get_crash_info
= x86_cpu_get_crash_info
;
5809 cc
->set_pc
= x86_cpu_set_pc
;
5810 cc
->synchronize_from_tb
= x86_cpu_synchronize_from_tb
;
5811 cc
->gdb_read_register
= x86_cpu_gdb_read_register
;
5812 cc
->gdb_write_register
= x86_cpu_gdb_write_register
;
5813 cc
->get_arch_id
= x86_cpu_get_arch_id
;
5814 cc
->get_paging_enabled
= x86_cpu_get_paging_enabled
;
5815 #ifdef CONFIG_USER_ONLY
5816 cc
->handle_mmu_fault
= x86_cpu_handle_mmu_fault
;
5818 cc
->asidx_from_attrs
= x86_asidx_from_attrs
;
5819 cc
->get_memory_mapping
= x86_cpu_get_memory_mapping
;
5820 cc
->get_phys_page_debug
= x86_cpu_get_phys_page_debug
;
5821 cc
->write_elf64_note
= x86_cpu_write_elf64_note
;
5822 cc
->write_elf64_qemunote
= x86_cpu_write_elf64_qemunote
;
5823 cc
->write_elf32_note
= x86_cpu_write_elf32_note
;
5824 cc
->write_elf32_qemunote
= x86_cpu_write_elf32_qemunote
;
5825 cc
->vmsd
= &vmstate_x86_cpu
;
5827 cc
->gdb_arch_name
= x86_gdb_arch_name
;
5828 #ifdef TARGET_X86_64
5829 cc
->gdb_core_xml_file
= "i386-64bit.xml";
5830 cc
->gdb_num_core_regs
= 57;
5832 cc
->gdb_core_xml_file
= "i386-32bit.xml";
5833 cc
->gdb_num_core_regs
= 41;
5835 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
5836 cc
->debug_excp_handler
= breakpoint_handler
;
5838 cc
->cpu_exec_enter
= x86_cpu_exec_enter
;
5839 cc
->cpu_exec_exit
= x86_cpu_exec_exit
;
5841 cc
->tcg_initialize
= tcg_x86_init
;
5843 cc
->disas_set_info
= x86_disas_set_info
;
5845 dc
->user_creatable
= true;
5848 static const TypeInfo x86_cpu_type_info
= {
5849 .name
= TYPE_X86_CPU
,
5851 .instance_size
= sizeof(X86CPU
),
5852 .instance_init
= x86_cpu_initfn
,
5854 .class_size
= sizeof(X86CPUClass
),
5855 .class_init
= x86_cpu_common_class_init
,
5859 /* "base" CPU model, used by query-cpu-model-expansion */
5860 static void x86_cpu_base_class_init(ObjectClass
*oc
, void *data
)
5862 X86CPUClass
*xcc
= X86_CPU_CLASS(oc
);
5864 xcc
->static_model
= true;
5865 xcc
->migration_safe
= true;
5866 xcc
->model_description
= "base CPU model type with no features enabled";
5870 static const TypeInfo x86_base_cpu_type_info
= {
5871 .name
= X86_CPU_TYPE_NAME("base"),
5872 .parent
= TYPE_X86_CPU
,
5873 .class_init
= x86_cpu_base_class_init
,
5876 static void x86_cpu_register_types(void)
5880 type_register_static(&x86_cpu_type_info
);
5881 for (i
= 0; i
< ARRAY_SIZE(builtin_x86_defs
); i
++) {
5882 x86_register_cpudef_type(&builtin_x86_defs
[i
]);
5884 type_register_static(&max_x86_cpu_type_info
);
5885 type_register_static(&x86_base_cpu_type_info
);
5886 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
5887 type_register_static(&host_x86_cpu_type_info
);
5891 type_init(x86_cpu_register_types
)