/*
 * i386 CPUID helper functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/cutils.h"

#include "exec/exec-all.h"
#include "sysemu/kvm.h"
#include "sysemu/hvf.h"
#include "sysemu/cpus.h"

#include "qemu/error-report.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "qapi/error.h"
#include "qapi/qapi-visit-misc.h"
#include "qapi/qapi-visit-run-state.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qerror.h"
#include "qapi/visitor.h"
#include "qom/qom-qobject.h"
#include "sysemu/arch_init.h"

#include "standard-headers/asm-x86/kvm_para.h"

#include "sysemu/sysemu.h"
#include "hw/qdev-properties.h"
#include "hw/i386/topology.h"
#ifndef CONFIG_USER_ONLY
#include "exec/address-spaces.h"
#include "hw/xen/xen.h"
#include "hw/i386/apic_internal.h"
#endif

#include "disas/capstone.h"
/* Helpers for building CPUID[2] descriptors: */

struct CPUID2CacheDescriptorInfo {
    enum CacheType type;
    int level;
    int size;
    int line_size;
    int associativity;
};

#define KiB 1024
#define MiB (1024 * 1024)
/*
 * Known CPUID 2 cache descriptors.
 * From Intel SDM Volume 2A, CPUID instruction
 */
struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
    [0x06] = { .level = 1, .type = ICACHE,        .size =   8 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x08] = { .level = 1, .type = ICACHE,        .size =  16 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x09] = { .level = 1, .type = ICACHE,        .size =  32 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x0A] = { .level = 1, .type = DCACHE,        .size =   8 * KiB,
               .associativity = 2,  .line_size = 32, },
    [0x0C] = { .level = 1, .type = DCACHE,        .size =  16 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x0D] = { .level = 1, .type = DCACHE,        .size =  16 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x0E] = { .level = 1, .type = DCACHE,        .size =  24 * KiB,
               .associativity = 6,  .line_size = 64, },
    [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
               .associativity = 2,  .line_size = 64, },
    [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
               .associativity = 8,  .line_size = 64, },
    /* lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x22, 0x23 are not included
     */
    [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size =   1 * MiB,
               .associativity = 16, .line_size = 64, },
    /* lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x25, 0x20 are not included
     */
    [0x2C] = { .level = 1, .type = DCACHE,        .size =  32 * KiB,
               .associativity = 8,  .line_size = 64, },
    [0x30] = { .level = 1, .type = ICACHE,        .size =  32 * KiB,
               .associativity = 8,  .line_size = 64, },
    [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size =   1 * MiB,
               .associativity = 4,  .line_size = 32, },
    [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size =   2 * MiB,
               .associativity = 4,  .line_size = 32, },
    [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size =   4 * MiB,
               .associativity = 4,  .line_size = 64, },
    [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size =   8 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size =   3 * MiB,
               .associativity = 12, .line_size = 64, },
    /* Descriptor 0x49 depends on CPU family/model, so it is not included */
    [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size =   6 * MiB,
               .associativity = 12, .line_size = 64, },
    [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size =   8 * MiB,
               .associativity = 16, .line_size = 64, },
    [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size =  12 * MiB,
               .associativity = 12, .line_size = 64, },
    [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size =  16 * MiB,
               .associativity = 16, .line_size = 64, },
    [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size =   6 * MiB,
               .associativity = 24, .line_size = 64, },
    [0x60] = { .level = 1, .type = DCACHE,        .size =  16 * KiB,
               .associativity = 8,  .line_size = 64, },
    [0x66] = { .level = 1, .type = DCACHE,        .size =   8 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x67] = { .level = 1, .type = DCACHE,        .size =  16 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x68] = { .level = 1, .type = DCACHE,        .size =  32 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size =   1 * MiB,
               .associativity = 4,  .line_size = 64, },
    /* lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
     */
    [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size =   2 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 2,  .line_size = 64, },
    [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 8,  .line_size = 64, },
    [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
               .associativity = 8,  .line_size = 32, },
    [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 8,  .line_size = 32, },
    [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size =   1 * MiB,
               .associativity = 8,  .line_size = 32, },
    [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size =   2 * MiB,
               .associativity = 8,  .line_size = 32, },
    [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size =   1 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size =   1 * MiB,
               .associativity = 4,  .line_size = 64, },
    [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size =   2 * MiB,
               .associativity = 4,  .line_size = 64, },
    [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size =   1 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size =   2 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size =   4 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size =   3 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size =   6 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size =   2 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size =   4 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size =   8 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size =  12 * MiB,
               .associativity = 24, .line_size = 64, },
    [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size =  18 * MiB,
               .associativity = 24, .line_size = 64, },
    [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size =  24 * MiB,
               .associativity = 24, .line_size = 64, },
};
/*
 * "CPUID leaf 2 does not report cache descriptor information,
 * use CPUID leaf 4 to query cache parameters"
 */
#define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF
/*
 * Return a CPUID 2 cache descriptor for a given cache.
 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
 */
static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
{
    int i;

    assert(cache->size > 0);
    assert(cache->level > 0);
    assert(cache->line_size > 0);
    assert(cache->associativity > 0);
    for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
        struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
        if (d->level == cache->level && d->type == cache->type &&
            d->size == cache->size && d->line_size == cache->line_size &&
            d->associativity == cache->associativity) {
            return i;
        }
    }

    return CACHE_DESCRIPTOR_UNAVAILABLE;
}
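
/*
 * Illustrative example: with the table above, a level-1 data cache of
 * 32 KiB, 8-way associative, with 64-byte lines matches the entry at
 * index 0x2C, so cpuid2_cache_descriptor() returns 0x2C; a geometry with
 * no matching entry (say, a 48 KiB L1D) yields
 * CACHE_DESCRIPTOR_UNAVAILABLE (0xFF).
 */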
/* CPUID Leaf 4 constants: */

#define CACHE_TYPE_D    1
#define CACHE_TYPE_I    2
#define CACHE_TYPE_UNIFIED   3

#define CACHE_LEVEL(l)        (l << 5)

#define CACHE_SELF_INIT_LEVEL (1 << 8)

#define CACHE_NO_INVD_SHARING   (1 << 0)
#define CACHE_INCLUSIVE       (1 << 1)
#define CACHE_COMPLEX_IDX     (1 << 2)

/* Encode CacheType for CPUID[4].EAX */
#define CACHE_TYPE(t) (((t) == DCACHE) ? CACHE_TYPE_D : \
                       ((t) == ICACHE) ? CACHE_TYPE_I : \
                       ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
                       0 /* Invalid value */)
/* Encode cache info for CPUID[4] */
static void encode_cache_cpuid4(CPUCacheInfo *cache,
                                int num_apic_ids, int num_cores,
                                uint32_t *eax, uint32_t *ebx,
                                uint32_t *ecx, uint32_t *edx)
{
    assert(cache->size == cache->line_size * cache->associativity *
                          cache->partitions * cache->sets);

    assert(num_apic_ids > 0);
    *eax = CACHE_TYPE(cache->type) |
           CACHE_LEVEL(cache->level) |
           (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
           ((num_cores - 1) << 26) |
           ((num_apic_ids - 1) << 14);

    assert(cache->line_size > 0);
    assert(cache->partitions > 0);
    assert(cache->associativity > 0);
    /* We don't implement fully-associative caches */
    assert(cache->associativity < cache->sets);
    *ebx = (cache->line_size - 1) |
           ((cache->partitions - 1) << 12) |
           ((cache->associativity - 1) << 22);

    assert(cache->sets > 0);
    *ecx = cache->sets - 1;

    *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
           (cache->inclusive ? CACHE_INCLUSIVE : 0) |
           (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
}
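
/*
 * Worked example (illustrative): a unified, self-initializing L2 cache of
 * 4 MiB, 16-way associative, 64-byte lines, 1 partition and 4096 sets
 * (64 * 16 * 1 * 4096 == 4 MiB), with one APIC ID and one core, encodes
 * as EAX=0x00000143, EBX=0x03C0003F and ECX=0x00000FFF with the helper
 * above.
 */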
/* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
{
    assert(cache->size % 1024 == 0);
    assert(cache->lines_per_tag > 0);
    assert(cache->associativity > 0);
    assert(cache->line_size > 0);
    return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
           (cache->lines_per_tag << 8) | (cache->line_size);
}
#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006: */
#define AMD_ENC_ASSOC(a) (a <=   1 ? a   : \
                          a ==   2 ? 0x2 : \
                          a ==   4 ? 0x4 : \
                          a ==   8 ? 0x6 : \
                          a ==  16 ? 0x8 : \
                          a ==  32 ? 0xA : \
                          a ==  48 ? 0xB : \
                          a ==  64 ? 0xC : \
                          a ==  96 ? 0xD : \
                          a == 128 ? 0xE : \
                          a == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
/*
 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
 */
static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
                                       CPUCacheInfo *l3,
                                       uint32_t *ecx, uint32_t *edx)
{
    assert(l2->size % 1024 == 0);
    assert(l2->associativity > 0);
    assert(l2->lines_per_tag > 0);
    assert(l2->line_size > 0);
    *ecx = ((l2->size / 1024) << 16) |
           (AMD_ENC_ASSOC(l2->associativity) << 12) |
           (l2->lines_per_tag << 8) | (l2->line_size);

    if (l3) {
        assert(l3->size % (512 * 1024) == 0);
        assert(l3->associativity > 0);
        assert(l3->lines_per_tag > 0);
        assert(l3->line_size > 0);
        *edx = ((l3->size / (512 * 1024)) << 18) |
               (AMD_ENC_ASSOC(l3->associativity) << 12) |
               (l3->lines_per_tag << 8) | (l3->line_size);
    } else {
        *edx = 0;
    }
}
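
/*
 * Worked example (illustrative): a 512 KiB, 8-way L2 with 64-byte lines and
 * one line per tag encodes as ECX = (512 << 16) | (6 << 12) | (1 << 8) | 64
 * = 0x02006140, since AMD_ENC_ASSOC(8) is 0x6 in the table above.
 */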
/*
 * Definitions used for building CPUID Leaf 0x8000001D and 0x8000001E
 * Please refer to the AMD64 Architecture Programmer’s Manual Volume 3.
 * Define the constants to build the cpu topology. Right now, TOPOEXT
 * feature is enabled only on EPYC. So, these constants are based on
 * EPYC supported configurations. We may need to handle the cases if
 * these values change in future.
 */
/* Maximum core complexes in a node */
#define MAX_CCX 2
/* Maximum cores in a core complex */
#define MAX_CORES_IN_CCX 4
/* Maximum cores in a node */
#define MAX_CORES_IN_NODE 8
/* Maximum nodes in a socket */
#define MAX_NODES_PER_SOCKET 4
/*
 * Figure out the number of nodes required to build this config.
 * Max cores in a node is 8
 */
static int nodes_in_socket(int nr_cores)
{
    int nodes;

    nodes = DIV_ROUND_UP(nr_cores, MAX_CORES_IN_NODE);

    /* Hardware does not support config with 3 nodes, return 4 in that case */
    return (nodes == 3) ? 4 : nodes;
}
/*
 * Decide the number of cores in a core complex with the given nr_cores using
 * the following set of constants: MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE
 * and MAX_NODES_PER_SOCKET. Maintain symmetry as much as possible.
 * L3 cache is shared across all cores in a core complex. So, this will also
 * tell us how many cores are sharing the L3 cache.
 */
static int cores_in_core_complex(int nr_cores)
{
    int nodes;

    /* Check if we can fit all the cores in one core complex */
    if (nr_cores <= MAX_CORES_IN_CCX) {
        return nr_cores;
    }
    /* Get the number of nodes required to build this config */
    nodes = nodes_in_socket(nr_cores);

    /*
     * Divide the cores across all the core complexes
     * Return rounded up value
     */
    return DIV_ROUND_UP(nr_cores, nodes * MAX_CCX);
}
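
/*
 * Worked example (illustrative): for nr_cores = 12, nodes_in_socket() gives
 * DIV_ROUND_UP(12, 8) = 2 nodes, and cores_in_core_complex() returns
 * DIV_ROUND_UP(12, 2 * MAX_CCX) = 3 cores per core complex; for nr_cores = 20
 * the raw node count would be 3, which is rounded up to 4 nodes.
 */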
/* Encode cache info for CPUID[8000001D] */
static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs,
                                       uint32_t *eax, uint32_t *ebx,
                                       uint32_t *ecx, uint32_t *edx)
{
    uint32_t l3_cores;

    assert(cache->size == cache->line_size * cache->associativity *
                          cache->partitions * cache->sets);

    *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) |
               (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0);

    /* L3 is shared among multiple cores */
    if (cache->level == 3) {
        l3_cores = cores_in_core_complex(cs->nr_cores);
        *eax |= ((l3_cores * cs->nr_threads) - 1) << 14;
    } else {
        *eax |= ((cs->nr_threads - 1) << 14);
    }

    assert(cache->line_size > 0);
    assert(cache->partitions > 0);
    assert(cache->associativity > 0);
    /* We don't implement fully-associative caches */
    assert(cache->associativity < cache->sets);
    *ebx = (cache->line_size - 1) |
           ((cache->partitions - 1) << 12) |
           ((cache->associativity - 1) << 22);

    assert(cache->sets > 0);
    *ecx = cache->sets - 1;

    *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
           (cache->inclusive ? CACHE_INCLUSIVE : 0) |
           (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
}
/* Data structure to hold the configuration info for a given core index */
struct core_topology {
    /* core complex id of the current core index */
    int ccx_id;
    /*
     * Adjusted core index for this core in the topology
     * This can be 0,1,2,3 with max 4 cores in a core complex
     */
    int core_id;
    /* Node id for this core index */
    int node_id;
    /* Number of nodes in this config */
    int num_nodes;
};
/*
 * Build the configuration to closely match the EPYC hardware. Using the EPYC
 * hardware configuration values (MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE)
 * right now. This could change in future.
 * nr_cores : Total number of cores in the config
 * core_id : Core index of the current CPU
 * topo : Data structure to hold all the config info for this core index
 */
static void build_core_topology(int nr_cores, int core_id,
                                struct core_topology *topo)
{
    int nodes, cores_in_ccx;

    /* First get the number of nodes required */
    nodes = nodes_in_socket(nr_cores);

    cores_in_ccx = cores_in_core_complex(nr_cores);

    topo->node_id = core_id / (cores_in_ccx * MAX_CCX);
    topo->ccx_id = (core_id % (cores_in_ccx * MAX_CCX)) / cores_in_ccx;
    topo->core_id = core_id % cores_in_ccx;
    topo->num_nodes = nodes;
}
/* Encode topology info for CPUID[8000001E] */
static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu,
                                      uint32_t *eax, uint32_t *ebx,
                                      uint32_t *ecx, uint32_t *edx)
{
    struct core_topology topo = {0};

    build_core_topology(cs->nr_cores, cpu->core_id, &topo);
    *eax = cpu->apic_id;
    /*
     * CPUID_Fn8000001E_EBX
     * 15:8 Threads per core (The number of threads per core is
     *      Threads per core + 1)
     *  7:0 Core id (see bit decoding below)
     */
    if (cs->nr_threads - 1) {
        *ebx = ((cs->nr_threads - 1) << 8) | (topo.node_id << 3) |
               (topo.ccx_id << 2) | topo.core_id;
    } else {
        *ebx = (topo.node_id << 4) | (topo.ccx_id << 3) | topo.core_id;
    }
    /*
     * CPUID_Fn8000001E_ECX
     * 10:8 Nodes per processor (Nodes per processor is number of nodes + 1)
     *  7:0 Node id (see bit decoding below)
     */
    *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << 2) | topo.node_id;
    *edx = 0;
}
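
/*
 * Worked example (illustrative): on an 8-core, 2-threads-per-core socket,
 * core_id 5 yields topo = { .node_id = 0, .ccx_id = 1, .core_id = 1 }, so
 * EBX = (1 << 8) | (0 << 3) | (1 << 2) | 1 = 0x105 and, with one node and
 * socket_id 0, ECX = 0.
 */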
/*
 * Definitions of the hardcoded cache entries we expose:
 * These are legacy cache values. If there is a need to change any
 * of these values please use builtin_x86_defs
 */

static CPUCacheInfo legacy_l1d_cache = {
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1d_cache_amd = {
    .no_invd_sharing = true,
};

/* L1 instruction cache: */
static CPUCacheInfo legacy_l1i_cache = {
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1i_cache_amd = {
    .no_invd_sharing = true,
};

/* Level 2 unified cache: */
static CPUCacheInfo legacy_l2_cache = {
    .type = UNIFIED_CACHE,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
static CPUCacheInfo legacy_l2_cache_cpuid2 = {
    .type = UNIFIED_CACHE,
};

/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l2_cache_amd = {
    .type = UNIFIED_CACHE,
};

/* Level 3 unified cache: */
static CPUCacheInfo legacy_l3_cache = {
    .type = UNIFIED_CACHE,
    .complex_indexing = true,
};
/* TLB definitions: */

#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512
/* CPUID Leaf 0x14 constants: */
#define INTEL_PT_MAX_SUBLEAF     0x1
/*
 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
 *          MSR can be accessed;
 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
 *          of Intel PT MSRs across warm reset;
 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
 */
#define INTEL_PT_MINIMAL_EBX     0xf
/*
 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
 *          IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
 *          accessed;
 * bit[01]: ToPA tables can hold any number of output entries, up to the
 *          maximum allowed by the MaskOrTableOffset field of
 *          IA32_RTIT_OUTPUT_MASK_PTRS;
 * bit[02]: Support Single-Range Output scheme;
 */
#define INTEL_PT_MINIMAL_ECX     0x7
/* generated packets which contain IP payloads have LIP values */
#define INTEL_PT_IP_LIP          (1 << 31)
#define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
#define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
#define INTEL_PT_MTC_BITMAP      (0x0249 << 16) /* Support ART(0,3,6,9) */
#define INTEL_PT_CYCLE_BITMAP    0x1fff         /* Support 0,2^(0~11) */
#define INTEL_PT_PSB_BITMAP      (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
                                     uint32_t vendor2, uint32_t vendor3)
{
    int i;

    for (i = 0; i < 4; i++) {
        dst[i] = vendor1 >> (8 * i);
        dst[i + 4] = vendor2 >> (8 * i);
        dst[i + 8] = vendor3 >> (8 * i);
    }
    dst[CPUID_VENDOR_SZ] = '\0';
}
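
/*
 * Illustrative example: for an Intel CPU, CPUID[0].EBX/EDX/ECX hold
 * 0x756E6547, 0x49656E69 and 0x6C65746E; passing them as vendor1..vendor3
 * writes the bytes out little-endian, producing the string "GenuineIntel".
 */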
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */   \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
          CPUID_EXT_F16C, CPUID_EXT_RDRAND */

#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES 0
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
          CPUID_7_0_EBX_ERMS)
          /* missing:
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE | \
          CPUID_7_0_ECX_LA57)
#define TCG_7_0_EDX_FEATURES 0
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
typedef struct FeatureWordInfo {
    /* feature flags names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[32];
    uint32_t cpuid_eax;   /* Input EAX for CPUID */
    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;   /* Input ECX value for CPUID */
    int cpuid_reg;        /* output register (R_* constant) */
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
    uint32_t migratable_flags; /* Feature flags known to be migratable */
    /* Features that shouldn't be auto-enabled by "-cpu host" */
    uint32_t no_autoenable_flags;
} FeatureWordInfo;
static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
    [FEAT_1_EDX] = {
        .feat_names = {
            "fpu", "vme", "de", "pse",
            "tsc", "msr", "pae", "mce",
            "cx8", "apic", NULL, "sep",
            "mtrr", "pge", "mca", "cmov",
            "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
            NULL, "ds" /* Intel dts */, "acpi", "mmx",
            "fxsr", "sse", "sse2", "ss",
            "ht" /* Intel htt */, "tm", "ia64", "pbe",
        },
        .cpuid_eax = 1, .cpuid_reg = R_EDX,
        .tcg_features = TCG_FEATURES,
    },
    [FEAT_1_ECX] = {
        .feat_names = {
            "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
            "ds-cpl", "vmx", "smx", "est",
            "tm2", "ssse3", "cid", NULL,
            "fma", "cx16", "xtpr", "pdcm",
            NULL, "pcid", "dca", "sse4.1",
            "sse4.2", "x2apic", "movbe", "popcnt",
            "tsc-deadline", "aes", "xsave", "osxsave",
            "avx", "f16c", "rdrand", "hypervisor",
        },
        .cpuid_eax = 1, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT_FEATURES,
    },
    /* Feature names that are already defined on feature_name[] but
     * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
     * names on feat_names below. They are copied automatically
     * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
     */
    [FEAT_8000_0001_EDX] = {
        .feat_names = {
            NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
            NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
            NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
            NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
            NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
            "nx", NULL, "mmxext", NULL /* mmx */,
            NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
            NULL, "lm", "3dnowext", "3dnow",
        },
        .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT2_FEATURES,
    },
    [FEAT_8000_0001_ECX] = {
        .feat_names = {
            "lahf-lm", "cmp-legacy", "svm", "extapic",
            "cr8legacy", "abm", "sse4a", "misalignsse",
            "3dnowprefetch", "osvw", "ibs", "xop",
            "skinit", "wdt", NULL, "lwp",
            "fma4", "tce", NULL, "nodeid-msr",
            NULL, "tbm", "topoext", "perfctr-core",
            "perfctr-nb", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT3_FEATURES,
    },
    [FEAT_C000_0001_EDX] = {
        .feat_names = {
            NULL, NULL, "xstore", "xstore-en",
            NULL, NULL, "xcrypt", "xcrypt-en",
            "ace2", "ace2-en", "phe", "phe-en",
            "pmm", "pmm-en", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT4_FEATURES,
    },
    [FEAT_KVM] = {
        .feat_names = {
            "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
            "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
            NULL, "kvm-pv-tlb-flush", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "kvmclock-stable-bit", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
        .tcg_features = TCG_KVM_FEATURES,
    },
    [FEAT_KVM_HINTS] = {
        .feat_names = {
            "kvm-hint-dedicated", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EDX,
        .tcg_features = TCG_KVM_FEATURES,
        /*
         * KVM hints aren't auto-enabled by -cpu host, they need to be
         * explicitly enabled in the command-line.
         */
        .no_autoenable_flags = ~0U,
    },
    [FEAT_HYPERV_EAX] = {
        .feat_names = {
            NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
            NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
            NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
            NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
            NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
            NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
            NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
            NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
    },
    [FEAT_HYPERV_EBX] = {
        .feat_names = {
            NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
            NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
            NULL /* hv_post_messages */, NULL /* hv_signal_events */,
            NULL /* hv_create_port */, NULL /* hv_connect_port */,
            NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
            NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
            NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
    },
    [FEAT_HYPERV_EDX] = {
        .feat_names = {
            NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
            NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
            NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
            NULL, NULL,
            NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
    },
    [FEAT_SVM] = {
        .feat_names = {
            "npt", "lbrv", "svm-lock", "nrip-save",
            "tsc-scale", "vmcb-clean",  "flushbyasid", "decodeassists",
            NULL, NULL, "pause-filter", NULL,
            "pfthreshold", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
        .tcg_features = TCG_SVM_FEATURES,
    },
    [FEAT_7_0_EBX] = {
        .feat_names = {
            "fsgsbase", "tsc-adjust", NULL, "bmi1",
            "hle", "avx2", NULL, "smep",
            "bmi2", "erms", "invpcid", "rtm",
            NULL, NULL, "mpx", NULL,
            "avx512f", "avx512dq", "rdseed", "adx",
            "smap", "avx512ifma", "pcommit", "clflushopt",
            "clwb", "intel-pt", "avx512pf", "avx512er",
            "avx512cd", "sha-ni", "avx512bw", "avx512vl",
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EBX,
        .tcg_features = TCG_7_0_EBX_FEATURES,
    },
    [FEAT_7_0_ECX] = {
        .feat_names = {
            NULL, "avx512vbmi", "umip", "pku",
            "ospke", NULL, "avx512vbmi2", NULL,
            "gfni", "vaes", "vpclmulqdq", "avx512vnni",
            "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
            "la57", NULL, NULL, NULL,
            NULL, NULL, "rdpid", NULL,
            NULL, "cldemote", NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_ECX,
        .tcg_features = TCG_7_0_ECX_FEATURES,
    },
    [FEAT_7_0_EDX] = {
        .feat_names = {
            NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, "spec-ctrl", NULL,
            NULL, NULL, NULL, "ssbd",
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_7_0_EDX_FEATURES,
    },
    [FEAT_8000_0007_EDX] = {
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "invtsc", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000007,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_APM_FEATURES,
        .unmigratable_flags = CPUID_APM_INVTSC,
    },
    [FEAT_8000_0008_EBX] = {
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "ibpb", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, "virt-ssbd", NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000008,
        .cpuid_reg = R_EBX,
        .unmigratable_flags = 0,
    },
    [FEAT_XSAVE] = {
        .feat_names = {
            "xsaveopt", "xsavec", "xgetbv1", "xsaves",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0xd,
        .cpuid_needs_ecx = true, .cpuid_ecx = 1,
        .cpuid_reg = R_EAX,
        .tcg_features = TCG_XSAVE_FEATURES,
    },
    [FEAT_6_EAX] = {
        .feat_names = {
            NULL, NULL, "arat", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 6, .cpuid_reg = R_EAX,
        .tcg_features = TCG_6_EAX_FEATURES,
    },
    [FEAT_XSAVE_COMP_LO] = {
        .cpuid_eax = 0xD,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EAX,
        .tcg_features = ~0U,
        .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
            XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
            XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
            XSTATE_PKRU_MASK,
    },
    [FEAT_XSAVE_COMP_HI] = {
        .cpuid_eax = 0xD,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EDX,
        .tcg_features = ~0U,
    },
};
typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;

#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER
typedef struct ExtSaveArea {
    uint32_t feature, bits;
    uint32_t offset, size;
} ExtSaveArea;

static const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_FP_BIT] = {
        /* x87 FP state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* x87 state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_SSE_BIT] = {
        /* SSE state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* SSE state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};
static uint32_t xsave_area_size(uint64_t mask)
{
    int i;
    uint64_t ret = 0;

    for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if ((mask >> i) & 1) {
            ret = MAX(ret, esa->offset + esa->size);
        }
    }
    return ret;
}
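
/*
 * Illustrative example: for mask 0x7 (x87 | SSE | AVX) the x87 and SSE
 * components both end at sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader)
 * (the 512-byte legacy region plus the 64-byte header), while the AVX
 * component ends at offsetof(X86XSaveArea, avx_state) + sizeof(XSaveAVX),
 * so the function returns the end of the AVX state.
 */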
static inline bool accel_uses_host_cpuid(void)
{
    return kvm_enabled() || hvf_enabled();
}
static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
{
    return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
           cpu->env.features[FEAT_XSAVE_COMP_LO];
}
const char *get_register_name_32(unsigned int reg)
{
    if (reg >= CPU_NB_REGS32) {
        return NULL;
    }
    return x86_reg_info_32[reg].name;
}
/*
 * Returns the set of feature flags that are supported and migratable by
 * QEMU, for a given FeatureWord.
 */
static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t r = 0;
    int i;

    for (i = 0; i < 32; i++) {
        uint32_t f = 1U << i;

        /* If the feature name is known, it is implicitly considered migratable,
         * unless it is explicitly set in unmigratable_flags */
        if ((wi->migratable_flags & f) ||
            (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
            r |= f;
        }
    }
    return r;
}
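
/*
 * Illustrative example: in FEAT_8000_0007_EDX the flag "invtsc" has a name
 * but is listed in unmigratable_flags (CPUID_APM_INVTSC), so it is excluded
 * from the returned mask, while a named flag such as "ibpb" in
 * FEAT_8000_0008_EBX is treated as migratable.
 */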
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
}
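
/*
 * Usage sketch (illustrative): host_cpuid(0, 0, &eax, &ebx, &ecx, &edx)
 * returns the highest basic CPUID leaf in EAX and the vendor string in
 * EBX/EDX/ECX, which is how host_vendor_fms() below builds its results.
 */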
void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
{
    uint32_t eax, ebx, ecx, edx;

    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    if (family) {
        *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    }
    if (model) {
        *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    }
    if (stepping) {
        *stepping = eax & 0x0F;
    }
}
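
/*
 * Worked example (illustrative): for CPUID[1].EAX = 0x000506E3, *family =
 * ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF) = 6 + 0 = 6, *model =
 * ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12) = 0x0E | 0x50 = 0x5E (94),
 * and *stepping = 3.
 */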
/* CPU class name definitions: */

/* Return type name for a given CPU model name
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}

static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename = x86_cpu_type_name(cpu_model);
    oc = object_class_by_name(typename);
    g_free(typename);
    return oc;
}

static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
{
    const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
    assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
    return g_strndup(class_name,
                     strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
}
struct X86CPUDefinition {
    const char *name;
    uint32_t level;
    uint32_t xlevel;
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features;
    const char *model_id;
    CPUCaches *cache_info;
};
static CPUCaches epyc_cache_info = {
    .l1d_cache = &(CPUCacheInfo) {
        .no_invd_sharing = true,
    },
    .l1i_cache = &(CPUCacheInfo) {
        .no_invd_sharing = true,
    },
    .l2_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
    },
    .l3_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .associativity = 16,
        .complex_indexing = true,
    },
};
static X86CPUDefinition builtin_x86_defs[] = {
    {
        .vendor = CPUID_VENDOR_AMD,
        .features[FEAT_1_EDX] =
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_CX16,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
        .xlevel = 0x8000000A,
        .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
    },
    {
        .vendor = CPUID_VENDOR_AMD,
        /* Missing: CPUID_HT */
        .features[FEAT_1_EDX] =
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36 | CPUID_VME,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
            CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
        /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
            CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
        /* Missing: CPUID_SVM_LBRV */
        .features[FEAT_SVM] =
        .xlevel = 0x8000001A,
        .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
        .features[FEAT_1_EDX] =
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
        /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
         * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        /* Missing: CPUID_HT */
        .features[FEAT_1_EDX] =
            PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
        /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_CX16,
        /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
        .features[FEAT_8000_0001_ECX] =
        .xlevel = 0x80000008,
        .model_id = "Common KVM processor"
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
        .features[FEAT_1_ECX] =
        .xlevel = 0x80000004,
        .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
        .features[FEAT_1_ECX] =
        .features[FEAT_8000_0001_ECX] =
        .xlevel = 0x80000008,
        .model_id = "Common 32-bit KVM processor"
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
        .features[FEAT_1_EDX] =
            PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
        /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
         * CPUID_EXT_PDCM, CPUID_EXT_VMX */
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        .features[FEAT_8000_0001_EDX] =
        .xlevel = 0x80000008,
        .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
    },
    {
        .vendor = CPUID_VENDOR_AMD,
        .features[FEAT_1_EDX] =
            PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .xlevel = 0x80000008,
        .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
        .features[FEAT_1_EDX] =
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
            CPUID_ACPI | CPUID_SS,
            /* Some CPUs got no CPUID_SEP */
        /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2, */
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
        .features[FEAT_8000_0001_EDX] =
        .features[FEAT_8000_0001_ECX] =
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
        .xlevel = 0x80000008,
        .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
        .xlevel = 0x80000008,
        .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
        .xlevel = 0x80000008,
        .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
    },
    {
        .name = "Nehalem-IBRS",
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
        .xlevel = 0x80000008,
        .model_id = "Intel Core i7 9xx (Nehalem Core i7, IBRS update)",
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
        .features[FEAT_6_EAX] =
        .xlevel = 0x80000008,
        .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
    },
    {
        .name = "Westmere-IBRS",
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_6_EAX] =
        .xlevel = 0x80000008,
        .model_id = "Westmere E56xx/L56xx/X56xx (IBRS update)",
    },
    {
        .name = "SandyBridge",
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
            CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
        .features[FEAT_8000_0001_ECX] =
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon E312xx (Sandy Bridge)",
    },
    {
        .name = "SandyBridge-IBRS",
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
            CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
        .features[FEAT_8000_0001_ECX] =
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon E312xx (Sandy Bridge, IBRS update)",
    },
    {
        .name = "IvyBridge",
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
            CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
            CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
        .features[FEAT_8000_0001_ECX] =
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
    },
    {
        .name = "IvyBridge-IBRS",
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
            CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
            CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
        .features[FEAT_8000_0001_ECX] =
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)",
    },
    {
        .name = "Haswell-noTSX",
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Haswell, no TSX)",
    },
    {
        .name = "Haswell-noTSX-IBRS",
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Haswell, no TSX, IBRS)",
    },
    {
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Haswell)",
    },
    {
        .name = "Haswell-IBRS",
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Haswell, IBRS)",
    },
    {
        .name = "Broadwell-noTSX",
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Broadwell, no TSX)",
    },
    {
        .name = "Broadwell-noTSX-IBRS",
        .vendor = CPUID_VENDOR_INTEL,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
2062 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
2063 CPUID_DE
| CPUID_FP87
,
2064 .features
[FEAT_1_ECX
] =
2065 CPUID_EXT_AVX
| CPUID_EXT_XSAVE
| CPUID_EXT_AES
|
2066 CPUID_EXT_POPCNT
| CPUID_EXT_X2APIC
| CPUID_EXT_SSE42
|
2067 CPUID_EXT_SSE41
| CPUID_EXT_CX16
| CPUID_EXT_SSSE3
|
2068 CPUID_EXT_PCLMULQDQ
| CPUID_EXT_SSE3
|
2069 CPUID_EXT_TSC_DEADLINE_TIMER
| CPUID_EXT_FMA
| CPUID_EXT_MOVBE
|
2070 CPUID_EXT_PCID
| CPUID_EXT_F16C
| CPUID_EXT_RDRAND
,
2071 .features
[FEAT_8000_0001_EDX
] =
2072 CPUID_EXT2_LM
| CPUID_EXT2_RDTSCP
| CPUID_EXT2_NX
|
2074 .features
[FEAT_8000_0001_ECX
] =
2075 CPUID_EXT3_ABM
| CPUID_EXT3_LAHF_LM
| CPUID_EXT3_3DNOWPREFETCH
,
2076 .features
[FEAT_7_0_EDX
] =
2077 CPUID_7_0_EDX_SPEC_CTRL
,
2078 .features
[FEAT_7_0_EBX
] =
2079 CPUID_7_0_EBX_FSGSBASE
| CPUID_7_0_EBX_BMI1
|
2080 CPUID_7_0_EBX_AVX2
| CPUID_7_0_EBX_SMEP
|
2081 CPUID_7_0_EBX_BMI2
| CPUID_7_0_EBX_ERMS
| CPUID_7_0_EBX_INVPCID
|
2082 CPUID_7_0_EBX_RDSEED
| CPUID_7_0_EBX_ADX
|
2084 .features
[FEAT_XSAVE
] =
2085 CPUID_XSAVE_XSAVEOPT
,
2086 .features
[FEAT_6_EAX
] =
2088 .xlevel
= 0x80000008,
2089 .model_id
= "Intel Core Processor (Broadwell, no TSX, IBRS)",
2092 .name
= "Broadwell",
2094 .vendor
= CPUID_VENDOR_INTEL
,
2098 .features
[FEAT_1_EDX
] =
2099 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
2100 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
2101 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
2102 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
2103 CPUID_DE
| CPUID_FP87
,
2104 .features
[FEAT_1_ECX
] =
2105 CPUID_EXT_AVX
| CPUID_EXT_XSAVE
| CPUID_EXT_AES
|
2106 CPUID_EXT_POPCNT
| CPUID_EXT_X2APIC
| CPUID_EXT_SSE42
|
2107 CPUID_EXT_SSE41
| CPUID_EXT_CX16
| CPUID_EXT_SSSE3
|
2108 CPUID_EXT_PCLMULQDQ
| CPUID_EXT_SSE3
|
2109 CPUID_EXT_TSC_DEADLINE_TIMER
| CPUID_EXT_FMA
| CPUID_EXT_MOVBE
|
2110 CPUID_EXT_PCID
| CPUID_EXT_F16C
| CPUID_EXT_RDRAND
,
2111 .features
[FEAT_8000_0001_EDX
] =
2112 CPUID_EXT2_LM
| CPUID_EXT2_RDTSCP
| CPUID_EXT2_NX
|
2114 .features
[FEAT_8000_0001_ECX
] =
2115 CPUID_EXT3_ABM
| CPUID_EXT3_LAHF_LM
| CPUID_EXT3_3DNOWPREFETCH
,
2116 .features
[FEAT_7_0_EBX
] =
2117 CPUID_7_0_EBX_FSGSBASE
| CPUID_7_0_EBX_BMI1
|
2118 CPUID_7_0_EBX_HLE
| CPUID_7_0_EBX_AVX2
| CPUID_7_0_EBX_SMEP
|
2119 CPUID_7_0_EBX_BMI2
| CPUID_7_0_EBX_ERMS
| CPUID_7_0_EBX_INVPCID
|
2120 CPUID_7_0_EBX_RTM
| CPUID_7_0_EBX_RDSEED
| CPUID_7_0_EBX_ADX
|
2122 .features
[FEAT_XSAVE
] =
2123 CPUID_XSAVE_XSAVEOPT
,
2124 .features
[FEAT_6_EAX
] =
2126 .xlevel
= 0x80000008,
2127 .model_id
= "Intel Core Processor (Broadwell)",
2130 .name
= "Broadwell-IBRS",
2132 .vendor
= CPUID_VENDOR_INTEL
,
2136 .features
[FEAT_1_EDX
] =
2137 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
2138 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
2139 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
2140 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
2141 CPUID_DE
| CPUID_FP87
,
2142 .features
[FEAT_1_ECX
] =
2143 CPUID_EXT_AVX
| CPUID_EXT_XSAVE
| CPUID_EXT_AES
|
2144 CPUID_EXT_POPCNT
| CPUID_EXT_X2APIC
| CPUID_EXT_SSE42
|
2145 CPUID_EXT_SSE41
| CPUID_EXT_CX16
| CPUID_EXT_SSSE3
|
2146 CPUID_EXT_PCLMULQDQ
| CPUID_EXT_SSE3
|
2147 CPUID_EXT_TSC_DEADLINE_TIMER
| CPUID_EXT_FMA
| CPUID_EXT_MOVBE
|
2148 CPUID_EXT_PCID
| CPUID_EXT_F16C
| CPUID_EXT_RDRAND
,
2149 .features
[FEAT_8000_0001_EDX
] =
2150 CPUID_EXT2_LM
| CPUID_EXT2_RDTSCP
| CPUID_EXT2_NX
|
2152 .features
[FEAT_8000_0001_ECX
] =
2153 CPUID_EXT3_ABM
| CPUID_EXT3_LAHF_LM
| CPUID_EXT3_3DNOWPREFETCH
,
2154 .features
[FEAT_7_0_EDX
] =
2155 CPUID_7_0_EDX_SPEC_CTRL
,
2156 .features
[FEAT_7_0_EBX
] =
2157 CPUID_7_0_EBX_FSGSBASE
| CPUID_7_0_EBX_BMI1
|
2158 CPUID_7_0_EBX_HLE
| CPUID_7_0_EBX_AVX2
| CPUID_7_0_EBX_SMEP
|
2159 CPUID_7_0_EBX_BMI2
| CPUID_7_0_EBX_ERMS
| CPUID_7_0_EBX_INVPCID
|
2160 CPUID_7_0_EBX_RTM
| CPUID_7_0_EBX_RDSEED
| CPUID_7_0_EBX_ADX
|
2162 .features
[FEAT_XSAVE
] =
2163 CPUID_XSAVE_XSAVEOPT
,
2164 .features
[FEAT_6_EAX
] =
2166 .xlevel
= 0x80000008,
2167 .model_id
= "Intel Core Processor (Broadwell, IBRS)",
2170 .name
= "Skylake-Client",
2172 .vendor
= CPUID_VENDOR_INTEL
,
2176 .features
[FEAT_1_EDX
] =
2177 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
2178 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
2179 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
2180 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
2181 CPUID_DE
| CPUID_FP87
,
2182 .features
[FEAT_1_ECX
] =
2183 CPUID_EXT_AVX
| CPUID_EXT_XSAVE
| CPUID_EXT_AES
|
2184 CPUID_EXT_POPCNT
| CPUID_EXT_X2APIC
| CPUID_EXT_SSE42
|
2185 CPUID_EXT_SSE41
| CPUID_EXT_CX16
| CPUID_EXT_SSSE3
|
2186 CPUID_EXT_PCLMULQDQ
| CPUID_EXT_SSE3
|
2187 CPUID_EXT_TSC_DEADLINE_TIMER
| CPUID_EXT_FMA
| CPUID_EXT_MOVBE
|
2188 CPUID_EXT_PCID
| CPUID_EXT_F16C
| CPUID_EXT_RDRAND
,
2189 .features
[FEAT_8000_0001_EDX
] =
2190 CPUID_EXT2_LM
| CPUID_EXT2_RDTSCP
| CPUID_EXT2_NX
|
2192 .features
[FEAT_8000_0001_ECX
] =
2193 CPUID_EXT3_ABM
| CPUID_EXT3_LAHF_LM
| CPUID_EXT3_3DNOWPREFETCH
,
2194 .features
[FEAT_7_0_EBX
] =
2195 CPUID_7_0_EBX_FSGSBASE
| CPUID_7_0_EBX_BMI1
|
2196 CPUID_7_0_EBX_HLE
| CPUID_7_0_EBX_AVX2
| CPUID_7_0_EBX_SMEP
|
2197 CPUID_7_0_EBX_BMI2
| CPUID_7_0_EBX_ERMS
| CPUID_7_0_EBX_INVPCID
|
2198 CPUID_7_0_EBX_RTM
| CPUID_7_0_EBX_RDSEED
| CPUID_7_0_EBX_ADX
|
2199 CPUID_7_0_EBX_SMAP
| CPUID_7_0_EBX_MPX
,
2200 /* Missing: XSAVES (not supported by some Linux versions,
2201 * including v4.1 to v4.12).
2202 * KVM doesn't yet expose any XSAVES state save component,
2203 * and the only one defined in Skylake (processor tracing)
2204 * probably will block migration anyway.
2206 .features
[FEAT_XSAVE
] =
2207 CPUID_XSAVE_XSAVEOPT
| CPUID_XSAVE_XSAVEC
|
2208 CPUID_XSAVE_XGETBV1
,
2209 .features
[FEAT_6_EAX
] =
2211 .xlevel
= 0x80000008,
2212 .model_id
= "Intel Core Processor (Skylake)",
2215 .name
= "Skylake-Client-IBRS",
2217 .vendor
= CPUID_VENDOR_INTEL
,
2221 .features
[FEAT_1_EDX
] =
2222 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
2223 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
2224 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
2225 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
2226 CPUID_DE
| CPUID_FP87
,
2227 .features
[FEAT_1_ECX
] =
2228 CPUID_EXT_AVX
| CPUID_EXT_XSAVE
| CPUID_EXT_AES
|
2229 CPUID_EXT_POPCNT
| CPUID_EXT_X2APIC
| CPUID_EXT_SSE42
|
2230 CPUID_EXT_SSE41
| CPUID_EXT_CX16
| CPUID_EXT_SSSE3
|
2231 CPUID_EXT_PCLMULQDQ
| CPUID_EXT_SSE3
|
2232 CPUID_EXT_TSC_DEADLINE_TIMER
| CPUID_EXT_FMA
| CPUID_EXT_MOVBE
|
2233 CPUID_EXT_PCID
| CPUID_EXT_F16C
| CPUID_EXT_RDRAND
,
2234 .features
[FEAT_8000_0001_EDX
] =
2235 CPUID_EXT2_LM
| CPUID_EXT2_RDTSCP
| CPUID_EXT2_NX
|
2237 .features
[FEAT_8000_0001_ECX
] =
2238 CPUID_EXT3_ABM
| CPUID_EXT3_LAHF_LM
| CPUID_EXT3_3DNOWPREFETCH
,
2239 .features
[FEAT_7_0_EDX
] =
2240 CPUID_7_0_EDX_SPEC_CTRL
,
2241 .features
[FEAT_7_0_EBX
] =
2242 CPUID_7_0_EBX_FSGSBASE
| CPUID_7_0_EBX_BMI1
|
2243 CPUID_7_0_EBX_HLE
| CPUID_7_0_EBX_AVX2
| CPUID_7_0_EBX_SMEP
|
2244 CPUID_7_0_EBX_BMI2
| CPUID_7_0_EBX_ERMS
| CPUID_7_0_EBX_INVPCID
|
2245 CPUID_7_0_EBX_RTM
| CPUID_7_0_EBX_RDSEED
| CPUID_7_0_EBX_ADX
|
2246 CPUID_7_0_EBX_SMAP
| CPUID_7_0_EBX_MPX
,
2247 /* Missing: XSAVES (not supported by some Linux versions,
2248 * including v4.1 to v4.12).
2249 * KVM doesn't yet expose any XSAVES state save component,
2250 * and the only one defined in Skylake (processor tracing)
2251 * probably will block migration anyway.
2253 .features
[FEAT_XSAVE
] =
2254 CPUID_XSAVE_XSAVEOPT
| CPUID_XSAVE_XSAVEC
|
2255 CPUID_XSAVE_XGETBV1
,
2256 .features
[FEAT_6_EAX
] =
2258 .xlevel
= 0x80000008,
2259 .model_id
= "Intel Core Processor (Skylake, IBRS)",
2262 .name
= "Skylake-Server",
2264 .vendor
= CPUID_VENDOR_INTEL
,
2268 .features
[FEAT_1_EDX
] =
2269 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
2270 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
2271 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
2272 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
2273 CPUID_DE
| CPUID_FP87
,
2274 .features
[FEAT_1_ECX
] =
2275 CPUID_EXT_AVX
| CPUID_EXT_XSAVE
| CPUID_EXT_AES
|
2276 CPUID_EXT_POPCNT
| CPUID_EXT_X2APIC
| CPUID_EXT_SSE42
|
2277 CPUID_EXT_SSE41
| CPUID_EXT_CX16
| CPUID_EXT_SSSE3
|
2278 CPUID_EXT_PCLMULQDQ
| CPUID_EXT_SSE3
|
2279 CPUID_EXT_TSC_DEADLINE_TIMER
| CPUID_EXT_FMA
| CPUID_EXT_MOVBE
|
2280 CPUID_EXT_PCID
| CPUID_EXT_F16C
| CPUID_EXT_RDRAND
,
2281 .features
[FEAT_8000_0001_EDX
] =
2282 CPUID_EXT2_LM
| CPUID_EXT2_PDPE1GB
| CPUID_EXT2_RDTSCP
|
2283 CPUID_EXT2_NX
| CPUID_EXT2_SYSCALL
,
2284 .features
[FEAT_8000_0001_ECX
] =
2285 CPUID_EXT3_ABM
| CPUID_EXT3_LAHF_LM
| CPUID_EXT3_3DNOWPREFETCH
,
2286 .features
[FEAT_7_0_EBX
] =
2287 CPUID_7_0_EBX_FSGSBASE
| CPUID_7_0_EBX_BMI1
|
2288 CPUID_7_0_EBX_HLE
| CPUID_7_0_EBX_AVX2
| CPUID_7_0_EBX_SMEP
|
2289 CPUID_7_0_EBX_BMI2
| CPUID_7_0_EBX_ERMS
| CPUID_7_0_EBX_INVPCID
|
2290 CPUID_7_0_EBX_RTM
| CPUID_7_0_EBX_RDSEED
| CPUID_7_0_EBX_ADX
|
2291 CPUID_7_0_EBX_SMAP
| CPUID_7_0_EBX_MPX
| CPUID_7_0_EBX_CLWB
|
2292 CPUID_7_0_EBX_AVX512F
| CPUID_7_0_EBX_AVX512DQ
|
2293 CPUID_7_0_EBX_AVX512BW
| CPUID_7_0_EBX_AVX512CD
|
2294 CPUID_7_0_EBX_AVX512VL
| CPUID_7_0_EBX_CLFLUSHOPT
,
2295 /* Missing: XSAVES (not supported by some Linux versions,
2296 * including v4.1 to v4.12).
2297 * KVM doesn't yet expose any XSAVES state save component,
2298 * and the only one defined in Skylake (processor tracing)
2299 * probably will block migration anyway.
2301 .features
[FEAT_XSAVE
] =
2302 CPUID_XSAVE_XSAVEOPT
| CPUID_XSAVE_XSAVEC
|
2303 CPUID_XSAVE_XGETBV1
,
2304 .features
[FEAT_6_EAX
] =
2306 .xlevel
= 0x80000008,
2307 .model_id
= "Intel Xeon Processor (Skylake)",
2310 .name
= "Skylake-Server-IBRS",
2312 .vendor
= CPUID_VENDOR_INTEL
,
2316 .features
[FEAT_1_EDX
] =
2317 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
2318 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
2319 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
2320 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
2321 CPUID_DE
| CPUID_FP87
,
2322 .features
[FEAT_1_ECX
] =
2323 CPUID_EXT_AVX
| CPUID_EXT_XSAVE
| CPUID_EXT_AES
|
2324 CPUID_EXT_POPCNT
| CPUID_EXT_X2APIC
| CPUID_EXT_SSE42
|
2325 CPUID_EXT_SSE41
| CPUID_EXT_CX16
| CPUID_EXT_SSSE3
|
2326 CPUID_EXT_PCLMULQDQ
| CPUID_EXT_SSE3
|
2327 CPUID_EXT_TSC_DEADLINE_TIMER
| CPUID_EXT_FMA
| CPUID_EXT_MOVBE
|
2328 CPUID_EXT_PCID
| CPUID_EXT_F16C
| CPUID_EXT_RDRAND
,
2329 .features
[FEAT_8000_0001_EDX
] =
2330 CPUID_EXT2_LM
| CPUID_EXT2_PDPE1GB
| CPUID_EXT2_RDTSCP
|
2331 CPUID_EXT2_NX
| CPUID_EXT2_SYSCALL
,
2332 .features
[FEAT_8000_0001_ECX
] =
2333 CPUID_EXT3_ABM
| CPUID_EXT3_LAHF_LM
| CPUID_EXT3_3DNOWPREFETCH
,
2334 .features
[FEAT_7_0_EDX
] =
2335 CPUID_7_0_EDX_SPEC_CTRL
,
2336 .features
[FEAT_7_0_EBX
] =
2337 CPUID_7_0_EBX_FSGSBASE
| CPUID_7_0_EBX_BMI1
|
2338 CPUID_7_0_EBX_HLE
| CPUID_7_0_EBX_AVX2
| CPUID_7_0_EBX_SMEP
|
2339 CPUID_7_0_EBX_BMI2
| CPUID_7_0_EBX_ERMS
| CPUID_7_0_EBX_INVPCID
|
2340 CPUID_7_0_EBX_RTM
| CPUID_7_0_EBX_RDSEED
| CPUID_7_0_EBX_ADX
|
2341 CPUID_7_0_EBX_SMAP
| CPUID_7_0_EBX_MPX
| CPUID_7_0_EBX_CLWB
|
2342 CPUID_7_0_EBX_AVX512F
| CPUID_7_0_EBX_AVX512DQ
|
2343 CPUID_7_0_EBX_AVX512BW
| CPUID_7_0_EBX_AVX512CD
|
2344 CPUID_7_0_EBX_AVX512VL
,
2345 /* Missing: XSAVES (not supported by some Linux versions,
2346 * including v4.1 to v4.12).
2347 * KVM doesn't yet expose any XSAVES state save component,
2348 * and the only one defined in Skylake (processor tracing)
2349 * probably will block migration anyway.
2351 .features
[FEAT_XSAVE
] =
2352 CPUID_XSAVE_XSAVEOPT
| CPUID_XSAVE_XSAVEC
|
2353 CPUID_XSAVE_XGETBV1
,
2354 .features
[FEAT_6_EAX
] =
2356 .xlevel
= 0x80000008,
2357 .model_id
= "Intel Xeon Processor (Skylake, IBRS)",
2360 .name
= "KnightsMill",
2362 .vendor
= CPUID_VENDOR_INTEL
,
2366 .features
[FEAT_1_EDX
] =
2367 CPUID_VME
| CPUID_SS
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
|
2368 CPUID_MMX
| CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
|
2369 CPUID_MCA
| CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
|
2370 CPUID_CX8
| CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
|
2371 CPUID_PSE
| CPUID_DE
| CPUID_FP87
,
2372 .features
[FEAT_1_ECX
] =
2373 CPUID_EXT_AVX
| CPUID_EXT_XSAVE
| CPUID_EXT_AES
|
2374 CPUID_EXT_POPCNT
| CPUID_EXT_X2APIC
| CPUID_EXT_SSE42
|
2375 CPUID_EXT_SSE41
| CPUID_EXT_CX16
| CPUID_EXT_SSSE3
|
2376 CPUID_EXT_PCLMULQDQ
| CPUID_EXT_SSE3
|
2377 CPUID_EXT_TSC_DEADLINE_TIMER
| CPUID_EXT_FMA
| CPUID_EXT_MOVBE
|
2378 CPUID_EXT_F16C
| CPUID_EXT_RDRAND
,
2379 .features
[FEAT_8000_0001_EDX
] =
2380 CPUID_EXT2_LM
| CPUID_EXT2_PDPE1GB
| CPUID_EXT2_RDTSCP
|
2381 CPUID_EXT2_NX
| CPUID_EXT2_SYSCALL
,
2382 .features
[FEAT_8000_0001_ECX
] =
2383 CPUID_EXT3_ABM
| CPUID_EXT3_LAHF_LM
| CPUID_EXT3_3DNOWPREFETCH
,
2384 .features
[FEAT_7_0_EBX
] =
2385 CPUID_7_0_EBX_FSGSBASE
| CPUID_7_0_EBX_BMI1
| CPUID_7_0_EBX_AVX2
|
2386 CPUID_7_0_EBX_SMEP
| CPUID_7_0_EBX_BMI2
| CPUID_7_0_EBX_ERMS
|
2387 CPUID_7_0_EBX_RDSEED
| CPUID_7_0_EBX_ADX
| CPUID_7_0_EBX_AVX512F
|
2388 CPUID_7_0_EBX_AVX512CD
| CPUID_7_0_EBX_AVX512PF
|
2389 CPUID_7_0_EBX_AVX512ER
,
2390 .features
[FEAT_7_0_ECX
] =
2391 CPUID_7_0_ECX_AVX512_VPOPCNTDQ
,
2392 .features
[FEAT_7_0_EDX
] =
2393 CPUID_7_0_EDX_AVX512_4VNNIW
| CPUID_7_0_EDX_AVX512_4FMAPS
,
2394 .features
[FEAT_XSAVE
] =
2395 CPUID_XSAVE_XSAVEOPT
,
2396 .features
[FEAT_6_EAX
] =
2398 .xlevel
= 0x80000008,
2399 .model_id
= "Intel Xeon Phi Processor (Knights Mill)",
2402 .name
= "Opteron_G1",
2404 .vendor
= CPUID_VENDOR_AMD
,
2408 .features
[FEAT_1_EDX
] =
2409 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
2410 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
2411 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
2412 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
2413 CPUID_DE
| CPUID_FP87
,
2414 .features
[FEAT_1_ECX
] =
2416 .features
[FEAT_8000_0001_EDX
] =
2417 CPUID_EXT2_LM
| CPUID_EXT2_NX
| CPUID_EXT2_SYSCALL
,
2418 .xlevel
= 0x80000008,
2419 .model_id
= "AMD Opteron 240 (Gen 1 Class Opteron)",
2422 .name
= "Opteron_G2",
2424 .vendor
= CPUID_VENDOR_AMD
,
2428 .features
[FEAT_1_EDX
] =
2429 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
2430 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
2431 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
2432 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
2433 CPUID_DE
| CPUID_FP87
,
2434 .features
[FEAT_1_ECX
] =
2435 CPUID_EXT_CX16
| CPUID_EXT_SSE3
,
2436 /* Missing: CPUID_EXT2_RDTSCP */
2437 .features
[FEAT_8000_0001_EDX
] =
2438 CPUID_EXT2_LM
| CPUID_EXT2_NX
| CPUID_EXT2_SYSCALL
,
2439 .features
[FEAT_8000_0001_ECX
] =
2440 CPUID_EXT3_SVM
| CPUID_EXT3_LAHF_LM
,
2441 .xlevel
= 0x80000008,
2442 .model_id
= "AMD Opteron 22xx (Gen 2 Class Opteron)",
2445 .name
= "Opteron_G3",
2447 .vendor
= CPUID_VENDOR_AMD
,
2451 .features
[FEAT_1_EDX
] =
2452 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
2453 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
2454 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
2455 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
2456 CPUID_DE
| CPUID_FP87
,
2457 .features
[FEAT_1_ECX
] =
2458 CPUID_EXT_POPCNT
| CPUID_EXT_CX16
| CPUID_EXT_MONITOR
|
2460 /* Missing: CPUID_EXT2_RDTSCP */
2461 .features
[FEAT_8000_0001_EDX
] =
2462 CPUID_EXT2_LM
| CPUID_EXT2_NX
| CPUID_EXT2_SYSCALL
,
2463 .features
[FEAT_8000_0001_ECX
] =
2464 CPUID_EXT3_MISALIGNSSE
| CPUID_EXT3_SSE4A
|
2465 CPUID_EXT3_ABM
| CPUID_EXT3_SVM
| CPUID_EXT3_LAHF_LM
,
2466 .xlevel
= 0x80000008,
2467 .model_id
= "AMD Opteron 23xx (Gen 3 Class Opteron)",
2470 .name
= "Opteron_G4",
2472 .vendor
= CPUID_VENDOR_AMD
,
2476 .features
[FEAT_1_EDX
] =
2477 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
2478 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
2479 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
2480 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
2481 CPUID_DE
| CPUID_FP87
,
2482 .features
[FEAT_1_ECX
] =
2483 CPUID_EXT_AVX
| CPUID_EXT_XSAVE
| CPUID_EXT_AES
|
2484 CPUID_EXT_POPCNT
| CPUID_EXT_SSE42
| CPUID_EXT_SSE41
|
2485 CPUID_EXT_CX16
| CPUID_EXT_SSSE3
| CPUID_EXT_PCLMULQDQ
|
2487 /* Missing: CPUID_EXT2_RDTSCP */
2488 .features
[FEAT_8000_0001_EDX
] =
2489 CPUID_EXT2_LM
| CPUID_EXT2_PDPE1GB
| CPUID_EXT2_NX
|
2491 .features
[FEAT_8000_0001_ECX
] =
2492 CPUID_EXT3_FMA4
| CPUID_EXT3_XOP
|
2493 CPUID_EXT3_3DNOWPREFETCH
| CPUID_EXT3_MISALIGNSSE
|
2494 CPUID_EXT3_SSE4A
| CPUID_EXT3_ABM
| CPUID_EXT3_SVM
|
2497 .xlevel
= 0x8000001A,
2498 .model_id
= "AMD Opteron 62xx class CPU",
2501 .name
= "Opteron_G5",
2503 .vendor
= CPUID_VENDOR_AMD
,
2507 .features
[FEAT_1_EDX
] =
2508 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
2509 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
2510 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
2511 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
2512 CPUID_DE
| CPUID_FP87
,
2513 .features
[FEAT_1_ECX
] =
2514 CPUID_EXT_F16C
| CPUID_EXT_AVX
| CPUID_EXT_XSAVE
|
2515 CPUID_EXT_AES
| CPUID_EXT_POPCNT
| CPUID_EXT_SSE42
|
2516 CPUID_EXT_SSE41
| CPUID_EXT_CX16
| CPUID_EXT_FMA
|
2517 CPUID_EXT_SSSE3
| CPUID_EXT_PCLMULQDQ
| CPUID_EXT_SSE3
,
2518 /* Missing: CPUID_EXT2_RDTSCP */
2519 .features
[FEAT_8000_0001_EDX
] =
2520 CPUID_EXT2_LM
| CPUID_EXT2_PDPE1GB
| CPUID_EXT2_NX
|
2522 .features
[FEAT_8000_0001_ECX
] =
2523 CPUID_EXT3_TBM
| CPUID_EXT3_FMA4
| CPUID_EXT3_XOP
|
2524 CPUID_EXT3_3DNOWPREFETCH
| CPUID_EXT3_MISALIGNSSE
|
2525 CPUID_EXT3_SSE4A
| CPUID_EXT3_ABM
| CPUID_EXT3_SVM
|
2528 .xlevel
= 0x8000001A,
2529 .model_id
= "AMD Opteron 63xx class CPU",
2534 .vendor
= CPUID_VENDOR_AMD
,
2538 .features
[FEAT_1_EDX
] =
2539 CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
| CPUID_CLFLUSH
|
2540 CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
| CPUID_PGE
|
2541 CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
| CPUID_MCE
|
2542 CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
| CPUID_DE
|
2543 CPUID_VME
| CPUID_FP87
,
2544 .features
[FEAT_1_ECX
] =
2545 CPUID_EXT_RDRAND
| CPUID_EXT_F16C
| CPUID_EXT_AVX
|
2546 CPUID_EXT_XSAVE
| CPUID_EXT_AES
| CPUID_EXT_POPCNT
|
2547 CPUID_EXT_MOVBE
| CPUID_EXT_SSE42
| CPUID_EXT_SSE41
|
2548 CPUID_EXT_CX16
| CPUID_EXT_FMA
| CPUID_EXT_SSSE3
|
2549 CPUID_EXT_MONITOR
| CPUID_EXT_PCLMULQDQ
| CPUID_EXT_SSE3
,
2550 .features
[FEAT_8000_0001_EDX
] =
2551 CPUID_EXT2_LM
| CPUID_EXT2_RDTSCP
| CPUID_EXT2_PDPE1GB
|
2552 CPUID_EXT2_FFXSR
| CPUID_EXT2_MMXEXT
| CPUID_EXT2_NX
|
2554 .features
[FEAT_8000_0001_ECX
] =
2555 CPUID_EXT3_OSVW
| CPUID_EXT3_3DNOWPREFETCH
|
2556 CPUID_EXT3_MISALIGNSSE
| CPUID_EXT3_SSE4A
| CPUID_EXT3_ABM
|
2557 CPUID_EXT3_CR8LEG
| CPUID_EXT3_SVM
| CPUID_EXT3_LAHF_LM
,
2558 .features
[FEAT_7_0_EBX
] =
2559 CPUID_7_0_EBX_FSGSBASE
| CPUID_7_0_EBX_BMI1
| CPUID_7_0_EBX_AVX2
|
2560 CPUID_7_0_EBX_SMEP
| CPUID_7_0_EBX_BMI2
| CPUID_7_0_EBX_RDSEED
|
2561 CPUID_7_0_EBX_ADX
| CPUID_7_0_EBX_SMAP
| CPUID_7_0_EBX_CLFLUSHOPT
|
2562 CPUID_7_0_EBX_SHA_NI
,
2563 /* Missing: XSAVES (not supported by some Linux versions,
2564 * including v4.1 to v4.12).
2565 * KVM doesn't yet expose any XSAVES state save component.
2567 .features
[FEAT_XSAVE
] =
2568 CPUID_XSAVE_XSAVEOPT
| CPUID_XSAVE_XSAVEC
|
2569 CPUID_XSAVE_XGETBV1
,
2570 .features
[FEAT_6_EAX
] =
2572 .xlevel
= 0x8000000A,
2573 .model_id
= "AMD EPYC Processor",
2574 .cache_info
= &epyc_cache_info
,
2577 .name
= "EPYC-IBPB",
2579 .vendor
= CPUID_VENDOR_AMD
,
2583 .features
[FEAT_1_EDX
] =
2584 CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
| CPUID_CLFLUSH
|
2585 CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
| CPUID_PGE
|
2586 CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
| CPUID_MCE
|
2587 CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
| CPUID_DE
|
2588 CPUID_VME
| CPUID_FP87
,
2589 .features
[FEAT_1_ECX
] =
2590 CPUID_EXT_RDRAND
| CPUID_EXT_F16C
| CPUID_EXT_AVX
|
2591 CPUID_EXT_XSAVE
| CPUID_EXT_AES
| CPUID_EXT_POPCNT
|
2592 CPUID_EXT_MOVBE
| CPUID_EXT_SSE42
| CPUID_EXT_SSE41
|
2593 CPUID_EXT_CX16
| CPUID_EXT_FMA
| CPUID_EXT_SSSE3
|
2594 CPUID_EXT_MONITOR
| CPUID_EXT_PCLMULQDQ
| CPUID_EXT_SSE3
,
2595 .features
[FEAT_8000_0001_EDX
] =
2596 CPUID_EXT2_LM
| CPUID_EXT2_RDTSCP
| CPUID_EXT2_PDPE1GB
|
2597 CPUID_EXT2_FFXSR
| CPUID_EXT2_MMXEXT
| CPUID_EXT2_NX
|
2599 .features
[FEAT_8000_0001_ECX
] =
2600 CPUID_EXT3_OSVW
| CPUID_EXT3_3DNOWPREFETCH
|
2601 CPUID_EXT3_MISALIGNSSE
| CPUID_EXT3_SSE4A
| CPUID_EXT3_ABM
|
2602 CPUID_EXT3_CR8LEG
| CPUID_EXT3_SVM
| CPUID_EXT3_LAHF_LM
,
2603 .features
[FEAT_8000_0008_EBX
] =
2604 CPUID_8000_0008_EBX_IBPB
,
2605 .features
[FEAT_7_0_EBX
] =
2606 CPUID_7_0_EBX_FSGSBASE
| CPUID_7_0_EBX_BMI1
| CPUID_7_0_EBX_AVX2
|
2607 CPUID_7_0_EBX_SMEP
| CPUID_7_0_EBX_BMI2
| CPUID_7_0_EBX_RDSEED
|
2608 CPUID_7_0_EBX_ADX
| CPUID_7_0_EBX_SMAP
| CPUID_7_0_EBX_CLFLUSHOPT
|
2609 CPUID_7_0_EBX_SHA_NI
,
2610 /* Missing: XSAVES (not supported by some Linux versions,
2611 * including v4.1 to v4.12).
2612 * KVM doesn't yet expose any XSAVES state save component.
2614 .features
[FEAT_XSAVE
] =
2615 CPUID_XSAVE_XSAVEOPT
| CPUID_XSAVE_XSAVEC
|
2616 CPUID_XSAVE_XGETBV1
,
2617 .features
[FEAT_6_EAX
] =
2619 .xlevel
= 0x8000000A,
2620 .model_id
= "AMD EPYC Processor (with IBPB)",
2621 .cache_info
= &epyc_cache_info
,
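/*
 * Illustrative sketch only (not part of the original table above): the shape
 * of a single X86CPUDefinition entry, using only fields that appear in the
 * surrounding definitions.  The function name and all numeric values below
 * (family, model, stepping, level) are placeholders, not data taken from any
 * real model in this file.
 */
static inline X86CPUDefinition x86_cpudef_layout_sketch(void) /* hypothetical */
{
    X86CPUDefinition def = {
        .name = "Example-CPU",
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,                       /* placeholder value */
        .model = 0,                        /* placeholder value */
        .stepping = 0,                     /* placeholder value */
        .level = 0xd,                      /* placeholder value */
        .xlevel = 0x80000008,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .model_id = "Example CPU (layout illustration only)",
    };

    return def;
}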
typedef struct PropValue {
    const char *prop, *value;
} PropValue;

/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "monitor", "off" },
};

/* TCG-specific defaults that override all CPU models when using TCG.
 */
static PropValue tcg_default_props[] = {
};
void x86_cpu_change_kvm_default(const char *prop, const char *value)
{
    PropValue *pv;
    for (pv = kvm_default_props; pv->prop; pv++) {
        if (!strcmp(pv->prop, prop)) {
            pv->value = value;
            break;
        }
    }

    /* It is valid to call this function only for properties that
     * are already present in the kvm_default_props table.
     */
    assert(pv->prop);
}

static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only);
static bool lmce_supported(void)
{
    uint64_t mce_cap = 0;

    if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
        return false;
    }
    return !!(mce_cap & MCG_LMCE_P);
}
#define CPUID_MODEL_ID_SZ 48

/**
 * cpu_x86_fill_model_id:
 * Get CPUID model ID string from host CPU.
 *
 * @str should have at least CPUID_MODEL_ID_SZ bytes
 *
 * The function does NOT add a null terminator to the string.
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
    int i;

    for (i = 0; i < 3; i++) {
        host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
        memcpy(str + i * 16 + 0, &eax, 4);
        memcpy(str + i * 16 + 4, &ebx, 4);
        memcpy(str + i * 16 + 8, &ecx, 4);
        memcpy(str + i * 16 + 12, &edx, 4);
    }
    return 0;
}
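/*
 * Usage sketch (illustration only, hypothetical helper name): since
 * cpu_x86_fill_model_id() writes exactly CPUID_MODEL_ID_SZ bytes and does not
 * terminate the string, callers reserve one extra byte and zero it, as the
 * zero-initialized model_id buffer in max_x86_cpu_initfn() below does.
 */
static inline void example_fill_model_id_sketch(void)
{
    char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };  /* extra byte stays '\0' */

    cpu_x86_fill_model_id(model_id);
    /* model_id is now a NUL-terminated copy of CPUID.80000002h..80000004h */
}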
static Property max_x86_cpu_properties[] = {
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};
static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->model_description =
        "Enables all features supported by the accelerator in the current host";

    dc->props = max_x86_cpu_properties;
}
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
2733 static void max_x86_cpu_initfn(Object
*obj
)
2735 X86CPU
*cpu
= X86_CPU(obj
);
2736 CPUX86State
*env
= &cpu
->env
;
2737 KVMState
*s
= kvm_state
;
2739 /* We can't fill the features array here because we don't know yet if
2740 * "migratable" is true or false.
2742 cpu
->max_features
= true;
2744 if (accel_uses_host_cpuid()) {
2745 char vendor
[CPUID_VENDOR_SZ
+ 1] = { 0 };
2746 char model_id
[CPUID_MODEL_ID_SZ
+ 1] = { 0 };
2747 int family
, model
, stepping
;
2748 X86CPUDefinition host_cpudef
= { };
2749 uint32_t eax
= 0, ebx
= 0, ecx
= 0, edx
= 0;
2751 host_cpuid(0x0, 0, &eax
, &ebx
, &ecx
, &edx
);
2752 x86_cpu_vendor_words2str(host_cpudef
.vendor
, ebx
, edx
, ecx
);
2754 host_vendor_fms(vendor
, &family
, &model
, &stepping
);
2756 cpu_x86_fill_model_id(model_id
);
2758 object_property_set_str(OBJECT(cpu
), vendor
, "vendor", &error_abort
);
2759 object_property_set_int(OBJECT(cpu
), family
, "family", &error_abort
);
2760 object_property_set_int(OBJECT(cpu
), model
, "model", &error_abort
);
2761 object_property_set_int(OBJECT(cpu
), stepping
, "stepping",
2763 object_property_set_str(OBJECT(cpu
), model_id
, "model-id",
2766 if (kvm_enabled()) {
2767 env
->cpuid_min_level
=
2768 kvm_arch_get_supported_cpuid(s
, 0x0, 0, R_EAX
);
2769 env
->cpuid_min_xlevel
=
2770 kvm_arch_get_supported_cpuid(s
, 0x80000000, 0, R_EAX
);
2771 env
->cpuid_min_xlevel2
=
2772 kvm_arch_get_supported_cpuid(s
, 0xC0000000, 0, R_EAX
);
2774 env
->cpuid_min_level
=
2775 hvf_get_supported_cpuid(0x0, 0, R_EAX
);
2776 env
->cpuid_min_xlevel
=
2777 hvf_get_supported_cpuid(0x80000000, 0, R_EAX
);
2778 env
->cpuid_min_xlevel2
=
2779 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX
);
2782 if (lmce_supported()) {
2783 object_property_set_bool(OBJECT(cpu
), true, "lmce", &error_abort
);
2786 object_property_set_str(OBJECT(cpu
), CPUID_VENDOR_AMD
,
2787 "vendor", &error_abort
);
2788 object_property_set_int(OBJECT(cpu
), 6, "family", &error_abort
);
2789 object_property_set_int(OBJECT(cpu
), 6, "model", &error_abort
);
2790 object_property_set_int(OBJECT(cpu
), 3, "stepping", &error_abort
);
2791 object_property_set_str(OBJECT(cpu
),
2792 "QEMU TCG CPU version " QEMU_HW_VERSION
,
2793 "model-id", &error_abort
);
2796 object_property_set_bool(OBJECT(cpu
), true, "pmu", &error_abort
);
static const TypeInfo max_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("max"),
    .parent = TYPE_X86_CPU,
    .instance_init = max_x86_cpu_initfn,
    .class_init = max_x86_cpu_class_init,
};
#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->host_cpuid_required = true;

    if (kvm_enabled()) {
        xcc->model_description =
            "KVM processor with all supported host features ";
    } else if (hvf_enabled()) {
        xcc->model_description =
            "HVF processor with all supported host features ";
    }
}
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = X86_CPU_TYPE_NAME("max"),
    .class_init = host_x86_cpu_class_init,
};
static void report_unavailable_features(FeatureWord w, uint32_t mask)
{
    FeatureWordInfo *f = &feature_word_info[w];
    int i;

    for (i = 0; i < 32; ++i) {
        if ((1UL << i) & mask) {
            const char *reg = get_register_name_32(f->cpuid_reg);
            warn_report("%s doesn't support requested feature: "
                        "CPUID.%02XH:%s%s%s [bit %d]",
                        accel_uses_host_cpuid() ? "host" : "TCG",
                        f->cpuid_eax, reg,
                        f->feat_names[i] ? "." : "",
                        f->feat_names[i] ? f->feat_names[i] : "", i);
        }
    }
}
static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
                                         const char *name, void *opaque,
                                         Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int64_t value;

    value = (env->cpuid_version >> 8) & 0xf;
    if (value == 0xf) {
        value += (env->cpuid_version >> 20) & 0xff;
    }
    visit_type_int(v, name, &value, errp);
}
static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
                                         const char *name, void *opaque,
                                         Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff + 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    env->cpuid_version &= ~0xff00f00;
    if (value > 0x0f) {
        env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
    } else {
        env->cpuid_version |= value << 8;
    }
}
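/*
 * Worked example (sketch, not part of the original source; hypothetical
 * helper name) of the encoding used by the family accessors above: the
 * displayed family is the base family in bits 11:8, plus the extended family
 * in bits 27:20 when the base family is 0xf.
 */
static inline int64_t example_decode_family_sketch(uint32_t cpuid_version)
{
    int64_t family = (cpuid_version >> 8) & 0xf;     /* base family */

    if (family == 0xf) {
        family += (cpuid_version >> 20) & 0xff;      /* + extended family */
    }
    return family;                                   /* e.g. 0xf + 0x6 = 21 */
}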
2895 static void x86_cpuid_version_get_model(Object
*obj
, Visitor
*v
,
2896 const char *name
, void *opaque
,
2899 X86CPU
*cpu
= X86_CPU(obj
);
2900 CPUX86State
*env
= &cpu
->env
;
2903 value
= (env
->cpuid_version
>> 4) & 0xf;
2904 value
|= ((env
->cpuid_version
>> 16) & 0xf) << 4;
2905 visit_type_int(v
, name
, &value
, errp
);
2908 static void x86_cpuid_version_set_model(Object
*obj
, Visitor
*v
,
2909 const char *name
, void *opaque
,
2912 X86CPU
*cpu
= X86_CPU(obj
);
2913 CPUX86State
*env
= &cpu
->env
;
2914 const int64_t min
= 0;
2915 const int64_t max
= 0xff;
2916 Error
*local_err
= NULL
;
2919 visit_type_int(v
, name
, &value
, &local_err
);
2921 error_propagate(errp
, local_err
);
2924 if (value
< min
|| value
> max
) {
2925 error_setg(errp
, QERR_PROPERTY_VALUE_OUT_OF_RANGE
, "",
2926 name
? name
: "null", value
, min
, max
);
2930 env
->cpuid_version
&= ~0xf00f0;
2931 env
->cpuid_version
|= ((value
& 0xf) << 4) | ((value
>> 4) << 16);
2934 static void x86_cpuid_version_get_stepping(Object
*obj
, Visitor
*v
,
2935 const char *name
, void *opaque
,
2938 X86CPU
*cpu
= X86_CPU(obj
);
2939 CPUX86State
*env
= &cpu
->env
;
2942 value
= env
->cpuid_version
& 0xf;
2943 visit_type_int(v
, name
, &value
, errp
);
2946 static void x86_cpuid_version_set_stepping(Object
*obj
, Visitor
*v
,
2947 const char *name
, void *opaque
,
2950 X86CPU
*cpu
= X86_CPU(obj
);
2951 CPUX86State
*env
= &cpu
->env
;
2952 const int64_t min
= 0;
2953 const int64_t max
= 0xf;
2954 Error
*local_err
= NULL
;
2957 visit_type_int(v
, name
, &value
, &local_err
);
2959 error_propagate(errp
, local_err
);
2962 if (value
< min
|| value
> max
) {
2963 error_setg(errp
, QERR_PROPERTY_VALUE_OUT_OF_RANGE
, "",
2964 name
? name
: "null", value
, min
, max
);
2968 env
->cpuid_version
&= ~0xf;
2969 env
->cpuid_version
|= value
& 0xf;
static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    char *value;

    value = g_malloc(CPUID_VENDOR_SZ + 1);
    x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
                             env->cpuid_vendor3);
    return value;
}

static void x86_cpuid_set_vendor(Object *obj, const char *value,
                                 Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int i;

    if (strlen(value) != CPUID_VENDOR_SZ) {
        error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
        return;
    }

    env->cpuid_vendor1 = 0;
    env->cpuid_vendor2 = 0;
    env->cpuid_vendor3 = 0;
    for (i = 0; i < 4; i++) {
        env->cpuid_vendor1 |= ((uint8_t)value[i]) << (8 * i);
        env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
        env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
    }
}
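/*
 * Sketch (illustration only, hypothetical helper name) of the packing done by
 * x86_cpuid_set_vendor() above: the 12-character vendor string is split into
 * three little-endian 32-bit words, the values CPUID leaf 0 returns in EBX,
 * EDX and ECX.
 */
static inline uint32_t example_pack_vendor_sketch(void)
{
    const char *vendor = "GenuineIntel";
    uint32_t words[3] = { 0, 0, 0 };
    int i;

    for (i = 0; i < 4; i++) {
        words[0] |= ((uint8_t)vendor[i]) << (8 * i);      /* EBX = "Genu" */
        words[1] |= ((uint8_t)vendor[i + 4]) << (8 * i);  /* EDX = "ineI" */
        words[2] |= ((uint8_t)vendor[i + 8]) << (8 * i);  /* ECX = "ntel" */
    }
    /* words[] is now { 0x756e6547, 0x49656e69, 0x6c65746e } */
    return words[0];
}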
3006 static char *x86_cpuid_get_model_id(Object
*obj
, Error
**errp
)
3008 X86CPU
*cpu
= X86_CPU(obj
);
3009 CPUX86State
*env
= &cpu
->env
;
3013 value
= g_malloc(48 + 1);
3014 for (i
= 0; i
< 48; i
++) {
3015 value
[i
] = env
->cpuid_model
[i
>> 2] >> (8 * (i
& 3));
3021 static void x86_cpuid_set_model_id(Object
*obj
, const char *model_id
,
3024 X86CPU
*cpu
= X86_CPU(obj
);
3025 CPUX86State
*env
= &cpu
->env
;
3028 if (model_id
== NULL
) {
3031 len
= strlen(model_id
);
3032 memset(env
->cpuid_model
, 0, 48);
3033 for (i
= 0; i
< 48; i
++) {
3037 c
= (uint8_t)model_id
[i
];
3039 env
->cpuid_model
[i
>> 2] |= c
<< (8 * (i
& 3));
static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    int64_t value;

    value = cpu->env.tsc_khz * 1000;
    visit_type_int(v, name, &value, errp);
}

static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    const int64_t min = 0;
    const int64_t max = INT64_MAX;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
}
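/*
 * Sketch (not part of the original source; hypothetical helper name): the
 * tsc-frequency property above is supplied in Hz but stored in kHz.
 */
static inline int64_t example_tsc_freq_to_khz_sketch(int64_t freq_hz)
{
    return freq_hz / 1000;          /* e.g. 2500000000 Hz -> 2500000 kHz */
}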
3076 /* Generic getter for "feature-words" and "filtered-features" properties */
3077 static void x86_cpu_get_feature_words(Object
*obj
, Visitor
*v
,
3078 const char *name
, void *opaque
,
3081 uint32_t *array
= (uint32_t *)opaque
;
3083 X86CPUFeatureWordInfo word_infos
[FEATURE_WORDS
] = { };
3084 X86CPUFeatureWordInfoList list_entries
[FEATURE_WORDS
] = { };
3085 X86CPUFeatureWordInfoList
*list
= NULL
;
3087 for (w
= 0; w
< FEATURE_WORDS
; w
++) {
3088 FeatureWordInfo
*wi
= &feature_word_info
[w
];
3089 X86CPUFeatureWordInfo
*qwi
= &word_infos
[w
];
3090 qwi
->cpuid_input_eax
= wi
->cpuid_eax
;
3091 qwi
->has_cpuid_input_ecx
= wi
->cpuid_needs_ecx
;
3092 qwi
->cpuid_input_ecx
= wi
->cpuid_ecx
;
3093 qwi
->cpuid_register
= x86_reg_info_32
[wi
->cpuid_reg
].qapi_enum
;
3094 qwi
->features
= array
[w
];
3096 /* List will be in reverse order, but order shouldn't matter */
3097 list_entries
[w
].next
= list
;
3098 list_entries
[w
].value
= &word_infos
[w
];
3099 list
= &list_entries
[w
];
3102 visit_type_X86CPUFeatureWordInfoList(v
, "feature-words", &list
, errp
);
3105 static void x86_get_hv_spinlocks(Object
*obj
, Visitor
*v
, const char *name
,
3106 void *opaque
, Error
**errp
)
3108 X86CPU
*cpu
= X86_CPU(obj
);
3109 int64_t value
= cpu
->hyperv_spinlock_attempts
;
3111 visit_type_int(v
, name
, &value
, errp
);
3114 static void x86_set_hv_spinlocks(Object
*obj
, Visitor
*v
, const char *name
,
3115 void *opaque
, Error
**errp
)
3117 const int64_t min
= 0xFFF;
3118 const int64_t max
= UINT_MAX
;
3119 X86CPU
*cpu
= X86_CPU(obj
);
3123 visit_type_int(v
, name
, &value
, &err
);
3125 error_propagate(errp
, err
);
3129 if (value
< min
|| value
> max
) {
3130 error_setg(errp
, "Property %s.%s doesn't take value %" PRId64
3131 " (minimum: %" PRId64
", maximum: %" PRId64
")",
3132 object_get_typename(obj
), name
? name
: "null",
3136 cpu
->hyperv_spinlock_attempts
= value
;
3139 static const PropertyInfo qdev_prop_spinlocks
= {
3141 .get
= x86_get_hv_spinlocks
,
3142 .set
= x86_set_hv_spinlocks
,
/* Convert all '_' in a feature string option name to '-', to make feature
 * name conform to QOM property naming rule, which uses '-' instead of '_'.
 */
static inline void feat2prop(char *s)
{
    while ((s = strchr(s, '_'))) {
        *s = '-';
    }
}
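/*
 * Usage sketch (illustration only, hypothetical helper name): feat2prop()
 * rewrites the string in place, so a legacy spelling such as "tsc_deadline"
 * becomes the QOM property name "tsc-deadline" before it is looked up.
 */
static inline void example_feat2prop_sketch(void)
{
    char feat[] = "tsc_deadline";

    feat2prop(feat);                /* feat is now "tsc-deadline" */
}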
/* Return the feature property name for a feature flag bit */
static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
{
    /* XSAVE components are automatically enabled by other features,
     * so return the original feature name instead
     */
    if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
        int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;

        if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
            x86_ext_save_areas[comp].bits) {
            w = x86_ext_save_areas[comp].feature;
            bitnr = ctz32(x86_ext_save_areas[comp].bits);
        }
    }

    assert(w < FEATURE_WORDS);
    return feature_word_info[w].feat_names[bitnr];
}
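/*
 * Usage sketch (not in the original source; hypothetical helper name): for an
 * ordinary feature word the helper above simply indexes feat_names[], while
 * FEAT_XSAVE_COMP_LO/HI bits are mapped back to the feature that owns the
 * XSAVE component.
 */
static inline const char *example_feature_name_sketch(void)
{
    /* CPUID.07H:EBX bit 9 is ERMS, so this returns the name "erms" */
    return x86_cpu_feature_name(FEAT_7_0_EBX, 9);
}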
/* Compatibility hack to maintain legacy +-feat semantic,
 * where +-feat overwrites any feature set by
 * feat=on|feat even if the latter is parsed after +-feat
 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
 */
static GList *plus_features, *minus_features;

static gint compare_string(gconstpointer a, gconstpointer b)
{
    return g_strcmp0(a, b);
}
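/*
 * Walkthrough sketch (illustration only, hypothetical helper name) of the
 * syntax accepted by x86_cpu_parse_featurestr() below: a feature list mixes
 * legacy "+feat"/"-feat" flags with "prop=value" pairs, and underscores are
 * normalized to dashes before the property is registered as a global.
 */
static inline void example_featurestr_walkthrough_sketch(void)
{
    char features[] = "+avx2,-x2apic,tsc_freq=2.5G";
    char *featurestr;

    for (featurestr = strtok(features, ",");
         featurestr;
         featurestr = strtok(NULL, ",")) {
        if (featurestr[0] == '+') {
            /* legacy enable syntax: "avx2" goes on the plus_features list */
        } else if (featurestr[0] == '-') {
            /* legacy disable syntax: "x2apic" goes on the minus_features list */
        } else {
            feat2prop(featurestr);  /* "tsc_freq=2.5G" -> "tsc-freq=2.5G" */
            /* "tsc-freq" is then rewritten to the "tsc-frequency" property */
        }
    }
}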
3188 /* Parse "+feature,-feature,feature=foo" CPU feature string
3190 static void x86_cpu_parse_featurestr(const char *typename
, char *features
,
3193 char *featurestr
; /* Single 'key=value" string being parsed */
3194 static bool cpu_globals_initialized
;
3195 bool ambiguous
= false;
3197 if (cpu_globals_initialized
) {
3200 cpu_globals_initialized
= true;
3206 for (featurestr
= strtok(features
, ",");
3208 featurestr
= strtok(NULL
, ",")) {
3210 const char *val
= NULL
;
3213 GlobalProperty
*prop
;
3215 /* Compatibility syntax: */
3216 if (featurestr
[0] == '+') {
3217 plus_features
= g_list_append(plus_features
,
3218 g_strdup(featurestr
+ 1));
3220 } else if (featurestr
[0] == '-') {
3221 minus_features
= g_list_append(minus_features
,
3222 g_strdup(featurestr
+ 1));
3226 eq
= strchr(featurestr
, '=');
3234 feat2prop(featurestr
);
3237 if (g_list_find_custom(plus_features
, name
, compare_string
)) {
3238 warn_report("Ambiguous CPU model string. "
3239 "Don't mix both \"+%s\" and \"%s=%s\"",
3243 if (g_list_find_custom(minus_features
, name
, compare_string
)) {
3244 warn_report("Ambiguous CPU model string. "
3245 "Don't mix both \"-%s\" and \"%s=%s\"",
3251 if (!strcmp(name
, "tsc-freq")) {
3255 ret
= qemu_strtosz_metric(val
, NULL
, &tsc_freq
);
3256 if (ret
< 0 || tsc_freq
> INT64_MAX
) {
3257 error_setg(errp
, "bad numerical value %s", val
);
3260 snprintf(num
, sizeof(num
), "%" PRId64
, tsc_freq
);
3262 name
= "tsc-frequency";
3265 prop
= g_new0(typeof(*prop
), 1);
3266 prop
->driver
= typename
;
3267 prop
->property
= g_strdup(name
);
3268 prop
->value
= g_strdup(val
);
3269 prop
->errp
= &error_fatal
;
3270 qdev_prop_register_global(prop
);
3274 warn_report("Compatibility of ambiguous CPU model "
3275 "strings won't be kept on future QEMU versions");
3279 static void x86_cpu_expand_features(X86CPU
*cpu
, Error
**errp
);
3280 static int x86_cpu_filter_features(X86CPU
*cpu
);
3282 /* Check for missing features that may prevent the CPU class from
3283 * running using the current machine and accelerator.
3285 static void x86_cpu_class_check_missing_features(X86CPUClass
*xcc
,
3286 strList
**missing_feats
)
3291 strList
**next
= missing_feats
;
3293 if (xcc
->host_cpuid_required
&& !accel_uses_host_cpuid()) {
3294 strList
*new = g_new0(strList
, 1);
3295 new->value
= g_strdup("kvm");
3296 *missing_feats
= new;
3300 xc
= X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc
))));
3302 x86_cpu_expand_features(xc
, &err
);
3304 /* Errors at x86_cpu_expand_features should never happen,
3305 * but in case it does, just report the model as not
3306 * runnable at all using the "type" property.
3308 strList
*new = g_new0(strList
, 1);
3309 new->value
= g_strdup("type");
3314 x86_cpu_filter_features(xc
);
3316 for (w
= 0; w
< FEATURE_WORDS
; w
++) {
3317 uint32_t filtered
= xc
->filtered_features
[w
];
3319 for (i
= 0; i
< 32; i
++) {
3320 if (filtered
& (1UL << i
)) {
3321 strList
*new = g_new0(strList
, 1);
3322 new->value
= g_strdup(x86_cpu_feature_name(w
, i
));
3329 object_unref(OBJECT(xc
));
3332 /* Print all cpuid feature names in featureset
3334 static void listflags(FILE *f
, fprintf_function print
, const char **featureset
)
3339 for (bit
= 0; bit
< 32; bit
++) {
3340 if (featureset
[bit
]) {
3341 print(f
, "%s%s", first
? "" : " ", featureset
[bit
]);
3347 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
3348 static gint
x86_cpu_list_compare(gconstpointer a
, gconstpointer b
)
3350 ObjectClass
*class_a
= (ObjectClass
*)a
;
3351 ObjectClass
*class_b
= (ObjectClass
*)b
;
3352 X86CPUClass
*cc_a
= X86_CPU_CLASS(class_a
);
3353 X86CPUClass
*cc_b
= X86_CPU_CLASS(class_b
);
3354 const char *name_a
, *name_b
;
3356 if (cc_a
->ordering
!= cc_b
->ordering
) {
3357 return cc_a
->ordering
- cc_b
->ordering
;
3359 name_a
= object_class_get_name(class_a
);
3360 name_b
= object_class_get_name(class_b
);
3361 return strcmp(name_a
, name_b
);
3365 static GSList
*get_sorted_cpu_model_list(void)
3367 GSList
*list
= object_class_get_list(TYPE_X86_CPU
, false);
3368 list
= g_slist_sort(list
, x86_cpu_list_compare
);
3372 static void x86_cpu_list_entry(gpointer data
, gpointer user_data
)
3374 ObjectClass
*oc
= data
;
3375 X86CPUClass
*cc
= X86_CPU_CLASS(oc
);
3376 CPUListState
*s
= user_data
;
3377 char *name
= x86_cpu_class_get_model_name(cc
);
3378 const char *desc
= cc
->model_description
;
3379 if (!desc
&& cc
->cpu_def
) {
3380 desc
= cc
->cpu_def
->model_id
;
3383 (*s
->cpu_fprintf
)(s
->file
, "x86 %16s %-48s\n",
3388 /* list available CPU models and flags */
3389 void x86_cpu_list(FILE *f
, fprintf_function cpu_fprintf
)
3394 .cpu_fprintf
= cpu_fprintf
,
3398 (*cpu_fprintf
)(f
, "Available CPUs:\n");
3399 list
= get_sorted_cpu_model_list();
3400 g_slist_foreach(list
, x86_cpu_list_entry
, &s
);
3403 (*cpu_fprintf
)(f
, "\nRecognized CPUID flags:\n");
3404 for (i
= 0; i
< ARRAY_SIZE(feature_word_info
); i
++) {
3405 FeatureWordInfo
*fw
= &feature_word_info
[i
];
3407 (*cpu_fprintf
)(f
, " ");
3408 listflags(f
, cpu_fprintf
, fw
->feat_names
);
3409 (*cpu_fprintf
)(f
, "\n");
3413 static void x86_cpu_definition_entry(gpointer data
, gpointer user_data
)
3415 ObjectClass
*oc
= data
;
3416 X86CPUClass
*cc
= X86_CPU_CLASS(oc
);
3417 CpuDefinitionInfoList
**cpu_list
= user_data
;
3418 CpuDefinitionInfoList
*entry
;
3419 CpuDefinitionInfo
*info
;
3421 info
= g_malloc0(sizeof(*info
));
3422 info
->name
= x86_cpu_class_get_model_name(cc
);
3423 x86_cpu_class_check_missing_features(cc
, &info
->unavailable_features
);
3424 info
->has_unavailable_features
= true;
3425 info
->q_typename
= g_strdup(object_class_get_name(oc
));
3426 info
->migration_safe
= cc
->migration_safe
;
3427 info
->has_migration_safe
= true;
3428 info
->q_static
= cc
->static_model
;
3430 entry
= g_malloc0(sizeof(*entry
));
3431 entry
->value
= info
;
3432 entry
->next
= *cpu_list
;
3436 CpuDefinitionInfoList
*arch_query_cpu_definitions(Error
**errp
)
3438 CpuDefinitionInfoList
*cpu_list
= NULL
;
3439 GSList
*list
= get_sorted_cpu_model_list();
3440 g_slist_foreach(list
, x86_cpu_definition_entry
, &cpu_list
);
3445 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w
,
3446 bool migratable_only
)
3448 FeatureWordInfo
*wi
= &feature_word_info
[w
];
3451 if (kvm_enabled()) {
3452 r
= kvm_arch_get_supported_cpuid(kvm_state
, wi
->cpuid_eax
,
3455 } else if (hvf_enabled()) {
3456 r
= hvf_get_supported_cpuid(wi
->cpuid_eax
,
3459 } else if (tcg_enabled()) {
3460 r
= wi
->tcg_features
;
3464 if (migratable_only
) {
3465 r
&= x86_cpu_get_migratable_flags(w
);
3470 static void x86_cpu_report_filtered_features(X86CPU
*cpu
)
3474 for (w
= 0; w
< FEATURE_WORDS
; w
++) {
3475 report_unavailable_features(w
, cpu
->filtered_features
[w
]);
3479 static void x86_cpu_apply_props(X86CPU
*cpu
, PropValue
*props
)
3482 for (pv
= props
; pv
->prop
; pv
++) {
3486 object_property_parse(OBJECT(cpu
), pv
->value
, pv
->prop
,
3491 /* Load data from X86CPUDefinition into a X86CPU object
3493 static void x86_cpu_load_def(X86CPU
*cpu
, X86CPUDefinition
*def
, Error
**errp
)
3495 CPUX86State
*env
= &cpu
->env
;
3497 char host_vendor
[CPUID_VENDOR_SZ
+ 1];
3500 /*NOTE: any property set by this function should be returned by
3501 * x86_cpu_static_props(), so static expansion of
3502 * query-cpu-model-expansion is always complete.
3505 /* CPU models only set _minimum_ values for level/xlevel: */
3506 object_property_set_uint(OBJECT(cpu
), def
->level
, "min-level", errp
);
3507 object_property_set_uint(OBJECT(cpu
), def
->xlevel
, "min-xlevel", errp
);
3509 object_property_set_int(OBJECT(cpu
), def
->family
, "family", errp
);
3510 object_property_set_int(OBJECT(cpu
), def
->model
, "model", errp
);
3511 object_property_set_int(OBJECT(cpu
), def
->stepping
, "stepping", errp
);
3512 object_property_set_str(OBJECT(cpu
), def
->model_id
, "model-id", errp
);
3513 for (w
= 0; w
< FEATURE_WORDS
; w
++) {
3514 env
->features
[w
] = def
->features
[w
];
3517 /* legacy-cache defaults to 'off' if CPU model provides cache info */
3518 cpu
->legacy_cache
= !def
->cache_info
;
3520 /* Special cases not set in the X86CPUDefinition structs: */
3521 /* TODO: in-kernel irqchip for hvf */
3522 if (kvm_enabled()) {
3523 if (!kvm_irqchip_in_kernel()) {
3524 x86_cpu_change_kvm_default("x2apic", "off");
3527 x86_cpu_apply_props(cpu
, kvm_default_props
);
3528 } else if (tcg_enabled()) {
3529 x86_cpu_apply_props(cpu
, tcg_default_props
);
3532 env
->features
[FEAT_1_ECX
] |= CPUID_EXT_HYPERVISOR
;
3534 /* sysenter isn't supported in compatibility mode on AMD,
3535 * syscall isn't supported in compatibility mode on Intel.
3536 * Normally we advertise the actual CPU vendor, but you can
3537 * override this using the 'vendor' property if you want to use
3538 * KVM's sysenter/syscall emulation in compatibility mode and
3539 * when doing cross vendor migration
3541 vendor
= def
->vendor
;
3542 if (accel_uses_host_cpuid()) {
3543 uint32_t ebx
= 0, ecx
= 0, edx
= 0;
3544 host_cpuid(0, 0, NULL
, &ebx
, &ecx
, &edx
);
3545 x86_cpu_vendor_words2str(host_vendor
, ebx
, edx
, ecx
);
3546 vendor
= host_vendor
;
3549 object_property_set_str(OBJECT(cpu
), vendor
, "vendor", errp
);
3553 /* Return a QDict containing keys for all properties that can be included
3554 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
3555 * must be included in the dictionary.
3557 static QDict
*x86_cpu_static_props(void)
3561 static const char *props
[] = {
3579 for (i
= 0; props
[i
]; i
++) {
3580 qdict_put_null(d
, props
[i
]);
3583 for (w
= 0; w
< FEATURE_WORDS
; w
++) {
3584 FeatureWordInfo
*fi
= &feature_word_info
[w
];
3586 for (bit
= 0; bit
< 32; bit
++) {
3587 if (!fi
->feat_names
[bit
]) {
3590 qdict_put_null(d
, fi
->feat_names
[bit
]);
3597 /* Add an entry to @props dict, with the value for property. */
3598 static void x86_cpu_expand_prop(X86CPU
*cpu
, QDict
*props
, const char *prop
)
3600 QObject
*value
= object_property_get_qobject(OBJECT(cpu
), prop
,
3603 qdict_put_obj(props
, prop
, value
);
3606 /* Convert CPU model data from X86CPU object to a property dictionary
3607 * that can recreate exactly the same CPU model.
3609 static void x86_cpu_to_dict(X86CPU
*cpu
, QDict
*props
)
3611 QDict
*sprops
= x86_cpu_static_props();
3612 const QDictEntry
*e
;
3614 for (e
= qdict_first(sprops
); e
; e
= qdict_next(sprops
, e
)) {
3615 const char *prop
= qdict_entry_key(e
);
3616 x86_cpu_expand_prop(cpu
, props
, prop
);
3620 /* Convert CPU model data from X86CPU object to a property dictionary
3621 * that can recreate exactly the same CPU model, including every
3622 * writeable QOM property.
3624 static void x86_cpu_to_dict_full(X86CPU
*cpu
, QDict
*props
)
3626 ObjectPropertyIterator iter
;
3627 ObjectProperty
*prop
;
3629 object_property_iter_init(&iter
, OBJECT(cpu
));
3630 while ((prop
= object_property_iter_next(&iter
))) {
3631 /* skip read-only or write-only properties */
3632 if (!prop
->get
|| !prop
->set
) {
3636 /* "hotplugged" is the only property that is configurable
3637 * on the command-line but will be set differently on CPUs
3638 * created using "-cpu ... -smp ..." and by CPUs created
3639 * on the fly by x86_cpu_from_model() for querying. Skip it.
3641 if (!strcmp(prop
->name
, "hotplugged")) {
3644 x86_cpu_expand_prop(cpu
, props
, prop
->name
);
3648 static void object_apply_props(Object
*obj
, QDict
*props
, Error
**errp
)
3650 const QDictEntry
*prop
;
3653 for (prop
= qdict_first(props
); prop
; prop
= qdict_next(props
, prop
)) {
3654 object_property_set_qobject(obj
, qdict_entry_value(prop
),
3655 qdict_entry_key(prop
), &err
);
3661 error_propagate(errp
, err
);
3664 /* Create X86CPU object according to model+props specification */
3665 static X86CPU
*x86_cpu_from_model(const char *model
, QDict
*props
, Error
**errp
)
3671 xcc
= X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU
, model
));
3673 error_setg(&err
, "CPU model '%s' not found", model
);
3677 xc
= X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc
))));
3679 object_apply_props(OBJECT(xc
), props
, &err
);
3685 x86_cpu_expand_features(xc
, &err
);
3692 error_propagate(errp
, err
);
3693 object_unref(OBJECT(xc
));
3699 CpuModelExpansionInfo
*
3700 arch_query_cpu_model_expansion(CpuModelExpansionType type
,
3701 CpuModelInfo
*model
,
3706 CpuModelExpansionInfo
*ret
= g_new0(CpuModelExpansionInfo
, 1);
3707 QDict
*props
= NULL
;
3708 const char *base_name
;
3710 xc
= x86_cpu_from_model(model
->name
,
3712 qobject_to(QDict
, model
->props
) :
3718 props
= qdict_new();
3721 case CPU_MODEL_EXPANSION_TYPE_STATIC
:
3722 /* Static expansion will be based on "base" only */
3724 x86_cpu_to_dict(xc
, props
);
3726 case CPU_MODEL_EXPANSION_TYPE_FULL
:
3727 /* As we don't return every single property, full expansion needs
3728 * to keep the original model name+props, and add extra
3729 * properties on top of that.
3731 base_name
= model
->name
;
3732 x86_cpu_to_dict_full(xc
, props
);
        error_setg(&err, "Unsupported expansion type");
3740 props
= qdict_new();
3742 x86_cpu_to_dict(xc
, props
);
3744 ret
->model
= g_new0(CpuModelInfo
, 1);
3745 ret
->model
->name
= g_strdup(base_name
);
3746 ret
->model
->props
= QOBJECT(props
);
3747 ret
->model
->has_props
= true;
3750 object_unref(OBJECT(xc
));
3752 error_propagate(errp
, err
);
3753 qapi_free_CpuModelExpansionInfo(ret
);
3759 static gchar
*x86_gdb_arch_name(CPUState
*cs
)
3761 #ifdef TARGET_X86_64
3762 return g_strdup("i386:x86-64");
3764 return g_strdup("i386");
3768 static void x86_cpu_cpudef_class_init(ObjectClass
*oc
, void *data
)
3770 X86CPUDefinition
*cpudef
= data
;
3771 X86CPUClass
*xcc
= X86_CPU_CLASS(oc
);
3773 xcc
->cpu_def
= cpudef
;
3774 xcc
->migration_safe
= true;
3777 static void x86_register_cpudef_type(X86CPUDefinition
*def
)
3779 char *typename
= x86_cpu_type_name(def
->name
);
3782 .parent
= TYPE_X86_CPU
,
3783 .class_init
= x86_cpu_cpudef_class_init
,
3787 /* AMD aliases are handled at runtime based on CPUID vendor, so
3788 * they shouldn't be set on the CPU model table.
3790 assert(!(def
->features
[FEAT_8000_0001_EDX
] & CPUID_EXT2_AMD_ALIASES
));
3791 /* catch mistakes instead of silently truncating model_id when too long */
3792 assert(def
->model_id
&& strlen(def
->model_id
) <= 48);
3799 #if !defined(CONFIG_USER_ONLY)
3801 void cpu_clear_apic_feature(CPUX86State
*env
)
3803 env
->features
[FEAT_1_EDX
] &= ~CPUID_APIC
;
3806 #endif /* !CONFIG_USER_ONLY */
3808 void cpu_x86_cpuid(CPUX86State
*env
, uint32_t index
, uint32_t count
,
3809 uint32_t *eax
, uint32_t *ebx
,
3810 uint32_t *ecx
, uint32_t *edx
)
3812 X86CPU
*cpu
= x86_env_get_cpu(env
);
3813 CPUState
*cs
= CPU(cpu
);
3814 uint32_t pkg_offset
;
3816 uint32_t signature
[3];
3818 /* Calculate & apply limits for different index ranges */
3819 if (index
>= 0xC0000000) {
3820 limit
= env
->cpuid_xlevel2
;
3821 } else if (index
>= 0x80000000) {
3822 limit
= env
->cpuid_xlevel
;
3823 } else if (index
>= 0x40000000) {
3826 limit
= env
->cpuid_level
;
3829 if (index
> limit
) {
3830 /* Intel documentation states that invalid EAX input will
3831 * return the same information as EAX=cpuid_level
3832 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
3834 index
= env
->cpuid_level
;
3839 *eax
= env
->cpuid_level
;
3840 *ebx
= env
->cpuid_vendor1
;
3841 *edx
= env
->cpuid_vendor2
;
3842 *ecx
= env
->cpuid_vendor3
;
3845 *eax
= env
->cpuid_version
;
3846 *ebx
= (cpu
->apic_id
<< 24) |
3847 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
3848 *ecx
= env
->features
[FEAT_1_ECX
];
3849 if ((*ecx
& CPUID_EXT_XSAVE
) && (env
->cr
[4] & CR4_OSXSAVE_MASK
)) {
3850 *ecx
|= CPUID_EXT_OSXSAVE
;
3852 *edx
= env
->features
[FEAT_1_EDX
];
3853 if (cs
->nr_cores
* cs
->nr_threads
> 1) {
3854 *ebx
|= (cs
->nr_cores
* cs
->nr_threads
) << 16;
3859 /* cache info: needed for Pentium Pro compatibility */
3860 if (cpu
->cache_info_passthrough
) {
3861 host_cpuid(index
, 0, eax
, ebx
, ecx
, edx
);
3864 *eax
= 1; /* Number of CPUID[EAX=2] calls required */
3866 if (!cpu
->enable_l3_cache
) {
3869 *ecx
= cpuid2_cache_descriptor(env
->cache_info_cpuid2
.l3_cache
);
3871 *edx
= (cpuid2_cache_descriptor(env
->cache_info_cpuid2
.l1d_cache
) << 16) |
3872 (cpuid2_cache_descriptor(env
->cache_info_cpuid2
.l1i_cache
) << 8) |
3873 (cpuid2_cache_descriptor(env
->cache_info_cpuid2
.l2_cache
));
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
            *eax &= ~0xFC000000;
            if ((*eax & 31) && cs->nr_cores > 1) {
                *eax |= (cs->nr_cores - 1) << 26;
            }
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
                                    1, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 1: /* L1 icache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
                                    1, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 2: /* L2 cache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
                                    cs->nr_threads, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 3: /* L3 cache info */
                pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
                if (cpu->enable_l3_cache) {
                    encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
                                        (1 << pkg_offset), cs->nr_cores,
                                        eax, ebx, ecx, edx);
                    break;
                }
                /* fall through */
            default: /* end of info */
                *eax = *ebx = *ecx = *edx = 0;
                break;
            }
        }
        break;
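    /*
     * Each CPUID[4] sub-leaf describes one cache level; guests stop
     * enumerating at the first sub-leaf whose cache-type field (EAX bits 4-0)
     * is zero, which is exactly what the "end of info" default produces.
     */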
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else if (hvf_enabled() && cpu->enable_pmu) {
            *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
            *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
            *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
            *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xB:
        /* Extended Topology Enumeration Leaf */
        if (!cpu->enable_cpuid_0xb) {
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;

        switch (count) {
        case 0:
            *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }

        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
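    /*
     * For example (hypothetical topology), with -smp cores=2,threads=2 the
     * SMT sub-leaf reports a shift of 1 (two threads per core) and the core
     * sub-leaf a shift of 2, so the guest derives APIC IDs laid out as
     * package | core | thread bit fields.
     */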
    case 0xD: {
        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }

        if (count == 0) {
            *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
            *eax = env->features[FEAT_XSAVE_COMP_LO];
            *edx = env->features[FEAT_XSAVE_COMP_HI];
            *ebx = *ecx;
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
                const ExtSaveArea *esa = &x86_ext_save_areas[count];
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
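    /*
     * CPUID[0xD]: sub-leaf 0 reports the enabled XSAVE component bitmap
     * (EAX/EDX) and the save-area size, while sub-leaves >= 2 report the
     * size and offset of each individual component within that area.
     */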
    case 0x14: {
        /* Intel Processor Trace Enumeration */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
            !kvm_enabled()) {
            break;
        }

        if (count == 0) {
            *eax = INTEL_PT_MAX_SUBLEAF;
            *ebx = INTEL_PT_MINIMAL_EBX;
            *ecx = INTEL_PT_MINIMAL_ECX;
        } else if (count == 1) {
            *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
            *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
        }
        break;
    }
    case 0x40000000:
        /*
         * CPUID code in kvm_arch_init_vcpu() ignores stuff
         * set here, but we restrict to TCG nonetheless.
         */
        if (tcg_enabled() && cpu->expose_tcg) {
            memcpy(signature, "TCGTCGTCGTCG", 12);
            *eax = 0x40000001;
            *ebx = signature[0];
            *ecx = signature[1];
            *edx = signature[2];
        }
        break;
    case 0x80000000:
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
        *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache);
        *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        encode_cache_cpuid80000006(env->cache_info_amd.l2_cache,
                                   cpu->enable_l3_cache ?
                                   env->cache_info_amd.l3_cache : NULL,
                                   ecx, edx);
        break;
    case 0x80000007:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor */
            *eax = cpu->phys_bits; /* configurable physical bits */
            if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
                *eax |= 0x00003900; /* 57 bits virtual */
            } else {
                *eax |= 0x00003000; /* 48 bits virtual */
            }
        } else {
            *eax = cpu->phys_bits;
        }
        *ebx = env->features[FEAT_8000_0008_EBX];
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0x8000001D:
        *eax = 0;
        switch (count) {
        case 0: /* L1 dcache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 1: /* L1 icache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 2: /* L2 cache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 3: /* L3 cache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        default: /* end of info */
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }
        break;
    case 0x8000001E:
        assert(cpu->core_id <= 255);
        encode_topo_cpuid8000001e(cs, cpu,
                                  eax, ebx, ecx, edx);
        break;
    case 0xC0000000:
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x8000001F:
        *eax = sev_enabled() ? 0x2 : 0;
        *ebx = sev_get_cbit_position();
        *ebx |= sev_get_reduced_phys_bits() << 6;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
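/*
 * cpu_x86_cpuid() is the single source of guest CPUID data: the TCG cpuid
 * helper calls it directly at run time, and the accelerator vCPU setup code
 * (e.g. kvm_arch_init_vcpu()) uses it to build the CPUID table handed to the
 * kernel for each vCPU.
 */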
/* CPUClass::reset() */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    memset(env, 0, offsetof(CPUX86State, end_reset_fields));

    env->old_exception = -1;

    /* init to reset state */

    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;
    env->msr_smi_count = 0;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state. */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode. */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

    env->interrupt_injected = -1;
    env->exception_injected = -1;
    env->nmi_injected = false;
#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
    else if (hvf_enabled()) {
        hvf_reset_vcpu(s);
    }
#endif
}
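/*
 * Note that the memset() in x86_cpu_reset() only clears state up to
 * end_reset_fields, so the CPUID configuration (feature words, cpuid_level,
 * cache models) established at realize time survives every reset.
 */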
#ifndef CONFIG_USER_ONLY
bool cpu_is_bsp(X86CPU *cpu)
{
    return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
}
#endif

/* TODO: remove me, when reset over QOM tree is implemented */
static void x86_cpu_machine_reset_cb(void *opaque)
{
    X86CPU *cpu = opaque;
    cpu_reset(CPU(cpu));
}
static void mce_init(X86CPU *cpu)
{
    CPUX86State *cenv = &cpu->env;
    unsigned int bank;

    if (((cenv->cpuid_version >> 8) & 0xf) >= 6
        && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
            (CPUID_MCE | CPUID_MCA)) {
        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
                        (cpu->enable_lmce ? MCG_LMCE_P : 0);
        cenv->mcg_ctl = ~(uint64_t)0;
        for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
            cenv->mce_banks[bank * 4] = ~(uint64_t)0;
        }
    }
}
#ifndef CONFIG_USER_ONLY
APICCommonClass *apic_get_class(void)
{
    const char *apic_type = "apic";

    /* TODO: in-kernel irqchip for hvf */
    if (kvm_apic_in_kernel()) {
        apic_type = "kvm-apic";
    } else if (xen_enabled()) {
        apic_type = "xen-apic";
    }

    return APIC_COMMON_CLASS(object_class_by_name(apic_type));
}
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());

    cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));
    object_property_add_child(OBJECT(cpu), "lapic",
                              OBJECT(cpu->apic_state), &error_abort);
    object_unref(OBJECT(cpu->apic_state));

    qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    static bool apic_mmio_map_once;

    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
    }
}
static void x86_cpu_machine_done(Notifier *n, void *unused)
{
    X86CPU *cpu = container_of(n, X86CPU, machine_done);
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    if (smram) {
        cpu->smram = g_new(MemoryRegion, 1);
        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
                                 smram, 0, 1ull << 32);
        memory_region_set_enabled(cpu->smram, true);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
    }
}
#else
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
#endif
/* Note: Only safe for use on x86(-64) hosts */
static uint32_t x86_host_phys_bits(void)
{
    uint32_t eax;
    uint32_t host_phys_bits;

    host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
    if (eax >= 0x80000008) {
        host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
        /* Note: According to AMD doc 25481 rev 2.34 they have a field
         * at 23:16 that can specify a maximum physical address bits for
         * the guest that can override this value; but I've not seen
         * anything with that set.
         */
        host_phys_bits = eax & 0xff;
    } else {
        /* It's an odd 64 bit machine that doesn't have the leaf for
         * physical address bits; fall back to 36 that's most older
         * Intel.
         */
        host_phys_bits = 36;
    }

    return host_phys_bits;
}
static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
{
    if (*min < value) {
        *min = value;
    }
}

/* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
{
    CPUX86State *env = &cpu->env;
    FeatureWordInfo *fi = &feature_word_info[w];
    uint32_t eax = fi->cpuid_eax;
    uint32_t region = eax & 0xF0000000;

    if (!env->features[w]) {
        return;
    }

    switch (region) {
    case 0x00000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
        break;
    case 0x80000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
        break;
    case 0xC0000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
        break;
    }
}
/* Calculate XSAVE components based on the configured CPU feature flags */
static void x86_cpu_enable_xsave_components(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int i;
    uint64_t mask;

    if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
        return;
    }

    mask = 0;
    for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            mask |= (1ULL << i);
        }
    }

    env->features[FEAT_XSAVE_COMP_LO] = mask;
    env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
}
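/*
 * Worked example (assuming the usual component numbering): a CPU model with
 * x87, SSE and AVX enabled sets bits 0-2 of the mask, so FEAT_XSAVE_COMP_LO
 * becomes 0x7 and CPUID[0xD] advertises exactly those three save areas.
 */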
/***** Steps involved on loading and filtering CPUID data
 *
 * When initializing and realizing a CPU object, the steps
 * involved in setting up CPUID data are:
 *
 * 1) Loading CPU model definition (X86CPUDefinition). This is
 *    implemented by x86_cpu_load_def() and should be completely
 *    transparent, as it is done automatically by instance_init.
 *    No code should need to look at X86CPUDefinition structs
 *    outside instance_init.
 *
 * 2) CPU expansion. This is done by realize before CPUID
 *    filtering, and will make sure host/accelerator data is
 *    loaded for CPU models that depend on host capabilities
 *    (e.g. "host"). Done by x86_cpu_expand_features().
 *
 * 3) CPUID filtering. This initializes extra data related to
 *    CPUID, and checks if the host supports all capabilities
 *    required by the CPU. Runnability of a CPU model is
 *    determined at this step. Done by x86_cpu_filter_features().
 *
 * Some operations don't require all steps to be performed.
 *
 * - CPU instance creation (instance_init) will run only CPU
 *   model loading. CPU expansion can't run at instance_init-time
 *   because host/accelerator data may be not available yet.
 * - CPU realization will perform both CPU model expansion and CPUID
 *   filtering, and return an error in case one of them fails.
 * - query-cpu-definitions needs to run all 3 steps. It needs
 *   to run CPUID filtering, as the 'unavailable-features'
 *   field is set based on the filtering results.
 * - The query-cpu-model-expansion QMP command only needs to run
 *   CPU model loading and CPU expansion. It should not filter
 *   any CPUID data based on host capabilities.
 */
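/*
 * In this file, step 1 runs from x86_cpu_initfn() via x86_cpu_load_def(),
 * while steps 2 and 3 run back to back from x86_cpu_realizefn() below.
 */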
/* Expand CPU configuration data, based on configured features
 * and host/accelerator capabilities when appropriate.
 */
static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    GList *l;
    Error *local_err = NULL;

    /*TODO: Now cpu->max_features doesn't overwrite features
     * set using QOM properties, and we can convert
     * plus_features & minus_features to global properties
     * inside x86_cpu_parse_featurestr() too.
     */
    if (cpu->max_features) {
        for (w = 0; w < FEATURE_WORDS; w++) {
            /* Override only features that weren't set explicitly
             * by the user.
             */
            env->features[w] |=
                x86_cpu_get_supported_feature_word(w, cpu->migratable) &
                ~env->user_features[w] & \
                ~feature_word_info[w].no_autoenable_flags;
        }
    }

    for (l = plus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    for (l = minus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    if (!kvm_enabled() || !cpu->expose_kvm) {
        env->features[FEAT_KVM] = 0;
    }

    x86_cpu_enable_xsave_components(cpu);

    /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
    x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
    if (cpu->full_cpuid_auto_level) {
        x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
        x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
        x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
        x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
        /* SVM requires CPUID[0x8000000A] */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
        }

        /* SEV requires CPUID[0x8000001F] */
        if (sev_enabled()) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
        }
    }

    /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
    if (env->cpuid_level == UINT32_MAX) {
        env->cpuid_level = env->cpuid_min_level;
    }
    if (env->cpuid_xlevel == UINT32_MAX) {
        env->cpuid_xlevel = env->cpuid_min_xlevel;
    }
    if (env->cpuid_xlevel2 == UINT32_MAX) {
        env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
    }

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
}
/*
 * Finishes initialization of CPUID data, filters CPU feature
 * words based on host availability of each feature.
 *
 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
 */
static int x86_cpu_filter_features(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    int rv = 0;

    for (w = 0; w < FEATURE_WORDS; w++) {
        uint32_t host_feat =
            x86_cpu_get_supported_feature_word(w, false);
        uint32_t requested_features = env->features[w];
        env->features[w] &= host_feat;
        cpu->filtered_features[w] = requested_features & ~env->features[w];
        if (cpu->filtered_features[w]) {
            rv = 1;
        }
    }

    if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
        kvm_enabled()) {
        KVMState *s = CPU(cpu)->kvm_state;
        uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
        uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
        uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
        uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
        uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);

        if (!eax_0 ||
            ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
            ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
            ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
            ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
                                            INTEL_PT_ADDR_RANGES_NUM) ||
            ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
                 (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
            (ecx_0 & INTEL_PT_IP_LIP)) {
            /*
             * Processor Trace capabilities aren't configurable, so if the
             * host can't emulate the capabilities we report on
             * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
             */
            env->features[FEAT_7_0_EBX] &= ~CPUID_7_0_EBX_INTEL_PT;
            cpu->filtered_features[FEAT_7_0_EBX] |= CPUID_7_0_EBX_INTEL_PT;
            rv = 1;
        }
    }

    return rv;
}
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
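/* The vendor IDs are the CPUID[0] vendor string packed as three 32-bit
 * registers: "GenuineIntel" is EBX="Genu", EDX="ineI", ECX="ntel". */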
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;

    if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
        char *name = x86_cpu_class_get_model_name(xcc);
        error_setg(&local_err, "CPU model '%s' requires KVM", name);
        g_free(name);
        goto out;
    }

    if (cpu->apic_id == UNASSIGNED_APIC_ID) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    x86_cpu_expand_features(cpu, &local_err);
    if (local_err) {
        goto out;
    }

    if (x86_cpu_filter_features(cpu) &&
        (cpu->check_cpuid || cpu->enforce_cpuid)) {
        x86_cpu_report_filtered_features(cpu);
        if (cpu->enforce_cpuid) {
            error_setg(&local_err,
                       accel_uses_host_cpuid() ?
                           "Host doesn't support requested features" :
                           "TCG doesn't support requested features");
            goto out;
        }
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
            & CPUID_EXT2_AMD_ALIASES);
    }
    /* For 64bit systems think about the number of physical bits to present.
     * ideally this should be the same as the host; anything other than matching
     * the host can cause incorrect guest behaviour.
     * QEMU used to pick the magic value of 40 bits that corresponds to
     * consumer AMD devices but nothing else.
     */
    if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
        if (accel_uses_host_cpuid()) {
            uint32_t host_phys_bits = x86_host_phys_bits();
            static bool warned;

            if (cpu->host_phys_bits) {
                /* The user asked for us to use the host physical bits */
                cpu->phys_bits = host_phys_bits;
            }

            /* Print a warning if the user set it to a value that's not the
             * host value.
             */
            if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
                !warned) {
                warn_report("Host physical bits (%u)"
                            " does not match phys-bits property (%u)",
                            host_phys_bits, cpu->phys_bits);
                warned = true;
            }

            if (cpu->phys_bits &&
                (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
                 cpu->phys_bits < 32)) {
                error_setg(errp, "phys-bits should be between 32 and %u "
                                 "(but is %u)",
                                 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
                return;
            }
        } else {
            if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
                error_setg(errp, "TCG only supports phys-bits=%u",
                                  TCG_PHYS_ADDR_BITS);
                return;
            }
        }
        /* 0 means it was not explicitly set by the user (or by machine
         * compat_props or by the host code above). In this case, the default
         * is the value used by TCG (40).
         */
        if (cpu->phys_bits == 0) {
            cpu->phys_bits = TCG_PHYS_ADDR_BITS;
        }
    } else {
        /* For 32 bit systems don't use the user set value, but keep
         * phys_bits consistent with what we tell the guest.
         */
        if (cpu->phys_bits != 0) {
            error_setg(errp, "phys-bits is not user-configurable in 32 bit");
            return;
        }

        if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
            cpu->phys_bits = 36;
        } else {
            cpu->phys_bits = 32;
        }
    }
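    /*
     * In practice: "host-phys-bits=on" mirrors the host's MAXPHYADDR, an
     * explicit phys-bits=N is range-checked against the accelerator, and an
     * unset value falls back to the historical 40-bit TCG default.
     */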
    /* Cache information initialization */
    if (!cpu->legacy_cache) {
        if (!xcc->cpu_def || !xcc->cpu_def->cache_info) {
            char *name = x86_cpu_class_get_model_name(xcc);
            error_setg(errp,
                       "CPU model '%s' doesn't support legacy-cache=off", name);
            g_free(name);
            return;
        }
        env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
            *xcc->cpu_def->cache_info;
    } else {
        /* Build legacy cache information */
        env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
        env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;

        env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
        env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;

        env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
        env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
        env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
        env->cache_info_amd.l3_cache = &legacy_l3_cache;
    }
    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);

        cs->num_ases = 2;
        cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
        cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);

        /* ... SMRAM with higher priority, linked from /machine/smram.  */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
     * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to give
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't been populated yet and the checking is incorrect.
     */
    if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
        error_report("AMD CPU doesn't support hyperthreading. Please configure"
                     " -smp options properly.");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
{
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu_remove_sync(CPU(dev));
    qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
#endif

    if (cpu->apic_state) {
        object_unparent(OBJECT(cpu->apic_state));
        cpu->apic_state = NULL;
    }

    xcc->parent_unrealize(dev, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
typedef struct BitProperty {
    FeatureWord w;
    uint32_t mask;
} BitProperty;

static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    BitProperty *fp = opaque;
    uint32_t f = cpu->env.features[fp->w];
    bool value = (f & fp->mask) == fp->mask;
    visit_type_bool(v, name, &value, errp);
}
static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    DeviceState *dev = DEVICE(obj);
    X86CPU *cpu = X86_CPU(obj);
    BitProperty *fp = opaque;
    Error *local_err = NULL;
    bool value;

    if (dev->realized) {
        qdev_prop_set_after_realize(dev, name, errp);
        return;
    }

    visit_type_bool(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (value) {
        cpu->env.features[fp->w] |= fp->mask;
    } else {
        cpu->env.features[fp->w] &= ~fp->mask;
    }
    cpu->env.user_features[fp->w] |= fp->mask;
}

static void x86_cpu_release_bit_prop(Object *obj, const char *name,
                                     void *opaque)
{
    BitProperty *prop = opaque;
    g_free(prop);
}
/* Register a boolean property to get/set a single bit in a uint32_t field.
 *
 * The same property name can be registered multiple times to make it affect
 * multiple bits in the same FeatureWord. In that case, the getter will return
 * true only if all bits are set.
 */
static void x86_cpu_register_bit_prop(X86CPU *cpu,
                                      const char *prop_name,
                                      FeatureWord w,
                                      int bitnr)
{
    BitProperty *fp;
    ObjectProperty *op;
    uint32_t mask = (1UL << bitnr);

    op = object_property_find(OBJECT(cpu), prop_name, NULL);
    if (op) {
        fp = op->opaque;
        assert(fp->w == w);
        fp->mask |= mask;
    } else {
        fp = g_new0(BitProperty, 1);
        fp->w = w;
        fp->mask = mask;
        object_property_add(OBJECT(cpu), prop_name, "bool",
                            x86_cpu_get_bit_prop,
                            x86_cpu_set_bit_prop,
                            x86_cpu_release_bit_prop, fp, &error_abort);
    }
}

static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
                                               FeatureWord w,
                                               int bitnr)
{
    FeatureWordInfo *fi = &feature_word_info[w];
    const char *name = fi->feat_names[bitnr];

    if (!name) {
        return;
    }

    /* Property names should use "-" instead of "_".
     * Old names containing underscores are registered as aliases
     * using object_property_add_alias()
     */
    assert(!strchr(name, '_'));
    /* aliases don't use "|" delimiters anymore, they are registered
     * manually using object_property_add_alias() */
    assert(!strchr(name, '|'));
    x86_cpu_register_bit_prop(cpu, name, w, bitnr);
}
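/*
 * Every named CPUID feature bit therefore becomes a writable QOM bool
 * property on the CPU object, which is what makes command lines such as
 * "-cpu qemu64,+avx,-sse4.2" work: the feature string is turned into
 * property assignments that are applied before realize.
 */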
static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    GuestPanicInformation *panic_info = NULL;

    if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
        panic_info = g_malloc0(sizeof(GuestPanicInformation));

        panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;

        assert(HV_CRASH_PARAMS >= 5);
        panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
        panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
        panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
        panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
        panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
    }

    return panic_info;
}

static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    CPUState *cs = CPU(obj);
    GuestPanicInformation *panic_info;

    if (!cs->crash_occurred) {
        error_setg(errp, "No crash occurred");
        return;
    }

    panic_info = x86_cpu_get_crash_info(cs);
    if (panic_info == NULL) {
        error_setg(errp, "No crash information");
        return;
    }

    visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
                                     errp);
    qapi_free_GuestPanicInformation(panic_info);
}
static void x86_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    cs->env_ptr = env;

    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);

    object_property_add(obj, "crash-information", "GuestPanicInformation",
                        x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;

    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }
    object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
    object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
    object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
    object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
    object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "i64", obj, "lm", &error_abort);

    object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
    object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
    object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
    object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
    object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
    object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
    object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
    object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
    object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
    object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
    object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
    object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
    object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
    object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
    object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
    object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
    object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
    object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
    object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);

    x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
}
static int64_t x86_cpu_get_arch_id(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);

    return cpu->apic_id;
}

static bool x86_cpu_get_paging_enabled(const CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);

    return cpu->env.cr[0] & CR0_PG_MASK;
}

static void x86_cpu_set_pc(CPUState *cs, vaddr value)
{
    X86CPU *cpu = X86_CPU(cs);

    cpu->env.eip = value;
}

static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
{
    X86CPU *cpu = X86_CPU(cs);

    cpu->env.eip = tb->pc - tb->cs_base;
}

static bool x86_cpu_has_work(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
                                      CPU_INTERRUPT_POLL)) &&
            (env->eflags & IF_MASK)) ||
           (cs->interrupt_request & (CPU_INTERRUPT_NMI |
                                     CPU_INTERRUPT_INIT |
                                     CPU_INTERRUPT_SIPI |
                                     CPU_INTERRUPT_MCE)) ||
           ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
            !(env->hflags & HF_SMM_MASK));
}
static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
                  : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
                  : bfd_mach_i386_i8086);
    info->print_insn = print_insn_i386;

    info->cap_arch = CS_ARCH_X86;
    info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
                      : env->hflags & HF_CS32_MASK ? CS_MODE_32
                      : CS_MODE_16);
    info->cap_insn_unit = 1;
    info->cap_insn_split = 8;
}
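/*
 * hflags is QEMU's cached summary of the execution mode: the privilege
 * level, selected CR0/CR4/EFER bits and the CS/SS descriptor attributes are
 * folded into one word so the translator and interrupt code can test them
 * cheaply. x86_update_hflags() below recomputes it from the raw state.
 */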
void x86_update_hflags(CPUX86State *env)
{
    uint32_t hflags;
#define HFLAG_COPY_MASK \
    ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
       HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
       HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
       HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    hflags = env->hflags & HFLAG_COPY_MASK;
    hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
                (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        hflags |= HF_OSFXSR_MASK;
    }

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = hflags;
}
static Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
#else
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
    DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
    DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
    DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
    DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
    DEFINE_PROP_BOOL("hv-frequencies", X86CPU, hyperv_frequencies, false),
    DEFINE_PROP_BOOL("hv-reenlightenment", X86CPU, hyperv_reenlightenment, false),
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
    DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
    DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
    DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
    DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
    DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
    DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
    DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
    DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
                     false),
    DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
    DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
    /*
     * legacy_cache defaults to true unless the CPU model provides its
     * own cache information (see x86_cpu_load_def()).
     */
    DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true),

    /*
     * From "Requirements for Implementing the Microsoft
     * Hypervisor Interface":
     * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
     *
     * "Starting with Windows Server 2012 and Windows 8, if
     * CPUID.40000005.EAX contains a value of -1, Windows assumes that
     * the hypervisor imposes no specific limit to the number of VPs.
     * In this case, Windows Server 2012 guest VMs may use more than
     * 64 VPs, up to the maximum supported number of processors applicable
     * to the specific Windows version being used."
     */
    DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
    DEFINE_PROP_END_OF_LIST()
};
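/*
 * All of the above are ordinary qdev properties, so they can be set from the
 * command line with -cpu, e.g. "-cpu qemu64,pmu=on,l3-cache=off,phys-bits=46".
 */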
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    device_class_set_parent_realize(dc, x86_cpu_realizefn,
                                    &xcc->parent_realize);
    device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
                                      &xcc->parent_unrealize);
    dc->props = x86_cpu_properties;

    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
#ifdef CONFIG_TCG
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
#endif
    cc->dump_state = x86_cpu_dump_state;
    cc->get_crash_info = x86_cpu_get_crash_info;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
    cc->asidx_from_attrs = x86_asidx_from_attrs;
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_arch_name = x86_gdb_arch_name;
#ifdef TARGET_X86_64
    cc->gdb_core_xml_file = "i386-64bit.xml";
    cc->gdb_num_core_regs = 57;
#else
    cc->gdb_core_xml_file = "i386-32bit.xml";
    cc->gdb_num_core_regs = 41;
#endif
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;
#ifdef CONFIG_TCG
    cc->tcg_initialize = tcg_x86_init;
#endif

    cc->disas_set_info = x86_disas_set_info;

    dc->user_creatable = true;
}
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};

/* "base" CPU model, used by query-cpu-model-expansion */
static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->static_model = true;
    xcc->migration_safe = true;
    xcc->model_description = "base CPU model type with no features enabled";
}

static const TypeInfo x86_base_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("base"),
    .parent = TYPE_X86_CPU,
    .class_init = x86_cpu_base_class_init,
};

static void x86_cpu_register_types(void)
{
    int i;

    type_register_static(&x86_cpu_type_info);
    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
        x86_register_cpudef_type(&builtin_x86_defs[i]);
    }
    type_register_static(&max_x86_cpu_type_info);
    type_register_static(&x86_base_cpu_type_info);
#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
    type_register_static(&host_x86_cpu_type_info);
#endif
}

type_init(x86_cpu_register_types)