2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "qemu/units.h"
22 #include "qemu/cutils.h"
23 #include "qemu/bitops.h"
24 #include "qemu/qemu-print.h"
27 #include "exec/exec-all.h"
28 #include "sysemu/kvm.h"
29 #include "sysemu/hvf.h"
30 #include "sysemu/cpus.h"
34 #include "qemu/error-report.h"
35 #include "qemu/option.h"
36 #include "qemu/config-file.h"
37 #include "qapi/error.h"
38 #include "qapi/qapi-visit-misc.h"
39 #include "qapi/qapi-visit-run-state.h"
40 #include "qapi/qmp/qdict.h"
41 #include "qapi/qmp/qerror.h"
42 #include "qapi/visitor.h"
43 #include "qom/qom-qobject.h"
44 #include "sysemu/arch_init.h"
45 #include "qapi/qapi-commands-target.h"
47 #include "standard-headers/asm-x86/kvm_para.h"
49 #include "sysemu/sysemu.h"
50 #include "sysemu/tcg.h"
51 #include "hw/qdev-properties.h"
52 #include "hw/i386/topology.h"
53 #ifndef CONFIG_USER_ONLY
54 #include "exec/address-spaces.h"
56 #include "hw/xen/xen.h"
57 #include "hw/i386/apic_internal.h"
60 #include "disas/capstone.h"
62 /* Helpers for building CPUID[2] descriptors: */
64 struct CPUID2CacheDescriptorInfo
{
73 * Known CPUID 2 cache descriptors.
74 * From Intel SDM Volume 2A, CPUID instruction
76 struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors
[] = {
77 [0x06] = { .level
= 1, .type
= INSTRUCTION_CACHE
, .size
= 8 * KiB
,
78 .associativity
= 4, .line_size
= 32, },
79 [0x08] = { .level
= 1, .type
= INSTRUCTION_CACHE
, .size
= 16 * KiB
,
80 .associativity
= 4, .line_size
= 32, },
81 [0x09] = { .level
= 1, .type
= INSTRUCTION_CACHE
, .size
= 32 * KiB
,
82 .associativity
= 4, .line_size
= 64, },
83 [0x0A] = { .level
= 1, .type
= DATA_CACHE
, .size
= 8 * KiB
,
84 .associativity
= 2, .line_size
= 32, },
85 [0x0C] = { .level
= 1, .type
= DATA_CACHE
, .size
= 16 * KiB
,
86 .associativity
= 4, .line_size
= 32, },
87 [0x0D] = { .level
= 1, .type
= DATA_CACHE
, .size
= 16 * KiB
,
88 .associativity
= 4, .line_size
= 64, },
89 [0x0E] = { .level
= 1, .type
= DATA_CACHE
, .size
= 24 * KiB
,
90 .associativity
= 6, .line_size
= 64, },
91 [0x1D] = { .level
= 2, .type
= UNIFIED_CACHE
, .size
= 128 * KiB
,
92 .associativity
= 2, .line_size
= 64, },
93 [0x21] = { .level
= 2, .type
= UNIFIED_CACHE
, .size
= 256 * KiB
,
94 .associativity
= 8, .line_size
= 64, },
95 /* lines per sector is not supported cpuid2_cache_descriptor(),
96 * so descriptors 0x22, 0x23 are not included
98 [0x24] = { .level
= 2, .type
= UNIFIED_CACHE
, .size
= 1 * MiB
,
99 .associativity
= 16, .line_size
= 64, },
100 /* lines per sector is not supported cpuid2_cache_descriptor(),
101 * so descriptors 0x25, 0x20 are not included
103 [0x2C] = { .level
= 1, .type
= DATA_CACHE
, .size
= 32 * KiB
,
104 .associativity
= 8, .line_size
= 64, },
105 [0x30] = { .level
= 1, .type
= INSTRUCTION_CACHE
, .size
= 32 * KiB
,
106 .associativity
= 8, .line_size
= 64, },
107 [0x41] = { .level
= 2, .type
= UNIFIED_CACHE
, .size
= 128 * KiB
,
108 .associativity
= 4, .line_size
= 32, },
109 [0x42] = { .level
= 2, .type
= UNIFIED_CACHE
, .size
= 256 * KiB
,
110 .associativity
= 4, .line_size
= 32, },
111 [0x43] = { .level
= 2, .type
= UNIFIED_CACHE
, .size
= 512 * KiB
,
112 .associativity
= 4, .line_size
= 32, },
113 [0x44] = { .level
= 2, .type
= UNIFIED_CACHE
, .size
= 1 * MiB
,
114 .associativity
= 4, .line_size
= 32, },
115 [0x45] = { .level
= 2, .type
= UNIFIED_CACHE
, .size
= 2 * MiB
,
116 .associativity
= 4, .line_size
= 32, },
117 [0x46] = { .level
= 3, .type
= UNIFIED_CACHE
, .size
= 4 * MiB
,
118 .associativity
= 4, .line_size
= 64, },
119 [0x47] = { .level
= 3, .type
= UNIFIED_CACHE
, .size
= 8 * MiB
,
120 .associativity
= 8, .line_size
= 64, },
121 [0x48] = { .level
= 2, .type
= UNIFIED_CACHE
, .size
= 3 * MiB
,
122 .associativity
= 12, .line_size
= 64, },
123 /* Descriptor 0x49 depends on CPU family/model, so it is not included */
124 [0x4A] = { .level
= 3, .type
= UNIFIED_CACHE
, .size
= 6 * MiB
,
125 .associativity
= 12, .line_size
= 64, },
126 [0x4B] = { .level
= 3, .type
= UNIFIED_CACHE
, .size
= 8 * MiB
,
127 .associativity
= 16, .line_size
= 64, },
128 [0x4C] = { .level
= 3, .type
= UNIFIED_CACHE
, .size
= 12 * MiB
,
129 .associativity
= 12, .line_size
= 64, },
130 [0x4D] = { .level
= 3, .type
= UNIFIED_CACHE
, .size
= 16 * MiB
,
131 .associativity
= 16, .line_size
= 64, },
132 [0x4E] = { .level
= 2, .type
= UNIFIED_CACHE
, .size
= 6 * MiB
,
133 .associativity
= 24, .line_size
= 64, },
134 [0x60] = { .level
= 1, .type
= DATA_CACHE
, .size
= 16 * KiB
,
135 .associativity
= 8, .line_size
= 64, },
136 [0x66] = { .level
= 1, .type
= DATA_CACHE
, .size
= 8 * KiB
,
137 .associativity
= 4, .line_size
= 64, },
138 [0x67] = { .level
= 1, .type
= DATA_CACHE
, .size
= 16 * KiB
,
139 .associativity
= 4, .line_size
= 64, },
140 [0x68] = { .level
= 1, .type
= DATA_CACHE
, .size
= 32 * KiB
,
141 .associativity
= 4, .line_size
= 64, },
142 [0x78] = { .level
= 2, .type
= UNIFIED_CACHE
, .size
= 1 * MiB
,
143 .associativity
= 4, .line_size
= 64, },
144 /* lines per sector is not supported cpuid2_cache_descriptor(),
145 * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
147 [0x7D] = { .level
= 2, .type
= UNIFIED_CACHE
, .size
= 2 * MiB
,
148 .associativity
= 8, .line_size
= 64, },
149 [0x7F] = { .level
= 2, .type
= UNIFIED_CACHE
, .size
= 512 * KiB
,
150 .associativity
= 2, .line_size
= 64, },
151 [0x80] = { .level
= 2, .type
= UNIFIED_CACHE
, .size
= 512 * KiB
,
152 .associativity
= 8, .line_size
= 64, },
153 [0x82] = { .level
= 2, .type
= UNIFIED_CACHE
, .size
= 256 * KiB
,
154 .associativity
= 8, .line_size
= 32, },
155 [0x83] = { .level
= 2, .type
= UNIFIED_CACHE
, .size
= 512 * KiB
,
156 .associativity
= 8, .line_size
= 32, },
157 [0x84] = { .level
= 2, .type
= UNIFIED_CACHE
, .size
= 1 * MiB
,
158 .associativity
= 8, .line_size
= 32, },
159 [0x85] = { .level
= 2, .type
= UNIFIED_CACHE
, .size
= 2 * MiB
,
160 .associativity
= 8, .line_size
= 32, },
161 [0x86] = { .level
= 2, .type
= UNIFIED_CACHE
, .size
= 512 * KiB
,
162 .associativity
= 4, .line_size
= 64, },
163 [0x87] = { .level
= 2, .type
= UNIFIED_CACHE
, .size
= 1 * MiB
,
164 .associativity
= 8, .line_size
= 64, },
165 [0xD0] = { .level
= 3, .type
= UNIFIED_CACHE
, .size
= 512 * KiB
,
166 .associativity
= 4, .line_size
= 64, },
167 [0xD1] = { .level
= 3, .type
= UNIFIED_CACHE
, .size
= 1 * MiB
,
168 .associativity
= 4, .line_size
= 64, },
169 [0xD2] = { .level
= 3, .type
= UNIFIED_CACHE
, .size
= 2 * MiB
,
170 .associativity
= 4, .line_size
= 64, },
171 [0xD6] = { .level
= 3, .type
= UNIFIED_CACHE
, .size
= 1 * MiB
,
172 .associativity
= 8, .line_size
= 64, },
173 [0xD7] = { .level
= 3, .type
= UNIFIED_CACHE
, .size
= 2 * MiB
,
174 .associativity
= 8, .line_size
= 64, },
175 [0xD8] = { .level
= 3, .type
= UNIFIED_CACHE
, .size
= 4 * MiB
,
176 .associativity
= 8, .line_size
= 64, },
177 [0xDC] = { .level
= 3, .type
= UNIFIED_CACHE
, .size
= 1.5 * MiB
,
178 .associativity
= 12, .line_size
= 64, },
179 [0xDD] = { .level
= 3, .type
= UNIFIED_CACHE
, .size
= 3 * MiB
,
180 .associativity
= 12, .line_size
= 64, },
181 [0xDE] = { .level
= 3, .type
= UNIFIED_CACHE
, .size
= 6 * MiB
,
182 .associativity
= 12, .line_size
= 64, },
183 [0xE2] = { .level
= 3, .type
= UNIFIED_CACHE
, .size
= 2 * MiB
,
184 .associativity
= 16, .line_size
= 64, },
185 [0xE3] = { .level
= 3, .type
= UNIFIED_CACHE
, .size
= 4 * MiB
,
186 .associativity
= 16, .line_size
= 64, },
187 [0xE4] = { .level
= 3, .type
= UNIFIED_CACHE
, .size
= 8 * MiB
,
188 .associativity
= 16, .line_size
= 64, },
189 [0xEA] = { .level
= 3, .type
= UNIFIED_CACHE
, .size
= 12 * MiB
,
190 .associativity
= 24, .line_size
= 64, },
191 [0xEB] = { .level
= 3, .type
= UNIFIED_CACHE
, .size
= 18 * MiB
,
192 .associativity
= 24, .line_size
= 64, },
193 [0xEC] = { .level
= 3, .type
= UNIFIED_CACHE
, .size
= 24 * MiB
,
194 .associativity
= 24, .line_size
= 64, },
198 * "CPUID leaf 2 does not report cache descriptor information,
199 * use CPUID leaf 4 to query cache parameters"
201 #define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF
204 * Return a CPUID 2 cache descriptor for a given cache.
205 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
207 static uint8_t cpuid2_cache_descriptor(CPUCacheInfo
*cache
)
211 assert(cache
->size
> 0);
212 assert(cache
->level
> 0);
213 assert(cache
->line_size
> 0);
214 assert(cache
->associativity
> 0);
215 for (i
= 0; i
< ARRAY_SIZE(cpuid2_cache_descriptors
); i
++) {
216 struct CPUID2CacheDescriptorInfo
*d
= &cpuid2_cache_descriptors
[i
];
217 if (d
->level
== cache
->level
&& d
->type
== cache
->type
&&
218 d
->size
== cache
->size
&& d
->line_size
== cache
->line_size
&&
219 d
->associativity
== cache
->associativity
) {
224 return CACHE_DESCRIPTOR_UNAVAILABLE
;
227 /* CPUID Leaf 4 constants: */
230 #define CACHE_TYPE_D 1
231 #define CACHE_TYPE_I 2
232 #define CACHE_TYPE_UNIFIED 3
/*
 * Cache level field of CPUID[4].EAX (bits 7:5).
 * Argument is parenthesized so expression arguments expand correctly
 * (CERT PRE01-C); all current callers pass simple lvalues, so behavior
 * is unchanged for them.
 */
#define CACHE_LEVEL(l)        ((l) << 5)
236 #define CACHE_SELF_INIT_LEVEL (1 << 8)
239 #define CACHE_NO_INVD_SHARING (1 << 0)
240 #define CACHE_INCLUSIVE (1 << 1)
241 #define CACHE_COMPLEX_IDX (1 << 2)
243 /* Encode CacheType for CPUID[4].EAX */
244 #define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \
245 ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \
246 ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
247 0 /* Invalid value */)
250 /* Encode cache info for CPUID[4] */
251 static void encode_cache_cpuid4(CPUCacheInfo
*cache
,
252 int num_apic_ids
, int num_cores
,
253 uint32_t *eax
, uint32_t *ebx
,
254 uint32_t *ecx
, uint32_t *edx
)
256 assert(cache
->size
== cache
->line_size
* cache
->associativity
*
257 cache
->partitions
* cache
->sets
);
259 assert(num_apic_ids
> 0);
260 *eax
= CACHE_TYPE(cache
->type
) |
261 CACHE_LEVEL(cache
->level
) |
262 (cache
->self_init
? CACHE_SELF_INIT_LEVEL
: 0) |
263 ((num_cores
- 1) << 26) |
264 ((num_apic_ids
- 1) << 14);
266 assert(cache
->line_size
> 0);
267 assert(cache
->partitions
> 0);
268 assert(cache
->associativity
> 0);
269 /* We don't implement fully-associative caches */
270 assert(cache
->associativity
< cache
->sets
);
271 *ebx
= (cache
->line_size
- 1) |
272 ((cache
->partitions
- 1) << 12) |
273 ((cache
->associativity
- 1) << 22);
275 assert(cache
->sets
> 0);
276 *ecx
= cache
->sets
- 1;
278 *edx
= (cache
->no_invd_sharing
? CACHE_NO_INVD_SHARING
: 0) |
279 (cache
->inclusive
? CACHE_INCLUSIVE
: 0) |
280 (cache
->complex_indexing
? CACHE_COMPLEX_IDX
: 0);
283 /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
284 static uint32_t encode_cache_cpuid80000005(CPUCacheInfo
*cache
)
286 assert(cache
->size
% 1024 == 0);
287 assert(cache
->lines_per_tag
> 0);
288 assert(cache
->associativity
> 0);
289 assert(cache
->line_size
> 0);
290 return ((cache
->size
/ 1024) << 24) | (cache
->associativity
<< 16) |
291 (cache
->lines_per_tag
<< 8) | (cache
->line_size
);
294 #define ASSOC_FULL 0xFF
296 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
297 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
307 a == ASSOC_FULL ? 0xF : \
308 0 /* invalid value */)
311 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
314 static void encode_cache_cpuid80000006(CPUCacheInfo
*l2
,
316 uint32_t *ecx
, uint32_t *edx
)
318 assert(l2
->size
% 1024 == 0);
319 assert(l2
->associativity
> 0);
320 assert(l2
->lines_per_tag
> 0);
321 assert(l2
->line_size
> 0);
322 *ecx
= ((l2
->size
/ 1024) << 16) |
323 (AMD_ENC_ASSOC(l2
->associativity
) << 12) |
324 (l2
->lines_per_tag
<< 8) | (l2
->line_size
);
327 assert(l3
->size
% (512 * 1024) == 0);
328 assert(l3
->associativity
> 0);
329 assert(l3
->lines_per_tag
> 0);
330 assert(l3
->line_size
> 0);
331 *edx
= ((l3
->size
/ (512 * 1024)) << 18) |
332 (AMD_ENC_ASSOC(l3
->associativity
) << 12) |
333 (l3
->lines_per_tag
<< 8) | (l3
->line_size
);
340 * Definitions used for building CPUID Leaf 0x8000001D and 0x8000001E
341 * Please refer to the AMD64 Architecture Programmer’s Manual Volume 3.
342 * Define the constants to build the cpu topology. Right now, TOPOEXT
343 * feature is enabled only on EPYC. So, these constants are based on
344 * EPYC supported configurations. We may need to handle the cases if
345 * these values change in future.
347 /* Maximum core complexes in a node */
349 /* Maximum cores in a core complex */
350 #define MAX_CORES_IN_CCX 4
351 /* Maximum cores in a node */
352 #define MAX_CORES_IN_NODE 8
353 /* Maximum nodes in a socket */
354 #define MAX_NODES_PER_SOCKET 4
357 * Figure out the number of nodes required to build this config.
358 * Max cores in a node is 8
360 static int nodes_in_socket(int nr_cores
)
364 nodes
= DIV_ROUND_UP(nr_cores
, MAX_CORES_IN_NODE
);
366 /* Hardware does not support config with 3 nodes, return 4 in that case */
367 return (nodes
== 3) ? 4 : nodes
;
371 * Decide the number of cores in a core complex with the given nr_cores using
372 * following set constants MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE and
373 * MAX_NODES_PER_SOCKET. Maintain symmetry as much as possible
374 * L3 cache is shared across all cores in a core complex. So, this will also
375 * tell us how many cores are sharing the L3 cache.
377 static int cores_in_core_complex(int nr_cores
)
381 /* Check if we can fit all the cores in one core complex */
382 if (nr_cores
<= MAX_CORES_IN_CCX
) {
385 /* Get the number of nodes required to build this config */
386 nodes
= nodes_in_socket(nr_cores
);
389 * Divide the cores accros all the core complexes
390 * Return rounded up value
392 return DIV_ROUND_UP(nr_cores
, nodes
* MAX_CCX
);
395 /* Encode cache info for CPUID[8000001D] */
396 static void encode_cache_cpuid8000001d(CPUCacheInfo
*cache
, CPUState
*cs
,
397 uint32_t *eax
, uint32_t *ebx
,
398 uint32_t *ecx
, uint32_t *edx
)
401 assert(cache
->size
== cache
->line_size
* cache
->associativity
*
402 cache
->partitions
* cache
->sets
);
404 *eax
= CACHE_TYPE(cache
->type
) | CACHE_LEVEL(cache
->level
) |
405 (cache
->self_init
? CACHE_SELF_INIT_LEVEL
: 0);
407 /* L3 is shared among multiple cores */
408 if (cache
->level
== 3) {
409 l3_cores
= cores_in_core_complex(cs
->nr_cores
);
410 *eax
|= ((l3_cores
* cs
->nr_threads
) - 1) << 14;
412 *eax
|= ((cs
->nr_threads
- 1) << 14);
415 assert(cache
->line_size
> 0);
416 assert(cache
->partitions
> 0);
417 assert(cache
->associativity
> 0);
418 /* We don't implement fully-associative caches */
419 assert(cache
->associativity
< cache
->sets
);
420 *ebx
= (cache
->line_size
- 1) |
421 ((cache
->partitions
- 1) << 12) |
422 ((cache
->associativity
- 1) << 22);
424 assert(cache
->sets
> 0);
425 *ecx
= cache
->sets
- 1;
427 *edx
= (cache
->no_invd_sharing
? CACHE_NO_INVD_SHARING
: 0) |
428 (cache
->inclusive
? CACHE_INCLUSIVE
: 0) |
429 (cache
->complex_indexing
? CACHE_COMPLEX_IDX
: 0);
/* Data structure to hold the configuration info for a given core index */
struct core_topology {
    /* core complex id of the current core index */
    int ccx_id;
    /*
     * Adjusted core index for this core in the topology.
     * This can be 0,1,2,3 with max 4 cores in a core complex
     */
    int core_id;
    /* Node id for this core index */
    int node_id;
    /* Number of nodes in this config */
    int num_nodes;
};
448 * Build the configuration closely match the EPYC hardware. Using the EPYC
449 * hardware configuration values (MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE)
450 * right now. This could change in future.
451 * nr_cores : Total number of cores in the config
452 * core_id : Core index of the current CPU
453 * topo : Data structure to hold all the config info for this core index
455 static void build_core_topology(int nr_cores
, int core_id
,
456 struct core_topology
*topo
)
458 int nodes
, cores_in_ccx
;
460 /* First get the number of nodes required */
461 nodes
= nodes_in_socket(nr_cores
);
463 cores_in_ccx
= cores_in_core_complex(nr_cores
);
465 topo
->node_id
= core_id
/ (cores_in_ccx
* MAX_CCX
);
466 topo
->ccx_id
= (core_id
% (cores_in_ccx
* MAX_CCX
)) / cores_in_ccx
;
467 topo
->core_id
= core_id
% cores_in_ccx
;
468 topo
->num_nodes
= nodes
;
471 /* Encode cache info for CPUID[8000001E] */
472 static void encode_topo_cpuid8000001e(CPUState
*cs
, X86CPU
*cpu
,
473 uint32_t *eax
, uint32_t *ebx
,
474 uint32_t *ecx
, uint32_t *edx
)
476 struct core_topology topo
= {0};
480 build_core_topology(cs
->nr_cores
, cpu
->core_id
, &topo
);
483 * CPUID_Fn8000001E_EBX
485 * 15:8 Threads per core (The number of threads per core is
486 * Threads per core + 1)
487 * 7:0 Core id (see bit decoding below)
497 if (cs
->nr_threads
- 1) {
498 *ebx
= ((cs
->nr_threads
- 1) << 8) | (topo
.node_id
<< 3) |
499 (topo
.ccx_id
<< 2) | topo
.core_id
;
501 *ebx
= (topo
.node_id
<< 4) | (topo
.ccx_id
<< 3) | topo
.core_id
;
504 * CPUID_Fn8000001E_ECX
506 * 10:8 Nodes per processor (Nodes per processor is number of nodes + 1)
507 * 7:0 Node id (see bit decoding below)
511 if (topo
.num_nodes
<= 4) {
512 *ecx
= ((topo
.num_nodes
- 1) << 8) | (cpu
->socket_id
<< 2) |
516 * Node id fix up. Actual hardware supports up to 4 nodes. But with
517 * more than 32 cores, we may end up with more than 4 nodes.
518 * Node id is a combination of socket id and node id. Only requirement
519 * here is that this number should be unique accross the system.
520 * Shift the socket id to accommodate more nodes. We dont expect both
521 * socket id and node id to be big number at the same time. This is not
522 * an ideal config but we need to to support it. Max nodes we can have
523 * is 32 (255/8) with 8 cores per node and 255 max cores. We only need
524 * 5 bits for nodes. Find the left most set bit to represent the total
525 * number of nodes. find_last_bit returns last set bit(0 based). Left
526 * shift(+1) the socket id to represent all the nodes.
528 nodes
= topo
.num_nodes
- 1;
529 shift
= find_last_bit(&nodes
, 8);
530 *ecx
= ((topo
.num_nodes
- 1) << 8) | (cpu
->socket_id
<< (shift
+ 1)) |
537 * Definitions of the hardcoded cache entries we expose:
538 * These are legacy cache values. If there is a need to change any
539 * of these values please use builtin_x86_defs
543 static CPUCacheInfo legacy_l1d_cache
= {
552 .no_invd_sharing
= true,
555 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
556 static CPUCacheInfo legacy_l1d_cache_amd
= {
566 .no_invd_sharing
= true,
569 /* L1 instruction cache: */
570 static CPUCacheInfo legacy_l1i_cache
= {
571 .type
= INSTRUCTION_CACHE
,
579 .no_invd_sharing
= true,
582 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
583 static CPUCacheInfo legacy_l1i_cache_amd
= {
584 .type
= INSTRUCTION_CACHE
,
593 .no_invd_sharing
= true,
596 /* Level 2 unified cache: */
597 static CPUCacheInfo legacy_l2_cache
= {
598 .type
= UNIFIED_CACHE
,
606 .no_invd_sharing
= true,
609 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
610 static CPUCacheInfo legacy_l2_cache_cpuid2
= {
611 .type
= UNIFIED_CACHE
,
619 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
620 static CPUCacheInfo legacy_l2_cache_amd
= {
621 .type
= UNIFIED_CACHE
,
631 /* Level 3 unified cache: */
632 static CPUCacheInfo legacy_l3_cache
= {
633 .type
= UNIFIED_CACHE
,
643 .complex_indexing
= true,
646 /* TLB definitions: */
648 #define L1_DTLB_2M_ASSOC 1
649 #define L1_DTLB_2M_ENTRIES 255
650 #define L1_DTLB_4K_ASSOC 1
651 #define L1_DTLB_4K_ENTRIES 255
653 #define L1_ITLB_2M_ASSOC 1
654 #define L1_ITLB_2M_ENTRIES 255
655 #define L1_ITLB_4K_ASSOC 1
656 #define L1_ITLB_4K_ENTRIES 255
658 #define L2_DTLB_2M_ASSOC 0 /* disabled */
659 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
660 #define L2_DTLB_4K_ASSOC 4
661 #define L2_DTLB_4K_ENTRIES 512
663 #define L2_ITLB_2M_ASSOC 0 /* disabled */
664 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
665 #define L2_ITLB_4K_ASSOC 4
666 #define L2_ITLB_4K_ENTRIES 512
668 /* CPUID Leaf 0x14 constants: */
669 #define INTEL_PT_MAX_SUBLEAF 0x1
671 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
672 * MSR can be accessed;
673 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
674 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
675 * of Intel PT MSRs across warm reset;
676 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
678 #define INTEL_PT_MINIMAL_EBX 0xf
680 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
681 * IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
683 * bit[01]: ToPA tables can hold any number of output entries, up to the
684 * maximum allowed by the MaskOrTableOffset field of
685 * IA32_RTIT_OUTPUT_MASK_PTRS;
686 * bit[02]: Support Single-Range Output scheme;
688 #define INTEL_PT_MINIMAL_ECX 0x7
689 /* generated packets which contain IP payloads have LIP values */
/*
 * generated packets which contain IP payloads have LIP values.
 * Use an unsigned constant: (1 << 31) left-shifts into the sign bit of a
 * signed int, which is undefined behavior in C.
 */
#define INTEL_PT_IP_LIP          (1u << 31)
691 #define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
692 #define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
693 #define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */
694 #define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */
695 #define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
697 static void x86_cpu_vendor_words2str(char *dst
, uint32_t vendor1
,
698 uint32_t vendor2
, uint32_t vendor3
)
701 for (i
= 0; i
< 4; i
++) {
702 dst
[i
] = vendor1
>> (8 * i
);
703 dst
[i
+ 4] = vendor2
>> (8 * i
);
704 dst
[i
+ 8] = vendor3
>> (8 * i
);
706 dst
[CPUID_VENDOR_SZ
] = '\0';
709 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
710 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
711 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
712 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
713 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
714 CPUID_PSE36 | CPUID_FXSR)
715 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
716 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
717 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
718 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
719 CPUID_PAE | CPUID_SEP | CPUID_APIC)
721 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
722 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
723 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
724 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
725 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
726 /* partly implemented:
727 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
729 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
730 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
731 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
732 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
733 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
734 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR | \
737 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
738 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
739 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
740 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
744 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
746 #define TCG_EXT2_X86_64_FEATURES 0
749 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
750 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
751 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
752 TCG_EXT2_X86_64_FEATURES)
753 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
754 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
755 #define TCG_EXT4_FEATURES 0
756 #define TCG_SVM_FEATURES CPUID_SVM_NPT
757 #define TCG_KVM_FEATURES 0
758 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
759 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
760 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
761 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
764 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
765 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
766 CPUID_7_0_EBX_RDSEED */
767 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \
768 /* CPUID_7_0_ECX_OSPKE is dynamic */ \
770 #define TCG_7_0_EDX_FEATURES 0
771 #define TCG_APM_FEATURES 0
772 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
773 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
775 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
777 typedef enum FeatureWordType
{
782 typedef struct FeatureWordInfo
{
783 FeatureWordType type
;
784 /* feature flags names are taken from "Intel Processor Identification and
785 * the CPUID Instruction" and AMD's "CPUID Specification".
786 * In cases of disagreement between feature naming conventions,
787 * aliases may be added.
789 const char *feat_names
[32];
791 /* If type==CPUID_FEATURE_WORD */
793 uint32_t eax
; /* Input EAX for CPUID */
794 bool needs_ecx
; /* CPUID instruction uses ECX as input */
795 uint32_t ecx
; /* Input ECX value for CPUID */
796 int reg
; /* output register (R_* constant) */
798 /* If type==MSR_FEATURE_WORD */
801 struct { /*CPUID that enumerate this MSR*/
802 FeatureWord cpuid_class
;
807 uint32_t tcg_features
; /* Feature flags supported by TCG */
808 uint32_t unmigratable_flags
; /* Feature flags known to be unmigratable */
809 uint32_t migratable_flags
; /* Feature flags known to be migratable */
810 /* Features that shouldn't be auto-enabled by "-cpu host" */
811 uint32_t no_autoenable_flags
;
814 static FeatureWordInfo feature_word_info
[FEATURE_WORDS
] = {
816 .type
= CPUID_FEATURE_WORD
,
818 "fpu", "vme", "de", "pse",
819 "tsc", "msr", "pae", "mce",
820 "cx8", "apic", NULL
, "sep",
821 "mtrr", "pge", "mca", "cmov",
822 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
823 NULL
, "ds" /* Intel dts */, "acpi", "mmx",
824 "fxsr", "sse", "sse2", "ss",
825 "ht" /* Intel htt */, "tm", "ia64", "pbe",
827 .cpuid
= {.eax
= 1, .reg
= R_EDX
, },
828 .tcg_features
= TCG_FEATURES
,
831 .type
= CPUID_FEATURE_WORD
,
833 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
834 "ds-cpl", "vmx", "smx", "est",
835 "tm2", "ssse3", "cid", NULL
,
836 "fma", "cx16", "xtpr", "pdcm",
837 NULL
, "pcid", "dca", "sse4.1",
838 "sse4.2", "x2apic", "movbe", "popcnt",
839 "tsc-deadline", "aes", "xsave", NULL
/* osxsave */,
840 "avx", "f16c", "rdrand", "hypervisor",
842 .cpuid
= { .eax
= 1, .reg
= R_ECX
, },
843 .tcg_features
= TCG_EXT_FEATURES
,
845 /* Feature names that are already defined on feature_name[] but
846 * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
847 * names on feat_names below. They are copied automatically
848 * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
850 [FEAT_8000_0001_EDX
] = {
851 .type
= CPUID_FEATURE_WORD
,
853 NULL
/* fpu */, NULL
/* vme */, NULL
/* de */, NULL
/* pse */,
854 NULL
/* tsc */, NULL
/* msr */, NULL
/* pae */, NULL
/* mce */,
855 NULL
/* cx8 */, NULL
/* apic */, NULL
, "syscall",
856 NULL
/* mtrr */, NULL
/* pge */, NULL
/* mca */, NULL
/* cmov */,
857 NULL
/* pat */, NULL
/* pse36 */, NULL
, NULL
/* Linux mp */,
858 "nx", NULL
, "mmxext", NULL
/* mmx */,
859 NULL
/* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
860 NULL
, "lm", "3dnowext", "3dnow",
862 .cpuid
= { .eax
= 0x80000001, .reg
= R_EDX
, },
863 .tcg_features
= TCG_EXT2_FEATURES
,
865 [FEAT_8000_0001_ECX
] = {
866 .type
= CPUID_FEATURE_WORD
,
868 "lahf-lm", "cmp-legacy", "svm", "extapic",
869 "cr8legacy", "abm", "sse4a", "misalignsse",
870 "3dnowprefetch", "osvw", "ibs", "xop",
871 "skinit", "wdt", NULL
, "lwp",
872 "fma4", "tce", NULL
, "nodeid-msr",
873 NULL
, "tbm", "topoext", "perfctr-core",
874 "perfctr-nb", NULL
, NULL
, NULL
,
875 NULL
, NULL
, NULL
, NULL
,
877 .cpuid
= { .eax
= 0x80000001, .reg
= R_ECX
, },
878 .tcg_features
= TCG_EXT3_FEATURES
,
880 * TOPOEXT is always allowed but can't be enabled blindly by
881 * "-cpu host", as it requires consistent cache topology info
882 * to be provided so it doesn't confuse guests.
884 .no_autoenable_flags
= CPUID_EXT3_TOPOEXT
,
886 [FEAT_C000_0001_EDX
] = {
887 .type
= CPUID_FEATURE_WORD
,
889 NULL
, NULL
, "xstore", "xstore-en",
890 NULL
, NULL
, "xcrypt", "xcrypt-en",
891 "ace2", "ace2-en", "phe", "phe-en",
892 "pmm", "pmm-en", NULL
, NULL
,
893 NULL
, NULL
, NULL
, NULL
,
894 NULL
, NULL
, NULL
, NULL
,
895 NULL
, NULL
, NULL
, NULL
,
896 NULL
, NULL
, NULL
, NULL
,
898 .cpuid
= { .eax
= 0xC0000001, .reg
= R_EDX
, },
899 .tcg_features
= TCG_EXT4_FEATURES
,
902 .type
= CPUID_FEATURE_WORD
,
904 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
905 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
906 NULL
, "kvm-pv-tlb-flush", NULL
, "kvm-pv-ipi",
907 NULL
, NULL
, NULL
, NULL
,
908 NULL
, NULL
, NULL
, NULL
,
909 NULL
, NULL
, NULL
, NULL
,
910 "kvmclock-stable-bit", NULL
, NULL
, NULL
,
911 NULL
, NULL
, NULL
, NULL
,
913 .cpuid
= { .eax
= KVM_CPUID_FEATURES
, .reg
= R_EAX
, },
914 .tcg_features
= TCG_KVM_FEATURES
,
917 .type
= CPUID_FEATURE_WORD
,
919 "kvm-hint-dedicated", NULL
, NULL
, NULL
,
920 NULL
, NULL
, NULL
, NULL
,
921 NULL
, NULL
, NULL
, NULL
,
922 NULL
, NULL
, NULL
, NULL
,
923 NULL
, NULL
, NULL
, NULL
,
924 NULL
, NULL
, NULL
, NULL
,
925 NULL
, NULL
, NULL
, NULL
,
926 NULL
, NULL
, NULL
, NULL
,
928 .cpuid
= { .eax
= KVM_CPUID_FEATURES
, .reg
= R_EDX
, },
929 .tcg_features
= TCG_KVM_FEATURES
,
931 * KVM hints aren't auto-enabled by -cpu host, they need to be
932 * explicitly enabled in the command-line.
934 .no_autoenable_flags
= ~0U,
937 * .feat_names are commented out for Hyper-V enlightenments because we
938 * don't want to have two different ways for enabling them on QEMU command
939 * line. Some features (e.g. "hyperv_time", "hyperv_vapic", ...) require
940 * enabling several feature bits simultaneously, exposing these bits
941 * individually may just confuse guests.
943 [FEAT_HYPERV_EAX
] = {
944 .type
= CPUID_FEATURE_WORD
,
946 NULL
/* hv_msr_vp_runtime_access */, NULL
/* hv_msr_time_refcount_access */,
947 NULL
/* hv_msr_synic_access */, NULL
/* hv_msr_stimer_access */,
948 NULL
/* hv_msr_apic_access */, NULL
/* hv_msr_hypercall_access */,
949 NULL
/* hv_vpindex_access */, NULL
/* hv_msr_reset_access */,
950 NULL
/* hv_msr_stats_access */, NULL
/* hv_reftsc_access */,
951 NULL
/* hv_msr_idle_access */, NULL
/* hv_msr_frequency_access */,
952 NULL
/* hv_msr_debug_access */, NULL
/* hv_msr_reenlightenment_access */,
954 NULL
, NULL
, NULL
, NULL
,
955 NULL
, NULL
, NULL
, NULL
,
956 NULL
, NULL
, NULL
, NULL
,
957 NULL
, NULL
, NULL
, NULL
,
959 .cpuid
= { .eax
= 0x40000003, .reg
= R_EAX
, },
961 [FEAT_HYPERV_EBX
] = {
962 .type
= CPUID_FEATURE_WORD
,
964 NULL
/* hv_create_partitions */, NULL
/* hv_access_partition_id */,
965 NULL
/* hv_access_memory_pool */, NULL
/* hv_adjust_message_buffers */,
966 NULL
/* hv_post_messages */, NULL
/* hv_signal_events */,
967 NULL
/* hv_create_port */, NULL
/* hv_connect_port */,
968 NULL
/* hv_access_stats */, NULL
, NULL
, NULL
/* hv_debugging */,
969 NULL
/* hv_cpu_power_management */, NULL
/* hv_configure_profiler */,
971 NULL
, NULL
, NULL
, NULL
,
972 NULL
, NULL
, NULL
, NULL
,
973 NULL
, NULL
, NULL
, NULL
,
974 NULL
, NULL
, NULL
, NULL
,
976 .cpuid
= { .eax
= 0x40000003, .reg
= R_EBX
, },
978 [FEAT_HYPERV_EDX
] = {
979 .type
= CPUID_FEATURE_WORD
,
981 NULL
/* hv_mwait */, NULL
/* hv_guest_debugging */,
982 NULL
/* hv_perf_monitor */, NULL
/* hv_cpu_dynamic_part */,
983 NULL
/* hv_hypercall_params_xmm */, NULL
/* hv_guest_idle_state */,
985 NULL
, NULL
, NULL
/* hv_guest_crash_msr */, NULL
,
986 NULL
, NULL
, NULL
, NULL
,
987 NULL
, NULL
, NULL
, NULL
,
988 NULL
, NULL
, NULL
, NULL
,
989 NULL
, NULL
, NULL
, NULL
,
990 NULL
, NULL
, NULL
, NULL
,
992 .cpuid
= { .eax
= 0x40000003, .reg
= R_EDX
, },
994 [FEAT_HV_RECOMM_EAX
] = {
995 .type
= CPUID_FEATURE_WORD
,
997 NULL
/* hv_recommend_pv_as_switch */,
998 NULL
/* hv_recommend_pv_tlbflush_local */,
999 NULL
/* hv_recommend_pv_tlbflush_remote */,
1000 NULL
/* hv_recommend_msr_apic_access */,
1001 NULL
/* hv_recommend_msr_reset */,
1002 NULL
/* hv_recommend_relaxed_timing */,
1003 NULL
/* hv_recommend_dma_remapping */,
1004 NULL
/* hv_recommend_int_remapping */,
1005 NULL
/* hv_recommend_x2apic_msrs */,
1006 NULL
/* hv_recommend_autoeoi_deprecation */,
1007 NULL
/* hv_recommend_pv_ipi */,
1008 NULL
/* hv_recommend_ex_hypercalls */,
1009 NULL
/* hv_hypervisor_is_nested */,
1010 NULL
/* hv_recommend_int_mbec */,
1011 NULL
/* hv_recommend_evmcs */,
1013 NULL
, NULL
, NULL
, NULL
,
1014 NULL
, NULL
, NULL
, NULL
,
1015 NULL
, NULL
, NULL
, NULL
,
1016 NULL
, NULL
, NULL
, NULL
,
1018 .cpuid
= { .eax
= 0x40000004, .reg
= R_EAX
, },
1020 [FEAT_HV_NESTED_EAX
] = {
1021 .type
= CPUID_FEATURE_WORD
,
1022 .cpuid
= { .eax
= 0x4000000A, .reg
= R_EAX
, },
1025 .type
= CPUID_FEATURE_WORD
,
1027 "npt", "lbrv", "svm-lock", "nrip-save",
1028 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
1029 NULL
, NULL
, "pause-filter", NULL
,
1030 "pfthreshold", NULL
, NULL
, NULL
,
1031 NULL
, NULL
, NULL
, NULL
,
1032 NULL
, NULL
, NULL
, NULL
,
1033 NULL
, NULL
, NULL
, NULL
,
1034 NULL
, NULL
, NULL
, NULL
,
1036 .cpuid
= { .eax
= 0x8000000A, .reg
= R_EDX
, },
1037 .tcg_features
= TCG_SVM_FEATURES
,
1040 .type
= CPUID_FEATURE_WORD
,
1042 "fsgsbase", "tsc-adjust", NULL
, "bmi1",
1043 "hle", "avx2", NULL
, "smep",
1044 "bmi2", "erms", "invpcid", "rtm",
1045 NULL
, NULL
, "mpx", NULL
,
1046 "avx512f", "avx512dq", "rdseed", "adx",
1047 "smap", "avx512ifma", "pcommit", "clflushopt",
1048 "clwb", "intel-pt", "avx512pf", "avx512er",
1049 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
1053 .needs_ecx
= true, .ecx
= 0,
1056 .tcg_features
= TCG_7_0_EBX_FEATURES
,
1059 .type
= CPUID_FEATURE_WORD
,
1061 NULL
, "avx512vbmi", "umip", "pku",
1062 NULL
/* ospke */, NULL
, "avx512vbmi2", NULL
,
1063 "gfni", "vaes", "vpclmulqdq", "avx512vnni",
1064 "avx512bitalg", NULL
, "avx512-vpopcntdq", NULL
,
1065 "la57", NULL
, NULL
, NULL
,
1066 NULL
, NULL
, "rdpid", NULL
,
1067 NULL
, "cldemote", NULL
, "movdiri",
1068 "movdir64b", NULL
, NULL
, NULL
,
1072 .needs_ecx
= true, .ecx
= 0,
1075 .tcg_features
= TCG_7_0_ECX_FEATURES
,
1078 .type
= CPUID_FEATURE_WORD
,
1080 NULL
, NULL
, "avx512-4vnniw", "avx512-4fmaps",
1081 NULL
, NULL
, NULL
, NULL
,
1082 NULL
, NULL
, "md-clear", NULL
,
1083 NULL
, NULL
, NULL
, NULL
,
1084 NULL
, NULL
, NULL
, NULL
,
1085 NULL
, NULL
, NULL
, NULL
,
1086 NULL
, NULL
, "spec-ctrl", "stibp",
1087 NULL
, "arch-capabilities", NULL
, "ssbd",
1091 .needs_ecx
= true, .ecx
= 0,
1094 .tcg_features
= TCG_7_0_EDX_FEATURES
,
1096 [FEAT_8000_0007_EDX
] = {
1097 .type
= CPUID_FEATURE_WORD
,
1099 NULL
, NULL
, NULL
, NULL
,
1100 NULL
, NULL
, NULL
, NULL
,
1101 "invtsc", NULL
, NULL
, NULL
,
1102 NULL
, NULL
, NULL
, NULL
,
1103 NULL
, NULL
, NULL
, NULL
,
1104 NULL
, NULL
, NULL
, NULL
,
1105 NULL
, NULL
, NULL
, NULL
,
1106 NULL
, NULL
, NULL
, NULL
,
1108 .cpuid
= { .eax
= 0x80000007, .reg
= R_EDX
, },
1109 .tcg_features
= TCG_APM_FEATURES
,
1110 .unmigratable_flags
= CPUID_APM_INVTSC
,
1112 [FEAT_8000_0008_EBX
] = {
1113 .type
= CPUID_FEATURE_WORD
,
1115 NULL
, NULL
, NULL
, NULL
,
1116 NULL
, NULL
, NULL
, NULL
,
1117 NULL
, "wbnoinvd", NULL
, NULL
,
1118 "ibpb", NULL
, NULL
, NULL
,
1119 NULL
, NULL
, NULL
, NULL
,
1120 NULL
, NULL
, NULL
, NULL
,
1121 "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL
,
1122 NULL
, NULL
, NULL
, NULL
,
1124 .cpuid
= { .eax
= 0x80000008, .reg
= R_EBX
, },
1126 .unmigratable_flags
= 0,
1129 .type
= CPUID_FEATURE_WORD
,
1131 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
1132 NULL
, NULL
, NULL
, NULL
,
1133 NULL
, NULL
, NULL
, NULL
,
1134 NULL
, NULL
, NULL
, NULL
,
1135 NULL
, NULL
, NULL
, NULL
,
1136 NULL
, NULL
, NULL
, NULL
,
1137 NULL
, NULL
, NULL
, NULL
,
1138 NULL
, NULL
, NULL
, NULL
,
1142 .needs_ecx
= true, .ecx
= 1,
1145 .tcg_features
= TCG_XSAVE_FEATURES
,
1148 .type
= CPUID_FEATURE_WORD
,
1150 NULL
, NULL
, "arat", NULL
,
1151 NULL
, NULL
, NULL
, NULL
,
1152 NULL
, NULL
, NULL
, NULL
,
1153 NULL
, NULL
, NULL
, NULL
,
1154 NULL
, NULL
, NULL
, NULL
,
1155 NULL
, NULL
, NULL
, NULL
,
1156 NULL
, NULL
, NULL
, NULL
,
1157 NULL
, NULL
, NULL
, NULL
,
1159 .cpuid
= { .eax
= 6, .reg
= R_EAX
, },
1160 .tcg_features
= TCG_6_EAX_FEATURES
,
1162 [FEAT_XSAVE_COMP_LO
] = {
1163 .type
= CPUID_FEATURE_WORD
,
1166 .needs_ecx
= true, .ecx
= 0,
1169 .tcg_features
= ~0U,
1170 .migratable_flags
= XSTATE_FP_MASK
| XSTATE_SSE_MASK
|
1171 XSTATE_YMM_MASK
| XSTATE_BNDREGS_MASK
| XSTATE_BNDCSR_MASK
|
1172 XSTATE_OPMASK_MASK
| XSTATE_ZMM_Hi256_MASK
| XSTATE_Hi16_ZMM_MASK
|
1175 [FEAT_XSAVE_COMP_HI
] = {
1176 .type
= CPUID_FEATURE_WORD
,
1179 .needs_ecx
= true, .ecx
= 0,
1182 .tcg_features
= ~0U,
1184 /*Below are MSR exposed features*/
1185 [FEAT_ARCH_CAPABILITIES
] = {
1186 .type
= MSR_FEATURE_WORD
,
1188 "rdctl-no", "ibrs-all", "rsba", "skip-l1dfl-vmentry",
1189 "ssb-no", "mds-no", NULL
, NULL
,
1190 NULL
, NULL
, NULL
, NULL
,
1191 NULL
, NULL
, NULL
, NULL
,
1192 NULL
, NULL
, NULL
, NULL
,
1193 NULL
, NULL
, NULL
, NULL
,
1194 NULL
, NULL
, NULL
, NULL
,
1195 NULL
, NULL
, NULL
, NULL
,
1198 .index
= MSR_IA32_ARCH_CAPABILITIES
,
1201 CPUID_7_0_EDX_ARCH_CAPABILITIES
1207 typedef struct X86RegisterInfo32
{
1208 /* Name of register */
1210 /* QAPI enum value register */
1211 X86CPURegister32 qapi_enum
;
1212 } X86RegisterInfo32
;
/* Build one x86_reg_info_32[] entry: maps R_<reg> to its printable
 * name (via stringification) and the matching QAPI enum value. */
#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
1216 static const X86RegisterInfo32 x86_reg_info_32
[CPU_NB_REGS32
] = {
1228 typedef struct ExtSaveArea
{
1229 uint32_t feature
, bits
;
1230 uint32_t offset
, size
;
1233 static const ExtSaveArea x86_ext_save_areas
[] = {
1235 /* x87 FP state component is always enabled if XSAVE is supported */
1236 .feature
= FEAT_1_ECX
, .bits
= CPUID_EXT_XSAVE
,
1237 /* x87 state is in the legacy region of the XSAVE area */
1239 .size
= sizeof(X86LegacyXSaveArea
) + sizeof(X86XSaveHeader
),
1241 [XSTATE_SSE_BIT
] = {
1242 /* SSE state component is always enabled if XSAVE is supported */
1243 .feature
= FEAT_1_ECX
, .bits
= CPUID_EXT_XSAVE
,
1244 /* SSE state is in the legacy region of the XSAVE area */
1246 .size
= sizeof(X86LegacyXSaveArea
) + sizeof(X86XSaveHeader
),
1249 { .feature
= FEAT_1_ECX
, .bits
= CPUID_EXT_AVX
,
1250 .offset
= offsetof(X86XSaveArea
, avx_state
),
1251 .size
= sizeof(XSaveAVX
) },
1252 [XSTATE_BNDREGS_BIT
] =
1253 { .feature
= FEAT_7_0_EBX
, .bits
= CPUID_7_0_EBX_MPX
,
1254 .offset
= offsetof(X86XSaveArea
, bndreg_state
),
1255 .size
= sizeof(XSaveBNDREG
) },
1256 [XSTATE_BNDCSR_BIT
] =
1257 { .feature
= FEAT_7_0_EBX
, .bits
= CPUID_7_0_EBX_MPX
,
1258 .offset
= offsetof(X86XSaveArea
, bndcsr_state
),
1259 .size
= sizeof(XSaveBNDCSR
) },
1260 [XSTATE_OPMASK_BIT
] =
1261 { .feature
= FEAT_7_0_EBX
, .bits
= CPUID_7_0_EBX_AVX512F
,
1262 .offset
= offsetof(X86XSaveArea
, opmask_state
),
1263 .size
= sizeof(XSaveOpmask
) },
1264 [XSTATE_ZMM_Hi256_BIT
] =
1265 { .feature
= FEAT_7_0_EBX
, .bits
= CPUID_7_0_EBX_AVX512F
,
1266 .offset
= offsetof(X86XSaveArea
, zmm_hi256_state
),
1267 .size
= sizeof(XSaveZMM_Hi256
) },
1268 [XSTATE_Hi16_ZMM_BIT
] =
1269 { .feature
= FEAT_7_0_EBX
, .bits
= CPUID_7_0_EBX_AVX512F
,
1270 .offset
= offsetof(X86XSaveArea
, hi16_zmm_state
),
1271 .size
= sizeof(XSaveHi16_ZMM
) },
1273 { .feature
= FEAT_7_0_ECX
, .bits
= CPUID_7_0_ECX_PKU
,
1274 .offset
= offsetof(X86XSaveArea
, pkru_state
),
1275 .size
= sizeof(XSavePKRU
) },
1278 static uint32_t xsave_area_size(uint64_t mask
)
1283 for (i
= 0; i
< ARRAY_SIZE(x86_ext_save_areas
); i
++) {
1284 const ExtSaveArea
*esa
= &x86_ext_save_areas
[i
];
1285 if ((mask
>> i
) & 1) {
1286 ret
= MAX(ret
, esa
->offset
+ esa
->size
);
1292 static inline bool accel_uses_host_cpuid(void)
1294 return kvm_enabled() || hvf_enabled();
1297 static inline uint64_t x86_cpu_xsave_components(X86CPU
*cpu
)
1299 return ((uint64_t)cpu
->env
.features
[FEAT_XSAVE_COMP_HI
]) << 32 |
1300 cpu
->env
.features
[FEAT_XSAVE_COMP_LO
];
1303 const char *get_register_name_32(unsigned int reg
)
1305 if (reg
>= CPU_NB_REGS32
) {
1308 return x86_reg_info_32
[reg
].name
;
1312 * Returns the set of feature flags that are supported and migratable by
1313 * QEMU, for a given FeatureWord.
1315 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w
)
1317 FeatureWordInfo
*wi
= &feature_word_info
[w
];
1321 for (i
= 0; i
< 32; i
++) {
1322 uint32_t f
= 1U << i
;
1324 /* If the feature name is known, it is implicitly considered migratable,
1325 * unless it is explicitly set in unmigratable_flags */
1326 if ((wi
->migratable_flags
& f
) ||
1327 (wi
->feat_names
[i
] && !(wi
->unmigratable_flags
& f
))) {
1334 void host_cpuid(uint32_t function
, uint32_t count
,
1335 uint32_t *eax
, uint32_t *ebx
, uint32_t *ecx
, uint32_t *edx
)
1340 asm volatile("cpuid"
1341 : "=a"(vec
[0]), "=b"(vec
[1]),
1342 "=c"(vec
[2]), "=d"(vec
[3])
1343 : "0"(function
), "c"(count
) : "cc");
1344 #elif defined(__i386__)
1345 asm volatile("pusha \n\t"
1347 "mov %%eax, 0(%2) \n\t"
1348 "mov %%ebx, 4(%2) \n\t"
1349 "mov %%ecx, 8(%2) \n\t"
1350 "mov %%edx, 12(%2) \n\t"
1352 : : "a"(function
), "c"(count
), "S"(vec
)
1368 void host_vendor_fms(char *vendor
, int *family
, int *model
, int *stepping
)
1370 uint32_t eax
, ebx
, ecx
, edx
;
1372 host_cpuid(0x0, 0, &eax
, &ebx
, &ecx
, &edx
);
1373 x86_cpu_vendor_words2str(vendor
, ebx
, edx
, ecx
);
1375 host_cpuid(0x1, 0, &eax
, &ebx
, &ecx
, &edx
);
1377 *family
= ((eax
>> 8) & 0x0F) + ((eax
>> 20) & 0xFF);
1380 *model
= ((eax
>> 4) & 0x0F) | ((eax
& 0xF0000) >> 12);
1383 *stepping
= eax
& 0x0F;
1387 /* CPU class name definitions: */
/* Return the QOM type name for CPU model @model_name.
 * Caller is responsible for freeing the returned string. */
static char *x86_cpu_type_name(const char *model_name)
{
    char *typename = g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);

    return typename;
}
1397 static ObjectClass
*x86_cpu_class_by_name(const char *cpu_model
)
1400 char *typename
= x86_cpu_type_name(cpu_model
);
1401 oc
= object_class_by_name(typename
);
1406 static char *x86_cpu_class_get_model_name(X86CPUClass
*cc
)
1408 const char *class_name
= object_class_get_name(OBJECT_CLASS(cc
));
1409 assert(g_str_has_suffix(class_name
, X86_CPU_TYPE_SUFFIX
));
1410 return g_strndup(class_name
,
1411 strlen(class_name
) - strlen(X86_CPU_TYPE_SUFFIX
));
1414 struct X86CPUDefinition
{
1418 /* vendor is zero-terminated, 12 character ASCII string */
1419 char vendor
[CPUID_VENDOR_SZ
+ 1];
1423 FeatureWordArray features
;
1424 const char *model_id
;
1425 CPUCaches
*cache_info
;
1428 static CPUCaches epyc_cache_info
= {
1429 .l1d_cache
= &(CPUCacheInfo
) {
1439 .no_invd_sharing
= true,
1441 .l1i_cache
= &(CPUCacheInfo
) {
1442 .type
= INSTRUCTION_CACHE
,
1451 .no_invd_sharing
= true,
1453 .l2_cache
= &(CPUCacheInfo
) {
1454 .type
= UNIFIED_CACHE
,
1463 .l3_cache
= &(CPUCacheInfo
) {
1464 .type
= UNIFIED_CACHE
,
1468 .associativity
= 16,
1474 .complex_indexing
= true,
1478 static X86CPUDefinition builtin_x86_defs
[] = {
1482 .vendor
= CPUID_VENDOR_AMD
,
1486 .features
[FEAT_1_EDX
] =
1488 CPUID_MTRR
| CPUID_CLFLUSH
| CPUID_MCA
|
1490 .features
[FEAT_1_ECX
] =
1491 CPUID_EXT_SSE3
| CPUID_EXT_CX16
,
1492 .features
[FEAT_8000_0001_EDX
] =
1493 CPUID_EXT2_LM
| CPUID_EXT2_SYSCALL
| CPUID_EXT2_NX
,
1494 .features
[FEAT_8000_0001_ECX
] =
1495 CPUID_EXT3_LAHF_LM
| CPUID_EXT3_SVM
,
1496 .xlevel
= 0x8000000A,
1497 .model_id
= "QEMU Virtual CPU version " QEMU_HW_VERSION
,
1502 .vendor
= CPUID_VENDOR_AMD
,
1506 /* Missing: CPUID_HT */
1507 .features
[FEAT_1_EDX
] =
1509 CPUID_MTRR
| CPUID_CLFLUSH
| CPUID_MCA
|
1510 CPUID_PSE36
| CPUID_VME
,
1511 .features
[FEAT_1_ECX
] =
1512 CPUID_EXT_SSE3
| CPUID_EXT_MONITOR
| CPUID_EXT_CX16
|
1514 .features
[FEAT_8000_0001_EDX
] =
1515 CPUID_EXT2_LM
| CPUID_EXT2_SYSCALL
| CPUID_EXT2_NX
|
1516 CPUID_EXT2_3DNOW
| CPUID_EXT2_3DNOWEXT
| CPUID_EXT2_MMXEXT
|
1517 CPUID_EXT2_FFXSR
| CPUID_EXT2_PDPE1GB
| CPUID_EXT2_RDTSCP
,
1518 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1520 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1521 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
1522 .features
[FEAT_8000_0001_ECX
] =
1523 CPUID_EXT3_LAHF_LM
| CPUID_EXT3_SVM
|
1524 CPUID_EXT3_ABM
| CPUID_EXT3_SSE4A
,
1525 /* Missing: CPUID_SVM_LBRV */
1526 .features
[FEAT_SVM
] =
1528 .xlevel
= 0x8000001A,
1529 .model_id
= "AMD Phenom(tm) 9550 Quad-Core Processor"
1534 .vendor
= CPUID_VENDOR_INTEL
,
1538 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1539 .features
[FEAT_1_EDX
] =
1541 CPUID_MTRR
| CPUID_CLFLUSH
| CPUID_MCA
|
1542 CPUID_PSE36
| CPUID_VME
| CPUID_ACPI
| CPUID_SS
,
1543 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
1544 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
1545 .features
[FEAT_1_ECX
] =
1546 CPUID_EXT_SSE3
| CPUID_EXT_MONITOR
| CPUID_EXT_SSSE3
|
1548 .features
[FEAT_8000_0001_EDX
] =
1549 CPUID_EXT2_LM
| CPUID_EXT2_SYSCALL
| CPUID_EXT2_NX
,
1550 .features
[FEAT_8000_0001_ECX
] =
1552 .xlevel
= 0x80000008,
1553 .model_id
= "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
1558 .vendor
= CPUID_VENDOR_INTEL
,
1562 /* Missing: CPUID_HT */
1563 .features
[FEAT_1_EDX
] =
1564 PPRO_FEATURES
| CPUID_VME
|
1565 CPUID_MTRR
| CPUID_CLFLUSH
| CPUID_MCA
|
1567 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
1568 .features
[FEAT_1_ECX
] =
1569 CPUID_EXT_SSE3
| CPUID_EXT_CX16
,
1570 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
1571 .features
[FEAT_8000_0001_EDX
] =
1572 CPUID_EXT2_LM
| CPUID_EXT2_SYSCALL
| CPUID_EXT2_NX
,
1573 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1574 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
1575 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1576 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
1577 .features
[FEAT_8000_0001_ECX
] =
1579 .xlevel
= 0x80000008,
1580 .model_id
= "Common KVM processor"
1585 .vendor
= CPUID_VENDOR_INTEL
,
1589 .features
[FEAT_1_EDX
] =
1591 .features
[FEAT_1_ECX
] =
1593 .xlevel
= 0x80000004,
1594 .model_id
= "QEMU Virtual CPU version " QEMU_HW_VERSION
,
1599 .vendor
= CPUID_VENDOR_INTEL
,
1603 .features
[FEAT_1_EDX
] =
1604 PPRO_FEATURES
| CPUID_VME
|
1605 CPUID_MTRR
| CPUID_CLFLUSH
| CPUID_MCA
| CPUID_PSE36
,
1606 .features
[FEAT_1_ECX
] =
1608 .features
[FEAT_8000_0001_ECX
] =
1610 .xlevel
= 0x80000008,
1611 .model_id
= "Common 32-bit KVM processor"
1616 .vendor
= CPUID_VENDOR_INTEL
,
1620 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1621 .features
[FEAT_1_EDX
] =
1622 PPRO_FEATURES
| CPUID_VME
|
1623 CPUID_MTRR
| CPUID_CLFLUSH
| CPUID_MCA
| CPUID_ACPI
|
1625 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
1626 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
1627 .features
[FEAT_1_ECX
] =
1628 CPUID_EXT_SSE3
| CPUID_EXT_MONITOR
,
1629 .features
[FEAT_8000_0001_EDX
] =
1631 .xlevel
= 0x80000008,
1632 .model_id
= "Genuine Intel(R) CPU T2600 @ 2.16GHz",
1637 .vendor
= CPUID_VENDOR_INTEL
,
1641 .features
[FEAT_1_EDX
] =
1649 .vendor
= CPUID_VENDOR_INTEL
,
1653 .features
[FEAT_1_EDX
] =
1661 .vendor
= CPUID_VENDOR_INTEL
,
1665 .features
[FEAT_1_EDX
] =
1673 .vendor
= CPUID_VENDOR_INTEL
,
1677 .features
[FEAT_1_EDX
] =
1685 .vendor
= CPUID_VENDOR_AMD
,
1689 .features
[FEAT_1_EDX
] =
1690 PPRO_FEATURES
| CPUID_PSE36
| CPUID_VME
| CPUID_MTRR
|
1692 .features
[FEAT_8000_0001_EDX
] =
1693 CPUID_EXT2_MMXEXT
| CPUID_EXT2_3DNOW
| CPUID_EXT2_3DNOWEXT
,
1694 .xlevel
= 0x80000008,
1695 .model_id
= "QEMU Virtual CPU version " QEMU_HW_VERSION
,
1700 .vendor
= CPUID_VENDOR_INTEL
,
1704 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1705 .features
[FEAT_1_EDX
] =
1707 CPUID_MTRR
| CPUID_CLFLUSH
| CPUID_MCA
| CPUID_VME
|
1708 CPUID_ACPI
| CPUID_SS
,
1709 /* Some CPUs got no CPUID_SEP */
1710 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
1712 .features
[FEAT_1_ECX
] =
1713 CPUID_EXT_SSE3
| CPUID_EXT_MONITOR
| CPUID_EXT_SSSE3
|
1715 .features
[FEAT_8000_0001_EDX
] =
1717 .features
[FEAT_8000_0001_ECX
] =
1719 .xlevel
= 0x80000008,
1720 .model_id
= "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
1725 .vendor
= CPUID_VENDOR_INTEL
,
1729 .features
[FEAT_1_EDX
] =
1730 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
1731 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
1732 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
1733 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
1734 CPUID_DE
| CPUID_FP87
,
1735 .features
[FEAT_1_ECX
] =
1736 CPUID_EXT_SSSE3
| CPUID_EXT_SSE3
,
1737 .features
[FEAT_8000_0001_EDX
] =
1738 CPUID_EXT2_LM
| CPUID_EXT2_NX
| CPUID_EXT2_SYSCALL
,
1739 .features
[FEAT_8000_0001_ECX
] =
1741 .xlevel
= 0x80000008,
1742 .model_id
= "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1747 .vendor
= CPUID_VENDOR_INTEL
,
1751 .features
[FEAT_1_EDX
] =
1752 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
1753 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
1754 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
1755 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
1756 CPUID_DE
| CPUID_FP87
,
1757 .features
[FEAT_1_ECX
] =
1758 CPUID_EXT_SSE41
| CPUID_EXT_CX16
| CPUID_EXT_SSSE3
|
1760 .features
[FEAT_8000_0001_EDX
] =
1761 CPUID_EXT2_LM
| CPUID_EXT2_NX
| CPUID_EXT2_SYSCALL
,
1762 .features
[FEAT_8000_0001_ECX
] =
1764 .xlevel
= 0x80000008,
1765 .model_id
= "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1770 .vendor
= CPUID_VENDOR_INTEL
,
1774 .features
[FEAT_1_EDX
] =
1775 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
1776 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
1777 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
1778 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
1779 CPUID_DE
| CPUID_FP87
,
1780 .features
[FEAT_1_ECX
] =
1781 CPUID_EXT_POPCNT
| CPUID_EXT_SSE42
| CPUID_EXT_SSE41
|
1782 CPUID_EXT_CX16
| CPUID_EXT_SSSE3
| CPUID_EXT_SSE3
,
1783 .features
[FEAT_8000_0001_EDX
] =
1784 CPUID_EXT2_LM
| CPUID_EXT2_SYSCALL
| CPUID_EXT2_NX
,
1785 .features
[FEAT_8000_0001_ECX
] =
1787 .xlevel
= 0x80000008,
1788 .model_id
= "Intel Core i7 9xx (Nehalem Class Core i7)",
1791 .name
= "Nehalem-IBRS",
1793 .vendor
= CPUID_VENDOR_INTEL
,
1797 .features
[FEAT_1_EDX
] =
1798 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
1799 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
1800 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
1801 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
1802 CPUID_DE
| CPUID_FP87
,
1803 .features
[FEAT_1_ECX
] =
1804 CPUID_EXT_POPCNT
| CPUID_EXT_SSE42
| CPUID_EXT_SSE41
|
1805 CPUID_EXT_CX16
| CPUID_EXT_SSSE3
| CPUID_EXT_SSE3
,
1806 .features
[FEAT_7_0_EDX
] =
1807 CPUID_7_0_EDX_SPEC_CTRL
,
1808 .features
[FEAT_8000_0001_EDX
] =
1809 CPUID_EXT2_LM
| CPUID_EXT2_SYSCALL
| CPUID_EXT2_NX
,
1810 .features
[FEAT_8000_0001_ECX
] =
1812 .xlevel
= 0x80000008,
1813 .model_id
= "Intel Core i7 9xx (Nehalem Core i7, IBRS update)",
1818 .vendor
= CPUID_VENDOR_INTEL
,
1822 .features
[FEAT_1_EDX
] =
1823 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
1824 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
1825 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
1826 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
1827 CPUID_DE
| CPUID_FP87
,
1828 .features
[FEAT_1_ECX
] =
1829 CPUID_EXT_AES
| CPUID_EXT_POPCNT
| CPUID_EXT_SSE42
|
1830 CPUID_EXT_SSE41
| CPUID_EXT_CX16
| CPUID_EXT_SSSE3
|
1831 CPUID_EXT_PCLMULQDQ
| CPUID_EXT_SSE3
,
1832 .features
[FEAT_8000_0001_EDX
] =
1833 CPUID_EXT2_LM
| CPUID_EXT2_SYSCALL
| CPUID_EXT2_NX
,
1834 .features
[FEAT_8000_0001_ECX
] =
1836 .features
[FEAT_6_EAX
] =
1838 .xlevel
= 0x80000008,
1839 .model_id
= "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1842 .name
= "Westmere-IBRS",
1844 .vendor
= CPUID_VENDOR_INTEL
,
1848 .features
[FEAT_1_EDX
] =
1849 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
1850 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
1851 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
1852 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
1853 CPUID_DE
| CPUID_FP87
,
1854 .features
[FEAT_1_ECX
] =
1855 CPUID_EXT_AES
| CPUID_EXT_POPCNT
| CPUID_EXT_SSE42
|
1856 CPUID_EXT_SSE41
| CPUID_EXT_CX16
| CPUID_EXT_SSSE3
|
1857 CPUID_EXT_PCLMULQDQ
| CPUID_EXT_SSE3
,
1858 .features
[FEAT_8000_0001_EDX
] =
1859 CPUID_EXT2_LM
| CPUID_EXT2_SYSCALL
| CPUID_EXT2_NX
,
1860 .features
[FEAT_8000_0001_ECX
] =
1862 .features
[FEAT_7_0_EDX
] =
1863 CPUID_7_0_EDX_SPEC_CTRL
,
1864 .features
[FEAT_6_EAX
] =
1866 .xlevel
= 0x80000008,
1867 .model_id
= "Westmere E56xx/L56xx/X56xx (IBRS update)",
1870 .name
= "SandyBridge",
1872 .vendor
= CPUID_VENDOR_INTEL
,
1876 .features
[FEAT_1_EDX
] =
1877 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
1878 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
1879 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
1880 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
1881 CPUID_DE
| CPUID_FP87
,
1882 .features
[FEAT_1_ECX
] =
1883 CPUID_EXT_AVX
| CPUID_EXT_XSAVE
| CPUID_EXT_AES
|
1884 CPUID_EXT_TSC_DEADLINE_TIMER
| CPUID_EXT_POPCNT
|
1885 CPUID_EXT_X2APIC
| CPUID_EXT_SSE42
| CPUID_EXT_SSE41
|
1886 CPUID_EXT_CX16
| CPUID_EXT_SSSE3
| CPUID_EXT_PCLMULQDQ
|
1888 .features
[FEAT_8000_0001_EDX
] =
1889 CPUID_EXT2_LM
| CPUID_EXT2_RDTSCP
| CPUID_EXT2_NX
|
1891 .features
[FEAT_8000_0001_ECX
] =
1893 .features
[FEAT_XSAVE
] =
1894 CPUID_XSAVE_XSAVEOPT
,
1895 .features
[FEAT_6_EAX
] =
1897 .xlevel
= 0x80000008,
1898 .model_id
= "Intel Xeon E312xx (Sandy Bridge)",
1901 .name
= "SandyBridge-IBRS",
1903 .vendor
= CPUID_VENDOR_INTEL
,
1907 .features
[FEAT_1_EDX
] =
1908 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
1909 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
1910 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
1911 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
1912 CPUID_DE
| CPUID_FP87
,
1913 .features
[FEAT_1_ECX
] =
1914 CPUID_EXT_AVX
| CPUID_EXT_XSAVE
| CPUID_EXT_AES
|
1915 CPUID_EXT_TSC_DEADLINE_TIMER
| CPUID_EXT_POPCNT
|
1916 CPUID_EXT_X2APIC
| CPUID_EXT_SSE42
| CPUID_EXT_SSE41
|
1917 CPUID_EXT_CX16
| CPUID_EXT_SSSE3
| CPUID_EXT_PCLMULQDQ
|
1919 .features
[FEAT_8000_0001_EDX
] =
1920 CPUID_EXT2_LM
| CPUID_EXT2_RDTSCP
| CPUID_EXT2_NX
|
1922 .features
[FEAT_8000_0001_ECX
] =
1924 .features
[FEAT_7_0_EDX
] =
1925 CPUID_7_0_EDX_SPEC_CTRL
,
1926 .features
[FEAT_XSAVE
] =
1927 CPUID_XSAVE_XSAVEOPT
,
1928 .features
[FEAT_6_EAX
] =
1930 .xlevel
= 0x80000008,
1931 .model_id
= "Intel Xeon E312xx (Sandy Bridge, IBRS update)",
1934 .name
= "IvyBridge",
1936 .vendor
= CPUID_VENDOR_INTEL
,
1940 .features
[FEAT_1_EDX
] =
1941 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
1942 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
1943 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
1944 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
1945 CPUID_DE
| CPUID_FP87
,
1946 .features
[FEAT_1_ECX
] =
1947 CPUID_EXT_AVX
| CPUID_EXT_XSAVE
| CPUID_EXT_AES
|
1948 CPUID_EXT_TSC_DEADLINE_TIMER
| CPUID_EXT_POPCNT
|
1949 CPUID_EXT_X2APIC
| CPUID_EXT_SSE42
| CPUID_EXT_SSE41
|
1950 CPUID_EXT_CX16
| CPUID_EXT_SSSE3
| CPUID_EXT_PCLMULQDQ
|
1951 CPUID_EXT_SSE3
| CPUID_EXT_F16C
| CPUID_EXT_RDRAND
,
1952 .features
[FEAT_7_0_EBX
] =
1953 CPUID_7_0_EBX_FSGSBASE
| CPUID_7_0_EBX_SMEP
|
1955 .features
[FEAT_8000_0001_EDX
] =
1956 CPUID_EXT2_LM
| CPUID_EXT2_RDTSCP
| CPUID_EXT2_NX
|
1958 .features
[FEAT_8000_0001_ECX
] =
1960 .features
[FEAT_XSAVE
] =
1961 CPUID_XSAVE_XSAVEOPT
,
1962 .features
[FEAT_6_EAX
] =
1964 .xlevel
= 0x80000008,
1965 .model_id
= "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1968 .name
= "IvyBridge-IBRS",
1970 .vendor
= CPUID_VENDOR_INTEL
,
1974 .features
[FEAT_1_EDX
] =
1975 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
1976 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
1977 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
1978 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
1979 CPUID_DE
| CPUID_FP87
,
1980 .features
[FEAT_1_ECX
] =
1981 CPUID_EXT_AVX
| CPUID_EXT_XSAVE
| CPUID_EXT_AES
|
1982 CPUID_EXT_TSC_DEADLINE_TIMER
| CPUID_EXT_POPCNT
|
1983 CPUID_EXT_X2APIC
| CPUID_EXT_SSE42
| CPUID_EXT_SSE41
|
1984 CPUID_EXT_CX16
| CPUID_EXT_SSSE3
| CPUID_EXT_PCLMULQDQ
|
1985 CPUID_EXT_SSE3
| CPUID_EXT_F16C
| CPUID_EXT_RDRAND
,
1986 .features
[FEAT_7_0_EBX
] =
1987 CPUID_7_0_EBX_FSGSBASE
| CPUID_7_0_EBX_SMEP
|
1989 .features
[FEAT_8000_0001_EDX
] =
1990 CPUID_EXT2_LM
| CPUID_EXT2_RDTSCP
| CPUID_EXT2_NX
|
1992 .features
[FEAT_8000_0001_ECX
] =
1994 .features
[FEAT_7_0_EDX
] =
1995 CPUID_7_0_EDX_SPEC_CTRL
,
1996 .features
[FEAT_XSAVE
] =
1997 CPUID_XSAVE_XSAVEOPT
,
1998 .features
[FEAT_6_EAX
] =
2000 .xlevel
= 0x80000008,
2001 .model_id
= "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)",
2004 .name
= "Haswell-noTSX",
2006 .vendor
= CPUID_VENDOR_INTEL
,
2010 .features
[FEAT_1_EDX
] =
2011 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
2012 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
2013 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
2014 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
2015 CPUID_DE
| CPUID_FP87
,
2016 .features
[FEAT_1_ECX
] =
2017 CPUID_EXT_AVX
| CPUID_EXT_XSAVE
| CPUID_EXT_AES
|
2018 CPUID_EXT_POPCNT
| CPUID_EXT_X2APIC
| CPUID_EXT_SSE42
|
2019 CPUID_EXT_SSE41
| CPUID_EXT_CX16
| CPUID_EXT_SSSE3
|
2020 CPUID_EXT_PCLMULQDQ
| CPUID_EXT_SSE3
|
2021 CPUID_EXT_TSC_DEADLINE_TIMER
| CPUID_EXT_FMA
| CPUID_EXT_MOVBE
|
2022 CPUID_EXT_PCID
| CPUID_EXT_F16C
| CPUID_EXT_RDRAND
,
2023 .features
[FEAT_8000_0001_EDX
] =
2024 CPUID_EXT2_LM
| CPUID_EXT2_RDTSCP
| CPUID_EXT2_NX
|
2026 .features
[FEAT_8000_0001_ECX
] =
2027 CPUID_EXT3_ABM
| CPUID_EXT3_LAHF_LM
,
2028 .features
[FEAT_7_0_EBX
] =
2029 CPUID_7_0_EBX_FSGSBASE
| CPUID_7_0_EBX_BMI1
|
2030 CPUID_7_0_EBX_AVX2
| CPUID_7_0_EBX_SMEP
|
2031 CPUID_7_0_EBX_BMI2
| CPUID_7_0_EBX_ERMS
| CPUID_7_0_EBX_INVPCID
,
2032 .features
[FEAT_XSAVE
] =
2033 CPUID_XSAVE_XSAVEOPT
,
2034 .features
[FEAT_6_EAX
] =
2036 .xlevel
= 0x80000008,
2037 .model_id
= "Intel Core Processor (Haswell, no TSX)",
2040 .name
= "Haswell-noTSX-IBRS",
2042 .vendor
= CPUID_VENDOR_INTEL
,
2046 .features
[FEAT_1_EDX
] =
2047 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
2048 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
2049 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
2050 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
2051 CPUID_DE
| CPUID_FP87
,
2052 .features
[FEAT_1_ECX
] =
2053 CPUID_EXT_AVX
| CPUID_EXT_XSAVE
| CPUID_EXT_AES
|
2054 CPUID_EXT_POPCNT
| CPUID_EXT_X2APIC
| CPUID_EXT_SSE42
|
2055 CPUID_EXT_SSE41
| CPUID_EXT_CX16
| CPUID_EXT_SSSE3
|
2056 CPUID_EXT_PCLMULQDQ
| CPUID_EXT_SSE3
|
2057 CPUID_EXT_TSC_DEADLINE_TIMER
| CPUID_EXT_FMA
| CPUID_EXT_MOVBE
|
2058 CPUID_EXT_PCID
| CPUID_EXT_F16C
| CPUID_EXT_RDRAND
,
2059 .features
[FEAT_8000_0001_EDX
] =
2060 CPUID_EXT2_LM
| CPUID_EXT2_RDTSCP
| CPUID_EXT2_NX
|
2062 .features
[FEAT_8000_0001_ECX
] =
2063 CPUID_EXT3_ABM
| CPUID_EXT3_LAHF_LM
,
2064 .features
[FEAT_7_0_EDX
] =
2065 CPUID_7_0_EDX_SPEC_CTRL
,
2066 .features
[FEAT_7_0_EBX
] =
2067 CPUID_7_0_EBX_FSGSBASE
| CPUID_7_0_EBX_BMI1
|
2068 CPUID_7_0_EBX_AVX2
| CPUID_7_0_EBX_SMEP
|
2069 CPUID_7_0_EBX_BMI2
| CPUID_7_0_EBX_ERMS
| CPUID_7_0_EBX_INVPCID
,
2070 .features
[FEAT_XSAVE
] =
2071 CPUID_XSAVE_XSAVEOPT
,
2072 .features
[FEAT_6_EAX
] =
2074 .xlevel
= 0x80000008,
2075 .model_id
= "Intel Core Processor (Haswell, no TSX, IBRS)",
2080 .vendor
= CPUID_VENDOR_INTEL
,
2084 .features
[FEAT_1_EDX
] =
2085 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
2086 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
2087 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
2088 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
2089 CPUID_DE
| CPUID_FP87
,
2090 .features
[FEAT_1_ECX
] =
2091 CPUID_EXT_AVX
| CPUID_EXT_XSAVE
| CPUID_EXT_AES
|
2092 CPUID_EXT_POPCNT
| CPUID_EXT_X2APIC
| CPUID_EXT_SSE42
|
2093 CPUID_EXT_SSE41
| CPUID_EXT_CX16
| CPUID_EXT_SSSE3
|
2094 CPUID_EXT_PCLMULQDQ
| CPUID_EXT_SSE3
|
2095 CPUID_EXT_TSC_DEADLINE_TIMER
| CPUID_EXT_FMA
| CPUID_EXT_MOVBE
|
2096 CPUID_EXT_PCID
| CPUID_EXT_F16C
| CPUID_EXT_RDRAND
,
2097 .features
[FEAT_8000_0001_EDX
] =
2098 CPUID_EXT2_LM
| CPUID_EXT2_RDTSCP
| CPUID_EXT2_NX
|
2100 .features
[FEAT_8000_0001_ECX
] =
2101 CPUID_EXT3_ABM
| CPUID_EXT3_LAHF_LM
,
2102 .features
[FEAT_7_0_EBX
] =
2103 CPUID_7_0_EBX_FSGSBASE
| CPUID_7_0_EBX_BMI1
|
2104 CPUID_7_0_EBX_HLE
| CPUID_7_0_EBX_AVX2
| CPUID_7_0_EBX_SMEP
|
2105 CPUID_7_0_EBX_BMI2
| CPUID_7_0_EBX_ERMS
| CPUID_7_0_EBX_INVPCID
|
2107 .features
[FEAT_XSAVE
] =
2108 CPUID_XSAVE_XSAVEOPT
,
2109 .features
[FEAT_6_EAX
] =
2111 .xlevel
= 0x80000008,
2112 .model_id
= "Intel Core Processor (Haswell)",
2115 .name
= "Haswell-IBRS",
2117 .vendor
= CPUID_VENDOR_INTEL
,
2121 .features
[FEAT_1_EDX
] =
2122 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
2123 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
2124 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
2125 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
2126 CPUID_DE
| CPUID_FP87
,
2127 .features
[FEAT_1_ECX
] =
2128 CPUID_EXT_AVX
| CPUID_EXT_XSAVE
| CPUID_EXT_AES
|
2129 CPUID_EXT_POPCNT
| CPUID_EXT_X2APIC
| CPUID_EXT_SSE42
|
2130 CPUID_EXT_SSE41
| CPUID_EXT_CX16
| CPUID_EXT_SSSE3
|
2131 CPUID_EXT_PCLMULQDQ
| CPUID_EXT_SSE3
|
2132 CPUID_EXT_TSC_DEADLINE_TIMER
| CPUID_EXT_FMA
| CPUID_EXT_MOVBE
|
2133 CPUID_EXT_PCID
| CPUID_EXT_F16C
| CPUID_EXT_RDRAND
,
2134 .features
[FEAT_8000_0001_EDX
] =
2135 CPUID_EXT2_LM
| CPUID_EXT2_RDTSCP
| CPUID_EXT2_NX
|
2137 .features
[FEAT_8000_0001_ECX
] =
2138 CPUID_EXT3_ABM
| CPUID_EXT3_LAHF_LM
,
2139 .features
[FEAT_7_0_EDX
] =
2140 CPUID_7_0_EDX_SPEC_CTRL
,
2141 .features
[FEAT_7_0_EBX
] =
2142 CPUID_7_0_EBX_FSGSBASE
| CPUID_7_0_EBX_BMI1
|
2143 CPUID_7_0_EBX_HLE
| CPUID_7_0_EBX_AVX2
| CPUID_7_0_EBX_SMEP
|
2144 CPUID_7_0_EBX_BMI2
| CPUID_7_0_EBX_ERMS
| CPUID_7_0_EBX_INVPCID
|
2146 .features
[FEAT_XSAVE
] =
2147 CPUID_XSAVE_XSAVEOPT
,
2148 .features
[FEAT_6_EAX
] =
2150 .xlevel
= 0x80000008,
2151 .model_id
= "Intel Core Processor (Haswell, IBRS)",
2154 .name
= "Broadwell-noTSX",
2156 .vendor
= CPUID_VENDOR_INTEL
,
2160 .features
[FEAT_1_EDX
] =
2161 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
2162 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
2163 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
2164 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
2165 CPUID_DE
| CPUID_FP87
,
2166 .features
[FEAT_1_ECX
] =
2167 CPUID_EXT_AVX
| CPUID_EXT_XSAVE
| CPUID_EXT_AES
|
2168 CPUID_EXT_POPCNT
| CPUID_EXT_X2APIC
| CPUID_EXT_SSE42
|
2169 CPUID_EXT_SSE41
| CPUID_EXT_CX16
| CPUID_EXT_SSSE3
|
2170 CPUID_EXT_PCLMULQDQ
| CPUID_EXT_SSE3
|
2171 CPUID_EXT_TSC_DEADLINE_TIMER
| CPUID_EXT_FMA
| CPUID_EXT_MOVBE
|
2172 CPUID_EXT_PCID
| CPUID_EXT_F16C
| CPUID_EXT_RDRAND
,
2173 .features
[FEAT_8000_0001_EDX
] =
2174 CPUID_EXT2_LM
| CPUID_EXT2_RDTSCP
| CPUID_EXT2_NX
|
2176 .features
[FEAT_8000_0001_ECX
] =
2177 CPUID_EXT3_ABM
| CPUID_EXT3_LAHF_LM
| CPUID_EXT3_3DNOWPREFETCH
,
2178 .features
[FEAT_7_0_EBX
] =
2179 CPUID_7_0_EBX_FSGSBASE
| CPUID_7_0_EBX_BMI1
|
2180 CPUID_7_0_EBX_AVX2
| CPUID_7_0_EBX_SMEP
|
2181 CPUID_7_0_EBX_BMI2
| CPUID_7_0_EBX_ERMS
| CPUID_7_0_EBX_INVPCID
|
2182 CPUID_7_0_EBX_RDSEED
| CPUID_7_0_EBX_ADX
|
2184 .features
[FEAT_XSAVE
] =
2185 CPUID_XSAVE_XSAVEOPT
,
2186 .features
[FEAT_6_EAX
] =
2188 .xlevel
= 0x80000008,
2189 .model_id
= "Intel Core Processor (Broadwell, no TSX)",
2192 .name
= "Broadwell-noTSX-IBRS",
2194 .vendor
= CPUID_VENDOR_INTEL
,
2198 .features
[FEAT_1_EDX
] =
2199 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
2200 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
2201 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
2202 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
2203 CPUID_DE
| CPUID_FP87
,
2204 .features
[FEAT_1_ECX
] =
2205 CPUID_EXT_AVX
| CPUID_EXT_XSAVE
| CPUID_EXT_AES
|
2206 CPUID_EXT_POPCNT
| CPUID_EXT_X2APIC
| CPUID_EXT_SSE42
|
2207 CPUID_EXT_SSE41
| CPUID_EXT_CX16
| CPUID_EXT_SSSE3
|
2208 CPUID_EXT_PCLMULQDQ
| CPUID_EXT_SSE3
|
2209 CPUID_EXT_TSC_DEADLINE_TIMER
| CPUID_EXT_FMA
| CPUID_EXT_MOVBE
|
2210 CPUID_EXT_PCID
| CPUID_EXT_F16C
| CPUID_EXT_RDRAND
,
2211 .features
[FEAT_8000_0001_EDX
] =
2212 CPUID_EXT2_LM
| CPUID_EXT2_RDTSCP
| CPUID_EXT2_NX
|
2214 .features
[FEAT_8000_0001_ECX
] =
2215 CPUID_EXT3_ABM
| CPUID_EXT3_LAHF_LM
| CPUID_EXT3_3DNOWPREFETCH
,
2216 .features
[FEAT_7_0_EDX
] =
2217 CPUID_7_0_EDX_SPEC_CTRL
,
2218 .features
[FEAT_7_0_EBX
] =
2219 CPUID_7_0_EBX_FSGSBASE
| CPUID_7_0_EBX_BMI1
|
2220 CPUID_7_0_EBX_AVX2
| CPUID_7_0_EBX_SMEP
|
2221 CPUID_7_0_EBX_BMI2
| CPUID_7_0_EBX_ERMS
| CPUID_7_0_EBX_INVPCID
|
2222 CPUID_7_0_EBX_RDSEED
| CPUID_7_0_EBX_ADX
|
2224 .features
[FEAT_XSAVE
] =
2225 CPUID_XSAVE_XSAVEOPT
,
2226 .features
[FEAT_6_EAX
] =
2228 .xlevel
= 0x80000008,
2229 .model_id
= "Intel Core Processor (Broadwell, no TSX, IBRS)",
2232 .name
= "Broadwell",
2234 .vendor
= CPUID_VENDOR_INTEL
,
2238 .features
[FEAT_1_EDX
] =
2239 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
2240 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
2241 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
2242 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
2243 CPUID_DE
| CPUID_FP87
,
2244 .features
[FEAT_1_ECX
] =
2245 CPUID_EXT_AVX
| CPUID_EXT_XSAVE
| CPUID_EXT_AES
|
2246 CPUID_EXT_POPCNT
| CPUID_EXT_X2APIC
| CPUID_EXT_SSE42
|
2247 CPUID_EXT_SSE41
| CPUID_EXT_CX16
| CPUID_EXT_SSSE3
|
2248 CPUID_EXT_PCLMULQDQ
| CPUID_EXT_SSE3
|
2249 CPUID_EXT_TSC_DEADLINE_TIMER
| CPUID_EXT_FMA
| CPUID_EXT_MOVBE
|
2250 CPUID_EXT_PCID
| CPUID_EXT_F16C
| CPUID_EXT_RDRAND
,
2251 .features
[FEAT_8000_0001_EDX
] =
2252 CPUID_EXT2_LM
| CPUID_EXT2_RDTSCP
| CPUID_EXT2_NX
|
2254 .features
[FEAT_8000_0001_ECX
] =
2255 CPUID_EXT3_ABM
| CPUID_EXT3_LAHF_LM
| CPUID_EXT3_3DNOWPREFETCH
,
2256 .features
[FEAT_7_0_EBX
] =
2257 CPUID_7_0_EBX_FSGSBASE
| CPUID_7_0_EBX_BMI1
|
2258 CPUID_7_0_EBX_HLE
| CPUID_7_0_EBX_AVX2
| CPUID_7_0_EBX_SMEP
|
2259 CPUID_7_0_EBX_BMI2
| CPUID_7_0_EBX_ERMS
| CPUID_7_0_EBX_INVPCID
|
2260 CPUID_7_0_EBX_RTM
| CPUID_7_0_EBX_RDSEED
| CPUID_7_0_EBX_ADX
|
2262 .features
[FEAT_XSAVE
] =
2263 CPUID_XSAVE_XSAVEOPT
,
2264 .features
[FEAT_6_EAX
] =
2266 .xlevel
= 0x80000008,
2267 .model_id
= "Intel Core Processor (Broadwell)",
2270 .name
= "Broadwell-IBRS",
2272 .vendor
= CPUID_VENDOR_INTEL
,
2276 .features
[FEAT_1_EDX
] =
2277 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
2278 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
2279 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
2280 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
2281 CPUID_DE
| CPUID_FP87
,
2282 .features
[FEAT_1_ECX
] =
2283 CPUID_EXT_AVX
| CPUID_EXT_XSAVE
| CPUID_EXT_AES
|
2284 CPUID_EXT_POPCNT
| CPUID_EXT_X2APIC
| CPUID_EXT_SSE42
|
2285 CPUID_EXT_SSE41
| CPUID_EXT_CX16
| CPUID_EXT_SSSE3
|
2286 CPUID_EXT_PCLMULQDQ
| CPUID_EXT_SSE3
|
2287 CPUID_EXT_TSC_DEADLINE_TIMER
| CPUID_EXT_FMA
| CPUID_EXT_MOVBE
|
2288 CPUID_EXT_PCID
| CPUID_EXT_F16C
| CPUID_EXT_RDRAND
,
2289 .features
[FEAT_8000_0001_EDX
] =
2290 CPUID_EXT2_LM
| CPUID_EXT2_RDTSCP
| CPUID_EXT2_NX
|
2292 .features
[FEAT_8000_0001_ECX
] =
2293 CPUID_EXT3_ABM
| CPUID_EXT3_LAHF_LM
| CPUID_EXT3_3DNOWPREFETCH
,
2294 .features
[FEAT_7_0_EDX
] =
2295 CPUID_7_0_EDX_SPEC_CTRL
,
2296 .features
[FEAT_7_0_EBX
] =
2297 CPUID_7_0_EBX_FSGSBASE
| CPUID_7_0_EBX_BMI1
|
2298 CPUID_7_0_EBX_HLE
| CPUID_7_0_EBX_AVX2
| CPUID_7_0_EBX_SMEP
|
2299 CPUID_7_0_EBX_BMI2
| CPUID_7_0_EBX_ERMS
| CPUID_7_0_EBX_INVPCID
|
2300 CPUID_7_0_EBX_RTM
| CPUID_7_0_EBX_RDSEED
| CPUID_7_0_EBX_ADX
|
2302 .features
[FEAT_XSAVE
] =
2303 CPUID_XSAVE_XSAVEOPT
,
2304 .features
[FEAT_6_EAX
] =
2306 .xlevel
= 0x80000008,
2307 .model_id
= "Intel Core Processor (Broadwell, IBRS)",
2310 .name
= "Skylake-Client",
2312 .vendor
= CPUID_VENDOR_INTEL
,
2316 .features
[FEAT_1_EDX
] =
2317 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
2318 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
2319 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
2320 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
2321 CPUID_DE
| CPUID_FP87
,
2322 .features
[FEAT_1_ECX
] =
2323 CPUID_EXT_AVX
| CPUID_EXT_XSAVE
| CPUID_EXT_AES
|
2324 CPUID_EXT_POPCNT
| CPUID_EXT_X2APIC
| CPUID_EXT_SSE42
|
2325 CPUID_EXT_SSE41
| CPUID_EXT_CX16
| CPUID_EXT_SSSE3
|
2326 CPUID_EXT_PCLMULQDQ
| CPUID_EXT_SSE3
|
2327 CPUID_EXT_TSC_DEADLINE_TIMER
| CPUID_EXT_FMA
| CPUID_EXT_MOVBE
|
2328 CPUID_EXT_PCID
| CPUID_EXT_F16C
| CPUID_EXT_RDRAND
,
2329 .features
[FEAT_8000_0001_EDX
] =
2330 CPUID_EXT2_LM
| CPUID_EXT2_RDTSCP
| CPUID_EXT2_NX
|
2332 .features
[FEAT_8000_0001_ECX
] =
2333 CPUID_EXT3_ABM
| CPUID_EXT3_LAHF_LM
| CPUID_EXT3_3DNOWPREFETCH
,
2334 .features
[FEAT_7_0_EBX
] =
2335 CPUID_7_0_EBX_FSGSBASE
| CPUID_7_0_EBX_BMI1
|
2336 CPUID_7_0_EBX_HLE
| CPUID_7_0_EBX_AVX2
| CPUID_7_0_EBX_SMEP
|
2337 CPUID_7_0_EBX_BMI2
| CPUID_7_0_EBX_ERMS
| CPUID_7_0_EBX_INVPCID
|
2338 CPUID_7_0_EBX_RTM
| CPUID_7_0_EBX_RDSEED
| CPUID_7_0_EBX_ADX
|
2340 /* Missing: XSAVES (not supported by some Linux versions,
2341 * including v4.1 to v4.12).
2342 * KVM doesn't yet expose any XSAVES state save component,
2343 * and the only one defined in Skylake (processor tracing)
2344 * probably will block migration anyway.
2346 .features
[FEAT_XSAVE
] =
2347 CPUID_XSAVE_XSAVEOPT
| CPUID_XSAVE_XSAVEC
|
2348 CPUID_XSAVE_XGETBV1
,
2349 .features
[FEAT_6_EAX
] =
2351 .xlevel
= 0x80000008,
2352 .model_id
= "Intel Core Processor (Skylake)",
2355 .name
= "Skylake-Client-IBRS",
2357 .vendor
= CPUID_VENDOR_INTEL
,
2361 .features
[FEAT_1_EDX
] =
2362 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
2363 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
2364 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
2365 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
2366 CPUID_DE
| CPUID_FP87
,
2367 .features
[FEAT_1_ECX
] =
2368 CPUID_EXT_AVX
| CPUID_EXT_XSAVE
| CPUID_EXT_AES
|
2369 CPUID_EXT_POPCNT
| CPUID_EXT_X2APIC
| CPUID_EXT_SSE42
|
2370 CPUID_EXT_SSE41
| CPUID_EXT_CX16
| CPUID_EXT_SSSE3
|
2371 CPUID_EXT_PCLMULQDQ
| CPUID_EXT_SSE3
|
2372 CPUID_EXT_TSC_DEADLINE_TIMER
| CPUID_EXT_FMA
| CPUID_EXT_MOVBE
|
2373 CPUID_EXT_PCID
| CPUID_EXT_F16C
| CPUID_EXT_RDRAND
,
2374 .features
[FEAT_8000_0001_EDX
] =
2375 CPUID_EXT2_LM
| CPUID_EXT2_RDTSCP
| CPUID_EXT2_NX
|
2377 .features
[FEAT_8000_0001_ECX
] =
2378 CPUID_EXT3_ABM
| CPUID_EXT3_LAHF_LM
| CPUID_EXT3_3DNOWPREFETCH
,
2379 .features
[FEAT_7_0_EDX
] =
2380 CPUID_7_0_EDX_SPEC_CTRL
,
2381 .features
[FEAT_7_0_EBX
] =
2382 CPUID_7_0_EBX_FSGSBASE
| CPUID_7_0_EBX_BMI1
|
2383 CPUID_7_0_EBX_HLE
| CPUID_7_0_EBX_AVX2
| CPUID_7_0_EBX_SMEP
|
2384 CPUID_7_0_EBX_BMI2
| CPUID_7_0_EBX_ERMS
| CPUID_7_0_EBX_INVPCID
|
2385 CPUID_7_0_EBX_RTM
| CPUID_7_0_EBX_RDSEED
| CPUID_7_0_EBX_ADX
|
2387 /* Missing: XSAVES (not supported by some Linux versions,
2388 * including v4.1 to v4.12).
2389 * KVM doesn't yet expose any XSAVES state save component,
2390 * and the only one defined in Skylake (processor tracing)
2391 * probably will block migration anyway.
2393 .features
[FEAT_XSAVE
] =
2394 CPUID_XSAVE_XSAVEOPT
| CPUID_XSAVE_XSAVEC
|
2395 CPUID_XSAVE_XGETBV1
,
2396 .features
[FEAT_6_EAX
] =
2398 .xlevel
= 0x80000008,
2399 .model_id
= "Intel Core Processor (Skylake, IBRS)",
2402 .name
= "Skylake-Server",
2404 .vendor
= CPUID_VENDOR_INTEL
,
2408 .features
[FEAT_1_EDX
] =
2409 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
2410 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
2411 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
2412 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
2413 CPUID_DE
| CPUID_FP87
,
2414 .features
[FEAT_1_ECX
] =
2415 CPUID_EXT_AVX
| CPUID_EXT_XSAVE
| CPUID_EXT_AES
|
2416 CPUID_EXT_POPCNT
| CPUID_EXT_X2APIC
| CPUID_EXT_SSE42
|
2417 CPUID_EXT_SSE41
| CPUID_EXT_CX16
| CPUID_EXT_SSSE3
|
2418 CPUID_EXT_PCLMULQDQ
| CPUID_EXT_SSE3
|
2419 CPUID_EXT_TSC_DEADLINE_TIMER
| CPUID_EXT_FMA
| CPUID_EXT_MOVBE
|
2420 CPUID_EXT_PCID
| CPUID_EXT_F16C
| CPUID_EXT_RDRAND
,
2421 .features
[FEAT_8000_0001_EDX
] =
2422 CPUID_EXT2_LM
| CPUID_EXT2_PDPE1GB
| CPUID_EXT2_RDTSCP
|
2423 CPUID_EXT2_NX
| CPUID_EXT2_SYSCALL
,
2424 .features
[FEAT_8000_0001_ECX
] =
2425 CPUID_EXT3_ABM
| CPUID_EXT3_LAHF_LM
| CPUID_EXT3_3DNOWPREFETCH
,
2426 .features
[FEAT_7_0_EBX
] =
2427 CPUID_7_0_EBX_FSGSBASE
| CPUID_7_0_EBX_BMI1
|
2428 CPUID_7_0_EBX_HLE
| CPUID_7_0_EBX_AVX2
| CPUID_7_0_EBX_SMEP
|
2429 CPUID_7_0_EBX_BMI2
| CPUID_7_0_EBX_ERMS
| CPUID_7_0_EBX_INVPCID
|
2430 CPUID_7_0_EBX_RTM
| CPUID_7_0_EBX_RDSEED
| CPUID_7_0_EBX_ADX
|
2431 CPUID_7_0_EBX_SMAP
| CPUID_7_0_EBX_CLWB
|
2432 CPUID_7_0_EBX_AVX512F
| CPUID_7_0_EBX_AVX512DQ
|
2433 CPUID_7_0_EBX_AVX512BW
| CPUID_7_0_EBX_AVX512CD
|
2434 CPUID_7_0_EBX_AVX512VL
| CPUID_7_0_EBX_CLFLUSHOPT
,
2435 .features
[FEAT_7_0_ECX
] =
2437 /* Missing: XSAVES (not supported by some Linux versions,
2438 * including v4.1 to v4.12).
2439 * KVM doesn't yet expose any XSAVES state save component,
2440 * and the only one defined in Skylake (processor tracing)
2441 * probably will block migration anyway.
2443 .features
[FEAT_XSAVE
] =
2444 CPUID_XSAVE_XSAVEOPT
| CPUID_XSAVE_XSAVEC
|
2445 CPUID_XSAVE_XGETBV1
,
2446 .features
[FEAT_6_EAX
] =
2448 .xlevel
= 0x80000008,
2449 .model_id
= "Intel Xeon Processor (Skylake)",
2452 .name
= "Skylake-Server-IBRS",
2454 .vendor
= CPUID_VENDOR_INTEL
,
2458 .features
[FEAT_1_EDX
] =
2459 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
2460 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
2461 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
2462 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
2463 CPUID_DE
| CPUID_FP87
,
2464 .features
[FEAT_1_ECX
] =
2465 CPUID_EXT_AVX
| CPUID_EXT_XSAVE
| CPUID_EXT_AES
|
2466 CPUID_EXT_POPCNT
| CPUID_EXT_X2APIC
| CPUID_EXT_SSE42
|
2467 CPUID_EXT_SSE41
| CPUID_EXT_CX16
| CPUID_EXT_SSSE3
|
2468 CPUID_EXT_PCLMULQDQ
| CPUID_EXT_SSE3
|
2469 CPUID_EXT_TSC_DEADLINE_TIMER
| CPUID_EXT_FMA
| CPUID_EXT_MOVBE
|
2470 CPUID_EXT_PCID
| CPUID_EXT_F16C
| CPUID_EXT_RDRAND
,
2471 .features
[FEAT_8000_0001_EDX
] =
2472 CPUID_EXT2_LM
| CPUID_EXT2_PDPE1GB
| CPUID_EXT2_RDTSCP
|
2473 CPUID_EXT2_NX
| CPUID_EXT2_SYSCALL
,
2474 .features
[FEAT_8000_0001_ECX
] =
2475 CPUID_EXT3_ABM
| CPUID_EXT3_LAHF_LM
| CPUID_EXT3_3DNOWPREFETCH
,
2476 .features
[FEAT_7_0_EDX
] =
2477 CPUID_7_0_EDX_SPEC_CTRL
,
2478 .features
[FEAT_7_0_EBX
] =
2479 CPUID_7_0_EBX_FSGSBASE
| CPUID_7_0_EBX_BMI1
|
2480 CPUID_7_0_EBX_HLE
| CPUID_7_0_EBX_AVX2
| CPUID_7_0_EBX_SMEP
|
2481 CPUID_7_0_EBX_BMI2
| CPUID_7_0_EBX_ERMS
| CPUID_7_0_EBX_INVPCID
|
2482 CPUID_7_0_EBX_RTM
| CPUID_7_0_EBX_RDSEED
| CPUID_7_0_EBX_ADX
|
2483 CPUID_7_0_EBX_SMAP
| CPUID_7_0_EBX_CLWB
|
2484 CPUID_7_0_EBX_AVX512F
| CPUID_7_0_EBX_AVX512DQ
|
2485 CPUID_7_0_EBX_AVX512BW
| CPUID_7_0_EBX_AVX512CD
|
2486 CPUID_7_0_EBX_AVX512VL
,
2487 .features
[FEAT_7_0_ECX
] =
2489 /* Missing: XSAVES (not supported by some Linux versions,
2490 * including v4.1 to v4.12).
2491 * KVM doesn't yet expose any XSAVES state save component,
2492 * and the only one defined in Skylake (processor tracing)
2493 * probably will block migration anyway.
2495 .features
[FEAT_XSAVE
] =
2496 CPUID_XSAVE_XSAVEOPT
| CPUID_XSAVE_XSAVEC
|
2497 CPUID_XSAVE_XGETBV1
,
2498 .features
[FEAT_6_EAX
] =
2500 .xlevel
= 0x80000008,
2501 .model_id
= "Intel Xeon Processor (Skylake, IBRS)",
2504 .name
= "Cascadelake-Server",
2506 .vendor
= CPUID_VENDOR_INTEL
,
2510 .features
[FEAT_1_EDX
] =
2511 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
2512 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
2513 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
2514 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
2515 CPUID_DE
| CPUID_FP87
,
2516 .features
[FEAT_1_ECX
] =
2517 CPUID_EXT_AVX
| CPUID_EXT_XSAVE
| CPUID_EXT_AES
|
2518 CPUID_EXT_POPCNT
| CPUID_EXT_X2APIC
| CPUID_EXT_SSE42
|
2519 CPUID_EXT_SSE41
| CPUID_EXT_CX16
| CPUID_EXT_SSSE3
|
2520 CPUID_EXT_PCLMULQDQ
| CPUID_EXT_SSE3
|
2521 CPUID_EXT_TSC_DEADLINE_TIMER
| CPUID_EXT_FMA
| CPUID_EXT_MOVBE
|
2522 CPUID_EXT_PCID
| CPUID_EXT_F16C
| CPUID_EXT_RDRAND
,
2523 .features
[FEAT_8000_0001_EDX
] =
2524 CPUID_EXT2_LM
| CPUID_EXT2_PDPE1GB
| CPUID_EXT2_RDTSCP
|
2525 CPUID_EXT2_NX
| CPUID_EXT2_SYSCALL
,
2526 .features
[FEAT_8000_0001_ECX
] =
2527 CPUID_EXT3_ABM
| CPUID_EXT3_LAHF_LM
| CPUID_EXT3_3DNOWPREFETCH
,
2528 .features
[FEAT_7_0_EBX
] =
2529 CPUID_7_0_EBX_FSGSBASE
| CPUID_7_0_EBX_BMI1
|
2530 CPUID_7_0_EBX_HLE
| CPUID_7_0_EBX_AVX2
| CPUID_7_0_EBX_SMEP
|
2531 CPUID_7_0_EBX_BMI2
| CPUID_7_0_EBX_ERMS
| CPUID_7_0_EBX_INVPCID
|
2532 CPUID_7_0_EBX_RTM
| CPUID_7_0_EBX_RDSEED
| CPUID_7_0_EBX_ADX
|
2533 CPUID_7_0_EBX_SMAP
| CPUID_7_0_EBX_CLWB
|
2534 CPUID_7_0_EBX_AVX512F
| CPUID_7_0_EBX_AVX512DQ
|
2535 CPUID_7_0_EBX_AVX512BW
| CPUID_7_0_EBX_AVX512CD
|
2536 CPUID_7_0_EBX_AVX512VL
| CPUID_7_0_EBX_CLFLUSHOPT
,
2537 .features
[FEAT_7_0_ECX
] =
2539 CPUID_7_0_ECX_AVX512VNNI
,
2540 .features
[FEAT_7_0_EDX
] =
2541 CPUID_7_0_EDX_SPEC_CTRL
| CPUID_7_0_EDX_SPEC_CTRL_SSBD
,
2542 /* Missing: XSAVES (not supported by some Linux versions,
2543 * including v4.1 to v4.12).
2544 * KVM doesn't yet expose any XSAVES state save component,
2545 * and the only one defined in Skylake (processor tracing)
2546 * probably will block migration anyway.
2548 .features
[FEAT_XSAVE
] =
2549 CPUID_XSAVE_XSAVEOPT
| CPUID_XSAVE_XSAVEC
|
2550 CPUID_XSAVE_XGETBV1
,
2551 .features
[FEAT_6_EAX
] =
2553 .xlevel
= 0x80000008,
2554 .model_id
= "Intel Xeon Processor (Cascadelake)",
2557 .name
= "Icelake-Client",
2559 .vendor
= CPUID_VENDOR_INTEL
,
2563 .features
[FEAT_1_EDX
] =
2564 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
2565 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
2566 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
2567 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
2568 CPUID_DE
| CPUID_FP87
,
2569 .features
[FEAT_1_ECX
] =
2570 CPUID_EXT_AVX
| CPUID_EXT_XSAVE
| CPUID_EXT_AES
|
2571 CPUID_EXT_POPCNT
| CPUID_EXT_X2APIC
| CPUID_EXT_SSE42
|
2572 CPUID_EXT_SSE41
| CPUID_EXT_CX16
| CPUID_EXT_SSSE3
|
2573 CPUID_EXT_PCLMULQDQ
| CPUID_EXT_SSE3
|
2574 CPUID_EXT_TSC_DEADLINE_TIMER
| CPUID_EXT_FMA
| CPUID_EXT_MOVBE
|
2575 CPUID_EXT_PCID
| CPUID_EXT_F16C
| CPUID_EXT_RDRAND
,
2576 .features
[FEAT_8000_0001_EDX
] =
2577 CPUID_EXT2_LM
| CPUID_EXT2_RDTSCP
| CPUID_EXT2_NX
|
2579 .features
[FEAT_8000_0001_ECX
] =
2580 CPUID_EXT3_ABM
| CPUID_EXT3_LAHF_LM
| CPUID_EXT3_3DNOWPREFETCH
,
2581 .features
[FEAT_8000_0008_EBX
] =
2582 CPUID_8000_0008_EBX_WBNOINVD
,
2583 .features
[FEAT_7_0_EBX
] =
2584 CPUID_7_0_EBX_FSGSBASE
| CPUID_7_0_EBX_BMI1
|
2585 CPUID_7_0_EBX_HLE
| CPUID_7_0_EBX_AVX2
| CPUID_7_0_EBX_SMEP
|
2586 CPUID_7_0_EBX_BMI2
| CPUID_7_0_EBX_ERMS
| CPUID_7_0_EBX_INVPCID
|
2587 CPUID_7_0_EBX_RTM
| CPUID_7_0_EBX_RDSEED
| CPUID_7_0_EBX_ADX
|
2589 .features
[FEAT_7_0_ECX
] =
2590 CPUID_7_0_ECX_VBMI
| CPUID_7_0_ECX_UMIP
| CPUID_7_0_ECX_PKU
|
2591 CPUID_7_0_ECX_VBMI2
| CPUID_7_0_ECX_GFNI
|
2592 CPUID_7_0_ECX_VAES
| CPUID_7_0_ECX_VPCLMULQDQ
|
2593 CPUID_7_0_ECX_AVX512VNNI
| CPUID_7_0_ECX_AVX512BITALG
|
2594 CPUID_7_0_ECX_AVX512_VPOPCNTDQ
,
2595 .features
[FEAT_7_0_EDX
] =
2596 CPUID_7_0_EDX_SPEC_CTRL
| CPUID_7_0_EDX_SPEC_CTRL_SSBD
,
2597 /* Missing: XSAVES (not supported by some Linux versions,
2598 * including v4.1 to v4.12).
2599 * KVM doesn't yet expose any XSAVES state save component,
2600 * and the only one defined in Skylake (processor tracing)
2601 * probably will block migration anyway.
2603 .features
[FEAT_XSAVE
] =
2604 CPUID_XSAVE_XSAVEOPT
| CPUID_XSAVE_XSAVEC
|
2605 CPUID_XSAVE_XGETBV1
,
2606 .features
[FEAT_6_EAX
] =
2608 .xlevel
= 0x80000008,
2609 .model_id
= "Intel Core Processor (Icelake)",
2612 .name
= "Icelake-Server",
2614 .vendor
= CPUID_VENDOR_INTEL
,
2618 .features
[FEAT_1_EDX
] =
2619 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
2620 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
2621 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
2622 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
2623 CPUID_DE
| CPUID_FP87
,
2624 .features
[FEAT_1_ECX
] =
2625 CPUID_EXT_AVX
| CPUID_EXT_XSAVE
| CPUID_EXT_AES
|
2626 CPUID_EXT_POPCNT
| CPUID_EXT_X2APIC
| CPUID_EXT_SSE42
|
2627 CPUID_EXT_SSE41
| CPUID_EXT_CX16
| CPUID_EXT_SSSE3
|
2628 CPUID_EXT_PCLMULQDQ
| CPUID_EXT_SSE3
|
2629 CPUID_EXT_TSC_DEADLINE_TIMER
| CPUID_EXT_FMA
| CPUID_EXT_MOVBE
|
2630 CPUID_EXT_PCID
| CPUID_EXT_F16C
| CPUID_EXT_RDRAND
,
2631 .features
[FEAT_8000_0001_EDX
] =
2632 CPUID_EXT2_LM
| CPUID_EXT2_PDPE1GB
| CPUID_EXT2_RDTSCP
|
2633 CPUID_EXT2_NX
| CPUID_EXT2_SYSCALL
,
2634 .features
[FEAT_8000_0001_ECX
] =
2635 CPUID_EXT3_ABM
| CPUID_EXT3_LAHF_LM
| CPUID_EXT3_3DNOWPREFETCH
,
2636 .features
[FEAT_8000_0008_EBX
] =
2637 CPUID_8000_0008_EBX_WBNOINVD
,
2638 .features
[FEAT_7_0_EBX
] =
2639 CPUID_7_0_EBX_FSGSBASE
| CPUID_7_0_EBX_BMI1
|
2640 CPUID_7_0_EBX_HLE
| CPUID_7_0_EBX_AVX2
| CPUID_7_0_EBX_SMEP
|
2641 CPUID_7_0_EBX_BMI2
| CPUID_7_0_EBX_ERMS
| CPUID_7_0_EBX_INVPCID
|
2642 CPUID_7_0_EBX_RTM
| CPUID_7_0_EBX_RDSEED
| CPUID_7_0_EBX_ADX
|
2643 CPUID_7_0_EBX_SMAP
| CPUID_7_0_EBX_CLWB
|
2644 CPUID_7_0_EBX_AVX512F
| CPUID_7_0_EBX_AVX512DQ
|
2645 CPUID_7_0_EBX_AVX512BW
| CPUID_7_0_EBX_AVX512CD
|
2646 CPUID_7_0_EBX_AVX512VL
| CPUID_7_0_EBX_CLFLUSHOPT
,
2647 .features
[FEAT_7_0_ECX
] =
2648 CPUID_7_0_ECX_VBMI
| CPUID_7_0_ECX_UMIP
| CPUID_7_0_ECX_PKU
|
2649 CPUID_7_0_ECX_VBMI2
| CPUID_7_0_ECX_GFNI
|
2650 CPUID_7_0_ECX_VAES
| CPUID_7_0_ECX_VPCLMULQDQ
|
2651 CPUID_7_0_ECX_AVX512VNNI
| CPUID_7_0_ECX_AVX512BITALG
|
2652 CPUID_7_0_ECX_AVX512_VPOPCNTDQ
| CPUID_7_0_ECX_LA57
,
2653 .features
[FEAT_7_0_EDX
] =
2654 CPUID_7_0_EDX_SPEC_CTRL
| CPUID_7_0_EDX_SPEC_CTRL_SSBD
,
2655 /* Missing: XSAVES (not supported by some Linux versions,
2656 * including v4.1 to v4.12).
2657 * KVM doesn't yet expose any XSAVES state save component,
2658 * and the only one defined in Skylake (processor tracing)
2659 * probably will block migration anyway.
2661 .features
[FEAT_XSAVE
] =
2662 CPUID_XSAVE_XSAVEOPT
| CPUID_XSAVE_XSAVEC
|
2663 CPUID_XSAVE_XGETBV1
,
2664 .features
[FEAT_6_EAX
] =
2666 .xlevel
= 0x80000008,
2667 .model_id
= "Intel Xeon Processor (Icelake)",
2670 .name
= "KnightsMill",
2672 .vendor
= CPUID_VENDOR_INTEL
,
2676 .features
[FEAT_1_EDX
] =
2677 CPUID_VME
| CPUID_SS
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
|
2678 CPUID_MMX
| CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
|
2679 CPUID_MCA
| CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
|
2680 CPUID_CX8
| CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
|
2681 CPUID_PSE
| CPUID_DE
| CPUID_FP87
,
2682 .features
[FEAT_1_ECX
] =
2683 CPUID_EXT_AVX
| CPUID_EXT_XSAVE
| CPUID_EXT_AES
|
2684 CPUID_EXT_POPCNT
| CPUID_EXT_X2APIC
| CPUID_EXT_SSE42
|
2685 CPUID_EXT_SSE41
| CPUID_EXT_CX16
| CPUID_EXT_SSSE3
|
2686 CPUID_EXT_PCLMULQDQ
| CPUID_EXT_SSE3
|
2687 CPUID_EXT_TSC_DEADLINE_TIMER
| CPUID_EXT_FMA
| CPUID_EXT_MOVBE
|
2688 CPUID_EXT_F16C
| CPUID_EXT_RDRAND
,
2689 .features
[FEAT_8000_0001_EDX
] =
2690 CPUID_EXT2_LM
| CPUID_EXT2_PDPE1GB
| CPUID_EXT2_RDTSCP
|
2691 CPUID_EXT2_NX
| CPUID_EXT2_SYSCALL
,
2692 .features
[FEAT_8000_0001_ECX
] =
2693 CPUID_EXT3_ABM
| CPUID_EXT3_LAHF_LM
| CPUID_EXT3_3DNOWPREFETCH
,
2694 .features
[FEAT_7_0_EBX
] =
2695 CPUID_7_0_EBX_FSGSBASE
| CPUID_7_0_EBX_BMI1
| CPUID_7_0_EBX_AVX2
|
2696 CPUID_7_0_EBX_SMEP
| CPUID_7_0_EBX_BMI2
| CPUID_7_0_EBX_ERMS
|
2697 CPUID_7_0_EBX_RDSEED
| CPUID_7_0_EBX_ADX
| CPUID_7_0_EBX_AVX512F
|
2698 CPUID_7_0_EBX_AVX512CD
| CPUID_7_0_EBX_AVX512PF
|
2699 CPUID_7_0_EBX_AVX512ER
,
2700 .features
[FEAT_7_0_ECX
] =
2701 CPUID_7_0_ECX_AVX512_VPOPCNTDQ
,
2702 .features
[FEAT_7_0_EDX
] =
2703 CPUID_7_0_EDX_AVX512_4VNNIW
| CPUID_7_0_EDX_AVX512_4FMAPS
,
2704 .features
[FEAT_XSAVE
] =
2705 CPUID_XSAVE_XSAVEOPT
,
2706 .features
[FEAT_6_EAX
] =
2708 .xlevel
= 0x80000008,
2709 .model_id
= "Intel Xeon Phi Processor (Knights Mill)",
2712 .name
= "Opteron_G1",
2714 .vendor
= CPUID_VENDOR_AMD
,
2718 .features
[FEAT_1_EDX
] =
2719 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
2720 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
2721 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
2722 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
2723 CPUID_DE
| CPUID_FP87
,
2724 .features
[FEAT_1_ECX
] =
2726 .features
[FEAT_8000_0001_EDX
] =
2727 CPUID_EXT2_LM
| CPUID_EXT2_NX
| CPUID_EXT2_SYSCALL
,
2728 .xlevel
= 0x80000008,
2729 .model_id
= "AMD Opteron 240 (Gen 1 Class Opteron)",
2732 .name
= "Opteron_G2",
2734 .vendor
= CPUID_VENDOR_AMD
,
2738 .features
[FEAT_1_EDX
] =
2739 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
2740 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
2741 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
2742 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
2743 CPUID_DE
| CPUID_FP87
,
2744 .features
[FEAT_1_ECX
] =
2745 CPUID_EXT_CX16
| CPUID_EXT_SSE3
,
2746 .features
[FEAT_8000_0001_EDX
] =
2747 CPUID_EXT2_LM
| CPUID_EXT2_NX
| CPUID_EXT2_SYSCALL
,
2748 .features
[FEAT_8000_0001_ECX
] =
2749 CPUID_EXT3_SVM
| CPUID_EXT3_LAHF_LM
,
2750 .xlevel
= 0x80000008,
2751 .model_id
= "AMD Opteron 22xx (Gen 2 Class Opteron)",
2754 .name
= "Opteron_G3",
2756 .vendor
= CPUID_VENDOR_AMD
,
2760 .features
[FEAT_1_EDX
] =
2761 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
2762 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
2763 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
2764 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
2765 CPUID_DE
| CPUID_FP87
,
2766 .features
[FEAT_1_ECX
] =
2767 CPUID_EXT_POPCNT
| CPUID_EXT_CX16
| CPUID_EXT_MONITOR
|
2769 .features
[FEAT_8000_0001_EDX
] =
2770 CPUID_EXT2_LM
| CPUID_EXT2_NX
| CPUID_EXT2_SYSCALL
|
2772 .features
[FEAT_8000_0001_ECX
] =
2773 CPUID_EXT3_MISALIGNSSE
| CPUID_EXT3_SSE4A
|
2774 CPUID_EXT3_ABM
| CPUID_EXT3_SVM
| CPUID_EXT3_LAHF_LM
,
2775 .xlevel
= 0x80000008,
2776 .model_id
= "AMD Opteron 23xx (Gen 3 Class Opteron)",
2779 .name
= "Opteron_G4",
2781 .vendor
= CPUID_VENDOR_AMD
,
2785 .features
[FEAT_1_EDX
] =
2786 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
2787 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
2788 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
2789 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
2790 CPUID_DE
| CPUID_FP87
,
2791 .features
[FEAT_1_ECX
] =
2792 CPUID_EXT_AVX
| CPUID_EXT_XSAVE
| CPUID_EXT_AES
|
2793 CPUID_EXT_POPCNT
| CPUID_EXT_SSE42
| CPUID_EXT_SSE41
|
2794 CPUID_EXT_CX16
| CPUID_EXT_SSSE3
| CPUID_EXT_PCLMULQDQ
|
2796 .features
[FEAT_8000_0001_EDX
] =
2797 CPUID_EXT2_LM
| CPUID_EXT2_PDPE1GB
| CPUID_EXT2_NX
|
2798 CPUID_EXT2_SYSCALL
| CPUID_EXT2_RDTSCP
,
2799 .features
[FEAT_8000_0001_ECX
] =
2800 CPUID_EXT3_FMA4
| CPUID_EXT3_XOP
|
2801 CPUID_EXT3_3DNOWPREFETCH
| CPUID_EXT3_MISALIGNSSE
|
2802 CPUID_EXT3_SSE4A
| CPUID_EXT3_ABM
| CPUID_EXT3_SVM
|
2804 .features
[FEAT_SVM
] =
2805 CPUID_SVM_NPT
| CPUID_SVM_NRIPSAVE
,
2807 .xlevel
= 0x8000001A,
2808 .model_id
= "AMD Opteron 62xx class CPU",
2811 .name
= "Opteron_G5",
2813 .vendor
= CPUID_VENDOR_AMD
,
2817 .features
[FEAT_1_EDX
] =
2818 CPUID_VME
| CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
|
2819 CPUID_CLFLUSH
| CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
|
2820 CPUID_PGE
| CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
|
2821 CPUID_MCE
| CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
|
2822 CPUID_DE
| CPUID_FP87
,
2823 .features
[FEAT_1_ECX
] =
2824 CPUID_EXT_F16C
| CPUID_EXT_AVX
| CPUID_EXT_XSAVE
|
2825 CPUID_EXT_AES
| CPUID_EXT_POPCNT
| CPUID_EXT_SSE42
|
2826 CPUID_EXT_SSE41
| CPUID_EXT_CX16
| CPUID_EXT_FMA
|
2827 CPUID_EXT_SSSE3
| CPUID_EXT_PCLMULQDQ
| CPUID_EXT_SSE3
,
2828 .features
[FEAT_8000_0001_EDX
] =
2829 CPUID_EXT2_LM
| CPUID_EXT2_PDPE1GB
| CPUID_EXT2_NX
|
2830 CPUID_EXT2_SYSCALL
| CPUID_EXT2_RDTSCP
,
2831 .features
[FEAT_8000_0001_ECX
] =
2832 CPUID_EXT3_TBM
| CPUID_EXT3_FMA4
| CPUID_EXT3_XOP
|
2833 CPUID_EXT3_3DNOWPREFETCH
| CPUID_EXT3_MISALIGNSSE
|
2834 CPUID_EXT3_SSE4A
| CPUID_EXT3_ABM
| CPUID_EXT3_SVM
|
2836 .features
[FEAT_SVM
] =
2837 CPUID_SVM_NPT
| CPUID_SVM_NRIPSAVE
,
2839 .xlevel
= 0x8000001A,
2840 .model_id
= "AMD Opteron 63xx class CPU",
2845 .vendor
= CPUID_VENDOR_AMD
,
2849 .features
[FEAT_1_EDX
] =
2850 CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
| CPUID_CLFLUSH
|
2851 CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
| CPUID_PGE
|
2852 CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
| CPUID_MCE
|
2853 CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
| CPUID_DE
|
2854 CPUID_VME
| CPUID_FP87
,
2855 .features
[FEAT_1_ECX
] =
2856 CPUID_EXT_RDRAND
| CPUID_EXT_F16C
| CPUID_EXT_AVX
|
2857 CPUID_EXT_XSAVE
| CPUID_EXT_AES
| CPUID_EXT_POPCNT
|
2858 CPUID_EXT_MOVBE
| CPUID_EXT_SSE42
| CPUID_EXT_SSE41
|
2859 CPUID_EXT_CX16
| CPUID_EXT_FMA
| CPUID_EXT_SSSE3
|
2860 CPUID_EXT_MONITOR
| CPUID_EXT_PCLMULQDQ
| CPUID_EXT_SSE3
,
2861 .features
[FEAT_8000_0001_EDX
] =
2862 CPUID_EXT2_LM
| CPUID_EXT2_RDTSCP
| CPUID_EXT2_PDPE1GB
|
2863 CPUID_EXT2_FFXSR
| CPUID_EXT2_MMXEXT
| CPUID_EXT2_NX
|
2865 .features
[FEAT_8000_0001_ECX
] =
2866 CPUID_EXT3_OSVW
| CPUID_EXT3_3DNOWPREFETCH
|
2867 CPUID_EXT3_MISALIGNSSE
| CPUID_EXT3_SSE4A
| CPUID_EXT3_ABM
|
2868 CPUID_EXT3_CR8LEG
| CPUID_EXT3_SVM
| CPUID_EXT3_LAHF_LM
|
2870 .features
[FEAT_7_0_EBX
] =
2871 CPUID_7_0_EBX_FSGSBASE
| CPUID_7_0_EBX_BMI1
| CPUID_7_0_EBX_AVX2
|
2872 CPUID_7_0_EBX_SMEP
| CPUID_7_0_EBX_BMI2
| CPUID_7_0_EBX_RDSEED
|
2873 CPUID_7_0_EBX_ADX
| CPUID_7_0_EBX_SMAP
| CPUID_7_0_EBX_CLFLUSHOPT
|
2874 CPUID_7_0_EBX_SHA_NI
,
2875 /* Missing: XSAVES (not supported by some Linux versions,
2876 * including v4.1 to v4.12).
2877 * KVM doesn't yet expose any XSAVES state save component.
2879 .features
[FEAT_XSAVE
] =
2880 CPUID_XSAVE_XSAVEOPT
| CPUID_XSAVE_XSAVEC
|
2881 CPUID_XSAVE_XGETBV1
,
2882 .features
[FEAT_6_EAX
] =
2884 .features
[FEAT_SVM
] =
2885 CPUID_SVM_NPT
| CPUID_SVM_NRIPSAVE
,
2886 .xlevel
= 0x8000001E,
2887 .model_id
= "AMD EPYC Processor",
2888 .cache_info
= &epyc_cache_info
,
2891 .name
= "EPYC-IBPB",
2893 .vendor
= CPUID_VENDOR_AMD
,
2897 .features
[FEAT_1_EDX
] =
2898 CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
| CPUID_CLFLUSH
|
2899 CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
| CPUID_PGE
|
2900 CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
| CPUID_MCE
|
2901 CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
| CPUID_DE
|
2902 CPUID_VME
| CPUID_FP87
,
2903 .features
[FEAT_1_ECX
] =
2904 CPUID_EXT_RDRAND
| CPUID_EXT_F16C
| CPUID_EXT_AVX
|
2905 CPUID_EXT_XSAVE
| CPUID_EXT_AES
| CPUID_EXT_POPCNT
|
2906 CPUID_EXT_MOVBE
| CPUID_EXT_SSE42
| CPUID_EXT_SSE41
|
2907 CPUID_EXT_CX16
| CPUID_EXT_FMA
| CPUID_EXT_SSSE3
|
2908 CPUID_EXT_MONITOR
| CPUID_EXT_PCLMULQDQ
| CPUID_EXT_SSE3
,
2909 .features
[FEAT_8000_0001_EDX
] =
2910 CPUID_EXT2_LM
| CPUID_EXT2_RDTSCP
| CPUID_EXT2_PDPE1GB
|
2911 CPUID_EXT2_FFXSR
| CPUID_EXT2_MMXEXT
| CPUID_EXT2_NX
|
2913 .features
[FEAT_8000_0001_ECX
] =
2914 CPUID_EXT3_OSVW
| CPUID_EXT3_3DNOWPREFETCH
|
2915 CPUID_EXT3_MISALIGNSSE
| CPUID_EXT3_SSE4A
| CPUID_EXT3_ABM
|
2916 CPUID_EXT3_CR8LEG
| CPUID_EXT3_SVM
| CPUID_EXT3_LAHF_LM
|
2918 .features
[FEAT_8000_0008_EBX
] =
2919 CPUID_8000_0008_EBX_IBPB
,
2920 .features
[FEAT_7_0_EBX
] =
2921 CPUID_7_0_EBX_FSGSBASE
| CPUID_7_0_EBX_BMI1
| CPUID_7_0_EBX_AVX2
|
2922 CPUID_7_0_EBX_SMEP
| CPUID_7_0_EBX_BMI2
| CPUID_7_0_EBX_RDSEED
|
2923 CPUID_7_0_EBX_ADX
| CPUID_7_0_EBX_SMAP
| CPUID_7_0_EBX_CLFLUSHOPT
|
2924 CPUID_7_0_EBX_SHA_NI
,
2925 /* Missing: XSAVES (not supported by some Linux versions,
2926 * including v4.1 to v4.12).
2927 * KVM doesn't yet expose any XSAVES state save component.
2929 .features
[FEAT_XSAVE
] =
2930 CPUID_XSAVE_XSAVEOPT
| CPUID_XSAVE_XSAVEC
|
2931 CPUID_XSAVE_XGETBV1
,
2932 .features
[FEAT_6_EAX
] =
2934 .features
[FEAT_SVM
] =
2935 CPUID_SVM_NPT
| CPUID_SVM_NRIPSAVE
,
2936 .xlevel
= 0x8000001E,
2937 .model_id
= "AMD EPYC Processor (with IBPB)",
2938 .cache_info
= &epyc_cache_info
,
2943 .vendor
= CPUID_VENDOR_HYGON
,
2947 .features
[FEAT_1_EDX
] =
2948 CPUID_SSE2
| CPUID_SSE
| CPUID_FXSR
| CPUID_MMX
| CPUID_CLFLUSH
|
2949 CPUID_PSE36
| CPUID_PAT
| CPUID_CMOV
| CPUID_MCA
| CPUID_PGE
|
2950 CPUID_MTRR
| CPUID_SEP
| CPUID_APIC
| CPUID_CX8
| CPUID_MCE
|
2951 CPUID_PAE
| CPUID_MSR
| CPUID_TSC
| CPUID_PSE
| CPUID_DE
|
2952 CPUID_VME
| CPUID_FP87
,
2953 .features
[FEAT_1_ECX
] =
2954 CPUID_EXT_RDRAND
| CPUID_EXT_F16C
| CPUID_EXT_AVX
|
2955 CPUID_EXT_XSAVE
| CPUID_EXT_POPCNT
|
2956 CPUID_EXT_MOVBE
| CPUID_EXT_SSE42
| CPUID_EXT_SSE41
|
2957 CPUID_EXT_CX16
| CPUID_EXT_FMA
| CPUID_EXT_SSSE3
|
2958 CPUID_EXT_MONITOR
| CPUID_EXT_SSE3
,
2959 .features
[FEAT_8000_0001_EDX
] =
2960 CPUID_EXT2_LM
| CPUID_EXT2_RDTSCP
| CPUID_EXT2_PDPE1GB
|
2961 CPUID_EXT2_FFXSR
| CPUID_EXT2_MMXEXT
| CPUID_EXT2_NX
|
2963 .features
[FEAT_8000_0001_ECX
] =
2964 CPUID_EXT3_OSVW
| CPUID_EXT3_3DNOWPREFETCH
|
2965 CPUID_EXT3_MISALIGNSSE
| CPUID_EXT3_SSE4A
| CPUID_EXT3_ABM
|
2966 CPUID_EXT3_CR8LEG
| CPUID_EXT3_SVM
| CPUID_EXT3_LAHF_LM
|
2968 .features
[FEAT_8000_0008_EBX
] =
2969 CPUID_8000_0008_EBX_IBPB
,
2970 .features
[FEAT_7_0_EBX
] =
2971 CPUID_7_0_EBX_FSGSBASE
| CPUID_7_0_EBX_BMI1
| CPUID_7_0_EBX_AVX2
|
2972 CPUID_7_0_EBX_SMEP
| CPUID_7_0_EBX_BMI2
| CPUID_7_0_EBX_RDSEED
|
2973 CPUID_7_0_EBX_ADX
| CPUID_7_0_EBX_SMAP
| CPUID_7_0_EBX_CLFLUSHOPT
,
2975 * Missing: XSAVES (not supported by some Linux versions,
2976 * including v4.1 to v4.12).
2977 * KVM doesn't yet expose any XSAVES state save component.
2979 .features
[FEAT_XSAVE
] =
2980 CPUID_XSAVE_XSAVEOPT
| CPUID_XSAVE_XSAVEC
|
2981 CPUID_XSAVE_XGETBV1
,
2982 .features
[FEAT_6_EAX
] =
2984 .features
[FEAT_SVM
] =
2985 CPUID_SVM_NPT
| CPUID_SVM_NRIPSAVE
,
2986 .xlevel
= 0x8000001E,
2987 .model_id
= "Hygon Dhyana Processor",
2988 .cache_info
= &epyc_cache_info
,
/*
 * A (property name, value) pair.  Used for tables of per-accelerator
 * global property defaults that get applied to CPU models; tables are
 * terminated by an entry whose .prop is NULL.
 */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;
2996 /* KVM-specific features that are automatically added/removed
2997 * from all CPU models when KVM is enabled.
2999 static PropValue kvm_default_props
[] = {
3000 { "kvmclock", "on" },
3001 { "kvm-nopiodelay", "on" },
3002 { "kvm-asyncpf", "on" },
3003 { "kvm-steal-time", "on" },
3004 { "kvm-pv-eoi", "on" },
3005 { "kvmclock-stable-bit", "on" },
3008 { "monitor", "off" },
3013 /* TCG-specific defaults that override all CPU models when using TCG
3015 static PropValue tcg_default_props
[] = {
3021 void x86_cpu_change_kvm_default(const char *prop
, const char *value
)
3024 for (pv
= kvm_default_props
; pv
->prop
; pv
++) {
3025 if (!strcmp(pv
->prop
, prop
)) {
3031 /* It is valid to call this function only for properties that
3032 * are already present in the kvm_default_props table.
3037 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w
,
3038 bool migratable_only
);
3040 static bool lmce_supported(void)
3042 uint64_t mce_cap
= 0;
3045 if (kvm_ioctl(kvm_state
, KVM_X86_GET_MCE_CAP_SUPPORTED
, &mce_cap
) < 0) {
3050 return !!(mce_cap
& MCG_LMCE_P
);
3053 #define CPUID_MODEL_ID_SZ 48
3056 * cpu_x86_fill_model_id:
3057 * Get CPUID model ID string from host CPU.
3059 * @str should have at least CPUID_MODEL_ID_SZ bytes
3061 * The function does NOT add a null terminator to the string
/* Fill @str with the 48-byte model-ID string returned by host CPUID
 * leaves 0x80000002..0x80000004.  @str must hold at least
 * CPUID_MODEL_ID_SZ bytes; no NUL terminator is appended.
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
    int i;

    for (i = 0; i < 3; i++) {
        host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
        memcpy(str + i * 16 +  0, &eax, 4);
        memcpy(str + i * 16 +  4, &ebx, 4);
        memcpy(str + i * 16 +  8, &ecx, 4);
        memcpy(str + i * 16 + 12, &edx, 4);
    }
    return 0;
}
3079 static Property max_x86_cpu_properties
[] = {
3080 DEFINE_PROP_BOOL("migratable", X86CPU
, migratable
, true),
3081 DEFINE_PROP_BOOL("host-cache-info", X86CPU
, cache_info_passthrough
, false),
3082 DEFINE_PROP_END_OF_LIST()
3085 static void max_x86_cpu_class_init(ObjectClass
*oc
, void *data
)
3087 DeviceClass
*dc
= DEVICE_CLASS(oc
);
3088 X86CPUClass
*xcc
= X86_CPU_CLASS(oc
);
3092 xcc
->model_description
=
3093 "Enables all features supported by the accelerator in the current host";
3095 dc
->props
= max_x86_cpu_properties
;
3098 static void x86_cpu_load_def(X86CPU
*cpu
, X86CPUDefinition
*def
, Error
**errp
);
3100 static void max_x86_cpu_initfn(Object
*obj
)
3102 X86CPU
*cpu
= X86_CPU(obj
);
3103 CPUX86State
*env
= &cpu
->env
;
3104 KVMState
*s
= kvm_state
;
3106 /* We can't fill the features array here because we don't know yet if
3107 * "migratable" is true or false.
3109 cpu
->max_features
= true;
3111 if (accel_uses_host_cpuid()) {
3112 char vendor
[CPUID_VENDOR_SZ
+ 1] = { 0 };
3113 char model_id
[CPUID_MODEL_ID_SZ
+ 1] = { 0 };
3114 int family
, model
, stepping
;
3115 X86CPUDefinition host_cpudef
= { };
3116 uint32_t eax
= 0, ebx
= 0, ecx
= 0, edx
= 0;
3118 host_cpuid(0x0, 0, &eax
, &ebx
, &ecx
, &edx
);
3119 x86_cpu_vendor_words2str(host_cpudef
.vendor
, ebx
, edx
, ecx
);
3121 host_vendor_fms(vendor
, &family
, &model
, &stepping
);
3123 cpu_x86_fill_model_id(model_id
);
3125 object_property_set_str(OBJECT(cpu
), vendor
, "vendor", &error_abort
);
3126 object_property_set_int(OBJECT(cpu
), family
, "family", &error_abort
);
3127 object_property_set_int(OBJECT(cpu
), model
, "model", &error_abort
);
3128 object_property_set_int(OBJECT(cpu
), stepping
, "stepping",
3130 object_property_set_str(OBJECT(cpu
), model_id
, "model-id",
3133 if (kvm_enabled()) {
3134 env
->cpuid_min_level
=
3135 kvm_arch_get_supported_cpuid(s
, 0x0, 0, R_EAX
);
3136 env
->cpuid_min_xlevel
=
3137 kvm_arch_get_supported_cpuid(s
, 0x80000000, 0, R_EAX
);
3138 env
->cpuid_min_xlevel2
=
3139 kvm_arch_get_supported_cpuid(s
, 0xC0000000, 0, R_EAX
);
3141 env
->cpuid_min_level
=
3142 hvf_get_supported_cpuid(0x0, 0, R_EAX
);
3143 env
->cpuid_min_xlevel
=
3144 hvf_get_supported_cpuid(0x80000000, 0, R_EAX
);
3145 env
->cpuid_min_xlevel2
=
3146 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX
);
3149 if (lmce_supported()) {
3150 object_property_set_bool(OBJECT(cpu
), true, "lmce", &error_abort
);
3153 object_property_set_str(OBJECT(cpu
), CPUID_VENDOR_AMD
,
3154 "vendor", &error_abort
);
3155 object_property_set_int(OBJECT(cpu
), 6, "family", &error_abort
);
3156 object_property_set_int(OBJECT(cpu
), 6, "model", &error_abort
);
3157 object_property_set_int(OBJECT(cpu
), 3, "stepping", &error_abort
);
3158 object_property_set_str(OBJECT(cpu
),
3159 "QEMU TCG CPU version " QEMU_HW_VERSION
,
3160 "model-id", &error_abort
);
3163 object_property_set_bool(OBJECT(cpu
), true, "pmu", &error_abort
);
3166 static const TypeInfo max_x86_cpu_type_info
= {
3167 .name
= X86_CPU_TYPE_NAME("max"),
3168 .parent
= TYPE_X86_CPU
,
3169 .instance_init
= max_x86_cpu_initfn
,
3170 .class_init
= max_x86_cpu_class_init
,
3173 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
3174 static void host_x86_cpu_class_init(ObjectClass
*oc
, void *data
)
3176 X86CPUClass
*xcc
= X86_CPU_CLASS(oc
);
3178 xcc
->host_cpuid_required
= true;
3181 #if defined(CONFIG_KVM)
3182 xcc
->model_description
=
3183 "KVM processor with all supported host features ";
3184 #elif defined(CONFIG_HVF)
3185 xcc
->model_description
=
3186 "HVF processor with all supported host features ";
3190 static const TypeInfo host_x86_cpu_type_info
= {
3191 .name
= X86_CPU_TYPE_NAME("host"),
3192 .parent
= X86_CPU_TYPE_NAME("max"),
3193 .class_init
= host_x86_cpu_class_init
,
3198 static char *feature_word_description(FeatureWordInfo
*f
, uint32_t bit
)
3200 assert(f
->type
== CPUID_FEATURE_WORD
|| f
->type
== MSR_FEATURE_WORD
);
3203 case CPUID_FEATURE_WORD
:
3205 const char *reg
= get_register_name_32(f
->cpuid
.reg
);
3207 return g_strdup_printf("CPUID.%02XH:%s",
3210 case MSR_FEATURE_WORD
:
3211 return g_strdup_printf("MSR(%02XH)",
3218 static void report_unavailable_features(FeatureWord w
, uint32_t mask
)
3220 FeatureWordInfo
*f
= &feature_word_info
[w
];
3222 char *feat_word_str
;
3224 for (i
= 0; i
< 32; ++i
) {
3225 if ((1UL << i
) & mask
) {
3226 feat_word_str
= feature_word_description(f
, i
);
3227 warn_report("%s doesn't support requested feature: %s%s%s [bit %d]",
3228 accel_uses_host_cpuid() ? "host" : "TCG",
3230 f
->feat_names
[i
] ? "." : "",
3231 f
->feat_names
[i
] ? f
->feat_names
[i
] : "", i
);
3232 g_free(feat_word_str
);
3237 static void x86_cpuid_version_get_family(Object
*obj
, Visitor
*v
,
3238 const char *name
, void *opaque
,
3241 X86CPU
*cpu
= X86_CPU(obj
);
3242 CPUX86State
*env
= &cpu
->env
;
3245 value
= (env
->cpuid_version
>> 8) & 0xf;
3247 value
+= (env
->cpuid_version
>> 20) & 0xff;
3249 visit_type_int(v
, name
, &value
, errp
);
3252 static void x86_cpuid_version_set_family(Object
*obj
, Visitor
*v
,
3253 const char *name
, void *opaque
,
3256 X86CPU
*cpu
= X86_CPU(obj
);
3257 CPUX86State
*env
= &cpu
->env
;
3258 const int64_t min
= 0;
3259 const int64_t max
= 0xff + 0xf;
3260 Error
*local_err
= NULL
;
3263 visit_type_int(v
, name
, &value
, &local_err
);
3265 error_propagate(errp
, local_err
);
3268 if (value
< min
|| value
> max
) {
3269 error_setg(errp
, QERR_PROPERTY_VALUE_OUT_OF_RANGE
, "",
3270 name
? name
: "null", value
, min
, max
);
3274 env
->cpuid_version
&= ~0xff00f00;
3276 env
->cpuid_version
|= 0xf00 | ((value
- 0x0f) << 20);
3278 env
->cpuid_version
|= value
<< 8;
3282 static void x86_cpuid_version_get_model(Object
*obj
, Visitor
*v
,
3283 const char *name
, void *opaque
,
3286 X86CPU
*cpu
= X86_CPU(obj
);
3287 CPUX86State
*env
= &cpu
->env
;
3290 value
= (env
->cpuid_version
>> 4) & 0xf;
3291 value
|= ((env
->cpuid_version
>> 16) & 0xf) << 4;
3292 visit_type_int(v
, name
, &value
, errp
);
3295 static void x86_cpuid_version_set_model(Object
*obj
, Visitor
*v
,
3296 const char *name
, void *opaque
,
3299 X86CPU
*cpu
= X86_CPU(obj
);
3300 CPUX86State
*env
= &cpu
->env
;
3301 const int64_t min
= 0;
3302 const int64_t max
= 0xff;
3303 Error
*local_err
= NULL
;
3306 visit_type_int(v
, name
, &value
, &local_err
);
3308 error_propagate(errp
, local_err
);
3311 if (value
< min
|| value
> max
) {
3312 error_setg(errp
, QERR_PROPERTY_VALUE_OUT_OF_RANGE
, "",
3313 name
? name
: "null", value
, min
, max
);
3317 env
->cpuid_version
&= ~0xf00f0;
3318 env
->cpuid_version
|= ((value
& 0xf) << 4) | ((value
>> 4) << 16);
3321 static void x86_cpuid_version_get_stepping(Object
*obj
, Visitor
*v
,
3322 const char *name
, void *opaque
,
3325 X86CPU
*cpu
= X86_CPU(obj
);
3326 CPUX86State
*env
= &cpu
->env
;
3329 value
= env
->cpuid_version
& 0xf;
3330 visit_type_int(v
, name
, &value
, errp
);
3333 static void x86_cpuid_version_set_stepping(Object
*obj
, Visitor
*v
,
3334 const char *name
, void *opaque
,
3337 X86CPU
*cpu
= X86_CPU(obj
);
3338 CPUX86State
*env
= &cpu
->env
;
3339 const int64_t min
= 0;
3340 const int64_t max
= 0xf;
3341 Error
*local_err
= NULL
;
3344 visit_type_int(v
, name
, &value
, &local_err
);
3346 error_propagate(errp
, local_err
);
3349 if (value
< min
|| value
> max
) {
3350 error_setg(errp
, QERR_PROPERTY_VALUE_OUT_OF_RANGE
, "",
3351 name
? name
: "null", value
, min
, max
);
3355 env
->cpuid_version
&= ~0xf;
3356 env
->cpuid_version
|= value
& 0xf;
3359 static char *x86_cpuid_get_vendor(Object
*obj
, Error
**errp
)
3361 X86CPU
*cpu
= X86_CPU(obj
);
3362 CPUX86State
*env
= &cpu
->env
;
3365 value
= g_malloc(CPUID_VENDOR_SZ
+ 1);
3366 x86_cpu_vendor_words2str(value
, env
->cpuid_vendor1
, env
->cpuid_vendor2
,
3367 env
->cpuid_vendor3
);
3371 static void x86_cpuid_set_vendor(Object
*obj
, const char *value
,
3374 X86CPU
*cpu
= X86_CPU(obj
);
3375 CPUX86State
*env
= &cpu
->env
;
3378 if (strlen(value
) != CPUID_VENDOR_SZ
) {
3379 error_setg(errp
, QERR_PROPERTY_VALUE_BAD
, "", "vendor", value
);
3383 env
->cpuid_vendor1
= 0;
3384 env
->cpuid_vendor2
= 0;
3385 env
->cpuid_vendor3
= 0;
3386 for (i
= 0; i
< 4; i
++) {
3387 env
->cpuid_vendor1
|= ((uint8_t)value
[i
]) << (8 * i
);
3388 env
->cpuid_vendor2
|= ((uint8_t)value
[i
+ 4]) << (8 * i
);
3389 env
->cpuid_vendor3
|= ((uint8_t)value
[i
+ 8]) << (8 * i
);
3393 static char *x86_cpuid_get_model_id(Object
*obj
, Error
**errp
)
3395 X86CPU
*cpu
= X86_CPU(obj
);
3396 CPUX86State
*env
= &cpu
->env
;
3400 value
= g_malloc(48 + 1);
3401 for (i
= 0; i
< 48; i
++) {
3402 value
[i
] = env
->cpuid_model
[i
>> 2] >> (8 * (i
& 3));
3408 static void x86_cpuid_set_model_id(Object
*obj
, const char *model_id
,
3411 X86CPU
*cpu
= X86_CPU(obj
);
3412 CPUX86State
*env
= &cpu
->env
;
3415 if (model_id
== NULL
) {
3418 len
= strlen(model_id
);
3419 memset(env
->cpuid_model
, 0, 48);
3420 for (i
= 0; i
< 48; i
++) {
3424 c
= (uint8_t)model_id
[i
];
3426 env
->cpuid_model
[i
>> 2] |= c
<< (8 * (i
& 3));
3430 static void x86_cpuid_get_tsc_freq(Object
*obj
, Visitor
*v
, const char *name
,
3431 void *opaque
, Error
**errp
)
3433 X86CPU
*cpu
= X86_CPU(obj
);
3436 value
= cpu
->env
.tsc_khz
* 1000;
3437 visit_type_int(v
, name
, &value
, errp
);
3440 static void x86_cpuid_set_tsc_freq(Object
*obj
, Visitor
*v
, const char *name
,
3441 void *opaque
, Error
**errp
)
3443 X86CPU
*cpu
= X86_CPU(obj
);
3444 const int64_t min
= 0;
3445 const int64_t max
= INT64_MAX
;
3446 Error
*local_err
= NULL
;
3449 visit_type_int(v
, name
, &value
, &local_err
);
3451 error_propagate(errp
, local_err
);
3454 if (value
< min
|| value
> max
) {
3455 error_setg(errp
, QERR_PROPERTY_VALUE_OUT_OF_RANGE
, "",
3456 name
? name
: "null", value
, min
, max
);
3460 cpu
->env
.tsc_khz
= cpu
->env
.user_tsc_khz
= value
/ 1000;
3463 /* Generic getter for "feature-words" and "filtered-features" properties */
3464 static void x86_cpu_get_feature_words(Object
*obj
, Visitor
*v
,
3465 const char *name
, void *opaque
,
3468 uint32_t *array
= (uint32_t *)opaque
;
3470 X86CPUFeatureWordInfo word_infos
[FEATURE_WORDS
] = { };
3471 X86CPUFeatureWordInfoList list_entries
[FEATURE_WORDS
] = { };
3472 X86CPUFeatureWordInfoList
*list
= NULL
;
3474 for (w
= 0; w
< FEATURE_WORDS
; w
++) {
3475 FeatureWordInfo
*wi
= &feature_word_info
[w
];
3477 * We didn't have MSR features when "feature-words" was
3478 * introduced. Therefore skipped other type entries.
3480 if (wi
->type
!= CPUID_FEATURE_WORD
) {
3483 X86CPUFeatureWordInfo
*qwi
= &word_infos
[w
];
3484 qwi
->cpuid_input_eax
= wi
->cpuid
.eax
;
3485 qwi
->has_cpuid_input_ecx
= wi
->cpuid
.needs_ecx
;
3486 qwi
->cpuid_input_ecx
= wi
->cpuid
.ecx
;
3487 qwi
->cpuid_register
= x86_reg_info_32
[wi
->cpuid
.reg
].qapi_enum
;
3488 qwi
->features
= array
[w
];
3490 /* List will be in reverse order, but order shouldn't matter */
3491 list_entries
[w
].next
= list
;
3492 list_entries
[w
].value
= &word_infos
[w
];
3493 list
= &list_entries
[w
];
3496 visit_type_X86CPUFeatureWordInfoList(v
, "feature-words", &list
, errp
);
3499 static void x86_get_hv_spinlocks(Object
*obj
, Visitor
*v
, const char *name
,
3500 void *opaque
, Error
**errp
)
3502 X86CPU
*cpu
= X86_CPU(obj
);
3503 int64_t value
= cpu
->hyperv_spinlock_attempts
;
3505 visit_type_int(v
, name
, &value
, errp
);
3508 static void x86_set_hv_spinlocks(Object
*obj
, Visitor
*v
, const char *name
,
3509 void *opaque
, Error
**errp
)
3511 const int64_t min
= 0xFFF;
3512 const int64_t max
= UINT_MAX
;
3513 X86CPU
*cpu
= X86_CPU(obj
);
3517 visit_type_int(v
, name
, &value
, &err
);
3519 error_propagate(errp
, err
);
3523 if (value
< min
|| value
> max
) {
3524 error_setg(errp
, "Property %s.%s doesn't take value %" PRId64
3525 " (minimum: %" PRId64
", maximum: %" PRId64
")",
3526 object_get_typename(obj
), name
? name
: "null",
3530 cpu
->hyperv_spinlock_attempts
= value
;
3533 static const PropertyInfo qdev_prop_spinlocks
= {
3535 .get
= x86_get_hv_spinlocks
,
3536 .set
= x86_set_hv_spinlocks
,
/* Convert all '_' in a feature string option name to '-', to make feature
 * name conform to QOM property naming rule, which uses '-' instead of '_'.
 */
static inline void feat2prop(char *s)
{
    while ((s = strchr(s, '_'))) {
        *s = '-';
    }
}
3549 /* Return the feature property name for a feature flag bit */
3550 static const char *x86_cpu_feature_name(FeatureWord w
, int bitnr
)
3552 /* XSAVE components are automatically enabled by other features,
3553 * so return the original feature name instead
3555 if (w
== FEAT_XSAVE_COMP_LO
|| w
== FEAT_XSAVE_COMP_HI
) {
3556 int comp
= (w
== FEAT_XSAVE_COMP_HI
) ? bitnr
+ 32 : bitnr
;
3558 if (comp
< ARRAY_SIZE(x86_ext_save_areas
) &&
3559 x86_ext_save_areas
[comp
].bits
) {
3560 w
= x86_ext_save_areas
[comp
].feature
;
3561 bitnr
= ctz32(x86_ext_save_areas
[comp
].bits
);
3566 assert(w
< FEATURE_WORDS
);
3567 return feature_word_info
[w
].feat_names
[bitnr
];
3570 /* Compatibily hack to maintain legacy +-feat semantic,
3571 * where +-feat overwrites any feature set by
3572 * feat=on|feat even if the later is parsed after +-feat
3573 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
3575 static GList
*plus_features
, *minus_features
;
3577 static gint
compare_string(gconstpointer a
, gconstpointer b
)
3579 return g_strcmp0(a
, b
);
3582 /* Parse "+feature,-feature,feature=foo" CPU feature string
3584 static void x86_cpu_parse_featurestr(const char *typename
, char *features
,
3587 char *featurestr
; /* Single 'key=value" string being parsed */
3588 static bool cpu_globals_initialized
;
3589 bool ambiguous
= false;
3591 if (cpu_globals_initialized
) {
3594 cpu_globals_initialized
= true;
3600 for (featurestr
= strtok(features
, ",");
3602 featurestr
= strtok(NULL
, ",")) {
3604 const char *val
= NULL
;
3607 GlobalProperty
*prop
;
3609 /* Compatibility syntax: */
3610 if (featurestr
[0] == '+') {
3611 plus_features
= g_list_append(plus_features
,
3612 g_strdup(featurestr
+ 1));
3614 } else if (featurestr
[0] == '-') {
3615 minus_features
= g_list_append(minus_features
,
3616 g_strdup(featurestr
+ 1));
3620 eq
= strchr(featurestr
, '=');
3628 feat2prop(featurestr
);
3631 if (g_list_find_custom(plus_features
, name
, compare_string
)) {
3632 warn_report("Ambiguous CPU model string. "
3633 "Don't mix both \"+%s\" and \"%s=%s\"",
3637 if (g_list_find_custom(minus_features
, name
, compare_string
)) {
3638 warn_report("Ambiguous CPU model string. "
3639 "Don't mix both \"-%s\" and \"%s=%s\"",
3645 if (!strcmp(name
, "tsc-freq")) {
3649 ret
= qemu_strtosz_metric(val
, NULL
, &tsc_freq
);
3650 if (ret
< 0 || tsc_freq
> INT64_MAX
) {
3651 error_setg(errp
, "bad numerical value %s", val
);
3654 snprintf(num
, sizeof(num
), "%" PRId64
, tsc_freq
);
3656 name
= "tsc-frequency";
3659 prop
= g_new0(typeof(*prop
), 1);
3660 prop
->driver
= typename
;
3661 prop
->property
= g_strdup(name
);
3662 prop
->value
= g_strdup(val
);
3663 qdev_prop_register_global(prop
);
3667 warn_report("Compatibility of ambiguous CPU model "
3668 "strings won't be kept on future QEMU versions");
3672 static void x86_cpu_expand_features(X86CPU
*cpu
, Error
**errp
);
3673 static int x86_cpu_filter_features(X86CPU
*cpu
);
3675 /* Check for missing features that may prevent the CPU class from
3676 * running using the current machine and accelerator.
3678 static void x86_cpu_class_check_missing_features(X86CPUClass
*xcc
,
3679 strList
**missing_feats
)
3684 strList
**next
= missing_feats
;
3686 if (xcc
->host_cpuid_required
&& !accel_uses_host_cpuid()) {
3687 strList
*new = g_new0(strList
, 1);
3688 new->value
= g_strdup("kvm");
3689 *missing_feats
= new;
3693 xc
= X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc
))));
3695 x86_cpu_expand_features(xc
, &err
);
3697 /* Errors at x86_cpu_expand_features should never happen,
3698 * but in case it does, just report the model as not
3699 * runnable at all using the "type" property.
3701 strList
*new = g_new0(strList
, 1);
3702 new->value
= g_strdup("type");
3707 x86_cpu_filter_features(xc
);
3709 for (w
= 0; w
< FEATURE_WORDS
; w
++) {
3710 uint32_t filtered
= xc
->filtered_features
[w
];
3712 for (i
= 0; i
< 32; i
++) {
3713 if (filtered
& (1UL << i
)) {
3714 strList
*new = g_new0(strList
, 1);
3715 new->value
= g_strdup(x86_cpu_feature_name(w
, i
));
3722 object_unref(OBJECT(xc
));
3725 /* Print all cpuid feature names in featureset
3727 static void listflags(GList
*features
)
3732 for (tmp
= features
; tmp
; tmp
= tmp
->next
) {
3733 const char *name
= tmp
->data
;
3734 if ((len
+ strlen(name
) + 1) >= 75) {
3738 qemu_printf("%s%s", len
== 0 ? " " : " ", name
);
3739 len
+= strlen(name
) + 1;
3744 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
3745 static gint
x86_cpu_list_compare(gconstpointer a
, gconstpointer b
)
3747 ObjectClass
*class_a
= (ObjectClass
*)a
;
3748 ObjectClass
*class_b
= (ObjectClass
*)b
;
3749 X86CPUClass
*cc_a
= X86_CPU_CLASS(class_a
);
3750 X86CPUClass
*cc_b
= X86_CPU_CLASS(class_b
);
3751 char *name_a
, *name_b
;
3754 if (cc_a
->ordering
!= cc_b
->ordering
) {
3755 ret
= cc_a
->ordering
- cc_b
->ordering
;
3757 name_a
= x86_cpu_class_get_model_name(cc_a
);
3758 name_b
= x86_cpu_class_get_model_name(cc_b
);
3759 ret
= strcmp(name_a
, name_b
);
3766 static GSList
*get_sorted_cpu_model_list(void)
3768 GSList
*list
= object_class_get_list(TYPE_X86_CPU
, false);
3769 list
= g_slist_sort(list
, x86_cpu_list_compare
);
3773 static void x86_cpu_list_entry(gpointer data
, gpointer user_data
)
3775 ObjectClass
*oc
= data
;
3776 X86CPUClass
*cc
= X86_CPU_CLASS(oc
);
3777 char *name
= x86_cpu_class_get_model_name(cc
);
3778 const char *desc
= cc
->model_description
;
3779 if (!desc
&& cc
->cpu_def
) {
3780 desc
= cc
->cpu_def
->model_id
;
3783 qemu_printf("x86 %-20s %-48s\n", name
, desc
);
3787 /* list available CPU models and flags */
3788 void x86_cpu_list(void)
3792 GList
*names
= NULL
;
3794 qemu_printf("Available CPUs:\n");
3795 list
= get_sorted_cpu_model_list();
3796 g_slist_foreach(list
, x86_cpu_list_entry
, NULL
);
3800 for (i
= 0; i
< ARRAY_SIZE(feature_word_info
); i
++) {
3801 FeatureWordInfo
*fw
= &feature_word_info
[i
];
3802 for (j
= 0; j
< 32; j
++) {
3803 if (fw
->feat_names
[j
]) {
3804 names
= g_list_append(names
, (gpointer
)fw
->feat_names
[j
]);
3809 names
= g_list_sort(names
, (GCompareFunc
)strcmp
);
3811 qemu_printf("\nRecognized CPUID flags:\n");
3817 static void x86_cpu_definition_entry(gpointer data
, gpointer user_data
)
3819 ObjectClass
*oc
= data
;
3820 X86CPUClass
*cc
= X86_CPU_CLASS(oc
);
3821 CpuDefinitionInfoList
**cpu_list
= user_data
;
3822 CpuDefinitionInfoList
*entry
;
3823 CpuDefinitionInfo
*info
;
3825 info
= g_malloc0(sizeof(*info
));
3826 info
->name
= x86_cpu_class_get_model_name(cc
);
3827 x86_cpu_class_check_missing_features(cc
, &info
->unavailable_features
);
3828 info
->has_unavailable_features
= true;
3829 info
->q_typename
= g_strdup(object_class_get_name(oc
));
3830 info
->migration_safe
= cc
->migration_safe
;
3831 info
->has_migration_safe
= true;
3832 info
->q_static
= cc
->static_model
;
3834 entry
= g_malloc0(sizeof(*entry
));
3835 entry
->value
= info
;
3836 entry
->next
= *cpu_list
;
3840 CpuDefinitionInfoList
*qmp_query_cpu_definitions(Error
**errp
)
3842 CpuDefinitionInfoList
*cpu_list
= NULL
;
3843 GSList
*list
= get_sorted_cpu_model_list();
3844 g_slist_foreach(list
, x86_cpu_definition_entry
, &cpu_list
);
3849 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w
,
3850 bool migratable_only
)
3852 FeatureWordInfo
*wi
= &feature_word_info
[w
];
3855 if (kvm_enabled()) {
3857 case CPUID_FEATURE_WORD
:
3858 r
= kvm_arch_get_supported_cpuid(kvm_state
, wi
->cpuid
.eax
,
3862 case MSR_FEATURE_WORD
:
3863 r
= kvm_arch_get_supported_msr_feature(kvm_state
,
3867 } else if (hvf_enabled()) {
3868 if (wi
->type
!= CPUID_FEATURE_WORD
) {
3871 r
= hvf_get_supported_cpuid(wi
->cpuid
.eax
,
3874 } else if (tcg_enabled()) {
3875 r
= wi
->tcg_features
;
3879 if (migratable_only
) {
3880 r
&= x86_cpu_get_migratable_flags(w
);
3885 static void x86_cpu_report_filtered_features(X86CPU
*cpu
)
3889 for (w
= 0; w
< FEATURE_WORDS
; w
++) {
3890 report_unavailable_features(w
, cpu
->filtered_features
[w
]);
3894 static void x86_cpu_apply_props(X86CPU
*cpu
, PropValue
*props
)
3897 for (pv
= props
; pv
->prop
; pv
++) {
3901 object_property_parse(OBJECT(cpu
), pv
->value
, pv
->prop
,
3906 /* Load data from X86CPUDefinition into a X86CPU object
3908 static void x86_cpu_load_def(X86CPU
*cpu
, X86CPUDefinition
*def
, Error
**errp
)
3910 CPUX86State
*env
= &cpu
->env
;
3912 char host_vendor
[CPUID_VENDOR_SZ
+ 1];
3915 /*NOTE: any property set by this function should be returned by
3916 * x86_cpu_static_props(), so static expansion of
3917 * query-cpu-model-expansion is always complete.
3920 /* CPU models only set _minimum_ values for level/xlevel: */
3921 object_property_set_uint(OBJECT(cpu
), def
->level
, "min-level", errp
);
3922 object_property_set_uint(OBJECT(cpu
), def
->xlevel
, "min-xlevel", errp
);
3924 object_property_set_int(OBJECT(cpu
), def
->family
, "family", errp
);
3925 object_property_set_int(OBJECT(cpu
), def
->model
, "model", errp
);
3926 object_property_set_int(OBJECT(cpu
), def
->stepping
, "stepping", errp
);
3927 object_property_set_str(OBJECT(cpu
), def
->model_id
, "model-id", errp
);
3928 for (w
= 0; w
< FEATURE_WORDS
; w
++) {
3929 env
->features
[w
] = def
->features
[w
];
3932 /* legacy-cache defaults to 'off' if CPU model provides cache info */
3933 cpu
->legacy_cache
= !def
->cache_info
;
3935 /* Special cases not set in the X86CPUDefinition structs: */
3936 /* TODO: in-kernel irqchip for hvf */
3937 if (kvm_enabled()) {
3938 if (!kvm_irqchip_in_kernel()) {
3939 x86_cpu_change_kvm_default("x2apic", "off");
3942 x86_cpu_apply_props(cpu
, kvm_default_props
);
3943 } else if (tcg_enabled()) {
3944 x86_cpu_apply_props(cpu
, tcg_default_props
);
3947 env
->features
[FEAT_1_ECX
] |= CPUID_EXT_HYPERVISOR
;
3949 /* sysenter isn't supported in compatibility mode on AMD,
3950 * syscall isn't supported in compatibility mode on Intel.
3951 * Normally we advertise the actual CPU vendor, but you can
3952 * override this using the 'vendor' property if you want to use
3953 * KVM's sysenter/syscall emulation in compatibility mode and
3954 * when doing cross vendor migration
3956 vendor
= def
->vendor
;
3957 if (accel_uses_host_cpuid()) {
3958 uint32_t ebx
= 0, ecx
= 0, edx
= 0;
3959 host_cpuid(0, 0, NULL
, &ebx
, &ecx
, &edx
);
3960 x86_cpu_vendor_words2str(host_vendor
, ebx
, edx
, ecx
);
3961 vendor
= host_vendor
;
3964 object_property_set_str(OBJECT(cpu
), vendor
, "vendor", errp
);
3968 #ifndef CONFIG_USER_ONLY
3969 /* Return a QDict containing keys for all properties that can be included
3970 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
3971 * must be included in the dictionary.
3973 static QDict
*x86_cpu_static_props(void)
3977 static const char *props
[] = {
3995 for (i
= 0; props
[i
]; i
++) {
3996 qdict_put_null(d
, props
[i
]);
3999 for (w
= 0; w
< FEATURE_WORDS
; w
++) {
4000 FeatureWordInfo
*fi
= &feature_word_info
[w
];
4002 for (bit
= 0; bit
< 32; bit
++) {
4003 if (!fi
->feat_names
[bit
]) {
4006 qdict_put_null(d
, fi
->feat_names
[bit
]);
4013 /* Add an entry to @props dict, with the value for property. */
4014 static void x86_cpu_expand_prop(X86CPU
*cpu
, QDict
*props
, const char *prop
)
4016 QObject
*value
= object_property_get_qobject(OBJECT(cpu
), prop
,
4019 qdict_put_obj(props
, prop
, value
);
4022 /* Convert CPU model data from X86CPU object to a property dictionary
4023 * that can recreate exactly the same CPU model.
4025 static void x86_cpu_to_dict(X86CPU
*cpu
, QDict
*props
)
4027 QDict
*sprops
= x86_cpu_static_props();
4028 const QDictEntry
*e
;
4030 for (e
= qdict_first(sprops
); e
; e
= qdict_next(sprops
, e
)) {
4031 const char *prop
= qdict_entry_key(e
);
4032 x86_cpu_expand_prop(cpu
, props
, prop
);
4036 /* Convert CPU model data from X86CPU object to a property dictionary
4037 * that can recreate exactly the same CPU model, including every
4038 * writeable QOM property.
4040 static void x86_cpu_to_dict_full(X86CPU
*cpu
, QDict
*props
)
4042 ObjectPropertyIterator iter
;
4043 ObjectProperty
*prop
;
4045 object_property_iter_init(&iter
, OBJECT(cpu
));
4046 while ((prop
= object_property_iter_next(&iter
))) {
4047 /* skip read-only or write-only properties */
4048 if (!prop
->get
|| !prop
->set
) {
4052 /* "hotplugged" is the only property that is configurable
4053 * on the command-line but will be set differently on CPUs
4054 * created using "-cpu ... -smp ..." and by CPUs created
4055 * on the fly by x86_cpu_from_model() for querying. Skip it.
4057 if (!strcmp(prop
->name
, "hotplugged")) {
4060 x86_cpu_expand_prop(cpu
, props
, prop
->name
);
4064 static void object_apply_props(Object
*obj
, QDict
*props
, Error
**errp
)
4066 const QDictEntry
*prop
;
4069 for (prop
= qdict_first(props
); prop
; prop
= qdict_next(props
, prop
)) {
4070 object_property_set_qobject(obj
, qdict_entry_value(prop
),
4071 qdict_entry_key(prop
), &err
);
4077 error_propagate(errp
, err
);
4080 /* Create X86CPU object according to model+props specification */
4081 static X86CPU
*x86_cpu_from_model(const char *model
, QDict
*props
, Error
**errp
)
4087 xcc
= X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU
, model
));
4089 error_setg(&err
, "CPU model '%s' not found", model
);
4093 xc
= X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc
))));
4095 object_apply_props(OBJECT(xc
), props
, &err
);
4101 x86_cpu_expand_features(xc
, &err
);
4108 error_propagate(errp
, err
);
4109 object_unref(OBJECT(xc
));
4115 CpuModelExpansionInfo
*
4116 qmp_query_cpu_model_expansion(CpuModelExpansionType type
,
4117 CpuModelInfo
*model
,
4122 CpuModelExpansionInfo
*ret
= g_new0(CpuModelExpansionInfo
, 1);
4123 QDict
*props
= NULL
;
4124 const char *base_name
;
4126 xc
= x86_cpu_from_model(model
->name
,
4128 qobject_to(QDict
, model
->props
) :
4134 props
= qdict_new();
4135 ret
->model
= g_new0(CpuModelInfo
, 1);
4136 ret
->model
->props
= QOBJECT(props
);
4137 ret
->model
->has_props
= true;
4140 case CPU_MODEL_EXPANSION_TYPE_STATIC
:
4141 /* Static expansion will be based on "base" only */
4143 x86_cpu_to_dict(xc
, props
);
4145 case CPU_MODEL_EXPANSION_TYPE_FULL
:
4146 /* As we don't return every single property, full expansion needs
4147 * to keep the original model name+props, and add extra
4148 * properties on top of that.
4150 base_name
= model
->name
;
4151 x86_cpu_to_dict_full(xc
, props
);
4154 error_setg(&err
, "Unsupported expansion type");
4158 x86_cpu_to_dict(xc
, props
);
4160 ret
->model
->name
= g_strdup(base_name
);
4163 object_unref(OBJECT(xc
));
4165 error_propagate(errp
, err
);
4166 qapi_free_CpuModelExpansionInfo(ret
);
4171 #endif /* !CONFIG_USER_ONLY */
4173 static gchar
*x86_gdb_arch_name(CPUState
*cs
)
4175 #ifdef TARGET_X86_64
4176 return g_strdup("i386:x86-64");
4178 return g_strdup("i386");
4182 static void x86_cpu_cpudef_class_init(ObjectClass
*oc
, void *data
)
4184 X86CPUDefinition
*cpudef
= data
;
4185 X86CPUClass
*xcc
= X86_CPU_CLASS(oc
);
4187 xcc
->cpu_def
= cpudef
;
4188 xcc
->migration_safe
= true;
4191 static void x86_register_cpudef_type(X86CPUDefinition
*def
)
4193 char *typename
= x86_cpu_type_name(def
->name
);
4196 .parent
= TYPE_X86_CPU
,
4197 .class_init
= x86_cpu_cpudef_class_init
,
4201 /* AMD aliases are handled at runtime based on CPUID vendor, so
4202 * they shouldn't be set on the CPU model table.
4204 assert(!(def
->features
[FEAT_8000_0001_EDX
] & CPUID_EXT2_AMD_ALIASES
));
4205 /* catch mistakes instead of silently truncating model_id when too long */
4206 assert(def
->model_id
&& strlen(def
->model_id
) <= 48);
4213 #if !defined(CONFIG_USER_ONLY)
4215 void cpu_clear_apic_feature(CPUX86State
*env
)
4217 env
->features
[FEAT_1_EDX
] &= ~CPUID_APIC
;
4220 #endif /* !CONFIG_USER_ONLY */
4222 void cpu_x86_cpuid(CPUX86State
*env
, uint32_t index
, uint32_t count
,
4223 uint32_t *eax
, uint32_t *ebx
,
4224 uint32_t *ecx
, uint32_t *edx
)
4226 X86CPU
*cpu
= env_archcpu(env
);
4227 CPUState
*cs
= env_cpu(env
);
4228 uint32_t pkg_offset
;
4230 uint32_t signature
[3];
4232 /* Calculate & apply limits for different index ranges */
4233 if (index
>= 0xC0000000) {
4234 limit
= env
->cpuid_xlevel2
;
4235 } else if (index
>= 0x80000000) {
4236 limit
= env
->cpuid_xlevel
;
4237 } else if (index
>= 0x40000000) {
4240 limit
= env
->cpuid_level
;
4243 if (index
> limit
) {
4244 /* Intel documentation states that invalid EAX input will
4245 * return the same information as EAX=cpuid_level
4246 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
4248 index
= env
->cpuid_level
;
4253 *eax
= env
->cpuid_level
;
4254 *ebx
= env
->cpuid_vendor1
;
4255 *edx
= env
->cpuid_vendor2
;
4256 *ecx
= env
->cpuid_vendor3
;
4259 *eax
= env
->cpuid_version
;
4260 *ebx
= (cpu
->apic_id
<< 24) |
4261 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
4262 *ecx
= env
->features
[FEAT_1_ECX
];
4263 if ((*ecx
& CPUID_EXT_XSAVE
) && (env
->cr
[4] & CR4_OSXSAVE_MASK
)) {
4264 *ecx
|= CPUID_EXT_OSXSAVE
;
4266 *edx
= env
->features
[FEAT_1_EDX
];
4267 if (cs
->nr_cores
* cs
->nr_threads
> 1) {
4268 *ebx
|= (cs
->nr_cores
* cs
->nr_threads
) << 16;
4273 /* cache info: needed for Pentium Pro compatibility */
4274 if (cpu
->cache_info_passthrough
) {
4275 host_cpuid(index
, 0, eax
, ebx
, ecx
, edx
);
4278 *eax
= 1; /* Number of CPUID[EAX=2] calls required */
4280 if (!cpu
->enable_l3_cache
) {
4283 *ecx
= cpuid2_cache_descriptor(env
->cache_info_cpuid2
.l3_cache
);
4285 *edx
= (cpuid2_cache_descriptor(env
->cache_info_cpuid2
.l1d_cache
) << 16) |
4286 (cpuid2_cache_descriptor(env
->cache_info_cpuid2
.l1i_cache
) << 8) |
4287 (cpuid2_cache_descriptor(env
->cache_info_cpuid2
.l2_cache
));
4290 /* cache info: needed for Core compatibility */
4291 if (cpu
->cache_info_passthrough
) {
4292 host_cpuid(index
, count
, eax
, ebx
, ecx
, edx
);
4293 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
4294 *eax
&= ~0xFC000000;
4295 if ((*eax
& 31) && cs
->nr_cores
> 1) {
4296 *eax
|= (cs
->nr_cores
- 1) << 26;
4301 case 0: /* L1 dcache info */
4302 encode_cache_cpuid4(env
->cache_info_cpuid4
.l1d_cache
,
4304 eax
, ebx
, ecx
, edx
);
4306 case 1: /* L1 icache info */
4307 encode_cache_cpuid4(env
->cache_info_cpuid4
.l1i_cache
,
4309 eax
, ebx
, ecx
, edx
);
4311 case 2: /* L2 cache info */
4312 encode_cache_cpuid4(env
->cache_info_cpuid4
.l2_cache
,
4313 cs
->nr_threads
, cs
->nr_cores
,
4314 eax
, ebx
, ecx
, edx
);
4316 case 3: /* L3 cache info */
4317 pkg_offset
= apicid_pkg_offset(cs
->nr_cores
, cs
->nr_threads
);
4318 if (cpu
->enable_l3_cache
) {
4319 encode_cache_cpuid4(env
->cache_info_cpuid4
.l3_cache
,
4320 (1 << pkg_offset
), cs
->nr_cores
,
4321 eax
, ebx
, ecx
, edx
);
4325 default: /* end of info */
4326 *eax
= *ebx
= *ecx
= *edx
= 0;
4332 /* MONITOR/MWAIT Leaf */
4333 *eax
= cpu
->mwait
.eax
; /* Smallest monitor-line size in bytes */
4334 *ebx
= cpu
->mwait
.ebx
; /* Largest monitor-line size in bytes */
4335 *ecx
= cpu
->mwait
.ecx
; /* flags */
4336 *edx
= cpu
->mwait
.edx
; /* mwait substates */
4339 /* Thermal and Power Leaf */
4340 *eax
= env
->features
[FEAT_6_EAX
];
4346 /* Structured Extended Feature Flags Enumeration Leaf */
4348 *eax
= 0; /* Maximum ECX value for sub-leaves */
4349 *ebx
= env
->features
[FEAT_7_0_EBX
]; /* Feature flags */
4350 *ecx
= env
->features
[FEAT_7_0_ECX
]; /* Feature flags */
4351 if ((*ecx
& CPUID_7_0_ECX_PKU
) && env
->cr
[4] & CR4_PKE_MASK
) {
4352 *ecx
|= CPUID_7_0_ECX_OSPKE
;
4354 *edx
= env
->features
[FEAT_7_0_EDX
]; /* Feature flags */
4363 /* Direct Cache Access Information Leaf */
4364 *eax
= 0; /* Bits 0-31 in DCA_CAP MSR */
4370 /* Architectural Performance Monitoring Leaf */
4371 if (kvm_enabled() && cpu
->enable_pmu
) {
4372 KVMState
*s
= cs
->kvm_state
;
4374 *eax
= kvm_arch_get_supported_cpuid(s
, 0xA, count
, R_EAX
);
4375 *ebx
= kvm_arch_get_supported_cpuid(s
, 0xA, count
, R_EBX
);
4376 *ecx
= kvm_arch_get_supported_cpuid(s
, 0xA, count
, R_ECX
);
4377 *edx
= kvm_arch_get_supported_cpuid(s
, 0xA, count
, R_EDX
);
4378 } else if (hvf_enabled() && cpu
->enable_pmu
) {
4379 *eax
= hvf_get_supported_cpuid(0xA, count
, R_EAX
);
4380 *ebx
= hvf_get_supported_cpuid(0xA, count
, R_EBX
);
4381 *ecx
= hvf_get_supported_cpuid(0xA, count
, R_ECX
);
4382 *edx
= hvf_get_supported_cpuid(0xA, count
, R_EDX
);
4391 /* Extended Topology Enumeration Leaf */
4392 if (!cpu
->enable_cpuid_0xb
) {
4393 *eax
= *ebx
= *ecx
= *edx
= 0;
4397 *ecx
= count
& 0xff;
4398 *edx
= cpu
->apic_id
;
4402 *eax
= apicid_core_offset(cs
->nr_cores
, cs
->nr_threads
);
4403 *ebx
= cs
->nr_threads
;
4404 *ecx
|= CPUID_TOPOLOGY_LEVEL_SMT
;
4407 *eax
= apicid_pkg_offset(cs
->nr_cores
, cs
->nr_threads
);
4408 *ebx
= cs
->nr_cores
* cs
->nr_threads
;
4409 *ecx
|= CPUID_TOPOLOGY_LEVEL_CORE
;
4414 *ecx
|= CPUID_TOPOLOGY_LEVEL_INVALID
;
4417 assert(!(*eax
& ~0x1f));
4418 *ebx
&= 0xffff; /* The count doesn't need to be reliable. */
4421 /* Processor Extended State */
4426 if (!(env
->features
[FEAT_1_ECX
] & CPUID_EXT_XSAVE
)) {
4431 *ecx
= xsave_area_size(x86_cpu_xsave_components(cpu
));
4432 *eax
= env
->features
[FEAT_XSAVE_COMP_LO
];
4433 *edx
= env
->features
[FEAT_XSAVE_COMP_HI
];
4434 *ebx
= xsave_area_size(env
->xcr0
);
4435 } else if (count
== 1) {
4436 *eax
= env
->features
[FEAT_XSAVE
];
4437 } else if (count
< ARRAY_SIZE(x86_ext_save_areas
)) {
4438 if ((x86_cpu_xsave_components(cpu
) >> count
) & 1) {
4439 const ExtSaveArea
*esa
= &x86_ext_save_areas
[count
];
4447 /* Intel Processor Trace Enumeration */
4452 if (!(env
->features
[FEAT_7_0_EBX
] & CPUID_7_0_EBX_INTEL_PT
) ||
4458 *eax
= INTEL_PT_MAX_SUBLEAF
;
4459 *ebx
= INTEL_PT_MINIMAL_EBX
;
4460 *ecx
= INTEL_PT_MINIMAL_ECX
;
4461 } else if (count
== 1) {
4462 *eax
= INTEL_PT_MTC_BITMAP
| INTEL_PT_ADDR_RANGES_NUM
;
4463 *ebx
= INTEL_PT_PSB_BITMAP
| INTEL_PT_CYCLE_BITMAP
;
4469 * CPUID code in kvm_arch_init_vcpu() ignores stuff
4470 * set here, but we restrict to TCG none the less.
4472 if (tcg_enabled() && cpu
->expose_tcg
) {
4473 memcpy(signature
, "TCGTCGTCGTCG", 12);
4475 *ebx
= signature
[0];
4476 *ecx
= signature
[1];
4477 *edx
= signature
[2];
4492 *eax
= env
->cpuid_xlevel
;
4493 *ebx
= env
->cpuid_vendor1
;
4494 *edx
= env
->cpuid_vendor2
;
4495 *ecx
= env
->cpuid_vendor3
;
4498 *eax
= env
->cpuid_version
;
4500 *ecx
= env
->features
[FEAT_8000_0001_ECX
];
4501 *edx
= env
->features
[FEAT_8000_0001_EDX
];
4503 /* The Linux kernel checks for the CMPLegacy bit and
4504 * discards multiple thread information if it is set.
4505 * So don't set it here for Intel to make Linux guests happy.
4507 if (cs
->nr_cores
* cs
->nr_threads
> 1) {
4508 if (env
->cpuid_vendor1
!= CPUID_VENDOR_INTEL_1
||
4509 env
->cpuid_vendor2
!= CPUID_VENDOR_INTEL_2
||
4510 env
->cpuid_vendor3
!= CPUID_VENDOR_INTEL_3
) {
4511 *ecx
|= 1 << 1; /* CmpLegacy bit */
4518 *eax
= env
->cpuid_model
[(index
- 0x80000002) * 4 + 0];
4519 *ebx
= env
->cpuid_model
[(index
- 0x80000002) * 4 + 1];
4520 *ecx
= env
->cpuid_model
[(index
- 0x80000002) * 4 + 2];
4521 *edx
= env
->cpuid_model
[(index
- 0x80000002) * 4 + 3];
4524 /* cache info (L1 cache) */
4525 if (cpu
->cache_info_passthrough
) {
4526 host_cpuid(index
, 0, eax
, ebx
, ecx
, edx
);
4529 *eax
= (L1_DTLB_2M_ASSOC
<< 24) | (L1_DTLB_2M_ENTRIES
<< 16) | \
4530 (L1_ITLB_2M_ASSOC
<< 8) | (L1_ITLB_2M_ENTRIES
);
4531 *ebx
= (L1_DTLB_4K_ASSOC
<< 24) | (L1_DTLB_4K_ENTRIES
<< 16) | \
4532 (L1_ITLB_4K_ASSOC
<< 8) | (L1_ITLB_4K_ENTRIES
);
4533 *ecx
= encode_cache_cpuid80000005(env
->cache_info_amd
.l1d_cache
);
4534 *edx
= encode_cache_cpuid80000005(env
->cache_info_amd
.l1i_cache
);
4537 /* cache info (L2 cache) */
4538 if (cpu
->cache_info_passthrough
) {
4539 host_cpuid(index
, 0, eax
, ebx
, ecx
, edx
);
4542 *eax
= (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC
) << 28) | \
4543 (L2_DTLB_2M_ENTRIES
<< 16) | \
4544 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC
) << 12) | \
4545 (L2_ITLB_2M_ENTRIES
);
4546 *ebx
= (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC
) << 28) | \
4547 (L2_DTLB_4K_ENTRIES
<< 16) | \
4548 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC
) << 12) | \
4549 (L2_ITLB_4K_ENTRIES
);
4550 encode_cache_cpuid80000006(env
->cache_info_amd
.l2_cache
,
4551 cpu
->enable_l3_cache
?
4552 env
->cache_info_amd
.l3_cache
: NULL
,
4559 *edx
= env
->features
[FEAT_8000_0007_EDX
];
4562 /* virtual & phys address size in low 2 bytes. */
4563 if (env
->features
[FEAT_8000_0001_EDX
] & CPUID_EXT2_LM
) {
4564 /* 64 bit processor */
4565 *eax
= cpu
->phys_bits
; /* configurable physical bits */
4566 if (env
->features
[FEAT_7_0_ECX
] & CPUID_7_0_ECX_LA57
) {
4567 *eax
|= 0x00003900; /* 57 bits virtual */
4569 *eax
|= 0x00003000; /* 48 bits virtual */
4572 *eax
= cpu
->phys_bits
;
4574 *ebx
= env
->features
[FEAT_8000_0008_EBX
];
4577 if (cs
->nr_cores
* cs
->nr_threads
> 1) {
4578 *ecx
|= (cs
->nr_cores
* cs
->nr_threads
) - 1;
4582 if (env
->features
[FEAT_8000_0001_ECX
] & CPUID_EXT3_SVM
) {
4583 *eax
= 0x00000001; /* SVM Revision */
4584 *ebx
= 0x00000010; /* nr of ASIDs */
4586 *edx
= env
->features
[FEAT_SVM
]; /* optional features */
4596 if (cpu
->cache_info_passthrough
) {
4597 host_cpuid(index
, count
, eax
, ebx
, ecx
, edx
);
4601 case 0: /* L1 dcache info */
4602 encode_cache_cpuid8000001d(env
->cache_info_amd
.l1d_cache
, cs
,
4603 eax
, ebx
, ecx
, edx
);
4605 case 1: /* L1 icache info */
4606 encode_cache_cpuid8000001d(env
->cache_info_amd
.l1i_cache
, cs
,
4607 eax
, ebx
, ecx
, edx
);
4609 case 2: /* L2 cache info */
4610 encode_cache_cpuid8000001d(env
->cache_info_amd
.l2_cache
, cs
,
4611 eax
, ebx
, ecx
, edx
);
4613 case 3: /* L3 cache info */
4614 encode_cache_cpuid8000001d(env
->cache_info_amd
.l3_cache
, cs
,
4615 eax
, ebx
, ecx
, edx
);
4617 default: /* end of info */
4618 *eax
= *ebx
= *ecx
= *edx
= 0;
4623 assert(cpu
->core_id
<= 255);
4624 encode_topo_cpuid8000001e(cs
, cpu
,
4625 eax
, ebx
, ecx
, edx
);
4628 *eax
= env
->cpuid_xlevel2
;
4634 /* Support for VIA CPU's CPUID instruction */
4635 *eax
= env
->cpuid_version
;
4638 *edx
= env
->features
[FEAT_C000_0001_EDX
];
4643 /* Reserved for the future, and now filled with zero */
4650 *eax
= sev_enabled() ? 0x2 : 0;
4651 *ebx
= sev_get_cbit_position();
4652 *ebx
|= sev_get_reduced_phys_bits() << 6;
4657 /* reserved values: zero */
4666 /* CPUClass::reset() */
4667 static void x86_cpu_reset(CPUState
*s
)
4669 X86CPU
*cpu
= X86_CPU(s
);
4670 X86CPUClass
*xcc
= X86_CPU_GET_CLASS(cpu
);
4671 CPUX86State
*env
= &cpu
->env
;
4676 xcc
->parent_reset(s
);
4678 memset(env
, 0, offsetof(CPUX86State
, end_reset_fields
));
4680 env
->old_exception
= -1;
4682 /* init to reset state */
4684 env
->hflags2
|= HF2_GIF_MASK
;
4686 cpu_x86_update_cr0(env
, 0x60000010);
4687 env
->a20_mask
= ~0x0;
4688 env
->smbase
= 0x30000;
4689 env
->msr_smi_count
= 0;
4691 env
->idt
.limit
= 0xffff;
4692 env
->gdt
.limit
= 0xffff;
4693 env
->ldt
.limit
= 0xffff;
4694 env
->ldt
.flags
= DESC_P_MASK
| (2 << DESC_TYPE_SHIFT
);
4695 env
->tr
.limit
= 0xffff;
4696 env
->tr
.flags
= DESC_P_MASK
| (11 << DESC_TYPE_SHIFT
);
4698 cpu_x86_load_seg_cache(env
, R_CS
, 0xf000, 0xffff0000, 0xffff,
4699 DESC_P_MASK
| DESC_S_MASK
| DESC_CS_MASK
|
4700 DESC_R_MASK
| DESC_A_MASK
);
4701 cpu_x86_load_seg_cache(env
, R_DS
, 0, 0, 0xffff,
4702 DESC_P_MASK
| DESC_S_MASK
| DESC_W_MASK
|
4704 cpu_x86_load_seg_cache(env
, R_ES
, 0, 0, 0xffff,
4705 DESC_P_MASK
| DESC_S_MASK
| DESC_W_MASK
|
4707 cpu_x86_load_seg_cache(env
, R_SS
, 0, 0, 0xffff,
4708 DESC_P_MASK
| DESC_S_MASK
| DESC_W_MASK
|
4710 cpu_x86_load_seg_cache(env
, R_FS
, 0, 0, 0xffff,
4711 DESC_P_MASK
| DESC_S_MASK
| DESC_W_MASK
|
4713 cpu_x86_load_seg_cache(env
, R_GS
, 0, 0, 0xffff,
4714 DESC_P_MASK
| DESC_S_MASK
| DESC_W_MASK
|
4718 env
->regs
[R_EDX
] = env
->cpuid_version
;
4723 for (i
= 0; i
< 8; i
++) {
4726 cpu_set_fpuc(env
, 0x37f);
4728 env
->mxcsr
= 0x1f80;
4729 /* All units are in INIT state. */
4732 env
->pat
= 0x0007040600070406ULL
;
4733 env
->msr_ia32_misc_enable
= MSR_IA32_MISC_ENABLE_DEFAULT
;
4734 if (env
->features
[FEAT_1_ECX
] & CPUID_EXT_MONITOR
) {
4735 env
->msr_ia32_misc_enable
|= MSR_IA32_MISC_ENABLE_MWAIT
;
4738 memset(env
->dr
, 0, sizeof(env
->dr
));
4739 env
->dr
[6] = DR6_FIXED_1
;
4740 env
->dr
[7] = DR7_FIXED_1
;
4741 cpu_breakpoint_remove_all(s
, BP_CPU
);
4742 cpu_watchpoint_remove_all(s
, BP_CPU
);
4745 xcr0
= XSTATE_FP_MASK
;
4747 #ifdef CONFIG_USER_ONLY
4748 /* Enable all the features for user-mode. */
4749 if (env
->features
[FEAT_1_EDX
] & CPUID_SSE
) {
4750 xcr0
|= XSTATE_SSE_MASK
;
4752 for (i
= 2; i
< ARRAY_SIZE(x86_ext_save_areas
); i
++) {
4753 const ExtSaveArea
*esa
= &x86_ext_save_areas
[i
];
4754 if (env
->features
[esa
->feature
] & esa
->bits
) {
4759 if (env
->features
[FEAT_1_ECX
] & CPUID_EXT_XSAVE
) {
4760 cr4
|= CR4_OSFXSR_MASK
| CR4_OSXSAVE_MASK
;
4762 if (env
->features
[FEAT_7_0_EBX
] & CPUID_7_0_EBX_FSGSBASE
) {
4763 cr4
|= CR4_FSGSBASE_MASK
;
4768 cpu_x86_update_cr4(env
, cr4
);
4771 * SDM 11.11.5 requires:
4772 * - IA32_MTRR_DEF_TYPE MSR.E = 0
4773 * - IA32_MTRR_PHYSMASKn.V = 0
4774 * All other bits are undefined. For simplification, zero it all.
4776 env
->mtrr_deftype
= 0;
4777 memset(env
->mtrr_var
, 0, sizeof(env
->mtrr_var
));
4778 memset(env
->mtrr_fixed
, 0, sizeof(env
->mtrr_fixed
));
4780 env
->interrupt_injected
= -1;
4781 env
->exception_injected
= -1;
4782 env
->nmi_injected
= false;
4783 #if !defined(CONFIG_USER_ONLY)
4784 /* We hard-wire the BSP to the first CPU. */
4785 apic_designate_bsp(cpu
->apic_state
, s
->cpu_index
== 0);
4787 s
->halted
= !cpu_is_bsp(cpu
);
4789 if (kvm_enabled()) {
4790 kvm_arch_reset_vcpu(cpu
);
4792 else if (hvf_enabled()) {
4798 #ifndef CONFIG_USER_ONLY
/*
 * cpu_is_bsp: true when this vCPU's APIC base MSR has the BSP flag set,
 * i.e. it is the bootstrap processor.
 */
4799 bool cpu_is_bsp(X86CPU
*cpu
)
4801 return cpu_get_apic_base(cpu
->apic_state
) & MSR_IA32_APICBASE_BSP
;
4804 /* TODO: remove me, when reset over QOM tree is implemented */
/* qemu_register_reset() callback: resets one X86CPU on machine reset. */
4805 static void x86_cpu_machine_reset_cb(void *opaque
)
4807 X86CPU
*cpu
= opaque
;
4808 cpu_reset(CPU(cpu
));
/*
 * mce_init: enable the Machine Check architecture when the CPU model is
 * family >= 6 and advertises both MCE and MCA; sets MCG_CAP (with LMCE if
 * requested), MCG_CTL, and enables all error-reporting banks.
 */
4812 static void mce_init(X86CPU
*cpu
)
4814 CPUX86State
*cenv
= &cpu
->env
;
4817 if (((cenv
->cpuid_version
>> 8) & 0xf) >= 6
4818 && (cenv
->features
[FEAT_1_EDX
] & (CPUID_MCE
| CPUID_MCA
)) ==
4819 (CPUID_MCE
| CPUID_MCA
)) {
4820 cenv
->mcg_cap
= MCE_CAP_DEF
| MCE_BANKS_DEF
|
4821 (cpu
->enable_lmce
? MCG_LMCE_P
: 0);
4822 cenv
->mcg_ctl
= ~(uint64_t)0;
/* MCi_CTL is the first of 4 registers per bank: enable every bank. */
4823 for (bank
= 0; bank
< MCE_BANKS_DEF
; bank
++) {
4824 cenv
->mce_banks
[bank
* 4] = ~(uint64_t)0;
4829 #ifndef CONFIG_USER_ONLY
/*
 * apic_get_class: pick the APIC implementation class by accelerator —
 * "kvm-apic" with in-kernel irqchip, "xen-apic" under Xen, else "apic".
 */
4830 APICCommonClass
*apic_get_class(void)
4832 const char *apic_type
= "apic";
4834 /* TODO: in-kernel irqchip for hvf */
4835 if (kvm_apic_in_kernel()) {
4836 apic_type
= "kvm-apic";
4837 } else if (xen_enabled()) {
4838 apic_type
= "xen-apic";
4841 return APIC_COMMON_CLASS(object_class_by_name(apic_type
));
/*
 * x86_cpu_apic_create: instantiate the local APIC device for this vCPU,
 * attach it as the "lapic" child property (which keeps the reference after
 * the explicit unref), and program its id and default base address.
 * NOTE(review): at least one statement between the APIC_COMMON() cast and
 * the apicbase assignment is elided in this extract.
 */
4844 static void x86_cpu_apic_create(X86CPU
*cpu
, Error
**errp
)
4846 APICCommonState
*apic
;
4847 ObjectClass
*apic_class
= OBJECT_CLASS(apic_get_class());
4849 cpu
->apic_state
= DEVICE(object_new(object_class_get_name(apic_class
)));
4851 object_property_add_child(OBJECT(cpu
), "lapic",
4852 OBJECT(cpu
->apic_state
), &error_abort
);
4853 object_unref(OBJECT(cpu
->apic_state
));
4855 qdev_prop_set_uint32(cpu
->apic_state
, "id", cpu
->apic_id
);
4856 /* TODO: convert to link<> */
4857 apic
= APIC_COMMON(cpu
->apic_state
);
4859 apic
->apicbase
= APIC_DEFAULT_ADDRESS
| MSR_IA32_APICBASE_ENABLE
;
/*
 * x86_cpu_apic_realize (system emulation): realize the per-CPU APIC device
 * and, exactly once per machine, map the shared APIC MMIO region at the
 * default base address.  No-op when no APIC was created.
 */
4862 static void x86_cpu_apic_realize(X86CPU
*cpu
, Error
**errp
)
4864 APICCommonState
*apic
;
/* static: the MMIO region is global; map it only for the first CPU. */
4865 static bool apic_mmio_map_once
;
4867 if (cpu
->apic_state
== NULL
) {
4870 object_property_set_bool(OBJECT(cpu
->apic_state
), true, "realized",
4873 /* Map APIC MMIO area */
4874 apic
= APIC_COMMON(cpu
->apic_state
);
4875 if (!apic_mmio_map_once
) {
4876 memory_region_add_subregion_overlap(get_system_memory(),
4878 MSR_IA32_APICBASE_BASE
,
4881 apic_mmio_map_once
= true;
/*
 * x86_cpu_machine_done: machine-init-done notifier; if the machine exposes
 * /machine/smram, alias it into this CPU's SMM address space with higher
 * priority than normal memory.
 * NOTE(review): the guard checking that smram resolved non-NULL is elided
 * in this extract — presumably present in the original; confirm.
 */
4885 static void x86_cpu_machine_done(Notifier
*n
, void *unused
)
4887 X86CPU
*cpu
= container_of(n
, X86CPU
, machine_done
);
4888 MemoryRegion
*smram
=
4889 (MemoryRegion
*) object_resolve_path("/machine/smram", NULL
);
4892 cpu
->smram
= g_new(MemoryRegion
, 1);
4893 memory_region_init_alias(cpu
->smram
, OBJECT(cpu
), "smram",
4894 smram
, 0, 1ull << 32);
4895 memory_region_set_enabled(cpu
->smram
, true);
/* Priority 1 so SMRAM shadows the normal memory alias (priority 0). */
4896 memory_region_add_subregion_overlap(cpu
->cpu_as_root
, 0, cpu
->smram
, 1);
/* user-mode variant: no APIC device exists — body is empty (elided here). */
4900 static void x86_cpu_apic_realize(X86CPU
*cpu
, Error
**errp
)
4905 /* Note: Only safe for use on x86(-64) hosts */
/*
 * x86_host_phys_bits: query the host's physical address width via CPUID
 * leaf 0x80000008 (low byte of EAX); fall back to 36 bits on hosts that
 * lack the leaf.
 */
4906 static uint32_t x86_host_phys_bits(void)
4909 uint32_t host_phys_bits
;
4911 host_cpuid(0x80000000, 0, &eax
, NULL
, NULL
, NULL
);
4912 if (eax
>= 0x80000008) {
4913 host_cpuid(0x80000008, 0, &eax
, NULL
, NULL
, NULL
);
4914 /* Note: According to AMD doc 25481 rev 2.34 they have a field
4915 * at 23:16 that can specify a maximum physical address bits for
4916 * the guest that can override this value; but I've not seen
4917 * anything with that set.
4919 host_phys_bits
= eax
& 0xff;
4921 /* It's an odd 64 bit machine that doesn't have the leaf for
4922 * physical address bits; fall back to 36 that's most older
4925 host_phys_bits
= 36;
4928 return host_phys_bits
;
/*
 * x86_cpu_adjust_level: body elided in this extract — presumably raises
 * *min to value when *min is lower (a max() helper for cpuid_min_* fields,
 * as used by x86_cpu_adjust_feat_level below); TODO confirm.
 */
4931 static void x86_cpu_adjust_level(X86CPU
*cpu
, uint32_t *min
, uint32_t value
)
4938 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
/*
 * If feature word w has any bit set, raise the matching minimum CPUID level
 * (basic / extended / Centaur, selected by the leaf's top nibble) so the
 * leaf defining w is actually reachable by the guest.
 * NOTE(review): the switch-on-region scaffolding is elided in this extract;
 * only the three x86_cpu_adjust_level() arms are visible.
 */
4939 static void x86_cpu_adjust_feat_level(X86CPU
*cpu
, FeatureWord w
)
4941 CPUX86State
*env
= &cpu
->env
;
4942 FeatureWordInfo
*fi
= &feature_word_info
[w
];
4943 uint32_t eax
= fi
->cpuid
.eax
;
/* 0x00000000 = basic, 0x80000000 = extended, 0xC0000000 = Centaur. */
4944 uint32_t region
= eax
& 0xF0000000;
4946 assert(feature_word_info
[w
].type
== CPUID_FEATURE_WORD
);
4947 if (!env
->features
[w
]) {
4953 x86_cpu_adjust_level(cpu
, &env
->cpuid_min_level
, eax
);
4956 x86_cpu_adjust_level(cpu
, &env
->cpuid_min_xlevel
, eax
);
4959 x86_cpu_adjust_level(cpu
, &env
->cpuid_min_xlevel2
, eax
);
4964 /* Calculate XSAVE components based on the configured CPU feature flags */
/*
 * Builds the CPUID[0xD] supported-components mask: one bit per entry of
 * x86_ext_save_areas whose controlling feature bit is enabled, split into
 * the FEAT_XSAVE_COMP_LO/HI words.  Skipped entirely without XSAVE.
 */
4965 static void x86_cpu_enable_xsave_components(X86CPU
*cpu
)
4967 CPUX86State
*env
= &cpu
->env
;
4971 if (!(env
->features
[FEAT_1_ECX
] & CPUID_EXT_XSAVE
)) {
4976 for (i
= 0; i
< ARRAY_SIZE(x86_ext_save_areas
); i
++) {
4977 const ExtSaveArea
*esa
= &x86_ext_save_areas
[i
];
4978 if (env
->features
[esa
->feature
] & esa
->bits
) {
4979 mask
|= (1ULL << i
);
4983 env
->features
[FEAT_XSAVE_COMP_LO
] = mask
;
4984 env
->features
[FEAT_XSAVE_COMP_HI
] = mask
>> 32;
4987 /***** Steps involved on loading and filtering CPUID data
4989 * When initializing and realizing a CPU object, the steps
4990 * involved in setting up CPUID data are:
4992 * 1) Loading CPU model definition (X86CPUDefinition). This is
4993 * implemented by x86_cpu_load_def() and should be completely
4994 * transparent, as it is done automatically by instance_init.
4995 * No code should need to look at X86CPUDefinition structs
4996 * outside instance_init.
4998 * 2) CPU expansion. This is done by realize before CPUID
4999 * filtering, and will make sure host/accelerator data is
5000 * loaded for CPU models that depend on host capabilities
5001 * (e.g. "host"). Done by x86_cpu_expand_features().
5003 * 3) CPUID filtering. This initializes extra data related to
5004 * CPUID, and checks if the host supports all capabilities
5005 * required by the CPU. Runnability of a CPU model is
5006 * determined at this step. Done by x86_cpu_filter_features().
5008 * Some operations don't require all steps to be performed.
5011 * - CPU instance creation (instance_init) will run only CPU
5012 * model loading. CPU expansion can't run at instance_init-time
5013 * because host/accelerator data may be not available yet.
5014 * - CPU realization will perform both CPU model expansion and CPUID
5015 * filtering, and return an error in case one of them fails.
5016 * - query-cpu-definitions needs to run all 3 steps. It needs
5017 * to run CPUID filtering, as the 'unavailable-features'
5018 * field is set based on the filtering results.
5019 * - The query-cpu-model-expansion QMP command only needs to run
5020 * CPU model loading and CPU expansion. It should not filter
5021 * any CPUID data based on host capabilities.
5024 /* Expand CPU configuration data, based on configured features
5025 * and host/accelerator capabilities when appropriate.
/*
 * Step 2 of CPUID setup (see the comment block above): apply max_features,
 * +/- feature overrides, XSAVE component masks, and auto-raise the CPUID
 * levels needed by the enabled features.  Errors out through errp.
 */
5027 static void x86_cpu_expand_features(X86CPU
*cpu
, Error
**errp
)
5029 CPUX86State
*env
= &cpu
->env
;
5032 Error
*local_err
= NULL
;
5034 /*TODO: Now cpu->max_features doesn't overwrite features
5035 * set using QOM properties, and we can convert
5036 * plus_features & minus_features to global properties
5037 * inside x86_cpu_parse_featurestr() too.
5039 if (cpu
->max_features
) {
5040 for (w
= 0; w
< FEATURE_WORDS
; w
++) {
5041 /* Override only features that weren't set explicitly
/* user_features records explicit QOM settings; never clobber those. */
5045 x86_cpu_get_supported_feature_word(w
, cpu
->migratable
) &
5046 ~env
->user_features
[w
] & \
5047 ~feature_word_info
[w
].no_autoenable_flags
;
/* Apply "+feature" then "-feature" command-line overrides in order. */
5051 for (l
= plus_features
; l
; l
= l
->next
) {
5052 const char *prop
= l
->data
;
5053 object_property_set_bool(OBJECT(cpu
), true, prop
, &local_err
);
5059 for (l
= minus_features
; l
; l
= l
->next
) {
5060 const char *prop
= l
->data
;
5061 object_property_set_bool(OBJECT(cpu
), false, prop
, &local_err
);
5067 if (!kvm_enabled() || !cpu
->expose_kvm
) {
5068 env
->features
[FEAT_KVM
] = 0;
5071 x86_cpu_enable_xsave_components(cpu
);
5073 /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
5074 x86_cpu_adjust_feat_level(cpu
, FEAT_7_0_EBX
);
5075 if (cpu
->full_cpuid_auto_level
) {
5076 x86_cpu_adjust_feat_level(cpu
, FEAT_1_EDX
);
5077 x86_cpu_adjust_feat_level(cpu
, FEAT_1_ECX
);
5078 x86_cpu_adjust_feat_level(cpu
, FEAT_6_EAX
);
5079 x86_cpu_adjust_feat_level(cpu
, FEAT_7_0_ECX
);
5080 x86_cpu_adjust_feat_level(cpu
, FEAT_8000_0001_EDX
);
5081 x86_cpu_adjust_feat_level(cpu
, FEAT_8000_0001_ECX
);
5082 x86_cpu_adjust_feat_level(cpu
, FEAT_8000_0007_EDX
);
5083 x86_cpu_adjust_feat_level(cpu
, FEAT_8000_0008_EBX
);
5084 x86_cpu_adjust_feat_level(cpu
, FEAT_C000_0001_EDX
);
5085 x86_cpu_adjust_feat_level(cpu
, FEAT_SVM
);
5086 x86_cpu_adjust_feat_level(cpu
, FEAT_XSAVE
);
5088 /* Intel Processor Trace requires CPUID[0x14] */
5089 if ((env
->features
[FEAT_7_0_EBX
] & CPUID_7_0_EBX_INTEL_PT
) &&
5090 kvm_enabled() && cpu
->intel_pt_auto_level
) {
5091 x86_cpu_adjust_level(cpu
, &cpu
->env
.cpuid_min_level
, 0x14);
5094 /* SVM requires CPUID[0x8000000A] */
5095 if (env
->features
[FEAT_8000_0001_ECX
] & CPUID_EXT3_SVM
) {
5096 x86_cpu_adjust_level(cpu
, &env
->cpuid_min_xlevel
, 0x8000000A);
5099 /* SEV requires CPUID[0x8000001F] */
5100 if (sev_enabled()) {
5101 x86_cpu_adjust_level(cpu
, &env
->cpuid_min_xlevel
, 0x8000001F);
5105 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
/* UINT32_MAX is the "unset" sentinel for the three level properties. */
5106 if (env
->cpuid_level
== UINT32_MAX
) {
5107 env
->cpuid_level
= env
->cpuid_min_level
;
5109 if (env
->cpuid_xlevel
== UINT32_MAX
) {
5110 env
->cpuid_xlevel
= env
->cpuid_min_xlevel
;
5112 if (env
->cpuid_xlevel2
== UINT32_MAX
) {
5113 env
->cpuid_xlevel2
= env
->cpuid_min_xlevel2
;
5117 if (local_err
!= NULL
) {
5118 error_propagate(errp
, local_err
);
5123 * Finishes initialization of CPUID data, filters CPU feature
5124 * words based on host availability of each feature.
5126 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
/*
 * Step 3 of CPUID setup: mask every feature word down to what the
 * accelerator supports and record the dropped bits in filtered_features.
 * Intel PT gets an extra all-or-nothing check against the host's leaf 0x14
 * since its capabilities are not individually configurable.
 * Returns non-zero when anything had to be filtered out.
 */
5128 static int x86_cpu_filter_features(X86CPU
*cpu
)
5130 CPUX86State
*env
= &cpu
->env
;
5134 for (w
= 0; w
< FEATURE_WORDS
; w
++) {
5135 uint32_t host_feat
=
5136 x86_cpu_get_supported_feature_word(w
, false);
5137 uint32_t requested_features
= env
->features
[w
];
5138 env
->features
[w
] &= host_feat
;
5139 cpu
->filtered_features
[w
] = requested_features
& ~env
->features
[w
];
5140 if (cpu
->filtered_features
[w
]) {
5145 if ((env
->features
[FEAT_7_0_EBX
] & CPUID_7_0_EBX_INTEL_PT
) &&
5147 KVMState
*s
= CPU(cpu
)->kvm_state
;
5148 uint32_t eax_0
= kvm_arch_get_supported_cpuid(s
, 0x14, 0, R_EAX
);
5149 uint32_t ebx_0
= kvm_arch_get_supported_cpuid(s
, 0x14, 0, R_EBX
);
5150 uint32_t ecx_0
= kvm_arch_get_supported_cpuid(s
, 0x14, 0, R_ECX
);
5151 uint32_t eax_1
= kvm_arch_get_supported_cpuid(s
, 0x14, 1, R_EAX
);
5152 uint32_t ebx_1
= kvm_arch_get_supported_cpuid(s
, 0x14, 1, R_EBX
);
/* Host must cover every PT capability we would advertise to the guest. */
5155 ((ebx_0
& INTEL_PT_MINIMAL_EBX
) != INTEL_PT_MINIMAL_EBX
) ||
5156 ((ecx_0
& INTEL_PT_MINIMAL_ECX
) != INTEL_PT_MINIMAL_ECX
) ||
5157 ((eax_1
& INTEL_PT_MTC_BITMAP
) != INTEL_PT_MTC_BITMAP
) ||
5158 ((eax_1
& INTEL_PT_ADDR_RANGES_NUM_MASK
) <
5159 INTEL_PT_ADDR_RANGES_NUM
) ||
5160 ((ebx_1
& (INTEL_PT_PSB_BITMAP
| INTEL_PT_CYCLE_BITMAP
)) !=
5161 (INTEL_PT_PSB_BITMAP
| INTEL_PT_CYCLE_BITMAP
)) ||
5162 (ecx_0
& INTEL_PT_IP_LIP
)) {
5164 * Processor Trace capabilities aren't configurable, so if the
5165 * host can't emulate the capabilities we report on
5166 * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
5168 env
->features
[FEAT_7_0_EBX
] &= ~CPUID_7_0_EBX_INTEL_PT
;
5169 cpu
->filtered_features
[FEAT_7_0_EBX
] |= CPUID_7_0_EBX_INTEL_PT
;
5177 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
5178 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
5179 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
5180 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
5181 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
5182 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
/*
 * x86_cpu_realizefn: DeviceClass::realize for the X86CPU object.
 * Expands and filters CPUID features, fixes up AMD alias bits and
 * phys-bits, selects cache models, creates/realizes the APIC, and wires
 * the SMM address space for TCG.  All failures propagate through errp.
 * NOTE(review): this extract is heavily elided (error-exit paths, several
 * declarations, and closing braces are missing); code left byte-identical.
 */
5183 static void x86_cpu_realizefn(DeviceState
*dev
, Error
**errp
)
5185 CPUState
*cs
= CPU(dev
);
5186 X86CPU
*cpu
= X86_CPU(dev
);
5187 X86CPUClass
*xcc
= X86_CPU_GET_CLASS(dev
);
5188 CPUX86State
*env
= &cpu
->env
;
5189 Error
*local_err
= NULL
;
/* static: warn about missing-topoext hyperthreading only once per run. */
5190 static bool ht_warned
;
5192 if (xcc
->host_cpuid_required
) {
5193 if (!accel_uses_host_cpuid()) {
5194 char *name
= x86_cpu_class_get_model_name(xcc
);
5195 error_setg(&local_err
, "CPU model '%s' requires KVM", name
);
5200 if (enable_cpu_pm
) {
/* Pass the host's MONITOR/MWAIT leaf through when CPU PM is enabled. */
5201 host_cpuid(5, 0, &cpu
->mwait
.eax
, &cpu
->mwait
.ebx
,
5202 &cpu
->mwait
.ecx
, &cpu
->mwait
.edx
);
5203 env
->features
[FEAT_1_ECX
] |= CPUID_EXT_MONITOR
;
5207 /* mwait extended info: needed for Core compatibility */
5208 /* We always wake on interrupt even if host does not have the capability */
5209 cpu
->mwait
.ecx
|= CPUID_MWAIT_EMX
| CPUID_MWAIT_IBE
;
5211 if (cpu
->apic_id
== UNASSIGNED_APIC_ID
) {
5212 error_setg(errp
, "apic-id property was not initialized properly");
5216 x86_cpu_expand_features(cpu
, &local_err
);
5221 if (x86_cpu_filter_features(cpu
) &&
5222 (cpu
->check_cpuid
|| cpu
->enforce_cpuid
)) {
5223 x86_cpu_report_filtered_features(cpu
);
5224 if (cpu
->enforce_cpuid
) {
5225 error_setg(&local_err
,
5226 accel_uses_host_cpuid() ?
5227 "Host doesn't support requested features" :
5228 "TCG doesn't support requested features");
5233 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
5236 if (IS_AMD_CPU(env
)) {
5237 env
->features
[FEAT_8000_0001_EDX
] &= ~CPUID_EXT2_AMD_ALIASES
;
5238 env
->features
[FEAT_8000_0001_EDX
] |= (env
->features
[FEAT_1_EDX
]
5239 & CPUID_EXT2_AMD_ALIASES
);
5242 /* For 64bit systems think about the number of physical bits to present.
5243 * ideally this should be the same as the host; anything other than matching
5244 * the host can cause incorrect guest behaviour.
5245 * QEMU used to pick the magic value of 40 bits that corresponds to
5246 * consumer AMD devices but nothing else.
5248 if (env
->features
[FEAT_8000_0001_EDX
] & CPUID_EXT2_LM
) {
5249 if (accel_uses_host_cpuid()) {
5250 uint32_t host_phys_bits
= x86_host_phys_bits();
5253 if (cpu
->host_phys_bits
) {
5254 /* The user asked for us to use the host physical bits */
5255 cpu
->phys_bits
= host_phys_bits
;
5256 if (cpu
->host_phys_bits_limit
&&
5257 cpu
->phys_bits
> cpu
->host_phys_bits_limit
) {
5258 cpu
->phys_bits
= cpu
->host_phys_bits_limit
;
5262 /* Print a warning if the user set it to a value that's not the
5265 if (cpu
->phys_bits
!= host_phys_bits
&& cpu
->phys_bits
!= 0 &&
5267 warn_report("Host physical bits (%u)"
5268 " does not match phys-bits property (%u)",
5269 host_phys_bits
, cpu
->phys_bits
);
5273 if (cpu
->phys_bits
&&
5274 (cpu
->phys_bits
> TARGET_PHYS_ADDR_SPACE_BITS
||
5275 cpu
->phys_bits
< 32)) {
5276 error_setg(errp
, "phys-bits should be between 32 and %u "
5278 TARGET_PHYS_ADDR_SPACE_BITS
, cpu
->phys_bits
);
5282 if (cpu
->phys_bits
&& cpu
->phys_bits
!= TCG_PHYS_ADDR_BITS
) {
5283 error_setg(errp
, "TCG only supports phys-bits=%u",
5284 TCG_PHYS_ADDR_BITS
);
5288 /* 0 means it was not explicitly set by the user (or by machine
5289 * compat_props or by the host code above). In this case, the default
5290 * is the value used by TCG (40).
5292 if (cpu
->phys_bits
== 0) {
5293 cpu
->phys_bits
= TCG_PHYS_ADDR_BITS
;
5296 /* For 32 bit systems don't use the user set value, but keep
5297 * phys_bits consistent with what we tell the guest.
5299 if (cpu
->phys_bits
!= 0) {
5300 error_setg(errp
, "phys-bits is not user-configurable in 32 bit");
/* 32-bit targets: 36 bits with PSE36, else 32. */
5304 if (env
->features
[FEAT_1_EDX
] & CPUID_PSE36
) {
5305 cpu
->phys_bits
= 36;
5307 cpu
->phys_bits
= 32;
5311 /* Cache information initialization */
5312 if (!cpu
->legacy_cache
) {
5313 if (!xcc
->cpu_def
|| !xcc
->cpu_def
->cache_info
) {
5314 char *name
= x86_cpu_class_get_model_name(xcc
);
5316 "CPU model '%s' doesn't support legacy-cache=off", name
);
5320 env
->cache_info_cpuid2
= env
->cache_info_cpuid4
= env
->cache_info_amd
=
5321 *xcc
->cpu_def
->cache_info
;
5323 /* Build legacy cache information */
5324 env
->cache_info_cpuid2
.l1d_cache
= &legacy_l1d_cache
;
5325 env
->cache_info_cpuid2
.l1i_cache
= &legacy_l1i_cache
;
5326 env
->cache_info_cpuid2
.l2_cache
= &legacy_l2_cache_cpuid2
;
5327 env
->cache_info_cpuid2
.l3_cache
= &legacy_l3_cache
;
5329 env
->cache_info_cpuid4
.l1d_cache
= &legacy_l1d_cache
;
5330 env
->cache_info_cpuid4
.l1i_cache
= &legacy_l1i_cache
;
5331 env
->cache_info_cpuid4
.l2_cache
= &legacy_l2_cache
;
5332 env
->cache_info_cpuid4
.l3_cache
= &legacy_l3_cache
;
5334 env
->cache_info_amd
.l1d_cache
= &legacy_l1d_cache_amd
;
5335 env
->cache_info_amd
.l1i_cache
= &legacy_l1i_cache_amd
;
5336 env
->cache_info_amd
.l2_cache
= &legacy_l2_cache_amd
;
5337 env
->cache_info_amd
.l3_cache
= &legacy_l3_cache
;
5341 cpu_exec_realizefn(cs
, &local_err
);
5342 if (local_err
!= NULL
) {
5343 error_propagate(errp
, local_err
);
5347 #ifndef CONFIG_USER_ONLY
5348 qemu_register_reset(x86_cpu_machine_reset_cb
, cpu
);
5350 if (cpu
->env
.features
[FEAT_1_EDX
] & CPUID_APIC
|| smp_cpus
> 1) {
5351 x86_cpu_apic_create(cpu
, &local_err
);
5352 if (local_err
!= NULL
) {
5360 #ifndef CONFIG_USER_ONLY
5361 if (tcg_enabled()) {
/* TCG needs a private address-space tree so SMRAM can overlay memory. */
5362 cpu
->cpu_as_mem
= g_new(MemoryRegion
, 1);
5363 cpu
->cpu_as_root
= g_new(MemoryRegion
, 1);
5365 /* Outer container... */
5366 memory_region_init(cpu
->cpu_as_root
, OBJECT(cpu
), "memory", ~0ull);
5367 memory_region_set_enabled(cpu
->cpu_as_root
, true);
5369 /* ... with two regions inside: normal system memory with low
5372 memory_region_init_alias(cpu
->cpu_as_mem
, OBJECT(cpu
), "memory",
5373 get_system_memory(), 0, ~0ull);
5374 memory_region_add_subregion_overlap(cpu
->cpu_as_root
, 0, cpu
->cpu_as_mem
, 0);
5375 memory_region_set_enabled(cpu
->cpu_as_mem
, true);
5378 cpu_address_space_init(cs
, 0, "cpu-memory", cs
->memory
);
5379 cpu_address_space_init(cs
, 1, "cpu-smm", cpu
->cpu_as_root
);
5381 /* ... SMRAM with higher priority, linked from /machine/smram. */
5382 cpu
->machine_done
.notify
= x86_cpu_machine_done
;
5383 qemu_add_machine_init_done_notifier(&cpu
->machine_done
);
5390 * Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU
5391 * fixes this issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
5392 * based on inputs (sockets,cores,threads), it is still better to give
5395 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
5396 * cs->nr_threads hasn't be populated yet and the checking is incorrect.
5398 if (IS_AMD_CPU(env
) &&
5399 !(env
->features
[FEAT_8000_0001_ECX
] & CPUID_EXT3_TOPOEXT
) &&
5400 cs
->nr_threads
> 1 && !ht_warned
) {
5401 warn_report("This family of AMD CPU doesn't support "
5402 "hyperthreading(%d)",
5404 error_printf("Please configure -smp options properly"
5405 " or try enabling topoext feature.\n");
5409 x86_cpu_apic_realize(cpu
, &local_err
);
5410 if (local_err
!= NULL
) {
5415 xcc
->parent_realize(dev
, &local_err
);
5418 if (local_err
!= NULL
) {
5419 error_propagate(errp
, local_err
);
/*
 * x86_cpu_unrealizefn: undo realize — stop the vCPU thread, drop the reset
 * callback, unparent the APIC, then chain to the parent unrealize.
 */
5424 static void x86_cpu_unrealizefn(DeviceState
*dev
, Error
**errp
)
5426 X86CPU
*cpu
= X86_CPU(dev
);
5427 X86CPUClass
*xcc
= X86_CPU_GET_CLASS(dev
);
5428 Error
*local_err
= NULL
;
5430 #ifndef CONFIG_USER_ONLY
5431 cpu_remove_sync(CPU(dev
));
5432 qemu_unregister_reset(x86_cpu_machine_reset_cb
, dev
);
5435 if (cpu
->apic_state
) {
5436 object_unparent(OBJECT(cpu
->apic_state
));
5437 cpu
->apic_state
= NULL
;
5440 xcc
->parent_unrealize(dev
, &local_err
);
5441 if (local_err
!= NULL
) {
5442 error_propagate(errp
, local_err
);
5447 typedef struct BitProperty
{
/*
 * QOM getter for a feature-bit property: reports true only when ALL bits
 * in fp->mask are set in the property's feature word (a name registered
 * multiple times accumulates bits in mask — see x86_cpu_register_bit_prop).
 */
5452 static void x86_cpu_get_bit_prop(Object
*obj
, Visitor
*v
, const char *name
,
5453 void *opaque
, Error
**errp
)
5455 X86CPU
*cpu
= X86_CPU(obj
);
5456 BitProperty
*fp
= opaque
;
5457 uint32_t f
= cpu
->env
.features
[fp
->w
];
5458 bool value
= (f
& fp
->mask
) == fp
->mask
;
5459 visit_type_bool(v
, name
, &value
, errp
);
/*
 * QOM setter for a feature-bit property: sets or clears fp->mask in the
 * feature word and records the bits in user_features so automatic feature
 * expansion never overrides an explicit user choice.  Rejected after the
 * device is realized.
 */
5462 static void x86_cpu_set_bit_prop(Object
*obj
, Visitor
*v
, const char *name
,
5463 void *opaque
, Error
**errp
)
5465 DeviceState
*dev
= DEVICE(obj
);
5466 X86CPU
*cpu
= X86_CPU(obj
);
5467 BitProperty
*fp
= opaque
;
5468 Error
*local_err
= NULL
;
5471 if (dev
->realized
) {
5472 qdev_prop_set_after_realize(dev
, name
, errp
);
5476 visit_type_bool(v
, name
, &value
, &local_err
);
5478 error_propagate(errp
, local_err
);
5483 cpu
->env
.features
[fp
->w
] |= fp
->mask
;
5485 cpu
->env
.features
[fp
->w
] &= ~fp
->mask
;
/* Mark as user-set regardless of direction. */
5487 cpu
->env
.user_features
[fp
->w
] |= fp
->mask
;
/* Property release hook: frees the BitProperty allocated at registration
 * (the g_free call is elided in this extract). */
5490 static void x86_cpu_release_bit_prop(Object
*obj
, const char *name
,
5493 BitProperty
*prop
= opaque
;
5497 /* Register a boolean property to get/set a single bit in a uint32_t field.
5499 * The same property name can be registered multiple times to make it affect
5500 * multiple bits in the same FeatureWord. In that case, the getter will return
5501 * true only if all bits are set.
/* NOTE(review): the duplicate-name branch (extending an existing
 * BitProperty's mask) is elided in this extract. */
5503 static void x86_cpu_register_bit_prop(X86CPU
*cpu
,
5504 const char *prop_name
,
5510 uint32_t mask
= (1UL << bitnr
);
5512 op
= object_property_find(OBJECT(cpu
), prop_name
, NULL
);
5518 fp
= g_new0(BitProperty
, 1);
5521 object_property_add(OBJECT(cpu
), prop_name
, "bool",
5522 x86_cpu_get_bit_prop
,
5523 x86_cpu_set_bit_prop
,
5524 x86_cpu_release_bit_prop
, fp
, &error_abort
);
/*
 * Register the QOM bool property for one named bit of one feature word,
 * enforcing the naming convention (dashes only, no "|" alias lists).
 * Unnamed bits (elided early-return) get no property.
 */
5528 static void x86_cpu_register_feature_bit_props(X86CPU
*cpu
,
5532 FeatureWordInfo
*fi
= &feature_word_info
[w
];
5533 const char *name
= fi
->feat_names
[bitnr
];
5539 /* Property names should use "-" instead of "_".
5540 * Old names containing underscores are registered as aliases
5541 * using object_property_add_alias()
5543 assert(!strchr(name
, '_'));
5544 /* aliases don't use "|" delimiters anymore, they are registered
5545 * manually using object_property_add_alias() */
5546 assert(!strchr(name
, '|'));
5547 x86_cpu_register_bit_prop(cpu
, name
, w
, bitnr
);
/* Build a GuestPanicInformation record from the Hyper-V crash MSRs.
 * Returns NULL when the guest-crash MSRs are not exposed to the guest;
 * otherwise the caller owns the returned allocation
 * (qapi_free_GuestPanicInformation). */
static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    GuestPanicInformation *panic_info = NULL;

    if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
        panic_info = g_malloc0(sizeof(GuestPanicInformation));

        panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;

        /* The five crash arguments come straight from MSRs P0..P4. */
        assert(HV_CRASH_PARAMS >= 5);
        panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
        panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
        panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
        panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
        panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
    }

    return panic_info;
}
5571 static void x86_cpu_get_crash_info_qom(Object
*obj
, Visitor
*v
,
5572 const char *name
, void *opaque
,
5575 CPUState
*cs
= CPU(obj
);
5576 GuestPanicInformation
*panic_info
;
5578 if (!cs
->crash_occurred
) {
5579 error_setg(errp
, "No crash occured");
5583 panic_info
= x86_cpu_get_crash_info(cs
);
5584 if (panic_info
== NULL
) {
5585 error_setg(errp
, "No crash information");
5589 visit_type_GuestPanicInformation(v
, "crash-information", &panic_info
,
5591 qapi_free_GuestPanicInformation(panic_info
);
/* Per-instance QOM init for X86CPU: registers the CPUID version/vendor/
 * model-id/tsc properties, the feature-word introspection properties, one
 * bool property per named feature bit, the legacy underscore/alternate
 * spelling aliases, and finally loads the class's CPU model definition. */
static void x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    cpu_set_cpustate_pointers(cpu);

    /* CPUID family/model/stepping are exposed as int properties with
     * dedicated getters/setters that pack them into env->cpuid_version. */
    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    /* Read-only introspection: the enabled and KVM-filtered feature words. */
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);

    object_property_add(obj, "crash-information", "GuestPanicInformation",
                        x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;

    /* One bool property per named bit of every feature word. */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    /* Alternate spellings kept for compatibility. */
    object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
    object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
    object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
    object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
    object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "i64", obj, "lm", &error_abort);

    /* Legacy underscore spellings of dash-named feature bits. */
    object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
    object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
    object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
    object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
    object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
    object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
    object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
    object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
    object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
    object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
    object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
    object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
    object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
    object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
    object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
    object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
    object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
    object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
    object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);

    /* The abstract base type has no cpu_def; concrete models do. */
    if (xcc->cpu_def) {
        x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
    }
}
5676 static int64_t x86_cpu_get_arch_id(CPUState
*cs
)
5678 X86CPU
*cpu
= X86_CPU(cs
);
5680 return cpu
->apic_id
;
5683 static bool x86_cpu_get_paging_enabled(const CPUState
*cs
)
5685 X86CPU
*cpu
= X86_CPU(cs
);
5687 return cpu
->env
.cr
[0] & CR0_PG_MASK
;
5690 static void x86_cpu_set_pc(CPUState
*cs
, vaddr value
)
5692 X86CPU
*cpu
= X86_CPU(cs
);
5694 cpu
->env
.eip
= value
;
5697 static void x86_cpu_synchronize_from_tb(CPUState
*cs
, TranslationBlock
*tb
)
5699 X86CPU
*cpu
= X86_CPU(cs
);
5701 cpu
->env
.eip
= tb
->pc
- tb
->cs_base
;
/* Return the highest-priority deliverable interrupt from interrupt_request,
 * or 0 if none can be taken now.  The if/else chain encodes x86 delivery
 * priority: POLL, SIPI, then (gated by GIF) SMI, NMI, MCE, external HARD
 * interrupts, and finally virtual interrupts (VIRQ). */
int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if !defined(CONFIG_USER_ONLY)
    if (interrupt_request & CPU_INTERRUPT_POLL) {
        return CPU_INTERRUPT_POLL;
    }
#endif
    if (interrupt_request & CPU_INTERRUPT_SIPI) {
        return CPU_INTERRUPT_SIPI;
    }

    /* Everything below is blocked while the (SVM) Global Interrupt Flag
     * is clear. */
    if (env->hflags2 & HF2_GIF_MASK) {
        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
            !(env->hflags & HF_SMM_MASK)) {
            /* SMIs are not re-entrant while already in SMM. */
            return CPU_INTERRUPT_SMI;
        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                   !(env->hflags2 & HF2_NMI_MASK)) {
            /* NMIs are blocked until the previous NMI handler IRETs. */
            return CPU_INTERRUPT_NMI;
        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
            return CPU_INTERRUPT_MCE;
        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                   (((env->hflags2 & HF2_VINTR_MASK) &&
                     (env->hflags2 & HF2_HIF_MASK)) ||
                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                     (env->eflags & IF_MASK &&
                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
            /* External interrupts: honor either the SVM virtual-interrupt
             * host flag or, outside V_INTR masking, EFLAGS.IF plus the
             * one-instruction STI/MOV-SS inhibit window. */
            return CPU_INTERRUPT_HARD;
#if !defined(CONFIG_USER_ONLY)
        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                   (env->eflags & IF_MASK) &&
                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
            return CPU_INTERRUPT_VIRQ;
#endif
        }
    }

    return 0;
}
5746 static bool x86_cpu_has_work(CPUState
*cs
)
5748 return x86_cpu_pending_interrupt(cs
, cs
->interrupt_request
) != 0;
/* Configure the disassembler for the CPU's current mode (64/32/16-bit),
 * derived from the CS size flags in env->hflags; also sets the capstone
 * arch/mode and instruction chunking parameters. */
static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
                  : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
                  : bfd_mach_i386_i8086);
    info->print_insn = print_insn_i386;

    info->cap_arch = CS_ARCH_X86;
    info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
                      : env->hflags & HF_CS32_MASK ? CS_MODE_32
                      : CS_MODE_16);
    /* x86 instructions are byte-granular, up to 8 bytes fetched per chunk. */
    info->cap_insn_unit = 1;
    info->cap_insn_split = 8;
}
/* Recompute the derived bits of env->hflags from the architectural state
 * (CR0, CR4, EFER, EFLAGS and the segment descriptor caches).  Bits not in
 * HFLAG_COPY_MASK are derived here; the rest are preserved. */
void x86_update_hflags(CPUX86State *env)
{
    uint32_t hflags;
#define HFLAG_COPY_MASK \
    ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
       HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
       HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
       HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    hflags = env->hflags & HFLAG_COPY_MASK;
    /* CPL comes from the DPL field of the cached SS descriptor. */
    hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    /* CR0.MP/EM/TS map onto HF_MP/EM/TS with a single shift. */
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
                (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        hflags |= HF_OSFXSR_MASK;
    }

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        /* 64-bit code segment: CS/SS are implicitly 32/64-bit. */
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        /* Legacy/compat mode: operand sizes come from the descriptors'
         * B (default-size) bits. */
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            /* Real mode, vm86 mode, or 16-bit CS: always add seg bases. */
            hflags |= HF_ADDSEG_MASK;
        } else {
            /* Otherwise segment bases only matter when any is non-zero. */
            hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = hflags;
}
/* qdev property table for X86CPU: topology ids, Hyper-V enlightenments,
 * CPUID level overrides and assorted compatibility knobs. */
static Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
#else
    /* For system emulation the ids default to "unset" and are filled in
     * from the machine topology at realize time. */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    /* "hv-spinlocks" uses a custom property type for its special parsing. */
    { .name  = "hv-spinlocks", .info  = &qdev_prop_spinlocks },
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
    DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
    DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
    DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
    DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
    DEFINE_PROP_BOOL("hv-frequencies", X86CPU, hyperv_frequencies, false),
    DEFINE_PROP_BOOL("hv-reenlightenment", X86CPU, hyperv_reenlightenment, false),
    DEFINE_PROP_BOOL("hv-tlbflush", X86CPU, hyperv_tlbflush, false),
    DEFINE_PROP_BOOL("hv-evmcs", X86CPU, hyperv_evmcs, false),
    DEFINE_PROP_BOOL("hv-ipi", X86CPU, hyperv_ipi, false),
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
    DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
    DEFINE_PROP_UINT8("host-phys-bits-limit", X86CPU, host_phys_bits_limit, 0),
    DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
    /* UINT32_MAX means "no override": keep the model's CPUID level. */
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
    DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
    DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
    DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
    DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
    DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
    DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
                     false),
    DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
    DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
    DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count,
                     true),
    /*
     * legacy_cache defaults to true unless the CPU model provides its
     * own cache information (see x86_cpu_load_def()).
     */
    DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true),

    /*
     * From "Requirements for Implementing the Microsoft
     * Hypervisor Interface":
     * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
     *
     * "Starting with Windows Server 2012 and Windows 8, if
     * CPUID.40000005.EAX contains a value of -1, Windows assumes that
     * the hypervisor imposes no specific limit to the number of VPs.
     * In this case, Windows Server 2012 guest VMs may use more than
     * 64 VPs, up to the maximum supported number of processors applicable
     * to the specific Windows version being used."
     */
    DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
    DEFINE_PROP_BOOL("x-hv-synic-kvm-only", X86CPU, hyperv_synic_kvm_only,
                     false),
    DEFINE_PROP_BOOL("x-intel-pt-auto-level", X86CPU, intel_pt_auto_level,
                     true),
    DEFINE_PROP_END_OF_LIST()
};
/* Class init shared by all X86CPU subtypes: wires the qdev realize hooks,
 * the CPUClass callback table, GDB stub configuration and (for softmmu)
 * migration/ELF-dump support. */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    device_class_set_parent_realize(dc, x86_cpu_realizefn,
                                    &xcc->parent_realize);
    device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
                                      &xcc->parent_unrealize);
    dc->props = x86_cpu_properties;

    /* Chain the reset handler so x86_cpu_reset can call the parent's. */
    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
#ifdef CONFIG_TCG
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
#endif
    cc->dump_state = x86_cpu_dump_state;
    cc->get_crash_info = x86_cpu_get_crash_info;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifndef CONFIG_USER_ONLY
    cc->asidx_from_attrs = x86_asidx_from_attrs;
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_arch_name = x86_gdb_arch_name;
#ifdef TARGET_X86_64
    cc->gdb_core_xml_file = "i386-64bit.xml";
    cc->gdb_num_core_regs = 66;
#else
    cc->gdb_core_xml_file = "i386-32bit.xml";
    cc->gdb_num_core_regs = 50;
#endif
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;
#ifdef CONFIG_TCG
    cc->tcg_initialize = tcg_x86_init;
    cc->tlb_fill = x86_cpu_tlb_fill;
#endif

    cc->disas_set_info = x86_disas_set_info;

    dc->user_creatable = true;
}
/* Abstract base QOM type for all x86 CPU models; concrete models are
 * registered per-cpudef in x86_cpu_register_types(). */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};
/* "base" CPU model, used by query-cpu-model-expansion */
static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    /* A static, migration-safe model with no features enabled: the
     * canonical starting point for CPU model expansion. */
    xcc->static_model = true;
    xcc->migration_safe = true;
    xcc->model_description = "base CPU model type with no features enabled";
}
/* QOM type for the featureless "base" CPU model. */
static const TypeInfo x86_base_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("base"),
    .parent = TYPE_X86_CPU,
    .class_init = x86_cpu_base_class_init,
};
/* Register the abstract base type, one type per built-in CPU model
 * definition, plus the "max", "base" and (when an accelerator is built in)
 * "host" models. */
static void x86_cpu_register_types(void)
{
    int i;

    type_register_static(&x86_cpu_type_info);
    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
        x86_register_cpudef_type(&builtin_x86_defs[i]);
    }
    type_register_static(&max_x86_cpu_type_info);
    type_register_static(&x86_base_cpu_type_info);
#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
    /* "host" only makes sense with a hardware accelerator present. */
    type_register_static(&host_x86_cpu_type_info);
#endif
}

type_init(x86_cpu_register_types)