/*
 * i386 virtual CPU header
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef I386_CPU_H
#define I386_CPU_H

#include "sysemu/tcg.h"
#include "cpu-qom.h"
#include "kvm/hyperv-proto.h"
#include "exec/cpu-defs.h"
#include "qapi/qapi-types-common.h"
#include "qemu/cpu-float.h"

/* The x86 has a strong memory model with some store-after-load re-ordering */
#define TCG_GUEST_DEFAULT_MO      (TCG_MO_ALL & ~TCG_MO_ST_LD)

#define KVM_HAVE_MCE_INJECTION 1

/* support for self modifying code even if the modified instruction is
   close to the modifying instruction */
#define TARGET_HAS_PRECISE_SMC

#ifdef TARGET_X86_64
#define I386_ELF_MACHINE  EM_X86_64
#define ELF_MACHINE_UNAME "x86_64"
#else
#define I386_ELF_MACHINE  EM_386
#define ELF_MACHINE_UNAME "i686"
#endif

enum {
    R_EAX = 0,
    R_ECX = 1,
    R_EDX = 2,
    R_EBX = 3,
    R_ESP = 4,
    R_EBP = 5,
    R_ESI = 6,
    R_EDI = 7,
    R_R8 = 8,
    R_R9 = 9,
    R_R10 = 10,
    R_R11 = 11,
    R_R12 = 12,
    R_R13 = 13,
    R_R14 = 14,
    R_R15 = 15,

    R_AL = 0,
    R_CL = 1,
    R_DL = 2,
    R_BL = 3,
    R_AH = 4,
    R_CH = 5,
    R_DH = 6,
    R_BH = 7,
};

typedef enum X86Seg {
    R_ES = 0,
    R_CS = 1,
    R_SS = 2,
    R_DS = 3,
    R_FS = 4,
    R_GS = 5,
    R_LDTR = 6,
    R_TR = 7,
} X86Seg;

/* segment descriptor fields */
#define DESC_G_SHIFT    23
#define DESC_G_MASK     (1 << DESC_G_SHIFT)
#define DESC_B_SHIFT    22
#define DESC_B_MASK     (1 << DESC_B_SHIFT)
#define DESC_L_SHIFT    21 /* x86_64 only : 64 bit code segment */
#define DESC_L_MASK     (1 << DESC_L_SHIFT)
#define DESC_AVL_SHIFT  20
#define DESC_AVL_MASK   (1 << DESC_AVL_SHIFT)
#define DESC_P_SHIFT    15
#define DESC_P_MASK     (1 << DESC_P_SHIFT)
#define DESC_DPL_SHIFT  13
#define DESC_DPL_MASK   (3 << DESC_DPL_SHIFT)
#define DESC_S_SHIFT    12
#define DESC_S_MASK     (1 << DESC_S_SHIFT)
#define DESC_TYPE_SHIFT 8
#define DESC_TYPE_MASK  (15 << DESC_TYPE_SHIFT)
#define DESC_A_MASK     (1 << 8)

#define DESC_CS_MASK    (1 << 11) /* 1=code segment 0=data segment */
#define DESC_C_MASK     (1 << 10) /* code: conforming */
#define DESC_R_MASK     (1 << 9)  /* code: readable */

#define DESC_E_MASK     (1 << 10) /* data: expansion direction */
#define DESC_W_MASK     (1 << 9)  /* data: writable */

#define DESC_TSS_BUSY_MASK (1 << 9)
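
/*
 * Illustrative sketch, not part of the QEMU API: the DESC_* masks above
 * select fields of the cached high word of a segment descriptor. The
 * helper name is hypothetical.
 */
static inline unsigned x86_desc_dpl(uint32_t flags)
{
    /* Descriptor privilege level, bits 13..14 of the flags word. */
    return (flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT;
}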

/* eflags masks */
#define CC_C    0x0001
#define CC_P    0x0004
#define CC_A    0x0010
#define CC_Z    0x0040
#define CC_S    0x0080
#define CC_O    0x0800

#define TF_SHIFT   8
#define IOPL_SHIFT 12
#define VM_SHIFT   17

#define TF_MASK   0x00000100
#define IF_MASK   0x00000200
#define DF_MASK   0x00000400
#define IOPL_MASK 0x00003000
#define NT_MASK   0x00004000
#define RF_MASK   0x00010000
#define VM_MASK   0x00020000
#define AC_MASK   0x00040000
#define VIF_MASK  0x00080000
#define VIP_MASK  0x00100000
#define ID_MASK   0x00200000

/* hidden flags - used internally by qemu to represent additional cpu
   states. Only the INHIBIT_IRQ, SMM and SVME are not redundant. We
   avoid using the IOPL_MASK, TF_MASK, VM_MASK and AC_MASK bit
   positions to ease ORing with eflags. */
/* current cpl */
#define HF_CPL_SHIFT         0
/* true if hardware interrupts must be disabled for next instruction */
#define HF_INHIBIT_IRQ_SHIFT 3
/* 16 or 32 bit segments */
#define HF_CS32_SHIFT        4
#define HF_SS32_SHIFT        5
/* zero base for DS, ES and SS : can be '0' only in 32 bit CS segment */
#define HF_ADDSEG_SHIFT      6
/* copy of CR0.PE (protected mode) */
#define HF_PE_SHIFT          7
#define HF_TF_SHIFT          8 /* must be same as eflags */
#define HF_MP_SHIFT          9 /* the order must be MP, EM, TS */
#define HF_EM_SHIFT         10
#define HF_TS_SHIFT         11
#define HF_IOPL_SHIFT       12 /* must be same as eflags */
#define HF_LMA_SHIFT        14 /* only used on x86_64: long mode active */
#define HF_CS64_SHIFT       15 /* only used on x86_64: 64 bit code segment */
#define HF_RF_SHIFT         16 /* must be same as eflags */
#define HF_VM_SHIFT         17 /* must be same as eflags */
#define HF_AC_SHIFT         18 /* must be same as eflags */
#define HF_SMM_SHIFT        19 /* CPU in SMM mode */
#define HF_SVME_SHIFT       20 /* SVME enabled (copy of EFER.SVME) */
#define HF_GUEST_SHIFT      21 /* SVM intercepts are active */
#define HF_OSFXSR_SHIFT     22 /* CR4.OSFXSR */
#define HF_SMAP_SHIFT       23 /* CR4.SMAP */
#define HF_IOBPT_SHIFT      24 /* an io breakpoint enabled */
#define HF_MPX_EN_SHIFT     25 /* MPX Enabled (CR4+XCR0+BNDCFGx) */
#define HF_MPX_IU_SHIFT     26 /* BND registers in-use */
#define HF_UMIP_SHIFT       27 /* CR4.UMIP */
#define HF_AVX_EN_SHIFT     28 /* AVX Enabled (CR4+XCR0) */

#define HF_CPL_MASK          (3 << HF_CPL_SHIFT)
#define HF_INHIBIT_IRQ_MASK  (1 << HF_INHIBIT_IRQ_SHIFT)
#define HF_CS32_MASK         (1 << HF_CS32_SHIFT)
#define HF_SS32_MASK         (1 << HF_SS32_SHIFT)
#define HF_ADDSEG_MASK       (1 << HF_ADDSEG_SHIFT)
#define HF_PE_MASK           (1 << HF_PE_SHIFT)
#define HF_TF_MASK           (1 << HF_TF_SHIFT)
#define HF_MP_MASK           (1 << HF_MP_SHIFT)
#define HF_EM_MASK           (1 << HF_EM_SHIFT)
#define HF_TS_MASK           (1 << HF_TS_SHIFT)
#define HF_IOPL_MASK         (3 << HF_IOPL_SHIFT)
#define HF_LMA_MASK          (1 << HF_LMA_SHIFT)
#define HF_CS64_MASK         (1 << HF_CS64_SHIFT)
#define HF_RF_MASK           (1 << HF_RF_SHIFT)
#define HF_VM_MASK           (1 << HF_VM_SHIFT)
#define HF_AC_MASK           (1 << HF_AC_SHIFT)
#define HF_SMM_MASK          (1 << HF_SMM_SHIFT)
#define HF_SVME_MASK         (1 << HF_SVME_SHIFT)
#define HF_GUEST_MASK        (1 << HF_GUEST_SHIFT)
#define HF_OSFXSR_MASK       (1 << HF_OSFXSR_SHIFT)
#define HF_SMAP_MASK         (1 << HF_SMAP_SHIFT)
#define HF_IOBPT_MASK        (1 << HF_IOBPT_SHIFT)
#define HF_MPX_EN_MASK       (1 << HF_MPX_EN_SHIFT)
#define HF_MPX_IU_MASK       (1 << HF_MPX_IU_SHIFT)
#define HF_UMIP_MASK         (1 << HF_UMIP_SHIFT)
#define HF_AVX_EN_MASK       (1 << HF_AVX_EN_SHIFT)
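
/*
 * Illustrative sketch (hypothetical helper): because HF_TF, HF_IOPL,
 * HF_VM and HF_AC deliberately reuse the eflags bit positions, that
 * state can be merged into hflags with plain masking and ORing.
 */
static inline uint32_t x86_hflags_merge_eflags(uint32_t hflags,
                                               uint32_t eflags)
{
    uint32_t shared = HF_TF_MASK | HF_IOPL_MASK | HF_VM_MASK | HF_AC_MASK;

    return (hflags & ~shared) | (eflags & shared);
}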

/* hflags2 */

#define HF2_GIF_SHIFT            0 /* if set CPU takes interrupts */
#define HF2_HIF_SHIFT            1 /* value of IF_MASK when entering SVM */
#define HF2_NMI_SHIFT            2 /* CPU serving NMI */
#define HF2_VINTR_SHIFT          3 /* value of V_INTR_MASKING bit */
#define HF2_SMM_INSIDE_NMI_SHIFT 4 /* CPU serving SMI nested inside NMI */
#define HF2_MPX_PR_SHIFT         5 /* BNDCFGx.BNDPRESERVE */
#define HF2_NPT_SHIFT            6 /* Nested Paging enabled */
#define HF2_IGNNE_SHIFT          7 /* Ignore CR0.NE=0 */
#define HF2_VGIF_SHIFT           8 /* Can take VIRQ */

#define HF2_GIF_MASK            (1 << HF2_GIF_SHIFT)
#define HF2_HIF_MASK            (1 << HF2_HIF_SHIFT)
#define HF2_NMI_MASK            (1 << HF2_NMI_SHIFT)
#define HF2_VINTR_MASK          (1 << HF2_VINTR_SHIFT)
#define HF2_SMM_INSIDE_NMI_MASK (1 << HF2_SMM_INSIDE_NMI_SHIFT)
#define HF2_MPX_PR_MASK         (1 << HF2_MPX_PR_SHIFT)
#define HF2_NPT_MASK            (1 << HF2_NPT_SHIFT)
#define HF2_IGNNE_MASK          (1 << HF2_IGNNE_SHIFT)
#define HF2_VGIF_MASK           (1 << HF2_VGIF_SHIFT)

#define CR0_PE_SHIFT 0
#define CR0_MP_SHIFT 1

#define CR0_PE_MASK  (1U << 0)
#define CR0_MP_MASK  (1U << 1)
#define CR0_EM_MASK  (1U << 2)
#define CR0_TS_MASK  (1U << 3)
#define CR0_ET_MASK  (1U << 4)
#define CR0_NE_MASK  (1U << 5)
#define CR0_WP_MASK  (1U << 16)
#define CR0_AM_MASK  (1U << 18)
#define CR0_NW_MASK  (1U << 29)
#define CR0_CD_MASK  (1U << 30)
#define CR0_PG_MASK  (1U << 31)

#define CR4_VME_MASK        (1U << 0)
#define CR4_PVI_MASK        (1U << 1)
#define CR4_TSD_MASK        (1U << 2)
#define CR4_DE_MASK         (1U << 3)
#define CR4_PSE_MASK        (1U << 4)
#define CR4_PAE_MASK        (1U << 5)
#define CR4_MCE_MASK        (1U << 6)
#define CR4_PGE_MASK        (1U << 7)
#define CR4_PCE_MASK        (1U << 8)
#define CR4_OSFXSR_SHIFT    9
#define CR4_OSFXSR_MASK     (1U << CR4_OSFXSR_SHIFT)
#define CR4_OSXMMEXCPT_MASK (1U << 10)
#define CR4_UMIP_MASK       (1U << 11)
#define CR4_LA57_MASK       (1U << 12)
#define CR4_VMXE_MASK       (1U << 13)
#define CR4_SMXE_MASK       (1U << 14)
#define CR4_FSGSBASE_MASK   (1U << 16)
#define CR4_PCIDE_MASK      (1U << 17)
#define CR4_OSXSAVE_MASK    (1U << 18)
#define CR4_SMEP_MASK       (1U << 20)
#define CR4_SMAP_MASK       (1U << 21)
#define CR4_PKE_MASK        (1U << 22)
#define CR4_PKS_MASK        (1U << 24)

#define CR4_RESERVED_MASK \
(~(target_ulong)(CR4_VME_MASK | CR4_PVI_MASK | CR4_TSD_MASK \
                | CR4_DE_MASK | CR4_PSE_MASK | CR4_PAE_MASK \
                | CR4_MCE_MASK | CR4_PGE_MASK | CR4_PCE_MASK \
                | CR4_OSFXSR_MASK | CR4_OSXMMEXCPT_MASK | CR4_UMIP_MASK \
                | CR4_LA57_MASK \
                | CR4_FSGSBASE_MASK | CR4_PCIDE_MASK | CR4_OSXSAVE_MASK \
                | CR4_SMEP_MASK | CR4_SMAP_MASK | CR4_PKE_MASK | CR4_PKS_MASK))
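
/*
 * Illustrative sketch (hypothetical helper): a guest MOV to CR4 must fault
 * if it sets any architecturally reserved bit. A complete check would also
 * reject feature bits not exposed in CPUID.
 */
static inline bool x86_cr4_reserved_bits_clear(target_ulong new_cr4)
{
    return (new_cr4 & CR4_RESERVED_MASK) == 0;
}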

#define DR6_BD          (1 << 13)
#define DR6_BS          (1 << 14)
#define DR6_BT          (1 << 15)
#define DR6_FIXED_1     0xffff0ff0

#define DR7_GD          (1 << 13)
#define DR7_TYPE_SHIFT  16
#define DR7_LEN_SHIFT   18
#define DR7_FIXED_1     0x00000400
#define DR7_GLOBAL_BP_MASK   0xaa
#define DR7_LOCAL_BP_MASK    0x55
#define DR7_MAX_BP           4
#define DR7_TYPE_BP_INST     0x0
#define DR7_TYPE_DATA_WR     0x1
#define DR7_TYPE_IO_RW       0x2
#define DR7_TYPE_DATA_RW     0x3

#define DR_RESERVED_MASK 0xffffffff00000000ULL
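
/*
 * Illustrative sketch (hypothetical helpers): DR7 packs a 2-bit type and a
 * 2-bit length per breakpoint, four bits apart, starting at bit 16.
 */
static inline int x86_dr7_bp_type(target_ulong dr7, int index)
{
    return (dr7 >> (DR7_TYPE_SHIFT + index * 4)) & 3;
}

static inline int x86_dr7_bp_len(target_ulong dr7, int index)
{
    return (dr7 >> (DR7_LEN_SHIFT + index * 4)) & 3;
}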

#define PG_PRESENT_BIT  0
#define PG_RW_BIT       1
#define PG_USER_BIT     2
#define PG_PWT_BIT      3
#define PG_PCD_BIT      4
#define PG_ACCESSED_BIT 5
#define PG_DIRTY_BIT    6
#define PG_PSE_BIT      7
#define PG_GLOBAL_BIT   8
#define PG_PSE_PAT_BIT  12
#define PG_PKRU_BIT     59
#define PG_NX_BIT       63

#define PG_PRESENT_MASK  (1 << PG_PRESENT_BIT)
#define PG_RW_MASK       (1 << PG_RW_BIT)
#define PG_USER_MASK     (1 << PG_USER_BIT)
#define PG_PWT_MASK      (1 << PG_PWT_BIT)
#define PG_PCD_MASK      (1 << PG_PCD_BIT)
#define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT)
#define PG_DIRTY_MASK    (1 << PG_DIRTY_BIT)
#define PG_PSE_MASK      (1 << PG_PSE_BIT)
#define PG_GLOBAL_MASK   (1 << PG_GLOBAL_BIT)
#define PG_PSE_PAT_MASK  (1 << PG_PSE_PAT_BIT)
#define PG_ADDRESS_MASK  0x000ffffffffff000LL
#define PG_HI_USER_MASK  0x7ff0000000000000LL
#define PG_PKRU_MASK     (15ULL << PG_PKRU_BIT)
#define PG_NX_MASK       (1ULL << PG_NX_BIT)
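
/*
 * Illustrative sketch (hypothetical helpers): in a 64-bit PAE/long-mode
 * PTE, the present bit gates the mapping, the physical address sits under
 * PG_ADDRESS_MASK, and PG_NX_MASK forbids instruction fetches.
 */
static inline bool x86_pte_present(uint64_t pte)
{
    return (pte & PG_PRESENT_MASK) != 0;
}

static inline uint64_t x86_pte_paddr(uint64_t pte)
{
    return pte & PG_ADDRESS_MASK;
}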

#define PG_ERROR_W_BIT     1

#define PG_ERROR_P_MASK    0x01
#define PG_ERROR_W_MASK    (1 << PG_ERROR_W_BIT)
#define PG_ERROR_U_MASK    0x04
#define PG_ERROR_RSVD_MASK 0x08
#define PG_ERROR_I_D_MASK  0x10
#define PG_ERROR_PK_MASK   0x20

#define PG_MODE_PAE      (1 << 0)
#define PG_MODE_LMA      (1 << 1)
#define PG_MODE_NXE      (1 << 2)
#define PG_MODE_PSE      (1 << 3)
#define PG_MODE_LA57     (1 << 4)
#define PG_MODE_SVM_MASK MAKE_64BIT_MASK(0, 15)

/* Bits of CR4 that do not affect the NPT page format. */
#define PG_MODE_WP       (1 << 16)
#define PG_MODE_PKE      (1 << 17)
#define PG_MODE_PKS      (1 << 18)
#define PG_MODE_SMEP     (1 << 19)

#define MCG_CTL_P       (1ULL<<8)   /* MCG_CAP register available */
#define MCG_SER_P       (1ULL<<24)  /* MCA recovery/new status bits */
#define MCG_LMCE_P      (1ULL<<27)  /* Local Machine Check Supported */

#define MCE_CAP_DEF     (MCG_CTL_P|MCG_SER_P)
#define MCE_BANKS_DEF   10

#define MCG_CAP_BANKS_MASK 0xff

#define MCG_STATUS_RIPV (1ULL<<0)   /* restart ip valid */
#define MCG_STATUS_EIPV (1ULL<<1)   /* ip points to correct instruction */
#define MCG_STATUS_MCIP (1ULL<<2)   /* machine check in progress */
#define MCG_STATUS_LMCE (1ULL<<3)   /* Local MCE signaled */

#define MCG_EXT_CTL_LMCE_EN (1ULL<<0) /* Local MCE enabled */

#define MCI_STATUS_VAL   (1ULL<<63)  /* valid error */
#define MCI_STATUS_OVER  (1ULL<<62)  /* previous errors lost */
#define MCI_STATUS_UC    (1ULL<<61)  /* uncorrected error */
#define MCI_STATUS_EN    (1ULL<<60)  /* error enabled */
#define MCI_STATUS_MISCV (1ULL<<59)  /* misc error reg. valid */
#define MCI_STATUS_ADDRV (1ULL<<58)  /* addr reg. valid */
#define MCI_STATUS_PCC   (1ULL<<57)  /* processor context corrupt */
#define MCI_STATUS_S     (1ULL<<56)  /* Signaled machine check */
#define MCI_STATUS_AR    (1ULL<<55)  /* Action required */

/* MISC register defines */
#define MCM_ADDR_SEGOFF  0      /* segment offset */
#define MCM_ADDR_LINEAR  1      /* linear address */
#define MCM_ADDR_PHYS    2      /* physical address */
#define MCM_ADDR_MEM     3      /* memory address */
#define MCM_ADDR_GENERIC 7      /* generic */

#define MSR_IA32_TSC                    0x10
#define MSR_IA32_APICBASE               0x1b
#define MSR_IA32_APICBASE_BSP           (1<<8)
#define MSR_IA32_APICBASE_ENABLE        (1<<11)
#define MSR_IA32_APICBASE_EXTD          (1 << 10)
#define MSR_IA32_APICBASE_BASE          (0xfffffU<<12)
#define MSR_IA32_FEATURE_CONTROL        0x0000003a
#define MSR_TSC_ADJUST                  0x0000003b
#define MSR_IA32_SPEC_CTRL              0x48
#define MSR_VIRT_SSBD                   0xc001011f
#define MSR_IA32_PRED_CMD               0x49
#define MSR_IA32_UCODE_REV              0x8b
#define MSR_IA32_CORE_CAPABILITY        0xcf

#define MSR_IA32_ARCH_CAPABILITIES      0x10a
#define ARCH_CAP_TSX_CTRL_MSR           (1<<7)

#define MSR_IA32_PERF_CAPABILITIES      0x345
#define PERF_CAP_LBR_FMT                0x3f

#define MSR_IA32_TSX_CTRL               0x122
#define MSR_IA32_TSCDEADLINE            0x6e0
#define MSR_IA32_PKRS                   0x6e1
#define MSR_ARCH_LBR_CTL                0x000014ce
#define MSR_ARCH_LBR_DEPTH              0x000014cf
#define MSR_ARCH_LBR_FROM_0             0x00001500
#define MSR_ARCH_LBR_TO_0               0x00001600
#define MSR_ARCH_LBR_INFO_0             0x00001200

#define FEATURE_CONTROL_LOCKED                    (1<<0)
#define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX  (1ULL << 1)
#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2)
#define FEATURE_CONTROL_SGX_LC                    (1ULL << 17)
#define FEATURE_CONTROL_SGX                       (1ULL << 18)
#define FEATURE_CONTROL_LMCE                      (1<<20)

#define MSR_IA32_SGXLEPUBKEYHASH0       0x8c
#define MSR_IA32_SGXLEPUBKEYHASH1       0x8d
#define MSR_IA32_SGXLEPUBKEYHASH2       0x8e
#define MSR_IA32_SGXLEPUBKEYHASH3       0x8f

#define MSR_P6_PERFCTR0                 0xc1

#define MSR_IA32_SMBASE                 0x9e
#define MSR_SMI_COUNT                   0x34
#define MSR_CORE_THREAD_COUNT           0x35
#define MSR_MTRRcap                     0xfe
#define MSR_MTRRcap_VCNT                8
#define MSR_MTRRcap_FIXRANGE_SUPPORT    (1 << 8)
#define MSR_MTRRcap_WC_SUPPORTED        (1 << 10)

#define MSR_IA32_SYSENTER_CS            0x174
#define MSR_IA32_SYSENTER_ESP           0x175
#define MSR_IA32_SYSENTER_EIP           0x176

#define MSR_MCG_CAP                     0x179
#define MSR_MCG_STATUS                  0x17a
#define MSR_MCG_CTL                     0x17b
#define MSR_MCG_EXT_CTL                 0x4d0

#define MSR_P6_EVNTSEL0                 0x186

#define MSR_IA32_PERF_STATUS            0x198

#define MSR_IA32_MISC_ENABLE            0x1a0
/* Indicates good rep/movs microcode on some processors: */
#define MSR_IA32_MISC_ENABLE_DEFAULT    1
#define MSR_IA32_MISC_ENABLE_MWAIT      (1ULL << 18)

#define MSR_MTRRphysBase(reg)           (0x200 + 2 * (reg))
#define MSR_MTRRphysMask(reg)           (0x200 + 2 * (reg) + 1)

#define MSR_MTRRphysIndex(addr)         ((((addr) & ~1u) - 0x200) / 2)
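
/*
 * Illustrative sketch (hypothetical function): the variable-range MTRRs
 * come in (base, mask) MSR pairs, so MSR_MTRRphysIndex() maps either MSR
 * of a pair back to the register number.
 */
static inline void mtrr_msr_index_example(void)
{
    assert(MSR_MTRRphysIndex(MSR_MTRRphysBase(3)) == 3);
    assert(MSR_MTRRphysIndex(MSR_MTRRphysMask(3)) == 3);
}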

#define MSR_MTRRfix64K_00000            0x250
#define MSR_MTRRfix16K_80000            0x258
#define MSR_MTRRfix16K_A0000            0x259
#define MSR_MTRRfix4K_C0000             0x268
#define MSR_MTRRfix4K_C8000             0x269
#define MSR_MTRRfix4K_D0000             0x26a
#define MSR_MTRRfix4K_D8000             0x26b
#define MSR_MTRRfix4K_E0000             0x26c
#define MSR_MTRRfix4K_E8000             0x26d
#define MSR_MTRRfix4K_F0000             0x26e
#define MSR_MTRRfix4K_F8000             0x26f

#define MSR_PAT                         0x277

#define MSR_MTRRdefType                 0x2ff

#define MSR_CORE_PERF_FIXED_CTR0        0x309
#define MSR_CORE_PERF_FIXED_CTR1        0x30a
#define MSR_CORE_PERF_FIXED_CTR2        0x30b
#define MSR_CORE_PERF_FIXED_CTR_CTRL    0x38d
#define MSR_CORE_PERF_GLOBAL_STATUS     0x38e
#define MSR_CORE_PERF_GLOBAL_CTRL       0x38f
#define MSR_CORE_PERF_GLOBAL_OVF_CTRL   0x390

#define MSR_MC0_CTL                     0x400
#define MSR_MC0_STATUS                  0x401
#define MSR_MC0_ADDR                    0x402
#define MSR_MC0_MISC                    0x403

#define MSR_IA32_RTIT_OUTPUT_BASE       0x560
#define MSR_IA32_RTIT_OUTPUT_MASK       0x561
#define MSR_IA32_RTIT_CTL               0x570
#define MSR_IA32_RTIT_STATUS            0x571
#define MSR_IA32_RTIT_CR3_MATCH         0x572
#define MSR_IA32_RTIT_ADDR0_A           0x580
#define MSR_IA32_RTIT_ADDR0_B           0x581
#define MSR_IA32_RTIT_ADDR1_A           0x582
#define MSR_IA32_RTIT_ADDR1_B           0x583
#define MSR_IA32_RTIT_ADDR2_A           0x584
#define MSR_IA32_RTIT_ADDR2_B           0x585
#define MSR_IA32_RTIT_ADDR3_A           0x586
#define MSR_IA32_RTIT_ADDR3_B           0x587
#define MAX_RTIT_ADDRS                  8

#define MSR_EFER                        0xc0000080

#define MSR_EFER_SCE   (1 << 0)
#define MSR_EFER_LME   (1 << 8)
#define MSR_EFER_LMA   (1 << 10)
#define MSR_EFER_NXE   (1 << 11)
#define MSR_EFER_SVME  (1 << 12)
#define MSR_EFER_FFXSR (1 << 14)

#define MSR_EFER_RESERVED \
        (~(target_ulong)(MSR_EFER_SCE | MSR_EFER_LME \
            | MSR_EFER_LMA | MSR_EFER_NXE | MSR_EFER_SVME \
            | MSR_EFER_FFXSR))

#define MSR_STAR                        0xc0000081
#define MSR_LSTAR                       0xc0000082
#define MSR_CSTAR                       0xc0000083
#define MSR_FMASK                       0xc0000084
#define MSR_FSBASE                      0xc0000100
#define MSR_GSBASE                      0xc0000101
#define MSR_KERNELGSBASE                0xc0000102
#define MSR_TSC_AUX                     0xc0000103
#define MSR_AMD64_TSC_RATIO             0xc0000104

#define MSR_AMD64_TSC_RATIO_DEFAULT     0x100000000ULL

#define MSR_VM_HSAVE_PA                 0xc0010117

#define MSR_IA32_XFD                    0x000001c4
#define MSR_IA32_XFD_ERR                0x000001c5

#define MSR_IA32_BNDCFGS                0x00000d90
#define MSR_IA32_XSS                    0x00000da0
#define MSR_IA32_UMWAIT_CONTROL         0xe1

#define MSR_IA32_VMX_BASIC              0x00000480
#define MSR_IA32_VMX_PINBASED_CTLS      0x00000481
#define MSR_IA32_VMX_PROCBASED_CTLS     0x00000482
#define MSR_IA32_VMX_EXIT_CTLS          0x00000483
#define MSR_IA32_VMX_ENTRY_CTLS         0x00000484
#define MSR_IA32_VMX_MISC               0x00000485
#define MSR_IA32_VMX_CR0_FIXED0         0x00000486
#define MSR_IA32_VMX_CR0_FIXED1         0x00000487
#define MSR_IA32_VMX_CR4_FIXED0         0x00000488
#define MSR_IA32_VMX_CR4_FIXED1         0x00000489
#define MSR_IA32_VMX_VMCS_ENUM          0x0000048a
#define MSR_IA32_VMX_PROCBASED_CTLS2    0x0000048b
#define MSR_IA32_VMX_EPT_VPID_CAP       0x0000048c
#define MSR_IA32_VMX_TRUE_PINBASED_CTLS  0x0000048d
#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS 0x0000048e
#define MSR_IA32_VMX_TRUE_EXIT_CTLS      0x0000048f
#define MSR_IA32_VMX_TRUE_ENTRY_CTLS     0x00000490
#define MSR_IA32_VMX_VMFUNC             0x00000491

#define XSTATE_FP_BIT                   0
#define XSTATE_SSE_BIT                  1
#define XSTATE_YMM_BIT                  2
#define XSTATE_BNDREGS_BIT              3
#define XSTATE_BNDCSR_BIT               4
#define XSTATE_OPMASK_BIT               5
#define XSTATE_ZMM_Hi256_BIT            6
#define XSTATE_Hi16_ZMM_BIT             7
#define XSTATE_PKRU_BIT                 9
#define XSTATE_ARCH_LBR_BIT             15
#define XSTATE_XTILE_CFG_BIT            17
#define XSTATE_XTILE_DATA_BIT           18

#define XSTATE_FP_MASK                  (1ULL << XSTATE_FP_BIT)
#define XSTATE_SSE_MASK                 (1ULL << XSTATE_SSE_BIT)
#define XSTATE_YMM_MASK                 (1ULL << XSTATE_YMM_BIT)
#define XSTATE_BNDREGS_MASK             (1ULL << XSTATE_BNDREGS_BIT)
#define XSTATE_BNDCSR_MASK              (1ULL << XSTATE_BNDCSR_BIT)
#define XSTATE_OPMASK_MASK              (1ULL << XSTATE_OPMASK_BIT)
#define XSTATE_ZMM_Hi256_MASK           (1ULL << XSTATE_ZMM_Hi256_BIT)
#define XSTATE_Hi16_ZMM_MASK            (1ULL << XSTATE_Hi16_ZMM_BIT)
#define XSTATE_PKRU_MASK                (1ULL << XSTATE_PKRU_BIT)
#define XSTATE_ARCH_LBR_MASK            (1ULL << XSTATE_ARCH_LBR_BIT)
#define XSTATE_XTILE_CFG_MASK           (1ULL << XSTATE_XTILE_CFG_BIT)
#define XSTATE_XTILE_DATA_MASK          (1ULL << XSTATE_XTILE_DATA_BIT)

#define XSTATE_DYNAMIC_MASK             (XSTATE_XTILE_DATA_MASK)

#define ESA_FEATURE_ALIGN64_BIT         1
#define ESA_FEATURE_XFD_BIT             2

#define ESA_FEATURE_ALIGN64_MASK        (1U << ESA_FEATURE_ALIGN64_BIT)
#define ESA_FEATURE_XFD_MASK            (1U << ESA_FEATURE_XFD_BIT)

/* CPUID feature bits available in XCR0 */
#define CPUID_XSTATE_XCR0_MASK  (XSTATE_FP_MASK | XSTATE_SSE_MASK | \
                                 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | \
                                 XSTATE_BNDCSR_MASK | XSTATE_OPMASK_MASK | \
                                 XSTATE_ZMM_Hi256_MASK | \
                                 XSTATE_Hi16_ZMM_MASK | XSTATE_PKRU_MASK | \
                                 XSTATE_XTILE_CFG_MASK | XSTATE_XTILE_DATA_MASK)
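
/*
 * Illustrative sketch (hypothetical helper): XCR0 components form a
 * hierarchy, so AVX state is usable only when the x87, SSE and YMM bits
 * are all enabled together.
 */
static inline bool x86_xcr0_avx_usable(uint64_t xcr0)
{
    uint64_t needed = XSTATE_FP_MASK | XSTATE_SSE_MASK | XSTATE_YMM_MASK;

    return (xcr0 & needed) == needed;
}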

/* CPUID feature words */
typedef enum FeatureWord {
    FEAT_1_EDX,         /* CPUID[1].EDX */
    FEAT_1_ECX,         /* CPUID[1].ECX */
    FEAT_7_0_EBX,       /* CPUID[EAX=7,ECX=0].EBX */
    FEAT_7_0_ECX,       /* CPUID[EAX=7,ECX=0].ECX */
    FEAT_7_0_EDX,       /* CPUID[EAX=7,ECX=0].EDX */
    FEAT_7_1_EAX,       /* CPUID[EAX=7,ECX=1].EAX */
    FEAT_8000_0001_EDX, /* CPUID[8000_0001].EDX */
    FEAT_8000_0001_ECX, /* CPUID[8000_0001].ECX */
    FEAT_8000_0007_EDX, /* CPUID[8000_0007].EDX */
    FEAT_8000_0008_EBX, /* CPUID[8000_0008].EBX */
    FEAT_C000_0001_EDX, /* CPUID[C000_0001].EDX */
    FEAT_KVM,           /* CPUID[4000_0001].EAX (KVM_CPUID_FEATURES) */
    FEAT_KVM_HINTS,     /* CPUID[4000_0001].EDX */
    FEAT_SVM,           /* CPUID[8000_000A].EDX */
    FEAT_XSAVE,         /* CPUID[EAX=0xd,ECX=1].EAX */
    FEAT_6_EAX,         /* CPUID[6].EAX */
    FEAT_XSAVE_XCR0_LO, /* CPUID[EAX=0xd,ECX=0].EAX */
    FEAT_XSAVE_XCR0_HI, /* CPUID[EAX=0xd,ECX=0].EDX */
    FEAT_ARCH_CAPABILITIES,
    FEAT_CORE_CAPABILITY,
    FEAT_PERF_CAPABILITIES,
    FEAT_VMX_PROCBASED_CTLS,
    FEAT_VMX_SECONDARY_CTLS,
    FEAT_VMX_PINBASED_CTLS,
    FEAT_VMX_EXIT_CTLS,
    FEAT_VMX_ENTRY_CTLS,
    FEAT_VMX_MISC,
    FEAT_VMX_EPT_VPID_CAPS,
    FEAT_VMX_BASIC,
    FEAT_VMX_VMFUNC,
    FEAT_14_0_ECX,
    FEAT_SGX_12_0_EAX,  /* CPUID[EAX=0x12,ECX=0].EAX (SGX) */
    FEAT_SGX_12_0_EBX,  /* CPUID[EAX=0x12,ECX=0].EBX (SGX MISCSELECT[31:0]) */
    FEAT_SGX_12_1_EAX,  /* CPUID[EAX=0x12,ECX=1].EAX (SGX ATTRIBUTES[31:0]) */
    FEAT_XSAVE_XSS_LO,  /* CPUID[EAX=0xd,ECX=1].ECX */
    FEAT_XSAVE_XSS_HI,  /* CPUID[EAX=0xd,ECX=1].EDX */
    FEATURE_WORDS,
} FeatureWord;

typedef uint64_t FeatureWordArray[FEATURE_WORDS];
uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                            bool migratable_only);
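
/*
 * Illustrative sketch (hypothetical helper): feature flags live in one
 * 64-bit word per FeatureWord, so testing a CPUID bit is an array index
 * plus a mask, e.g. feature_word_has(features, FEAT_1_EDX, 1U << 26).
 */
static inline bool feature_word_has(const FeatureWordArray features,
                                    FeatureWord w, uint64_t mask)
{
    return (features[w] & mask) == mask;
}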

/* cpuid_features bits */
#define CPUID_FP87    (1U << 0)
#define CPUID_VME     (1U << 1)
#define CPUID_DE      (1U << 2)
#define CPUID_PSE     (1U << 3)
#define CPUID_TSC     (1U << 4)
#define CPUID_MSR     (1U << 5)
#define CPUID_PAE     (1U << 6)
#define CPUID_MCE     (1U << 7)
#define CPUID_CX8     (1U << 8)
#define CPUID_APIC    (1U << 9)
#define CPUID_SEP     (1U << 11) /* sysenter/sysexit */
#define CPUID_MTRR    (1U << 12)
#define CPUID_PGE     (1U << 13)
#define CPUID_MCA     (1U << 14)
#define CPUID_CMOV    (1U << 15)
#define CPUID_PAT     (1U << 16)
#define CPUID_PSE36   (1U << 17)
#define CPUID_PN      (1U << 18)
#define CPUID_CLFLUSH (1U << 19)
#define CPUID_DTS     (1U << 21)
#define CPUID_ACPI    (1U << 22)
#define CPUID_MMX     (1U << 23)
#define CPUID_FXSR    (1U << 24)
#define CPUID_SSE     (1U << 25)
#define CPUID_SSE2    (1U << 26)
#define CPUID_SS      (1U << 27)
#define CPUID_HT      (1U << 28)
#define CPUID_TM      (1U << 29)
#define CPUID_IA64    (1U << 30)
#define CPUID_PBE     (1U << 31)

#define CPUID_EXT_SSE3       (1U << 0)
#define CPUID_EXT_PCLMULQDQ  (1U << 1)
#define CPUID_EXT_DTES64     (1U << 2)
#define CPUID_EXT_MONITOR    (1U << 3)
#define CPUID_EXT_DSCPL      (1U << 4)
#define CPUID_EXT_VMX        (1U << 5)
#define CPUID_EXT_SMX        (1U << 6)
#define CPUID_EXT_EST        (1U << 7)
#define CPUID_EXT_TM2        (1U << 8)
#define CPUID_EXT_SSSE3      (1U << 9)
#define CPUID_EXT_CID        (1U << 10)
#define CPUID_EXT_FMA        (1U << 12)
#define CPUID_EXT_CX16       (1U << 13)
#define CPUID_EXT_XTPR       (1U << 14)
#define CPUID_EXT_PDCM       (1U << 15)
#define CPUID_EXT_PCID       (1U << 17)
#define CPUID_EXT_DCA        (1U << 18)
#define CPUID_EXT_SSE41      (1U << 19)
#define CPUID_EXT_SSE42      (1U << 20)
#define CPUID_EXT_X2APIC     (1U << 21)
#define CPUID_EXT_MOVBE      (1U << 22)
#define CPUID_EXT_POPCNT     (1U << 23)
#define CPUID_EXT_TSC_DEADLINE_TIMER (1U << 24)
#define CPUID_EXT_AES        (1U << 25)
#define CPUID_EXT_XSAVE      (1U << 26)
#define CPUID_EXT_OSXSAVE    (1U << 27)
#define CPUID_EXT_AVX        (1U << 28)
#define CPUID_EXT_F16C       (1U << 29)
#define CPUID_EXT_RDRAND     (1U << 30)
#define CPUID_EXT_HYPERVISOR (1U << 31)

#define CPUID_EXT2_FPU       (1U << 0)
#define CPUID_EXT2_VME       (1U << 1)
#define CPUID_EXT2_DE        (1U << 2)
#define CPUID_EXT2_PSE       (1U << 3)
#define CPUID_EXT2_TSC       (1U << 4)
#define CPUID_EXT2_MSR       (1U << 5)
#define CPUID_EXT2_PAE       (1U << 6)
#define CPUID_EXT2_MCE       (1U << 7)
#define CPUID_EXT2_CX8       (1U << 8)
#define CPUID_EXT2_APIC      (1U << 9)
#define CPUID_EXT2_SYSCALL   (1U << 11)
#define CPUID_EXT2_MTRR      (1U << 12)
#define CPUID_EXT2_PGE       (1U << 13)
#define CPUID_EXT2_MCA       (1U << 14)
#define CPUID_EXT2_CMOV      (1U << 15)
#define CPUID_EXT2_PAT       (1U << 16)
#define CPUID_EXT2_PSE36     (1U << 17)
#define CPUID_EXT2_MP        (1U << 19)
#define CPUID_EXT2_NX        (1U << 20)
#define CPUID_EXT2_MMXEXT    (1U << 22)
#define CPUID_EXT2_MMX       (1U << 23)
#define CPUID_EXT2_FXSR      (1U << 24)
#define CPUID_EXT2_FFXSR     (1U << 25)
#define CPUID_EXT2_PDPE1GB   (1U << 26)
#define CPUID_EXT2_RDTSCP    (1U << 27)
#define CPUID_EXT2_LM        (1U << 29)
#define CPUID_EXT2_3DNOWEXT  (1U << 30)
#define CPUID_EXT2_3DNOW     (1U << 31)

/* CPUID[8000_0001].EDX bits that are aliases of CPUID[1].EDX bits on AMD CPUs */
#define CPUID_EXT2_AMD_ALIASES (CPUID_EXT2_FPU | CPUID_EXT2_VME | \
                                CPUID_EXT2_DE | CPUID_EXT2_PSE | \
                                CPUID_EXT2_TSC | CPUID_EXT2_MSR | \
                                CPUID_EXT2_PAE | CPUID_EXT2_MCE | \
                                CPUID_EXT2_CX8 | CPUID_EXT2_APIC | \
                                CPUID_EXT2_MTRR | CPUID_EXT2_PGE | \
                                CPUID_EXT2_MCA | CPUID_EXT2_CMOV | \
                                CPUID_EXT2_PAT | CPUID_EXT2_PSE36 | \
                                CPUID_EXT2_MMX | CPUID_EXT2_FXSR)

#define CPUID_EXT3_LAHF_LM       (1U << 0)
#define CPUID_EXT3_CMP_LEG       (1U << 1)
#define CPUID_EXT3_SVM           (1U << 2)
#define CPUID_EXT3_EXTAPIC       (1U << 3)
#define CPUID_EXT3_CR8LEG        (1U << 4)
#define CPUID_EXT3_ABM           (1U << 5)
#define CPUID_EXT3_SSE4A         (1U << 6)
#define CPUID_EXT3_MISALIGNSSE   (1U << 7)
#define CPUID_EXT3_3DNOWPREFETCH (1U << 8)
#define CPUID_EXT3_OSVW          (1U << 9)
#define CPUID_EXT3_IBS           (1U << 10)
#define CPUID_EXT3_XOP           (1U << 11)
#define CPUID_EXT3_SKINIT        (1U << 12)
#define CPUID_EXT3_WDT           (1U << 13)
#define CPUID_EXT3_LWP           (1U << 15)
#define CPUID_EXT3_FMA4          (1U << 16)
#define CPUID_EXT3_TCE           (1U << 17)
#define CPUID_EXT3_NODEID        (1U << 19)
#define CPUID_EXT3_TBM           (1U << 21)
#define CPUID_EXT3_TOPOEXT       (1U << 22)
#define CPUID_EXT3_PERFCORE      (1U << 23)
#define CPUID_EXT3_PERFNB        (1U << 24)

#define CPUID_SVM_NPT             (1U << 0)
#define CPUID_SVM_LBRV            (1U << 1)
#define CPUID_SVM_SVMLOCK         (1U << 2)
#define CPUID_SVM_NRIPSAVE        (1U << 3)
#define CPUID_SVM_TSCSCALE        (1U << 4)
#define CPUID_SVM_VMCBCLEAN       (1U << 5)
#define CPUID_SVM_FLUSHASID       (1U << 6)
#define CPUID_SVM_DECODEASSIST    (1U << 7)
#define CPUID_SVM_PAUSEFILTER     (1U << 10)
#define CPUID_SVM_PFTHRESHOLD     (1U << 12)
#define CPUID_SVM_AVIC            (1U << 13)
#define CPUID_SVM_V_VMSAVE_VMLOAD (1U << 15)
#define CPUID_SVM_VGIF            (1U << 16)
#define CPUID_SVM_SVME_ADDR_CHK   (1U << 28)

/* Support RDFSBASE/RDGSBASE/WRFSBASE/WRGSBASE */
#define CPUID_7_0_EBX_FSGSBASE          (1U << 0)
/* Support SGX */
#define CPUID_7_0_EBX_SGX               (1U << 2)
/* 1st Group of Advanced Bit Manipulation Extensions */
#define CPUID_7_0_EBX_BMI1              (1U << 3)
/* Hardware Lock Elision */
#define CPUID_7_0_EBX_HLE               (1U << 4)
/* Intel Advanced Vector Extensions 2 */
#define CPUID_7_0_EBX_AVX2              (1U << 5)
/* Supervisor-mode Execution Prevention */
#define CPUID_7_0_EBX_SMEP              (1U << 7)
/* 2nd Group of Advanced Bit Manipulation Extensions */
#define CPUID_7_0_EBX_BMI2              (1U << 8)
/* Enhanced REP MOVSB/STOSB */
#define CPUID_7_0_EBX_ERMS              (1U << 9)
/* Invalidate Process-Context Identifier */
#define CPUID_7_0_EBX_INVPCID           (1U << 10)
/* Restricted Transactional Memory */
#define CPUID_7_0_EBX_RTM               (1U << 11)
/* Memory Protection Extension */
#define CPUID_7_0_EBX_MPX               (1U << 14)
/* AVX-512 Foundation */
#define CPUID_7_0_EBX_AVX512F           (1U << 16)
/* AVX-512 Doubleword & Quadword Instruction */
#define CPUID_7_0_EBX_AVX512DQ          (1U << 17)
/* Read Random SEED */
#define CPUID_7_0_EBX_RDSEED            (1U << 18)
/* ADCX and ADOX instructions */
#define CPUID_7_0_EBX_ADX               (1U << 19)
/* Supervisor Mode Access Prevention */
#define CPUID_7_0_EBX_SMAP              (1U << 20)
/* AVX-512 Integer Fused Multiply Add */
#define CPUID_7_0_EBX_AVX512IFMA        (1U << 21)
/* Persistent Commit */
#define CPUID_7_0_EBX_PCOMMIT           (1U << 22)
/* Flush a Cache Line Optimized */
#define CPUID_7_0_EBX_CLFLUSHOPT        (1U << 23)
/* Cache Line Write Back */
#define CPUID_7_0_EBX_CLWB              (1U << 24)
/* Intel Processor Trace */
#define CPUID_7_0_EBX_INTEL_PT          (1U << 25)
/* AVX-512 Prefetch */
#define CPUID_7_0_EBX_AVX512PF          (1U << 26)
/* AVX-512 Exponential and Reciprocal */
#define CPUID_7_0_EBX_AVX512ER          (1U << 27)
/* AVX-512 Conflict Detection */
#define CPUID_7_0_EBX_AVX512CD          (1U << 28)
/* SHA1/SHA256 Instruction Extensions */
#define CPUID_7_0_EBX_SHA_NI            (1U << 29)
/* AVX-512 Byte and Word Instructions */
#define CPUID_7_0_EBX_AVX512BW          (1U << 30)
/* AVX-512 Vector Length Extensions */
#define CPUID_7_0_EBX_AVX512VL          (1U << 31)

/* AVX-512 Vector Byte Manipulation Instruction */
#define CPUID_7_0_ECX_AVX512_VBMI       (1U << 1)
/* User-Mode Instruction Prevention */
#define CPUID_7_0_ECX_UMIP              (1U << 2)
/* Protection Keys for User-mode Pages */
#define CPUID_7_0_ECX_PKU               (1U << 3)
/* OS Enable Protection Keys */
#define CPUID_7_0_ECX_OSPKE             (1U << 4)
/* UMONITOR/UMWAIT/TPAUSE Instructions */
#define CPUID_7_0_ECX_WAITPKG           (1U << 5)
/* Additional AVX-512 Vector Byte Manipulation Instruction */
#define CPUID_7_0_ECX_AVX512_VBMI2      (1U << 6)
/* Galois Field New Instructions */
#define CPUID_7_0_ECX_GFNI              (1U << 8)
/* Vector AES Instructions */
#define CPUID_7_0_ECX_VAES              (1U << 9)
/* Carry-Less Multiplication Quadword */
#define CPUID_7_0_ECX_VPCLMULQDQ        (1U << 10)
/* Vector Neural Network Instructions */
#define CPUID_7_0_ECX_AVX512VNNI        (1U << 11)
/* Support for VPOPCNT[B,W] and VPSHUFBITQMB */
#define CPUID_7_0_ECX_AVX512BITALG      (1U << 12)
/* POPCNT for vectors of DW/QW */
#define CPUID_7_0_ECX_AVX512_VPOPCNTDQ  (1U << 14)
/* 5-level Page Tables */
#define CPUID_7_0_ECX_LA57              (1U << 16)
/* Read Processor ID */
#define CPUID_7_0_ECX_RDPID             (1U << 22)
/* Bus Lock Debug Exception */
#define CPUID_7_0_ECX_BUS_LOCK_DETECT   (1U << 24)
/* Cache Line Demote Instruction */
#define CPUID_7_0_ECX_CLDEMOTE          (1U << 25)
/* Move Doubleword as Direct Store Instruction */
#define CPUID_7_0_ECX_MOVDIRI           (1U << 27)
/* Move 64 Bytes as Direct Store Instruction */
#define CPUID_7_0_ECX_MOVDIR64B         (1U << 28)
/* Support SGX Launch Control */
#define CPUID_7_0_ECX_SGX_LC            (1U << 30)
/* Protection Keys for Supervisor-mode Pages */
#define CPUID_7_0_ECX_PKS               (1U << 31)

/* AVX512 Neural Network Instructions */
#define CPUID_7_0_EDX_AVX512_4VNNIW     (1U << 2)
/* AVX512 Multiply Accumulation Single Precision */
#define CPUID_7_0_EDX_AVX512_4FMAPS     (1U << 3)
/* Fast Short Rep Mov */
#define CPUID_7_0_EDX_FSRM              (1U << 4)
/* AVX512 Vector Pair Intersection to a Pair of Mask Registers */
#define CPUID_7_0_EDX_AVX512_VP2INTERSECT (1U << 8)
/* SERIALIZE instruction */
#define CPUID_7_0_EDX_SERIALIZE         (1U << 14)
/* TSX Suspend Load Address Tracking instruction */
#define CPUID_7_0_EDX_TSX_LDTRK         (1U << 16)
/* Architectural LBRs */
#define CPUID_7_0_EDX_ARCH_LBR          (1U << 19)
/* AVX512_FP16 instruction */
#define CPUID_7_0_EDX_AVX512_FP16       (1U << 23)
/* AMX tile (two-dimensional register) */
#define CPUID_7_0_EDX_AMX_TILE          (1U << 24)
/* Speculation Control */
#define CPUID_7_0_EDX_SPEC_CTRL         (1U << 26)
/* Single Thread Indirect Branch Predictors */
#define CPUID_7_0_EDX_STIBP             (1U << 27)
/* Arch Capabilities */
#define CPUID_7_0_EDX_ARCH_CAPABILITIES (1U << 29)
/* Core Capability */
#define CPUID_7_0_EDX_CORE_CAPABILITY   (1U << 30)
/* Speculative Store Bypass Disable */
#define CPUID_7_0_EDX_SPEC_CTRL_SSBD    (1U << 31)

/* AVX VNNI Instruction */
#define CPUID_7_1_EAX_AVX_VNNI          (1U << 4)
/* AVX512 BFloat16 Instruction */
#define CPUID_7_1_EAX_AVX512_BF16       (1U << 5)
/* XFD Extend Feature Disabled */
#define CPUID_D_1_EAX_XFD               (1U << 4)

/* Packets which contain IP payload have LIP values */
#define CPUID_14_0_ECX_LIP              (1U << 31)

/* CLZERO instruction */
#define CPUID_8000_0008_EBX_CLZERO      (1U << 0)
/* Always save/restore FP error pointers */
#define CPUID_8000_0008_EBX_XSAVEERPTR  (1U << 2)
/* Write back and do not invalidate cache */
#define CPUID_8000_0008_EBX_WBNOINVD    (1U << 9)
/* Indirect Branch Prediction Barrier */
#define CPUID_8000_0008_EBX_IBPB        (1U << 12)
/* Indirect Branch Restricted Speculation */
#define CPUID_8000_0008_EBX_IBRS        (1U << 14)
/* Single Thread Indirect Branch Predictors */
#define CPUID_8000_0008_EBX_STIBP       (1U << 15)
/* Speculative Store Bypass Disable */
#define CPUID_8000_0008_EBX_AMD_SSBD    (1U << 24)

#define CPUID_XSAVE_XSAVEOPT   (1U << 0)
#define CPUID_XSAVE_XSAVEC     (1U << 1)
#define CPUID_XSAVE_XGETBV1    (1U << 2)
#define CPUID_XSAVE_XSAVES     (1U << 3)

#define CPUID_6_EAX_ARAT       (1U << 2)

/* CPUID[0x80000007].EDX flags: */
#define CPUID_APM_INVTSC       (1U << 8)

#define CPUID_VENDOR_SZ      12

#define CPUID_VENDOR_INTEL_1 0x756e6547 /* "Genu" */
#define CPUID_VENDOR_INTEL_2 0x49656e69 /* "ineI" */
#define CPUID_VENDOR_INTEL_3 0x6c65746e /* "ntel" */
#define CPUID_VENDOR_INTEL   "GenuineIntel"

#define CPUID_VENDOR_AMD_1   0x68747541 /* "Auth" */
#define CPUID_VENDOR_AMD_2   0x69746e65 /* "enti" */
#define CPUID_VENDOR_AMD_3   0x444d4163 /* "cAMD" */
#define CPUID_VENDOR_AMD     "AuthenticAMD"

#define CPUID_VENDOR_VIA     "CentaurHauls"

#define CPUID_VENDOR_HYGON   "HygonGenuine"

#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
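
/*
 * Illustrative sketch (hypothetical function): CPUID[0] returns the vendor
 * string as three little-endian 32-bit words in EBX, EDX, ECX, which is
 * why "Genu" packs to 0x756e6547.
 */
static inline void x86_vendor_words_example(void)
{
    const char *v = CPUID_VENDOR_INTEL;
    uint32_t ebx = v[0] | (v[1] << 8) | (v[2] << 16) | ((uint32_t)v[3] << 24);

    assert(ebx == CPUID_VENDOR_INTEL_1); /* "Genu" */
}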

#define CPUID_MWAIT_IBE     (1U << 1) /* Interrupts can exit capability */
#define CPUID_MWAIT_EMX     (1U << 0) /* enumeration supported */

/* CPUID[0xB].ECX level types */
#define CPUID_TOPOLOGY_LEVEL_INVALID  (0U << 8)
#define CPUID_TOPOLOGY_LEVEL_SMT      (1U << 8)
#define CPUID_TOPOLOGY_LEVEL_CORE     (2U << 8)
#define CPUID_TOPOLOGY_LEVEL_DIE      (5U << 8)

/* MSR Feature Bits */
#define MSR_ARCH_CAP_RDCL_NO            (1U << 0)
#define MSR_ARCH_CAP_IBRS_ALL           (1U << 1)
#define MSR_ARCH_CAP_RSBA               (1U << 2)
#define MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY (1U << 3)
#define MSR_ARCH_CAP_SSB_NO             (1U << 4)
#define MSR_ARCH_CAP_MDS_NO             (1U << 5)
#define MSR_ARCH_CAP_PSCHANGE_MC_NO     (1U << 6)
#define MSR_ARCH_CAP_TSX_CTRL_MSR       (1U << 7)
#define MSR_ARCH_CAP_TAA_NO             (1U << 8)

#define MSR_CORE_CAP_SPLIT_LOCK_DETECT  (1U << 5)

/* VMX MSR features */
#define MSR_VMX_BASIC_VMCS_REVISION_MASK             0x7FFFFFFFull
#define MSR_VMX_BASIC_VMXON_REGION_SIZE_MASK         (0x00001FFFull << 32)
#define MSR_VMX_BASIC_VMCS_MEM_TYPE_MASK             (0x003C0000ull << 32)
#define MSR_VMX_BASIC_DUAL_MONITOR                   (1ULL << 49)
#define MSR_VMX_BASIC_INS_OUTS                       (1ULL << 54)
#define MSR_VMX_BASIC_TRUE_CTLS                      (1ULL << 55)

#define MSR_VMX_MISC_PREEMPTION_TIMER_SHIFT_MASK     0x1Full
#define MSR_VMX_MISC_STORE_LMA                       (1ULL << 5)
#define MSR_VMX_MISC_ACTIVITY_HLT                    (1ULL << 6)
#define MSR_VMX_MISC_ACTIVITY_SHUTDOWN               (1ULL << 7)
#define MSR_VMX_MISC_ACTIVITY_WAIT_SIPI              (1ULL << 8)
#define MSR_VMX_MISC_MAX_MSR_LIST_SIZE_MASK          0x0E000000ull
#define MSR_VMX_MISC_VMWRITE_VMEXIT                  (1ULL << 29)
#define MSR_VMX_MISC_ZERO_LEN_INJECT                 (1ULL << 30)

#define MSR_VMX_EPT_EXECONLY                         (1ULL << 0)
#define MSR_VMX_EPT_PAGE_WALK_LENGTH_4               (1ULL << 6)
#define MSR_VMX_EPT_PAGE_WALK_LENGTH_5               (1ULL << 7)
#define MSR_VMX_EPT_UC                               (1ULL << 8)
#define MSR_VMX_EPT_WB                               (1ULL << 14)
#define MSR_VMX_EPT_2MB                              (1ULL << 16)
#define MSR_VMX_EPT_1GB                              (1ULL << 17)
#define MSR_VMX_EPT_INVEPT                           (1ULL << 20)
#define MSR_VMX_EPT_AD_BITS                          (1ULL << 21)
#define MSR_VMX_EPT_ADVANCED_VMEXIT_INFO             (1ULL << 22)
#define MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT            (1ULL << 25)
#define MSR_VMX_EPT_INVEPT_ALL_CONTEXT               (1ULL << 26)
#define MSR_VMX_EPT_INVVPID                          (1ULL << 32)
#define MSR_VMX_EPT_INVVPID_SINGLE_ADDR              (1ULL << 40)
#define MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT           (1ULL << 41)
#define MSR_VMX_EPT_INVVPID_ALL_CONTEXT              (1ULL << 42)
#define MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS (1ULL << 43)

#define MSR_VMX_VMFUNC_EPT_SWITCHING                 (1ULL << 0)

/* VMX controls */
#define VMX_CPU_BASED_VIRTUAL_INTR_PENDING          0x00000004
#define VMX_CPU_BASED_USE_TSC_OFFSETING             0x00000008
#define VMX_CPU_BASED_HLT_EXITING                   0x00000080
#define VMX_CPU_BASED_INVLPG_EXITING                0x00000200
#define VMX_CPU_BASED_MWAIT_EXITING                 0x00000400
#define VMX_CPU_BASED_RDPMC_EXITING                 0x00000800
#define VMX_CPU_BASED_RDTSC_EXITING                 0x00001000
#define VMX_CPU_BASED_CR3_LOAD_EXITING              0x00008000
#define VMX_CPU_BASED_CR3_STORE_EXITING             0x00010000
#define VMX_CPU_BASED_CR8_LOAD_EXITING              0x00080000
#define VMX_CPU_BASED_CR8_STORE_EXITING             0x00100000
#define VMX_CPU_BASED_TPR_SHADOW                    0x00200000
#define VMX_CPU_BASED_VIRTUAL_NMI_PENDING           0x00400000
#define VMX_CPU_BASED_MOV_DR_EXITING                0x00800000
#define VMX_CPU_BASED_UNCOND_IO_EXITING             0x01000000
#define VMX_CPU_BASED_USE_IO_BITMAPS                0x02000000
#define VMX_CPU_BASED_MONITOR_TRAP_FLAG             0x08000000
#define VMX_CPU_BASED_USE_MSR_BITMAPS               0x10000000
#define VMX_CPU_BASED_MONITOR_EXITING               0x20000000
#define VMX_CPU_BASED_PAUSE_EXITING                 0x40000000
#define VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS   0x80000000

#define VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001
#define VMX_SECONDARY_EXEC_ENABLE_EPT               0x00000002
#define VMX_SECONDARY_EXEC_DESC                     0x00000004
#define VMX_SECONDARY_EXEC_RDTSCP                   0x00000008
#define VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE   0x00000010
#define VMX_SECONDARY_EXEC_ENABLE_VPID              0x00000020
#define VMX_SECONDARY_EXEC_WBINVD_EXITING           0x00000040
#define VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST       0x00000080
#define VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT       0x00000100
#define VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY    0x00000200
#define VMX_SECONDARY_EXEC_PAUSE_LOOP_EXITING       0x00000400
#define VMX_SECONDARY_EXEC_RDRAND_EXITING           0x00000800
#define VMX_SECONDARY_EXEC_ENABLE_INVPCID           0x00001000
#define VMX_SECONDARY_EXEC_ENABLE_VMFUNC            0x00002000
#define VMX_SECONDARY_EXEC_SHADOW_VMCS              0x00004000
#define VMX_SECONDARY_EXEC_ENCLS_EXITING            0x00008000
#define VMX_SECONDARY_EXEC_RDSEED_EXITING           0x00010000
#define VMX_SECONDARY_EXEC_ENABLE_PML               0x00020000
#define VMX_SECONDARY_EXEC_XSAVES                   0x00100000
#define VMX_SECONDARY_EXEC_TSC_SCALING              0x02000000

#define VMX_PIN_BASED_EXT_INTR_MASK                 0x00000001
#define VMX_PIN_BASED_NMI_EXITING                   0x00000008
#define VMX_PIN_BASED_VIRTUAL_NMIS                  0x00000020
#define VMX_PIN_BASED_VMX_PREEMPTION_TIMER          0x00000040
#define VMX_PIN_BASED_POSTED_INTR                   0x00000080

#define VMX_VM_EXIT_SAVE_DEBUG_CONTROLS             0x00000004
#define VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE            0x00000200
#define VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL      0x00001000
#define VMX_VM_EXIT_ACK_INTR_ON_EXIT                0x00008000
#define VMX_VM_EXIT_SAVE_IA32_PAT                   0x00040000
#define VMX_VM_EXIT_LOAD_IA32_PAT                   0x00080000
#define VMX_VM_EXIT_SAVE_IA32_EFER                  0x00100000
#define VMX_VM_EXIT_LOAD_IA32_EFER                  0x00200000
#define VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER       0x00400000
#define VMX_VM_EXIT_CLEAR_BNDCFGS                   0x00800000
#define VMX_VM_EXIT_PT_CONCEAL_PIP                  0x01000000
#define VMX_VM_EXIT_CLEAR_IA32_RTIT_CTL             0x02000000
#define VMX_VM_EXIT_LOAD_IA32_PKRS                  0x20000000

#define VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS            0x00000004
#define VMX_VM_ENTRY_IA32E_MODE                     0x00000200
#define VMX_VM_ENTRY_SMM                            0x00000400
#define VMX_VM_ENTRY_DEACT_DUAL_MONITOR             0x00000800
#define VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL     0x00002000
#define VMX_VM_ENTRY_LOAD_IA32_PAT                  0x00004000
#define VMX_VM_ENTRY_LOAD_IA32_EFER                 0x00008000
#define VMX_VM_ENTRY_LOAD_BNDCFGS                   0x00010000
#define VMX_VM_ENTRY_PT_CONCEAL_PIP                 0x00020000
#define VMX_VM_ENTRY_LOAD_IA32_RTIT_CTL             0x00040000
#define VMX_VM_ENTRY_LOAD_IA32_PKRS                 0x00400000

/* Supported Hyper-V Enlightenments */
#define HYPERV_FEAT_RELAXED             0
#define HYPERV_FEAT_VAPIC               1
#define HYPERV_FEAT_TIME                2
#define HYPERV_FEAT_CRASH               3
#define HYPERV_FEAT_RESET               4
#define HYPERV_FEAT_VPINDEX             5
#define HYPERV_FEAT_RUNTIME             6
#define HYPERV_FEAT_SYNIC               7
#define HYPERV_FEAT_STIMER              8
#define HYPERV_FEAT_FREQUENCIES         9
#define HYPERV_FEAT_REENLIGHTENMENT     10
#define HYPERV_FEAT_TLBFLUSH            11
#define HYPERV_FEAT_EVMCS               12
#define HYPERV_FEAT_IPI                 13
#define HYPERV_FEAT_STIMER_DIRECT       14
#define HYPERV_FEAT_AVIC                15
#define HYPERV_FEAT_SYNDBG              16
#define HYPERV_FEAT_MSR_BITMAP          17
#define HYPERV_FEAT_XMM_INPUT           18
#define HYPERV_FEAT_TLBFLUSH_EXT        19
#define HYPERV_FEAT_TLBFLUSH_DIRECT     20

#ifndef HYPERV_SPINLOCK_NEVER_NOTIFY
#define HYPERV_SPINLOCK_NEVER_NOTIFY    0xFFFFFFFF
#endif

#define EXCP00_DIVZ     0
#define EXCP01_DB       1
#define EXCP02_NMI      2
#define EXCP03_INT3     3
#define EXCP04_INTO     4
#define EXCP05_BOUND    5
#define EXCP06_ILLOP    6
#define EXCP07_PREX     7
#define EXCP08_DBLE     8
#define EXCP09_XERR     9
#define EXCP0A_TSS      10
#define EXCP0B_NOSEG    11
#define EXCP0C_STACK    12
#define EXCP0D_GPF      13
#define EXCP0E_PAGE     14
#define EXCP10_COPR     16
#define EXCP11_ALGN     17
#define EXCP12_MCHK     18

#define EXCP_VMEXIT     0x100 /* only for system emulation */
#define EXCP_SYSCALL    0x101 /* only for user emulation */
#define EXCP_VSYSCALL   0x102 /* only for user emulation */

/* i386-specific interrupt pending bits. */
#define CPU_INTERRUPT_POLL      CPU_INTERRUPT_TGT_EXT_1
#define CPU_INTERRUPT_SMI       CPU_INTERRUPT_TGT_EXT_2
#define CPU_INTERRUPT_NMI       CPU_INTERRUPT_TGT_EXT_3
#define CPU_INTERRUPT_MCE       CPU_INTERRUPT_TGT_EXT_4
#define CPU_INTERRUPT_VIRQ      CPU_INTERRUPT_TGT_INT_0
#define CPU_INTERRUPT_SIPI      CPU_INTERRUPT_TGT_INT_1
#define CPU_INTERRUPT_TPR       CPU_INTERRUPT_TGT_INT_2

/* Use a clearer name for this. */
#define CPU_INTERRUPT_INIT      CPU_INTERRUPT_RESET

/* Instead of computing the condition codes after each x86 instruction,
 * QEMU just stores one operand (called CC_SRC), the result
 * (called CC_DST) and the type of operation (called CC_OP). When the
 * condition codes are needed, the condition codes can be calculated
 * using this information. Condition codes are not generated if they
 * are only needed for conditional branches.
 */
typedef enum {
    CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
    CC_OP_EFLAGS,  /* all cc are explicitly computed, CC_SRC = flags */

    CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */
    CC_OP_MULW,
    CC_OP_MULL,
    CC_OP_MULQ,

    CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADDW,
    CC_OP_ADDL,
    CC_OP_ADDQ,

    CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADCW,
    CC_OP_ADCL,
    CC_OP_ADCQ,

    CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SUBW,
    CC_OP_SUBL,
    CC_OP_SUBQ,

    CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SBBW,
    CC_OP_SBBL,
    CC_OP_SBBQ,

    CC_OP_LOGICB, /* modify all flags, CC_DST = res */
    CC_OP_LOGICW,
    CC_OP_LOGICL,
    CC_OP_LOGICQ,

    CC_OP_INCB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
    CC_OP_INCW,
    CC_OP_INCL,
    CC_OP_INCQ,

    CC_OP_DECB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
    CC_OP_DECW,
    CC_OP_DECL,
    CC_OP_DECQ,

    CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.msb = C */
    CC_OP_SHLW,
    CC_OP_SHLL,
    CC_OP_SHLQ,

    CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
    CC_OP_SARW,
    CC_OP_SARL,
    CC_OP_SARQ,

    CC_OP_BMILGB, /* Z,S via CC_DST, C = SRC==0; O=0; P,A undefined */
    CC_OP_BMILGW,
    CC_OP_BMILGL,
    CC_OP_BMILGQ,

    CC_OP_ADCX,  /* CC_DST = C, CC_SRC = rest. */
    CC_OP_ADOX,  /* CC_DST = O, CC_SRC = rest. */
    CC_OP_ADCOX, /* CC_DST = C, CC_SRC2 = O, CC_SRC = rest. */

    CC_OP_CLR,    /* Z set, all other flags clear. */
    CC_OP_POPCNT, /* Z via CC_SRC, all other flags clear. */

    CC_OP_NB,
} CCOp;
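
/*
 * Illustrative sketch, not QEMU's actual evaluator: with the lazy scheme
 * above, ZF and SF after a 32-bit logic operation fall out of CC_DST
 * alone; CF and OF are architecturally zero, and PF is omitted here.
 */
static inline uint32_t cc_compute_logic32_example(uint32_t cc_dst)
{
    uint32_t eflags = 0;

    if (cc_dst == 0) {
        eflags |= CC_Z;
    }
    if (cc_dst & 0x80000000u) {
        eflags |= CC_S;
    }
    return eflags;
}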

typedef struct SegmentCache {
    uint32_t selector;
    target_ulong base;
    uint32_t limit;
    uint32_t flags;
} SegmentCache;

typedef union MMXReg {
    uint8_t  _b_MMXReg[64 / 8];
    uint16_t _w_MMXReg[64 / 16];
    uint32_t _l_MMXReg[64 / 32];
    uint64_t _q_MMXReg[64 / 64];
    float32  _s_MMXReg[64 / 32];
    float64  _d_MMXReg[64 / 64];
} MMXReg;

typedef union XMMReg {
    uint64_t _q_XMMReg[128 / 64];
} XMMReg;

typedef union YMMReg {
    uint64_t _q_YMMReg[256 / 64];
    XMMReg   _x_YMMReg[256 / 128];
} YMMReg;

typedef union ZMMReg {
    uint8_t  _b_ZMMReg[512 / 8];
    uint16_t _w_ZMMReg[512 / 16];
    uint32_t _l_ZMMReg[512 / 32];
    uint64_t _q_ZMMReg[512 / 64];
    float16  _h_ZMMReg[512 / 16];
    float32  _s_ZMMReg[512 / 32];
    float64  _d_ZMMReg[512 / 64];
    XMMReg   _x_ZMMReg[512 / 128];
    YMMReg   _y_ZMMReg[512 / 256];
} ZMMReg;

typedef struct BNDReg {
    uint64_t lb;
    uint64_t ub;
} BNDReg;

typedef struct BNDCSReg {
    uint64_t cfgu;
    uint64_t sts;
} BNDCSReg;

#define BNDCFG_ENABLE       1ULL
#define BNDCFG_BNDPRESERVE  2ULL
#define BNDCFG_BDIR_MASK    TARGET_PAGE_MASK

#if HOST_BIG_ENDIAN
#define ZMM_B(n) _b_ZMMReg[63 - (n)]
#define ZMM_W(n) _w_ZMMReg[31 - (n)]
#define ZMM_L(n) _l_ZMMReg[15 - (n)]
#define ZMM_H(n) _h_ZMMReg[31 - (n)]
#define ZMM_S(n) _s_ZMMReg[15 - (n)]
#define ZMM_Q(n) _q_ZMMReg[7 - (n)]
#define ZMM_D(n) _d_ZMMReg[7 - (n)]
#define ZMM_X(n) _x_ZMMReg[3 - (n)]
#define ZMM_Y(n) _y_ZMMReg[1 - (n)]

#define XMM_Q(n) _q_XMMReg[1 - (n)]

#define YMM_Q(n) _q_YMMReg[3 - (n)]
#define YMM_X(n) _x_YMMReg[1 - (n)]

#define MMX_B(n) _b_MMXReg[7 - (n)]
#define MMX_W(n) _w_MMXReg[3 - (n)]
#define MMX_L(n) _l_MMXReg[1 - (n)]
#define MMX_S(n) _s_MMXReg[1 - (n)]
#else
#define ZMM_B(n) _b_ZMMReg[n]
#define ZMM_W(n) _w_ZMMReg[n]
#define ZMM_L(n) _l_ZMMReg[n]
#define ZMM_H(n) _h_ZMMReg[n]
#define ZMM_S(n) _s_ZMMReg[n]
#define ZMM_Q(n) _q_ZMMReg[n]
#define ZMM_D(n) _d_ZMMReg[n]
#define ZMM_X(n) _x_ZMMReg[n]
#define ZMM_Y(n) _y_ZMMReg[n]

#define XMM_Q(n) _q_XMMReg[n]

#define YMM_Q(n) _q_YMMReg[n]
#define YMM_X(n) _x_YMMReg[n]

#define MMX_B(n) _b_MMXReg[n]
#define MMX_W(n) _w_MMXReg[n]
#define MMX_L(n) _l_MMXReg[n]
#define MMX_S(n) _s_MMXReg[n]
#endif
#define MMX_Q(n) _q_MMXReg[n]
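
/*
 * Illustrative sketch (hypothetical helper): the reversed indices on
 * big-endian hosts keep ZMM_L(0) naming the architecturally lowest 32
 * bits of the register regardless of host byte order.
 */
static inline uint32_t zmm_low_dword(const ZMMReg *r)
{
    return r->ZMM_L(0);
}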

typedef union {
    floatx80 d __attribute__((aligned(16)));
    MMXReg mmx;
} FPReg;

typedef struct {
    uint64_t base;
    uint64_t mask;
} MTRRVar;

#define CPU_NB_REGS64 16
#define CPU_NB_REGS32 8

#ifdef TARGET_X86_64
#define CPU_NB_REGS CPU_NB_REGS64
#else
#define CPU_NB_REGS CPU_NB_REGS32
#endif

#define MAX_FIXED_COUNTERS 3
#define MAX_GP_COUNTERS    (MSR_IA32_PERF_STATUS - MSR_P6_EVNTSEL0)

#define TARGET_INSN_START_EXTRA_WORDS 1

#define NB_OPMASK_REGS 8

/* CPU can't have 0xFFFFFFFF APIC ID, use that value to distinguish
 * that APIC ID hasn't been set yet
 */
#define UNASSIGNED_APIC_ID 0xFFFFFFFF

typedef union X86LegacyXSaveArea {
    struct {
        uint16_t fcw;
        uint16_t fsw;
        uint8_t ftw;
        uint8_t reserved;
        uint16_t fpop;
        uint64_t fpip;
        uint64_t fpdp;
        uint32_t mxcsr;
        uint32_t mxcsr_mask;
        FPReg fpregs[8];
        uint8_t xmm_regs[16][16];
    };
    uint8_t data[512];
} X86LegacyXSaveArea;

typedef struct X86XSaveHeader {
    uint64_t xstate_bv;
    uint64_t xcomp_bv;
    uint64_t reserve0;
    uint8_t reserved[40];
} X86XSaveHeader;

/* Ext. save area 2: AVX State */
typedef struct XSaveAVX {
    uint8_t ymmh[16][16];
} XSaveAVX;

/* Ext. save area 3: BNDREG */
typedef struct XSaveBNDREG {
    BNDReg bnd_regs[4];
} XSaveBNDREG;

/* Ext. save area 4: BNDCSR */
typedef union XSaveBNDCSR {
    BNDCSReg bndcsr;
    uint8_t data[64];
} XSaveBNDCSR;

/* Ext. save area 5: Opmask */
typedef struct XSaveOpmask {
    uint64_t opmask_regs[NB_OPMASK_REGS];
} XSaveOpmask;

/* Ext. save area 6: ZMM_Hi256 */
typedef struct XSaveZMM_Hi256 {
    uint8_t zmm_hi256[16][32];
} XSaveZMM_Hi256;

/* Ext. save area 7: Hi16_ZMM */
typedef struct XSaveHi16_ZMM {
    uint8_t hi16_zmm[16][64];
} XSaveHi16_ZMM;

/* Ext. save area 9: PKRU state */
typedef struct XSavePKRU {
    uint32_t pkru;
    uint32_t padding;
} XSavePKRU;

/* Ext. save area 17: AMX XTILECFG state */
typedef struct XSaveXTILECFG {
    uint8_t xtilecfg[64];
} XSaveXTILECFG;

/* Ext. save area 18: AMX XTILEDATA state */
typedef struct XSaveXTILEDATA {
    uint8_t xtiledata[8][1024];
} XSaveXTILEDATA;

typedef struct {
    uint64_t from;
    uint64_t to;
    uint64_t info;
} LBREntry;

#define ARCH_LBR_NR_ENTRIES 32

/* Ext. save area 19: Supervisor mode Arch LBR state */
typedef struct XSavesArchLBR {
    uint64_t lbr_ctl;
    uint64_t lbr_depth;
    uint64_t ler_from;
    uint64_t ler_to;
    uint64_t ler_info;
    LBREntry lbr_records[ARCH_LBR_NR_ENTRIES];
} XSavesArchLBR;

QEMU_BUILD_BUG_ON(sizeof(XSaveAVX) != 0x100);
QEMU_BUILD_BUG_ON(sizeof(XSaveBNDREG) != 0x40);
QEMU_BUILD_BUG_ON(sizeof(XSaveBNDCSR) != 0x40);
QEMU_BUILD_BUG_ON(sizeof(XSaveOpmask) != 0x40);
QEMU_BUILD_BUG_ON(sizeof(XSaveZMM_Hi256) != 0x200);
QEMU_BUILD_BUG_ON(sizeof(XSaveHi16_ZMM) != 0x400);
QEMU_BUILD_BUG_ON(sizeof(XSavePKRU) != 0x8);
QEMU_BUILD_BUG_ON(sizeof(XSaveXTILECFG) != 0x40);
QEMU_BUILD_BUG_ON(sizeof(XSaveXTILEDATA) != 0x2000);
QEMU_BUILD_BUG_ON(sizeof(XSavesArchLBR) != 0x328);

typedef struct ExtSaveArea {
    uint32_t feature, bits;
    uint32_t offset, size;
    uint32_t ecx;
} ExtSaveArea;

#define XSAVE_STATE_AREA_COUNT (XSTATE_XTILE_DATA_BIT + 1)

extern ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT];

typedef enum TPRAccess {
    TPR_ACCESS_READ,
    TPR_ACCESS_WRITE,
} TPRAccess;

/* Cache information data structures: */

enum CacheType {
    DATA_CACHE,
    INSTRUCTION_CACHE,
    UNIFIED_CACHE
};

typedef struct CPUCacheInfo {
    enum CacheType type;
    uint8_t level;
    /* Size in bytes */
    uint32_t size;
    /* Line size, in bytes */
    uint16_t line_size;
    /*
     * Associativity.
     * Note: representation of fully-associative caches is not implemented
     */
    uint8_t associativity;
    /* Physical line partitions. CPUID[0x8000001D].EBX, CPUID[4].EBX */
    uint8_t partitions;
    /* Number of sets. CPUID[0x8000001D].ECX, CPUID[4].ECX */
    uint32_t sets;
    /*
     * Lines per tag.
     * AMD-specific: CPUID[0x80000005], CPUID[0x80000006].
     * (Is this a synonym for @partitions?)
     */
    uint8_t lines_per_tag;

    /* Self-initializing cache */
    bool self_init;
    /*
     * WBINVD/INVD is not guaranteed to act upon lower level caches of
     * non-originating threads sharing this cache.
     * CPUID[4].EDX[bit 0], CPUID[0x8000001D].EDX[bit 0]
     */
    bool no_invd_sharing;
    /*
     * Cache is inclusive of lower cache levels.
     * CPUID[4].EDX[bit 1], CPUID[0x8000001D].EDX[bit 1].
     */
    bool inclusive;
    /*
     * A complex function is used to index the cache, potentially using all
     * address bits. CPUID[4].EDX[bit 2].
     */
    bool complex_indexing;
} CPUCacheInfo;
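
/*
 * Illustrative sketch (hypothetical helper): for the set-associative
 * caches described above, the total size should equal the product of the
 * geometry fields, a handy consistency check when filling in CPUID data.
 */
static inline uint32_t cpu_cache_geometry_size(const CPUCacheInfo *c)
{
    return c->line_size * c->associativity * c->partitions * c->sets;
}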

typedef struct CPUCaches {
    CPUCacheInfo *l1d_cache;
    CPUCacheInfo *l1i_cache;
    CPUCacheInfo *l2_cache;
    CPUCacheInfo *l3_cache;
} CPUCaches;

typedef struct HVFX86LazyFlags {
    target_ulong result;
    target_ulong auxbits;
} HVFX86LazyFlags;
typedef struct CPUArchState {
    /* standard registers */
    target_ulong regs[CPU_NB_REGS];
    target_ulong eip;
    target_ulong eflags; /* eflags register. During CPU emulation, CC
                            flags and DF are set to zero because they are
                            stored elsewhere */

    /* emulator internal eflags handling */
    target_ulong cc_dst;
    target_ulong cc_src;
    target_ulong cc_src2;
    uint32_t cc_op;
    int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */
    uint32_t hflags; /* TB flags, see HF_xxx constants. These flags
                        are known at translation time. */
    uint32_t hflags2; /* various other flags, see HF2_xxx constants. */

    /* segments */
    SegmentCache segs[6]; /* selector values */
    SegmentCache ldt;
    SegmentCache tr;
    SegmentCache gdt; /* only base and limit are used */
    SegmentCache idt; /* only base and limit are used */

    target_ulong cr[5]; /* NOTE: cr1 is unused */

    bool pdptrs_valid;
    uint64_t pdptrs[4];
    int32_t a20_mask;

    BNDReg bnd_regs[4];
    BNDCSReg bndcs_regs;
    uint64_t msr_bndcfgs;
    uint64_t efer;

    /* Beginning of state preserved by INIT (dummy marker). */
    struct {} start_init_save;

    /* FPU state */
    unsigned int fpstt; /* top of stack index */
    uint16_t fpus;
    uint16_t fpuc;
    uint8_t fptags[8];  /* 0 = valid, 1 = empty */
    FPReg fpregs[8];
    /* KVM-only so far */
    uint16_t fpop;
    uint16_t fpcs;
    uint16_t fpds;
    uint64_t fpip;
    uint64_t fpdp;

    /* emulator internal variables */
    float_status fp_status;
    floatx80 ft0;

    float_status mmx_status; /* for 3DNow! float ops */
    float_status sse_status;
    uint32_t mxcsr;
    ZMMReg xmm_regs[CPU_NB_REGS == 8 ? 8 : 32] QEMU_ALIGNED(16);
    ZMMReg xmm_t0 QEMU_ALIGNED(16);
    MMXReg mmx_t0;

    uint64_t opmask_regs[NB_OPMASK_REGS];
#ifdef TARGET_X86_64
    uint8_t xtilecfg[64];
    uint8_t xtiledata[8192];
#endif

    /* sysenter registers */
    uint32_t sysenter_cs;
    target_ulong sysenter_esp;
    target_ulong sysenter_eip;
    uint64_t star;

    uint64_t vm_hsave;

#ifdef TARGET_X86_64
    target_ulong lstar;
    target_ulong cstar;
    target_ulong fmask;
    target_ulong kernelgsbase;
#endif

    uint64_t tsc_adjust;
    uint64_t tsc_deadline;
    uint64_t tsc_aux;

    uint64_t xcr0;

    uint64_t mcg_status;
    uint64_t msr_ia32_misc_enable;
    uint64_t msr_ia32_feature_control;
    uint64_t msr_ia32_sgxlepubkeyhash[4];

    uint64_t msr_fixed_ctr_ctrl;
    uint64_t msr_global_ctrl;
    uint64_t msr_global_status;
    uint64_t msr_global_ovf_ctrl;
    uint64_t msr_fixed_counters[MAX_FIXED_COUNTERS];
    uint64_t msr_gp_counters[MAX_GP_COUNTERS];
    uint64_t msr_gp_evtsel[MAX_GP_COUNTERS];

    uint64_t pat;
    uint32_t smbase;
    uint64_t msr_smi_count;

    uint32_t pkru;
    uint32_t pkrs;
    uint32_t tsx_ctrl;

    uint64_t spec_ctrl;
    uint64_t amd_tsc_scale_msr;
    uint64_t virt_ssbd;

    /* End of state preserved by INIT (dummy marker). */
    struct {} end_init_save;

    uint64_t system_time_msr;
    uint64_t wall_clock_msr;
    uint64_t steal_time_msr;
    uint64_t async_pf_en_msr;
    uint64_t async_pf_int_msr;
    uint64_t pv_eoi_en_msr;
    uint64_t poll_control_msr;

    /* Partition-wide HV MSRs, will be updated only on the first vcpu */
    uint64_t msr_hv_hypercall;
    uint64_t msr_hv_guest_os_id;
    uint64_t msr_hv_tsc;
    uint64_t msr_hv_syndbg_control;
    uint64_t msr_hv_syndbg_status;
    uint64_t msr_hv_syndbg_send_page;
    uint64_t msr_hv_syndbg_recv_page;
    uint64_t msr_hv_syndbg_pending_page;
    uint64_t msr_hv_syndbg_options;

    /* Per-VCPU HV MSRs */
    uint64_t msr_hv_vapic;
    uint64_t msr_hv_crash_params[HV_CRASH_PARAMS];
    uint64_t msr_hv_runtime;
    uint64_t msr_hv_synic_control;
    uint64_t msr_hv_synic_evt_page;
    uint64_t msr_hv_synic_msg_page;
    uint64_t msr_hv_synic_sint[HV_SINT_COUNT];
    uint64_t msr_hv_stimer_config[HV_STIMER_COUNT];
    uint64_t msr_hv_stimer_count[HV_STIMER_COUNT];
    uint64_t msr_hv_reenlightenment_control;
    uint64_t msr_hv_tsc_emulation_control;
    uint64_t msr_hv_tsc_emulation_status;

    uint64_t msr_rtit_ctrl;
    uint64_t msr_rtit_status;
    uint64_t msr_rtit_output_base;
    uint64_t msr_rtit_output_mask;
    uint64_t msr_rtit_cr3_match;
    uint64_t msr_rtit_addrs[MAX_RTIT_ADDRS];

    /* Per-VCPU XFD MSRs */
    uint64_t msr_xfd;
    uint64_t msr_xfd_err;

    /* Per-VCPU Arch LBR MSRs */
    uint64_t msr_lbr_ctl;
    uint64_t msr_lbr_depth;
    LBREntry lbr_records[ARCH_LBR_NR_ENTRIES];

    /* exception/interrupt handling */
    int error_code;
    int exception_is_int;
    target_ulong exception_next_eip;
    target_ulong dr[8]; /* debug registers; note dr4 and dr5 are unused */
    union {
        struct CPUBreakpoint *cpu_breakpoint[4];
        struct CPUWatchpoint *cpu_watchpoint[4];
    }; /* break/watchpoints for dr[0..3] */
    int old_exception; /* exception in flight */

    uint64_t vm_vmcb;
    uint64_t tsc_offset;
    uint64_t intercept;
    uint16_t intercept_cr_read;
    uint16_t intercept_cr_write;
    uint16_t intercept_dr_read;
    uint16_t intercept_dr_write;
    uint32_t intercept_exceptions;
    uint64_t nested_cr3;
    uint32_t nested_pg_mode;
    uint8_t v_tpr;
    uint32_t int_ctl;

    /* KVM states, automatically cleared on reset */
    uint8_t nmi_injected;
    uint8_t nmi_pending;

    uintptr_t retaddr;

    /* Fields up to this point are cleared by a CPU reset */
    struct {} end_reset_fields;

    /* Fields after this point are preserved across CPU reset. */

    /* processor features (e.g. for CPUID insn) */
    /* Actual cpuid leaf 7 value */
    uint32_t cpuid_level_func7;
    /* Minimum cpuid leaf 7 value */
    uint32_t cpuid_min_level_func7;
    /* Minimum level/xlevel/xlevel2, based on CPU model + features */
    uint32_t cpuid_min_level, cpuid_min_xlevel, cpuid_min_xlevel2;
    /* Maximum level/xlevel/xlevel2 value for auto-assignment: */
    uint32_t cpuid_max_level, cpuid_max_xlevel, cpuid_max_xlevel2;
    /* Actual level/xlevel/xlevel2 value: */
    uint32_t cpuid_level, cpuid_xlevel, cpuid_xlevel2;
    uint32_t cpuid_vendor1;
    uint32_t cpuid_vendor2;
    uint32_t cpuid_vendor3;
    uint32_t cpuid_version;
    FeatureWordArray features;
    /* Features that were explicitly enabled/disabled */
    FeatureWordArray user_features;
    uint32_t cpuid_model[12];
    /* Cache information for CPUID. When legacy-cache=on, the cache data
     * on each CPUID leaf will be different, because we keep compatibility
     * with old QEMU versions.
     */
    CPUCaches cache_info_cpuid2, cache_info_cpuid4, cache_info_amd;
    /* MTRRs */
    uint64_t mtrr_fixed[11];
    uint64_t mtrr_deftype;
    MTRRVar mtrr_var[MSR_MTRRcap_VCNT];

    /* For KVM */
    uint32_t mp_state;
    int32_t exception_nr;
    int32_t interrupt_injected;
    uint8_t soft_interrupt;
    uint8_t exception_pending;
    uint8_t exception_injected;
    uint8_t has_error_code;
    uint8_t exception_has_payload;
    uint64_t exception_payload;
    uint8_t triple_fault_pending;
    uint32_t ins_len;
    uint32_t sipi_vector;
    bool tsc_valid;
    int64_t tsc_khz;
    int64_t user_tsc_khz; /* for sanity check only */
    uint64_t apic_bus_freq;
    uint64_t tsc;
#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
    void *xsave_buf;
    uint32_t xsave_buf_len;
#endif
#if defined(CONFIG_KVM)
    struct kvm_nested_state *nested_state;
#endif
#if defined(CONFIG_HVF)
    HVFX86LazyFlags hvf_lflags;
    void *hvf_mmio_buf;
#endif

    uint64_t mcg_cap;
    uint64_t mcg_ctl;
    uint64_t mcg_ext_ctl;
    uint64_t mce_banks[MCE_BANKS_DEF * 4];
    uint64_t xstate_bv;

    /* vmstate */
    uint16_t fpus_vmstate;
    uint16_t fptag_vmstate;
    uint16_t fpregs_format_vmstate;

    uint64_t xss;
    uint32_t umwait;

    TPRAccess tpr_access_type;

    unsigned nr_dies;
} CPUX86State;
struct kvm_msrs;

/**
 * X86CPU:
 * @env: #CPUX86State
 * @migratable: If set, only migratable flags will be accepted when "enforce"
 * mode is used, and only migratable flags will be included in the "host"
 * CPU model.
 *
 * An x86 CPU.
 */
struct ArchCPU {
    /*< private >*/
    CPUState parent_obj;
    /*< public >*/

    CPUNegativeOffsetState neg;
    CPUX86State env;
    VMChangeStateEntry *vmsentry;

    uint64_t ucode_rev;

    uint32_t hyperv_spinlock_attempts;
    char *hyperv_vendor;
    bool hyperv_synic_kvm_only;
    uint64_t hyperv_features;
    bool hyperv_passthrough;
    OnOffAuto hyperv_no_nonarch_cs;
    uint32_t hyperv_vendor_id[3];
    uint32_t hyperv_interface_id[4];
    uint32_t hyperv_limits[3];
    bool hyperv_enforce_cpuid;
    uint32_t hyperv_ver_id_build;
    uint16_t hyperv_ver_id_major;
    uint16_t hyperv_ver_id_minor;
    uint32_t hyperv_ver_id_sp;
    uint8_t hyperv_ver_id_sb;
    uint32_t hyperv_ver_id_sn;

    bool check_cpuid;
    bool enforce_cpuid;
    /*
     * Force features to be enabled even if the host doesn't support them.
     * This is dangerous and should be done only for testing CPUID
     * compatibility.
     */
    bool force_features;
    bool expose_kvm;
    bool expose_tcg;
    bool migratable;
    bool migrate_smi_count;
    bool max_features; /* Enable all supported features automatically */
    uint32_t apic_id;

    /* Enables publishing of TSC increment and Local APIC bus frequencies to
     * the guest OS in CPUID page 0x40000010, the same way that VMWare does. */
    bool vmware_cpuid_freq;

    /* if true, the CPUID code directly forwards host cache leaves to the guest */
    bool cache_info_passthrough;

    /* if true, the CPUID code directly forwards
     * host monitor/mwait leaves to the guest */
    struct {
        uint32_t eax;
        uint32_t ebx;
        uint32_t ecx;
        uint32_t edx;
    } mwait;
    /* Features that were filtered out because of missing host capabilities */
    FeatureWordArray filtered_features;

    /* Enable PMU CPUID bits. This can't be enabled by default yet because
     * it doesn't have ABI stability guarantees, as it passes all PMU CPUID
     * bits returned by GET_SUPPORTED_CPUID (that depend on host CPU and kernel
     * capabilities) directly to the guest.
     */
    bool enable_pmu;

    /*
     * Enable LBR_FMT bits of IA32_PERF_CAPABILITIES MSR.
     * This can't be initialized with a default because it doesn't have
     * stable ABI support yet. It is only allowed to pass all LBR_FMT bits
     * returned by kvm_arch_get_supported_msr_feature() (which depends on both
     * host CPU and kernel capabilities) to the guest.
     */
    uint64_t lbr_fmt;

    /* LMCE support can be enabled/disabled via cpu option 'lmce=on/off'. It is
     * disabled by default to avoid breaking migration between QEMU with
     * different LMCE configurations.
     */
    bool enable_lmce;

    /* Compatibility bits for old machine types.
     * If true, present a virtual L3 cache to the VM; vCPUs in the same
     * virtual socket share a virtual L3 cache.
     */
    bool enable_l3_cache;

    /* Compatibility bits for old machine types.
     * If true, present the old cache topology information.
     */
    bool legacy_cache;

    /* Compatibility bits for old machine types: */
    bool enable_cpuid_0xb;

    /* Enable auto level-increase for all CPUID leaves */
    bool full_cpuid_auto_level;

    /* Only advertise CPUID leaves defined by the vendor */
    bool vendor_cpuid_only;

    /* Enable auto level-increase for the Intel Processor Trace leaf */
    bool intel_pt_auto_level;

    /* if true, fill the top bits of the MTRR_PHYSMASKn variable range */
    bool fill_mtrr_mask;

    /* if true, override the phys_bits value with a value read from the host */
    bool host_phys_bits;

    /* if set, limit maximum value for phys_bits when host_phys_bits is true */
    uint8_t host_phys_bits_limit;

    /* Stop SMI delivery for migration compatibility with old machines */
    bool kvm_no_smi_migration;

    /* Forcefully disable KVM PV features not exposed in guest CPUIDs */
    bool kvm_pv_enforce_cpuid;

    /* Number of physical address bits supported */
    uint32_t phys_bits;

    /* in order to simplify APIC support, we leave this pointer to the
       user */
    struct DeviceState *apic_state;
    struct MemoryRegion *cpu_as_root, *cpu_as_mem, *smram;
    Notifier machine_done;

    struct kvm_msrs *kvm_msr_buf;

    int32_t node_id; /* NUMA node this CPU belongs to */
    int32_t socket_id;
    int32_t die_id;
    int32_t core_id;
    int32_t thread_id;

    int32_t hv_max_vps;
};

#ifndef CONFIG_USER_ONLY
extern const VMStateDescription vmstate_x86_cpu;
#endif

int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request);

int x86_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
                             int cpuid, DumpState *s);
int x86_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
                             int cpuid, DumpState *s);
int x86_cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                                 DumpState *s);
int x86_cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                                 DumpState *s);

void x86_cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
                                Error **errp);

void x86_cpu_dump_state(CPUState *cs, FILE *f, int flags);

hwaddr x86_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
                                         MemTxAttrs *attrs);

int x86_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
int x86_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);

void x86_cpu_list(void);
int cpu_x86_support_mca_broadcast(CPUX86State *env);

#ifndef CONFIG_USER_ONLY
int cpu_get_pic_interrupt(CPUX86State *s);

/* MSDOS compatibility mode FPU exception support */
void x86_register_ferr_irq(qemu_irq irq);
void fpu_check_raise_ferr_irq(CPUX86State *s);
void cpu_set_ignne(void);
void cpu_clear_ignne(void);
#endif

/* mpx_helper.c */
void cpu_sync_bndcs_hflags(CPUX86State *env);
/* this function must always be used to load data in the segment
   cache: it synchronizes the hflags with the segment cache values */
static inline void cpu_x86_load_seg_cache(CPUX86State *env,
                                          X86Seg seg_reg, unsigned int selector,
                                          target_ulong base,
                                          unsigned int limit,
                                          unsigned int flags)
{
    SegmentCache *sc;
    unsigned int new_hflags;

    sc = &env->segs[seg_reg];
    sc->selector = selector;
    sc->base = base;
    sc->limit = limit;
    sc->flags = flags;

    /* update the hidden flags */
    {
        if (seg_reg == R_CS) {
#ifdef TARGET_X86_64
            if ((env->hflags & HF_LMA_MASK) && (flags & DESC_L_MASK)) {
                /* long mode */
                env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
                env->hflags &= ~(HF_ADDSEG_MASK);
            } else
#endif
            {
                /* legacy / compatibility case */
                new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
                    >> (DESC_B_SHIFT - HF_CS32_SHIFT);
                env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) |
                    new_hflags;
            }
        }
        if (seg_reg == R_SS) {
            int cpl = (flags >> DESC_DPL_SHIFT) & 3;
#if HF_CPL_MASK != 3
#error HF_CPL_MASK is hardcoded
#endif
            env->hflags = (env->hflags & ~HF_CPL_MASK) | cpl;
            /* Possibly switch between BNDCFGS and BNDCFGU */
            cpu_sync_bndcs_hflags(env);
        }
        new_hflags = (env->segs[R_SS].flags & DESC_B_MASK)
            >> (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (env->hflags & HF_CS64_MASK) {
            /* zero base assumed for DS, ES and SS in long mode */
        } else if (!(env->cr[0] & CR0_PE_MASK) ||
                   (env->eflags & VM_MASK) ||
                   !(env->hflags & HF_CS32_MASK)) {
            /* XXX: try to avoid this test. The problem comes from the
               fact that in real mode or vm86 mode we only modify the
               'base' and 'selector' fields of the segment cache to go
               faster. A solution may be to force addseg to one in
               translate-i386.c. */
            new_hflags |= HF_ADDSEG_MASK;
        } else {
            new_hflags |= ((env->segs[R_DS].base |
                            env->segs[R_ES].base |
                            env->segs[R_SS].base) != 0) <<
                HF_ADDSEG_SHIFT;
        }
        env->hflags = (env->hflags &
                       ~(HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags;
    }
}
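
/*
 * Illustrative usage sketch (not part of the original header): real-mode
 * segment loads use base = selector << 4; the descriptor flags below are
 * an assumed minimal present/writable data segment, chosen for the example:
 *
 *     cpu_x86_load_seg_cache(env, R_DS, 0x1000,
 *                            (target_ulong)0x1000 << 4, 0xffff,
 *                            DESC_P_MASK | DESC_S_MASK |
 *                            DESC_W_MASK | DESC_A_MASK);
 *
 * Routing every segment write through this helper is what keeps the
 * HF_CS32/HF_SS32/HF_ADDSEG summary bits consistent with the caches.
 */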
static inline void cpu_x86_load_seg_cache_sipi(X86CPU *cpu,
                                               uint8_t sipi_vector)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;

    env->eip = 0;
    cpu_x86_load_seg_cache(env, R_CS, sipi_vector << 8,
                           sipi_vector << 12,
                           env->segs[R_CS].limit,
                           env->segs[R_CS].flags);
    cs->halted = 0;
}
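
/*
 * Worked example (illustrative): a SIPI with vector 0x10 yields
 * CS.selector = 0x1000, CS.base = 0x10000 and EIP = 0, so the freshly
 * woken AP starts fetching at physical address 0x10000, matching real
 * hardware startup-IPI behaviour.
 */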
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags);

/* op_helper.c */
/* used for debug or cpu save/restore */

/* cpu-exec.c */
/* the following helpers are only usable in user mode simulation as
   they can trigger unexpected exceptions */
void cpu_x86_load_seg(CPUX86State *s, X86Seg seg_reg, int selector);
void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32);
void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32);
void cpu_x86_fxsave(CPUX86State *s, target_ulong ptr);
void cpu_x86_fxrstor(CPUX86State *s, target_ulong ptr);
void cpu_x86_xsave(CPUX86State *s, target_ulong ptr);
void cpu_x86_xrstor(CPUX86State *s, target_ulong ptr);

/* cpu.c */
void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
                              uint32_t vendor2, uint32_t vendor3);
typedef struct PropValue {
    const char *prop, *value;
} PropValue;
void x86_cpu_apply_props(X86CPU *cpu, PropValue *props);
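
/*
 * Illustrative sketch (not part of the original header):
 * x86_cpu_apply_props() consumes a list terminated by a NULL prop name,
 * e.g. to force a named property off:
 *
 *     static PropValue example_props[] = {
 *         { "svm", "off" },
 *         { NULL, NULL },
 *     };
 *     x86_cpu_apply_props(cpu, example_props);
 */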
void x86_cpu_after_reset(X86CPU *cpu);

uint32_t cpu_x86_virtual_addr_width(CPUX86State *env);

/* cpu.c other functions (cpuid) */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx);
void cpu_clear_apic_feature(CPUX86State *env);
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx);

/* helper.c */
void x86_cpu_set_a20(X86CPU *cpu, int a20_state);
void cpu_sync_avx_hflag(CPUX86State *env);
#ifndef CONFIG_USER_ONLY
static inline int x86_asidx_from_attrs(CPUState *cs, MemTxAttrs attrs)
{
    return !!attrs.secure;
}

static inline AddressSpace *cpu_addressspace(CPUState *cs, MemTxAttrs attrs)
{
    return cpu_get_address_space(cs, cpu_asidx_from_attrs(cs, attrs));
}
/*
 * load efer and update the corresponding hflags. XXX: do consistency
 * checks with cpuid bits?
 */
void cpu_load_efer(CPUX86State *env, uint64_t val);
uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr);
uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr);
uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr);
uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr);
void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val);
void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val);
void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val);
void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val);
void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val);
#endif
/* will be suppressed */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
void cpu_x86_update_dr7(CPUX86State *env, uint32_t new_dr7);

/* hw/pc.c */
uint64_t cpu_get_tsc(CPUX86State *env);

#define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
#define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
#define CPU_RESOLVING_TYPE TYPE_X86_CPU

#ifdef TARGET_X86_64
#define TARGET_DEFAULT_CPU_TYPE X86_CPU_TYPE_NAME("qemu64")
#else
#define TARGET_DEFAULT_CPU_TYPE X86_CPU_TYPE_NAME("qemu32")
#endif
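
/*
 * Illustrative (not part of the original header): on 64-bit targets,
 * where TYPE_X86_CPU is "x86_64-cpu", string-literal concatenation makes
 * X86_CPU_TYPE_NAME("qemu64") expand to the QOM typename
 * "qemu64-x86_64-cpu".
 */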
#define cpu_list x86_cpu_list

/* MMU modes definitions */
#define MMU_KSMAP_IDX   0
#define MMU_USER_IDX    1
#define MMU_KNOSMAP_IDX 2
#define MMU_NESTED_IDX  3
#define MMU_PHYS_IDX    4

static inline int cpu_mmu_index(CPUX86State *env, bool ifetch)
{
    return (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER_IDX :
        (!(env->hflags & HF_SMAP_MASK) || (env->eflags & AC_MASK))
        ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX;
}

static inline int cpu_mmu_index_kernel(CPUX86State *env)
{
    return !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP_IDX :
        ((env->hflags & HF_CPL_MASK) < 3 && (env->eflags & AC_MASK))
        ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX;
}
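
/*
 * Illustrative (not part of the original header): with SMAP enabled
 * (HF_SMAP_MASK set), ring-0 code running with EFLAGS.AC clear translates
 * through MMU_KSMAP_IDX, so stray accesses to user pages fault; executing
 * stac (which sets EFLAGS.AC) moves the same code to MMU_KNOSMAP_IDX,
 * where such accesses are permitted.
 */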
#define CC_DST  (env->cc_dst)
#define CC_SRC  (env->cc_src)
#define CC_SRC2 (env->cc_src2)
#define CC_OP   (env->cc_op)

#include "exec/cpu-all.h"
#include "svm.h"

#if !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif

static inline void cpu_get_tb_cpu_state(CPUX86State *env, target_ulong *pc,
                                        target_ulong *cs_base, uint32_t *flags)
{
    *cs_base = env->segs[R_CS].base;
    *pc = *cs_base + env->eip;
    *flags = env->hflags |
        (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK));
}
void do_cpu_init(X86CPU *cpu);
void do_cpu_sipi(X86CPU *cpu);

#define MCE_INJECT_BROADCAST 1
#define MCE_INJECT_UNCOND_AO 2

void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags);

uint32_t cpu_cc_compute_all(CPUX86State *env1, int op);

static inline uint32_t cpu_compute_eflags(CPUX86State *env)
{
    uint32_t eflags = env->eflags;
    if (tcg_enabled()) {
        eflags |= cpu_cc_compute_all(env, CC_OP) | (env->df & DF_MASK);
    }
    return eflags;
}
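
/*
 * Illustrative sketch (not part of the original header): under TCG the
 * arithmetic flags and DF live in cc_dst/cc_src/cc_op and df rather than
 * in env->eflags, so code wanting one of those bits must materialize the
 * full register first:
 */
static inline bool x86_eflags_cf_example(CPUX86State *env)
{
    /* hypothetical helper, for illustration only */
    return (cpu_compute_eflags(env) & CC_C) != 0;
}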
static inline MemTxAttrs cpu_get_mem_attrs(CPUX86State *env)
{
    return ((MemTxAttrs) { .secure = (env->hflags & HF_SMM_MASK) != 0 });
}

static inline int32_t x86_get_a20_mask(CPUX86State *env)
{
    if (env->hflags & HF_SMM_MASK) {
        return -1;
    } else {
        return env->a20_mask;
    }
}

static inline bool cpu_has_vmx(CPUX86State *env)
{
    return env->features[FEAT_1_ECX] & CPUID_EXT_VMX;
}

static inline bool cpu_has_svm(CPUX86State *env)
{
    return env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM;
}
/*
 * In order for a vCPU to enter VMX operation it must have CR4.VMXE set.
 * Since it was set, CR4.VMXE must remain set as long as the vCPU is in
 * VMX operation. This is because CR4.VMXE is one of the bits set
 * in MSR_IA32_VMX_CR4_FIXED1.
 *
 * There is one exception to the above statement when the vCPU enters SMM
 * mode. When a vCPU enters SMM mode, it temporarily exits VMX operation and
 * may also reset CR4.VMXE during execution in SMM mode.
 * When the vCPU exits SMM mode, vCPU state is restored to be in VMX operation
 * and CR4.VMXE is restored to its original value of being set.
 *
 * Therefore, when the vCPU is not in SMM mode, we can infer whether
 * VMX is being used by examining CR4.VMXE. Otherwise, we cannot
 * know for certain.
 */
static inline bool cpu_vmx_maybe_enabled(CPUX86State *env)
{
    return cpu_has_vmx(env) &&
        ((env->cr[4] & CR4_VMXE_MASK) || (env->hflags & HF_SMM_MASK));
}
/* excp_helper.c */
int get_pg_mode(CPUX86State *env);

/* fpu_helper.c */
void update_fp_status(CPUX86State *env);
void update_mxcsr_status(CPUX86State *env);
void update_mxcsr_from_sse_status(CPUX86State *env);

static inline void cpu_set_mxcsr(CPUX86State *env, uint32_t mxcsr)
{
    env->mxcsr = mxcsr;
    if (tcg_enabled()) {
        update_mxcsr_status(env);
    }
}

static inline void cpu_set_fpuc(CPUX86State *env, uint16_t fpuc)
{
    env->fpuc = fpuc;
    if (tcg_enabled()) {
        update_fp_status(env);
    }
}
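
/*
 * Illustrative sketch (not part of the original header): resetting the
 * x87 and SSE control state to the architectural power-on defaults goes
 * through these wrappers so TCG's cached rounding/exception state stays
 * in sync with env:
 */
static inline void x86_fp_ctrl_defaults_example(CPUX86State *env)
{
    /* hypothetical helper, for illustration only */
    cpu_set_fpuc(env, 0x037f);  /* x87 default control word */
    cpu_set_mxcsr(env, 0x1f80); /* MXCSR default, all exceptions masked */
}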
/* mem_helper.c */
void helper_lock_init(void);

/* svm_helper.c */
#ifdef CONFIG_USER_ONLY
static inline void
cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type,
                              uint64_t param, uintptr_t retaddr)
{ /* no-op */ }
static inline bool
cpu_svm_has_intercept(CPUX86State *env, uint32_t type)
{ return false; }
#else
void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type,
                                   uint64_t param, uintptr_t retaddr);
bool cpu_svm_has_intercept(CPUX86State *env, uint32_t type);
#endif

/* apic.c */
void cpu_report_tpr_access(CPUX86State *env, TPRAccess access);
void apic_handle_tpr_access_report(DeviceState *d, target_ulong ip,
                                   TPRAccess access);
/* Special values for X86CPUVersion: */

/* Resolve to latest CPU version */
#define CPU_VERSION_LATEST -1

/*
 * Resolve to version defined by current machine type.
 * See x86_cpu_set_default_version()
 */
#define CPU_VERSION_AUTO -2

/* Don't resolve to any versioned CPU models, like old QEMU versions */
#define CPU_VERSION_LEGACY 0

typedef int X86CPUVersion;

/*
 * Set default CPU model version for CPU models having
 * version == CPU_VERSION_AUTO.
 */
void x86_cpu_set_default_version(X86CPUVersion version);
#define APIC_DEFAULT_ADDRESS 0xfee00000
#define APIC_SPACE_SIZE      0x100000

/* cpu-dump.c */
void x86_cpu_dump_local_apic_state(CPUState *cs, int flags);

/* cpu.c */
bool cpu_is_bsp(X86CPU *cpu);

void x86_cpu_xrstor_all_areas(X86CPU *cpu, const void *buf, uint32_t buflen);
void x86_cpu_xsave_all_areas(X86CPU *cpu, void *buf, uint32_t buflen);
uint32_t xsave_area_size(uint64_t mask, bool compacted);
void x86_update_hflags(CPUX86State *env);
static inline bool hyperv_feat_enabled(X86CPU *cpu, int feat)
{
    return !!(cpu->hyperv_features & BIT(feat));
}
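
/*
 * Illustrative (not part of the original header): callers test a single
 * enlightenment by its feature bit index, e.g.
 *
 *     if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
 *         set up the SynIC message/event pages
 *     }
 *
 * where HYPERV_FEAT_SYNIC stands for one of the feature bit indexes
 * defined elsewhere in this header.
 */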
static inline uint64_t cr4_reserved_bits(CPUX86State *env)
{
    uint64_t reserved_bits = CR4_RESERVED_MASK;
    if (!env->features[FEAT_XSAVE]) {
        reserved_bits |= CR4_OSXSAVE_MASK;
    }
    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMEP)) {
        reserved_bits |= CR4_SMEP_MASK;
    }
    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
        reserved_bits |= CR4_SMAP_MASK;
    }
    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE)) {
        reserved_bits |= CR4_FSGSBASE_MASK;
    }
    if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKU)) {
        reserved_bits |= CR4_PKE_MASK;
    }
    if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57)) {
        reserved_bits |= CR4_LA57_MASK;
    }
    if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_UMIP)) {
        reserved_bits |= CR4_UMIP_MASK;
    }
    if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKS)) {
        reserved_bits |= CR4_PKS_MASK;
    }
    return reserved_bits;
}
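
/*
 * Illustrative (not part of the original header): a MOV-to-CR4 check can
 * reject any bit this function reports as reserved for the current
 * feature set, e.g. CR4.PKE without CPUID PKU:
 *
 *     if (new_cr4 & cr4_reserved_bits(env)) {
 *         raise #GP(0)
 *     }
 */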
static inline bool ctl_has_irq(CPUX86State *env)
{
    uint32_t int_prio;
    uint32_t tpr;

    int_prio = (env->int_ctl & V_INTR_PRIO_MASK) >> V_INTR_PRIO_SHIFT;
    tpr = env->int_ctl & V_TPR_MASK;

    if (env->int_ctl & V_IGN_TPR_MASK) {
        return (env->int_ctl & V_IRQ_MASK);
    }

    return (env->int_ctl & V_IRQ_MASK) && (int_prio >= tpr);
}
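
/*
 * Worked example (illustrative): with V_IGN_TPR clear, a pending virtual
 * interrupt of priority 5 is deliverable only while the guest's V_TPR is
 * 5 or lower; raising V_TPR to 6 holds it back, since delivery requires
 * int_prio >= tpr.
 */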
#if defined(TARGET_X86_64) && \
    defined(CONFIG_USER_ONLY) && \
    defined(CONFIG_LINUX)
# define TARGET_VSYSCALL_PAGE (UINT64_C(-10) << 20)
#endif

#endif /* I386_CPU_H */