/*
 * i386 virtual CPU header
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef I386_CPU_H
#define I386_CPU_H

#include "qemu-common.h"
#include "cpu-qom.h"
#include "standard-headers/asm-x86/hyperv.h"

#ifdef TARGET_X86_64
#define TARGET_LONG_BITS 64
#else
#define TARGET_LONG_BITS 32
#endif

/* Maximum instruction code size */
#define TARGET_MAX_INSN_SIZE 16

/* support for self modifying code even if the modified instruction is
   close to the modifying instruction */
#define TARGET_HAS_PRECISE_SMC

#ifdef TARGET_X86_64
#define I386_ELF_MACHINE  EM_X86_64
#define ELF_MACHINE_UNAME "x86_64"
#else
#define I386_ELF_MACHINE  EM_386
#define ELF_MACHINE_UNAME "i686"
#endif

#define CPUArchState struct CPUX86State

#include "exec/cpu-defs.h"

#include "fpu/softfloat.h"

#define R_EAX 0
#define R_ECX 1
#define R_EDX 2
#define R_EBX 3
#define R_ESP 4
#define R_EBP 5
#define R_ESI 6
#define R_EDI 7

#define R_AL 0
#define R_CL 1
#define R_DL 2
#define R_BL 3
#define R_AH 4
#define R_CH 5
#define R_DH 6
#define R_BH 7

#define R_ES 0
#define R_CS 1
#define R_SS 2
#define R_DS 3
#define R_FS 4
#define R_GS 5

/* segment descriptor fields */
#define DESC_G_MASK     (1 << 23)
#define DESC_B_SHIFT    22
#define DESC_B_MASK     (1 << DESC_B_SHIFT)
#define DESC_L_SHIFT    21 /* x86_64 only : 64 bit code segment */
#define DESC_L_MASK     (1 << DESC_L_SHIFT)
#define DESC_AVL_MASK   (1 << 20)
#define DESC_P_MASK     (1 << 15)
#define DESC_DPL_SHIFT  13
#define DESC_DPL_MASK   (3 << DESC_DPL_SHIFT)
#define DESC_S_MASK     (1 << 12)
#define DESC_TYPE_SHIFT 8
#define DESC_TYPE_MASK  (15 << DESC_TYPE_SHIFT)
#define DESC_A_MASK     (1 << 8)

#define DESC_CS_MASK    (1 << 11) /* 1=code segment 0=data segment */
#define DESC_C_MASK     (1 << 10) /* code: conforming */
#define DESC_R_MASK     (1 << 9)  /* code: readable */

#define DESC_E_MASK     (1 << 10) /* data: expansion direction */
#define DESC_W_MASK     (1 << 9)  /* data: writable */

#define DESC_TSS_BUSY_MASK (1 << 9)

/* eflags masks */
#define CC_C 0x0001
#define CC_P 0x0004
#define CC_A 0x0010
#define CC_Z 0x0040
#define CC_S 0x0080
#define CC_O 0x0800

#define TF_SHIFT   8
#define IOPL_SHIFT 12
#define VM_SHIFT   17

#define TF_MASK   0x00000100
#define IF_MASK   0x00000200
#define DF_MASK   0x00000400
#define IOPL_MASK 0x00003000
#define NT_MASK   0x00004000
#define RF_MASK   0x00010000
#define VM_MASK   0x00020000
#define AC_MASK   0x00040000
#define VIF_MASK  0x00080000
#define VIP_MASK  0x00100000
#define ID_MASK   0x00200000

/* hidden flags - used internally by qemu to represent additional cpu
   states. Only the INHIBIT_IRQ, SMM and SVMI are not redundant. We
   avoid using the IOPL_MASK, TF_MASK, VM_MASK and AC_MASK bit
   positions to ease oring with eflags. */
/* current cpl */
#define HF_CPL_SHIFT         0
/* true if hardware interrupts must be disabled for next instruction */
#define HF_INHIBIT_IRQ_SHIFT 3
/* 16 or 32 segments */
#define HF_CS32_SHIFT        4
#define HF_SS32_SHIFT        5
/* zero base for DS, ES and SS : can be '0' only in 32 bit CS segment */
#define HF_ADDSEG_SHIFT      6
/* copy of CR0.PE (protected mode) */
#define HF_PE_SHIFT          7
#define HF_TF_SHIFT          8 /* must be same as eflags */
#define HF_MP_SHIFT          9 /* the order must be MP, EM, TS */
#define HF_EM_SHIFT         10
#define HF_TS_SHIFT         11
#define HF_IOPL_SHIFT       12 /* must be same as eflags */
#define HF_LMA_SHIFT        14 /* only used on x86_64: long mode active */
#define HF_CS64_SHIFT       15 /* only used on x86_64: 64 bit code segment */
#define HF_RF_SHIFT         16 /* must be same as eflags */
#define HF_VM_SHIFT         17 /* must be same as eflags */
#define HF_AC_SHIFT         18 /* must be same as eflags */
#define HF_SMM_SHIFT        19 /* CPU in SMM mode */
#define HF_SVME_SHIFT       20 /* SVME enabled (copy of EFER.SVME) */
#define HF_SVMI_SHIFT       21 /* SVM intercepts are active */
#define HF_OSFXSR_SHIFT     22 /* CR4.OSFXSR */
#define HF_SMAP_SHIFT       23 /* CR4.SMAP */
#define HF_IOBPT_SHIFT      24 /* an io breakpoint enabled */
#define HF_MPX_EN_SHIFT     25 /* MPX Enabled (CR4+XCR0+BNDCFGx) */
#define HF_MPX_IU_SHIFT     26 /* BND registers in-use */

#define HF_CPL_MASK         (3 << HF_CPL_SHIFT)
#define HF_INHIBIT_IRQ_MASK (1 << HF_INHIBIT_IRQ_SHIFT)
#define HF_CS32_MASK        (1 << HF_CS32_SHIFT)
#define HF_SS32_MASK        (1 << HF_SS32_SHIFT)
#define HF_ADDSEG_MASK      (1 << HF_ADDSEG_SHIFT)
#define HF_PE_MASK          (1 << HF_PE_SHIFT)
#define HF_TF_MASK          (1 << HF_TF_SHIFT)
#define HF_MP_MASK          (1 << HF_MP_SHIFT)
#define HF_EM_MASK          (1 << HF_EM_SHIFT)
#define HF_TS_MASK          (1 << HF_TS_SHIFT)
#define HF_IOPL_MASK        (3 << HF_IOPL_SHIFT)
#define HF_LMA_MASK         (1 << HF_LMA_SHIFT)
#define HF_CS64_MASK        (1 << HF_CS64_SHIFT)
#define HF_RF_MASK          (1 << HF_RF_SHIFT)
#define HF_VM_MASK          (1 << HF_VM_SHIFT)
#define HF_AC_MASK          (1 << HF_AC_SHIFT)
#define HF_SMM_MASK         (1 << HF_SMM_SHIFT)
#define HF_SVME_MASK        (1 << HF_SVME_SHIFT)
#define HF_SVMI_MASK        (1 << HF_SVMI_SHIFT)
#define HF_OSFXSR_MASK      (1 << HF_OSFXSR_SHIFT)
#define HF_SMAP_MASK        (1 << HF_SMAP_SHIFT)
#define HF_IOBPT_MASK       (1 << HF_IOBPT_SHIFT)
#define HF_MPX_EN_MASK      (1 << HF_MPX_EN_SHIFT)
#define HF_MPX_IU_MASK      (1 << HF_MPX_IU_SHIFT)

/* hflags2 */

#define HF2_GIF_SHIFT            0 /* if set CPU takes interrupts */
#define HF2_HIF_SHIFT            1 /* value of IF_MASK when entering SVM */
#define HF2_NMI_SHIFT            2 /* CPU serving NMI */
#define HF2_VINTR_SHIFT          3 /* value of V_INTR_MASKING bit */
#define HF2_SMM_INSIDE_NMI_SHIFT 4 /* CPU serving SMI nested inside NMI */
#define HF2_MPX_PR_SHIFT         5 /* BNDCFGx.BNDPRESERVE */

#define HF2_GIF_MASK            (1 << HF2_GIF_SHIFT)
#define HF2_HIF_MASK            (1 << HF2_HIF_SHIFT)
#define HF2_NMI_MASK            (1 << HF2_NMI_SHIFT)
#define HF2_VINTR_MASK          (1 << HF2_VINTR_SHIFT)
#define HF2_SMM_INSIDE_NMI_MASK (1 << HF2_SMM_INSIDE_NMI_SHIFT)
#define HF2_MPX_PR_MASK         (1 << HF2_MPX_PR_SHIFT)

#define CR0_PE_SHIFT 0
#define CR0_MP_SHIFT 1

#define CR0_PE_MASK (1U << 0)
#define CR0_MP_MASK (1U << 1)
#define CR0_EM_MASK (1U << 2)
#define CR0_TS_MASK (1U << 3)
#define CR0_ET_MASK (1U << 4)
#define CR0_NE_MASK (1U << 5)
#define CR0_WP_MASK (1U << 16)
#define CR0_AM_MASK (1U << 18)
#define CR0_PG_MASK (1U << 31)

#define CR4_VME_MASK        (1U << 0)
#define CR4_PVI_MASK        (1U << 1)
#define CR4_TSD_MASK        (1U << 2)
#define CR4_DE_MASK         (1U << 3)
#define CR4_PSE_MASK        (1U << 4)
#define CR4_PAE_MASK        (1U << 5)
#define CR4_MCE_MASK        (1U << 6)
#define CR4_PGE_MASK        (1U << 7)
#define CR4_PCE_MASK        (1U << 8)
#define CR4_OSFXSR_SHIFT    9
#define CR4_OSFXSR_MASK     (1U << CR4_OSFXSR_SHIFT)
#define CR4_OSXMMEXCPT_MASK (1U << 10)
#define CR4_VMXE_MASK       (1U << 13)
#define CR4_SMXE_MASK       (1U << 14)
#define CR4_FSGSBASE_MASK   (1U << 16)
#define CR4_PCIDE_MASK      (1U << 17)
#define CR4_OSXSAVE_MASK    (1U << 18)
#define CR4_SMEP_MASK       (1U << 20)
#define CR4_SMAP_MASK       (1U << 21)
#define CR4_PKE_MASK        (1U << 22)

#define DR6_BD      (1 << 13)
#define DR6_BS      (1 << 14)
#define DR6_BT      (1 << 15)
#define DR6_FIXED_1 0xffff0ff0

#define DR7_GD             (1 << 13)
#define DR7_TYPE_SHIFT     16
#define DR7_LEN_SHIFT      18
#define DR7_FIXED_1        0x00000400
#define DR7_GLOBAL_BP_MASK 0xaa
#define DR7_LOCAL_BP_MASK  0x55
#define DR7_MAX_BP         4
#define DR7_TYPE_BP_INST   0x0
#define DR7_TYPE_DATA_WR   0x1
#define DR7_TYPE_IO_RW     0x2
#define DR7_TYPE_DATA_RW   0x3

#define PG_PRESENT_BIT  0
#define PG_RW_BIT       1
#define PG_USER_BIT     2
#define PG_PWT_BIT      3
#define PG_PCD_BIT      4
#define PG_ACCESSED_BIT 5
#define PG_DIRTY_BIT    6
#define PG_PSE_BIT      7
#define PG_GLOBAL_BIT   8
#define PG_PSE_PAT_BIT  12
#define PG_PKRU_BIT     59
#define PG_NX_BIT       63

#define PG_PRESENT_MASK  (1 << PG_PRESENT_BIT)
#define PG_RW_MASK       (1 << PG_RW_BIT)
#define PG_USER_MASK     (1 << PG_USER_BIT)
#define PG_PWT_MASK      (1 << PG_PWT_BIT)
#define PG_PCD_MASK      (1 << PG_PCD_BIT)
#define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT)
#define PG_DIRTY_MASK    (1 << PG_DIRTY_BIT)
#define PG_PSE_MASK      (1 << PG_PSE_BIT)
#define PG_GLOBAL_MASK   (1 << PG_GLOBAL_BIT)
#define PG_PSE_PAT_MASK  (1 << PG_PSE_PAT_BIT)
#define PG_ADDRESS_MASK  0x000ffffffffff000LL
#define PG_HI_RSVD_MASK  (PG_ADDRESS_MASK & ~PHYS_ADDR_MASK)
#define PG_HI_USER_MASK  0x7ff0000000000000LL
#define PG_PKRU_MASK     (15ULL << PG_PKRU_BIT)
#define PG_NX_MASK       (1ULL << PG_NX_BIT)

#define PG_ERROR_W_BIT 1

#define PG_ERROR_P_MASK    0x01
#define PG_ERROR_W_MASK    (1 << PG_ERROR_W_BIT)
#define PG_ERROR_U_MASK    0x04
#define PG_ERROR_RSVD_MASK 0x08
#define PG_ERROR_I_D_MASK  0x10
#define PG_ERROR_PK_MASK   0x20

#define MCG_CTL_P  (1ULL << 8)  /* MCG_CAP register available */
#define MCG_SER_P  (1ULL << 24) /* MCA recovery/new status bits */
#define MCG_LMCE_P (1ULL << 27) /* Local Machine Check Supported */

#define MCE_CAP_DEF   (MCG_CTL_P | MCG_SER_P)
#define MCE_BANKS_DEF 10

#define MCG_CAP_BANKS_MASK 0xff

#define MCG_STATUS_RIPV (1ULL << 0) /* restart ip valid */
#define MCG_STATUS_EIPV (1ULL << 1) /* ip points to correct instruction */
#define MCG_STATUS_MCIP (1ULL << 2) /* machine check in progress */
#define MCG_STATUS_LMCE (1ULL << 3) /* Local MCE signaled */

#define MCG_EXT_CTL_LMCE_EN (1ULL << 0) /* Local MCE enabled */

#define MCI_STATUS_VAL   (1ULL << 63) /* valid error */
#define MCI_STATUS_OVER  (1ULL << 62) /* previous errors lost */
#define MCI_STATUS_UC    (1ULL << 61) /* uncorrected error */
#define MCI_STATUS_EN    (1ULL << 60) /* error enabled */
#define MCI_STATUS_MISCV (1ULL << 59) /* misc error reg. valid */
#define MCI_STATUS_ADDRV (1ULL << 58) /* addr reg. valid */
#define MCI_STATUS_PCC   (1ULL << 57) /* processor context corrupt */
#define MCI_STATUS_S     (1ULL << 56) /* Signaled machine check */
#define MCI_STATUS_AR    (1ULL << 55) /* Action required */

/* MISC register defines */
#define MCM_ADDR_SEGOFF  0 /* segment offset */
#define MCM_ADDR_LINEAR  1 /* linear address */
#define MCM_ADDR_PHYS    2 /* physical address */
#define MCM_ADDR_MEM     3 /* memory address */
#define MCM_ADDR_GENERIC 7 /* generic */

#define MSR_IA32_TSC             0x10
#define MSR_IA32_APICBASE        0x1b
#define MSR_IA32_APICBASE_BSP    (1 << 8)
#define MSR_IA32_APICBASE_ENABLE (1 << 11)
#define MSR_IA32_APICBASE_EXTD   (1 << 10)
#define MSR_IA32_APICBASE_BASE   (0xfffffU << 12)
#define MSR_IA32_FEATURE_CONTROL 0x0000003a
#define MSR_TSC_ADJUST           0x0000003b
#define MSR_IA32_TSCDEADLINE     0x6e0

#define FEATURE_CONTROL_LOCKED                    (1 << 0)
#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1 << 2)
#define FEATURE_CONTROL_LMCE                      (1 << 20)

#define MSR_P6_PERFCTR0 0xc1

#define MSR_IA32_SMBASE              0x9e
#define MSR_MTRRcap                  0xfe
#define MSR_MTRRcap_VCNT             8
#define MSR_MTRRcap_FIXRANGE_SUPPORT (1 << 8)
#define MSR_MTRRcap_WC_SUPPORTED     (1 << 10)

#define MSR_IA32_SYSENTER_CS  0x174
#define MSR_IA32_SYSENTER_ESP 0x175
#define MSR_IA32_SYSENTER_EIP 0x176

#define MSR_MCG_CAP     0x179
#define MSR_MCG_STATUS  0x17a
#define MSR_MCG_CTL     0x17b
#define MSR_MCG_EXT_CTL 0x4d0

#define MSR_P6_EVNTSEL0 0x186

#define MSR_IA32_PERF_STATUS 0x198

#define MSR_IA32_MISC_ENABLE 0x1a0
/* Indicates good rep/movs microcode on some processors: */
#define MSR_IA32_MISC_ENABLE_DEFAULT 1

#define MSR_MTRRphysBase(reg) (0x200 + 2 * (reg))
#define MSR_MTRRphysMask(reg) (0x200 + 2 * (reg) + 1)

#define MSR_MTRRphysIndex(addr) ((((addr) & ~1u) - 0x200) / 2)
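
/* For illustration: the variable-range MTRRs are interleaved base/mask MSR
 * pairs, so MSR_MTRRphysBase(0) == 0x200, MSR_MTRRphysMask(0) == 0x201,
 * MSR_MTRRphysBase(1) == 0x202, and MSR_MTRRphysIndex() maps either MSR of a
 * pair back to its register number, e.g. MSR_MTRRphysIndex(0x203) == 1.
 */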

#define MSR_MTRRfix64K_00000 0x250
#define MSR_MTRRfix16K_80000 0x258
#define MSR_MTRRfix16K_A0000 0x259
#define MSR_MTRRfix4K_C0000  0x268
#define MSR_MTRRfix4K_C8000  0x269
#define MSR_MTRRfix4K_D0000  0x26a
#define MSR_MTRRfix4K_D8000  0x26b
#define MSR_MTRRfix4K_E0000  0x26c
#define MSR_MTRRfix4K_E8000  0x26d
#define MSR_MTRRfix4K_F0000  0x26e
#define MSR_MTRRfix4K_F8000  0x26f

#define MSR_PAT 0x277

#define MSR_MTRRdefType 0x2ff

#define MSR_CORE_PERF_FIXED_CTR0      0x309
#define MSR_CORE_PERF_FIXED_CTR1      0x30a
#define MSR_CORE_PERF_FIXED_CTR2      0x30b
#define MSR_CORE_PERF_FIXED_CTR_CTRL  0x38d
#define MSR_CORE_PERF_GLOBAL_STATUS   0x38e
#define MSR_CORE_PERF_GLOBAL_CTRL     0x38f
#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x390

#define MSR_MC0_CTL    0x400
#define MSR_MC0_STATUS 0x401
#define MSR_MC0_ADDR   0x402
#define MSR_MC0_MISC   0x403

#define MSR_EFER 0xc0000080

#define MSR_EFER_SCE   (1 << 0)
#define MSR_EFER_LME   (1 << 8)
#define MSR_EFER_LMA   (1 << 10)
#define MSR_EFER_NXE   (1 << 11)
#define MSR_EFER_SVME  (1 << 12)
#define MSR_EFER_FFXSR (1 << 14)

#define MSR_STAR         0xc0000081
#define MSR_LSTAR        0xc0000082
#define MSR_CSTAR        0xc0000083
#define MSR_FMASK        0xc0000084
#define MSR_FSBASE       0xc0000100
#define MSR_GSBASE       0xc0000101
#define MSR_KERNELGSBASE 0xc0000102
#define MSR_TSC_AUX      0xc0000103

#define MSR_VM_HSAVE_PA 0xc0010117

#define MSR_IA32_BNDCFGS 0x00000d90
#define MSR_IA32_XSS     0x00000da0

#define XSTATE_FP_BIT        0
#define XSTATE_SSE_BIT       1
#define XSTATE_YMM_BIT       2
#define XSTATE_BNDREGS_BIT   3
#define XSTATE_BNDCSR_BIT    4
#define XSTATE_OPMASK_BIT    5
#define XSTATE_ZMM_Hi256_BIT 6
#define XSTATE_Hi16_ZMM_BIT  7
#define XSTATE_PKRU_BIT      9

#define XSTATE_FP_MASK        (1ULL << XSTATE_FP_BIT)
#define XSTATE_SSE_MASK       (1ULL << XSTATE_SSE_BIT)
#define XSTATE_YMM_MASK       (1ULL << XSTATE_YMM_BIT)
#define XSTATE_BNDREGS_MASK   (1ULL << XSTATE_BNDREGS_BIT)
#define XSTATE_BNDCSR_MASK    (1ULL << XSTATE_BNDCSR_BIT)
#define XSTATE_OPMASK_MASK    (1ULL << XSTATE_OPMASK_BIT)
#define XSTATE_ZMM_Hi256_MASK (1ULL << XSTATE_ZMM_Hi256_BIT)
#define XSTATE_Hi16_ZMM_MASK  (1ULL << XSTATE_Hi16_ZMM_BIT)
#define XSTATE_PKRU_MASK      (1ULL << XSTATE_PKRU_BIT)
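
/* Example: a guest enabling x87, SSE and AVX state ends up with
 * XCR0 == (XSTATE_FP_MASK | XSTATE_SSE_MASK | XSTATE_YMM_MASK), i.e. 0x7.
 */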

/* CPUID feature words */
typedef enum FeatureWord {
    FEAT_1_EDX,         /* CPUID[1].EDX */
    FEAT_1_ECX,         /* CPUID[1].ECX */
    FEAT_7_0_EBX,       /* CPUID[EAX=7,ECX=0].EBX */
    FEAT_7_0_ECX,       /* CPUID[EAX=7,ECX=0].ECX */
    FEAT_8000_0001_EDX, /* CPUID[8000_0001].EDX */
    FEAT_8000_0001_ECX, /* CPUID[8000_0001].ECX */
    FEAT_8000_0007_EDX, /* CPUID[8000_0007].EDX */
    FEAT_C000_0001_EDX, /* CPUID[C000_0001].EDX */
    FEAT_KVM,           /* CPUID[4000_0001].EAX (KVM_CPUID_FEATURES) */
    FEAT_HYPERV_EAX,    /* CPUID[4000_0003].EAX */
    FEAT_HYPERV_EBX,    /* CPUID[4000_0003].EBX */
    FEAT_HYPERV_EDX,    /* CPUID[4000_0003].EDX */
    FEAT_SVM,           /* CPUID[8000_000A].EDX */
    FEAT_XSAVE,         /* CPUID[EAX=0xd,ECX=1].EAX */
    FEAT_6_EAX,         /* CPUID[6].EAX */
    FEAT_XSAVE_COMP_LO, /* CPUID[EAX=0xd,ECX=0].EAX */
    FEAT_XSAVE_COMP_HI, /* CPUID[EAX=0xd,ECX=0].EDX */
    FEATURE_WORDS,
} FeatureWord;

typedef uint32_t FeatureWordArray[FEATURE_WORDS];

/* cpuid_features bits */
#define CPUID_FP87    (1U << 0)
#define CPUID_VME     (1U << 1)
#define CPUID_DE      (1U << 2)
#define CPUID_PSE     (1U << 3)
#define CPUID_TSC     (1U << 4)
#define CPUID_MSR     (1U << 5)
#define CPUID_PAE     (1U << 6)
#define CPUID_MCE     (1U << 7)
#define CPUID_CX8     (1U << 8)
#define CPUID_APIC    (1U << 9)
#define CPUID_SEP     (1U << 11) /* sysenter/sysexit */
#define CPUID_MTRR    (1U << 12)
#define CPUID_PGE     (1U << 13)
#define CPUID_MCA     (1U << 14)
#define CPUID_CMOV    (1U << 15)
#define CPUID_PAT     (1U << 16)
#define CPUID_PSE36   (1U << 17)
#define CPUID_PN      (1U << 18)
#define CPUID_CLFLUSH (1U << 19)
#define CPUID_DTS     (1U << 21)
#define CPUID_ACPI    (1U << 22)
#define CPUID_MMX     (1U << 23)
#define CPUID_FXSR    (1U << 24)
#define CPUID_SSE     (1U << 25)
#define CPUID_SSE2    (1U << 26)
#define CPUID_SS      (1U << 27)
#define CPUID_HT      (1U << 28)
#define CPUID_TM      (1U << 29)
#define CPUID_IA64    (1U << 30)
#define CPUID_PBE     (1U << 31)

#define CPUID_EXT_SSE3               (1U << 0)
#define CPUID_EXT_PCLMULQDQ          (1U << 1)
#define CPUID_EXT_DTES64             (1U << 2)
#define CPUID_EXT_MONITOR            (1U << 3)
#define CPUID_EXT_DSCPL              (1U << 4)
#define CPUID_EXT_VMX                (1U << 5)
#define CPUID_EXT_SMX                (1U << 6)
#define CPUID_EXT_EST                (1U << 7)
#define CPUID_EXT_TM2                (1U << 8)
#define CPUID_EXT_SSSE3              (1U << 9)
#define CPUID_EXT_CID                (1U << 10)
#define CPUID_EXT_FMA                (1U << 12)
#define CPUID_EXT_CX16               (1U << 13)
#define CPUID_EXT_XTPR               (1U << 14)
#define CPUID_EXT_PDCM               (1U << 15)
#define CPUID_EXT_PCID               (1U << 17)
#define CPUID_EXT_DCA                (1U << 18)
#define CPUID_EXT_SSE41              (1U << 19)
#define CPUID_EXT_SSE42              (1U << 20)
#define CPUID_EXT_X2APIC             (1U << 21)
#define CPUID_EXT_MOVBE              (1U << 22)
#define CPUID_EXT_POPCNT             (1U << 23)
#define CPUID_EXT_TSC_DEADLINE_TIMER (1U << 24)
#define CPUID_EXT_AES                (1U << 25)
#define CPUID_EXT_XSAVE              (1U << 26)
#define CPUID_EXT_OSXSAVE            (1U << 27)
#define CPUID_EXT_AVX                (1U << 28)
#define CPUID_EXT_F16C               (1U << 29)
#define CPUID_EXT_RDRAND             (1U << 30)
#define CPUID_EXT_HYPERVISOR         (1U << 31)

#define CPUID_EXT2_FPU      (1U << 0)
#define CPUID_EXT2_VME      (1U << 1)
#define CPUID_EXT2_DE       (1U << 2)
#define CPUID_EXT2_PSE      (1U << 3)
#define CPUID_EXT2_TSC      (1U << 4)
#define CPUID_EXT2_MSR      (1U << 5)
#define CPUID_EXT2_PAE      (1U << 6)
#define CPUID_EXT2_MCE      (1U << 7)
#define CPUID_EXT2_CX8      (1U << 8)
#define CPUID_EXT2_APIC     (1U << 9)
#define CPUID_EXT2_SYSCALL  (1U << 11)
#define CPUID_EXT2_MTRR     (1U << 12)
#define CPUID_EXT2_PGE      (1U << 13)
#define CPUID_EXT2_MCA      (1U << 14)
#define CPUID_EXT2_CMOV     (1U << 15)
#define CPUID_EXT2_PAT      (1U << 16)
#define CPUID_EXT2_PSE36    (1U << 17)
#define CPUID_EXT2_MP       (1U << 19)
#define CPUID_EXT2_NX       (1U << 20)
#define CPUID_EXT2_MMXEXT   (1U << 22)
#define CPUID_EXT2_MMX      (1U << 23)
#define CPUID_EXT2_FXSR     (1U << 24)
#define CPUID_EXT2_FFXSR    (1U << 25)
#define CPUID_EXT2_PDPE1GB  (1U << 26)
#define CPUID_EXT2_RDTSCP   (1U << 27)
#define CPUID_EXT2_LM       (1U << 29)
#define CPUID_EXT2_3DNOWEXT (1U << 30)
#define CPUID_EXT2_3DNOW    (1U << 31)

/* CPUID[8000_0001].EDX bits that are aliases of CPUID[1].EDX bits on AMD CPUs */
#define CPUID_EXT2_AMD_ALIASES (CPUID_EXT2_FPU | CPUID_EXT2_VME | \
                                CPUID_EXT2_DE | CPUID_EXT2_PSE | \
                                CPUID_EXT2_TSC | CPUID_EXT2_MSR | \
                                CPUID_EXT2_PAE | CPUID_EXT2_MCE | \
                                CPUID_EXT2_CX8 | CPUID_EXT2_APIC | \
                                CPUID_EXT2_MTRR | CPUID_EXT2_PGE | \
                                CPUID_EXT2_MCA | CPUID_EXT2_CMOV | \
                                CPUID_EXT2_PAT | CPUID_EXT2_PSE36 | \
                                CPUID_EXT2_MMX | CPUID_EXT2_FXSR)

#define CPUID_EXT3_LAHF_LM       (1U << 0)
#define CPUID_EXT3_CMP_LEG       (1U << 1)
#define CPUID_EXT3_SVM           (1U << 2)
#define CPUID_EXT3_EXTAPIC       (1U << 3)
#define CPUID_EXT3_CR8LEG        (1U << 4)
#define CPUID_EXT3_ABM           (1U << 5)
#define CPUID_EXT3_SSE4A         (1U << 6)
#define CPUID_EXT3_MISALIGNSSE   (1U << 7)
#define CPUID_EXT3_3DNOWPREFETCH (1U << 8)
#define CPUID_EXT3_OSVW          (1U << 9)
#define CPUID_EXT3_IBS           (1U << 10)
#define CPUID_EXT3_XOP           (1U << 11)
#define CPUID_EXT3_SKINIT        (1U << 12)
#define CPUID_EXT3_WDT           (1U << 13)
#define CPUID_EXT3_LWP           (1U << 15)
#define CPUID_EXT3_FMA4          (1U << 16)
#define CPUID_EXT3_TCE           (1U << 17)
#define CPUID_EXT3_NODEID        (1U << 19)
#define CPUID_EXT3_TBM           (1U << 21)
#define CPUID_EXT3_TOPOEXT       (1U << 22)
#define CPUID_EXT3_PERFCORE      (1U << 23)
#define CPUID_EXT3_PERFNB        (1U << 24)

#define CPUID_SVM_NPT          (1U << 0)
#define CPUID_SVM_LBRV         (1U << 1)
#define CPUID_SVM_SVMLOCK      (1U << 2)
#define CPUID_SVM_NRIPSAVE     (1U << 3)
#define CPUID_SVM_TSCSCALE     (1U << 4)
#define CPUID_SVM_VMCBCLEAN    (1U << 5)
#define CPUID_SVM_FLUSHASID    (1U << 6)
#define CPUID_SVM_DECODEASSIST (1U << 7)
#define CPUID_SVM_PAUSEFILTER  (1U << 10)
#define CPUID_SVM_PFTHRESHOLD  (1U << 12)

#define CPUID_7_0_EBX_FSGSBASE   (1U << 0)
#define CPUID_7_0_EBX_BMI1       (1U << 3)
#define CPUID_7_0_EBX_HLE        (1U << 4)
#define CPUID_7_0_EBX_AVX2       (1U << 5)
#define CPUID_7_0_EBX_SMEP       (1U << 7)
#define CPUID_7_0_EBX_BMI2       (1U << 8)
#define CPUID_7_0_EBX_ERMS       (1U << 9)
#define CPUID_7_0_EBX_INVPCID    (1U << 10)
#define CPUID_7_0_EBX_RTM        (1U << 11)
#define CPUID_7_0_EBX_MPX        (1U << 14)
#define CPUID_7_0_EBX_AVX512F    (1U << 16) /* AVX-512 Foundation */
#define CPUID_7_0_EBX_AVX512DQ   (1U << 17) /* AVX-512 Doubleword & Quadword Instrs */
#define CPUID_7_0_EBX_RDSEED     (1U << 18)
#define CPUID_7_0_EBX_ADX        (1U << 19)
#define CPUID_7_0_EBX_SMAP       (1U << 20)
#define CPUID_7_0_EBX_AVX512IFMA (1U << 21) /* AVX-512 Integer Fused Multiply Add */
#define CPUID_7_0_EBX_PCOMMIT    (1U << 22) /* Persistent Commit */
#define CPUID_7_0_EBX_CLFLUSHOPT (1U << 23) /* Flush a Cache Line Optimized */
#define CPUID_7_0_EBX_CLWB       (1U << 24) /* Cache Line Write Back */
#define CPUID_7_0_EBX_AVX512PF   (1U << 26) /* AVX-512 Prefetch */
#define CPUID_7_0_EBX_AVX512ER   (1U << 27) /* AVX-512 Exponential and Reciprocal */
#define CPUID_7_0_EBX_AVX512CD   (1U << 28) /* AVX-512 Conflict Detection */
#define CPUID_7_0_EBX_AVX512BW   (1U << 30) /* AVX-512 Byte and Word Instructions */
#define CPUID_7_0_EBX_AVX512VL   (1U << 31) /* AVX-512 Vector Length Extensions */

#define CPUID_7_0_ECX_VBMI  (1U << 1) /* AVX-512 Vector Byte Manipulation Instrs */
#define CPUID_7_0_ECX_UMIP  (1U << 2)
#define CPUID_7_0_ECX_PKU   (1U << 3)
#define CPUID_7_0_ECX_OSPKE (1U << 4)
#define CPUID_7_0_ECX_RDPID (1U << 22)

#define CPUID_XSAVE_XSAVEOPT (1U << 0)
#define CPUID_XSAVE_XSAVEC   (1U << 1)
#define CPUID_XSAVE_XGETBV1  (1U << 2)
#define CPUID_XSAVE_XSAVES   (1U << 3)

#define CPUID_6_EAX_ARAT (1U << 2)

/* CPUID[0x80000007].EDX flags: */
#define CPUID_APM_INVTSC (1U << 8)

#define CPUID_VENDOR_SZ 12

#define CPUID_VENDOR_INTEL_1 0x756e6547 /* "Genu" */
#define CPUID_VENDOR_INTEL_2 0x49656e69 /* "ineI" */
#define CPUID_VENDOR_INTEL_3 0x6c65746e /* "ntel" */
#define CPUID_VENDOR_INTEL "GenuineIntel"

#define CPUID_VENDOR_AMD_1 0x68747541 /* "Auth" */
#define CPUID_VENDOR_AMD_2 0x69746e65 /* "enti" */
#define CPUID_VENDOR_AMD_3 0x444d4163 /* "cAMD" */
#define CPUID_VENDOR_AMD "AuthenticAMD"

#define CPUID_VENDOR_VIA "CentaurHauls"

#define CPUID_MWAIT_IBE (1U << 1) /* Interrupts can exit capability */
#define CPUID_MWAIT_EMX (1U << 0) /* enumeration supported */

/* CPUID[0xB].ECX level types */
#define CPUID_TOPOLOGY_LEVEL_INVALID (0U << 8)
#define CPUID_TOPOLOGY_LEVEL_SMT     (1U << 8)
#define CPUID_TOPOLOGY_LEVEL_CORE    (2U << 8)

#ifndef HYPERV_SPINLOCK_NEVER_RETRY
#define HYPERV_SPINLOCK_NEVER_RETRY 0xFFFFFFFF
#endif

#define EXCP00_DIVZ  0
#define EXCP01_DB    1
#define EXCP02_NMI   2
#define EXCP03_INT3  3
#define EXCP04_INTO  4
#define EXCP05_BOUND 5
#define EXCP06_ILLOP 6
#define EXCP07_PREX  7
#define EXCP08_DBLE  8
#define EXCP09_XERR  9
#define EXCP0A_TSS   10
#define EXCP0B_NOSEG 11
#define EXCP0C_STACK 12
#define EXCP0D_GPF   13
#define EXCP0E_PAGE  14
#define EXCP10_COPR  16
#define EXCP11_ALGN  17
#define EXCP12_MCHK  18

#define EXCP_SYSCALL 0x100 /* only happens in user only emulation
                              for syscall instruction */

/* i386-specific interrupt pending bits.  */
#define CPU_INTERRUPT_POLL CPU_INTERRUPT_TGT_EXT_1
#define CPU_INTERRUPT_SMI  CPU_INTERRUPT_TGT_EXT_2
#define CPU_INTERRUPT_NMI  CPU_INTERRUPT_TGT_EXT_3
#define CPU_INTERRUPT_MCE  CPU_INTERRUPT_TGT_EXT_4
#define CPU_INTERRUPT_VIRQ CPU_INTERRUPT_TGT_INT_0
#define CPU_INTERRUPT_SIPI CPU_INTERRUPT_TGT_INT_1
#define CPU_INTERRUPT_TPR  CPU_INTERRUPT_TGT_INT_2

/* Use a clearer name for this.  */
#define CPU_INTERRUPT_INIT CPU_INTERRUPT_RESET

/* Instead of computing the condition codes after each x86 instruction,
 * QEMU just stores one operand (called CC_SRC), the result
 * (called CC_DST) and the type of operation (called CC_OP). When the
 * condition codes are needed, the condition codes can be calculated
 * using this information. Condition codes are not generated if they
 * are only needed for conditional branches.
 */
typedef enum {
    CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
    CC_OP_EFLAGS,  /* all cc are explicitly computed, CC_SRC = flags */

    CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */
    CC_OP_MULW,
    CC_OP_MULL,
    CC_OP_MULQ,

    CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADDW,
    CC_OP_ADDL,
    CC_OP_ADDQ,

    CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADCW,
    CC_OP_ADCL,
    CC_OP_ADCQ,

    CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SUBW,
    CC_OP_SUBL,
    CC_OP_SUBQ,

    CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SBBW,
    CC_OP_SBBL,
    CC_OP_SBBQ,

    CC_OP_LOGICB, /* modify all flags, CC_DST = res */
    CC_OP_LOGICW,
    CC_OP_LOGICL,
    CC_OP_LOGICQ,

    CC_OP_INCB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
    CC_OP_INCW,
    CC_OP_INCL,
    CC_OP_INCQ,

    CC_OP_DECB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
    CC_OP_DECW,
    CC_OP_DECL,
    CC_OP_DECQ,

    CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.msb = C */
    CC_OP_SHLW,
    CC_OP_SHLL,
    CC_OP_SHLQ,

    CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
    CC_OP_SARW,
    CC_OP_SARL,
    CC_OP_SARQ,

    CC_OP_BMILGB, /* Z,S via CC_DST, C = SRC==0; O=0; P,A undefined */
    CC_OP_BMILGW,
    CC_OP_BMILGL,
    CC_OP_BMILGQ,

    CC_OP_ADCX,  /* CC_DST = C, CC_SRC = rest. */
    CC_OP_ADOX,  /* CC_DST = O, CC_SRC = rest. */
    CC_OP_ADCOX, /* CC_DST = C, CC_SRC2 = O, CC_SRC = rest. */

    CC_OP_CLR, /* Z set, all other flags clear. */

    CC_OP_NB,
} CCOp;
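
/* A worked example of the lazy scheme above: after "add %ebx, %eax" the
 * translator leaves CC_OP = CC_OP_ADDL, CC_DST = res and CC_SRC = src1.
 * If a later "jc" needs CF, it can be recovered without materializing
 * eflags, roughly as:
 *
 *     carry = (uint32_t)CC_DST < (uint32_t)CC_SRC;   // res < src1 => carry out
 *
 * whereas a following "jz" only has to test CC_DST == 0.
 */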

typedef struct SegmentCache {
    uint32_t selector;
    target_ulong base;
    uint32_t limit;
    uint32_t flags;
} SegmentCache;

#define MMREG_UNION(n, bits)        \
    union n {                       \
        uint8_t  _b_##n[(bits)/8];  \
        uint16_t _w_##n[(bits)/16]; \
        uint32_t _l_##n[(bits)/32]; \
        uint64_t _q_##n[(bits)/64]; \
        float32  _s_##n[(bits)/32]; \
        float64  _d_##n[(bits)/64]; \
    }

typedef MMREG_UNION(ZMMReg, 512) ZMMReg;
typedef MMREG_UNION(MMXReg, 64)  MMXReg;

typedef struct BNDReg {
    uint64_t lb;
    uint64_t ub;
} BNDReg;

typedef struct BNDCSReg {
    uint64_t cfgu;
    uint64_t sts;
} BNDCSReg;

#define BNDCFG_ENABLE      1ULL
#define BNDCFG_BNDPRESERVE 2ULL
#define BNDCFG_BDIR_MASK   TARGET_PAGE_MASK

#ifdef HOST_WORDS_BIGENDIAN
#define ZMM_B(n) _b_ZMMReg[63 - (n)]
#define ZMM_W(n) _w_ZMMReg[31 - (n)]
#define ZMM_L(n) _l_ZMMReg[15 - (n)]
#define ZMM_S(n) _s_ZMMReg[15 - (n)]
#define ZMM_Q(n) _q_ZMMReg[7 - (n)]
#define ZMM_D(n) _d_ZMMReg[7 - (n)]

#define MMX_B(n) _b_MMXReg[7 - (n)]
#define MMX_W(n) _w_MMXReg[3 - (n)]
#define MMX_L(n) _l_MMXReg[1 - (n)]
#define MMX_S(n) _s_MMXReg[1 - (n)]
#else
#define ZMM_B(n) _b_ZMMReg[n]
#define ZMM_W(n) _w_ZMMReg[n]
#define ZMM_L(n) _l_ZMMReg[n]
#define ZMM_S(n) _s_ZMMReg[n]
#define ZMM_Q(n) _q_ZMMReg[n]
#define ZMM_D(n) _d_ZMMReg[n]

#define MMX_B(n) _b_MMXReg[n]
#define MMX_W(n) _w_MMXReg[n]
#define MMX_L(n) _l_MMXReg[n]
#define MMX_S(n) _s_MMXReg[n]
#endif
#define MMX_Q(n) _q_MMXReg[n]
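
/* Example: the accessors above hide host endianness, so
 * env->xmm_regs[0].ZMM_L(0) is always the guest's least significant 32-bit
 * lane of XMM0, on both little- and big-endian hosts.
 */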

typedef union {
    floatx80 d __attribute__((aligned(16)));
    MMXReg mmx;
} FPReg;

typedef struct {
    uint64_t base;
    uint64_t mask;
} MTRRVar;

#define CPU_NB_REGS64 16
#define CPU_NB_REGS32 8

#ifdef TARGET_X86_64
#define CPU_NB_REGS CPU_NB_REGS64
#else
#define CPU_NB_REGS CPU_NB_REGS32
#endif

#define MAX_FIXED_COUNTERS 3
#define MAX_GP_COUNTERS    (MSR_IA32_PERF_STATUS - MSR_P6_EVNTSEL0)

#define NB_MMU_MODES 3
#define TARGET_INSN_START_EXTRA_WORDS 1

#define NB_OPMASK_REGS 8

/* CPU can't have 0xFFFFFFFF APIC ID, use that value to distinguish
 * that APIC ID hasn't been set yet
 */
#define UNASSIGNED_APIC_ID 0xFFFFFFFF

typedef union X86LegacyXSaveArea {
    struct {
        uint16_t fcw;
        uint16_t fsw;
        uint8_t ftw;
        uint8_t reserved;
        uint16_t fpop;
        uint64_t fpip;
        uint64_t fpdp;
        uint32_t mxcsr;
        uint32_t mxcsr_mask;
        FPReg fpregs[8];
        uint8_t xmm_regs[16][16];
    };
    uint8_t data[512];
} X86LegacyXSaveArea;

typedef struct X86XSaveHeader {
    uint64_t xstate_bv;
    uint64_t xcomp_bv;
    uint64_t reserve0;
    uint8_t reserved[40];
} X86XSaveHeader;

/* Ext. save area 2: AVX State */
typedef struct XSaveAVX {
    uint8_t ymmh[16][16];
} XSaveAVX;

/* Ext. save area 3: BNDREG */
typedef struct XSaveBNDREG {
    BNDReg bnd_regs[4];
} XSaveBNDREG;

/* Ext. save area 4: BNDCSR */
typedef union XSaveBNDCSR {
    BNDCSReg bndcsr;
    uint8_t data[64];
} XSaveBNDCSR;

/* Ext. save area 5: Opmask */
typedef struct XSaveOpmask {
    uint64_t opmask_regs[NB_OPMASK_REGS];
} XSaveOpmask;

/* Ext. save area 6: ZMM_Hi256 */
typedef struct XSaveZMM_Hi256 {
    uint8_t zmm_hi256[16][32];
} XSaveZMM_Hi256;

/* Ext. save area 7: Hi16_ZMM */
typedef struct XSaveHi16_ZMM {
    uint8_t hi16_zmm[16][64];
} XSaveHi16_ZMM;

/* Ext. save area 9: PKRU state */
typedef struct XSavePKRU {
    uint32_t pkru;
    uint32_t padding;
} XSavePKRU;

typedef struct X86XSaveArea {
    X86LegacyXSaveArea legacy;
    X86XSaveHeader header;

    /* Extended save areas: */

    /* AVX State: */
    XSaveAVX avx_state;
    uint8_t padding[960 - 576 - sizeof(XSaveAVX)];
    /* MPX State: */
    XSaveBNDREG bndreg_state;
    XSaveBNDCSR bndcsr_state;
    /* AVX-512 State: */
    XSaveOpmask opmask_state;
    XSaveZMM_Hi256 zmm_hi256_state;
    XSaveHi16_ZMM hi16_zmm_state;
    /* PKRU State: */
    XSavePKRU pkru_state;
} X86XSaveArea;

QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, avx_state) != 0x240);
QEMU_BUILD_BUG_ON(sizeof(XSaveAVX) != 0x100);
QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, bndreg_state) != 0x3c0);
QEMU_BUILD_BUG_ON(sizeof(XSaveBNDREG) != 0x40);
QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, bndcsr_state) != 0x400);
QEMU_BUILD_BUG_ON(sizeof(XSaveBNDCSR) != 0x40);
QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, opmask_state) != 0x440);
QEMU_BUILD_BUG_ON(sizeof(XSaveOpmask) != 0x40);
QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, zmm_hi256_state) != 0x480);
QEMU_BUILD_BUG_ON(sizeof(XSaveZMM_Hi256) != 0x200);
QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, hi16_zmm_state) != 0x680);
QEMU_BUILD_BUG_ON(sizeof(XSaveHi16_ZMM) != 0x400);
QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, pkru_state) != 0xA80);
QEMU_BUILD_BUG_ON(sizeof(XSavePKRU) != 0x8);
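
/* The checked offsets (0x240, 0x3c0, ...) correspond to the standard-format
 * XSAVE layout that hardware advertises through CPUID[EAX=0xD], which is
 * presumably what lets this struct be copied to/from a raw XSAVE buffer
 * (e.g. the kvm_xsave_buf field below).
 */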

typedef enum TPRAccess {
    TPR_ACCESS_READ,
    TPR_ACCESS_WRITE,
} TPRAccess;

typedef struct CPUX86State {
    /* standard registers */
    target_ulong regs[CPU_NB_REGS];
    target_ulong eip;
    target_ulong eflags; /* eflags register. During CPU emulation, CC
                            flags and DF are set to zero because they are
                            stored elsewhere */

    /* emulator internal eflags handling */
    target_ulong cc_dst;
    target_ulong cc_src;
    target_ulong cc_src2;
    uint32_t cc_op;
    int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */
    uint32_t hflags; /* TB flags, see HF_xxx constants. These flags
                        are known at translation time. */
    uint32_t hflags2; /* various other flags, see HF2_xxx constants. */

    /* segments */
    SegmentCache segs[6]; /* selector values */
    SegmentCache ldt;
    SegmentCache tr;
    SegmentCache gdt; /* only base and limit are used */
    SegmentCache idt; /* only base and limit are used */

    target_ulong cr[5]; /* NOTE: cr1 is unused */
    int32_t a20_mask;

    BNDReg bnd_regs[4];
    BNDCSReg bndcs_regs;
    uint64_t msr_bndcfgs;
    uint64_t efer;

    /* Beginning of state preserved by INIT (dummy marker).  */
    struct {} start_init_save;

    /* FPU state */
    unsigned int fpstt; /* top of stack index */
    uint16_t fpus;
    uint16_t fpuc;
    uint8_t fptags[8];  /* 0 = valid, 1 = empty */
    FPReg fpregs[8];
    /* KVM-only so far */
    uint16_t fpop;
    uint64_t fpip;
    uint64_t fpdp;

    /* emulator internal variables */
    float_status fp_status;
    floatx80 ft0;

    float_status mmx_status; /* for 3DNow! float ops */
    float_status sse_status;
    uint32_t mxcsr;
    ZMMReg xmm_regs[CPU_NB_REGS == 8 ? 8 : 32];
    ZMMReg xmm_t0;
    MMXReg mmx_t0;

    uint64_t opmask_regs[NB_OPMASK_REGS];

    /* sysenter registers */
    uint32_t sysenter_cs;
    target_ulong sysenter_esp;
    target_ulong sysenter_eip;
    uint64_t star;

    uint64_t vm_hsave;

#ifdef TARGET_X86_64
    target_ulong lstar;
    target_ulong cstar;
    target_ulong fmask;
    target_ulong kernelgsbase;
#endif

    uint64_t tsc;
    uint64_t tsc_adjust;
    uint64_t tsc_deadline;
    uint64_t tsc_aux;

    uint64_t xcr0;

    uint64_t mcg_status;
    uint64_t msr_ia32_misc_enable;
    uint64_t msr_ia32_feature_control;

    uint64_t msr_fixed_ctr_ctrl;
    uint64_t msr_global_ctrl;
    uint64_t msr_global_status;
    uint64_t msr_global_ovf_ctrl;
    uint64_t msr_fixed_counters[MAX_FIXED_COUNTERS];
    uint64_t msr_gp_counters[MAX_GP_COUNTERS];
    uint64_t msr_gp_evtsel[MAX_GP_COUNTERS];

    uint64_t pat;
    uint32_t smbase;

    uint32_t pkru;

    /* End of state preserved by INIT (dummy marker).  */
    struct {} end_init_save;

    uint64_t system_time_msr;
    uint64_t wall_clock_msr;
    uint64_t steal_time_msr;
    uint64_t async_pf_en_msr;
    uint64_t pv_eoi_en_msr;

    uint64_t msr_hv_hypercall;
    uint64_t msr_hv_guest_os_id;
    uint64_t msr_hv_vapic;
    uint64_t msr_hv_tsc;
    uint64_t msr_hv_crash_params[HV_X64_MSR_CRASH_PARAMS];
    uint64_t msr_hv_runtime;
    uint64_t msr_hv_synic_control;
    uint64_t msr_hv_synic_version;
    uint64_t msr_hv_synic_evt_page;
    uint64_t msr_hv_synic_msg_page;
    uint64_t msr_hv_synic_sint[HV_SYNIC_SINT_COUNT];
    uint64_t msr_hv_stimer_config[HV_SYNIC_STIMER_COUNT];
    uint64_t msr_hv_stimer_count[HV_SYNIC_STIMER_COUNT];

    /* exception/interrupt handling */
    int error_code;
    int exception_is_int;
    target_ulong exception_next_eip;
    target_ulong dr[8]; /* debug registers; note dr4 and dr5 are unused */
    union {
        struct CPUBreakpoint *cpu_breakpoint[4];
        struct CPUWatchpoint *cpu_watchpoint[4];
    }; /* break/watchpoints for dr[0..3] */
    int old_exception; /* exception in flight */

    uint64_t vm_vmcb;
    uint64_t tsc_offset;
    uint64_t intercept;
    uint16_t intercept_cr_read;
    uint16_t intercept_cr_write;
    uint16_t intercept_dr_read;
    uint16_t intercept_dr_write;
    uint32_t intercept_exceptions;
    uint8_t v_tpr;

    /* KVM states, automatically cleared on reset */
    uint8_t nmi_injected;
    uint8_t nmi_pending;

    CPU_COMMON

    /* Fields from here on are preserved across CPU reset. */
    struct {} end_reset_fields;

    /* processor features (e.g. for CPUID insn) */
    /* Minimum level/xlevel/xlevel2, based on CPU model + features */
    uint32_t cpuid_min_level, cpuid_min_xlevel, cpuid_min_xlevel2;
    /* Maximum level/xlevel/xlevel2 value for auto-assignment: */
    uint32_t cpuid_max_level, cpuid_max_xlevel, cpuid_max_xlevel2;
    /* Actual level/xlevel/xlevel2 value: */
    uint32_t cpuid_level, cpuid_xlevel, cpuid_xlevel2;
    uint32_t cpuid_vendor1;
    uint32_t cpuid_vendor2;
    uint32_t cpuid_vendor3;
    uint32_t cpuid_version;
    FeatureWordArray features;
    uint32_t cpuid_model[12];

    /* MTRRs */
    uint64_t mtrr_fixed[11];
    uint64_t mtrr_deftype;
    MTRRVar mtrr_var[MSR_MTRRcap_VCNT];

    /* For KVM */
    uint32_t mp_state;
    int32_t exception_injected;
    int32_t interrupt_injected;
    uint8_t soft_interrupt;
    uint8_t has_error_code;
    uint32_t sipi_vector;
    bool tsc_valid;
    int64_t tsc_khz;
    int64_t user_tsc_khz; /* for sanity check only */
    void *kvm_xsave_buf;

    uint64_t mcg_cap;
    uint64_t mcg_ctl;
    uint64_t mcg_ext_ctl;
    uint64_t mce_banks[MCE_BANKS_DEF * 4];
    uint64_t xstate_bv;

    /* vmstate */
    uint16_t fpus_vmstate;
    uint16_t fptag_vmstate;
    uint16_t fpregs_format_vmstate;

    uint64_t xss;

    TPRAccess tpr_access_type;
} CPUX86State;

struct kvm_msrs;

/**
 * X86CPU:
 * @env: #CPUX86State
 * @migratable: If set, only migratable flags will be accepted when "enforce"
 * mode is used, and only migratable flags will be included in the "host"
 * CPU model.
 *
 * An x86 CPU.
 */
struct X86CPU {
    /*< private >*/
    CPUState parent_obj;
    /*< public >*/

    CPUX86State env;

    bool hyperv_vapic;
    bool hyperv_relaxed_timing;
    int hyperv_spinlock_attempts;
    char *hyperv_vendor_id;
    bool hyperv_time;
    bool hyperv_crash;
    bool hyperv_reset;
    bool hyperv_vpindex;
    bool hyperv_runtime;
    bool hyperv_synic;
    bool hyperv_stimer;
    bool check_cpuid;
    bool enforce_cpuid;
    bool expose_kvm;
    bool migratable;
    bool host_features;
    uint32_t apic_id;

    /* if true the CPUID code directly forwards host cache leaves to the guest */
    bool cache_info_passthrough;

    /* Features that were filtered out because of missing host capabilities */
    uint32_t filtered_features[FEATURE_WORDS];

    /* Enable PMU CPUID bits. This can't be enabled by default yet because
     * it doesn't have ABI stability guarantees, as it passes all PMU CPUID
     * bits returned by GET_SUPPORTED_CPUID (that depend on host CPU and kernel
     * capabilities) directly to the guest.
     */
    bool enable_pmu;

    /* LMCE support can be enabled/disabled via cpu option 'lmce=on/off'. It is
     * disabled by default to avoid breaking migration between QEMU with
     * different LMCE configurations.
     */
    bool enable_lmce;

    /* Compatibility bits for old machine types.
     * If true present virtual l3 cache for VM, the vcpus in the same virtual
     * socket share a virtual l3 cache.
     */
    bool enable_l3_cache;

    /* Compatibility bits for old machine types: */
    bool enable_cpuid_0xb;

    /* Enable auto level-increase for all CPUID leaves */
    bool full_cpuid_auto_level;

    /* if true fill the top bits of the MTRR_PHYSMASKn variable range */
    bool fill_mtrr_mask;

    /* if true override the phys_bits value with a value read from the host */
    bool host_phys_bits;

    /* Number of physical address bits supported */
    uint32_t phys_bits;

    /* in order to simplify APIC support, we leave this pointer to the
       user */
    struct DeviceState *apic_state;
    struct MemoryRegion *cpu_as_root, *cpu_as_mem, *smram;
    Notifier machine_done;

    struct kvm_msrs *kvm_msr_buf;

    int32_t socket_id;
    int32_t core_id;
    int32_t thread_id;
};

static inline X86CPU *x86_env_get_cpu(CPUX86State *env)
{
    return container_of(env, X86CPU, env);
}

#define ENV_GET_CPU(e) CPU(x86_env_get_cpu(e))

#define ENV_OFFSET offsetof(X86CPU, env)

#ifndef CONFIG_USER_ONLY
extern struct VMStateDescription vmstate_x86_cpu;
#endif

/**
 * x86_cpu_do_interrupt:
 * @cpu: vCPU the interrupt is to be handled by.
 */
void x86_cpu_do_interrupt(CPUState *cpu);
bool x86_cpu_exec_interrupt(CPUState *cpu, int int_req);

int x86_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
                             int cpuid, void *opaque);
int x86_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu,
                             int cpuid, void *opaque);
int x86_cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                                 void *opaque);
int x86_cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu,
                                 void *opaque);

void x86_cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list,
                                Error **errp);

void x86_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags);

hwaddr x86_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);

int x86_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int x86_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);

void x86_cpu_exec_enter(CPUState *cpu);
void x86_cpu_exec_exit(CPUState *cpu);

X86CPU *cpu_x86_init(const char *cpu_model);
void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf);
int cpu_x86_support_mca_broadcast(CPUX86State *env);

int cpu_get_pic_interrupt(CPUX86State *s);
/* MSDOS compatibility mode FPU exception support */
void cpu_set_ferr(CPUX86State *s);

/* this function must always be used to load data in the segment
   cache: it synchronizes the hflags with the segment cache values */
static inline void cpu_x86_load_seg_cache(CPUX86State *env,
                                          int seg_reg, unsigned int selector,
                                          target_ulong base,
                                          unsigned int limit,
                                          unsigned int flags)
{
    SegmentCache *sc;
    unsigned int new_hflags;

    sc = &env->segs[seg_reg];
    sc->selector = selector;
    sc->base = base;
    sc->limit = limit;
    sc->flags = flags;

    /* update the hidden flags */
    {
        if (seg_reg == R_CS) {
#ifdef TARGET_X86_64
            if ((env->hflags & HF_LMA_MASK) && (flags & DESC_L_MASK)) {
                /* long mode */
                env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
                env->hflags &= ~(HF_ADDSEG_MASK);
            } else
#endif
            {
                /* legacy / compatibility case */
                new_hflags = (env->segs[R_CS].flags & DESC_B_MASK)
                    >> (DESC_B_SHIFT - HF_CS32_SHIFT);
                env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) |
                    new_hflags;
            }
        }
        if (seg_reg == R_SS) {
            int cpl = (flags >> DESC_DPL_SHIFT) & 3;
#if HF_CPL_MASK != 3
#error HF_CPL_MASK is hardcoded
#endif
            env->hflags = (env->hflags & ~HF_CPL_MASK) | cpl;
        }
        new_hflags = (env->segs[R_SS].flags & DESC_B_MASK)
            >> (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (env->hflags & HF_CS64_MASK) {
            /* zero base assumed for DS, ES and SS in long mode */
        } else if (!(env->cr[0] & CR0_PE_MASK) ||
                   (env->eflags & VM_MASK) ||
                   !(env->hflags & HF_CS32_MASK)) {
            /* XXX: try to avoid this test. The problem comes from the
               fact that in real mode or vm86 mode we only modify the
               'base' and 'selector' fields of the segment cache to go
               faster. A solution may be to force addseg to one in
               translate-i386.c. */
            new_hflags |= HF_ADDSEG_MASK;
        } else {
            new_hflags |= ((env->segs[R_DS].base |
                            env->segs[R_ES].base |
                            env->segs[R_SS].base) != 0) <<
                HF_ADDSEG_SHIFT;
        }
        env->hflags = (env->hflags &
                       ~(HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags;
    }
}
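
/* Example of the shift trick used above: DESC_B_MASK is bit 22 and
 * HF_CS32_MASK is bit 4, so "(flags & DESC_B_MASK) >> (DESC_B_SHIFT -
 * HF_CS32_SHIFT)" moves the descriptor's B/D bit straight into the HF_CS32
 * position (22 - 4 == 18) without any conditional.
 */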

static inline void cpu_x86_load_seg_cache_sipi(X86CPU *cpu,
                                               uint8_t sipi_vector)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;

    env->eip = 0;
    cpu_x86_load_seg_cache(env, R_CS, sipi_vector << 8,
                           sipi_vector << 12,
                           env->segs[R_CS].limit,
                           env->segs[R_CS].flags);
    cs->halted = 0;
}
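
/* Example: a SIPI with vector 0x12 starts the AP with CS.selector 0x1200,
 * CS.base 0x12000 and EIP 0, i.e. executing at physical address 0x12000,
 * matching real-hardware startup IPI behaviour.
 */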

int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags);

/* op_helper.c */
/* used for debug or cpu save/restore */
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f);
floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper);

/* cpu-exec.c */
/* the following helpers are only usable in user mode simulation as
   they can trigger unexpected exceptions */
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);
void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32);
void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32);

/* you can call this signal handler from your SIGBUS and SIGSEGV
   signal handlers to inform the virtual CPU of exceptions. non zero
   is returned if the signal was handled by the virtual CPU. */
int cpu_x86_signal_handler(int host_signum, void *pinfo,
                           void *puc);

/* cpu.c */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx);
void cpu_clear_apic_feature(CPUX86State *env);
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx);

/* helper.c */
int x86_cpu_handle_mmu_fault(CPUState *cpu, vaddr addr,
                             int is_write, int mmu_idx);
void x86_cpu_set_a20(X86CPU *cpu, int a20_state);

#ifndef CONFIG_USER_ONLY
uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr);
uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr);
uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr);
uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr);
void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val);
void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val);
void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val);
void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val);
void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val);
#endif

void breakpoint_handler(CPUState *cs);

/* will be suppressed */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0);
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3);
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4);
void cpu_x86_update_dr7(CPUX86State *env, uint32_t new_dr7);

/* hw/pc.c */
uint64_t cpu_get_tsc(CPUX86State *env);

#define TARGET_PAGE_BITS 12

#ifdef TARGET_X86_64
#define TARGET_PHYS_ADDR_SPACE_BITS 52
/* ??? This is really 48 bits, sign-extended, but the only thing
   accessible to userland with bit 48 set is the VSYSCALL, and that
   is handled via other mechanisms.  */
#define TARGET_VIRT_ADDR_SPACE_BITS 47
#else
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif

/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
# if defined(TARGET_X86_64)
# define TCG_PHYS_ADDR_BITS 40
# else
# define TCG_PHYS_ADDR_BITS 36
# endif

#define PHYS_ADDR_MASK MAKE_64BIT_MASK(0, TCG_PHYS_ADDR_BITS)

#define cpu_init(cpu_model) CPU(cpu_x86_init(cpu_model))

#define cpu_signal_handler cpu_x86_signal_handler
#define cpu_list x86_cpu_list

/* MMU modes definitions */
#define MMU_MODE0_SUFFIX _ksmap
#define MMU_MODE1_SUFFIX _user
#define MMU_MODE2_SUFFIX _knosmap /* SMAP disabled or CPL<3 && AC=1 */
#define MMU_KSMAP_IDX   0
#define MMU_USER_IDX    1
#define MMU_KNOSMAP_IDX 2
static inline int cpu_mmu_index(CPUX86State *env, bool ifetch)
{
    return (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER_IDX :
        (!(env->hflags & HF_SMAP_MASK) || (env->eflags & AC_MASK))
        ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX;
}

static inline int cpu_mmu_index_kernel(CPUX86State *env)
{
    return !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP_IDX :
        ((env->hflags & HF_CPL_MASK) < 3 && (env->eflags & AC_MASK))
        ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX;
}
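
/* Example: user code (CPL 3) always translates through MMU_USER_IDX; kernel
 * code uses the relaxed MMU_KNOSMAP_IDX when SMAP is disabled or EFLAGS.AC
 * permits user-page access, and the stricter MMU_KSMAP_IDX otherwise.
 */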

#define CC_DST  (env->cc_dst)
#define CC_SRC  (env->cc_src)
#define CC_SRC2 (env->cc_src2)
#define CC_OP   (env->cc_op)

/* n must be a constant to be efficient */
static inline target_long lshift(target_long x, int n)
{
    if (n >= 0) {
        return x << n;
    } else {
        return x >> (-n);
    }
}
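
/* Example: lshift(x, 3) == x << 3 while lshift(x, -3) == x >> 3; since n is
 * expected to be a compile-time constant, the branch folds away.
 */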

/* float macros */
#define FT0   (env->ft0)
#define ST0   (env->fpregs[env->fpstt].d)
#define ST(n) (env->fpregs[(env->fpstt + (n)) & 7].d)
#define ST1   ST(1)

/* translate.c */
void tcg_x86_init(void);

#include "exec/cpu-all.h"
#include "svm.h"

#if !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif

static inline void cpu_get_tb_cpu_state(CPUX86State *env, target_ulong *pc,
                                        target_ulong *cs_base, uint32_t *flags)
{
    *cs_base = env->segs[R_CS].base;
    *pc = *cs_base + env->eip;
    *flags = env->hflags |
        (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK));
}

void do_cpu_init(X86CPU *cpu);
void do_cpu_sipi(X86CPU *cpu);

#define MCE_INJECT_BROADCAST 1
#define MCE_INJECT_UNCOND_AO 2

void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags);

/* excp_helper.c */
void QEMU_NORETURN raise_exception(CPUX86State *env, int exception_index);
void QEMU_NORETURN raise_exception_ra(CPUX86State *env, int exception_index,
                                      uintptr_t retaddr);
void QEMU_NORETURN raise_exception_err(CPUX86State *env, int exception_index,
                                       int error_code);
void QEMU_NORETURN raise_exception_err_ra(CPUX86State *env, int exception_index,
                                          int error_code, uintptr_t retaddr);
void QEMU_NORETURN raise_interrupt(CPUX86State *nenv, int intno, int is_int,
                                   int error_code, int next_eip_addend);

/* cc_helper.c */
extern const uint8_t parity_table[256];
uint32_t cpu_cc_compute_all(CPUX86State *env1, int op);
void update_fp_status(CPUX86State *env);

static inline uint32_t cpu_compute_eflags(CPUX86State *env)
{
    return env->eflags | cpu_cc_compute_all(env, CC_OP) | (env->df & DF_MASK);
}

/* NOTE: the translator must set DisasContext.cc_op to CC_OP_EFLAGS
 * after generating a call to a helper that uses this.
 */
static inline void cpu_load_eflags(CPUX86State *env, int eflags,
                                   int update_mask)
{
    CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    CC_OP = CC_OP_EFLAGS;
    env->df = 1 - (2 * ((eflags >> 10) & 1));
    env->eflags = (env->eflags & ~update_mask) |
        (eflags & update_mask) | 0x2;
}
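
/* Example: bit 10 of EFLAGS is DF, so "1 - (2 * ((eflags >> 10) & 1))"
 * yields env->df == 1 when DF is clear and -1 when DF is set, i.e. the
 * per-element stride direction used by string instructions.
 */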

/* load efer and update the corresponding hflags. XXX: do consistency
   checks with cpuid bits? */
static inline void cpu_load_efer(CPUX86State *env, uint64_t val)
{
    env->efer = val;
    env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK);
    if (env->efer & MSR_EFER_LMA) {
        env->hflags |= HF_LMA_MASK;
    }
    if (env->efer & MSR_EFER_SVME) {
        env->hflags |= HF_SVME_MASK;
    }
}

static inline MemTxAttrs cpu_get_mem_attrs(CPUX86State *env)
{
    return ((MemTxAttrs) { .secure = (env->hflags & HF_SMM_MASK) != 0 });
}

/* fpu_helper.c */
void cpu_set_mxcsr(CPUX86State *env, uint32_t val);
void cpu_set_fpuc(CPUX86State *env, uint16_t val);

/* mem_helper.c */
void helper_lock_init(void);

/* svm_helper.c */
void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type,
                                   uint64_t param);
void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1);

/* seg_helper.c */
void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw);

/* smm_helper.c */
void do_smm_enter(X86CPU *cpu);
void cpu_smm_update(X86CPU *cpu);

/* apic.c */
void cpu_report_tpr_access(CPUX86State *env, TPRAccess access);
void apic_handle_tpr_access_report(DeviceState *d, target_ulong ip,
                                   TPRAccess access);

/* Change the value of a KVM-specific default
 *
 * If value is NULL, no default will be set and the original
 * value from the CPU model table will be kept.
 *
 * It is valid to call this function only for properties that
 * are already present in the kvm_default_props table.
 */
void x86_cpu_change_kvm_default(const char *prop, const char *value);

/* mpx_helper.c */
void cpu_sync_bndcs_hflags(CPUX86State *env);

/* Return name of 32-bit register, from a R_* constant */
const char *get_register_name_32(unsigned int reg);

void enable_compat_apic_id_mode(void);

#define APIC_DEFAULT_ADDRESS 0xfee00000
#define APIC_SPACE_SIZE      0x100000

void x86_cpu_dump_local_apic_state(CPUState *cs, FILE *f,
                                   fprintf_function cpu_fprintf, int flags);

/* cpu.c */
bool cpu_is_bsp(X86CPU *cpu);

#endif /* I386_CPU_H */