/*
 * i386 helpers (without register variable usage)
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

20 #include "qemu/osdep.h"
21 #include "qapi/qapi-events-run-state.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/runstate.h"
25 #include "kvm/kvm_i386.h"
26 #ifndef CONFIG_USER_ONLY
27 #include "sysemu/hw_accel.h"
28 #include "monitor/monitor.h"
void cpu_sync_bndcs_hflags(CPUX86State *env)
{
    uint32_t hflags = env->hflags;
    uint32_t hflags2 = env->hflags2;
    uint32_t bndcsr;

    /* At CPL 3 the user BNDCSR applies; otherwise the BNDCFGS MSR does. */
    if ((hflags & HF_CPL_MASK) == 3) {
        bndcsr = env->bndcs_regs.cfgu;
    } else {
        bndcsr = env->msr_bndcfgs;
    }

    if ((env->cr[4] & CR4_OSXSAVE_MASK)
        && (env->xcr0 & XSTATE_BNDCSR_MASK)
        && (bndcsr & BNDCFG_ENABLE)) {
        hflags |= HF_MPX_EN_MASK;
    } else {
        hflags &= ~HF_MPX_EN_MASK;
    }

    if (bndcsr & BNDCFG_BNDPRESERVE) {
        hflags2 |= HF2_MPX_PR_MASK;
    } else {
        hflags2 &= ~HF2_MPX_PR_MASK;
    }

    env->hflags = hflags;
    env->hflags2 = hflags2;
}
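
/*
 * Extract the CPU family and model from the CPUID version dword; the
 * model combines the extended-model bits with the base model.
 */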
static void cpu_x86_version(CPUX86State *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;

    if (family == NULL || model == NULL) {
        return;
    }

    *family = (cpuver >> 8) & 0x0f;
    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
}

/* Broadcast MCA signal for processor version 06H_EH and above */
int cpu_x86_support_mca_broadcast(CPUX86State *env)
{
    int family = 0;
    int model = 0;

    cpu_x86_version(env, &family, &model);
    if ((family == 6 && model >= 14) || family > 6) {
        return 1;
    }

    return 0;
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */
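
/*
 * Update the A20 address line mask.  Any change invalidates cached
 * translations, so the CPU is forced out of its current TB and the
 * whole TLB is flushed.
 */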
void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
{
    CPUX86State *env = &cpu->env;

    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
        CPUState *cs = CPU(cpu);

        qemu_log_mask(CPU_LOG_MMU, "A20 update: a20=%d\n", a20_state);
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(cs);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}
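
/*
 * Install a new CR0 value: flush the TLB when paging or protection
 * bits change, handle long-mode entry/exit, and recompute the derived
 * hflags (PE, ADDSEG, FPU flags).
 */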
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    X86CPU *cpu = env_archcpu(env);
    int pe_state;

    qemu_log_mask(CPU_LOG_MMU, "CR0 update: CR0=0x%08x\n", new_cr0);
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(CPU(cpu));
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK)) {
            return;
        }
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
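
/*
 * Install a new CR3 value; with paging enabled this switches the page
 * tables, so the whole TLB must be flushed.
 */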
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
        qemu_log_mask(CPU_LOG_MMU,
                      "CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
        tlb_flush(env_cpu(env));
    }
}
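
/*
 * Install a new CR4 value.  The TLB is flushed if any paging-related
 * bit changes; feature bits the CPU model does not support are masked
 * off, and the OSFXSR/SMAP hflags are recomputed.
 */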
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
    uint32_t hflags;

#if defined(DEBUG_MMU)
    printf("CR4 update: %08x -> %08x\n", (uint32_t)env->cr[4], new_cr4);
#endif
    if ((new_cr4 ^ env->cr[4]) &
        (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
         CR4_SMEP_MASK | CR4_SMAP_MASK | CR4_LA57_MASK)) {
        tlb_flush(env_cpu(env));
    }

    /* Clear bits we're going to recompute.  */
    hflags = env->hflags & ~(HF_OSFXSR_MASK | HF_SMAP_MASK);

    /* SSE handling */
    if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
        new_cr4 &= ~CR4_OSFXSR_MASK;
    }
    if (new_cr4 & CR4_OSFXSR_MASK) {
        hflags |= HF_OSFXSR_MASK;
    }

    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
        new_cr4 &= ~CR4_SMAP_MASK;
    }
    if (new_cr4 & CR4_SMAP_MASK) {
        hflags |= HF_SMAP_MASK;
    }
    if (!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_PKU)) {
        new_cr4 &= ~CR4_PKE_MASK;
    }

    env->cr[4] = new_cr4;
    env->hflags = hflags;

    cpu_sync_bndcs_hflags(env);
}

#if !defined(CONFIG_USER_ONLY)
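/*
 * Walk the guest page tables by hand (real mode, 32-bit, PAE and
 * 64-bit/LA57 layouts) and return the physical address for 'addr',
 * or -1 if the page is not mapped.  This is for debug accesses: it
 * performs no access-rights checks and sets no accessed/dirty bits.
 */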
hwaddr x86_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                         MemTxAttrs *attrs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    int32_t a20_mask;
    uint32_t page_offset;
    int page_size;

    *attrs = cpu_get_mem_attrs(env);

    a20_mask = x86_get_a20_mask(env);
    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr & a20_mask;
        page_size = 4096;
    } else if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            bool la57 = env->cr[4] & CR4_LA57_MASK;
            uint64_t pml5e_addr, pml5e;
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = la57 ? (int64_t)addr >> 56 : (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                return -1;
            }

            if (la57) {
                pml5e_addr = ((env->cr[3] & ~0xfff) +
                        (((addr >> 48) & 0x1ff) << 3)) & a20_mask;
                pml5e = x86_ldq_phys(cs, pml5e_addr);
                if (!(pml5e & PG_PRESENT_MASK)) {
                    return -1;
                }
            } else {
                pml5e = env->cr[3];
            }

            pml4e_addr = ((pml5e & PG_ADDRESS_MASK) +
                    (((addr >> 39) & 0x1ff) << 3)) & a20_mask;
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                return -1;
            }
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) +
                         (((addr >> 30) & 0x1ff) << 3)) & a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                return -1;
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte = pdpe;
                goto out;
            }
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                a20_mask;
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                return -1;
            }
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) +
                    (((addr >> 21) & 0x1ff) << 3)) & a20_mask;
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde;
        } else {
            /* 4 KB page */
            pte_addr = ((pde & PG_ADDRESS_MASK) +
                        (((addr >> 12) & 0x1ff) << 3)) & a20_mask;
            page_size = 4096;
            pte = x86_ldq_phys(cs, pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK)) {
            return -1;
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & a20_mask;
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            /* 4 MB page */
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            page_size = 4096 * 1024;
        } else {
            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & a20_mask;
            pte = x86_ldl_phys(cs, pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                return -1;
            }
            page_size = 4096;
        }
        pte = pte & a20_mask;
    }

#ifdef TARGET_X86_64
out:
#endif
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    return pte | page_offset;
}
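
/* Parameters marshalled to the target VCPU for MCE injection via run_on_cpu(). */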
typedef struct MCEInjectionParams {
    Monitor *mon;
    int bank;
    uint64_t status;
    uint64_t mcg_status;
    uint64_t addr;
    uint64_t misc;
    int flags;
} MCEInjectionParams;

static void emit_guest_memory_failure(MemoryFailureAction action, bool ar,
                                      bool recursive)
{
    MemoryFailureFlags mff = {.action_required = ar, .recursive = recursive};

    qapi_event_send_memory_failure(MEMORY_FAILURE_RECIPIENT_GUEST, action,
                                   &mff);
}
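
/*
 * Inject an MCE on the VCPU this is run on.  Uncorrected errors may be
 * suppressed (reporting disabled), escalate to a guest reset (MCE not
 * enabled, or a second MCE while one is still in progress), or raise
 * CPU_INTERRUPT_MCE; corrected errors just update the bank registers.
 */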
static void do_inject_x86_mce(CPUState *cs, run_on_cpu_data data)
{
    MCEInjectionParams *params = data.host_ptr;
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *cenv = &cpu->env;
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;
    g_autofree char *msg = NULL;
    bool need_reset = false;
    bool recursive;
    bool ar = !!(params->status & MCI_STATUS_AR);

    cpu_synchronize_state(cs);
    recursive = !!(cenv->mcg_status & MCG_STATUS_MCIP);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO) && !ar && recursive) {
        emit_guest_memory_failure(MEMORY_FAILURE_ACTION_IGNORE, ar, recursive);
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cs->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cs->cpu_index, params->bank);
            return;
        }

        if (!(cenv->cr[4] & CR4_MCE_MASK)) {
            need_reset = true;
            msg = g_strdup_printf("CPU %d: MCE capability is not enabled, "
                                  "raising triple fault", cs->cpu_index);
        } else if (recursive) {
            need_reset = true;
            msg = g_strdup_printf("CPU %d: Previous MCE still in progress, "
                                  "raising triple fault", cs->cpu_index);
        }

        if (need_reset) {
            emit_guest_memory_failure(MEMORY_FAILURE_ACTION_RESET, ar,
                                      recursive);
            monitor_printf(params->mon, "%s", msg);
            qemu_log_mask(CPU_LOG_RESET, "%s\n", msg);
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            return;
        }

        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cs, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        banks[1] |= MCI_STATUS_OVER;
    }

    emit_guest_memory_failure(MEMORY_FAILURE_ACTION_INJECT, ar, recursive);
}
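
/*
 * Validate an MCE injection request from the monitor and run it on the
 * target VCPU; with MCE_INJECT_BROADCAST, also deliver a machine check
 * to every other VCPU, as a broadcast-capable machine would.
 */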
void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *cenv = &cpu->env;
    MCEInjectionParams params = {
        .mon = mon,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    unsigned bank_num = cenv->mcg_cap & 0xff;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(cs, do_inject_x86_mce, RUN_ON_CPU_HOST_PTR(&params));
    if (flags & MCE_INJECT_BROADCAST) {
        CPUState *other_cs;

        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        CPU_FOREACH(other_cs) {
            if (other_cs == cs) {
                continue;
            }
            run_on_cpu(other_cs, do_inject_x86_mce, RUN_ON_CPU_HOST_PTR(&params));
        }
    }
}

void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
{
    X86CPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);

    if (kvm_enabled() || whpx_enabled()) {
        env->tpr_access_type = access;

        cpu_interrupt(cs, CPU_INTERRUPT_TPR);
    } else if (tcg_enabled()) {
        cpu_restore_state(cs, cs->mem_io_pc, false);

        apic_handle_tpr_access_report(cpu->apic_state, env->eip, access);
    }
}
#endif /* !CONFIG_USER_ONLY */
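
/*
 * Fetch a segment descriptor from the GDT/LDT for the debugger and
 * decode its base, limit and flags.  Returns 1 on success, 0 if the
 * selector is out of range or the descriptor cannot be read.
 */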
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    CPUState *cs = env_cpu(env);
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(cs, ptr + 4, (uint8_t *)&e2, sizeof(e2), 0) != 0) {
        return 0;
    }

    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        *limit = (*limit << 12) | 0xfff;
    }
    *flags = e2;

    return 1;
}

#if !defined(CONFIG_USER_ONLY)
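/*
 * Handle an INIT signal: reset the CPU while restoring the per-CPU
 * state that INIT leaves untouched (the start_init_save..end_init_save
 * window of CPUX86State), then put the local APIC back to reset state.
 */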
void do_cpu_init(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    CPUX86State *save = g_new(CPUX86State, 1);
    int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;

    *save = *env;

    cpu_reset(cs);
    cs->interrupt_request = sipi;
    memcpy(&env->start_init_save, &save->start_init_save,
           offsetof(CPUX86State, end_init_save) -
           offsetof(CPUX86State, start_init_save));
    g_free(save);

    if (kvm_enabled()) {
        kvm_arch_do_init_vcpu(cpu);
    }
    apic_init_reset(cpu->apic_state);
}

void do_cpu_sipi(X86CPU *cpu)
{
    apic_sipi(cpu->apic_state);
}
#else
void do_cpu_init(X86CPU *cpu)
{
}
void do_cpu_sipi(X86CPU *cpu)
{
}
#endif

#ifndef CONFIG_USER_ONLY
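/*
 * Physical memory accessors that honour the CPU's current memory
 * transaction attributes (e.g. SMM), so accesses hit the address
 * space the CPU would actually use.
 */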
uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    return address_space_ldub(as, addr, attrs, NULL);
}

uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    return address_space_lduw(as, addr, attrs, NULL);
}

uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    return address_space_ldl(as, addr, attrs, NULL);
}

uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    return address_space_ldq(as, addr, attrs, NULL);
}

void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stb(as, addr, val, attrs, NULL);
}

void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stl_notdirty(as, addr, val, attrs, NULL);
}

void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stw(as, addr, val, attrs, NULL);
}

void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stl(as, addr, val, attrs, NULL);
}

void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    MemTxAttrs attrs = cpu_get_mem_attrs(env);
    AddressSpace *as = cpu_addressspace(cs, attrs);

    address_space_stq(as, addr, val, attrs, NULL);
}
#endif