/*
 *  x86 exception helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/log.h"
#include "sysemu/runstate.h"
#include "exec/helper-proto.h"
#include "helper-tcg.h"

void helper_raise_interrupt(CPUX86State *env, int intno, int next_eip_addend)
{
    raise_interrupt(env, intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(CPUX86State *env, int exception_index)
{
    raise_exception(env, exception_index);
}

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(CPUX86State *env, int intno, int *error_code,
                           uintptr_t retaddr)
{
    int first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);
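
    /*
     * Note: vector 0 (#DE) and vectors 10-13 (#TS, #NP, #SS, #GP) form the
     * "contributory" class of the x86 double-fault rules: a contributory
     * fault raised while delivering a contributory fault, or a contributory
     * or page fault raised while delivering a page fault, escalates to #DF,
     * and a fault while delivering #DF is a triple fault. The checks below
     * follow that table.
     */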

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                  env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_GUEST_MASK) {
            cpu_vmexit(env, SVM_EXIT_SHUTDOWN, 0, retaddr); /* does not return */
        }

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
        return EXCP_HLT;
    }
#endif

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE)) {
        env->old_exception = intno;
    }

    return intno;
}

/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * env->eip value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
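/*
 * For instance, a plain two-byte "int $n" reaches helper_raise_interrupt()
 * with next_eip_addend equal to the instruction length (2), so
 * exception_next_eip below ends up pointing at the instruction following
 * the INT, which is what the interrupt gate pushes as the return address.
 */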
static void QEMU_NORETURN raise_interrupt2(CPUX86State *env, int intno,
                                           int is_int, int error_code,
                                           int next_eip_addend,
                                           uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    if (!is_int) {
        cpu_svm_check_intercept_param(env, SVM_EXIT_EXCP_BASE + intno,
                                      error_code, retaddr);
        intno = check_exception(env, intno, &error_code, retaddr);
    } else {
        cpu_svm_check_intercept_param(env, SVM_EXIT_SWINT, 0, retaddr);
    }

    cs->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit_restore(cs, retaddr);
}

/* shortcuts to generate exceptions */

void QEMU_NORETURN raise_interrupt(CPUX86State *env, int intno, int is_int,
                                   int error_code, int next_eip_addend)
{
    raise_interrupt2(env, intno, is_int, error_code, next_eip_addend, 0);
}

void raise_exception_err(CPUX86State *env, int exception_index,
                         int error_code)
{
    raise_interrupt2(env, exception_index, 0, error_code, 0, 0);
}

void raise_exception_err_ra(CPUX86State *env, int exception_index,
                            int error_code, uintptr_t retaddr)
{
    raise_interrupt2(env, exception_index, 0, error_code, 0, retaddr);
}

void raise_exception(CPUX86State *env, int exception_index)
{
    raise_interrupt2(env, exception_index, 0, 0, 0, 0);
}

void raise_exception_ra(CPUX86State *env, int exception_index, uintptr_t retaddr)
{
    raise_interrupt2(env, exception_index, 0, 0, 0, retaddr);
}

#if !defined(CONFIG_USER_ONLY)
static hwaddr get_hphys(CPUState *cs, hwaddr gphys, MMUAccessType access_type,
                        int *prot)
{
    CPUX86State *env = &X86_CPU(cs)->env;
    uint64_t rsvd_mask = PG_HI_RSVD_MASK;
    uint64_t ptep, pte;
    uint64_t exit_info_1 = 0;
    target_ulong pde_addr, pte_addr;
    uint32_t page_offset;
    int page_size;

    if (likely(!(env->hflags2 & HF2_NPT_MASK))) {
        return gphys;
    }

    if (!(env->nested_pg_mode & SVM_NPT_NXE)) {
        rsvd_mask |= PG_NX_MASK;
    }

    if (env->nested_pg_mode & SVM_NPT_PAE) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

        if (env->nested_pg_mode & SVM_NPT_LMA) {
            uint64_t pml5e;
            uint64_t pml4e_addr, pml4e;

            pml5e = env->nested_cr3;
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;

            pml4e_addr = (pml5e & PG_ADDRESS_MASK) +
                    (((gphys >> 39) & 0x1ff) << 3);
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
                goto do_fault_rsvd;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
            }
            ptep &= pml4e ^ PG_NX_MASK;
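            /*
             * Note: PG_NX is a "deny" bit, so each level's entry is XOR-ed
             * with PG_NX_MASK before being AND-ed into ptep; that way ptep
             * accumulates the most restrictive USER/RW/NX permissions of
             * all levels with a plain bitwise AND.
             */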
            pdpe_addr = (pml4e & PG_ADDRESS_MASK) +
                    (((gphys >> 30) & 0x1ff) << 3);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pdpe & rsvd_mask) {
                goto do_fault_rsvd;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte_addr = pdpe_addr;
                pte = pdpe;
                goto do_check_protect;
            }
        } else {
            pdpe_addr = (env->nested_cr3 & ~0x1f) + ((gphys >> 27) & 0x18);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            rsvd_mask |= PG_HI_USER_MASK;
            if (pdpe & (rsvd_mask | PG_NX_MASK)) {
                goto do_fault_rsvd;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = (pdpe & PG_ADDRESS_MASK) + (((gphys >> 21) & 0x1ff) << 3);
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pde & rsvd_mask) {
            goto do_fault_rsvd;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte_addr = pde_addr;
            pte = pde;
            goto do_check_protect;
        }
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }
        pte_addr = (pde & PG_ADDRESS_MASK) + (((gphys >> 12) & 0x1ff) << 3);
        pte = x86_ldq_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        /* combine pde and pte nx, user and rw protections */
        ptep &= pte ^ PG_NX_MASK;
        page_size = 4096;
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = (env->nested_cr3 & ~0xfff) + ((gphys >> 20) & 0xffc);
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        ptep = pde | PG_NX_MASK;

        /* if host cr4 PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->nested_pg_mode & SVM_NPT_PSE)) {
            page_size = 4096 * 1024;
            pte_addr = pde_addr;

            /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
             * Leave bits 20-13 in place for setting accessed/dirty bits below.
             */
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
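            /*
             * A quick sanity check of the shift above (PSE-36): a PDE with
             * only bit 13 set in the 20..13 field contributes 0x1_00000000,
             * i.e. physical address bits 39..32 become 0x01 and the 4 MB
             * frame base is (pde & 0xffc00000) + 0x1_00000000.
             */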
            rsvd_mask = 0x200000;
            goto do_check_protect_pse36;
        }

        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }

        /* page table entry */
        pte_addr = (pde & ~0xfff) + ((gphys >> 10) & 0xffc);
        pte = x86_ldl_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep &= pte | PG_NX_MASK;
        page_size = 4096;
        rsvd_mask = 0;
    }

 do_check_protect:
    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
 do_check_protect_pse36:
    if (pte & rsvd_mask) {
        goto do_fault_rsvd;
    }
    ptep ^= PG_NX_MASK;

    if (!(ptep & PG_USER_MASK)) {
        goto do_fault_protect;
    }
    if (ptep & PG_NX_MASK) {
        if (access_type == MMU_INST_FETCH) {
            goto do_fault_protect;
        }
        *prot &= ~PAGE_EXEC;
    }
    if (!(ptep & PG_RW_MASK)) {
        if (access_type == MMU_DATA_STORE) {
            goto do_fault_protect;
        }
        *prot &= ~PAGE_WRITE;
    }

    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = gphys & (page_size - 1);
    return pte + page_offset;

 do_fault_rsvd:
    exit_info_1 |= SVM_NPTEXIT_RSVD;
 do_fault_protect:
    exit_info_1 |= SVM_NPTEXIT_P;
 do_fault:
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 gphys);
    exit_info_1 |= SVM_NPTEXIT_US;
    if (access_type == MMU_DATA_STORE) {
        exit_info_1 |= SVM_NPTEXIT_RW;
    } else if (access_type == MMU_INST_FETCH) {
        exit_info_1 |= SVM_NPTEXIT_ID;
    }
    if (prot) {
        exit_info_1 |= SVM_NPTEXIT_GPA;
    } else { /* page table access */
        exit_info_1 |= SVM_NPTEXIT_GPT;
    }
    cpu_vmexit(env, SVM_EXIT_NPF, exit_info_1, env->retaddr);
}

/* return value:
 * -1 = cannot handle fault
 *  0 = nothing more to do
 *  1 = generate PF fault
 */
static int handle_mmu_fault(CPUState *cs, vaddr addr, int size,
                            int is_write1, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t ptep, pte;
    int32_t a20_mask;
    target_ulong pde_addr, pte_addr;
    int error_code = 0;
    int is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint64_t rsvd_mask = PG_HI_RSVD_MASK;
    uint32_t page_offset;
    target_ulong vaddr;
    uint32_t pkr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
368 printf("MMU fault: addr=%" VADDR_PRIx
" w=%d u=%d eip=" TARGET_FMT_lx
"\n",
369 addr
, is_write1
, is_user
, env
->eip
);
371 is_write
= is_write1
& 1;
373 a20_mask
= x86_get_a20_mask(env
);
374 if (!(env
->cr
[0] & CR0_PG_MASK
)) {
377 if (!(env
->hflags
& HF_LMA_MASK
)) {
378 /* Without long mode we can only address 32bits in real mode */
382 prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;

    if (!(env->efer & MSR_EFER_NXE)) {
        rsvd_mask |= PG_NX_MASK;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

        if (env->hflags & HF_LMA_MASK) {
            bool la57 = env->cr[4] & CR4_LA57_MASK;
            uint64_t pml5e_addr, pml5e;
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = la57 ? (int64_t)addr >> 56 : (int64_t)addr >> 47;
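            /*
             * The arithmetic shift leaves 0 or -1 only for canonical
             * addresses, i.e. when bits 63..47 (or 63..56 with LA57) are
             * all zeros or all ones; anything else must raise #GP instead
             * of being walked.
             */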
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                cs->exception_index = EXCP0D_GPF;
                return 1;
            }

            if (la57) {
                pml5e_addr = ((env->cr[3] & ~0xfff) +
                        (((addr >> 48) & 0x1ff) << 3)) & a20_mask;
                pml5e_addr = get_hphys(cs, pml5e_addr, MMU_DATA_STORE, NULL);
                pml5e = x86_ldq_phys(cs, pml5e_addr);
                if (!(pml5e & PG_PRESENT_MASK)) {
                    goto do_fault;
                }
                if (pml5e & (rsvd_mask | PG_PSE_MASK)) {
                    goto do_fault_rsvd;
                }
                if (!(pml5e & PG_ACCESSED_MASK)) {
                    pml5e |= PG_ACCESSED_MASK;
                    x86_stl_phys_notdirty(cs, pml5e_addr, pml5e);
                }
                ptep = pml5e ^ PG_NX_MASK;
            } else {
                pml5e = env->cr[3];
                ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
            }

            pml4e_addr = ((pml5e & PG_ADDRESS_MASK) +
                    (((addr >> 39) & 0x1ff) << 3)) & a20_mask;
            pml4e_addr = get_hphys(cs, pml4e_addr, MMU_DATA_STORE, NULL);
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
                goto do_fault_rsvd;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
            }
            ptep &= pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                a20_mask;
            pdpe_addr = get_hphys(cs, pdpe_addr, MMU_DATA_STORE, NULL);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pdpe & rsvd_mask) {
                goto do_fault_rsvd;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte_addr = pdpe_addr;
                pte = pdpe;
                goto do_check_protect;
            }
        } else {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                a20_mask;
            pdpe_addr = get_hphys(cs, pdpe_addr, MMU_DATA_STORE, NULL);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            rsvd_mask |= PG_HI_USER_MASK;
            if (pdpe & (rsvd_mask | PG_NX_MASK)) {
                goto do_fault_rsvd;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            a20_mask;
        pde_addr = get_hphys(cs, pde_addr, MMU_DATA_STORE, NULL);
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pde & rsvd_mask) {
            goto do_fault_rsvd;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte_addr = pde_addr;
            pte = pde;
            goto do_check_protect;
        }
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }
        pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
            a20_mask;
        pte_addr = get_hphys(cs, pte_addr, MMU_DATA_STORE, NULL);
        pte = x86_ldq_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        /* combine pde and pte nx, user and rw protections */
        ptep &= pte ^ PG_NX_MASK;
        page_size = 4096;
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            a20_mask;
        pde_addr = get_hphys(cs, pde_addr, MMU_DATA_STORE, NULL);
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        ptep = pde | PG_NX_MASK;

        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            pte_addr = pde_addr;

            /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
             * Leave bits 20-13 in place for setting accessed/dirty bits below.
             */
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            rsvd_mask = 0x200000;
            goto do_check_protect_pse36;
        }

        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }

        /* page table entry */
        pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
            a20_mask;
        pte_addr = get_hphys(cs, pte_addr, MMU_DATA_STORE, NULL);
        pte = x86_ldl_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep &= pte | PG_NX_MASK;
        page_size = 4096;
        rsvd_mask = 0;
    }

 do_check_protect:
    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
 do_check_protect_pse36:
    if (pte & rsvd_mask) {
        goto do_fault_rsvd;
    }
    ptep ^= PG_NX_MASK;

    /* can the page be put in the TLB?  prot will tell us */
    if (is_user && !(ptep & PG_USER_MASK)) {
        goto do_fault_protect;
    }

    prot = 0;
    if (mmu_idx != MMU_KSMAP_IDX || !(ptep & PG_USER_MASK)) {
        prot |= PAGE_READ;
        if ((ptep & PG_RW_MASK) || (!is_user && !(env->cr[0] & CR0_WP_MASK))) {
            prot |= PAGE_WRITE;
        }
    }
    if (!(ptep & PG_NX_MASK) &&
        (mmu_idx == MMU_USER_IDX ||
         !((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)))) {
        prot |= PAGE_EXEC;
    }

    if (!(env->hflags & HF_LMA_MASK)) {
        pkr = 0;
    } else if (ptep & PG_USER_MASK) {
        pkr = env->cr[4] & CR4_PKE_MASK ? env->pkru : 0;
    } else {
        pkr = env->cr[4] & CR4_PKS_MASK ? env->pkrs : 0;
    }
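
    /*
     * PKRU/PKRS hold sixteen 2-bit fields, one per protection key taken
     * from PTE bits 62:59: bit 0 of a field is AD (access disable) and
     * bit 1 is WD (write disable), which is what the pk/pkr_ad/pkr_wd
     * arithmetic below extracts.
     */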
    if (pkr) {
        uint32_t pk = (pte & PG_PKRU_MASK) >> PG_PKRU_BIT;
        uint32_t pkr_ad = (pkr >> pk * 2) & 1;
        uint32_t pkr_wd = (pkr >> pk * 2) & 2;
        uint32_t pkr_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

        if (pkr_ad) {
            pkr_prot &= ~(PAGE_READ | PAGE_WRITE);
        } else if (pkr_wd && (is_user || env->cr[0] & CR0_WP_MASK)) {
            pkr_prot &= ~PAGE_WRITE;
        }

        prot &= pkr_prot;
        if ((pkr_prot & (1 << is_write1)) == 0) {
            assert(is_write1 != 2);
            error_code |= PG_ERROR_PK_MASK;
            goto do_fault_protect;
        }
    }

    if ((prot & (1 << is_write1)) == 0) {
        goto do_fault_protect;
    }

    /* yes, it can! */
    is_dirty = is_write && !(pte & PG_DIRTY_MASK);
    if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
        pte |= PG_ACCESSED_MASK;
        if (is_dirty) {
            pte |= PG_DIRTY_MASK;
        }
        x86_stl_phys_notdirty(cs, pte_addr, pte);
    }

    if (!(pte & PG_DIRTY_MASK)) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        assert(!is_write);
        prot &= ~PAGE_WRITE;
    }

 do_mapping:
    pte = pte & a20_mask;

    /* align to page_size */
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = addr & (page_size - 1);
    paddr = get_hphys(cs, pte + page_offset, is_write1, &prot);

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    vaddr = addr & TARGET_PAGE_MASK;
    paddr &= TARGET_PAGE_MASK;

    assert(prot & (1 << is_write1));
    tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env),
                            prot, mmu_idx, page_size);
    return 0;
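
    /*
     * The fall-through labels below build the #PF error code reported to
     * the guest: P (fault on a present page), W/R (write), U/S (user mode),
     * RSVD (reserved bit set), I/D (instruction fetch) and PK (protection
     * keys), matching the PG_ERROR_* masks OR-ed in along each path.
     */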
 do_fault_rsvd:
    error_code |= PG_ERROR_RSVD_MASK;
 do_fault_protect:
    error_code |= PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user) {
        error_code |= PG_ERROR_U_MASK;
    }
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK))) {
        error_code |= PG_ERROR_I_D_MASK;
    }
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        x86_stq_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    cs->exception_index = EXCP0E_PAGE;
    return 1;
}
#endif /* !CONFIG_USER_ONLY */

bool x86_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#ifdef CONFIG_USER_ONLY
    /* user mode only emulation */
    env->cr[2] = addr;
    env->error_code = (access_type == MMU_DATA_STORE) << PG_ERROR_W_BIT;
    env->error_code |= PG_ERROR_U_MASK;
    cs->exception_index = EXCP0E_PAGE;
    env->exception_is_int = 0;
    env->exception_next_eip = -1;
    cpu_loop_exit_restore(cs, retaddr);
#else
    env->retaddr = retaddr;
    if (handle_mmu_fault(cs, addr, size, access_type, mmu_idx)) {
        /* FIXME: On error in get_hphys we have already jumped out.  */
        raise_exception_err_ra(env, cs->exception_index,
                               env->error_code, retaddr);
    }
    return true;
#endif
}