/*
 *  x86 exception helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/log.h"
#include "sysemu/runstate.h"
#include "exec/helper-proto.h"

void helper_raise_interrupt(CPUX86State *env, int intno, int next_eip_addend)
{
    raise_interrupt(env, intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(CPUX86State *env, int exception_index)
{
    raise_exception(env, exception_index);
}
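
/*
 * The two helpers above are the entry points used by generated code:
 * helper_raise_interrupt() is used for software interrupts (hence
 * is_int = 1 and a next_eip_addend that skips past the INTn instruction),
 * while helper_raise_exception() raises a bare exception without an
 * error code.
 */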

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(CPUX86State *env, int intno, int *error_code,
                           uintptr_t retaddr)
{
    int first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                  env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_GUEST_MASK) {
            cpu_vmexit(env, SVM_EXIT_SHUTDOWN, 0, retaddr); /* does not return */
        }

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
        return EXCP_HLT;
    }
#endif

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE)) {
        env->old_exception = intno;
    }

    return intno;
}
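
/*
 * Worked example of the classification above (a sketch; see the SDM/APM
 * double-fault tables for the full matrix): vectors 0 (#DE) and 10-13
 * (#TS, #NP, #SS, #GP) are "contributory".  A contributory fault raised
 * while another contributory fault is being delivered, or any contributory
 * fault or #PF raised while a #PF is being delivered, is converted to
 * #DF (EXCP08_DBLE) with error code 0.  A further fault while delivering
 * #DF takes the shutdown path above (triple fault).
 */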

/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * env->eip value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
static void QEMU_NORETURN raise_interrupt2(CPUX86State *env, int intno,
                                           int is_int, int error_code,
                                           int next_eip_addend,
                                           uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    if (!is_int) {
        cpu_svm_check_intercept_param(env, SVM_EXIT_EXCP_BASE + intno,
                                      error_code, retaddr);
        intno = check_exception(env, intno, &error_code, retaddr);
    } else {
        cpu_svm_check_intercept_param(env, SVM_EXIT_SWINT, 0, retaddr);
    }

    cs->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit_restore(cs, retaddr);
}
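
/*
 * The next_eip_addend convention: for a software interrupt the translator
 * passes the length of the INTn instruction (e.g. 2 for the two-byte
 * "CD ib" encoding), so exception_next_eip ends up pointing at the
 * following instruction, which is what the interrupt return frame must
 * contain.
 */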

/* shortcuts to generate exceptions */

void QEMU_NORETURN raise_interrupt(CPUX86State *env, int intno, int is_int,
                                   int error_code, int next_eip_addend)
{
    raise_interrupt2(env, intno, is_int, error_code, next_eip_addend, 0);
}

void raise_exception_err(CPUX86State *env, int exception_index,
                         int error_code)
{
    raise_interrupt2(env, exception_index, 0, error_code, 0, 0);
}

void raise_exception_err_ra(CPUX86State *env, int exception_index,
                            int error_code, uintptr_t retaddr)
{
    raise_interrupt2(env, exception_index, 0, error_code, 0, retaddr);
}

void raise_exception(CPUX86State *env, int exception_index)
{
    raise_interrupt2(env, exception_index, 0, 0, 0, 0);
}

void raise_exception_ra(CPUX86State *env, int exception_index, uintptr_t retaddr)
{
    raise_interrupt2(env, exception_index, 0, 0, 0, retaddr);
}
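
/*
 * Typical use from other helpers (illustrative, not taken from this file):
 * the *_ra variants take the host return address so the faulting guest
 * state can be restored before the exception is delivered, e.g.
 *
 *     raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
 *
 * while the plain variants are for call sites where the CPU state is
 * already synchronized.
 */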

#if !defined(CONFIG_USER_ONLY)
static hwaddr get_hphys(CPUState *cs, hwaddr gphys, MMUAccessType access_type,
                        int *prot)
{
    CPUX86State *env = &X86_CPU(cs)->env;
    uint64_t rsvd_mask = PG_HI_RSVD_MASK;
    uint64_t ptep, pte;
    uint64_t exit_info_1 = 0;
    target_ulong pde_addr, pte_addr;
    uint32_t page_offset;
    int page_size;

    if (likely(!(env->hflags2 & HF2_NPT_MASK))) {
        return gphys;
    }

    if (!(env->nested_pg_mode & SVM_NPT_NXE)) {
        rsvd_mask |= PG_NX_MASK;
    }

    if (env->nested_pg_mode & SVM_NPT_PAE) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->nested_pg_mode & SVM_NPT_LMA) {
            uint64_t pml5e;
            uint64_t pml4e_addr, pml4e;

            pml5e = env->nested_cr3;
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;

            pml4e_addr = (pml5e & PG_ADDRESS_MASK) +
                    (((gphys >> 39) & 0x1ff) << 3);
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
                goto do_fault_rsvd;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
            }
            ptep &= pml4e ^ PG_NX_MASK;
            pdpe_addr = (pml4e & PG_ADDRESS_MASK) +
                    (((gphys >> 30) & 0x1ff) << 3);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pdpe & rsvd_mask) {
                goto do_fault_rsvd;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte_addr = pdpe_addr;
                pte = pdpe;
                goto do_check_protect;
            }
        } else
#endif
        {
            pdpe_addr = (env->nested_cr3 & ~0x1f) + ((gphys >> 27) & 0x18);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            rsvd_mask |= PG_HI_USER_MASK;
            if (pdpe & (rsvd_mask | PG_NX_MASK)) {
                goto do_fault_rsvd;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = (pdpe & PG_ADDRESS_MASK) + (((gphys >> 21) & 0x1ff) << 3);
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pde & rsvd_mask) {
            goto do_fault_rsvd;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte_addr = pde_addr;
            pte = pde;
            goto do_check_protect;
        }
        /* 4 KB page */
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }
        pte_addr = (pde & PG_ADDRESS_MASK) + (((gphys >> 12) & 0x1ff) << 3);
        pte = x86_ldq_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        /* combine pde and pte nx, user and rw protections */
        ptep &= pte ^ PG_NX_MASK;
        page_size = 4096;
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = (env->nested_cr3 & ~0xfff) + ((gphys >> 20) & 0xffc);
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        ptep = pde | PG_NX_MASK;

        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            pte_addr = pde_addr;

            /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
             * Leave bits 20-13 in place for setting accessed/dirty bits below.
             */
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            rsvd_mask = 0x200000;
            goto do_check_protect_pse36;
        }
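
        /*
         * PSE-36 illustration (values made up): if pde has bits 20:13 all
         * set and bits 31:22 clear, then (pde & 0x1fe000) << 19 places
         * those eight bits at 39:32, giving a 4MB frame base of
         * 0xff00000000 once the low 22 bits are masked off later.
         * Bit 21 is reserved, hence rsvd_mask = 0x200000.
         */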

        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }

        /* page table entry */
        pte_addr = (pde & ~0xfff) + ((gphys >> 10) & 0xffc);
        pte = x86_ldl_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep &= pte | PG_NX_MASK;
        page_size = 4096;
        rsvd_mask = 0;
    }

 do_check_protect:
    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
 do_check_protect_pse36:
    if (pte & rsvd_mask) {
        goto do_fault_rsvd;
    }
    ptep ^= PG_NX_MASK;

    if (!(ptep & PG_USER_MASK)) {
        goto do_fault_protect;
    }
    if (ptep & PG_NX_MASK) {
        if (access_type == MMU_INST_FETCH) {
            goto do_fault_protect;
        }
        *prot &= ~PAGE_EXEC;
    }
    if (!(ptep & PG_RW_MASK)) {
        if (access_type == MMU_DATA_STORE) {
            goto do_fault_protect;
        }
        *prot &= ~PAGE_WRITE;
    }

    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = gphys & (page_size - 1);
    return pte + page_offset;

 do_fault_rsvd:
    exit_info_1 |= SVM_NPTEXIT_RSVD;
 do_fault_protect:
    exit_info_1 |= SVM_NPTEXIT_P;
 do_fault:
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 gphys);
    exit_info_1 |= SVM_NPTEXIT_US;
    if (access_type == MMU_DATA_STORE) {
        exit_info_1 |= SVM_NPTEXIT_RW;
    } else if (access_type == MMU_INST_FETCH) {
        exit_info_1 |= SVM_NPTEXIT_ID;
    }
    if (prot) {
        exit_info_1 |= SVM_NPTEXIT_GPA;
    } else { /* page table access */
        exit_info_1 |= SVM_NPTEXIT_GPT;
    }
    cpu_vmexit(env, SVM_EXIT_NPF, exit_info_1, env->retaddr);
}
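
/*
 * Note on the walk above: when NPT is active, every guest-physical access
 * goes through get_hphys(), including the guest's own page-table reads in
 * handle_mmu_fault() below.  Those page-table lookups pass prot == NULL,
 * which is how the fault path distinguishes a final translation
 * (SVM_NPTEXIT_GPA) from a page-table access (SVM_NPTEXIT_GPT).
 */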

/* return value:
 * -1 = cannot handle fault
 * 0  = nothing more to do
 * 1  = generate PF fault
 */
static int handle_mmu_fault(CPUState *cs, vaddr addr, int size,
                            int is_write1, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t ptep, pte;
    int32_t a20_mask;
    target_ulong pde_addr, pte_addr;
    int error_code = 0;
    int is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint64_t rsvd_mask = PG_HI_RSVD_MASK;
    uint32_t page_offset;
    target_ulong vaddr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    a20_mask = x86_get_a20_mask(env);
    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
#ifdef TARGET_X86_64
        if (!(env->hflags & HF_LMA_MASK)) {
            /* Without long mode we can only address 32bits in real mode */
            pte = (uint32_t)pte;
        }
#endif
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (!(env->efer & MSR_EFER_NXE)) {
        rsvd_mask |= PG_NX_MASK;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            bool la57 = env->cr[4] & CR4_LA57_MASK;
            uint64_t pml5e_addr, pml5e;
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = la57 ? (int64_t)addr >> 56 : (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                cs->exception_index = EXCP0D_GPF;
                return 1;
            }
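
            /*
             * Canonical-address check, e.g.: with 4-level paging (la57
             * clear) the shift keeps bits 63:47; 0x00007fffffffffff and
             * 0xffff800000000000 give sext == 0 and sext == -1 and are
             * accepted, while 0x0000800000000000 yields sext == 1 and
             * raises #GP.
             */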

            if (la57) {
                pml5e_addr = ((env->cr[3] & ~0xfff) +
                        (((addr >> 48) & 0x1ff) << 3)) & a20_mask;
                pml5e_addr = get_hphys(cs, pml5e_addr, MMU_DATA_STORE, NULL);
                pml5e = x86_ldq_phys(cs, pml5e_addr);
                if (!(pml5e & PG_PRESENT_MASK)) {
                    goto do_fault;
                }
                if (pml5e & (rsvd_mask | PG_PSE_MASK)) {
                    goto do_fault_rsvd;
                }
                if (!(pml5e & PG_ACCESSED_MASK)) {
                    pml5e |= PG_ACCESSED_MASK;
                    x86_stl_phys_notdirty(cs, pml5e_addr, pml5e);
                }
                ptep = pml5e ^ PG_NX_MASK;
            } else {
                pml5e = env->cr[3];
                ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
            }

            pml4e_addr = ((pml5e & PG_ADDRESS_MASK) +
                    (((addr >> 39) & 0x1ff) << 3)) & a20_mask;
            pml4e_addr = get_hphys(cs, pml4e_addr, MMU_DATA_STORE, NULL);
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
                goto do_fault_rsvd;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
            }
            ptep &= pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                a20_mask;
            pdpe_addr = get_hphys(cs, pdpe_addr, MMU_DATA_STORE, NULL);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pdpe & rsvd_mask) {
                goto do_fault_rsvd;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte_addr = pdpe_addr;
                pte = pdpe;
                goto do_check_protect;
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                a20_mask;
            pdpe_addr = get_hphys(cs, pdpe_addr, MMU_DATA_STORE, NULL);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            rsvd_mask |= PG_HI_USER_MASK;
            if (pdpe & (rsvd_mask | PG_NX_MASK)) {
                goto do_fault_rsvd;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            a20_mask;
        pde_addr = get_hphys(cs, pde_addr, MMU_DATA_STORE, NULL);
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pde & rsvd_mask) {
            goto do_fault_rsvd;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte_addr = pde_addr;
            pte = pde;
            goto do_check_protect;
        }
        /* 4 KB page */
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }
        pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
            a20_mask;
        pte_addr = get_hphys(cs, pte_addr, MMU_DATA_STORE, NULL);
        pte = x86_ldq_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        /* combine pde and pte nx, user and rw protections */
        ptep &= pte ^ PG_NX_MASK;
        page_size = 4096;
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            a20_mask;
        pde_addr = get_hphys(cs, pde_addr, MMU_DATA_STORE, NULL);
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        ptep = pde | PG_NX_MASK;

        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            pte_addr = pde_addr;

            /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
             * Leave bits 20-13 in place for setting accessed/dirty bits below.
             */
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            rsvd_mask = 0x200000;
            goto do_check_protect_pse36;
        }

        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }

        /* page table entry */
        pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
            a20_mask;
        pte_addr = get_hphys(cs, pte_addr, MMU_DATA_STORE, NULL);
        pte = x86_ldl_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep &= pte | PG_NX_MASK;
        page_size = 4096;
        rsvd_mask = 0;
    }

 do_check_protect:
    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
 do_check_protect_pse36:
    if (pte & rsvd_mask) {
        goto do_fault_rsvd;
    }
    ptep ^= PG_NX_MASK;

    /* can the page be put in the TLB?  prot will tell us */
    if (is_user && !(ptep & PG_USER_MASK)) {
        goto do_fault_protect;
    }

    prot = 0;
    if (mmu_idx != MMU_KSMAP_IDX || !(ptep & PG_USER_MASK)) {
        prot |= PAGE_READ;
        if ((ptep & PG_RW_MASK) || (!is_user && !(env->cr[0] & CR0_WP_MASK))) {
            prot |= PAGE_WRITE;
        }
    }
    if (!(ptep & PG_NX_MASK) &&
        (mmu_idx == MMU_USER_IDX ||
         !((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)))) {
        prot |= PAGE_EXEC;
    }
    if ((env->cr[4] & CR4_PKE_MASK) && (env->hflags & HF_LMA_MASK) &&
        (ptep & PG_USER_MASK) && env->pkru) {
        uint32_t pk = (pte & PG_PKRU_MASK) >> PG_PKRU_BIT;
        uint32_t pkru_ad = (env->pkru >> pk * 2) & 1;
        uint32_t pkru_wd = (env->pkru >> pk * 2) & 2;
        uint32_t pkru_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

        if (pkru_ad) {
            pkru_prot &= ~(PAGE_READ | PAGE_WRITE);
        } else if (pkru_wd && (is_user || env->cr[0] & CR0_WP_MASK)) {
            pkru_prot &= ~PAGE_WRITE;
        }

        prot &= pkru_prot;
        if ((pkru_prot & (1 << is_write1)) == 0) {
            assert(is_write1 != 2);
            error_code |= PG_ERROR_PK_MASK;
            goto do_fault_protect;
        }
    }
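
    /*
     * Protection-key example: pkru is consulted only for user pages
     * (PG_USER_MASK) in long mode.  With pk = 3 the relevant PKRU bits are
     * 7:6; if AD (bit 6) is set the page loses read and write access, and
     * if only WD (bit 7) is set writes are blocked for user accesses (and
     * for kernel accesses when CR0.WP is set).  Instruction fetches are
     * never blocked by protection keys, hence the assert(is_write1 != 2).
     */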

    if ((prot & (1 << is_write1)) == 0) {
        goto do_fault_protect;
    }

    /* yes, it can! */
    is_dirty = is_write && !(pte & PG_DIRTY_MASK);
    if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
        pte |= PG_ACCESSED_MASK;
        if (is_dirty) {
            pte |= PG_DIRTY_MASK;
        }
        x86_stl_phys_notdirty(cs, pte_addr, pte);
    }

    if (!(pte & PG_DIRTY_MASK)) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        assert(!is_write);
        prot &= ~PAGE_WRITE;
    }

 do_mapping:
    pte = pte & a20_mask;

    /* align to page_size */
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = addr & (page_size - 1);
    paddr = get_hphys(cs, pte + page_offset, is_write1, &prot);

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    vaddr = addr & TARGET_PAGE_MASK;
    paddr &= TARGET_PAGE_MASK;

    assert(prot & (1 << is_write1));
    tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env),
                            prot, mmu_idx, page_size);
    return 0;
 do_fault_rsvd:
    error_code |= PG_ERROR_RSVD_MASK;
 do_fault_protect:
    error_code |= PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user) {
        error_code |= PG_ERROR_U_MASK;
    }
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK))) {
        error_code |= PG_ERROR_I_D_MASK;
    }
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        x86_stq_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    cs->exception_index = EXCP0E_PAGE;
    return 1;
}
#endif

bool x86_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#ifdef CONFIG_USER_ONLY
    /* user mode only emulation */
    env->cr[2] = addr;
    env->error_code = (access_type == MMU_DATA_STORE) << PG_ERROR_W_BIT;
    env->error_code |= PG_ERROR_U_MASK;
    cs->exception_index = EXCP0E_PAGE;
    env->exception_is_int = 0;
    env->exception_next_eip = -1;
    cpu_loop_exit_restore(cs, retaddr);
#else
    env->retaddr = retaddr;
    if (handle_mmu_fault(cs, addr, size, access_type, mmu_idx)) {
        /* FIXME: On error in get_hphys we have already jumped out. */
        raise_exception_err_ra(env, cs->exception_index,
                               env->error_code, retaddr);
    }
    return true;
#endif
}
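
/*
 * Note: the system-mode path either installs a TLB entry or raises #PF
 * itself via raise_exception_err_ra(), so x86_cpu_tlb_fill() always reports
 * success and the "probe" argument is not consulted here.
 */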