/*
 * x86 exception helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/log.h"
#include "sysemu/runstate.h"
#include "exec/helper-proto.h"
#include "helper-tcg.h"
void helper_raise_interrupt(CPUX86State *env, int intno, int next_eip_addend)
{
    raise_interrupt(env, intno, 1, 0, next_eip_addend);
}
void helper_raise_exception(CPUX86State *env, int exception_index)
{
    raise_exception(env, exception_index);
}
/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
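/*
 * Vector 0 (#DE) and vectors 10-13 (#TS, #NP, #SS, #GP) form the
 * "contributory" class: two contributory faults in a row, or a page
 * fault followed by a page fault or a contributory fault, escalate to
 * a double fault (#DF); a fault during #DF delivery is a triple fault.
 */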
static int check_exception(CPUX86State *env, int intno, int *error_code,
                           uintptr_t retaddr)
{
    int first_contributory = env->old_exception == 0 ||
                             (env->old_exception >= 10 &&
                              env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                              (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                  env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_GUEST_MASK) {
            cpu_vmexit(env, SVM_EXIT_SHUTDOWN, 0, retaddr); /* does not return */
        }

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
        /* the reset is handled in the main loop; halt the CPU meanwhile */
        return EXCP_HLT;
    }
#endif

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE)) {
        env->old_exception = intno;
    }

    return intno;
}
/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * env->eip value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
static void QEMU_NORETURN raise_interrupt2(CPUX86State *env, int intno,
                                           int is_int, int error_code,
                                           int next_eip_addend,
                                           uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    if (!is_int) {
        /* hardware exception: check the SVM intercept, then maybe escalate */
        cpu_svm_check_intercept_param(env, SVM_EXIT_EXCP_BASE + intno,
                                      error_code, retaddr);
        intno = check_exception(env, intno, &error_code, retaddr);
    } else {
        /* software interrupt (INTn) */
        cpu_svm_check_intercept_param(env, SVM_EXIT_SWINT, 0, retaddr);
    }

    cs->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit_restore(cs, retaddr);
}
/* shortcuts to generate exceptions */
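/*
 * The _err variants carry a hardware error code, and the _ra variants
 * carry a host return address that lets the TCG unwinder restore the
 * guest state of the faulting instruction (retaddr == 0 means the
 * current state is already consistent).
 */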
void QEMU_NORETURN raise_interrupt(CPUX86State *env, int intno, int is_int,
                                   int error_code, int next_eip_addend)
{
    raise_interrupt2(env, intno, is_int, error_code, next_eip_addend, 0);
}
void raise_exception_err(CPUX86State *env, int exception_index,
                         int error_code)
{
    raise_interrupt2(env, exception_index, 0, error_code, 0, 0);
}
void raise_exception_err_ra(CPUX86State *env, int exception_index,
                            int error_code, uintptr_t retaddr)
{
    raise_interrupt2(env, exception_index, 0, error_code, 0, retaddr);
}
void raise_exception(CPUX86State *env, int exception_index)
{
    raise_interrupt2(env, exception_index, 0, 0, 0, 0);
}
void raise_exception_ra(CPUX86State *env, int exception_index, uintptr_t retaddr)
{
    raise_interrupt2(env, exception_index, 0, 0, 0, retaddr);
}
#if !defined(CONFIG_USER_ONLY)
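/*
 * get_hphys translates a guest-physical address to a host-physical one
 * when SVM nested paging is active, by walking the nested page tables
 * rooted at nested_cr3 in whatever mode the host enabled (legacy
 * 32-bit, PAE, or long mode). On a nested fault it does not raise a
 * guest #PF but exits to the hypervisor with SVM_EXIT_NPF.
 */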
static hwaddr get_hphys(CPUState *cs, hwaddr gphys, MMUAccessType access_type,
                        int *prot)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t rsvd_mask = PG_ADDRESS_MASK & ~MAKE_64BIT_MASK(0, cpu->phys_bits);
    uint64_t ptep, pte;
    uint64_t exit_info_1 = 0;
    target_ulong pde_addr, pte_addr;
    uint32_t page_offset;
    int page_size;

    /* Fast path: without nested paging, guest-physical is host-physical. */
    if (likely(!(env->hflags2 & HF2_NPT_MASK))) {
        return gphys;
    }

    if (!(env->nested_pg_mode & SVM_NPT_NXE)) {
        rsvd_mask |= PG_NX_MASK;
    }

    if (env->nested_pg_mode & SVM_NPT_PAE) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->nested_pg_mode & SVM_NPT_LMA) {
            uint64_t pml5e;
            uint64_t pml4e_addr, pml4e;

            pml5e = env->nested_cr3;
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;

            pml4e_addr = (pml5e & PG_ADDRESS_MASK) +
                    (((gphys >> 39) & 0x1ff) << 3);
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
                goto do_fault_rsvd;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
            }
            /*
             * NX is flipped before the AND so that all permission bits
             * accumulate as "allowed" across the levels; it is flipped
             * back before the final protection check.
             */
            ptep &= pml4e ^ PG_NX_MASK;
            pdpe_addr = (pml4e & PG_ADDRESS_MASK) +
                    (((gphys >> 30) & 0x1ff) << 3);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pdpe & rsvd_mask) {
                goto do_fault_rsvd;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte_addr = pdpe_addr;
                pte = pdpe;
                goto do_check_protect;
            }
        } else
#endif
        {
            pdpe_addr = (env->nested_cr3 & ~0x1f) + ((gphys >> 27) & 0x18);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            rsvd_mask |= PG_HI_USER_MASK;
            if (pdpe & (rsvd_mask | PG_NX_MASK)) {
                goto do_fault_rsvd;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = (pdpe & PG_ADDRESS_MASK) + (((gphys >> 21) & 0x1ff) << 3);
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pde & rsvd_mask) {
            goto do_fault_rsvd;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte_addr = pde_addr;
            pte = pde;
            goto do_check_protect;
        }
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }

        pte_addr = (pde & PG_ADDRESS_MASK) + (((gphys >> 12) & 0x1ff) << 3);
        pte = x86_ldq_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        /* combine pde and pte nx, user and rw protections */
        ptep &= pte ^ PG_NX_MASK;
        page_size = 4096;
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = (env->nested_cr3 & ~0xfff) + ((gphys >> 20) & 0xffc);
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        ptep = pde | PG_NX_MASK;

        /* if host cr4 PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->nested_pg_mode & SVM_NPT_PSE)) {
            page_size = 4096 * 1024;
            pte_addr = pde_addr;

            /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
             * Leave bits 20-13 in place for setting accessed/dirty bits below.
             */
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            rsvd_mask = 0x200000;
            goto do_check_protect_pse36;
        }

        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }

        /* page table entry */
        pte_addr = (pde & ~0xfff) + ((gphys >> 10) & 0xffc);
        pte = x86_ldl_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep &= pte | PG_NX_MASK;
        page_size = 4096;
        rsvd_mask = 0;
    }

 do_check_protect:
    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
 do_check_protect_pse36:
    if (pte & rsvd_mask) {
        goto do_fault_rsvd;
    }
    ptep ^= PG_NX_MASK;

    if (!(ptep & PG_USER_MASK)) {
        goto do_fault_protect;
    }
    if (ptep & PG_NX_MASK) {
        if (access_type == MMU_INST_FETCH) {
            goto do_fault_protect;
        }
        *prot &= ~PAGE_EXEC;
    }
    if (!(ptep & PG_RW_MASK)) {
        if (access_type == MMU_DATA_STORE) {
            goto do_fault_protect;
        }
        *prot &= ~PAGE_WRITE;
    }

    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = gphys & (page_size - 1);
    return pte + page_offset;

 do_fault_rsvd:
    exit_info_1 |= SVM_NPTEXIT_RSVD;
 do_fault_protect:
    exit_info_1 |= SVM_NPTEXIT_P;
 do_fault:
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 gphys);
    exit_info_1 |= SVM_NPTEXIT_US;
    if (access_type == MMU_DATA_STORE) {
        exit_info_1 |= SVM_NPTEXIT_RW;
    } else if (access_type == MMU_INST_FETCH) {
        exit_info_1 |= SVM_NPTEXIT_ID;
    }
    if (prot) {
        exit_info_1 |= SVM_NPTEXIT_GPA;
    } else { /* page table access */
        exit_info_1 |= SVM_NPTEXIT_GPT;
    }
    cpu_vmexit(env, SVM_EXIT_NPF, exit_info_1, env->retaddr);
}
/* return value:
 * -1 = cannot handle fault
 * 0  = nothing more to do
 * 1  = generate PF fault
 */
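/*
 * is_write1 is the raw access type (0 = read, 1 = write, 2 = fetch),
 * so "1 << is_write1" lines up with PAGE_READ/PAGE_WRITE/PAGE_EXEC in
 * the protection checks below; is_write keeps only the write bit.
 */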
static int handle_mmu_fault(CPUState *cs, vaddr addr, int size,
                            int is_write1, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t ptep, pte;
    int32_t a20_mask;
    target_ulong pde_addr, pte_addr;
    int error_code = 0;
    int is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint64_t rsvd_mask = PG_ADDRESS_MASK & ~MAKE_64BIT_MASK(0, cpu->phys_bits);
    uint32_t page_offset;
    target_ulong vaddr;
    uint32_t pkr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    a20_mask = x86_get_a20_mask(env);
    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
#ifdef TARGET_X86_64
        if (!(env->hflags & HF_LMA_MASK)) {
            /* Without long mode we can only address 32bits in real mode */
            pte = (uint32_t)pte;
        }
#endif
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (!(env->efer & MSR_EFER_NXE)) {
        rsvd_mask |= PG_NX_MASK;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            bool la57 = env->cr[4] & CR4_LA57_MASK;
            uint64_t pml5e_addr, pml5e;
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = la57 ? (int64_t)addr >> 56 : (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                cs->exception_index = EXCP0D_GPF;
                return 1;
            }

            if (la57) {
                pml5e_addr = ((env->cr[3] & ~0xfff) +
                        (((addr >> 48) & 0x1ff) << 3)) & a20_mask;
                pml5e_addr = get_hphys(cs, pml5e_addr, MMU_DATA_STORE, NULL);
                pml5e = x86_ldq_phys(cs, pml5e_addr);
                if (!(pml5e & PG_PRESENT_MASK)) {
                    goto do_fault;
                }
                if (pml5e & (rsvd_mask | PG_PSE_MASK)) {
                    goto do_fault_rsvd;
                }
                if (!(pml5e & PG_ACCESSED_MASK)) {
                    pml5e |= PG_ACCESSED_MASK;
                    x86_stl_phys_notdirty(cs, pml5e_addr, pml5e);
                }
                /* NX flipped so permissions accumulate with AND below */
                ptep = pml5e ^ PG_NX_MASK;
            } else {
                pml5e = env->cr[3];
                ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
            }

            pml4e_addr = ((pml5e & PG_ADDRESS_MASK) +
                    (((addr >> 39) & 0x1ff) << 3)) & a20_mask;
            pml4e_addr = get_hphys(cs, pml4e_addr, MMU_DATA_STORE, NULL);
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
                goto do_fault_rsvd;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
            }
            ptep &= pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                a20_mask;
            pdpe_addr = get_hphys(cs, pdpe_addr, MMU_DATA_STORE, NULL);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pdpe & rsvd_mask) {
                goto do_fault_rsvd;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte_addr = pdpe_addr;
                pte = pdpe;
                goto do_check_protect;
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                a20_mask;
            pdpe_addr = get_hphys(cs, pdpe_addr, MMU_DATA_STORE, NULL);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            rsvd_mask |= PG_HI_USER_MASK;
            if (pdpe & (rsvd_mask | PG_NX_MASK)) {
                goto do_fault_rsvd;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            a20_mask;
        pde_addr = get_hphys(cs, pde_addr, MMU_DATA_STORE, NULL);
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pde & rsvd_mask) {
            goto do_fault_rsvd;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte_addr = pde_addr;
            pte = pde;
            goto do_check_protect;
        }
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }
        pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
            a20_mask;
        pte_addr = get_hphys(cs, pte_addr, MMU_DATA_STORE, NULL);
        pte = x86_ldq_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        /* combine pde and pte nx, user and rw protections */
        ptep &= pte ^ PG_NX_MASK;
        page_size = 4096;
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            a20_mask;
        pde_addr = get_hphys(cs, pde_addr, MMU_DATA_STORE, NULL);
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        ptep = pde | PG_NX_MASK;

        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            pte_addr = pde_addr;

            /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
             * Leave bits 20-13 in place for setting accessed/dirty bits below.
             */
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            rsvd_mask = 0x200000;
            goto do_check_protect_pse36;
        }

        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }

        /* page table entry */
        pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
            a20_mask;
        pte_addr = get_hphys(cs, pte_addr, MMU_DATA_STORE, NULL);
        pte = x86_ldl_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep &= pte | PG_NX_MASK;
        page_size = 4096;
        rsvd_mask = 0;
    }

 do_check_protect:
    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
 do_check_protect_pse36:
    if (pte & rsvd_mask) {
        goto do_fault_rsvd;
    }
    ptep ^= PG_NX_MASK;

    /* can the page be put in the TLB?  prot will tell us */
    if (is_user && !(ptep & PG_USER_MASK)) {
        goto do_fault_protect;
    }

    prot = 0;
    if (mmu_idx != MMU_KSMAP_IDX || !(ptep & PG_USER_MASK)) {
        prot |= PAGE_READ;
        if ((ptep & PG_RW_MASK) || (!is_user && !(env->cr[0] & CR0_WP_MASK))) {
            prot |= PAGE_WRITE;
        }
    }
    if (!(ptep & PG_NX_MASK) &&
        (mmu_idx == MMU_USER_IDX ||
         !((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)))) {
        prot |= PAGE_EXEC;
    }
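    /*
     * Protection keys: PKRU governs user pages (enabled by CR4.PKE),
     * PKRS governs supervisor pages (CR4.PKS); both apply only in long
     * mode. The 4-bit key from the PTE selects two bits in the
     * register: access-disable and write-disable.
     */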
    if (!(env->hflags & HF_LMA_MASK)) {
        pkr = 0;
    } else if (ptep & PG_USER_MASK) {
        pkr = env->cr[4] & CR4_PKE_MASK ? env->pkru : 0;
    } else {
        pkr = env->cr[4] & CR4_PKS_MASK ? env->pkrs : 0;
    }

    if (pkr) {
        uint32_t pk = (pte & PG_PKRU_MASK) >> PG_PKRU_BIT;
        uint32_t pkr_ad = (pkr >> pk * 2) & 1;
        uint32_t pkr_wd = (pkr >> pk * 2) & 2;
        uint32_t pkr_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

        if (pkr_ad) {
            pkr_prot &= ~(PAGE_READ | PAGE_WRITE);
        } else if (pkr_wd && (is_user || env->cr[0] & CR0_WP_MASK)) {
            pkr_prot &= ~PAGE_WRITE;
        }

        prot &= pkr_prot;
        if ((pkr_prot & (1 << is_write1)) == 0) {
            assert(is_write1 != 2);
            error_code |= PG_ERROR_PK_MASK;
            goto do_fault_protect;
        }
    }
    if ((prot & (1 << is_write1)) == 0) {
        goto do_fault_protect;
    }

    /* yes, it can! */
    is_dirty = is_write && !(pte & PG_DIRTY_MASK);
    if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
        pte |= PG_ACCESSED_MASK;
        if (is_dirty) {
            pte |= PG_DIRTY_MASK;
        }
        x86_stl_phys_notdirty(cs, pte_addr, pte);
    }

    if (!(pte & PG_DIRTY_MASK)) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        assert(!is_write);
        prot &= ~PAGE_WRITE;
    }

 do_mapping:
    pte = pte & a20_mask;

    /* align to page_size */
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = addr & (page_size - 1);
    paddr = get_hphys(cs, pte + page_offset, is_write1, &prot);

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    vaddr = addr & TARGET_PAGE_MASK;
    paddr &= TARGET_PAGE_MASK;

    assert(prot & (1 << is_write1));
    tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env),
                            prot, mmu_idx, page_size);
    return 0;
 do_fault_rsvd:
    error_code |= PG_ERROR_RSVD_MASK;
 do_fault_protect:
    error_code |= PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user) {
        error_code |= PG_ERROR_U_MASK;
    }
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK))) {
        error_code |= PG_ERROR_I_D_MASK;
    }
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        x86_stq_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    cs->exception_index = EXCP0E_PAGE;
    return 1;
}
#endif
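/*
 * x86_cpu_tlb_fill is the TCG callback for TLB misses. In user-only
 * builds every miss is turned directly into a #PF; in system emulation
 * the walk above either installs a TLB entry or records an exception
 * that is re-raised here with the faulting host return address.
 */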
bool x86_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#ifdef CONFIG_USER_ONLY
    /* user mode only emulation */
    env->cr[2] = addr;
    env->error_code = (access_type == MMU_DATA_STORE) << PG_ERROR_W_BIT;
    env->error_code |= PG_ERROR_U_MASK;
    cs->exception_index = EXCP0E_PAGE;
    env->exception_is_int = 0;
    env->exception_next_eip = -1;
    cpu_loop_exit_restore(cs, retaddr);
#else
    env->retaddr = retaddr;
    if (handle_mmu_fault(cs, addr, size, access_type, mmu_idx)) {
        /* FIXME: On error in get_hphys we have already jumped out. */
        g_assert(!probe);
        raise_exception_err_ra(env, cs->exception_index,
                               env->error_code, retaddr);
    }
    return true;
#endif
}