/*
 *  x86 exception helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/log.h"
#include "sysemu/sysemu.h"
#include "exec/helper-proto.h"

void helper_raise_interrupt(CPUX86State *env, int intno, int next_eip_addend)
{
    raise_interrupt(env, intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(CPUX86State *env, int exception_index)
{
    raise_exception(env, exception_index);
}

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(CPUX86State *env, int intno, int *error_code,
                           uintptr_t retaddr)
{
    int first_contributory = env->old_exception == 0 ||
                             (env->old_exception >= 10 &&
                              env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                              (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                  env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_GUEST_MASK) {
            cpu_vmexit(env, SVM_EXIT_SHUTDOWN, 0, retaddr); /* does not return */
        }

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
        return EXCP_HLT;
    }
#endif
    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE)) {
        env->old_exception = intno;
    }

    return intno;
}

/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * env->eip value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
static void QEMU_NORETURN raise_interrupt2(CPUX86State *env, int intno,
                                           int is_int, int error_code,
                                           int next_eip_addend, uintptr_t retaddr)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    if (!is_int) {
        cpu_svm_check_intercept_param(env, SVM_EXIT_EXCP_BASE + intno,
                                      error_code, retaddr);
        intno = check_exception(env, intno, &error_code, retaddr);
    } else {
        cpu_svm_check_intercept_param(env, SVM_EXIT_SWINT, 0, retaddr);
    }

    cs->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit_restore(cs, retaddr);
}

/* shortcuts to generate exceptions */

void QEMU_NORETURN raise_interrupt(CPUX86State *env, int intno, int is_int,
                                   int error_code, int next_eip_addend)
{
    raise_interrupt2(env, intno, is_int, error_code, next_eip_addend, 0);
}

void raise_exception_err(CPUX86State *env, int exception_index,
                         int error_code)
{
    raise_interrupt2(env, exception_index, 0, error_code, 0, 0);
}

void raise_exception_err_ra(CPUX86State *env, int exception_index,
                            int error_code, uintptr_t retaddr)
{
    raise_interrupt2(env, exception_index, 0, error_code, 0, retaddr);
}

void raise_exception(CPUX86State *env, int exception_index)
{
    raise_interrupt2(env, exception_index, 0, 0, 0, 0);
}

void raise_exception_ra(CPUX86State *env, int exception_index, uintptr_t retaddr)
{
    raise_interrupt2(env, exception_index, 0, 0, 0, retaddr);
}

#if defined(CONFIG_USER_ONLY)
int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, int size,
                             int is_write, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    cs->exception_index = EXCP0E_PAGE;
    env->exception_is_int = 0;
    env->exception_next_eip = -1;
    return 1;
}

#else

static hwaddr get_hphys(CPUState *cs, hwaddr gphys, MMUAccessType access_type,
                        int *prot)
{
    CPUX86State *env = &X86_CPU(cs)->env;
    uint64_t rsvd_mask = PG_HI_RSVD_MASK;
    uint64_t ptep, pte;
    uint64_t exit_info_1 = 0;
    target_ulong pde_addr, pte_addr;
    uint32_t page_offset;
    int page_size;

    if (likely(!(env->hflags2 & HF2_NPT_MASK))) {
        return gphys;
    }

    if (!(env->nested_pg_mode & SVM_NPT_NXE)) {
        rsvd_mask |= PG_NX_MASK;
    }

    if (env->nested_pg_mode & SVM_NPT_PAE) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->nested_pg_mode & SVM_NPT_LMA) {
            uint64_t pml5e;
            uint64_t pml4e_addr, pml4e;
            pml5e = env->nested_cr3;
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;

            pml4e_addr = (pml5e & PG_ADDRESS_MASK) +
                    (((gphys >> 39) & 0x1ff) << 3);
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
                goto do_fault_rsvd;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
            }
            ptep &= pml4e ^ PG_NX_MASK;
            pdpe_addr = (pml4e & PG_ADDRESS_MASK) +
                    (((gphys >> 30) & 0x1ff) << 3);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pdpe & rsvd_mask) {
                goto do_fault_rsvd;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte_addr = pdpe_addr;
                pte = pdpe;
                goto do_check_protect;
            }
        } else
#endif
        {
            pdpe_addr = (env->nested_cr3 & ~0x1f) + ((gphys >> 27) & 0x18);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            rsvd_mask |= PG_HI_USER_MASK;
            if (pdpe & (rsvd_mask | PG_NX_MASK)) {
                goto do_fault_rsvd;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = (pdpe & PG_ADDRESS_MASK) + (((gphys >> 21) & 0x1ff) << 3);
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pde & rsvd_mask) {
            goto do_fault_rsvd;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte_addr = pde_addr;
            pte = pde;
            goto do_check_protect;
        }
        /* 4 KB page */
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }
        pte_addr = (pde & PG_ADDRESS_MASK) + (((gphys >> 12) & 0x1ff) << 3);
        pte = x86_ldq_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        /* combine pde and pte nx, user and rw protections */
        ptep &= pte ^ PG_NX_MASK;
        page_size = 4096;
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = (env->nested_cr3 & ~0xfff) + ((gphys >> 20) & 0xffc);
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        ptep = pde | PG_NX_MASK;

        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            pte_addr = pde_addr;

            /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
             * Leave bits 20-13 in place for setting accessed/dirty bits below.
             */
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            rsvd_mask = 0x200000;
            goto do_check_protect_pse36;
        }

        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }

        /* page table entry */
        pte_addr = (pde & ~0xfff) + ((gphys >> 10) & 0xffc);
        pte = x86_ldl_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep &= pte | PG_NX_MASK;
        page_size = 4096;
        rsvd_mask = 0;
    }

 do_check_protect:
    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
 do_check_protect_pse36:
    if (pte & rsvd_mask) {
        goto do_fault_rsvd;
    }
    ptep ^= PG_NX_MASK;
    if (!(ptep & PG_USER_MASK)) {
        goto do_fault_protect;
    }
    if (ptep & PG_NX_MASK) {
        if (access_type == MMU_INST_FETCH) {
            goto do_fault_protect;
        }
        *prot &= ~PAGE_EXEC;
    }
    if (!(ptep & PG_RW_MASK)) {
        if (access_type == MMU_DATA_STORE) {
            goto do_fault_protect;
        }
        *prot &= ~PAGE_WRITE;
    }

    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = gphys & (page_size - 1);
    return pte + page_offset;

 do_fault_rsvd:
    exit_info_1 |= SVM_NPTEXIT_RSVD;
 do_fault_protect:
    exit_info_1 |= SVM_NPTEXIT_P;
 do_fault:
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 gphys);
    exit_info_1 |= SVM_NPTEXIT_US;
    if (access_type == MMU_DATA_STORE) {
        exit_info_1 |= SVM_NPTEXIT_RW;
    } else if (access_type == MMU_INST_FETCH) {
        exit_info_1 |= SVM_NPTEXIT_ID;
    }
    if (prot) {
        exit_info_1 |= SVM_NPTEXIT_GPA;
    } else { /* page table access */
        exit_info_1 |= SVM_NPTEXIT_GPT;
    }
    cpu_vmexit(env, SVM_EXIT_NPF, exit_info_1, env->retaddr);
}

/* return value:
 * -1 = cannot handle fault
 * 0  = nothing more to do
 * 1  = generate PF fault
 */
int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, int size,
                             int is_write1, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t ptep, pte;
    int32_t a20_mask;
    target_ulong pde_addr, pte_addr;
    int error_code = 0;
    int is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint64_t rsvd_mask = PG_HI_RSVD_MASK;
    uint32_t page_offset;
    target_ulong vaddr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;
    a20_mask = x86_get_a20_mask(env);
    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
#ifdef TARGET_X86_64
        if (!(env->hflags & HF_LMA_MASK)) {
            /* Without long mode we can only address 32bits in real mode */
            pte = (uint32_t)pte;
        }
#endif
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (!(env->efer & MSR_EFER_NXE)) {
        rsvd_mask |= PG_NX_MASK;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            bool la57 = env->cr[4] & CR4_LA57_MASK;
            uint64_t pml5e_addr, pml5e;
            uint64_t pml4e_addr, pml4e;
            int32_t sext;
            /* test virtual address sign extension */
            sext = la57 ? (int64_t)addr >> 56 : (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                cs->exception_index = EXCP0D_GPF;
                return 1;
            }

            if (la57) {
                pml5e_addr = ((env->cr[3] & ~0xfff) +
                        (((addr >> 48) & 0x1ff) << 3)) & a20_mask;
                pml5e_addr = get_hphys(cs, pml5e_addr, MMU_DATA_STORE, NULL);
                pml5e = x86_ldq_phys(cs, pml5e_addr);
                if (!(pml5e & PG_PRESENT_MASK)) {
                    goto do_fault;
                }
                if (pml5e & (rsvd_mask | PG_PSE_MASK)) {
                    goto do_fault_rsvd;
                }
                if (!(pml5e & PG_ACCESSED_MASK)) {
                    pml5e |= PG_ACCESSED_MASK;
                    x86_stl_phys_notdirty(cs, pml5e_addr, pml5e);
                }
                ptep = pml5e ^ PG_NX_MASK;
            } else {
                pml5e = env->cr[3];
                ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
            }

            pml4e_addr = ((pml5e & PG_ADDRESS_MASK) +
                    (((addr >> 39) & 0x1ff) << 3)) & a20_mask;
            pml4e_addr = get_hphys(cs, pml4e_addr, MMU_DATA_STORE, NULL);
            pml4e = x86_ldq_phys(cs, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pml4e & (rsvd_mask | PG_PSE_MASK)) {
                goto do_fault_rsvd;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pml4e_addr, pml4e);
            }
            ptep &= pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PG_ADDRESS_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                a20_mask;
            pdpe_addr = get_hphys(cs, pdpe_addr, MMU_DATA_STORE, NULL);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            if (pdpe & rsvd_mask) {
                goto do_fault_rsvd;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                x86_stl_phys_notdirty(cs, pdpe_addr, pdpe);
            }
            if (pdpe & PG_PSE_MASK) {
                /* 1 GB page */
                page_size = 1024 * 1024 * 1024;
                pte_addr = pdpe_addr;
                pte = pdpe;
                goto do_check_protect;
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                a20_mask;
            pdpe_addr = get_hphys(cs, pdpe_addr, MMU_DATA_STORE, NULL);
            pdpe = x86_ldq_phys(cs, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                goto do_fault;
            }
            rsvd_mask |= PG_HI_USER_MASK;
            if (pdpe & (rsvd_mask | PG_NX_MASK)) {
                goto do_fault_rsvd;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PG_ADDRESS_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            a20_mask;
        pde_addr = get_hphys(cs, pde_addr, MMU_DATA_STORE, NULL);
        pde = x86_ldq_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pde & rsvd_mask) {
            goto do_fault_rsvd;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte_addr = pde_addr;
            pte = pde;
            goto do_check_protect;
        }
        /* 4 KB page */
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }
        pte_addr = ((pde & PG_ADDRESS_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
            a20_mask;
        pte_addr = get_hphys(cs, pte_addr, MMU_DATA_STORE, NULL);
        pte = x86_ldq_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        if (pte & rsvd_mask) {
            goto do_fault_rsvd;
        }
        /* combine pde and pte nx, user and rw protections */
        ptep &= pte ^ PG_NX_MASK;
        page_size = 4096;
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            a20_mask;
        pde_addr = get_hphys(cs, pde_addr, MMU_DATA_STORE, NULL);
        pde = x86_ldl_phys(cs, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        ptep = pde | PG_NX_MASK;

        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            pte_addr = pde_addr;

            /* Bits 20-13 provide bits 39-32 of the address, bit 21 is reserved.
             * Leave bits 20-13 in place for setting accessed/dirty bits below.
             */
            pte = pde | ((pde & 0x1fe000LL) << (32 - 13));
            rsvd_mask = 0x200000;
            goto do_check_protect_pse36;
        }

        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            x86_stl_phys_notdirty(cs, pde_addr, pde);
        }

        /* page table entry */
        pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
            a20_mask;
        pte_addr = get_hphys(cs, pte_addr, MMU_DATA_STORE, NULL);
        pte = x86_ldl_phys(cs, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep &= pte | PG_NX_MASK;
        page_size = 4096;
        rsvd_mask = 0;
    }

 do_check_protect:
    rsvd_mask |= (page_size - 1) & PG_ADDRESS_MASK & ~PG_PSE_PAT_MASK;
 do_check_protect_pse36:
    if (pte & rsvd_mask) {
        goto do_fault_rsvd;
    }
    ptep ^= PG_NX_MASK;

    /* can the page be put in the TLB? prot will tell us */
    if (is_user && !(ptep & PG_USER_MASK)) {
        goto do_fault_protect;
    }
    prot = 0;
    if (mmu_idx != MMU_KSMAP_IDX || !(ptep & PG_USER_MASK)) {
        prot |= PAGE_READ;
        if ((ptep & PG_RW_MASK) || (!is_user && !(env->cr[0] & CR0_WP_MASK))) {
            prot |= PAGE_WRITE;
        }
    }
    if (!(ptep & PG_NX_MASK) &&
        (mmu_idx == MMU_USER_IDX ||
         !((env->cr[4] & CR4_SMEP_MASK) && (ptep & PG_USER_MASK)))) {
        prot |= PAGE_EXEC;
    }
    if ((env->cr[4] & CR4_PKE_MASK) && (env->hflags & HF_LMA_MASK) &&
        (ptep & PG_USER_MASK) && env->pkru) {
        uint32_t pk = (pte & PG_PKRU_MASK) >> PG_PKRU_BIT;
        uint32_t pkru_ad = (env->pkru >> pk * 2) & 1;
        uint32_t pkru_wd = (env->pkru >> pk * 2) & 2;
        uint32_t pkru_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        if (pkru_ad) {
            pkru_prot &= ~(PAGE_READ | PAGE_WRITE);
        } else if (pkru_wd && (is_user || env->cr[0] & CR0_WP_MASK)) {
            pkru_prot &= ~PAGE_WRITE;
        }

        prot &= pkru_prot;
        if ((pkru_prot & (1 << is_write1)) == 0) {
            assert(is_write1 != 2);
            error_code |= PG_ERROR_PK_MASK;
            goto do_fault_protect;
        }
    }

    if ((prot & (1 << is_write1)) == 0) {
        goto do_fault_protect;
    }

    /* yes, it can! */
    is_dirty = is_write && !(pte & PG_DIRTY_MASK);
    if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
        pte |= PG_ACCESSED_MASK;
        if (is_dirty) {
            pte |= PG_DIRTY_MASK;
        }
        x86_stl_phys_notdirty(cs, pte_addr, pte);
    }

    if (!(pte & PG_DIRTY_MASK)) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        assert(!is_write);
        prot &= ~PAGE_WRITE;
    }

 do_mapping:
    pte = pte & a20_mask;

    /* align to page_size */
    pte &= PG_ADDRESS_MASK & ~(page_size - 1);
    page_offset = addr & (page_size - 1);
    paddr = get_hphys(cs, pte + page_offset, is_write1, &prot);

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    vaddr = addr & TARGET_PAGE_MASK;
    paddr &= TARGET_PAGE_MASK;

    assert(prot & (1 << is_write1));
    tlb_set_page_with_attrs(cs, vaddr, paddr, cpu_get_mem_attrs(env),
                            prot, mmu_idx, page_size);
    return 0;
 do_fault_rsvd:
    error_code |= PG_ERROR_RSVD_MASK;
 do_fault_protect:
    error_code |= PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user) {
        error_code |= PG_ERROR_U_MASK;
    }
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK))) {
        error_code |= PG_ERROR_I_D_MASK;
    }
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        x86_stq_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    cs->exception_index = EXCP0E_PAGE;
    return 1;
}
#endif