/*
 * x86 exception helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/log.h"
#include "sysemu/runstate.h"
#include "exec/helper-proto.h"
#include "helper-tcg.h"
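
/*
 * TCG helpers: entry points invoked from translated code when a guest
 * instruction needs to raise an interrupt or an exception.
 */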
G_NORETURN void helper_raise_interrupt(CPUX86State *env, int intno,
                                       int next_eip_addend)
{
    raise_interrupt(env, intno, 1, 0, next_eip_addend);
}

G_NORETURN void helper_raise_exception(CPUX86State *env, int exception_index)
{
    raise_exception(env, exception_index);
}

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(CPUX86State *env, int intno, int *error_code,
                           uintptr_t retaddr)
{
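    /*
     * Per the Intel SDM, vectors 0 (#DE) and 10-13 (#TS, #NP, #SS, #GP)
     * form the "contributory" exception class used to decide whether a
     * nested exception escalates to a double fault.
     */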
    int first_contributory = env->old_exception == 0 ||
                             (env->old_exception >= 10 &&
                              env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                              (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                  env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_GUEST_MASK) {
            cpu_vmexit(env, SVM_EXIT_SHUTDOWN, 0, retaddr); /* does not return */
        }

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
        return EXCP_HLT;
    }
#endif
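
    /*
     * A contributory exception raised while delivering another contributory
     * exception, or a page fault raised while delivering a contributory
     * exception or another page fault, escalates to a double fault (#DF).
     */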
    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE)) {
        env->old_exception = intno;
    }

    return intno;
}

/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * env->eip value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
static G_NORETURN
void raise_interrupt2(CPUX86State *env, int intno,
                      int is_int, int error_code,
                      int next_eip_addend,
                      uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);
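
    /*
     * Hardware exceptions (is_int == 0) are checked against the SVM
     * exception intercepts and may be promoted to a double fault; software
     * interrupts only trigger the SVM software-interrupt intercept.
     */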
    if (!is_int) {
        cpu_svm_check_intercept_param(env, SVM_EXIT_EXCP_BASE + intno,
                                      error_code, retaddr);
        intno = check_exception(env, intno, &error_code, retaddr);
    } else {
        cpu_svm_check_intercept_param(env, SVM_EXIT_SWINT, 0, retaddr);
    }

    cs->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
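    /*
     * cpu_loop_exit_restore() uses retaddr (when non-zero) to restore the
     * guest CPU state from the host return address before unwinding back
     * to the main CPU loop, which then delivers the pending exception.
     */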
    cpu_loop_exit_restore(cs, retaddr);
}

/* shortcuts to generate exceptions */
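
/*
 * The *_ra variants carry a host return address so the guest state can be
 * restored precisely at the faulting instruction; the others pass
 * retaddr == 0, meaning no state restoration is needed.
 */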

G_NORETURN void raise_interrupt(CPUX86State *env, int intno, int is_int,
                                int error_code, int next_eip_addend)
{
    raise_interrupt2(env, intno, is_int, error_code, next_eip_addend, 0);
}

G_NORETURN void raise_exception_err(CPUX86State *env, int exception_index,
                                    int error_code)
{
    raise_interrupt2(env, exception_index, 0, error_code, 0, 0);
}

G_NORETURN void raise_exception_err_ra(CPUX86State *env, int exception_index,
                                       int error_code, uintptr_t retaddr)
{
    raise_interrupt2(env, exception_index, 0, error_code, 0, retaddr);
}

G_NORETURN void raise_exception(CPUX86State *env, int exception_index)
{
    raise_interrupt2(env, exception_index, 0, 0, 0, 0);
}

G_NORETURN void raise_exception_ra(CPUX86State *env, int exception_index,
                                   uintptr_t retaddr)
{
    raise_interrupt2(env, exception_index, 0, 0, 0, retaddr);
}

G_NORETURN void handle_unaligned_access(CPUX86State *env, vaddr vaddr,
                                        MMUAccessType access_type,
                                        uintptr_t retaddr)
{
    /*
     * Unaligned accesses are currently only triggered by SSE/AVX
     * instructions that impose alignment requirements on memory
     * operands. These instructions raise #GP(0) upon accessing an
     * unaligned address.
     */
    raise_exception_ra(env, EXCP0D_GPF, retaddr);
}