i386: split seg_helper into user-only and sysemu parts
[qemu/kevin.git] / target / i386 / tcg / sysemu / seg_helper.c
blobe0d7b32b82c80c2ba621fc7ddc1a90e768a67590
/*
 * x86 segmentation related helpers: (sysemu-only code)
 * TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "tcg/helper-tcg.h"
#ifdef TARGET_X86_64
/*
 * Emulate the SYSCALL instruction (sysemu variant).
 *
 * @env:             CPU state
 * @next_eip_addend: length of the SYSCALL instruction, added to env->eip
 *                   to form the return address saved in RCX/ECX
 *
 * Raises #UD if SYSCALL is not enabled (EFER.SCE clear).  Otherwise loads
 * the flat CS/SS descriptors derived from MSR_STAR[47:32] and transfers
 * control to LSTAR/CSTAR (long mode) or the low 32 bits of STAR (legacy
 * mode), per the AMD64 SYSCALL definition.
 */
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        /* SYSCALL disabled: #UD, unwinding to the guest insn via GETPC() */
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    /* STAR[47:32] holds the kernel CS selector; SS is implicitly CS + 8 */
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        /* Long mode: return RIP in RCX, caller RFLAGS in R11 (RF cleared) */
        env->regs[R_ECX] = env->eip + next_eip_addend;
        env->regs[11] = cpu_compute_eflags(env) & ~RF_MASK;

        /* remember whether the caller was executing 64-bit code */
        code64 = env->hflags & HF_CS64_MASK;

        /* mask flags per MSR_FMASK, always clear RF */
        env->eflags &= ~(env->fmask | RF_MASK);
        cpu_load_eflags(env, env->eflags, 0);
        /* flat 64-bit code segment (L bit set) */
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        /* flat writable data segment for SS */
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        if (code64) {
            env->eip = env->lstar;      /* 64-bit caller -> LSTAR */
        } else {
            env->eip = env->cstar;      /* compat-mode caller -> CSTAR */
        }
    } else {
        /* Legacy mode: return EIP (truncated to 32 bits) in ECX */
        env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);

        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        /* flat 32-bit code segment (B bit instead of L) */
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif /* TARGET_X86_64 */
81 void handle_even_inj(CPUX86State *env, int intno, int is_int,
82 int error_code, int is_hw, int rm)
84 CPUState *cs = env_cpu(env);
85 uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
86 control.event_inj));
88 if (!(event_inj & SVM_EVTINJ_VALID)) {
89 int type;
91 if (is_int) {
92 type = SVM_EVTINJ_TYPE_SOFT;
93 } else {
94 type = SVM_EVTINJ_TYPE_EXEPT;
96 event_inj = intno | type | SVM_EVTINJ_VALID;
97 if (!rm && exception_has_error_code(intno)) {
98 event_inj |= SVM_EVTINJ_VALID_ERR;
99 x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
100 control.event_inj_err),
101 error_code);
103 x86_stl_phys(cs,
104 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
105 event_inj);
109 void x86_cpu_do_interrupt(CPUState *cs)
111 X86CPU *cpu = X86_CPU(cs);
112 CPUX86State *env = &cpu->env;
114 if (cs->exception_index == EXCP_VMEXIT) {
115 assert(env->old_exception == -1);
116 do_vmexit(env);
117 } else {
118 do_interrupt_all(cpu, cs->exception_index,
119 env->exception_is_int,
120 env->error_code,
121 env->exception_next_eip, 0);
122 /* successfully delivered */
123 env->old_exception = -1;