target/i386/tcg/sysemu/svm_helper.c
/*
 *  x86 SVM helpers (sysemu only)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "tcg/helper-tcg.h"

/* Secure Virtual Machine helpers */

static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    CPUState *cs = env_cpu(env);

    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, selector),
                 sc->selector);
    x86_stq_phys(cs, addr + offsetof(struct vmcb_seg, base),
                 sc->base);
    x86_stl_phys(cs, addr + offsetof(struct vmcb_seg, limit),
                 sc->limit);
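    /*
     * The VMCB keeps segment attributes in a packed 12-bit form:
     * descriptor flags bits 8..15 map to attrib bits 0..7 and flags
     * bits 20..23 map to attrib bits 8..11, which is what the shifts
     * below implement (svm_load_seg() performs the inverse).
     */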
    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, attrib),
                 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    CPUState *cs = env_cpu(env);
    unsigned int flags;

    sc->selector = x86_lduw_phys(cs,
                                 addr + offsetof(struct vmcb_seg, selector));
    sc->base = x86_ldq_phys(cs, addr + offsetof(struct vmcb_seg, base));
    sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
    flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = env_cpu(env);
    target_ulong addr;
    uint64_t nested_ctl;
    uint32_t event_inj;
    uint32_t int_ctl;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0, GETPC());
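
    /*
     * aflag reflects the effective address size of the VMRUN instruction:
     * 2 means 64-bit, so rAX holds the full VMCB physical address;
     * otherwise only the low 32 bits of rAX are used.
     */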
    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
                 env->eip + next_eip_addend);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                              control.intercept));
    env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                           offsetof(struct vmcb,
                                                    control.intercept_cr_read));
    env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                            offsetof(struct vmcb,
                                                     control.intercept_cr_write));
    env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                           offsetof(struct vmcb,
                                                    control.intercept_dr_read));
    env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                            offsetof(struct vmcb,
                                                     control.intercept_dr_write));
    env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
                                             offsetof(struct vmcb,
                                                      control.intercept_exceptions));

    nested_ctl = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.nested_ctl));
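
    /*
     * When the nested-paging bit of nested_ctl is set, cache the guest's
     * nested CR3 and paging mode so the MMU code can apply the second
     * (host) level of translation to guest physical addresses.
     */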
    env->nested_pg_mode = 0;

    if (nested_ctl & SVM_NPT_ENABLED) {
        env->nested_cr3 = x86_ldq_phys(cs,
                                       env->vm_vmcb + offsetof(struct vmcb,
                                                               control.nested_cr3));
        env->hflags2 |= HF2_NPT_MASK;

        env->nested_pg_mode = get_pg_mode(env) & PG_MODE_SVM_MASK;
    }

    /* enable intercepts */
    env->hflags |= HF_GUEST_MASK;

    env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
                                   offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                             save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                              save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                             save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                              save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr0)));
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr3)));
    env->cr[2] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.cr2));
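    /*
     * V_INTR_MASKING means the guest's EFLAGS.IF only masks virtual
     * interrupts; remember the host's IF state in HF2_HIF so physical
     * interrupt delivery can still honour it, and latch the guest's V_TPR.
     */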
    int_ctl = x86_ldl_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  x86_ldq_phys(cs,
                               env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_vmcb + offsetof(struct vmcb,
                                                              save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = x86_ldq_phys(cs,
                                    env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs,
                                    env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr6));

    /* FIXME: guest state consistency checks */

    switch (x86_ldub_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(cs);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        CPUState *cs = env_cpu(env);

        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                         control.event_inj));
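    /*
     * EVENTINJ layout: bits 7:0 hold the vector, bits 10:8 the event type,
     * bit 11 says whether an error code is pushed (taken from the separate
     * event_inj_err field) and bit 31 marks the injection as valid.
     */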
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
                                              offsetof(struct vmcb,
                                                       control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            cs->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(cs);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
                      env->error_code);
    }
}

void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0, GETPC());
    raise_exception(env, EXCP06_ILLOP);
}

void helper_vmload(CPUX86State *env, int aflag)
{
    CPUState *cs = env_cpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.kernel_gs_base));
    env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
    env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
    env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = x86_ldq_phys(cs,
                                    addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.sysenter_esp));
    env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.sysenter_eip));
}

void helper_vmsave(CPUX86State *env, int aflag)
{
    CPUState *cs = env_cpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs,
                                     addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base),
                 env->kernelgsbase);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.lstar), env->lstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.cstar), env->cstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.star), env->star);
    x86_stq_phys(cs,
                 addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp),
                 env->sysenter_esp);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip),
                 env->sysenter_eip);
}

void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0, GETPC());
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0, GETPC());
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_invlpga(CPUX86State *env, int aflag)
{
    X86CPU *cpu = env_archcpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(CPU(cpu), addr);
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    if (likely(!(env->hflags & HF_GUEST_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.msrpm_base_pa));
            uint32_t t0, t1;
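
            /*
             * The MSR permission map gives every MSR two bits (read then
             * write intercept) and is split into 2K regions for the MSR
             * ranges 0-0x1fff, 0xc0000000-0xc0001fff and
             * 0xc0010000-0xc0011fff.  t1 becomes the byte offset into the
             * map, t0 the bit offset within that byte, and param selects
             * the read (0) or write (1) bit of the pair.
             */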
            switch ((uint32_t)env->regs[R_ECX]) {
            case 0 ... 0x1fff:
                t0 = (env->regs[R_ECX] * 2) % 8;
                t1 = (env->regs[R_ECX] * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                cpu_vmexit(env, type, param, retaddr);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
                cpu_vmexit(env, type, param, retaddr);
            }
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    }
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
    cpu_svm_check_intercept_param(env, type, param, GETPC());
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = env_cpu(env);

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                     offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
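
        /*
         * The I/O permission map has one bit per port.  param follows the
         * IOIO exit-information layout, so bits 6:4 give the access size
         * in bytes; mask therefore has one bit for each byte touched, and
         * the 16-bit load below is enough even when the access spans a
         * byte boundary in the map.
         */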
        if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip */
            x86_stq_phys(cs,
                         env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                         env->eip + next_eip_addend);
            cpu_vmexit(env, SVM_EXIT_IOIO, param | (port << 16), GETPC());
        }
    }
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    cpu_restore_state(cs, retaddr, true);

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                           control.exit_info_2)),
                  env->eip);

    cs->exception_index = EXCP_VMEXIT;
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
                 exit_code);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                             control.exit_info_1), exit_info_1);

    /* remove any pending exception */
    env->old_exception = -1;
    cpu_loop_exit(cs);
}

void do_vmexit(CPUX86State *env)
{
    CPUState *cs = env_cpu(env);
    uint32_t int_ctl;

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                     SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }
    env->hflags2 &= ~HF2_NPT_MASK;

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = x86_ldl_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
                 env->eip);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
                 env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_GUEST_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                              save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                               save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                              save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                               save.idtr.limit));

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                                 save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_hsave + offsetof(struct vmcb,
                                                               save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
                      VM_MASK));

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rax));

    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
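    /*
     * Expose the contents of EVENTINJ through EXITINTINFO/EXITINTINFO_ERR
     * (the fields a hypervisor checks to re-inject an interrupted event),
     * then clear the injection field.
     */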
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj_err)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */
}