/*
 *  x86 SVM helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
}

void helper_vmmcall(CPUX86State *env)
{
}

void helper_vmload(CPUX86State *env, int aflag)
{
}

void helper_vmsave(CPUX86State *env, int aflag)
{
}

void helper_stgi(CPUX86State *env)
{
}

void helper_clgi(CPUX86State *env)
{
}

void helper_skinit(CPUX86State *env)
{
}

void helper_invlpga(CPUX86State *env, int aflag)
{
}

void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else
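
/*
 * The VMCB stores segment attributes in the packed 12-bit "attrib"
 * encoding, while QEMU keeps the expanded descriptor flags in
 * SegmentCache.flags; the two helpers below convert between the
 * formats when copying segment state to and from guest memory.
 */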
static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, selector),
                 sc->selector);
    x86_stq_phys(cs, addr + offsetof(struct vmcb_seg, base),
                 sc->base);
    x86_stl_phys(cs, addr + offsetof(struct vmcb_seg, limit),
                 sc->limit);
    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, attrib),
                 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}
static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    unsigned int flags;

    sc->selector = x86_lduw_phys(cs,
                                 addr + offsetof(struct vmcb_seg, selector));
    sc->base = x86_ldq_phys(cs, addr + offsetof(struct vmcb_seg, base));
    sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
    flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}
static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;
    /* save the current CPU state in the hsave page */
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
                 env->eip + next_eip_addend);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      control.intercept));
    env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_cr_read));
    env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_write));
    env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_dr_read));
    env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_write));
    env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;
    env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
                                   offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base  = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.limit));

    env->idt.base  = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr0)));
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr3)));
    env->cr[2] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = x86_ldl_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  x86_ldq_phys(cs,
                               env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_vmcb + offsetof(struct vmcb,
                                                              save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = x86_ldq_phys(cs,
                                    env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs,
                                    env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    /* FIXME: guest state consistency checks */

    switch (x86_ldub_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(cs);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        CPUState *cs = CPU(x86_env_get_cpu(env));

        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }
    /* maybe we need to inject an event */
    event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                 control.event_inj));
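
    /*
     * EVENTINJ layout (AMD SVM): bits 7:0 hold the vector, bits 10:8 the
     * event type, bit 11 says whether an error code is delivered, and
     * bit 31 marks the field as valid; the error code itself lives in
     * event_inj_err.
     */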
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
                                              offsetof(struct vmcb,
                                                       control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            cs->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(cs);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
                      env->error_code);
    }
}
void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0, GETPC());
    raise_exception(env, EXCP06_ILLOP);
}
void helper_vmload(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.kernel_gs_base));
    env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
    env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
    env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = x86_ldq_phys(cs,
                                    addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.sysenter_esp));
    env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.sysenter_eip));
}
void helper_vmsave(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs,
                                     addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base),
                 env->kernelgsbase);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.lstar), env->lstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.cstar), env->cstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.star), env->star);
    x86_stq_phys(cs,
                 addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp),
                 env->sysenter_esp);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip),
                 env->sysenter_eip);
}
void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0, GETPC());
    env->hflags2 |= HF2_GIF_MASK;
}
void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0, GETPC());
    env->hflags2 &= ~HF2_GIF_MASK;
}
void helper_skinit(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0, GETPC());
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);
}
void helper_invlpga(CPUX86State *env, int aflag)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(CPU(cpu), addr);
}
void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (likely(!(env->hflags & HF_SVMI_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                     offsetof(struct vmcb,
                                              control.msrpm_base_pa));
            uint32_t t0, t1;
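
            /*
             * The MSR permission map holds two intercept bits per MSR and
             * is split into 2K-byte regions covering MSRs 0x0-0x1fff,
             * 0xc0000000-0xc0001fff and 0xc0010000-0xc0011fff; t1 is the
             * byte offset into the map and t0 the bit offset within that
             * byte.
             */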
            switch ((uint32_t)env->regs[R_ECX]) {
            case 0 ... 0x1fff:
                t0 = (env->regs[R_ECX] * 2) % 8;
                t1 = (env->regs[R_ECX] * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                cpu_vmexit(env, type, param, retaddr);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
                cpu_vmexit(env, type, param, retaddr);
            }
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    }
}
void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
    cpu_svm_check_intercept_param(env, type, param, GETPC());
}
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
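
        /*
         * The I/O permission map holds one intercept bit per port; the
         * SZ8/SZ16/SZ32 flags in bits 6:4 of param expand into a mask of
         * 1, 2 or 4 consecutive bits, so every port byte touched by the
         * access is checked.
         */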
        if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip */
            x86_stq_phys(cs,
                         env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                         env->eip + next_eip_addend);
            cpu_vmexit(env, SVM_EXIT_IOIO, param | (port << 16), GETPC());
        }
    }
}
void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    cpu_restore_state(cs, retaddr);

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                   control.exit_info_2)),
                  env->eip);

    cs->exception_index = EXCP_VMEXIT + exit_code;
    env->error_code = exit_info_1;

    /* remove any pending exception */
    env->old_exception = -1;
    cpu_loop_exit(cs);
}
void do_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t int_ctl;

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                     SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }
    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = x86_ldl_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
                 env->eip);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
                 env->hflags & HF_CPL_MASK);
    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base  = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.limit));

    env->idt.base  = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.limit));

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                                 save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_hsave + offsetof(struct vmcb,
                                                               save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
                      VM_MASK));
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rax));

    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr7));
    /* other setups */
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
                 exit_code);
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
                 exit_info_1);

    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                  control.event_inj)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                  control.event_inj_err)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */
}

#endif