/* target/i386/svm_helper.c */

/*
 * x86 SVM helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/cpu-all.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
}

void helper_vmmcall(CPUX86State *env)
{
}

void helper_vmload(CPUX86State *env, int aflag)
{
}

void helper_vmsave(CPUX86State *env, int aflag)
{
}

void helper_stgi(CPUX86State *env)
{
}

void helper_clgi(CPUX86State *env)
{
}

void helper_skinit(CPUX86State *env)
{
}

void helper_invlpga(CPUX86State *env, int aflag)
{
}

void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, selector),
                 sc->selector);
    x86_stq_phys(cs, addr + offsetof(struct vmcb_seg, base),
                 sc->base);
    x86_stl_phys(cs, addr + offsetof(struct vmcb_seg, limit),
                 sc->limit);
    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, attrib),
                 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}
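
/*
 * Rough sketch of the attrib conversion used above and in svm_load_seg()
 * below, as implied by the shifts: QEMU's SegmentCache.flags keeps the
 * descriptor attribute bits at their native positions in the descriptor's
 * second dword (type/S/DPL/P in bits 8..15, AVL/L/D-B/G in bits 20..23),
 * while the 12-bit vmcb_seg.attrib field packs them as type/S/DPL/P in
 * bits 0..7 and AVL/L/D-B/G in bits 8..11.  Hence the >>8 / >>12 shifts
 * when saving and the <<8 / <<12 shifts when loading.
 */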

static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    unsigned int flags;

    sc->selector = x86_lduw_phys(cs,
                                 addr + offsetof(struct vmcb_seg, selector));
    sc->base = x86_ldq_phys(cs, addr + offsetof(struct vmcb_seg, base));
    sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
    flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
                 env->eip + next_eip_addend);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      control.intercept));
    env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_cr_read));
    env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_write));
    env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_dr_read));
    env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_write));
    env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_exceptions));
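
    /*
     * Only the intercept vectors above are cached in CPUX86State; the MSR
     * and I/O permission bitmaps are still read from guest memory on demand
     * (see the FIXMEs in helper_svm_check_intercept_param() and
     * helper_svm_check_io() below).
     */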

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
                                   offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr0)));
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr3)));
    env->cr[2] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = x86_ldl_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  x86_ldq_phys(cs,
                               env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_vmcb + offsetof(struct vmcb,
                                                              save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
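
    /*
     * The update mask passed to cpu_load_eflags() above deliberately leaves
     * out the arithmetic flags and DF: QEMU keeps those in CC_SRC/CC_OP and
     * env->df rather than in env->eflags, and cpu_load_eflags() transfers
     * them there itself.
     */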

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = x86_ldq_phys(cs,
                                    env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs,
                                    env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr6));

    /* FIXME: guest state consistency checks */

    switch (x86_ldub_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(cs, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        CPUState *cs = CPU(x86_env_get_cpu(env));

        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }
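
    /*
     * EVENTINJ layout, as reflected in the SVM_EVTINJ_* masks used below
     * (see the AMD APM for the authoritative description): bits 7:0 hold the
     * vector, bits 10:8 the event type (external interrupt, NMI, exception
     * or software interrupt), bit 11 flags a valid error code and bit 31
     * marks the whole field valid; the error code itself is read from the
     * separate event_inj_err field.
     */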
    /* maybe we need to inject an event */
    event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                 control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
                                              offsetof(struct vmcb,
                                                       control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            cs->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(cs);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
                      env->error_code);
    }
}

void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0);
    raise_exception(env, EXCP06_ILLOP);
}
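
/*
 * VMLOAD and VMSAVE transfer the pieces of guest state that VMRUN/#VMEXIT
 * leave alone: FS, GS, TR and LDTR, KernelGSBase, STAR/LSTAR/CSTAR/SFMASK
 * and the SYSENTER MSRs, as can be seen from the two helpers below.
 */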

void helper_vmload(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.kernel_gs_base));
    env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
    env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
    env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = x86_ldq_phys(cs,
                                    addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.sysenter_esp));
    env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.sysenter_eip));
}

void helper_vmsave(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs,
                                     addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base),
                 env->kernelgsbase);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.lstar), env->lstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.cstar), env->cstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.star), env->star);
    x86_stq_phys(cs,
                 addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp),
                 env->sysenter_esp);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip),
                 env->sysenter_eip);
}

void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);
}

void helper_invlpga(CPUX86State *env, int aflag)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(CPU(cpu), addr);
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (likely(!(env->hflags & HF_SVMI_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.msrpm_base_pa));
            uint32_t t0, t1;

            switch ((uint32_t)env->regs[R_ECX]) {
            case 0 ... 0x1fff:
                t0 = (env->regs[R_ECX] * 2) % 8;
                t1 = (env->regs[R_ECX] * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(env, type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
                helper_vmexit(env, type, param);
            }
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(env, type, param);
        }
        break;
    }
}
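
/*
 * A worked example of the MSR permission-bitmap lookup above (figures are
 * illustrative, derived from the code rather than quoted from the manual):
 * each MSR slot uses two bits, read then write.  For EFER (ECX = 0xc0000080)
 * the slot is 8192 + 0x80 = 8320, so the bit offset is 16640, i.e. byte
 * t1 = 2080 and bit t0 = 0 within that byte; a write intercept (param = 1)
 * therefore tests bit 1 of byte 2080 of the MSRPM.
 */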

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
    helper_svm_check_intercept_param(env, type, param);
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                     offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip */
            x86_stq_phys(cs,
                         env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                         env->eip + next_eip_addend);
            helper_vmexit(env, SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}
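
/*
 * Sketch of the IOPM lookup above (illustrative numbers): the bitmap holds
 * one bit per I/O port, and bits 6:4 of param give the access size in bytes,
 * so mask is 0x1, 0x3 or 0xf.  For a 4-byte access to port 0x3f9 the 16-bit
 * load covers bytes 127 and 128 of the bitmap and the test becomes
 * "word & (0xf << 1)", i.e. the bits for ports 0x3f9..0x3fc.
 */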

/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                   control.exit_info_2)),
                  env->eip);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                     SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }
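
    /*
     * The int_state write above records whether the guest was in an
     * interrupt shadow (e.g. immediately after STI or MOV SS);
     * SVM_INTERRUPT_SHADOW_MASK is the corresponding bit of the VMCB
     * INT_STATE field.
     */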

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = x86_ldl_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
                 env->eip);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
                 env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                      save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                      save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                      save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                      save.idtr.limit));

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                         save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_hsave + offsetof(struct vmcb,
                                                               save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
                      VM_MASK));

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rax));

    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
                 exit_code);
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
                 exit_info_1);

    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                  control.event_inj)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                  control.event_inj_err)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3.  If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    cs->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit(cs);
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    helper_vmexit(env, exit_code, exit_info_1);
}

#endif