/* target-i386/svm_helper.c */
/*
 *  x86 SVM helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "exec/cpu-all.h"
#include "helper.h"

#if !defined(CONFIG_USER_ONLY)
#include "exec/softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */

/* Secure Virtual Machine helpers */

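/* In user-mode emulation there is no system state to virtualize, so every
   SVM helper below compiles to an empty stub. */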
#if defined(CONFIG_USER_ONLY)

void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
}

void helper_vmmcall(CPUX86State *env)
{
}

void helper_vmload(CPUX86State *env, int aflag)
{
}

void helper_vmsave(CPUX86State *env, int aflag)
{
}

void helper_stgi(CPUX86State *env)
{
}

void helper_clgi(CPUX86State *env)
{
}

void helper_skinit(CPUX86State *env)
{
}

void helper_invlpga(CPUX86State *env, int aflag)
{
}

void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
}

void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1)
{
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

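/* Write one segment register into the vmcb_seg descriptor at guest-physical
   address addr, packing the cached segment flags into the VMCB "attrib"
   encoding. */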
static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    CPUState *cs = ENV_GET_CPU(env);

    stw_phys(cs->as, addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(cs->as, addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(cs->as, addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(cs->as, addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

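/* Inverse of svm_save_seg: read a vmcb_seg descriptor and expand its attrib
   field back into the segment-cache flags layout. */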
static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    CPUState *cs = ENV_GET_CPU(env);
    unsigned int flags;

    sc->selector = lduw_phys(cs->as,
                             addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(cs->as, addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(cs->as, addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(cs->as, addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

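/* Load a segment descriptor from the VMCB and install it into the given
   segment register through cpu_x86_load_seg_cache(). */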
static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

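/* VMRUN: save the host state into the hsave page, load the guest state from
   the VMCB whose physical address is in rAX, and inject a pending event if
   the VMCB requests one. */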
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = ENV_GET_CPU(env);
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    stl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    stl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.rip),
             env->eip + next_eip_addend);
    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = ldq_phys(cs->as, env->vm_vmcb +
                              offsetof(struct vmcb, control.intercept));
    env->intercept_cr_read = lduw_phys(cs->as, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_cr_read));
    env->intercept_cr_write = lduw_phys(cs->as, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_write));
    env->intercept_dr_read = lduw_phys(cs->as, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_dr_read));
    env->intercept_dr_write = lduw_phys(cs->as, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(cs->as, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = ldq_phys(cs->as, env->vm_vmcb +
                               offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base = ldq_phys(cs->as, env->vm_vmcb +
                             offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(cs->as, env->vm_vmcb +
                              offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base = ldq_phys(cs->as, env->vm_vmcb +
                             offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(cs->as, env->vm_vmcb +
                              offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(cs->as,
                                     env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(cs->as,
                                     env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(cs->as,
                                     env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr3)));
    env->cr[2] = ldq_phys(cs->as,
                          env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(cs->as,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  ldq_phys(cs->as,
                           env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, ldq_phys(cs->as,
                                  env->vm_vmcb + offsetof(struct vmcb,
                                                          save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = ldq_phys(cs->as,
                        env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = ldq_phys(cs->as,
                                env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = ldq_phys(cs->as,
                                env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(cs->as,
                          env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(cs->as,
                          env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(cs->as,
                                   env->vm_vmcb + offsetof(struct vmcb,
                                                           save.cpl)));

    /* FIXME: guest state consistency checks */

    switch (ldub_phys(cs->as,
                      env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(env, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        CPUState *cs = CPU(x86_env_get_cpu(env));

        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = ldl_phys(cs->as, env->vm_vmcb +
                         offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(cs->as, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            env->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(env);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(env);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(env);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index,
                      env->error_code);
    }
}

void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0);
    raise_exception(env, EXCP06_ILLOP);
}

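/* VMLOAD: load the additional guest state (FS, GS, TR, LDTR and a set of
   MSRs) that VMRUN does not load, from the VMCB addressed by rAX. */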
void helper_vmload(CPUX86State *env, int aflag)
{
    CPUState *cs = ENV_GET_CPU(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(cs->as, addr + offsetof(struct vmcb,
                                                         save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(cs->as, addr + offsetof(struct vmcb,
                                                 save.kernel_gs_base));
    env->lstar = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(cs->as,
                                addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(cs->as, addr + offsetof(struct vmcb,
                                                 save.sysenter_esp));
    env->sysenter_eip = ldq_phys(cs->as, addr + offsetof(struct vmcb,
                                                 save.sysenter_eip));
}

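/* VMSAVE: the mirror image of VMLOAD; store the same registers back into the
   VMCB addressed by rAX. */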
void helper_vmsave(CPUX86State *env, int aflag)
{
    CPUState *cs = ENV_GET_CPU(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(cs->as,
                                 addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.kernel_gs_base),
             env->kernelgsbase);
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(cs->as,
             addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.sysenter_esp),
             env->sysenter_esp);
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.sysenter_eip),
             env->sysenter_eip);
}

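/* STGI and CLGI set and clear the global interrupt flag (GIF). */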
void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);
}

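/* INVLPGA: flush the TLB entry for the virtual address in rAX; the ASID
   argument is not used here (see the XXX below). */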
void helper_invlpga(CPUX86State *env, int aflag)
{
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(env, addr);
}

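/* Check whether the intercept for 'type' is enabled in the active VMCB and
   raise a #VMEXIT if it is; SVM_EXIT_MSR additionally consults the MSR
   permission bitmap. */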
void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
    CPUState *cs = ENV_GET_CPU(env);

    if (likely(!(env->hflags & HF_SVMI_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(cs->as, env->vm_vmcb +
                                     offsetof(struct vmcb,
                                              control.msrpm_base_pa));
            uint32_t t0, t1;

            switch ((uint32_t)env->regs[R_ECX]) {
            case 0 ... 0x1fff:
                t0 = (env->regs[R_ECX] * 2) % 8;
                t1 = (env->regs[R_ECX] * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(env, type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(cs->as, addr + t1) & ((1 << param) << t0)) {
                helper_vmexit(env, type, param);
            }
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(env, type, param);
        }
        break;
    }
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
    helper_svm_check_intercept_param(env, type, param);
}

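/* Check the I/O permission bitmap for the given port and raise a #VMEXIT
   with SVM_EXIT_IOIO if the access is intercepted. */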
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = ENV_GET_CPU(env);

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(cs->as, env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (lduw_phys(cs->as, addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip */
            stq_phys(cs->as,
                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(env, SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}

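/* #VMEXIT: save the guest state back into the VMCB, record the exit code and
   exit information, then reload the host state from the hsave page. */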
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                   control.exit_info_2)),
                  env->eip);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(cs->as,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                 SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(cs->as,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = ldl_phys(cs->as,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    stl_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));
    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.rip),
             env->eip);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
             env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = ldq_phys(cs->as, env->vm_hsave +
                             offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(cs->as, env->vm_hsave +
                              offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base = ldq_phys(cs->as, env->vm_hsave +
                             offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(cs->as, env->vm_hsave +
                              offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(cs->as,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(cs->as,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(cs->as,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                         save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, ldq_phys(cs->as,
                                  env->vm_hsave + offsetof(struct vmcb,
                                                           save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = ldq_phys(cs->as,
                        env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = ldq_phys(cs->as, env->vm_hsave +
                                offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = ldq_phys(cs->as, env->vm_hsave +
                                offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(cs->as,
                          env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(cs->as,
                          env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
             exit_code);
    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
             exit_info_1);

    stl_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj)));
    stl_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj_err)));
    stl_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit(env);
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    helper_vmexit(env, exit_code, exit_info_1);
}

#endif