/* target/i386/svm_helper.c (qemu/ar7.git) */
/*
 *  x86 SVM helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/cpu-all.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
}

void helper_vmmcall(CPUX86State *env)
{
}

void helper_vmload(CPUX86State *env, int aflag)
{
}

void helper_vmsave(CPUX86State *env, int aflag)
{
}

void helper_stgi(CPUX86State *env)
{
}

void helper_clgi(CPUX86State *env)
{
}

void helper_skinit(CPUX86State *env)
{
    abort();
}

void helper_invlpga(CPUX86State *env, int aflag)
{
}

void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    abort();
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    abort();
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}

#else
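
/*
 * System-emulation implementations.  The VMCB and the host save area are
 * described by guest-physical addresses, so all accesses below go through
 * the x86_ld*_phys()/x86_st*_phys() helpers rather than virtual-address
 * loads and stores.
 */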

static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, selector),
                 sc->selector);
    x86_stq_phys(cs, addr + offsetof(struct vmcb_seg, base),
                 sc->base);
    x86_stl_phys(cs, addr + offsetof(struct vmcb_seg, limit),
                 sc->limit);
    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, attrib),
                 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    unsigned int flags;

    sc->selector = x86_lduw_phys(cs,
                                 addr + offsetof(struct vmcb_seg, selector));
    sc->base = x86_ldq_phys(cs, addr + offsetof(struct vmcb_seg, base));
    sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
    flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}
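
/*
 * Note on the attrib conversion: QEMU keeps the descriptor attribute bits in
 * the same positions as the hidden descriptor (type/S/DPL/P in bits 8..15,
 * AVL/L/DB/G in bits 20..23), while the VMCB attrib field packs them into 12
 * contiguous bits, hence the shifts by 8 and 12 above.
 */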

static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;
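
    /*
     * rAX holds the guest-physical address of the VMCB.  env->vm_hsave was
     * programmed earlier through the VM_HSAVE_PA MSR and receives the host
     * context so that helper_vmexit() can restore it on #VMEXIT.
     */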

    /* save the current CPU state in the hsave page */
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
                 env->eip + next_eip_addend);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                              control.intercept));
    env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                           offsetof(struct vmcb,
                                                    control.intercept_cr_read));
    env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                            offsetof(struct vmcb,
                                                     control.intercept_cr_write));
    env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                           offsetof(struct vmcb,
                                                    control.intercept_dr_read));
    env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                            offsetof(struct vmcb,
                                                     control.intercept_dr_write));
    env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
                                             offsetof(struct vmcb,
                                                      control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;
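
    /*
     * HF_SVMI_MASK marks that we are executing a guest under SVM; the cached
     * intercept bitmaps above are only consulted while it is set (see
     * helper_svm_check_intercept_param()).
     */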

    env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
                                   offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                             save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                              save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                             save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                              save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr0)));
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr3)));
    env->cr[2] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = x86_ldl_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
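
    /*
     * With V_INTR_MASKING set, the guest runs with its own copy of EFLAGS.IF:
     * HF2_VINTR_MASK records that virtual interrupt masking is active and
     * HF2_HIF_MASK preserves the host's IF value while the guest runs.
     */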
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  x86_ldq_phys(cs,
                               env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_vmcb + offsetof(struct vmcb,
                                                              save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = x86_ldq_phys(cs,
                                    env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs,
                                    env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr6));

    /* FIXME: guest state consistency checks */

    switch (x86_ldub_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(cs, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;
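
    /*
     * V_IRQ asks for a virtual interrupt to be pending for the guest; it is
     * delivered through the regular interrupt path and reflected back into
     * int_ctl by helper_vmexit().
     */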
    if (int_ctl & V_IRQ_MASK) {
        CPUState *cs = CPU(x86_env_get_cpu(env));

        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                         control.event_inj));
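    /*
     * EVENTINJ encoding: bits 0-7 vector, bits 8-10 type, bit 11 "deliver
     * error code", bit 31 valid; the error code itself is held in the
     * separate event_inj_err field.
     */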
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
                                              offsetof(struct vmcb,
                                                       control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            cs->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(cs);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
                      env->error_code);
    }
}

void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0);
    raise_exception(env, EXCP06_ILLOP);
}
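
/*
 * VMLOAD and VMSAVE transfer the parts of the guest context that VMRUN and
 * #VMEXIT do not switch automatically: FS, GS, TR, LDTR and the
 * KernelGSbase/STAR/SYSENTER MSRs.
 */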

void helper_vmload(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.kernel_gs_base));
    env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
    env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
    env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = x86_ldq_phys(cs,
                                    addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.sysenter_esp));
    env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.sysenter_eip));
}

void helper_vmsave(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs,
                                     addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base),
                 env->kernelgsbase);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.lstar), env->lstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.cstar), env->cstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.star), env->star);
    x86_stq_phys(cs,
                 addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp),
                 env->sysenter_esp);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip),
                 env->sysenter_eip);
}
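
/*
 * STGI/CLGI set and clear the Global Interrupt Flag (HF2_GIF_MASK).
 * Architecturally, while GIF is clear the processor holds off external
 * interrupts, NMIs and SMIs.
 */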

void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void QEMU_NORETURN helper_skinit(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);
}

void helper_invlpga(CPUX86State *env, int aflag)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(CPU(cpu), addr);
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (likely(!(env->hflags & HF_SVMI_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.msrpm_base_pa));
            uint32_t t0, t1;
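
            /*
             * The MSR permission bitmap uses two bits per MSR (read and
             * write) and covers three 2K-MSR ranges: 0-0x1fff,
             * 0xc0000000-0xc0001fff and 0xc0010000-0xc0011fff.  t1 is the
             * byte offset into the bitmap and t0 the bit offset within
             * that byte.
             */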
            switch ((uint32_t)env->regs[R_ECX]) {
            case 0 ... 0x1fff:
                t0 = (env->regs[R_ECX] * 2) % 8;
                t1 = (env->regs[R_ECX] * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(env, type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
                helper_vmexit(env, type, param);
            }
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(env, type, param);
        }
        break;
    }
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
    helper_svm_check_intercept_param(env, type, param);
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                     offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
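
        /*
         * The I/O permission map holds one bit per port.  param follows the
         * IOIO exit_info_1 layout, so bits 4-6 give the access size as a
         * one-hot value and mask ends up with one bit set per byte of the
         * access (0x1, 0x3 or 0xf).
         */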
        if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip */
            x86_stq_phys(cs,
                         env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                         env->eip + next_eip_addend);
            helper_vmexit(env, SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}

/* Note: currently only 32 bits of exit_code are used */
void QEMU_NORETURN helper_vmexit(CPUX86State *env, uint32_t exit_code,
                                 uint64_t exit_info_1)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                           control.exit_info_2)),
                  env->eip);
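
    /*
     * Record whether the guest was in an interrupt shadow (e.g. immediately
     * after STI or MOV SS) in the VMCB int_state field, then drop it from
     * hflags.
     */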
    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                     SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = x86_ldl_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
                 env->eip);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
                 env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                              save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                               save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                              save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                               save.idtr.limit));

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                                 save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_hsave + offsetof(struct vmcb,
                                                               save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
                      VM_MASK));

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rax));

    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
                 exit_code);
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
                 exit_info_1);

    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj_err)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    cs->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;
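
    /*
     * cpu_loop_exit() longjmps back to the main execution loop, so control
     * resumes at the host rIP loaded above and this helper never returns
     * (hence QEMU_NORETURN).
     */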
    cpu_loop_exit(cs);
}

void QEMU_NORETURN cpu_vmexit(CPUX86State *env, uint32_t exit_code,
                              uint64_t exit_info_1)
{
    helper_vmexit(env, exit_code, exit_info_1);
}

#endif