/*
 * x86 SVM helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)
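
/* User-mode emulation has no SVM support, so the helpers below are stubs. */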

void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
}

void helper_vmmcall(CPUX86State *env)
{
}

void helper_vmload(CPUX86State *env, int aflag)
{
}

void helper_vmsave(CPUX86State *env, int aflag)
{
}

void helper_stgi(CPUX86State *env)
{
}

void helper_clgi(CPUX86State *env)
{
}

void helper_skinit(CPUX86State *env)
{
    abort();
}

void helper_invlpga(CPUX86State *env, int aflag)
{
}

void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
    abort();
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else
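
/*
 * Copy a segment descriptor cache to/from a vmcb_seg structure in guest
 * physical memory.  The 16-bit attrib field packs the attribute bits of
 * the hidden flags: flags bits 8..15 map to attrib bits 0..7 and flags
 * bits 20..23 map to attrib bits 8..11.
 */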

static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, selector),
                 sc->selector);
    x86_stq_phys(cs, addr + offsetof(struct vmcb_seg, base),
                 sc->base);
    x86_stl_phys(cs, addr + offsetof(struct vmcb_seg, limit),
                 sc->limit);
    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, attrib),
                 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    unsigned int flags;

    sc->selector = x86_lduw_phys(cs,
                                 addr + offsetof(struct vmcb_seg, selector));
    sc->base = x86_ldq_phys(cs, addr + offsetof(struct vmcb_seg, base));
    sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
    flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}
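
/*
 * VMRUN: save the host state to the hsave area, load the guest state from
 * the VMCB whose physical address is in rAX, cache the intercept bitmaps
 * in CPUX86State, set GIF, and finally inject any event that is pending
 * in the VMCB EVENTINJ field.
 */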

void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
                 env->eip + next_eip_addend);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rsp),
                 env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rax),
                 env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      control.intercept));
    env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_cr_read));
    env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_write));
    env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_dr_read));
    env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_write));
    env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_exceptions
                                                  ));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
                                   offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr0)));
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr3)));
    env->cr[2] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = x86_ldl_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  x86_ldq_phys(cs,
                               env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_vmcb + offsetof(struct vmcb,
                                                              save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr6));

    /* FIXME: guest state consistency checks */

    switch (x86_ldub_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(cs);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        CPUState *cs = CPU(x86_env_get_cpu(env));

        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                         control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
                                              offsetof(struct vmcb,
                                                       control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            cs->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(cs);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
                      env->error_code);
    }
}
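
/* VMMCALL: unless intercepted by the outer hypervisor, it raises #UD. */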

void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0, GETPC());
    raise_exception(env, EXCP06_ILLOP);
}
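
/*
 * VMLOAD: load the segment state that VMRUN does not touch (FS, GS, TR,
 * LDTR) plus STAR, the SYSENTER MSRs and, on 64-bit targets, KernelGSBase,
 * LSTAR, CSTAR and SFMASK from the VMCB addressed by rAX.
 */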

void helper_vmload(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.kernel_gs_base));
    env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
    env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
    env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = x86_ldq_phys(cs,
                                    addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.sysenter_esp));
    env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.sysenter_eip));
}
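
/* VMSAVE: the mirror image of VMLOAD; store the same state back to the VMCB. */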

void helper_vmsave(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs,
                                     addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base),
                 env->kernelgsbase);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.lstar), env->lstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.cstar), env->cstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.star), env->star);
    x86_stq_phys(cs,
                 addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp),
                 env->sysenter_esp);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip),
                 env->sysenter_eip);
}
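
/* STGI/CLGI: set/clear the global interrupt flag, tracked in hflags2. */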

void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0, GETPC());
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0, GETPC());
    env->hflags2 &= ~HF2_GIF_MASK;
}

void QEMU_NORETURN helper_skinit(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0, GETPC());
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);
}
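
/*
 * INVLPGA: flush the TLB entry for the virtual address in rAX; the ASID
 * argument in ECX is currently ignored.
 */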

void helper_invlpga(CPUX86State *env, int aflag)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(CPU(cpu), addr);
}
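
/*
 * Check whether an intercept of the given type is enabled in the bitmaps
 * cached by VMRUN and, if so, exit the guest with cpu_vmexit().  This is
 * a no-op unless the CPU is currently running with SVM intercepts active.
 */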

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (likely(!(env->hflags & HF_SVMI_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.msrpm_base_pa));
            uint32_t t0, t1;
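
            /*
             * The MSR permission map holds two bits per MSR (read, then
             * write) in three 2K regions: MSRs 0..0x1fff, then
             * 0xc0000000..0xc0001fff, then 0xc0010000..0xc0011fff.
             * Compute the byte offset t1 and bit offset t0 of the MSR's
             * pair of bits; param selects the read (0) or write (1) bit.
             */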
            switch ((uint32_t)env->regs[R_ECX]) {
            case 0 ... 0x1fff:
                t0 = (env->regs[R_ECX] * 2) % 8;
                t1 = (env->regs[R_ECX] * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                cpu_vmexit(env, type, param, retaddr);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
                cpu_vmexit(env, type, param, retaddr);
            }
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    }
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
    cpu_svm_check_intercept_param(env, type, param, GETPC());
}
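
/*
 * Check the I/O permission map for an intercepted port access and, if the
 * port is intercepted, record the return RIP in exit_info_2 and exit with
 * SVM_EXIT_IOIO.
 */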

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                     offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip */
            x86_stq_phys(cs,
                         env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                         env->eip + next_eip_addend);
            cpu_vmexit(env, SVM_EXIT_IOIO, param | (port << 16), GETPC());
        }
    }
}
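
/*
 * Trigger a #VMEXIT: restore the CPU state for the faulting instruction,
 * record the exit code (as EXCP_VMEXIT + exit_code) and exit_info_1, and
 * leave the CPU loop; the world switch back to the host is performed by
 * do_vmexit() below.
 */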

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    cpu_restore_state(cs, retaddr);

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                           control.exit_info_2)),
                  env->eip);

    cs->exception_index = EXCP_VMEXIT + exit_code;
    env->error_code = exit_info_1;

    /* remove any pending exception */
    env->old_exception = -1;
    cpu_loop_exit(cs);
}
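
/*
 * Complete the #VMEXIT: write the guest state and exit information back to
 * the VMCB, then reload the host state from the hsave area and clear GIF.
 */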

void do_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t int_ctl;

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                     SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = x86_ldl_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
                 env->eip);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
                 env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.limit));

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                         save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_hsave + offsetof(struct vmcb,
                                                               save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
                      VM_MASK));

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rax));

    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
                 exit_code);
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
                 exit_info_1);

    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj_err)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3.  If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */
}

#endif