target/i386/svm_helper.c
/*
 *  x86 SVM helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/cpu-all.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
}

void helper_vmmcall(CPUX86State *env)
{
}

void helper_vmload(CPUX86State *env, int aflag)
{
}

void helper_vmsave(CPUX86State *env, int aflag)
{
}

void helper_stgi(CPUX86State *env)
{
}

void helper_clgi(CPUX86State *env)
{
}

void helper_skinit(CPUX86State *env)
{
}

void helper_invlpga(CPUX86State *env, int aflag)
{
}

void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

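/*
 * Helpers to copy a segment register between the CPUX86State segment
 * cache and the corresponding vmcb_seg structure in guest physical
 * memory.  svm_save_seg packs the cached descriptor flag bits into the
 * VMCB's 12-bit "attrib" encoding; svm_load_seg reverses the mapping.
 */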
static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, selector),
                 sc->selector);
    x86_stq_phys(cs, addr + offsetof(struct vmcb_seg, base),
                 sc->base);
    x86_stl_phys(cs, addr + offsetof(struct vmcb_seg, limit),
                 sc->limit);
    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, attrib),
                 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    unsigned int flags;

    sc->selector = x86_lduw_phys(cs,
                                 addr + offsetof(struct vmcb_seg, selector));
    sc->base = x86_ldq_phys(cs, addr + offsetof(struct vmcb_seg, base));
    sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
    flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

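/*
 * VMRUN: save the host state into the hsave area, load the guest state
 * from the VMCB whose physical address is taken from rAX, cache the
 * intercept bitmaps in CPUX86State, and finally inject any event that
 * is pending in control.event_inj.
 */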
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
                 env->eip + next_eip_addend);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rsp),
                 env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rax),
                 env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                              control.intercept));
    env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                           offsetof(struct vmcb,
                                                    control.intercept_cr_read));
    env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                            offsetof(struct vmcb,
                                                     control.intercept_cr_write));
    env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                           offsetof(struct vmcb,
                                                    control.intercept_dr_read));
    env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                            offsetof(struct vmcb,
                                                     control.intercept_dr_write));
    env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
                                             offsetof(struct vmcb,
                                                      control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
                                   offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                             save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                              save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                             save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                              save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr0)));
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
                                                                 save.cr3)));
    env->cr[2] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = x86_ldl_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  x86_ldq_phys(cs,
                               env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_vmcb + offsetof(struct vmcb,
                                                              save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = x86_ldq_phys(cs,
                                    env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs,
                                    env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr6));

    /* FIXME: guest state consistency checks */

    switch (x86_ldub_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(cs);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        CPUState *cs = CPU(x86_env_get_cpu(env));

        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                         control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
                                              offsetof(struct vmcb,
                                                       control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            cs->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(cs);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
                      env->error_code);
    }
}

void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0, GETPC());
    raise_exception(env, EXCP06_ILLOP);
}

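/*
 * VMLOAD/VMSAVE transfer the additional processor state that VMRUN and
 * #VMEXIT do not handle (FS, GS, TR, LDTR, the syscall/sysenter MSRs and
 * KernelGSbase) between the CPU and the VMCB at the address in rAX.
 */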
void helper_vmload(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.kernel_gs_base));
    env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
    env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
    env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = x86_ldq_phys(cs,
                                    addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.sysenter_esp));
    env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.sysenter_eip));
}

void helper_vmsave(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs,
                                     addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base),
                 env->kernelgsbase);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.lstar), env->lstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.cstar), env->cstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.star), env->star);
    x86_stq_phys(cs,
                 addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp),
                 env->sysenter_esp);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip),
                 env->sysenter_eip);
}

void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0, GETPC());
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0, GETPC());
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0, GETPC());
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);
}

void helper_invlpga(CPUX86State *env, int aflag)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    /* XXX: could use the ASID to see whether the flush is needed at all */
    tlb_flush_page(CPU(cpu), addr);
}

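/*
 * Common intercept check: called with an SVM_EXIT_* code before an
 * interceptable operation is emulated.  If guest mode is active and the
 * corresponding intercept bit is set in the cached bitmaps, the guest is
 * left via cpu_vmexit(), which does not return.
 */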
void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (likely(!(env->hflags & HF_SVMI_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
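    /*
     * MSR accesses consult the MSR permission map: two bits per MSR
     * (even bit = read intercept, odd bit = write intercept), split into
     * three 2K ranges covering MSRs 0..0x1fff, 0xc0000000..0xc0001fff and
     * 0xc0010000..0xc0011fff.  t1 is the byte offset into the map and t0
     * the bit offset within that byte; param selects the read (0) or
     * write (1) bit.
     */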
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.msrpm_base_pa));
            uint32_t t0, t1;

            switch ((uint32_t)env->regs[R_ECX]) {
            case 0 ... 0x1fff:
                t0 = (env->regs[R_ECX] * 2) % 8;
                t1 = (env->regs[R_ECX] * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                cpu_vmexit(env, type, param, retaddr);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
                cpu_vmexit(env, type, param, retaddr);
            }
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    }
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
    cpu_svm_check_intercept_param(env, type, param, GETPC());
}

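/*
 * IN/OUT intercept check: "param" follows the SVM IOIO exit-information
 * encoding, with the access size in bits 4..6.  The size selects how many
 * bits to test in the I/O permission map; on an intercepted access the
 * address of the next instruction is stored in exit_info_2 before raising
 * the #VMEXIT.
 */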
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                     offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip */
            x86_stq_phys(cs,
                         env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                         env->eip + next_eip_addend);
            cpu_vmexit(env, SVM_EXIT_IOIO, param | (port << 16), GETPC());
        }
    }
}

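/*
 * #VMEXIT: save the guest state into the VMCB and record the exit code
 * and exit information there, reload the host state from the hsave area,
 * clear GIF and any pending exception, then exit back to the cpu main
 * loop.  This function never returns to its caller.
 */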
/* Note: currently only 32 bits of exit_code are used */
void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t int_ctl;

    if (retaddr) {
        cpu_restore_state(cs, retaddr);
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                           control.exit_info_2)),
                  env->eip);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                     SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = x86_ldl_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
                 env->eip);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
                 env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                              save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                               save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                              save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                               save.idtr.limit));

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                                 save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_hsave + offsetof(struct vmcb,
                                                               save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
                      VM_MASK));

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rax));

    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
                 exit_code);
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
                 exit_info_1);

    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj_err)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3.  If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    cs->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit(cs);
}

#endif