exec: Make ldl_*_phys input an AddressSpace
[qemu/ar7.git] / target-i386 / svm_helper.c
blob 6c3c8bf4e370d4b3771d2a0869a7902c609ae1fd
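The commit subject above describes an API change to the 32-bit physical-memory accessor: it now takes the target AddressSpace explicitly instead of using an implicit global one. A minimal before/after sketch, reconstructed from the call sites in this blob (the exact prototypes and the cs->as field are assumptions based on those call sites, not quoted from the headers):

    /* before: implicit global address space */
    uint32_t ldl_phys(hwaddr addr);

    /* after: the caller names the address space, typically the CPU's own */
    uint32_t ldl_phys(AddressSpace *as, hwaddr addr);

    /* typical converted call site, as used throughout this file */
    CPUState *cs = ENV_GET_CPU(env);
    uint32_t limit = ldl_phys(cs->as, addr + offsetof(struct vmcb_seg, limit));

The other widths (ldq_phys, lduw_phys, ldub_phys) and the stores are still on the old single-argument form at this point in the series.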
/*
 * x86 SVM helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "exec/cpu-all.h"
#include "helper.h"

#if !defined(CONFIG_USER_ONLY)
#include "exec/softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */

/* Secure Virtual Machine helpers */
#if defined(CONFIG_USER_ONLY)

void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
}

void helper_vmmcall(CPUX86State *env)
{
}

void helper_vmload(CPUX86State *env, int aflag)
{
}

void helper_vmsave(CPUX86State *env, int aflag)
{
}

void helper_stgi(CPUX86State *env)
{
}

void helper_clgi(CPUX86State *env)
{
}

void helper_skinit(CPUX86State *env)
{
}

void helper_invlpga(CPUX86State *env, int aflag)
{
}

void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
}

void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1)
{
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else
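/*
 * Editor's note (not part of the original source): the VMCB keeps segment
 * attributes in a packed 12-bit field.  QEMU's SegmentCache stores the raw
 * descriptor flags, so svm_save_seg()/svm_load_seg() below move flag bits
 * 8..15 (type/S/DPL/P) into attrib bits 0..7 and flag bits 20..23
 * (AVL/L/DB/G) into attrib bits 8..11, and back again on load.
 */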
static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}
static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    CPUState *cs = ENV_GET_CPU(env);
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(cs->as, addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}
static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = ENV_GET_CPU(env);
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
             env->eip + next_eip_addend);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                      control.intercept));
    env->intercept_cr_read = lduw_phys(env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_cr_read));
    env->intercept_cr_write = lduw_phys(env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_write));
    env->intercept_dr_read = lduw_phys(env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_dr_read));
    env->intercept_dr_write = lduw_phys(env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(cs->as, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = ldq_phys(env->vm_vmcb +
                               offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                     save.gdtr.base));
    env->gdt.limit = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                              save.gdtr.limit));

    env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                     save.idtr.base));
    env->idt.limit = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                              save.idtr.limit));
    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(cs->as,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                          save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                           save.cpl)));
    /* FIXME: guest state consistency checks */

    switch (ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(env, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        CPUState *cs = CPU(x86_env_get_cpu(env));

        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                         control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(cs->as, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            env->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(env);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(env);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(env);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index,
                      env->error_code);
    }
}
void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0);
    raise_exception(env, EXCP06_ILLOP);
}
void helper_vmload(CPUX86State *env, int aflag)
{
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(addr + offsetof(struct vmcb,
                                                 save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb,
                                                 save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb,
                                                 save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb,
                                                 save.sysenter_eip));
}
void helper_vmsave(CPUX86State *env, int aflag)
{
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base),
             env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp),
             env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip),
             env->sysenter_eip);
}
void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}
void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}
void helper_skinit(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);
}
void helper_invlpga(CPUX86State *env, int aflag)
{
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(env, addr);
}
void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb +
                                     offsetof(struct vmcb,
                                              control.msrpm_base_pa));
            uint32_t t0, t1;
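            /*
             * Editor's note (not part of the original source): the MSR
             * permission bitmap holds two intercept bits per MSR (read,
             * then write), in three 2 KB regions covering the MSR ranges
             * 0x0-0x1fff, 0xc0000000-0xc0001fff and 0xc0010000-0xc0011fff.
             * t1 is computed as the byte offset into that bitmap and t0 as
             * the bit offset within the byte; 'param' is assumed here to be
             * 0 for a read and 1 for a write, selecting which bit of the
             * pair is tested below.
             */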
            switch ((uint32_t)env->regs[R_ECX]) {
            case 0 ... 0x1fff:
                t0 = (env->regs[R_ECX] * 2) % 8;
                t1 = (env->regs[R_ECX] * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(env, type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0)) {
                helper_vmexit(env, type, param);
            }
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(env, type, param);
        }
        break;
    }
}
void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
    helper_svm_check_intercept_param(env, type, param);
}
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
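        /*
         * Editor's note (not part of the original source): the I/O
         * permission bitmap has one intercept bit per port.  'param' is
         * assumed to carry the IOIO exit-info encoding, whose bits 4..6
         * give the access size in bytes, so 'mask' spans every port byte
         * the access touches before being shifted to the port's bit
         * position below.
         */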
        if (lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(env, SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                   control.exit_info_2)),
                  env->eip);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                 SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = ldl_phys(cs->as,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip),
             env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl),
             env->hflags & HF_CPL_MASK);
    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                      save.gdtr.base));
    env->gdt.limit = ldl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                               save.gdtr.limit));

    env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                      save.idtr.base));
    env->idt.limit = ldl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                               save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                         save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                           save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = ldq_phys(env->vm_hsave +
                                offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = ldq_phys(env->vm_hsave +
                                offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
             exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
             exit_info_1);

    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                      control.event_inj)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                      control.event_inj_err)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit(env);
}
void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    helper_vmexit(env, exit_code, exit_info_1);
}

#endif