/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "exec/cpu-all.h"

#if !defined(CONFIG_USER_ONLY)
#include "exec/softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
}

void helper_vmmcall(CPUX86State *env)
{
}

void helper_vmload(CPUX86State *env, int aflag)
{
}

void helper_vmsave(CPUX86State *env, int aflag)
{
}

void helper_stgi(CPUX86State *env)
{
}

void helper_clgi(CPUX86State *env)
{
}

void helper_skinit(CPUX86State *env)
{
}

void helper_invlpga(CPUX86State *env, int aflag)
{
}

void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
}

void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1)
{
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}

#else
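
/*
 * Copy a segment-register cache to/from a VMCB segment descriptor in guest
 * physical memory.  The VMCB keeps the access-rights bits in a packed
 * 12-bit attrib field: descriptor flag bits 8..15 map to attrib bits 0..7
 * and flag bits 20..23 map to attrib bits 8..11.
 */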
static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}
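
/*
 * VMRUN: save the host state into the hsave page, load the guest state and
 * intercept bitmaps from the VMCB at EAX/RAX, enable the intercept
 * machinery (HF_SVMI_MASK, GIF) and, if requested, inject the event
 * described by control.event_inj.
 */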
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
             env->eip + next_eip_addend);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                      control.intercept));
    env->intercept_cr_read = lduw_phys(env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_cr_read));
    env->intercept_cr_write = lduw_phys(env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_write));
    env->intercept_dr_read = lduw_phys(env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_dr_read));
    env->intercept_dr_write = lduw_phys(env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = ldq_phys(env->vm_vmcb +
                               offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                     save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.limit));

    env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                     save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                          save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                           save.cpl)));

    /* FIXME: guest state consistency checks */

    switch (ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(env, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        CPUState *cs = CPU(x86_env_get_cpu(env));

        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                 control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            env->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(env);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(env);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(env);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index,
                      env->error_code);
    }
}

void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0);
    raise_exception(env, EXCP06_ILLOP);
}
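
/*
 * VMLOAD: load the additional guest state (FS/GS/TR/LDTR and the
 * SYSCALL/SYSENTER MSRs) from the VMCB addressed by EAX/RAX.
 */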
void helper_vmload(CPUX86State *env, int aflag)
{
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(addr + offsetof(struct vmcb,
                                                 save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb,
                                                 save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb,
                                                 save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb,
                                                 save.sysenter_eip));
}
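
/*
 * VMSAVE: the mirror of VMLOAD; store the same segment and MSR state back
 * into the VMCB addressed by EAX/RAX.
 */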
void helper_vmsave(CPUX86State *env, int aflag)
{
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base),
             env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp),
             env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip),
             env->sysenter_eip);
}
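
/* STGI/CLGI: set and clear the global interrupt flag (GIF). */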
void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);
}

void helper_invlpga(CPUX86State *env, int aflag)
{
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(env, addr);
}
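
/*
 * Check whether the given exit code is intercepted and, if so, raise a
 * #VMEXIT.  MSR accesses additionally consult the MSR permission bitmap at
 * control.msrpm_base_pa, which holds one read bit and one write bit per
 * MSR (param selects the direction).
 */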
void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb +
                                     offsetof(struct vmcb,
                                              control.msrpm_base_pa));
            uint32_t t0, t1;

            switch ((uint32_t)env->regs[R_ECX]) {
            case 0 ... 0x1fff:
                t0 = (env->regs[R_ECX] * 2) % 8;
                t1 = (env->regs[R_ECX] * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(env, type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0)) {
                helper_vmexit(env, type, param);
            }
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(env, type, param);
        }
        break;
    }
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
    helper_svm_check_intercept_param(env, type, param);
}
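
/*
 * IN/OUT intercept check: look up the accessed port in the I/O permission
 * bitmap at control.iopm_base_pa and raise SVM_EXIT_IOIO if its bit is set.
 */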
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(env, SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}
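
/*
 * #VMEXIT: write the guest state and the exit reason back into the VMCB,
 * reload the host state from the hsave page and leave guest mode.
 */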
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                   control.exit_info_2)),
                  env->eip);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                 SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip),
             env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl),
             env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                      save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.limit));

    env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                      save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                         save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                           save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = ldq_phys(env->vm_hsave +
                                offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = ldq_phys(env->vm_hsave +
                                offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
             exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
             exit_info_1);

    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj_err)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit(env);
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    helper_vmexit(env, exit_code, exit_info_1);
}

#endif