/*
 *  x86 SVM helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "exec/cpu-all.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
25 /* Secure Virtual Machine helpers */
27 #if defined(CONFIG_USER_ONLY)
29 void helper_vmrun(CPUX86State
*env
, int aflag
, int next_eip_addend
)
33 void helper_vmmcall(CPUX86State
*env
)
37 void helper_vmload(CPUX86State
*env
, int aflag
)
41 void helper_vmsave(CPUX86State
*env
, int aflag
)
45 void helper_stgi(CPUX86State
*env
)
49 void helper_clgi(CPUX86State
*env
)
53 void helper_skinit(CPUX86State
*env
)
57 void helper_invlpga(CPUX86State
*env
, int aflag
)
61 void helper_vmexit(CPUX86State
*env
, uint32_t exit_code
, uint64_t exit_info_1
)
65 void cpu_vmexit(CPUX86State
*nenv
, uint32_t exit_code
, uint64_t exit_info_1
)
69 void helper_svm_check_intercept_param(CPUX86State
*env
, uint32_t type
,
74 void cpu_svm_check_intercept_param(CPUX86State
*env
, uint32_t type
,
79 void helper_svm_check_io(CPUX86State
*env
, uint32_t port
, uint32_t param
,
80 uint32_t next_eip_addend
)
85 static inline void svm_save_seg(CPUX86State
*env
, hwaddr addr
,
86 const SegmentCache
*sc
)
88 CPUState
*cs
= CPU(x86_env_get_cpu(env
));
90 x86_stw_phys(cs
, addr
+ offsetof(struct vmcb_seg
, selector
),
92 x86_stq_phys(cs
, addr
+ offsetof(struct vmcb_seg
, base
),
94 x86_stl_phys(cs
, addr
+ offsetof(struct vmcb_seg
, limit
),
96 x86_stw_phys(cs
, addr
+ offsetof(struct vmcb_seg
, attrib
),
97 ((sc
->flags
>> 8) & 0xff) | ((sc
->flags
>> 12) & 0x0f00));
100 static inline void svm_load_seg(CPUX86State
*env
, hwaddr addr
,
103 CPUState
*cs
= CPU(x86_env_get_cpu(env
));
106 sc
->selector
= x86_lduw_phys(cs
,
107 addr
+ offsetof(struct vmcb_seg
, selector
));
108 sc
->base
= x86_ldq_phys(cs
, addr
+ offsetof(struct vmcb_seg
, base
));
109 sc
->limit
= x86_ldl_phys(cs
, addr
+ offsetof(struct vmcb_seg
, limit
));
110 flags
= x86_lduw_phys(cs
, addr
+ offsetof(struct vmcb_seg
, attrib
));
111 sc
->flags
= ((flags
& 0xff) << 8) | ((flags
& 0x0f00) << 12);
114 static inline void svm_load_seg_cache(CPUX86State
*env
, hwaddr addr
,
117 SegmentCache sc1
, *sc
= &sc1
;
119 svm_load_seg(env
, addr
, sc
);
120 cpu_x86_load_seg_cache(env
, seg_reg
, sc
->selector
,
121 sc
->base
, sc
->limit
, sc
->flags
);
124 void helper_vmrun(CPUX86State
*env
, int aflag
, int next_eip_addend
)
126 CPUState
*cs
= CPU(x86_env_get_cpu(env
));
131 cpu_svm_check_intercept_param(env
, SVM_EXIT_VMRUN
, 0);
134 addr
= env
->regs
[R_EAX
];
136 addr
= (uint32_t)env
->regs
[R_EAX
];
139 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "vmrun! " TARGET_FMT_lx
"\n", addr
);
143 /* save the current CPU state in the hsave page */
144 x86_stq_phys(cs
, env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.base
),
146 x86_stl_phys(cs
, env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.limit
),
149 x86_stq_phys(cs
, env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.base
),
151 x86_stl_phys(cs
, env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.limit
),
155 env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr0
), env
->cr
[0]);
157 env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr2
), env
->cr
[2]);
159 env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr3
), env
->cr
[3]);
161 env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr4
), env
->cr
[4]);
163 env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr6
), env
->dr
[6]);
165 env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr7
), env
->dr
[7]);
168 env
->vm_hsave
+ offsetof(struct vmcb
, save
.efer
), env
->efer
);
170 env
->vm_hsave
+ offsetof(struct vmcb
, save
.rflags
),
171 cpu_compute_eflags(env
));
173 svm_save_seg(env
, env
->vm_hsave
+ offsetof(struct vmcb
, save
.es
),
175 svm_save_seg(env
, env
->vm_hsave
+ offsetof(struct vmcb
, save
.cs
),
177 svm_save_seg(env
, env
->vm_hsave
+ offsetof(struct vmcb
, save
.ss
),
179 svm_save_seg(env
, env
->vm_hsave
+ offsetof(struct vmcb
, save
.ds
),
182 x86_stq_phys(cs
, env
->vm_hsave
+ offsetof(struct vmcb
, save
.rip
),
183 env
->eip
+ next_eip_addend
);
185 env
->vm_hsave
+ offsetof(struct vmcb
, save
.rsp
), env
->regs
[R_ESP
]);
187 env
->vm_hsave
+ offsetof(struct vmcb
, save
.rax
), env
->regs
[R_EAX
]);
189 /* load the interception bitmaps so we do not need to access the
191 env
->intercept
= x86_ldq_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
,
193 env
->intercept_cr_read
= x86_lduw_phys(cs
, env
->vm_vmcb
+
194 offsetof(struct vmcb
,
195 control
.intercept_cr_read
));
196 env
->intercept_cr_write
= x86_lduw_phys(cs
, env
->vm_vmcb
+
197 offsetof(struct vmcb
,
198 control
.intercept_cr_write
));
199 env
->intercept_dr_read
= x86_lduw_phys(cs
, env
->vm_vmcb
+
200 offsetof(struct vmcb
,
201 control
.intercept_dr_read
));
202 env
->intercept_dr_write
= x86_lduw_phys(cs
, env
->vm_vmcb
+
203 offsetof(struct vmcb
,
204 control
.intercept_dr_write
));
205 env
->intercept_exceptions
= x86_ldl_phys(cs
, env
->vm_vmcb
+
206 offsetof(struct vmcb
,
207 control
.intercept_exceptions
210 /* enable intercepts */
211 env
->hflags
|= HF_SVMI_MASK
;
213 env
->tsc_offset
= x86_ldq_phys(cs
, env
->vm_vmcb
+
214 offsetof(struct vmcb
, control
.tsc_offset
));
216 env
->gdt
.base
= x86_ldq_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
,
218 env
->gdt
.limit
= x86_ldl_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
,
221 env
->idt
.base
= x86_ldq_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
,
223 env
->idt
.limit
= x86_ldl_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
,
226 /* clear exit_info_2 so we behave like the real hardware */
228 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_2
), 0);
230 cpu_x86_update_cr0(env
, x86_ldq_phys(cs
,
231 env
->vm_vmcb
+ offsetof(struct vmcb
,
233 cpu_x86_update_cr4(env
, x86_ldq_phys(cs
,
234 env
->vm_vmcb
+ offsetof(struct vmcb
,
236 cpu_x86_update_cr3(env
, x86_ldq_phys(cs
,
237 env
->vm_vmcb
+ offsetof(struct vmcb
,
239 env
->cr
[2] = x86_ldq_phys(cs
,
240 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr2
));
241 int_ctl
= x86_ldl_phys(cs
,
242 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
));
243 env
->hflags2
&= ~(HF2_HIF_MASK
| HF2_VINTR_MASK
);
244 if (int_ctl
& V_INTR_MASKING_MASK
) {
245 env
->v_tpr
= int_ctl
& V_TPR_MASK
;
246 env
->hflags2
|= HF2_VINTR_MASK
;
247 if (env
->eflags
& IF_MASK
) {
248 env
->hflags2
|= HF2_HIF_MASK
;
254 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.efer
)));
256 cpu_load_eflags(env
, x86_ldq_phys(cs
,
257 env
->vm_vmcb
+ offsetof(struct vmcb
,
259 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
261 svm_load_seg_cache(env
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.es
),
263 svm_load_seg_cache(env
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cs
),
265 svm_load_seg_cache(env
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.ss
),
267 svm_load_seg_cache(env
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.ds
),
270 env
->eip
= x86_ldq_phys(cs
,
271 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rip
));
273 env
->regs
[R_ESP
] = x86_ldq_phys(cs
,
274 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rsp
));
275 env
->regs
[R_EAX
] = x86_ldq_phys(cs
,
276 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rax
));
277 env
->dr
[7] = x86_ldq_phys(cs
,
278 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr7
));
279 env
->dr
[6] = x86_ldq_phys(cs
,
280 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr6
));
282 /* FIXME: guest state consistency checks */
284 switch (x86_ldub_phys(cs
,
285 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.tlb_ctl
))) {
286 case TLB_CONTROL_DO_NOTHING
:
288 case TLB_CONTROL_FLUSH_ALL_ASID
:
289 /* FIXME: this is not 100% correct but should work for now */
294 env
->hflags2
|= HF2_GIF_MASK
;
296 if (int_ctl
& V_IRQ_MASK
) {
297 CPUState
*cs
= CPU(x86_env_get_cpu(env
));
299 cs
->interrupt_request
|= CPU_INTERRUPT_VIRQ
;
302 /* maybe we need to inject an event */
303 event_inj
= x86_ldl_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
,
305 if (event_inj
& SVM_EVTINJ_VALID
) {
306 uint8_t vector
= event_inj
& SVM_EVTINJ_VEC_MASK
;
307 uint16_t valid_err
= event_inj
& SVM_EVTINJ_VALID_ERR
;
308 uint32_t event_inj_err
= x86_ldl_phys(cs
, env
->vm_vmcb
+
309 offsetof(struct vmcb
,
310 control
.event_inj_err
));
312 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "Injecting(%#hx): ", valid_err
);
313 /* FIXME: need to implement valid_err */
314 switch (event_inj
& SVM_EVTINJ_TYPE_MASK
) {
315 case SVM_EVTINJ_TYPE_INTR
:
316 cs
->exception_index
= vector
;
317 env
->error_code
= event_inj_err
;
318 env
->exception_is_int
= 0;
319 env
->exception_next_eip
= -1;
320 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "INTR");
321 /* XXX: is it always correct? */
322 do_interrupt_x86_hardirq(env
, vector
, 1);
324 case SVM_EVTINJ_TYPE_NMI
:
325 cs
->exception_index
= EXCP02_NMI
;
326 env
->error_code
= event_inj_err
;
327 env
->exception_is_int
= 0;
328 env
->exception_next_eip
= env
->eip
;
329 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "NMI");
332 case SVM_EVTINJ_TYPE_EXEPT
:
333 cs
->exception_index
= vector
;
334 env
->error_code
= event_inj_err
;
335 env
->exception_is_int
= 0;
336 env
->exception_next_eip
= -1;
337 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "EXEPT");
340 case SVM_EVTINJ_TYPE_SOFT
:
341 cs
->exception_index
= vector
;
342 env
->error_code
= event_inj_err
;
343 env
->exception_is_int
= 1;
344 env
->exception_next_eip
= env
->eip
;
345 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "SOFT");
349 qemu_log_mask(CPU_LOG_TB_IN_ASM
, " %#x %#x\n", cs
->exception_index
,
354 void helper_vmmcall(CPUX86State
*env
)
356 cpu_svm_check_intercept_param(env
, SVM_EXIT_VMMCALL
, 0);
357 raise_exception(env
, EXCP06_ILLOP
);
360 void helper_vmload(CPUX86State
*env
, int aflag
)
362 CPUState
*cs
= CPU(x86_env_get_cpu(env
));
365 cpu_svm_check_intercept_param(env
, SVM_EXIT_VMLOAD
, 0);
368 addr
= env
->regs
[R_EAX
];
370 addr
= (uint32_t)env
->regs
[R_EAX
];
373 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "vmload! " TARGET_FMT_lx
374 "\nFS: %016" PRIx64
" | " TARGET_FMT_lx
"\n",
375 addr
, x86_ldq_phys(cs
, addr
+ offsetof(struct vmcb
,
377 env
->segs
[R_FS
].base
);
379 svm_load_seg_cache(env
, addr
+ offsetof(struct vmcb
, save
.fs
), R_FS
);
380 svm_load_seg_cache(env
, addr
+ offsetof(struct vmcb
, save
.gs
), R_GS
);
381 svm_load_seg(env
, addr
+ offsetof(struct vmcb
, save
.tr
), &env
->tr
);
382 svm_load_seg(env
, addr
+ offsetof(struct vmcb
, save
.ldtr
), &env
->ldt
);
385 env
->kernelgsbase
= x86_ldq_phys(cs
, addr
+ offsetof(struct vmcb
,
386 save
.kernel_gs_base
));
387 env
->lstar
= x86_ldq_phys(cs
, addr
+ offsetof(struct vmcb
, save
.lstar
));
388 env
->cstar
= x86_ldq_phys(cs
, addr
+ offsetof(struct vmcb
, save
.cstar
));
389 env
->fmask
= x86_ldq_phys(cs
, addr
+ offsetof(struct vmcb
, save
.sfmask
));
391 env
->star
= x86_ldq_phys(cs
, addr
+ offsetof(struct vmcb
, save
.star
));
392 env
->sysenter_cs
= x86_ldq_phys(cs
,
393 addr
+ offsetof(struct vmcb
, save
.sysenter_cs
));
394 env
->sysenter_esp
= x86_ldq_phys(cs
, addr
+ offsetof(struct vmcb
,
396 env
->sysenter_eip
= x86_ldq_phys(cs
, addr
+ offsetof(struct vmcb
,
400 void helper_vmsave(CPUX86State
*env
, int aflag
)
402 CPUState
*cs
= CPU(x86_env_get_cpu(env
));
405 cpu_svm_check_intercept_param(env
, SVM_EXIT_VMSAVE
, 0);
408 addr
= env
->regs
[R_EAX
];
410 addr
= (uint32_t)env
->regs
[R_EAX
];
413 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "vmsave! " TARGET_FMT_lx
414 "\nFS: %016" PRIx64
" | " TARGET_FMT_lx
"\n",
415 addr
, x86_ldq_phys(cs
,
416 addr
+ offsetof(struct vmcb
, save
.fs
.base
)),
417 env
->segs
[R_FS
].base
);
419 svm_save_seg(env
, addr
+ offsetof(struct vmcb
, save
.fs
),
421 svm_save_seg(env
, addr
+ offsetof(struct vmcb
, save
.gs
),
423 svm_save_seg(env
, addr
+ offsetof(struct vmcb
, save
.tr
),
425 svm_save_seg(env
, addr
+ offsetof(struct vmcb
, save
.ldtr
),
429 x86_stq_phys(cs
, addr
+ offsetof(struct vmcb
, save
.kernel_gs_base
),
431 x86_stq_phys(cs
, addr
+ offsetof(struct vmcb
, save
.lstar
), env
->lstar
);
432 x86_stq_phys(cs
, addr
+ offsetof(struct vmcb
, save
.cstar
), env
->cstar
);
433 x86_stq_phys(cs
, addr
+ offsetof(struct vmcb
, save
.sfmask
), env
->fmask
);
435 x86_stq_phys(cs
, addr
+ offsetof(struct vmcb
, save
.star
), env
->star
);
437 addr
+ offsetof(struct vmcb
, save
.sysenter_cs
), env
->sysenter_cs
);
438 x86_stq_phys(cs
, addr
+ offsetof(struct vmcb
, save
.sysenter_esp
),
440 x86_stq_phys(cs
, addr
+ offsetof(struct vmcb
, save
.sysenter_eip
),
444 void helper_stgi(CPUX86State
*env
)
446 cpu_svm_check_intercept_param(env
, SVM_EXIT_STGI
, 0);
447 env
->hflags2
|= HF2_GIF_MASK
;
450 void helper_clgi(CPUX86State
*env
)
452 cpu_svm_check_intercept_param(env
, SVM_EXIT_CLGI
, 0);
453 env
->hflags2
&= ~HF2_GIF_MASK
;
456 void helper_skinit(CPUX86State
*env
)
458 cpu_svm_check_intercept_param(env
, SVM_EXIT_SKINIT
, 0);
459 /* XXX: not implemented */
460 raise_exception(env
, EXCP06_ILLOP
);
463 void helper_invlpga(CPUX86State
*env
, int aflag
)
465 X86CPU
*cpu
= x86_env_get_cpu(env
);
468 cpu_svm_check_intercept_param(env
, SVM_EXIT_INVLPGA
, 0);
471 addr
= env
->regs
[R_EAX
];
473 addr
= (uint32_t)env
->regs
[R_EAX
];
476 /* XXX: could use the ASID to see if it is needed to do the
478 tlb_flush_page(CPU(cpu
), addr
);
481 void helper_svm_check_intercept_param(CPUX86State
*env
, uint32_t type
,
484 CPUState
*cs
= CPU(x86_env_get_cpu(env
));
486 if (likely(!(env
->hflags
& HF_SVMI_MASK
))) {
490 case SVM_EXIT_READ_CR0
... SVM_EXIT_READ_CR0
+ 8:
491 if (env
->intercept_cr_read
& (1 << (type
- SVM_EXIT_READ_CR0
))) {
492 helper_vmexit(env
, type
, param
);
495 case SVM_EXIT_WRITE_CR0
... SVM_EXIT_WRITE_CR0
+ 8:
496 if (env
->intercept_cr_write
& (1 << (type
- SVM_EXIT_WRITE_CR0
))) {
497 helper_vmexit(env
, type
, param
);
500 case SVM_EXIT_READ_DR0
... SVM_EXIT_READ_DR0
+ 7:
501 if (env
->intercept_dr_read
& (1 << (type
- SVM_EXIT_READ_DR0
))) {
502 helper_vmexit(env
, type
, param
);
505 case SVM_EXIT_WRITE_DR0
... SVM_EXIT_WRITE_DR0
+ 7:
506 if (env
->intercept_dr_write
& (1 << (type
- SVM_EXIT_WRITE_DR0
))) {
507 helper_vmexit(env
, type
, param
);
510 case SVM_EXIT_EXCP_BASE
... SVM_EXIT_EXCP_BASE
+ 31:
511 if (env
->intercept_exceptions
& (1 << (type
- SVM_EXIT_EXCP_BASE
))) {
512 helper_vmexit(env
, type
, param
);
516 if (env
->intercept
& (1ULL << (SVM_EXIT_MSR
- SVM_EXIT_INTR
))) {
517 /* FIXME: this should be read in at vmrun (faster this way?) */
518 uint64_t addr
= x86_ldq_phys(cs
, env
->vm_vmcb
+
519 offsetof(struct vmcb
,
520 control
.msrpm_base_pa
));
523 switch ((uint32_t)env
->regs
[R_ECX
]) {
525 t0
= (env
->regs
[R_ECX
] * 2) % 8;
526 t1
= (env
->regs
[R_ECX
] * 2) / 8;
528 case 0xc0000000 ... 0xc0001fff:
529 t0
= (8192 + env
->regs
[R_ECX
] - 0xc0000000) * 2;
533 case 0xc0010000 ... 0xc0011fff:
534 t0
= (16384 + env
->regs
[R_ECX
] - 0xc0010000) * 2;
539 helper_vmexit(env
, type
, param
);
544 if (x86_ldub_phys(cs
, addr
+ t1
) & ((1 << param
) << t0
)) {
545 helper_vmexit(env
, type
, param
);
550 if (env
->intercept
& (1ULL << (type
- SVM_EXIT_INTR
))) {
551 helper_vmexit(env
, type
, param
);
557 void cpu_svm_check_intercept_param(CPUX86State
*env
, uint32_t type
,
560 helper_svm_check_intercept_param(env
, type
, param
);
563 void helper_svm_check_io(CPUX86State
*env
, uint32_t port
, uint32_t param
,
564 uint32_t next_eip_addend
)
566 CPUState
*cs
= CPU(x86_env_get_cpu(env
));
568 if (env
->intercept
& (1ULL << (SVM_EXIT_IOIO
- SVM_EXIT_INTR
))) {
569 /* FIXME: this should be read in at vmrun (faster this way?) */
570 uint64_t addr
= x86_ldq_phys(cs
, env
->vm_vmcb
+
571 offsetof(struct vmcb
, control
.iopm_base_pa
));
572 uint16_t mask
= (1 << ((param
>> 4) & 7)) - 1;
574 if (x86_lduw_phys(cs
, addr
+ port
/ 8) & (mask
<< (port
& 7))) {
577 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_2
),
578 env
->eip
+ next_eip_addend
);
579 helper_vmexit(env
, SVM_EXIT_IOIO
, param
| (port
<< 16));
584 /* Note: currently only 32 bits of exit_code are used */
585 void helper_vmexit(CPUX86State
*env
, uint32_t exit_code
, uint64_t exit_info_1
)
587 CPUState
*cs
= CPU(x86_env_get_cpu(env
));
590 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "vmexit(%08x, %016" PRIx64
", %016"
591 PRIx64
", " TARGET_FMT_lx
")!\n",
592 exit_code
, exit_info_1
,
593 x86_ldq_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
,
594 control
.exit_info_2
)),
597 if (env
->hflags
& HF_INHIBIT_IRQ_MASK
) {
599 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_state
),
600 SVM_INTERRUPT_SHADOW_MASK
);
601 env
->hflags
&= ~HF_INHIBIT_IRQ_MASK
;
604 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_state
), 0);
607 /* Save the VM state in the vmcb */
608 svm_save_seg(env
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.es
),
610 svm_save_seg(env
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cs
),
612 svm_save_seg(env
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.ss
),
614 svm_save_seg(env
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.ds
),
617 x86_stq_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.base
),
619 x86_stl_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.limit
),
622 x86_stq_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.base
),
624 x86_stl_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.limit
),
628 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.efer
), env
->efer
);
630 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr0
), env
->cr
[0]);
632 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr2
), env
->cr
[2]);
634 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr3
), env
->cr
[3]);
636 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr4
), env
->cr
[4]);
638 int_ctl
= x86_ldl_phys(cs
,
639 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
));
640 int_ctl
&= ~(V_TPR_MASK
| V_IRQ_MASK
);
641 int_ctl
|= env
->v_tpr
& V_TPR_MASK
;
642 if (cs
->interrupt_request
& CPU_INTERRUPT_VIRQ
) {
643 int_ctl
|= V_IRQ_MASK
;
646 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
), int_ctl
);
648 x86_stq_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rflags
),
649 cpu_compute_eflags(env
));
650 x86_stq_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rip
),
653 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rsp
), env
->regs
[R_ESP
]);
655 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rax
), env
->regs
[R_EAX
]);
657 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr7
), env
->dr
[7]);
659 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr6
), env
->dr
[6]);
660 x86_stb_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cpl
),
661 env
->hflags
& HF_CPL_MASK
);
663 /* Reload the host state from vm_hsave */
664 env
->hflags2
&= ~(HF2_HIF_MASK
| HF2_VINTR_MASK
);
665 env
->hflags
&= ~HF_SVMI_MASK
;
667 env
->intercept_exceptions
= 0;
668 cs
->interrupt_request
&= ~CPU_INTERRUPT_VIRQ
;
671 env
->gdt
.base
= x86_ldq_phys(cs
, env
->vm_hsave
+ offsetof(struct vmcb
,
673 env
->gdt
.limit
= x86_ldl_phys(cs
, env
->vm_hsave
+ offsetof(struct vmcb
,
676 env
->idt
.base
= x86_ldq_phys(cs
, env
->vm_hsave
+ offsetof(struct vmcb
,
678 env
->idt
.limit
= x86_ldl_phys(cs
, env
->vm_hsave
+ offsetof(struct vmcb
,
681 cpu_x86_update_cr0(env
, x86_ldq_phys(cs
,
682 env
->vm_hsave
+ offsetof(struct vmcb
,
685 cpu_x86_update_cr4(env
, x86_ldq_phys(cs
,
686 env
->vm_hsave
+ offsetof(struct vmcb
,
688 cpu_x86_update_cr3(env
, x86_ldq_phys(cs
,
689 env
->vm_hsave
+ offsetof(struct vmcb
,
691 /* we need to set the efer after the crs so the hidden flags get
693 cpu_load_efer(env
, x86_ldq_phys(cs
, env
->vm_hsave
+ offsetof(struct vmcb
,
696 cpu_load_eflags(env
, x86_ldq_phys(cs
,
697 env
->vm_hsave
+ offsetof(struct vmcb
,
699 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
|
702 svm_load_seg_cache(env
, env
->vm_hsave
+ offsetof(struct vmcb
, save
.es
),
704 svm_load_seg_cache(env
, env
->vm_hsave
+ offsetof(struct vmcb
, save
.cs
),
706 svm_load_seg_cache(env
, env
->vm_hsave
+ offsetof(struct vmcb
, save
.ss
),
708 svm_load_seg_cache(env
, env
->vm_hsave
+ offsetof(struct vmcb
, save
.ds
),
711 env
->eip
= x86_ldq_phys(cs
,
712 env
->vm_hsave
+ offsetof(struct vmcb
, save
.rip
));
713 env
->regs
[R_ESP
] = x86_ldq_phys(cs
, env
->vm_hsave
+
714 offsetof(struct vmcb
, save
.rsp
));
715 env
->regs
[R_EAX
] = x86_ldq_phys(cs
, env
->vm_hsave
+
716 offsetof(struct vmcb
, save
.rax
));
718 env
->dr
[6] = x86_ldq_phys(cs
,
719 env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr6
));
720 env
->dr
[7] = x86_ldq_phys(cs
,
721 env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr7
));
724 x86_stq_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_code
),
726 x86_stq_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_1
),
730 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_int_info
),
731 x86_ldl_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
,
732 control
.event_inj
)));
734 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_int_info_err
),
735 x86_ldl_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
,
736 control
.event_inj_err
)));
738 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
), 0);
740 env
->hflags2
&= ~HF2_GIF_MASK
;
741 /* FIXME: Resets the current ASID register to zero (host ASID). */
743 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
745 /* Clears the TSC_OFFSET inside the processor. */
747 /* If the host is in PAE mode, the processor reloads the host's PDPEs
748 from the page table indicated the host's CR3. If the PDPEs contain
749 illegal state, the processor causes a shutdown. */
751 /* Disables all breakpoints in the host DR7 register. */
753 /* Checks the reloaded host state for consistency. */
755 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
756 host's code segment or non-canonical (in the case of long mode), a
757 #GP fault is delivered inside the host. */
759 /* remove any pending exception */
760 cs
->exception_index
= -1;
762 env
->old_exception
= -1;
767 void cpu_vmexit(CPUX86State
*env
, uint32_t exit_code
, uint64_t exit_info_1
)
769 helper_vmexit(env
, exit_code
, exit_info_1
);