/*
 * x86 SVM helpers (sysemu only)
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
20 #include "qemu/osdep.h"
22 #include "exec/helper-proto.h"
23 #include "exec/exec-all.h"
24 #include "exec/cpu_ldst.h"
25 #include "tcg/helper-tcg.h"
27 /* Secure Virtual Machine helpers */
29 static inline void svm_save_seg(CPUX86State
*env
, hwaddr addr
,
30 const SegmentCache
*sc
)
32 CPUState
*cs
= env_cpu(env
);
34 x86_stw_phys(cs
, addr
+ offsetof(struct vmcb_seg
, selector
),
36 x86_stq_phys(cs
, addr
+ offsetof(struct vmcb_seg
, base
),
38 x86_stl_phys(cs
, addr
+ offsetof(struct vmcb_seg
, limit
),
40 x86_stw_phys(cs
, addr
+ offsetof(struct vmcb_seg
, attrib
),
41 ((sc
->flags
>> 8) & 0xff) | ((sc
->flags
>> 12) & 0x0f00));
44 static inline void svm_load_seg(CPUX86State
*env
, hwaddr addr
,
47 CPUState
*cs
= env_cpu(env
);
50 sc
->selector
= x86_lduw_phys(cs
,
51 addr
+ offsetof(struct vmcb_seg
, selector
));
52 sc
->base
= x86_ldq_phys(cs
, addr
+ offsetof(struct vmcb_seg
, base
));
53 sc
->limit
= x86_ldl_phys(cs
, addr
+ offsetof(struct vmcb_seg
, limit
));
54 flags
= x86_lduw_phys(cs
, addr
+ offsetof(struct vmcb_seg
, attrib
));
55 sc
->flags
= ((flags
& 0xff) << 8) | ((flags
& 0x0f00) << 12);
58 static inline void svm_load_seg_cache(CPUX86State
*env
, hwaddr addr
,
61 SegmentCache sc1
, *sc
= &sc1
;
63 svm_load_seg(env
, addr
, sc
);
64 cpu_x86_load_seg_cache(env
, seg_reg
, sc
->selector
,
65 sc
->base
, sc
->limit
, sc
->flags
);
68 void helper_vmrun(CPUX86State
*env
, int aflag
, int next_eip_addend
)
70 CPUState
*cs
= env_cpu(env
);
76 cpu_svm_check_intercept_param(env
, SVM_EXIT_VMRUN
, 0, GETPC());
79 addr
= env
->regs
[R_EAX
];
81 addr
= (uint32_t)env
->regs
[R_EAX
];
84 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "vmrun! " TARGET_FMT_lx
"\n", addr
);
88 /* save the current CPU state in the hsave page */
89 x86_stq_phys(cs
, env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.base
),
91 x86_stl_phys(cs
, env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.limit
),
94 x86_stq_phys(cs
, env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.base
),
96 x86_stl_phys(cs
, env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.limit
),
100 env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr0
), env
->cr
[0]);
102 env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr2
), env
->cr
[2]);
104 env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr3
), env
->cr
[3]);
106 env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr4
), env
->cr
[4]);
108 env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr6
), env
->dr
[6]);
110 env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr7
), env
->dr
[7]);
113 env
->vm_hsave
+ offsetof(struct vmcb
, save
.efer
), env
->efer
);
115 env
->vm_hsave
+ offsetof(struct vmcb
, save
.rflags
),
116 cpu_compute_eflags(env
));
118 svm_save_seg(env
, env
->vm_hsave
+ offsetof(struct vmcb
, save
.es
),
120 svm_save_seg(env
, env
->vm_hsave
+ offsetof(struct vmcb
, save
.cs
),
122 svm_save_seg(env
, env
->vm_hsave
+ offsetof(struct vmcb
, save
.ss
),
124 svm_save_seg(env
, env
->vm_hsave
+ offsetof(struct vmcb
, save
.ds
),
127 x86_stq_phys(cs
, env
->vm_hsave
+ offsetof(struct vmcb
, save
.rip
),
128 env
->eip
+ next_eip_addend
);
130 env
->vm_hsave
+ offsetof(struct vmcb
, save
.rsp
), env
->regs
[R_ESP
]);
132 env
->vm_hsave
+ offsetof(struct vmcb
, save
.rax
), env
->regs
[R_EAX
]);
134 /* load the interception bitmaps so we do not need to access the
136 env
->intercept
= x86_ldq_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
,
138 env
->intercept_cr_read
= x86_lduw_phys(cs
, env
->vm_vmcb
+
139 offsetof(struct vmcb
,
140 control
.intercept_cr_read
));
141 env
->intercept_cr_write
= x86_lduw_phys(cs
, env
->vm_vmcb
+
142 offsetof(struct vmcb
,
143 control
.intercept_cr_write
));
144 env
->intercept_dr_read
= x86_lduw_phys(cs
, env
->vm_vmcb
+
145 offsetof(struct vmcb
,
146 control
.intercept_dr_read
));
147 env
->intercept_dr_write
= x86_lduw_phys(cs
, env
->vm_vmcb
+
148 offsetof(struct vmcb
,
149 control
.intercept_dr_write
));
150 env
->intercept_exceptions
= x86_ldl_phys(cs
, env
->vm_vmcb
+
151 offsetof(struct vmcb
,
152 control
.intercept_exceptions
155 nested_ctl
= x86_ldq_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
,
156 control
.nested_ctl
));
158 env
->nested_pg_mode
= 0;
160 if (nested_ctl
& SVM_NPT_ENABLED
) {
161 env
->nested_cr3
= x86_ldq_phys(cs
,
162 env
->vm_vmcb
+ offsetof(struct vmcb
,
163 control
.nested_cr3
));
164 env
->hflags2
|= HF2_NPT_MASK
;
166 env
->nested_pg_mode
= get_pg_mode(env
) & PG_MODE_SVM_MASK
;
169 /* enable intercepts */
170 env
->hflags
|= HF_GUEST_MASK
;
172 env
->tsc_offset
= x86_ldq_phys(cs
, env
->vm_vmcb
+
173 offsetof(struct vmcb
, control
.tsc_offset
));
175 env
->gdt
.base
= x86_ldq_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
,
177 env
->gdt
.limit
= x86_ldl_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
,
180 env
->idt
.base
= x86_ldq_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
,
182 env
->idt
.limit
= x86_ldl_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
,
185 /* clear exit_info_2 so we behave like the real hardware */
187 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_2
), 0);
189 cpu_x86_update_cr0(env
, x86_ldq_phys(cs
,
190 env
->vm_vmcb
+ offsetof(struct vmcb
,
192 cpu_x86_update_cr4(env
, x86_ldq_phys(cs
,
193 env
->vm_vmcb
+ offsetof(struct vmcb
,
195 cpu_x86_update_cr3(env
, x86_ldq_phys(cs
,
196 env
->vm_vmcb
+ offsetof(struct vmcb
,
198 env
->cr
[2] = x86_ldq_phys(cs
,
199 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr2
));
200 int_ctl
= x86_ldl_phys(cs
,
201 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
));
202 env
->hflags2
&= ~(HF2_HIF_MASK
| HF2_VINTR_MASK
);
203 if (int_ctl
& V_INTR_MASKING_MASK
) {
204 env
->v_tpr
= int_ctl
& V_TPR_MASK
;
205 env
->hflags2
|= HF2_VINTR_MASK
;
206 if (env
->eflags
& IF_MASK
) {
207 env
->hflags2
|= HF2_HIF_MASK
;
213 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.efer
)));
215 cpu_load_eflags(env
, x86_ldq_phys(cs
,
216 env
->vm_vmcb
+ offsetof(struct vmcb
,
218 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
220 svm_load_seg_cache(env
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.es
),
222 svm_load_seg_cache(env
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cs
),
224 svm_load_seg_cache(env
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.ss
),
226 svm_load_seg_cache(env
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.ds
),
229 env
->eip
= x86_ldq_phys(cs
,
230 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rip
));
232 env
->regs
[R_ESP
] = x86_ldq_phys(cs
,
233 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rsp
));
234 env
->regs
[R_EAX
] = x86_ldq_phys(cs
,
235 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rax
));
236 env
->dr
[7] = x86_ldq_phys(cs
,
237 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr7
));
238 env
->dr
[6] = x86_ldq_phys(cs
,
239 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr6
));
241 /* FIXME: guest state consistency checks */
243 switch (x86_ldub_phys(cs
,
244 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.tlb_ctl
))) {
245 case TLB_CONTROL_DO_NOTHING
:
247 case TLB_CONTROL_FLUSH_ALL_ASID
:
248 /* FIXME: this is not 100% correct but should work for now */
253 env
->hflags2
|= HF2_GIF_MASK
;
255 if (int_ctl
& V_IRQ_MASK
) {
256 CPUState
*cs
= env_cpu(env
);
258 cs
->interrupt_request
|= CPU_INTERRUPT_VIRQ
;
261 /* maybe we need to inject an event */
262 event_inj
= x86_ldl_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
,
264 if (event_inj
& SVM_EVTINJ_VALID
) {
265 uint8_t vector
= event_inj
& SVM_EVTINJ_VEC_MASK
;
266 uint16_t valid_err
= event_inj
& SVM_EVTINJ_VALID_ERR
;
267 uint32_t event_inj_err
= x86_ldl_phys(cs
, env
->vm_vmcb
+
268 offsetof(struct vmcb
,
269 control
.event_inj_err
));
271 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "Injecting(%#hx): ", valid_err
);
272 /* FIXME: need to implement valid_err */
273 switch (event_inj
& SVM_EVTINJ_TYPE_MASK
) {
274 case SVM_EVTINJ_TYPE_INTR
:
275 cs
->exception_index
= vector
;
276 env
->error_code
= event_inj_err
;
277 env
->exception_is_int
= 0;
278 env
->exception_next_eip
= -1;
279 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "INTR");
280 /* XXX: is it always correct? */
281 do_interrupt_x86_hardirq(env
, vector
, 1);
283 case SVM_EVTINJ_TYPE_NMI
:
284 cs
->exception_index
= EXCP02_NMI
;
285 env
->error_code
= event_inj_err
;
286 env
->exception_is_int
= 0;
287 env
->exception_next_eip
= env
->eip
;
288 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "NMI");
291 case SVM_EVTINJ_TYPE_EXEPT
:
292 cs
->exception_index
= vector
;
293 env
->error_code
= event_inj_err
;
294 env
->exception_is_int
= 0;
295 env
->exception_next_eip
= -1;
296 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "EXEPT");
299 case SVM_EVTINJ_TYPE_SOFT
:
300 cs
->exception_index
= vector
;
301 env
->error_code
= event_inj_err
;
302 env
->exception_is_int
= 1;
303 env
->exception_next_eip
= env
->eip
;
304 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "SOFT");
308 qemu_log_mask(CPU_LOG_TB_IN_ASM
, " %#x %#x\n", cs
->exception_index
,
313 void helper_vmmcall(CPUX86State
*env
)
315 cpu_svm_check_intercept_param(env
, SVM_EXIT_VMMCALL
, 0, GETPC());
316 raise_exception(env
, EXCP06_ILLOP
);
319 void helper_vmload(CPUX86State
*env
, int aflag
)
321 CPUState
*cs
= env_cpu(env
);
324 cpu_svm_check_intercept_param(env
, SVM_EXIT_VMLOAD
, 0, GETPC());
327 addr
= env
->regs
[R_EAX
];
329 addr
= (uint32_t)env
->regs
[R_EAX
];
332 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "vmload! " TARGET_FMT_lx
333 "\nFS: %016" PRIx64
" | " TARGET_FMT_lx
"\n",
334 addr
, x86_ldq_phys(cs
, addr
+ offsetof(struct vmcb
,
336 env
->segs
[R_FS
].base
);
338 svm_load_seg_cache(env
, addr
+ offsetof(struct vmcb
, save
.fs
), R_FS
);
339 svm_load_seg_cache(env
, addr
+ offsetof(struct vmcb
, save
.gs
), R_GS
);
340 svm_load_seg(env
, addr
+ offsetof(struct vmcb
, save
.tr
), &env
->tr
);
341 svm_load_seg(env
, addr
+ offsetof(struct vmcb
, save
.ldtr
), &env
->ldt
);
344 env
->kernelgsbase
= x86_ldq_phys(cs
, addr
+ offsetof(struct vmcb
,
345 save
.kernel_gs_base
));
346 env
->lstar
= x86_ldq_phys(cs
, addr
+ offsetof(struct vmcb
, save
.lstar
));
347 env
->cstar
= x86_ldq_phys(cs
, addr
+ offsetof(struct vmcb
, save
.cstar
));
348 env
->fmask
= x86_ldq_phys(cs
, addr
+ offsetof(struct vmcb
, save
.sfmask
));
350 env
->star
= x86_ldq_phys(cs
, addr
+ offsetof(struct vmcb
, save
.star
));
351 env
->sysenter_cs
= x86_ldq_phys(cs
,
352 addr
+ offsetof(struct vmcb
, save
.sysenter_cs
));
353 env
->sysenter_esp
= x86_ldq_phys(cs
, addr
+ offsetof(struct vmcb
,
355 env
->sysenter_eip
= x86_ldq_phys(cs
, addr
+ offsetof(struct vmcb
,
359 void helper_vmsave(CPUX86State
*env
, int aflag
)
361 CPUState
*cs
= env_cpu(env
);
364 cpu_svm_check_intercept_param(env
, SVM_EXIT_VMSAVE
, 0, GETPC());
367 addr
= env
->regs
[R_EAX
];
369 addr
= (uint32_t)env
->regs
[R_EAX
];
372 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "vmsave! " TARGET_FMT_lx
373 "\nFS: %016" PRIx64
" | " TARGET_FMT_lx
"\n",
374 addr
, x86_ldq_phys(cs
,
375 addr
+ offsetof(struct vmcb
, save
.fs
.base
)),
376 env
->segs
[R_FS
].base
);
378 svm_save_seg(env
, addr
+ offsetof(struct vmcb
, save
.fs
),
380 svm_save_seg(env
, addr
+ offsetof(struct vmcb
, save
.gs
),
382 svm_save_seg(env
, addr
+ offsetof(struct vmcb
, save
.tr
),
384 svm_save_seg(env
, addr
+ offsetof(struct vmcb
, save
.ldtr
),
388 x86_stq_phys(cs
, addr
+ offsetof(struct vmcb
, save
.kernel_gs_base
),
390 x86_stq_phys(cs
, addr
+ offsetof(struct vmcb
, save
.lstar
), env
->lstar
);
391 x86_stq_phys(cs
, addr
+ offsetof(struct vmcb
, save
.cstar
), env
->cstar
);
392 x86_stq_phys(cs
, addr
+ offsetof(struct vmcb
, save
.sfmask
), env
->fmask
);
394 x86_stq_phys(cs
, addr
+ offsetof(struct vmcb
, save
.star
), env
->star
);
396 addr
+ offsetof(struct vmcb
, save
.sysenter_cs
), env
->sysenter_cs
);
397 x86_stq_phys(cs
, addr
+ offsetof(struct vmcb
, save
.sysenter_esp
),
399 x86_stq_phys(cs
, addr
+ offsetof(struct vmcb
, save
.sysenter_eip
),
403 void helper_stgi(CPUX86State
*env
)
405 cpu_svm_check_intercept_param(env
, SVM_EXIT_STGI
, 0, GETPC());
406 env
->hflags2
|= HF2_GIF_MASK
;
409 void helper_clgi(CPUX86State
*env
)
411 cpu_svm_check_intercept_param(env
, SVM_EXIT_CLGI
, 0, GETPC());
412 env
->hflags2
&= ~HF2_GIF_MASK
;
415 void helper_invlpga(CPUX86State
*env
, int aflag
)
417 X86CPU
*cpu
= env_archcpu(env
);
420 cpu_svm_check_intercept_param(env
, SVM_EXIT_INVLPGA
, 0, GETPC());
423 addr
= env
->regs
[R_EAX
];
425 addr
= (uint32_t)env
->regs
[R_EAX
];
428 /* XXX: could use the ASID to see if it is needed to do the
430 tlb_flush_page(CPU(cpu
), addr
);
433 void cpu_svm_check_intercept_param(CPUX86State
*env
, uint32_t type
,
434 uint64_t param
, uintptr_t retaddr
)
436 CPUState
*cs
= env_cpu(env
);
438 if (likely(!(env
->hflags
& HF_GUEST_MASK
))) {
442 case SVM_EXIT_READ_CR0
... SVM_EXIT_READ_CR0
+ 8:
443 if (env
->intercept_cr_read
& (1 << (type
- SVM_EXIT_READ_CR0
))) {
444 cpu_vmexit(env
, type
, param
, retaddr
);
447 case SVM_EXIT_WRITE_CR0
... SVM_EXIT_WRITE_CR0
+ 8:
448 if (env
->intercept_cr_write
& (1 << (type
- SVM_EXIT_WRITE_CR0
))) {
449 cpu_vmexit(env
, type
, param
, retaddr
);
452 case SVM_EXIT_READ_DR0
... SVM_EXIT_READ_DR0
+ 7:
453 if (env
->intercept_dr_read
& (1 << (type
- SVM_EXIT_READ_DR0
))) {
454 cpu_vmexit(env
, type
, param
, retaddr
);
457 case SVM_EXIT_WRITE_DR0
... SVM_EXIT_WRITE_DR0
+ 7:
458 if (env
->intercept_dr_write
& (1 << (type
- SVM_EXIT_WRITE_DR0
))) {
459 cpu_vmexit(env
, type
, param
, retaddr
);
462 case SVM_EXIT_EXCP_BASE
... SVM_EXIT_EXCP_BASE
+ 31:
463 if (env
->intercept_exceptions
& (1 << (type
- SVM_EXIT_EXCP_BASE
))) {
464 cpu_vmexit(env
, type
, param
, retaddr
);
468 if (env
->intercept
& (1ULL << (SVM_EXIT_MSR
- SVM_EXIT_INTR
))) {
469 /* FIXME: this should be read in at vmrun (faster this way?) */
470 uint64_t addr
= x86_ldq_phys(cs
, env
->vm_vmcb
+
471 offsetof(struct vmcb
,
472 control
.msrpm_base_pa
));
475 switch ((uint32_t)env
->regs
[R_ECX
]) {
477 t0
= (env
->regs
[R_ECX
] * 2) % 8;
478 t1
= (env
->regs
[R_ECX
] * 2) / 8;
480 case 0xc0000000 ... 0xc0001fff:
481 t0
= (8192 + env
->regs
[R_ECX
] - 0xc0000000) * 2;
485 case 0xc0010000 ... 0xc0011fff:
486 t0
= (16384 + env
->regs
[R_ECX
] - 0xc0010000) * 2;
491 cpu_vmexit(env
, type
, param
, retaddr
);
496 if (x86_ldub_phys(cs
, addr
+ t1
) & ((1 << param
) << t0
)) {
497 cpu_vmexit(env
, type
, param
, retaddr
);
502 if (env
->intercept
& (1ULL << (type
- SVM_EXIT_INTR
))) {
503 cpu_vmexit(env
, type
, param
, retaddr
);
509 void helper_svm_check_intercept_param(CPUX86State
*env
, uint32_t type
,
512 cpu_svm_check_intercept_param(env
, type
, param
, GETPC());
515 void helper_svm_check_io(CPUX86State
*env
, uint32_t port
, uint32_t param
,
516 uint32_t next_eip_addend
)
518 CPUState
*cs
= env_cpu(env
);
520 if (env
->intercept
& (1ULL << (SVM_EXIT_IOIO
- SVM_EXIT_INTR
))) {
521 /* FIXME: this should be read in at vmrun (faster this way?) */
522 uint64_t addr
= x86_ldq_phys(cs
, env
->vm_vmcb
+
523 offsetof(struct vmcb
, control
.iopm_base_pa
));
524 uint16_t mask
= (1 << ((param
>> 4) & 7)) - 1;
526 if (x86_lduw_phys(cs
, addr
+ port
/ 8) & (mask
<< (port
& 7))) {
529 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_2
),
530 env
->eip
+ next_eip_addend
);
531 cpu_vmexit(env
, SVM_EXIT_IOIO
, param
| (port
<< 16), GETPC());
536 void cpu_vmexit(CPUX86State
*env
, uint32_t exit_code
, uint64_t exit_info_1
,
539 CPUState
*cs
= env_cpu(env
);
541 cpu_restore_state(cs
, retaddr
, true);
543 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "vmexit(%08x, %016" PRIx64
", %016"
544 PRIx64
", " TARGET_FMT_lx
")!\n",
545 exit_code
, exit_info_1
,
546 x86_ldq_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
,
547 control
.exit_info_2
)),
550 cs
->exception_index
= EXCP_VMEXIT
;
551 x86_stq_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_code
),
554 x86_stq_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
,
555 control
.exit_info_1
), exit_info_1
),
557 /* remove any pending exception */
558 env
->old_exception
= -1;
562 void do_vmexit(CPUX86State
*env
)
564 CPUState
*cs
= env_cpu(env
);
567 if (env
->hflags
& HF_INHIBIT_IRQ_MASK
) {
569 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_state
),
570 SVM_INTERRUPT_SHADOW_MASK
);
571 env
->hflags
&= ~HF_INHIBIT_IRQ_MASK
;
574 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_state
), 0);
576 env
->hflags2
&= ~HF2_NPT_MASK
;
578 /* Save the VM state in the vmcb */
579 svm_save_seg(env
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.es
),
581 svm_save_seg(env
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cs
),
583 svm_save_seg(env
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.ss
),
585 svm_save_seg(env
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.ds
),
588 x86_stq_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.base
),
590 x86_stl_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.limit
),
593 x86_stq_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.base
),
595 x86_stl_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.limit
),
599 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.efer
), env
->efer
);
601 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr0
), env
->cr
[0]);
603 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr2
), env
->cr
[2]);
605 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr3
), env
->cr
[3]);
607 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr4
), env
->cr
[4]);
609 int_ctl
= x86_ldl_phys(cs
,
610 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
));
611 int_ctl
&= ~(V_TPR_MASK
| V_IRQ_MASK
);
612 int_ctl
|= env
->v_tpr
& V_TPR_MASK
;
613 if (cs
->interrupt_request
& CPU_INTERRUPT_VIRQ
) {
614 int_ctl
|= V_IRQ_MASK
;
617 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
), int_ctl
);
619 x86_stq_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rflags
),
620 cpu_compute_eflags(env
));
621 x86_stq_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rip
),
624 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rsp
), env
->regs
[R_ESP
]);
626 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rax
), env
->regs
[R_EAX
]);
628 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr7
), env
->dr
[7]);
630 env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr6
), env
->dr
[6]);
631 x86_stb_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cpl
),
632 env
->hflags
& HF_CPL_MASK
);
634 /* Reload the host state from vm_hsave */
635 env
->hflags2
&= ~(HF2_HIF_MASK
| HF2_VINTR_MASK
);
636 env
->hflags
&= ~HF_GUEST_MASK
;
638 env
->intercept_exceptions
= 0;
639 cs
->interrupt_request
&= ~CPU_INTERRUPT_VIRQ
;
642 env
->gdt
.base
= x86_ldq_phys(cs
, env
->vm_hsave
+ offsetof(struct vmcb
,
644 env
->gdt
.limit
= x86_ldl_phys(cs
, env
->vm_hsave
+ offsetof(struct vmcb
,
647 env
->idt
.base
= x86_ldq_phys(cs
, env
->vm_hsave
+ offsetof(struct vmcb
,
649 env
->idt
.limit
= x86_ldl_phys(cs
, env
->vm_hsave
+ offsetof(struct vmcb
,
652 cpu_x86_update_cr0(env
, x86_ldq_phys(cs
,
653 env
->vm_hsave
+ offsetof(struct vmcb
,
656 cpu_x86_update_cr4(env
, x86_ldq_phys(cs
,
657 env
->vm_hsave
+ offsetof(struct vmcb
,
659 cpu_x86_update_cr3(env
, x86_ldq_phys(cs
,
660 env
->vm_hsave
+ offsetof(struct vmcb
,
662 /* we need to set the efer after the crs so the hidden flags get
664 cpu_load_efer(env
, x86_ldq_phys(cs
, env
->vm_hsave
+ offsetof(struct vmcb
,
667 cpu_load_eflags(env
, x86_ldq_phys(cs
,
668 env
->vm_hsave
+ offsetof(struct vmcb
,
670 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
|
673 svm_load_seg_cache(env
, env
->vm_hsave
+ offsetof(struct vmcb
, save
.es
),
675 svm_load_seg_cache(env
, env
->vm_hsave
+ offsetof(struct vmcb
, save
.cs
),
677 svm_load_seg_cache(env
, env
->vm_hsave
+ offsetof(struct vmcb
, save
.ss
),
679 svm_load_seg_cache(env
, env
->vm_hsave
+ offsetof(struct vmcb
, save
.ds
),
682 env
->eip
= x86_ldq_phys(cs
,
683 env
->vm_hsave
+ offsetof(struct vmcb
, save
.rip
));
684 env
->regs
[R_ESP
] = x86_ldq_phys(cs
, env
->vm_hsave
+
685 offsetof(struct vmcb
, save
.rsp
));
686 env
->regs
[R_EAX
] = x86_ldq_phys(cs
, env
->vm_hsave
+
687 offsetof(struct vmcb
, save
.rax
));
689 env
->dr
[6] = x86_ldq_phys(cs
,
690 env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr6
));
691 env
->dr
[7] = x86_ldq_phys(cs
,
692 env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr7
));
696 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_int_info
),
697 x86_ldl_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
,
698 control
.event_inj
)));
700 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_int_info_err
),
701 x86_ldl_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
,
702 control
.event_inj_err
)));
704 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
), 0);
706 env
->hflags2
&= ~HF2_GIF_MASK
;
707 /* FIXME: Resets the current ASID register to zero (host ASID). */
709 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
711 /* Clears the TSC_OFFSET inside the processor. */
713 /* If the host is in PAE mode, the processor reloads the host's PDPEs
714 from the page table indicated the host's CR3. If the PDPEs contain
715 illegal state, the processor causes a shutdown. */
717 /* Disables all breakpoints in the host DR7 register. */
719 /* Checks the reloaded host state for consistency. */
721 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
722 host's code segment or non-canonical (in the case of long mode), a
723 #GP fault is delivered inside the host. */