/* NOTE(review): garbled extraction — the original file's line numbers
 * ("5", "7", "12", ...) are fused into the text, the comment below is
 * truncated, and the enum's opening "enum {" / closing "};" plus most
 * enumerators are missing from this chunk (gaps in the fused numbers).
 * Kept byte-identical; restore from the original source before compiling. */
5 /* We shift all the intercept bits so we can OR them with the
7 INTERCEPT_INTR
= HF_HIF_SHIFT
,
/* NOTE(review): the first enumerator aliases HF_HIF_SHIFT (defined
 * elsewhere); the remaining visible enumerators appear to carry implicit
 * consecutive values, but many entries between them are missing here —
 * TODO confirm against the original file. */
12 INTERCEPT_SELECTIVE_CR0
,
36 INTERCEPT_TASK_SWITCH
,
37 INTERCEPT_FERR_FREEZE
,
/* This is not really an intercept but rather a placeholder to
 * show that we are in an SVM (just like a hidden flag, but keeps the
 * ... — NOTE(review): the rest of this sentence was lost in extraction;
 * recover it from the original source if needed). */
#define INTERCEPT_SVM       63
#define INTERCEPT_SVM_MASK  (1ULL << INTERCEPT_SVM)
/* Hardware VMCB control area (packed so it matches the in-memory VMCB).
 * NOTE(review): garbled extraction — the original file's line numbers are
 * fused into the text and several fields are missing between the visible
 * ones (gaps in the fused numbering at 62, 66-68, 70-72, 74, 76-77, 80,
 * 82, 84-85), and the closing "};" is not visible either.  The layout
 * shown here is therefore INCOMPLETE — do not rely on these offsets;
 * reconstruct from the original source. */
56 struct __attribute__ ((__packed__
)) vmcb_control_area
{
/* 16-bit intercept masks for CR/DR reads and writes. */
57 uint16_t intercept_cr_read
;
58 uint16_t intercept_cr_write
;
59 uint16_t intercept_dr_read
;
60 uint16_t intercept_dr_write
;
61 uint32_t intercept_exceptions
;
/* NOTE(review): at least one field (fused number 62) is missing here. */
63 uint8_t reserved_1
[44];
/* Physical base addresses of the I/O and MSR permission maps. */
64 uint64_t iopm_base_pa
;
65 uint64_t msrpm_base_pa
;
/* NOTE(review): fields 66-68 missing here. */
69 uint8_t reserved_2
[3];
/* NOTE(review): fields 70-72 missing here. */
73 uint8_t reserved_3
[4];
/* NOTE(review): field 74 (presumably the low exit-code word) missing. */
75 uint32_t exit_code_hi
;
/* NOTE(review): fields 76-77 missing here. */
78 uint32_t exit_int_info
;
79 uint32_t exit_int_info_err
;
/* NOTE(review): field 80 missing here. */
81 uint8_t reserved_4
[16];
/* NOTE(review): field 82 (presumably event_inj) missing here. */
83 uint32_t event_inj_err
;
/* NOTE(review): fields 84-85 missing here. */
86 uint8_t reserved_5
[832];
/* TLB_CONTROL_* values for the VMCB TLB-control byte. */
#define TLB_CONTROL_DO_NOTHING      0
#define TLB_CONTROL_FLUSH_ALL_ASID  1

/* V_* — virtual-interrupt control field encoding. */
#define V_TPR_MASK                  0x0f

/* NOTE(review): V_IRQ_SHIFT is not defined in this chunk; it is
 * presumably defined in a part of the file not visible here — confirm. */
#define V_IRQ_MASK                  (1 << V_IRQ_SHIFT)

#define V_INTR_PRIO_SHIFT           16
#define V_INTR_PRIO_MASK            (0x0f << V_INTR_PRIO_SHIFT)

#define V_IGN_TPR_SHIFT             20
#define V_IGN_TPR_MASK              (1 << V_IGN_TPR_SHIFT)

#define V_INTR_MASKING_SHIFT        24
#define V_INTR_MASKING_MASK         (1 << V_INTR_MASKING_SHIFT)

#define SVM_INTERRUPT_SHADOW_MASK   1

/* SVM_IOIO_* — I/O-intercept information encoding. */
#define SVM_IOIO_STR_SHIFT          2
#define SVM_IOIO_REP_SHIFT          3
#define SVM_IOIO_SIZE_SHIFT         4
#define SVM_IOIO_ASIZE_SHIFT        7

#define SVM_IOIO_TYPE_MASK          1
#define SVM_IOIO_STR_MASK           (1 << SVM_IOIO_STR_SHIFT)
#define SVM_IOIO_REP_MASK           (1 << SVM_IOIO_REP_SHIFT)
#define SVM_IOIO_SIZE_MASK          (7 << SVM_IOIO_SIZE_SHIFT)
#define SVM_IOIO_ASIZE_MASK         (7 << SVM_IOIO_ASIZE_SHIFT)
/* VMCB segment-register descriptor (packed).
 * NOTE(review): garbled extraction — the entire body of vmcb_seg and its
 * closing "};" (fused numbers 121-126) are missing from this chunk. */
120 struct __attribute__ ((__packed__
)) vmcb_seg
{
/* Guest processor save area (packed).
 * NOTE(review): garbled extraction — fields with fused numbers 128-133,
 * 137, 139, 141, 143-149, 151, 153-157, 162, 164, 166-169 and the closing
 * "};" are missing from this chunk; the layout shown is INCOMPLETE. */
127 struct __attribute__ ((__packed__
)) vmcb_save_area
{
/* Descriptor-table and LDT/IDT segment slots. */
134 struct vmcb_seg gdtr
;
135 struct vmcb_seg ldtr
;
136 struct vmcb_seg idtr
;
138 uint8_t reserved_1
[43];
140 uint8_t reserved_2
[4];
142 uint8_t reserved_3
[112];
150 uint8_t reserved_4
[88];
152 uint8_t reserved_5
[24];
/* MSR-backed state saved/restored on world switch. */
158 uint64_t kernel_gs_base
;
159 uint64_t sysenter_cs
;
160 uint64_t sysenter_esp
;
161 uint64_t sysenter_eip
;
163 /* qemu: cr8 added to reuse this as hsave */
/* reserved_6 shrunk by 8 bytes to make room for the cr8 addition above. */
165 uint8_t reserved_6
[32 - 8]; /* originally 32 */
170 uint64_t last_excp_from
;
171 uint64_t last_excp_to
;
174 struct __attribute__ ((__packed__
)) vmcb
{
175 struct vmcb_control_area control
;
176 struct vmcb_save_area save
;
/* CPUID leaf that reports SVM features. */
#define SVM_CPUID_FEATURE_SHIFT 2
#define SVM_CPUID_FUNC          0x8000000a

/* EFER bit that enables SVM. */
#define MSR_EFER_SVME_MASK      (1ULL << 12)

/* Segment attribute bit positions as stored in the VMCB attrib field. */
#define SVM_SELECTOR_S_SHIFT    4
#define SVM_SELECTOR_DPL_SHIFT  5
#define SVM_SELECTOR_P_SHIFT    7
#define SVM_SELECTOR_AVL_SHIFT  8
#define SVM_SELECTOR_L_SHIFT    9
#define SVM_SELECTOR_DB_SHIFT   10
#define SVM_SELECTOR_G_SHIFT    11

#define SVM_SELECTOR_TYPE_MASK  (0xf)
#define SVM_SELECTOR_S_MASK     (1 << SVM_SELECTOR_S_SHIFT)
#define SVM_SELECTOR_DPL_MASK   (3 << SVM_SELECTOR_DPL_SHIFT)
#define SVM_SELECTOR_P_MASK     (1 << SVM_SELECTOR_P_SHIFT)
#define SVM_SELECTOR_AVL_MASK   (1 << SVM_SELECTOR_AVL_SHIFT)
#define SVM_SELECTOR_L_MASK     (1 << SVM_SELECTOR_L_SHIFT)
#define SVM_SELECTOR_DB_MASK    (1 << SVM_SELECTOR_DB_SHIFT)
#define SVM_SELECTOR_G_MASK     (1 << SVM_SELECTOR_G_SHIFT)

/* Descriptor-type bits within SVM_SELECTOR_TYPE_MASK. */
#define SVM_SELECTOR_WRITE_MASK (1 << 1)
#define SVM_SELECTOR_READ_MASK  SVM_SELECTOR_WRITE_MASK
#define SVM_SELECTOR_CODE_MASK  (1 << 3)

/* Per-register bits of intercept_cr_read / intercept_cr_write.
 * NOTE(review): a CR8 mask is not present in this chunk — confirm
 * against the original file. */
#define INTERCEPT_CR0_MASK      1
#define INTERCEPT_CR3_MASK      (1 << 3)
#define INTERCEPT_CR4_MASK      (1 << 4)

/* Per-register bits of intercept_dr_read / intercept_dr_write. */
#define INTERCEPT_DR0_MASK      1
#define INTERCEPT_DR1_MASK      (1 << 1)
#define INTERCEPT_DR2_MASK      (1 << 2)
#define INTERCEPT_DR3_MASK      (1 << 3)
#define INTERCEPT_DR4_MASK      (1 << 4)
#define INTERCEPT_DR5_MASK      (1 << 5)
#define INTERCEPT_DR6_MASK      (1 << 6)
#define INTERCEPT_DR7_MASK      (1 << 7)

/* Event-injection (EVTINJ) field encoding: vector in the low byte,
 * 3-bit event type above it, validity flags on top. */
#define SVM_EVTINJ_VEC_MASK     0xff

#define SVM_EVTINJ_TYPE_SHIFT   8
#define SVM_EVTINJ_TYPE_MASK    (7 << SVM_EVTINJ_TYPE_SHIFT)

#define SVM_EVTINJ_TYPE_INTR    (0 << SVM_EVTINJ_TYPE_SHIFT)
#define SVM_EVTINJ_TYPE_NMI     (2 << SVM_EVTINJ_TYPE_SHIFT)
#define SVM_EVTINJ_TYPE_EXEPT   (3 << SVM_EVTINJ_TYPE_SHIFT)
#define SVM_EVTINJ_TYPE_SOFT    (4 << SVM_EVTINJ_TYPE_SHIFT)

/* NOTE(review): 1 << 31 overflows signed int (formally UB); consider
 * (1U << 31) — left unchanged here to avoid altering signedness of
 * existing comparisons. */
#define SVM_EVTINJ_VALID        (1 << 31)
#define SVM_EVTINJ_VALID_ERR    (1 << 11)

/* exit_int_info reuses the EVTINJ layout, so these are straight aliases. */
#define SVM_EXITINTINFO_VEC_MASK    SVM_EVTINJ_VEC_MASK

#define SVM_EXITINTINFO_TYPE_INTR   SVM_EVTINJ_TYPE_INTR
#define SVM_EXITINTINFO_TYPE_NMI    SVM_EVTINJ_TYPE_NMI
#define SVM_EXITINTINFO_TYPE_EXEPT  SVM_EVTINJ_TYPE_EXEPT
#define SVM_EXITINTINFO_TYPE_SOFT   SVM_EVTINJ_TYPE_SOFT

#define SVM_EXITINTINFO_VALID       SVM_EVTINJ_VALID
#define SVM_EXITINTINFO_VALID_ERR   SVM_EVTINJ_VALID_ERR
/* SVM #VMEXIT exit codes.
 * CR/DR accesses encode the register number in the low nibble; exceptions
 * are SVM_EXIT_EXCP_BASE + vector. */
#define SVM_EXIT_READ_CR0       0x000
#define SVM_EXIT_READ_CR3       0x003
#define SVM_EXIT_READ_CR4       0x004
#define SVM_EXIT_READ_CR8       0x008
#define SVM_EXIT_WRITE_CR0      0x010
#define SVM_EXIT_WRITE_CR3      0x013
#define SVM_EXIT_WRITE_CR4      0x014
#define SVM_EXIT_WRITE_CR8      0x018
#define SVM_EXIT_READ_DR0       0x020
#define SVM_EXIT_READ_DR1       0x021
#define SVM_EXIT_READ_DR2       0x022
#define SVM_EXIT_READ_DR3       0x023
#define SVM_EXIT_READ_DR4       0x024
#define SVM_EXIT_READ_DR5       0x025
#define SVM_EXIT_READ_DR6       0x026
#define SVM_EXIT_READ_DR7       0x027
#define SVM_EXIT_WRITE_DR0      0x030
#define SVM_EXIT_WRITE_DR1      0x031
#define SVM_EXIT_WRITE_DR2      0x032
#define SVM_EXIT_WRITE_DR3      0x033
#define SVM_EXIT_WRITE_DR4      0x034
#define SVM_EXIT_WRITE_DR5      0x035
#define SVM_EXIT_WRITE_DR6      0x036
#define SVM_EXIT_WRITE_DR7      0x037
#define SVM_EXIT_EXCP_BASE      0x040
#define SVM_EXIT_INTR           0x060
#define SVM_EXIT_NMI            0x061
#define SVM_EXIT_SMI            0x062
#define SVM_EXIT_INIT           0x063
#define SVM_EXIT_VINTR          0x064
#define SVM_EXIT_CR0_SEL_WRITE  0x065
#define SVM_EXIT_IDTR_READ      0x066
#define SVM_EXIT_GDTR_READ      0x067
#define SVM_EXIT_LDTR_READ      0x068
#define SVM_EXIT_TR_READ        0x069
#define SVM_EXIT_IDTR_WRITE     0x06a
#define SVM_EXIT_GDTR_WRITE     0x06b
#define SVM_EXIT_LDTR_WRITE     0x06c
#define SVM_EXIT_TR_WRITE       0x06d
#define SVM_EXIT_RDTSC          0x06e
#define SVM_EXIT_RDPMC          0x06f
#define SVM_EXIT_PUSHF          0x070
#define SVM_EXIT_POPF           0x071
#define SVM_EXIT_CPUID          0x072
#define SVM_EXIT_RSM            0x073
#define SVM_EXIT_IRET           0x074
#define SVM_EXIT_SWINT          0x075
#define SVM_EXIT_INVD           0x076
#define SVM_EXIT_PAUSE          0x077
#define SVM_EXIT_HLT            0x078
#define SVM_EXIT_INVLPG         0x079
#define SVM_EXIT_INVLPGA        0x07a
#define SVM_EXIT_IOIO           0x07b
#define SVM_EXIT_MSR            0x07c
#define SVM_EXIT_TASK_SWITCH    0x07d
#define SVM_EXIT_FERR_FREEZE    0x07e
#define SVM_EXIT_SHUTDOWN       0x07f
#define SVM_EXIT_VMRUN          0x080
#define SVM_EXIT_VMMCALL        0x081
#define SVM_EXIT_VMLOAD         0x082
#define SVM_EXIT_VMSAVE         0x083
#define SVM_EXIT_STGI           0x084
#define SVM_EXIT_CLGI           0x085
#define SVM_EXIT_SKINIT         0x086
#define SVM_EXIT_RDTSCP         0x087
#define SVM_EXIT_ICEBP          0x088
#define SVM_EXIT_WBINVD         0x089
/* only included in documentation, maybe wrong */
#define SVM_EXIT_MONITOR        0x08a
#define SVM_EXIT_MWAIT          0x08b
#define SVM_EXIT_NPF            0x400

/* Invalid-guest-state exit code. */
#define SVM_EXIT_ERR            -1

#define SVM_CR0_SELECTIVE_MASK  (1 << 3 | 1) /* TS and MP */

/* Raw opcode bytes for the SVM instructions, for use in inline asm
 * (avoids requiring assembler support for the SVM mnemonics). */
#define SVM_VMLOAD  ".byte 0x0f, 0x01, 0xda"
#define SVM_VMRUN   ".byte 0x0f, 0x01, 0xd8"
#define SVM_VMSAVE  ".byte 0x0f, 0x01, 0xdb"
#define SVM_CLGI    ".byte 0x0f, 0x01, 0xdd"
#define SVM_STGI    ".byte 0x0f, 0x01, 0xdc"
#define SVM_INVLPGA ".byte 0x0f, 0x01, 0xdf"
/* function references */

/* Trigger a #VMEXIT with the given exit code and exit_info_1 value.
 * Implemented elsewhere (not visible in this chunk). */
void vmexit(uint64_t exit_code, uint64_t exit_info_1);

/* Check an intercept that carries a parameter; return convention is
 * defined at the implementation site (not visible in this chunk). */
int svm_check_intercept_param(uint32_t type, uint64_t param);

/* Convenience wrapper for intercept checks that take no parameter. */
static inline int svm_check_intercept(unsigned int type)
{
    /* NOTE(review): the closing brace was missing from the garbled chunk;
     * restored here — the function clearly ends after this return, since
     * the next original line begins an unrelated #define block. */
    return svm_check_intercept_param(type, 0);
}
/* Test whether the given intercept bit(s) are enabled in env.
 * INTERCEPTEDw/INTERCEPTEDl select a named sub-field via token pasting
 * (e.g. var = _cr_read expands to env->intercept_cr_read).
 * Fix: the mask parameter is now parenthesized — previously
 * INTERCEPTED(a | b) expanded to (env->intercept & a | b), which binds
 * '&' before '|' and silently tested the wrong bits. */
#define INTERCEPTED(mask)       (env->intercept & (mask))
#define INTERCEPTEDw(var, mask) (env->intercept ## var & (mask))
#define INTERCEPTEDl(var, mask) (env->intercept ## var & (mask))
/* Load one guest segment from the VMCB save area into the CPU segment
 * cache (selector, base, limit, and attributes converted by
 * vmcb2cpu_attrib).
 * NOTE(review): garbled extraction — a continuation line (fused number
 * 340) is missing between the first two lines below, and the seg_index
 * parameter is never used in the visible expansion; the lost line was
 * presumably the segment-register index argument to
 * cpu_x86_load_seg_cache.  Kept byte-identical; TODO confirm against the
 * original source. */
338 #define SVM_LOAD_SEG(addr, seg_index, seg) \
339 cpu_x86_load_seg_cache(env, \
341 lduw_phys(addr + offsetof(struct vmcb, save.seg.selector)),\
342 ldq_phys(addr + offsetof(struct vmcb, save.seg.base)),\
343 ldl_phys(addr + offsetof(struct vmcb, save.seg.limit)),\
344 vmcb2cpu_attrib(lduw_phys(addr + offsetof(struct vmcb, save.seg.attrib)), ldq_phys(addr + offsetof(struct vmcb, save.seg.base)), ldl_phys(addr + offsetof(struct vmcb, save.seg.limit))))
/* Copy one guest segment descriptor from the VMCB save area into the
 * QEMU segment-cache fields of env (selector, base, limit, and flags
 * converted by vmcb2cpu_attrib).
 * Fix: wrapped in do { } while (0) so the four-statement expansion is a
 * single statement — the previous form silently misbehaved when used as
 * the body of an unbraced if/else. */
#define SVM_LOAD_SEG2(addr, seg_qemu, seg_vmcb) \
    do { \
        env->seg_qemu.selector = lduw_phys(addr + offsetof(struct vmcb, save.seg_vmcb.selector)); \
        env->seg_qemu.base = ldq_phys(addr + offsetof(struct vmcb, save.seg_vmcb.base)); \
        env->seg_qemu.limit = ldl_phys(addr + offsetof(struct vmcb, save.seg_vmcb.limit)); \
        env->seg_qemu.flags = vmcb2cpu_attrib(lduw_phys(addr + offsetof(struct vmcb, save.seg_vmcb.attrib)), env->seg_qemu.base, env->seg_qemu.limit); \
    } while (0)
/* Store one QEMU segment-cache entry from env back into the VMCB save
 * area (selector, base, limit, and flags converted by cpu2vmcb_attrib).
 * Fix: wrapped in do { } while (0) so the four-statement expansion is a
 * single statement — the previous form silently misbehaved when used as
 * the body of an unbraced if/else. */
#define SVM_SAVE_SEG(addr, seg_qemu, seg_vmcb) \
    do { \
        stw_phys(addr + offsetof(struct vmcb, save.seg_vmcb.selector), env->seg_qemu.selector); \
        stq_phys(addr + offsetof(struct vmcb, save.seg_vmcb.base), env->seg_qemu.base); \
        stl_phys(addr + offsetof(struct vmcb, save.seg_vmcb.limit), env->seg_qemu.limit); \
        stw_phys(addr + offsetof(struct vmcb, save.seg_vmcb.attrib), cpu2vmcb_attrib(env->seg_qemu.flags)); \
    } while (0)