1 // This software is licensed under the terms of the GNU General Public
2 // License version 2, as published by the Free Software Foundation, and
3 // may be copied, distributed, and modified under those terms.
5 // This program is distributed in the hope that it will be useful,
6 // but WITHOUT ANY WARRANTY; without even the implied warranty of
7 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8 // GNU General Public License for more details.
9 #include "qemu/osdep.h"
11 #include "qemu-common.h"
12 #include "qemu/error-report.h"
14 #include "sysemu/hvf.h"
19 #include "x86_descr.h"
21 #include "x86_decode.h"
26 #include <Hypervisor/hv.h>
27 #include <Hypervisor/hv_vmx.h>
29 #include "hw/i386/apic_internal.h"
30 #include "qemu/main-loop.h"
31 #include "sysemu/accel.h"
32 #include "sysemu/sysemu.h"
33 #include "target/i386/cpu.h"
35 // TODO: taskswitch handling
/*
 * save_state_to_tss32 - snapshot the outgoing task's guest state into a
 * 32-bit TSS image before a task switch.
 *
 * @cpu: vCPU whose state is read (selectors come from the VMCS).
 * @tss: in-memory TSS image to fill; the caller writes it back to guest RAM.
 *
 * NOTE(review): only the EFLAGS store and the segment-selector stores are
 * visible here; the jump in the embedded original line numbering (43 -> 53)
 * suggests the general-purpose register stores were lost in extraction --
 * confirm against the upstream file before relying on this chunk.
 */
36 static void save_state_to_tss32(CPUState
*cpu
, struct x86_tss_segment32
*tss
)
38 X86CPU
*x86_cpu
= X86_CPU(cpu
);
39 CPUX86State
*env
= &x86_cpu
->env
;
41 /* CR3 and ldt selector are not saved intentionally */
/* EFLAGS comes from the emulated CPUX86State, not the VMCS. */
43 tss
->eflags
= EFLAGS(env
);
/* Segment selectors are read live from the VMCS guest-state area. */
53 tss
->es
= vmx_read_segment_selector(cpu
, R_ES
).sel
;
54 tss
->cs
= vmx_read_segment_selector(cpu
, R_CS
).sel
;
55 tss
->ss
= vmx_read_segment_selector(cpu
, R_SS
).sel
;
56 tss
->ds
= vmx_read_segment_selector(cpu
, R_DS
).sel
;
57 tss
->fs
= vmx_read_segment_selector(cpu
, R_FS
).sel
;
58 tss
->gs
= vmx_read_segment_selector(cpu
, R_GS
).sel
;
/*
 * load_state_from_tss32 - install the incoming task's state from a 32-bit
 * TSS image: guest CR3 (written straight into the VMCS), EFLAGS, and the
 * LDTR plus all data/code segment selectors.
 *
 * @cpu: vCPU to load into.
 * @tss: TSS image previously read from guest memory.
 *
 * NOTE(review): the comment "General purpose registers" at original line 71
 * is followed by nothing before the selector writes (numbering jumps
 * 71 -> 81); the GP-register loads appear dropped by extraction -- confirm
 * against upstream.
 */
61 static void load_state_from_tss32(CPUState
*cpu
, struct x86_tss_segment32
*tss
)
63 X86CPU
*x86_cpu
= X86_CPU(cpu
);
64 CPUX86State
*env
= &x86_cpu
->env
;
/* New task's page tables: CR3 goes directly into the VMCS guest state. */
66 wvmcs(cpu
->hvf_fd
, VMCS_GUEST_CR3
, tss
->cr3
);
/* Bit 1 of EFLAGS is architecturally reserved and always reads as 1. */
69 EFLAGS(env
) = tss
->eflags
| 2;
71 /* General purpose registers */
/*
 * Selector loads: LDTR first, then the ordinary segment registers.
 * The double-brace compound literal fills the x68_segment_selector's
 * inner raw-value member.
 */
81 vmx_write_segment_selector(cpu
, (x68_segment_selector
){{tss
->ldt
}}, R_LDTR
);
82 vmx_write_segment_selector(cpu
, (x68_segment_selector
){{tss
->es
}}, R_ES
);
83 vmx_write_segment_selector(cpu
, (x68_segment_selector
){{tss
->cs
}}, R_CS
);
84 vmx_write_segment_selector(cpu
, (x68_segment_selector
){{tss
->ss
}}, R_SS
);
85 vmx_write_segment_selector(cpu
, (x68_segment_selector
){{tss
->ds
}}, R_DS
);
86 vmx_write_segment_selector(cpu
, (x68_segment_selector
){{tss
->fs
}}, R_FS
);
87 vmx_write_segment_selector(cpu
, (x68_segment_selector
){{tss
->gs
}}, R_GS
);
/*
 * task_switch_32 - perform the memory side of a 32-bit TSS task switch:
 *
 *  1. read the old TSS from guest memory and refresh it with current state;
 *  2. write back only the dynamic portion (from 'eip' up to, but not
 *     including, 'ldt' -- matching the "CR3/ldt not saved" rule above);
 *  3. read the new TSS, optionally store the back-link (prev_tss) for
 *     nested tasks, and load the vCPU from it.
 *
 * @cpu:          vCPU being switched.
 * @tss_sel:      selector of the new TSS.
 * @old_tss_sel:  selector of the old TSS; 0xffff means "no back-link"
 *                (set by the caller for IRET/JMP-style switches).
 * @old_tss_base: linear base of the old TSS.
 * @new_desc:     descriptor of the new TSS (base extracted below).
 *
 * NOTE(review): declared to return int but no 'return' statement is visible
 * in this chunk (numbering jumps 109 -> 113); presumably 'return 0' was lost
 * in extraction -- confirm. vmx_read_mem/vmx_write_mem results are unchecked.
 */
90 static int task_switch_32(CPUState
*cpu
, x68_segment_selector tss_sel
, x68_segment_selector old_tss_sel
,
91 uint64_t old_tss_base
, struct x86_segment_descriptor
*new_desc
)
93 struct x86_tss_segment32 tss_seg
;
94 uint32_t new_tss_base
= x86_segment_base(new_desc
);
/* Byte range [eip, ldt) is the dynamic part written back to the old TSS. */
95 uint32_t eip_offset
= offsetof(struct x86_tss_segment32
, eip
);
96 uint32_t ldt_sel_offset
= offsetof(struct x86_tss_segment32
, ldt
);
/* Fetch the old TSS and refresh it with the outgoing task's state. */
98 vmx_read_mem(cpu
, &tss_seg
, old_tss_base
, sizeof(tss_seg
));
99 save_state_to_tss32(cpu
, &tss_seg
);
/* Write back only the dynamic fields of the old TSS. */
101 vmx_write_mem(cpu
, old_tss_base
+ eip_offset
, &tss_seg
.eip
, ldt_sel_offset
- eip_offset
);
/* Now pull in the new task's TSS image. */
102 vmx_read_mem(cpu
, &tss_seg
, new_tss_base
, sizeof(tss_seg
));
/* 0xffff sentinel: caller suppressed the back-link (IRET/JMP switches). */
104 if (old_tss_sel
.sel
!= 0xffff) {
105 tss_seg
.prev_tss
= old_tss_sel
.sel
;
/* prev_tss is the first field of the TSS, hence the write at base+0. */
107 vmx_write_mem(cpu
, new_tss_base
, &tss_seg
.prev_tss
, sizeof(tss_seg
.prev_tss
));
109 load_state_from_tss32(cpu
, &tss_seg
);
/*
 * vmx_handle_task_switch - top-level handler for a VM-exit caused by a
 * guest task switch (exit reason "task switch" under HVF/VMX).
 *
 * @cpu:        vCPU that exited.
 * @tss_sel:    selector of the target TSS.
 * @reason:     task-switch source (TSR_IRET / TSR_JMP / TSR_CALL /
 *              TSR_IDT_GATE).
 * @gate_valid: whether 'gate' names a valid IDT vector for this switch.
 * @gate:       IDT vector, when gate_valid.
 * @gate_type:  VMCS interruption type of the gate.
 *
 * Flow: validate the gate/descriptors, clear or set the TSS busy bit as the
 * switch direction requires, run the 32-bit switch, set CR0.TS, install the
 * new TR, then invalidate the TLB and flush cached VMCS state.
 *
 * NOTE(review): the closing lines of this function lie past the visible
 * chunk; review below covers only what is shown.
 */
113 void vmx_handle_task_switch(CPUState
*cpu
, x68_segment_selector tss_sel
, int reason
, bool gate_valid
, uint8_t gate
, uint64_t gate_type
)
115 uint64_t rip
= rreg(cpu
->hvf_fd
, HV_X86_RIP
);
/*
 * For software-initiated switches (not HW exception/interrupt/NMI
 * gates) the instruction pointer must be advanced past the faulting
 * instruction before emulating the switch.
 */
116 if (!gate_valid
|| (gate_type
!= VMCS_INTR_T_HWEXCEPTION
&&
117 gate_type
!= VMCS_INTR_T_HWINTR
&&
118 gate_type
!= VMCS_INTR_T_NMI
)) {
119 int ins_len
= rvmcs(cpu
->hvf_fd
, VMCS_EXIT_INSTRUCTION_LENGTH
);
120 macvm_set_rip(cpu
, rip
+ ins_len
);
126 struct x86_segment_descriptor curr_tss_desc
, next_tss_desc
;
128 x68_segment_selector old_tss_sel
= vmx_read_segment_selector(cpu
, R_TR
);
129 uint64_t old_tss_base
= vmx_read_segment_base(cpu
, R_TR
);
131 struct x86_call_gate task_gate_desc
;
132 struct vmx_segment vmx_seg
;
134 X86CPU
*x86_cpu
= X86_CPU(cpu
);
135 CPUX86State
*env
= &x86_cpu
->env
;
137 x86_read_segment_descriptor(cpu
, &next_tss_desc
, tss_sel
);
138 x86_read_segment_descriptor(cpu
, &curr_tss_desc
, old_tss_sel
);
/* Switch arrived through an IDT task gate: check gate privilege. */
140 if (reason
== TSR_IDT_GATE
&& gate_valid
) {
/* NOTE(review): 'ret' is assigned but never checked in this chunk. */
143 ret
= x86_read_call_gate(cpu
, &task_gate_desc
, gate
);
145 dpl
= task_gate_desc
.dpl
;
146 x68_segment_selector cs
= vmx_read_segment_selector(cpu
, R_CS
);
/*
 * NOTE(review): privilege violation is detected but the body is an
 * empty statement (the DPRINTF is commented out) -- a #GP should
 * presumably be injected here. Known TODO in this handler.
 */
147 if (tss_sel
.rpl
> dpl
|| cs
.rpl
> dpl
)
148 ;//DPRINTF("emulate_gp");
/* New TSS must be present and large enough (0x67 for 32-bit, 0x2b for 16-bit). */
151 desc_limit
= x86_segment_limit(&next_tss_desc
);
152 if (!next_tss_desc
.p
|| ((desc_limit
< 0x67 && (next_tss_desc
.type
& 8)) || desc_limit
< 0x2b)) {
153 VM_PANIC("emulate_ts");
/* Leaving the old task for good: clear its busy bit in the GDT. */
156 if (reason
== TSR_IRET
|| reason
== TSR_JMP
) {
157 curr_tss_desc
.type
&= ~(1 << 1); /* clear busy flag */
158 x86_write_segment_descriptor(cpu
, &curr_tss_desc
, old_tss_sel
);
/* IRET from a nested task also clears EFLAGS.NT. */
161 if (reason
== TSR_IRET
)
162 EFLAGS(env
) &= ~RFLAGS_NT
;
/* No back-link for IRET/JMP: sentinel tells task_switch_32 to skip it. */
164 if (reason
!= TSR_CALL
&& reason
!= TSR_IDT_GATE
)
165 old_tss_sel
.sel
= 0xffff;
166 if (reason
!= TSR_IRET
) {
168 next_tss_desc
.type
|= (1 << 1); /* set busy flag */
169 x86_write_segment_descriptor(cpu
, &next_tss_desc
, tss_sel
);
/* Descriptor type bit 3 set => 32-bit TSS; only that path is implemented. */
172 if (next_tss_desc
.type
& 8)
173 ret
= task_switch_32(cpu
, tss_sel
, old_tss_sel
, old_tss_base
, &next_tss_desc
);
175 //ret = task_switch_16(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc);
/* NOTE(review): 16-bit TSS support is unimplemented; panics instead. */
176 VM_PANIC("task_switch_16");
/* Task switches architecturally set CR0.TS (lazy FPU save). */
178 macvm_set_cr0(cpu
->hvf_fd
, rvmcs(cpu
->hvf_fd
, VMCS_GUEST_CR0
) | CR0_TS
);
/* Install the new task register from the (now busy) TSS descriptor. */
179 x86_segment_descriptor_to_vmx(cpu
, tss_sel
, &next_tss_desc
, &vmx_seg
);
180 vmx_write_segment_descriptor(cpu
, &vmx_seg
, R_TR
);
/* New address space / TR: drop stale translations and flush cached state. */
184 hv_vcpu_invalidate_tlb(cpu
->hvf_fd
);
185 hv_vcpu_flush(cpu
->hvf_fd
);