// This software is licensed under the terms of the GNU General Public
// License version 2, as published by the Free Software Foundation, and
// may be copied, distributed, and modified under those terms.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/error-report.h"

#include "sysemu/hvf.h"
#include "hvf-i386.h"
#include "vmcs.h"
#include "vmx.h"
#include "x86.h"
#include "x86_descr.h"
#include "x86_mmu.h"
#include "x86_decode.h"
#include "x86_emu.h"
#include "x86_task.h"
#include "x86hvf.h"

#include <Hypervisor/hv.h>
#include <Hypervisor/hv_vmx.h>

#include "hw/i386/apic_internal.h"
#include "qemu/main-loop.h"
#include "sysemu/accel.h"
#include "target/i386/cpu.h"
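
/*
 * Task-switch emulation for the HVF (macOS Hypervisor.framework)
 * accelerator. Task switches cause unconditional VM exits under VMX, so
 * when the guest switches tasks (IRET, JMP/CALL to a TSS, or an IDT task
 * gate) the switch is carried out in software: the outgoing state is saved
 * into the old 32-bit TSS and the incoming state is loaded from the new one.
 */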
// TODO: taskswitch handling
/* Save the dynamic guest state (EIP, EFLAGS, general purpose registers
 * and segment selectors) into the outgoing 32-bit TSS. */
static void save_state_to_tss32(CPUState *cpu, struct x86_tss_segment32 *tss)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    /* CR3 and ldt selector are not saved intentionally */
    tss->eip = EIP(env);
    tss->eflags = EFLAGS(env);
    tss->eax = EAX(env);
    tss->ecx = ECX(env);
    tss->edx = EDX(env);
    tss->ebx = EBX(env);
    tss->esp = ESP(env);
    tss->ebp = EBP(env);
    tss->esi = ESI(env);
    tss->edi = EDI(env);

    tss->es = vmx_read_segment_selector(cpu, R_ES).sel;
    tss->cs = vmx_read_segment_selector(cpu, R_CS).sel;
    tss->ss = vmx_read_segment_selector(cpu, R_SS).sel;
    tss->ds = vmx_read_segment_selector(cpu, R_DS).sel;
    tss->fs = vmx_read_segment_selector(cpu, R_FS).sel;
    tss->gs = vmx_read_segment_selector(cpu, R_GS).sel;
}
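
/*
 * Load the guest state from the incoming 32-bit TSS: CR3, EIP, EFLAGS,
 * the general purpose registers and the segment selectors. Only the
 * selector part of each segment register is written here; the cached
 * descriptor of the task register itself is reloaded by the caller.
 */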
static void load_state_from_tss32(CPUState *cpu, struct x86_tss_segment32 *tss)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    wvmcs(cpu->hvf_fd, VMCS_GUEST_CR3, tss->cr3);

    RIP(env) = tss->eip;
    /* Bit 1 of EFLAGS is reserved and always reads as 1 */
    EFLAGS(env) = tss->eflags | 2;

    /* General purpose registers */
    RAX(env) = tss->eax;
    RCX(env) = tss->ecx;
    RDX(env) = tss->edx;
    RBX(env) = tss->ebx;
    RSP(env) = tss->esp;
    RBP(env) = tss->ebp;
    RSI(env) = tss->esi;
    RDI(env) = tss->edi;

    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ldt}}, R_LDTR);
    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->es}}, R_ES);
    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->cs}}, R_CS);
    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ss}}, R_SS);
    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ds}}, R_DS);
    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->fs}}, R_FS);
    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->gs}}, R_GS);
}
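
/*
 * Perform a 32-bit task switch: write the outgoing state into the old
 * TSS, read the new TSS from guest memory, link it back to the old task
 * when the switch nests (CALL or gate), and load the new state.
 */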
static int task_switch_32(CPUState *cpu, x68_segment_selector tss_sel,
                          x68_segment_selector old_tss_sel,
                          uint64_t old_tss_base,
                          struct x86_segment_descriptor *new_desc)
{
    struct x86_tss_segment32 tss_seg;
    uint32_t new_tss_base = x86_segment_base(new_desc);
    uint32_t eip_offset = offsetof(struct x86_tss_segment32, eip);
    uint32_t ldt_sel_offset = offsetof(struct x86_tss_segment32, ldt);

    vmx_read_mem(cpu, &tss_seg, old_tss_base, sizeof(tss_seg));
    save_state_to_tss32(cpu, &tss_seg);

    /* Write back only the dynamic fields, i.e. everything from eip up to
     * (but not including) the ldt selector; prev_tss and cr3 are left
     * untouched in the old TSS. */
    vmx_write_mem(cpu, old_tss_base + eip_offset, &tss_seg.eip,
                  ldt_sel_offset - eip_offset);
    vmx_read_mem(cpu, &tss_seg, new_tss_base, sizeof(tss_seg));

    if (old_tss_sel.sel != 0xffff) {
        tss_seg.prev_tss = old_tss_sel.sel;

        /* Store the back link so that IRET can return to the old task */
        vmx_write_mem(cpu, new_tss_base, &tss_seg.prev_tss,
                      sizeof(tss_seg.prev_tss));
    }
    load_state_from_tss32(cpu, &tss_seg);
    return 0;
}
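
/*
 * Handle a task-switch VM exit. 'reason' says how the switch was
 * triggered (IRET, JMP, CALL or an IDT gate); for IDT gates, 'gate' and
 * 'gate_type' identify the gate that fired. Switches that were not raised
 * through a hardware exception, hardware interrupt or NMI gate are not
 * emulated: the triggering instruction is simply skipped.
 */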
void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel,
                            int reason, bool gate_valid, uint8_t gate,
                            uint64_t gate_type)
{
    uint64_t rip = rreg(cpu->hvf_fd, HV_X86_RIP);
    if (!gate_valid || (gate_type != VMCS_INTR_T_HWEXCEPTION &&
                        gate_type != VMCS_INTR_T_HWINTR &&
                        gate_type != VMCS_INTR_T_NMI)) {
        int ins_len = rvmcs(cpu->hvf_fd, VMCS_EXIT_INSTRUCTION_LENGTH);
        macvm_set_rip(cpu, rip + ins_len);
        return;
    }

    /* Pull the current register state out of the vCPU before emulating */
    load_regs(cpu);

    struct x86_segment_descriptor curr_tss_desc, next_tss_desc;
    int ret;
    x68_segment_selector old_tss_sel = vmx_read_segment_selector(cpu, R_TR);
    uint64_t old_tss_base = vmx_read_segment_base(cpu, R_TR);
    uint32_t desc_limit;
    struct x86_call_gate task_gate_desc;
    struct vmx_segment vmx_seg;

    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
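
    /* Fetch the descriptors of both the outgoing and the incoming TSS */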
    x86_read_segment_descriptor(cpu, &next_tss_desc, tss_sel);
    x86_read_segment_descriptor(cpu, &curr_tss_desc, old_tss_sel);

    if (reason == TSR_IDT_GATE && gate_valid) {
        int dpl;

        ret = x86_read_call_gate(cpu, &task_gate_desc, gate);

        dpl = task_gate_desc.dpl;
        x68_segment_selector cs = vmx_read_segment_selector(cpu, R_CS);
        if (tss_sel.rpl > dpl || cs.rpl > dpl) {
            /* TODO: the privilege check should inject #GP */
            ;//DPRINTF("emulate_gp");
        }
    }

    /* The new TSS must be present and large enough: at least 0x67 bytes
     * for a 32-bit TSS (type bit 3 set), 0x2b bytes for a 16-bit one. */
    desc_limit = x86_segment_limit(&next_tss_desc);
    if (!next_tss_desc.p ||
        ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
         desc_limit < 0x2b)) {
        VM_PANIC("emulate_ts");
    }
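
    /*
     * Busy-flag bookkeeping: the outgoing task's descriptor is marked
     * non-busy on IRET/JMP, and the incoming one is marked busy unless we
     * are IRET-ing back to it.
     */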
    if (reason == TSR_IRET || reason == TSR_JMP) {
        curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
        x86_write_segment_descriptor(cpu, &curr_tss_desc, old_tss_sel);
    }

    if (reason == TSR_IRET) {
        /* Returning from a nested task clears the NT flag */
        EFLAGS(env) &= ~RFLAGS_NT;
    }

    if (reason != TSR_CALL && reason != TSR_IDT_GATE) {
        /* No back link is needed: mark the old selector invalid */
        old_tss_sel.sel = 0xffff;
    }

    if (reason != TSR_IRET) {
        next_tss_desc.type |= (1 << 1); /* set busy flag */
        x86_write_segment_descriptor(cpu, &next_tss_desc, tss_sel);
    }

    if (next_tss_desc.type & 8) {
        /* Type bit 3 set: the new TSS is a 32-bit one */
        ret = task_switch_32(cpu, tss_sel, old_tss_sel, old_tss_base,
                             &next_tss_desc);
    } else {
        //ret = task_switch_16(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc);
        VM_PANIC("task_switch_16");
    }
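
    /*
     * Commit the switch: set CR0.TS so the next FPU instruction in the
     * new task traps, and load the new TSS descriptor into the guest's
     * task register.
     */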
    macvm_set_cr0(cpu->hvf_fd, rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0) | CR0_TS);
    x86_segment_descriptor_to_vmx(cpu, tss_sel, &next_tss_desc, &vmx_seg);
    vmx_write_segment_descriptor(cpu, &vmx_seg, R_TR);

    /* Push the updated register state back to the vCPU */
    store_regs(cpu);

    hv_vcpu_invalidate_tlb(cpu->hvf_fd);
    hv_vcpu_flush(cpu->hvf_fd);
}