/*
 * Copyright (c) 2003-2013 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Mihai Carabas <mihai.carabas@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <machine/asmacros.h>
#include "vmx_assym.h"
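
/*
 * vmx_assym.h supplies the VTI_GUEST_* and VTI_HOST_* constants used
 * below: byte offsets of the saved-register fields within struct
 * vmx_thread_info (presumably generated from the C structure at build
 * time, in the usual genassym style, so the assembly stays in sync
 * with the C side).
 *
 * VMX_RESTORE_GUEST loads the saved guest register state from the
 * vmx_thread_info pointed to by "reg".
 */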
#define VMX_RESTORE_GUEST(reg)			\
	movq	VTI_GUEST_CR2(reg),%rsi;	\
	movq	%rsi,%cr2;			\
	movq	VTI_GUEST_RAX(reg),%rax;	\
	movq	VTI_GUEST_RBX(reg),%rbx;	\
	movq	VTI_GUEST_RCX(reg),%rcx;	\
	movq	VTI_GUEST_RDX(reg),%rdx;	\
	movq	VTI_GUEST_RSI(reg),%rsi;	\
	movq	VTI_GUEST_RDI(reg),%rdi;	\
	movq	VTI_GUEST_RBP(reg),%rbp;	\
	movq	VTI_GUEST_R8(reg),%r8;		\
	movq	VTI_GUEST_R9(reg),%r9;		\
	movq	VTI_GUEST_R10(reg),%r10;	\
	movq	VTI_GUEST_R11(reg),%r11;	\
	movq	VTI_GUEST_R12(reg),%r12;	\
	movq	VTI_GUEST_R13(reg),%r13;	\
	movq	VTI_GUEST_R14(reg),%r14;	\
	movq	VTI_GUEST_R15(reg),%r15
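
/*
 * Note the ordering in VMX_RESTORE_GUEST above: %cr2 can only be
 * written from a general-purpose register, so %rsi serves as scratch
 * for it first and is only then loaded with its real guest value.
 *
 * VMX_SAVE_GUEST is the mirror image: it stores the guest register
 * state into the vmx_thread_info pointed to by "reg".
 */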
#define VMX_SAVE_GUEST(reg)			\
	movq	%rax,VTI_GUEST_RAX(reg);	\
	movq	%rbx,VTI_GUEST_RBX(reg);	\
	movq	%rcx,VTI_GUEST_RCX(reg);	\
	movq	%rdx,VTI_GUEST_RDX(reg);	\
	movq	%rsi,VTI_GUEST_RSI(reg);	\
	movq	%rdi,VTI_GUEST_RDI(reg);	\
	movq	%rbp,VTI_GUEST_RBP(reg);	\
	movq	%r8,VTI_GUEST_R8(reg);		\
	movq	%r9,VTI_GUEST_R9(reg);		\
	movq	%r10,VTI_GUEST_R10(reg);	\
	movq	%r11,VTI_GUEST_R11(reg);	\
	movq	%r12,VTI_GUEST_R12(reg);	\
	movq	%r13,VTI_GUEST_R13(reg);	\
	movq	%r14,VTI_GUEST_R14(reg);	\
	movq	%r15,VTI_GUEST_R15(reg);	\
	movq	%cr2,%rsi;			\
	movq	%rsi,VTI_GUEST_CR2(reg)
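
/*
 * VMX_SAVE_GUEST stores all the GPRs first, so the guest value of
 * %rsi is already preserved by the time %rsi is reused as scratch to
 * read %cr2 (which, like the write, needs a register operand).
 *
 * VMX_RUN_ERROR translates the RFLAGS state left behind by
 * vmlaunch/vmresume into a return code, following the Intel SDM
 * conventions: CF=1 means VMfailInvalid (no current VMCS), ZF=1 means
 * VMfailValid (an error number is available in the VM-instruction
 * error field of the VMCS), and both clear means the instruction
 * succeeded.
 */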
#define VMX_RUN_ERROR(dst_reg)			\
	jnc	1f;				\
	movq	$VM_FAIL_INVALID,dst_reg;	\
	jmp	3f;				\
1:	jnz	2f;				\
	movq	$VM_FAIL_VALID,dst_reg;		\
	jmp	3f;				\
2:	movq	$VM_SUCCEED,dst_reg;		\
3:
	.text
/*
 * Called by the HW VMM when doing a VMEXIT.
 * - restore the host context
 * - return to handle_vmx_vmexit() in vmx.c
 *   with ret = VM_EXIT
 *
 * void vmx_vmexit(void)
 * %rsp = vmx_thread_info
 */
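/*
 * We land here because vmx.c (presumably) programs the VMCS
 * host-state area with HOST_RIP = vmx_vmexit and HOST_RSP = the
 * vmx_thread_info pointer, which is why %rsp indexes the guest save
 * area on entry.  Once the host callee-saved registers and the real
 * host %rsp are reloaded, the "ret" below returns to the original
 * caller of vmx_launch()/vmx_resume() with %rax = VM_EXIT.
 */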
ENTRY(vmx_vmexit)
	VMX_SAVE_GUEST(%rsp)

	movq	%rsp,%rdi

	movq	VTI_HOST_RBX(%rdi),%rbx
	movq	VTI_HOST_RBP(%rdi),%rbp
	movq	VTI_HOST_R12(%rdi),%r12
	movq	VTI_HOST_R13(%rdi),%r13
	movq	VTI_HOST_R14(%rdi),%r14
	movq	VTI_HOST_R15(%rdi),%r15
	movq	VTI_HOST_RSP(%rdi),%rsp

	movq	$VM_EXIT,%rax
	ret
END(vmx_vmexit)
/*
 * Called the first time the VMM is entered.
 * - if "vmlaunch" succeeds it does not return here;
 *   execution continues at the RIP held in VMCS_GUEST_RIP
 * - if "vmlaunch" fails it returns immediately with the
 *   appropriate error code
 *
 * int vmx_launch(struct vmx_thread_info *vti)
 * %rdi = vti
 */
ENTRY(vmx_launch)
	movq	%rbx,VTI_HOST_RBX(%rdi)
	movq	%rbp,VTI_HOST_RBP(%rdi)
	movq	%r12,VTI_HOST_R12(%rdi)
	movq	%r13,VTI_HOST_R13(%rdi)
	movq	%r14,VTI_HOST_R14(%rdi)
	movq	%r15,VTI_HOST_R15(%rdi)
	movq	%rsp,VTI_HOST_RSP(%rdi)
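	/*
	 * Point %rsp at the vmx_thread_info itself: VMX_RESTORE_GUEST
	 * indexes off it, and the host stack is unusable from here
	 * until the VMEXIT path reloads VTI_HOST_RSP.  Only the
	 * callee-saved registers were stored above; the ABI allows
	 * everything else to be clobbered across this call.
	 */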
	movq	%rdi,%rsp

	VMX_RESTORE_GUEST(%rsp)

	vmlaunch

	VMX_RUN_ERROR(%rax)

	movq	%rsp,%rdi

	movq	VTI_HOST_RBX(%rdi),%rbx
	movq	VTI_HOST_RBP(%rdi),%rbp
	movq	VTI_HOST_R12(%rdi),%r12
	movq	VTI_HOST_R13(%rdi),%r13
	movq	VTI_HOST_R14(%rdi),%r14
	movq	VTI_HOST_R15(%rdi),%r15
	movq	VTI_HOST_RSP(%rdi),%rsp

	ret
END(vmx_launch)
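
/*
 * A rough sketch of how the C side is expected to drive these entry
 * points (hypothetical shape; the real loop lives in vmx.c, and the
 * "launched" flag name is made up here for illustration):
 *
 *	int ret = launched ? vmx_resume(vti) : vmx_launch(vti);
 *
 *	switch (ret) {
 *	case VM_EXIT:		// came back through vmx_vmexit
 *		launched = 1;
 *		// dispatch to handle_vmx_vmexit()
 *		break;
 *	case VM_FAIL_VALID:	// VMCS valid: consult the
 *				// VM-instruction error field
 *	case VM_FAIL_INVALID:	// no current VMCS
 *		launched = 0;
 *		// report the error
 *		break;
 *	}
 *
 * VM_SUCCEED is never seen by the caller in practice, since a
 * successful vmlaunch/vmresume does not return.
 */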
/*
 * Called on every subsequent VMM entry, after "vmlaunch" has
 * already been executed once.
 * - if "vmresume" succeeds it does not return here;
 *   execution continues at the RIP held in VMCS_GUEST_RIP
 * - if "vmresume" fails it returns immediately with the
 *   appropriate error code
 *
 * int vmx_resume(struct vmx_thread_info *vti)
 * %rdi = vti
 */
ENTRY(vmx_resume)
	movq	%rbx,VTI_HOST_RBX(%rdi)
	movq	%rbp,VTI_HOST_RBP(%rdi)
	movq	%r12,VTI_HOST_R12(%rdi)
	movq	%r13,VTI_HOST_R13(%rdi)
	movq	%r14,VTI_HOST_R14(%rdi)
	movq	%r15,VTI_HOST_R15(%rdi)
	movq	%rsp,VTI_HOST_RSP(%rdi)

	movq	%rdi,%rsp

	VMX_RESTORE_GUEST(%rsp)

	vmresume

	VMX_RUN_ERROR(%rax)

	movq	%rsp,%rdi

	movq	VTI_HOST_RBX(%rdi),%rbx
	movq	VTI_HOST_RBP(%rdi),%rbp
	movq	VTI_HOST_R12(%rdi),%r12
	movq	VTI_HOST_R13(%rdi),%r13
	movq	VTI_HOST_R14(%rdi),%r14
	movq	VTI_HOST_R15(%rdi),%r15
	movq	VTI_HOST_RSP(%rdi),%rsp

	ret
END(vmx_resume)