/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */
#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#define KVMPPC_HANDLE_EXIT .kvmppc_handle_exit

#define ULONG_SIZE 8
#define VCPU_GPR(n)	(VCPU_GPRS + (n * ULONG_SIZE))
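/* The rN symbols from ppc_asm.h are plain register numbers here, so
 * VCPU_GPR(r14) expands to the byte offset of gpr[14] in the vcpu. */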
/* Clear MSR_EE so we can't be interrupted between here and guest entry */
.macro DISABLE_INTERRUPTS
	mfmsr	r0
	rldicl	r0, r0, 48, 1	/* rotate MSR_EE up to bit 0 and clear it */
	rotldi	r0, r0, 16	/* rotate the remaining bits back into place */
	mtmsrd	r0, 1		/* L=1: only EE/RI are updated */
.endm
#define VCPU_LOAD_NVGPRS(vcpu) \
	ld	r14, VCPU_GPR(r14)(vcpu); \
	ld	r15, VCPU_GPR(r15)(vcpu); \
	ld	r16, VCPU_GPR(r16)(vcpu); \
	ld	r17, VCPU_GPR(r17)(vcpu); \
	ld	r18, VCPU_GPR(r18)(vcpu); \
	ld	r19, VCPU_GPR(r19)(vcpu); \
	ld	r20, VCPU_GPR(r20)(vcpu); \
	ld	r21, VCPU_GPR(r21)(vcpu); \
	ld	r22, VCPU_GPR(r22)(vcpu); \
	ld	r23, VCPU_GPR(r23)(vcpu); \
	ld	r24, VCPU_GPR(r24)(vcpu); \
	ld	r25, VCPU_GPR(r25)(vcpu); \
	ld	r26, VCPU_GPR(r26)(vcpu); \
	ld	r27, VCPU_GPR(r27)(vcpu); \
	ld	r28, VCPU_GPR(r28)(vcpu); \
	ld	r29, VCPU_GPR(r29)(vcpu); \
	ld	r30, VCPU_GPR(r30)(vcpu); \
	ld	r31, VCPU_GPR(r31)(vcpu)
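/* r14-r31 are the ABI's non-volatile GPRs: only these need to be switched
 * explicitly between host and guest, since the volatile registers may be
 * clobbered across the C calls on the exit path anyway. */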
/*****************************************************************************
 *                                                                           *
 *     Guest entry / exit code that is in kernel module memory (highmem)    *
 *                                                                           *
 ****************************************************************************/
/* Registers:
 *  r3: kvm_run pointer
 *  r4: vcpu pointer
 */
_GLOBAL(__kvmppc_vcpu_entry)

kvm_start_entry:
	/* Write correct stack frame */
	mflr	r0
	std	r0, 16(r1)

	/* Save host state to the stack */
	stdu	r1, -SWITCH_FRAME_SIZE(r1)

	/* Save r3 (kvm_run) and r4 (vcpu) */
	SAVE_2GPRS(3, r1)

	/* Save non-volatile registers (r14 - r31) */
	SAVE_NVGPRS(r1)

	/* Save LR */
	std	r0, _LINK(r1)

	/* Load non-volatile guest state from the vcpu */
	VCPU_LOAD_NVGPRS(r4)

	/* Save R1/R2 in the PACA */
	std	r1, PACA_KVM_HOST_R1(r13)
	std	r2, PACA_KVM_HOST_R2(r13)
	/* XXX swap in/out on load? */
	ld	r3, VCPU_HIGHMEM_HANDLER(r4)
	std	r3, PACA_KVM_VMHANDLER(r13)
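	/* PACA_KVM_VMHANDLER tells the lowmem trampoline where to jump back
	 * into module memory (kvmppc_handler_highmem below) on guest exit. */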
kvm_start_lightweight:

	ld	r9, VCPU_PC(r4)			/* r9 = vcpu->arch.pc */
	ld	r10, VCPU_SHADOW_MSR(r4)	/* r10 = vcpu->arch.shadow_msr */

	/* Load some guest state in the respective registers */
	ld	r5, VCPU_CTR(r4)	/* r5 = vcpu->arch.ctr */
					/* will be swapped in by rmcall */

	ld	r3, VCPU_LR(r4)		/* r3 = vcpu->arch.lr */
	mtlr	r3			/* LR = r3 */

	DISABLE_INTERRUPTS
	/* Some guests may need to have dcbz set to 32 byte length.
	 *
	 * Usually we ensure that by patching the guest's instructions
	 * to trap on dcbz and emulate it in the hypervisor.
	 *
	 * If we can, we should tell the CPU to use 32 byte dcbz though,
	 * because that's a lot faster.
	 */
	ld	r3, VCPU_HFLAGS(r4)
	rldicl.	r3, r3, 0, 63		/* CR = ((r3 & 1) == 0) */
	beq	no_dcbz32_on

	mfspr	r3, SPRN_HID5
	ori	r3, r3, 0x80		/* XXX HID5_dcbz32 = 0x80 */
	mtspr	SPRN_HID5, r3

no_dcbz32_on:
	ld	r6, VCPU_RMCALL(r4)
	mtctr	r6

	ld	r3, VCPU_TRAMPOLINE_ENTER(r4)
	LOAD_REG_IMMEDIATE(r4, MSR_KERNEL & ~(MSR_IR | MSR_DR))

	/* Jump to SLB patching handler and into our guest */
	bctr
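	/* The bctr enters the real-mode wrapper (vcpu->arch.rmcall) with
	 * r3 = SLB patching trampoline and r4 = an MSR with translation off;
	 * the wrapper is expected to move these into SRR0/SRR1 and rfi
	 * through the trampoline into the guest. */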
/*
 * This is the handler in module memory. It gets jumped at from the
 * lowmem trampoline code, so it's basically the guest exit code.
 */

.global kvmppc_handler_highmem
kvmppc_handler_highmem:
	/*
	 * Register usage at this point:
	 *
	 * R0         = guest last inst
	 * R1         = host R1
	 * R2         = host R2
	 * R3         = guest PC
	 * R4         = guest MSR
	 * R5         = guest DAR
	 * R6         = guest DSISR
	 * R13        = PACA
	 * PACA.KVM.* = guest *
	 */
	/* R7 = vcpu */
	ld	r7, GPR4(r1)

	/* Now save the guest state */

	stw	r0, VCPU_LAST_INST(r7)

	std	r3, VCPU_PC(r7)
	std	r4, VCPU_SHADOW_SRR1(r7)
	std	r5, VCPU_FAULT_DEAR(r7)
	std	r6, VCPU_FAULT_DSISR(r7)
	ld	r5, VCPU_HFLAGS(r7)
	rldicl.	r5, r5, 0, 63		/* CR = ((r5 & 1) == 0) */
	beq	no_dcbz32_off

	li	r4, 0
	mfspr	r5, SPRN_HID5
	rldimi	r5, r4, 6, 56		/* clear HID5_dcbz32 again */
	mtspr	SPRN_HID5, r5

no_dcbz32_off:
	std	r14, VCPU_GPR(r14)(r7)
	std	r15, VCPU_GPR(r15)(r7)
	std	r16, VCPU_GPR(r16)(r7)
	std	r17, VCPU_GPR(r17)(r7)
	std	r18, VCPU_GPR(r18)(r7)
	std	r19, VCPU_GPR(r19)(r7)
	std	r20, VCPU_GPR(r20)(r7)
	std	r21, VCPU_GPR(r21)(r7)
	std	r22, VCPU_GPR(r22)(r7)
	std	r23, VCPU_GPR(r23)(r7)
	std	r24, VCPU_GPR(r24)(r7)
	std	r25, VCPU_GPR(r25)(r7)
	std	r26, VCPU_GPR(r26)(r7)
	std	r27, VCPU_GPR(r27)(r7)
	std	r28, VCPU_GPR(r28)(r7)
	std	r29, VCPU_GPR(r29)(r7)
	std	r30, VCPU_GPR(r30)(r7)
	std	r31, VCPU_GPR(r31)(r7)

	/* Save guest CTR */
	mfctr	r5
	std	r5, VCPU_CTR(r7)

	/* Save guest LR */
	mflr	r5
	std	r5, VCPU_LR(r7)
	/* Restore host msr -> SRR1 */
	ld	r6, VCPU_HOST_MSR(r7)
	/*
	 * For some interrupts, we need to call the real Linux
	 * handler, so it can do work for us. This has to happen
	 * as if the interrupt arrived from the kernel though,
	 * so let's fake it here where most state is restored.
	 *
	 * Call Linux for hardware interrupts/decrementer
	 * r3 = address of interrupt handler (exit reason)
	 */
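	/* r12 holds the exit reason (the interrupt vector number); it is
	 * also what gets passed on to kvmppc_handle_exit below. */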
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	call_linux_handler
	cmpwi	r12, BOOK3S_INTERRUPT_DECREMENTER
	beq	call_linux_handler

	/* Back to EE=1 */
	mtmsr	r6
	b	kvm_return_point

call_linux_handler:
	/*
	 * If we land here we need to jump back to the handler we
	 * came from.
	 *
	 * We have a page that we can access from real mode, so let's
	 * jump back to that and use it as a trampoline to get back into the
	 * interrupt handler!
	 *
	 * R3 still contains the exit code,
	 * R5 VCPU_HOST_RETIP and
	 * R6 VCPU_HOST_MSR
	 */
	/* Restore host IP -> SRR0 */
	ld	r5, VCPU_HOST_RETIP(r7)

	/* XXX Better move to a safe function?
	 *     What if we get an HTAB flush in between mtsrr0 and mtsrr1? */

	mtlr	r12

	ld	r4, VCPU_TRAMPOLINE_LOWMEM(r7)
	mtsrr0	r4
	LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR))
	mtsrr1	r3		/* real mode: MSR_IR/MSR_DR off */

	RFI
.global kvm_return_point
kvm_return_point:
	/* Jump back to lightweight entry if we're supposed to
	 * go back into the guest */

	/* Pass the exit number as 3rd argument to kvmppc_handle_exit */
	mr	r5, r12

	/* Restore r3 (kvm_run) and r4 (vcpu) */
	REST_2GPRS(3, r1)
	bl	KVMPPC_HANDLE_EXIT
	/* If RESUME_GUEST, get back in the loop */
	cmpwi	r3, RESUME_GUEST
	beq	kvm_loop_lightweight

	cmpwi	r3, RESUME_GUEST_NV
	beq	kvm_loop_heavyweight
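	/* RESUME_GUEST keeps the guest's non-volatile GPRs as they are
	 * (lightweight); RESUME_GUEST_NV reloads them from the vcpu
	 * (heavyweight); anything else falls through and returns to the host. */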
	ld	r4, _LINK(r1)
	mtlr	r4

	/* Restore non-volatile host registers (r14 - r31) */
	REST_NVGPRS(r1)

	addi	r1, r1, SWITCH_FRAME_SIZE
	blr
kvm_loop_heavyweight:

	ld	r4, _LINK(r1)
	std	r4, (16 + SWITCH_FRAME_SIZE)(r1)

	/* Load kvm_run and vcpu */
	REST_2GPRS(3, r1)

	/* Load non-volatile guest state from the vcpu */
	VCPU_LOAD_NVGPRS(r4)

	/* Jump back into the beginning of this function */
	b	kvm_start_lightweight
kvm_loop_lightweight:

	/* We'll need the vcpu pointer */
	REST_GPR(4, r1)

	/* Jump back into the beginning of this function */
	b	kvm_start_lightweight