/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>

/* Hypercall entry point. Will be patched with device tree instructions. */
.global kvm_hypercall_start
kvm_hypercall_start:
	li	r3, -1
	nop
	nop
	nop
	blr
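
/*
 * The guest copies the hypercall sequence advertised by the hypervisor
 * node in the device tree (the ePAPR "hcall-instructions" property)
 * over the nops above; until that happens the stub just returns -1.
 */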

#define KVM_MAGIC_PAGE		(-4096)
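
/*
 * The shared magic page sits in the last page of the guest's effective
 * address space, so the sign-extended displacement -4096 together with
 * a literal-zero base register (r0 in D-form loads/stores) reaches it.
 */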

#ifdef CONFIG_64BIT
#define LL64(reg, offs, reg2)	ld	reg, (offs)(reg2)
#define STL64(reg, offs, reg2)	std	reg, (offs)(reg2)
#else
#define LL64(reg, offs, reg2)	lwz	reg, (offs + 4)(reg2)
#define STL64(reg, offs, reg2)	stw	reg, (offs + 4)(reg2)
#endif
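
/*
 * The magic page fields are 64 bits wide.  A 32-bit kernel only cares
 * about the low word, which in the big-endian layout lives at
 * offset + 4; hence the adjusted displacement above.
 */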

#define SCRATCH_SAVE							\
	/* Enable critical section. We are critical if			\
	   shared->critical == r1 */					\
	STL64(r1, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);		\
									\
	/* Save state */						\
	PPC_STL	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);		\
	PPC_STL	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);		\
	mfcr	r31;							\
	stw	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);

#define SCRATCH_RESTORE							\
	/* Restore state */						\
	PPC_LL	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);		\
	lwz	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);		\
	mtcr	r30;							\
	PPC_LL	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);		\
									\
	/* Disable critical section. We are critical if		\
	   shared->critical == r1 and r2 is always != r1 */		\
	STL64(r2, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);
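
/*
 * The emulation blobs below only ever clobber r30, r31 and CR, which is
 * exactly what SCRATCH_SAVE/SCRATCH_RESTORE preserve.  While
 * shared->critical equals the guest's r1 the host defers interrupt
 * injection, so a blob cannot be interrupted with half-saved state.
 */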

.global kvm_emulate_mtmsrd
kvm_emulate_mtmsrd:

	SCRATCH_SAVE

	/* Put MSR & ~(MSR_EE|MSR_RI) in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	lis	r30, (~(MSR_EE | MSR_RI))@h
	ori	r30, r30, (~(MSR_EE | MSR_RI))@l
	and	r31, r31, r30

	/* OR the register's (MSR_EE|MSR_RI) on MSR */
kvm_emulate_mtmsrd_reg:
	ori	r30, r0, 0
	andi.	r30, r30, (MSR_EE|MSR_RI)
	or	r31, r31, r30

	/* Put MSR back into magic page */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Check if we have to fetch an interrupt */
	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r31, 0
	beq+	no_check

	/* Check if we may trigger an interrupt */
	andi.	r30, r30, MSR_EE
	beq	no_check

	SCRATCH_RESTORE

	/* An interrupt is pending and EE was just enabled, so run the
	   original (trapping) instruction and let the host inject it */
kvm_emulate_mtmsrd_orig_ins:
	nop
	b	kvm_emulate_mtmsrd_branch

no_check:
	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtmsrd_branch:
	b	.
kvm_emulate_mtmsrd_end:

.global kvm_emulate_mtmsrd_branch_offs
kvm_emulate_mtmsrd_branch_offs:
	.long (kvm_emulate_mtmsrd_branch - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_reg_offs
kvm_emulate_mtmsrd_reg_offs:
	.long (kvm_emulate_mtmsrd_reg - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_orig_ins_offs
kvm_emulate_mtmsrd_orig_ins_offs:
	.long (kvm_emulate_mtmsrd_orig_ins - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_len
kvm_emulate_mtmsrd_len:
	.long (kvm_emulate_mtmsrd_end - kvm_emulate_mtmsrd) / 4
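
/*
 * The *_offs and *_len words (here and for the other blobs below) are
 * counted in instructions, hence the division by 4.  The patching code
 * in arch/powerpc/kernel/kvm.c copies a blob, rewrites the instruction
 * at the reg offset(s) for the guest's actual source register, stores
 * the original trapping instruction at the orig_ins offset, fixes up
 * the branch at the branch offset so it returns behind the patched
 * site, and finally replaces the guest instruction with a branch into
 * the copy.
 */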

#define MSR_SAFE_BITS (MSR_EE | MSR_CE | MSR_ME | MSR_RI)
#define MSR_CRITICAL_BITS ~MSR_SAFE_BITS
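
/*
 * The "safe" bits only affect interrupt delivery and can be flipped in
 * the magic page without leaving the guest.  Changes to any other bit
 * (e.g. IR/DR) need the real mtmsr so the host can react to them.
 */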

.global kvm_emulate_mtmsr
kvm_emulate_mtmsr:

	SCRATCH_SAVE

	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Find the changed bits between old and new MSR */
kvm_emulate_mtmsr_reg1:
	ori	r30, r0, 0
	xor	r31, r30, r31

	/* Check if we need to really do mtmsr */
	LOAD_REG_IMMEDIATE(r30, MSR_CRITICAL_BITS)
	and.	r31, r31, r30

	/* No critical bits changed? Maybe we can stay in the guest. */
	beq	maybe_stay_in_guest

do_mtmsr:
	SCRATCH_RESTORE

	/* Just fire off the mtmsr if it's critical */
kvm_emulate_mtmsr_orig_ins:
	mtmsr	r0
	b	kvm_emulate_mtmsr_branch

maybe_stay_in_guest:

	/* Get the target register in r30 */
kvm_emulate_mtmsr_reg2:
	ori	r30, r0, 0

	/* Check if we have to fetch an interrupt */
	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r31, 0
	beq+	no_mtmsr

	/* Check if we may trigger an interrupt */
	andi.	r31, r30, MSR_EE
	beq	no_mtmsr

	b	do_mtmsr

no_mtmsr:

	/* Put MSR into magic page because we don't call mtmsr */
	STL64(r30, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtmsr_branch:
	b	.
kvm_emulate_mtmsr_end:

.global kvm_emulate_mtmsr_branch_offs
kvm_emulate_mtmsr_branch_offs:
	.long (kvm_emulate_mtmsr_branch - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg1_offs
kvm_emulate_mtmsr_reg1_offs:
	.long (kvm_emulate_mtmsr_reg1 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg2_offs
kvm_emulate_mtmsr_reg2_offs:
	.long (kvm_emulate_mtmsr_reg2 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_orig_ins_offs
kvm_emulate_mtmsr_orig_ins_offs:
	.long (kvm_emulate_mtmsr_orig_ins - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_len
kvm_emulate_mtmsr_len:
	.long (kvm_emulate_mtmsr_end - kvm_emulate_mtmsr) / 4
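
/*
 * mtmsr has two patched register slots: reg1 feeds the critical-bits
 * test, reg2 re-reads the same source register on the stay-in-guest
 * path.  The patching code fills both with the guest's register.
 */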

.global kvm_emulate_wrteei
kvm_emulate_wrteei:

	SCRATCH_SAVE

	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Remove MSR_EE from old MSR */
	li	r30, 0
	ori	r30, r30, MSR_EE
	andc	r31, r31, r30

	/* OR new MSR_EE onto the old MSR */
kvm_emulate_wrteei_ee:
	ori	r31, r31, 0

	/* Write new MSR value back */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_wrteei_branch:
	b	.
kvm_emulate_wrteei_end:

.global kvm_emulate_wrteei_branch_offs
kvm_emulate_wrteei_branch_offs:
	.long (kvm_emulate_wrteei_branch - kvm_emulate_wrteei) / 4

.global kvm_emulate_wrteei_ee_offs
kvm_emulate_wrteei_ee_offs:
	.long (kvm_emulate_wrteei_ee - kvm_emulate_wrteei) / 4

.global kvm_emulate_wrteei_len
kvm_emulate_wrteei_len:
	.long (kvm_emulate_wrteei_end - kvm_emulate_wrteei) / 4
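
/*
 * wrteei only ever touches MSR_EE, which is a safe bit, so this blob
 * never falls back to the original trapping instruction: the EE bit
 * taken from the patched instruction is OR'd in at wrteei_ee and the
 * MSR copy in the magic page is simply updated.
 */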

.global kvm_emulate_mtsrin
kvm_emulate_mtsrin:

	SCRATCH_SAVE

	/* If translation is enabled, the SR change must take effect
	   immediately, so fall through to the original (trapping)
	   instruction; otherwise just update the shadow copy */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	andi.	r31, r31, MSR_DR | MSR_IR
	beq	kvm_emulate_mtsrin_reg1

	SCRATCH_RESTORE

kvm_emulate_mtsrin_orig_ins:
	nop
	b	kvm_emulate_mtsrin_branch

kvm_emulate_mtsrin_reg1:
	/* The SR number sits in the top 4 bits of rB; turn it into a
	   word offset into the magic page's SR array */
	rlwinm	r30,r0,6,26,29

kvm_emulate_mtsrin_reg2:
	stw	r0, (KVM_MAGIC_PAGE + KVM_MAGIC_SR)(r30)

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtsrin_branch:
	b	.
kvm_emulate_mtsrin_end:

.global kvm_emulate_mtsrin_branch_offs
kvm_emulate_mtsrin_branch_offs:
	.long (kvm_emulate_mtsrin_branch - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg1_offs
kvm_emulate_mtsrin_reg1_offs:
	.long (kvm_emulate_mtsrin_reg1 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg2_offs
kvm_emulate_mtsrin_reg2_offs:
	.long (kvm_emulate_mtsrin_reg2 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_orig_ins_offs
kvm_emulate_mtsrin_orig_ins_offs:
	.long (kvm_emulate_mtsrin_orig_ins - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_len
kvm_emulate_mtsrin_len:
	.long (kvm_emulate_mtsrin_end - kvm_emulate_mtsrin) / 4