/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */
#define SHADOW_SLB_ESID(num)	(SLBSHADOW_SAVEAREA + (num * 0x10))
#define SHADOW_SLB_VSID(num)	(SLBSHADOW_SAVEAREA + (num * 0x10) + 0x8)
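
/*
 * Each entry in the SLB shadow save area is 0x10 bytes wide: the ESID
 * dword at offset 0 and the VSID dword at offset 8.  UNBOLT_SLB_ENTRY
 * clears the SLB_ESID_V (valid) bit of one such entry, with r12 pointing
 * at the shadow area, so it is no longer marked valid in the shadow while
 * the guest runs.  Entries that are already invalid are skipped.
 */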
#define UNBOLT_SLB_ENTRY(num) \
	ld	r9, SHADOW_SLB_ESID(num)(r12); \
	/* Invalid? Skip. */; \
	rldicl.	r0, r9, 37, 63; \
	beq	slb_entry_skip_ ## num; \
	xoris	r9, r9, SLB_ESID_V@h; \
	std	r9, SHADOW_SLB_ESID(num)(r12); \
slb_entry_skip_ ## num:
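
/*
 * REBOLT_SLB_ENTRY is the inverse: it re-sets the valid bit on a saved
 * shadow entry, writes the entry back into the SLB with slbmte and updates
 * the shadow copy.  Empty slots (ESID == 0) are skipped.  r11 points at the
 * shadow save area.
 */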
#define REBOLT_SLB_ENTRY(num) \
	ld	r10, SHADOW_SLB_ESID(num)(r11); \
	/* Empty? Skip. */; \
	cmpdi	r10, 0; \
	beq	slb_exit_skip_ ## num; \
	oris	r10, r10, SLB_ESID_V@h; \
	ld	r9, SHADOW_SLB_VSID(num)(r11); \
	slbmte	r9, r10; \
	std	r10, SHADOW_SLB_ESID(num)(r11); \
slb_exit_skip_ ## num:
/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/
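
/*
 * LOAD_GUEST_SEGMENTS drops the host's bolted SLB entries (after stashing
 * entry 0 so it can be invalidated explicitly) and then loads the guest's
 * shadow SLB entries from the vcpu into the hardware SLB.
 */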
.macro LOAD_GUEST_SEGMENTS
	/* Required state:
	 *
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R3  = shadow vcpu
	 * all other volatile GPRS = free except R4, R6
	 * SVCPU[CR]  = guest CR
	 * SVCPU[XER] = guest XER
	 * SVCPU[CTR] = guest CTR
	 * SVCPU[LR]  = guest LR
	 */
	/* Remove LPAR shadow entries */

#if SLB_NUM_BOLTED == 3

	ld	r12, PACA_SLBSHADOWPTR(r13)

	/* Save off the first entry so we can slbie it later */
	ld	r10, SHADOW_SLB_ESID(0)(r12)
	ld	r11, SHADOW_SLB_VSID(0)(r12)
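
	/*
	 * The slbia below leaves SLB entry 0 alone, so the first shadow
	 * entry is invalidated explicitly with slbie afterwards.  slbie
	 * needs the ESID together with the entry's class bit, which is why
	 * both the ESID and the VSID are saved here.
	 */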
	/* Remove bolted entries */
	UNBOLT_SLB_ENTRY(0)
	UNBOLT_SLB_ENTRY(1)
	UNBOLT_SLB_ENTRY(2)

#else
#error unknown number of bolted entries
#endif

	/* Flush SLB */

	slbia
	/* r10 = esid & ESID_MASK */
	rldicr	r10, r10, 0, 35
	/* r12 = class bit of the VSID, in slbie position; r10 |= r12 */
	rldic	r12, r11, 56 - 36, 36
	or	r10, r10, r12

	slbie	r10
	isync
	/* Fill SLB with our shadow */

	/* r12 = end of the used part of the guest SLB array */
	lbz	r12, SVCPU_SLB_MAX(r3)
	mulli	r12, r12, 16
	addi	r12, r12, SVCPU_SLB
	add	r12, r12, r3
	/* for (r11 = kvm_slb; r11 < kvm_slb + kvm_slb_size; r11+=slb_entry) */
	li	r11, SVCPU_SLB
	add	r11, r11, r3

slb_loop_enter:

	ld	r10, 0(r11)

	/* Invalid? Skip. */
	rldicl.	r0, r10, 37, 63
	beq	slb_loop_enter_skip

	ld	r9, 8(r11)
	slbmte	r9, r10

slb_loop_enter_skip:
	addi	r11, r11, 16
	cmpd	cr0, r11, r12
	blt	slb_loop_enter

.endm
/******************************************************************************
 *                                                                            *
 *                                Exit code                                   *
 *                                                                            *
 *****************************************************************************/
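
/*
 * LOAD_HOST_SEGMENTS flushes the guest's SLB entries and restores the host's
 * bolted entries from the SLB shadow, marking them valid again as it goes.
 */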
.macro LOAD_HOST_SEGMENTS
	/* Register usage at this point:
	 *
	 * R1  = host R1
	 * R2  = host R2
	 * R12 = exit handler id
	 * R13 = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64]
	 * SVCPU[CR]  = guest CR
	 * SVCPU[XER] = guest XER
	 * SVCPU[CTR] = guest CTR
	 * SVCPU[LR]  = guest LR
	 */
	/* Restore bolted entries from the shadow and fix it up along the way */

	/* We don't store anything in entry 0, so we don't need to take care of it */
	slbia
	isync
#if SLB_NUM_BOLTED == 3

	ld	r11, PACA_SLBSHADOWPTR(r13)

	REBOLT_SLB_ENTRY(0)
	REBOLT_SLB_ENTRY(1)
	REBOLT_SLB_ENTRY(2)

#else
#error unknown number of bolted entries
#endif

.endm