tomato.git: release/src-rt-6.x.4708/linux/linux-2.6.36/arch/powerpc/kvm/book3s_32_mmu_host.c
/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/hash.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash32.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>

/* #define DEBUG_MMU */
/* #define DEBUG_SR */

#ifdef DEBUG_MMU
#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_mmu(a, ...) do { } while(0)
#endif

#ifdef DEBUG_SR
#define dprintk_sr(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_sr(a, ...) do { } while(0)
#endif

#if PAGE_SHIFT != 12
#error Unknown page size
#endif

#ifdef CONFIG_SMP
#error XXX need to grab mmu_hash_lock
#endif

#ifdef CONFIG_PTE_64BIT
#error Only 32 bit pages are supported for now
#endif
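
/*
 * Host hash page table (HTAB) location, cached from SDR1 by
 * kvmppc_mmu_init(): htab is the kernel virtual address of the HTAB base,
 * htabmask masks the hash down to a valid byte offset into it.
 */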
static ulong htab;
static u32 htabmask;
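
/*
 * Tear down one shadow mapping: clear the first word of the host PTE we
 * installed for it (dropping the valid bit) and flush the stale
 * translation from the TLB.
 */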
void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	volatile u32 *pteg;

	/* Remove from host HTAB */
	pteg = (u32*)pte->slot;
	pteg[0] = 0;

	/* And make sure it's gone from the TLB too */
	asm volatile ("sync");
	asm volatile ("tlbie %0" : : "r" (pte->pte.eaddr) : "memory");
	asm volatile ("sync");
	asm volatile ("tlbsync");
}

/* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
 * a hash, so we don't waste cycles on looping */
static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
{
	return hash_64(gvsid, SID_MAP_BITS);
}
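
/*
 * Look up the shadow (host) VSID for a guest VSID. Each guest VSID can live
 * in one of two slots: the hashed index or its mirror at the other end of
 * the map, which create_sid_map() alternately uses to spread out collisions.
 */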
static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	u16 sid_map_mask;

	if (vcpu->arch.msr & MSR_PR)
		gvsid |= VSID_PR;

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	map = &to_book3s(vcpu)->sid_map[sid_map_mask];
	if (map->guest_vsid == gvsid) {
		dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
			   gvsid, map->host_vsid);
		return map;
	}

	map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
	if (map->guest_vsid == gvsid) {
		dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
			   gvsid, map->host_vsid);
		return map;
	}

	dprintk_sr("SR: Searching 0x%llx -> not found\n", gvsid);
	return NULL;
}
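
/*
 * Compute the address of the primary or secondary PTE group for a
 * vsid/eaddr pair: hash the page index with the VSID, invert the hash for
 * the secondary group, scale it to the 64-byte PTEG size and mask it onto
 * the HTAB base, mirroring the hardware's hashed-page-table lookup.
 */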
static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,
				bool primary)
{
	u32 page, hash;
	ulong pteg = htab;

	page = (eaddr & ~ESID_MASK) >> 12;

	hash = ((vsid ^ page) << 6);
	if (!primary)
		hash = ~hash;

	hash &= htabmask;

	pteg |= hash;

	dprintk_mmu("htab: %lx | hash: %x | htabmask: %x | pteg: %lx\n",
		    htab, hash, htabmask, pteg);

	return (u32*)pteg;
}

extern char etext[];
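
/*
 * Map one guest page into the host HTAB: resolve the guest real address to
 * a host pfn, translate the guest segment to a shadow VSID, pick a free
 * (or, failing that, a victim) slot in the primary/secondary PTEG, write
 * the two PTE words, and record the mapping in the hpte cache so it can be
 * invalidated later.
 */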
int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
{
	pfn_t hpaddr;
	u64 va;
	u64 vsid;
	struct kvmppc_sid_map *map;
	volatile u32 *pteg;
	u32 eaddr = orig_pte->eaddr;
	u32 pteg0, pteg1;
	register int rr = 0;
	bool primary = false;
	bool evict = false;
	struct hpte_cache *pte;

	/* Get host physical address for gpa */
	hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
	if (kvm_is_error_hva(hpaddr)) {
		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
		       orig_pte->eaddr);
		return -EINVAL;
	}
	hpaddr <<= PAGE_SHIFT;

	/* and write the mapping ea -> hpa into the pt */
	vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
	map = find_sid_vsid(vcpu, vsid);
	if (!map) {
		kvmppc_mmu_map_segment(vcpu, eaddr);
		map = find_sid_vsid(vcpu, vsid);
	}
	BUG_ON(!map);

	vsid = map->host_vsid;
	va = (vsid << SID_SHIFT) | (eaddr & ~ESID_MASK);
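
	/*
	 * A PTEG holds eight PTEs of two words each, so rr steps through the
	 * group two words at a time. If the whole group turns out to be
	 * occupied, flip to the other group and start evicting.
	 */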
next_pteg:
	if (rr == 16) {
		primary = !primary;
		evict = true;
		rr = 0;
	}

	pteg = kvmppc_mmu_get_pteg(vcpu, vsid, eaddr, primary);

	/* not evicting yet */
	if (!evict && (pteg[rr] & PTE_V)) {
		rr += 2;
		goto next_pteg;
	}

	dprintk_mmu("KVM: old PTEG: %p (%d)\n", pteg, rr);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);
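
	/*
	 * Build the two PTE words: pteg0 carries the valid bit, the shadow
	 * VSID, the secondary-hash bit and the abbreviated page index; pteg1
	 * carries the host real page number plus the coherence (M),
	 * referenced/changed (R/C) and page-protection bits.
	 */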
	pteg0 = ((eaddr & 0x0fffffff) >> 22) | (vsid << 7) | PTE_V |
		(primary ? 0 : PTE_SEC);
	pteg1 = hpaddr | PTE_M | PTE_R | PTE_C;

	if (orig_pte->may_write) {
		pteg1 |= PP_RWRW;
		mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
	} else {
		pteg1 |= PP_RWRX;
	}
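
	/*
	 * Update the entry with interrupts off: clear any old PTE and sync
	 * first, then write the second word before the first so the entry
	 * never becomes valid while it still points at stale data.
	 */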
	local_irq_disable();

	if (pteg[rr]) {
		pteg[rr] = 0;
		asm volatile ("sync");
	}
	pteg[rr + 1] = pteg1;
	pteg[rr] = pteg0;
	asm volatile ("sync");

	local_irq_enable();

	dprintk_mmu("KVM: new PTEG: %p\n", pteg);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
	dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);

	/* Now tell our Shadow PTE code about the new page */
	pte = kvmppc_mmu_hpte_cache_next(vcpu);

	dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n",
		    orig_pte->may_write ? 'w' : '-',
		    orig_pte->may_execute ? 'x' : '-',
		    orig_pte->eaddr, (ulong)pteg, va,
		    orig_pte->vpage, hpaddr);

	pte->slot = (ulong)&pteg[rr];
	pte->host_va = va;
	pte->pte = *orig_pte;
	pte->pfn = hpaddr >> PAGE_SHIFT;

	kvmppc_mmu_hpte_cache_map(vcpu, pte);

	return 0;
}
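
/*
 * Allocate a shadow VSID for a guest VSID that has no mapping yet. Entries
 * alternate between the hashed slot and its mirror to spread out collisions;
 * when the context's shadow VSID range runs dry, all shadow segments and
 * PTEs are flushed and the range is recycled from the start.
 */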
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	u16 sid_map_mask;
	static int backwards_map = 0;

	if (vcpu->arch.msr & MSR_PR)
		gvsid |= VSID_PR;

	/* We might get collisions that trap in preceding order, so let's
	   map them differently */

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	if (backwards_map)
		sid_map_mask = SID_MAP_MASK - sid_map_mask;

	map = &to_book3s(vcpu)->sid_map[sid_map_mask];

	/* Make sure we're taking the other map next time */
	backwards_map = !backwards_map;

	/* Uh-oh ... out of mappings. Let's flush! */
	if (vcpu_book3s->vsid_next >= vcpu_book3s->vsid_max) {
		vcpu_book3s->vsid_next = vcpu_book3s->vsid_first;
		memset(vcpu_book3s->sid_map, 0,
		       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		kvmppc_mmu_flush_segments(vcpu);
	}
	map->host_vsid = vcpu_book3s->vsid_next;

	/* Would have to be 111 to be completely aligned with the rest of
	   Linux, but that is just way too little space! */
	vcpu_book3s->vsid_next += 1;

	map->guest_vsid = gvsid;
	map->valid = true;

	return map;
}
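
/*
 * Load a shadow segment register for an effective address: translate the
 * guest ESID to a guest VSID, find or create the matching shadow VSID and
 * write it into the shadow SR together with the SR_KP key bit.
 */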
int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
	u32 esid = eaddr >> SID_SHIFT;
	u64 gvsid;
	u32 sr;
	struct kvmppc_sid_map *map;
	struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);

	if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
		/* Invalidate an entry */
		svcpu->sr[esid] = SR_INVALID;
		return -ENOENT;
	}

	map = find_sid_vsid(vcpu, gvsid);
	if (!map)
		map = create_sid_map(vcpu, gvsid);

	map->guest_esid = esid;
	sr = map->host_vsid | SR_KP;
	svcpu->sr[esid] = sr;

	dprintk_sr("MMU: mtsr %d, 0x%x\n", esid, sr);

	return 0;
}

void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);

	dprintk_sr("MMU: flushing all segments (%d)\n", ARRAY_SIZE(svcpu->sr));
	for (i = 0; i < ARRAY_SIZE(svcpu->sr); i++)
		svcpu->sr[i] = SR_INVALID;
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_hpte_destroy(vcpu);
	preempt_disable();
	__destroy_context(to_book3s(vcpu)->context_id);
	preempt_enable();
}

/* From mm/mmu_context_hash32.c */
#define CTX_TO_VSID(ctx)	(((ctx) * (897 * 16)) & 0xffffff)
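
/*
 * Per-vcpu MMU setup: allocate a fresh host MMU context, carve this vcpu's
 * shadow VSID range out of it, and read SDR1 to locate the host HTAB that
 * the shadow PTEs will be written into.
 */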
int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int err;
	ulong sdr1;

	err = __init_new_context();
	if (err < 0)
		return -1;
	vcpu3s->context_id = err;

	vcpu3s->vsid_max = CTX_TO_VSID(vcpu3s->context_id + 1) - 1;
	vcpu3s->vsid_first = CTX_TO_VSID(vcpu3s->context_id);

	BUG_ON(vcpu3s->vsid_max < vcpu3s->vsid_first);

	vcpu3s->vsid_next = vcpu3s->vsid_first;

	/* Remember where the HTAB is */
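	/*
	 * SDR1 holds the physical base of the HTAB in its upper bits and the
	 * HTABMASK in its low bits; fold the mask into byte-offset form
	 * (PTEGs are 64 bytes, hence the 0xFFC0 low part) so it can be ANDed
	 * directly onto the hash in kvmppc_mmu_get_pteg().
	 */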
	asm ( "mfsdr1 %0" : "=r"(sdr1) );
	htabmask = ((sdr1 & 0x1FF) << 16) | 0xFFC0;
	htab = (ulong)__va(sdr1 & 0xffff0000);

	kvmppc_mmu_hpte_init(vcpu);

	return 0;
}