/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */
#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash32.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>
/* #define DEBUG_MMU */
/* #define DEBUG_SR */

#ifdef DEBUG_MMU
#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_mmu(a, ...) do { } while(0)
#endif

#ifdef DEBUG_SR
#define dprintk_sr(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_sr(a, ...) do { } while(0)
#endif
#if PAGE_SHIFT != 12
#error Unknown page size
#endif

#ifdef CONFIG_SMP
#error XXX need to grab mmu_hash_lock
#endif

#ifdef CONFIG_PTE_64BIT
#error Only 32 bit pages are supported for now
#endif

static ulong htab;
static u32 htabmask;
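/*
 * Tear down a single shadow mapping: clear the valid word of the host
 * HPTE that this shadow PTE points at, then flush the stale translation
 * from the TLB with tlbie/tlbsync so no CPU keeps using it.
 */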
void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
        volatile u32 *pteg;

        /* Remove from host HTAB */
        pteg = (u32 *)pte->slot;
        pteg[0] = 0;

        /* And make sure it's gone from the TLB too */
        asm volatile ("sync");
        asm volatile ("tlbie %0" : : "r" (pte->pte.eaddr) : "memory");
        asm volatile ("sync");
        asm volatile ("tlbsync");
}
/* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
 * a hash, so we don't waste cycles on looping */
static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
{
        return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
}
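/*
 * Look up the host VSID for a guest VSID in the two hash slots it may
 * occupy (forward and backwards hash); returns NULL if no mapping has
 * been created yet.
 */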
static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
{
        struct kvmppc_sid_map *map;
        u16 sid_map_mask;

        if (vcpu->arch.shared->msr & MSR_PR)
                gvsid |= VSID_PR;

        sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
        map = &to_book3s(vcpu)->sid_map[sid_map_mask];
        if (map->guest_vsid == gvsid) {
                dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
                           gvsid, map->host_vsid);
                return map;
        }

        map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
        if (map->guest_vsid == gvsid) {
                dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
                           gvsid, map->host_vsid);
                return map;
        }

        dprintk_sr("SR: Searching 0x%llx -> not found\n", gvsid);
        return NULL;
}
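/*
 * Compute the host address of the primary or secondary PTE group for a
 * given VSID and effective address, following the classic 32-bit hashed
 * page table scheme: hash the VSID with the page index, mask with
 * htabmask and offset into the HTAB.
 */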
static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,
                                bool primary)
{
        u32 page, hash;
        ulong pteg = htab;

        page = (eaddr & ~ESID_MASK) >> 12;

        hash = ((vsid ^ page) << 6);
        if (!primary)
                hash = ~hash;
        hash &= htabmask;

        pteg |= hash;

        dprintk_mmu("htab: %lx | hash: %x | htabmask: %x | pteg: %lx\n",
                    htab, hash, htabmask, pteg);

        return (u32 *)pteg;
}
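/*
 * Enter a shadow mapping for one guest page: resolve the guest physical
 * address to a host pfn, pick the host VSID for the segment, find a free
 * (or victim) slot in the hash PTE group and write the PTE pair, then
 * record the mapping in the shadow PTE cache so it can be invalidated
 * later.
 */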
int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
{
        pfn_t hpaddr;
        u64 va;
        u64 vsid;
        struct kvmppc_sid_map *map;
        volatile u32 *pteg;
        u32 eaddr = orig_pte->eaddr;
        u32 pteg0, pteg1;
        register int rr = 0;
        bool primary = false;
        bool evict = false;
        struct hpte_cache *pte;

        /* Get host physical address for gpa */
        hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
        if (is_error_pfn(hpaddr)) {
                printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
                       orig_pte->eaddr);
                return -EINVAL;
        }
        hpaddr <<= PAGE_SHIFT;

        /* and write the mapping ea -> hpa into the pt */
        vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
        map = find_sid_vsid(vcpu, vsid);
        if (!map) {
                kvmppc_mmu_map_segment(vcpu, eaddr);
                map = find_sid_vsid(vcpu, vsid);
        }
        BUG_ON(!map);

        vsid = map->host_vsid;
        va = (vsid << SID_SHIFT) | (eaddr & ~ESID_MASK);
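        /*
         * Walk the eight PTE slots of the primary PTEG looking for an
         * invalid entry; if none is free, fall back to the secondary
         * PTEG and, failing that, evict whatever occupies the slot.
         */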
next_pteg:
        if (rr == 16) {
                primary = !primary;
                evict = true;
                rr = 0;
        }

        pteg = kvmppc_mmu_get_pteg(vcpu, vsid, eaddr, primary);

        /* not evicting yet */
        if (!evict && (pteg[rr] & PTE_V)) {
                rr += 2;
                goto next_pteg;
        }

        dprintk_mmu("KVM: old PTEG: %p (%d)\n", pteg, rr);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);

        pteg0 = ((eaddr & 0x0fffffff) >> 22) | (vsid << 7) | PTE_V |
                (primary ? 0 : PTE_SEC);
        pteg1 = hpaddr | PTE_M | PTE_R | PTE_C;

        if (orig_pte->may_write) {
                pteg1 |= PP_RWRW;
                mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
        } else {
                pteg1 |= PP_RWRX;
        }

        local_irq_disable();

        if (pteg[rr]) {
                pteg[rr] = 0;
                asm volatile ("sync");
        }
        pteg[rr + 1] = pteg1;
        pteg[rr] = pteg0;
        asm volatile ("sync");

        local_irq_enable();

        dprintk_mmu("KVM: new PTEG: %p\n", pteg);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);

        /* Now tell our Shadow PTE code about the new page */

        pte = kvmppc_mmu_hpte_cache_next(vcpu);

        dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n",
                    orig_pte->may_write ? 'w' : '-',
                    orig_pte->may_execute ? 'x' : '-',
                    orig_pte->eaddr, (ulong)pteg, va,
                    orig_pte->vpage, hpaddr);

        pte->slot = (ulong)&pteg[rr];
        pte->pte = *orig_pte;
        pte->pfn = hpaddr >> PAGE_SHIFT;

        kvmppc_mmu_hpte_cache_map(vcpu, pte);

        return 0;
}
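/*
 * Allocate a new guest VSID -> host VSID mapping. Hash collisions are
 * spread over two slots (forward and backwards hash), and when the
 * per-vcpu VSID pool runs dry all shadow state is flushed and the pool
 * restarts from the beginning.
 */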
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
        struct kvmppc_sid_map *map;
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
        u16 sid_map_mask;
        static int backwards_map = 0;

        if (vcpu->arch.shared->msr & MSR_PR)
                gvsid |= VSID_PR;

        /* We might get collisions that trap in preceding order, so let's
           map them differently */

        sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
        if (backwards_map)
                sid_map_mask = SID_MAP_MASK - sid_map_mask;

        map = &to_book3s(vcpu)->sid_map[sid_map_mask];

        /* Make sure we're taking the other map next time */
        backwards_map = !backwards_map;

        /* Uh-oh ... out of mappings. Let's flush! */
        if (vcpu_book3s->vsid_next >= VSID_POOL_SIZE) {
                vcpu_book3s->vsid_next = 0;
                memset(vcpu_book3s->sid_map, 0,
                       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
                kvmppc_mmu_pte_flush(vcpu, 0, 0);
                kvmppc_mmu_flush_segments(vcpu);
        }
        map->host_vsid = vcpu_book3s->vsid_pool[vcpu_book3s->vsid_next];
        vcpu_book3s->vsid_next++;

        map->guest_vsid = gvsid;
        map->valid = true;

        return map;
}
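/*
 * Load one shadow segment register: translate the guest ESID to a guest
 * VSID, map that to a host VSID (creating the mapping if needed) and
 * store the resulting SR value in the shadow vcpu.
 */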
int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
        u32 esid = eaddr >> SID_SHIFT;
        u64 gvsid;
        u32 sr;
        struct kvmppc_sid_map *map;
        struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);

        if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
                /* Invalidate an entry */
                svcpu->sr[esid] = SR_INVALID;
                return -ENOENT;
        }

        map = find_sid_vsid(vcpu, gvsid);
        if (!map)
                map = create_sid_map(vcpu, gvsid);

        map->guest_esid = esid;
        sr = map->host_vsid | SR_KP;
        svcpu->sr[esid] = sr;

        dprintk_sr("MMU: mtsr %d, 0x%x\n", esid, sr);

        return 0;
}
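/*
 * Invalidate every shadow segment register; subsequent accesses will
 * fault and be remapped through kvmppc_mmu_map_segment().
 */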
void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);

        dprintk_sr("MMU: flushing all segments (%d)\n", ARRAY_SIZE(svcpu->sr));
        for (i = 0; i < ARRAY_SIZE(svcpu->sr); i++)
                svcpu->sr[i] = SR_INVALID;
}
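/*
 * Free all per-vcpu MMU state: the shadow PTE cache and the MMU contexts
 * backing the host VSID pool.
 */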
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
        int i;

        kvmppc_mmu_hpte_destroy(vcpu);
        preempt_disable();
        for (i = 0; i < SID_CONTEXTS; i++)
                __destroy_context(to_book3s(vcpu)->context_id[i]);
        preempt_enable();
}
/* From mm/mmu_context_hash32.c */
#define CTX_TO_VSID(c, id)	((((c) * (897 * 16)) + (id * 0x111)) & 0xffffff)
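/*
 * Set up per-vcpu MMU state: allocate SID_CONTEXTS MMU contexts and
 * derive the host VSID pool from them, then read SDR1 to locate the
 * host hash table (htab) and its mask (htabmask).
 */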
int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        int err;
        ulong sdr1;
        int i;
        int j;

        for (i = 0; i < SID_CONTEXTS; i++) {
                err = __init_new_context();
                if (err < 0)
                        goto init_fail;
                vcpu3s->context_id[i] = err;

                /* Remember context id for this combination */
                for (j = 0; j < 16; j++)
                        vcpu3s->vsid_pool[(i * 16) + j] = CTX_TO_VSID(err, j);
        }

        vcpu3s->vsid_next = 0;

        /* Remember where the HTAB is */
        asm ( "mfsdr1 %0" : "=r"(sdr1) );
        htabmask = ((sdr1 & 0x1FF) << 16) | 0xFFC0;
        htab = (ulong)__va(sdr1 & 0xffff0000);

        kvmppc_mmu_hpte_init(vcpu);

        return 0;

init_fail:
        for (j = 0; j < i; j++) {
                if (!vcpu3s->context_id[j])
                        continue;

                __destroy_context(to_book3s(vcpu)->context_id[j]);
        }

        return -1;
}