/*
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *         Scott Wood, scottwood@freescale.com
 *         Ashish Kalra, ashish.kalra@freescale.com
 *         Varun Sethi, varun.sethi@freescale.com
 *
 * Description:
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <asm/kvm_ppc.h>

#include "e500.h"
#include "trace.h"
#include "timing.h"
#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)

static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM];
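
/*
 * Pick the next victim way in guest TLB0.  The counter advances
 * round-robin and wraps at gtlb_params[0].ways, mirroring the NV
 * (next victim) hint that e500 hardware reports in MAS0.
 */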
static inline unsigned int gtlb0_get_next_victim(
                struct kvmppc_vcpu_e500 *vcpu_e500)
{
        unsigned int victim;

        victim = vcpu_e500->gtlb_nv[0]++;
        if (unlikely(vcpu_e500->gtlb_nv[0] >= vcpu_e500->gtlb_params[0].ways))
                vcpu_e500->gtlb_nv[0] = 0;

        return victim;
}

static inline unsigned int tlb1_max_shadow_size(void)
{
        /* reserve one entry for magic page */
        return host_tlb_params[1].entries - tlbcam_index - 1;
}

static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
{
        return tlbe->mas7_3 & (MAS3_SW|MAS3_UW);
}

static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
{
        /* Mask off reserved bits. */
        mas3 &= MAS3_ATTRIB_MASK;

#ifndef CONFIG_KVM_BOOKE_HV
        if (!usermode) {
                /* Guest is in supervisor mode,
                 * so we need to translate guest
                 * supervisor permissions into user permissions. */
                mas3 &= ~E500_TLB_USER_PERM_MASK;
                mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
        }
        mas3 |= E500_TLB_SUPER_PERM_MASK;
#endif
        return mas3;
}

static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
{
#ifdef CONFIG_SMP
        return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M;
#else
        return mas2 & MAS2_ATTRIB_MASK;
#endif
}

/*
 * writing shadow tlb entry to host TLB
 */
static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
                                     uint32_t mas0)
{
        unsigned long flags;

        local_irq_save(flags);
        mtspr(SPRN_MAS0, mas0);
        mtspr(SPRN_MAS1, stlbe->mas1);
        mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2);
        mtspr(SPRN_MAS3, (u32)stlbe->mas7_3);
        mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
#ifdef CONFIG_KVM_BOOKE_HV
        mtspr(SPRN_MAS8, stlbe->mas8);
#endif
        asm volatile("isync; tlbwe" : : : "memory");

#ifdef CONFIG_KVM_BOOKE_HV
        /* Must clear mas8 for other host tlbwe's */
        mtspr(SPRN_MAS8, 0);
        isync();
#endif
        local_irq_restore(flags);

        trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1,
                                      stlbe->mas2, stlbe->mas7_3);
}

/*
 * Acquire a mas0 with victim hint, as if we just took a TLB miss.
 *
 * We don't care about the address we're searching for, other than that it's
 * in the right set and is not present in the TLB.  Using a zero PID and a
 * userspace address means we don't have to set and then restore MAS5, or
 * calculate a proper MAS6 value.
 */
static u32 get_host_mas0(unsigned long eaddr)
{
        unsigned long flags;
        u32 mas0;

        local_irq_save(flags);
        mtspr(SPRN_MAS6, 0);
        asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET));
        mas0 = mfspr(SPRN_MAS0);
        local_irq_restore(flags);

        return mas0;
}

/* sesel is for tlb1 only */
static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
                int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe)
{
        u32 mas0;

        if (tlbsel == 0) {
                mas0 = get_host_mas0(stlbe->mas2);
                __write_host_tlbe(stlbe, mas0);
        } else {
                __write_host_tlbe(stlbe,
                                  MAS0_TLBSEL(1) |
                                  MAS0_ESEL(to_htlb1_esel(sesel)));
        }
}

#ifdef CONFIG_KVM_E500V2
void kvmppc_map_magic(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        struct kvm_book3e_206_tlb_entry magic;
        ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
        unsigned int stid;
        pfn_t pfn;

        pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
        get_page(pfn_to_page(pfn));

        preempt_disable();
        stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0);

        magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) |
                     MAS1_TSIZE(BOOK3E_PAGESZ_4K);
        magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
        magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) |
                       MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
        magic.mas8 = 0;

        __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
        preempt_enable();
}
#endif
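
/*
 * Invalidate any host TLB entries that shadow the given guest entry.
 * A guest TLB1 entry may be backed by multiple host entries; those are
 * tracked in the g2h_tlb1_map bitmap and torn down one by one here.
 */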
static void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500,
                int tlbsel, int esel)
{
        struct kvm_book3e_206_tlb_entry *gtlbe =
                get_entry(vcpu_e500, tlbsel, esel);

        if (tlbsel == 1 &&
            vcpu_e500->gtlb_priv[1][esel].ref.flags & E500_TLB_BITMAP) {
                u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
                int hw_tlb_indx;
                unsigned long flags;

                local_irq_save(flags);
                while (tmp) {
                        hw_tlb_indx = __ilog2_u64(tmp & -tmp);
                        mtspr(SPRN_MAS0,
                              MAS0_TLBSEL(1) |
                              MAS0_ESEL(to_htlb1_esel(hw_tlb_indx)));
                        mtspr(SPRN_MAS1, 0);
                        asm volatile("tlbwe");
                        vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0;
                        tmp &= tmp - 1;
                }
                mb();
                vcpu_e500->g2h_tlb1_map[esel] = 0;
                vcpu_e500->gtlb_priv[1][esel].ref.flags &= ~E500_TLB_BITMAP;
                local_irq_restore(flags);

                return;
        }

        /* Guest tlbe is backed by at most one host tlbe per shadow pid. */
        kvmppc_e500_tlbil_one(vcpu_e500, gtlbe);
}

static int tlb0_set_base(gva_t addr, int sets, int ways)
{
        int set_base;

        set_base = (addr >> PAGE_SHIFT) & (sets - 1);
        set_base *= ways;

        return set_base;
}

static int gtlb0_set_base(struct kvmppc_vcpu_e500 *vcpu_e500, gva_t addr)
{
        return tlb0_set_base(addr, vcpu_e500->gtlb_params[0].sets,
                             vcpu_e500->gtlb_params[0].ways);
}
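
/*
 * Decode MAS0[ESEL] into an index into the flat guest TLB array: for
 * TLB0 the ESEL field selects a way within the set addressed by
 * MAS2[EPN], while for TLB1 it indexes the fully associative array
 * directly.
 */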
static unsigned int get_tlb_esel(struct kvm_vcpu *vcpu, int tlbsel)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int esel = get_tlb_esel_bit(vcpu);

        if (tlbsel == 0) {
                esel &= vcpu_e500->gtlb_params[0].ways - 1;
                esel += gtlb0_set_base(vcpu_e500, vcpu->arch.shared->mas2);
        } else {
                esel &= vcpu_e500->gtlb_params[tlbsel].entries - 1;
        }

        return esel;
}

/* Search the guest TLB for a matching entry. */
static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
                gva_t eaddr, int tlbsel, unsigned int pid, int as)
{
        int size = vcpu_e500->gtlb_params[tlbsel].entries;
        unsigned int set_base, offset;
        int i;

        if (tlbsel == 0) {
                set_base = gtlb0_set_base(vcpu_e500, eaddr);
                size = vcpu_e500->gtlb_params[0].ways;
        } else {
                set_base = 0;
        }

        offset = vcpu_e500->gtlb_offset[tlbsel];

        for (i = 0; i < size; i++) {
                struct kvm_book3e_206_tlb_entry *tlbe =
                        &vcpu_e500->gtlb_arch[offset + set_base + i];
                unsigned int tid;

                if (eaddr < get_tlb_eaddr(tlbe))
                        continue;

                if (eaddr > get_tlb_end(tlbe))
                        continue;

                tid = get_tlb_tid(tlbe);
                if (tid && (tid != pid))
                        continue;

                if (!get_tlb_v(tlbe))
                        continue;

                if (get_tlb_ts(tlbe) != as && as != -1)
                        continue;

                return set_base + i;
        }

        return -1;
}
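
/*
 * A tlbe_ref pins the host page backing a shadow mapping.  Setup
 * records whether the guest mapping is writable, so that release can
 * mark the page dirty before dropping the reference.
 */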
static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
                                         struct kvm_book3e_206_tlb_entry *gtlbe,
                                         pfn_t pfn)
{
        ref->pfn = pfn;
        ref->flags = E500_TLB_VALID;

        if (tlbe_is_writable(gtlbe))
                ref->flags |= E500_TLB_DIRTY;
}

static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
{
        if (ref->flags & E500_TLB_VALID) {
                if (ref->flags & E500_TLB_DIRTY)
                        kvm_release_pfn_dirty(ref->pfn);
                else
                        kvm_release_pfn_clean(ref->pfn);

                ref->flags = 0;
        }
}

static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        if (vcpu_e500->g2h_tlb1_map)
                memset(vcpu_e500->g2h_tlb1_map, 0,
                       sizeof(u64) * vcpu_e500->gtlb_params[1].entries);
        if (vcpu_e500->h2g_tlb1_rmap)
                memset(vcpu_e500->h2g_tlb1_rmap, 0,
                       sizeof(unsigned int) * host_tlb_params[1].entries);
}

static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        int tlbsel = 0;
        int i;

        for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
                struct tlbe_ref *ref =
                        &vcpu_e500->gtlb_priv[tlbsel][i].ref;
                kvmppc_e500_ref_release(ref);
        }
}

static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        int stlbsel = 1;
        int i;

        kvmppc_e500_tlbil_all(vcpu_e500);

        for (i = 0; i < host_tlb_params[stlbsel].entries; i++) {
                struct tlbe_ref *ref =
                        &vcpu_e500->tlb_refs[stlbsel][i];
                kvmppc_e500_ref_release(ref);
        }

        clear_tlb_privs(vcpu_e500);
}
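
/*
 * Load the MAS registers with the state a guest TLB miss would leave
 * behind on real hardware, so the guest's miss handler finds a
 * ready-to-use victim slot when it reads MAS0..MAS6.
 */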
static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
                unsigned int eaddr, int as)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        unsigned int victim, tsized;
        int tlbsel;

        /* since we only have two TLBs, only lower bit is used. */
        tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1;
        victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;
        tsized = (vcpu->arch.shared->mas4 >> 7) & 0x1f;

        vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
                | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
        vcpu->arch.shared->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
                | MAS1_TID(get_tlbmiss_tid(vcpu))
                | MAS1_TSIZE(tsized);
        vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN)
                | (vcpu->arch.shared->mas4 & MAS2_ATTRIB_MASK);
        vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
        vcpu->arch.shared->mas6 = (vcpu->arch.shared->mas6 & MAS6_SPID1)
                | (get_cur_pid(vcpu) << 16)
                | (as ? MAS6_SAS : 0);
}

/* TID must be supplied by the caller */
static inline void kvmppc_e500_setup_stlbe(
        struct kvm_vcpu *vcpu,
        struct kvm_book3e_206_tlb_entry *gtlbe,
        int tsize, struct tlbe_ref *ref, u64 gvaddr,
        struct kvm_book3e_206_tlb_entry *stlbe)
{
        pfn_t pfn = ref->pfn;
        u32 pr = vcpu->arch.shared->msr & MSR_PR;

        BUG_ON(!(ref->flags & E500_TLB_VALID));

        /* Force IPROT=0 for all guest mappings. */
        stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID;
        stlbe->mas2 = (gvaddr & MAS2_EPN) |
                      e500_shadow_mas2_attrib(gtlbe->mas2, pr);
        stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
                        e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);

#ifdef CONFIG_KVM_BOOKE_HV
        stlbe->mas8 = MAS8_TGS | vcpu->kvm->arch.lpid;
#endif
}
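
/*
 * Build the host shadow entry for a guest mapping: translate gfn to a
 * host pfn, pick the largest page size both sides can support, pin the
 * page via the tlbe_ref, and fill in *stlbe for the caller to write
 * into the host TLB.
 */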
static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
        u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
        int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe,
        struct tlbe_ref *ref)
{
        struct kvm_memory_slot *slot;
        unsigned long pfn, hva;
        int pfnmap = 0;
        int tsize = BOOK3E_PAGESZ_4K;

        /*
         * Translate guest physical to true physical, acquiring
         * a page reference if it is normal, non-reserved memory.
         *
         * gfn_to_memslot() must succeed because otherwise we wouldn't
         * have gotten this far.  Eventually we should just pass the slot
         * pointer through from the first lookup.
         */
        slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
        hva = gfn_to_hva_memslot(slot, gfn);

        if (tlbsel == 1) {
                struct vm_area_struct *vma;
                down_read(&current->mm->mmap_sem);

                vma = find_vma(current->mm, hva);
                if (vma && hva >= vma->vm_start &&
                    (vma->vm_flags & VM_PFNMAP)) {
                        /*
                         * This VMA is a physically contiguous region (e.g.
                         * /dev/mem) that bypasses normal Linux page
                         * management.  Find the overlap between the
                         * vma and the memslot.
                         */
                        unsigned long start, end;
                        unsigned long slot_start, slot_end;

                        pfnmap = 1;

                        start = vma->vm_pgoff;
                        end = start +
                              ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT);

                        pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);

                        slot_start = pfn - (gfn - slot->base_gfn);
                        slot_end = slot_start + slot->npages;

                        if (start < slot_start)
                                start = slot_start;
                        if (end > slot_end)
                                end = slot_end;

                        tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
                                MAS1_TSIZE_SHIFT;

                        /*
                         * e500 doesn't implement the lowest tsize bit,
                         * or 1K pages.
                         */
                        tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);

                        /*
                         * Now find the largest tsize (up to what the guest
                         * requested) that will cover gfn, stay within the
                         * range, and for which gfn and pfn are mutually
                         * aligned.
                         */
                        for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
                                unsigned long gfn_start, gfn_end, tsize_pages;
                                tsize_pages = 1 << (tsize - 2);

                                gfn_start = gfn & ~(tsize_pages - 1);
                                gfn_end = gfn_start + tsize_pages;

                                if (gfn_start + pfn - gfn < start)
                                        continue;
                                if (gfn_end + pfn - gfn > end)
                                        continue;
                                if ((gfn & (tsize_pages - 1)) !=
                                    (pfn & (tsize_pages - 1)))
                                        continue;

                                gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
                                pfn &= ~(tsize_pages - 1);
                                break;
                        }
                } else if (vma && hva >= vma->vm_start &&
                           (vma->vm_flags & VM_HUGETLB)) {
                        unsigned long psize = vma_kernel_pagesize(vma);

                        tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
                                MAS1_TSIZE_SHIFT;

                        /*
                         * Take the largest page size that satisfies both host
                         * and guest mapping
                         */
                        tsize = min(__ilog2(psize) - 10, tsize);

                        /*
                         * e500 doesn't implement the lowest tsize bit,
                         * or 1K pages.
                         */
                        tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
                }

                up_read(&current->mm->mmap_sem);
        }

        if (likely(!pfnmap)) {
                unsigned long tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);
                pfn = gfn_to_pfn_memslot(vcpu_e500->vcpu.kvm, slot, gfn);
                if (is_error_pfn(pfn)) {
                        printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
                                        (long)gfn);
                        kvm_release_pfn_clean(pfn);
                        return;
                }

                /* Align guest and physical address to page map boundaries */
                pfn &= ~(tsize_pages - 1);
                gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
        }

        /* Drop old ref and setup new one. */
        kvmppc_e500_ref_release(ref);
        kvmppc_e500_ref_setup(ref, gtlbe, pfn);

        kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
                                ref, gvaddr, stlbe);
}

/* XXX only map the one-one case, for now use TLB0 */
static void kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500,
                                 int esel,
                                 struct kvm_book3e_206_tlb_entry *stlbe)
{
        struct kvm_book3e_206_tlb_entry *gtlbe;
        struct tlbe_ref *ref;

        gtlbe = get_entry(vcpu_e500, 0, esel);
        ref = &vcpu_e500->gtlb_priv[0][esel].ref;

        kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
                        get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
                        gtlbe, 0, stlbe, ref);
}

/* Caller must ensure that the specified guest TLB entry is safe to insert into
 * the shadow TLB. */
/* XXX for both one-one and one-to-many, for now use TLB1 */
static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
                u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
                struct kvm_book3e_206_tlb_entry *stlbe, int esel)
{
        struct tlbe_ref *ref;
        unsigned int victim;

        victim = vcpu_e500->host_tlb1_nv++;

        if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
                vcpu_e500->host_tlb1_nv = 0;

        ref = &vcpu_e500->tlb_refs[1][victim];
        kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, ref);

        vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << victim;
        vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
        if (vcpu_e500->h2g_tlb1_rmap[victim]) {
                unsigned int idx = vcpu_e500->h2g_tlb1_rmap[victim];
                vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << victim);
        }
        vcpu_e500->h2g_tlb1_rmap[victim] = esel;

        return victim;
}

static inline int kvmppc_e500_gtlbe_invalidate(
                                struct kvmppc_vcpu_e500 *vcpu_e500,
                                int tlbsel, int esel)
{
        struct kvm_book3e_206_tlb_entry *gtlbe =
                get_entry(vcpu_e500, tlbsel, esel);

        if (unlikely(get_tlb_iprot(gtlbe)))
                return -1;

        gtlbe->mas1 = 0;

        return 0;
}
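
/*
 * Emulate a guest write to MMUCSR0: the TLB0FI/TLB1FI bits request a
 * flash invalidate of the corresponding guest TLB array.  As on
 * hardware, IPROT-protected entries survive the invalidation.
 */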
int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
{
        int esel;

        if (value & MMUCSR0_TLB0FI)
                for (esel = 0; esel < vcpu_e500->gtlb_params[0].entries; esel++)
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel);
        if (value & MMUCSR0_TLB1FI)
                for (esel = 0; esel < vcpu_e500->gtlb_params[1].entries; esel++)
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);

        /* Invalidate all vcpu id mappings */
        kvmppc_e500_tlbil_all(vcpu_e500);

        return EMULATE_DONE;
}
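
/*
 * Emulate tlbivax.  As this emulation decodes the effective address,
 * bit 2 is an "invalidate all" flag and bit 3 selects which guest TLB
 * to target.
 */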
int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        unsigned int ia;
        int esel, tlbsel;
        gva_t ea;

        ea = ((ra) ? kvmppc_get_gpr(vcpu, ra) : 0) + kvmppc_get_gpr(vcpu, rb);

        ia = (ea >> 2) & 0x1;

        /* since we only have two TLBs, only lower bit is used. */
        tlbsel = (ea >> 3) & 0x1;

        if (ia) {
                /* invalidate all entries */
                for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries;
                     esel++)
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
        } else {
                ea &= 0xfffff000;
                esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel,
                                get_cur_pid(vcpu), -1);
                if (esel >= 0)
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
        }

        /* Invalidate all vcpu id mappings */
        kvmppc_e500_tlbil_all(vcpu_e500);

        return EMULATE_DONE;
}

static void tlbilx_all(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
                       int pid, int rt)
{
        struct kvm_book3e_206_tlb_entry *tlbe;
        int tid, esel;

        /* invalidate all entries */
        for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries; esel++) {
                tlbe = get_entry(vcpu_e500, tlbsel, esel);
                tid = get_tlb_tid(tlbe);
                if (rt == 0 || tid == pid) {
                        inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
                }
        }
}

static void tlbilx_one(struct kvmppc_vcpu_e500 *vcpu_e500, int pid,
                       int ra, int rb)
{
        int tlbsel, esel;
        gva_t ea;

        ea = kvmppc_get_gpr(&vcpu_e500->vcpu, rb);
        if (ra)
                ea += kvmppc_get_gpr(&vcpu_e500->vcpu, ra);

        for (tlbsel = 0; tlbsel < 2; tlbsel++) {
                esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, -1);
                if (esel >= 0) {
                        inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
                        break;
                }
        }
}

int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int rt, int ra, int rb)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int pid = get_cur_spid(vcpu);

        if (rt == 0 || rt == 1) {
                tlbilx_all(vcpu_e500, 0, pid, rt);
                tlbilx_all(vcpu_e500, 1, pid, rt);
        } else if (rt == 3) {
                tlbilx_one(vcpu_e500, pid, ra, rb);
        }

        return EMULATE_DONE;
}
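
/*
 * Emulate tlbre: read the guest TLB entry selected by MAS0 back into
 * the MAS registers, refreshing the NV hint in MAS0.
 */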
int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int tlbsel, esel;
        struct kvm_book3e_206_tlb_entry *gtlbe;

        tlbsel = get_tlb_tlbsel(vcpu);
        esel = get_tlb_esel(vcpu, tlbsel);

        gtlbe = get_entry(vcpu_e500, tlbsel, esel);
        vcpu->arch.shared->mas0 &= ~MAS0_NV(~0);
        vcpu->arch.shared->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
        vcpu->arch.shared->mas1 = gtlbe->mas1;
        vcpu->arch.shared->mas2 = gtlbe->mas2;
        vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;

        return EMULATE_DONE;
}
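
/*
 * Emulate tlbsx: search both guest TLBs for a translation of the
 * effective address in rb.  On a hit, load the matching entry into the
 * MAS registers; on a miss, set up MAS0..MAS7_3 as a TLB miss would.
 */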
int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int as = !!get_cur_sas(vcpu);
        unsigned int pid = get_cur_spid(vcpu);
        int esel, tlbsel;
        struct kvm_book3e_206_tlb_entry *gtlbe = NULL;
        gva_t ea;

        ea = kvmppc_get_gpr(vcpu, rb);

        for (tlbsel = 0; tlbsel < 2; tlbsel++) {
                esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
                if (esel >= 0) {
                        gtlbe = get_entry(vcpu_e500, tlbsel, esel);
                        break;
                }
        }

        if (gtlbe) {
                esel &= vcpu_e500->gtlb_params[tlbsel].ways - 1;

                vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
                        | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
                vcpu->arch.shared->mas1 = gtlbe->mas1;
                vcpu->arch.shared->mas2 = gtlbe->mas2;
                vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;
        } else {
                int victim;

                /* since we only have two TLBs, only lower bit is used. */
                tlbsel = vcpu->arch.shared->mas4 >> 28 & 0x1;
                victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;

                vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel)
                        | MAS0_ESEL(victim)
                        | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
                vcpu->arch.shared->mas1 =
                          (vcpu->arch.shared->mas6 & MAS6_SPID0)
                        | (vcpu->arch.shared->mas6 & (MAS6_SAS ? MAS1_TS : 0))
                        | (vcpu->arch.shared->mas4 & MAS4_TSIZED(~0));
                vcpu->arch.shared->mas2 &= MAS2_EPN;
                vcpu->arch.shared->mas2 |= vcpu->arch.shared->mas4 &
                                           MAS2_ATTRIB_MASK;
                vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 |
                                             MAS3_U2 | MAS3_U3;
        }

        kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
        return EMULATE_DONE;
}

/* sesel is for tlb1 only */
static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
                        struct kvm_book3e_206_tlb_entry *gtlbe,
                        struct kvm_book3e_206_tlb_entry *stlbe,
                        int stlbsel, int sesel)
{
        int stid;

        preempt_disable();
        stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe);

        stlbe->mas1 |= MAS1_TID(stid);
        write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe);
        preempt_enable();
}
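
/*
 * Emulate tlbwe: commit the MAS register state to the selected guest
 * TLB entry, then eagerly install a host shadow mapping if the entry
 * is safe to map.  Large TLB1 pages get only their first 4K mapped up
 * front; the rest is faulted in on demand.
 */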
int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
        int tlbsel, esel, stlbsel, sesel;

        tlbsel = get_tlb_tlbsel(vcpu);
        esel = get_tlb_esel(vcpu, tlbsel);

        gtlbe = get_entry(vcpu_e500, tlbsel, esel);

        if (get_tlb_v(gtlbe))
                inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);

        gtlbe->mas1 = vcpu->arch.shared->mas1;
        gtlbe->mas2 = vcpu->arch.shared->mas2;
        gtlbe->mas7_3 = vcpu->arch.shared->mas7_3;

        trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1,
                                      gtlbe->mas2, gtlbe->mas7_3);

        /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
        if (tlbe_is_host_safe(vcpu, gtlbe)) {
                u64 eaddr;
                u64 raddr;

                switch (tlbsel) {
                case 0:
                        /* TLB0 */
                        gtlbe->mas1 &= ~MAS1_TSIZE(~0);
                        gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);

                        stlbsel = 0;
                        kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
                        sesel = 0; /* unused */

                        break;

                case 1:
                        /* TLB1 */
                        eaddr = get_tlb_eaddr(gtlbe);
                        raddr = get_tlb_raddr(gtlbe);

                        /* Create a 4KB mapping on the host.
                         * If the guest wanted a large page,
                         * only the first 4KB is mapped here and the rest
                         * are mapped on the fly. */
                        stlbsel = 1;
                        sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr,
                                        raddr >> PAGE_SHIFT, gtlbe, &stlbe, esel);
                        break;

                default:
                        BUG();
                }

                write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel);
        }

        kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
        return EMULATE_DONE;
}

static int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
                                  gva_t eaddr, unsigned int pid, int as)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int esel, tlbsel;

        for (tlbsel = 0; tlbsel < 2; tlbsel++) {
                esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
                if (esel >= 0)
                        return index_of(tlbsel, esel);
        }

        return -1;
}

/* 'linear_address' is actually an encoding of AS|PID|EADDR. */
int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
                               struct kvm_translation *tr)
{
        int index;
        gva_t eaddr;
        u8 pid;
        u8 as;

        eaddr = tr->linear_address;
        pid = (tr->linear_address >> 32) & 0xff;
        as = (tr->linear_address >> 40) & 0x1;

        index = kvmppc_e500_tlb_search(vcpu, eaddr, pid, as);
        if (index < 0) {
                tr->valid = 0;
                return 0;
        }

        tr->physical_address = kvmppc_mmu_xlate(vcpu, index, eaddr);
        /* XXX what does "writeable" and "usermode" even mean? */
        tr->valid = 1;

        return 0;
}

int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
        unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

        return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
        unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

        return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
{
        unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

        kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as);
}

void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
{
        unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

        kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as);
}

gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
                       gva_t eaddr)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        struct kvm_book3e_206_tlb_entry *gtlbe;
        u64 pgmask;

        gtlbe = get_entry(vcpu_e500, tlbsel_of(index), esel_of(index));
        pgmask = get_tlb_bytes(gtlbe) - 1;

        return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
}
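
/*
 * Called on a host TLB miss for an address that the guest TLB already
 * translates: install a shadow mapping for the valid guest entry at
 * the given combined tlbsel/esel index.
 */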
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
                    unsigned int index)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        struct tlbe_priv *priv;
        struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
        int tlbsel = tlbsel_of(index);
        int esel = esel_of(index);
        int stlbsel, sesel;

        gtlbe = get_entry(vcpu_e500, tlbsel, esel);

        switch (tlbsel) {
        case 0:
                stlbsel = 0;
                sesel = 0; /* unused */
                priv = &vcpu_e500->gtlb_priv[tlbsel][esel];

                kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K,
                                        &priv->ref, eaddr, &stlbe);
                break;

        case 1: {
                gfn_t gfn = gpaddr >> PAGE_SHIFT;

                stlbsel = 1;
                sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn,
                                             gtlbe, &stlbe, esel);
                break;
        }

        default:
                BUG();
                break;
        }

        write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel);
}
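
/*
 * Tear down the guest TLB state: drop all shadow mappings and page
 * references, then free either the kernel-allocated entry array or
 * the pages shared with userspace by kvm_vcpu_ioctl_config_tlb().
 */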
static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        int i;

        clear_tlb1_bitmap(vcpu_e500);
        kfree(vcpu_e500->g2h_tlb1_map);

        clear_tlb_refs(vcpu_e500);
        kfree(vcpu_e500->gtlb_priv[0]);
        kfree(vcpu_e500->gtlb_priv[1]);

        if (vcpu_e500->shared_tlb_pages) {
                vfree((void *)(round_down((uintptr_t)vcpu_e500->gtlb_arch,
                                          PAGE_SIZE)));

                for (i = 0; i < vcpu_e500->num_shared_tlb_pages; i++) {
                        set_page_dirty_lock(vcpu_e500->shared_tlb_pages[i]);
                        put_page(vcpu_e500->shared_tlb_pages[i]);
                }

                vcpu_e500->num_shared_tlb_pages = 0;
                vcpu_e500->shared_tlb_pages = NULL;
        } else {
                kfree(vcpu_e500->gtlb_arch);
        }

        vcpu_e500->gtlb_arch = NULL;
}

void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
        sregs->u.e.mas0 = vcpu->arch.shared->mas0;
        sregs->u.e.mas1 = vcpu->arch.shared->mas1;
        sregs->u.e.mas2 = vcpu->arch.shared->mas2;
        sregs->u.e.mas7_3 = vcpu->arch.shared->mas7_3;
        sregs->u.e.mas4 = vcpu->arch.shared->mas4;
        sregs->u.e.mas6 = vcpu->arch.shared->mas6;

        sregs->u.e.mmucfg = vcpu->arch.mmucfg;
        sregs->u.e.tlbcfg[0] = vcpu->arch.tlbcfg[0];
        sregs->u.e.tlbcfg[1] = vcpu->arch.tlbcfg[1];
        sregs->u.e.tlbcfg[2] = 0;
        sregs->u.e.tlbcfg[3] = 0;
}

int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
        if (sregs->u.e.features & KVM_SREGS_E_ARCH206_MMU) {
                vcpu->arch.shared->mas0 = sregs->u.e.mas0;
                vcpu->arch.shared->mas1 = sregs->u.e.mas1;
                vcpu->arch.shared->mas2 = sregs->u.e.mas2;
                vcpu->arch.shared->mas7_3 = sregs->u.e.mas7_3;
                vcpu->arch.shared->mas4 = sregs->u.e.mas4;
                vcpu->arch.shared->mas6 = sregs->u.e.mas6;
        }

        return 0;
}
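
/*
 * Userspace-configured guest TLB (KVM_CAP_SW_TLB): validate the
 * geometry requested by userspace, map the user-supplied entry array
 * into the kernel, and switch this vcpu's guest TLB over to it.
 */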
int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
                              struct kvm_config_tlb *cfg)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        struct kvm_book3e_206_tlb_params params;
        char *virt;
        struct page **pages;
        struct tlbe_priv *privs[2] = {};
        u64 *g2h_bitmap = NULL;
        size_t array_len;
        u32 sets;
        int num_pages, ret, i;

        if (cfg->mmu_type != KVM_MMU_FSL_BOOKE_NOHV)
                return -EINVAL;

        if (copy_from_user(&params, (void __user *)(uintptr_t)cfg->params,
                           sizeof(params)))
                return -EFAULT;

        if (params.tlb_sizes[1] > 64)
                return -EINVAL;
        if (params.tlb_ways[1] != params.tlb_sizes[1])
                return -EINVAL;
        if (params.tlb_sizes[2] != 0 || params.tlb_sizes[3] != 0)
                return -EINVAL;
        if (params.tlb_ways[2] != 0 || params.tlb_ways[3] != 0)
                return -EINVAL;

        if (!is_power_of_2(params.tlb_ways[0]))
                return -EINVAL;

        sets = params.tlb_sizes[0] >> ilog2(params.tlb_ways[0]);
        if (!is_power_of_2(sets))
                return -EINVAL;

        array_len = params.tlb_sizes[0] + params.tlb_sizes[1];
        array_len *= sizeof(struct kvm_book3e_206_tlb_entry);

        if (cfg->array_len < array_len)
                return -EINVAL;

        num_pages = DIV_ROUND_UP(cfg->array + array_len - 1, PAGE_SIZE) -
                    cfg->array / PAGE_SIZE;
        pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        ret = get_user_pages_fast(cfg->array, num_pages, 1, pages);
        if (ret < 0)
                goto err_pages;

        if (ret != num_pages) {
                num_pages = ret;
                ret = -EFAULT;
                goto err_put_page;
        }

        /* ret still holds the positive page count; make sure the
         * failure paths below return a real error code. */
        ret = -ENOMEM;

        virt = vmap(pages, num_pages, VM_MAP, PAGE_KERNEL);
        if (!virt)
                goto err_put_page;

        privs[0] = kzalloc(sizeof(struct tlbe_priv) * params.tlb_sizes[0],
                           GFP_KERNEL);
        privs[1] = kzalloc(sizeof(struct tlbe_priv) * params.tlb_sizes[1],
                           GFP_KERNEL);

        if (!privs[0] || !privs[1])
                goto err_put_page;

        g2h_bitmap = kzalloc(sizeof(u64) * params.tlb_sizes[1],
                             GFP_KERNEL);
        if (!g2h_bitmap)
                goto err_put_page;

        free_gtlb(vcpu_e500);

        vcpu_e500->gtlb_priv[0] = privs[0];
        vcpu_e500->gtlb_priv[1] = privs[1];
        vcpu_e500->g2h_tlb1_map = g2h_bitmap;

        vcpu_e500->gtlb_arch = (struct kvm_book3e_206_tlb_entry *)
                (virt + (cfg->array & (PAGE_SIZE - 1)));

        vcpu_e500->gtlb_params[0].entries = params.tlb_sizes[0];
        vcpu_e500->gtlb_params[1].entries = params.tlb_sizes[1];

        vcpu_e500->gtlb_offset[0] = 0;
        vcpu_e500->gtlb_offset[1] = params.tlb_sizes[0];

        vcpu->arch.mmucfg = mfspr(SPRN_MMUCFG) & ~MMUCFG_LPIDSIZE;

        vcpu->arch.tlbcfg[0] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
        if (params.tlb_sizes[0] <= 2048)
                vcpu->arch.tlbcfg[0] |= params.tlb_sizes[0];
        vcpu->arch.tlbcfg[0] |= params.tlb_ways[0] << TLBnCFG_ASSOC_SHIFT;

        vcpu->arch.tlbcfg[1] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
        vcpu->arch.tlbcfg[1] |= params.tlb_sizes[1];
        vcpu->arch.tlbcfg[1] |= params.tlb_ways[1] << TLBnCFG_ASSOC_SHIFT;

        vcpu_e500->shared_tlb_pages = pages;
        vcpu_e500->num_shared_tlb_pages = num_pages;

        vcpu_e500->gtlb_params[0].ways = params.tlb_ways[0];
        vcpu_e500->gtlb_params[0].sets = sets;

        vcpu_e500->gtlb_params[1].ways = params.tlb_sizes[1];
        vcpu_e500->gtlb_params[1].sets = 1;

        return 0;

err_put_page:
        kfree(privs[0]);
        kfree(privs[1]);

        for (i = 0; i < num_pages; i++)
                put_page(pages[i]);

err_pages:
        kfree(pages);
        return ret;
}

int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
                             struct kvm_dirty_tlb *dirty)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

        clear_tlb_refs(vcpu_e500);
        return 0;
}
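
/*
 * One-time per-vcpu setup: read the host TLB geometry, allocate the
 * default guest TLB arrays and the guest<->host tracking structures,
 * and initialize the TLBnCFG values the guest will see.
 */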
int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        struct kvm_vcpu *vcpu = &vcpu_e500->vcpu;
        int entry_size = sizeof(struct kvm_book3e_206_tlb_entry);
        int entries = KVM_E500_TLB0_SIZE + KVM_E500_TLB1_SIZE;

        host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY;
        host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;

        /*
         * This should never happen on real e500 hardware, but is
         * architecturally possible -- e.g. in some weird nested
         * virtualization case.
         */
        if (host_tlb_params[0].entries == 0 ||
            host_tlb_params[1].entries == 0) {
                pr_err("%s: need to know host tlb size\n", __func__);
                return -ENODEV;
        }

        host_tlb_params[0].ways = (mfspr(SPRN_TLB0CFG) & TLBnCFG_ASSOC) >>
                                  TLBnCFG_ASSOC_SHIFT;
        host_tlb_params[1].ways = host_tlb_params[1].entries;

        if (!is_power_of_2(host_tlb_params[0].entries) ||
            !is_power_of_2(host_tlb_params[0].ways) ||
            host_tlb_params[0].entries < host_tlb_params[0].ways ||
            host_tlb_params[0].ways == 0) {
                pr_err("%s: bad tlb0 host config: %u entries %u ways\n",
                       __func__, host_tlb_params[0].entries,
                       host_tlb_params[0].ways);
                return -ENODEV;
        }

        host_tlb_params[0].sets =
                host_tlb_params[0].entries / host_tlb_params[0].ways;
        host_tlb_params[1].sets = 1;

        vcpu_e500->gtlb_params[0].entries = KVM_E500_TLB0_SIZE;
        vcpu_e500->gtlb_params[1].entries = KVM_E500_TLB1_SIZE;

        vcpu_e500->gtlb_params[0].ways = KVM_E500_TLB0_WAY_NUM;
        vcpu_e500->gtlb_params[0].sets =
                KVM_E500_TLB0_SIZE / KVM_E500_TLB0_WAY_NUM;

        vcpu_e500->gtlb_params[1].ways = KVM_E500_TLB1_SIZE;
        vcpu_e500->gtlb_params[1].sets = 1;

        vcpu_e500->gtlb_arch = kmalloc(entries * entry_size, GFP_KERNEL);
        if (!vcpu_e500->gtlb_arch)
                return -ENOMEM;

        vcpu_e500->gtlb_offset[0] = 0;
        vcpu_e500->gtlb_offset[1] = KVM_E500_TLB0_SIZE;

        vcpu_e500->tlb_refs[0] =
                kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries,
                        GFP_KERNEL);
        if (!vcpu_e500->tlb_refs[0])
                goto err;

        vcpu_e500->tlb_refs[1] =
                kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries,
                        GFP_KERNEL);
        if (!vcpu_e500->tlb_refs[1])
                goto err;

        vcpu_e500->gtlb_priv[0] = kzalloc(sizeof(struct tlbe_priv) *
                                          vcpu_e500->gtlb_params[0].entries,
                                          GFP_KERNEL);
        if (!vcpu_e500->gtlb_priv[0])
                goto err;

        vcpu_e500->gtlb_priv[1] = kzalloc(sizeof(struct tlbe_priv) *
                                          vcpu_e500->gtlb_params[1].entries,
                                          GFP_KERNEL);
        if (!vcpu_e500->gtlb_priv[1])
                goto err;

        vcpu_e500->g2h_tlb1_map = kzalloc(sizeof(u64) *
                                          vcpu_e500->gtlb_params[1].entries,
                                          GFP_KERNEL);
        if (!vcpu_e500->g2h_tlb1_map)
                goto err;

        vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
                                           host_tlb_params[1].entries,
                                           GFP_KERNEL);
        if (!vcpu_e500->h2g_tlb1_rmap)
                goto err;

        /* Init TLB configuration register */
        vcpu->arch.tlbcfg[0] = mfspr(SPRN_TLB0CFG) &
                               ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
        vcpu->arch.tlbcfg[0] |= vcpu_e500->gtlb_params[0].entries;
        vcpu->arch.tlbcfg[0] |=
                vcpu_e500->gtlb_params[0].ways << TLBnCFG_ASSOC_SHIFT;

        vcpu->arch.tlbcfg[1] = mfspr(SPRN_TLB1CFG) &
                               ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
        vcpu->arch.tlbcfg[1] |= vcpu_e500->gtlb_params[1].entries;
        vcpu->arch.tlbcfg[1] |=
                vcpu_e500->gtlb_params[1].ways << TLBnCFG_ASSOC_SHIFT;

        return 0;

err:
        free_gtlb(vcpu_e500);
        kfree(vcpu_e500->tlb_refs[0]);
        kfree(vcpu_e500->tlb_refs[1]);
        return -1;
}

void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        free_gtlb(vcpu_e500);
        kfree(vcpu_e500->h2g_tlb1_rmap);
        kfree(vcpu_e500->tlb_refs[0]);
        kfree(vcpu_e500->tlb_refs[1]);
}