/*
 * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_e500.h>

#include "../mm/mmu_decl.h"
#include "e500_tlb.h"
#include "trace.h"
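/*
 * Note: guest TLB1 entries are shadowed into the host TLB1 from the top
 * down: guest/shadow index 0 maps to host entry (tlb1_entry_num - 1),
 * index 1 to (tlb1_entry_num - 2), and so on, staying clear of the host's
 * own CAM entries at the bottom of the array.
 */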
#define to_htlb1_esel(esel) (tlb1_entry_num - (esel) - 1)

static unsigned int tlb1_entry_num;
void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe *tlbe;
	int i, tlbsel;

	printk("| %8s | %8s | %8s | %8s | %8s |\n",
			"nr", "mas1", "mas2", "mas3", "mas7");

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		printk("Guest TLB%d:\n", tlbsel);
		for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++) {
			tlbe = &vcpu_e500->guest_tlb[tlbsel][i];
			if (tlbe->mas1 & MAS1_VALID)
				printk(" G[%d][%3d] | %08X | %08X | %08X | %08X |\n",
					tlbsel, i, tlbe->mas1, tlbe->mas2,
					tlbe->mas3, tlbe->mas7);
		}
	}

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		printk("Shadow TLB%d:\n", tlbsel);
		for (i = 0; i < vcpu_e500->shadow_tlb_size[tlbsel]; i++) {
			tlbe = &vcpu_e500->shadow_tlb[tlbsel][i];
			if (tlbe->mas1 & MAS1_VALID)
				printk(" S[%d][%3d] | %08X | %08X | %08X | %08X |\n",
					tlbsel, i, tlbe->mas1, tlbe->mas2,
					tlbe->mas3, tlbe->mas7);
		}
	}
}
static inline unsigned int tlb0_get_next_victim(
		struct kvmppc_vcpu_e500 *vcpu_e500)
{
	unsigned int victim;

	victim = vcpu_e500->guest_tlb_nv[0]++;
	if (unlikely(vcpu_e500->guest_tlb_nv[0] >= KVM_E500_TLB0_WAY_NUM))
		vcpu_e500->guest_tlb_nv[0] = 0;

	return victim;
}
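/*
 * tlbcam_index (from mmu_decl.h) counts the host TLB1/CAM entries already
 * claimed by the host kernel's own pinned mappings; only the entries above
 * it are available for shadowing guest TLB1 entries.
 */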
static inline unsigned int tlb1_max_shadow_size(void)
{
	return tlb1_entry_num - tlbcam_index;
}
static inline int tlbe_is_writable(struct tlbe *tlbe)
{
	return tlbe->mas3 & (MAS3_SW|MAS3_UW);
}
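/*
 * In the e500 MAS3 layout each user permission bit sits one position above
 * its supervisor counterpart (e.g. MAS3_SR << 1 == MAS3_UR), which is what
 * makes the "<< 1" translation below work.
 */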
static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
{
	/* Mask off reserved bits. */
	mas3 &= MAS3_ATTRIB_MASK;

	if (!usermode) {
		/* Guest is in supervisor mode,
		 * so we need to translate guest
		 * supervisor permissions into user permissions. */
		mas3 &= ~E500_TLB_USER_PERM_MASK;
		mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
	}

	return mas3 | E500_TLB_SUPER_PERM_MASK;
}
static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
{
#ifdef CONFIG_SMP
	/* A vcpu may migrate between CPUs, so the mapping must be coherent. */
	return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M;
#else
	return mas2 & MAS2_ATTRIB_MASK;
#endif
}
/*
 * writing shadow tlb entry to host TLB
 */
static inline void __write_host_tlbe(struct tlbe *stlbe)
{
	mtspr(SPRN_MAS1, stlbe->mas1);
	mtspr(SPRN_MAS2, stlbe->mas2);
	mtspr(SPRN_MAS3, stlbe->mas3);
	mtspr(SPRN_MAS7, stlbe->mas7);
	__asm__ __volatile__ ("tlbwe\n" : : );
}
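/*
 * Note: __write_host_tlbe() writes whatever entry MAS0 currently selects,
 * so the caller must program MAS0 (and keep interrupts off) around it;
 * write_host_tlbe() below does exactly that for TLB1 entries.
 */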
static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int esel)
{
	struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];

	local_irq_disable();
	if (tlbsel == 0) {
		__write_host_tlbe(stlbe);
	} else {
		unsigned register mas0;

		mas0 = mfspr(SPRN_MAS0);

		mtspr(SPRN_MAS0, MAS0_TLBSEL(1) | MAS0_ESEL(to_htlb1_esel(esel)));
		__write_host_tlbe(stlbe);

		mtspr(SPRN_MAS0, mas0);
	}
	local_irq_enable();
}
void kvmppc_e500_tlb_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int i;
	unsigned register mas0;

	/* Load all valid TLB1 entries to reduce guest tlb miss fault */
	local_irq_disable();
	mas0 = mfspr(SPRN_MAS0);
	for (i = 0; i < tlb1_max_shadow_size(); i++) {
		struct tlbe *stlbe = &vcpu_e500->shadow_tlb[1][i];

		if (get_tlb_v(stlbe)) {
			mtspr(SPRN_MAS0, MAS0_TLBSEL(1)
					| MAS0_ESEL(to_htlb1_esel(i)));
			__write_host_tlbe(stlbe);
		}
	}
	mtspr(SPRN_MAS0, mas0);
	local_irq_enable();
}
void kvmppc_e500_tlb_put(struct kvm_vcpu *vcpu)
{
	/* Drop all host TLB entries on a heavyweight exit; shadow state stays
	 * in shadow_tlb[] and is reloaded by kvmppc_e500_tlb_load(). */
	_tlbil_all();
}
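/*
 * A guest entry matches when the address falls inside the entry's range,
 * the entry is valid, the TID matches (TID 0 is a wildcard that matches
 * every PID), and the address space bit matches (as == -1 ignores AS).
 */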
/* Search the guest TLB for a matching entry. */
static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
		gva_t eaddr, int tlbsel, unsigned int pid, int as)
{
	int i;

	for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++) {
		struct tlbe *tlbe = &vcpu_e500->guest_tlb[tlbsel][i];
		unsigned int tid;

		if (eaddr < get_tlb_eaddr(tlbe))
			continue;

		if (eaddr > get_tlb_end(tlbe))
			continue;

		tid = get_tlb_tid(tlbe);
		if (tid && (tid != pid))
			continue;

		if (!get_tlb_v(tlbe))
			continue;

		if (get_tlb_ts(tlbe) != as && as != -1)
			continue;

		return i;
	}

	return -1;
}
static void kvmppc_e500_shadow_release(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int esel)
{
	struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];
	struct page *page = vcpu_e500->shadow_pages[tlbsel][esel];

	if (page) {
		vcpu_e500->shadow_pages[tlbsel][esel] = NULL;

		if (get_tlb_v(stlbe)) {
			if (tlbe_is_writable(stlbe))
				kvm_release_page_dirty(page);
			else
				kvm_release_page_clean(page);
		}
	}
}
static void kvmppc_e500_stlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int esel)
{
	struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];

	kvmppc_e500_shadow_release(vcpu_e500, tlbsel, esel);
	stlbe->mas1 = 0;
	trace_kvm_stlb_inval(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2,
			     stlbe->mas3, stlbe->mas7);
}
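/*
 * Guest TLB1 entries can cover a large range that is shadowed by many
 * host-sized entries, so invalidation walks the whole shadow TLB1 looking
 * for overlap; writing the now-cleared entry back out evicts the stale
 * copy from the host TLB as well.
 */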
static void kvmppc_e500_tlb1_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
		gva_t eaddr, gva_t eend, u32 tid)
{
	unsigned int pid = tid & 0xff;
	unsigned int i;

	for (i = 0; i < vcpu_e500->guest_tlb_size[1]; i++) {
		struct tlbe *stlbe = &vcpu_e500->shadow_tlb[1][i];
		unsigned int tid;

		if (!get_tlb_v(stlbe))
			continue;

		if (eend < get_tlb_eaddr(stlbe))
			continue;

		if (eaddr > get_tlb_end(stlbe))
			continue;

		tid = get_tlb_tid(stlbe);
		if (tid && (tid != pid))
			continue;

		kvmppc_e500_stlbe_invalidate(vcpu_e500, 1, i);
		write_host_tlbe(vcpu_e500, 1, i);
	}
}
static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
		unsigned int eaddr, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int victim, pidsel, tsized;
	int tlbsel;

	/* since we only have two TLBs, only lower bit is used. */
	tlbsel = (vcpu_e500->mas4 >> 28) & 0x1;
	victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0;
	pidsel = (vcpu_e500->mas4 >> 16) & 0xf;
	tsized = (vcpu_e500->mas4 >> 7) & 0x1f;

	vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
		| MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
	vcpu_e500->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
		| MAS1_TID(vcpu_e500->pid[pidsel])
		| MAS1_TSIZE(tsized);
	vcpu_e500->mas2 = (eaddr & MAS2_EPN)
		| (vcpu_e500->mas4 & MAS2_ATTRIB_MASK);
	vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
	vcpu_e500->mas6 = (vcpu_e500->mas6 & MAS6_SPID1)
		| (get_cur_pid(vcpu) << 16)
		| (as ? MAS6_SAS : 0);
	vcpu_e500->mas7 = 0;
}
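/*
 * All shadow mappings are forced to 4KB and TS=1: the guest physically
 * runs with MSR[IS/DS] set, so its translations live in the host's AS1
 * space, and per-4KB shadowing lets the host pin exactly one page per
 * entry (presumably why large guest pages are split up on demand).
 */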
static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
	u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, int tlbsel, int esel)
{
	struct page *new_page;
	struct tlbe *stlbe;
	hpa_t hpaddr;

	stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];

	/* Get reference to new page. */
	new_page = gfn_to_page(vcpu_e500->vcpu.kvm, gfn);
	if (is_error_page(new_page)) {
		printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n",
				(long)gfn);
		kvm_release_page_clean(new_page);
		return;
	}
	hpaddr = page_to_phys(new_page);

	/* Drop reference to old page. */
	kvmppc_e500_shadow_release(vcpu_e500, tlbsel, esel);

	vcpu_e500->shadow_pages[tlbsel][esel] = new_page;

	/* Force TS=1 IPROT=0 TSIZE=4KB for all guest mappings. */
	stlbe->mas1 = MAS1_TSIZE(BOOK3E_PAGESZ_4K)
		| MAS1_TID(get_tlb_tid(gtlbe)) | MAS1_TS | MAS1_VALID;
	stlbe->mas2 = (gvaddr & MAS2_EPN)
		| e500_shadow_mas2_attrib(gtlbe->mas2,
				vcpu_e500->vcpu.arch.msr & MSR_PR);
	stlbe->mas3 = (hpaddr & MAS3_RPN)
		| e500_shadow_mas3_attrib(gtlbe->mas3,
				vcpu_e500->vcpu.arch.msr & MSR_PR);
	stlbe->mas7 = (hpaddr >> 32) & MAS7_RPN;

	trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2,
			     stlbe->mas3, stlbe->mas7);
}
static int kvmppc_e500_stlbe_map(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int esel)
{
	struct tlbe *gtlbe;

	gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];

	kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
			get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
			gtlbe, tlbsel, esel);

	return esel;
}
/* Caller must ensure that the specified guest TLB entry is safe to insert into
 * the shadow TLB. */
static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
		u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe)
{
	unsigned int victim;

	victim = vcpu_e500->guest_tlb_nv[1]++;

	if (unlikely(vcpu_e500->guest_tlb_nv[1] >= tlb1_max_shadow_size()))
		vcpu_e500->guest_tlb_nv[1] = 0;

	kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, victim);

	return victim;
}
/* Invalidate all guest kernel mappings when enter usermode,
 * so that when they fault back in they will get the
 * proper permission bits. */
void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
{
	if (usermode) {
		struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
		int i;

		for (i = 0; i < tlb1_max_shadow_size(); i++)
			kvmppc_e500_stlbe_invalidate(vcpu_e500, 1, i);

		_tlbil_all();
	}
}
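/*
 * IPROT-protected entries survive guest invalidate-all operations, so a
 * guest entry with IPROT set is deliberately skipped here.
 */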
static int kvmppc_e500_gtlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int esel)
{
	struct tlbe *gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];

	if (unlikely(get_tlb_iprot(gtlbe)))
		return -1;

	if (tlbsel == 1) {
		kvmppc_e500_tlb1_invalidate(vcpu_e500, get_tlb_eaddr(gtlbe),
				get_tlb_end(gtlbe),
				get_tlb_tid(gtlbe));
	} else {
		kvmppc_e500_stlbe_invalidate(vcpu_e500, tlbsel, esel);
	}

	gtlbe->mas1 = 0;

	return 0;
}
int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
{
	int esel;

	if (value & MMUCSR0_TLB0FI)
		for (esel = 0; esel < vcpu_e500->guest_tlb_size[0]; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel);
	if (value & MMUCSR0_TLB1FI)
		for (esel = 0; esel < vcpu_e500->guest_tlb_size[1]; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);

	_tlbil_all();

	return EMULATE_DONE;
}
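/*
 * For tlbivax the computed effective address doubles as the operand:
 * bit 2 of the EA selects invalidate-all ("ia" below) and bit 3 selects
 * the TLB, hence the (ea >> 2) & 1 and (ea >> 3) & 1 decoding.
 */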
int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int ia;
	int esel, tlbsel;
	gva_t ea;

	ea = ((ra) ? kvmppc_get_gpr(vcpu, ra) : 0) + kvmppc_get_gpr(vcpu, rb);

	ia = (ea >> 2) & 0x1;

	/* since we only have two TLBs, only lower bit is used. */
	tlbsel = (ea >> 3) & 0x1;

	if (ia) {
		/* invalidate all entries */
		for (esel = 0; esel < vcpu_e500->guest_tlb_size[tlbsel]; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	} else {
		ea &= 0xfffff000;
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel,
				get_cur_pid(vcpu), -1);
		if (esel >= 0)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	}

	_tlbil_all();

	return EMULATE_DONE;
}
int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int tlbsel, esel;
	struct tlbe *gtlbe;

	tlbsel = get_tlb_tlbsel(vcpu_e500);
	esel = get_tlb_esel(vcpu_e500, tlbsel);

	gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];
	vcpu_e500->mas0 &= ~MAS0_NV(~0);
	vcpu_e500->mas0 |= MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
	vcpu_e500->mas1 = gtlbe->mas1;
	vcpu_e500->mas2 = gtlbe->mas2;
	vcpu_e500->mas3 = gtlbe->mas3;
	vcpu_e500->mas7 = gtlbe->mas7;

	return EMULATE_DONE;
}
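/*
 * tlbsx searches by the SPID/SAS fields of MAS6. On a hit the matching
 * entry is loaded into the MAS registers, exactly like tlbre; on a miss
 * the MAS registers are primed for a subsequent tlbwe, mirroring what
 * kvmppc_e500_deliver_tlb_miss() does for a real TLB miss.
 */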
int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int as = !!get_cur_sas(vcpu_e500);
	unsigned int pid = get_cur_spid(vcpu_e500);
	int esel, tlbsel;
	struct tlbe *gtlbe = NULL;
	gva_t ea;

	ea = kvmppc_get_gpr(vcpu, rb);

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
		if (esel >= 0) {
			gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];
			break;
		}
	}

	if (gtlbe) {
		vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
			| MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
		vcpu_e500->mas1 = gtlbe->mas1;
		vcpu_e500->mas2 = gtlbe->mas2;
		vcpu_e500->mas3 = gtlbe->mas3;
		vcpu_e500->mas7 = gtlbe->mas7;
	} else {
		int victim;

		/* since we only have two TLBs, only lower bit is used. */
		tlbsel = vcpu_e500->mas4 >> 28 & 0x1;
		victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0;

		vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
			| MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
		vcpu_e500->mas1 = (vcpu_e500->mas6 & MAS6_SPID0)
			/* set TS if the search targeted AS1 */
			| ((vcpu_e500->mas6 & MAS6_SAS) ? MAS1_TS : 0)
			| (vcpu_e500->mas4 & MAS4_TSIZED(~0));
		vcpu_e500->mas2 &= MAS2_EPN;
		vcpu_e500->mas2 |= vcpu_e500->mas4 & MAS2_ATTRIB_MASK;
		vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
		vcpu_e500->mas7 = 0;
	}

	return EMULATE_DONE;
}
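/*
 * tlbwe: the guest's MAS values are copied into the guest TLB array first;
 * only if the entry is host-safe (see tlbe_is_host_safe()) is a shadow
 * mapping created and written to the real TLB immediately. Otherwise the
 * mapping simply faults in later through kvmppc_mmu_map().
 */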
int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	u64 eaddr;
	u64 raddr;
	u32 tid;
	struct tlbe *gtlbe;
	int tlbsel, esel, stlbsel, sesel;

	tlbsel = get_tlb_tlbsel(vcpu_e500);
	esel = get_tlb_esel(vcpu_e500, tlbsel);

	gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];

	if (get_tlb_v(gtlbe) && tlbsel == 1) {
		eaddr = get_tlb_eaddr(gtlbe);
		tid = get_tlb_tid(gtlbe);
		kvmppc_e500_tlb1_invalidate(vcpu_e500, eaddr,
				get_tlb_end(gtlbe), tid);
	}

	gtlbe->mas1 = vcpu_e500->mas1;
	gtlbe->mas2 = vcpu_e500->mas2;
	gtlbe->mas3 = vcpu_e500->mas3;
	gtlbe->mas7 = vcpu_e500->mas7;

	trace_kvm_gtlb_write(vcpu_e500->mas0, gtlbe->mas1, gtlbe->mas2,
			     gtlbe->mas3, gtlbe->mas7);

	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
	if (tlbe_is_host_safe(vcpu, gtlbe)) {
		switch (tlbsel) {
		case 0:
			/* TLB0 */
			gtlbe->mas1 &= ~MAS1_TSIZE(~0);
			gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);

			stlbsel = 0;
			sesel = kvmppc_e500_stlbe_map(vcpu_e500, 0, esel);
			break;

		case 1:
			/* TLB1 */
			eaddr = get_tlb_eaddr(gtlbe);
			raddr = get_tlb_raddr(gtlbe);

			/* Create a 4KB mapping on the host.
			 * If the guest wanted a large page,
			 * only the first 4KB is mapped here and the rest
			 * are mapped on the fly. */
			stlbsel = 1;
			sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr,
					raddr >> PAGE_SHIFT, gtlbe);
			break;

		default:
			BUG();
		}
		write_host_tlbe(vcpu_e500, stlbsel, sesel);
	}

	return EMULATE_DONE;
}
int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.msr & MSR_IS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}
int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.msr & MSR_DS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}
void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
{
	unsigned int as = !!(vcpu->arch.msr & MSR_IS);

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as);
}
void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
{
	unsigned int as = !!(vcpu->arch.msr & MSR_DS);

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as);
}
gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
			gva_t eaddr)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe *gtlbe =
		&vcpu_e500->guest_tlb[tlbsel_of(index)][esel_of(index)];
	u64 pgmask = get_tlb_bytes(gtlbe) - 1;

	return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int tlbsel, i;

	for (tlbsel = 0; tlbsel < 2; tlbsel++)
		for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++)
			kvmppc_e500_shadow_release(vcpu_e500, tlbsel, i);

	/* discard all guest mapping */
	_tlbil_all();
}
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
			unsigned int index)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int tlbsel = tlbsel_of(index);
	int esel = esel_of(index);
	int stlbsel, sesel;

	switch (tlbsel) {
	case 0:
		stlbsel = 0;
		sesel = esel;
		break;

	case 1: {
		gfn_t gfn = gpaddr >> PAGE_SHIFT;
		struct tlbe *gtlbe
			= &vcpu_e500->guest_tlb[tlbsel][esel];

		stlbsel = 1;
		sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe);
		break;
	}

	default:
		BUG();
	}
	write_host_tlbe(vcpu_e500, stlbsel, sesel);
}
int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
				gva_t eaddr, unsigned int pid, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int esel, tlbsel;

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
		if (esel >= 0)
			return index_of(tlbsel, esel);
	}

	return -1;
}
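/*
 * The initial guest TLB below gives the guest a 256MB mapping at address 0
 * for the kernel, plus a 4KB identity mapping covering the (assumed) UART
 * at 0xe0004500, truncated to its 4KB page frame by the 0xFFFFF000 mask.
 */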
void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	struct tlbe *tlbe;

	/* Insert large initial mapping for guest. */
	tlbe = &vcpu_e500->guest_tlb[1][0];
	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
	tlbe->mas2 = 0;
	tlbe->mas3 = E500_TLB_SUPER_PERM_MASK;
	tlbe->mas7 = 0;

	/* 4K map for serial output. Used by kernel wrapper. */
	tlbe = &vcpu_e500->guest_tlb[1][1];
	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
	tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
	tlbe->mas3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
	tlbe->mas7 = 0;
}
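/*
 * Allocation layout: the TLB0 shadow is sized 1:1 with the guest TLB0, so
 * a guest esel indexes the shadow array directly, while the TLB1 shadow is
 * sized by the host's real TLB1 (tlb1_entry_num) since one guest TLB1
 * entry may fan out into many shadow entries.
 */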
int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	tlb1_entry_num = mfspr(SPRN_TLB1CFG) & 0xFFF;

	vcpu_e500->guest_tlb_size[0] = KVM_E500_TLB0_SIZE;
	vcpu_e500->guest_tlb[0] =
		kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
	if (vcpu_e500->guest_tlb[0] == NULL)
		goto err_out;

	vcpu_e500->shadow_tlb_size[0] = KVM_E500_TLB0_SIZE;
	vcpu_e500->shadow_tlb[0] =
		kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
	if (vcpu_e500->shadow_tlb[0] == NULL)
		goto err_out_guest0;

	vcpu_e500->guest_tlb_size[1] = KVM_E500_TLB1_SIZE;
	vcpu_e500->guest_tlb[1] =
		kzalloc(sizeof(struct tlbe) * KVM_E500_TLB1_SIZE, GFP_KERNEL);
	if (vcpu_e500->guest_tlb[1] == NULL)
		goto err_out_shadow0;

	vcpu_e500->shadow_tlb_size[1] = tlb1_entry_num;
	vcpu_e500->shadow_tlb[1] =
		kzalloc(sizeof(struct tlbe) * tlb1_entry_num, GFP_KERNEL);
	if (vcpu_e500->shadow_tlb[1] == NULL)
		goto err_out_guest1;

	vcpu_e500->shadow_pages[0] = (struct page **)
		kzalloc(sizeof(struct page *) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
	if (vcpu_e500->shadow_pages[0] == NULL)
		goto err_out_shadow1;

	vcpu_e500->shadow_pages[1] = (struct page **)
		kzalloc(sizeof(struct page *) * tlb1_entry_num, GFP_KERNEL);
	if (vcpu_e500->shadow_pages[1] == NULL)
		goto err_out_page0;

	/* Init TLB configuration register */
	vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) & ~0xfffUL;
	vcpu_e500->tlb0cfg |= vcpu_e500->guest_tlb_size[0];
	vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) & ~0xfffUL;
	vcpu_e500->tlb1cfg |= vcpu_e500->guest_tlb_size[1];

	return 0;

err_out_page0:
	kfree(vcpu_e500->shadow_pages[0]);
err_out_shadow1:
	kfree(vcpu_e500->shadow_tlb[1]);
err_out_guest1:
	kfree(vcpu_e500->guest_tlb[1]);
err_out_shadow0:
	kfree(vcpu_e500->shadow_tlb[0]);
err_out_guest0:
	kfree(vcpu_e500->guest_tlb[0]);
err_out:
	return -1;
}
void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	kfree(vcpu_e500->shadow_pages[1]);
	kfree(vcpu_e500->shadow_pages[0]);
	kfree(vcpu_e500->shadow_tlb[1]);
	kfree(vcpu_e500->guest_tlb[1]);
	kfree(vcpu_e500->shadow_tlb[0]);
	kfree(vcpu_e500->guest_tlb[0]);
}