2 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
4 * Author: Yu Liu <yu.liu@freescale.com>
5 * Scott Wood <scottwood@freescale.com>
6 * Ashish Kalra <ashish.kalra@freescale.com>
7 * Varun Sethi <varun.sethi@freescale.com>
10 * This file is based on arch/powerpc/kvm/44x_tlb.h and
11 * arch/powerpc/include/asm/kvm_44x.h by Hollis Blanchard <hollisb@us.ibm.com>,
12 * Copyright IBM Corp. 2007-2008
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License, version 2, as
16 * published by the Free Software Foundation.
22 #include <linux/kvm_host.h>
23 #include <asm/mmu-book3e.h>
/* Number of guest PID registers shadowed per vcpu (e500v2: PID0..PID2). */
#define E500_PID_NUM   3
/* Number of guest TLB arrays (TLB0 and TLB1). */
#define E500_TLB_NUM   2

/* Flag bits kept in the per-guest-entry "flags" word (E500_TLB_*): */
/* entry is mapped somewhere in host TLB */
#define E500_TLB_VALID (1 << 0)
/* TLB1 entry is mapped by host TLB1, tracked by bitmaps */
#define E500_TLB_BITMAP (1 << 1)
/* TLB1 entry is mapped by host TLB0 */
#define E500_TLB_TLB0 (1 << 2)
37 pfn_t pfn
; /* valid only for TLB0, except briefly */
38 unsigned int flags
; /* E500_TLB_* */
#ifdef CONFIG_KVM_E500V2
/*
 * NOTE(review): the body and #endif of this conditional were lost in
 * extraction; restored from the upstream header -- confirm. The table
 * itself is opaque here (defined in the e500v2 implementation file).
 */
struct vcpu_id_table;
#endif
/* Geometry (total entries, associativity, set count) of one guest TLB. */
struct kvmppc_e500_tlb_params {
	int entries, ways, sets;
};
53 struct kvmppc_vcpu_e500
{
56 /* Unmodified copy of the guest's TLB -- shared with host userspace. */
57 struct kvm_book3e_206_tlb_entry
*gtlb_arch
;
59 /* Starting entry number in gtlb_arch[] */
60 int gtlb_offset
[E500_TLB_NUM
];
62 /* KVM internal information associated with each guest TLB entry */
63 struct tlbe_priv
*gtlb_priv
[E500_TLB_NUM
];
65 struct kvmppc_e500_tlb_params gtlb_params
[E500_TLB_NUM
];
67 unsigned int gtlb_nv
[E500_TLB_NUM
];
69 unsigned int host_tlb1_nv
;
78 struct page
**shared_tlb_pages
;
79 int num_shared_tlb_pages
;
82 unsigned int *h2g_tlb1_rmap
;
84 /* Minimum and maximum address mapped my TLB1 */
85 unsigned long tlb1_min_eaddr
;
86 unsigned long tlb1_max_eaddr
;
88 #ifdef CONFIG_KVM_E500V2
89 u32 pid
[E500_PID_NUM
];
92 struct vcpu_id_table
*idt
;
96 static inline struct kvmppc_vcpu_e500
*to_e500(struct kvm_vcpu
*vcpu
)
98 return container_of(vcpu
, struct kvmppc_vcpu_e500
, vcpu
);
/* This geometry is the legacy default -- can be overridden by userspace */
#define KVM_E500_TLB0_WAY_SIZE		128
#define KVM_E500_TLB0_WAY_NUM		2

#define KVM_E500_TLB0_SIZE  (KVM_E500_TLB0_WAY_SIZE * KVM_E500_TLB0_WAY_NUM)
#define KVM_E500_TLB1_SIZE  16

/* Pack a (tlbsel, esel) pair into one int handle, and unpack it again. */
#define index_of(tlbsel, esel)	(((tlbsel) << 16) | ((esel) & 0xFFFF))
#define tlbsel_of(index)	((index) >> 16)
#define esel_of(index)		((index) & 0xFFFF)

/* MAS3 permission bits, split by privilege level */
#define E500_TLB_USER_PERM_MASK (MAS3_UX|MAS3_UR|MAS3_UW)
#define E500_TLB_SUPER_PERM_MASK (MAS3_SX|MAS3_SR|MAS3_SW)
/*
 * NOTE(review): the body of MAS2_ATTRIB_MASK was lost in extraction;
 * restored from the upstream header -- confirm against the original.
 */
#define MAS2_ATTRIB_MASK \
	  (MAS2_X0 | MAS2_X1)
#define MAS3_ATTRIB_MASK \
	  (MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3 \
	   | E500_TLB_USER_PERM_MASK | E500_TLB_SUPER_PERM_MASK)
121 int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500
*vcpu_e500
,
123 int kvmppc_e500_emul_tlbwe(struct kvm_vcpu
*vcpu
);
124 int kvmppc_e500_emul_tlbre(struct kvm_vcpu
*vcpu
);
125 int kvmppc_e500_emul_tlbivax(struct kvm_vcpu
*vcpu
, gva_t ea
);
126 int kvmppc_e500_emul_tlbilx(struct kvm_vcpu
*vcpu
, int type
, gva_t ea
);
127 int kvmppc_e500_emul_tlbsx(struct kvm_vcpu
*vcpu
, gva_t ea
);
128 int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500
*vcpu_e500
);
129 void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500
*vcpu_e500
);
131 void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu
*vcpu
, struct kvm_sregs
*sregs
);
132 int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu
*vcpu
, struct kvm_sregs
*sregs
);
#ifdef CONFIG_KVM_E500V2
/* Shadow-ID allocation for e500v2 (closing #endif restored -- it was
 * lost in extraction). */
unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
				 unsigned int as, unsigned int gid,
				 unsigned int pr, int avoid_recursion);
#endif
141 /* TLB helper functions */
142 static inline unsigned int
143 get_tlb_size(const struct kvm_book3e_206_tlb_entry
*tlbe
)
145 return (tlbe
->mas1
>> 7) & 0x1f;
148 static inline gva_t
get_tlb_eaddr(const struct kvm_book3e_206_tlb_entry
*tlbe
)
150 return tlbe
->mas2
& MAS2_EPN
;
153 static inline u64
get_tlb_bytes(const struct kvm_book3e_206_tlb_entry
*tlbe
)
155 unsigned int pgsize
= get_tlb_size(tlbe
);
156 return 1ULL << 10 << pgsize
;
159 static inline gva_t
get_tlb_end(const struct kvm_book3e_206_tlb_entry
*tlbe
)
161 u64 bytes
= get_tlb_bytes(tlbe
);
162 return get_tlb_eaddr(tlbe
) + bytes
- 1;
165 static inline u64
get_tlb_raddr(const struct kvm_book3e_206_tlb_entry
*tlbe
)
167 return tlbe
->mas7_3
& ~0xfffULL
;
170 static inline unsigned int
171 get_tlb_tid(const struct kvm_book3e_206_tlb_entry
*tlbe
)
173 return (tlbe
->mas1
>> 16) & 0xff;
176 static inline unsigned int
177 get_tlb_ts(const struct kvm_book3e_206_tlb_entry
*tlbe
)
179 return (tlbe
->mas1
>> 12) & 0x1;
182 static inline unsigned int
183 get_tlb_v(const struct kvm_book3e_206_tlb_entry
*tlbe
)
185 return (tlbe
->mas1
>> 31) & 0x1;
188 static inline unsigned int
189 get_tlb_iprot(const struct kvm_book3e_206_tlb_entry
*tlbe
)
191 return (tlbe
->mas1
>> 30) & 0x1;
194 static inline unsigned int
195 get_tlb_tsize(const struct kvm_book3e_206_tlb_entry
*tlbe
)
197 return (tlbe
->mas1
& MAS1_TSIZE_MASK
) >> MAS1_TSIZE_SHIFT
;
200 static inline unsigned int get_cur_pid(struct kvm_vcpu
*vcpu
)
202 return vcpu
->arch
.pid
& 0xff;
205 static inline unsigned int get_cur_as(struct kvm_vcpu
*vcpu
)
207 return !!(vcpu
->arch
.shared
->msr
& (MSR_IS
| MSR_DS
));
210 static inline unsigned int get_cur_pr(struct kvm_vcpu
*vcpu
)
212 return !!(vcpu
->arch
.shared
->msr
& MSR_PR
);
215 static inline unsigned int get_cur_spid(const struct kvm_vcpu
*vcpu
)
217 return (vcpu
->arch
.shared
->mas6
>> 16) & 0xff;
220 static inline unsigned int get_cur_sas(const struct kvm_vcpu
*vcpu
)
222 return vcpu
->arch
.shared
->mas6
& 0x1;
225 static inline unsigned int get_tlb_tlbsel(const struct kvm_vcpu
*vcpu
)
228 * Manual says that tlbsel has 2 bits wide.
229 * Since we only have two TLBs, only lower bit is used.
231 return (vcpu
->arch
.shared
->mas0
>> 28) & 0x1;
234 static inline unsigned int get_tlb_nv_bit(const struct kvm_vcpu
*vcpu
)
236 return vcpu
->arch
.shared
->mas0
& 0xfff;
239 static inline unsigned int get_tlb_esel_bit(const struct kvm_vcpu
*vcpu
)
241 return (vcpu
->arch
.shared
->mas0
>> 16) & 0xfff;
244 static inline int tlbe_is_host_safe(const struct kvm_vcpu
*vcpu
,
245 const struct kvm_book3e_206_tlb_entry
*tlbe
)
249 if (!get_tlb_v(tlbe
))
252 #ifndef CONFIG_KVM_BOOKE_HV
253 /* Does it match current guest AS? */
254 /* XXX what about IS != DS? */
255 if (get_tlb_ts(tlbe
) != !!(vcpu
->arch
.shared
->msr
& MSR_IS
))
259 gpa
= get_tlb_raddr(tlbe
);
260 if (!gfn_to_memslot(vcpu
->kvm
, gpa
>> PAGE_SHIFT
))
261 /* Mapping is not for RAM. */
267 static inline struct kvm_book3e_206_tlb_entry
*get_entry(
268 struct kvmppc_vcpu_e500
*vcpu_e500
, int tlbsel
, int entry
)
270 int offset
= vcpu_e500
->gtlb_offset
[tlbsel
];
271 return &vcpu_e500
->gtlb_arch
[offset
+ entry
];
/* Host-TLB invalidation helpers: one guest entry, or everything. */
void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
			   struct kvm_book3e_206_tlb_entry *gtlbe);
void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500);
278 #ifdef CONFIG_KVM_BOOKE_HV
279 #define kvmppc_e500_get_tlb_stid(vcpu, gtlbe) get_tlb_tid(gtlbe)
280 #define get_tlbmiss_tid(vcpu) get_cur_pid(vcpu)
281 #define get_tlb_sts(gtlbe) (gtlbe->mas1 & MAS1_TS)
283 unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu
*vcpu
,
284 struct kvm_book3e_206_tlb_entry
*gtlbe
);
286 static inline unsigned int get_tlbmiss_tid(struct kvm_vcpu
*vcpu
)
288 struct kvmppc_vcpu_e500
*vcpu_e500
= to_e500(vcpu
);
289 unsigned int tidseld
= (vcpu
->arch
.shared
->mas4
>> 16) & 0xf;
291 return vcpu_e500
->pid
[tidseld
];
294 /* Force TS=1 for all guest mappings. */
295 #define get_tlb_sts(gtlbe) (MAS1_TS)
296 #endif /* !BOOKE_HV */
298 #endif /* KVM_E500_H */