/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */
#include <linux/kvm_host.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include "trace.h"

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>

#define PTE_SIZE	12

static struct kmem_cache *hpte_cache;
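
/*
 * PTE_SIZE is the page shift (4k pages).  Each shadow PTE is kept on
 * four hash lists at once, so it can be looked up by effective address,
 * effective address group, virtual page or virtual page group.  The
 * helpers below compute the bucket index for each of those tables from
 * the relevant page number bits.
 */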
static inline u64 kvmppc_mmu_hash_pte(u64 eaddr)
{
	return hash_64(eaddr >> PTE_SIZE, HPTEG_HASH_BITS_PTE);
}

static inline u64 kvmppc_mmu_hash_pte_long(u64 eaddr)
{
	return hash_64((eaddr & 0x0ffff000) >> PTE_SIZE,
		       HPTEG_HASH_BITS_PTE_LONG);
}

static inline u64 kvmppc_mmu_hash_vpte(u64 vpage)
{
	return hash_64(vpage & 0xfffffffffULL, HPTEG_HASH_BITS_VPTE);
}

static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage)
{
	return hash_64((vpage & 0xffffff000ULL) >> 12,
		       HPTEG_HASH_BITS_VPTE_LONG);
}
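
/*
 * Publish a newly mapped shadow PTE on all four lookup lists.
 * Writers serialize on mmu_lock; lookups walk the lists under RCU,
 * hence the _rcu list primitives.
 */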
void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	u64 index;

	trace_kvm_book3s_mmu_map(pte);

	spin_lock(&vcpu->arch.mmu_lock);

	/* Add to ePTE list */
	index = kvmppc_mmu_hash_pte(pte->pte.eaddr);
	hlist_add_head_rcu(&pte->list_pte, &vcpu->arch.hpte_hash_pte[index]);

	/* Add to ePTE_long list */
	index = kvmppc_mmu_hash_pte_long(pte->pte.eaddr);
	hlist_add_head_rcu(&pte->list_pte_long,
			   &vcpu->arch.hpte_hash_pte_long[index]);

	/* Add to vPTE list */
	index = kvmppc_mmu_hash_vpte(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte, &vcpu->arch.hpte_hash_vpte[index]);

	/* Add to vPTE_long list */
	index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte_long,
			   &vcpu->arch.hpte_hash_vpte_long[index]);

	spin_unlock(&vcpu->arch.mmu_lock);
}
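
/* RCU callback: actually free an entry once all readers are done with it. */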
static void free_pte_rcu(struct rcu_head *head)
{
	struct hpte_cache *pte = container_of(head, struct hpte_cache, rcu_head);
	kmem_cache_free(hpte_cache, pte);
}
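
/*
 * Tear down a single shadow PTE: drop the host mapping, unhash the
 * entry from all four lists and defer the actual free to an RCU grace
 * period so that concurrent lookups never see freed memory.
 */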
static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	trace_kvm_book3s_mmu_invalidate(pte);

	/* Different for 32 and 64 bit */
	kvmppc_mmu_invalidate_pte(vcpu, pte);

	spin_lock(&vcpu->arch.mmu_lock);

	/* pte already invalidated in between? */
	if (hlist_unhashed(&pte->list_pte)) {
		spin_unlock(&vcpu->arch.mmu_lock);
		return;
	}

	hlist_del_init_rcu(&pte->list_pte);
	hlist_del_init_rcu(&pte->list_pte_long);
	hlist_del_init_rcu(&pte->list_vpte);
	hlist_del_init_rcu(&pte->list_vpte_long);

	if (pte->pte.may_write)
		kvm_release_pfn_dirty(pte->pfn);
	else
		kvm_release_pfn_clean(pte->pfn);

	spin_unlock(&vcpu->arch.mmu_lock);

	vcpu->arch.hpte_cache_count--;
	call_rcu(&pte->rcu_head, free_pte_rcu);
}
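
/*
 * Invalidate every cached shadow PTE.  Walking just the vPTE_long
 * table is sufficient, since every entry is hashed into all four
 * tables.
 */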
static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
{
	struct hpte_cache *pte;
	struct hlist_node *node;
	int i;

	rcu_read_lock();

	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
		struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i];

		hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
			invalidate_pte(vcpu, pte);
	}

	rcu_read_unlock();
}
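
/* Flush all shadow PTEs that map a single guest effective page. */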
static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
{
	struct hlist_head *list;
	struct hlist_node *node;
	struct hpte_cache *pte;

	/* Find the list of entries in the map */
	list = &vcpu->arch.hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, node, list, list_pte)
		if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}
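
/*
 * Flush all shadow PTEs whose effective address matches guest_ea in
 * bits 12-27 (the 0x0ffff000 mask), independent of the upper address
 * bits.
 */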
static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
{
	struct hlist_head *list;
	struct hlist_node *node;
	struct hpte_cache *pte;

	/* Find the list of entries in the map */
	list = &vcpu->arch.hpte_hash_pte_long[
			kvmppc_mmu_hash_pte_long(guest_ea)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, node, list, list_pte_long)
		if ((pte->pte.eaddr & 0x0ffff000UL) == guest_ea)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}
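
/*
 * Flush shadow PTEs by effective address.  ea_mask selects the flush
 * granularity and must be one of the three values handled below, e.g.
 *
 *	kvmppc_mmu_pte_flush(vcpu, ea, ~0xfffUL);	- one page
 *	kvmppc_mmu_pte_flush(vcpu, ea, 0x0ffff000);	- one page group
 *	kvmppc_mmu_pte_flush(vcpu, 0, 0);		- everything
 */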
void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
{
	trace_kvm_book3s_mmu_flush("", vcpu, guest_ea, ea_mask);
	guest_ea &= ea_mask;

	switch (ea_mask) {
	case ~0xfffUL:
		kvmppc_mmu_pte_flush_page(vcpu, guest_ea);
		break;
	case 0x0ffff000:
		kvmppc_mmu_pte_flush_long(vcpu, guest_ea);
		break;
	case 0:
		/* Doing a complete flush -> start from scratch */
		kvmppc_mmu_pte_flush_all(vcpu);
		break;
	default:
		WARN_ON(1);
		break;
	}
}
/* Flush with mask 0xfffffffff */
static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct hlist_head *list;
	struct hlist_node *node;
	struct hpte_cache *pte;
	u64 vp_mask = 0xfffffffffULL;

	list = &vcpu->arch.hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, node, list, list_vpte)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}
/* Flush with mask 0xffffff000 */
static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct hlist_head *list;
	struct hlist_node *node;
	struct hpte_cache *pte;
	u64 vp_mask = 0xffffff000ULL;

	list = &vcpu->arch.hpte_hash_vpte_long[
			kvmppc_mmu_hash_vpte_long(guest_vp)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}
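
/*
 * Flush shadow PTEs by virtual page number, with vp_mask again
 * selecting single-page or page-group granularity.
 */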
void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
{
	trace_kvm_book3s_mmu_flush("v", vcpu, guest_vp, vp_mask);
	guest_vp &= vp_mask;

	switch(vp_mask) {
	case 0xfffffffffULL:
		kvmppc_mmu_pte_vflush_short(vcpu, guest_vp);
		break;
	case 0xffffff000ULL:
		kvmppc_mmu_pte_vflush_long(vcpu, guest_vp);
		break;
	default:
		WARN_ON(1);
		return;
	}
}
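
/*
 * Flush all shadow PTEs whose guest real address falls into
 * [pa_start, pa_end).  No hash table is keyed on the real address,
 * so scan the entire vPTE_long table.
 */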
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
	struct hlist_node *node;
	struct hpte_cache *pte;
	int i;

	trace_kvm_book3s_mmu_flush("p", vcpu, pa_start, pa_end);

	rcu_read_lock();

	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
		struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i];

		hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
			if ((pte->pte.raddr >= pa_start) &&
			    (pte->pte.raddr < pa_end))
				invalidate_pte(vcpu, pte);
	}

	rcu_read_unlock();
}
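
/*
 * Allocate a new cache entry.  When the cache reaches its limit,
 * flush everything and start over with an empty cache.
 */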
struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
{
	struct hpte_cache *pte;

	pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);
	vcpu->arch.hpte_cache_count++;

	if (vcpu->arch.hpte_cache_count == HPTEG_CACHE_NUM)
		kvmppc_mmu_pte_flush_all(vcpu);

	return pte;
}
void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_pte_flush(vcpu, 0, 0);
}
static void kvmppc_mmu_hpte_init_hash(struct hlist_head *hash_list, int len)
{
	int i;

	for (i = 0; i < len; i++)
		INIT_HLIST_HEAD(&hash_list[i]);
}
int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu)
{
	/* init hpte lookup hashes */
	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_pte,
				  ARRAY_SIZE(vcpu->arch.hpte_hash_pte));
	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_pte_long,
				  ARRAY_SIZE(vcpu->arch.hpte_hash_pte_long));
	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte,
				  ARRAY_SIZE(vcpu->arch.hpte_hash_vpte));
	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte_long,
				  ARRAY_SIZE(vcpu->arch.hpte_hash_vpte_long));

	spin_lock_init(&vcpu->arch.mmu_lock);

	return 0;
}
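
/*
 * The slab cache for shadow PTE entries is global and shared by all
 * vcpus; it is set up once at subsystem init and torn down at exit.
 */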
int kvmppc_mmu_hpte_sysinit(void)
{
	/* init hpte slab cache */
	hpte_cache = kmem_cache_create("kvm-spt", sizeof(struct hpte_cache),
				       sizeof(struct hpte_cache), 0, NULL);

	return 0;
}
void kvmppc_mmu_hpte_sysexit(void)
{
	kmem_cache_destroy(hpte_cache);
}