/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/hash.h>
#include <linux/slab.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>

/* Page shift; shadow PTEs track 4k pages, matching the ~0xfff masks below */
#define PTE_SIZE	12

/* #define DEBUG_MMU */

#ifdef DEBUG_MMU
#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_mmu(a, ...) do { } while(0)
#endif

static struct kmem_cache *hpte_cache;

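/*
 * Shadow PTEs are tracked on three hash lists at different granularities:
 * by effective address page (ePTE), by virtual page (vPTE, the low 36 bits
 * of vpage) and by virtual page group (vPTE_long, vpage bits 12-35). Each
 * granularity matches one of the flush operations below.
 */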
static inline u64 kvmppc_mmu_hash_pte(u64 eaddr)
{
	return hash_64(eaddr >> PTE_SIZE, HPTEG_HASH_BITS_PTE);
}

static inline u64 kvmppc_mmu_hash_vpte(u64 vpage)
{
	return hash_64(vpage & 0xfffffffffULL, HPTEG_HASH_BITS_VPTE);
}

static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage)
{
	return hash_64((vpage & 0xffffff000ULL) >> 12,
		       HPTEG_HASH_BITS_VPTE_LONG);
}

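/* Register a shadow PTE on all three hash lists so every flush variant can find it */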
void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	u64 index;

	/* Add to ePTE list */
	index = kvmppc_mmu_hash_pte(pte->pte.eaddr);
	hlist_add_head(&pte->list_pte, &vcpu->arch.hpte_hash_pte[index]);

	/* Add to vPTE list */
	index = kvmppc_mmu_hash_vpte(pte->pte.vpage);
	hlist_add_head(&pte->list_vpte, &vcpu->arch.hpte_hash_vpte[index]);

	/* Add to vPTE_long list */
	index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage);
	hlist_add_head(&pte->list_vpte_long,
		       &vcpu->arch.hpte_hash_vpte_long[index]);
}

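/*
 * Remove one shadow PTE: invalidate the host mapping, release the backing
 * page (marking it dirty if the guest could write to it), unlink the entry
 * from all three hash lists and return it to the slab cache.
 */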
static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	dprintk_mmu("KVM: Flushing SPT: 0x%lx (0x%llx) -> 0x%llx\n",
		    pte->pte.eaddr, pte->pte.vpage, pte->host_va);

	/* Different for 32 and 64 bit */
	kvmppc_mmu_invalidate_pte(vcpu, pte);

	if (pte->pte.may_write)
		kvm_release_pfn_dirty(pte->pfn);
	else
		kvm_release_pfn_clean(pte->pfn);

	hlist_del(&pte->list_pte);
	hlist_del(&pte->list_vpte);
	hlist_del(&pte->list_vpte_long);

	vcpu->arch.hpte_cache_count--;
	kmem_cache_free(hpte_cache, pte);
}

static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
{
	struct hpte_cache *pte;
	struct hlist_node *node, *tmp;
	int i;

	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
		struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i];

		hlist_for_each_entry_safe(pte, node, tmp, list, list_vpte_long)
			invalidate_pte(vcpu, pte);
	}
}

static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
{
	struct hlist_head *list;
	struct hlist_node *node, *tmp;
	struct hpte_cache *pte;

	/* Find the list of entries in the map */
	list = &vcpu->arch.hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_safe(pte, node, tmp, list, list_pte)
		if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)
			invalidate_pte(vcpu, pte);
}

void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
{
	u64 i;

	dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%lx & 0x%lx\n",
		    vcpu->arch.hpte_cache_count, guest_ea, ea_mask);

	guest_ea &= ea_mask;

	switch (ea_mask) {
	case ~0xfffUL:
		kvmppc_mmu_pte_flush_page(vcpu, guest_ea);
		break;
	case 0x0ffff000:
		/* 32-bit flush w/o segment, go through all possible segments */
		for (i = 0; i < 0x100000000ULL; i += 0x10000000ULL)
			kvmppc_mmu_pte_flush(vcpu, guest_ea | i, ~0xfffUL);
		break;
	case 0:
		/* Doing a complete flush -> start from scratch */
		kvmppc_mmu_pte_flush_all(vcpu);
		break;
	default:
		WARN_ON(1);
		break;
	}
}

/* Flush with mask 0xfffffffff */
static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct hlist_head *list;
	struct hlist_node *node, *tmp;
	struct hpte_cache *pte;
	u64 vp_mask = 0xfffffffffULL;

	list = &vcpu->arch.hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_safe(pte, node, tmp, list, list_vpte)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);
}

/* Flush with mask 0xffffff000 */
static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct hlist_head *list;
	struct hlist_node *node, *tmp;
	struct hpte_cache *pte;
	u64 vp_mask = 0xffffff000ULL;

	list = &vcpu->arch.hpte_hash_vpte_long[
		kvmppc_mmu_hash_vpte_long(guest_vp)];

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_safe(pte, node, tmp, list, list_vpte_long)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);
}

void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
{
	dprintk_mmu("KVM: Flushing %d Shadow vPTEs: 0x%llx & 0x%llx\n",
		    vcpu->arch.hpte_cache_count, guest_vp, vp_mask);

	switch (vp_mask) {
	case 0xfffffffffULL:
		kvmppc_mmu_pte_vflush_short(vcpu, guest_vp);
		break;
	case 0xffffff000ULL:
		kvmppc_mmu_pte_vflush_long(vcpu, guest_vp);
		break;
	default:
		WARN_ON(1);
		return;
	}
}

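/*
 * Flush all shadow PTEs whose guest real address falls inside
 * [pa_start, pa_end). There is no hash by real address, so walk the
 * coarsest (vPTE_long) lists and test every entry.
 */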
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
	struct hlist_node *node, *tmp;
	struct hpte_cache *pte;
	int i;

	dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%lx - 0x%lx\n",
		    vcpu->arch.hpte_cache_count, pa_start, pa_end);

	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
		struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i];

		hlist_for_each_entry_safe(pte, node, tmp, list, list_vpte_long)
			if ((pte->pte.raddr >= pa_start) &&
			    (pte->pte.raddr < pa_end))
				invalidate_pte(vcpu, pte);
	}
}

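/*
 * Allocate a tracking structure for a new shadow PTE. Once the cache
 * reaches HPTEG_CACHE_NUM entries, all shadow PTEs are flushed so the
 * cache starts over from scratch.
 */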
struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
{
	struct hpte_cache *pte;

	pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);
	vcpu->arch.hpte_cache_count++;

	if (vcpu->arch.hpte_cache_count == HPTEG_CACHE_NUM)
		kvmppc_mmu_pte_flush_all(vcpu);

	return pte;
}

void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_pte_flush(vcpu, 0, 0);
}

static void kvmppc_mmu_hpte_init_hash(struct hlist_head *hash_list, int len)
{
	int i;

	for (i = 0; i < len; i++)
		INIT_HLIST_HEAD(&hash_list[i]);
}

int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu)
{
	/* init hpte lookup hashes */
	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_pte,
				  ARRAY_SIZE(vcpu->arch.hpte_hash_pte));
	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte,
				  ARRAY_SIZE(vcpu->arch.hpte_hash_vpte));
	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte_long,
				  ARRAY_SIZE(vcpu->arch.hpte_hash_vpte_long));

	return 0;
}

int kvmppc_mmu_hpte_sysinit(void)
{
	/* init hpte slab cache */
	hpte_cache = kmem_cache_create("kvm-spt", sizeof(struct hpte_cache),
				       sizeof(struct hpte_cache), 0, NULL);

	return 0;
}

void kvmppc_mmu_hpte_sysexit(void)
{
	kmem_cache_destroy(hpte_cache);
}