GUI: Fix Tomato RAF theme for all builds. Compilation typo.
[tomato.git] release/src-rt-6.x.4708/linux/linux-2.6.36/arch/powerpc/kvm/44x_tlb.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>

#include <asm/tlbflush.h>
#include <asm/mmu-44x.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_44x.h>
#include "timing.h"

#include "44x_tlb.h"
#include "trace.h"
#ifndef PPC44x_TLBE_SIZE
#define PPC44x_TLBE_SIZE	PPC44x_TLB_4K
#endif

#define PAGE_SIZE_4K (1<<12)
#define PAGE_MASK_4K (~(PAGE_SIZE_4K - 1))

#define PPC44x_TLB_UATTR_MASK \
	(PPC44x_TLB_U0|PPC44x_TLB_U1|PPC44x_TLB_U2|PPC44x_TLB_U3)
#define PPC44x_TLB_USER_PERM_MASK (PPC44x_TLB_UX|PPC44x_TLB_UR|PPC44x_TLB_UW)
#define PPC44x_TLB_SUPER_PERM_MASK (PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW)
#ifdef DEBUG
void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	struct kvmppc_44x_tlbe *tlbe;
	int i;

	printk("vcpu %d TLB dump:\n", vcpu->vcpu_id);
	printk("| %2s | %3s | %8s | %8s | %8s |\n",
			"nr", "tid", "word0", "word1", "word2");

	for (i = 0; i < ARRAY_SIZE(vcpu_44x->guest_tlb); i++) {
		tlbe = &vcpu_44x->guest_tlb[i];
		if (tlbe->word0 & PPC44x_TLB_VALID)
			printk(" G%2d |  %02X | %08X | %08X | %08X |\n",
			       i, tlbe->tid, tlbe->word0, tlbe->word1,
			       tlbe->word2);
	}
}
#endif
static inline void kvmppc_44x_tlbie(unsigned int index)
{
	/* 0 <= index < 64, so the V bit is clear and we can use the index as
	 * word0. */
	asm volatile(
		"tlbwe %[index], %[index], 0\n"
		:
		: [index] "r"(index)
	);
}
static inline void kvmppc_44x_tlbre(unsigned int index,
                                    struct kvmppc_44x_tlbe *tlbe)
{
	asm volatile(
		"tlbre %[word0], %[index], 0\n"
		"mfspr %[tid], %[sprn_mmucr]\n"
		"andi. %[tid], %[tid], 0xff\n"
		"tlbre %[word1], %[index], 1\n"
		"tlbre %[word2], %[index], 2\n"
		: [word0] "=r"(tlbe->word0),
		  [word1] "=r"(tlbe->word1),
		  [word2] "=r"(tlbe->word2),
		  [tid]   "=r"(tlbe->tid)
		: [index] "r"(index),
		  [sprn_mmucr] "i"(SPRN_MMUCR)
		: "cc"
	);
}
static inline void kvmppc_44x_tlbwe(unsigned int index,
                                    struct kvmppc_44x_tlbe *stlbe)
{
	unsigned long tmp;

	asm volatile(
		"mfspr %[tmp], %[sprn_mmucr]\n"
		"rlwimi %[tmp], %[tid], 0, 0xff\n"
		"mtspr %[sprn_mmucr], %[tmp]\n"
		"tlbwe %[word0], %[index], 0\n"
		"tlbwe %[word1], %[index], 1\n"
		"tlbwe %[word2], %[index], 2\n"
		: [tmp]   "=&r"(tmp)
		: [word0] "r"(stlbe->word0),
		  [word1] "r"(stlbe->word1),
		  [word2] "r"(stlbe->word2),
		  [tid]   "r"(stlbe->tid),
		  [index] "r"(index),
		  [sprn_mmucr] "i"(SPRN_MMUCR)
	);
}
static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode)
{
	/* We only care about the guest's permission and user bits. */
	attrib &= PPC44x_TLB_PERM_MASK|PPC44x_TLB_UATTR_MASK;

	if (!usermode) {
		/* Guest is in supervisor mode, so we need to translate guest
		 * supervisor permissions into user permissions. */
		attrib &= ~PPC44x_TLB_USER_PERM_MASK;
		attrib |= (attrib & PPC44x_TLB_SUPER_PERM_MASK) << 3;
	}
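	/* (The shift works because in word2 each S* permission bit sits three
	 * bit positions below its U* counterpart, e.g.
	 * PPC44x_TLB_SX << 3 == PPC44x_TLB_UX.) */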
	/* Make sure host can always access this memory. */
	attrib |= PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW;

	/* WIMGE = 0b00100 */
	attrib |= PPC44x_TLB_M;

	return attrib;
}
/* Load shadow TLB back into hardware. */
void kvmppc_44x_tlb_load(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	for (i = 0; i <= tlb_44x_hwater; i++) {
		struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];

		if (get_tlb_v(stlbe) && get_tlb_ts(stlbe))
			kvmppc_44x_tlbwe(i, stlbe);
	}
}
static void kvmppc_44x_tlbe_set_modified(struct kvmppc_vcpu_44x *vcpu_44x,
                                         unsigned int i)
{
	vcpu_44x->shadow_tlb_mod[i] = 1;
}
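
/* Note that kvmppc_mmu_map() writes new shadow entries straight to the
 * hardware TLB without updating the shadow_tlb[] cache, so any entry flagged
 * in shadow_tlb_mod[] must be read back below before its hardware copy is
 * invalidated. */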
/* Save hardware TLB to the vcpu, and invalidate all guest mappings. */
void kvmppc_44x_tlb_put(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	for (i = 0; i <= tlb_44x_hwater; i++) {
		struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];

		if (vcpu_44x->shadow_tlb_mod[i])
			kvmppc_44x_tlbre(i, stlbe);

		if (get_tlb_v(stlbe) && get_tlb_ts(stlbe))
			kvmppc_44x_tlbie(i);
	}
}
/* Search the guest TLB for a matching entry. */
int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
                         unsigned int as)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	for (i = 0; i < ARRAY_SIZE(vcpu_44x->guest_tlb); i++) {
		struct kvmppc_44x_tlbe *tlbe = &vcpu_44x->guest_tlb[i];
		unsigned int tid;

		if (eaddr < get_tlb_eaddr(tlbe))
			continue;

		if (eaddr > get_tlb_end(tlbe))
			continue;
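		/* A TID of 0 makes a 440 TLB entry match any PID, so only
		 * non-zero TIDs are compared against the search PID. */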
		tid = get_tlb_tid(tlbe);
		if (tid && (tid != pid))
			continue;

		if (!get_tlb_v(tlbe))
			continue;

		if (get_tlb_ts(tlbe) != as)
			continue;

		return i;
	}

	return -1;
}
gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
                       gva_t eaddr)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	struct kvmppc_44x_tlbe *gtlbe = &vcpu_44x->guest_tlb[gtlb_index];
	unsigned int pgmask = get_tlb_bytes(gtlbe) - 1;

	return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}
int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.msr & MSR_IS);

	return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
}

int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.msr & MSR_DS);

	return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
}
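
/* These hooks are part of the common booke MMU interface; on 44x there is
 * presumably nothing to do here, since a guest TLB miss is simply reflected
 * back to the guest as an interrupt. */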
void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
{
}

void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
{
}
static void kvmppc_44x_shadow_release(struct kvmppc_vcpu_44x *vcpu_44x,
                                      unsigned int stlb_index)
{
	struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[stlb_index];

	if (!ref->page)
		return;

	/* Discard from the TLB. */
	/* Note: we could actually invalidate a host mapping, if the host
	 * overwrote this TLB entry since we inserted a guest mapping. */
	kvmppc_44x_tlbie(stlb_index);

	/* Now release the page. */
	if (ref->writeable)
		kvm_release_page_dirty(ref->page);
	else
		kvm_release_page_clean(ref->page);

	ref->page = NULL;

	trace_kvm_stlb_inval(stlb_index);
}
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	for (i = 0; i <= tlb_44x_hwater; i++)
		kvmppc_44x_shadow_release(vcpu_44x, i);
}
/**
 * kvmppc_mmu_map -- create a host mapping for guest memory
 *
 * If the guest wanted a larger page than the host supports, only the first
 * host page is mapped here and the rest are demand faulted.
 *
 * If the guest wanted a smaller page than the host page size, we map only the
 * guest-size page (i.e. not a full host page mapping).
 *
 * Caller must ensure that the specified guest TLB entry is safe to insert into
 * the shadow TLB.
 */
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
                    unsigned int gtlb_index)
{
	struct kvmppc_44x_tlbe stlbe;
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	struct kvmppc_44x_tlbe *gtlbe = &vcpu_44x->guest_tlb[gtlb_index];
	struct kvmppc_44x_shadow_ref *ref;
	struct page *new_page;
	hpa_t hpaddr;
	gfn_t gfn;
	u32 asid = gtlbe->tid;
	u32 flags = gtlbe->word2;
	u32 max_bytes = get_tlb_bytes(gtlbe);
	unsigned int victim;

	/* Select TLB entry to clobber. Indirectly guard against races with
	 * the TLB miss handler by disabling interrupts. */
	local_irq_disable();
	victim = ++tlb_44x_index;
	if (victim > tlb_44x_hwater)
		victim = 0;
	tlb_44x_index = victim;
	local_irq_enable();
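	/* tlb_44x_index is the same round-robin replacement pointer the
	 * host's own TLB miss handlers use, and tlb_44x_hwater marks the top
	 * of the replaceable range (entries above it hold pinned host
	 * mappings), so shadow and host entries share one victim rotation. */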
	/* Get reference to new page. */
	gfn = gpaddr >> PAGE_SHIFT;
	new_page = gfn_to_page(vcpu->kvm, gfn);
	if (is_error_page(new_page)) {
		printk(KERN_ERR "Couldn't get guest page for gfn %llx!\n",
		       (unsigned long long)gfn);
		kvm_release_page_clean(new_page);
		return;
	}
	hpaddr = page_to_phys(new_page);

	/* Invalidate any previous shadow mappings. */
	kvmppc_44x_shadow_release(vcpu_44x, victim);

	/* Force TS=1 for all guest mappings. */
	stlbe.word0 = PPC44x_TLB_VALID | PPC44x_TLB_TS;
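	/* The guest always runs with MSR[IS]=MSR[DS]=1, i.e. entirely in
	 * address space 1, so TS=1 entries belong to the guest and can never
	 * collide with the host's own TS=0 translations. This is also why
	 * kvmppc_44x_tlb_load()/_put() treat "valid && TS" as "guest". */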
	if (max_bytes >= PAGE_SIZE) {
		/* Guest mapping is larger than or equal to host page size.
		 * We can use a "native" host mapping. */
		stlbe.word0 |= (gvaddr & PAGE_MASK) | PPC44x_TLBE_SIZE;
	} else {
		/* Guest mapping is smaller than host page size. We must
		 * restrict the size of the mapping to be at most the smaller
		 * of the two, but for simplicity we fall back to a 4K mapping
		 * (this is probably what the guest is using anyway). */
		stlbe.word0 |= (gvaddr & PAGE_MASK_4K) | PPC44x_TLB_4K;

		/* 'hpaddr' is a host page, which is larger than the mapping
		 * we're inserting here. To compensate, we must add the
		 * in-page offset to the sub-page. */
		hpaddr |= gpaddr & (PAGE_MASK ^ PAGE_MASK_4K);
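		/* For example, with a 64K host PAGE_SIZE,
		 * (PAGE_MASK ^ PAGE_MASK_4K) is 0xf000, so this keeps gpaddr
		 * bits 15:12 and selects the correct 4K sub-page within the
		 * 64K host page. */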
	}

	stlbe.word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
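	/* word1 now holds the real page number: bits 31:10 of the host
	 * physical address, plus the ERPN (bits 35:32 of the 440's 36-bit
	 * physical address space) in the low nibble. */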
	stlbe.word2 = kvmppc_44x_tlb_shadow_attrib(flags,
	                                           vcpu->arch.msr & MSR_PR);
	stlbe.tid = !(asid & 0xff);
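	/* Guest TID=0 (match-all, i.e. kernel) entries become shadow TID=1
	 * and are visible only while shadow_pid=1; every other guest TID maps
	 * to shadow TID=0. See kvmppc_mmu_priv_switch() and kvmppc_set_pid()
	 * below. */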
	/* Keep track of the reference so we can properly release it later. */
	ref = &vcpu_44x->shadow_refs[victim];
	ref->page = new_page;
	ref->gtlb_index = gtlb_index;
	ref->writeable = !!(stlbe.word2 & PPC44x_TLB_UW);
	ref->tid = stlbe.tid;

	/* Insert shadow mapping into hardware TLB. */
	kvmppc_44x_tlbe_set_modified(vcpu_44x, victim);
	kvmppc_44x_tlbwe(victim, &stlbe);
	trace_kvm_stlb_write(victim, stlbe.tid, stlbe.word0, stlbe.word1,
	                     stlbe.word2);
}
/* For a particular guest TLB entry, invalidate the corresponding host TLB
 * mappings and release the host pages. */
static void kvmppc_44x_invalidate(struct kvm_vcpu *vcpu,
                                  unsigned int gtlb_index)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) {
		struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i];
		if (ref->gtlb_index == gtlb_index)
			kvmppc_44x_shadow_release(vcpu_44x, i);
	}
}
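
/* The guest runs under host PID 0 in user mode and host PID 1 in supervisor
 * mode. Since a 440 TLB entry matches when its TID is 0 or equal to the
 * current PID, shadow TID=1 (guest kernel) entries match only under PID 1,
 * while shadow TID=0 (guest user) entries match under either PID. */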
void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
{
	vcpu->arch.shadow_pid = !usermode;
}
void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	if (unlikely(vcpu->arch.pid == new_pid))
		return;

	vcpu->arch.pid = new_pid;

	/* Guest userspace runs with TID=0 mappings and PID=0, to make sure it
	 * can't access guest kernel mappings (TID=1). When we switch to a new
	 * guest PID, which will also use host PID=0, we must discard the old
	 * guest userspace mappings. */
	for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) {
		struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i];

		if (ref->tid == 0)
			kvmppc_44x_shadow_release(vcpu_44x, i);
	}
}
static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
                             const struct kvmppc_44x_tlbe *tlbe)
{
	gpa_t gpa;

	if (!get_tlb_v(tlbe))
		return 0;

	/* Does it match current guest AS? */
	if (get_tlb_ts(tlbe) != !!(vcpu->arch.msr & MSR_IS))
		return 0;

	gpa = get_tlb_raddr(tlbe);
	if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
		/* Mapping is not for RAM. */
		return 0;

	return 1;
}
int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	struct kvmppc_44x_tlbe *tlbe;
	unsigned int gtlb_index;

	gtlb_index = kvmppc_get_gpr(vcpu, ra);
	if (gtlb_index >= KVM44x_GUEST_TLB_SIZE) {
		printk("%s: index %d\n", __func__, gtlb_index);
		kvmppc_dump_vcpu(vcpu);
		return EMULATE_FAIL;
	}

	tlbe = &vcpu_44x->guest_tlb[gtlb_index];

	/* Invalidate shadow mappings for the about-to-be-clobbered TLB
	 * entry. */
	if (tlbe->word0 & PPC44x_TLB_VALID)
		kvmppc_44x_invalidate(vcpu, gtlb_index);

	switch (ws) {
	case PPC44x_TLB_PAGEID:
		tlbe->tid = get_mmucr_stid(vcpu);
		tlbe->word0 = kvmppc_get_gpr(vcpu, rs);
		break;

	case PPC44x_TLB_XLAT:
		tlbe->word1 = kvmppc_get_gpr(vcpu, rs);
		break;

	case PPC44x_TLB_ATTRIB:
		tlbe->word2 = kvmppc_get_gpr(vcpu, rs);
		break;

	default:
		return EMULATE_FAIL;
	}
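
	/* If the updated entry is host-safe (valid, in the current guest AS,
	 * and backed by RAM), install the shadow mapping eagerly instead of
	 * waiting for the guest to fault on it. */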
	if (tlbe_is_host_safe(vcpu, tlbe)) {
		gva_t eaddr;
		gpa_t gpaddr;
		u32 bytes;

		eaddr = get_tlb_eaddr(tlbe);
		gpaddr = get_tlb_raddr(tlbe);

		/* Use the advertised page size to mask effective and real
		 * addrs. */
		bytes = get_tlb_bytes(tlbe);
		eaddr &= ~(bytes - 1);
		gpaddr &= ~(bytes - 1);

		kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
	}

	trace_kvm_gtlb_write(gtlb_index, tlbe->tid, tlbe->word0, tlbe->word1,
	                     tlbe->word2);

	kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
	return EMULATE_DONE;
}
int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc)
{
	u32 ea;
	int gtlb_index;
	unsigned int as = get_mmucr_sts(vcpu);
	unsigned int pid = get_mmucr_stid(vcpu);

	ea = kvmppc_get_gpr(vcpu, rb);
	if (ra)
		ea += kvmppc_get_gpr(vcpu, ra);

	gtlb_index = kvmppc_44x_tlb_index(vcpu, ea, pid, as);
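	/* For the record form (tlbsx.), CR0[EQ] (mask 0x20000000) reports
	 * whether the search found an entry. */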
	if (rc) {
		u32 cr = kvmppc_get_cr(vcpu);

		if (gtlb_index < 0)
			kvmppc_set_cr(vcpu, cr & ~0x20000000);
		else
			kvmppc_set_cr(vcpu, cr | 0x20000000);
	}

	kvmppc_set_gpr(vcpu, rt, gtlb_index);

	kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
	return EMULATE_DONE;
}