/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
 * TLB handlers run from KSEG0
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/tlbdebug.h>
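
/*
 * r4kcache.h emits different cache-op helpers when CONFIG_MIPS_MT is set;
 * presumably the plain (non-MT) variants are wanted here, so the symbol is
 * undefined around the include below and restored afterwards.
 */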
#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT
#define KVM_GUEST_PC_TLB	0
#define KVM_GUEST_SP_TLB	1
#ifdef CONFIG_KVM_MIPS_VZ
unsigned long GUESTID_MASK;
EXPORT_SYMBOL_GPL(GUESTID_MASK);
unsigned long GUESTID_FIRST_VERSION;
EXPORT_SYMBOL_GPL(GUESTID_FIRST_VERSION);
unsigned long GUESTID_VERSION_MASK;
EXPORT_SYMBOL_GPL(GUESTID_VERSION_MASK);

static u32 kvm_mips_get_root_asid(struct kvm_vcpu *vcpu)
{
	struct mm_struct *gpa_mm = &vcpu->kvm->arch.gpa_mm;

	if (cpu_has_guestid)
		return 0;
	else
		return cpu_asid(smp_processor_id(), gpa_mm);
}
#endif
static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	int cpu = smp_processor_id();

	return cpu_asid(cpu, kern_mm);
}
static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
	int cpu = smp_processor_id();

	return cpu_asid(cpu, user_mm);
}
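
/*
 * A minimal usage sketch (illustrative only, hence #if 0): these helpers
 * supply the ASID half of an EntryHi value, combined with the VPN2 of a
 * guest virtual address, exactly as kvm_mips_host_tlb_inv() does below.
 * "va" and "vcpu" are stand-ins for a guest address and a live VCPU.
 */
#if 0
	unsigned long hi_user = (va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu);
	unsigned long hi_kern = (va & VPN2_MASK) | kvm_mips_get_kernel_asid(vcpu);
#endif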
/* Structure defining a TLB entry data set. */
void kvm_mips_dump_host_tlbs(void)
{
	unsigned long flags;

	local_irq_save(flags);

	kvm_info("HOST TLBs:\n");
	dump_tlb_regs();
	pr_info("\n");
	dump_tlb_all();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_host_tlbs);
void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb tlb;
	int i;

	kvm_info("Guest TLBs:\n");
	kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		tlb = vcpu->arch.guest_tlb[i];
		kvm_info("TLB%c%3d Hi 0x%08lx ",
			 (tlb.tlb_lo[0] | tlb.tlb_lo[1]) & ENTRYLO_V
							? ' ' : '*',
			 i, tlb.tlb_hi);
		kvm_info("Lo0=0x%09llx %c%c attr %lx ",
			 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[0]),
			 (tlb.tlb_lo[0] & ENTRYLO_D) ? 'D' : ' ',
			 (tlb.tlb_lo[0] & ENTRYLO_G) ? 'G' : ' ',
			 (tlb.tlb_lo[0] & ENTRYLO_C) >> ENTRYLO_C_SHIFT);
		kvm_info("Lo1=0x%09llx %c%c attr %lx sz=%lx\n",
			 (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[1]),
			 (tlb.tlb_lo[1] & ENTRYLO_D) ? 'D' : ' ',
			 (tlb.tlb_lo[1] & ENTRYLO_G) ? 'G' : ' ',
			 (tlb.tlb_lo[1] & ENTRYLO_C) >> ENTRYLO_C_SHIFT,
			 tlb.tlb_mask);
	}
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs);
int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
	int i;
	int index = -1;
	struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;

	for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
		if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
		    TLB_HI_ASID_HIT(tlb[i], entryhi)) {
			index = i;
			break;
		}
	}

	/* Only dereference the entry on a hit; on a miss the loop leaves
	 * i == KVM_MIPS_GUEST_TLB_SIZE, so tlb[i] would be out of bounds.
	 */
	if (index >= 0)
		kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
			  __func__, entryhi, index,
			  tlb[index].tlb_lo[0], tlb[index].tlb_lo[1]);

	return index;
}
EXPORT_SYMBOL_GPL(kvm_mips_guest_tlb_lookup);
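
/*
 * A minimal usage sketch (illustrative only, hence #if 0): the lookup is
 * keyed on an EntryHi value combining the VPN2 of the faulting address with
 * the current guest ASID, which is how the trap-and-emulate fault handlers
 * call it. "vcpu" and "gva" are stand-ins for a live VCPU and fault address.
 */
#if 0
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int index;

	index = kvm_mips_guest_tlb_lookup(vcpu, (gva & VPN2_MASK) |
			(kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID));
#endif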
static int _kvm_mips_host_tlb_inv(unsigned long entryhi)
{
	int idx;

	write_c0_entryhi(entryhi);
	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	if (idx >= current_cpu_data.tlbsize)
		BUG();

	if (idx >= 0) {
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		tlbw_use_hazard();
	}

	return idx;
}
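
/*
 * Note on the write above: a matching entry is not removed but overwritten
 * with an invalid one, since MIPS TLBs must never contain duplicate matching
 * entries. UNIQUE_ENTRYHI(idx) produces a per-index EntryHi in unmapped
 * space that cannot collide with any other entry, and the zeroed EntryLo
 * values leave the replacement invalid.
 */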
int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va,
			  bool user, bool kernel)
{
	/*
	 * Initialize idx_user and idx_kernel to workaround bogus
	 * maybe-initialized warning when using GCC 6.
	 */
	int idx_user = 0, idx_kernel = 0;
	unsigned long flags, old_entryhi;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	if (user)
		idx_user = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
						  kvm_mips_get_user_asid(vcpu));
	if (kernel)
		idx_kernel = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
						kvm_mips_get_kernel_asid(vcpu));

	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);

	/*
	 * We don't want to get reserved instruction exceptions for missing tlb
	 * entries.
	 */
	if (cpu_has_vtag_icache)
		flush_icache_all();

	if (user && idx_user >= 0)
		kvm_debug("%s: Invalidated guest user entryhi %#lx @ idx %d\n",
			  __func__, (va & VPN2_MASK) |
				    kvm_mips_get_user_asid(vcpu), idx_user);
	if (kernel && idx_kernel >= 0)
		kvm_debug("%s: Invalidated guest kernel entryhi %#lx @ idx %d\n",
			  __func__, (va & VPN2_MASK) |
				    kvm_mips_get_kernel_asid(vcpu), idx_kernel);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv);
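
/*
 * A minimal call sketch (illustrative only, hence #if 0): after a guest
 * mapping changes, drop any stale host TLB entry for that address in both
 * the guest-kernel and guest-user ASID spaces. "vcpu" and "gva" are
 * stand-ins.
 */
#if 0
	kvm_mips_host_tlb_inv(vcpu, gva, true, true);
#endif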
#ifdef CONFIG_KVM_MIPS_VZ

/* GuestID management */

/**
 * clear_root_gid() - Set GuestCtl1.RID for normal root operation.
 */
static inline void clear_root_gid(void)
{
	if (cpu_has_guestid) {
		clear_c0_guestctl1(MIPS_GCTL1_RID);
		mtc0_tlbw_hazard();
	}
}
/**
 * set_root_gid_to_guest_gid() - Set GuestCtl1.RID to match GuestCtl1.ID.
 *
 * Sets the root GuestID to match the current guest GuestID, for TLB operation
 * on the GPA->RPA mappings in the root TLB.
 *
 * The caller must be sure to disable HTW while the root GID is set, and
 * possibly longer if TLB registers are modified.
 */
static inline void set_root_gid_to_guest_gid(void)
{
	unsigned int guestctl1;

	if (cpu_has_guestid) {
		back_to_back_c0_hazard();
		guestctl1 = read_c0_guestctl1();
		guestctl1 = (guestctl1 & ~MIPS_GCTL1_RID) |
			((guestctl1 & MIPS_GCTL1_ID) >> MIPS_GCTL1_ID_SHIFT)
						<< MIPS_GCTL1_RID_SHIFT;
		write_c0_guestctl1(guestctl1);
		mtc0_tlbw_hazard();
	}
}
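
/*
 * Worked example of the RID <- ID copy above, assuming the usual VZ field
 * layout (GuestCtl1.ID in bits 7:0, GuestCtl1.RID in bits 23:16): for
 * GuestCtl1.ID = 0x5, the ID field is extracted (0x5), shifted up to the
 * RID position (0x00050000), and merged over the cleared RID field, so
 * subsequent root TLB operations target GuestID 5.
 */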
int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
	int idx;
	unsigned long flags, old_entryhi;

	local_irq_save(flags);
	htw_stop();

	/* Set root GuestID for root probe and write of guest TLB entry */
	set_root_gid_to_guest_gid();

	old_entryhi = read_c0_entryhi();

	idx = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
				     kvm_mips_get_root_asid(vcpu));

	write_c0_entryhi(old_entryhi);
	clear_root_gid();
	mtc0_tlbw_hazard();

	htw_start();
	local_irq_restore(flags);

	/*
	 * We don't want to get reserved instruction exceptions for missing tlb
	 * entries.
	 */
	if (cpu_has_vtag_icache)
		flush_icache_all();

	if (idx > 0)
		kvm_debug("%s: Invalidated root entryhi %#lx @ idx %d\n",
			  __func__, (va & VPN2_MASK) |
				    kvm_mips_get_root_asid(vcpu), idx);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_vz_host_tlb_inv);
/**
 * kvm_vz_guest_tlb_lookup() - Lookup a guest VZ TLB mapping.
 * @vcpu:	KVM VCPU pointer.
 * @gva:	Guest virtual address in a TLB mapped guest segment.
 * @gpa:	Pointer to output guest physical address it maps to.
 *
 * Converts a guest virtual address in a guest TLB mapped segment to a guest
 * physical address, by probing the guest TLB.
 *
 * Returns:	0 if guest TLB mapping exists for @gva. *@gpa will have been
 *		written.
 *		-EFAULT if no guest TLB mapping exists for @gva. *@gpa may not
 *		have been written.
 */
int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
			    unsigned long *gpa)
{
	unsigned long o_entryhi, o_entrylo[2], o_pagemask;
	unsigned int o_index;
	unsigned long entrylo[2], pagemask, pagemaskbit, pa;
	unsigned long flags;
	int index;

	/* Probe the guest TLB for a mapping */
	local_irq_save(flags);
	/* Set root GuestID for root probe of guest TLB entry */
	htw_stop();
	set_root_gid_to_guest_gid();

	o_entryhi = read_gc0_entryhi();
	o_index = read_gc0_index();

	write_gc0_entryhi((o_entryhi & 0x3ff) | (gva & ~0xfffl));
	mtc0_tlbw_hazard();
	guest_tlb_probe();
	tlb_probe_hazard();

	index = read_gc0_index();
	if (index < 0) {
		/* No match, fail */
		write_gc0_entryhi(o_entryhi);
		write_gc0_index(o_index);

		clear_root_gid();
		htw_start();
		local_irq_restore(flags);
		return -EFAULT;
	}

	/* Match! read the TLB entry */
	o_entrylo[0] = read_gc0_entrylo0();
	o_entrylo[1] = read_gc0_entrylo1();
	o_pagemask = read_gc0_pagemask();

	mtc0_tlbr_hazard();
	guest_tlb_read();
	tlb_read_hazard();

	entrylo[0] = read_gc0_entrylo0();
	entrylo[1] = read_gc0_entrylo1();
	pagemask = ~read_gc0_pagemask() & ~0x1fffl;

	write_gc0_entryhi(o_entryhi);
	write_gc0_index(o_index);
	write_gc0_entrylo0(o_entrylo[0]);
	write_gc0_entrylo1(o_entrylo[1]);
	write_gc0_pagemask(o_pagemask);

	clear_root_gid();
	htw_start();
	local_irq_restore(flags);

	/* Select one of the EntryLo values and interpret the GPA */
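	/*
	 * Worked example (4K guest pages): gc0_pagemask is 0, so pagemask
	 * computed above is ~0x1fff (...ffffe000). The expression below
	 * isolates its lowest set bit (0x2000) and shifts it down to 0x1000,
	 * the even/odd page-select bit, so entrylo[0] vs entrylo[1] is chosen
	 * by bit 12 of the GVA. For 16K pages the select bit is 0x4000.
	 */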
	pagemaskbit = (pagemask ^ (pagemask & (pagemask - 1))) >> 1;
	pa = entrylo[!!(gva & pagemaskbit)];

	/*
	 * TLB entry may have become invalid since TLB probe if physical FTLB
	 * entries are shared between threads (e.g. I6400).
	 */
	if (!(pa & ENTRYLO_V))
		return -EFAULT;

	/*
	 * Note, this doesn't take guest MIPS32 XPA into account, where PFN is
	 * split with XI/RI in the middle.
	 */
	pa = (pa << 6) & ~0xfffl;
	pa |= gva & ~(pagemask | pagemaskbit);

	*gpa = pa;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_vz_guest_tlb_lookup);
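
/*
 * A minimal usage sketch (illustrative only, hence #if 0): translate a
 * guest virtual address in a TLB-mapped segment to a guest physical address
 * before handling the fault against the GPA mappings. "vcpu" and "gva" are
 * stand-ins.
 */
#if 0
	unsigned long gpa;

	if (kvm_vz_guest_tlb_lookup(vcpu, gva, &gpa))
		return -EFAULT;		/* no valid guest mapping */
#endif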
/**
 * kvm_vz_local_flush_roottlb_all_guests() - Flush all root TLB entries for
 * guests.
 *
 * Invalidate all entries in root tlb which are GPA mappings.
 */
void kvm_vz_local_flush_roottlb_all_guests(void)
{
	unsigned long flags;
	unsigned long old_entryhi, old_pagemask, old_guestctl1;
	int entry;

	if (WARN_ON(!cpu_has_guestid))
		return;

	local_irq_save(flags);
	htw_stop();

	/* TLBR may clobber EntryHi.ASID, PageMask, and GuestCtl1.RID */
	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	old_guestctl1 = read_c0_guestctl1();

	/*
	 * Invalidate guest entries in root TLB while leaving root entries
	 * intact when possible.
	 */
	for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_read();
		tlb_read_hazard();

		/* Don't invalidate non-guest (RVA) mappings in the root TLB */
		if (!(read_c0_guestctl1() & MIPS_GCTL1_RID))
			continue;

		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		write_c0_guestctl1(0);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
	}

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	write_c0_guestctl1(old_guestctl1);
	tlbw_use_hazard();

	htw_start();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_vz_local_flush_roottlb_all_guests);
/**
 * kvm_vz_local_flush_guesttlb_all() - Flush all guest TLB entries.
 *
 * Invalidate all entries in guest tlb irrespective of guestid.
 */
void kvm_vz_local_flush_guesttlb_all(void)
{
	unsigned long flags;
	unsigned long old_index;
	unsigned long old_entryhi;
	unsigned long old_entrylo[2];
	unsigned long old_pagemask;
	int entry;
	u64 cvmmemctl2 = 0;

	local_irq_save(flags);

	/* Preserve all clobbered guest registers */
	old_index = read_gc0_index();
	old_entryhi = read_gc0_entryhi();
	old_entrylo[0] = read_gc0_entrylo0();
	old_entrylo[1] = read_gc0_entrylo1();
	old_pagemask = read_gc0_pagemask();

	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON3:
		/* Inhibit machine check due to multiple matching TLB entries */
		cvmmemctl2 = read_c0_cvmmemctl2();
		cvmmemctl2 |= CVMMEMCTL2_INHIBITTS;
		write_c0_cvmmemctl2(cvmmemctl2);
		break;
	}

	/* Invalidate guest entries in guest TLB */
	write_gc0_entrylo0(0);
	write_gc0_entrylo1(0);
	write_gc0_pagemask(0);
	for (entry = 0; entry < current_cpu_data.guest.tlbsize; entry++) {
		/* Make sure all entries differ. */
		write_gc0_index(entry);
		write_gc0_entryhi(UNIQUE_GUEST_ENTRYHI(entry));
		mtc0_tlbw_hazard();
		guest_tlb_write_indexed();
	}

	if (cvmmemctl2) {
		cvmmemctl2 &= ~CVMMEMCTL2_INHIBITTS;
		write_c0_cvmmemctl2(cvmmemctl2);
	}

	write_gc0_index(old_index);
	write_gc0_entryhi(old_entryhi);
	write_gc0_entrylo0(old_entrylo[0]);
	write_gc0_entrylo1(old_entrylo[1]);
	write_gc0_pagemask(old_pagemask);
	tlbw_use_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_vz_local_flush_guesttlb_all);
/**
 * kvm_vz_save_guesttlb() - Save a range of guest TLB entries.
 * @buf:	Buffer to write TLB entries into.
 * @index:	Start index.
 * @count:	Number of entries to save.
 *
 * Save a range of guest TLB entries. The caller must ensure interrupts are
 * disabled.
 */
void kvm_vz_save_guesttlb(struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count)
{
	unsigned int end = index + count;
	unsigned long old_entryhi, old_entrylo0, old_entrylo1, old_pagemask;
	unsigned int guestctl1 = 0;
	int old_index, i;

	/* Save registers we're about to clobber */
	old_index = read_gc0_index();
	old_entryhi = read_gc0_entryhi();
	old_entrylo0 = read_gc0_entrylo0();
	old_entrylo1 = read_gc0_entrylo1();
	old_pagemask = read_gc0_pagemask();

	/* Set root GuestID for root probe */
	htw_stop();
	set_root_gid_to_guest_gid();
	if (cpu_has_guestid)
		guestctl1 = read_c0_guestctl1();

	/* Read each entry from guest TLB */
	for (i = index; i < end; ++i, ++buf) {
		write_gc0_index(i);

		mtc0_tlbr_hazard();
		guest_tlb_read();
		tlb_read_hazard();

		if (cpu_has_guestid &&
		    (read_c0_guestctl1() ^ guestctl1) & MIPS_GCTL1_RID) {
			/* Entry invalid or belongs to another guest */
			buf->tlb_hi = UNIQUE_GUEST_ENTRYHI(i);
			buf->tlb_lo[0] = 0;
			buf->tlb_lo[1] = 0;
			buf->tlb_mask = 0;
		} else {
			/* Entry belongs to the right guest */
			buf->tlb_hi = read_gc0_entryhi();
			buf->tlb_lo[0] = read_gc0_entrylo0();
			buf->tlb_lo[1] = read_gc0_entrylo1();
			buf->tlb_mask = read_gc0_pagemask();
		}
	}

	/* Clear root GuestID again */
	clear_root_gid();
	htw_start();

	/* Restore clobbered registers */
	write_gc0_index(old_index);
	write_gc0_entryhi(old_entryhi);
	write_gc0_entrylo0(old_entrylo0);
	write_gc0_entrylo1(old_entrylo1);
	write_gc0_pagemask(old_pagemask);

	tlbw_use_hazard();
}
EXPORT_SYMBOL_GPL(kvm_vz_save_guesttlb);
/**
 * kvm_vz_load_guesttlb() - Load a range of guest TLB entries.
 * @buf:	Buffer to read TLB entries from.
 * @index:	Start index.
 * @count:	Number of entries to load.
 *
 * Load a range of guest TLB entries. The caller must ensure interrupts are
 * disabled.
 */
void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count)
{
	unsigned int end = index + count;
	unsigned long old_entryhi, old_entrylo0, old_entrylo1, old_pagemask;
	int old_index, i;

	/* Save registers we're about to clobber */
	old_index = read_gc0_index();
	old_entryhi = read_gc0_entryhi();
	old_entrylo0 = read_gc0_entrylo0();
	old_entrylo1 = read_gc0_entrylo1();
	old_pagemask = read_gc0_pagemask();

	/* Set root GuestID for root probe */
	htw_stop();
	set_root_gid_to_guest_gid();

	/* Write each entry to guest TLB */
	for (i = index; i < end; ++i, ++buf) {
		write_gc0_index(i);
		write_gc0_entryhi(buf->tlb_hi);
		write_gc0_entrylo0(buf->tlb_lo[0]);
		write_gc0_entrylo1(buf->tlb_lo[1]);
		write_gc0_pagemask(buf->tlb_mask);

		mtc0_tlbw_hazard();
		guest_tlb_write_indexed();
	}

	/* Clear root GuestID again */
	clear_root_gid();
	htw_start();

	/* Restore clobbered registers */
	write_gc0_index(old_index);
	write_gc0_entryhi(old_entryhi);
	write_gc0_entrylo0(old_entrylo0);
	write_gc0_entrylo1(old_entrylo1);
	write_gc0_pagemask(old_pagemask);

	tlbw_use_hazard();
}
EXPORT_SYMBOL_GPL(kvm_vz_load_guesttlb);
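
/*
 * A minimal pairing sketch (illustrative only, hence #if 0): the two helpers
 * above let a caller snapshot a range of guest TLB entries on VCPU
 * context-out and restore them on context-in, with interrupts disabled as
 * their documentation requires. "buf" and "n" are stand-ins for a
 * caller-managed array of struct kvm_mips_tlb and its length.
 */
#if 0
	kvm_vz_save_guesttlb(buf, 0, n);	/* VCPU context-out */
	/* ... */
	kvm_vz_load_guesttlb(buf, 0, n);	/* VCPU context-in */
#endif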
#endif
/**
 * kvm_mips_suspend_mm() - Suspend the active mm.
 * @cpu:	The CPU we're running on.
 *
 * Suspend the active_mm, ready for a switch to a KVM guest virtual address
 * space. This is left active for the duration of guest context, including time
 * with interrupts enabled, so we need to be careful not to confuse e.g. cache
 * management IPIs.
 *
 * kvm_mips_resume_mm() should be called before context switching to a different
 * process so we don't need to worry about reference counting.
 *
 * This needs to be in static kernel code to avoid exporting init_mm.
 */
void kvm_mips_suspend_mm(int cpu)
{
	cpumask_clear_cpu(cpu, mm_cpumask(current->active_mm));
	current->active_mm = &init_mm;
}
EXPORT_SYMBOL_GPL(kvm_mips_suspend_mm);
/**
 * kvm_mips_resume_mm() - Resume the current process mm.
 * @cpu:	The CPU we're running on.
 *
 * Resume the mm of the current process, after a switch back from a KVM guest
 * virtual address space (see kvm_mips_suspend_mm()).
 */
void kvm_mips_resume_mm(int cpu)
{
	cpumask_set_cpu(cpu, mm_cpumask(current->mm));
	current->active_mm = current->mm;
}
EXPORT_SYMBOL_GPL(kvm_mips_resume_mm);
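
/*
 * A minimal pairing sketch (illustrative only, hence #if 0): the two helpers
 * above bracket time spent in a guest address space, as in the KVM
 * entry/exit paths, where they run with interrupts disabled. "cpu" stands in
 * for smp_processor_id().
 */
#if 0
	kvm_mips_suspend_mm(cpu);	/* active_mm becomes init_mm */
	/* ... guest context ... */
	kvm_mips_resume_mm(cpu);	/* active_mm becomes current->mm */
#endif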