/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
 * TLB handlers run from KSEG0
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/tlbdebug.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#define KVM_GUEST_PC_TLB    0
#define KVM_GUEST_SP_TLB    1

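/*
 * Descriptive note (not in the original source): these two indices are
 * assumed to name host TLB entries historically reserved by the trap &
 * emulate implementation for the guest's program counter and stack pages
 * while handling guest TLB faults.
 */
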
#ifdef CONFIG_KVM_MIPS_VZ
unsigned long GUESTID_MASK;
EXPORT_SYMBOL_GPL(GUESTID_MASK);
unsigned long GUESTID_FIRST_VERSION;
EXPORT_SYMBOL_GPL(GUESTID_FIRST_VERSION);
unsigned long GUESTID_VERSION_MASK;
EXPORT_SYMBOL_GPL(GUESTID_VERSION_MASK);
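
/*
 * Descriptive note: these masks implement a versioned GuestID allocator,
 * analogous to the MIPS ASID versioning scheme, so stale root TLB entries
 * tagged with an old GuestID can be recognised without a full flush.
 */
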
static u32 kvm_mips_get_root_asid(struct kvm_vcpu *vcpu)
{
        struct mm_struct *gpa_mm = &vcpu->kvm->arch.gpa_mm;

        if (cpu_has_guestid)
                return 0;
        else
                return cpu_asid(smp_processor_id(), gpa_mm);
}
#endif

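/*
 * Descriptive note: under VZ, GPA->RPA mappings use the root ASID above (or
 * a GuestID when available), while under trap & emulate guest kernel mode
 * and guest user mode each run under their own host mm and ASID, returned
 * by the two helpers below.
 */
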
static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
        struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
        int cpu = smp_processor_id();

        return cpu_asid(cpu, kern_mm);
}

static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
        struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
        int cpu = smp_processor_id();

        return cpu_asid(cpu, user_mm);
}

/* Structure defining a TLB entry data set. */

void kvm_mips_dump_host_tlbs(void)
{
        unsigned long flags;

        local_irq_save(flags);

        kvm_info("HOST TLBs:\n");
        dump_tlb_regs();
        pr_info("\n");
        dump_tlb_all();

        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_host_tlbs);

void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_mips_tlb tlb;
        int i;

        kvm_info("Guest TLBs:\n");
        kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

        for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
                tlb = vcpu->arch.guest_tlb[i];
                kvm_info("TLB%c%3d Hi 0x%08lx ",
                         (tlb.tlb_lo[0] | tlb.tlb_lo[1]) & ENTRYLO_V
                                                        ? ' ' : '*',
                         i, tlb.tlb_hi);
                kvm_info("Lo0=0x%09llx %c%c attr %lx ",
                         (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[0]),
                         (tlb.tlb_lo[0] & ENTRYLO_D) ? 'D' : ' ',
                         (tlb.tlb_lo[0] & ENTRYLO_G) ? 'G' : ' ',
                         (tlb.tlb_lo[0] & ENTRYLO_C) >> ENTRYLO_C_SHIFT);
                kvm_info("Lo1=0x%09llx %c%c attr %lx sz=%lx\n",
                         (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[1]),
                         (tlb.tlb_lo[1] & ENTRYLO_D) ? 'D' : ' ',
                         (tlb.tlb_lo[1] & ENTRYLO_G) ? 'G' : ' ',
                         (tlb.tlb_lo[1] & ENTRYLO_C) >> ENTRYLO_C_SHIFT,
                         tlb.tlb_mask);
        }
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs);

int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
        int index = -1;
        int i;
        struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;

        for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
                if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
                    TLB_HI_ASID_HIT(tlb[i], entryhi)) {
                        index = i;
                        break;
                }
        }

        kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
                  __func__, entryhi, index, tlb[i].tlb_lo[0], tlb[i].tlb_lo[1]);

        return index;
}
EXPORT_SYMBOL_GPL(kvm_mips_guest_tlb_lookup);

static int _kvm_mips_host_tlb_inv(unsigned long entryhi)
{
        int idx;

        write_c0_entryhi(entryhi);
        mtc0_tlbw_hazard();

        tlb_probe();
        tlb_probe_hazard();
        idx = read_c0_index();

        if (idx >= current_cpu_data.tlbsize)
                BUG();

        if (idx >= 0) {
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                write_c0_entrylo0(0);
                write_c0_entrylo1(0);
                mtc0_tlbw_hazard();

                tlb_write_indexed();
                tlbw_use_hazard();
        }

        return idx;
}

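/*
 * Descriptive note: UNIQUE_ENTRYHI(idx) produces an EntryHi value in
 * unmapped kernel space that is distinct for every index, so the
 * invalidated slot can never match a real lookup or conflict with another
 * TLB entry.
 */
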
int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va,
                          bool user, bool kernel)
{
        /*
         * Initialize idx_user and idx_kernel to workaround bogus
         * maybe-initialized warning when using GCC 6.
         */
        int idx_user = 0, idx_kernel = 0;
        unsigned long flags, old_entryhi;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();

        if (user)
                idx_user = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
                                                kvm_mips_get_user_asid(vcpu));
        if (kernel)
                idx_kernel = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
                                                kvm_mips_get_kernel_asid(vcpu));

        write_c0_entryhi(old_entryhi);
        mtc0_tlbw_hazard();

        local_irq_restore(flags);

        /*
         * We don't want to get reserved instruction exceptions for missing tlb
         * entries.
         */
        if (cpu_has_vtag_icache)
                flush_icache_all();

        if (user && idx_user >= 0)
                kvm_debug("%s: Invalidated guest user entryhi %#lx @ idx %d\n",
                          __func__, (va & VPN2_MASK) |
                                    kvm_mips_get_user_asid(vcpu), idx_user);
        if (kernel && idx_kernel >= 0)
                kvm_debug("%s: Invalidated guest kernel entryhi %#lx @ idx %d\n",
                          __func__, (va & VPN2_MASK) |
                                    kvm_mips_get_kernel_asid(vcpu), idx_kernel);

        return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv);

#ifdef CONFIG_KVM_MIPS_VZ

/* GuestID management */

/**
 * clear_root_gid() - Set GuestCtl1.RID for normal root operation.
 */
static inline void clear_root_gid(void)
{
        if (cpu_has_guestid) {
                clear_c0_guestctl1(MIPS_GCTL1_RID);
                mtc0_tlbw_hazard();
        }
}

/**
 * set_root_gid_to_guest_gid() - Set GuestCtl1.RID to match GuestCtl1.ID.
 *
 * Sets the root GuestID to match the current guest GuestID, for TLB operation
 * on the GPA->RPA mappings in the root TLB.
 *
 * The caller must be sure to disable HTW while the root GID is set, and
 * possibly longer if TLB registers are modified.
 */
static inline void set_root_gid_to_guest_gid(void)
{
        unsigned int guestctl1;

        if (cpu_has_guestid) {
                back_to_back_c0_hazard();
                guestctl1 = read_c0_guestctl1();
                guestctl1 = (guestctl1 & ~MIPS_GCTL1_RID) |
                        ((guestctl1 & MIPS_GCTL1_ID) >> MIPS_GCTL1_ID_SHIFT)
                                                 << MIPS_GCTL1_RID_SHIFT;
                write_c0_guestctl1(guestctl1);
        }
}

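/*
 * Descriptive note: the usual calling pattern for the two helpers above, as
 * seen in the functions below, is:
 *
 *      htw_stop();
 *      set_root_gid_to_guest_gid();
 *      ... probe/read/write root TLB entries for the guest ...
 *      clear_root_gid();
 *      htw_start();
 */
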
int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
        int idx;
        unsigned long flags, old_entryhi;

        local_irq_save(flags);
        htw_stop();

        /* Set root GuestID for root probe and write of guest TLB entry */
        set_root_gid_to_guest_gid();

        old_entryhi = read_c0_entryhi();

        idx = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
                                     kvm_mips_get_root_asid(vcpu));

        write_c0_entryhi(old_entryhi);
        clear_root_gid();
        mtc0_tlbw_hazard();

        htw_start();
        local_irq_restore(flags);

        /*
         * We don't want to get reserved instruction exceptions for missing tlb
         * entries.
         */
        if (cpu_has_vtag_icache)
                flush_icache_all();

        if (idx > 0)
                kvm_debug("%s: Invalidated root entryhi %#lx @ idx %d\n",
                          __func__, (va & VPN2_MASK) |
                                    kvm_mips_get_root_asid(vcpu), idx);

        return 0;
}
EXPORT_SYMBOL_GPL(kvm_vz_host_tlb_inv);

/**
 * kvm_vz_guest_tlb_lookup() - Lookup a guest VZ TLB mapping.
 * @vcpu:       KVM VCPU pointer.
 * @gva:        Guest virtual address in a TLB mapped guest segment.
 * @gpa:        Pointer to output guest physical address it maps to.
 *
 * Converts a guest virtual address in a guest TLB mapped segment to a guest
 * physical address, by probing the guest TLB.
 *
 * Returns:     0 if guest TLB mapping exists for @gva. *@gpa will have been
 *              written.
 *              -EFAULT if no guest TLB mapping exists for @gva. *@gpa may not
 *              have been written.
 */
int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
                            unsigned long *gpa)
{
        unsigned long o_entryhi, o_entrylo[2], o_pagemask;
        unsigned int o_index;
        unsigned long entrylo[2], pagemask, pagemaskbit, pa;
        unsigned long flags;
        int index;

        /* Probe the guest TLB for a mapping */
        local_irq_save(flags);
        /* Set root GuestID for root probe of guest TLB entry */
        htw_stop();
        set_root_gid_to_guest_gid();

        o_entryhi = read_gc0_entryhi();
        o_index = read_gc0_index();

        write_gc0_entryhi((o_entryhi & 0x3ff) | (gva & ~0xfffl));
        mtc0_tlbw_hazard();
        guest_tlb_probe();
        tlb_probe_hazard();

        index = read_gc0_index();
        if (index < 0) {
                /* No match, fail */
                write_gc0_entryhi(o_entryhi);
                write_gc0_index(o_index);

                clear_root_gid();
                htw_start();
                local_irq_restore(flags);
                return -EFAULT;
        }

        /* Match! read the TLB entry */
        o_entrylo[0] = read_gc0_entrylo0();
        o_entrylo[1] = read_gc0_entrylo1();
        o_pagemask = read_gc0_pagemask();

        mtc0_tlbr_hazard();
        guest_tlb_read();
        tlb_read_hazard();

        entrylo[0] = read_gc0_entrylo0();
        entrylo[1] = read_gc0_entrylo1();
        pagemask = ~read_gc0_pagemask() & ~0x1fffl;

        write_gc0_entryhi(o_entryhi);
        write_gc0_index(o_index);
        write_gc0_entrylo0(o_entrylo[0]);
        write_gc0_entrylo1(o_entrylo[1]);
        write_gc0_pagemask(o_pagemask);

        clear_root_gid();
        htw_start();
        local_irq_restore(flags);

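        /*
         * Descriptive note: pagemaskbit below isolates the lowest bit set in
         * pagemask, shifted right once; that is the GVA bit selecting between
         * the even (EntryLo0) and odd (EntryLo1) page of the pair. E.g. for
         * 4kB pages (PageMask 0x0) pagemaskbit is bit 12; for 16kB pages
         * (PageMask 0x6000) it is bit 14.
         */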
        /* Select one of the EntryLo values and interpret the GPA */
        pagemaskbit = (pagemask ^ (pagemask & (pagemask - 1))) >> 1;
        pa = entrylo[!!(gva & pagemaskbit)];

        /*
         * TLB entry may have become invalid since TLB probe if physical FTLB
         * entries are shared between threads (e.g. I6400).
         */
        if (!(pa & ENTRYLO_V))
                return -EFAULT;

        /*
         * Note, this doesn't take guest MIPS32 XPA into account, where PFN is
         * split with XI/RI in the middle.
         */
        pa = (pa << 6) & ~0xfffl;
        pa |= gva & ~(pagemask | pagemaskbit);

        *gpa = pa;
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_vz_guest_tlb_lookup);

/**
 * kvm_vz_local_flush_roottlb_all_guests() - Flush all root TLB entries for
 * guests.
 *
 * Invalidate all entries in root tlb which are GPA mappings.
 */
void kvm_vz_local_flush_roottlb_all_guests(void)
{
        unsigned long flags;
        unsigned long old_entryhi, old_pagemask, old_guestctl1;
        int entry;

        if (WARN_ON(!cpu_has_guestid))
                return;

        local_irq_save(flags);
        htw_stop();

        /* TLBR may clobber EntryHi.ASID, PageMask, and GuestCtl1.RID */
        old_entryhi = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();
        old_guestctl1 = read_c0_guestctl1();

        /*
         * Invalidate guest entries in root TLB while leaving root entries
         * intact when possible.
         */
        for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
                write_c0_index(entry);
                mtc0_tlbw_hazard();
                tlb_read();
                tlb_read_hazard();

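                /*
                 * Descriptive note: TLBR loads the matched entry's GuestID
                 * into GuestCtl1.RID (hence GuestCtl1 is saved and restored
                 * around this loop); RID == 0 identifies a root mapping that
                 * must be preserved.
                 */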
                /* Don't invalidate non-guest (RVA) mappings in the root TLB */
                if (!(read_c0_guestctl1() & MIPS_GCTL1_RID))
                        continue;

                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(entry));
                write_c0_entrylo0(0);
                write_c0_entrylo1(0);
                write_c0_guestctl1(0);
                mtc0_tlbw_hazard();
                tlb_write_indexed();
        }

        write_c0_entryhi(old_entryhi);
        write_c0_pagemask(old_pagemask);
        write_c0_guestctl1(old_guestctl1);
        tlbw_use_hazard();

        htw_start();
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_vz_local_flush_roottlb_all_guests);

/**
 * kvm_vz_local_flush_guesttlb_all() - Flush all guest TLB entries.
 *
 * Invalidate all entries in guest tlb irrespective of guestid.
 */
void kvm_vz_local_flush_guesttlb_all(void)
{
        unsigned long flags;
        unsigned long old_index;
        unsigned long old_entryhi;
        unsigned long old_entrylo[2];
        unsigned long old_pagemask;
        int entry;
        u64 cvmmemctl2 = 0;

        local_irq_save(flags);

        /* Preserve all clobbered guest registers */
        old_index = read_gc0_index();
        old_entryhi = read_gc0_entryhi();
        old_entrylo[0] = read_gc0_entrylo0();
        old_entrylo[1] = read_gc0_entrylo1();
        old_pagemask = read_gc0_pagemask();

        switch (current_cpu_type()) {
        case CPU_CAVIUM_OCTEON3:
                /* Inhibit machine check due to multiple matching TLB entries */
                cvmmemctl2 = read_c0_cvmmemctl2();
                cvmmemctl2 |= CVMMEMCTL2_INHIBITTS;
                write_c0_cvmmemctl2(cvmmemctl2);
                break;
        }

        /* Invalidate guest entries in guest TLB */
        write_gc0_entrylo0(0);
        write_gc0_entrylo1(0);
        write_gc0_pagemask(0);
        for (entry = 0; entry < current_cpu_data.guest.tlbsize; entry++) {
                /* Make sure all entries differ. */
                write_gc0_index(entry);
                write_gc0_entryhi(UNIQUE_GUEST_ENTRYHI(entry));
                mtc0_tlbw_hazard();
                guest_tlb_write_indexed();
        }

        if (cvmmemctl2) {
                cvmmemctl2 &= ~CVMMEMCTL2_INHIBITTS;
                write_c0_cvmmemctl2(cvmmemctl2);
        }

        write_gc0_index(old_index);
        write_gc0_entryhi(old_entryhi);
        write_gc0_entrylo0(old_entrylo[0]);
        write_gc0_entrylo1(old_entrylo[1]);
        write_gc0_pagemask(old_pagemask);
        tlbw_use_hazard();

        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_vz_local_flush_guesttlb_all);

/**
 * kvm_vz_save_guesttlb() - Save a range of guest TLB entries.
 * @buf:        Buffer to write TLB entries into.
 * @index:      Start index.
 * @count:      Number of entries to save.
 *
 * Save a range of guest TLB entries. The caller must ensure interrupts are
 * disabled.
 */
void kvm_vz_save_guesttlb(struct kvm_mips_tlb *buf, unsigned int index,
                          unsigned int count)
{
        unsigned int end = index + count;
        unsigned long old_entryhi, old_entrylo0, old_entrylo1, old_pagemask;
        unsigned int guestctl1 = 0;
        int old_index, i;

        /* Save registers we're about to clobber */
        old_index = read_gc0_index();
        old_entryhi = read_gc0_entryhi();
        old_entrylo0 = read_gc0_entrylo0();
        old_entrylo1 = read_gc0_entrylo1();
        old_pagemask = read_gc0_pagemask();

        /* Set root GuestID for root probe */
        htw_stop();
        set_root_gid_to_guest_gid();
        if (cpu_has_guestid)
                guestctl1 = read_c0_guestctl1();

        /* Read each entry from guest TLB */
        for (i = index; i < end; ++i, ++buf) {
                write_gc0_index(i);

                mtc0_tlbr_hazard();
                guest_tlb_read();
                tlb_read_hazard();

                if (cpu_has_guestid &&
                    (read_c0_guestctl1() ^ guestctl1) & MIPS_GCTL1_RID) {
                        /* Entry invalid or belongs to another guest */
                        buf->tlb_hi = UNIQUE_GUEST_ENTRYHI(i);
                        buf->tlb_lo[0] = 0;
                        buf->tlb_lo[1] = 0;
                        buf->tlb_mask = 0;
                } else {
                        /* Entry belongs to the right guest */
                        buf->tlb_hi = read_gc0_entryhi();
                        buf->tlb_lo[0] = read_gc0_entrylo0();
                        buf->tlb_lo[1] = read_gc0_entrylo1();
                        buf->tlb_mask = read_gc0_pagemask();
                }
        }

        /* Clear root GuestID again */
        clear_root_gid();
        htw_start();

        /* Restore clobbered registers */
        write_gc0_index(old_index);
        write_gc0_entryhi(old_entryhi);
        write_gc0_entrylo0(old_entrylo0);
        write_gc0_entrylo1(old_entrylo1);
        write_gc0_pagemask(old_pagemask);

        tlbw_use_hazard();
}
EXPORT_SYMBOL_GPL(kvm_vz_save_guesttlb);
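
/*
 * Descriptive note: the save/load pair here is assumed to back the VZ
 * implementation's preservation of a VCPU's wired guest TLB entries across
 * context switches (save on vcpu put, load on vcpu load).
 */
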
/**
 * kvm_vz_load_guesttlb() - Load a range of guest TLB entries.
 * @buf:        Buffer to read TLB entries from.
 * @index:      Start index.
 * @count:      Number of entries to load.
 *
 * Load a range of guest TLB entries. The caller must ensure interrupts are
 * disabled.
 */
void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
                          unsigned int count)
{
        unsigned int end = index + count;
        unsigned long old_entryhi, old_entrylo0, old_entrylo1, old_pagemask;
        int old_index, i;

        /* Save registers we're about to clobber */
        old_index = read_gc0_index();
        old_entryhi = read_gc0_entryhi();
        old_entrylo0 = read_gc0_entrylo0();
        old_entrylo1 = read_gc0_entrylo1();
        old_pagemask = read_gc0_pagemask();

        /* Set root GuestID for root write of guest TLB entries */
        htw_stop();
        set_root_gid_to_guest_gid();

        /* Write each entry to guest TLB */
        for (i = index; i < end; ++i, ++buf) {
                write_gc0_index(i);
                write_gc0_entryhi(buf->tlb_hi);
                write_gc0_entrylo0(buf->tlb_lo[0]);
                write_gc0_entrylo1(buf->tlb_lo[1]);
                write_gc0_pagemask(buf->tlb_mask);

                mtc0_tlbw_hazard();
                guest_tlb_write_indexed();
        }

        /* Clear root GuestID again */
        clear_root_gid();
        htw_start();

        /* Restore clobbered registers */
        write_gc0_index(old_index);
        write_gc0_entryhi(old_entryhi);
        write_gc0_entrylo0(old_entrylo0);
        write_gc0_entrylo1(old_entrylo1);
        write_gc0_pagemask(old_pagemask);

        tlbw_use_hazard();
}
EXPORT_SYMBOL_GPL(kvm_vz_load_guesttlb);

#endif

/**
 * kvm_mips_suspend_mm() - Suspend the active mm.
 * @cpu:        The CPU we're running on.
 *
 * Suspend the active_mm, ready for a switch to a KVM guest virtual address
 * space. This is left active for the duration of guest context, including time
 * with interrupts enabled, so we need to be careful not to confuse e.g. cache
 * flushes.
 *
 * kvm_mips_resume_mm() should be called before context switching to a
 * different process so that we don't need to worry about reference counting.
 *
 * This needs to be in static kernel code to avoid exporting init_mm.
 */
void kvm_mips_suspend_mm(int cpu)
{
        cpumask_clear_cpu(cpu, mm_cpumask(current->active_mm));
        current->active_mm = &init_mm;
}
EXPORT_SYMBOL_GPL(kvm_mips_suspend_mm);
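
/*
 * Descriptive note: callers are assumed to pair kvm_mips_suspend_mm() and
 * kvm_mips_resume_mm() around guest context, so the temporary borrow of
 * init_mm as active_mm is never visible across a context switch.
 */
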
/**
 * kvm_mips_resume_mm() - Resume the current process mm.
 * @cpu:        The CPU we're running on.
 *
 * Resume the mm of the current process, after a switch back from a KVM guest
 * virtual address space (see kvm_mips_suspend_mm()).
 */
void kvm_mips_resume_mm(int cpu)
{
        cpumask_set_cpu(cpu, mm_cpumask(current->mm));
        current->active_mm = current->mm;
}
EXPORT_SYMBOL_GPL(kvm_mips_resume_mm);