/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 *
 * Synthesize TLB refill handlers at runtime.
 *
 * Copyright (C) 2004, 2005, 2006, 2008  Thiemo Seufer
 * Copyright (C) 2005, 2007, 2008, 2009  Maciej W. Rozycki
 * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
 *
 * ... and the days got worse and worse and now you see
 * I've gone completely out of my mind.
 *
 * They're coming to take me away haha
 * they're coming to take me away hoho hihi haha
 * to the funny farm where code is beautiful all the time ...
 *
 * (Condolences to Napoleon XIV)
 */
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/init.h>

#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/uasm.h>
static inline int r45k_bvahwbug(void)
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}

static inline int r4k_250MHZhwbug(void)
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}
static inline int __maybe_unused bcm1250_m3_war(void)
{
	return BCM1250_M3_WAR;
}

static inline int __maybe_unused r10000_llsc_war(void)
{
	return R10000_LLSC_WAR;
}
/*
 * Found by experiment: At least some revisions of the 4kc can, under
 * some circumstances, throw a machine check exception triggered by
 * invalid values in the index register.  Delaying the tlbp instruction
 * until after the next branch, plus adding an additional nop in front
 * of tlbwi/tlbwr, avoids the invalid index register values.  Nobody
 * knows why; it's not an issue caused by the core RTL.
 */
static int __cpuinit m4kc_tlbp_war(void)
{
	return (current_cpu_data.processor_id & 0xffff00) ==
	       (PRID_COMP_MIPS | PRID_IMP_4KC);
}
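
/*
 * An illustrative sketch only (not emitted as-is anywhere): with the
 * M4KC workaround active, the generated handlers order things as
 *
 *	<branch on the pte check>
 *	 nop
 *	tlbp			# probe delayed until after the branch
 *	...
 *	nop			# extra nop ahead of the write
 *	tlbwi / tlbwr
 *
 * which is the pattern described in the comment above.
 */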
/* Handle labels (which must be positive integers). */
enum label_id {
	label_second_part = 1,
	label_leave,
#ifdef MODULE_START
	label_module_alloc,
#endif
	label_vmalloc,
	label_vmalloc_done,
	label_tlbw_hazard,
	label_split,
	label_nopage_tlbl,
	label_nopage_tlbs,
	label_nopage_tlbm,
	label_smp_pgtable_change,
	label_r3000_write_probe_fail,
};

UASM_L_LA(_second_part)
UASM_L_LA(_leave)
#ifdef MODULE_START
UASM_L_LA(_module_alloc)
#endif
UASM_L_LA(_vmalloc)
UASM_L_LA(_vmalloc_done)
UASM_L_LA(_tlbw_hazard)
UASM_L_LA(_split)
UASM_L_LA(_nopage_tlbl)
UASM_L_LA(_nopage_tlbs)
UASM_L_LA(_nopage_tlbm)
UASM_L_LA(_smp_pgtable_change)
UASM_L_LA(_r3000_write_probe_fail)
/*
 * For debug purposes.
 */
static inline void dump_handler(const u32 *handler, int count)
{
	int i;

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");

	for (i = 0; i < count; i++)
		pr_debug("\t%p\t.word 0x%08x\n", &handler[i], handler[i]);

	pr_debug("\t.set pop\n");
}
/* The only general purpose registers allowed in TLB handlers. */
#define K0		26
#define K1		27
/* Some CP0 registers */
#define C0_INDEX	0, 0
#define C0_ENTRYLO0	2, 0
#define C0_TCBIND	2, 2
#define C0_ENTRYLO1	3, 0
#define C0_CONTEXT	4, 0
#define C0_BADVADDR	8, 0
#define C0_ENTRYHI	10, 0
#define C0_EPC		14, 0
#define C0_XCONTEXT	20, 0

#ifdef CONFIG_64BIT
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT)
#else
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT)
#endif
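
/*
 * Each C0_* macro above expands to a "register, select" pair that
 * fills the trailing operands of the mfc0/mtc0 emitters.  A sketch
 * (these calls are illustrative, not part of any handler):
 *
 *	uasm_i_mfc0(&p, K0, C0_BADVADDR)	--> mfc0 k0, $8, 0
 *	uasm_i_mtc0(&p, K0, C0_ENTRYLO0)	--> mtc0 k0, $2, 0
 */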
/*
 * The worst case length of the handler is around 18 instructions for
 * R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
 * Maximum space available is 32 instructions for R3000 and 64
 * instructions for R4000.
 *
 * We deliberately chose a buffer size of 128, so we won't scribble
 * over anything important on overflow before we panic.
 */
static u32 tlb_handler[128] __cpuinitdata;

/* simply assume worst case size for labels and relocs */
static struct uasm_label labels[128] __cpuinitdata;
static struct uasm_reloc relocs[128] __cpuinitdata;

/*
 * The R3000 TLB handler is simple.
 */
static void __cpuinit build_r3000_tlb_refill_handler(void)
{
	long pgdc = (long)pgd_current;
	u32 *p;

	memset(tlb_handler, 0, sizeof(tlb_handler));
	p = tlb_handler;

	uasm_i_mfc0(&p, K0, C0_BADVADDR);
	uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */
	uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1);
	uasm_i_srl(&p, K0, K0, 22); /* load delay */
	uasm_i_sll(&p, K0, K0, 2);
	uasm_i_addu(&p, K1, K1, K0);
	uasm_i_mfc0(&p, K0, C0_CONTEXT);
	uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */
	uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */
	uasm_i_addu(&p, K1, K1, K0);
	uasm_i_lw(&p, K0, 0, K1);
	uasm_i_nop(&p); /* load delay */
	uasm_i_mtc0(&p, K0, C0_ENTRYLO0);
	uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
	uasm_i_tlbwr(&p); /* cp0 delay */
	uasm_i_jr(&p, K1);
	uasm_i_rfe(&p); /* branch delay */

	if (p > tlb_handler + 32)
		panic("TLB refill handler space exceeded");

	pr_debug("Wrote TLB refill handler (%u instructions).\n",
		 (unsigned int)(p - tlb_handler));

	memcpy((void *)ebase, tlb_handler, 0x80);

	dump_handler((u32 *)ebase, 32);
}
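
/*
 * For reference, the handler built above corresponds roughly to the
 * following assembly (a sketch, assuming the uasm emitters map 1:1
 * onto their MIPS mnemonics):
 *
 *	mfc0	k0, c0_badvaddr
 *	lui	k1, %hi(pgd_current)
 *	lw	k1, %lo(pgd_current)(k1)
 *	srl	k0, k0, 22
 *	sll	k0, k0, 2
 *	addu	k1, k1, k0
 *	mfc0	k0, c0_context
 *	lw	k1, (k1)
 *	andi	k0, k0, 0xffc
 *	addu	k1, k1, k0
 *	lw	k0, (k1)
 *	nop
 *	mtc0	k0, c0_entrylo0
 *	mfc0	k1, c0_epc
 *	tlbwr
 *	jr	k1
 *	rfe
 */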
/*
 * The R4000 TLB handler is much more complicated. We have two
 * consecutive handler areas with 32 instructions space each.
 * Since they aren't used at the same time, we can overflow into
 * the other one.  To keep things simple, we first assume linear space,
 * then we relocate it to the final handler layout as needed.
 */
static u32 final_handler[64] __cpuinitdata;
/*
 * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
 * 2. A timing hazard exists for the TLBP instruction.
 *
 *	stalling_instruction
 *	TLBP
 *
 * The JTLB is being read for the TLBP throughout the stall generated by the
 * previous instruction. This is not really correct as the stalling instruction
 * can modify the address used to access the JTLB.  The failure symptom is that
 * the TLBP instruction will use an address created for the stalling instruction
 * and not the address held in C0_ENHI and thus report the wrong results.
 *
 * The software work-around is to not allow the instruction preceding the TLBP
 * to stall - make it an NOP or some other instruction guaranteed not to stall.
 *
 * Errata 2 will not be fixed.  This erratum is also present on the R5000.
 *
 * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
 */
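
/*
 * In instruction terms the work-around boils down to (sketch):
 *
 *	nop			# guaranteed not to stall
 *	tlbp
 */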
static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
{
	switch (current_cpu_type()) {
	/* Found by experiment: R4600 v2.0/R4700 needs this, too. */
	case CPU_R4600:
	case CPU_R4700:
	case CPU_R5000:
	case CPU_R5000A:
		uasm_i_nop(p);
		uasm_i_tlbp(p);
		break;

	default:
		uasm_i_tlbp(p);
		break;
	}
}
/*
 * Write random or indexed TLB entry, and care about the hazards from
 * the preceding mtc0 and for the following eret.
 */
enum tlb_write_entry { tlb_random, tlb_indexed };
static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
					    struct uasm_reloc **r,
					    enum tlb_write_entry wmode)
{
	void (*tlbw)(u32 **) = NULL;

	switch (wmode) {
	case tlb_random: tlbw = uasm_i_tlbwr; break;
	case tlb_indexed: tlbw = uasm_i_tlbwi; break;
	}
	if (cpu_has_mips_r2) {
		if (cpu_has_mips_r2_exec_hazard)
			uasm_i_ehb(p);
		tlbw(p);
		return;
	}
	switch (current_cpu_type()) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		/*
		 * This branch uses up a mtc0 hazard nop slot and saves
		 * two nops after the tlbw instruction.
		 */
		uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
		tlbw(p);
		uasm_l_tlbw_hazard(l, *p);
		break;
	case CPU_CAVIUM_OCTEON:
	case CPU_R5500:
		if (m4kc_tlbp_war())
			uasm_i_nop(p);
	case CPU_ALCHEMY:
		tlbw(p);
		break;

	case CPU_NEVADA:
		uasm_i_nop(p); /* QED specifies 2 nops hazard */
		/*
		 * This branch uses up a mtc0 hazard nop slot and saves
		 * a nop after the tlbw instruction.
		 */
		uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
		tlbw(p);
		uasm_l_tlbw_hazard(l, *p);
		break;
	case CPU_RM9000:
		/*
		 * When the JTLB is updated by tlbwi or tlbwr, a subsequent
		 * use of the JTLB for instructions should not occur for 4
		 * cpu cycles and use for data translations should not occur
		 * for 3 cpu cycles.
		 */
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		tlbw(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		break;
	default:
		panic("No TLB refill handler yet (CPU type: %d)",
		      current_cpu_data.cputype);
		break;
	}
}
/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pmd entry.
 */
static void __cpuinit
build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
		 unsigned int tmp, unsigned int ptr)
{
	long pgdc = (long)pgd_current;
	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	uasm_i_dmfc0(p, tmp, C0_BADVADDR);
#ifdef MODULE_START
	uasm_il_bltz(p, r, tmp, label_module_alloc);
#else
	uasm_il_bltz(p, r, tmp, label_vmalloc);
#endif
	/* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */
#ifdef CONFIG_SMP
# ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC uses TCBind value as "CPU" index
	 */
	uasm_i_mfc0(p, ptr, C0_TCBIND);
	uasm_i_dsrl(p, ptr, ptr, 19);
# else
	/*
	 * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
	 * stored in CONTEXT.
	 */
	uasm_i_dmfc0(p, ptr, C0_CONTEXT);
	uasm_i_dsrl(p, ptr, ptr, 23);
# endif
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_daddu(p, ptr, ptr, tmp);
	uasm_i_dmfc0(p, tmp, C0_BADVADDR);
	uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
#else
	UASM_i_LA_mostly(p, ptr, pgdc);
	uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
#endif
	uasm_l_vmalloc_done(l, *p);

	if (PGDIR_SHIFT - 3 < 32)	/* get pgd offset in bytes */
		uasm_i_dsrl(p, tmp, tmp, PGDIR_SHIFT - 3);
	else
		uasm_i_dsrl32(p, tmp, tmp, PGDIR_SHIFT - 3 - 32);
	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1) << 3);
	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
	uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
	uasm_i_dsrl(p, tmp, tmp, PMD_SHIFT - 3); /* get pmd offset in bytes */
	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1) << 3);
	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
}
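
/*
 * Worked example for the index arithmetic above (a sketch, assuming 4K
 * pages and 512 eight-byte entries per table, i.e. PGDIR_SHIFT == 30):
 * dsrl by PGDIR_SHIFT - 3 leaves the pgd index already multiplied by 8,
 * and andi with (PTRS_PER_PGD - 1) << 3 == 0xff8 clips it to a byte
 * offset inside the single-page pgd; the pmd lines then repeat the same
 * trick with PMD_SHIFT.
 */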
/*
 * BVADDR is the faulting address, PTR is scratch.
 * PTR will hold the pgd for vmalloc.
 */
static void __cpuinit
build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
			unsigned int bvaddr, unsigned int ptr)
{
	long swpd = (long)swapper_pg_dir;
#ifdef MODULE_START
	long modd = (long)module_pg_dir;

	uasm_l_module_alloc(l, *p);
	/*
	 * VMALLOC_START >= 0xc000000000000000UL
	 * MODULE_START  >= 0xe000000000000000UL
	 */
	UASM_i_SLL(p, ptr, bvaddr, 2);
	uasm_il_bgez(p, r, ptr, label_vmalloc);
	if (uasm_in_compat_space_p(MODULE_START) &&
	    !uasm_rel_lo(MODULE_START)) {
		uasm_i_lui(p, ptr, uasm_rel_hi(MODULE_START)); /* delay slot */
	} else {
		/* unlikely configuration */
		uasm_i_nop(p); /* delay slot */
		UASM_i_LA(p, ptr, MODULE_START);
	}
	uasm_i_dsubu(p, bvaddr, bvaddr, ptr);
	if (uasm_in_compat_space_p(modd) && !uasm_rel_lo(modd)) {
		uasm_il_b(p, r, label_vmalloc_done);
		uasm_i_lui(p, ptr, uasm_rel_hi(modd));
	} else {
		UASM_i_LA_mostly(p, ptr, modd);
		uasm_il_b(p, r, label_vmalloc_done);
		if (uasm_in_compat_space_p(modd))
			uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(modd));
		else
			uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(modd));
	}
	uasm_l_vmalloc(l, *p);
	if (uasm_in_compat_space_p(MODULE_START) &&
	    !uasm_rel_lo(MODULE_START) &&
	    MODULE_START << 32 == VMALLOC_START)
		uasm_i_dsll32(p, ptr, ptr, 0); /* typical case */
	else
		UASM_i_LA(p, ptr, VMALLOC_START);
#else
	uasm_l_vmalloc(l, *p);
	UASM_i_LA(p, ptr, VMALLOC_START);
#endif
	uasm_i_dsubu(p, bvaddr, bvaddr, ptr);

	if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
		uasm_il_b(p, r, label_vmalloc_done);
		uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
	} else {
		UASM_i_LA_mostly(p, ptr, swpd);
		uasm_il_b(p, r, label_vmalloc_done);
		if (uasm_in_compat_space_p(swpd))
			uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
		else
			uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
	}
}
#else /* !CONFIG_64BIT */

/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pgd entry.
 */
static void __cpuinit __maybe_unused
build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
{
	long pgdc = (long)pgd_current;
	/* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
#ifdef CONFIG_SMP
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC uses TCBind value as "CPU" index
	 */
	uasm_i_mfc0(p, ptr, C0_TCBIND);
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_srl(p, ptr, ptr, 19);
#else
	/*
	 * smp_processor_id() << 3 is stored in CONTEXT.
	 */
	uasm_i_mfc0(p, ptr, C0_CONTEXT);
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_srl(p, ptr, ptr, 23);
#endif
	uasm_i_addu(p, ptr, tmp, ptr);
#else
	UASM_i_LA_mostly(p, ptr, pgdc);
#endif
	uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
	uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
	uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
	uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
}

#endif /* !CONFIG_64BIT */
static void __cpuinit
build_adjust_context(u32 **p, unsigned int ctx)
{
	unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
	unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);
	switch (current_cpu_type()) {
	default:
		break;
	}

	if (shift)
		UASM_i_SRL(p, ctx, ctx, shift);
	uasm_i_andi(p, ctx, ctx, mask);
}
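
/*
 * Worked example (a sketch, assuming 4K pages and 4-byte PTEs, so
 * PTE_T_LOG2 == 2 and PTRS_PER_PTE == 1024): shift == 4 - 3 + 12 - 12
 * == 1 and mask == (1024 / 2 - 1) << 3 == 0xff8.  The Context register
 * holds BadVPN2 (VA >> 13) starting at bit 4; shifting right by 1
 * yields (VA >> 13) * 8, the byte offset of the even/odd pte pair,
 * which the andi then clamps to one page table's worth.
 */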
static void __cpuinit
build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
{
	/*
	 * Bug workaround for the Nevada. It seems as if under certain
	 * circumstances the move from cp0_context might produce a
	 * bogus result when the mfc0 instruction and its consumer are
	 * in a different cacheline or a load instruction, probably any
	 * memory reference, is between them.
	 */
	switch (current_cpu_type()) {
	case CPU_NEVADA:
		UASM_i_LW(p, ptr, 0, ptr);
		GET_CONTEXT(p, tmp); /* get context reg */
		break;

	default:
		GET_CONTEXT(p, tmp); /* get context reg */
		UASM_i_LW(p, ptr, 0, ptr);
		break;
	}
	build_adjust_context(p, tmp);
	UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
}
static void __cpuinit
build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
{
	/*
	 * 64bit address support (36bit on a 32bit CPU) in a 32bit
	 * Kernel is a special case. Only a few CPUs use it.
	 */
#ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits) {
		uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
		uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
		uasm_i_dsrl(p, tmp, tmp, 6); /* convert to entrylo0 */
		uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
		uasm_i_dsrl(p, ptep, ptep, 6); /* convert to entrylo1 */
		uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
	} else {
		int pte_off_even = sizeof(pte_t) / 2;
		int pte_off_odd = pte_off_even + sizeof(pte_t);
		/* The pte entries are pre-shifted */
		uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
		uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
		uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
		uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
	}
#else
	UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
	UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
	if (r45k_bvahwbug())
		build_tlb_probe_entry(p);
	UASM_i_SRL(p, tmp, tmp, 6); /* convert to entrylo0 */
	if (r4k_250MHZhwbug())
		uasm_i_mtc0(p, 0, C0_ENTRYLO0);
	uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
	UASM_i_SRL(p, ptep, ptep, 6); /* convert to entrylo1 */
	if (r45k_bvahwbug())
		uasm_i_mfc0(p, tmp, C0_INDEX);
	if (r4k_250MHZhwbug())
		uasm_i_mtc0(p, 0, C0_ENTRYLO1);
	uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
#endif
}
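
/*
 * A note on the "convert to entrylo" shifts above: the software pte
 * layout is (by design, assumed here) the hardware EntryLo format
 * shifted left by 6, with the software status bits kept at the low
 * end, so a plain right shift by 6 drops the software bits and yields
 * a value that can be written to C0_ENTRYLO0/1 as-is.
 */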
/*
 * For a 64-bit kernel, we are using the 64-bit XTLB refill exception
 * because EXL == 0.  If we wrap, we can also use the 32 instruction
 * slots before the XTLB refill exception handler which belong to the
 * unused TLB refill exception.
 */
#define MIPS64_REFILL_INSNS 32
static void __cpuinit build_r4000_tlb_refill_handler(void)
{
	u32 *p = tlb_handler;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	u32 *f;
	unsigned int final_len;
	memset(tlb_handler, 0, sizeof(tlb_handler));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));
	memset(final_handler, 0, sizeof(final_handler));
	/*
	 * create the plain linear handler
	 */
	if (bcm1250_m3_war()) {
		UASM_i_MFC0(&p, K0, C0_BADVADDR);
		UASM_i_MFC0(&p, K1, C0_ENTRYHI);
		uasm_i_xor(&p, K0, K0, K1);
		UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
		uasm_il_bnez(&p, &r, K0, label_leave);
		/* No need for uasm_i_nop */
	}
#ifdef CONFIG_64BIT
	build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
#else
	build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
#endif
	build_get_ptep(&p, K0, K1);
	build_update_entries(&p, K0, K1);
	build_tlb_write_entry(&p, &l, &r, tlb_random);
	uasm_l_leave(&l, p);
	uasm_i_eret(&p); /* return from trap */
#ifdef CONFIG_64BIT
	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1);
#endif
	/*
	 * Overflow check: For the 64bit handler, we need at least one
	 * free instruction slot for the wrap-around branch. In worst
	 * case, if the intended insertion point is a delay slot, we
	 * need three, with the second nop'ed and the third being
	 * unused.
	 */
	/* Loongson2 ebase is different than r4k, we have more space */
#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
	if ((p - tlb_handler) > 64)
		panic("TLB refill handler space exceeded");
#else
	if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
	    || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
		&& uasm_insn_has_bdelay(relocs,
					tlb_handler + MIPS64_REFILL_INSNS - 3)))
		panic("TLB refill handler space exceeded");
#endif
	/*
	 * Now fold the handler in the TLB refill handler space.
	 */
#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
	f = final_handler;
	/* Simplest case, just copy the handler. */
	uasm_copy_handler(relocs, labels, tlb_handler, p, f);
	final_len = p - tlb_handler;
#else /* CONFIG_64BIT */
	f = final_handler + MIPS64_REFILL_INSNS;
	if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
		/* Just copy the handler. */
		uasm_copy_handler(relocs, labels, tlb_handler, p, f);
		final_len = p - tlb_handler;
	} else {
#ifdef MODULE_START
		const enum label_id ls = label_module_alloc;
#else
		const enum label_id ls = label_vmalloc;
#endif
		u32 *split;
		int ov = 0;
		int i;

		for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++)
			;
		BUG_ON(i == ARRAY_SIZE(labels));
		split = labels[i].addr;
		/*
		 * See if we have overflown one way or the other.
		 */
		if (split > tlb_handler + MIPS64_REFILL_INSNS ||
		    split < p - MIPS64_REFILL_INSNS)
			ov = 1;

		if (ov) {
			/*
			 * Split two instructions before the end.  One
			 * for the branch and one for the instruction
			 * in the delay slot.
			 */
			split = tlb_handler + MIPS64_REFILL_INSNS - 2;

			/*
			 * If the branch would fall in a delay slot,
			 * we must back up an additional instruction
			 * so that it is no longer in a delay slot.
			 */
			if (uasm_insn_has_bdelay(relocs, split - 1))
				split--;
		}
		/* Copy first part of the handler. */
		uasm_copy_handler(relocs, labels, tlb_handler, split, f);
		f += split - tlb_handler;
		if (ov) {
			/* Insert branch. */
			uasm_l_split(&l, final_handler);
			uasm_il_b(&f, &r, label_split);
			if (uasm_insn_has_bdelay(relocs, split))
				uasm_i_nop(&f);
			else {
				uasm_copy_handler(relocs, labels,
						  split, split + 1, f);
				uasm_move_labels(labels, f, f + 1, -1);
				f++;
				split++;
			}
		}
		/* Copy the rest of the handler. */
		uasm_copy_handler(relocs, labels, split, p, final_handler);
		final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
			    (p - split);
	}
#endif /* CONFIG_64BIT */
	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB refill handler (%u instructions).\n",
		 final_len);

	memcpy((void *)ebase, final_handler, 0x100);

	dump_handler((u32 *)ebase, 64);
}
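
/*
 * Resulting layout in the 64-bit case (a sketch):
 *
 *	ebase + 0x000:	the 32 slots of the unused 32-bit TLB refill
 *			vector, holding any overflow reached via the
 *			label_split branch
 *	ebase + 0x080:	XTLB refill entry point, first part of the
 *			handler
 *
 * In the 32-bit/Loongson2 cases the handler is simply copied linearly.
 */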
/*
 * TLB load/store/modify handlers.
 *
 * Only the fastpath gets synthesized at runtime, the slowpath for
 * do_page_fault remains normal asm.
 */
extern void tlb_do_page_fault_0(void);
extern void tlb_do_page_fault_1(void);

/*
 * 128 instructions for the fastpath handler is generous and should
 * never be exceeded.
 */
#define FASTPATH_SIZE 128
u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;
static void __cpuinit
iPTE_LW(u32 **p, struct uasm_label **l, unsigned int pte, unsigned int ptr)
{
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_lld(p, pte, 0, ptr);
	else
# endif
		UASM_i_LL(p, pte, 0, ptr);
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_ld(p, pte, 0, ptr);
	else
# endif
		UASM_i_LW(p, pte, 0, ptr);
#endif
}
static void __cpuinit
iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
	unsigned int mode)
{
#ifdef CONFIG_64BIT_PHYS_ADDR
	unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
#endif
	uasm_i_ori(p, pte, pte, mode);
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_scd(p, pte, 0, ptr);
	else
# endif
		UASM_i_SC(p, pte, 0, ptr);

	if (r10000_llsc_war())
		uasm_il_beqzl(p, r, pte, label_smp_pgtable_change);
	else
		uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (!cpu_has_64bits) {
		/* no uasm_i_nop needed */
		uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_ori(p, pte, pte, hwmode);
		uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
		/* no uasm_i_nop needed */
		uasm_i_lw(p, pte, 0, ptr);
	} else
		uasm_i_nop(p);
# else
	uasm_i_nop(p);
# endif
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_sd(p, pte, 0, ptr);
	else
# endif
		UASM_i_SW(p, pte, 0, ptr);
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (!cpu_has_64bits) {
		uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_ori(p, pte, pte, hwmode);
		uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_lw(p, pte, 0, ptr);
	}
# endif
#endif
}
/*
 * Check if PTE is present, if not then jump to LABEL. PTR points to
 * the page table where this PTE is located, PTE will be re-loaded
 * with its original value.
 */
static void __cpuinit
build_pte_present(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
		  unsigned int pte, unsigned int ptr, enum label_id lid)
{
	uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
	uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
	uasm_il_bnez(p, r, pte, lid);
	iPTE_LW(p, l, pte, ptr);
}
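
/*
 * Why the andi/xori pair in build_pte_present() works: andi isolates
 * _PAGE_PRESENT and _PAGE_READ, xori then flips exactly those bits,
 * so the result is zero iff both were set; any missing bit survives
 * the xori and sends the bnez to the fault label.
 */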
/* Make PTE valid, store result in PTR. */
static void __cpuinit
build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
		 unsigned int ptr)
{
	unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;

	iPTE_SW(p, r, pte, ptr, mode);
}
/*
 * Check if PTE can be written to, if not branch to LABEL. Regardless
 * restore PTE with value from PTR when done.
 */
static void __cpuinit
build_pte_writable(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
		   unsigned int pte, unsigned int ptr, enum label_id lid)
{
	uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
	uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
	uasm_il_bnez(p, r, pte, lid);
	iPTE_LW(p, l, pte, ptr);
}
/*
 * Make PTE writable, update software status bits as well, then store
 * at PTR.
 */
static void __cpuinit
build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
		 unsigned int ptr)
{
	unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID |
			     _PAGE_DIRTY);

	iPTE_SW(p, r, pte, ptr, mode);
}
/*
 * Check if PTE can be modified, if not branch to LABEL. Regardless
 * restore PTE with value from PTR when done.
 */
static void __cpuinit
build_pte_modifiable(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
		     unsigned int pte, unsigned int ptr, enum label_id lid)
{
	uasm_i_andi(p, pte, pte, _PAGE_WRITE);
	uasm_il_beqz(p, r, pte, lid);
	iPTE_LW(p, l, pte, ptr);
}
/*
 * R3000 style TLB load/store/modify handlers.
 */

/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi.
 * Then it returns.
 */
static void __cpuinit
build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
{
	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */
	uasm_i_tlbwi(p);
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
}
/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi
 * or tlbwr as appropriate.  This is because the index register
 * may have the probe fail bit set as a result of a trap on a
 * kseg2 access, i.e. without refill.  Then it returns.
 */
static void __cpuinit
build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
			     struct uasm_reloc **r, unsigned int pte,
			     unsigned int tmp)
{
	uasm_i_mfc0(p, tmp, C0_INDEX);
	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */
	uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */
	uasm_i_tlbwi(p); /* cp0 delay */
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */

	uasm_l_r3000_write_probe_fail(l, *p);
	uasm_i_tlbwr(p); /* cp0 delay */
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
}
static void __cpuinit
build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
				   unsigned int ptr)
{
	long pgdc = (long)pgd_current;
	uasm_i_mfc0(p, pte, C0_BADVADDR);
	uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */
	uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
	uasm_i_srl(p, pte, pte, 22); /* load delay */
	uasm_i_sll(p, pte, pte, 2);
	uasm_i_addu(p, ptr, ptr, pte);
	uasm_i_mfc0(p, pte, C0_CONTEXT);
	uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */
	uasm_i_andi(p, pte, pte, 0xffc); /* load delay */
	uasm_i_addu(p, ptr, ptr, pte);
	uasm_i_lw(p, pte, 0, ptr);
	uasm_i_tlbp(p); /* load delay */
}
static void __cpuinit build_r3000_tlb_load_handler(void)
{
	u32 *p = handle_tlbl;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	memset(handle_tlbl, 0, sizeof(handle_tlbl));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));
	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_present(&p, &l, &r, K0, K1, label_nopage_tlbl);
	uasm_i_nop(&p); /* load delay */
	build_make_valid(&p, &r, K0, K1);
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
	uasm_l_nopage_tlbl(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	uasm_i_nop(&p);
	if ((p - handle_tlbl) > FASTPATH_SIZE)
		panic("TLB load handler fastpath space exceeded");
	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbl));

	dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
}
static void __cpuinit build_r3000_tlb_store_handler(void)
{
	u32 *p = handle_tlbs;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	memset(handle_tlbs, 0, sizeof(handle_tlbs));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));
	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_writable(&p, &l, &r, K0, K1, label_nopage_tlbs);
	uasm_i_nop(&p); /* load delay */
	build_make_write(&p, &r, K0, K1);
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
	uasm_l_nopage_tlbs(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);
	if ((p - handle_tlbs) > FASTPATH_SIZE)
		panic("TLB store handler fastpath space exceeded");
	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbs));

	dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
}
static void __cpuinit build_r3000_tlb_modify_handler(void)
{
	u32 *p = handle_tlbm;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	memset(handle_tlbm, 0, sizeof(handle_tlbm));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));
	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_modifiable(&p, &l, &r, K0, K1, label_nopage_tlbm);
	uasm_i_nop(&p); /* load delay */
	build_make_write(&p, &r, K0, K1);
	build_r3000_pte_reload_tlbwi(&p, K0, K1);
	uasm_l_nopage_tlbm(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);
	if ((p - handle_tlbm) > FASTPATH_SIZE)
		panic("TLB modify handler fastpath space exceeded");
	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbm));

	dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
}
/*
 * R4000 style TLB load/store/modify handlers.
 */
static void __cpuinit
build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
				   struct uasm_reloc **r, unsigned int pte,
				   unsigned int ptr)
{
#ifdef CONFIG_64BIT
	build_get_pmde64(p, l, r, pte, ptr); /* get pmd in ptr */
#else
	build_get_pgde32(p, pte, ptr); /* get pgd in ptr */
#endif
	UASM_i_MFC0(p, pte, C0_BADVADDR);
	UASM_i_LW(p, ptr, 0, ptr);
	UASM_i_SRL(p, pte, pte, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
	uasm_i_andi(p, pte, pte, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
	UASM_i_ADDU(p, ptr, ptr, pte);
#ifdef CONFIG_SMP
	uasm_l_smp_pgtable_change(l, *p);
#endif
	iPTE_LW(p, l, pte, ptr); /* get even pte */
	if (!m4kc_tlbp_war())
		build_tlb_probe_entry(p);
}
static void __cpuinit
build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
				   struct uasm_reloc **r, unsigned int tmp,
				   unsigned int ptr)
{
	uasm_i_ori(p, ptr, ptr, sizeof(pte_t));
	uasm_i_xori(p, ptr, ptr, sizeof(pte_t));
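	/*
	 * The ori/xori pair above unconditionally clears the
	 * sizeof(pte_t) bit of ptr, aligning it back down to the even
	 * pte of the pair that build_update_entries() below will load.
	 */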
	build_update_entries(p, tmp, ptr);
	build_tlb_write_entry(p, l, r, tlb_indexed);
	uasm_l_leave(l, *p);
	uasm_i_eret(p); /* return from trap */
#ifdef CONFIG_64BIT
	build_get_pgd_vmalloc64(p, l, r, tmp, ptr);
#endif
}
static void __cpuinit build_r4000_tlb_load_handler(void)
{
	u32 *p = handle_tlbl;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	memset(handle_tlbl, 0, sizeof(handle_tlbl));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));
	if (bcm1250_m3_war()) {
		UASM_i_MFC0(&p, K0, C0_BADVADDR);
		UASM_i_MFC0(&p, K1, C0_ENTRYHI);
		uasm_i_xor(&p, K0, K0, K1);
		UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
		uasm_il_bnez(&p, &r, K0, label_leave);
		/* No need for uasm_i_nop */
	}
	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_present(&p, &l, &r, K0, K1, label_nopage_tlbl);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	build_make_valid(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);
	uasm_l_nopage_tlbl(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	uasm_i_nop(&p);
	if ((p - handle_tlbl) > FASTPATH_SIZE)
		panic("TLB load handler fastpath space exceeded");
	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbl));

	dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
}
static void __cpuinit build_r4000_tlb_store_handler(void)
{
	u32 *p = handle_tlbs;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	memset(handle_tlbs, 0, sizeof(handle_tlbs));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));
	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_writable(&p, &l, &r, K0, K1, label_nopage_tlbs);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	build_make_write(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);
	uasm_l_nopage_tlbs(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);
	if ((p - handle_tlbs) > FASTPATH_SIZE)
		panic("TLB store handler fastpath space exceeded");
	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbs));

	dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
}
static void __cpuinit build_r4000_tlb_modify_handler(void)
{
	u32 *p = handle_tlbm;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	memset(handle_tlbm, 0, sizeof(handle_tlbm));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));
	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_modifiable(&p, &l, &r, K0, K1, label_nopage_tlbm);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	/* Present and writable bits set, set accessed and dirty bits. */
	build_make_write(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);
	uasm_l_nopage_tlbm(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);
	if ((p - handle_tlbm) > FASTPATH_SIZE)
		panic("TLB modify handler fastpath space exceeded");
	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbm));

	dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
}
void __cpuinit build_tlb_refill_handler(void)
{
	/*
	 * The refill handler is generated per-CPU, multi-node systems
	 * may have local storage for it. The other handlers are only
	 * needed once.
	 */
	static int run_once = 0;
	switch (current_cpu_type()) {
	case CPU_R3000:
		build_r3000_tlb_refill_handler();
		if (!run_once) {
			build_r3000_tlb_load_handler();
			build_r3000_tlb_store_handler();
			build_r3000_tlb_modify_handler();
			run_once++;
		}
		break;

	case CPU_R6000:
		panic("No R6000 TLB refill handler yet");
		break;

	case CPU_R8000:
		panic("No R8000 TLB refill handler yet");
		break;

	default:
		build_r4000_tlb_refill_handler();
		if (!run_once) {
			build_r4000_tlb_load_handler();
			build_r4000_tlb_store_handler();
			build_r4000_tlb_modify_handler();
			run_once++;
		}
	}
}
void __cpuinit flush_tlb_handlers(void)
{
	local_flush_icache_range((unsigned long)handle_tlbl,
				 (unsigned long)handle_tlbl + sizeof(handle_tlbl));
	local_flush_icache_range((unsigned long)handle_tlbs,
				 (unsigned long)handle_tlbs + sizeof(handle_tlbs));
	local_flush_icache_range((unsigned long)handle_tlbm,
				 (unsigned long)handle_tlbm + sizeof(handle_tlbm));
}