2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * Synthesize TLB refill handlers at runtime.
8 * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
9 * Copyright (C) 2005, 2007, 2008, 2009 Maciej W. Rozycki
10 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
11 * Copyright (C) 2008, 2009 Cavium Networks, Inc.
13 * ... and the days got worse and worse and now you see
14 * I've gone completly out of my mind.
16 * They're coming to take me a away haha
17 * they're coming to take me a away hoho hihi haha
18 * to the funny farm where code is beautiful all the time ...
20 * (Condolences to Napoleon XIV)
23 #include <linux/bug.h>
24 #include <linux/kernel.h>
25 #include <linux/types.h>
26 #include <linux/smp.h>
27 #include <linux/string.h>
28 #include <linux/init.h>
30 #include <asm/mmu_context.h>
35 static inline int r45k_bvahwbug(void)
37 /* XXX: We should probe for the presence of this bug, but we don't. */
41 static inline int r4k_250MHZhwbug(void)
43 /* XXX: We should probe for the presence of this bug, but we don't. */
47 static inline int __maybe_unused
bcm1250_m3_war(void)
49 return BCM1250_M3_WAR
;
52 static inline int __maybe_unused
r10000_llsc_war(void)
54 return R10000_LLSC_WAR
;
58 * Found by experiment: At least some revisions of the 4kc throw under
59 * some circumstances a machine check exception, triggered by invalid
60 * values in the index register. Delaying the tlbp instruction until
61 * after the next branch, plus adding an additional nop in front of
62 * tlbwi/tlbwr avoids the invalid index register values. Nobody knows
63 * why; it's not an issue caused by the core RTL.
66 static int __cpuinit
m4kc_tlbp_war(void)
68 return (current_cpu_data
.processor_id
& 0xffff00) ==
69 (PRID_COMP_MIPS
| PRID_IMP_4KC
);
72 /* Handle labels (which must be positive integers). */
74 label_second_part
= 1,
83 label_smp_pgtable_change
,
84 label_r3000_write_probe_fail
,
85 #ifdef CONFIG_HUGETLB_PAGE
86 label_tlb_huge_update
,
90 UASM_L_LA(_second_part
)
93 UASM_L_LA(_vmalloc_done
)
94 UASM_L_LA(_tlbw_hazard
)
96 UASM_L_LA(_nopage_tlbl
)
97 UASM_L_LA(_nopage_tlbs
)
98 UASM_L_LA(_nopage_tlbm
)
99 UASM_L_LA(_smp_pgtable_change
)
100 UASM_L_LA(_r3000_write_probe_fail
)
101 #ifdef CONFIG_HUGETLB_PAGE
102 UASM_L_LA(_tlb_huge_update
)
106 * For debug purposes.
108 static inline void dump_handler(const u32
*handler
, int count
)
112 pr_debug("\t.set push\n");
113 pr_debug("\t.set noreorder\n");
115 for (i
= 0; i
< count
; i
++)
116 pr_debug("\t%p\t.word 0x%08x\n", &handler
[i
], handler
[i
]);
118 pr_debug("\t.set pop\n");
121 /* The only general purpose registers allowed in TLB handlers. */
125 /* Some CP0 registers */
126 #define C0_INDEX 0, 0
127 #define C0_ENTRYLO0 2, 0
128 #define C0_TCBIND 2, 2
129 #define C0_ENTRYLO1 3, 0
130 #define C0_CONTEXT 4, 0
131 #define C0_PAGEMASK 5, 0
132 #define C0_BADVADDR 8, 0
133 #define C0_ENTRYHI 10, 0
135 #define C0_XCONTEXT 20, 0
138 # define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT)
140 # define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT)
143 /* The worst case length of the handler is around 18 instructions for
144 * R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
145 * Maximum space available is 32 instructions for R3000 and 64
146 * instructions for R4000.
148 * We deliberately chose a buffer size of 128, so we won't scribble
149 * over anything important on overflow before we panic.
151 static u32 tlb_handler
[128] __cpuinitdata
;
153 /* simply assume worst case size for labels and relocs */
154 static struct uasm_label labels
[128] __cpuinitdata
;
155 static struct uasm_reloc relocs
[128] __cpuinitdata
;
158 * The R3000 TLB handler is simple.
160 static void __cpuinit
build_r3000_tlb_refill_handler(void)
162 long pgdc
= (long)pgd_current
;
165 memset(tlb_handler
, 0, sizeof(tlb_handler
));
168 uasm_i_mfc0(&p
, K0
, C0_BADVADDR
);
169 uasm_i_lui(&p
, K1
, uasm_rel_hi(pgdc
)); /* cp0 delay */
170 uasm_i_lw(&p
, K1
, uasm_rel_lo(pgdc
), K1
);
171 uasm_i_srl(&p
, K0
, K0
, 22); /* load delay */
172 uasm_i_sll(&p
, K0
, K0
, 2);
173 uasm_i_addu(&p
, K1
, K1
, K0
);
174 uasm_i_mfc0(&p
, K0
, C0_CONTEXT
);
175 uasm_i_lw(&p
, K1
, 0, K1
); /* cp0 delay */
176 uasm_i_andi(&p
, K0
, K0
, 0xffc); /* load delay */
177 uasm_i_addu(&p
, K1
, K1
, K0
);
178 uasm_i_lw(&p
, K0
, 0, K1
);
179 uasm_i_nop(&p
); /* load delay */
180 uasm_i_mtc0(&p
, K0
, C0_ENTRYLO0
);
181 uasm_i_mfc0(&p
, K1
, C0_EPC
); /* cp0 delay */
182 uasm_i_tlbwr(&p
); /* cp0 delay */
184 uasm_i_rfe(&p
); /* branch delay */
186 if (p
> tlb_handler
+ 32)
187 panic("TLB refill handler space exceeded");
189 pr_debug("Wrote TLB refill handler (%u instructions).\n",
190 (unsigned int)(p
- tlb_handler
));
192 memcpy((void *)ebase
, tlb_handler
, 0x80);
194 dump_handler((u32
*)ebase
, 32);
198 * The R4000 TLB handler is much more complicated. We have two
199 * consecutive handler areas with 32 instructions space each.
200 * Since they aren't used at the same time, we can overflow in the
201 * other one.To keep things simple, we first assume linear space,
202 * then we relocate it to the final handler layout as needed.
204 static u32 final_handler
[64] __cpuinitdata
;
209 * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
210 * 2. A timing hazard exists for the TLBP instruction.
212 * stalling_instruction
215 * The JTLB is being read for the TLBP throughout the stall generated by the
216 * previous instruction. This is not really correct as the stalling instruction
217 * can modify the address used to access the JTLB. The failure symptom is that
218 * the TLBP instruction will use an address created for the stalling instruction
219 * and not the address held in C0_ENHI and thus report the wrong results.
221 * The software work-around is to not allow the instruction preceding the TLBP
222 * to stall - make it an NOP or some other instruction guaranteed not to stall.
224 * Errata 2 will not be fixed. This errata is also on the R5000.
226 * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
228 static void __cpuinit __maybe_unused
build_tlb_probe_entry(u32
**p
)
230 switch (current_cpu_type()) {
231 /* Found by experiment: R4600 v2.0/R4700 needs this, too. */
248 * Write random or indexed TLB entry, and care about the hazards from
249 * the preceeding mtc0 and for the following eret.
251 enum tlb_write_entry
{ tlb_random
, tlb_indexed
};
253 static void __cpuinit
build_tlb_write_entry(u32
**p
, struct uasm_label
**l
,
254 struct uasm_reloc
**r
,
255 enum tlb_write_entry wmode
)
257 void(*tlbw
)(u32
**) = NULL
;
260 case tlb_random
: tlbw
= uasm_i_tlbwr
; break;
261 case tlb_indexed
: tlbw
= uasm_i_tlbwi
; break;
264 if (cpu_has_mips_r2
) {
265 if (cpu_has_mips_r2_exec_hazard
)
271 switch (current_cpu_type()) {
279 * This branch uses up a mtc0 hazard nop slot and saves
280 * two nops after the tlbw instruction.
282 uasm_il_bgezl(p
, r
, 0, label_tlbw_hazard
);
284 uasm_l_tlbw_hazard(l
, *p
);
330 uasm_i_nop(p
); /* QED specifies 2 nops hazard */
332 * This branch uses up a mtc0 hazard nop slot and saves
333 * a nop after the tlbw instruction.
335 uasm_il_bgezl(p
, r
, 0, label_tlbw_hazard
);
337 uasm_l_tlbw_hazard(l
, *p
);
350 * When the JTLB is updated by tlbwi or tlbwr, a subsequent
351 * use of the JTLB for instructions should not occur for 4
352 * cpu cycles and use for data translations should not occur
387 panic("No TLB refill handler yet (CPU type: %d)",
388 current_cpu_data
.cputype
);
393 #ifdef CONFIG_HUGETLB_PAGE
394 static __cpuinit
void build_huge_tlb_write_entry(u32
**p
,
395 struct uasm_label
**l
,
396 struct uasm_reloc
**r
,
398 enum tlb_write_entry wmode
)
400 /* Set huge page tlb entry size */
401 uasm_i_lui(p
, tmp
, PM_HUGE_MASK
>> 16);
402 uasm_i_ori(p
, tmp
, tmp
, PM_HUGE_MASK
& 0xffff);
403 uasm_i_mtc0(p
, tmp
, C0_PAGEMASK
);
405 build_tlb_write_entry(p
, l
, r
, wmode
);
407 /* Reset default page size */
408 if (PM_DEFAULT_MASK
>> 16) {
409 uasm_i_lui(p
, tmp
, PM_DEFAULT_MASK
>> 16);
410 uasm_i_ori(p
, tmp
, tmp
, PM_DEFAULT_MASK
& 0xffff);
411 uasm_il_b(p
, r
, label_leave
);
412 uasm_i_mtc0(p
, tmp
, C0_PAGEMASK
);
413 } else if (PM_DEFAULT_MASK
) {
414 uasm_i_ori(p
, tmp
, 0, PM_DEFAULT_MASK
);
415 uasm_il_b(p
, r
, label_leave
);
416 uasm_i_mtc0(p
, tmp
, C0_PAGEMASK
);
418 uasm_il_b(p
, r
, label_leave
);
419 uasm_i_mtc0(p
, 0, C0_PAGEMASK
);
424 * Check if Huge PTE is present, if so then jump to LABEL.
426 static void __cpuinit
427 build_is_huge_pte(u32
**p
, struct uasm_reloc
**r
, unsigned int tmp
,
428 unsigned int pmd
, int lid
)
430 UASM_i_LW(p
, tmp
, 0, pmd
);
431 uasm_i_andi(p
, tmp
, tmp
, _PAGE_HUGE
);
432 uasm_il_bnez(p
, r
, tmp
, lid
);
435 static __cpuinit
void build_huge_update_entries(u32
**p
,
442 * A huge PTE describes an area the size of the
443 * configured huge page size. This is twice the
444 * of the large TLB entry size we intend to use.
445 * A TLB entry half the size of the configured
446 * huge page size is configured into entrylo0
447 * and entrylo1 to cover the contiguous huge PTE
450 small_sequence
= (HPAGE_SIZE
>> 7) < 0x10000;
452 /* We can clobber tmp. It isn't used after this.*/
454 uasm_i_lui(p
, tmp
, HPAGE_SIZE
>> (7 + 16));
456 UASM_i_SRL(p
, pte
, pte
, 6); /* convert to entrylo */
457 uasm_i_mtc0(p
, pte
, C0_ENTRYLO0
); /* load it */
458 /* convert to entrylo1 */
460 UASM_i_ADDIU(p
, pte
, pte
, HPAGE_SIZE
>> 7);
462 UASM_i_ADDU(p
, pte
, pte
, tmp
);
464 uasm_i_mtc0(p
, pte
, C0_ENTRYLO1
); /* load it */
467 static __cpuinit
void build_huge_handler_tail(u32
**p
,
468 struct uasm_reloc
**r
,
469 struct uasm_label
**l
,
474 UASM_i_SC(p
, pte
, 0, ptr
);
475 uasm_il_beqz(p
, r
, pte
, label_tlb_huge_update
);
476 UASM_i_LW(p
, pte
, 0, ptr
); /* Needed because SC killed our PTE */
478 UASM_i_SW(p
, pte
, 0, ptr
);
480 build_huge_update_entries(p
, pte
, ptr
);
481 build_huge_tlb_write_entry(p
, l
, r
, pte
, tlb_indexed
);
483 #endif /* CONFIG_HUGETLB_PAGE */
487 * TMP and PTR are scratch.
488 * TMP will be clobbered, PTR will hold the pmd entry.
490 static void __cpuinit
491 build_get_pmde64(u32
**p
, struct uasm_label
**l
, struct uasm_reloc
**r
,
492 unsigned int tmp
, unsigned int ptr
)
494 long pgdc
= (long)pgd_current
;
497 * The vmalloc handling is not in the hotpath.
499 uasm_i_dmfc0(p
, tmp
, C0_BADVADDR
);
500 uasm_il_bltz(p
, r
, tmp
, label_vmalloc
);
501 /* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */
504 # ifdef CONFIG_MIPS_MT_SMTC
506 * SMTC uses TCBind value as "CPU" index
508 uasm_i_mfc0(p
, ptr
, C0_TCBIND
);
509 uasm_i_dsrl(p
, ptr
, ptr
, 19);
512 * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
515 uasm_i_dmfc0(p
, ptr
, C0_CONTEXT
);
516 uasm_i_dsrl(p
, ptr
, ptr
, 23);
518 UASM_i_LA_mostly(p
, tmp
, pgdc
);
519 uasm_i_daddu(p
, ptr
, ptr
, tmp
);
520 uasm_i_dmfc0(p
, tmp
, C0_BADVADDR
);
521 uasm_i_ld(p
, ptr
, uasm_rel_lo(pgdc
), ptr
);
523 UASM_i_LA_mostly(p
, ptr
, pgdc
);
524 uasm_i_ld(p
, ptr
, uasm_rel_lo(pgdc
), ptr
);
527 uasm_l_vmalloc_done(l
, *p
);
529 if (PGDIR_SHIFT
- 3 < 32) /* get pgd offset in bytes */
530 uasm_i_dsrl(p
, tmp
, tmp
, PGDIR_SHIFT
-3);
532 uasm_i_dsrl32(p
, tmp
, tmp
, PGDIR_SHIFT
- 3 - 32);
534 uasm_i_andi(p
, tmp
, tmp
, (PTRS_PER_PGD
- 1)<<3);
535 uasm_i_daddu(p
, ptr
, ptr
, tmp
); /* add in pgd offset */
536 uasm_i_dmfc0(p
, tmp
, C0_BADVADDR
); /* get faulting address */
537 uasm_i_ld(p
, ptr
, 0, ptr
); /* get pmd pointer */
538 uasm_i_dsrl(p
, tmp
, tmp
, PMD_SHIFT
-3); /* get pmd offset in bytes */
539 uasm_i_andi(p
, tmp
, tmp
, (PTRS_PER_PMD
- 1)<<3);
540 uasm_i_daddu(p
, ptr
, ptr
, tmp
); /* add in pmd offset */
544 * BVADDR is the faulting address, PTR is scratch.
545 * PTR will hold the pgd for vmalloc.
547 static void __cpuinit
548 build_get_pgd_vmalloc64(u32
**p
, struct uasm_label
**l
, struct uasm_reloc
**r
,
549 unsigned int bvaddr
, unsigned int ptr
)
551 long swpd
= (long)swapper_pg_dir
;
553 uasm_l_vmalloc(l
, *p
);
555 if (uasm_in_compat_space_p(swpd
) && !uasm_rel_lo(swpd
)) {
556 uasm_il_b(p
, r
, label_vmalloc_done
);
557 uasm_i_lui(p
, ptr
, uasm_rel_hi(swpd
));
559 UASM_i_LA_mostly(p
, ptr
, swpd
);
560 uasm_il_b(p
, r
, label_vmalloc_done
);
561 if (uasm_in_compat_space_p(swpd
))
562 uasm_i_addiu(p
, ptr
, ptr
, uasm_rel_lo(swpd
));
564 uasm_i_daddiu(p
, ptr
, ptr
, uasm_rel_lo(swpd
));
568 #else /* !CONFIG_64BIT */
571 * TMP and PTR are scratch.
572 * TMP will be clobbered, PTR will hold the pgd entry.
574 static void __cpuinit __maybe_unused
575 build_get_pgde32(u32
**p
, unsigned int tmp
, unsigned int ptr
)
577 long pgdc
= (long)pgd_current
;
579 /* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
581 #ifdef CONFIG_MIPS_MT_SMTC
583 * SMTC uses TCBind value as "CPU" index
585 uasm_i_mfc0(p
, ptr
, C0_TCBIND
);
586 UASM_i_LA_mostly(p
, tmp
, pgdc
);
587 uasm_i_srl(p
, ptr
, ptr
, 19);
590 * smp_processor_id() << 3 is stored in CONTEXT.
592 uasm_i_mfc0(p
, ptr
, C0_CONTEXT
);
593 UASM_i_LA_mostly(p
, tmp
, pgdc
);
594 uasm_i_srl(p
, ptr
, ptr
, 23);
596 uasm_i_addu(p
, ptr
, tmp
, ptr
);
598 UASM_i_LA_mostly(p
, ptr
, pgdc
);
600 uasm_i_mfc0(p
, tmp
, C0_BADVADDR
); /* get faulting address */
601 uasm_i_lw(p
, ptr
, uasm_rel_lo(pgdc
), ptr
);
602 uasm_i_srl(p
, tmp
, tmp
, PGDIR_SHIFT
); /* get pgd only bits */
603 uasm_i_sll(p
, tmp
, tmp
, PGD_T_LOG2
);
604 uasm_i_addu(p
, ptr
, ptr
, tmp
); /* add in pgd offset */
607 #endif /* !CONFIG_64BIT */
609 static void __cpuinit
build_adjust_context(u32
**p
, unsigned int ctx
)
611 unsigned int shift
= 4 - (PTE_T_LOG2
+ 1) + PAGE_SHIFT
- 12;
612 unsigned int mask
= (PTRS_PER_PTE
/ 2 - 1) << (PTE_T_LOG2
+ 1);
614 switch (current_cpu_type()) {
631 UASM_i_SRL(p
, ctx
, ctx
, shift
);
632 uasm_i_andi(p
, ctx
, ctx
, mask
);
635 static void __cpuinit
build_get_ptep(u32
**p
, unsigned int tmp
, unsigned int ptr
)
638 * Bug workaround for the Nevada. It seems as if under certain
639 * circumstances the move from cp0_context might produce a
640 * bogus result when the mfc0 instruction and its consumer are
641 * in a different cacheline or a load instruction, probably any
642 * memory reference, is between them.
644 switch (current_cpu_type()) {
646 UASM_i_LW(p
, ptr
, 0, ptr
);
647 GET_CONTEXT(p
, tmp
); /* get context reg */
651 GET_CONTEXT(p
, tmp
); /* get context reg */
652 UASM_i_LW(p
, ptr
, 0, ptr
);
656 build_adjust_context(p
, tmp
);
657 UASM_i_ADDU(p
, ptr
, ptr
, tmp
); /* add in offset */
660 static void __cpuinit
build_update_entries(u32
**p
, unsigned int tmp
,
664 * 64bit address support (36bit on a 32bit CPU) in a 32bit
665 * Kernel is a special case. Only a few CPUs use it.
667 #ifdef CONFIG_64BIT_PHYS_ADDR
668 if (cpu_has_64bits
) {
669 uasm_i_ld(p
, tmp
, 0, ptep
); /* get even pte */
670 uasm_i_ld(p
, ptep
, sizeof(pte_t
), ptep
); /* get odd pte */
671 uasm_i_dsrl(p
, tmp
, tmp
, 6); /* convert to entrylo0 */
672 uasm_i_mtc0(p
, tmp
, C0_ENTRYLO0
); /* load it */
673 uasm_i_dsrl(p
, ptep
, ptep
, 6); /* convert to entrylo1 */
674 uasm_i_mtc0(p
, ptep
, C0_ENTRYLO1
); /* load it */
676 int pte_off_even
= sizeof(pte_t
) / 2;
677 int pte_off_odd
= pte_off_even
+ sizeof(pte_t
);
679 /* The pte entries are pre-shifted */
680 uasm_i_lw(p
, tmp
, pte_off_even
, ptep
); /* get even pte */
681 uasm_i_mtc0(p
, tmp
, C0_ENTRYLO0
); /* load it */
682 uasm_i_lw(p
, ptep
, pte_off_odd
, ptep
); /* get odd pte */
683 uasm_i_mtc0(p
, ptep
, C0_ENTRYLO1
); /* load it */
686 UASM_i_LW(p
, tmp
, 0, ptep
); /* get even pte */
687 UASM_i_LW(p
, ptep
, sizeof(pte_t
), ptep
); /* get odd pte */
689 build_tlb_probe_entry(p
);
690 UASM_i_SRL(p
, tmp
, tmp
, 6); /* convert to entrylo0 */
691 if (r4k_250MHZhwbug())
692 uasm_i_mtc0(p
, 0, C0_ENTRYLO0
);
693 uasm_i_mtc0(p
, tmp
, C0_ENTRYLO0
); /* load it */
694 UASM_i_SRL(p
, ptep
, ptep
, 6); /* convert to entrylo1 */
696 uasm_i_mfc0(p
, tmp
, C0_INDEX
);
697 if (r4k_250MHZhwbug())
698 uasm_i_mtc0(p
, 0, C0_ENTRYLO1
);
699 uasm_i_mtc0(p
, ptep
, C0_ENTRYLO1
); /* load it */
704 * For a 64-bit kernel, we are using the 64-bit XTLB refill exception
705 * because EXL == 0. If we wrap, we can also use the 32 instruction
706 * slots before the XTLB refill exception handler which belong to the
707 * unused TLB refill exception.
709 #define MIPS64_REFILL_INSNS 32
711 static void __cpuinit
build_r4000_tlb_refill_handler(void)
713 u32
*p
= tlb_handler
;
714 struct uasm_label
*l
= labels
;
715 struct uasm_reloc
*r
= relocs
;
717 unsigned int final_len
;
719 memset(tlb_handler
, 0, sizeof(tlb_handler
));
720 memset(labels
, 0, sizeof(labels
));
721 memset(relocs
, 0, sizeof(relocs
));
722 memset(final_handler
, 0, sizeof(final_handler
));
725 * create the plain linear handler
727 if (bcm1250_m3_war()) {
728 UASM_i_MFC0(&p
, K0
, C0_BADVADDR
);
729 UASM_i_MFC0(&p
, K1
, C0_ENTRYHI
);
730 uasm_i_xor(&p
, K0
, K0
, K1
);
731 UASM_i_SRL(&p
, K0
, K0
, PAGE_SHIFT
+ 1);
732 uasm_il_bnez(&p
, &r
, K0
, label_leave
);
733 /* No need for uasm_i_nop */
737 build_get_pmde64(&p
, &l
, &r
, K0
, K1
); /* get pmd in K1 */
739 build_get_pgde32(&p
, K0
, K1
); /* get pgd in K1 */
742 #ifdef CONFIG_HUGETLB_PAGE
743 build_is_huge_pte(&p
, &r
, K0
, K1
, label_tlb_huge_update
);
746 build_get_ptep(&p
, K0
, K1
);
747 build_update_entries(&p
, K0
, K1
);
748 build_tlb_write_entry(&p
, &l
, &r
, tlb_random
);
750 uasm_i_eret(&p
); /* return from trap */
752 #ifdef CONFIG_HUGETLB_PAGE
753 uasm_l_tlb_huge_update(&l
, p
);
754 UASM_i_LW(&p
, K0
, 0, K1
);
755 build_huge_update_entries(&p
, K0
, K1
);
756 build_huge_tlb_write_entry(&p
, &l
, &r
, K0
, tlb_random
);
760 build_get_pgd_vmalloc64(&p
, &l
, &r
, K0
, K1
);
764 * Overflow check: For the 64bit handler, we need at least one
765 * free instruction slot for the wrap-around branch. In worst
766 * case, if the intended insertion point is a delay slot, we
767 * need three, with the second nop'ed and the third being
770 /* Loongson2 ebase is different than r4k, we have more space */
771 #if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
772 if ((p
- tlb_handler
) > 64)
773 panic("TLB refill handler space exceeded");
775 if (((p
- tlb_handler
) > (MIPS64_REFILL_INSNS
* 2) - 1)
776 || (((p
- tlb_handler
) > (MIPS64_REFILL_INSNS
* 2) - 3)
777 && uasm_insn_has_bdelay(relocs
,
778 tlb_handler
+ MIPS64_REFILL_INSNS
- 3)))
779 panic("TLB refill handler space exceeded");
783 * Now fold the handler in the TLB refill handler space.
785 #if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
787 /* Simplest case, just copy the handler. */
788 uasm_copy_handler(relocs
, labels
, tlb_handler
, p
, f
);
789 final_len
= p
- tlb_handler
;
790 #else /* CONFIG_64BIT */
791 f
= final_handler
+ MIPS64_REFILL_INSNS
;
792 if ((p
- tlb_handler
) <= MIPS64_REFILL_INSNS
) {
793 /* Just copy the handler. */
794 uasm_copy_handler(relocs
, labels
, tlb_handler
, p
, f
);
795 final_len
= p
- tlb_handler
;
797 #if defined(CONFIG_HUGETLB_PAGE)
798 const enum label_id ls
= label_tlb_huge_update
;
800 const enum label_id ls
= label_vmalloc
;
806 for (i
= 0; i
< ARRAY_SIZE(labels
) && labels
[i
].lab
!= ls
; i
++)
808 BUG_ON(i
== ARRAY_SIZE(labels
));
809 split
= labels
[i
].addr
;
812 * See if we have overflown one way or the other.
814 if (split
> tlb_handler
+ MIPS64_REFILL_INSNS
||
815 split
< p
- MIPS64_REFILL_INSNS
)
820 * Split two instructions before the end. One
821 * for the branch and one for the instruction
824 split
= tlb_handler
+ MIPS64_REFILL_INSNS
- 2;
827 * If the branch would fall in a delay slot,
828 * we must back up an additional instruction
829 * so that it is no longer in a delay slot.
831 if (uasm_insn_has_bdelay(relocs
, split
- 1))
834 /* Copy first part of the handler. */
835 uasm_copy_handler(relocs
, labels
, tlb_handler
, split
, f
);
836 f
+= split
- tlb_handler
;
840 uasm_l_split(&l
, final_handler
);
841 uasm_il_b(&f
, &r
, label_split
);
842 if (uasm_insn_has_bdelay(relocs
, split
))
845 uasm_copy_handler(relocs
, labels
,
846 split
, split
+ 1, f
);
847 uasm_move_labels(labels
, f
, f
+ 1, -1);
853 /* Copy the rest of the handler. */
854 uasm_copy_handler(relocs
, labels
, split
, p
, final_handler
);
855 final_len
= (f
- (final_handler
+ MIPS64_REFILL_INSNS
)) +
858 #endif /* CONFIG_64BIT */
860 uasm_resolve_relocs(relocs
, labels
);
861 pr_debug("Wrote TLB refill handler (%u instructions).\n",
864 memcpy((void *)ebase
, final_handler
, 0x100);
866 dump_handler((u32
*)ebase
, 64);
870 * TLB load/store/modify handlers.
872 * Only the fastpath gets synthesized at runtime, the slowpath for
873 * do_page_fault remains normal asm.
875 extern void tlb_do_page_fault_0(void);
876 extern void tlb_do_page_fault_1(void);
879 * 128 instructions for the fastpath handler is generous and should
882 #define FASTPATH_SIZE 128
884 u32 handle_tlbl
[FASTPATH_SIZE
] __cacheline_aligned
;
885 u32 handle_tlbs
[FASTPATH_SIZE
] __cacheline_aligned
;
886 u32 handle_tlbm
[FASTPATH_SIZE
] __cacheline_aligned
;
888 static void __cpuinit
889 iPTE_LW(u32
**p
, unsigned int pte
, unsigned int ptr
)
892 # ifdef CONFIG_64BIT_PHYS_ADDR
894 uasm_i_lld(p
, pte
, 0, ptr
);
897 UASM_i_LL(p
, pte
, 0, ptr
);
899 # ifdef CONFIG_64BIT_PHYS_ADDR
901 uasm_i_ld(p
, pte
, 0, ptr
);
904 UASM_i_LW(p
, pte
, 0, ptr
);
908 static void __cpuinit
909 iPTE_SW(u32
**p
, struct uasm_reloc
**r
, unsigned int pte
, unsigned int ptr
,
912 #ifdef CONFIG_64BIT_PHYS_ADDR
913 unsigned int hwmode
= mode
& (_PAGE_VALID
| _PAGE_DIRTY
);
916 uasm_i_ori(p
, pte
, pte
, mode
);
918 # ifdef CONFIG_64BIT_PHYS_ADDR
920 uasm_i_scd(p
, pte
, 0, ptr
);
923 UASM_i_SC(p
, pte
, 0, ptr
);
925 if (r10000_llsc_war())
926 uasm_il_beqzl(p
, r
, pte
, label_smp_pgtable_change
);
928 uasm_il_beqz(p
, r
, pte
, label_smp_pgtable_change
);
930 # ifdef CONFIG_64BIT_PHYS_ADDR
931 if (!cpu_has_64bits
) {
932 /* no uasm_i_nop needed */
933 uasm_i_ll(p
, pte
, sizeof(pte_t
) / 2, ptr
);
934 uasm_i_ori(p
, pte
, pte
, hwmode
);
935 uasm_i_sc(p
, pte
, sizeof(pte_t
) / 2, ptr
);
936 uasm_il_beqz(p
, r
, pte
, label_smp_pgtable_change
);
937 /* no uasm_i_nop needed */
938 uasm_i_lw(p
, pte
, 0, ptr
);
945 # ifdef CONFIG_64BIT_PHYS_ADDR
947 uasm_i_sd(p
, pte
, 0, ptr
);
950 UASM_i_SW(p
, pte
, 0, ptr
);
952 # ifdef CONFIG_64BIT_PHYS_ADDR
953 if (!cpu_has_64bits
) {
954 uasm_i_lw(p
, pte
, sizeof(pte_t
) / 2, ptr
);
955 uasm_i_ori(p
, pte
, pte
, hwmode
);
956 uasm_i_sw(p
, pte
, sizeof(pte_t
) / 2, ptr
);
957 uasm_i_lw(p
, pte
, 0, ptr
);
964 * Check if PTE is present, if not then jump to LABEL. PTR points to
965 * the page table where this PTE is located, PTE will be re-loaded
966 * with it's original value.
968 static void __cpuinit
969 build_pte_present(u32
**p
, struct uasm_reloc
**r
,
970 unsigned int pte
, unsigned int ptr
, enum label_id lid
)
972 uasm_i_andi(p
, pte
, pte
, _PAGE_PRESENT
| _PAGE_READ
);
973 uasm_i_xori(p
, pte
, pte
, _PAGE_PRESENT
| _PAGE_READ
);
974 uasm_il_bnez(p
, r
, pte
, lid
);
975 iPTE_LW(p
, pte
, ptr
);
978 /* Make PTE valid, store result in PTR. */
979 static void __cpuinit
980 build_make_valid(u32
**p
, struct uasm_reloc
**r
, unsigned int pte
,
983 unsigned int mode
= _PAGE_VALID
| _PAGE_ACCESSED
;
985 iPTE_SW(p
, r
, pte
, ptr
, mode
);
989 * Check if PTE can be written to, if not branch to LABEL. Regardless
990 * restore PTE with value from PTR when done.
992 static void __cpuinit
993 build_pte_writable(u32
**p
, struct uasm_reloc
**r
,
994 unsigned int pte
, unsigned int ptr
, enum label_id lid
)
996 uasm_i_andi(p
, pte
, pte
, _PAGE_PRESENT
| _PAGE_WRITE
);
997 uasm_i_xori(p
, pte
, pte
, _PAGE_PRESENT
| _PAGE_WRITE
);
998 uasm_il_bnez(p
, r
, pte
, lid
);
999 iPTE_LW(p
, pte
, ptr
);
1002 /* Make PTE writable, update software status bits as well, then store
1005 static void __cpuinit
1006 build_make_write(u32
**p
, struct uasm_reloc
**r
, unsigned int pte
,
1009 unsigned int mode
= (_PAGE_ACCESSED
| _PAGE_MODIFIED
| _PAGE_VALID
1012 iPTE_SW(p
, r
, pte
, ptr
, mode
);
1016 * Check if PTE can be modified, if not branch to LABEL. Regardless
1017 * restore PTE with value from PTR when done.
1019 static void __cpuinit
1020 build_pte_modifiable(u32
**p
, struct uasm_reloc
**r
,
1021 unsigned int pte
, unsigned int ptr
, enum label_id lid
)
1023 uasm_i_andi(p
, pte
, pte
, _PAGE_WRITE
);
1024 uasm_il_beqz(p
, r
, pte
, lid
);
1025 iPTE_LW(p
, pte
, ptr
);
1029 * R3000 style TLB load/store/modify handlers.
1033 * This places the pte into ENTRYLO0 and writes it with tlbwi.
1036 static void __cpuinit
1037 build_r3000_pte_reload_tlbwi(u32
**p
, unsigned int pte
, unsigned int tmp
)
1039 uasm_i_mtc0(p
, pte
, C0_ENTRYLO0
); /* cp0 delay */
1040 uasm_i_mfc0(p
, tmp
, C0_EPC
); /* cp0 delay */
1043 uasm_i_rfe(p
); /* branch delay */
1047 * This places the pte into ENTRYLO0 and writes it with tlbwi
1048 * or tlbwr as appropriate. This is because the index register
1049 * may have the probe fail bit set as a result of a trap on a
1050 * kseg2 access, i.e. without refill. Then it returns.
1052 static void __cpuinit
1053 build_r3000_tlb_reload_write(u32
**p
, struct uasm_label
**l
,
1054 struct uasm_reloc
**r
, unsigned int pte
,
1057 uasm_i_mfc0(p
, tmp
, C0_INDEX
);
1058 uasm_i_mtc0(p
, pte
, C0_ENTRYLO0
); /* cp0 delay */
1059 uasm_il_bltz(p
, r
, tmp
, label_r3000_write_probe_fail
); /* cp0 delay */
1060 uasm_i_mfc0(p
, tmp
, C0_EPC
); /* branch delay */
1061 uasm_i_tlbwi(p
); /* cp0 delay */
1063 uasm_i_rfe(p
); /* branch delay */
1064 uasm_l_r3000_write_probe_fail(l
, *p
);
1065 uasm_i_tlbwr(p
); /* cp0 delay */
1067 uasm_i_rfe(p
); /* branch delay */
1070 static void __cpuinit
1071 build_r3000_tlbchange_handler_head(u32
**p
, unsigned int pte
,
1074 long pgdc
= (long)pgd_current
;
1076 uasm_i_mfc0(p
, pte
, C0_BADVADDR
);
1077 uasm_i_lui(p
, ptr
, uasm_rel_hi(pgdc
)); /* cp0 delay */
1078 uasm_i_lw(p
, ptr
, uasm_rel_lo(pgdc
), ptr
);
1079 uasm_i_srl(p
, pte
, pte
, 22); /* load delay */
1080 uasm_i_sll(p
, pte
, pte
, 2);
1081 uasm_i_addu(p
, ptr
, ptr
, pte
);
1082 uasm_i_mfc0(p
, pte
, C0_CONTEXT
);
1083 uasm_i_lw(p
, ptr
, 0, ptr
); /* cp0 delay */
1084 uasm_i_andi(p
, pte
, pte
, 0xffc); /* load delay */
1085 uasm_i_addu(p
, ptr
, ptr
, pte
);
1086 uasm_i_lw(p
, pte
, 0, ptr
);
1087 uasm_i_tlbp(p
); /* load delay */
1090 static void __cpuinit
build_r3000_tlb_load_handler(void)
1092 u32
*p
= handle_tlbl
;
1093 struct uasm_label
*l
= labels
;
1094 struct uasm_reloc
*r
= relocs
;
1096 memset(handle_tlbl
, 0, sizeof(handle_tlbl
));
1097 memset(labels
, 0, sizeof(labels
));
1098 memset(relocs
, 0, sizeof(relocs
));
1100 build_r3000_tlbchange_handler_head(&p
, K0
, K1
);
1101 build_pte_present(&p
, &r
, K0
, K1
, label_nopage_tlbl
);
1102 uasm_i_nop(&p
); /* load delay */
1103 build_make_valid(&p
, &r
, K0
, K1
);
1104 build_r3000_tlb_reload_write(&p
, &l
, &r
, K0
, K1
);
1106 uasm_l_nopage_tlbl(&l
, p
);
1107 uasm_i_j(&p
, (unsigned long)tlb_do_page_fault_0
& 0x0fffffff);
1110 if ((p
- handle_tlbl
) > FASTPATH_SIZE
)
1111 panic("TLB load handler fastpath space exceeded");
1113 uasm_resolve_relocs(relocs
, labels
);
1114 pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
1115 (unsigned int)(p
- handle_tlbl
));
1117 dump_handler(handle_tlbl
, ARRAY_SIZE(handle_tlbl
));
1120 static void __cpuinit
build_r3000_tlb_store_handler(void)
1122 u32
*p
= handle_tlbs
;
1123 struct uasm_label
*l
= labels
;
1124 struct uasm_reloc
*r
= relocs
;
1126 memset(handle_tlbs
, 0, sizeof(handle_tlbs
));
1127 memset(labels
, 0, sizeof(labels
));
1128 memset(relocs
, 0, sizeof(relocs
));
1130 build_r3000_tlbchange_handler_head(&p
, K0
, K1
);
1131 build_pte_writable(&p
, &r
, K0
, K1
, label_nopage_tlbs
);
1132 uasm_i_nop(&p
); /* load delay */
1133 build_make_write(&p
, &r
, K0
, K1
);
1134 build_r3000_tlb_reload_write(&p
, &l
, &r
, K0
, K1
);
1136 uasm_l_nopage_tlbs(&l
, p
);
1137 uasm_i_j(&p
, (unsigned long)tlb_do_page_fault_1
& 0x0fffffff);
1140 if ((p
- handle_tlbs
) > FASTPATH_SIZE
)
1141 panic("TLB store handler fastpath space exceeded");
1143 uasm_resolve_relocs(relocs
, labels
);
1144 pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
1145 (unsigned int)(p
- handle_tlbs
));
1147 dump_handler(handle_tlbs
, ARRAY_SIZE(handle_tlbs
));
1150 static void __cpuinit
build_r3000_tlb_modify_handler(void)
1152 u32
*p
= handle_tlbm
;
1153 struct uasm_label
*l
= labels
;
1154 struct uasm_reloc
*r
= relocs
;
1156 memset(handle_tlbm
, 0, sizeof(handle_tlbm
));
1157 memset(labels
, 0, sizeof(labels
));
1158 memset(relocs
, 0, sizeof(relocs
));
1160 build_r3000_tlbchange_handler_head(&p
, K0
, K1
);
1161 build_pte_modifiable(&p
, &r
, K0
, K1
, label_nopage_tlbm
);
1162 uasm_i_nop(&p
); /* load delay */
1163 build_make_write(&p
, &r
, K0
, K1
);
1164 build_r3000_pte_reload_tlbwi(&p
, K0
, K1
);
1166 uasm_l_nopage_tlbm(&l
, p
);
1167 uasm_i_j(&p
, (unsigned long)tlb_do_page_fault_1
& 0x0fffffff);
1170 if ((p
- handle_tlbm
) > FASTPATH_SIZE
)
1171 panic("TLB modify handler fastpath space exceeded");
1173 uasm_resolve_relocs(relocs
, labels
);
1174 pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
1175 (unsigned int)(p
- handle_tlbm
));
1177 dump_handler(handle_tlbm
, ARRAY_SIZE(handle_tlbm
));
1181 * R4000 style TLB load/store/modify handlers.
1183 static void __cpuinit
1184 build_r4000_tlbchange_handler_head(u32
**p
, struct uasm_label
**l
,
1185 struct uasm_reloc
**r
, unsigned int pte
,
1189 build_get_pmde64(p
, l
, r
, pte
, ptr
); /* get pmd in ptr */
1191 build_get_pgde32(p
, pte
, ptr
); /* get pgd in ptr */
1194 #ifdef CONFIG_HUGETLB_PAGE
1196 * For huge tlb entries, pmd doesn't contain an address but
1197 * instead contains the tlb pte. Check the PAGE_HUGE bit and
1198 * see if we need to jump to huge tlb processing.
1200 build_is_huge_pte(p
, r
, pte
, ptr
, label_tlb_huge_update
);
1203 UASM_i_MFC0(p
, pte
, C0_BADVADDR
);
1204 UASM_i_LW(p
, ptr
, 0, ptr
);
1205 UASM_i_SRL(p
, pte
, pte
, PAGE_SHIFT
+ PTE_ORDER
- PTE_T_LOG2
);
1206 uasm_i_andi(p
, pte
, pte
, (PTRS_PER_PTE
- 1) << PTE_T_LOG2
);
1207 UASM_i_ADDU(p
, ptr
, ptr
, pte
);
1210 uasm_l_smp_pgtable_change(l
, *p
);
1212 iPTE_LW(p
, pte
, ptr
); /* get even pte */
1213 if (!m4kc_tlbp_war())
1214 build_tlb_probe_entry(p
);
1217 static void __cpuinit
1218 build_r4000_tlbchange_handler_tail(u32
**p
, struct uasm_label
**l
,
1219 struct uasm_reloc
**r
, unsigned int tmp
,
1222 uasm_i_ori(p
, ptr
, ptr
, sizeof(pte_t
));
1223 uasm_i_xori(p
, ptr
, ptr
, sizeof(pte_t
));
1224 build_update_entries(p
, tmp
, ptr
);
1225 build_tlb_write_entry(p
, l
, r
, tlb_indexed
);
1226 uasm_l_leave(l
, *p
);
1227 uasm_i_eret(p
); /* return from trap */
1230 build_get_pgd_vmalloc64(p
, l
, r
, tmp
, ptr
);
1234 static void __cpuinit
build_r4000_tlb_load_handler(void)
1236 u32
*p
= handle_tlbl
;
1237 struct uasm_label
*l
= labels
;
1238 struct uasm_reloc
*r
= relocs
;
1240 memset(handle_tlbl
, 0, sizeof(handle_tlbl
));
1241 memset(labels
, 0, sizeof(labels
));
1242 memset(relocs
, 0, sizeof(relocs
));
1244 if (bcm1250_m3_war()) {
1245 UASM_i_MFC0(&p
, K0
, C0_BADVADDR
);
1246 UASM_i_MFC0(&p
, K1
, C0_ENTRYHI
);
1247 uasm_i_xor(&p
, K0
, K0
, K1
);
1248 UASM_i_SRL(&p
, K0
, K0
, PAGE_SHIFT
+ 1);
1249 uasm_il_bnez(&p
, &r
, K0
, label_leave
);
1250 /* No need for uasm_i_nop */
1253 build_r4000_tlbchange_handler_head(&p
, &l
, &r
, K0
, K1
);
1254 build_pte_present(&p
, &r
, K0
, K1
, label_nopage_tlbl
);
1255 if (m4kc_tlbp_war())
1256 build_tlb_probe_entry(&p
);
1257 build_make_valid(&p
, &r
, K0
, K1
);
1258 build_r4000_tlbchange_handler_tail(&p
, &l
, &r
, K0
, K1
);
1260 #ifdef CONFIG_HUGETLB_PAGE
1262 * This is the entry point when build_r4000_tlbchange_handler_head
1263 * spots a huge page.
1265 uasm_l_tlb_huge_update(&l
, p
);
1266 iPTE_LW(&p
, K0
, K1
);
1267 build_pte_present(&p
, &r
, K0
, K1
, label_nopage_tlbl
);
1268 build_tlb_probe_entry(&p
);
1269 uasm_i_ori(&p
, K0
, K0
, (_PAGE_ACCESSED
| _PAGE_VALID
));
1270 build_huge_handler_tail(&p
, &r
, &l
, K0
, K1
);
1273 uasm_l_nopage_tlbl(&l
, p
);
1274 uasm_i_j(&p
, (unsigned long)tlb_do_page_fault_0
& 0x0fffffff);
1277 if ((p
- handle_tlbl
) > FASTPATH_SIZE
)
1278 panic("TLB load handler fastpath space exceeded");
1280 uasm_resolve_relocs(relocs
, labels
);
1281 pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
1282 (unsigned int)(p
- handle_tlbl
));
1284 dump_handler(handle_tlbl
, ARRAY_SIZE(handle_tlbl
));
1287 static void __cpuinit
build_r4000_tlb_store_handler(void)
1289 u32
*p
= handle_tlbs
;
1290 struct uasm_label
*l
= labels
;
1291 struct uasm_reloc
*r
= relocs
;
1293 memset(handle_tlbs
, 0, sizeof(handle_tlbs
));
1294 memset(labels
, 0, sizeof(labels
));
1295 memset(relocs
, 0, sizeof(relocs
));
1297 build_r4000_tlbchange_handler_head(&p
, &l
, &r
, K0
, K1
);
1298 build_pte_writable(&p
, &r
, K0
, K1
, label_nopage_tlbs
);
1299 if (m4kc_tlbp_war())
1300 build_tlb_probe_entry(&p
);
1301 build_make_write(&p
, &r
, K0
, K1
);
1302 build_r4000_tlbchange_handler_tail(&p
, &l
, &r
, K0
, K1
);
1304 #ifdef CONFIG_HUGETLB_PAGE
1306 * This is the entry point when
1307 * build_r4000_tlbchange_handler_head spots a huge page.
1309 uasm_l_tlb_huge_update(&l
, p
);
1310 iPTE_LW(&p
, K0
, K1
);
1311 build_pte_writable(&p
, &r
, K0
, K1
, label_nopage_tlbs
);
1312 build_tlb_probe_entry(&p
);
1313 uasm_i_ori(&p
, K0
, K0
,
1314 _PAGE_ACCESSED
| _PAGE_MODIFIED
| _PAGE_VALID
| _PAGE_DIRTY
);
1315 build_huge_handler_tail(&p
, &r
, &l
, K0
, K1
);
1318 uasm_l_nopage_tlbs(&l
, p
);
1319 uasm_i_j(&p
, (unsigned long)tlb_do_page_fault_1
& 0x0fffffff);
1322 if ((p
- handle_tlbs
) > FASTPATH_SIZE
)
1323 panic("TLB store handler fastpath space exceeded");
1325 uasm_resolve_relocs(relocs
, labels
);
1326 pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
1327 (unsigned int)(p
- handle_tlbs
));
1329 dump_handler(handle_tlbs
, ARRAY_SIZE(handle_tlbs
));
1332 static void __cpuinit
build_r4000_tlb_modify_handler(void)
1334 u32
*p
= handle_tlbm
;
1335 struct uasm_label
*l
= labels
;
1336 struct uasm_reloc
*r
= relocs
;
1338 memset(handle_tlbm
, 0, sizeof(handle_tlbm
));
1339 memset(labels
, 0, sizeof(labels
));
1340 memset(relocs
, 0, sizeof(relocs
));
1342 build_r4000_tlbchange_handler_head(&p
, &l
, &r
, K0
, K1
);
1343 build_pte_modifiable(&p
, &r
, K0
, K1
, label_nopage_tlbm
);
1344 if (m4kc_tlbp_war())
1345 build_tlb_probe_entry(&p
);
1346 /* Present and writable bits set, set accessed and dirty bits. */
1347 build_make_write(&p
, &r
, K0
, K1
);
1348 build_r4000_tlbchange_handler_tail(&p
, &l
, &r
, K0
, K1
);
1350 #ifdef CONFIG_HUGETLB_PAGE
1352 * This is the entry point when
1353 * build_r4000_tlbchange_handler_head spots a huge page.
1355 uasm_l_tlb_huge_update(&l
, p
);
1356 iPTE_LW(&p
, K0
, K1
);
1357 build_pte_modifiable(&p
, &r
, K0
, K1
, label_nopage_tlbm
);
1358 build_tlb_probe_entry(&p
);
1359 uasm_i_ori(&p
, K0
, K0
,
1360 _PAGE_ACCESSED
| _PAGE_MODIFIED
| _PAGE_VALID
| _PAGE_DIRTY
);
1361 build_huge_handler_tail(&p
, &r
, &l
, K0
, K1
);
1364 uasm_l_nopage_tlbm(&l
, p
);
1365 uasm_i_j(&p
, (unsigned long)tlb_do_page_fault_1
& 0x0fffffff);
1368 if ((p
- handle_tlbm
) > FASTPATH_SIZE
)
1369 panic("TLB modify handler fastpath space exceeded");
1371 uasm_resolve_relocs(relocs
, labels
);
1372 pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
1373 (unsigned int)(p
- handle_tlbm
));
1375 dump_handler(handle_tlbm
, ARRAY_SIZE(handle_tlbm
));
1378 void __cpuinit
build_tlb_refill_handler(void)
1381 * The refill handler is generated per-CPU, multi-node systems
1382 * may have local storage for it. The other handlers are only
1385 static int run_once
= 0;
1387 switch (current_cpu_type()) {
1395 build_r3000_tlb_refill_handler();
1397 build_r3000_tlb_load_handler();
1398 build_r3000_tlb_store_handler();
1399 build_r3000_tlb_modify_handler();
1406 panic("No R6000 TLB refill handler yet");
1410 panic("No R8000 TLB refill handler yet");
1414 build_r4000_tlb_refill_handler();
1416 build_r4000_tlb_load_handler();
1417 build_r4000_tlb_store_handler();
1418 build_r4000_tlb_modify_handler();
1424 void __cpuinit
flush_tlb_handlers(void)
1426 local_flush_icache_range((unsigned long)handle_tlbl
,
1427 (unsigned long)handle_tlbl
+ sizeof(handle_tlbl
));
1428 local_flush_icache_range((unsigned long)handle_tlbs
,
1429 (unsigned long)handle_tlbs
+ sizeof(handle_tlbs
));
1430 local_flush_icache_range((unsigned long)handle_tlbm
,
1431 (unsigned long)handle_tlbm
+ sizeof(handle_tlbm
));