/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Synthesize TLB refill handlers at runtime.
 *
 * Copyright (C) 2004, 2005, 2006, 2008  Thiemo Seufer
 * Copyright (C) 2005, 2007, 2008, 2009  Maciej W. Rozycki
 * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2008, 2009 Cavium Networks, Inc.
 *
 * ... and the days got worse and worse and now you see
 * I've gone completely out of my mind.
 *
 * They're coming to take me away haha
 * they're coming to take me away hoho hihi haha
 * to the funny farm where code is beautiful all the time ...
 *
 * (Condolences to Napoleon XIV)
 */
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/init.h>

#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/uasm.h>
static inline int r45k_bvahwbug(void)
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}

static inline int r4k_250MHZhwbug(void)
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}

static inline int __maybe_unused bcm1250_m3_war(void)
{
	return BCM1250_M3_WAR;
}

static inline int __maybe_unused r10000_llsc_war(void)
{
	return R10000_LLSC_WAR;
}
/*
 * Found by experiment: At least some revisions of the 4kc throw under
 * some circumstances a machine check exception, triggered by invalid
 * values in the index register.  Delaying the tlbp instruction until
 * after the next branch, plus adding an additional nop in front of
 * tlbwi/tlbwr avoids the invalid index register values.  Nobody knows
 * why; it's not an issue caused by the core RTL.
 */
static int __cpuinit m4kc_tlbp_war(void)
{
	return (current_cpu_data.processor_id & 0xffff00) ==
	       (PRID_COMP_MIPS | PRID_IMP_4KC);
}
/* Handle labels (which must be positive integers). */
enum label_id {
	label_second_part = 1,
	label_leave,
	label_vmalloc,
	label_vmalloc_done,
	label_tlbw_hazard,
	label_split,
	label_nopage_tlbl,
	label_nopage_tlbs,
	label_nopage_tlbm,
	label_smp_pgtable_change,
	label_r3000_write_probe_fail,
#ifdef CONFIG_HUGETLB_PAGE
	label_tlb_huge_update,
#endif
};

UASM_L_LA(_second_part)
UASM_L_LA(_leave)
UASM_L_LA(_vmalloc)
UASM_L_LA(_vmalloc_done)
UASM_L_LA(_tlbw_hazard)
UASM_L_LA(_split)
UASM_L_LA(_nopage_tlbl)
UASM_L_LA(_nopage_tlbs)
UASM_L_LA(_nopage_tlbm)
UASM_L_LA(_smp_pgtable_change)
UASM_L_LA(_r3000_write_probe_fail)
#ifdef CONFIG_HUGETLB_PAGE
UASM_L_LA(_tlb_huge_update)
#endif
/*
 * For debug purposes.
 */
static inline void dump_handler(const u32 *handler, int count)
{
	int i;

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");

	for (i = 0; i < count; i++)
		pr_debug("\t%p\t.word 0x%08x\n", &handler[i], handler[i]);

	pr_debug("\t.set pop\n");
}
/* The only general purpose registers allowed in TLB handlers. */
#define K0		26
#define K1		27

/* Some CP0 registers */
#define C0_INDEX	0, 0
#define C0_ENTRYLO0	2, 0
#define C0_TCBIND	2, 2
#define C0_ENTRYLO1	3, 0
#define C0_CONTEXT	4, 0
#define C0_PAGEMASK	5, 0
#define C0_BADVADDR	8, 0
#define C0_ENTRYHI	10, 0
#define C0_EPC		14, 0
#define C0_XCONTEXT	20, 0

#ifdef CONFIG_64BIT
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT)
#else
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT)
#endif
/*
 * The worst case length of the handler is around 18 instructions for
 * R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
 * Maximum space available is 32 instructions for R3000 and 64
 * instructions for R4000.
 *
 * We deliberately chose a buffer size of 128, so we won't scribble
 * over anything important on overflow before we panic.
 */
static u32 tlb_handler[128] __cpuinitdata;

/* simply assume worst case size for labels and relocs */
static struct uasm_label labels[128] __cpuinitdata;
static struct uasm_reloc relocs[128] __cpuinitdata;
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
/*
 * CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and lack of pgd_current,
 * we cannot do r3000 under these circumstances.
 */

/*
 * The R3000 TLB handler is simple.
 */
static void __cpuinit build_r3000_tlb_refill_handler(void)
{
	long pgdc = (long)pgd_current;
	u32 *p;

	memset(tlb_handler, 0, sizeof(tlb_handler));
	p = tlb_handler;
	uasm_i_mfc0(&p, K0, C0_BADVADDR);
	uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */
	uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1);
	uasm_i_srl(&p, K0, K0, 22); /* load delay */
	uasm_i_sll(&p, K0, K0, 2);
	uasm_i_addu(&p, K1, K1, K0);
	uasm_i_mfc0(&p, K0, C0_CONTEXT);
	uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */
	uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */
	uasm_i_addu(&p, K1, K1, K0);
	uasm_i_lw(&p, K0, 0, K1);
	uasm_i_nop(&p); /* load delay */
	uasm_i_mtc0(&p, K0, C0_ENTRYLO0);
	uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
	uasm_i_tlbwr(&p); /* cp0 delay */
	uasm_i_jr(&p, K1);
	uasm_i_rfe(&p); /* branch delay */

	if (p > tlb_handler + 32)
		panic("TLB refill handler space exceeded");

	pr_debug("Wrote TLB refill handler (%u instructions).\n",
		 (unsigned int)(p - tlb_handler));
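	/*
	 * Note: the 0x80-byte copy below covers exactly the 32
	 * instruction slots (32 x 4 bytes) of the R3000 refill vector
	 * at ebase, matching the overflow check above.
	 */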
	memcpy((void *)ebase, tlb_handler, 0x80);

	dump_handler((u32 *)ebase, 32);
}
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */

/*
 * The R4000 TLB handler is much more complicated. We have two
 * consecutive handler areas with 32 instructions space each.
 * Since they aren't used at the same time, we can overflow in the
 * other one. To keep things simple, we first assume linear space,
 * then we relocate it to the final handler layout as needed.
 */
static u32 final_handler[64] __cpuinitdata;
/*
 * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
 * 2. A timing hazard exists for the TLBP instruction.
 *
 *	stalling_instruction
 *	TLBP
 *
 * The JTLB is being read for the TLBP throughout the stall generated by the
 * previous instruction. This is not really correct as the stalling instruction
 * can modify the address used to access the JTLB.  The failure symptom is that
 * the TLBP instruction will use an address created for the stalling instruction
 * and not the address held in C0_ENHI and thus report the wrong results.
 *
 * The software work-around is to not allow the instruction preceding the TLBP
 * to stall - make it an NOP or some other instruction guaranteed not to stall.
 *
 * Errata 2 will not be fixed.  This errata is also on the R5000.
 *
 * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
 */
static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
{
	switch (current_cpu_type()) {
	/* Found by experiment:  R4600 v2.0/R4700 needs this, too.  */
	case CPU_R4600:
	case CPU_R4700:
	case CPU_R5000:
	case CPU_R5000A:
		uasm_i_nop(p);
		uasm_i_tlbp(p);
		break;

	default:
		uasm_i_tlbp(p);
		break;
	}
}
/*
 * Write random or indexed TLB entry, and care about the hazards from
 * the preceding mtc0 and for the following eret.
 */
enum tlb_write_entry { tlb_random, tlb_indexed };

static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
					    struct uasm_reloc **r,
					    enum tlb_write_entry wmode)
{
	void(*tlbw)(u32 **) = NULL;

	switch (wmode) {
	case tlb_random: tlbw = uasm_i_tlbwr; break;
	case tlb_indexed: tlbw = uasm_i_tlbwi; break;
	}

	if (cpu_has_mips_r2) {
		if (cpu_has_mips_r2_exec_hazard)
			uasm_i_ehb(p);
		tlbw(p);
		return;
	}

	switch (current_cpu_type()) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		/*
		 * This branch uses up a mtc0 hazard nop slot and saves
		 * two nops after the tlbw instruction.
		 */
		uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
		tlbw(p);
		uasm_l_tlbw_hazard(l, *p);
		uasm_i_nop(p);
		break;

	case CPU_NEVADA:
		uasm_i_nop(p); /* QED specifies 2 nops hazard */
		/*
		 * This branch uses up a mtc0 hazard nop slot and saves
		 * a nop after the tlbw instruction.
		 */
		uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
		tlbw(p);
		uasm_l_tlbw_hazard(l, *p);
		break;

	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		/*
		 * When the JTLB is updated by tlbwi or tlbwr, a subsequent
		 * use of the JTLB for instructions should not occur for 4
		 * cpu cycles and use for data translations should not occur
		 * for 3 cpu cycles.
		 */
		uasm_i_nop(p);
		uasm_i_nop(p);
		tlbw(p);
		uasm_i_nop(p);
		uasm_i_nop(p);
		break;

	default:
		panic("No TLB refill handler yet (CPU type: %d)",
		      current_cpu_data.cputype);
		break;
	}
}
#ifdef CONFIG_HUGETLB_PAGE
static __cpuinit void build_huge_tlb_write_entry(u32 **p,
						 struct uasm_label **l,
						 struct uasm_reloc **r,
						 unsigned int tmp,
						 enum tlb_write_entry wmode)
{
	/* Set huge page tlb entry size */
	uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
	uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
	uasm_i_mtc0(p, tmp, C0_PAGEMASK);

	build_tlb_write_entry(p, l, r, wmode);

	/* Reset default page size */
	if (PM_DEFAULT_MASK >> 16) {
		uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
		uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
		uasm_il_b(p, r, label_leave);
		uasm_i_mtc0(p, tmp, C0_PAGEMASK);
	} else if (PM_DEFAULT_MASK) {
		uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
		uasm_il_b(p, r, label_leave);
		uasm_i_mtc0(p, tmp, C0_PAGEMASK);
	} else {
		uasm_il_b(p, r, label_leave);
		uasm_i_mtc0(p, 0, C0_PAGEMASK);
	}
}
/*
 * Check if Huge PTE is present, if so then jump to LABEL.
 */
static void __cpuinit
build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
		  unsigned int pmd, int lid)
{
	UASM_i_LW(p, tmp, 0, pmd);
	uasm_i_andi(p, tmp, tmp, _PAGE_HUGE);
	uasm_il_bnez(p, r, tmp, lid);
}
static __cpuinit void build_huge_update_entries(u32 **p,
						unsigned int pte,
						unsigned int tmp)
{
	int small_sequence;

	/*
	 * A huge PTE describes an area the size of the
	 * configured huge page size. This is twice the
	 * size of the large TLB entry we intend to use.
	 * A TLB entry half the size of the configured
	 * huge page size is configured into entrylo0
	 * and entrylo1 to cover the contiguous huge PTE
	 * address space.
	 */
	small_sequence = (HPAGE_SIZE >> 7) < 0x10000;

	/* We can clobber tmp.  It isn't used after this.*/
	if (!small_sequence)
		uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));

	UASM_i_SRL(p, pte, pte, 6); /* convert to entrylo */
	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* load it */
	/* convert to entrylo1 */
	if (small_sequence)
		UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);
	else
		UASM_i_ADDU(p, pte, pte, tmp);

	uasm_i_mtc0(p, pte, C0_ENTRYLO1); /* load it */
}
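/*
 * A note on the arithmetic above: ptes are kept pre-shifted, so the
 * right shift by 6 turns a pte directly into EntryLo format.  The
 * delta between entrylo0 and entrylo1 is then half the huge page size
 * scaled into EntryLo units, i.e. HPAGE_SIZE >> 7; when that constant
 * fits in addiu's 16-bit immediate (small_sequence) it is added
 * directly, otherwise it is first built in tmp with lui.
 */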
static __cpuinit void build_huge_handler_tail(u32 **p,
					      struct uasm_reloc **r,
					      struct uasm_label **l,
					      unsigned int pte,
					      unsigned int ptr)
{
#ifdef CONFIG_SMP
	UASM_i_SC(p, pte, 0, ptr);
	uasm_il_beqz(p, r, pte, label_tlb_huge_update);
	UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */
#else
	UASM_i_SW(p, pte, 0, ptr);
#endif
	build_huge_update_entries(p, pte, ptr);
	build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed);
}

#endif /* CONFIG_HUGETLB_PAGE */
#ifdef CONFIG_64BIT
/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pmd entry.
 */
static void __cpuinit
build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
		 unsigned int tmp, unsigned int ptr)
{
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
	long pgdc = (long)pgd_current;
#endif

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	uasm_i_dmfc0(p, tmp, C0_BADVADDR);
	uasm_il_bltz(p, r, tmp, label_vmalloc);
	/* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */

#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
	/*
	 * &pgd << 11 stored in CONTEXT [23..63].
	 */
	UASM_i_MFC0(p, ptr, C0_CONTEXT);
	uasm_i_dins(p, ptr, 0, 0, 23); /* Clear lower 23 bits of context. */
	uasm_i_ori(p, ptr, ptr, 0x540); /* 1 0 1 0 1 << 6 xkphys cached */
	uasm_i_drotr(p, ptr, ptr, 11);
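	/*
	 * At this point ptr holds a ready-to-use pgd pointer: dins
	 * cleared the low 23 bits, ori planted 0x540 in the low bits,
	 * and the rotate by 11 moved that constant into the top bits
	 * (selecting the cached xkphys segment, per the comment above)
	 * while bringing &pgd down from its << 11 position.
	 */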
#elif defined(CONFIG_SMP)
# ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC uses TCBind value as "CPU" index
	 */
	uasm_i_mfc0(p, ptr, C0_TCBIND);
	uasm_i_dsrl(p, ptr, ptr, 19);
# else
	/*
	 * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
	 * stored in CONTEXT.
	 */
	uasm_i_dmfc0(p, ptr, C0_CONTEXT);
	uasm_i_dsrl(p, ptr, ptr, 23);
# endif
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_daddu(p, ptr, ptr, tmp);
	uasm_i_dmfc0(p, tmp, C0_BADVADDR);
	uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
#else
	UASM_i_LA_mostly(p, ptr, pgdc);
	uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
#endif

	uasm_l_vmalloc_done(l, *p);

	if (PGDIR_SHIFT - 3 < 32)		/* get pgd offset in bytes */
		uasm_i_dsrl(p, tmp, tmp, PGDIR_SHIFT - 3);
	else
		uasm_i_dsrl32(p, tmp, tmp, PGDIR_SHIFT - 3 - 32);

	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1) << 3);
	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
	uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
	uasm_i_dsrl(p, tmp, tmp, PMD_SHIFT - 3); /* get pmd offset in bytes */
	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1) << 3);
	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
}
/*
 * BVADDR is the faulting address, PTR is scratch.
 * PTR will hold the pgd for vmalloc.
 */
static void __cpuinit
build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
			unsigned int bvaddr, unsigned int ptr)
{
	long swpd = (long)swapper_pg_dir;

	uasm_l_vmalloc(l, *p);

	if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
		uasm_il_b(p, r, label_vmalloc_done);
		uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
	} else {
		UASM_i_LA_mostly(p, ptr, swpd);
		uasm_il_b(p, r, label_vmalloc_done);
		if (uasm_in_compat_space_p(swpd))
			uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
		else
			uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
	}
}
#else /* !CONFIG_64BIT */

/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pgd entry.
 */
static void __cpuinit __maybe_unused
build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	/* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
#ifdef CONFIG_SMP
# ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC uses TCBind value as "CPU" index
	 */
	uasm_i_mfc0(p, ptr, C0_TCBIND);
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_srl(p, ptr, ptr, 19);
# else
	/*
	 * smp_processor_id() << 3 is stored in CONTEXT.
	 */
	uasm_i_mfc0(p, ptr, C0_CONTEXT);
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_srl(p, ptr, ptr, 23);
# endif
	uasm_i_addu(p, ptr, tmp, ptr);
#else
	UASM_i_LA_mostly(p, ptr, pgdc);
#endif
	uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
	uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
	uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
	uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
}

#endif /* !CONFIG_64BIT */
static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
{
	unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
	unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);

	switch (current_cpu_type()) {
	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4131:
	case CPU_VR4181:
	case CPU_VR4181A:
	case CPU_VR4133:
		shift += 2;
		break;

	default:
		break;
	}

	if (shift)
		UASM_i_SRL(p, ctx, ctx, shift);
	uasm_i_andi(p, ctx, ctx, mask);
}
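/*
 * The shift above rescales the Context register, which holds BadVPN2
 * (the faulting address divided by a page pair) starting at bit 4, so
 * that after the andi the result is the byte offset of the even/odd
 * pte pair inside the page table, scaled by 1 << (PTE_T_LOG2 + 1)
 * bytes per pair.  For 4K pages and 4-byte ptes the shift works out
 * to 1.
 */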
static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
{
	/*
	 * Bug workaround for the Nevada. It seems as if under certain
	 * circumstances the move from cp0_context might produce a
	 * bogus result when the mfc0 instruction and its consumer are
	 * in a different cacheline or a load instruction, probably any
	 * memory reference, is between them.
	 */
	switch (current_cpu_type()) {
	case CPU_NEVADA:
		UASM_i_LW(p, ptr, 0, ptr);
		GET_CONTEXT(p, tmp); /* get context reg */
		break;

	default:
		GET_CONTEXT(p, tmp); /* get context reg */
		UASM_i_LW(p, ptr, 0, ptr);
		break;
	}

	build_adjust_context(p, tmp);
	UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
}
static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
					   unsigned int ptep)
{
	/*
	 * 64bit address support (36bit on a 32bit CPU) in a 32bit
	 * Kernel is a special case. Only a few CPUs use it.
	 */
#ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits) {
		uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
		uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
		uasm_i_dsrl(p, tmp, tmp, 6); /* convert to entrylo0 */
		uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
		uasm_i_dsrl(p, ptep, ptep, 6); /* convert to entrylo1 */
		uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
	} else {
		int pte_off_even = sizeof(pte_t) / 2;
		int pte_off_odd = pte_off_even + sizeof(pte_t);

		/* The pte entries are pre-shifted */
		uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
		uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
		uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
		uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
	}
#else
	UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
	UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
	if (r45k_bvahwbug())
		build_tlb_probe_entry(p);
	UASM_i_SRL(p, tmp, tmp, 6); /* convert to entrylo0 */
	if (r4k_250MHZhwbug())
		uasm_i_mtc0(p, 0, C0_ENTRYLO0);
	uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
	UASM_i_SRL(p, ptep, ptep, 6); /* convert to entrylo1 */
	if (r45k_bvahwbug())
		uasm_i_mfc0(p, tmp, C0_INDEX);
	if (r4k_250MHZhwbug())
		uasm_i_mtc0(p, 0, C0_ENTRYLO1);
	uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
#endif
}
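/*
 * In all three variants above the conversion to EntryLo is a plain
 * right shift by 6: the pte formats used here are pre-shifted (see
 * the comment in the 32-bit path) so the PFN and C/D/V/G fields land
 * in place without any masking.
 */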
/*
 * For a 64-bit kernel, we are using the 64-bit XTLB refill exception
 * because EXL == 0.  If we wrap, we can also use the 32 instruction
 * slots before the XTLB refill exception handler which belong to the
 * unused TLB refill exception.
 */
#define MIPS64_REFILL_INSNS 32

static void __cpuinit build_r4000_tlb_refill_handler(void)
{
	u32 *p = tlb_handler;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	u32 *f;
	unsigned int final_len;

	memset(tlb_handler, 0, sizeof(tlb_handler));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));
	memset(final_handler, 0, sizeof(final_handler));
	/*
	 * create the plain linear handler
	 */
	if (bcm1250_m3_war()) {
		UASM_i_MFC0(&p, K0, C0_BADVADDR);
		UASM_i_MFC0(&p, K1, C0_ENTRYHI);
		uasm_i_xor(&p, K0, K0, K1);
		UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
		uasm_il_bnez(&p, &r, K0, label_leave);
		/* No need for uasm_i_nop */
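		/*
		 * The xor/srl above compares the VPN2 halves of
		 * BadVAddr and EntryHi; if they disagree this is a
		 * spurious fault (the BCM1250 M3 erratum) and the bnez
		 * exits through label_leave without writing the TLB.
		 */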
	}

#ifdef CONFIG_64BIT
	build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
#else
	build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
#endif

#ifdef CONFIG_HUGETLB_PAGE
	build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update);
#endif

	build_get_ptep(&p, K0, K1);
	build_update_entries(&p, K0, K1);
	build_tlb_write_entry(&p, &l, &r, tlb_random);
	uasm_l_leave(&l, *p);
	uasm_i_eret(&p); /* return from trap */

#ifdef CONFIG_HUGETLB_PAGE
	uasm_l_tlb_huge_update(&l, p);
	UASM_i_LW(&p, K0, 0, K1);
	build_huge_update_entries(&p, K0, K1);
	build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random);
#endif

#ifdef CONFIG_64BIT
	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1);
#endif
	/*
	 * Overflow check: For the 64bit handler, we need at least one
	 * free instruction slot for the wrap-around branch. In worst
	 * case, if the intended insertion point is a delay slot, we
	 * need three, with the second nop'ed and the third being
	 * unused.
	 */
	/* Loongson2 ebase is different than r4k, we have more space */
#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
	if ((p - tlb_handler) > 64)
		panic("TLB refill handler space exceeded");
#else
	if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
	    || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
		&& uasm_insn_has_bdelay(relocs,
					tlb_handler + MIPS64_REFILL_INSNS - 3)))
		panic("TLB refill handler space exceeded");
#endif
	/*
	 * Now fold the handler in the TLB refill handler space.
	 */
#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
	f = final_handler;
	/* Simplest case, just copy the handler. */
	uasm_copy_handler(relocs, labels, tlb_handler, p, f);
	final_len = p - tlb_handler;
#else /* CONFIG_64BIT */
	f = final_handler + MIPS64_REFILL_INSNS;
	if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
		/* Just copy the handler. */
		uasm_copy_handler(relocs, labels, tlb_handler, p, f);
		final_len = p - tlb_handler;
	} else {
#if defined(CONFIG_HUGETLB_PAGE)
		const enum label_id ls = label_tlb_huge_update;
#else
		const enum label_id ls = label_vmalloc;
#endif
		u32 *split;
		int ov = 0;
		int i;

		for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++)
			;
		BUG_ON(i == ARRAY_SIZE(labels));
		split = labels[i].addr;

		/*
		 * See if we have overflown one way or the other.
		 */
		if (split > tlb_handler + MIPS64_REFILL_INSNS ||
		    split < p - MIPS64_REFILL_INSNS)
			ov = 1;

		if (ov) {
			/*
			 * Split two instructions before the end.  One
			 * for the branch and one for the instruction
			 * in the delay slot.
			 */
			split = tlb_handler + MIPS64_REFILL_INSNS - 2;

			/*
			 * If the branch would fall in a delay slot,
			 * we must back up an additional instruction
			 * so that it is no longer in a delay slot.
			 */
			if (uasm_insn_has_bdelay(relocs, split - 1))
				split--;
		}
		/* Copy first part of the handler. */
		uasm_copy_handler(relocs, labels, tlb_handler, split, f);
		f += split - tlb_handler;

		if (ov) {
			/* Insert branch. */
			uasm_l_split(&l, final_handler);
			uasm_il_b(&f, &r, label_split);
			if (uasm_insn_has_bdelay(relocs, split))
				uasm_i_nop(&f);
			else {
				uasm_copy_handler(relocs, labels,
						  split, split + 1, f);
				uasm_move_labels(labels, f, f + 1, -1);
				f++;
				split++;
			}
		}

		/* Copy the rest of the handler. */
		uasm_copy_handler(relocs, labels, split, p, final_handler);
		final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
			    (p - split);
	}
#endif /* CONFIG_64BIT */
	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB refill handler (%u instructions).\n",
		 final_len);

	memcpy((void *)ebase, final_handler, 0x100);

	dump_handler((u32 *)ebase, 64);
}
/*
 * TLB load/store/modify handlers.
 *
 * Only the fastpath gets synthesized at runtime, the slowpath for
 * do_page_fault remains normal asm.
 */
extern void tlb_do_page_fault_0(void);
extern void tlb_do_page_fault_1(void);

/*
 * 128 instructions for the fastpath handler is generous and should
 * never be exceeded.
 */
#define FASTPATH_SIZE 128

u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;
static void __cpuinit
iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
{
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_lld(p, pte, 0, ptr);
	else
# endif
		UASM_i_LL(p, pte, 0, ptr);
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_ld(p, pte, 0, ptr);
	else
# endif
		UASM_i_LW(p, pte, 0, ptr);
#endif
}
static void __cpuinit
iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
	unsigned int mode)
{
#ifdef CONFIG_64BIT_PHYS_ADDR
	unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
#endif

	uasm_i_ori(p, pte, pte, mode);
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_scd(p, pte, 0, ptr);
	else
# endif
		UASM_i_SC(p, pte, 0, ptr);

	if (r10000_llsc_war())
		uasm_il_beqzl(p, r, pte, label_smp_pgtable_change);
	else
		uasm_il_beqz(p, r, pte, label_smp_pgtable_change);

# ifdef CONFIG_64BIT_PHYS_ADDR
	if (!cpu_has_64bits) {
		/* no uasm_i_nop needed */
		uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_ori(p, pte, pte, hwmode);
		uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
		/* no uasm_i_nop needed */
		uasm_i_lw(p, pte, 0, ptr);
	} else
		uasm_i_nop(p);
# else
	uasm_i_nop(p);
# endif
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_sd(p, pte, 0, ptr);
	else
# endif
		UASM_i_SW(p, pte, 0, ptr);

# ifdef CONFIG_64BIT_PHYS_ADDR
	if (!cpu_has_64bits) {
		uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_ori(p, pte, pte, hwmode);
		uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_lw(p, pte, 0, ptr);
	}
# endif
#endif
}
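/*
 * In the SMP paths above the sc/scd may fail if another CPU modified
 * the page table entry after our iPTE_LW; the beqz/beqzl back to
 * label_smp_pgtable_change (placed at the iPTE_LW in the handler
 * head) simply retries with a freshly loaded pte.
 */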
/*
 * Check if PTE is present, if not then jump to LABEL. PTR points to
 * the page table where this PTE is located, PTE will be re-loaded
 * with its original value.
 */
static void __cpuinit
build_pte_present(u32 **p, struct uasm_reloc **r,
		  unsigned int pte, unsigned int ptr, enum label_id lid)
{
	uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
	uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
	uasm_il_bnez(p, r, pte, lid);
	iPTE_LW(p, pte, ptr);
}
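/*
 * The andi/xori pair above checks two bits with two instructions:
 * andi keeps only _PAGE_PRESENT and _PAGE_READ, and xori clears them
 * again only if both were set, so the bnez takes the fault path
 * whenever either bit is missing.
 */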
/* Make PTE valid, store result in PTR. */
static void __cpuinit
build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
		 unsigned int ptr)
{
	unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;

	iPTE_SW(p, r, pte, ptr, mode);
}
/*
 * Check if PTE can be written to, if not branch to LABEL. Regardless
 * restore PTE with value from PTR when done.
 */
static void __cpuinit
build_pte_writable(u32 **p, struct uasm_reloc **r,
		   unsigned int pte, unsigned int ptr, enum label_id lid)
{
	uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
	uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
	uasm_il_bnez(p, r, pte, lid);
	iPTE_LW(p, pte, ptr);
}
/* Make PTE writable, update software status bits as well, then store
 * at PTR.
 */
static void __cpuinit
build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
		 unsigned int ptr)
{
	unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
			     | _PAGE_DIRTY);

	iPTE_SW(p, r, pte, ptr, mode);
}
/*
 * Check if PTE can be modified, if not branch to LABEL. Regardless
 * restore PTE with value from PTR when done.
 */
static void __cpuinit
build_pte_modifiable(u32 **p, struct uasm_reloc **r,
		     unsigned int pte, unsigned int ptr, enum label_id lid)
{
	uasm_i_andi(p, pte, pte, _PAGE_WRITE);
	uasm_il_beqz(p, r, pte, lid);
	iPTE_LW(p, pte, ptr);
}
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
/*
 * R3000 style TLB load/store/modify handlers.
 */

/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi.
 * Then it returns.
 */
static void __cpuinit
build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
{
	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */
	uasm_i_tlbwi(p);
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
}
/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi
 * or tlbwr as appropriate.  This is because the index register
 * may have the probe fail bit set as a result of a trap on a
 * kseg2 access, i.e. without refill.  Then it returns.
 */
static void __cpuinit
build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
			     struct uasm_reloc **r, unsigned int pte,
			     unsigned int tmp)
{
	uasm_i_mfc0(p, tmp, C0_INDEX);
	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */
	uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */
	uasm_i_tlbwi(p); /* cp0 delay */
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
	uasm_l_r3000_write_probe_fail(l, *p);
	uasm_i_tlbwr(p); /* cp0 delay */
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
}
static void __cpuinit
build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
				   unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	uasm_i_mfc0(p, pte, C0_BADVADDR);
	uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */
	uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
	uasm_i_srl(p, pte, pte, 22); /* load delay */
	uasm_i_sll(p, pte, pte, 2);
	uasm_i_addu(p, ptr, ptr, pte);
	uasm_i_mfc0(p, pte, C0_CONTEXT);
	uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */
	uasm_i_andi(p, pte, pte, 0xffc); /* load delay */
	uasm_i_addu(p, ptr, ptr, pte);
	uasm_i_lw(p, pte, 0, ptr);
	uasm_i_tlbp(p); /* load delay */
}
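/*
 * Address arithmetic above, for the R3000's 32-bit two-level tables:
 * srl 22/sll 2 turns BadVAddr into a byte index into the 1024-entry
 * pgd, and andi 0xffc extracts the pte index (already scaled by 4)
 * from the BadVPN field of the Context register.
 */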
static void __cpuinit build_r3000_tlb_load_handler(void)
{
	u32 *p = handle_tlbl;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbl, 0, sizeof(handle_tlbl));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
	uasm_i_nop(&p); /* load delay */
	build_make_valid(&p, &r, K0, K1);
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

	uasm_l_nopage_tlbl(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
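	/*
	 * The j instruction encodes only a 28-bit region offset, hence
	 * the 0x0fffffff mask on the slowpath target; this relies on
	 * the handler and tlb_do_page_fault_0 living in the same 256MB
	 * segment.
	 */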
	uasm_i_nop(&p);

	if ((p - handle_tlbl) > FASTPATH_SIZE)
		panic("TLB load handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbl));

	dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
}
static void __cpuinit build_r3000_tlb_store_handler(void)
{
	u32 *p = handle_tlbs;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbs, 0, sizeof(handle_tlbs));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
	uasm_i_nop(&p); /* load delay */
	build_make_write(&p, &r, K0, K1);
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

	uasm_l_nopage_tlbs(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbs) > FASTPATH_SIZE)
		panic("TLB store handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbs));

	dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
}
static void __cpuinit build_r3000_tlb_modify_handler(void)
{
	u32 *p = handle_tlbm;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbm, 0, sizeof(handle_tlbm));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
	uasm_i_nop(&p); /* load delay */
	build_make_write(&p, &r, K0, K1);
	build_r3000_pte_reload_tlbwi(&p, K0, K1);

	uasm_l_nopage_tlbm(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbm) > FASTPATH_SIZE)
		panic("TLB modify handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbm));

	dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
}
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
/*
 * R4000 style TLB load/store/modify handlers.
 */
static void __cpuinit
build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
				   struct uasm_reloc **r, unsigned int pte,
				   unsigned int ptr)
{
#ifdef CONFIG_64BIT
	build_get_pmde64(p, l, r, pte, ptr); /* get pmd in ptr */
#else
	build_get_pgde32(p, pte, ptr); /* get pgd in ptr */
#endif

#ifdef CONFIG_HUGETLB_PAGE
	/*
	 * For huge tlb entries, pmd doesn't contain an address but
	 * instead contains the tlb pte. Check the PAGE_HUGE bit and
	 * see if we need to jump to huge tlb processing.
	 */
	build_is_huge_pte(p, r, pte, ptr, label_tlb_huge_update);
#endif

	UASM_i_MFC0(p, pte, C0_BADVADDR);
	UASM_i_LW(p, ptr, 0, ptr);
	UASM_i_SRL(p, pte, pte, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
	uasm_i_andi(p, pte, pte, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
	UASM_i_ADDU(p, ptr, ptr, pte);

#ifdef CONFIG_SMP
	uasm_l_smp_pgtable_change(l, *p);
#endif
	iPTE_LW(p, pte, ptr); /* get even pte */
	if (!m4kc_tlbp_war())
		build_tlb_probe_entry(p);
}
static void __cpuinit
build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
				   struct uasm_reloc **r, unsigned int tmp,
				   unsigned int ptr)
{
	uasm_i_ori(p, ptr, ptr, sizeof(pte_t));
	uasm_i_xori(p, ptr, ptr, sizeof(pte_t));
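	/*
	 * The ori/xori pair unconditionally clears the sizeof(pte_t)
	 * bit of ptr, aligning it back down to the even pte of the
	 * even/odd pair before both entries are reloaded.
	 */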
	build_update_entries(p, tmp, ptr);
	build_tlb_write_entry(p, l, r, tlb_indexed);
	uasm_l_leave(l, *p);
	uasm_i_eret(p); /* return from trap */

#ifdef CONFIG_64BIT
	build_get_pgd_vmalloc64(p, l, r, tmp, ptr);
#endif
}
static void __cpuinit build_r4000_tlb_load_handler(void)
{
	u32 *p = handle_tlbl;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbl, 0, sizeof(handle_tlbl));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	if (bcm1250_m3_war()) {
		UASM_i_MFC0(&p, K0, C0_BADVADDR);
		UASM_i_MFC0(&p, K1, C0_ENTRYHI);
		uasm_i_xor(&p, K0, K0, K1);
		UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
		uasm_il_bnez(&p, &r, K0, label_leave);
		/* No need for uasm_i_nop */
	}

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	build_make_valid(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

#ifdef CONFIG_HUGETLB_PAGE
	/*
	 * This is the entry point when build_r4000_tlbchange_handler_head
	 * spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, K0, K1);
	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
	build_tlb_probe_entry(&p);
	uasm_i_ori(&p, K0, K0, (_PAGE_ACCESSED | _PAGE_VALID));
	build_huge_handler_tail(&p, &r, &l, K0, K1);
#endif

	uasm_l_nopage_tlbl(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbl) > FASTPATH_SIZE)
		panic("TLB load handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbl));

	dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
}
static void __cpuinit build_r4000_tlb_store_handler(void)
{
	u32 *p = handle_tlbs;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbs, 0, sizeof(handle_tlbs));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	build_make_write(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

#ifdef CONFIG_HUGETLB_PAGE
	/*
	 * This is the entry point when
	 * build_r4000_tlbchange_handler_head spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, K0, K1);
	build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
	build_tlb_probe_entry(&p);
	uasm_i_ori(&p, K0, K0,
		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
	build_huge_handler_tail(&p, &r, &l, K0, K1);
#endif

	uasm_l_nopage_tlbs(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbs) > FASTPATH_SIZE)
		panic("TLB store handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbs));

	dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
}
static void __cpuinit build_r4000_tlb_modify_handler(void)
{
	u32 *p = handle_tlbm;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbm, 0, sizeof(handle_tlbm));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	/* Present and writable bits set, set accessed and dirty bits. */
	build_make_write(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

#ifdef CONFIG_HUGETLB_PAGE
	/*
	 * This is the entry point when
	 * build_r4000_tlbchange_handler_head spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, K0, K1);
	build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
	build_tlb_probe_entry(&p);
	uasm_i_ori(&p, K0, K0,
		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
	build_huge_handler_tail(&p, &r, &l, K0, K1);
#endif

	uasm_l_nopage_tlbm(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbm) > FASTPATH_SIZE)
		panic("TLB modify handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbm));

	dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
}
void __cpuinit build_tlb_refill_handler(void)
{
	/*
	 * The refill handler is generated per-CPU, multi-node systems
	 * may have local storage for it. The other handlers are only
	 * needed once.
	 */
	static int run_once = 0;

	switch (current_cpu_type()) {
	case CPU_R2000:
	case CPU_R3000:
	case CPU_R3000A:
	case CPU_R3081E:
	case CPU_TX3912:
	case CPU_TX3922:
	case CPU_TX3927:
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
		build_r3000_tlb_refill_handler();
		if (!run_once) {
			build_r3000_tlb_load_handler();
			build_r3000_tlb_store_handler();
			build_r3000_tlb_modify_handler();
			run_once++;
		}
#else
		panic("No R3000 TLB refill handler");
#endif
		break;

	case CPU_R6000:
	case CPU_R6000A:
		panic("No R6000 TLB refill handler yet");
		break;

	case CPU_R8000:
		panic("No R8000 TLB refill handler yet");
		break;

	default:
		build_r4000_tlb_refill_handler();
		if (!run_once) {
			build_r4000_tlb_load_handler();
			build_r4000_tlb_store_handler();
			build_r4000_tlb_modify_handler();
			run_once++;
		}
	}
}
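/*
 * The handlers above were written to memory with ordinary stores;
 * flush_tlb_handlers below pushes them out of the dcache and
 * invalidates the icache range so the CPU will fetch the newly
 * generated instructions.
 */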
void __cpuinit flush_tlb_handlers(void)
{
	local_flush_icache_range((unsigned long)handle_tlbl,
				 (unsigned long)handle_tlbl + sizeof(handle_tlbl));
	local_flush_icache_range((unsigned long)handle_tlbs,
				 (unsigned long)handle_tlbs + sizeof(handle_tlbs));
	local_flush_icache_range((unsigned long)handle_tlbm,
				 (unsigned long)handle_tlbm + sizeof(handle_tlbm));
}