/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Synthesize TLB refill handlers at runtime.
 *
 * Copyright (C) 2004,2005 by Thiemo Seufer
 * Copyright (C) 2005 Maciej W. Rozycki
 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
 *
 * ... and the days got worse and worse and now you see
 * I've gone completely out of my mind.
 *
 * They're coming to take me away haha
 * they're coming to take me away hoho hihi haha
 * to the funny farm where code is beautiful all the time ...
 *
 * (Condolences to Napoleon XIV)
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/init.h>

#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
/* #define DEBUG_TLB */
static __init int __attribute__((unused)) r45k_bvahwbug(void)
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}
static __init int __attribute__((unused)) r4k_250MHZhwbug(void)
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}
static __init int __attribute__((unused)) bcm1250_m3_war(void)
{
	return BCM1250_M3_WAR;
}
static __init int __attribute__((unused)) r10000_llsc_war(void)
{
	return R10000_LLSC_WAR;
}
/*
 * A little micro-assembler, intended for TLB refill handler
 * synthesizing. It is intentionally kept simple, does only support
 * a subset of instructions, and does not try to hide pipeline effects
 * like branch delay slots.
 */
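/*
 * Usage sketch (illustrative only, not part of the generated handlers):
 *
 *	u32 *p = tlb_handler;
 *	i_mfc0(&p, K0, C0_BADVADDR);	(emit "mfc0 k0, c0_badvaddr")
 *	i_nop(&p);			(emit a hazard nop)
 *
 * Each i_* wrapper defined below encodes one instruction at *p and
 * post-increments the buffer pointer.
 */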
enum fields
{
	RS = 0x001,
	RT = 0x002,
	RD = 0x004,
	RE = 0x008,
	SIMM = 0x010,
	UIMM = 0x020,
	BIMM = 0x040,
	JIMM = 0x080,
	FUNC = 0x100,
	SET = 0x200
};

#define OP_SH		26
#define RS_MASK		0x1f
#define RS_SH		21
#define RT_MASK		0x1f
#define RT_SH		16
#define RD_MASK		0x1f
#define RD_SH		11
#define RE_MASK		0x1f
#define RE_SH		6
#define IMM_MASK	0xffff
#define IMM_SH		0
#define JIMM_MASK	0x3ffffff
#define JIMM_SH		0
#define FUNC_MASK	0x3f
#define FUNC_SH		0
#define SET_MASK	0x7
#define SET_SH		0
enum opcode {
	insn_invalid,
	insn_addu, insn_addiu, insn_and, insn_andi, insn_beq,
	insn_beql, insn_bgez, insn_bgezl, insn_bltz, insn_bltzl,
	insn_bne, insn_daddu, insn_daddiu, insn_dmfc0, insn_dmtc0,
	insn_dsll, insn_dsll32, insn_dsra, insn_dsrl,
	insn_dsubu, insn_eret, insn_j, insn_jal, insn_jr, insn_ld,
	insn_ll, insn_lld, insn_lui, insn_lw, insn_mfc0, insn_mtc0,
	insn_ori, insn_rfe, insn_sc, insn_scd, insn_sd, insn_sll,
	insn_sra, insn_srl, insn_subu, insn_sw, insn_tlbp, insn_tlbwi,
	insn_tlbwr, insn_xor, insn_xori
};
/* This macro sets the non-variable bits of an instruction. */
#define M(a, b, c, d, e, f)					\
	((a) << OP_SH						\
	 | (b) << RS_SH						\
	 | (c) << RT_SH						\
	 | (d) << RD_SH						\
	 | (e) << RE_SH						\
	 | (f) << FUNC_SH)
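/*
 * Example: M(spec_op, 0, 0, 0, 0, addu_op) yields the fixed bits of
 * "addu" (major opcode SPECIAL, function field 0x21); build_insn()
 * below then ORs the variable rs/rt/rd fields into this template.
 */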
struct insn {
	enum opcode opcode;
	u32 match;
	enum fields fields;
};

static __initdata struct insn insn_table[] = {
	{ insn_addiu, M(addiu_op,0,0,0,0,0), RS | RT | SIMM },
	{ insn_addu, M(spec_op,0,0,0,0,addu_op), RS | RT | RD },
	{ insn_and, M(spec_op,0,0,0,0,and_op), RS | RT | RD },
	{ insn_andi, M(andi_op,0,0,0,0,0), RS | RT | UIMM },
	{ insn_beq, M(beq_op,0,0,0,0,0), RS | RT | BIMM },
	{ insn_beql, M(beql_op,0,0,0,0,0), RS | RT | BIMM },
	{ insn_bgez, M(bcond_op,0,bgez_op,0,0,0), RS | BIMM },
	{ insn_bgezl, M(bcond_op,0,bgezl_op,0,0,0), RS | BIMM },
	{ insn_bltz, M(bcond_op,0,bltz_op,0,0,0), RS | BIMM },
	{ insn_bltzl, M(bcond_op,0,bltzl_op,0,0,0), RS | BIMM },
	{ insn_bne, M(bne_op,0,0,0,0,0), RS | RT | BIMM },
	{ insn_daddiu, M(daddiu_op,0,0,0,0,0), RS | RT | SIMM },
	{ insn_daddu, M(spec_op,0,0,0,0,daddu_op), RS | RT | RD },
	{ insn_dmfc0, M(cop0_op,dmfc_op,0,0,0,0), RT | RD | SET },
	{ insn_dmtc0, M(cop0_op,dmtc_op,0,0,0,0), RT | RD | SET },
	{ insn_dsll, M(spec_op,0,0,0,0,dsll_op), RT | RD | RE },
	{ insn_dsll32, M(spec_op,0,0,0,0,dsll32_op), RT | RD | RE },
	{ insn_dsra, M(spec_op,0,0,0,0,dsra_op), RT | RD | RE },
	{ insn_dsrl, M(spec_op,0,0,0,0,dsrl_op), RT | RD | RE },
	{ insn_dsubu, M(spec_op,0,0,0,0,dsubu_op), RS | RT | RD },
	{ insn_eret, M(cop0_op,cop_op,0,0,0,eret_op), 0 },
	{ insn_j, M(j_op,0,0,0,0,0), JIMM },
	{ insn_jal, M(jal_op,0,0,0,0,0), JIMM },
	{ insn_jr, M(spec_op,0,0,0,0,jr_op), RS },
	{ insn_ld, M(ld_op,0,0,0,0,0), RS | RT | SIMM },
	{ insn_ll, M(ll_op,0,0,0,0,0), RS | RT | SIMM },
	{ insn_lld, M(lld_op,0,0,0,0,0), RS | RT | SIMM },
	{ insn_lui, M(lui_op,0,0,0,0,0), RT | SIMM },
	{ insn_lw, M(lw_op,0,0,0,0,0), RS | RT | SIMM },
	{ insn_mfc0, M(cop0_op,mfc_op,0,0,0,0), RT | RD | SET },
	{ insn_mtc0, M(cop0_op,mtc_op,0,0,0,0), RT | RD | SET },
	{ insn_ori, M(ori_op,0,0,0,0,0), RS | RT | UIMM },
	{ insn_rfe, M(cop0_op,cop_op,0,0,0,rfe_op), 0 },
	{ insn_sc, M(sc_op,0,0,0,0,0), RS | RT | SIMM },
	{ insn_scd, M(scd_op,0,0,0,0,0), RS | RT | SIMM },
	{ insn_sd, M(sd_op,0,0,0,0,0), RS | RT | SIMM },
	{ insn_sll, M(spec_op,0,0,0,0,sll_op), RT | RD | RE },
	{ insn_sra, M(spec_op,0,0,0,0,sra_op), RT | RD | RE },
	{ insn_srl, M(spec_op,0,0,0,0,srl_op), RT | RD | RE },
	{ insn_subu, M(spec_op,0,0,0,0,subu_op), RS | RT | RD },
	{ insn_sw, M(sw_op,0,0,0,0,0), RS | RT | SIMM },
	{ insn_tlbp, M(cop0_op,cop_op,0,0,0,tlbp_op), 0 },
	{ insn_tlbwi, M(cop0_op,cop_op,0,0,0,tlbwi_op), 0 },
	{ insn_tlbwr, M(cop0_op,cop_op,0,0,0,tlbwr_op), 0 },
	{ insn_xor, M(spec_op,0,0,0,0,xor_op), RS | RT | RD },
	{ insn_xori, M(xori_op,0,0,0,0,0), RS | RT | UIMM },
	{ insn_invalid, 0, 0 }
};
static __init u32 build_rs(u32 arg)
{
	if (arg & ~RS_MASK)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return (arg & RS_MASK) << RS_SH;
}
static __init u32 build_rt(u32 arg)
{
	if (arg & ~RT_MASK)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return (arg & RT_MASK) << RT_SH;
}
static __init u32 build_rd(u32 arg)
{
	if (arg & ~RD_MASK)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return (arg & RD_MASK) << RD_SH;
}
static __init u32 build_re(u32 arg)
{
	if (arg & ~RE_MASK)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return (arg & RE_MASK) << RE_SH;
}
static __init u32 build_simm(s32 arg)
{
	if (arg > 0x7fff || arg < -0x8000)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return arg & 0xffff;
}
static __init u32 build_uimm(u32 arg)
{
	if (arg & ~IMM_MASK)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return arg & IMM_MASK;
}
static __init u32 build_bimm(s32 arg)
{
	if (arg > 0x1ffff || arg < -0x20000)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	if (arg & 0x3)
		printk(KERN_WARNING "Invalid TLB synthesizer branch target\n");

	return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
}
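/*
 * Example: a branch displacement of 8 bytes (one instruction past the
 * delay slot) is passed in as arg == 8 and encoded as 0x0002; the
 * sign bit is folded in separately for negative displacements.
 */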
static __init u32 build_jimm(u32 arg)
{
	if (arg & ~((JIMM_MASK) << 2))
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return (arg >> 2) & JIMM_MASK;
}
static __init u32 build_func(u32 arg)
{
	if (arg & ~FUNC_MASK)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return arg & FUNC_MASK;
}
static __init u32 build_set(u32 arg)
{
	if (arg & ~SET_MASK)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return arg & SET_MASK;
}
/*
 * The order of opcode arguments is implicitly left to right,
 * starting with RS and ending with FUNC or IMM.
 */
static void __init build_insn(u32 **buf, enum opcode opc, ...)
{
	struct insn *ip = NULL;
	unsigned int i;
	va_list ap;
	u32 op;

	for (i = 0; insn_table[i].opcode != insn_invalid; i++)
		if (insn_table[i].opcode == opc) {
			ip = &insn_table[i];
			break;
		}

	if (!ip)
		panic("Unsupported TLB synthesizer instruction %d", opc);

	op = ip->match;
	va_start(ap, opc);
	if (ip->fields & RS) op |= build_rs(va_arg(ap, u32));
	if (ip->fields & RT) op |= build_rt(va_arg(ap, u32));
	if (ip->fields & RD) op |= build_rd(va_arg(ap, u32));
	if (ip->fields & RE) op |= build_re(va_arg(ap, u32));
	if (ip->fields & SIMM) op |= build_simm(va_arg(ap, s32));
	if (ip->fields & UIMM) op |= build_uimm(va_arg(ap, u32));
	if (ip->fields & BIMM) op |= build_bimm(va_arg(ap, s32));
	if (ip->fields & JIMM) op |= build_jimm(va_arg(ap, u32));
	if (ip->fields & FUNC) op |= build_func(va_arg(ap, u32));
	if (ip->fields & SET) op |= build_set(va_arg(ap, u32));
	va_end(ap);

	**buf = op;
	(*buf)++;
}
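/*
 * Worked example: build_insn(buf, insn_addiu, 27, 27, 4) emits
 * 0x277b0004, i.e. "addiu k1, k1, 4"; the variadic arguments are
 * consumed in the RS, RT, SIMM order listed in insn_table.
 */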
#define I_u1u2u3(op)						\
	static inline void __init i##op(u32 **buf, unsigned int a, \
		unsigned int b, unsigned int c)			\
	{							\
		build_insn(buf, insn##op, a, b, c);		\
	}
#define I_u2u1u3(op)						\
	static inline void __init i##op(u32 **buf, unsigned int a, \
		unsigned int b, unsigned int c)			\
	{							\
		build_insn(buf, insn##op, b, a, c);		\
	}
#define I_u3u1u2(op)						\
	static inline void __init i##op(u32 **buf, unsigned int a, \
		unsigned int b, unsigned int c)			\
	{							\
		build_insn(buf, insn##op, b, c, a);		\
	}
#define I_u1u2s3(op)						\
	static inline void __init i##op(u32 **buf, unsigned int a, \
		unsigned int b, signed int c)			\
	{							\
		build_insn(buf, insn##op, a, b, c);		\
	}
#define I_u2s3u1(op)						\
	static inline void __init i##op(u32 **buf, unsigned int a, \
		signed int b, unsigned int c)			\
	{							\
		build_insn(buf, insn##op, c, a, b);		\
	}
#define I_u2u1s3(op)						\
	static inline void __init i##op(u32 **buf, unsigned int a, \
		unsigned int b, signed int c)			\
	{							\
		build_insn(buf, insn##op, b, a, c);		\
	}
#define I_u1u2(op)						\
	static inline void __init i##op(u32 **buf, unsigned int a, \
		unsigned int b)					\
	{							\
		build_insn(buf, insn##op, a, b);		\
	}
#define I_u1s2(op)						\
	static inline void __init i##op(u32 **buf, unsigned int a, \
		signed int b)					\
	{							\
		build_insn(buf, insn##op, a, b);		\
	}
#define I_u1(op)						\
	static inline void __init i##op(u32 **buf, unsigned int a) \
	{							\
		build_insn(buf, insn##op, a);			\
	}
#define I_0(op)							\
	static inline void __init i##op(u32 **buf)		\
	{							\
		build_insn(buf, insn##op);			\
	}
enum label_id {
	label_invalid,
	label_second_part,
	label_leave,
	label_vmalloc,
	label_vmalloc_done,
	label_tlbw_hazard,
	label_split,
	label_nopage_tlbl,
	label_nopage_tlbs,
	label_nopage_tlbm,
	label_smp_pgtable_change,
	label_r3000_write_probe_fail,
};

struct label {
	u32 *addr;
	enum label_id lab;
};
static __init void build_label(struct label **lab, u32 *addr,
			       enum label_id l)
{
	(*lab)->addr = addr;
	(*lab)->lab = l;
	(*lab)++;
}

#define L_LA(lb)						\
	static inline void l##lb(struct label **lab, u32 *addr) \
	{							\
		build_label(lab, addr, label##lb);		\
	}
L_LA(_second_part)
L_LA(_leave)
L_LA(_vmalloc)
L_LA(_vmalloc_done)
L_LA(_tlbw_hazard)
L_LA(_split)
L_LA(_nopage_tlbl)
L_LA(_nopage_tlbs)
L_LA(_nopage_tlbm)
L_LA(_smp_pgtable_change)
L_LA(_r3000_write_probe_fail)
/* convenience macros for instructions */
#ifdef CONFIG_64BIT
# define i_LW(buf, rs, rt, off) i_ld(buf, rs, rt, off)
# define i_SW(buf, rs, rt, off) i_sd(buf, rs, rt, off)
# define i_SLL(buf, rs, rt, sh) i_dsll(buf, rs, rt, sh)
# define i_SRA(buf, rs, rt, sh) i_dsra(buf, rs, rt, sh)
# define i_SRL(buf, rs, rt, sh) i_dsrl(buf, rs, rt, sh)
# define i_MFC0(buf, rt, rd...) i_dmfc0(buf, rt, rd)
# define i_MTC0(buf, rt, rd...) i_dmtc0(buf, rt, rd)
# define i_ADDIU(buf, rs, rt, val) i_daddiu(buf, rs, rt, val)
# define i_ADDU(buf, rs, rt, rd) i_daddu(buf, rs, rt, rd)
# define i_SUBU(buf, rs, rt, rd) i_dsubu(buf, rs, rt, rd)
# define i_LL(buf, rs, rt, off) i_lld(buf, rs, rt, off)
# define i_SC(buf, rs, rt, off) i_scd(buf, rs, rt, off)
#else
# define i_LW(buf, rs, rt, off) i_lw(buf, rs, rt, off)
# define i_SW(buf, rs, rt, off) i_sw(buf, rs, rt, off)
# define i_SLL(buf, rs, rt, sh) i_sll(buf, rs, rt, sh)
# define i_SRA(buf, rs, rt, sh) i_sra(buf, rs, rt, sh)
# define i_SRL(buf, rs, rt, sh) i_srl(buf, rs, rt, sh)
# define i_MFC0(buf, rt, rd...) i_mfc0(buf, rt, rd)
# define i_MTC0(buf, rt, rd...) i_mtc0(buf, rt, rd)
# define i_ADDIU(buf, rs, rt, val) i_addiu(buf, rs, rt, val)
# define i_ADDU(buf, rs, rt, rd) i_addu(buf, rs, rt, rd)
# define i_SUBU(buf, rs, rt, rd) i_subu(buf, rs, rt, rd)
# define i_LL(buf, rs, rt, off) i_ll(buf, rs, rt, off)
# define i_SC(buf, rs, rt, off) i_sc(buf, rs, rt, off)
#endif
#define i_b(buf, off) i_beq(buf, 0, 0, off)
#define i_beqz(buf, rs, off) i_beq(buf, rs, 0, off)
#define i_beqzl(buf, rs, off) i_beql(buf, rs, 0, off)
#define i_bnez(buf, rs, off) i_bne(buf, rs, 0, off)
#define i_bnezl(buf, rs, off) i_bnel(buf, rs, 0, off)
#define i_move(buf, a, b) i_ADDU(buf, a, 0, b)
#define i_nop(buf) i_sll(buf, 0, 0, 0)
#define i_ssnop(buf) i_sll(buf, 0, 0, 1)
#define i_ehb(buf) i_sll(buf, 0, 0, 3)
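/*
 * For reference: i_nop() emits "sll $0, $0, 0" (0x00000000), while
 * i_ssnop() and i_ehb() differ only in the shift amount field
 * (0x00000040 and 0x000000c0 respectively).
 */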
static __init int __attribute__((unused)) in_compat_space_p(long addr)
{
	/* Is this address in 32bit compat space? */
	return (((addr) & 0xffffffff00000000L) == 0xffffffff00000000L);
}
static __init int __attribute__((unused)) rel_highest(long val)
{
	return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000;
}
static __init int __attribute__((unused)) rel_higher(long val)
{
	return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000;
}
static __init int rel_hi(long val)
{
	return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
}
static __init int rel_lo(long val)
{
	return ((val & 0xffff) ^ 0x8000) - 0x8000;
}
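/*
 * Example: for addr == 0x12348000, rel_hi() returns 0x1235 and
 * rel_lo() returns -0x8000, so "lui rs, 0x1235" followed by
 * "addiu rs, rs, -0x8000" rebuilds the address; the bias in rel_hi()
 * compensates for the sign-extension of the low half.
 */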
static __init void i_LA_mostly(u32 **buf, unsigned int rs, long addr)
{
	if (!in_compat_space_p(addr)) {
		i_lui(buf, rs, rel_highest(addr));
		if (rel_higher(addr))
			i_daddiu(buf, rs, rs, rel_higher(addr));
		if (rel_hi(addr)) {
			i_dsll(buf, rs, rs, 16);
			i_daddiu(buf, rs, rs, rel_hi(addr));
			i_dsll(buf, rs, rs, 16);
		} else
			i_dsll32(buf, rs, rs, 0);
	} else
		i_lui(buf, rs, rel_hi(addr));
}
static __init void __attribute__((unused)) i_LA(u32 **buf, unsigned int rs,
						long addr)
{
	i_LA_mostly(buf, rs, addr);
	if (rel_lo(addr))
		i_ADDIU(buf, rs, rs, rel_lo(addr));
}
struct reloc {
	u32 *addr;
	unsigned int type;
	enum label_id lab;
};

static __init void r_mips_pc16(struct reloc **rel, u32 *addr,
			       enum label_id l)
{
	(*rel)->addr = addr;
	(*rel)->type = R_MIPS_PC16;
	(*rel)->lab = l;
	(*rel)++;
}
static inline void __resolve_relocs(struct reloc *rel, struct label *lab)
{
	long laddr = (long)lab->addr;
	long raddr = (long)rel->addr;

	switch (rel->type) {
	case R_MIPS_PC16:
		*rel->addr |= build_bimm(laddr - (raddr + 4));
		break;

	default:
		panic("Unsupported TLB synthesizer relocation %d",
		      rel->type);
	}
}
static __init void resolve_relocs(struct reloc *rel, struct label *lab)
{
	struct label *l;

	for (; rel->lab != label_invalid; rel++)
		for (l = lab; l->lab != label_invalid; l++)
			if (rel->lab == l->lab)
				__resolve_relocs(rel, l);
}
static __init void move_relocs(struct reloc *rel, u32 *first, u32 *end,
			       long off)
{
	for (; rel->lab != label_invalid; rel++)
		if (rel->addr >= first && rel->addr < end)
			rel->addr += off;
}
static __init void move_labels(struct label *lab, u32 *first, u32 *end,
			       long off)
{
	for (; lab->lab != label_invalid; lab++)
		if (lab->addr >= first && lab->addr < end)
			lab->addr += off;
}
static __init void copy_handler(struct reloc *rel, struct label *lab,
				u32 *first, u32 *end, u32 *target)
{
	long off = (long)(target - first);

	memcpy(target, first, (end - first) * sizeof(u32));

	move_relocs(rel, first, end, off);
	move_labels(lab, first, end, off);
}
static __init int __attribute__((unused)) insn_has_bdelay(struct reloc *rel,
							   u32 *addr)
{
	for (; rel->lab != label_invalid; rel++) {
		if (rel->addr == addr
		    && (rel->type == R_MIPS_PC16
			|| rel->type == R_MIPS_26))
			return 1;
	}

	return 0;
}
/* convenience functions for labeled branches */
static void __init __attribute__((unused))
il_bltz(u32 **p, struct reloc **r, unsigned int reg, enum label_id l)
{
	r_mips_pc16(r, *p, l);
	i_bltz(p, reg, 0);
}
static void __init __attribute__((unused)) il_b(u32 **p, struct reloc **r,
						enum label_id l)
{
	r_mips_pc16(r, *p, l);
	i_b(p, 0);
}
static void __init il_beqz(u32 **p, struct reloc **r, unsigned int reg,
			   enum label_id l)
{
	r_mips_pc16(r, *p, l);
	i_beqz(p, reg, 0);
}
static void __init __attribute__((unused))
il_beqzl(u32 **p, struct reloc **r, unsigned int reg, enum label_id l)
{
	r_mips_pc16(r, *p, l);
	i_beqzl(p, reg, 0);
}
static void __init il_bnez(u32 **p, struct reloc **r, unsigned int reg,
			   enum label_id l)
{
	r_mips_pc16(r, *p, l);
	i_bnez(p, reg, 0);
}
static void __init il_bgezl(u32 **p, struct reloc **r, unsigned int reg,
			    enum label_id l)
{
	r_mips_pc16(r, *p, l);
	i_bgezl(p, reg, 0);
}
/* The only general purpose registers allowed in TLB handlers. */
#define K0		26
#define K1		27

/* Some CP0 registers */
#define C0_INDEX	0, 0
#define C0_ENTRYLO0	2, 0
#define C0_TCBIND	2, 2
#define C0_ENTRYLO1	3, 0
#define C0_CONTEXT	4, 0
#define C0_BADVADDR	8, 0
#define C0_ENTRYHI	10, 0
#define C0_EPC		14, 0
#define C0_XCONTEXT	20, 0

#ifdef CONFIG_64BIT
# define GET_CONTEXT(buf, reg) i_MFC0(buf, reg, C0_XCONTEXT)
#else
# define GET_CONTEXT(buf, reg) i_MFC0(buf, reg, C0_CONTEXT)
#endif
/*
 * The worst case length of the handler is around 18 instructions for
 * R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
 * Maximum space available is 32 instructions for R3000 and 64
 * instructions for R4000.
 *
 * We deliberately chose a buffer size of 128, so we won't scribble
 * over anything important on overflow before we panic.
 */
static __initdata u32 tlb_handler[128];
/* simply assume worst case size for labels and relocs */
static __initdata struct label labels[128];
static __initdata struct reloc relocs[128];
/*
 * The R3000 TLB handler is simple.
 */
static void __init build_r3000_tlb_refill_handler(void)
{
	long pgdc = (long)pgd_current;
	u32 *p;

	memset(tlb_handler, 0, sizeof(tlb_handler));
	p = tlb_handler;

	i_mfc0(&p, K0, C0_BADVADDR);
	i_lui(&p, K1, rel_hi(pgdc)); /* cp0 delay */
	i_lw(&p, K1, rel_lo(pgdc), K1);
	i_srl(&p, K0, K0, 22); /* load delay */
	i_sll(&p, K0, K0, 2);
	i_addu(&p, K1, K1, K0);
	i_mfc0(&p, K0, C0_CONTEXT);
	i_lw(&p, K1, 0, K1); /* cp0 delay */
	i_andi(&p, K0, K0, 0xffc); /* load delay */
	i_addu(&p, K1, K1, K0);
	i_lw(&p, K0, 0, K1); /* get even pte */
	i_nop(&p); /* load delay */
	i_mtc0(&p, K0, C0_ENTRYLO0);
	i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
	i_tlbwr(&p); /* cp0 delay */
	i_jr(&p, K1);
	i_rfe(&p); /* branch delay */

	if (p > tlb_handler + 32)
		panic("TLB refill handler space exceeded");

	printk("Synthesized TLB refill handler (%u instructions).\n",
	       (unsigned int)(p - tlb_handler));

#ifdef DEBUG_TLB
	{
		int i;

		for (i = 0; i < (p - tlb_handler); i++)
			printk("%08x\n", tlb_handler[i]);
	}
#endif

	memcpy((void *)ebase, tlb_handler, 0x80);
}
/*
 * The R4000 TLB handler is much more complicated. We have two
 * consecutive handler areas with 32 instructions space each.
 * Since they aren't used at the same time, we can overflow in the
 * other one. To keep things simple, we first assume linear space,
 * then we relocate it to the final handler layout as needed.
 */
static __initdata u32 final_handler[64];
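/*
 * Layout sketch (assuming the usual vector placement): the 64-bit
 * refill vector enters at final_handler[32] once this buffer is
 * copied to ebase; an overlong handler is split, with the remainder
 * placed in final_handler[0..31] and reached via the inserted branch.
 */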
/*
 * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
 * 2. A timing hazard exists for the TLBP instruction.
 *
 *	stalling_instruction
 *	TLBP
 *
 * The JTLB is being read for the TLBP throughout the stall generated by the
 * previous instruction. This is not really correct as the stalling instruction
 * can modify the address used to access the JTLB. The failure symptom is that
 * the TLBP instruction will use an address created for the stalling instruction
 * and not the address held in C0_ENHI and thus report the wrong results.
 *
 * The software work-around is to not allow the instruction preceding the TLBP
 * to stall - make it an NOP or some other instruction guaranteed not to stall.
 *
 * Errata 2 will not be fixed. This errata is also on the R5000.
 *
 * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
 */
static __init void __attribute__((unused)) build_tlb_probe_entry(u32 **p)
{
	switch (current_cpu_data.cputype) {
	/* Found by experiment: R4600 v2.0 needs this, too. */
	case CPU_R4600:
	case CPU_R5000:
	case CPU_R5000A:
	case CPU_NEVADA:
		i_nop(p);
		i_tlbp(p);
		break;

	default:
		i_tlbp(p);
		break;
	}
}
/*
 * Write random or indexed TLB entry, and care about the hazards from
 * the preceding mtc0 and for the following eret.
 */
enum tlb_write_entry { tlb_random, tlb_indexed };
static __init void build_tlb_write_entry(u32 **p, struct label **l,
					 struct reloc **r,
					 enum tlb_write_entry wmode)
{
	void(*tlbw)(u32 **) = NULL;

	switch (wmode) {
	case tlb_random: tlbw = i_tlbwr; break;
	case tlb_indexed: tlbw = i_tlbwi; break;
	}

	switch (current_cpu_data.cputype) {
		/*
		 * This branch uses up a mtc0 hazard nop slot and saves
		 * two nops after the tlbw instruction.
		 */
		il_bgezl(p, r, 0, label_tlbw_hazard);
		tlbw(p);
		l_tlbw_hazard(l, *p);
		i_nop(p);
		break;
		i_nop(p); /* QED specifies 2 nops hazard */
		/*
		 * This branch uses up a mtc0 hazard nop slot and saves
		 * a nop after the tlbw instruction.
		 */
		il_bgezl(p, r, 0, label_tlbw_hazard);
		tlbw(p);
		l_tlbw_hazard(l, *p);
		break;
		/*
		 * When the JTLB is updated by tlbwi or tlbwr, a subsequent
		 * use of the JTLB for instructions should not occur for 4
		 * cpu cycles and use for data translations should not occur
		 * for 3 cpu cycles.
		 */
	default:
		panic("No TLB refill handler yet (CPU type: %d)",
		      current_cpu_data.cputype);
		break;
	}
}
/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pmd entry.
 */
static __init void
build_get_pmde64(u32 **p, struct label **l, struct reloc **r,
		 unsigned int tmp, unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	i_dmfc0(p, tmp, C0_BADVADDR);
	il_bltz(p, r, tmp, label_vmalloc);
	/* No i_nop needed here, since the next insn doesn't touch TMP. */

#ifdef CONFIG_SMP
# ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC uses TCBind value as "CPU" index
	 */
	i_mfc0(p, ptr, C0_TCBIND);
	i_dsrl(p, ptr, ptr, 19);
# else
	/*
	 * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
	 * stored in CONTEXT.
	 */
	i_dmfc0(p, ptr, C0_CONTEXT);
	i_dsrl(p, ptr, ptr, 23);
# endif
	i_LA_mostly(p, tmp, pgdc);
	i_daddu(p, ptr, ptr, tmp);
	i_dmfc0(p, tmp, C0_BADVADDR);
	i_ld(p, ptr, rel_lo(pgdc), ptr);
#else
	i_LA_mostly(p, ptr, pgdc);
	i_ld(p, ptr, rel_lo(pgdc), ptr);
#endif

	l_vmalloc_done(l, *p);
	i_dsrl(p, tmp, tmp, PGDIR_SHIFT-3); /* get pgd offset in bytes */
	i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
	i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
	i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	i_ld(p, ptr, 0, ptr); /* get pmd pointer */
	i_dsrl(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
	i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3);
	i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
}
/*
 * BVADDR is the faulting address, PTR is scratch.
 * PTR will hold the pgd for vmalloc.
 */
static __init void
build_get_pgd_vmalloc64(u32 **p, struct label **l, struct reloc **r,
			unsigned int bvaddr, unsigned int ptr)
{
	long swpd = (long)swapper_pg_dir;

	l_vmalloc(l, *p);
	i_LA(p, ptr, VMALLOC_START);
	i_dsubu(p, bvaddr, bvaddr, ptr);

	if (in_compat_space_p(swpd) && !rel_lo(swpd)) {
		il_b(p, r, label_vmalloc_done);
		i_lui(p, ptr, rel_hi(swpd));
	} else {
		i_LA_mostly(p, ptr, swpd);
		il_b(p, r, label_vmalloc_done);
		i_daddiu(p, ptr, ptr, rel_lo(swpd));
	}
}

#else /* !CONFIG_64BIT */
/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pgd entry.
 */
static __init void __attribute__((unused))
build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	/* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
#ifdef CONFIG_SMP
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC uses TCBind value as "CPU" index
	 */
	i_mfc0(p, ptr, C0_TCBIND);
	i_LA_mostly(p, tmp, pgdc);
	i_srl(p, ptr, ptr, 19);
#else
	/*
	 * smp_processor_id() << 3 is stored in CONTEXT.
	 */
	i_mfc0(p, ptr, C0_CONTEXT);
	i_LA_mostly(p, tmp, pgdc);
	i_srl(p, ptr, ptr, 23);
#endif
	i_addu(p, ptr, tmp, ptr);
#else
	i_LA_mostly(p, ptr, pgdc);
#endif
	i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	i_lw(p, ptr, rel_lo(pgdc), ptr);
	i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
	i_sll(p, tmp, tmp, PGD_T_LOG2);
	i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
}

#endif /* !CONFIG_64BIT */
static __init void build_adjust_context(u32 **p, unsigned int ctx)
{
	unsigned int shift = 4 - (PTE_T_LOG2 + 1);
	unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);

	switch (current_cpu_data.cputype) {
	default:
		break;
	}

	if (shift)
		i_SRL(p, ctx, ctx, shift);
	i_andi(p, ctx, ctx, mask);
}
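/*
 * Worked example, assuming 4K pages and 32-bit PTEs (PTE_T_LOG2 == 2,
 * PTRS_PER_PTE == 1024): shift == 1 and mask == 0xff8, so the context
 * value is turned into the byte offset of an aligned even/odd pte
 * pair within the page table.
 */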
static __init void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
{
	/*
	 * Bug workaround for the Nevada. It seems as if under certain
	 * circumstances the move from cp0_context might produce a
	 * bogus result when the mfc0 instruction and its consumer are
	 * in a different cacheline or a load instruction, probably any
	 * memory reference, is between them.
	 */
	switch (current_cpu_data.cputype) {
	case CPU_NEVADA:
		i_LW(p, ptr, 0, ptr);
		GET_CONTEXT(p, tmp); /* get context reg */
		break;

	default:
		GET_CONTEXT(p, tmp); /* get context reg */
		i_LW(p, ptr, 0, ptr);
		break;
	}

	build_adjust_context(p, tmp);
	i_ADDU(p, ptr, ptr, tmp); /* add in offset */
}
static __init void build_update_entries(u32 **p, unsigned int tmp,
					unsigned int ptep)
{
	/*
	 * 64bit address support (36bit on a 32bit CPU) in a 32bit
	 * Kernel is a special case. Only a few CPUs use it.
	 */
#ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits) {
		i_ld(p, tmp, 0, ptep); /* get even pte */
		i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
		i_dsrl(p, tmp, tmp, 6); /* convert to entrylo0 */
		i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
		i_dsrl(p, ptep, ptep, 6); /* convert to entrylo1 */
		i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
	} else {
		int pte_off_even = sizeof(pte_t) / 2;
		int pte_off_odd = pte_off_even + sizeof(pte_t);

		/* The pte entries are pre-shifted */
		i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
		i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
		i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
		i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
	}
#else
	i_LW(p, tmp, 0, ptep); /* get even pte */
	i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
	if (r45k_bvahwbug())
		build_tlb_probe_entry(p);
	i_SRL(p, tmp, tmp, 6); /* convert to entrylo0 */
	if (r4k_250MHZhwbug())
		i_mtc0(p, 0, C0_ENTRYLO0);
	i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
	i_SRL(p, ptep, ptep, 6); /* convert to entrylo1 */
	if (r45k_bvahwbug())
		i_mfc0(p, tmp, C0_INDEX);
	if (r4k_250MHZhwbug())
		i_mtc0(p, 0, C0_ENTRYLO1);
	i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
#endif
}
static void __init build_r4000_tlb_refill_handler(void)
{
	u32 *p = tlb_handler;
	struct label *l = labels;
	struct reloc *r = relocs;
	u32 *f;
	unsigned int final_len;

	memset(tlb_handler, 0, sizeof(tlb_handler));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));
	memset(final_handler, 0, sizeof(final_handler));
1187 if (bcm1250_m3_war()) {
1188 i_MFC0(&p
, K0
, C0_BADVADDR
);
1189 i_MFC0(&p
, K1
, C0_ENTRYHI
);
1190 i_xor(&p
, K0
, K0
, K1
);
1191 i_SRL(&p
, K0
, K0
, PAGE_SHIFT
+ 1);
1192 il_bnez(&p
, &r
, K0
, label_leave
);
1193 /* No need for i_nop */
#ifdef CONFIG_64BIT
	build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
#else
	build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
#endif
	build_get_ptep(&p, K0, K1);
	build_update_entries(&p, K0, K1);
	build_tlb_write_entry(&p, &l, &r, tlb_random);
	l_leave(&l, *p);
	i_eret(&p); /* return from trap */
#ifdef CONFIG_64BIT
	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1);
#endif
	/*
	 * Overflow check: For the 64bit handler, we need at least one
	 * free instruction slot for the wrap-around branch. In worst
	 * case, if the intended insertion point is a delay slot, we
	 * need three, with the second nop'ed and the third being
	 * unused.
	 */
#ifdef CONFIG_32BIT
	if ((p - tlb_handler) > 64)
		panic("TLB refill handler space exceeded");
#else
	if (((p - tlb_handler) > 63)
	    || (((p - tlb_handler) > 61)
		&& insn_has_bdelay(relocs, tlb_handler + 29)))
		panic("TLB refill handler space exceeded");
#endif
	/*
	 * Now fold the handler in the TLB refill handler space.
	 */
#ifdef CONFIG_32BIT
	f = final_handler;
	/* Simplest case, just copy the handler. */
	copy_handler(relocs, labels, tlb_handler, p, f);
	final_len = p - tlb_handler;
#else /* CONFIG_64BIT */
	f = final_handler + 32;
	if ((p - tlb_handler) <= 32) {
		/* Just copy the handler. */
		copy_handler(relocs, labels, tlb_handler, p, f);
		final_len = p - tlb_handler;
	} else {
		u32 *split = tlb_handler + 30;

		/*
		 * Find the split point.
		 */
		if (insn_has_bdelay(relocs, split - 1))
			split--;

		/* Copy first part of the handler. */
		copy_handler(relocs, labels, tlb_handler, split, f);
		f += split - tlb_handler;

		/* Insert branch. */
		l_split(&l, final_handler);
		il_b(&f, &r, label_split);
		if (insn_has_bdelay(relocs, split))
			i_nop(&f);
		else {
			copy_handler(relocs, labels, split, split + 1, f);
			move_labels(labels, f, f + 1, -1);
			f++;
			split++;
		}

		/* Copy the rest of the handler. */
		copy_handler(relocs, labels, split, p, final_handler);
		final_len = (f - (final_handler + 32)) + (p - split);
	}
#endif /* CONFIG_64BIT */
	resolve_relocs(relocs, labels);
	printk("Synthesized TLB refill handler (%u instructions).\n",
	       final_len);

#ifdef DEBUG_TLB
	{
		int i;

#ifdef CONFIG_32BIT
		f = final_handler;
#else
		f = final_handler + 32;
#endif /* CONFIG_64BIT */
		for (i = 0; i < final_len; i++)
			printk("%08x\n", f[i]);
	}
#endif
	memcpy((void *)ebase, final_handler, 0x100);
}
/*
 * TLB load/store/modify handlers.
 *
 * Only the fastpath gets synthesized at runtime, the slowpath for
 * do_page_fault remains normal asm.
 */
extern void tlb_do_page_fault_0(void);
extern void tlb_do_page_fault_1(void);

#define __tlb_handler_align \
	__attribute__((__aligned__(1 << CONFIG_MIPS_L1_CACHE_SHIFT)))

/*
 * 128 instructions for the fastpath handler is generous and should
 * never be exceeded.
 */
#define FASTPATH_SIZE 128
u32 __tlb_handler_align handle_tlbl[FASTPATH_SIZE];
u32 __tlb_handler_align handle_tlbs[FASTPATH_SIZE];
u32 __tlb_handler_align handle_tlbm[FASTPATH_SIZE];
static void __init
iPTE_LW(u32 **p, struct label **l, unsigned int pte, unsigned int ptr)
{
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		i_lld(p, pte, 0, ptr);
	else
# endif
		i_LL(p, pte, 0, ptr);
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		i_ld(p, pte, 0, ptr);
	else
# endif
		i_LW(p, pte, 0, ptr);
#endif
}
static void __init
iPTE_SW(u32 **p, struct reloc **r, unsigned int pte, unsigned int ptr,
	unsigned int mode)
{
#ifdef CONFIG_64BIT_PHYS_ADDR
	unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
#endif

	i_ori(p, pte, pte, mode);
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		i_scd(p, pte, 0, ptr);
	else
# endif
		i_SC(p, pte, 0, ptr);

	if (r10000_llsc_war())
		il_beqzl(p, r, pte, label_smp_pgtable_change);
	else
		il_beqz(p, r, pte, label_smp_pgtable_change);

# ifdef CONFIG_64BIT_PHYS_ADDR
	if (!cpu_has_64bits) {
		/* no i_nop needed */
		i_ll(p, pte, sizeof(pte_t) / 2, ptr);
		i_ori(p, pte, pte, hwmode);
		i_sc(p, pte, sizeof(pte_t) / 2, ptr);
		il_beqz(p, r, pte, label_smp_pgtable_change);
		/* no i_nop needed */
		i_lw(p, pte, 0, ptr);
	}
# endif
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		i_sd(p, pte, 0, ptr);
	else
# endif
		i_SW(p, pte, 0, ptr);

# ifdef CONFIG_64BIT_PHYS_ADDR
	if (!cpu_has_64bits) {
		i_lw(p, pte, sizeof(pte_t) / 2, ptr);
		i_ori(p, pte, pte, hwmode);
		i_sw(p, pte, sizeof(pte_t) / 2, ptr);
		i_lw(p, pte, 0, ptr);
	}
# endif
#endif
}
/*
 * Check if PTE is present, if not then jump to LABEL. PTR points to
 * the page table where this PTE is located, PTE will be re-loaded
 * with its original value.
 */
static void __init
build_pte_present(u32 **p, struct label **l, struct reloc **r,
		  unsigned int pte, unsigned int ptr, enum label_id lid)
{
	i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
	i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
	il_bnez(p, r, pte, lid);
	iPTE_LW(p, l, pte, ptr);
}
/* Make PTE valid, store result in PTR. */
static void __init
build_make_valid(u32 **p, struct reloc **r, unsigned int pte,
		 unsigned int ptr)
{
	unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;

	iPTE_SW(p, r, pte, ptr, mode);
}
/*
 * Check if PTE can be written to, if not branch to LABEL. Regardless
 * restore PTE with value from PTR when done.
 */
static void __init
build_pte_writable(u32 **p, struct label **l, struct reloc **r,
		   unsigned int pte, unsigned int ptr, enum label_id lid)
{
	i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
	i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
	il_bnez(p, r, pte, lid);
	iPTE_LW(p, l, pte, ptr);
}
/* Make PTE writable, update software status bits as well, then store
 * at PTR.
 */
static void __init
build_make_write(u32 **p, struct reloc **r, unsigned int pte,
		 unsigned int ptr)
{
	unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
			     | _PAGE_DIRTY);

	iPTE_SW(p, r, pte, ptr, mode);
}
/*
 * Check if PTE can be modified, if not branch to LABEL. Regardless
 * restore PTE with value from PTR when done.
 */
static void __init
build_pte_modifiable(u32 **p, struct label **l, struct reloc **r,
		     unsigned int pte, unsigned int ptr, enum label_id lid)
{
	i_andi(p, pte, pte, _PAGE_WRITE);
	il_beqz(p, r, pte, lid);
	iPTE_LW(p, l, pte, ptr);
}
/*
 * R3000 style TLB load/store/modify handlers.
 */

/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi.
 * Then it returns.
 */
static void __init
build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
{
	i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	i_mfc0(p, tmp, C0_EPC); /* cp0 delay */
	i_tlbwi(p);
	i_jr(p, tmp);
	i_rfe(p); /* branch delay */
}
/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi
 * or tlbwr as appropriate. This is because the index register
 * may have the probe fail bit set as a result of a trap on a
 * kseg2 access, i.e. without refill. Then it returns.
 */
static void __init
build_r3000_tlb_reload_write(u32 **p, struct label **l, struct reloc **r,
			     unsigned int pte, unsigned int tmp)
{
	i_mfc0(p, tmp, C0_INDEX);
	i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */
	i_mfc0(p, tmp, C0_EPC); /* branch delay */
	i_tlbwi(p); /* cp0 delay */
	i_jr(p, tmp);
	i_rfe(p); /* branch delay */
	l_r3000_write_probe_fail(l, *p);
	i_tlbwr(p); /* cp0 delay */
	i_jr(p, tmp);
	i_rfe(p); /* branch delay */
}
static void __init
build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
				   unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	i_mfc0(p, pte, C0_BADVADDR);
	i_lui(p, ptr, rel_hi(pgdc)); /* cp0 delay */
	i_lw(p, ptr, rel_lo(pgdc), ptr);
	i_srl(p, pte, pte, 22); /* load delay */
	i_sll(p, pte, pte, 2);
	i_addu(p, ptr, ptr, pte);
	i_mfc0(p, pte, C0_CONTEXT);
	i_lw(p, ptr, 0, ptr); /* cp0 delay */
	i_andi(p, pte, pte, 0xffc); /* load delay */
	i_addu(p, ptr, ptr, pte);
	i_lw(p, pte, 0, ptr);
	i_tlbp(p); /* load delay */
}
static void __init build_r3000_tlb_load_handler(void)
{
	u32 *p = handle_tlbl;
	struct label *l = labels;
	struct reloc *r = relocs;

	memset(handle_tlbl, 0, sizeof(handle_tlbl));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_present(&p, &l, &r, K0, K1, label_nopage_tlbl);
	i_nop(&p); /* load delay */
	build_make_valid(&p, &r, K0, K1);
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

	l_nopage_tlbl(&l, p);
	i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	i_nop(&p);

	if ((p - handle_tlbl) > FASTPATH_SIZE)
		panic("TLB load handler fastpath space exceeded");

	resolve_relocs(relocs, labels);
	printk("Synthesized TLB load handler fastpath (%u instructions).\n",
	       (unsigned int)(p - handle_tlbl));

#ifdef DEBUG_TLB
	{
		int i;

		for (i = 0; i < (p - handle_tlbl); i++)
			printk("%08x\n", handle_tlbl[i]);
	}
#endif
}
static void __init build_r3000_tlb_store_handler(void)
{
	u32 *p = handle_tlbs;
	struct label *l = labels;
	struct reloc *r = relocs;

	memset(handle_tlbs, 0, sizeof(handle_tlbs));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_writable(&p, &l, &r, K0, K1, label_nopage_tlbs);
	i_nop(&p); /* load delay */
	build_make_write(&p, &r, K0, K1);
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

	l_nopage_tlbs(&l, p);
	i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	i_nop(&p);

	if ((p - handle_tlbs) > FASTPATH_SIZE)
		panic("TLB store handler fastpath space exceeded");

	resolve_relocs(relocs, labels);
	printk("Synthesized TLB store handler fastpath (%u instructions).\n",
	       (unsigned int)(p - handle_tlbs));

#ifdef DEBUG_TLB
	{
		int i;

		for (i = 0; i < (p - handle_tlbs); i++)
			printk("%08x\n", handle_tlbs[i]);
	}
#endif
}
static void __init build_r3000_tlb_modify_handler(void)
{
	u32 *p = handle_tlbm;
	struct label *l = labels;
	struct reloc *r = relocs;

	memset(handle_tlbm, 0, sizeof(handle_tlbm));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_modifiable(&p, &l, &r, K0, K1, label_nopage_tlbm);
	i_nop(&p); /* load delay */
	build_make_write(&p, &r, K0, K1);
	build_r3000_pte_reload_tlbwi(&p, K0, K1);

	l_nopage_tlbm(&l, p);
	i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	i_nop(&p);

	if ((p - handle_tlbm) > FASTPATH_SIZE)
		panic("TLB modify handler fastpath space exceeded");

	resolve_relocs(relocs, labels);
	printk("Synthesized TLB modify handler fastpath (%u instructions).\n",
	       (unsigned int)(p - handle_tlbm));

#ifdef DEBUG_TLB
	{
		int i;

		for (i = 0; i < (p - handle_tlbm); i++)
			printk("%08x\n", handle_tlbm[i]);
	}
#endif
}
/*
 * R4000 style TLB load/store/modify handlers.
 */
static void __init
build_r4000_tlbchange_handler_head(u32 **p, struct label **l,
				   struct reloc **r, unsigned int pte,
				   unsigned int ptr)
{
#ifdef CONFIG_64BIT
	build_get_pmde64(p, l, r, pte, ptr); /* get pmd in ptr */
#else
	build_get_pgde32(p, pte, ptr); /* get pgd in ptr */
#endif

	i_MFC0(p, pte, C0_BADVADDR);
	i_LW(p, ptr, 0, ptr);
	i_SRL(p, pte, pte, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
	i_andi(p, pte, pte, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
	i_ADDU(p, ptr, ptr, pte);

#ifdef CONFIG_SMP
	l_smp_pgtable_change(l, *p);
#endif
	iPTE_LW(p, l, pte, ptr); /* get even pte */
	build_tlb_probe_entry(p);
}
static void __init
build_r4000_tlbchange_handler_tail(u32 **p, struct label **l,
				   struct reloc **r, unsigned int tmp,
				   unsigned int ptr)
{
	i_ori(p, ptr, ptr, sizeof(pte_t));
	i_xori(p, ptr, ptr, sizeof(pte_t));
	build_update_entries(p, tmp, ptr);
	build_tlb_write_entry(p, l, r, tlb_indexed);
	l_leave(l, *p);
	i_eret(p); /* return from trap */

#ifdef CONFIG_64BIT
	build_get_pgd_vmalloc64(p, l, r, tmp, ptr);
#endif
}
static void __init build_r4000_tlb_load_handler(void)
{
	u32 *p = handle_tlbl;
	struct label *l = labels;
	struct reloc *r = relocs;

	memset(handle_tlbl, 0, sizeof(handle_tlbl));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	if (bcm1250_m3_war()) {
		i_MFC0(&p, K0, C0_BADVADDR);
		i_MFC0(&p, K1, C0_ENTRYHI);
		i_xor(&p, K0, K0, K1);
		i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
		il_bnez(&p, &r, K0, label_leave);
		/* No need for i_nop */
	}

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_present(&p, &l, &r, K0, K1, label_nopage_tlbl);
	build_make_valid(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

	l_nopage_tlbl(&l, p);
	i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	i_nop(&p);

	if ((p - handle_tlbl) > FASTPATH_SIZE)
		panic("TLB load handler fastpath space exceeded");

	resolve_relocs(relocs, labels);
	printk("Synthesized TLB load handler fastpath (%u instructions).\n",
	       (unsigned int)(p - handle_tlbl));

#ifdef DEBUG_TLB
	{
		int i;

		for (i = 0; i < (p - handle_tlbl); i++)
			printk("%08x\n", handle_tlbl[i]);
	}
#endif
}
static void __init build_r4000_tlb_store_handler(void)
{
	u32 *p = handle_tlbs;
	struct label *l = labels;
	struct reloc *r = relocs;

	memset(handle_tlbs, 0, sizeof(handle_tlbs));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_writable(&p, &l, &r, K0, K1, label_nopage_tlbs);
	build_make_write(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

	l_nopage_tlbs(&l, p);
	i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	i_nop(&p);

	if ((p - handle_tlbs) > FASTPATH_SIZE)
		panic("TLB store handler fastpath space exceeded");

	resolve_relocs(relocs, labels);
	printk("Synthesized TLB store handler fastpath (%u instructions).\n",
	       (unsigned int)(p - handle_tlbs));

#ifdef DEBUG_TLB
	{
		int i;

		for (i = 0; i < (p - handle_tlbs); i++)
			printk("%08x\n", handle_tlbs[i]);
	}
#endif
}
static void __init build_r4000_tlb_modify_handler(void)
{
	u32 *p = handle_tlbm;
	struct label *l = labels;
	struct reloc *r = relocs;

	memset(handle_tlbm, 0, sizeof(handle_tlbm));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_modifiable(&p, &l, &r, K0, K1, label_nopage_tlbm);
	/* Present and writable bits set, set accessed and dirty bits. */
	build_make_write(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

	l_nopage_tlbm(&l, p);
	i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	i_nop(&p);

	if ((p - handle_tlbm) > FASTPATH_SIZE)
		panic("TLB modify handler fastpath space exceeded");

	resolve_relocs(relocs, labels);
	printk("Synthesized TLB modify handler fastpath (%u instructions).\n",
	       (unsigned int)(p - handle_tlbm));

#ifdef DEBUG_TLB
	{
		int i;

		for (i = 0; i < (p - handle_tlbm); i++)
			printk("%08x\n", handle_tlbm[i]);
	}
#endif
}
void __init build_tlb_refill_handler(void)
{
	/*
	 * The refill handler is generated per-CPU, multi-node systems
	 * may have local storage for it. The other handlers are only
	 * needed once.
	 */
	static int run_once = 0;

	switch (current_cpu_data.cputype) {
	case CPU_R2000:
	case CPU_R3000:
		build_r3000_tlb_refill_handler();
		if (!run_once) {
			build_r3000_tlb_load_handler();
			build_r3000_tlb_store_handler();
			build_r3000_tlb_modify_handler();
			run_once++;
		}
		break;

	case CPU_R6000:
	case CPU_R6000A:
		panic("No R6000 TLB refill handler yet");
		break;

	case CPU_R8000:
		panic("No R8000 TLB refill handler yet");
		break;

	default:
		build_r4000_tlb_refill_handler();
		if (!run_once) {
			build_r4000_tlb_load_handler();
			build_r4000_tlb_store_handler();
			build_r4000_tlb_modify_handler();
			run_once++;
		}
	}
}
void __init flush_tlb_handlers(void)
{
	flush_icache_range((unsigned long)handle_tlbl,
			   (unsigned long)handle_tlbl + sizeof(handle_tlbl));
	flush_icache_range((unsigned long)handle_tlbs,
			   (unsigned long)handle_tlbs + sizeof(handle_tlbs));
	flush_icache_range((unsigned long)handle_tlbm,
			   (unsigned long)handle_tlbm + sizeof(handle_tlbm));
}