/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Synthesize TLB refill handlers at runtime.
 *
 * Copyright (C) 2004, 2005, 2006 by Thiemo Seufer
 * Copyright (C) 2005 Maciej W. Rozycki
 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
 *
 * ... and the days got worse and worse and now you see
 * I've gone completely out of my mind.
 *
 * They're coming to take me away haha
 * they're coming to take me away hoho hihi haha
 * to the funny farm where code is beautiful all the time ...
 *
 * (Condolences to Napoleon XIV)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/init.h>

#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/inst.h>
#include <asm/elf.h>
#include <asm/war.h>

static __cpuinit int __maybe_unused r45k_bvahwbug(void)
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}

static __cpuinit int __maybe_unused r4k_250MHZhwbug(void)
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}

static __cpuinit int __maybe_unused bcm1250_m3_war(void)
{
	return BCM1250_M3_WAR;
}

static __cpuinit int __maybe_unused r10000_llsc_war(void)
{
	return R10000_LLSC_WAR;
}

/*
 * Found by experiment: At least some revisions of the 4kc throw under
 * some circumstances a machine check exception, triggered by invalid
 * values in the index register. Delaying the tlbp instruction until
 * after the next branch, plus adding an additional nop in front of
 * tlbwi/tlbwr avoids the invalid index register values. Nobody knows
 * why; it's not an issue caused by the core RTL.
 */
static __cpuinit int __maybe_unused m4kc_tlbp_war(void)
{
	return (current_cpu_data.processor_id & 0xffff00) ==
	       (PRID_COMP_MIPS | PRID_IMP_4KC);
}

/*
 * A little micro-assembler, intended for TLB refill handler
 * synthesizing. It is intentionally kept simple, does only support
 * a subset of instructions, and does not try to hide pipeline effects
 * like branch delay slots.
 */

#define IMM_MASK	0xffff
#define JIMM_MASK	0x3ffffff
#define FUNC_MASK	0x3f
#define SCIMM_MASK	0xfffff

/* This macro sets the non-variable bits of an instruction. */
#define M(a, b, c, d, e, f)					\
	((a) << OP_SH						\
	 | (b) << RS_SH						\
	 | (c) << RT_SH						\
	 | (d) << RD_SH						\
	 | (e) << RE_SH						\
	 | (f) << FUNC_SH)

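/*
 * Worked example (sketch): the insn_addiu entry below is
 * M(addiu_op, 0, 0, 0, 0, 0), i.e. only the primary opcode field is
 * fixed at table-build time; build_insn() later ORs the rs/rt
 * registers and the sign-extended 16-bit immediate into the
 * remaining fields to form the final instruction word.
 */
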
static __cpuinitdata struct insn insn_table[] = {
	{ insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
	{ insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD },
	{ insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
	{ insn_beq, M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
	{ insn_beql, M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
	{ insn_bgez, M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM },
	{ insn_bgezl, M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM },
	{ insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM },
	{ insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM },
	{ insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
	{ insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
	{ insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET },
	{ insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET },
	{ insn_dsll, M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE },
	{ insn_dsll32, M(spec_op, 0, 0, 0, 0, dsll32_op), RT | RD | RE },
	{ insn_dsra, M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE },
	{ insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE },
	{ insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE },
	{ insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD },
	{ insn_eret, M(cop0_op, cop_op, 0, 0, 0, eret_op), 0 },
	{ insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
	{ insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM },
	{ insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS },
	{ insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM },
	{ insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET },
	{ insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET },
	{ insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD },
	{ insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
	{ insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 },
	{ insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE },
	{ insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE },
	{ insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE },
	{ insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD },
	{ insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 },
	{ insn_tlbwi, M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0 },
	{ insn_tlbwr, M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0 },
	{ insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD },
	{ insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
	{ insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM },
	{ insn_invalid, 0, 0 }
};

static __cpuinit u32 build_rs(u32 arg)
{
	if (arg & ~RS_MASK)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return (arg & RS_MASK) << RS_SH;
}

static __cpuinit u32 build_rt(u32 arg)
{
	if (arg & ~RT_MASK)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return (arg & RT_MASK) << RT_SH;
}

static __cpuinit u32 build_rd(u32 arg)
{
	if (arg & ~RD_MASK)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return (arg & RD_MASK) << RD_SH;
}

static __cpuinit u32 build_re(u32 arg)
{
	if (arg & ~RE_MASK)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return (arg & RE_MASK) << RE_SH;
}

static __cpuinit u32 build_simm(s32 arg)
{
	if (arg > 0x7fff || arg < -0x8000)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return arg & 0xffff;
}

static __cpuinit u32 build_uimm(u32 arg)
{
	if (arg & ~IMM_MASK)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return arg & IMM_MASK;
}

static __cpuinit u32 build_bimm(s32 arg)
{
	if (arg > 0x1ffff || arg < -0x20000)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	if (arg & 0x3)
		printk(KERN_WARNING "Invalid TLB synthesizer branch target\n");

	return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
}

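/*
 * Worked example: a branch target 8 instructions ahead comes in as a
 * byte offset of arg = 32 and encodes as (32 >> 2) & 0x7fff = 8; a
 * negative offset additionally sets the sign bit (1 << 15) on top of
 * the truncated word offset.
 */
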
static __cpuinit u32 build_jimm(u32 arg)
{
	if (arg & ~((JIMM_MASK) << 2))
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return (arg >> 2) & JIMM_MASK;
}

static inline __cpuinit u32 build_scimm(u32 arg)
{
	if (arg & ~SCIMM_MASK)
		printk(KERN_WARNING "Micro-assembler field overflow\n");

	return (arg & SCIMM_MASK) << SCIMM_SH;
}

static __cpuinit u32 build_func(u32 arg)
{
	if (arg & ~FUNC_MASK)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return arg & FUNC_MASK;
}

static __cpuinit u32 build_set(u32 arg)
{
	if (arg & ~SET_MASK)
		printk(KERN_WARNING "TLB synthesizer field overflow\n");

	return arg & SET_MASK;
}

/*
 * The order of opcode arguments is implicitly left to right,
 * starting with RS and ending with FUNC or IMM.
 */
static void __cpuinit build_insn(u32 **buf, enum opcode opc, ...)
{
	struct insn *ip = NULL;
	unsigned int i;
	va_list ap;
	u32 op;

	for (i = 0; insn_table[i].opcode != insn_invalid; i++)
		if (insn_table[i].opcode == opc) {
			ip = &insn_table[i];
			break;
		}

	if (!ip)
		panic("Unsupported TLB synthesizer instruction %d", opc);

	op = ip->match;
	va_start(ap, opc);
	if (ip->fields & RS) op |= build_rs(va_arg(ap, u32));
	if (ip->fields & RT) op |= build_rt(va_arg(ap, u32));
	if (ip->fields & RD) op |= build_rd(va_arg(ap, u32));
	if (ip->fields & RE) op |= build_re(va_arg(ap, u32));
	if (ip->fields & SIMM) op |= build_simm(va_arg(ap, s32));
	if (ip->fields & UIMM) op |= build_uimm(va_arg(ap, u32));
	if (ip->fields & BIMM) op |= build_bimm(va_arg(ap, s32));
	if (ip->fields & JIMM) op |= build_jimm(va_arg(ap, u32));
	if (ip->fields & FUNC) op |= build_func(va_arg(ap, u32));
	if (ip->fields & SET) op |= build_set(va_arg(ap, u32));
	if (ip->fields & SCIMM) op |= build_scimm(va_arg(ap, u32));
	va_end(ap);

	**buf = op;
	(*buf)++;
}

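/*
 * Usage sketch (the i_xxx() wrapper macros used throughout this file
 * are defined elsewhere in it): emitting an addiu boils down to
 * build_insn(&p, insn_addiu, rs, rt, imm), with the variadic operands
 * consumed in the left-to-right field order named above (RS, then RT,
 * then SIMM).
 */
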
static __cpuinit int __maybe_unused in_compat_space_p(long addr)
{
	/* Is this address in 32bit compat space? */
	return (((addr) & 0xffffffff00000000L) == 0xffffffff00000000L);
}

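/*
 * Example: a sign-extended CKSEG0 address such as 0xffffffff80001000
 * tests true here, while an XKPHYS address does not and needs the
 * full 64-bit load sequence below.
 */
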
static __cpuinit int __maybe_unused rel_highest(long val)
{
	return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000;
}

static __cpuinit int __maybe_unused rel_higher(long val)
{
	return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000;
}

static __cpuinit int rel_hi(long val)
{
	return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
}

static __cpuinit int rel_lo(long val)
{
	return ((val & 0xffff) ^ 0x8000) - 0x8000;
}

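/*
 * Worked example: for val = 0x12345678, rel_hi() is 0x1234 and
 * rel_lo() is 0x5678; for val = 0x12348000, rel_lo() is -0x8000 and
 * rel_hi() rounds up to 0x1235, so that
 * ((long)rel_hi(val) << 16) + rel_lo(val) == val in both cases.
 */
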
static __cpuinit void i_LA_mostly(u32 **buf, unsigned int rs, long addr)
{
	if (!in_compat_space_p(addr)) {
		i_lui(buf, rs, rel_highest(addr));
		if (rel_higher(addr))
			i_daddiu(buf, rs, rs, rel_higher(addr));
		if (rel_hi(addr)) {
			i_dsll(buf, rs, rs, 16);
			i_daddiu(buf, rs, rs, rel_hi(addr));
			i_dsll(buf, rs, rs, 16);
		} else
			i_dsll32(buf, rs, rs, 0);
	} else
		i_lui(buf, rs, rel_hi(addr));
}

static __cpuinit void __maybe_unused i_LA(u32 **buf, unsigned int rs,
					  long addr)
{
	i_LA_mostly(buf, rs, addr);
	if (rel_lo(addr))
		i_ADDIU(buf, rs, rs, rel_lo(addr));
}

static __cpuinit void r_mips_pc16(struct reloc **rel, u32 *addr,
				  enum label_id l)
{
	(*rel)->addr = addr;
	(*rel)->type = R_MIPS_PC16;
	(*rel)->lab = l;
	(*rel)++;
}

static inline void __resolve_relocs(struct reloc *rel, struct label *lab)
{
	long laddr = (long)lab->addr;
	long raddr = (long)rel->addr;

	switch (rel->type) {
	case R_MIPS_PC16:
		*rel->addr |= build_bimm(laddr - (raddr + 4));
		break;

	default:
		panic("Unsupported TLB synthesizer relocation %d",
		      rel->type);
	}
}

static __cpuinit void resolve_relocs(struct reloc *rel, struct label *lab)
{
	struct label *l;

	for (; rel->lab != label_invalid; rel++)
		for (l = lab; l->lab != label_invalid; l++)
			if (rel->lab == l->lab)
				__resolve_relocs(rel, l);
}

static __cpuinit void move_relocs(struct reloc *rel, u32 *first, u32 *end,
				  long off)
{
	for (; rel->lab != label_invalid; rel++)
		if (rel->addr >= first && rel->addr < end)
			rel->addr += off;
}

static __cpuinit void move_labels(struct label *lab, u32 *first, u32 *end,
				  long off)
{
	for (; lab->lab != label_invalid; lab++)
		if (lab->addr >= first && lab->addr < end)
			lab->addr += off;
}

static __cpuinit void copy_handler(struct reloc *rel, struct label *lab,
				   u32 *first, u32 *end, u32 *target)
{
	long off = (long)(target - first);

	memcpy(target, first, (end - first) * sizeof(u32));

	move_relocs(rel, first, end, off);
	move_labels(lab, first, end, off);
}

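/*
 * Example: folding tlb_handler into final_handler + 32 makes
 * off = (final_handler + 32) - tlb_handler, counted in u32 words;
 * labels and relocation records pointing into [first, end) are
 * shifted by the same amount so that resolve_relocs() later patches
 * the copied code rather than the scratch buffer.
 */
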
static __cpuinit int __maybe_unused insn_has_bdelay(struct reloc *rel,
						    u32 *addr)
{
	for (; rel->lab != label_invalid; rel++) {
		if (rel->addr == addr
		    && (rel->type == R_MIPS_PC16
			|| rel->type == R_MIPS_26))
			return 1;
	}

	return 0;
}

/* convenience functions for labeled branches */
static void __cpuinit __maybe_unused
il_bltz(u32 **p, struct reloc **r, unsigned int reg, enum label_id l)
{
	r_mips_pc16(r, *p, l);
	i_bltz(p, reg, 0);
}

static void __cpuinit __maybe_unused il_b(u32 **p, struct reloc **r,
					  enum label_id l)
{
	r_mips_pc16(r, *p, l);
	i_b(p, 0);
}

static void __cpuinit il_beqz(u32 **p, struct reloc **r, unsigned int reg,
			      enum label_id l)
{
	r_mips_pc16(r, *p, l);
	i_beqz(p, reg, 0);
}

static void __cpuinit __maybe_unused
il_beqzl(u32 **p, struct reloc **r, unsigned int reg, enum label_id l)
{
	r_mips_pc16(r, *p, l);
	i_beqzl(p, reg, 0);
}

static void __cpuinit il_bnez(u32 **p, struct reloc **r, unsigned int reg,
			      enum label_id l)
{
	r_mips_pc16(r, *p, l);
	i_bnez(p, reg, 0);
}

static void __cpuinit il_bgezl(u32 **p, struct reloc **r, unsigned int reg,
			       enum label_id l)
{
	r_mips_pc16(r, *p, l);
	i_bgezl(p, reg, 0);
}

static void __cpuinit __maybe_unused
il_bgez(u32 **p, struct reloc **r, unsigned int reg, enum label_id l)
{
	r_mips_pc16(r, *p, l);
	i_bgez(p, reg, 0);
}

/* The only general purpose registers allowed in TLB handlers. */
#define K0		26
#define K1		27

/* Some CP0 registers */
#define C0_INDEX	0, 0
#define C0_ENTRYLO0	2, 0
#define C0_TCBIND	2, 2
#define C0_ENTRYLO1	3, 0
#define C0_CONTEXT	4, 0
#define C0_BADVADDR	8, 0
#define C0_ENTRYHI	10, 0
#define C0_EPC		14, 0
#define C0_XCONTEXT	20, 0

#ifdef CONFIG_64BIT
# define GET_CONTEXT(buf, reg) i_MFC0(buf, reg, C0_XCONTEXT)
#else
# define GET_CONTEXT(buf, reg) i_MFC0(buf, reg, C0_CONTEXT)
#endif

/* The worst case length of the handler is around 18 instructions for
 * R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
 * Maximum space available is 32 instructions for R3000 and 64
 * instructions for R4000.
 *
 * We deliberately chose a buffer size of 128, so we won't scribble
 * over anything important on overflow before we panic.
 */
static __cpuinitdata u32 tlb_handler[128];

/* simply assume worst case size for labels and relocs */
static __cpuinitdata struct label labels[128];
static __cpuinitdata struct reloc relocs[128];

/*
 * The R3000 TLB handler is simple.
 */
static void __cpuinit build_r3000_tlb_refill_handler(void)
{
	long pgdc = (long)pgd_current;
	u32 *p;
	int i;

	memset(tlb_handler, 0, sizeof(tlb_handler));
	p = tlb_handler;

	i_mfc0(&p, K0, C0_BADVADDR);
	i_lui(&p, K1, rel_hi(pgdc)); /* cp0 delay */
	i_lw(&p, K1, rel_lo(pgdc), K1);
	i_srl(&p, K0, K0, 22); /* load delay */
	i_sll(&p, K0, K0, 2);
	i_addu(&p, K1, K1, K0);
	i_mfc0(&p, K0, C0_CONTEXT);
	i_lw(&p, K1, 0, K1); /* cp0 delay */
	i_andi(&p, K0, K0, 0xffc); /* load delay */
	i_addu(&p, K1, K1, K0);
	i_lw(&p, K0, 0, K1);
	i_nop(&p); /* load delay */
	i_mtc0(&p, K0, C0_ENTRYLO0);
	i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
	i_tlbwr(&p); /* cp0 delay */
	i_jr(&p, K1);
	i_rfe(&p); /* branch delay */

	if (p > tlb_handler + 32)
		panic("TLB refill handler space exceeded");

	pr_info("Synthesized TLB refill handler (%u instructions).\n",
		(unsigned int)(p - tlb_handler));

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");
	for (i = 0; i < (p - tlb_handler); i++)
		pr_debug("\t.word 0x%08x\n", tlb_handler[i]);
	pr_debug("\t.set pop\n");

	memcpy((void *)ebase, tlb_handler, 0x80);
}

/*
 * The R4000 TLB handler is much more complicated. We have two
 * consecutive handler areas with 32 instructions space each.
 * Since they aren't used at the same time, we can overflow in the
 * other one. To keep things simple, we first assume linear space,
 * then we relocate it to the final handler layout as needed.
 */
static __cpuinitdata u32 final_handler[64];

/*
 * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
 * 2. A timing hazard exists for the TLBP instruction.
 *
 *      stalling_instruction
 *      TLBP
 *
 * The JTLB is being read for the TLBP throughout the stall generated by the
 * previous instruction. This is not really correct as the stalling instruction
 * can modify the address used to access the JTLB. The failure symptom is that
 * the TLBP instruction will use an address created for the stalling instruction
 * and not the address held in C0_ENHI and thus report the wrong results.
 *
 * The software work-around is to not allow the instruction preceding the TLBP
 * to stall - make it an NOP or some other instruction guaranteed not to stall.
 *
 * Errata 2 will not be fixed. This errata is also on the R5000.
 *
 * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
 */
static __cpuinit void __maybe_unused build_tlb_probe_entry(u32 **p)
{
	switch (current_cpu_data.cputype) {
	/* Found by experiment: R4600 v2.0/R4700 needs this, too. */
	case CPU_R4600:
	case CPU_R4700:
	case CPU_R5000:
	case CPU_R5000A:
	case CPU_NEVADA:
		i_nop(p);
		i_tlbp(p);
		break;

	default:
		i_tlbp(p);
		break;
	}
}

/*
 * Write random or indexed TLB entry, and care about the hazards from
 * the preceding mtc0 and for the following eret.
 */
enum tlb_write_entry { tlb_random, tlb_indexed };

static __cpuinit void build_tlb_write_entry(u32 **p, struct label **l,
					    struct reloc **r,
					    enum tlb_write_entry wmode)
{
	void(*tlbw)(u32 **) = NULL;

	switch (wmode) {
	case tlb_random: tlbw = i_tlbwr; break;
	case tlb_indexed: tlbw = i_tlbwi; break;
	}

	switch (current_cpu_data.cputype) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		/*
		 * This branch uses up a mtc0 hazard nop slot and saves
		 * two nops after the tlbw instruction.
		 */
		il_bgezl(p, r, 0, label_tlbw_hazard);
		tlbw(p);
		l_tlbw_hazard(l, *p);
		i_nop(p);
		break;

	/* ... */

	case CPU_NEVADA:
		i_nop(p); /* QED specifies 2 nops hazard */
		/*
		 * This branch uses up a mtc0 hazard nop slot and saves
		 * a nop after the tlbw instruction.
		 */
		il_bgezl(p, r, 0, label_tlbw_hazard);
		tlbw(p);
		l_tlbw_hazard(l, *p);
		break;

	/* ... */

	case CPU_RM9000:
		/*
		 * When the JTLB is updated by tlbwi or tlbwr, a subsequent
		 * use of the JTLB for instructions should not occur for 4
		 * cpu cycles and use for data translations should not occur
		 * for 3 cpu cycles.
		 */
		i_ssnop(p);
		i_ssnop(p);
		i_ssnop(p);
		i_ssnop(p);
		tlbw(p);
		i_ssnop(p);
		i_ssnop(p);
		i_ssnop(p);
		i_ssnop(p);
		break;

	/* ... */

	default:
		panic("No TLB refill handler yet (CPU type: %d)",
		      current_cpu_data.cputype);
		break;
	}
}

#ifdef CONFIG_64BIT

/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pmd entry.
 */
static __cpuinit void
build_get_pmde64(u32 **p, struct label **l, struct reloc **r,
		 unsigned int tmp, unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	i_dmfc0(p, tmp, C0_BADVADDR);
#ifdef MODULE_START
	il_bltz(p, r, tmp, label_module_alloc);
#else
	il_bltz(p, r, tmp, label_vmalloc);
#endif
	/* No i_nop needed here, since the next insn doesn't touch TMP. */

#ifdef CONFIG_SMP
# ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC uses TCBind value as "CPU" index
	 */
	i_mfc0(p, ptr, C0_TCBIND);
	i_dsrl(p, ptr, ptr, 19);
# else
	/*
	 * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
	 * stored in CONTEXT.
	 */
	i_dmfc0(p, ptr, C0_CONTEXT);
	i_dsrl(p, ptr, ptr, 23);
# endif
	i_LA_mostly(p, tmp, pgdc);
	i_daddu(p, ptr, ptr, tmp);
	i_dmfc0(p, tmp, C0_BADVADDR);
	i_ld(p, ptr, rel_lo(pgdc), ptr);
#else
	i_LA_mostly(p, ptr, pgdc);
	i_ld(p, ptr, rel_lo(pgdc), ptr);
#endif

	l_vmalloc_done(l, *p);

	if (PGDIR_SHIFT - 3 < 32)		/* get pgd offset in bytes */
		i_dsrl(p, tmp, tmp, PGDIR_SHIFT - 3);
	else
		i_dsrl32(p, tmp, tmp, PGDIR_SHIFT - 3 - 32);

	i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1) << 3);
	i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
	i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	i_ld(p, ptr, 0, ptr); /* get pmd pointer */
	i_dsrl(p, tmp, tmp, PMD_SHIFT - 3); /* get pmd offset in bytes */
	i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1) << 3);
	i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
}

/*
 * BVADDR is the faulting address, PTR is scratch.
 * PTR will hold the pgd for vmalloc.
 */
static __cpuinit void
build_get_pgd_vmalloc64(u32 **p, struct label **l, struct reloc **r,
			unsigned int bvaddr, unsigned int ptr)
{
	long swpd = (long)swapper_pg_dir;

#ifdef MODULE_START
	long modd = (long)module_pg_dir;

	l_module_alloc(l, *p);
	/*
	 * Assumption:
	 * VMALLOC_START >= 0xc000000000000000UL
	 * MODULE_START >= 0xe000000000000000UL
	 */
	i_SLL(p, ptr, bvaddr, 2);
	il_bgez(p, r, ptr, label_vmalloc);

	if (in_compat_space_p(MODULE_START) && !rel_lo(MODULE_START)) {
		i_lui(p, ptr, rel_hi(MODULE_START)); /* delay slot */
	} else {
		/* unlikely configuration */
		i_nop(p); /* delay slot */
		i_LA(p, ptr, MODULE_START);
	}
	i_dsubu(p, bvaddr, bvaddr, ptr);

	if (in_compat_space_p(modd) && !rel_lo(modd)) {
		il_b(p, r, label_vmalloc_done);
		i_lui(p, ptr, rel_hi(modd));
	} else {
		i_LA_mostly(p, ptr, modd);
		il_b(p, r, label_vmalloc_done);
		i_daddiu(p, ptr, ptr, rel_lo(modd));
	}

	l_vmalloc(l, *p);
	if (in_compat_space_p(MODULE_START) && !rel_lo(MODULE_START) &&
	    MODULE_START << 32 == VMALLOC_START)
		i_dsll32(p, ptr, ptr, 0); /* typical case */
	else
		i_LA(p, ptr, VMALLOC_START);
#else
	l_vmalloc(l, *p);
	i_LA(p, ptr, VMALLOC_START);
#endif
	i_dsubu(p, bvaddr, bvaddr, ptr);

	if (in_compat_space_p(swpd) && !rel_lo(swpd)) {
		il_b(p, r, label_vmalloc_done);
		i_lui(p, ptr, rel_hi(swpd));
	} else {
		i_LA_mostly(p, ptr, swpd);
		il_b(p, r, label_vmalloc_done);
		i_daddiu(p, ptr, ptr, rel_lo(swpd));
	}
}

#else /* !CONFIG_64BIT */

/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pgd entry.
 */
static __cpuinit void __maybe_unused
build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	/* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
#ifdef CONFIG_SMP
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC uses TCBind value as "CPU" index
	 */
	i_mfc0(p, ptr, C0_TCBIND);
	i_LA_mostly(p, tmp, pgdc);
	i_srl(p, ptr, ptr, 19);
#else
	/*
	 * smp_processor_id() << 3 is stored in CONTEXT.
	 */
	i_mfc0(p, ptr, C0_CONTEXT);
	i_LA_mostly(p, tmp, pgdc);
	i_srl(p, ptr, ptr, 23);
#endif
	i_addu(p, ptr, tmp, ptr);
#else
	i_LA_mostly(p, ptr, pgdc);
#endif
	i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	i_lw(p, ptr, rel_lo(pgdc), ptr);
	i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
	i_sll(p, tmp, tmp, PGD_T_LOG2);
	i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
}

#endif /* !CONFIG_64BIT */

static __cpuinit void build_adjust_context(u32 **p, unsigned int ctx)
{
	unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
	unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);

	switch (current_cpu_data.cputype) {
	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4131:
	case CPU_VR4181:
	case CPU_VR4181A:
	case CPU_VR4133:
		shift += 2;
		break;

	default:
		break;
	}

	if (shift)
		i_SRL(p, ctx, ctx, shift);
	i_andi(p, ctx, ctx, mask);
}

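/*
 * Worked example (assuming a configuration with 4K pages and 4-byte
 * PTEs, i.e. PAGE_SHIFT = 12 and PTE_T_LOG2 = 2): shift = 1 and
 * mask = 0xff8, which turns the BadVPN2 field of c0_context into the
 * byte offset of an even/odd pte pair within the page table.
 */
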
static __cpuinit void build_get_ptep(u32 **p, unsigned int tmp,
				     unsigned int ptr)
{
	/*
	 * Bug workaround for the Nevada. It seems as if under certain
	 * circumstances the move from cp0_context might produce a
	 * bogus result when the mfc0 instruction and its consumer are
	 * in a different cacheline or a load instruction, probably any
	 * memory reference, is between them.
	 */
	switch (current_cpu_data.cputype) {
	case CPU_NEVADA:
		i_LW(p, ptr, 0, ptr);
		GET_CONTEXT(p, tmp); /* get context reg */
		break;

	default:
		GET_CONTEXT(p, tmp); /* get context reg */
		i_LW(p, ptr, 0, ptr);
		break;
	}

	build_adjust_context(p, tmp);
	i_ADDU(p, ptr, ptr, tmp); /* add in offset */
}

static __cpuinit void build_update_entries(u32 **p, unsigned int tmp,
					   unsigned int ptep)
{
	/*
	 * 64bit address support (36bit on a 32bit CPU) in a 32bit
	 * Kernel is a special case. Only a few CPUs use it.
	 */
#ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits) {
		i_ld(p, tmp, 0, ptep); /* get even pte */
		i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
		i_dsrl(p, tmp, tmp, 6); /* convert to entrylo0 */
		i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
		i_dsrl(p, ptep, ptep, 6); /* convert to entrylo1 */
		i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
	} else {
		int pte_off_even = sizeof(pte_t) / 2;
		int pte_off_odd = pte_off_even + sizeof(pte_t);

		/* The pte entries are pre-shifted */
		i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
		i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
		i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
		i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
	}
#else
	i_LW(p, tmp, 0, ptep); /* get even pte */
	i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
	if (r45k_bvahwbug())
		build_tlb_probe_entry(p);
	i_SRL(p, tmp, tmp, 6); /* convert to entrylo0 */
	if (r4k_250MHZhwbug())
		i_mtc0(p, 0, C0_ENTRYLO0);
	i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
	i_SRL(p, ptep, ptep, 6); /* convert to entrylo1 */
	if (r45k_bvahwbug())
		i_mfc0(p, tmp, C0_INDEX);
	if (r4k_250MHZhwbug())
		i_mtc0(p, 0, C0_ENTRYLO1);
	i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
#endif
}

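/*
 * Note: EntryLo0/EntryLo1 always map an adjacent even/odd page pair,
 * which is why one pte is loaded at offset 0 and the other at
 * sizeof(pte_t); the shift by 6 assumes the software pte keeps the
 * EntryLo bits starting at bit 6.
 */
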
static void __cpuinit build_r4000_tlb_refill_handler(void)
{
	u32 *p = tlb_handler;
	struct label *l = labels;
	struct reloc *r = relocs;
	u32 *f;
	unsigned int final_len;
	int i;

	memset(tlb_handler, 0, sizeof(tlb_handler));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));
	memset(final_handler, 0, sizeof(final_handler));

	/*
	 * create the plain linear handler
	 */
	if (bcm1250_m3_war()) {
		unsigned int segbits = 44;

		i_dmfc0(&p, K0, C0_BADVADDR);
		i_dmfc0(&p, K1, C0_ENTRYHI);
		i_xor(&p, K0, K0, K1);
		i_dsrl32(&p, K1, K0, 62 - 32);
		i_dsrl(&p, K0, K0, 12 + 1);
		i_dsll32(&p, K0, K0, 64 + 12 + 1 - segbits - 32);
		i_or(&p, K0, K0, K1);
		il_bnez(&p, &r, K0, label_leave);
		/* No need for i_nop */
	}

#ifdef CONFIG_64BIT
	build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
#else
	build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
#endif

	build_get_ptep(&p, K0, K1);
	build_update_entries(&p, K0, K1);
	build_tlb_write_entry(&p, &l, &r, tlb_random);
	l_leave(&l, *p);
	i_eret(&p); /* return from trap */

#ifdef CONFIG_64BIT
	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1);
#endif

	/*
	 * Overflow check: For the 64bit handler, we need at least one
	 * free instruction slot for the wrap-around branch. In worst
	 * case, if the intended insertion point is a delay slot, we
	 * need three, with the second nop'ed and the third being
	 * unused.
	 */
#ifdef CONFIG_32BIT
	if ((p - tlb_handler) > 64)
		panic("TLB refill handler space exceeded");
#else
	if (((p - tlb_handler) > 63)
	    || (((p - tlb_handler) > 61)
		&& insn_has_bdelay(relocs, tlb_handler + 29)))
		panic("TLB refill handler space exceeded");
#endif

	/*
	 * Now fold the handler in the TLB refill handler space.
	 */
#ifdef CONFIG_32BIT
	f = final_handler;
	/* Simplest case, just copy the handler. */
	copy_handler(relocs, labels, tlb_handler, p, f);
	final_len = p - tlb_handler;
#else /* CONFIG_64BIT */
	f = final_handler + 32;
	if ((p - tlb_handler) <= 32) {
		/* Just copy the handler. */
		copy_handler(relocs, labels, tlb_handler, p, f);
		final_len = p - tlb_handler;
	} else {
		u32 *split = tlb_handler + 30;

		/*
		 * Find the split point.
		 */
		if (insn_has_bdelay(relocs, split - 1))
			split--;

		/* Copy first part of the handler. */
		copy_handler(relocs, labels, tlb_handler, split, f);
		f += split - tlb_handler;

		/* Insert branch. */
		l_split(&l, final_handler);
		il_b(&f, &r, label_split);
		if (insn_has_bdelay(relocs, split))
			i_nop(&f);
		else {
			copy_handler(relocs, labels, split, split + 1, f);
			move_labels(labels, f, f + 1, -1);
			f++;
			split++;
		}

		/* Copy the rest of the handler. */
		copy_handler(relocs, labels, split, p, final_handler);
		final_len = (f - (final_handler + 32)) + (p - split);
	}
#endif /* CONFIG_64BIT */

	resolve_relocs(relocs, labels);
	pr_info("Synthesized TLB refill handler (%u instructions).\n",
		final_len);

	f = final_handler;
#ifdef CONFIG_64BIT
	if (final_len > 32)
		final_len = 64;
	else
		f = final_handler + 32;
#endif /* CONFIG_64BIT */
	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");
	for (i = 0; i < final_len; i++)
		pr_debug("\t.word 0x%08x\n", f[i]);
	pr_debug("\t.set pop\n");

	memcpy((void *)ebase, final_handler, 0x100);
}

/*
 * TLB load/store/modify handlers.
 *
 * Only the fastpath gets synthesized at runtime, the slowpath for
 * do_page_fault remains normal asm.
 */
extern void tlb_do_page_fault_0(void);
extern void tlb_do_page_fault_1(void);

/*
 * 128 instructions for the fastpath handler is generous and should
 * never be exceeded.
 */
#define FASTPATH_SIZE 128

u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;

static void __cpuinit
iPTE_LW(u32 **p, struct label **l, unsigned int pte, unsigned int ptr)
{
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		i_lld(p, pte, 0, ptr);
	else
# endif
		i_LL(p, pte, 0, ptr);
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		i_ld(p, pte, 0, ptr);
	else
# endif
		i_LW(p, pte, 0, ptr);
#endif
}

static void __cpuinit
iPTE_SW(u32 **p, struct reloc **r, unsigned int pte, unsigned int ptr,
	unsigned int mode)
{
#ifdef CONFIG_64BIT_PHYS_ADDR
	unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
#endif

	i_ori(p, pte, pte, mode);
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		i_scd(p, pte, 0, ptr);
	else
# endif
		i_SC(p, pte, 0, ptr);

	if (r10000_llsc_war())
		il_beqzl(p, r, pte, label_smp_pgtable_change);
	else
		il_beqz(p, r, pte, label_smp_pgtable_change);

# ifdef CONFIG_64BIT_PHYS_ADDR
	if (!cpu_has_64bits) {
		/* no i_nop needed */
		i_ll(p, pte, sizeof(pte_t) / 2, ptr);
		i_ori(p, pte, pte, hwmode);
		i_sc(p, pte, sizeof(pte_t) / 2, ptr);
		il_beqz(p, r, pte, label_smp_pgtable_change);
		/* no i_nop needed */
		i_lw(p, pte, 0, ptr);
	} else
		i_nop(p);
# else
	i_nop(p);
# endif
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		i_sd(p, pte, 0, ptr);
	else
# endif
		i_SW(p, pte, 0, ptr);

# ifdef CONFIG_64BIT_PHYS_ADDR
	if (!cpu_has_64bits) {
		i_lw(p, pte, sizeof(pte_t) / 2, ptr);
		i_ori(p, pte, pte, hwmode);
		i_sw(p, pte, sizeof(pte_t) / 2, ptr);
		i_lw(p, pte, 0, ptr);
	}
# endif
#endif
}

/*
 * Check if PTE is present, if not then jump to LABEL. PTR points to
 * the page table where this PTE is located, PTE will be re-loaded
 * with its original value.
 */
static void __cpuinit
build_pte_present(u32 **p, struct label **l, struct reloc **r,
		  unsigned int pte, unsigned int ptr, enum label_id lid)
{
	i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
	i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
	il_bnez(p, r, pte, lid);
	iPTE_LW(p, l, pte, ptr);
}

/* Make PTE valid, store result in PTR. */
static void __cpuinit
build_make_valid(u32 **p, struct reloc **r, unsigned int pte,
		 unsigned int ptr)
{
	unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;

	iPTE_SW(p, r, pte, ptr, mode);
}

/*
 * Check if PTE can be written to, if not branch to LABEL. Regardless
 * restore PTE with value from PTR when done.
 */
static void __cpuinit
build_pte_writable(u32 **p, struct label **l, struct reloc **r,
		   unsigned int pte, unsigned int ptr, enum label_id lid)
{
	i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
	i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
	il_bnez(p, r, pte, lid);
	iPTE_LW(p, l, pte, ptr);
}

/* Make PTE writable, update software status bits as well, then store
 * at PTR.
 */
static void __cpuinit
build_make_write(u32 **p, struct reloc **r, unsigned int pte,
		 unsigned int ptr)
{
	unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
			     | _PAGE_DIRTY);

	iPTE_SW(p, r, pte, ptr, mode);
}

/*
 * Check if PTE can be modified, if not branch to LABEL. Regardless
 * restore PTE with value from PTR when done.
 */
static void __cpuinit
build_pte_modifiable(u32 **p, struct label **l, struct reloc **r,
		     unsigned int pte, unsigned int ptr, enum label_id lid)
{
	i_andi(p, pte, pte, _PAGE_WRITE);
	il_beqz(p, r, pte, lid);
	iPTE_LW(p, l, pte, ptr);
}

/*
 * R3000 style TLB load/store/modify handlers.
 */

/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi.
 * Then it returns.
 */
static void __cpuinit
build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
{
	i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	i_mfc0(p, tmp, C0_EPC); /* cp0 delay */
	i_tlbwi(p);
	i_jr(p, tmp);
	i_rfe(p); /* branch delay */
}

/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi
 * or tlbwr as appropriate. This is because the index register
 * may have the probe fail bit set as a result of a trap on a
 * kseg2 access, i.e. without refill. Then it returns.
 */
static void __cpuinit
build_r3000_tlb_reload_write(u32 **p, struct label **l, struct reloc **r,
			     unsigned int pte, unsigned int tmp)
{
	i_mfc0(p, tmp, C0_INDEX);
	i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */
	i_mfc0(p, tmp, C0_EPC); /* branch delay */
	i_tlbwi(p); /* cp0 delay */
	i_jr(p, tmp);
	i_rfe(p); /* branch delay */
	l_r3000_write_probe_fail(l, *p);
	i_tlbwr(p); /* cp0 delay */
	i_jr(p, tmp);
	i_rfe(p); /* branch delay */
}

static void __cpuinit
build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
				   unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	i_mfc0(p, pte, C0_BADVADDR);
	i_lui(p, ptr, rel_hi(pgdc)); /* cp0 delay */
	i_lw(p, ptr, rel_lo(pgdc), ptr);
	i_srl(p, pte, pte, 22); /* load delay */
	i_sll(p, pte, pte, 2);
	i_addu(p, ptr, ptr, pte);
	i_mfc0(p, pte, C0_CONTEXT);
	i_lw(p, ptr, 0, ptr); /* cp0 delay */
	i_andi(p, pte, pte, 0xffc); /* load delay */
	i_addu(p, ptr, ptr, pte);
	i_lw(p, pte, 0, ptr);
	i_tlbp(p); /* load delay */
}

static void __cpuinit build_r3000_tlb_load_handler(void)
{
	u32 *p = handle_tlbl;
	struct label *l = labels;
	struct reloc *r = relocs;
	int i;

	memset(handle_tlbl, 0, sizeof(handle_tlbl));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_present(&p, &l, &r, K0, K1, label_nopage_tlbl);
	i_nop(&p); /* load delay */
	build_make_valid(&p, &r, K0, K1);
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

	l_nopage_tlbl(&l, p);
	i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	i_nop(&p);

	if ((p - handle_tlbl) > FASTPATH_SIZE)
		panic("TLB load handler fastpath space exceeded");

	resolve_relocs(relocs, labels);
	pr_info("Synthesized TLB load handler fastpath (%u instructions).\n",
		(unsigned int)(p - handle_tlbl));

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");
	for (i = 0; i < (p - handle_tlbl); i++)
		pr_debug("\t.word 0x%08x\n", handle_tlbl[i]);
	pr_debug("\t.set pop\n");
}

static void __cpuinit build_r3000_tlb_store_handler(void)
{
	u32 *p = handle_tlbs;
	struct label *l = labels;
	struct reloc *r = relocs;
	int i;

	memset(handle_tlbs, 0, sizeof(handle_tlbs));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_writable(&p, &l, &r, K0, K1, label_nopage_tlbs);
	i_nop(&p); /* load delay */
	build_make_write(&p, &r, K0, K1);
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

	l_nopage_tlbs(&l, p);
	i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	i_nop(&p);

	if ((p - handle_tlbs) > FASTPATH_SIZE)
		panic("TLB store handler fastpath space exceeded");

	resolve_relocs(relocs, labels);
	pr_info("Synthesized TLB store handler fastpath (%u instructions).\n",
		(unsigned int)(p - handle_tlbs));

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");
	for (i = 0; i < (p - handle_tlbs); i++)
		pr_debug("\t.word 0x%08x\n", handle_tlbs[i]);
	pr_debug("\t.set pop\n");
}

static void __cpuinit build_r3000_tlb_modify_handler(void)
{
	u32 *p = handle_tlbm;
	struct label *l = labels;
	struct reloc *r = relocs;
	int i;

	memset(handle_tlbm, 0, sizeof(handle_tlbm));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_modifiable(&p, &l, &r, K0, K1, label_nopage_tlbm);
	i_nop(&p); /* load delay */
	build_make_write(&p, &r, K0, K1);
	build_r3000_pte_reload_tlbwi(&p, K0, K1);

	l_nopage_tlbm(&l, p);
	i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	i_nop(&p);

	if ((p - handle_tlbm) > FASTPATH_SIZE)
		panic("TLB modify handler fastpath space exceeded");

	resolve_relocs(relocs, labels);
	pr_info("Synthesized TLB modify handler fastpath (%u instructions).\n",
		(unsigned int)(p - handle_tlbm));

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");
	for (i = 0; i < (p - handle_tlbm); i++)
		pr_debug("\t.word 0x%08x\n", handle_tlbm[i]);
	pr_debug("\t.set pop\n");
}

/*
 * R4000 style TLB load/store/modify handlers.
 */
static void __cpuinit
build_r4000_tlbchange_handler_head(u32 **p, struct label **l,
				   struct reloc **r, unsigned int pte,
				   unsigned int ptr)
{
#ifdef CONFIG_64BIT
	build_get_pmde64(p, l, r, pte, ptr); /* get pmd in ptr */
#else
	build_get_pgde32(p, pte, ptr); /* get pgd in ptr */
#endif

	i_MFC0(p, pte, C0_BADVADDR);
	i_LW(p, ptr, 0, ptr);
	i_SRL(p, pte, pte, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
	i_andi(p, pte, pte, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
	i_ADDU(p, ptr, ptr, pte);

#ifdef CONFIG_SMP
	l_smp_pgtable_change(l, *p);
#endif
	iPTE_LW(p, l, pte, ptr); /* get even pte */
	if (!m4kc_tlbp_war())
		build_tlb_probe_entry(p);
}

static void __cpuinit
build_r4000_tlbchange_handler_tail(u32 **p, struct label **l,
				   struct reloc **r, unsigned int tmp,
				   unsigned int ptr)
{
	i_ori(p, ptr, ptr, sizeof(pte_t));
	i_xori(p, ptr, ptr, sizeof(pte_t));
	build_update_entries(p, tmp, ptr);
	build_tlb_write_entry(p, l, r, tlb_indexed);
	l_leave(l, *p);
	i_eret(p); /* return from trap */

#ifdef CONFIG_64BIT
	build_get_pgd_vmalloc64(p, l, r, tmp, ptr);
#endif
}

static void __cpuinit build_r4000_tlb_load_handler(void)
{
	u32 *p = handle_tlbl;
	struct label *l = labels;
	struct reloc *r = relocs;
	int i;

	memset(handle_tlbl, 0, sizeof(handle_tlbl));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	if (bcm1250_m3_war()) {
		unsigned int segbits = 44;

		i_dmfc0(&p, K0, C0_BADVADDR);
		i_dmfc0(&p, K1, C0_ENTRYHI);
		i_xor(&p, K0, K0, K1);
		i_dsrl32(&p, K1, K0, 62 - 32);
		i_dsrl(&p, K0, K0, 12 + 1);
		i_dsll32(&p, K0, K0, 64 + 12 + 1 - segbits - 32);
		i_or(&p, K0, K0, K1);
		il_bnez(&p, &r, K0, label_leave);
		/* No need for i_nop */
	}

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_present(&p, &l, &r, K0, K1, label_nopage_tlbl);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	build_make_valid(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

	l_nopage_tlbl(&l, p);
	i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	i_nop(&p);

	if ((p - handle_tlbl) > FASTPATH_SIZE)
		panic("TLB load handler fastpath space exceeded");

	resolve_relocs(relocs, labels);
	pr_info("Synthesized TLB load handler fastpath (%u instructions).\n",
		(unsigned int)(p - handle_tlbl));

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");
	for (i = 0; i < (p - handle_tlbl); i++)
		pr_debug("\t.word 0x%08x\n", handle_tlbl[i]);
	pr_debug("\t.set pop\n");
}

static void __cpuinit build_r4000_tlb_store_handler(void)
{
	u32 *p = handle_tlbs;
	struct label *l = labels;
	struct reloc *r = relocs;
	int i;

	memset(handle_tlbs, 0, sizeof(handle_tlbs));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_writable(&p, &l, &r, K0, K1, label_nopage_tlbs);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	build_make_write(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

	l_nopage_tlbs(&l, p);
	i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	i_nop(&p);

	if ((p - handle_tlbs) > FASTPATH_SIZE)
		panic("TLB store handler fastpath space exceeded");

	resolve_relocs(relocs, labels);
	pr_info("Synthesized TLB store handler fastpath (%u instructions).\n",
		(unsigned int)(p - handle_tlbs));

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");
	for (i = 0; i < (p - handle_tlbs); i++)
		pr_debug("\t.word 0x%08x\n", handle_tlbs[i]);
	pr_debug("\t.set pop\n");
}

static void __cpuinit build_r4000_tlb_modify_handler(void)
{
	u32 *p = handle_tlbm;
	struct label *l = labels;
	struct reloc *r = relocs;
	int i;

	memset(handle_tlbm, 0, sizeof(handle_tlbm));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_modifiable(&p, &l, &r, K0, K1, label_nopage_tlbm);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	/* Present and writable bits set, set accessed and dirty bits. */
	build_make_write(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

	l_nopage_tlbm(&l, p);
	i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	i_nop(&p);

	if ((p - handle_tlbm) > FASTPATH_SIZE)
		panic("TLB modify handler fastpath space exceeded");

	resolve_relocs(relocs, labels);
	pr_info("Synthesized TLB modify handler fastpath (%u instructions).\n",
		(unsigned int)(p - handle_tlbm));

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");
	for (i = 0; i < (p - handle_tlbm); i++)
		pr_debug("\t.word 0x%08x\n", handle_tlbm[i]);
	pr_debug("\t.set pop\n");
}

void __cpuinit build_tlb_refill_handler(void)
{
	/*
	 * The refill handler is generated per-CPU, multi-node systems
	 * may have local storage for it. The other handlers are only
	 * needed once.
	 */
	static int run_once = 0;

	switch (current_cpu_data.cputype) {
	case CPU_R2000:
	case CPU_R3000:
	case CPU_R3000A:
	case CPU_R3081E:
	case CPU_TX3912:
	case CPU_TX3922:
	case CPU_TX3927:
		build_r3000_tlb_refill_handler();
		if (!run_once) {
			build_r3000_tlb_load_handler();
			build_r3000_tlb_store_handler();
			build_r3000_tlb_modify_handler();
			run_once++;
		}
		break;

	case CPU_R6000:
	case CPU_R6000A:
		panic("No R6000 TLB refill handler yet");
		break;

	case CPU_R8000:
		panic("No R8000 TLB refill handler yet");
		break;

	default:
		build_r4000_tlb_refill_handler();
		if (!run_once) {
			build_r4000_tlb_load_handler();
			build_r4000_tlb_store_handler();
			build_r4000_tlb_modify_handler();
			run_once++;
		}
	}
}

void __cpuinit flush_tlb_handlers(void)
{
	flush_icache_range((unsigned long)handle_tlbl,
			   (unsigned long)handle_tlbl + sizeof(handle_tlbl));
	flush_icache_range((unsigned long)handle_tlbs,
			   (unsigned long)handle_tlbs + sizeof(handle_tlbs));
	flush_icache_range((unsigned long)handle_tlbm,
			   (unsigned long)handle_tlbm + sizeof(handle_tlbm));
}