2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * Copyright (C) 2000 Silicon Graphics, Inc.
7 * Written by Ulf Carlsson (ulfc@engr.sgi.com)
8 * Copyright (C) 2002 Maciej W. Rozycki
10 #include <linux/config.h>
11 #include <linux/init.h>
12 #include <linux/threads.h>
14 #include <asm/regdef.h>
15 #include <asm/mipsregs.h>
16 #include <asm/pgtable.h>
17 #include <asm/stackframe.h>
21 * After this macro runs we have a pointer to the pte of the address
22 * that caused the fault in PTR.
#
# LOAD_PTE2 ptr, tmp, kaddr
#   In:   CP0_BADVADDR / CP0_XCONTEXT describe the faulting address;
#         CP0_CONTEXT carries the per-CPU pgd pointer in its upper bits.
#   Out:  \ptr points at the pte of the faulting address (per the header
#         comment above); \tmp is clobbered as scratch.
#   \kaddr: label for kernel-range addresses — the branch that uses it is
#         not visible in this chunk; confirm against the full file.
24 .macro LOAD_PTE2, ptr, tmp, kaddr
# Recover pgd_current[cpu] from CP0_CONTEXT: arithmetic shift right by 23
# yields the sign-extended pgd base pointer (see inline comment below).
26 dmfc0 \ptr, CP0_CONTEXT
27 dmfc0 \tmp, CP0_BADVADDR
28 dsra \ptr, 23 # get pgd_current[cpu]
30 dmfc0 \tmp, CP0_BADVADDR
# Level 1 of the page-table walk: index the pgd by the faulting address.
# The "-3" folds the *8 scaling for 64-bit entries into the shift.
35 dsrl \tmp, (PGDIR_SHIFT-3) # get pgd offset in bytes
36 andi \tmp, ((PTRS_PER_PGD - 1)<<3)
37 daddu \ptr, \tmp # add in pgd offset
38 dmfc0 \tmp, CP0_BADVADDR
# Level 2: same indexing scheme against the pmd just loaded.
39 ld \ptr, (\ptr) # get pmd pointer
40 dsrl \tmp, (PMD_SHIFT-3) # get pmd offset in bytes
41 andi \tmp, ((PTRS_PER_PMD - 1)<<3)
42 daddu \ptr, \tmp # add in pmd offset
# Level 3: XCONTEXT's BadVPN2 field masked to 0xff0 gives the byte
# offset of the even pte of the even/odd pair inside the pte page.
# NOTE(review): the final combine of \ptr and \tmp and the .endm are not
# visible in this chunk — verify against the full source.
43 dmfc0 \tmp, CP0_XCONTEXT
44 ld \ptr, (\ptr) # get pte pointer
45 andi \tmp, 0xff0 # get pte offset
51 * Ditto for the kernel table.
#
# LOAD_KPTE2 ptr, tmp, not_vmalloc
#   Like LOAD_PTE2, but walks the kernel pte table (kptbl) used for the
#   vmalloc range. Branches to \not_vmalloc when the faulting address is
#   outside that range. Clobbers \tmp; leaves the pte pointer in \ptr.
53 .macro LOAD_KPTE2, ptr, tmp, not_vmalloc
55 * First, determine that the address is in/above vmalloc range.
57 dmfc0 \tmp, CP0_BADVADDR
58 dli \ptr, VMALLOC_START
61 * Now find offset into kptbl.
# \tmp = offset of the fault above VMALLOC_START.
63 dsubu \tmp, \tmp, \ptr
# One kptbl entry covers an even/odd page pair (hence PAGE_SHIFT+1);
# each entry is 16 bytes (hence the shift left by 4).
65 dsrl \tmp, (PAGE_SHIFT+1) # get vpn2
66 dsll \tmp, 4 # byte offset of pte
# NOTE(review): \ptr is presumably reloaded with the kptbl base on a line
# not visible in this chunk before this add — confirm against full file.
67 daddu \ptr, \ptr, \tmp
70 * Determine that fault address is within vmalloc range.
# NOTE(review): the computation that sets \tmp for this range check, and
# the .endm, are not visible in this chunk.
74 beqz \tmp, \not_vmalloc # not vmalloc
80 * This places the even/odd pte pair in the page table at the pte
81 * entry pointed to by PTE into ENTRYLO0 and ENTRYLO1.
#
# PTE_RELOAD pte0, pte1
#   In:  \pte0 / \pte1 = even / odd ptes of the pair (both clobbered).
#   The >> 6 drops the software bits so the hardware PFN/flag fields
#   line up with the EntryLo register format.
83 .macro PTE_RELOAD, pte0, pte1
84 dsrl \pte0, 6 # convert to entrylo0
85 dmtc0 \pte0, CP0_ENTRYLO0 # load it
86 dsrl \pte1, 6 # convert to entrylo1
# NOTE(review): the .endm (and any hazard-barrier nops) is not visible
# in this chunk.
87 dmtc0 \pte1, CP0_ENTRYLO1 # load it
# Catch-all for exception vector 0: this vector should never be taken on
# these configurations, so reaching it is a fatal kernel bug.
98 LEAF(except_vec0_generic)
100 PANIC("Unused vector called")
103 END(except_vec0_generic)
107 * TLB refill handlers for the R4000 and SB1.
108 * Attention: We may only use 32 instructions / 128 bytes.
# The real handler does not fit in the 128-byte vector slot, so the
# vector only loads its address and jumps there.
# NOTE(review): the jr k0 (+ delay slot) and END() are not visible in
# this chunk — confirm against the full file.
111 LEAF(except_vec1_r4k)
113 dla k0, handle_vec1_r4k
# SB1 variant of the refill vector. It reads BADVADDR and ENTRYHI and
# shifts the faulting address down to its VPN2 before dispatching to the
# common handler — NOTE(review): this looks like a compare of the
# faulting VPN2 against EntryHi (an SB-1 spurious-refill workaround), but
# the compare/branch lines between these instructions are not visible in
# this chunk; confirm against the full file. The final jump via k0 and
# the END() marker are likewise not visible here.
118 LEAF(except_vec1_sb1)
120 dmfc0 k0, CP0_BADVADDR
121 dmfc0 k1, CP0_ENTRYHI
123 dsrl k0, k0, PAGE_SHIFT+1
127 dla k0, handle_vec1_r4k
# Out-of-line TLB refill handler for R4000-class CPUs (jumped to from
# except_vec1_r4k / except_vec1_sb1). Loads the even/odd pte pair for
# the faulting address and (on lines not visible in this chunk,
# presumably via PTE_RELOAD + tlbwr + eret — confirm against full file)
# writes them into the TLB. k1 holds the even-pte pointer on entry to
# the loads; k0/k1 are the only registers an exception handler may use.
138 LEAF(handle_vec1_r4k)
141 ld k0, 0(k1) # get even pte
142 ld k1, 8(k1) # get odd pte
# Kernel (vmalloc) addresses take this path instead: index the kernel
# pte table, panicking via invalid_vmalloc_address if out of range.
149 9: # handle the vmalloc range
150 LOAD_KPTE2 k1 k0 invalid_vmalloc_address
151 ld k0, 0(k1) # get even pte
# NOTE(review): the reload/eret sequence and END() are not visible here.
152 ld k1, 8(k1) # get odd pte
164 * TLB refill handler for the R10000.
165 * Attention: We may only use 32 instructions / 128 bytes.
# Same trampoline pattern as except_vec1_r4k: the full handler does not
# fit in the vector slot, so only load its address and jump.
# NOTE(review): the jr k0 (+ delay slot) between these lines is not
# visible in this chunk.
168 LEAF(except_vec1_r10k)
170 dla k0, handle_vec1_r10k
173 END(except_vec1_r10k)
# Out-of-line TLB refill handler for the R10000; mirrors handle_vec1_r4k
# (the R10000 differs in hazard/eret behavior, handled on lines not
# visible in this chunk). k1 holds the even-pte pointer at the loads.
178 LEAF(handle_vec1_r10k)
181 ld k0, 0(k1) # get even pte
182 ld k1, 8(k1) # get odd pte
# Kernel (vmalloc) addresses: index the kernel pte table instead,
# panicking via invalid_vmalloc_address if out of range.
188 9: # handle the vmalloc range
189 LOAD_KPTE2 k1 k0 invalid_vmalloc_address
190 ld k0, 0(k1) # get even pte
# NOTE(review): the PTE_RELOAD/tlbwr/eret sequence between these loads
# and END() is not visible in this chunk — confirm against full file.
191 ld k1, 8(k1) # get odd pte
196 END(handle_vec1_r10k)
# Fatal-error landing pad used by LOAD_KPTE2: a refill fault was taken on
# a kernel-segment address outside the vmalloc range, which can only be a
# kernel bug — panic rather than fill the TLB with garbage.
200 LEAF(invalid_vmalloc_address)
202 PANIC("Invalid kernel address")
205 END(invalid_vmalloc_address)