/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/system.h>
extern void except_vec0_generic(void);
extern void except_vec0_nevada(void);
extern void except_vec0_r4000(void);
extern void except_vec0_r4600(void);
extern void except_vec1_generic(void);
extern void except_vec1_r4k(void);
/* CP0 hazard avoidance. */
#define BARRIER __asm__ __volatile__(".set noreorder\n\t" \
				     "nop; nop; nop; nop; nop; nop;\n\t" \
				     ".set reorder\n\t")
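/*
 * After an mtc0 the new CP0 value is not yet visible to a tlbp/tlbwi/tlbwr
 * issued in the next few pipeline slots.  The nops above pad the worst-case
 * hazard window of the CPUs this file supports; later kernels replace this
 * fixed six-nop barrier with per-CPU hazard macros.
 */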
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	BARRIER;

	entry = read_c0_wired();

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/*
		 * Make sure all entries differ.  If they're not different
		 * MIPS32 will take revenge ...
		 */
		write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
		write_c0_index(entry);
		BARRIER;
		tlb_write_indexed();
		entry++;
	}
	BARRIER;
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
}
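/*
 * Flushing a whole address space is done lazily: rather than walking the
 * TLB, drop_mmu_context() hands the mm a fresh ASID, so entries tagged
 * with the old ASID can no longer match and get recycled naturally.
 */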
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0)
		drop_mmu_context(mm, cpu);
}
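/*
 * Each TLB entry maps an even/odd pair of pages through entrylo0/entrylo1,
 * which is why the page counts below are halved and addresses are rounded
 * to a double-page boundary.  Small ranges are probed and invalidated pair
 * by pair; anything larger than half the TLB is cheaper to handle by
 * dropping the whole context.
 */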
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long flags;
		int size;

		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		size = (size + 1) >> 1;
		if (size <= current_cpu_data.tlbsize/2) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				BARRIER;
				tlb_probe();
				BARRIER;
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(CKSEG0 +
				                 (idx << (PAGE_SHIFT + 1)));
				BARRIER;
				tlb_write_indexed();
				BARRIER;
			}
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
		local_irq_restore(flags);
	}
}
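/*
 * Kernel mappings are created with the global bit set, so the kernel
 * variant below needs no ASID handling; oversized ranges simply fall
 * back to a full local_flush_tlb_all().
 */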
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;
	int size;

	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	if (size <= current_cpu_data.tlbsize / 2) {
		int pid = read_c0_entryhi();

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			BARRIER;
			tlb_probe();
			BARRIER;
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
			BARRIER;
			tlb_write_indexed();
			BARRIER;
		}
		write_c0_entryhi(pid);
	} else {
		local_flush_tlb_all();
	}
	local_irq_restore(flags);
}
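/*
 * Single-page flush: probe for the double page containing the address
 * under the mm's ASID and, if it is present, repoint the matching entry
 * at a unique unmapped address with both lo halves invalid.
 */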
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long flags;
		int oldpid, newpid, idx;

		newpid = cpu_asid(cpu, vma->vm_mm);
		page &= (PAGE_MASK << 1);
		local_irq_save(flags);
		oldpid = read_c0_entryhi();
		write_c0_entryhi(page | newpid);
		BARRIER;
		tlb_probe();
		BARRIER;
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		if (idx >= 0) {
			/* Make sure all entries differ. */
			write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
			BARRIER;
			tlb_write_indexed();
			BARRIER;
		}
		write_c0_entryhi(oldpid);
		local_irq_restore(flags);
	}
}
/*
 * This one is only used for pages with the global bit set, so we don't
 * care much about the ASID: a global entry matches no matter which ASID
 * happens to be in c0_entryhi when we probe.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	local_irq_save(flags);
	page &= (PAGE_MASK << 1);
	oldpid = read_c0_entryhi();
	write_c0_entryhi(page);
	BARRIER;
	tlb_probe();
	BARRIER;
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(CKSEG0 + (idx << (PAGE_SHIFT + 1)));
		BARRIER;
		tlb_write_indexed();
		BARRIER;
	}
	write_c0_entryhi(oldpid);

	local_irq_restore(flags);
}
/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and works around it.
 */
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/*
	 * Handle debugger faulting in for debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	pid = read_c0_entryhi() & ASID_MASK;

	local_irq_save(flags);
	address &= (PAGE_MASK << 1);
	write_c0_entryhi(address | pid);
	pgdp = pgd_offset(vma->vm_mm, address);
	BARRIER;
	tlb_probe();
	BARRIER;
	pmdp = pmd_offset(pgdp, address);
	idx = read_c0_index();
	ptep = pte_offset_map(pmdp, address);
	BARRIER;
	/* The even/odd pte pair feeds entrylo0/entrylo1. */
	write_c0_entrylo0(pte_val(*ptep++) >> 6);
	write_c0_entrylo1(pte_val(*ptep) >> 6);
	write_c0_entryhi(address | pid);
	BARRIER;
	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	BARRIER;
	write_c0_entryhi(pid);
	local_irq_restore(flags);
}
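/*
 * The variant below would do the same reload while dodging the R4k
 * "end of page" erratum; it is unreferenced in this file and compiled
 * out, kept only as documentation of the workaround.
 */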
#if 0
static void r4k_update_mmu_cache_hwbug(struct vm_area_struct *vma,
				       unsigned long address, pte_t pte)
{
	unsigned long flags;
	unsigned int asid;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx;

	local_irq_save(flags);
	address &= (PAGE_MASK << 1);
	asid = read_c0_entryhi() & ASID_MASK;
	write_c0_entryhi(address | asid);
	pgdp = pgd_offset(vma->vm_mm, address);
	BARRIER;
	tlb_probe();
	BARRIER;
	pmdp = pmd_offset(pgdp, address);
	idx = read_c0_index();
	ptep = pte_offset_map(pmdp, address);
	write_c0_entrylo0(pte_val(*ptep++) >> 6);
	write_c0_entrylo1(pte_val(*ptep) >> 6);
	BARRIER;
	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	BARRIER;
	local_irq_restore(flags);
}
#endif
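/*
 * Entries whose index is below c0_wired are never selected by tlbwr, so
 * a wired entry installed below survives until it is rewritten
 * explicitly; this is how fixed early mappings (e.g. I/O windows) are
 * pinned into the TLB.
 */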
void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
			    unsigned long entryhi, unsigned long pagemask)
{
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	BARRIER;
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	BARRIER;
	tlb_write_indexed();
	BARRIER;

	write_c0_entryhi(old_ctx);
	BARRIER;
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	local_irq_restore(flags);
}
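/*
 * Example (illustrative values only): board setup code could pin a 16MB
 * double page at a fixed virtual address,
 *
 *	add_wired_entry(entrylo0, entrylo1, vaddr, PM_16M);
 *
 * where the entrylo values are assumed to already carry the PFN plus
 * the C, D, V and G bits in c0_entrylo format.
 */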
/*
 * Used for loading TLB entries before trap_init() has started, when we
 * don't actually want to add a wired entry which remains throughout the
 * lifetime of the system.
 */

static int temp_tlb_entry __initdata;

__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
			       unsigned long entryhi, unsigned long pagemask)
{
	int ret = 0;
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	if (--temp_tlb_entry < wired) {
		printk(KERN_WARNING
		       "No TLB space left for add_temporary_entry\n");
		ret = -ENOSPC;
		goto out;
	}

	write_c0_index(temp_tlb_entry);
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	BARRIER;
	tlb_write_indexed();
	BARRIER;

	write_c0_entryhi(old_ctx);
	write_c0_pagemask(old_pagemask);
out:
	local_irq_restore(flags);
	return ret;
}
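/*
 * Temporary entries are handed out from the top of the TLB downwards
 * (temp_tlb_entry starts at tlbsize - 1, see tlb_init() below).  They
 * are not wired, so the next full flush or random replacement reclaims
 * them.
 */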
static void __init probe_tlb(unsigned long config)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int reg;

	/*
	 * If this isn't a MIPS32 / MIPS64 compliant CPU the Config 1
	 * register is not supported; assume R4k style.  CPU probing has
	 * already figured out the number of TLB entries in that case.
	 */
	if ((c->processor_id & 0xff0000) == PRID_COMP_LEGACY)
		return;

	reg = read_c0_config1();
	if (!((config >> 7) & 3))
		panic("No TLB present");

	c->tlbsize = ((reg >> 25) & 0x3f) + 1;
}
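/*
 * Decoding above: the low bits of the MT (MMU type) field of c0_config,
 * which starts at bit 7, are tested; zero means no TLB is present.
 * Bits 30:25 of c0_config1 ("MMU Size") hold the number of TLB entries
 * minus one.
 */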
void __init tlb_init(void)
{
	unsigned int config = read_c0_config();

	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
	 *     be set for 4kb pages.
	 */
	probe_tlb(config);
	write_c0_pagemask(PM_DEFAULT_MASK);
	write_c0_wired(0);
	temp_tlb_entry = current_cpu_data.tlbsize - 1;
	local_flush_tlb_all();

#ifdef CONFIG_MIPS32
	if (current_cpu_data.cputype == CPU_NEVADA)
		memcpy((void *)KSEG0, &except_vec0_nevada, 0x80);
	else if (current_cpu_data.cputype == CPU_R4600)
		memcpy((void *)KSEG0, &except_vec0_r4600, 0x80);
	else
		memcpy((void *)KSEG0, &except_vec0_r4000, 0x80);
	memcpy((void *)(KSEG0 + 0x080), &except_vec1_generic, 0x80);
	flush_icache_range(KSEG0, KSEG0 + 0x100);
#endif
#ifdef CONFIG_MIPS64
	memcpy((void *)(CKSEG0 + 0x00), &except_vec0_generic, 0x80);
	memcpy((void *)(CKSEG0 + 0x80), except_vec1_r4k, 0x80);
	flush_icache_range(CKSEG0 + 0x80, CKSEG0 + 0x100);
#endif
}