/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1997, 1998, 1999 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2000 Kanoj Sarcar (kanoj@sgi.com)
 */
10 #include <linux/init.h>
11 #include <linux/kernel.h>
12 #include <linux/sched.h>
15 #include <asm/pgtable.h>
16 #include <asm/r10kcache.h>
17 #include <asm/system.h>
18 #include <asm/sgialib.h>
19 #include <asm/mmu_context.h>
/*
 * Nonzero when the secondary (L2) cache line size is 64 bytes, zero when it
 * is 128 — presumably initialized from sc_lsize() in ld_mmu_andes()'s switch
 * (the assignments fell outside this view; confirm against the full file).
 * Consulted by andes_flush_icache_page() to pick the right blast routine.
 */
static int scache_lsz64;
24 * This version has been tuned on an Origin. For other machines the arguments
25 * of the pref instructin may have to be tuned differently.
27 static void andes_clear_page(void * page
)
33 "1:\tpref 7,512(%0)\n\t"
47 :"0" (page
), "I" (PAGE_SIZE
)
51 /* R10000 has no Create_Dirty type cacheops. */
52 static void andes_copy_page(void * to
, void * from
)
54 unsigned long dummy1
, dummy2
, reg1
, reg2
, reg3
, reg4
;
60 "1:\tpref\t0,2*128(%1)\n\t"
61 "pref\t1,2*128(%0)\n\t"
83 :"=r" (dummy1
), "=r" (dummy2
), "=&r" (reg1
), "=&r" (reg2
),
84 "=&r" (reg3
), "=&r" (reg4
)
85 :"0" (to
), "1" (from
), "I" (PAGE_SIZE
));
/* Cache operations.  These are only used with the virtual memory system,
   not for non-coherent I/O so it's ok to ignore the secondary caches.  */
static void
andes_flush_cache_l1(void)
{
	/* Blast both primaries; the 32/64 suffixes are the L1 dcache and
	   icache line sizes the blast helpers are specialized for. */
	blast_dcache32(); blast_icache64();
}
/*
 * This is only used during initialization time. vmalloc() also calls
 * this, but that will be changed pretty soon.
 */
static void
andes_flush_cache_l2(void)
{
	switch (sc_lsize()) {
	/*
	 * NOTE(review): the extraction dropped original lines 104-110 (the
	 * case arms).  Restored by analogy with the 64/128-byte variants
	 * used in andes_flush_icache_page() — verify against version
	 * control.
	 */
	case 64:
		blast_scache64();
		break;
	case 128:
		blast_scache128();
		break;
	default:
		printk("Unknown L2 line size\n");
	}
}
117 andes_flush_icache_page(unsigned long page
)
120 blast_scache64_page(page
);
122 blast_scache128_page(page
);
126 andes_flush_cache_sigtramp(unsigned long addr
)
128 unsigned long daddr
, iaddr
;
130 daddr
= addr
& ~(dc_lsize
- 1);
131 protected_writeback_dcache_line(daddr
);
132 protected_writeback_dcache_line(daddr
+ dc_lsize
);
133 iaddr
= addr
& ~(ic_lsize
- 1);
134 protected_flush_icache_line(iaddr
);
135 protected_flush_icache_line(iaddr
+ ic_lsize
);
/*
 * R10000 TLB size.  Each entry maps an even/odd pair of pages, hence the
 * HALF constant used when deciding between per-page and full flushes in
 * andes_flush_tlb_range().
 */
#define NTLB_ENTRIES 64
#define NTLB_ENTRIES_HALF 32
142 andes_flush_tlb_all(void)
145 unsigned long old_ctx
;
152 __save_and_cli(flags
);
153 /* Save old context and create impossible VPN2 value */
154 old_ctx
= get_entryhi() & 0xff;
161 /* Blast 'em all away. */
162 while(entry
< NTLB_ENTRIES
) {
167 set_entryhi(old_ctx
);
168 __restore_flags(flags
);
171 static void andes_flush_tlb_mm(struct mm_struct
*mm
)
173 if (CPU_CONTEXT(smp_processor_id(), mm
) != 0) {
177 printk("[tlbmm<%d>]", mm
->context
);
179 __save_and_cli(flags
);
180 get_new_cpu_mmu_context(mm
, smp_processor_id());
181 if(mm
== current
->mm
)
182 set_entryhi(CPU_CONTEXT(smp_processor_id(), mm
) & 0xff);
183 __restore_flags(flags
);
188 andes_flush_tlb_range(struct mm_struct
*mm
, unsigned long start
,
191 if (CPU_CONTEXT(smp_processor_id(), mm
) != 0) {
196 printk("[tlbrange<%02x,%08lx,%08lx>]", (mm
->context
& 0xff),
199 __save_and_cli(flags
);
200 size
= (end
- start
+ (PAGE_SIZE
- 1)) >> PAGE_SHIFT
;
201 size
= (size
+ 1) >> 1;
202 if(size
<= NTLB_ENTRIES_HALF
) {
203 int oldpid
= (get_entryhi() & 0xff);
204 int newpid
= (CPU_CONTEXT(smp_processor_id(), mm
) & 0xff);
206 start
&= (PAGE_MASK
<< 1);
207 end
+= ((PAGE_SIZE
<< 1) - 1);
208 end
&= (PAGE_MASK
<< 1);
212 set_entryhi(start
| newpid
);
213 start
+= (PAGE_SIZE
<< 1);
225 get_new_cpu_mmu_context(mm
, smp_processor_id());
226 if(mm
== current
->mm
)
227 set_entryhi(CPU_CONTEXT(smp_processor_id(), mm
) &
230 __restore_flags(flags
);
235 andes_flush_tlb_page(struct vm_area_struct
*vma
, unsigned long page
)
237 if (CPU_CONTEXT(smp_processor_id(), vma
->vm_mm
) != 0) {
239 int oldpid
, newpid
, idx
;
242 printk("[tlbpage<%d,%08lx>]", vma
->vm_mm
->context
, page
);
244 newpid
= (CPU_CONTEXT(smp_processor_id(), vma
->vm_mm
) & 0xff);
245 page
&= (PAGE_MASK
<< 1);
246 __save_and_cli(flags
);
247 oldpid
= (get_entryhi() & 0xff);
248 set_entryhi(page
| newpid
);
260 __restore_flags(flags
);
264 /* XXX Simplify this. On the R10000 writing a TLB entry for an virtual
265 address that already exists will overwrite the old entry and not result
266 in TLB malfunction or TLB shutdown. */
267 static void andes_update_mmu_cache(struct vm_area_struct
* vma
,
268 unsigned long address
, pte_t pte
)
277 * Handle debugger faulting in for debugee.
279 if (current
->active_mm
!= vma
->vm_mm
)
282 __save_and_cli(flags
);
283 pid
= get_entryhi() & 0xff;
285 if((pid
!= (CPU_CONTEXT(smp_processor_id(), vma
->vm_mm
) & 0xff)) ||
286 (CPU_CONTEXT(smp_processor_id(), vma
->vm_mm
) == 0)) {
287 printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%d
288 tlbpid=%d\n", (int) (CPU_CONTEXT(smp_processor_id(),
289 vma
->vm_mm
) & 0xff), pid
);
292 address
&= (PAGE_MASK
<< 1);
293 set_entryhi(address
| (pid
));
294 pgdp
= pgd_offset(vma
->vm_mm
, address
);
296 pmdp
= pmd_offset(pgdp
, address
);
298 ptep
= pte_offset(pmdp
, address
);
299 set_entrylo0(pte_val(*ptep
++) >> 6);
300 set_entrylo1(pte_val(*ptep
) >> 6);
301 set_entryhi(address
| (pid
));
308 __restore_flags(flags
);
311 static void andes_show_regs(struct pt_regs
*regs
)
313 printk("Cpu %d\n", smp_processor_id());
314 /* Saved main processor registers. */
315 printk("$0 : %016lx %016lx %016lx %016lx\n",
316 0UL, regs
->regs
[1], regs
->regs
[2], regs
->regs
[3]);
317 printk("$4 : %016lx %016lx %016lx %016lx\n",
318 regs
->regs
[4], regs
->regs
[5], regs
->regs
[6], regs
->regs
[7]);
319 printk("$8 : %016lx %016lx %016lx %016lx\n",
320 regs
->regs
[8], regs
->regs
[9], regs
->regs
[10], regs
->regs
[11]);
321 printk("$12 : %016lx %016lx %016lx %016lx\n",
322 regs
->regs
[12], regs
->regs
[13], regs
->regs
[14], regs
->regs
[15]);
323 printk("$16 : %016lx %016lx %016lx %016lx\n",
324 regs
->regs
[16], regs
->regs
[17], regs
->regs
[18], regs
->regs
[19]);
325 printk("$20 : %016lx %016lx %016lx %016lx\n",
326 regs
->regs
[20], regs
->regs
[21], regs
->regs
[22], regs
->regs
[23]);
327 printk("$24 : %016lx %016lx\n",
328 regs
->regs
[24], regs
->regs
[25]);
329 printk("$28 : %016lx %016lx %016lx %016lx\n",
330 regs
->regs
[28], regs
->regs
[29], regs
->regs
[30], regs
->regs
[31]);
331 printk("Hi : %016lx\n", regs
->hi
);
332 printk("Lo : %016lx\n", regs
->lo
);
334 /* Saved cp0 registers. */
335 printk("epc : %016lx\nbadvaddr: %016lx\n",
336 regs
->cp0_epc
, regs
->cp0_badvaddr
);
337 printk("Status : %08x\nCause : %08x\n",
338 (unsigned int) regs
->cp0_status
, (unsigned int) regs
->cp0_cause
);
341 void __init
ld_mmu_andes(void)
343 printk("CPU revision is: %08x\n", read_32bit_cp0_register(CP0_PRID
));
345 printk("Primary instruction cache %dkb, linesize %d bytes\n",
346 icache_size
>> 10, ic_lsize
);
347 printk("Primary data cache %dkb, linesize %d bytes\n",
348 dcache_size
>> 10, dc_lsize
);
349 printk("Secondary cache sized at %ldK, linesize %ld\n",
350 scache_size() >> 10, sc_lsize());
352 _clear_page
= andes_clear_page
;
353 _copy_page
= andes_copy_page
;
355 _flush_cache_l1
= andes_flush_cache_l1
;
356 _flush_cache_l2
= andes_flush_cache_l2
;
357 _flush_cache_sigtramp
= andes_flush_cache_sigtramp
;
359 _flush_tlb_all
= andes_flush_tlb_all
;
360 _flush_tlb_mm
= andes_flush_tlb_mm
;
361 _flush_tlb_range
= andes_flush_tlb_range
;
362 _flush_tlb_page
= andes_flush_tlb_page
;
364 switch (sc_lsize()) {
372 printk("Unknown L2 line size\n");
376 update_mmu_cache
= andes_update_mmu_cache
;
378 _show_regs
= andes_show_regs
;
383 * You should never change this register:
384 * - On R4600 1.7 the tlbp never hits for pages smaller than
385 * the value in the c0_pagemask register.
386 * - The entire mm handling assumes the c0_pagemask register to
387 * be set for 4kb pages.
389 write_32bit_cp0_register(CP0_PAGEMASK
, PM_4K
);
391 /* From this point on the ARC firmware is dead. */
394 /* Did I tell you that ARC SUCKS? */