/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/bitops.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
/* Primary cache parameters. */
static unsigned long icache_size, dcache_size, scache_size;
unsigned long icache_way_size, dcache_way_size, scache_way_size;

#include <asm/cacheops.h>
#include <asm/r4kcache.h>

extern void andes_clear_page(void * page);
extern void r4k_clear_page32_d16(void * page);
extern void r4k_clear_page32_d32(void * page);
extern void r4k_clear_page_d16(void * page);
extern void r4k_clear_page_d32(void * page);
extern void r4k_clear_page_r4600_v1(void * page);
extern void r4k_clear_page_r4600_v2(void * page);
extern void r4k_clear_page_s16(void * page);
extern void r4k_clear_page_s32(void * page);
extern void r4k_clear_page_s64(void * page);
extern void r4k_clear_page_s128(void * page);
extern void andes_copy_page(void * to, void * from);
extern void r4k_copy_page_d16(void * to, void * from);
extern void r4k_copy_page_d32(void * to, void * from);
extern void r4k_copy_page_r4600_v1(void * to, void * from);
extern void r4k_copy_page_r4600_v2(void * to, void * from);
extern void r4k_copy_page_s16(void * to, void * from);
extern void r4k_copy_page_s32(void * to, void * from);
extern void r4k_copy_page_s64(void * to, void * from);
extern void r4k_copy_page_s128(void * to, void * from);

/*
 * Dummy cache handling routines for machines without board caches.
 */
static void no_sc_noop(void) {}

static struct bcache_ops no_sc_ops = {
	.bc_enable = (void *)no_sc_noop,
	.bc_disable = (void *)no_sc_noop,
	.bc_wback_inv = (void *)no_sc_noop,
	.bc_inv = (void *)no_sc_noop
};

struct bcache_ops *bcops = &no_sc_ops;
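
/*
 * Early R4600 revisions need care around hit-type cacheops; see the war
 * macros in <asm/war.h>.  For V2.0 an uncached KSEG1 load is believed to
 * force any pending cache operation to complete before the next hit op
 * is issued; for V1.x a short run of nops serves the same purpose.
 */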
#define R4600_HIT_CACHEOP_WAR_IMPL					\
do {									\
	if (R4600_V2_HIT_CACHEOP_WAR &&					\
	    (read_c0_prid() & 0xfff0) == 0x2020) {	/* R4600 V2.0 */\
		*(volatile unsigned long *)KSEG1;			\
	}								\
	if (R4600_V1_HIT_CACHEOP_WAR)					\
		__asm__ __volatile__("nop;nop;nop;nop");		\
} while (0)
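
/*
 * All the blast_* wrappers below share one lazy-dispatch trick: a static
 * computed-goto target initially points at the init: label, which reads
 * the line size once and repoints the target at the matching blast
 * routine, so every later call jumps straight to the right variant.
 */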
static void r4k_blast_dcache_page(unsigned long addr)
{
	static void *l = &&init;
	unsigned long dc_lsize;

	goto *l;

dc_16:
	blast_dcache16_page(addr);
	return;

dc_32:
	R4600_HIT_CACHEOP_WAR_IMPL;
	blast_dcache32_page(addr);
	return;

init:
	dc_lsize = current_cpu_data.dcache.linesz;

	if (dc_lsize == 16)
		l = &&dc_16;
	else if (dc_lsize == 32)
		l = &&dc_32;
	goto *l;
}

static void r4k_blast_dcache_page_indexed(unsigned long addr)
{
	static void *l = &&init;
	unsigned long dc_lsize;

	goto *l;

dc_16:
	blast_dcache16_page_indexed(addr);
	return;

dc_32:
	blast_dcache32_page_indexed(addr);
	return;

init:
	dc_lsize = current_cpu_data.dcache.linesz;

	if (dc_lsize == 16)
		l = &&dc_16;
	else if (dc_lsize == 32)
		l = &&dc_32;
	goto *l;
}

static void r4k_blast_dcache(void)
{
	static void *l = &&init;
	unsigned long dc_lsize;

	goto *l;

dc_16:
	blast_dcache16();
	return;

dc_32:
	blast_dcache32();
	return;

init:
	dc_lsize = current_cpu_data.dcache.linesz;

	if (dc_lsize == 16)
		l = &&dc_16;
	else if (dc_lsize == 32)
		l = &&dc_32;
	goto *l;
}

static void r4k_blast_icache_page(unsigned long addr)
{
	unsigned long ic_lsize = current_cpu_data.icache.linesz;
	static void *l = &&init;

	goto *l;

ic_16:
	blast_icache16_page(addr);
	return;

ic_32:
	blast_icache32_page(addr);
	return;

ic_64:
	blast_icache64_page(addr);
	return;

init:
	if (ic_lsize == 16)
		l = &&ic_16;
	else if (ic_lsize == 32)
		l = &&ic_32;
	else if (ic_lsize == 64)
		l = &&ic_64;
	goto *l;
}

static void r4k_blast_icache_page_indexed(unsigned long addr)
{
	unsigned long ic_lsize = current_cpu_data.icache.linesz;
	static void *l = &&init;

	goto *l;

ic_16:
	blast_icache16_page_indexed(addr);
	return;

ic_32:
	blast_icache32_page_indexed(addr);
	return;

ic_64:
	blast_icache64_page_indexed(addr);
	return;

init:
	if (ic_lsize == 16)
		l = &&ic_16;
	else if (ic_lsize == 32)
		l = &&ic_32;
	else if (ic_lsize == 64)
		l = &&ic_64;
	goto *l;
}

static void r4k_blast_icache(void)
{
	unsigned long ic_lsize = current_cpu_data.icache.linesz;
	static void *l = &&init;

	goto *l;

ic_16:
	blast_icache16();
	return;

ic_32:
	blast_icache32();
	return;

ic_64:
	blast_icache64();
	return;

init:
	if (ic_lsize == 16)
		l = &&ic_16;
	else if (ic_lsize == 32)
		l = &&ic_32;
	else if (ic_lsize == 64)
		l = &&ic_64;
	goto *l;
}

static void r4k_blast_scache_page(unsigned long addr)
{
	unsigned long sc_lsize = current_cpu_data.scache.linesz;
	static void *l = &&init;

	goto *l;

sc_16:
	blast_scache16_page(addr);
	return;

sc_32:
	blast_scache32_page(addr);
	return;

sc_64:
	blast_scache64_page(addr);
	return;

sc_128:
	blast_scache128_page(addr);
	return;

init:
	if (sc_lsize == 16)
		l = &&sc_16;
	else if (sc_lsize == 32)
		l = &&sc_32;
	else if (sc_lsize == 64)
		l = &&sc_64;
	else if (sc_lsize == 128)
		l = &&sc_128;
	goto *l;
}

static void r4k_blast_scache(void)
{
	unsigned long sc_lsize = current_cpu_data.scache.linesz;
	static void *l = &&init;

	goto *l;

sc_16:
	blast_scache16();
	return;

sc_32:
	blast_scache32();
	return;

sc_64:
	blast_scache64();
	return;

sc_128:
	blast_scache128();
	return;

init:
	if (sc_lsize == 16)
		l = &&sc_16;
	else if (sc_lsize == 32)
		l = &&sc_32;
	else if (sc_lsize == 64)
		l = &&sc_64;
	else if (sc_lsize == 128)
		l = &&sc_128;
	goto *l;
}

static void r4k_flush_cache_all(void)
{
	if (!cpu_has_dc_aliases)
		return;

	r4k_blast_dcache();
	r4k_blast_icache();
}

static void r4k___flush_cache_all(void)
{
	r4k_blast_dcache();
	r4k_blast_icache();

	switch (current_cpu_data.cputype) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R10000:
	case CPU_R12000:
		r4k_blast_scache();
	}
}

static void r4k_flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	if (cpu_context(smp_processor_id(), vma->vm_mm) != 0) {
		r4k_blast_dcache();
		if (vma->vm_flags & VM_EXEC)
			r4k_blast_icache();
	}
}

static void r4k_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpu_has_dc_aliases)
		return;

	if (!cpu_context(smp_processor_id(), mm))
		return;

	r4k_blast_dcache();
	r4k_blast_icache();

	/*
	 * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if
	 * we only flush the primary caches; R10000 and R12000 behave sanely.
	 */
	if (current_cpu_data.cputype == CPU_R4000SC ||
	    current_cpu_data.cputype == CPU_R4000MC ||
	    current_cpu_data.cputype == CPU_R4400SC ||
	    current_cpu_data.cputype == CPU_R4400MC)
		r4k_blast_scache();
}

static void r4k_flush_cache_page(struct vm_area_struct *vma,
	unsigned long page)
{
	int exec = vma->vm_flags & VM_EXEC;
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	/*
	 * If the mm owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (cpu_context(smp_processor_id(), mm) == 0)
		return;

	page &= PAGE_MASK;
	pgdp = pgd_offset(mm, page);
	pmdp = pmd_offset(pgdp, page);
	ptep = pte_offset(pmdp, page);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_PRESENT))
		return;

	/*
	 * Doing flushes for another ASID than the current one is too
	 * difficult since the stupid R4k caches do a TLB translation for
	 * every cache flush operation.  So we do indexed flushes in that
	 * case, which doesn't overly flush the cache too much.
	 */
	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
		if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
			r4k_blast_dcache_page(page);
		if (exec)
			r4k_blast_icache_page(page);

		return;
	}

	/*
	 * Do an indexed flush; it's too much work to get the (possible)
	 * TLB refills to work correctly.
	 */
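	/*
	 * Masking the address with (dcache_size - 1) and rebasing it into
	 * KSEG0 gives a kernel address with the same cache index bits, so
	 * the indexed ops below hit the right lines without requiring a
	 * TLB mapping for the user page.
	 */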
	page = (KSEG0 + (page & (dcache_size - 1)));
	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
		r4k_blast_dcache_page_indexed(page);
	if (exec) {
		if (cpu_has_vtag_icache) {
			int cpu = smp_processor_id();

			if (cpu_context(cpu, vma->vm_mm) != 0)
				drop_mmu_context(vma->vm_mm, cpu);
		} else
			r4k_blast_icache_page_indexed(page);
	}
}

static void r4k_flush_data_cache_page(unsigned long addr)
{
	r4k_blast_dcache_page(addr);
}

static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	unsigned long dc_lsize = current_cpu_data.dcache.linesz;
	unsigned long ic_lsize = current_cpu_data.icache.linesz;
	unsigned long addr, aend;

	if (!cpu_has_ic_fills_f_dc) {
		if (end - start > dcache_size)
			r4k_blast_dcache();
		else {
			addr = start & ~(dc_lsize - 1);
			aend = (end - 1) & ~(dc_lsize - 1);

			while (1) {
				/* Hit_Writeback_Inv_D */
				protected_writeback_dcache_line(addr);
				if (addr == aend)
					break;
				addr += dc_lsize;
			}
		}
	}

	if (end - start > icache_size)
		r4k_blast_icache();
	else {
		addr = start & ~(ic_lsize - 1);
		aend = (end - 1) & ~(ic_lsize - 1);
		while (1) {
			/* Hit_Invalidate_I */
			protected_flush_icache_line(addr);
			if (addr == aend)
				break;
			addr += ic_lsize;
		}
	}
}

/*
 * Ok, this seriously sucks.  We use them to flush a user page but don't
 * know the virtual address, so we have to blast away the whole icache
 * which is significantly more expensive than the real thing.  OTOH we at
 * least know the kernel address of the page so we can flush it
 * selectively.
 */
static void r4k_flush_icache_page(struct vm_area_struct *vma,
	struct page *page)
{
	/*
	 * If there's no context yet, or the page isn't executable, no icache
	 * flush is needed.
	 */
	if (!(vma->vm_flags & VM_EXEC))
		return;

	/*
	 * Tricky ...  Because we don't know the virtual address we've got
	 * the choice of either invalidating the entire primary caches or
	 * only the secondary caches.  With the subset property enforced on
	 * R4000SC, R4400SC, R10000 and R12000, invalidating the secondary
	 * cache also invalidates any matching entries in the primary
	 * caches, which hopefully is a bit more economical.
	 */
	if (cpu_has_subset_pcaches) {
		unsigned long addr = (unsigned long) page_address(page);
		r4k_blast_scache_page(addr);

		return;
	}

	if (!cpu_has_ic_fills_f_dc) {
		unsigned long addr = (unsigned long) page_address(page);
		r4k_blast_dcache_page(addr);
	}

	/*
	 * We're not sure of the virtual address(es) involved here, so
	 * we have to flush the entire I-cache.
	 */
	if (cpu_has_vtag_icache) {
		int cpu = smp_processor_id();

		if (cpu_context(cpu, vma->vm_mm) != 0)
			drop_mmu_context(vma->vm_mm, cpu);
	} else
		r4k_blast_icache();
}

#ifdef CONFIG_NONCOHERENT_IO

static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	unsigned long end, a;

	if (cpu_has_subset_pcaches) {
		unsigned long sc_lsize = current_cpu_data.scache.linesz;

		if (size >= scache_size) {
			r4k_blast_scache();
			return;
		}

		a = addr & ~(sc_lsize - 1);
		end = (addr + size - 1) & ~(sc_lsize - 1);
		while (1) {
			flush_scache_line(a);	/* Hit_Writeback_Inv_SD */
			if (a == end)
				break;
			a += sc_lsize;
		}
		return;
	}

	/*
	 * Either no secondary cache or the available caches don't have the
	 * subset property so we have to flush the primary caches
	 * explicitly.
	 */
	if (size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		unsigned long dc_lsize = current_cpu_data.dcache.linesz;

		R4600_HIT_CACHEOP_WAR_IMPL;
		a = addr & ~(dc_lsize - 1);
		end = (addr + size - 1) & ~(dc_lsize - 1);
		while (1) {
			flush_dcache_line(a);	/* Hit_Writeback_Inv_D */
			if (a == end)
				break;
			a += dc_lsize;
		}
	}

	bc_wback_inv(addr, size);
}

static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
	unsigned long end, a;

	if (cpu_has_subset_pcaches) {
		unsigned long sc_lsize = current_cpu_data.scache.linesz;

		if (size >= scache_size) {
			r4k_blast_scache();
			return;
		}

		a = addr & ~(sc_lsize - 1);
		end = (addr + size - 1) & ~(sc_lsize - 1);
		while (1) {
			flush_scache_line(a);	/* Hit_Writeback_Inv_SD */
			if (a == end)
				break;
			a += sc_lsize;
		}
		return;
	}

	if (size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		unsigned long dc_lsize = current_cpu_data.dcache.linesz;

		R4600_HIT_CACHEOP_WAR_IMPL;
		a = addr & ~(dc_lsize - 1);
		end = (addr + size - 1) & ~(dc_lsize - 1);
		while (1) {
			flush_dcache_line(a);	/* Hit_Writeback_Inv_D */
			if (a == end)
				break;
			a += dc_lsize;
		}
	}

	bc_inv(addr, size);
}

#endif /* CONFIG_NONCOHERENT_IO */

/*
 * While we're protected against bad userland addresses we don't care
 * very much about what happens in that case.  Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void r4k_flush_cache_sigtramp(unsigned long addr)
{
	unsigned long ic_lsize = current_cpu_data.icache.linesz;
	unsigned long dc_lsize = current_cpu_data.dcache.linesz;

	R4600_HIT_CACHEOP_WAR_IMPL;
	protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
	protected_flush_icache_line(addr & ~(ic_lsize - 1));
}
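
/*
 * Only virtually tagged icaches can hold stale data across an address
 * space change, so there is nothing to do here for the physically tagged
 * variants.
 */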
static void r4k_flush_icache_all(void)
{
	if (cpu_has_vtag_icache)
		r4k_blast_icache();
}

static inline void rm7k_erratum31(void)
{
	const unsigned long ic_lsize = 32;
	unsigned long addr;

	/* RM7000 erratum #31. The icache is screwed at startup. */
	write_c0_taglo(0);
	write_c0_taghi(0);

	for (addr = KSEG0; addr <= KSEG0 + 4096; addr += ic_lsize) {
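		/*
		 * For each index, store an initialised tag, Fill the line
		 * from memory, then store the tag again; the 0x1000 offsets
		 * extend coverage across the whole 16kB icache.  The exact
		 * sequence presumably follows the RM7000 errata sheet.
		 */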
		__asm__ __volatile__ (
			".set noreorder\n\t"
			".set mips3\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			"cache\t%2, 0(%0)\n\t"
			"cache\t%2, 0x1000(%0)\n\t"
			"cache\t%2, 0x2000(%0)\n\t"
			"cache\t%2, 0x3000(%0)\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			".set\tmips0\n\t"
			".set\treorder\n\t"
			:
			: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
	}
}

static char *way_string[] = { NULL, "direct mapped", "2-way", "3-way", "4-way",
	"5-way", "6-way", "7-way", "8-way"
};

static void __init probe_pcache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	unsigned int prid = read_c0_prid();
	unsigned long config1;
	unsigned int lsize;

	switch (current_cpu_data.cputype) {
	case CPU_R4600:			/* QED style two way caches? */
	case CPU_R4700:
	case CPU_R5000:
	case CPU_NEVADA:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = ffs(icache_size/2) - 1;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = ffs(dcache_size/2) - 1;
		break;

	case CPU_R5432:
	case CPU_R5500:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = 0;
		break;

	case CPU_TX49XX:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = 0;
		break;

	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R4300:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */
		break;

	case CPU_R10000:
	case CPU_R12000:
		icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
		c->icache.linesz = 64;
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
		c->dcache.linesz = 32;
		c->dcache.ways = 2;
		c->dcache.waybit = 0;
		break;

	case CPU_VR4131:
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = ffs(icache_size/2) - 1;

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = ffs(dcache_size/2) - 1;
		break;

	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */
		break;

	case CPU_RM7000:
		rm7k_erratum31();

		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = ffs(icache_size / c->icache.ways) - 1;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = ffs(dcache_size / c->dcache.ways) - 1;
		break;

	default:
		if (!(config & MIPS_CONF_M))
			panic("Don't know how to probe P-caches on this cpu.");

		/*
		 * So we seem to be a MIPS32 or MIPS64 CPU; let's probe the
		 * I-cache first ...
		 */
		config1 = read_c0_config1();

		if ((lsize = ((config1 >> 19) & 7)))
			c->icache.linesz = 2 << lsize;
		else
			c->icache.linesz = lsize;
		c->icache.sets = 64 << ((config1 >> 22) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);

		icache_size = c->icache.sets *
			      c->icache.ways *
			      c->icache.linesz;
		c->icache.waybit = ffs(icache_size/c->icache.ways) - 1;

		/*
		 * Now probe the MIPS32 / MIPS64 data cache.
		 */
		c->dcache.flags = 0;

		if ((lsize = ((config1 >> 10) & 7)))
			c->dcache.linesz = 2 << lsize;
		else
			c->dcache.linesz = lsize;
		c->dcache.sets = 64 << ((config1 >> 13) & 7);
		c->dcache.ways = 1 + ((config1 >> 7) & 7);

		dcache_size = c->dcache.sets *
			      c->dcache.ways *
			      c->dcache.linesz;
		c->dcache.waybit = ffs(dcache_size/c->dcache.ways) - 1;
		break;
	}

	/*
	 * Processor configuration sanity check for the R4000SC erratum
	 * #5.  With page sizes larger than 32kB there is no possibility
	 * to get a VCE exception anymore so we don't care about this
	 * misconfiguration.  The case is rather theoretical anyway;
	 * presumably no vendor is shipping its hardware in the "bad"
	 * configuration.
	 */
	if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 &&
	    !(config & CONF_SC) && c->icache.linesz != 16 &&
	    PAGE_SIZE <= 0x8000)
		panic("Improper R4000SC processor configuration detected");

	/* compute a couple of other cache variables */
	icache_way_size = icache_size / c->icache.ways;
	dcache_way_size = dcache_size / c->dcache.ways;

	c->icache.sets = icache_size / (c->icache.linesz * c->icache.ways);
	c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways);

	/*
	 * R10000 and R12000 P-caches are odd in a positive way.  They're
	 * 32kB 2-way virtually indexed caches, so they'd normally suffer
	 * from aliases, but magic in the hardware deals with that for us
	 * so we don't need to take care ourselves.
	 */
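	/*
	 * Example: a 16kB 2-way dcache has 8kB per way, so with 4kB pages
	 * two virtual pages can map the same physical line at different
	 * indexes; that is the alias case flagged below.
	 */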
	if (current_cpu_data.cputype != CPU_R10000 &&
	    current_cpu_data.cputype != CPU_R12000)
		if (dcache_way_size > PAGE_SIZE)
			c->dcache.flags |= MIPS_CACHE_ALIASES;

	if (config & 0x8)	/* VI bit */
		c->icache.flags |= MIPS_CACHE_VTAG;

	switch (c->cputype) {
	case CPU_20KC:
		/*
		 * Some older 20Kc chips don't have the 'VI' bit in
		 * the config register.
		 */
		c->icache.flags |= MIPS_CACHE_VTAG;
		break;

	case CPU_AU1500:
		c->icache.flags |= MIPS_CACHE_IC_F_DC;
		break;
	}

	printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
	       icache_size >> 10,
	       cpu_has_vtag_icache ? "virtually tagged" : "physically tagged",
	       way_string[c->icache.ways], c->icache.linesz);

	printk("Primary data cache %ldkB %s, linesize %d bytes.\n",
	       dcache_size >> 10, way_string[c->dcache.ways], c->dcache.linesz);
}

/*
 * If you even _breathe_ on this function, look at the gcc output and make sure
 * it does not pop things on and off the stack for the cache sizing loop that
 * executes in KSEG1 space or else you will crash and burn badly.  You have
 * been warned.
 */
static int __init probe_scache(void)
{
	extern unsigned long stext;
	unsigned long flags, addr, begin, end, pow2;
	unsigned int config = read_c0_config();
	struct cpuinfo_mips *c = &current_cpu_data;

	if (config & CONF_SC)
		return 0;

	begin = (unsigned long) &stext;
	begin &= ~((4 * 1024 * 1024) - 1);
	end = begin + (4 * 1024 * 1024);

	/*
	 * This is such a bitch, you'd think they would make it easy to do
	 * this.  Away you daemons of stupidity!
	 */
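
	/*
	 * The trick: after filling one line per power-of-two offset and
	 * zeroing the tag at `begin', reloading tags at begin + 128k, 256k,
	 * ... hits the zeroed tag as soon as the offset wraps around the
	 * real scache size, because that address indexes the very line we
	 * invalidated.  The first offset that wraps is the cache size.
	 */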
	local_irq_save(flags);

	/* Fill each size-multiple cache line with a valid tag. */
	pow2 = (64 * 1024);
	for (addr = begin; addr < end; addr = (begin + pow2)) {
		unsigned long *p = (unsigned long *) addr;
		__asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
		pow2 <<= 1;
	}

	/* Load first line with zero (therefore invalid) tag. */
	write_c0_taglo(0);
	write_c0_taghi(0);
	__asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
	cache_op(Index_Store_Tag_I, begin);
	cache_op(Index_Store_Tag_D, begin);
	cache_op(Index_Store_Tag_SD, begin);

	/* Now search for the wrap around point. */
	pow2 = (128 * 1024);
	for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
		cache_op(Index_Load_Tag_SD, addr);
		__asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
		if (!read_c0_taglo())
			break;
		pow2 <<= 1;
	}
	local_irq_restore(flags);
	addr -= begin;

	scache_size = addr;
	c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
	c->scache.ways = 1;
	c->scache.waybit = 0;	/* does not matter */

	return 1;
}

static void __init setup_noscache_funcs(void)
{
	unsigned int prid;

	switch (current_cpu_data.dcache.linesz) {
	case 16:
		if (cpu_has_64bits)
			_clear_page = r4k_clear_page_d16;
		else
			_clear_page = r4k_clear_page32_d16;
		_copy_page = r4k_copy_page_d16;
		break;
	case 32:
		prid = read_c0_prid() & 0xfff0;
		if (prid == 0x2010) {			/* R4600 V1.7 */
			_clear_page = r4k_clear_page_r4600_v1;
			_copy_page = r4k_copy_page_r4600_v1;
		} else if (prid == 0x2020) {		/* R4600 V2.0 */
			_clear_page = r4k_clear_page_r4600_v2;
			_copy_page = r4k_copy_page_r4600_v2;
		} else {
			if (cpu_has_64bits)
				_clear_page = r4k_clear_page_d32;
			else
				_clear_page = r4k_clear_page32_d32;
			_copy_page = r4k_copy_page_d32;
		}
		break;
	}
}

static void __init setup_scache_funcs(void)
{
	if (current_cpu_data.dcache.linesz > current_cpu_data.scache.linesz)
		panic("Invalid primary cache configuration detected");

	if (current_cpu_data.cputype == CPU_R10000 ||
	    current_cpu_data.cputype == CPU_R12000) {
		_clear_page = andes_clear_page;
		_copy_page = andes_copy_page;
		return;
	}

	switch (current_cpu_data.scache.linesz) {
	case 16:
		_clear_page = r4k_clear_page_s16;
		_copy_page = r4k_copy_page_s16;
		break;
	case 32:
		_clear_page = r4k_clear_page_s32;
		_copy_page = r4k_copy_page_s32;
		break;
	case 64:
		_clear_page = r4k_clear_page_s64;
		_copy_page = r4k_copy_page_s64;
		break;
	case 128:
		_clear_page = r4k_clear_page_s128;
		_copy_page = r4k_copy_page_s128;
		break;
	}
}

typedef int (*probe_func_t)(unsigned long);
extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);

static void __init setup_scache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	probe_func_t probe_scache_kseg1;
	int sc_present = 0;

	/*
	 * Do the probing thing on R4000SC and R4400SC processors.  Other
	 * processors don't have a S-cache that would be relevant to the
	 * Linux memory management.
	 */
	switch (current_cpu_data.cputype) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
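		/*
		 * Call the probe through KSEG1 so the sizing loop runs
		 * uncached; probe_scache() manipulates tags directly and
		 * must not be tripped up by its own cache footprint.
		 */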
		probe_scache_kseg1 = (probe_func_t) (KSEG1ADDR(&probe_scache));
		sc_present = probe_scache_kseg1(config);
		break;

	case CPU_R10000:
	case CPU_R12000:
		scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
		c->scache.linesz = 64 << ((config >> 13) & 1);
		c->scache.ways = 2;
		c->scache.waybit = 0;
		sc_present = 1;
		break;

	case CPU_R5000:
	case CPU_NEVADA:
		setup_noscache_funcs();
#ifdef CONFIG_R5000_CPU_SCACHE
		r5k_sc_init();
#endif
		return;

	case CPU_RM7000:
		setup_noscache_funcs();
#ifdef CONFIG_RM7000_CPU_SCACHE
		rm7k_sc_init();
#endif
		return;

	default:
		sc_present = 0;
	}

	if (!sc_present) {
		setup_noscache_funcs();
		return;
	}

	if ((current_cpu_data.isa_level == MIPS_CPU_ISA_M32 ||
	     current_cpu_data.isa_level == MIPS_CPU_ISA_M64) &&
	    !(current_cpu_data.scache.flags & MIPS_CACHE_NOT_PRESENT))
		panic("Dunno how to handle MIPS32 / MIPS64 second level cache");

	printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	current_cpu_data.options |= MIPS_CPU_SUBSET_CACHES;
	setup_scache_funcs();
}

static inline void coherency_setup(void)
{
	change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);

	/*
	 * c0_status.cu=0 specifies that updates by the sc instruction use
	 * the coherency mode specified by the TLB; 1 means cacheable
	 * coherent update on write will be used.  Not all processors have
	 * this bit; some wire it to zero, and others like Toshiba had the
	 * silly idea of putting something else there ...
	 */
	switch (current_cpu_data.cputype) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		clear_c0_config(CONF_CU);
		break;
	}
}

void __init ld_mmu_r4xx0(void)
{
	extern char except_vec2_generic;

	/* Default cache error handler for R4000 and R5000 family */
	memcpy((void *)(KSEG0 + 0x100), &except_vec2_generic, 0x80);
	memcpy((void *)(KSEG1 + 0x100), &except_vec2_generic, 0x80);

	probe_pcache();
	setup_scache();
	coherency_setup();

	if (current_cpu_data.dcache.sets *
	    current_cpu_data.dcache.linesz > PAGE_SIZE)
		current_cpu_data.dcache.flags |= MIPS_CACHE_ALIASES;

	/*
	 * Some MIPS32 and MIPS64 processors have physically indexed caches.
	 * This code supports virtually indexed processors and will be
	 * unnecessarily inefficient on physically indexed processors.
	 */
	shm_align_mask = max_t(unsigned long,
			       current_cpu_data.dcache.sets *
			       current_cpu_data.dcache.linesz - 1,
			       PAGE_SIZE - 1);

	flush_cache_all = r4k_flush_cache_all;
	__flush_cache_all = r4k___flush_cache_all;
	flush_cache_mm = r4k_flush_cache_mm;
	flush_cache_page = r4k_flush_cache_page;
	flush_icache_page = r4k_flush_icache_page;
	flush_cache_range = r4k_flush_cache_range;

	flush_cache_sigtramp = r4k_flush_cache_sigtramp;
	flush_icache_all = r4k_flush_icache_all;
	flush_data_cache_page = r4k_flush_data_cache_page;
	flush_icache_range = r4k_flush_icache_range;

#ifdef CONFIG_NONCOHERENT_IO
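	/*
	 * There is no dedicated writeback-only routine here; plain writeback
	 * is serviced by the writeback-invalidate implementation, which is
	 * correct if a little heavier than strictly necessary.
	 */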
	_dma_cache_wback_inv = r4k_dma_cache_wback_inv;
	_dma_cache_wback = r4k_dma_cache_wback_inv;
	_dma_cache_inv = r4k_dma_cache_inv;
#endif

	__flush_cache_all();
}