2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
7 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
8 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
10 #include <linux/config.h>
11 #include <linux/init.h>
12 #include <linux/kernel.h>
13 #include <linux/sched.h>
15 #include <linux/bitops.h>
17 #include <asm/bcache.h>
18 #include <asm/bootinfo.h>
22 #include <asm/pgtable.h>
23 #include <asm/system.h>
24 #include <asm/mmu_context.h>
/* Primary cache parameters, filled in at boot by probe_pcache(). */
static unsigned long icache_size, dcache_size, scache_size;

/* Per-way cache sizes; non-static because other MIPS cache code uses them. */
unsigned long icache_way_size, dcache_way_size, scache_way_size;
32 #include <asm/cacheops.h>
33 #include <asm/r4kcache.h>
/*
 * Optimized page clear/copy primitives, implemented elsewhere in
 * assembler.  One variant per primary/secondary cache line size
 * (d16/d32 = 16/32-byte D-cache lines, sNN = NN-byte S-cache lines),
 * plus dedicated R4600 V1/V2 workaround versions and the Andes
 * (R10000/R12000) versions.
 */
extern void andes_clear_page(void * page);
extern void r4k_clear_page32_d16(void * page);
extern void r4k_clear_page32_d32(void * page);
extern void r4k_clear_page_d16(void * page);
extern void r4k_clear_page_d32(void * page);
extern void r4k_clear_page_r4600_v1(void * page);
extern void r4k_clear_page_r4600_v2(void * page);
extern void r4k_clear_page_s16(void * page);
extern void r4k_clear_page_s32(void * page);
extern void r4k_clear_page_s64(void * page);
extern void r4k_clear_page_s128(void * page);
extern void andes_copy_page(void * to, void * from);
extern void r4k_copy_page_d16(void * to, void * from);
extern void r4k_copy_page_d32(void * to, void * from);
extern void r4k_copy_page_r4600_v1(void * to, void * from);
extern void r4k_copy_page_r4600_v2(void * to, void * from);
extern void r4k_copy_page_s16(void * to, void * from);
extern void r4k_copy_page_s32(void * to, void * from);
extern void r4k_copy_page_s64(void * to, void * from);
extern void r4k_copy_page_s128(void * to, void * from);
57 * Dummy cache handling routines for machines without boardcaches
/* Dummy board-cache hook for machines without a board cache: does nothing. */
static void no_sc_noop(void) {}
/*
 * Dummy bcache_ops: every board-cache hook points at no_sc_noop, used
 * when no second-level board cache is present.  bcops is the active
 * ops pointer consulted by the bc_*() wrappers.
 * NOTE(review): the initializer's closing "};" and some lines are
 * missing from this extract, and the stray leading numbers are merge
 * artifacts - do not compile as-is.
 */
61 static struct bcache_ops no_sc_ops
= {
62 .bc_enable
= (void *)no_sc_noop
,
63 .bc_disable
= (void *)no_sc_noop
,
64 .bc_wback_inv
= (void *)no_sc_noop
,
65 .bc_inv
= (void *)no_sc_noop
68 struct bcache_ops
*bcops
= &no_sc_ops
;
/*
 * Workaround for R4600 Hit_* cacheop errata: V2.0 parts (prid 0x2020)
 * get an uncached KSEG1 load before the cacheop, V1.x parts get a few
 * nops.  NOTE(review): the macro's surrounding do/while lines are
 * missing from this extract; stray leading numbers are merge artifacts.
 */
70 #define R4600_HIT_CACHEOP_WAR_IMPL \
72 if (R4600_V2_HIT_CACHEOP_WAR && \
73 (read_c0_prid() & 0xfff0) == 0x2020) { /* R4600 V2.0 */\
74 *(volatile unsigned long *)KSEG1; \
76 if (R4600_V1_HIT_CACHEOP_WAR) \
77 __asm__ __volatile__("nop;nop;nop;nop"); \
/*
 * Writeback-invalidate one page from the primary D-cache by address.
 * Dispatches through a self-patching computed goto "l" (GCC
 * labels-as-values, initialised to &&init) so that after the first call
 * it jumps straight to the variant matching the probed D-cache line
 * size (16- and 32-byte variants visible here).
 * NOTE(review): interior lines are missing from this extract and the
 * stray leading numbers are merge artifacts - do not compile as-is.
 */
80 static void r4k_blast_dcache_page(unsigned long addr
)
82 static void *l
= &&init
;
83 unsigned long dc_lsize
;
88 blast_dcache16_page(addr
);
92 R4600_HIT_CACHEOP_WAR_IMPL
;
93 blast_dcache32_page(addr
);
97 dc_lsize
= current_cpu_data
.dcache
.linesz
;
101 else if (dc_lsize
== 32)
/*
 * Indexed (by cache index, not address/ASID) writeback-invalidate of
 * one page's worth of primary D-cache lines.  Same self-patching
 * computed-goto dispatch on D-cache line size as r4k_blast_dcache_page.
 * NOTE(review): interior lines missing from this extract; stray leading
 * numbers are merge artifacts.
 */
106 static void r4k_blast_dcache_page_indexed(unsigned long addr
)
108 static void *l
= &&init
;
109 unsigned long dc_lsize
;
114 blast_dcache16_page_indexed(addr
);
118 blast_dcache32_page_indexed(addr
);
122 dc_lsize
= current_cpu_data
.dcache
.linesz
;
126 else if (dc_lsize
== 32)
/*
 * Writeback-invalidate the whole primary D-cache, dispatching by line
 * size through the self-patching computed goto "l".
 * NOTE(review): the blast calls themselves are missing from this
 * extract; stray leading numbers are merge artifacts.
 */
131 static void r4k_blast_dcache(void)
133 static void *l
= &&init
;
134 unsigned long dc_lsize
;
147 dc_lsize
= current_cpu_data
.dcache
.linesz
;
151 else if (dc_lsize
== 32)
/*
 * Invalidate one page from the primary I-cache by address; computed-goto
 * dispatch on I-cache line size (16/32/64-byte variants visible).
 * NOTE(review): interior lines missing from this extract; stray leading
 * numbers are merge artifacts.
 */
156 static void r4k_blast_icache_page(unsigned long addr
)
158 unsigned long ic_lsize
= current_cpu_data
.icache
.linesz
;
159 static void *l
= &&init
;
164 blast_icache16_page(addr
);
168 blast_icache32_page(addr
);
172 blast_icache64_page(addr
);
178 else if (ic_lsize
== 32)
180 else if (ic_lsize
== 64)
/*
 * Indexed invalidate of one page's worth of primary I-cache lines;
 * computed-goto dispatch on I-cache line size (16/32/64).
 * NOTE(review): interior lines missing from this extract; stray leading
 * numbers are merge artifacts.
 */
185 static void r4k_blast_icache_page_indexed(unsigned long addr
)
187 unsigned long ic_lsize
= current_cpu_data
.icache
.linesz
;
188 static void *l
= &&init
;
193 blast_icache16_page_indexed(addr
);
197 blast_icache32_page_indexed(addr
);
201 blast_icache64_page_indexed(addr
);
207 else if (ic_lsize
== 32)
209 else if (ic_lsize
== 64)
/*
 * Invalidate the entire primary I-cache; computed-goto dispatch on
 * I-cache line size.  NOTE(review): the blast calls themselves are
 * missing from this extract; stray leading numbers are merge artifacts.
 */
214 static void r4k_blast_icache(void)
216 unsigned long ic_lsize
= current_cpu_data
.icache
.linesz
;
217 static void *l
= &&init
;
236 else if (ic_lsize
== 32)
238 else if (ic_lsize
== 64)
/*
 * Writeback-invalidate one page from the secondary cache by address;
 * computed-goto dispatch on S-cache line size (16/32/64/128-byte
 * variants visible).  NOTE(review): interior lines missing from this
 * extract; stray leading numbers are merge artifacts.
 */
243 static void r4k_blast_scache_page(unsigned long addr
)
245 unsigned long sc_lsize
= current_cpu_data
.scache
.linesz
;
246 static void *l
= &&init
;
251 blast_scache16_page(addr
);
255 blast_scache32_page(addr
);
259 blast_scache64_page(addr
);
263 blast_scache128_page(addr
);
269 else if (sc_lsize
== 32)
271 else if (sc_lsize
== 64)
273 else if (sc_lsize
== 128)
/*
 * Writeback-invalidate the entire secondary cache; computed-goto
 * dispatch on S-cache line size (16/32/64/128).  NOTE(review): the
 * blast calls themselves are missing from this extract; stray leading
 * numbers are merge artifacts.
 */
278 static void r4k_blast_scache(void)
280 unsigned long sc_lsize
= current_cpu_data
.scache
.linesz
;
281 static void *l
= &&init
;
304 else if (sc_lsize
== 32)
306 else if (sc_lsize
== 64)
308 else if (sc_lsize
== 128)
/*
 * flush_cache_all hook: on CPUs without D-cache aliases this is
 * presumably a no-op/early-return (the body past the check is missing
 * from this extract).  Stray leading numbers are merge artifacts.
 */
313 static void r4k_flush_cache_all(void)
315 if (!cpu_has_dc_aliases
)
/*
 * __flush_cache_all hook: selects per-CPU-type flushing via a switch on
 * current_cpu_data.cputype.  NOTE(review): all case bodies are missing
 * from this extract; stray leading numbers are merge artifacts.
 */
322 static void r4k___flush_cache_all(void)
327 switch (current_cpu_data
.cputype
) {
/*
 * flush_cache_range hook: only acts when the VMA's mm has a live ASID
 * on this CPU; executable mappings (VM_EXEC) get extra I-cache
 * treatment.  NOTE(review): the flush calls are missing from this
 * extract; stray leading numbers are merge artifacts.
 */
338 static void r4k_flush_cache_range(struct vm_area_struct
*vma
,
339 unsigned long start
, unsigned long end
)
341 if (cpu_context(smp_processor_id(), vma
->vm_mm
) != 0) {
343 if (vma
->vm_flags
& VM_EXEC
)
/*
 * flush_cache_mm hook: bail out early when the CPU has no D-cache
 * aliases or the mm has no ASID on this CPU.  R4000SC/MC and R4400SC/MC
 * additionally need more than a primary-cache flush (see kludge comment
 * below).  NOTE(review): the actual flush calls are missing from this
 * extract; stray leading numbers are merge artifacts.
 */
348 static void r4k_flush_cache_mm(struct mm_struct
*mm
)
350 if (!cpu_has_dc_aliases
)
353 if (!cpu_context(smp_processor_id(), mm
))
360 * Kludge alert. For obscure reasons R4000SC and R4400SC go nuts if we
361 * only flush the primary caches but R10000 and R12000 behave sane ...
363 if (current_cpu_data
.cputype
== CPU_R4000SC
||
364 current_cpu_data
.cputype
== CPU_R4000MC
||
365 current_cpu_data
.cputype
== CPU_R4400SC
||
366 current_cpu_data
.cputype
== CPU_R4400MC
)
/*
 * flush_cache_page hook.  Walks pgd/pmd/pte for the faulting address,
 * skips pages with no ASID or no _PAGE_PRESENT, then:
 *  - if the page belongs to the current mm and is _PAGE_VALID, does
 *    cheap hit-type (by address) D- and I-cache page blasts;
 *  - otherwise falls back to indexed flushes on the KSEG0 alias of the
 *    page (masked by dcache_size), dropping the mmu context first for
 *    virtually-tagged I-caches.
 * NOTE(review): several lines (declarations of pgdp/pmdp/ptep, returns,
 * braces) are missing from this extract; stray leading numbers are
 * merge artifacts - do not compile as-is.
 */
370 static void r4k_flush_cache_page(struct vm_area_struct
*vma
,
373 int exec
= vma
->vm_flags
& VM_EXEC
;
374 struct mm_struct
*mm
= vma
->vm_mm
;
380 * If ownes no valid ASID yet, cannot possibly have gotten
381 * this page into the cache.
383 if (cpu_context(smp_processor_id(), mm
) == 0)
387 pgdp
= pgd_offset(mm
, page
);
388 pmdp
= pmd_offset(pgdp
, page
);
389 ptep
= pte_offset(pmdp
, page
);
392 * If the page isn't marked valid, the page cannot possibly be
395 if (!(pte_val(*ptep
) & _PAGE_PRESENT
))
399 * Doing flushes for another ASID than the current one is
400 * too difficult since stupid R4k caches do a TLB translation
401 * for every cache flush operation. So we do indexed flushes
402 * in that case, which doesn't overly flush the cache too much.
404 if ((mm
== current
->active_mm
) && (pte_val(*ptep
) & _PAGE_VALID
)) {
405 if (cpu_has_dc_aliases
|| (exec
&& !cpu_has_ic_fills_f_dc
))
406 r4k_blast_dcache_page(page
);
408 r4k_blast_icache_page(page
);
414 * Do indexed flush, too much work to get the (possible) TLB refills
417 page
= (KSEG0
+ (page
& (dcache_size
- 1)));
418 if (cpu_has_dc_aliases
|| (exec
&& !cpu_has_ic_fills_f_dc
))
419 r4k_blast_dcache_page_indexed(page
);
421 if (cpu_has_vtag_icache
) {
422 int cpu
= smp_processor_id();
424 if (cpu_context(cpu
, vma
->vm_mm
) != 0)
425 drop_mmu_context(vma
->vm_mm
, cpu
);
427 r4k_blast_icache_page_indexed(page
);
/*
 * flush_data_cache_page hook: thin wrapper that blasts one page out of
 * the primary D-cache by address.  Stray leading numbers are merge
 * artifacts of this extract.
 */
431 static void r4k_flush_data_cache_page(unsigned long addr
)
433 r4k_blast_dcache_page(addr
);
/*
 * flush_icache_range hook: writes back the D-cache over [start,end)
 * (unless the I-cache fills from the D-cache), then invalidates the
 * I-cache over the same range, using protected (no-fault) line ops.
 * Ranges larger than the whole cache are presumably handled by a full
 * blast (those lines are missing from this extract).
 * NOTE(review): the I-cache loop aligns addr/aend with dc_lsize, not
 * ic_lsize - this matches the era's upstream code but looks suspicious
 * when the two line sizes differ; verify against a complete copy.
 * Stray leading numbers are merge artifacts - do not compile as-is.
 */
436 static void r4k_flush_icache_range(unsigned long start
, unsigned long end
)
438 unsigned long dc_lsize
= current_cpu_data
.dcache
.linesz
;
439 unsigned long addr
, aend
;
441 if (!cpu_has_ic_fills_f_dc
) {
442 if (end
- start
> dcache_size
)
445 addr
= start
& ~(dc_lsize
- 1);
446 aend
= (end
- 1) & ~(dc_lsize
- 1);
449 /* Hit_Writeback_Inv_D */
450 protected_writeback_dcache_line(addr
);
458 if (end
- start
> icache_size
)
461 addr
= start
& ~(dc_lsize
- 1);
462 aend
= (end
- 1) & ~(dc_lsize
- 1);
464 /* Hit_Invalidate_I */
465 protected_flush_icache_line(addr
);
/*
 * flush_icache_page hook.  Without the user virtual address available,
 * it flushes via the kernel address: S-cache page blast when the
 * primary caches are a subset of the secondary (inclusive hierarchy),
 * else a D-cache page blast plus - for virtually tagged I-caches -
 * dropping the mmu context; a full I-cache blast presumably follows in
 * lines missing from this extract.  Stray leading numbers are merge
 * artifacts - do not compile as-is.
 */
474 * Ok, this seriously sucks. We use them to flush a user page but don't
475 * know the virtual address, so we have to blast away the whole icache
476 * which is significantly more expensive than the real thing. Otoh we at
477 * least know the kernel address of the page so we can flush it
480 static void r4k_flush_icache_page(struct vm_area_struct
*vma
,
484 * If there's no context yet, or the page isn't executable, no icache
487 if (!(vma
->vm_flags
& VM_EXEC
))
491 * Tricky ... Because we don't know the virtual address we've got the
492 * choice of either invalidating the entire primary and secondary
493 * caches or invalidating the secondary caches also. With the subset
494 * enforcment on R4000SC, R4400SC, R10000 and R12000 invalidating the
495 * secondary cache will result in any entries in the primary caches
496 * also getting invalidated which hopefully is a bit more economical.
498 if (cpu_has_subset_pcaches
) {
499 unsigned long addr
= (unsigned long) page_address(page
);
500 r4k_blast_scache_page(addr
);
505 if (!cpu_has_ic_fills_f_dc
) {
506 unsigned long addr
= (unsigned long) page_address(page
);
507 r4k_blast_dcache_page(addr
);
511 * We're not sure of the virtual address(es) involved here, so
512 * we have to flush the entire I-cache.
514 if (cpu_has_vtag_icache
) {
515 int cpu
= smp_processor_id();
517 if (cpu_context(cpu
, vma
->vm_mm
) != 0)
518 drop_mmu_context(vma
->vm_mm
, cpu
);
523 #ifdef CONFIG_NONCOHERENT_IO
/*
 * DMA writeback-invalidate for non-coherent I/O.  With an inclusive
 * (subset) secondary cache, hit-flush S-cache lines over the buffer
 * (whole-cache blast for oversized buffers - lines missing here);
 * otherwise hit-flush primary D-cache lines with the R4600 cacheop
 * workaround applied, then pass the range to the board cache via
 * bc_wback_inv().  NOTE(review): loop bodies/braces are missing from
 * this extract; stray leading numbers are merge artifacts.
 */
525 static void r4k_dma_cache_wback_inv(unsigned long addr
, unsigned long size
)
527 unsigned long end
, a
;
529 if (cpu_has_subset_pcaches
) {
530 unsigned long sc_lsize
= current_cpu_data
.scache
.linesz
;
532 if (size
>= scache_size
) {
537 a
= addr
& ~(sc_lsize
- 1);
538 end
= (addr
+ size
- 1) & ~(sc_lsize
- 1);
540 flush_scache_line(a
); /* Hit_Writeback_Inv_SD */
549 * Either no secondary cache or the available caches don't have the
550 * subset property so we have to flush the primary caches
553 if (size
>= dcache_size
) {
556 unsigned long dc_lsize
= current_cpu_data
.dcache
.linesz
;
558 R4600_HIT_CACHEOP_WAR_IMPL
;
559 a
= addr
& ~(dc_lsize
- 1);
560 end
= (addr
+ size
- 1) & ~(dc_lsize
- 1);
562 flush_dcache_line(a
); /* Hit_Writeback_Inv_D */
569 bc_wback_inv(addr
, size
);
/*
 * DMA invalidate for non-coherent I/O.  Structure mirrors
 * r4k_dma_cache_wback_inv(): S-cache hit ops when the primaries are a
 * subset of the secondary, else primary D-cache hit ops with the R4600
 * workaround.  Note the line ops used are still Hit_Writeback_Inv_*
 * (R4k has no pure hit-invalidate that is safe here).  NOTE(review):
 * loop bodies, braces and the trailing bc_inv() call are missing from
 * this extract; stray leading numbers are merge artifacts.
 */
572 static void r4k_dma_cache_inv(unsigned long addr
, unsigned long size
)
574 unsigned long end
, a
;
576 if (cpu_has_subset_pcaches
) {
577 unsigned long sc_lsize
= current_cpu_data
.scache
.linesz
;
579 if (size
>= scache_size
) {
584 a
= addr
& ~(sc_lsize
- 1);
585 end
= (addr
+ size
- 1) & ~(sc_lsize
- 1);
587 flush_scache_line(a
); /* Hit_Writeback_Inv_SD */
595 if (size
>= dcache_size
) {
598 unsigned long dc_lsize
= current_cpu_data
.dcache
.linesz
;
600 R4600_HIT_CACHEOP_WAR_IMPL
;
601 a
= addr
& ~(dc_lsize
- 1);
602 end
= (addr
+ size
- 1) & ~(dc_lsize
- 1);
604 flush_dcache_line(a
); /* Hit_Writeback_Inv_D */
613 #endif /* CONFIG_NONCOHERENT_IO */
/*
 * Flush the single cache line holding a signal trampoline: write back
 * the D-cache line and invalidate the I-cache line covering addr, using
 * protected (no-fault) ops so a bad userland address cannot oops us.
 * Stray leading numbers are merge artifacts of this extract.
 */
616 * While we're protected against bad userland addresses we don't care
617 * very much about what happens in that case. Usually a segmentation
618 * fault will dump the process later on anyway ...
620 static void r4k_flush_cache_sigtramp(unsigned long addr
)
622 unsigned long ic_lsize
= current_cpu_data
.icache
.linesz
;
623 unsigned long dc_lsize
= current_cpu_data
.dcache
.linesz
;
625 R4600_HIT_CACHEOP_WAR_IMPL
;
626 protected_writeback_dcache_line(addr
& ~(dc_lsize
- 1));
627 protected_flush_icache_line(addr
& ~(ic_lsize
- 1));
/*
 * flush_icache_all hook: only virtually-tagged I-caches need a real
 * flush here (the flush call itself is missing from this extract).
 * Stray leading numbers are merge artifacts.
 */
630 static void r4k_flush_icache_all(void)
632 if (cpu_has_vtag_icache
)
/*
 * RM7000 erratum #31 workaround: the I-cache is in an undefined state
 * at startup.  For each 32-byte line of the first 4kB, store invalid
 * tags (%1 = Index_Store_Tag_I) and Fill (%2) across all four 4kB-apart
 * ways, then store tags again.  NOTE(review): the loop's addr
 * declaration and some asm lines are missing from this extract; stray
 * leading numbers are merge artifacts - do not compile as-is.
 */
636 static inline void rm7k_erratum31(void)
638 const unsigned long ic_lsize
= 32;
641 /* RM7000 erratum #31. The icache is screwed at startup. */
645 for (addr
= KSEG0
; addr
<= KSEG0
+ 4096; addr
+= ic_lsize
) {
646 __asm__
__volatile__ (
649 "cache\t%1, 0(%0)\n\t"
650 "cache\t%1, 0x1000(%0)\n\t"
651 "cache\t%1, 0x2000(%0)\n\t"
652 "cache\t%1, 0x3000(%0)\n\t"
653 "cache\t%2, 0(%0)\n\t"
654 "cache\t%2, 0x1000(%0)\n\t"
655 "cache\t%2, 0x2000(%0)\n\t"
656 "cache\t%2, 0x3000(%0)\n\t"
657 "cache\t%1, 0(%0)\n\t"
658 "cache\t%1, 0x1000(%0)\n\t"
659 "cache\t%1, 0x2000(%0)\n\t"
660 "cache\t%1, 0x3000(%0)\n\t"
664 : "r" (addr
), "i" (Index_Store_Tag_I
), "i" (Fill
));
/* Human-readable associativity names, indexed by number of ways (0 unused). */
static char *way_string[] = { NULL, "direct mapped", "2-way", "3-way", "4-way",
	"5-way", "6-way", "7-way", "8-way"
};
/*
 * Probe the primary I- and D-cache geometry (size, line size, ways,
 * way bit) from the CP0 Config register, switching on cputype for the
 * classic R4000-family encodings, falling back to the MIPS32/MIPS64
 * Config1 register when Config.M is set.  Also applies the R4000SC
 * erratum #5 sanity check, derives way sizes/sets, sets the
 * ALIASES/VTAG/IC_F_DC flags and prints a summary.
 * NOTE(review): this extract has lost most case labels, braces and
 * several statements (original numbering jumps); "¤t_cpu_data"
 * is a mis-encoding of "&current_cpu_data"; stray leading numbers are
 * merge artifacts - do not compile as-is.
 */
672 static void __init
probe_pcache(void)
674 struct cpuinfo_mips
*c
= ¤t_cpu_data
;
675 unsigned int config
= read_c0_config();
676 unsigned int prid
= read_c0_prid();
677 unsigned long config1
;
/* Classic R4000-family probing: per-cputype Config decodings follow. */
680 switch (current_cpu_data
.cputype
) {
681 case CPU_R4600
: /* QED style two way caches? */
685 icache_size
= 1 << (12 + ((config
& CONF_IC
) >> 9));
686 c
->icache
.linesz
= 16 << ((config
& CONF_IB
) >> 5);
688 c
->icache
.waybit
= ffs(icache_size
/2) - 1;
690 dcache_size
= 1 << (12 + ((config
& CONF_DC
) >> 6));
691 c
->dcache
.linesz
= 16 << ((config
& CONF_DB
) >> 4);
693 c
->dcache
.waybit
= ffs(dcache_size
/2) - 1;
698 icache_size
= 1 << (12 + ((config
& CONF_IC
) >> 9));
699 c
->icache
.linesz
= 16 << ((config
& CONF_IB
) >> 5);
703 dcache_size
= 1 << (12 + ((config
& CONF_DC
) >> 6));
704 c
->dcache
.linesz
= 16 << ((config
& CONF_DB
) >> 4);
706 c
->dcache
.waybit
= 0;
710 icache_size
= 1 << (12 + ((config
& CONF_IC
) >> 9));
711 c
->icache
.linesz
= 16 << ((config
& CONF_IB
) >> 5);
715 dcache_size
= 1 << (12 + ((config
& CONF_DC
) >> 6));
716 c
->dcache
.linesz
= 16 << ((config
& CONF_DB
) >> 4);
718 c
->dcache
.waybit
= 0;
728 icache_size
= 1 << (12 + ((config
& CONF_IC
) >> 9));
729 c
->icache
.linesz
= 16 << ((config
& CONF_IB
) >> 5);
731 c
->icache
.waybit
= 0; /* doesn't matter */
733 dcache_size
= 1 << (12 + ((config
& CONF_DC
) >> 6));
734 c
->dcache
.linesz
= 16 << ((config
& CONF_DB
) >> 4);
736 c
->dcache
.waybit
= 0; /* does not matter */
/* R10000/R12000-style encoding: fixed 64/32-byte line sizes. */
741 icache_size
= 1 << (12 + ((config
& R10K_CONF_IC
) >> 29));
742 c
->icache
.linesz
= 64;
744 c
->icache
.waybit
= 0;
746 dcache_size
= 1 << (12 + ((config
& R10K_CONF_DC
) >> 26));
747 c
->dcache
.linesz
= 32;
749 c
->dcache
.waybit
= 0;
/* Smaller-cache variants: 10-bit base instead of 12. */
753 icache_size
= 1 << (10 + ((config
& CONF_IC
) >> 9));
754 c
->icache
.linesz
= 16 << ((config
& CONF_IB
) >> 5);
756 c
->icache
.waybit
= ffs(icache_size
/2) - 1;
758 dcache_size
= 1 << (10 + ((config
& CONF_DC
) >> 6));
759 c
->dcache
.linesz
= 16 << ((config
& CONF_DB
) >> 4);
761 c
->dcache
.waybit
= ffs(dcache_size
/2) - 1;
770 icache_size
= 1 << (10 + ((config
& CONF_IC
) >> 9));
771 c
->icache
.linesz
= 16 << ((config
& CONF_IB
) >> 5);
773 c
->icache
.waybit
= 0; /* doesn't matter */
775 dcache_size
= 1 << (10 + ((config
& CONF_DC
) >> 6));
776 c
->dcache
.linesz
= 16 << ((config
& CONF_DB
) >> 4);
778 c
->dcache
.waybit
= 0; /* does not matter */
784 icache_size
= 1 << (12 + ((config
& CONF_IC
) >> 9));
785 c
->icache
.linesz
= 16 << ((config
& CONF_IB
) >> 5);
787 c
->icache
.waybit
= ffs(icache_size
/ c
->icache
.ways
) - 1;
789 dcache_size
= 1 << (12 + ((config
& CONF_DC
) >> 6));
790 c
->dcache
.linesz
= 16 << ((config
& CONF_DB
) >> 4);
792 c
->dcache
.waybit
= ffs(dcache_size
/ c
->dcache
.ways
) - 1;
/* Unknown cputype: only MIPS32/MIPS64 (Config.M set) can be probed. */
796 if (!(config
& MIPS_CONF_M
))
797 panic("Don't know how to probe P-caches on this cpu.");
800 * So we seem to be a MIPS32 or MIPS64 CPU
801 * So let's probe the I-cache ...
803 config1
= read_c0_config1();
805 if ((lsize
= ((config1
>> 19) & 7)))
806 c
->icache
.linesz
= 2 << lsize
;
808 c
->icache
.linesz
= lsize
;
809 c
->icache
.sets
= 64 << ((config1
>> 22) & 7);
810 c
->icache
.ways
= 1 + ((config1
>> 16) & 7);
812 icache_size
= c
->icache
.sets
*
815 c
->icache
.waybit
= ffs(icache_size
/c
->icache
.ways
) - 1;
818 * Now probe the MIPS32 / MIPS64 data cache.
822 if ((lsize
= ((config1
>> 10) & 7)))
823 c
->dcache
.linesz
= 2 << lsize
;
825 c
->dcache
.linesz
= lsize
;
826 c
->dcache
.sets
= 64 << ((config1
>> 13) & 7);
827 c
->dcache
.ways
= 1 + ((config1
>> 7) & 7);
829 dcache_size
= c
->dcache
.sets
*
832 c
->dcache
.waybit
= ffs(dcache_size
/c
->dcache
.ways
) - 1;
837 * Processor configuration sanity check for the R4000SC erratum
838 * #5. With page sizes larger than 32kB there is no possibility
839 * to get a VCE exception anymore so we don't care about this
840 * misconfiguration. The case is rather theoretical anyway;
841 * presumably no vendor is shipping his hardware in the "bad"
844 if ((prid
& 0xff00) == PRID_IMP_R4000
&& (prid
& 0xff) < 0x40 &&
845 !(config
& CONF_SC
) && c
->icache
.linesz
!= 16 &&
847 panic("Improper R4000SC processor configuration detected");
849 /* compute a couple of other cache variables */
850 icache_way_size
= icache_size
/ c
->icache
.ways
;
851 dcache_way_size
= dcache_size
/ c
->dcache
.ways
;
853 c
->icache
.sets
= icache_size
/ (c
->icache
.linesz
* c
->icache
.ways
);
854 c
->dcache
.sets
= dcache_size
/ (c
->dcache
.linesz
* c
->dcache
.ways
);
857 * R10000 and R12000 P-caches are odd in a positive way. They're 32kB
858 * 2-way virtually indexed so normally would suffer from aliases. So
859 * normally they'd suffer from aliases but magic in the hardware deals
860 * with that for us so we don't need to take care ourselves.
862 if (current_cpu_data
.cputype
!= CPU_R10000
&&
863 current_cpu_data
.cputype
!= CPU_R12000
)
864 if (dcache_way_size
> PAGE_SIZE
)
865 c
->dcache
.flags
|= MIPS_CACHE_ALIASES
;
867 if (config
& 0x8) /* VI bit */
868 c
->icache
.flags
|= MIPS_CACHE_VTAG
;
/* Per-cputype flag fixups (case labels missing from this extract). */
870 switch (c
->cputype
) {
873 * Some older 20Kc chips doesn't have the 'VI' bit in
874 * the config register.
876 c
->icache
.flags
|= MIPS_CACHE_VTAG
;
880 c
->icache
.flags
|= MIPS_CACHE_IC_F_DC
;
884 printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
886 cpu_has_vtag_icache
? "virtually tagged" : "physically tagged",
887 way_string
[c
->icache
.ways
], c
->icache
.linesz
);
889 printk("Primary data cache %ldkB %s, linesize %d bytes.\n",
890 dcache_size
>> 10, way_string
[c
->dcache
.ways
], c
->dcache
.linesz
);
/*
 * Probe for an external (SC-style) secondary cache by tag wrap-around:
 * fill a 4MB KSEG0 window with valid tags, zero the first line's tag
 * via Index_Store_Tag ops, then walk power-of-two offsets reading tags
 * back until the zero tag reappears - that offset is the S-cache size.
 * Must run from KSEG1 (callers call it through a KSEG1 alias) with no
 * stack traffic inside the sizing loops.  Returns nonzero if an S-cache
 * was found (return statements are missing from this extract).
 * NOTE(review): "¤t_cpu_data" is a mis-encoding of
 * "&current_cpu_data"; pow2 updates, braces and several lines are
 * missing; stray leading numbers are merge artifacts.
 */
894 * If you even _breathe_ on this function, look at the gcc output and make sure
895 * it does not pop things on and off the stack for the cache sizing loop that
896 * executes in KSEG1 space or else you will crash and burn badly. You have
899 static int __init
probe_scache(void)
901 extern unsigned long stext
;
902 unsigned long flags
, addr
, begin
, end
, pow2
;
903 unsigned int config
= read_c0_config();
904 struct cpuinfo_mips
*c
= ¤t_cpu_data
;
907 if (config
& CONF_SC
)
910 begin
= (unsigned long) &stext
;
911 begin
&= ~((4 * 1024 * 1024) - 1);
912 end
= begin
+ (4 * 1024 * 1024);
915 * This is such a bitch, you'd think they would make it easy to do
916 * this. Away you daemons of stupidity!
918 local_irq_save(flags
);
920 /* Fill each size-multiple cache line with a valid tag. */
922 for (addr
= begin
; addr
< end
; addr
= (begin
+ pow2
)) {
923 unsigned long *p
= (unsigned long *) addr
;
924 __asm__
__volatile__("nop" : : "r" (*p
)); /* whee... */
928 /* Load first line with zero (therefore invalid) tag. */
931 __asm__
__volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
932 cache_op(Index_Store_Tag_I
, begin
);
933 cache_op(Index_Store_Tag_D
, begin
);
934 cache_op(Index_Store_Tag_SD
, begin
);
936 /* Now search for the wrap around point. */
939 for (addr
= begin
+ (128 * 1024); addr
< end
; addr
= begin
+ pow2
) {
940 cache_op(Index_Load_Tag_SD
, addr
);
941 __asm__
__volatile__("nop; nop; nop; nop;"); /* hazard... */
942 if (!read_c0_taglo())
946 local_irq_restore(flags
);
949 c
= ¤t_cpu_data
;
951 c
->scache
.linesz
= 16 << ((config
& R4K_CONF_SB
) >> 22);
953 c
->dcache
.waybit
= 0; /* does not matter */
/*
 * Select _clear_page/_copy_page implementations when no secondary cache
 * is used, switching on the primary D-cache line size (16 vs 32 bytes)
 * and, within the 32-byte case, on PRID to pick the R4600 V1.7/V2.0
 * workaround versions.  The 32-bit vs 64-bit (page32) variants are
 * presumably selected by #ifdefs missing from this extract.
 * NOTE(review): case labels, the prid declaration and braces are
 * missing; stray leading numbers are merge artifacts.
 */
958 static void __init
setup_noscache_funcs(void)
962 switch (current_cpu_data
.dcache
.linesz
) {
965 _clear_page
= r4k_clear_page_d16
;
967 _clear_page
= r4k_clear_page32_d16
;
968 _copy_page
= r4k_copy_page_d16
;
972 prid
= read_c0_prid() & 0xfff0;
973 if (prid
== 0x2010) { /* R4600 V1.7 */
974 _clear_page
= r4k_clear_page_r4600_v1
;
975 _copy_page
= r4k_copy_page_r4600_v1
;
976 } else if (prid
== 0x2020) { /* R4600 V2.0 */
977 _clear_page
= r4k_clear_page_r4600_v2
;
978 _copy_page
= r4k_copy_page_r4600_v2
;
981 _clear_page
= r4k_clear_page_d32
;
983 _clear_page
= r4k_clear_page32_d32
;
984 _copy_page
= r4k_copy_page_d32
;
/*
 * Select _clear_page/_copy_page implementations when a secondary cache
 * is present: sanity-check that the D-cache line size does not exceed
 * the S-cache line size, use the Andes versions on R10000/R12000, else
 * switch on the S-cache line size (16/32/64/128).
 * NOTE(review): case labels, breaks and braces are missing from this
 * extract; stray leading numbers are merge artifacts.
 */
990 static void __init
setup_scache_funcs(void)
992 if (current_cpu_data
.dcache
.linesz
> current_cpu_data
.scache
.linesz
)
993 panic("Invalid primary cache configuration detected");
995 if (current_cpu_data
.cputype
== CPU_R10000
||
996 current_cpu_data
.cputype
== CPU_R12000
) {
997 _clear_page
= andes_clear_page
;
998 _copy_page
= andes_copy_page
;
1002 switch (current_cpu_data
.scache
.linesz
) {
1004 _clear_page
= r4k_clear_page_s16
;
1005 _copy_page
= r4k_copy_page_s16
;
1008 _clear_page
= r4k_clear_page_s32
;
1009 _copy_page
= r4k_copy_page_s32
;
1012 _clear_page
= r4k_clear_page_s64
;
1013 _copy_page
= r4k_copy_page_s64
;
1016 _clear_page
= r4k_clear_page_s128
;
1017 _copy_page
= r4k_copy_page_s128
;
/* Signature used to call probe_scache() through its uncached KSEG1 alias. */
typedef int (*probe_func_t)(unsigned long);

/* Secondary-cache controller initializers (sc-r5k.c / sc-rm7k.c). */
extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
/*
 * Set up the secondary cache: on R4000SC/R4400SC-class CPUs probe it by
 * calling probe_scache() through an uncached KSEG1 alias; on R10000-
 * class parts read size/line size from Config; optionally hand off to
 * the R5000/RM7000 S-cache drivers (#ifdef blocks partially visible),
 * falling back to setup_noscache_funcs() otherwise.  MIPS32/MIPS64
 * CPUs reporting an S-cache are rejected.  Finally reports the cache,
 * marks the CPU as having subset (inclusive) caches and installs the
 * S-cache page functions.
 * NOTE(review): case labels, #else/#endif lines and several statements
 * are missing from this extract; "¤t_cpu_data" is a mis-encoding
 * of "&current_cpu_data"; stray leading numbers are merge artifacts.
 */
1026 static void __init
setup_scache(void)
1028 struct cpuinfo_mips
*c
= ¤t_cpu_data
;
1029 unsigned int config
= read_c0_config();
1030 probe_func_t probe_scache_kseg1
;
1034 * Do the probing thing on R4000SC and R4400SC processors. Other
1035 * processors don't have a S-cache that would be relevant to the
1036 * Linux memory managment.
1038 switch (current_cpu_data
.cputype
) {
1045 probe_scache_kseg1
= (probe_func_t
) (KSEG1ADDR(&probe_scache
));
1046 sc_present
= probe_scache_kseg1(config
);
1051 scache_size
= 0x80000 << ((config
& R10K_CONF_SS
) >> 16);
1052 c
->scache
.linesz
= 64 << ((config
>> 13) & 1);
1054 c
->scache
.waybit
= 0;
1060 setup_noscache_funcs();
1061 #ifdef CONFIG_R5000_CPU_SCACHE
1067 setup_noscache_funcs();
1068 #ifdef CONFIG_RM7000_CPU_SCACHE
1078 setup_noscache_funcs();
1082 if ((current_cpu_data
.isa_level
== MIPS_CPU_ISA_M32
||
1083 current_cpu_data
.isa_level
== MIPS_CPU_ISA_M64
) &&
1084 !(current_cpu_data
.scache
.flags
& MIPS_CACHE_NOT_PRESENT
))
1085 panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
1087 printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
1088 scache_size
>> 10, way_string
[c
->scache
.ways
], c
->scache
.linesz
);
1090 current_cpu_data
.options
|= MIPS_CPU_SUBSET_CACHES
;
1091 setup_scache_funcs();
/*
 * Set the default cache coherency algorithm in c0_config, then clear
 * Config.CU on CPU types where the bit is misused (per-cputype case
 * labels are missing from this extract).  Stray leading numbers are
 * merge artifacts.
 */
1094 static inline void coherency_setup(void)
1096 change_c0_config(CONF_CM_CMASK
, CONF_CM_DEFAULT
);
1099 * c0_status.cu=0 specifies that updates by the sc instruction use
1100 * the coherency mode specified by the TLB; 1 means cachable
1101 * coherent update on write will be used. Not all processors have
1102 * this bit and; some wire it to zero, others like Toshiba had the
1103 * silly idea of putting something else there ...
1105 switch (current_cpu_data
.cputype
) {
1112 clear_c0_config(CONF_CU
);
/*
 * Top-level init for R4000-class MMU/cache handling.  Installs the
 * generic cache-error exception handler at the 0x100 vector (both
 * cached KSEG0 and uncached KSEG1 copies), re-checks for D-cache
 * aliases from sets*ways vs PAGE_SIZE, derives shm_align_mask from the
 * D-cache way geometry, wires up all flush_* / _dma_cache_* function
 * pointers to the r4k_* implementations above, and ends with a full
 * cache flush.  Calls to probe_pcache()/setup_scache()/
 * coherency_setup() are presumably among the lines missing from this
 * extract.  Stray leading numbers are merge artifacts - do not compile
 * as-is.
 */
1118 void __init
ld_mmu_r4xx0(void)
1120 extern char except_vec2_generic
;
1122 /* Default cache error handler for R4000 and R5000 family */
1123 memcpy((void *)(KSEG0
+ 0x100), &except_vec2_generic
, 0x80);
1124 memcpy((void *)(KSEG1
+ 0x100), &except_vec2_generic
, 0x80);
1130 if (current_cpu_data
.dcache
.sets
*
1131 current_cpu_data
.dcache
.ways
> PAGE_SIZE
)
1132 current_cpu_data
.dcache
.flags
|= MIPS_CACHE_ALIASES
;
1135 * Some MIPS32 and MIPS64 processors have physically indexed caches.
1136 * This code supports virtually indexed processors and will be
1137 * unnecessarily unefficient on physically indexed processors.
1139 shm_align_mask
= max_t(unsigned long,
1140 current_cpu_data
.dcache
.sets
* current_cpu_data
.dcache
.linesz
- 1,
1143 flush_cache_all
= r4k_flush_cache_all
;
1144 __flush_cache_all
= r4k___flush_cache_all
;
1145 flush_cache_mm
= r4k_flush_cache_mm
;
1146 flush_cache_page
= r4k_flush_cache_page
;
1147 flush_icache_page
= r4k_flush_icache_page
;
1148 flush_cache_range
= r4k_flush_cache_range
;
1150 flush_cache_sigtramp
= r4k_flush_cache_sigtramp
;
1151 flush_icache_all
= r4k_flush_icache_all
;
1152 flush_data_cache_page
= r4k_flush_data_cache_page
;
1153 flush_icache_range
= r4k_flush_icache_range
;
1155 #ifdef CONFIG_NONCOHERENT_IO
1156 _dma_cache_wback_inv
= r4k_dma_cache_wback_inv
;
1157 _dma_cache_wback
= r4k_dma_cache_wback_inv
;
1158 _dma_cache_inv
= r4k_dma_cache_inv
;
1161 __flush_cache_all();