/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/bitops.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cpu-features.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h> /* for run_uncached() */
/* For enabling BCM4710 cache workarounds */
int bcm4710 = 0;
/*
 * Special Variant of smp_call_function for use by cache functions:
 *
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 */
static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
                                   int retry, int wait)
{
	preempt_disable();

#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
	smp_call_function(func, info, retry, wait);
#endif
	func(info);
	preempt_enable();
}
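/*
 * Usage sketch (mirrors the callers further down in this file): the SMP
 * flush entry points bundle their arguments into a small struct and hand
 * the local_* worker to r4k_on_each_cpu(), e.g.
 *
 *	struct flush_icache_range_args args = { .start = start, .end = end };
 *	r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
 */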
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long scache_size __read_mostly;
/*
 * Dummy cache handling routines for machines without board caches
 */
static void cache_noop(void) {}

static struct bcache_ops no_sc_ops = {
	.bc_enable = (void *)cache_noop,
	.bc_disable = (void *)cache_noop,
	.bc_wback_inv = (void *)cache_noop,
	.bc_inv = (void *)cache_noop
};

struct bcache_ops *bcops = &no_sc_ops;
#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)

#define R4600_HIT_CACHEOP_WAR_IMPL					\
do {									\
	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())		\
		*(volatile unsigned long *)CKSEG1;			\
	if (R4600_V1_HIT_CACHEOP_WAR)					\
		__asm__ __volatile__("nop;nop;nop;nop");		\
} while (0)
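/*
 * Background on the workaround above (summarized from the R4600 errata;
 * the precise failure mode is documented there, not here): on R4600 V2.x
 * a Hit-type cache op issued too soon after a preceding memory access can
 * misbehave, and the dummy uncached load from CKSEG1 serializes things
 * first; on V1.x a short run of nops provides the required spacing.
 */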
static void (*r4k_blast_dcache_page)(unsigned long addr);

static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
	R4600_HIT_CACHEOP_WAR_IMPL;
	blast_dcache32_page(addr);
}

static void __cpuinit r4k_blast_dcache_page_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (bcm4710)
		r4k_blast_dcache_page = blast_dcache_page;
	else if (dc_lsize == 0)
		r4k_blast_dcache_page = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page = blast_dcache16_page;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
}
static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);

static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (bcm4710)
		r4k_blast_dcache_page_indexed = blast_dcache_page_indexed;
	else if (dc_lsize == 0)
		r4k_blast_dcache_page_indexed = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
}
static void (* r4k_blast_dcache)(void);

static void __cpuinit r4k_blast_dcache_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (bcm4710)
		r4k_blast_dcache = blast_dcache;
	else if (dc_lsize == 0)
		r4k_blast_dcache = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache = blast_dcache16;
	else if (dc_lsize == 32)
		r4k_blast_dcache = blast_dcache32;
}
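/*
 * The three setup routines above pick a blast implementation once at boot
 * so the hot paths avoid a per-call line-size switch.  Illustrative
 * outcome (example values, not probed from real hardware): a CPU that
 * reports a 32-byte D-cache line ends up with
 *
 *	r4k_blast_dcache      = blast_dcache32;
 *	r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
 *
 * while a configuration with no D-cache (dc_lsize == 0) routes everything
 * to cache_noop.
 */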
/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
#define JUMP_TO_ALIGN(order) \
	__asm__ __volatile__( \
		"b\t1f\n\t" \
		".align\t" #order "\n\t" \
		"1:\n\t" \
		)
#define CACHE32_UNROLL32_ALIGN	JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2	JUMP_TO_ALIGN(11)
static inline void blast_r4600_v1_icache32(void)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32();
	local_irq_restore(flags);
}
static inline void tx49_blast_icache32(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.icache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
	                       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}
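/*
 * Why the even/odd split: cache32_unroll32() touches 32 lines of 32 bytes,
 * i.e. one 0x400-byte (1kB) chunk per call, and this code executes from
 * the very I-cache it is invalidating.  JUMP_TO_ALIGN(11) parks the loop
 * in a 2kB-aligned "even" chunk while it invalidates the odd 1kB chunks,
 * then JUMP_TO_ALIGN(10) moves it into an odd chunk to invalidate the even
 * ones, so the instructions currently being fetched are never knocked out
 * from under the loop.
 */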
static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32_page_indexed(page);
	local_irq_restore(flags);
}
static inline void tx49_blast_icache32_page_indexed(unsigned long page)
{
	unsigned long indexmask = current_cpu_data.icache.waysize - 1;
	unsigned long start = INDEX_BASE + (page & indexmask);
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
	                       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}
static void (* r4k_blast_icache_page)(unsigned long addr);

static void __cpuinit r4k_blast_icache_page_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page = blast_icache16_page;
	else if (ic_lsize == 32)
		r4k_blast_icache_page = blast_icache32_page;
	else if (ic_lsize == 64)
		r4k_blast_icache_page = blast_icache64_page;
}
static void (* r4k_blast_icache_page_indexed)(unsigned long addr);

static void __cpuinit r4k_blast_icache_page_indexed_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page_indexed = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache_page_indexed =
				blast_icache32_r4600_v1_page_indexed;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache_page_indexed =
				tx49_blast_icache32_page_indexed;
		else
			r4k_blast_icache_page_indexed =
				blast_icache32_page_indexed;
	} else if (ic_lsize == 64)
		r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}
static void (* r4k_blast_icache)(void);

static void __cpuinit r4k_blast_icache_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache = blast_icache16;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache = blast_r4600_v1_icache32;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache = tx49_blast_icache32;
		else
			r4k_blast_icache = blast_icache32;
	} else if (ic_lsize == 64)
		r4k_blast_icache = blast_icache64;
}
static void (* r4k_blast_scache_page)(unsigned long addr);

static void __cpuinit r4k_blast_scache_page_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page = blast_scache16_page;
	else if (sc_lsize == 32)
		r4k_blast_scache_page = blast_scache32_page;
	else if (sc_lsize == 64)
		r4k_blast_scache_page = blast_scache64_page;
	else if (sc_lsize == 128)
		r4k_blast_scache_page = blast_scache128_page;
}
static void (* r4k_blast_scache_page_indexed)(unsigned long addr);

static void __cpuinit r4k_blast_scache_page_indexed_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page_indexed = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
	else if (sc_lsize == 32)
		r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
	else if (sc_lsize == 64)
		r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
	else if (sc_lsize == 128)
		r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
}
static void (* r4k_blast_scache)(void);

static void __cpuinit r4k_blast_scache_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache = blast_scache16;
	else if (sc_lsize == 32)
		r4k_blast_scache = blast_scache32;
	else if (sc_lsize == 64)
		r4k_blast_scache = blast_scache64;
	else if (sc_lsize == 128)
		r4k_blast_scache = blast_scache128;
}
/*
 * This is former mm's flush_cache_all() which really should be
 * flush_cache_vunmap these days ...
 */
static inline void local_r4k_flush_cache_all(void * args)
{
	r4k_blast_dcache();
}

static void r4k_flush_cache_all(void)
{
	if (!cpu_has_dc_aliases)
		return;

	r4k_on_each_cpu(local_r4k_flush_cache_all, NULL, 1, 1);
}
static inline void local_r4k___flush_cache_all(void * args)
{
	r4k_blast_dcache();
	r4k_blast_icache();

	switch (current_cpu_data.cputype) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		r4k_blast_scache();
	}
}

void r4k___flush_cache_all(void)
{
	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
}
static inline void local_r4k_flush_cache_range(void * args)
{
	struct vm_area_struct *vma = args;
	int exec = vma->vm_flags & VM_EXEC;

	if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
		return;

	r4k_blast_dcache();
	if (exec)
		r4k_blast_icache();
}

static void r4k_flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	int exec = vma->vm_flags & VM_EXEC;

	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
		r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
}
static inline void local_r4k_flush_cache_mm(void * args)
{
	struct mm_struct *mm = args;

	if (!cpu_context(smp_processor_id(), mm))
		return;

	/*
	 * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if
	 * we only flush the primary caches, while R10000 and R12000 behave
	 * sanely.  R4000SC and R4400SC indexed S-cache ops also invalidate
	 * the primary caches, so we can bail out early.
	 */
	if (current_cpu_data.cputype == CPU_R4000SC ||
	    current_cpu_data.cputype == CPU_R4000MC ||
	    current_cpu_data.cputype == CPU_R4400SC ||
	    current_cpu_data.cputype == CPU_R4400MC) {
		r4k_blast_scache();
		return;
	}

	r4k_blast_dcache();
}

static void r4k_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpu_has_dc_aliases)
		return;

	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
}
struct flush_cache_page_args {
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long pfn;
};

static inline void local_r4k_flush_cache_page(void *args)
{
	struct flush_cache_page_args *fcp_args = args;
	struct vm_area_struct *vma = fcp_args->vma;
	unsigned long addr = fcp_args->addr;
	unsigned long paddr = fcp_args->pfn << PAGE_SHIFT;
	int exec = vma->vm_flags & VM_EXEC;
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	/*
	 * If it owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (cpu_context(smp_processor_id(), mm) == 0)
		return;

	addr &= PAGE_MASK;
	pgdp = pgd_offset(mm, addr);
	pudp = pud_offset(pgdp, addr);
	pmdp = pmd_offset(pudp, addr);
	ptep = pte_offset(pmdp, addr);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_PRESENT))
		return;

	/*
	 * Doing flushes for another ASID than the current one is
	 * too difficult since stupid R4k caches do a TLB translation
	 * for every cache flush operation.  So we do indexed flushes
	 * in that case, which doesn't over-flush the cache too badly.
	 */
	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
		if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
			r4k_blast_dcache_page(addr);
			if (exec && !cpu_icache_snoops_remote_store)
				r4k_blast_scache_page(addr);
		}
		if (exec)
			r4k_blast_icache_page(addr);

		return;
	}

	/*
	 * Do indexed flush, too much work to get the (possible) TLB refills
	 * to work correctly.
	 */
	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
		r4k_blast_dcache_page_indexed(cpu_has_pindexed_dcache ?
					      paddr : addr);
		if (exec && !cpu_icache_snoops_remote_store) {
			r4k_blast_scache_page_indexed(paddr);
		}
	}
	if (exec) {
		if (cpu_has_vtag_icache && mm == current->active_mm) {
			int cpu = smp_processor_id();

			if (cpu_context(cpu, mm) != 0)
				drop_mmu_context(mm, cpu);
		} else
			r4k_blast_icache_page_indexed(addr);
	}
}

void r4k_flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr, unsigned long pfn)
{
	struct flush_cache_page_args args;

	args.vma = vma;
	args.addr = addr;
	args.pfn = pfn;

	r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
}
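/*
 * Note on the hit-vs-indexed choice above: a Hit-type flush takes a
 * virtual address through the TLB, so it is only safe when the page
 * belongs to the current ASID; otherwise the Index-type ops are used,
 * addressed by cache index rather than by translation.  For a physically
 * indexed D-cache the index must come from the physical address, hence
 * the cpu_has_pindexed_dcache ? paddr : addr selection with
 * paddr = pfn << PAGE_SHIFT.
 */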
static inline void local_r4k_flush_data_cache_page(void * addr)
{
	r4k_blast_dcache_page((unsigned long) addr);
}

static void r4k_flush_data_cache_page(unsigned long addr)
{
	r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, 1, 1);
}
struct flush_icache_range_args {
	unsigned long start;
	unsigned long end;
};

static inline void local_r4k_flush_icache_range(void *args)
{
	struct flush_icache_range_args *fir_args = args;
	unsigned long start = fir_args->start;
	unsigned long end = fir_args->end;

	if (!cpu_has_ic_fills_f_dc) {
		if (end - start >= dcache_size) {
			r4k_blast_dcache();
		} else {
			R4600_HIT_CACHEOP_WAR_IMPL;
			protected_blast_dcache_range(start, end);
		}

		if (!cpu_icache_snoops_remote_store && scache_size) {
			if (end - start > scache_size)
				r4k_blast_scache();
			else
				protected_blast_scache_range(start, end);
		}
	}

	if (end - start > icache_size)
		r4k_blast_icache();
	else
		protected_blast_icache_range(start, end);
}

static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_icache_range_args args;

	args.start = start;
	args.end = end;

	r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
	instruction_hazard();
}
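/*
 * The size heuristic above trades precision for speed: once a range is at
 * least as large as the cache, iterating it line by line costs more than
 * simply blasting the whole cache.  Worked example (illustrative sizes):
 * with a 16kB D-cache and a 32-byte line, flushing a 64kB range would take
 * 2048 Hit ops, whereas r4k_blast_dcache() touches only the 512 lines the
 * cache actually holds.
 */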
#ifdef CONFIG_DMA_NONCOHERENT

static void BCMFASTPATH r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_inclusive_pcaches) {
		printk("r4k_dma_cache_wback_inv: cpu_has_inclusive_pcaches set!!!!\n");
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		return;
	}

	/*
	 * Either no secondary cache or the available caches don't have the
	 * subset property so we have to flush the primary caches
	 * explicitly
	 */
	if (size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}

	bc_wback_inv(addr, size);
}

static void BCMFASTPATH r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_inclusive_pcaches) {
		printk("r4k_dma_cache_inv: cpu_has_inclusive_pcaches set!!!!\n");
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		return;
	}

	if (size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}

	bc_inv(addr, size);
}

#endif /* CONFIG_DMA_NONCOHERENT */
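/*
 * These two routines back the _dma_cache_wback_inv / _dma_cache_inv hooks
 * installed in r4k_cache_init() below.  On a non-coherent platform the DMA
 * mapping code calls them around transfers, conceptually (a sketch, not
 * the exact call chain):
 *
 *	_dma_cache_wback_inv(addr, size);   // before the device reads memory
 *	_dma_cache_inv(addr, size);         // before the CPU reads DMAed data
 */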
/*
 * While we're protected against bad userland addresses we don't care
 * very much about what happens in that case.  Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void local_r4k_flush_cache_sigtramp(void * arg)
{
	unsigned long ic_lsize = cpu_icache_line_size();
	unsigned long dc_lsize = cpu_dcache_line_size();
	unsigned long sc_lsize = cpu_scache_line_size();
	unsigned long addr = (unsigned long) arg;

	R4600_HIT_CACHEOP_WAR_IMPL;
	BCM4710_PROTECTED_FILL_TLB(addr);
	BCM4710_PROTECTED_FILL_TLB(addr + 4);
	if (dc_lsize)
		protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
	if (!cpu_icache_snoops_remote_store && scache_size)
		protected_writeback_scache_line(addr & ~(sc_lsize - 1));
	if (ic_lsize)
		protected_flush_icache_line(addr & ~(ic_lsize - 1));
	if (MIPS4K_ICACHE_REFILL_WAR) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noat\n\t"
			".set mips3\n\t"
#ifdef CONFIG_32BIT
			"la	$at,1f\n\t"
#endif
#ifdef CONFIG_64BIT
			"dla	$at,1f\n\t"
#endif
			"cache	%0,($at)\n\t"
			"nop; nop; nop\n"
			"1:\n\t"
			".set pop"
			:
			: "i" (Hit_Invalidate_I));
	}
	if (MIPS_CACHE_SYNC_WAR)
		__asm__ __volatile__ ("sync");
}

static void r4k_flush_cache_sigtramp(unsigned long addr)
{
	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
}
static void r4k_flush_icache_all(void)
{
	if (cpu_has_vtag_icache)
		r4k_blast_icache();
}
static inline void rm7k_erratum31(void)
{
	const unsigned long ic_lsize = 32;
	unsigned long addr;

	/* RM7000 erratum #31. The icache is screwed at startup. */
	write_c0_taglo(0);
	write_c0_taghi(0);

	for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noreorder\n\t"
			".set mips3\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			"cache\t%2, 0(%0)\n\t"
			"cache\t%2, 0x1000(%0)\n\t"
			"cache\t%2, 0x2000(%0)\n\t"
			"cache\t%2, 0x3000(%0)\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			".set pop\n"
			:
			: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
	}
}
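/*
 * Reading the loop above: the RM7000 I-cache is 16kB 4-way, i.e. 4kB per
 * way, so the four 0x1000 offsets hit the same index in each of the four
 * ways.  For every 32-byte line of one way (4096/32 = 128 iterations) the
 * sequence is store-tag (invalidate), Fill, then store-tag again, which
 * clears the undefined post-reset tag state.
 */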
static char *way_string[] __cpuinitdata = { NULL, "direct mapped", "2-way",
	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
};
static void __cpuinit probe_pcache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	unsigned int prid = read_c0_prid();
	unsigned long config1;
	unsigned int lsize;
	switch (c->cputype) {
	case CPU_R4600:			/* QED style two way caches? */
	case CPU_R4700:
	case CPU_R5000:
	case CPU_NEVADA:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;
	case CPU_R5432:
	case CPU_R5500:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;
	case CPU_TX49XX:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		c->options |= MIPS_CPU_PREFETCH;
		break;
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R4300:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
		c->icache.linesz = 64;
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
		c->dcache.linesz = 32;
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_PREFETCH;
		break;
	case CPU_VR4133:
		write_c0_config(config & ~VR41_CONF_P4K);
	case CPU_VR4131:
		/* workaround for cache instruction bug of VR4131 */
		if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
		    c->processor_id == 0x0c82U) {
			config |= 0x00400000U;
			if (c->processor_id == 0x0c80U)
				config |= VR41_CONF_BP;
			write_c0_config(config);
		} else
			c->options |= MIPS_CPU_CACHE_CDEX_P;

		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);
		break;
	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;
	case CPU_RM7000:
		rm7k_erratum31();

	case CPU_RM9000:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = __ffs(icache_size / c->icache.ways);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);

#if !defined(CONFIG_SMP) || !defined(RM9000_CDEX_SMP_WAR)
		c->options |= MIPS_CPU_CACHE_CDEX_P;
#endif
		c->options |= MIPS_CPU_PREFETCH;
		break;
	default:
		if (!(config & MIPS_CONF_M))
			panic("Don't know how to probe P-caches on this cpu.");

		/*
		 * So we seem to be a MIPS32 or MIPS64 CPU
		 * So let's probe the I-cache ...
		 */
		config1 = read_c0_config1();

		if ((lsize = ((config1 >> 19) & 7)))
			c->icache.linesz = 2 << lsize;
		else
			c->icache.linesz = lsize;
		c->icache.sets = 64 << ((config1 >> 22) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);

		icache_size = c->icache.sets *
		              c->icache.ways *
		              c->icache.linesz;
		c->icache.waybit = __ffs(icache_size/c->icache.ways);

		if (config & 0x8)		/* VI bit */
			c->icache.flags |= MIPS_CACHE_VTAG;

		/*
		 * Now probe the MIPS32 / MIPS64 data cache.
		 */
		c->dcache.flags = 0;

		if ((lsize = ((config1 >> 10) & 7)))
			c->dcache.linesz = 2 << lsize;
		else
			c->dcache.linesz = lsize;
		c->dcache.sets = 64 << ((config1 >> 13) & 7);
		c->dcache.ways = 1 + ((config1 >> 7) & 7);

		dcache_size = c->dcache.sets *
		              c->dcache.ways *
		              c->dcache.linesz;
		c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);

		c->options |= MIPS_CPU_PREFETCH;
		break;
	}
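	/*
	 * Worked decode of the config1 fields used above (illustrative
	 * values, not from any particular part): with IL = 4, IS = 2 and
	 * IA = 3 the I-cache probe yields linesz = 2 << 4 = 32 bytes,
	 * sets = 64 << 2 = 256 and ways = 1 + 3 = 4, so
	 * icache_size = 256 * 4 * 32 = 32kB and waybit = __ffs(32768/4) = 13.
	 */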
	/*
	 * Processor configuration sanity check for the R4000SC erratum
	 * #5.  With page sizes larger than 32kB there is no possibility
	 * to get a VCE exception anymore so we don't care about this
	 * misconfiguration.  The case is rather theoretical anyway;
	 * presumably no vendor is shipping his hardware in the "bad"
	 * configuration.
	 */
	if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 &&
	    !(config & CONF_SC) && c->icache.linesz != 16 &&
	    PAGE_SIZE <= 0x8000)
		panic("Improper R4000SC processor configuration detected");
	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = c->icache.linesz ?
		icache_size / (c->icache.linesz * c->icache.ways) : 0;
	c->dcache.sets = c->dcache.linesz ?
		dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;
	/*
	 * R10000 and R12000 P-caches are odd in a positive way.  They're
	 * 32kB 2-way virtually indexed, so they would normally suffer from
	 * aliases, but magic in the hardware deals with that for us so we
	 * don't need to take care ourselves.
	 */
	switch (c->cputype) {
	case CPU_20KC:
	case CPU_25KF:
		c->dcache.flags |= MIPS_CACHE_PINDEX;
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
	case CPU_SB1:
		break;
	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
		if ((read_c0_config7() & (1 << 16))) {
			/* effectively physically indexed dcache,
			   thus no virtual aliases. */
			c->dcache.flags |= MIPS_CACHE_PINDEX;
			break;
		}
	default:
		if (c->dcache.waysize > PAGE_SIZE)
			c->dcache.flags |= MIPS_CACHE_ALIASES;
	}
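	/*
	 * The aliasing test above is pure arithmetic: a way holds waysize
	 * bytes, and virtual aliases are possible as soon as one way spans
	 * more than a page.  Example with made-up numbers: a 32kB 4-way
	 * D-cache has waysize = 8kB; with 4kB pages two different virtual
	 * pages can map the same physical line, so MIPS_CACHE_ALIASES is
	 * set.  A 16kB 4-way cache (waysize == PAGE_SIZE) stays alias-free.
	 */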
	switch (c->cputype) {
	case CPU_20KC:
		/*
		 * Some older 20Kc chips don't have the 'VI' bit in
		 * the config register.
		 */
		c->icache.flags |= MIPS_CACHE_VTAG;
		break;

	case CPU_AU1000:
	case CPU_AU1500:
	case CPU_AU1100:
	case CPU_AU1550:
	case CPU_AU1200:
		c->icache.flags |= MIPS_CACHE_IC_F_DC;
		break;
	}
	printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
	       icache_size >> 10,
	       c->icache.flags & MIPS_CACHE_VTAG ?
			"virtually tagged" : "physically tagged",
	       way_string[c->icache.ways], c->icache.linesz);

	printk("Primary data cache %ldkB, %s, linesize %d bytes.\n",
	       dcache_size >> 10, way_string[c->dcache.ways], c->dcache.linesz);
}
void __init r4k_probe_cache(void)
{
	unsigned long config1 = read_c0_config1();
	unsigned int dcache_size, lsize, ways, sets;

	if ((lsize = ((config1 >> 10) & 7)))
		lsize = 2 << lsize;
	sets = 64 << ((config1 >> 13) & 7);
	ways = 1 + ((config1 >> 7) & 7);
	dcache_size = sets * ways * lsize;

	shm_align_mask = max_t(unsigned long,
			       dcache_size / ways - 1,
			       PAGE_SIZE - 1);
	if (shm_align_mask != (PAGE_SIZE - 1))
		shm_align_shift = ffs((shm_align_mask + 1)) - 1;
	else
		shm_align_mask = PAGE_SIZE - 1;
}
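/*
 * Effect of the computation above, with example numbers: a 16kB 4-way
 * D-cache gives a 4kB way, so with 4kB pages shm_align_mask stays
 * PAGE_SIZE - 1 and shared mappings need no extra colouring; a 32kB 2-way
 * cache gives a 16kB way, shm_align_mask becomes 0x3fff and
 * shm_align_shift = ffs(0x4000) - 1 = 14, forcing shared mappings onto
 * 16kB colour boundaries.
 */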
/*
 * If you even _breathe_ on this function, look at the gcc output and make sure
 * it does not pop things on and off the stack for the cache sizing loop that
 * executes in KSEG1 space or else you will crash and burn badly.  You have
 * been warned.
 */
static int __cpuinit probe_scache(void)
{
	extern unsigned long stext;
	unsigned long flags, addr, begin, end, pow2;
	unsigned int config = read_c0_config();
	struct cpuinfo_mips *c = &current_cpu_data;

	if (config & CONF_SC)
		return 0;

	begin = (unsigned long) &stext;
	begin &= ~((4 * 1024 * 1024) - 1);
	end = begin + (4 * 1024 * 1024);

	/*
	 * This is such a bitch, you'd think they would make it easy to do
	 * this.  Away you daemons of stupidity!
	 */
	local_irq_save(flags);

	/* Fill each size-multiple cache line with a valid tag. */
	pow2 = (64 * 1024);
	for (addr = begin; addr < end; addr = (begin + pow2)) {
		unsigned long *p = (unsigned long *) addr;
		__asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
		pow2 <<= 1;
	}

	/* Load first line with zero (therefore invalid) tag. */
	write_c0_taglo(0);
	write_c0_taghi(0);
	__asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
	cache_op(Index_Store_Tag_I, begin);
	cache_op(Index_Store_Tag_D, begin);
	cache_op(Index_Store_Tag_SD, begin);

	/* Now search for the wrap around point. */
	pow2 = (128 * 1024);
	for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
		cache_op(Index_Load_Tag_SD, addr);
		__asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
		if (!read_c0_taglo())
			break;
		pow2 <<= 1;
	}
	local_irq_restore(flags);
	addr -= begin;

	scache_size = addr;
	c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
	c->scache.ways = 1;
	c->dcache.waybit = 0;	/* does not matter */

	return 1;
}
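/*
 * How the wrap-around search sizes the S-cache, with a worked example:
 * suppose a 512kB secondary cache.  The first loop planted valid tags at
 * begin, begin+64k, begin+128k, ... and a zero tag was then stored at the
 * index of `begin`.  Probing begin+128k and begin+256k returns the
 * non-zero tags written earlier, but at begin+512k the index wraps around
 * to the same line as `begin`, the zero tag is read back, and the loop
 * exits with addr - begin == 512k == scache_size.
 */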
extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
extern int mips_sc_init(void);

static void __cpuinit setup_scache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	int sc_present = 0;

	/*
	 * Do the probing thing on R4000SC and R4400SC processors.  Other
	 * processors don't have a S-cache that would be relevant to the
	 * Linux memory management.
	 */
	switch (c->cputype) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		sc_present = run_uncached(probe_scache);
		if (sc_present)
			c->options |= MIPS_CPU_CACHE_CDEX_S;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
		c->scache.linesz = 64 << ((config >> 13) & 1);
		c->scache.ways = 2;
		c->scache.waybit = 0;
		sc_present = 1;
		break;

	case CPU_R5000:
	case CPU_NEVADA:
#ifdef CONFIG_R5000_CPU_SCACHE
		r5k_sc_init();
#endif
		return;

	case CPU_RM7000:
	case CPU_RM9000:
#ifdef CONFIG_RM7000_CPU_SCACHE
		rm7k_sc_init();
#endif
		return;

	default:
		if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
		    c->isa_level == MIPS_CPU_ISA_M32R2 ||
		    c->isa_level == MIPS_CPU_ISA_M64R1 ||
		    c->isa_level == MIPS_CPU_ISA_M64R2) {
#ifdef CONFIG_MIPS_CPU_SCACHE
			if (mips_sc_init()) {
				scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
				printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
				       scache_size >> 10,
				       way_string[c->scache.ways], c->scache.linesz);
			}
#else
			if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
				panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
#endif
			return;
		}
		sc_present = 0;
	}

	if (!sc_present)
		return;

	/* compute a couple of other cache variables */
	c->scache.waysize = scache_size / c->scache.ways;

	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);

	printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
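/*
 * Example of the R10000-class decode above (values for illustration): the
 * SS field of c0_config selects the S-cache size as 0x80000 << SS, i.e.
 * SS = 0 gives 512kB and SS = 2 gives 2MB, while config bit 13 selects a
 * 64- or 128-byte S-cache line.
 */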
void au1x00_fixup_config_od(void)
{
	/*
	 * c0_config.od (bit 19) was write only (and read as 0)
	 * on the early revisions of Alchemy SOCs.  It disables the bus
	 * transaction overlapping and needs to be set to fix various errata.
	 */
	switch (read_c0_prid()) {
	case 0x00030100: /* Au1000 DA */
	case 0x00030201: /* Au1000 HA */
	case 0x00030202: /* Au1000 HB */
	case 0x01030200: /* Au1500 AB */
	/*
	 * The Au1100 errata are actually silent about this bit, so we set it
	 * just in case for those revisions that require it to be set
	 * according to arch/mips/au1000/common/cputable.c
	 */
	case 0x02030200: /* Au1100 AB */
	case 0x02030201: /* Au1100 BA */
	case 0x02030202: /* Au1100 BC */
		set_c0_config(1 << 19);
		break;
	}
}
#if defined(CONFIG_BCM47XX) || defined(CONFIG_BCM5365)
static void __cpuinit _change_cachability(u32 cm)
{
	change_c0_config(CONF_CM_CMASK, cm);

	if (BCM330X(current_cpu_data.processor_id)) {
		cm = read_c0_diag();
		/* Enable icache */
		cm |= (1 << 31);
		/* Enable dcache */
		cm |= (1 << 30);
		write_c0_diag(cm);
	}
}

static void (*change_cachability)(u32);
#endif
static void __cpuinit coherency_setup(void)
{
	change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);

	/*
	 * c0_status.cu=0 specifies that updates by the sc instruction use
	 * the coherency mode specified by the TLB; 1 means cachable
	 * coherent update on write will be used.  Not all processors have
	 * this bit; some wire it to zero, others like Toshiba had the
	 * silly idea of putting something else there ...
	 */
	switch (current_cpu_data.cputype) {
#if defined(CONFIG_BCM47XX) || defined(CONFIG_BCM5365)
	case CPU_BCM3302:
		{
			u32 cm = read_c0_diag();
			cm |= (1 << 31);
			write_c0_diag(cm);
		}
		break;
#endif
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		clear_c0_config(CONF_CU);
		break;

	/*
	 * We need to catch the early Alchemy SOCs with
	 * the write-only c0_config.od bit and set it back to one...
	 */
	case CPU_AU1000: /* rev. DA, HA, HB */
	case CPU_AU1100: /* rev. AB, BA, BC ?? */
	case CPU_AU1500: /* rev. AB */
		au1x00_fixup_config_od();
		break;
	}
}
void __cpuinit r4k_cache_init(void)
{
	extern void build_clear_page(void);
	extern void build_copy_page(void);
	extern char except_vec2_generic;
	struct cpuinfo_mips *c = &current_cpu_data;

	/* Default cache error handler for R4000 and R5000 family */
	set_uncached_handler(0x100, &except_vec2_generic, 0x80);

	/* Check if special workarounds are required */
#ifdef CONFIG_BCM47XX
	if (current_cpu_data.cputype == CPU_BCM4710 && (current_cpu_data.processor_id & 0xff) == 0) {
		printk("Enabling BCM4710A0 cache workarounds.\n");
		bcm4710 = 1;
	} else
#endif
		bcm4710 = 0;

	probe_pcache();
	setup_scache();

	r4k_blast_dcache_page_setup();
	r4k_blast_dcache_page_indexed_setup();
	r4k_blast_dcache_setup();
	r4k_blast_icache_page_setup();
	r4k_blast_icache_page_indexed_setup();
	r4k_blast_icache_setup();
	r4k_blast_scache_page_setup();
	r4k_blast_scache_page_indexed_setup();
	r4k_blast_scache_setup();

	/*
	 * Some MIPS32 and MIPS64 processors have physically indexed caches.
	 * This code supports virtually indexed processors and will be
	 * unnecessarily inefficient on physically indexed processors.
	 */
	flush_cache_all		= r4k_flush_cache_all;
	__flush_cache_all	= r4k___flush_cache_all;
	flush_cache_mm		= r4k_flush_cache_mm;
	flush_cache_page	= r4k_flush_cache_page;
	flush_cache_range	= r4k_flush_cache_range;

	flush_cache_sigtramp	= r4k_flush_cache_sigtramp;
	flush_icache_all	= r4k_flush_icache_all;
	local_flush_data_cache_page	= local_r4k_flush_data_cache_page;
	flush_data_cache_page	= r4k_flush_data_cache_page;
	flush_icache_range	= r4k_flush_icache_range;

#ifdef CONFIG_DMA_NONCOHERENT
	_dma_cache_wback_inv	= r4k_dma_cache_wback_inv;
	_dma_cache_wback	= r4k_dma_cache_wback_inv;
	_dma_cache_inv		= r4k_dma_cache_inv;
#endif

	build_clear_page();
	build_copy_page();
	local_r4k___flush_cache_all(NULL);
	coherency_setup();

#if defined(CONFIG_BCM47XX) || defined(CONFIG_BCM5365)
	change_cachability = (void (*)(u32)) KSEG1ADDR((unsigned long)(_change_cachability));
	_change_cachability(CONF_CM_DEFAULT);
#endif
}
/* fuse package DCACHE BUG patch exports */
void (*fuse_flush_cache_all)(void) = r4k___flush_cache_all;
void (*fuse_flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
			      unsigned long pfn) = r4k_flush_cache_page;
EXPORT_SYMBOL(fuse_flush_cache_page);
EXPORT_SYMBOL(fuse_flush_cache_all);