/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/bitops.h>
#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cpu-features.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h> /* for run_uncached() */
/*
 * Special variant of smp_call_function for use by cache functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 */
static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
				   int wait)
{
	preempt_disable();

#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
	smp_call_function(func, info, wait);
#endif
	func(info);
	preempt_enable();
}
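/*
 * Added note (illustrative, not from the original file): on
 * CONFIG_MIPS_MT_SMP and CONFIG_MIPS_MT_SMTC kernels the #if above
 * compiles out the smp_call_function() broadcast, so a call such as
 *
 *	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1);
 *
 * reduces to local_r4k___flush_cache_all(NULL) on the calling CPU only,
 * because all hardware threads share a single primary cache.
 */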
#if defined(CONFIG_MIPS_CMP)
#define cpu_has_safe_index_cacheops 0
#else
#define cpu_has_safe_index_cacheops 1
#endif
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long scache_size __read_mostly;
/*
 * Dummy cache handling routines for machines without board caches
 */
static void cache_noop(void) {}
static struct bcache_ops no_sc_ops = {
	.bc_enable = (void *)cache_noop,
	.bc_disable = (void *)cache_noop,
	.bc_wback_inv = (void *)cache_noop,
	.bc_inv = (void *)cache_noop
};

struct bcache_ops *bcops = &no_sc_ops;
#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)
#define R4600_HIT_CACHEOP_WAR_IMPL					\
do {									\
	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())		\
		*(volatile unsigned long *)CKSEG1;			\
	if (R4600_V1_HIT_CACHEOP_WAR)					\
		__asm__ __volatile__("nop;nop;nop;nop");		\
} while (0)
static void (*r4k_blast_dcache_page)(unsigned long addr);

static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
	R4600_HIT_CACHEOP_WAR_IMPL;
	blast_dcache32_page(addr);
}
static inline void r4k_blast_dcache_page_dc64(unsigned long addr)
{
	R4600_HIT_CACHEOP_WAR_IMPL;
	blast_dcache64_page(addr);
}
static void __cpuinit r4k_blast_dcache_page_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page = blast_dcache16_page;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
	else if (dc_lsize == 64)
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc64;
}
static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);

static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page_indexed = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
	else if (dc_lsize == 64)
		r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed;
}
static void (* r4k_blast_dcache)(void);

static void __cpuinit r4k_blast_dcache_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache = blast_dcache16;
	else if (dc_lsize == 32)
		r4k_blast_dcache = blast_dcache32;
	else if (dc_lsize == 64)
		r4k_blast_dcache = blast_dcache64;
}
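/*
 * Added sketch (illustrative, not from the original file): once the
 * *_setup() routines above have run, every flush path goes through
 * these function pointers, so the line-size dispatch is resolved once
 * at boot instead of on every call.  A hypothetical caller:
 */
static inline void example_blast_one_page(unsigned long addr)
{
	/* dispatches to blast_dcache{16,32,64}_page or cache_noop */
	r4k_blast_dcache_page(addr);
}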
/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
#define JUMP_TO_ALIGN(order) \
	__asm__ __volatile__( \
		"b\t1f\n\t" \
		".align\t" #order "\n\t" \
		"1:\n\t" \
		)
#define CACHE32_UNROLL32_ALIGN	JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2	JUMP_TO_ALIGN(11)
static inline void blast_r4600_v1_icache32(void)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32();
	local_irq_restore(flags);
}
static inline void tx49_blast_icache32(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.icache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}
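/*
 * Added note (illustrative arithmetic): each cache32_unroll32() call
 * covers 32 lines * 32 bytes = 0x400 bytes of index space, and the
 * JUMP_TO_ALIGN macros pin the loop code itself within one such
 * 0x400-byte chunk.  Stepping addr by 0x400 * 2 therefore touches only
 * the alternate chunks, so the loop never invalidates the i-cache
 * chunk it is currently executing from; the two half-passes above
 * cover the even and odd chunks between them.
 */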
static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32_page_indexed(page);
	local_irq_restore(flags);
}
static inline void tx49_blast_icache32_page_indexed(unsigned long page)
{
	unsigned long indexmask = current_cpu_data.icache.waysize - 1;
	unsigned long start = INDEX_BASE + (page & indexmask);
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}
static void (* r4k_blast_icache_page)(unsigned long addr);

static void __cpuinit r4k_blast_icache_page_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page = blast_icache16_page;
	else if (ic_lsize == 32)
		r4k_blast_icache_page = blast_icache32_page;
	else if (ic_lsize == 64)
		r4k_blast_icache_page = blast_icache64_page;
}
static void (* r4k_blast_icache_page_indexed)(unsigned long addr);

static void __cpuinit r4k_blast_icache_page_indexed_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page_indexed = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache_page_indexed =
				blast_icache32_r4600_v1_page_indexed;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache_page_indexed =
				tx49_blast_icache32_page_indexed;
		else
			r4k_blast_icache_page_indexed =
				blast_icache32_page_indexed;
	} else if (ic_lsize == 64)
		r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}
static void (* r4k_blast_icache)(void);

static void __cpuinit r4k_blast_icache_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache = blast_icache16;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache = blast_r4600_v1_icache32;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache = tx49_blast_icache32;
		else
			r4k_blast_icache = blast_icache32;
	} else if (ic_lsize == 64)
		r4k_blast_icache = blast_icache64;
}
static void (* r4k_blast_scache_page)(unsigned long addr);

static void __cpuinit r4k_blast_scache_page_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page = blast_scache16_page;
	else if (sc_lsize == 32)
		r4k_blast_scache_page = blast_scache32_page;
	else if (sc_lsize == 64)
		r4k_blast_scache_page = blast_scache64_page;
	else if (sc_lsize == 128)
		r4k_blast_scache_page = blast_scache128_page;
}
static void (* r4k_blast_scache_page_indexed)(unsigned long addr);

static void __cpuinit r4k_blast_scache_page_indexed_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page_indexed = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
	else if (sc_lsize == 32)
		r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
	else if (sc_lsize == 64)
		r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
	else if (sc_lsize == 128)
		r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
}
static void (* r4k_blast_scache)(void);

static void __cpuinit r4k_blast_scache_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache = blast_scache16;
	else if (sc_lsize == 32)
		r4k_blast_scache = blast_scache32;
	else if (sc_lsize == 64)
		r4k_blast_scache = blast_scache64;
	else if (sc_lsize == 128)
		r4k_blast_scache = blast_scache128;
}
static inline void local_r4k___flush_cache_all(void * args)
{
#if defined(CONFIG_CPU_LOONGSON2)
	r4k_blast_scache();
	return;
#endif

	r4k_blast_dcache();
	r4k_blast_icache();

	switch (current_cpu_type()) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		r4k_blast_scache();
	}
}

static void r4k___flush_cache_all(void)
{
	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1);
}
static inline int has_valid_asid(const struct mm_struct *mm)
{
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
	int i;

	for_each_online_cpu(i)
		if (cpu_context(i, mm))
			return 1;

	return 0;
#else
	return cpu_context(smp_processor_id(), mm);
#endif
}
static void r4k__flush_cache_vmap(void)
{
	r4k_blast_dcache();
}

static void r4k__flush_cache_vunmap(void)
{
	r4k_blast_dcache();
}
static inline void local_r4k_flush_cache_range(void * args)
{
	struct vm_area_struct *vma = args;
	int exec = vma->vm_flags & VM_EXEC;

	if (!(has_valid_asid(vma->vm_mm)))
		return;

	r4k_blast_dcache();
	if (exec)
		r4k_blast_icache();
}
static void r4k_flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	int exec = vma->vm_flags & VM_EXEC;

	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
		r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1);
}
static inline void local_r4k_flush_cache_mm(void * args)
{
	struct mm_struct *mm = args;

	if (!has_valid_asid(mm))
		return;

	/*
	 * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if we
	 * only flush the primary caches but R10000 and R12000 behave sanely.
	 * R4000SC and R4400SC indexed S-cache ops also invalidate primary
	 * caches, so we can bail out early.
	 */
	if (current_cpu_type() == CPU_R4000SC ||
	    current_cpu_type() == CPU_R4000MC ||
	    current_cpu_type() == CPU_R4400SC ||
	    current_cpu_type() == CPU_R4400MC) {
		r4k_blast_scache();
		return;
	}

	r4k_blast_dcache();
}
static void r4k_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpu_has_dc_aliases)
		return;

	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1);
}
struct flush_cache_page_args {
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long pfn;
};
static inline void local_r4k_flush_cache_page(void *args)
{
	struct flush_cache_page_args *fcp_args = args;
	struct vm_area_struct *vma = fcp_args->vma;
	unsigned long addr = fcp_args->addr;
	struct page *page = pfn_to_page(fcp_args->pfn);
	int exec = vma->vm_flags & VM_EXEC;
	struct mm_struct *mm = vma->vm_mm;
	int map_coherent = 0;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	void *vaddr;

	/*
	 * If the mm owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (!has_valid_asid(mm))
		return;

	addr &= PAGE_MASK;
	pgdp = pgd_offset(mm, addr);
	pudp = pud_offset(pgdp, addr);
	pmdp = pmd_offset(pudp, addr);
	ptep = pte_offset(pmdp, addr);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_present(*ptep)))
		return;

	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
		vaddr = NULL;
	else {
		/*
		 * Use kmap_coherent or kmap_atomic to do flushes for
		 * another ASID than the current one.
		 */
		map_coherent = (cpu_has_dc_aliases &&
				page_mapped(page) && !Page_dcache_dirty(page));
		if (map_coherent)
			vaddr = kmap_coherent(page, addr);
		else
			vaddr = kmap_atomic(page, KM_USER0);
		addr = (unsigned long)vaddr;
	}

	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
		r4k_blast_dcache_page(addr);
		if (exec && !cpu_icache_snoops_remote_store)
			r4k_blast_scache_page(addr);
	}
	if (exec) {
		if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
			int cpu = smp_processor_id();

			if (cpu_context(cpu, mm) != 0)
				drop_mmu_context(mm, cpu);
		} else
			r4k_blast_icache_page(addr);
	}

	if (vaddr) {
		if (map_coherent)
			kunmap_coherent();
		else
			kunmap_atomic(vaddr, KM_USER0);
	}
}
static void r4k_flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr, unsigned long pfn)
{
	struct flush_cache_page_args args;

	args.vma = vma;
	args.addr = addr;
	args.pfn = pfn;

	r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1);
}
static inline void local_r4k_flush_data_cache_page(void * addr)
{
	r4k_blast_dcache_page((unsigned long) addr);
}

static void r4k_flush_data_cache_page(unsigned long addr)
{
	if (in_atomic())
		local_r4k_flush_data_cache_page((void *)addr);
	else
		r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr,
				1);
}
struct flush_icache_range_args {
	unsigned long start;
	unsigned long end;
};
static inline void local_r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	if (!cpu_has_ic_fills_f_dc) {
		if (end - start >= dcache_size) {
			r4k_blast_dcache();
		} else {
			R4600_HIT_CACHEOP_WAR_IMPL;
			protected_blast_dcache_range(start, end);
		}
	}

	if (end - start > icache_size)
		r4k_blast_icache();
	else
		protected_blast_icache_range(start, end);
}
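/*
 * Added note (illustrative numbers): the size checks above trade
 * per-line hit ops for a full index blast once a range is at least as
 * large as the cache itself.  With a 32kB d-cache and 32-byte lines,
 * flushing a 64kB range line by line would issue 2048 hit ops, while
 * r4k_blast_dcache() walks each cache index exactly once.
 */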
static inline void local_r4k_flush_icache_range_ipi(void *args)
{
	struct flush_icache_range_args *fir_args = args;
	unsigned long start = fir_args->start;
	unsigned long end = fir_args->end;

	local_r4k_flush_icache_range(start, end);
}
static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_icache_range_args args;

	args.start = start;
	args.end = end;

	r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args, 1);
	instruction_hazard();
}
#ifdef CONFIG_DMA_NONCOHERENT

static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		return;
	}

	/*
	 * Either no secondary cache or the available caches don't have the
	 * subset property so we have to flush the primary caches
	 * explicitly
	 */
	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}

	bc_wback_inv(addr, size);
}
static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else {
			unsigned long lsize = cpu_scache_line_size();
			unsigned long almask = ~(lsize - 1);

			/*
			 * There is no clearly documented alignment requirement
			 * for the cache instruction on MIPS processors, and
			 * some processors, among them the QED RM5200 and
			 * RM7000, will throw an address error for cache hit
			 * ops with insufficient alignment.  Solved by aligning
			 * the address to the cache line size.
			 */
			cache_op(Hit_Writeback_Inv_SD, addr & almask);
			cache_op(Hit_Writeback_Inv_SD,
				 (addr + size - 1) & almask);
			blast_inv_scache_range(addr, addr + size);
		}
		return;
	}

	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		unsigned long lsize = cpu_dcache_line_size();
		unsigned long almask = ~(lsize - 1);

		R4600_HIT_CACHEOP_WAR_IMPL;
		cache_op(Hit_Writeback_Inv_D, addr & almask);
		cache_op(Hit_Writeback_Inv_D, (addr + size - 1) & almask);
		blast_inv_dcache_range(addr, addr + size);
	}

	bc_inv(addr, size);
}
#endif /* CONFIG_DMA_NONCOHERENT */
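/*
 * Added worked example (illustrative values) for the alignment fix-up
 * in r4k_dma_cache_inv() above: with 32-byte lines,
 * almask = ~(32 - 1) = ~0x1f.  For addr = 0x80001234 and size = 0x40,
 * the two cache_op() calls hit 0x80001220 and 0x80001260, i.e. the
 * line-aligned addresses of the first and last lines the range
 * touches, so partially covered boundary lines are written back
 * before blast_inv_*_range() invalidates the body of the range.
 */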
/*
 * While we're protected against bad userland addresses we don't care
 * very much about what happens in that case.  Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void local_r4k_flush_cache_sigtramp(void * arg)
{
	unsigned long ic_lsize = cpu_icache_line_size();
	unsigned long dc_lsize = cpu_dcache_line_size();
	unsigned long sc_lsize = cpu_scache_line_size();
	unsigned long addr = (unsigned long) arg;

	R4600_HIT_CACHEOP_WAR_IMPL;
	if (dc_lsize)
		protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
	if (!cpu_icache_snoops_remote_store && scache_size)
		protected_writeback_scache_line(addr & ~(sc_lsize - 1));
	if (ic_lsize)
		protected_flush_icache_line(addr & ~(ic_lsize - 1));
	if (MIPS4K_ICACHE_REFILL_WAR) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noat\n\t"
			".set mips3\n\t"
#ifdef CONFIG_32BIT
			"la	$at,1f\n\t"
#endif
#ifdef CONFIG_64BIT
			"dla	$at,1f\n\t"
#endif
			"cache	%0,($at)\n\t"
			"nop; nop; nop\n"
			"1:\n\t"
			".set pop"
			:
			: "i" (Hit_Invalidate_I));
	}
	if (MIPS_CACHE_SYNC_WAR)
		__asm__ __volatile__ ("sync");
}
static void r4k_flush_cache_sigtramp(unsigned long addr)
{
	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1);
}
static void r4k_flush_icache_all(void)
{
	if (cpu_has_vtag_icache)
		r4k_blast_icache();
}
static inline void rm7k_erratum31(void)
{
	const unsigned long ic_lsize = 32;
	unsigned long addr;

	/* RM7000 erratum #31. The icache is screwed at startup. */
	write_c0_taglo(0);
	write_c0_taghi(0);

	for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noreorder\n\t"
			".set mips3\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			"cache\t%2, 0(%0)\n\t"
			"cache\t%2, 0x1000(%0)\n\t"
			"cache\t%2, 0x2000(%0)\n\t"
			"cache\t%2, 0x3000(%0)\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			".set pop\n"
			:
			: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
	}
}
static char *way_string[] __cpuinitdata = { NULL, "direct mapped", "2-way",
	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
};
static void __cpuinit probe_pcache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	unsigned int prid = read_c0_prid();
	unsigned long config1;
	unsigned int lsize;

	switch (c->cputype) {
	case CPU_R4600:			/* QED style two way caches? */
	case CPU_R4700:
	case CPU_R5000:
	case CPU_NEVADA:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;
	case CPU_R5432:
	case CPU_R5500:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P | MIPS_CPU_PREFETCH;
		break;
	case CPU_TX49XX:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		c->options |= MIPS_CPU_PREFETCH;
		break;
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R4300:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
		c->icache.linesz = 64;
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
		c->dcache.linesz = 32;
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_PREFETCH;
		break;
	case CPU_VR4133:
		write_c0_config(config & ~VR41_CONF_P4K);
	case CPU_VR4131:
		/* Workaround for cache instruction bug of VR4131 */
		if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
		    c->processor_id == 0x0c82U) {
			config |= 0x00400000U;
			if (c->processor_id == 0x0c80U)
				config |= VR41_CONF_BP;
			write_c0_config(config);
		} else
			c->options |= MIPS_CPU_CACHE_CDEX_P;

		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);
		break;
	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;
	case CPU_RM7000:
		rm7k_erratum31();

	case CPU_RM9000:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = __ffs(icache_size / c->icache.ways);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);

#if !defined(CONFIG_SMP) || !defined(RM9000_CDEX_SMP_WAR)
		c->options |= MIPS_CPU_CACHE_CDEX_P;
#endif
		c->options |= MIPS_CPU_PREFETCH;
		break;
	case CPU_LOONGSON2:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		if (prid & 0x3)
			c->icache.ways = 4;
		else
			c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		if (prid & 0x3)
			c->dcache.ways = 4;
		else
			c->dcache.ways = 2;
		c->dcache.waybit = 0;
		break;
	default:
		if (!(config & MIPS_CONF_M))
			panic("Don't know how to probe P-caches on this cpu.");

		/*
		 * So we seem to be a MIPS32 or MIPS64 CPU
		 * So let's probe the I-cache ...
		 */
		config1 = read_c0_config1();

		if ((lsize = ((config1 >> 19) & 7)))
			c->icache.linesz = 2 << lsize;
		else
			c->icache.linesz = lsize;
		c->icache.sets = 64 << ((config1 >> 22) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);

		icache_size = c->icache.sets *
			      c->icache.ways *
			      c->icache.linesz;
		c->icache.waybit = __ffs(icache_size/c->icache.ways);

		if (config & 0x8)		/* VI bit */
			c->icache.flags |= MIPS_CACHE_VTAG;

		/*
		 * Now probe the MIPS32 / MIPS64 data cache.
		 */
		c->dcache.flags = 0;

		if ((lsize = ((config1 >> 10) & 7)))
			c->dcache.linesz = 2 << lsize;
		else
			c->dcache.linesz = lsize;
		c->dcache.sets = 64 << ((config1 >> 13) & 7);
		c->dcache.ways = 1 + ((config1 >> 7) & 7);

		dcache_size = c->dcache.sets *
			      c->dcache.ways *
			      c->dcache.linesz;
		c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);

		c->options |= MIPS_CPU_PREFETCH;
		break;
	}
	/*
	 * Processor configuration sanity check for the R4000SC erratum
	 * #5.  With page sizes larger than 32kB there is no possibility
	 * to get a VCE exception anymore so we don't care about this
	 * misconfiguration.  The case is rather theoretical anyway;
	 * presumably no vendor is shipping its hardware in the "bad"
	 * configuration.
	 */
	if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 &&
	    !(config & CONF_SC) && c->icache.linesz != 16 &&
	    PAGE_SIZE <= 0x8000)
		panic("Improper R4000SC processor configuration detected");
	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = c->icache.linesz ?
		icache_size / (c->icache.linesz * c->icache.ways) : 0;
	c->dcache.sets = c->dcache.linesz ?
		dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;
	/*
	 * R10000 and R12000 P-caches are odd in a positive way.  They're 32kB
	 * 2-way virtually indexed so normally they'd suffer from aliases, but
	 * magic in the hardware deals with that for us so we don't need to
	 * take care ourselves.
	 */
	switch (c->cputype) {
	case CPU_20KC:
	case CPU_25KF:
	case CPU_SB1:
	case CPU_SB1A:
		c->dcache.flags |= MIPS_CACHE_PINDEX;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		break;

	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
	case CPU_1004K:
		if ((read_c0_config7() & (1 << 16))) {
			/* effectively physically indexed dcache,
			   thus no virtual aliases. */
			c->dcache.flags |= MIPS_CACHE_PINDEX;
			break;
		}
	default:
		if (c->dcache.waysize > PAGE_SIZE)
			c->dcache.flags |= MIPS_CACHE_ALIASES;
	}
	switch (c->cputype) {
	case CPU_20KC:
		/*
		 * Some older 20Kc chips don't have the 'VI' bit in
		 * the config register.
		 */
		c->icache.flags |= MIPS_CACHE_VTAG;
		break;

	case CPU_ALCHEMY:
		c->icache.flags |= MIPS_CACHE_IC_F_DC;
		break;
	}
#ifdef CONFIG_CPU_LOONGSON2
	/*
	 * LOONGSON2 has 4 way icache, but when using indexed cache op,
	 * one op will act on all 4 ways
	 */
	c->icache.ways = 1;
#endif

	printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
	       icache_size >> 10,
	       c->icache.flags & MIPS_CACHE_VTAG ? "VIVT" : "VIPT",
	       way_string[c->icache.ways], c->icache.linesz);
	printk("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
	       dcache_size >> 10, way_string[c->dcache.ways],
	       (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
	       (c->dcache.flags & MIPS_CACHE_ALIASES) ?
			"cache aliases" : "no aliases",
	       c->dcache.linesz);
}
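/*
 * Added illustrative sketch (not from the original file): the Config1
 * decode used in probe_pcache() above, extracted as a tiny helper.
 * The shifts follow the code above; the example values are assumptions.
 */
static inline unsigned long example_icache_size(unsigned int config1)
{
	unsigned long lsize = (config1 >> 19) & 7;
	unsigned long linesz = lsize ? 2 << lsize : 0;	/* e.g. 4 -> 32 bytes */
	unsigned long sets = 64 << ((config1 >> 22) & 7);	/* e.g. 2 -> 256 */
	unsigned long ways = 1 + ((config1 >> 16) & 7);	/* e.g. 3 -> 4 */

	return sets * ways * linesz;	/* 256 * 4 * 32 = 32kB */
}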
/*
 * If you even _breathe_ on this function, look at the gcc output and make sure
 * it does not pop things on and off the stack for the cache sizing loop that
 * executes in KSEG1 space or else you will crash and burn badly.  You have
 * been warned.
 */
static int __cpuinit probe_scache(void)
{
	unsigned long flags, addr, begin, end, pow2;
	unsigned int config = read_c0_config();
	struct cpuinfo_mips *c = &current_cpu_data;
	if (config & CONF_SC)
		return 0;

	begin = (unsigned long) &_stext;
	begin &= ~((4 * 1024 * 1024) - 1);
	end = begin + (4 * 1024 * 1024);
	/*
	 * This is such a bitch, you'd think they would make it easy to do
	 * this.  Away you daemons of stupidity!
	 */
	local_irq_save(flags);

	/* Fill each size-multiple cache line with a valid tag. */
	pow2 = (64 * 1024);
	for (addr = begin; addr < end; addr = (begin + pow2)) {
		unsigned long *p = (unsigned long *) addr;
		__asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
		pow2 <<= 1;
	}
	/* Load first line with zero (therefore invalid) tag. */
	write_c0_taglo(0);
	write_c0_taghi(0);
	__asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
	cache_op(Index_Store_Tag_I, begin);
	cache_op(Index_Store_Tag_D, begin);
	cache_op(Index_Store_Tag_SD, begin);
	/* Now search for the wrap around point. */
	pow2 = (128 * 1024);
	for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
		cache_op(Index_Load_Tag_SD, addr);
		__asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
		if (!read_c0_taglo())
			break;
		pow2 <<= 1;
	}
	local_irq_restore(flags);
	addr -= begin;

	scache_size = addr;
	c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
	c->scache.ways = 1;
	c->dcache.waybit = 0;	/* does not matter */

	return 1;
}
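/*
 * Added note on the sizing trick above (illustrative): every address
 * that might alias index 0 was first filled with a valid tag, then
 * index 0 of the S-cache was overwritten with a zero tag.  Walking
 * begin + 128k, begin + 256k, ... and index-loading the tag, the first
 * address whose tag reads back as zero wraps onto index 0 again, so
 * (addr - begin) is the S-cache size.
 */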
#if defined(CONFIG_CPU_LOONGSON2)
static void __init loongson2_sc_init(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;

	scache_size = 512*1024;
	c->scache.linesz = 32;
	c->scache.ways = 4;
	c->scache.waybit = 0;
	c->scache.waysize = scache_size / (c->scache.ways);
	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
	pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
#endif
extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
extern int mips_sc_init(void);
static void __cpuinit setup_scache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	int sc_present = 0;

	/*
	 * Do the probing thing on R4000SC and R4400SC processors.  Other
	 * processors don't have a S-cache that would be relevant to the
	 * Linux memory management.
	 */
	switch (c->cputype) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		sc_present = run_uncached(probe_scache);
		if (sc_present)
			c->options |= MIPS_CPU_CACHE_CDEX_S;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
		c->scache.linesz = 64 << ((config >> 13) & 1);
		c->scache.ways = 2;
		c->scache.waybit = 0;
		sc_present = 1;
		break;

	case CPU_R5000:
	case CPU_NEVADA:
#ifdef CONFIG_R5000_CPU_SCACHE
		r5k_sc_init();
#endif
		return;

	case CPU_RM7000:
	case CPU_RM9000:
#ifdef CONFIG_RM7000_CPU_SCACHE
		rm7k_sc_init();
#endif
		return;

#if defined(CONFIG_CPU_LOONGSON2)
	case CPU_LOONGSON2:
		loongson2_sc_init();
		return;
#endif

	default:
		sc_present = 0;
	}

	if (!sc_present)
		return;
	if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
	    c->isa_level == MIPS_CPU_ISA_M32R2 ||
	    c->isa_level == MIPS_CPU_ISA_M64R1 ||
	    c->isa_level == MIPS_CPU_ISA_M64R2) {
#ifdef CONFIG_MIPS_CPU_SCACHE
		if (mips_sc_init()) {
			scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
			printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
			       scache_size >> 10,
			       way_string[c->scache.ways], c->scache.linesz);
		}
#else
		if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
			panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
#endif
		return;
	}
	/* compute a couple of other cache variables */
	c->scache.waysize = scache_size / c->scache.ways;

	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);

	printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
void au1x00_fixup_config_od(void)
{
	/*
	 * c0_config.od (bit 19) was write only (and read as 0)
	 * on the early revisions of Alchemy SOCs.  It disables the bus
	 * transaction overlapping and needs to be set to fix various errata.
	 */
	switch (read_c0_prid()) {
	case 0x00030100: /* Au1000 DA */
	case 0x00030201: /* Au1000 HA */
	case 0x00030202: /* Au1000 HB */
	case 0x01030200: /* Au1500 AB */
	/*
	 * The Au1100 errata are actually silent about this bit, so we set
	 * it just in case for those revisions that require it to be set
	 * according to the (now gone) cpu table.
	 */
	case 0x02030200: /* Au1100 AB */
	case 0x02030201: /* Au1100 BA */
	case 0x02030202: /* Au1100 BC */
		set_c0_config(1 << 19);
		break;
	}
}
/* CP0 hazard avoidance. */
#define NXP_BARRIER()							\
	 __asm__ __volatile__(						\
	".set noreorder\n\t"						\
	"nop; nop; nop; nop; nop; nop;\n\t"				\
	".set reorder\n\t")
static void nxp_pr4450_fixup_config(void)
{
	unsigned long config0;

	config0 = read_c0_config();

	/* clear all three cache coherency fields */
	config0 &= ~(0x7 | (7 << 25) | (7 << 28));
	config0 |= (((_page_cachable_default >> _CACHE_SHIFT) <<  0) |
		    ((_page_cachable_default >> _CACHE_SHIFT) << 25) |
		    ((_page_cachable_default >> _CACHE_SHIFT) << 28));
	write_c0_config(config0);
	NXP_BARRIER();
}
static int __cpuinitdata cca = -1;

static int __init cca_setup(char *str)
{
	get_option(&str, &cca);

	return 1;
}

__setup("cca=", cca_setup);
static void __cpuinit coherency_setup(void)
{
	if (cca < 0 || cca > 7)
		cca = read_c0_config() & CONF_CM_CMASK;
	_page_cachable_default = cca << _CACHE_SHIFT;

	pr_debug("Using cache attribute %d\n", cca);
	change_c0_config(CONF_CM_CMASK, cca);
	/*
	 * c0_status.cu=0 specifies that updates by the sc instruction use
	 * the coherency mode specified by the TLB; 1 means cachable
	 * coherent update on write will be used.  Not all processors have
	 * this bit and some wire it to zero, others like Toshiba had the
	 * silly idea of putting something else there ...
	 */
	switch (current_cpu_type()) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		clear_c0_config(CONF_CU);
		break;
	/*
	 * We need to catch the early Alchemy SOCs with
	 * the write-only co_config.od bit and set it back to one on:
	 * Au1000 rev DA, HA, HB;  Au1100 AB, BA, BC, Au1500 AB
	 */
	case CPU_ALCHEMY:
		au1x00_fixup_config_od();
		break;

	case PRID_IMP_PR4450:
		nxp_pr4450_fixup_config();
		break;
	}
}
#if defined(CONFIG_DMA_NONCOHERENT)

static int __cpuinitdata coherentio;

static int __init setcoherentio(char *str)
{
	coherentio = 1;

	return 1;
}

__setup("coherentio", setcoherentio);
#endif
void __cpuinit r4k_cache_init(void)
{
	extern void build_clear_page(void);
	extern void build_copy_page(void);
	extern char __weak except_vec2_generic;
	extern char __weak except_vec2_sb1;
	struct cpuinfo_mips *c = &current_cpu_data;

	switch (c->cputype) {
	case CPU_SB1:
	case CPU_SB1A:
		set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
		break;
	default:
		set_uncached_handler(0x100, &except_vec2_generic, 0x80);
		break;
	}

	probe_pcache();
	setup_scache();
	r4k_blast_dcache_page_setup();
	r4k_blast_dcache_page_indexed_setup();
	r4k_blast_dcache_setup();
	r4k_blast_icache_page_setup();
	r4k_blast_icache_page_indexed_setup();
	r4k_blast_icache_setup();
	r4k_blast_scache_page_setup();
	r4k_blast_scache_page_indexed_setup();
	r4k_blast_scache_setup();
	/*
	 * Some MIPS32 and MIPS64 processors have physically indexed caches.
	 * This code supports virtually indexed processors and will be
	 * unnecessarily inefficient on physically indexed processors.
	 */
	if (c->dcache.linesz)
		shm_align_mask = max_t(unsigned long,
				       c->dcache.sets * c->dcache.linesz - 1,
				       PAGE_SIZE - 1);
	else
		shm_align_mask = PAGE_SIZE - 1;
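	/*
	 * Added worked example (illustrative): a 32kB, 4-way d-cache with
	 * 32-byte lines has 256 sets, so sets * linesz = 8kB (one way).
	 * With 4kB pages shm_align_mask becomes max(0x1fff, 0xfff) =
	 * 0x1fff, aligning shared mappings to 8kB to avoid virtual
	 * aliases.
	 */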
	__flush_cache_vmap = r4k__flush_cache_vmap;
	__flush_cache_vunmap = r4k__flush_cache_vunmap;

	flush_cache_all = cache_noop;
	__flush_cache_all = r4k___flush_cache_all;
	flush_cache_mm = r4k_flush_cache_mm;
	flush_cache_page = r4k_flush_cache_page;
	flush_cache_range = r4k_flush_cache_range;

	flush_cache_sigtramp = r4k_flush_cache_sigtramp;
	flush_icache_all = r4k_flush_icache_all;
	local_flush_data_cache_page = local_r4k_flush_data_cache_page;
	flush_data_cache_page = r4k_flush_data_cache_page;
	flush_icache_range = r4k_flush_icache_range;
	local_flush_icache_range = local_r4k_flush_icache_range;
#if defined(CONFIG_DMA_NONCOHERENT)
	if (coherentio) {
		_dma_cache_wback_inv = (void *)cache_noop;
		_dma_cache_wback = (void *)cache_noop;
		_dma_cache_inv = (void *)cache_noop;
	} else {
		_dma_cache_wback_inv = r4k_dma_cache_wback_inv;
		_dma_cache_wback = r4k_dma_cache_wback_inv;
		_dma_cache_inv = r4k_dma_cache_inv;
	}
#endif
	build_clear_page();
	build_copy_page();
#if !defined(CONFIG_MIPS_CMP)
	local_r4k___flush_cache_all(NULL);
#endif
	coherency_setup();
}