/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2003, 07 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007 MIPS Technologies, Inc.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/cpu-features.h>

/* Cache operations. */
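/*
 * These pointers are filled in at boot by the CPU-specific cache code
 * selected in cpu_cache_init() below, so callers throughout the kernel can
 * use one set of hooks regardless of the underlying cache architecture.
 */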
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
	unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
	unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);

/* MIPS specific cache operations */
void (*flush_cache_sigtramp)(unsigned long addr);
void (*local_flush_data_cache_page)(void *addr);
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);

EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
EXPORT_SYMBOL(flush_data_cache_page);

#ifdef CONFIG_DMA_NONCOHERENT

/* DMA cache operations. */
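/*
 * On DMA-noncoherent systems these hooks provide the three classic
 * maintenance operations over an address range: write back dirty lines and
 * invalidate them, write back only, and invalidate only.
 */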
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
void (*_dma_cache_wback)(unsigned long start, unsigned long size);
void (*_dma_cache_inv)(unsigned long start, unsigned long size);

EXPORT_SYMBOL(_dma_cache_wback_inv);
EXPORT_SYMBOL(_dma_cache_wback);
EXPORT_SYMBOL(_dma_cache_inv);

#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * We could optimize the case where the cache argument is not BCACHE, but
 * that seems to be a very atypical use ...
 */
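/*
 * cacheflush(2): make the user range [addr, addr + bytes) coherent between
 * the data and instruction caches.  A JIT or self-modifying program would
 * typically do something like the following (illustrative sketch only, using
 * the userspace wrapper and the ICACHE/DCACHE/BCACHE flags from
 * <asm/cachectl.h>):
 *
 *	memcpy(code_buf, insns, len);
 *	cacheflush(code_buf, len, BCACHE);
 *	((void (*)(void))code_buf)();
 */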
asmlinkage int sys_cacheflush(unsigned long addr,
	unsigned long bytes, unsigned int cache)
{
	struct vm_area_struct *vma;

	if (!access_ok(VERIFY_WRITE, (void __user *) addr, bytes))
		return -EFAULT;

	vma = find_vma(current->mm, (unsigned long) addr);
	flush_cache_range(vma, (unsigned long) addr, (unsigned long) addr + bytes);

	flush_icache_range(addr, addr + bytes);

	return 0;
}

void *kmap_atomic_page_address(struct page *page);
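
/*
 * Flush the kernel mapping of @page from the data cache.  With virtually
 * indexed caches the kernel and user views of a page can live in different
 * cache lines, so the kernel alias has to be written back before the data
 * is assumed to be visible elsewhere.
 */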
void __flush_dcache_page(struct page *page)
{
	unsigned long addr;

	if (PageHighMem(page)) {
		addr = (unsigned long) kmap_atomic_page_address(page);
		flush_data_cache_page(addr);
		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too.  But that
	 * case is for exec env/arg pages and those are 99% certain to get
	 * faulted into the TLB (and thus flushed) anyway.
	 */
	addr = (unsigned long) page_address(page);

	flush_data_cache_page(addr);
}

EXPORT_SYMBOL(__flush_dcache_page);
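
/*
 * An anonymous page may still be dirty in cache lines indexed by its user
 * mapping at @vmaddr.  Before the kernel reads the page through its own
 * mapping, write the data back so both views agree.
 */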
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (page_mapped(page) && !Page_dcache_dirty(page)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			flush_data_cache_page((unsigned long) kaddr);
			kunmap_coherent();
		} else
			flush_data_cache_page(addr);
	}
}

EXPORT_SYMBOL(__flush_anon_page);
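
/*
 * Called when a PTE is installed for @address.  If the page might be
 * executed, or if its kernel alias can collide with the new user mapping in
 * the cache, push the kernel view out so the user mapping and the I-cache
 * see the data.
 */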
void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;
	int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;
	page = pfn_to_page(pfn);
	if (page_mapping(page)) {
		addr = (unsigned long) page_address(page);
		if (exec || pages_do_alias(addr, address & PAGE_MASK))
			flush_data_cache_page(addr);
	}
}

static char cache_panic[] __cpuinitdata =
	"Yeee, unsupported cache architecture.";
void __init cpu_early_probe_cache(void)
{
	if (cpu_has_4k_cache) {
		extern void __weak r4k_probe_cache(void);

		return r4k_probe_cache();
	}
}
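
/*
 * Select the cache implementation for this CPU.  The rXk_cache_init()
 * routines are declared __weak so the references still link when a given
 * cache flavour is not built in; the cpu_has_* tests make sure only the
 * variant that is actually present gets called.
 */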
void __cpuinit cpu_cache_init(void)
{
	if (cpu_has_3k_cache) {
		extern void __weak r3k_cache_init(void);

		return r3k_cache_init();
	}

	if (cpu_has_6k_cache) {
		extern void __weak r6k_cache_init(void);

		return r6k_cache_init();
	}

	if (cpu_has_4k_cache) {
		extern void __weak r4k_cache_init(void);

		return r4k_cache_init();
	}

	if (cpu_has_8k_cache) {
		extern void __weak r8k_cache_init(void);

		return r8k_cache_init();
	}

	if (cpu_has_tx39_cache) {
		extern void __weak tx39_cache_init(void);

		return tx39_cache_init();
	}

	if (cpu_has_sb1_cache) {
		extern void __weak sb1_cache_init(void);

		return sb1_cache_init();
	}

	panic(cache_panic);
}