/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007 MIPS Technologies, Inc.
 */
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>

/* Cache operations. */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
	unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
	unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);
void (*local_flush_icache_range)(unsigned long start, unsigned long end);

void (*__flush_cache_vmap)(void);
void (*__flush_cache_vunmap)(void);

/* MIPS specific cache operations */
void (*flush_cache_sigtramp)(unsigned long addr);
void (*local_flush_data_cache_page)(void *addr);
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);

EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
EXPORT_SYMBOL(flush_data_cache_page);

#ifdef CONFIG_DMA_NONCOHERENT

/* DMA cache operations. */
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
void (*_dma_cache_wback)(unsigned long start, unsigned long size);
void (*_dma_cache_inv)(unsigned long start, unsigned long size);

EXPORT_SYMBOL(_dma_cache_wback_inv);

#endif /* CONFIG_DMA_NONCOHERENT */
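
/*
 * For orientation (a sketch, not code from this file): on a noncoherent
 * platform the streaming-DMA code in arch/mips/mm/dma-default.c pushes
 * buffers through the hooks above, approximately:
 *
 *	if (direction == DMA_TO_DEVICE)
 *		_dma_cache_wback(addr, size);
 *	else if (direction == DMA_FROM_DEVICE)
 *		_dma_cache_inv(addr, size);
 *	else
 *		_dma_cache_wback_inv(addr, size);
 */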

/*
 * We could optimize the case where the cache argument is not BCACHE but
 * that seems very atypical use ...
 */
SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
	unsigned int, cache)
{
	if (bytes == 0)
		return 0;
	if (!access_ok(VERIFY_WRITE, (void __user *) addr, bytes))
		return -EFAULT;

	flush_icache_range(addr, addr + bytes);

	return 0;
}
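
/*
 * For illustration (not part of the original file): userspace, e.g. a JIT,
 * would normally reach this syscall through the libc wrapper declared in
 * <sys/cachectl.h>, with the cache constants from <asm/cachectl.h>:
 *
 *	memcpy(code, insns, len);
 *	if (cacheflush(code, len, BCACHE) < 0)
 *		perror("cacheflush");
 *
 * As the comment above notes, the cache argument is effectively ignored
 * today; flush_icache_range() gives BCACHE behaviour in all cases.
 */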

void __flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long addr;

	if (PageHighMem(page))
		return;
	if (mapping && !mapping_mapped(mapping)) {
		SetPageDcacheDirty(page);
		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too.  But that
	 * case is for exec env/arg pages and those are 99% certain to get
	 * faulted into the tlb (and thus flushed) anyway.
	 */
	addr = (unsigned long) page_address(page);
	flush_data_cache_page(addr);
}

EXPORT_SYMBOL(__flush_dcache_page);
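
/*
 * The dirty bit set above implements a lazy flush: pages that are not
 * mapped into any user address space are merely marked here, and the
 * real writeback is deferred until __update_cache() below sees the page
 * being faulted into a user mapping.
 */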

void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (page_mapped(page) && !Page_dcache_dirty(page)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			flush_data_cache_page((unsigned long)kaddr);
			kunmap_coherent();
		} else
			flush_data_cache_page(addr);
	}
}

EXPORT_SYMBOL(__flush_anon_page);
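
/*
 * pages_do_alias() decides whether two virtual addresses of the same
 * physical page can occupy distinct lines of a virtually indexed cache.
 * Assuming the usual MIPS definition, it boils down to
 *
 *	(addr1 ^ addr2) & shm_align_mask
 *
 * i.e. the addresses alias whenever they differ within the cache's
 * index bits above PAGE_SHIFT.
 */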

void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;
	int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;
	page = pfn_to_page(pfn);
	if (page_mapping(page) && Page_dcache_dirty(page)) {
		addr = (unsigned long) page_address(page);
		if (exec || pages_do_alias(addr, address & PAGE_MASK))
			flush_data_cache_page(addr);
		ClearPageDcacheDirty(page);
	}
}

unsigned long _page_cachable_default;
EXPORT_SYMBOL(_page_cachable_default);
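
/*
 * protection_map[] is indexed by the low vm_flags bits: bit 0 is VM_READ,
 * bit 1 VM_WRITE, bit 2 VM_EXEC and bit 3 VM_SHARED.  Index 11 (shared,
 * writable, readable), for example, gets a writable but non-executable
 * pgprot in the RIXI case below.
 */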
static inline void setup_protection_map(void)
{
	if (kernel_uses_smartmips_rixi) {
		protection_map[0]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[1]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[2]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[3]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[4]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
		protection_map[5]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[6]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
		protection_map[7]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);

		protection_map[8]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[9]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
		protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
		protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
		protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE | _PAGE_NO_READ);
		protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
	} else {
		protection_map[0] = PAGE_NONE;
		protection_map[1] = PAGE_READONLY;
		protection_map[2] = PAGE_COPY;
		protection_map[3] = PAGE_COPY;
		protection_map[4] = PAGE_READONLY;
		protection_map[5] = PAGE_READONLY;
		protection_map[6] = PAGE_COPY;
		protection_map[7] = PAGE_COPY;
		protection_map[8] = PAGE_NONE;
		protection_map[9] = PAGE_READONLY;
		protection_map[10] = PAGE_SHARED;
		protection_map[11] = PAGE_SHARED;
		protection_map[12] = PAGE_READONLY;
		protection_map[13] = PAGE_READONLY;
		protection_map[14] = PAGE_SHARED;
		protection_map[15] = PAGE_SHARED;
	}
}

void __cpuinit cpu_cache_init(void)
{
	if (cpu_has_3k_cache) {
		extern void __weak r3k_cache_init(void);

		r3k_cache_init();
	}
	if (cpu_has_6k_cache) {
		extern void __weak r6k_cache_init(void);

		r6k_cache_init();
	}
	if (cpu_has_4k_cache) {
		extern void __weak r4k_cache_init(void);

		r4k_cache_init();
	}
	if (cpu_has_8k_cache) {
		extern void __weak r8k_cache_init(void);

		r8k_cache_init();
	}
	if (cpu_has_tx39_cache) {
		extern void __weak tx39_cache_init(void);

		tx39_cache_init();
	}
	if (cpu_has_octeon_cache) {
		extern void __weak octeon_cache_init(void);

		octeon_cache_init();
	}

	setup_protection_map();
}

int __weak __uncached_access(struct file *file, unsigned long addr)
{
	if (file->f_flags & O_DSYNC)
		return 1;

	return addr >= __pa(high_memory);
}