arch/mips/mm/cache.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007 MIPS Technologies, Inc.
 */
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>

/* Cache operations. */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
	unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
	unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);
void (*local_flush_icache_range)(unsigned long start, unsigned long end);

void (*__flush_cache_vmap)(void);
void (*__flush_cache_vunmap)(void);

void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
void (*__invalidate_kernel_vmap_range)(unsigned long vaddr, int size);

EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range);
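
/*
 * These are function pointers rather than direct calls because the
 * right implementation depends on the CPU family; cpu_cache_init()
 * below installs them at boot via the matching *_cache_init() routine.
 */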

/* MIPS specific cache operations */
void (*flush_cache_sigtramp)(unsigned long addr);
void (*local_flush_data_cache_page)(void * addr);
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);

EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
EXPORT_SYMBOL(flush_data_cache_page);

#ifdef CONFIG_DMA_NONCOHERENT

/* DMA cache operations. */
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
void (*_dma_cache_wback)(unsigned long start, unsigned long size);
void (*_dma_cache_inv)(unsigned long start, unsigned long size);

EXPORT_SYMBOL(_dma_cache_wback_inv);
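
/*
 * These hooks are what the noncoherent DMA mapping code uses to write
 * back and/or invalidate the CPU caches around dma_map_*()/dma_sync_*()
 * operations (see arch/mips/mm/dma-default.c).
 */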

#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * We could optimize the case where the cache argument is not BCACHE but
 * that seems very atypical use ...
 */
SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
	unsigned int, cache)
{
	if (bytes == 0)
		return 0;
	if (!access_ok(VERIFY_WRITE, (void __user *) addr, bytes))
		return -EFAULT;

	flush_icache_range(addr, addr + bytes);

	return 0;
}
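
/*
 * Example use from userland (a sketch, assuming the usual MIPS libc
 * wrapper and BCACHE constant from <sys/cachectl.h>): a JIT that has
 * just written instructions to a buffer would call
 *
 *	cacheflush(code_buf, code_len, BCACHE);
 *
 * before jumping to it.  As the code above shows, the kernel currently
 * flushes the icache range regardless of the 'cache' argument.
 */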

void __flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long addr;

	if (PageHighMem(page))
		return;
	if (mapping && !mapping_mapped(mapping)) {
		SetPageDcacheDirty(page);
		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too.  But that
	 * case is for exec env/arg pages and those are 99% certain to get
	 * faulted into the tlb (and thus flushed) anyway.
	 */
	addr = (unsigned long) page_address(page);
	flush_data_cache_page(addr);
}

EXPORT_SYMBOL(__flush_dcache_page);
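
/*
 * Anonymous pages can be mapped at a user address whose cache colour
 * differs from that of the kernel's direct mapping on virtually indexed
 * caches; pages_do_alias() detects that case.  While the page is still
 * mapped and not marked dirty in the D-cache, it is flushed through a
 * matching-colour kernel mapping set up with kmap_coherent() instead.
 */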

void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (page_mapped(page) && !Page_dcache_dirty(page)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			flush_data_cache_page((unsigned long)kaddr);
			kunmap_coherent();
		} else
			flush_data_cache_page(addr);
	}
}

EXPORT_SYMBOL(__flush_anon_page);
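
/*
 * Called (via update_mmu_cache()) when a PTE is being installed; this
 * is where a D-cache flush deferred by __flush_dcache_page() is carried
 * out, and where I-cache coherence is handled for executable mappings
 * on cores whose I-cache fetches do not snoop the D-cache.
 */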

void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;
	int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;
	page = pfn_to_page(pfn);
	if (page_mapping(page) && Page_dcache_dirty(page)) {
		addr = (unsigned long) page_address(page);
		if (exec || pages_do_alias(addr, address & PAGE_MASK))
			flush_data_cache_page(addr);
		ClearPageDcacheDirty(page);
	}
}

unsigned long _page_cachable_default;
EXPORT_SYMBOL(_page_cachable_default);
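
/*
 * protection_map[] is indexed by the four low vm_flags bits: bit 0 is
 * VM_READ, bit 1 VM_WRITE, bit 2 VM_EXEC and bit 3 VM_SHARED, so
 * entries 0-7 describe private mappings and entries 8-15 shared ones.
 * Cores with the RIXI extension get entries built from explicit
 * no-exec/no-read bits; everything else uses the generic PAGE_*
 * protections.
 */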

static inline void setup_protection_map(void)
{
	if (kernel_uses_smartmips_rixi) {
		protection_map[0] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[1] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[2] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[3] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
		protection_map[5] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
		protection_map[7] = __pgprot(_page_cachable_default | _PAGE_PRESENT);

		protection_map[8] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[9] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
		protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
		protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
		protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE | _PAGE_NO_READ);
		protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);

	} else {
		protection_map[0] = PAGE_NONE;
		protection_map[1] = PAGE_READONLY;
		protection_map[2] = PAGE_COPY;
		protection_map[3] = PAGE_COPY;
		protection_map[4] = PAGE_READONLY;
		protection_map[5] = PAGE_READONLY;
		protection_map[6] = PAGE_COPY;
		protection_map[7] = PAGE_COPY;
		protection_map[8] = PAGE_NONE;
		protection_map[9] = PAGE_READONLY;
		protection_map[10] = PAGE_SHARED;
		protection_map[11] = PAGE_SHARED;
		protection_map[12] = PAGE_READONLY;
		protection_map[13] = PAGE_READONLY;
		protection_map[14] = PAGE_SHARED;
		protection_map[15] = PAGE_SHARED;
	}
}

void __cpuinit cpu_cache_init(void)
{
	if (cpu_has_3k_cache) {
		extern void __weak r3k_cache_init(void);

		r3k_cache_init();
	}
	if (cpu_has_6k_cache) {
		extern void __weak r6k_cache_init(void);

		r6k_cache_init();
	}
	if (cpu_has_4k_cache) {
		extern void __weak r4k_cache_init(void);

		r4k_cache_init();
	}
	if (cpu_has_8k_cache) {
		extern void __weak r8k_cache_init(void);

		r8k_cache_init();
	}
	if (cpu_has_tx39_cache) {
		extern void __weak tx39_cache_init(void);

		tx39_cache_init();
	}
	if (cpu_has_octeon_cache) {
		extern void __weak octeon_cache_init(void);

		octeon_cache_init();
	}

	setup_protection_map();
}
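
/*
 * Weak default policy for deciding whether a physical address should be
 * mapped uncached (e.g. when mmapping /dev/mem); platforms can override
 * it.  The default requests uncached access for O_DSYNC opens and for
 * addresses beyond the end of RAM.
 */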

int __weak __uncached_access(struct file *file, unsigned long addr)
{
	if (file->f_flags & O_DSYNC)
		return 1;

	return addr >= __pa(high_memory);
}