linux-2.6/history.git: include/asm-parisc/cacheflush.h
#ifndef _PARISC_CACHEFLUSH_H
#define _PARISC_CACHEFLUSH_H

#include <linux/config.h>
#include <linux/mm.h>

/* The usual comment is "Caches aren't brain-dead on the <architecture>".
 * Unfortunately, that doesn't apply to PA-RISC. */

/* Cache flush operations */

#ifdef CONFIG_SMP
#define flush_cache_mm(mm) flush_cache_all()
#else
#define flush_cache_mm(mm) flush_cache_all_local()
#endif

#define flush_kernel_dcache_range(start,size) \
	flush_kernel_dcache_range_asm((start), (start)+(size));

extern void flush_cache_all_local(void);

static inline void cacheflush_h_tmp_function(void *dummy)
{
	flush_cache_all_local();
}

static inline void flush_cache_all(void)
{
	on_each_cpu(cacheflush_h_tmp_function, NULL, 1, 1);
}

#define flush_cache_vmap(start, end) flush_cache_all()
#define flush_cache_vunmap(start, end) flush_cache_all()

/* The following value needs to be tuned and probably scaled with the
 * cache size.
 */

#define FLUSH_THRESHOLD 0x80000
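
/* 0x80000 bytes is 512 KiB: on UP builds the user range flushes below
 * walk the requested range only when it is shorter than this, and fall
 * back to flushing the whole data or instruction cache otherwise, the
 * assumption being that a full flush is cheaper beyond that point. */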

static inline void
flush_user_dcache_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_SMP
	flush_user_dcache_range_asm(start,end);
#else
	if ((end - start) < FLUSH_THRESHOLD)
		flush_user_dcache_range_asm(start,end);
	else
		flush_data_cache();
#endif
}

static inline void
flush_user_icache_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_SMP
	flush_user_icache_range_asm(start,end);
#else
	if ((end - start) < FLUSH_THRESHOLD)
		flush_user_icache_range_asm(start,end);
	else
		flush_instruction_cache();
#endif
}

extern void __flush_dcache_page(struct page *page);
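
/* Deferred dcache flushing: when the page is not currently mapped into
 * any user address space, flush_dcache_page() below only marks it with
 * PG_dcache_dirty and leaves the flush to be done later, when the page
 * is next mapped into a process; otherwise it flushes immediately via
 * __flush_dcache_page(). */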
static inline void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &page->flags);
	} else {
		__flush_dcache_page(page);
	}
}

#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_page(vma,page) do { flush_kernel_dcache_page(page_address(page)); flush_kernel_icache_page(page_address(page)); } while (0)

#define flush_icache_range(s,e) do { flush_kernel_dcache_range_asm(s,e); flush_kernel_icache_range_asm(s,e); } while (0)

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { memcpy(dst, src, len); \
     flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len); \
} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)
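
/* copy_to_user_page() is the hook used when the kernel writes into a
 * user page through its kernel mapping (e.g. ptrace()-style access to
 * another process), so the kernel dcache range covering the copy is
 * flushed to keep the user-space alias coherent; the read direction in
 * copy_from_user_page() needs no flush here. */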

static inline void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	int sr3;

	if (!vma->vm_mm->context) {
		BUG();
		return;
	}

	sr3 = mfsp(3);
	if (vma->vm_mm->context == sr3) {
		flush_user_dcache_range(start,end);
		flush_user_icache_range(start,end);
	} else {
		flush_cache_all();
	}
}

/* Simple function to work out if we have an existing address translation
 * for a user space vma. */
static inline int translation_exists(struct vm_area_struct *vma,
			unsigned long addr)
{
	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return 0;

	pmd = pmd_offset(pgd, addr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	pte = pte_offset_map(pmd, addr);

	/* The PA flush mappings show up as pte_none, but they're
	 * valid nonetheless */
	if (pte_none(*pte) && ((pte_val(*pte) & _PAGE_FLUSH) == 0))
		return 0;
	return 1;
}

/* Private function to flush a page from the cache of a non-current
 * process.  cr25 contains the Page Directory of the current user
 * process; we're going to hijack both it and the user space %sr3 to
 * temporarily make the non-current process current.  We have to do
 * this because cache flushing may cause a non-access tlb miss which
 * the handlers have to fill in from the pgd of the non-current
 * process. */
static inline void
flush_user_cache_page_non_current(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
	/* save the current process space and pgd */
	unsigned long space = mfsp(3), pgd = mfctl(25);

	/* we don't mind taking interrupts since they may not
	 * do anything with user space, but we can't
	 * be preempted here */
	preempt_disable();

	/* make us current */
	mtctl(__pa(vma->vm_mm->pgd), 25);
	mtsp(vma->vm_mm->context, 3);

	flush_user_dcache_page(vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_user_icache_page(vmaddr);

	/* put the old current process back */
	mtsp(space, 3);
	mtctl(pgd, 25);
	preempt_enable();
}

static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	if (likely(vma->vm_mm->context == mfsp(3))) {
		flush_user_dcache_page(vmaddr);
		if (vma->vm_flags & VM_EXEC)
			flush_user_icache_page(vmaddr);
	} else {
		flush_user_cache_page_non_current(vma, vmaddr);
	}
}

static inline void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	BUG_ON(!vma->vm_mm->context);

	if (likely(translation_exists(vma, vmaddr)))
		__flush_cache_page(vma, vmaddr);
}

#endif