1 #ifndef _PARISC_CACHEFLUSH_H
2 #define _PARISC_CACHEFLUSH_H
4 #include <linux/config.h>
7 /* The usual comment is "Caches aren't brain-dead on the <architecture>".
8 * Unfortunately, that doesn't apply to PA-RISC. */
10 /* Cache flush operations */
/* flush_cache_mm(): flush all caches on behalf of a dying/changing mm.
 * On SMP every CPU's caches must be flushed (flush_cache_all() reaches
 * the other CPUs); on UP flushing the local caches is sufficient. */
#ifdef CONFIG_SMP
#define flush_cache_mm(mm) flush_cache_all()
#else
#define flush_cache_mm(mm) flush_cache_all_local()
#endif

/* Flush a kernel-virtual dcache range.  The asm helper takes (start, end),
 * so convert the (start, size) arguments here.  Note: no trailing ';' in
 * the expansion — the caller supplies it, and a stray one would break
 * brace-less if/else bodies. */
#define flush_kernel_dcache_range(start,size) \
	flush_kernel_dcache_range_asm((start), (start)+(size))
/* Flushes the caches of the CPU it runs on; implemented in arch code. */
extern void flush_cache_all_local(void);

/* Trampoline with the (void *) signature that on_each_cpu() expects;
 * the dummy argument is ignored. */
static inline void cacheflush_h_tmp_function(void *dummy)
{
	flush_cache_all_local();
}
28 static inline void flush_cache_all(void)
30 on_each_cpu(cacheflush_h_tmp_function
, NULL
, 1, 1);
/* The following value needs to be tuned and probably scaled with the
 * cache size: ranges shorter than this are flushed line by line, longer
 * ones by flushing the entire cache (see flush_user_*cache_range below).
 */
#define FLUSH_THRESHOLD 0x80000
40 flush_user_dcache_range(unsigned long start
, unsigned long end
)
43 flush_user_dcache_range_asm(start
,end
);
45 if ((end
- start
) < FLUSH_THRESHOLD
)
46 flush_user_dcache_range_asm(start
,end
);
53 flush_user_icache_range(unsigned long start
, unsigned long end
)
56 flush_user_icache_range_asm(start
,end
);
58 if ((end
- start
) < FLUSH_THRESHOLD
)
59 flush_user_icache_range_asm(start
,end
);
61 flush_instruction_cache();
65 extern void __flush_dcache_page(struct page
*page
);
67 static inline void flush_dcache_page(struct page
*page
)
69 if (page
->mapping
&& list_empty(&page
->mapping
->i_mmap
) &&
70 list_empty(&page
->mapping
->i_mmap_shared
)) {
71 set_bit(PG_dcache_dirty
, &page
->flags
);
73 __flush_dcache_page(page
);
/* Make newly written instructions visible: flush the kernel mapping's
 * dcache (push the data out) then the icache for the same page. */
#define flush_icache_page(vma,page) do { flush_kernel_dcache_page(page_address(page)); flush_kernel_icache_page(page_address(page)); } while (0)

/* Same dcache-then-icache sequence for an arbitrary kernel range. */
#define flush_icache_range(s,e) do { flush_kernel_dcache_range_asm(s,e); flush_kernel_icache_range_asm(s,e); } while (0)

/* addr/len are unused on parisc: the whole page is flushed. */
#define flush_icache_user_range(vma, page, addr, len) \
	flush_icache_page((vma), (page))
84 static inline void flush_cache_range(struct vm_area_struct
*vma
,
85 unsigned long start
, unsigned long end
)
89 if (!vma
->vm_mm
->context
) {
95 if (vma
->vm_mm
->context
== sr3
) {
96 flush_user_dcache_range(start
,end
);
97 flush_user_icache_range(start
,end
);
104 flush_cache_page(struct vm_area_struct
*vma
, unsigned long vmaddr
)
108 if (!vma
->vm_mm
->context
) {
114 if (vma
->vm_mm
->context
== sr3
) {
115 flush_user_dcache_range(vmaddr
,vmaddr
+ PAGE_SIZE
);
116 if (vma
->vm_flags
& VM_EXEC
)
117 flush_user_icache_range(vmaddr
,vmaddr
+ PAGE_SIZE
);
119 if (vma
->vm_flags
& VM_EXEC
)