#ifndef _ASM_SCORE_CACHEFLUSH_H
#define _ASM_SCORE_CACHEFLUSH_H

/* Keep includes the same across arches. */
#include <linux/mm.h>

extern void flush_cache_all(void);
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma,
	unsigned long page, unsigned long pfn);
extern void flush_cache_sigtramp(unsigned long addr);
extern void flush_icache_all(void);
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void flush_dcache_range(unsigned long start, unsigned long end);
extern void flush_dcache_page(struct page *page);
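
/*
 * These declarations follow the generic Linux cache flushing interface
 * (Documentation/cachetlb.txt): the flush_cache_*() hooks write back
 * user-space cache state before page tables change, and
 * flush_icache_range() is called after the kernel stores instructions
 * into memory (module loading, breakpoint insertion) so that the
 * instruction cache sees them.
 */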

#define PG_dcache_dirty			PG_arch_1

#define flush_cache_dup_mm(mm)			do {} while (0)
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE	0
#define flush_dcache_mmap_lock(mapping)		do {} while (0)
#define flush_dcache_mmap_unlock(mapping)	do {} while (0)
#define flush_cache_vmap(start, end)		do {} while (0)
#define flush_cache_vunmap(start, end)		do {} while (0)
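
/*
 * The macros above are defined away on this architecture; presumably
 * the Score caches have no aliasing issues that would require work
 * when duplicating an mm or when mapping/unmapping kernel virtual
 * ranges.
 */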

static inline void flush_icache_page(struct vm_area_struct *vma,
	struct page *page)
{
	if (vma->vm_flags & VM_EXEC) {
		void *v = page_address(page);
		flush_icache_range((unsigned long) v,
			(unsigned long) v + PAGE_SIZE);
	}
}
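
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * after writing instructions into an executable page, do the
 * equivalent of
 *
 *	void *v = page_address(page);
 *	flush_icache_range((unsigned long) v, (unsigned long) v + PAGE_SIZE);
 *
 * which is exactly what flush_icache_page() above does for VM_EXEC
 * mappings.
 */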

#define copy_from_user_page(vma, page, vaddr, dst, src, len)	\
	memcpy(dst, src, len)

#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
	do {							\
		memcpy(dst, src, len);				\
		if ((vma->vm_flags & VM_EXEC))			\
			flush_cache_page(vma, vaddr, page_to_pfn(page));\
	} while (0)
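
/*
 * copy_to_user_page() is used by access_process_vm()/ptrace when
 * writing into another task's pages (e.g. planting a breakpoint);
 * executable mappings get flushed so the icache stays coherent with
 * the new instructions, while copy_from_user_page() is a plain memcpy.
 */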

#endif /* _ASM_SCORE_CACHEFLUSH_H */