/*
 * include/asm-sh/cpu-sh4/cacheflush.h
 *
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2003 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#ifndef __ASM_CPU_SH4_CACHEFLUSH_H
#define __ASM_CPU_SH4_CACHEFLUSH_H

/*
 * Caches are broken on SH-4 (unless we use write-through
 * caching; in which case they're only semi-broken),
 * so we need them.
 */
void flush_cache_all(void);
void flush_dcache_all(void);
void flush_cache_mm(struct mm_struct *mm);
#define flush_cache_dup_mm(mm)	flush_cache_mm(mm)
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end);
void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
		      unsigned long pfn);
void flush_dcache_page(struct page *pg);
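
/*
 * Illustrative usage sketch (not part of the original header): under the
 * generic kernel cache-flushing rules, a caller that writes through the
 * kernel mapping to a page that may also be mapped into user space is
 * expected to flush the D-cache afterwards, roughly:
 *
 *	memcpy(page_address(page), buf, len);
 *	flush_dcache_page(page);
 */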

#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

void flush_icache_range(unsigned long start, unsigned long end);
void flush_cache_sigtramp(unsigned long addr);
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len);
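
/*
 * Illustrative usage sketch (not part of the original header): code that
 * writes instructions into memory, e.g. a module loader or a code
 * patcher, is expected to flush the modified range before executing it,
 * roughly:
 *
 *	memcpy(addr, new_insns, len);
 *	flush_icache_range((unsigned long)addr, (unsigned long)addr + len);
 */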

#define flush_icache_page(vma,pg)	do { } while (0)

/* Initialization of P3 area for copy_user_page */
void p3_cache_init(void);

#define PG_mapped	PG_arch_1

#endif /* __ASM_CPU_SH4_CACHEFLUSH_H */