/*
 *  arch/arm/include/asm/cacheflush.h
 *
 *  Copyright (C) 1999-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H

#include <linux/mm.h>

#include <asm/glue.h>
#include <asm/shmparam.h>
#include <asm/cachetype.h>
#define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
/*
 *	Cache Model
 *	===========
 */
#undef _CACHE
#undef MULTI_CACHE

#if defined(CONFIG_CPU_CACHE_V3)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v3
# endif
#endif

#if defined(CONFIG_CPU_CACHE_V4)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4
# endif
#endif

#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \
    defined(CONFIG_CPU_ARM1026)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_FA526)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE fa
# endif
#endif

#if defined(CONFIG_CPU_ARM926T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm926
# endif
#endif

#if defined(CONFIG_CPU_ARM940T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm940
# endif
#endif

#if defined(CONFIG_CPU_ARM946E)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm946
# endif
#endif

#if defined(CONFIG_CPU_CACHE_V4WB)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4wb
# endif
#endif

#if defined(CONFIG_CPU_XSCALE)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE xscale
# endif
#endif

#if defined(CONFIG_CPU_XSC3)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE xsc3
# endif
#endif

#if defined(CONFIG_CPU_MOHAWK)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE mohawk
# endif
#endif

#if defined(CONFIG_CPU_FEROCEON)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_V6)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v6
# endif
#endif

#if defined(CONFIG_CPU_V7)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v7
# endif
#endif

#if !defined(_CACHE) && !defined(MULTI_CACHE)
#error Unknown cache maintenance model
#endif
/*
 * This flag is used to indicate that the page pointed to by a pte
 * is dirty and requires cleaning before returning it to the user.
 */
#define PG_dcache_dirty PG_arch_1
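
/*
 * Illustrative sketch, not part of this header: the deferred-flush
 * idiom this flag supports.  flush_dcache_page() can mark a page that
 * currently has only a kernel mapping as dirty instead of flushing at
 * once; the flush then happens when a user mapping is established.
 * The helper below is hypothetical and only demonstrates the test/set
 * pattern:
 *
 *	static void example_flush_or_defer(struct page *page)
 *	{
 *		struct address_space *mapping = page_mapping(page);
 *
 *		if (mapping && !mapping_mapped(mapping))
 *			set_bit(PG_dcache_dirty, &page->flags);
 *		else
 *			__cpuc_flush_dcache_area(page_address(page),
 *						 PAGE_SIZE);
 *	}
 */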
/*
 *	MM Cache Management
 *	===================
 *
 *	The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
 *	implement these methods.
 *
 *	Start addresses are inclusive and end addresses are exclusive;
 *	start addresses should be rounded down, end addresses up.
 *
 *	See Documentation/cachetlb.txt for more information.
 *	Please note that the implementation of these, and the required
 *	effects are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *	flush_kern_all()
 *
 *		Unconditionally clean and invalidate the entire cache.
 *
 *	flush_user_all()
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_user_range(start, end, flags)
 *
 *		Clean and invalidate a range of cache entries in the
 *		specified address space before a change of page tables.
 *		- start - user start address (inclusive, page aligned)
 *		- end   - user end address   (exclusive, page aligned)
 *		- flags - vma->vm_flags field
 *
 *	coherent_kern_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	coherent_user_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	flush_kern_dcache_area(kaddr, size)
 *
 *		Ensure that the data held in page is written back.
 *		- kaddr  - page address
 *		- size   - region size
 *
 *	DMA Cache Coherency
 *	===================
 *
 *	dma_flush_range(start, end)
 *
 *		Clean and invalidate the specified virtual address range.
 *		- start  - virtual start address
 *		- end    - virtual end address
 */
struct cpu_cache_fns {
	void (*flush_kern_all)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	void (*coherent_kern_range)(unsigned long, unsigned long);
	void (*coherent_user_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_area)(void *, size_t);

	void (*dma_map_area)(const void *, size_t, int);
	void (*dma_unmap_area)(const void *, size_t, int);

	void (*dma_flush_range)(const void *, const void *);
};
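
/*
 * Illustrative sketch, not part of this header: with MULTI_CACHE, each
 * cache type supplies one instance of this structure (in the kernel the
 * entries are implemented in arch/arm/mm/cache-*.S and selected at boot
 * through the processor's proc_info record).  The function names below
 * are hypothetical, chosen only to show the shape of such an instance:
 *
 *	static struct cpu_cache_fns example_cache_fns = {
 *		.flush_kern_all		= example_flush_kern_all,
 *		.flush_user_all		= example_flush_user_all,
 *		.flush_user_range	= example_flush_user_range,
 *		.coherent_kern_range	= example_coherent_kern_range,
 *		.coherent_user_range	= example_coherent_user_range,
 *		.flush_kern_dcache_area	= example_flush_kern_dcache_area,
 *		.dma_map_area		= example_dma_map_area,
 *		.dma_unmap_area		= example_dma_unmap_area,
 *		.dma_flush_range	= example_dma_flush_range,
 *	};
 */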
struct outer_cache_fns {
	void (*inv_range)(unsigned long, unsigned long);
	void (*clean_range)(unsigned long, unsigned long);
	void (*flush_range)(unsigned long, unsigned long);
};
/*
 * Select the calling method
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
#define __cpuc_flush_user_all		cpu_cache.flush_user_all
#define __cpuc_flush_user_range		cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_area	cpu_cache.flush_kern_dcache_area

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_map_area			cpu_cache.dma_map_area
#define dmac_unmap_area			cpu_cache.dma_unmap_area
#define dmac_flush_range		cpu_cache.dma_flush_range
#else

#define __cpuc_flush_kern_all		__glue(_CACHE,_flush_kern_cache_all)
#define __cpuc_flush_user_all		__glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
#define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
#define __cpuc_flush_dcache_area	__glue(_CACHE,_flush_kern_dcache_area)

extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_area(void *, size_t);

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_map_area			__glue(_CACHE,_dma_map_area)
#define dmac_unmap_area			__glue(_CACHE,_dma_unmap_area)
#define dmac_flush_range		__glue(_CACHE,_dma_flush_range)

extern void dmac_map_area(const void *, size_t, int);
extern void dmac_unmap_area(const void *, size_t, int);
extern void dmac_flush_range(const void *, const void *);

#endif
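
/*
 * Illustrative note, a sketch rather than part of this header: __glue()
 * from <asm/glue.h> pastes its two arguments together as one token, so
 * when a single cache model is selected, e.g. "#define _CACHE arm926",
 * the line
 *
 *	#define __cpuc_flush_kern_all __glue(_CACHE,_flush_kern_cache_all)
 *
 * makes a call such as __cpuc_flush_kern_all() resolve at compile time
 * to arm926_flush_kern_cache_all(), the routine implemented in
 * arch/arm/mm/proc-arm926.S, with no indirection through cpu_cache.
 */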
#ifdef CONFIG_OUTER_CACHE

extern struct outer_cache_fns outer_cache;

static inline void outer_inv_range(unsigned long start, unsigned long end)
{
	if (outer_cache.inv_range)
		outer_cache.inv_range(start, end);
}
static inline void outer_clean_range(unsigned long start, unsigned long end)
{
	if (outer_cache.clean_range)
		outer_cache.clean_range(start, end);
}
static inline void outer_flush_range(unsigned long start, unsigned long end)
{
	if (outer_cache.flush_range)
		outer_cache.flush_range(start, end);
}

#else

static inline void outer_inv_range(unsigned long start, unsigned long end)
{ }
static inline void outer_clean_range(unsigned long start, unsigned long end)
{ }
static inline void outer_flush_range(unsigned long start, unsigned long end)
{ }

#endif
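
/*
 * Illustrative sketch, not part of this header, of how the dma-mapping
 * code pairs inner (CPU) and outer maintenance when mapping a buffer
 * for device DMA.  The inner operations take virtual addresses, while
 * the outer cache is physically addressed.  The helper name is
 * hypothetical:
 *
 *	static void example_map_to_device(void *kaddr, size_t size)
 *	{
 *		unsigned long paddr = __pa(kaddr);
 *
 *		dmac_map_area(kaddr, size, DMA_TO_DEVICE);
 *		outer_clean_range(paddr, paddr + size);
 *	}
 */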
/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
	unsigned long, void *, const void *, unsigned long);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
	} while (0)
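
/*
 * Illustrative sketch, not part of this header: access_process_vm()
 * in mm/memory.c uses these helpers when writing into another
 * process's pages, e.g. ptrace planting a breakpoint.  Roughly:
 *
 *	maddr = kmap(page);
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes);
 *	kunmap(page);
 */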
/*
 * Convert calls to our calling convention.
 */
#define flush_cache_all()		__cpuc_flush_kern_all()
static inline void vivt_flush_cache_mm(struct mm_struct *mm)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_all();
}
static inline void
vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}
static inline void
vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
		unsigned long addr = user_addr & PAGE_MASK;
		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
	}
}
#ifndef CONFIG_CPU_CACHE_VIPT
#define flush_cache_mm(mm) \
		vivt_flush_cache_mm(mm)
#define flush_cache_range(vma,start,end) \
		vivt_flush_cache_range(vma,start,end)
#define flush_cache_page(vma,addr,pfn) \
		vivt_flush_cache_page(vma,addr,pfn)
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
#endif

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
/*
 * flush_cache_user_range is used when we want to ensure that the
 * Harvard caches are synchronised for the user space address range.
 * This is used for the ARM private sys_cacheflush system call.
 */
#define flush_cache_user_range(vma,start,end) \
	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
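
/*
 * Illustrative sketch, not part of this header: a user-space JIT that
 * has just written instructions into a buffer invokes the private ARM
 * cacheflush call before executing them.  On ARM the call number is
 * __ARM_NR_cacheflush (0x0f0002); the wrapper below is hypothetical:
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	static void example_sync_icache(void *start, void *end)
 *	{
 *		syscall(0x0f0002, start, end, 0);
 *	}
 */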
/*
 * Perform necessary cache operations to ensure that data previously
 * stored within this range of addresses can be executed by the CPU.
 */
#define flush_icache_range(s,e)		__cpuc_coherent_kern_range(s,e)
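
/*
 * Illustrative sketch, not part of this header: kernel code that
 * generates or patches instructions (the module loader, for instance)
 * calls flush_icache_range() over the modified region before the code
 * is executed:
 *
 *	*(u32 *)addr = insn;
 *	flush_icache_range((unsigned long)addr,
 *			   (unsigned long)addr + sizeof(insn));
 */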
/*
 * Perform necessary cache operations to ensure that the TLB will
 * see data written in the specified area.
 */
#define clean_dcache_area(start,size)	cpu_dcache_clean_area(start, size)
/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (ie, page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
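
/*
 * Illustrative sketch, not part of this header: a driver that writes
 * to a page-cache page through the kernel mapping calls
 * flush_dcache_page() afterwards so that any aliased user mapping sees
 * the new data.  A minimal, hypothetical read-completion path:
 *
 *	static void example_fill_page(struct page *page, const void *buf,
 *				      size_t len)
 *	{
 *		void *dst = kmap_atomic(page, KM_USER0);
 *
 *		memcpy(dst, buf, len);
 *		kunmap_atomic(dst, KM_USER0);
 *		flush_dcache_page(page);
 *	}
 */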
static inline void __flush_icache_all(void)
{
#ifdef CONFIG_ARM_ERRATA_411920
	extern void v6_icache_inval_all(void);
	v6_icache_inval_all();
#else
	asm("mcr	p15, 0, %0, c7, c5, 0	@ invalidate I-cache\n"
	    :
	    : "r" (0));
#endif
}
static inline void flush_kernel_vmap_range(void *addr, int size)
{
	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
		__cpuc_flush_dcache_area(addr, (size_t)size);
}
static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
		__cpuc_flush_dcache_area(addr, (size_t)size);
}
#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
			 struct page *page, unsigned long vmaddr)
{
	extern void __flush_anon_page(struct vm_area_struct *vma,
				struct page *, unsigned long);
	if (PageAnon(page))
		__flush_anon_page(vma, page, vmaddr);
}
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
	/* highmem pages are always flushed upon kunmap already */
	if ((cache_is_vivt() || cache_is_vipt_aliasing()) && !PageHighMem(page))
		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
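
/*
 * Illustrative sketch, not part of this header: as described in
 * Documentation/cachetlb.txt, a driver that has modified a
 * user-visible page through kmap() calls flush_kernel_dcache_page()
 * before dropping the mapping:
 *
 *	void *kaddr = kmap(page);
 *	memcpy(kaddr, data, len);
 *	flush_kernel_dcache_page(page);
 *	kunmap(page);
 */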
#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_user_range(vma,page,addr,len) \
	flush_dcache_page(page)
/*
 * We don't appear to need to do anything here.  In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma,page)	do { } while (0)
/*
 * flush_cache_vmap() is used when creating mappings (eg, via vmap,
 * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
 * caches, since the direct-mappings of these pages may contain cached
 * data, we need to do a full cache flush to ensure that writebacks
 * don't corrupt data placed into these pages via the new mappings.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
	else
		/*
		 * set_pte_at() called from vmap_pte_range() does not
		 * have a DSB after cleaning the cache line.
		 */
		dsb();
}

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
}

#endif