/*
 *  arch/arm/include/asm/cacheflush.h
 *
 *  Copyright (C) 1999-2002 Russell King
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H

#include <linux/mm.h>

#include <asm/glue.h>
#include <asm/shmparam.h>
#include <asm/cachetype.h>

#define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
/*
 *	Cache Model
 *	===========
 */
#undef _CACHE
#undef MULTI_CACHE

#if defined(CONFIG_CPU_CACHE_V3)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v3
# endif
#endif

#if defined(CONFIG_CPU_CACHE_V4)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4
# endif
#endif

#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_FA526)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE fa
# endif
#endif

#if defined(CONFIG_CPU_ARM926T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm926
# endif
#endif

#if defined(CONFIG_CPU_ARM940T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm940
# endif
#endif

#if defined(CONFIG_CPU_ARM946E)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm946
# endif
#endif

#if defined(CONFIG_CPU_CACHE_V4WB)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4wb
# endif
#endif

#if defined(CONFIG_CPU_XSCALE)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE xscale
# endif
#endif

#if defined(CONFIG_CPU_XSC3)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE xsc3
# endif
#endif

#if defined(CONFIG_CPU_FEROCEON)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_V6)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_V7)
# define MULTI_CACHE 1
#endif

#if !defined(_CACHE) && !defined(MULTI_CACHE)
#error Unknown cache maintenance model
#endif
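
/*
 * Illustrative note (not part of the original header): a kernel configured
 * for a single cache model (say only CONFIG_CPU_ARM926T) ends up with
 * _CACHE defined (to arm926) and MULTI_CACHE undefined, so the cache calls
 * below compile to direct calls.  Selecting two or more incompatible models
 * defines MULTI_CACHE instead, forcing indirection through the cpu_cache
 * function-pointer table declared later in this file.
 */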
/*
 * This flag is used to indicate that the page pointed to by a pte
 * is dirty and requires cleaning before returning it to the user.
 */
#define PG_dcache_dirty PG_arch_1
/*
 *	MM Cache Management
 *	===================
 *
 *	The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
 *	implement these methods.
 *
 *	Start addresses are inclusive and end addresses are exclusive;
 *	start addresses should be rounded down, end addresses up.
 *
 *	See Documentation/cachetlb.txt for more information.
 *	Please note that the implementation of these, and the required
 *	effects are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *	flush_cache_kern_all()
 *
 *		Unconditionally clean and invalidate the entire cache.
 *
 *	flush_cache_user_mm(mm)
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_cache_user_range(start, end, flags)
 *
 *		Clean and invalidate a range of cache entries in the
 *		specified address space before a change of page tables.
 *		- start - user start address (inclusive, page aligned)
 *		- end   - user end address   (exclusive, page aligned)
 *		- flags - vma->vm_flags field
 *
 *	coherent_kern_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	DMA Cache Coherency
 *	===================
 *
 *	dma_inv_range(start, end)
 *
 *		Invalidate (discard) the specified virtual address range.
 *		May not write back any entries.  If 'start' or 'end'
 *		are not cache line aligned, those lines must be written
 *		back and invalidated.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	dma_clean_range(start, end)
 *
 *		Clean (write back) the specified virtual address range.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	dma_flush_range(start, end)
 *
 *		Clean and invalidate the specified virtual address range.
 *		- start  - virtual start address
 *		- end    - virtual end address
 */
struct cpu_cache_fns {
	void (*flush_kern_all)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	void (*coherent_kern_range)(unsigned long, unsigned long);
	void (*coherent_user_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_page)(void *);

	void (*dma_inv_range)(const void *, const void *);
	void (*dma_clean_range)(const void *, const void *);
	void (*dma_flush_range)(const void *, const void *);
};
struct outer_cache_fns {
	void (*inv_range)(unsigned long, unsigned long);
	void (*clean_range)(unsigned long, unsigned long);
	void (*flush_range)(unsigned long, unsigned long);
};
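
/*
 * Illustrative note (not part of the original header): on MULTI_CACHE
 * kernels the CPU setup code copies the detected processor's method table
 * into 'cpu_cache', and an external L2 cache controller driver (the
 * L2x0/L210 code, for example) may fill in 'outer_cache', so the macros
 * and helpers below indirect through these function pointers at run time.
 */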
/*
 * Select the calling method
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
#define __cpuc_flush_user_all		cpu_cache.flush_user_all
#define __cpuc_flush_user_range		cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_page	cpu_cache.flush_kern_dcache_page

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_inv_range			cpu_cache.dma_inv_range
#define dmac_clean_range		cpu_cache.dma_clean_range
#define dmac_flush_range		cpu_cache.dma_flush_range
#else

#define __cpuc_flush_kern_all		__glue(_CACHE,_flush_kern_cache_all)
#define __cpuc_flush_user_all		__glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
#define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
#define __cpuc_flush_dcache_page	__glue(_CACHE,_flush_kern_dcache_page)
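
/*
 * Illustrative example (assumes the token-pasting __glue() helper from
 * <asm/glue.h>): with a single selected cache model such as
 *
 *	#define _CACHE arm926
 *
 * the line
 *
 *	#define __cpuc_flush_kern_all	__glue(_CACHE,_flush_kern_cache_all)
 *
 * resolves to a direct call to arm926_flush_kern_cache_all(), so no
 * function-pointer indirection is needed on single-cache kernels.
 */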
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_page(void *);
/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_inv_range			__glue(_CACHE,_dma_inv_range)
#define dmac_clean_range		__glue(_CACHE,_dma_clean_range)
#define dmac_flush_range		__glue(_CACHE,_dma_flush_range)

extern void dmac_inv_range(const void *, const void *);
extern void dmac_clean_range(const void *, const void *);
extern void dmac_flush_range(const void *, const void *);

#endif
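
/*
 * Illustrative sketch (not part of the original header) of why the dmac_*
 * helpers exist; the dma-mapping layer performs maintenance along these
 * lines when mapping a streaming buffer (exact call sites vary):
 *
 *	void *buf = ...;	// kernel-virtual buffer about to be DMA'd
 *	size_t len = ...;
 *
 *	if (dir == DMA_FROM_DEVICE)
 *		dmac_inv_range(buf, buf + len);   // discard stale lines
 *	else if (dir == DMA_TO_DEVICE)
 *		dmac_clean_range(buf, buf + len); // write dirty lines back
 *	else // DMA_BIDIRECTIONAL
 *		dmac_flush_range(buf, buf + len); // clean + invalidate
 *
 * Drivers must not call these directly; they should use the dma-mapping
 * API (dma_map_single() and friends) instead.
 */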
#ifdef CONFIG_OUTER_CACHE

extern struct outer_cache_fns outer_cache;

static inline void outer_inv_range(unsigned long start, unsigned long end)
{
	if (outer_cache.inv_range)
		outer_cache.inv_range(start, end);
}

static inline void outer_clean_range(unsigned long start, unsigned long end)
{
	if (outer_cache.clean_range)
		outer_cache.clean_range(start, end);
}

static inline void outer_flush_range(unsigned long start, unsigned long end)
{
	if (outer_cache.flush_range)
		outer_cache.flush_range(start, end);
}
#else

static inline void outer_inv_range(unsigned long start, unsigned long end)
{ }
static inline void outer_clean_range(unsigned long start, unsigned long end)
{ }
static inline void outer_flush_range(unsigned long start, unsigned long end)
{ }

#endif
/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
		flush_ptrace_access(vma, page, vaddr, dst, len, 1);\
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
	} while (0)
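
/*
 * Illustrative note (not part of the original header): the generic
 * access_process_vm()/ptrace code uses copy_to_user_page() when poking
 * another task's memory, for instance when a debugger inserts a breakpoint.
 * The flush_ptrace_access() call afterwards keeps the I-cache coherent with
 * the newly written instructions for the traced task.
 */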
/*
 * Convert calls to our calling convention.
 */
#define flush_cache_all()		__cpuc_flush_kern_all()

#ifndef CONFIG_CPU_CACHE_VIPT
static inline void flush_cache_mm(struct mm_struct *mm)
{
	if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
		__cpuc_flush_user_all();
}

static inline void
flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}

static inline void
flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
		unsigned long addr = user_addr & PAGE_MASK;
		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
	}
}

static inline void
flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
		    unsigned long uaddr, void *kaddr,
		    unsigned long len, int write)
{
	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
		unsigned long addr = (unsigned long)kaddr;
		__cpuc_coherent_kern_range(addr, addr + len);
	}
}
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
				unsigned long uaddr, void *kaddr,
				unsigned long len, int write);
#endif
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
/*
 * flush_cache_user_range is used when we want to ensure that the
 * Harvard caches are synchronised for the user space address range.
 * This is used for the ARM private sys_cacheflush system call.
 */
#define flush_cache_user_range(vma,start,end) \
	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
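
/*
 * Illustrative example (not part of the original header): user space code
 * that generates instructions at run time (a JIT, for instance) must ask
 * the kernel to synchronise the caches before executing them, which on ARM
 * Linux reaches this path via the private cacheflush syscall.  With GCC
 * this is typically done through the builtin:
 *
 *	__builtin___clear_cache((char *)code, (char *)code + size);
 *	((void (*)(void))code)();	// now safe to execute
 */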
/*
 * Perform necessary cache operations to ensure that data previously
 * stored within this range of addresses can be executed by the CPU.
 */
#define flush_icache_range(s,e)		__cpuc_coherent_kern_range(s,e)

/*
 * Perform necessary cache operations to ensure that the TLB will
 * see data written in the specified area.
 */
#define clean_dcache_area(start,size)	cpu_dcache_clean_area(start, size)
/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (ie, page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
extern void flush_dcache_page(struct page *);

extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
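
/*
 * Illustrative sketch (not part of the original header): a filesystem or
 * driver that writes to a page cache page through the kernel mapping is
 * expected to call flush_dcache_page() afterwards, roughly:
 *
 *	void *kaddr = kmap_atomic(page, KM_USER0);
 *	memcpy(kaddr, data, len);	// write via the kernel mapping
 *	kunmap_atomic(kaddr, KM_USER0);
 *	flush_dcache_page(page);	// may defer via PG_dcache_dirty
 */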
static inline void __flush_icache_all(void)
{
	asm("mcr	p15, 0, %0, c7, c5, 0	@ invalidate I-cache\n"
	    :
	    : "r" (0));
}
#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
			 struct page *page, unsigned long vmaddr)
{
	extern void __flush_anon_page(struct vm_area_struct *vma,
				struct page *, unsigned long);
	if (PageAnon(page))
		__flush_anon_page(vma, page, vmaddr);
}
#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_user_range(vma,page,addr,len) \
	flush_dcache_page(page)
/*
 * We don't appear to need to do anything here.  In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma,page)	do { } while (0)
static inline void flush_ioremap_region(unsigned long phys, void __iomem *virt,
	unsigned offset, size_t size)
{
	const void *start = (void __force *)virt + offset;

	dmac_inv_range(start, start + size);
}
/*
 * flush_cache_vmap() is used when creating mappings (eg, via vmap,
 * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
 * caches, since the direct-mappings of these pages may contain cached
 * data, we need to do a full cache flush to ensure that writebacks
 * don't corrupt data placed into these pages via the new mappings.
 */
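
/*
 * Illustrative note (not part of the original header): on an aliasing
 * (VIVT or aliasing VIPT) cache, a page may hold dirty lines under its
 * low-memory linear address while the new vmap() alias is accessed at a
 * different virtual address, so a later writeback from the old alias could
 * land on top of data written through the new one; hence the full flush in
 * flush_cache_vmap() below.
 */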
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
	else
		/*
		 * set_pte_at() called from vmap_pte_range() does not
		 * have a DSB after cleaning the cache line.
		 */
		dsb();
}

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
}

#endif