/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
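
/*
 * Background: a VIVT cache indexes and tags lines by virtual address,
 * so every mapping of a physical page is cached independently.  A VIPT
 * cache indexes by virtual address but tags by physical address; it
 * only aliases when index bits lie above PAGE_SHIFT, and CACHE_COLOUR()
 * extracts exactly those overlapping bits.
 */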

#ifdef CONFIG_MMU
#ifdef CONFIG_CPU_CACHE_VIPT

#define ALIAS_FLUSH_START	0xffff4000

/* kernel PTE slot used to create the temporary coloured mapping */
#define TOP_PTE(x)	pte_offset_kernel(top_pmd, x)
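
/*
 * flush_pfn_alias() maps the given page at a kernel virtual address
 * with the same cache colour as the user address, then flushes that
 * alias.  On an aliasing VIPT cache this hits exactly the cache lines
 * that the congruent user mapping occupies.
 */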
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	set_pte(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL));
	flush_tlb_kernel_page(to);
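
	/*
	 * Clean and invalidate the D-cache over the aliased page: MCRR
	 * with CRm=c14 takes a start/end virtual address range in %0/%1,
	 * then c7, c10, 4 drains the write buffer and c7, c5, 0
	 * invalidates the I-cache.
	 */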
	asm(	"mcrr	p15, 0, %1, %0, c14\n"
	"	mcr	p15, 0, %2, c7, c10, 4\n"
	"	mcr	p15, 0, %2, c7, c5, 0\n"
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
	    : "cc");
}
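
/*
 * On a VIVT cache the user view is flushed directly, but only when the
 * mm is currently resident on this CPU.  An aliasing VIPT cache has no
 * way to single out one address space, so the sequence below cleans
 * and invalidates the entire D-cache (c7, c14, 0), invalidates the
 * I-cache (c7, c5, 0) and drains the write buffer (c7, c10, 4).
 */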
void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
			__cpuc_flush_user_all();
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c5, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}
}
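
/*
 * flush_cache_range() follows the same split as flush_cache_mm(): a
 * VIVT cache flushes just the affected user range, while an aliasing
 * VIPT cache performs the same full clean-and-invalidate sequence.
 */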
void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cache_is_vivt()) {
		if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
			__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
						vma->vm_flags);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c5, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}
}
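
/*
 * flush_cache_page() covers a single user page: VIVT flushes the user
 * mapping in place, while an aliasing VIPT cache flushes through a
 * congruent kernel alias of the page via flush_pfn_alias().
 */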
void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cache_is_vivt()) {
		if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
			unsigned long addr = user_addr & PAGE_MASK;
			__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
		}
		return;
	}

	if (cache_is_vipt_aliasing())
		flush_pfn_alias(pfn, user_addr);
}
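
/*
 * flush_ptrace_access() is called after ptrace has written to the
 * traced task's memory through the kernel mapping at kaddr; it makes
 * those writes visible to the user mapping at uaddr.
 */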
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr,
			 unsigned long len, int write)
{
	if (cache_is_vivt()) {
		if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
			unsigned long addr = (unsigned long)kaddr;
			__cpuc_coherent_kern_range(addr, addr + len);
		}
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(page_to_pfn(page), uaddr);
		return;
	}

	/* VIPT non-aliasing cache */
	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask) &&
	    vma->vm_flags & VM_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		/* only flushing the kernel mapping on non-aliasing VIPT */
		__cpuc_coherent_kern_range(addr, addr + len);
	}
}
#else
#define flush_pfn_alias(pfn,vaddr)	do { } while (0)
#endif	/* CONFIG_CPU_CACHE_VIPT */
#else
#define flush_pfn_alias(pfn,vaddr)	do { } while (0)
#endif /* CONFIG_MMU */

void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	__cpuc_flush_dcache_page(page_address(page));

	/*
	 * If this is a page cache page, and we have an aliasing VIPT cache,
	 * we only need to do one flush - which would be at the relevant
	 * userspace colour, which is congruent with page->index.
	 */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(page_to_pfn(page),
				page->index << PAGE_CACHE_SHIFT);
}
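
/*
 * Walk all shared, file-backed VMAs in the current mm that may map
 * this page (found via the address_space prio tree) and flush the page
 * at each mapped address.  This is only needed on VIVT, where every
 * user alias is cached separately.
 */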
static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	pgoff_t pgoff;

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we need to also write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		/*
		 * If this VMA is not in our MM, we can ignore it.
		 */
		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}

/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 *  - VIPT non-aliasing cache: fully coherent so nothing required.
 *  - VIVT: fully aliasing, so we need to handle every alias in our
 *          current VM view.
 *  - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP.
 */
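
/*
 * The lazy path below only marks the page PG_dcache_dirty; the
 * deferred flush happens when update_mmu_cache() later installs a
 * user mapping of the page (see arch/arm/mm/fault-armv.c).
 */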
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

#ifndef CONFIG_SMP
	if (mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else
#endif
	{
		__flush_dcache_page(mapping, page);
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, page);
	}
}
EXPORT_SYMBOL(flush_dcache_page);