/*
 *  linux/arch/arm/mm/fault-armv.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/gfp.h>

#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "mm.h"
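
/*
 * Memory type used for PTEs that alias in the cache.  The default is
 * "bufferable" (uncached, but with the write buffer enabled);
 * check_writebuffer_bugs() below downgrades this to fully uncached if
 * the CPU's write buffer turns out to alias physical addresses.
 */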
static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;

#if __LINUX_ARM_ARCH__ < 6
/*
 * We take the easy way out of this problem - we make the
 * PTE uncacheable.  However, we leave the write buffer on.
 *
 * Note that the pte lock held when calling update_mmu_cache must also
 * guard the pte (somewhere else in the same mm) that we modify here.
 * Therefore those configurations which might call adjust_pte (those
 * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
 */
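/*
 * Re-type a single PTE: if it currently maps the page at 'pfn' with a
 * cacheable memory type, write back and invalidate its cache lines,
 * switch the PTE's memory type to shared_pte_mask, and shoot down the
 * stale TLB entry.  Returns non-zero if the PTE was present, i.e. the
 * page really is being shared.
 */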
static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
	unsigned long pfn, pte_t *ptep)
{
	pte_t entry = *ptep;
	int ret;

	/*
	 * If this page is present, it's actually being shared.
	 */
	ret = pte_present(entry);

	/*
	 * If this page isn't present, or is already setup to
	 * fault (i.e. is old), we can safely ignore any issues.
	 */
	if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
		flush_cache_page(vma, address, pfn);
		outer_flush_range((pfn << PAGE_SHIFT),
				  (pfn << PAGE_SHIFT) + PAGE_SIZE);
		pte_val(entry) &= ~L_PTE_MT_MASK;
		pte_val(entry) |= shared_pte_mask;
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_page(vma, address);
	}

	return ret;
}
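
/*
 * Walk the page tables of vma->vm_mm down to the PTE covering 'address'
 * and apply do_adjust_pte() to it under the pte lock.  Returns zero if
 * the walk hits an empty pgd or pmd, i.e. there is nothing to adjust.
 */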
static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
	unsigned long pfn)
{
	spinlock_t *ptl;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

	pgd = pgd_offset(vma->vm_mm, address);
	if (pgd_none_or_clear_bad(pgd))
		return 0;

	pmd = pmd_offset(pgd, address);
	if (pmd_none_or_clear_bad(pmd))
		return 0;

	/*
	 * This is called while another page table is mapped, so we
	 * must use the nested version.  This also means we need to
	 * open-code the spin-locking.
	 */
	ptl = pte_lockptr(vma->vm_mm, pmd);
	pte = pte_offset_map_nested(pmd, address);
	spin_lock(ptl);

	ret = do_adjust_pte(vma, address, pfn, pte);

	spin_unlock(ptl);
	pte_unmap_nested(pte);

	return ret;
}
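
/*
 * Find every other user-space mapping of the page at 'pfn' that lives in
 * the faulting mm and make each alias uncacheable.  A page at file offset
 * 'pgoff' sits at mpnt->vm_start + ((pgoff - mpnt->vm_pgoff) << PAGE_SHIFT)
 * within a mapping 'mpnt'.  If any aliases were adjusted, the PTE being
 * installed must be downgraded too, so that all views stay coherent.
 */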
static void
make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
	unsigned long addr, pte_t *ptep, unsigned long pfn)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	unsigned long offset;
	pgoff_t pgoff;
	int aliases = 0;

	pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);

	/*
	 * If we have any shared mappings that are in the same mm
	 * space, then we need to handle them specially to maintain
	 * cache coherency.
	 */
	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * If this VMA is not in our MM, we can ignore it.
		 * Note that we intentionally mask out the VMA
		 * that we are fixing up.
		 */
		if (mpnt->vm_mm != mm || mpnt == vma)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn);
	}
	flush_dcache_mmap_unlock(mapping);
	if (aliases)
		do_adjust_pte(vma, addr, pfn, ptep);
}

/*
 * Take care of architecture specific things when placing a new PTE into
 * a page table, or changing an existing PTE.  Basically, there are two
 * things that we need to take care of:
 *
 *  1. If PG_dcache_clean is not set for the page, we need to ensure
 *     that any cache entries for the kernel's virtual memory
 *     range are written back to the page.
 *  2. If we have multiple shared mappings of the same space in
 *     an object, we need to deal with the cache aliasing issues.
 *
 * Note that the pte lock will be held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
	pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct address_space *mapping;
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	page = pfn_to_page(pfn);
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);
	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);
	if (mapping) {
		if (cache_is_vivt())
			make_coherent(mapping, vma, addr, ptep, pfn);
		else if (vma->vm_flags & VM_EXEC)
			__flush_icache_all();
	}
}
#endif	/* __LINUX_ARM_ARCH__ < 6 */

/*
 * Check whether the write buffer has physical address aliasing
 * issues.  If it has, we need to avoid them for the case where
 * we have several shared mappings of the same object in user
 * space.
 */
static int __init check_writebuffer(unsigned long *p1, unsigned long *p2)
{
	register unsigned long zero = 0, one = 1, val;

	local_irq_disable();
	mb();
	*p1 = one;
	mb();
	*p2 = zero;
	mb();
	val = *p1;
	mb();
	local_irq_enable();

	/*
	 * p1 and p2 map the same physical word, so a coherent write
	 * buffer must make the final read observe the store of 'zero'.
	 */
	return val != zero;
}
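
/*
 * Probe for the write buffer bug at boot: map one physical page at two
 * different kernel virtual addresses with the bufferable memory type and
 * run check_writebuffer() on the pair.  If the test fails, shared
 * mappings must be made fully uncached via shared_pte_mask instead.
 */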
void __init check_writebuffer_bugs(void)
{
	struct page *page;
	const char *reason;
	unsigned long v = 1;

	printk(KERN_INFO "CPU: Testing write buffer coherency: ");

	page = alloc_page(GFP_KERNEL);
	if (page) {
		unsigned long *p1, *p2;
		pgprot_t prot = __pgprot_modify(PAGE_KERNEL,
					L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE);

		p1 = vmap(&page, 1, VM_IOREMAP, prot);
		p2 = vmap(&page, 1, VM_IOREMAP, prot);

		if (p1 && p2) {
			v = check_writebuffer(p1, p2);
			reason = "enabling work-around";
		} else {
			reason = "unable to map memory\n";
		}

		vunmap(p1);
		vunmap(p2);
		put_page(page);
	} else {
		reason = "unable to grab page\n";
	}

	if (v) {
		printk("failed, %s\n", reason);
		shared_pte_mask = L_PTE_MT_UNCACHED;
	} else {
		printk("ok\n");
	}
}