[ARM] Always mark ARMv6 PTWs outer cacheable
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / arch/arm/mm/mmap.c
/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>

#include <asm/system.h>

#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
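
/*
 * Worked example, assuming the usual ARM values of SHMLBA = 4 * PAGE_SIZE
 * (16KiB) and PAGE_SHIFT = 12: COLOUR_ALIGN(0x5000, 3) rounds 0x5000 up to
 * the next 16KiB boundary, 0x8000, then adds the page's colour offset,
 * (3 << 12) & 0x3fff = 0x3000, giving 0xb000.  Page 3 of the object is
 * thus always mapped at a 16KiB multiple plus 0x3000, so every mapping of
 * it shares one cache colour.
 */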

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches.  We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases, however
 * in the VIVT case, we optimise out the alignment rules.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
#ifdef CONFIG_CPU_V6
	unsigned int cache_type;
	int do_align = 0, aliasing = 0;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.  This is indicated by the P bits (bits 11
	 * and 23) of the cache type register.
	 */
	cache_type = read_cpuid(CPUID_CACHETYPE);
	if (cache_type != read_cpuid(CPUID_ID)) {
		aliasing = (cache_type | cache_type >> 12) & (1 << 11);
		if (aliasing)
			do_align = filp || flags & MAP_SHARED;
	}
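	/*
	 * Note on the mask above: the D-cache size field sits 12 bits
	 * above the I-cache size field in the cache type register, so
	 * OR-ing cache_type with cache_type >> 12 lets a single test
	 * of the aliasing (P) bit, bit 11, cover both caches.
	 */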
#else
#define do_align 0
#define aliasing 0
#endif

	/*
	 * We should enforce the MAP_FIXED case.  However, currently
	 * the generic kernel code doesn't allow us to handle this.
	 */
	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED && addr & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}
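	/*
	 * For illustration: with aliasing caches, a MAP_SHARED|MAP_FIXED
	 * request at, say, addr = 0x1000 is refused, since 0x1000 is not
	 * a SHMLBA multiple and honouring it could give the same page two
	 * differently coloured mappings.
	 */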

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
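	/*
	 * No usable hint: either none was given, or the aligned request
	 * ran past TASK_SIZE or overlapped an existing VMA.  Fall back
	 * to a full search of the address space.
	 */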
	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}
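	/*
	 * free_area_cache remembers where the previous search ended, and
	 * cached_hole_size the largest hole skipped on the way there; a
	 * request small enough to fit such a hole restarts from
	 * TASK_UNMAPPED_BASE so the hole can be reused.
	 */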

full_search:
	if (do_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;
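		/*
		 * vm_end is only page aligned, so the candidate address
		 * must be re-coloured after stepping past this VMA.
		 */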
		addr = vma->vm_end;
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(unsigned long addr, size_t size)
{
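	/*
	 * __pa(high_memory) is the physical address just past the end
	 * of the kernel's directly mapped RAM, so the check below
	 * rejects ranges the linear mapping cannot reach.
	 */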
	if (addr + size > __pa(high_memory))
		return 0;

	return 1;
}

/*
 * We don't use supersection mappings for mmap() on /dev/mem, which
 * means that we can't map the memory area above the 4G barrier into
 * userspace.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
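	/*
	 * With 4KiB pages, 0x00100000 page frames span exactly 4GiB, so
	 * the request must end at or below the 32-bit physical limit.
	 */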
	return !(pfn + (size >> PAGE_SHIFT) > 0x00100000);
}