/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <asm/cachetype.h>
static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
                                              unsigned long pgoff)
{
        unsigned long base = addr & ~(SHMLBA-1);
        unsigned long off = (pgoff << PAGE_SHIFT) & (SHMLBA-1);

        if (base + off <= addr)
                return base + off;

        return base - off;
}
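/*
 * Worked example (illustrative assumption, not defined in this file:
 * SHMLBA == 4 * PAGE_SIZE == 0x4000 with 4K pages, as on typical ARM
 * VIPT-aliasing caches):
 *
 *   COLOUR_ALIGN_DOWN(0x40007123, 3)
 *     base = 0x40007123 & ~0x3fff = 0x40004000
 *     off  = (3 << 12) & 0x3fff   = 0x3000
 *     base + off = 0x40007000 <= 0x40007123, so 0x40007000 is returned:
 *   the highest address not above 0x40007123 whose offset within an
 *   SHMLBA window matches page offset 3.
 */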
#define COLOUR_ALIGN(addr,pgoff)                \
        ((((addr)+SHMLBA-1)&~(SHMLBA-1)) +      \
         (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
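/*
 * Worked example under the same assumed SHMLBA == 0x4000:
 *
 *   COLOUR_ALIGN(0x40001000, 3)
 *     = ((0x40001000 + 0x3fff) & ~0x3fff) + ((3 << 12) & 0x3fff)
 *     = 0x40004000 + 0x3000
 *     = 0x40007000
 *
 * Mapping pgoff 3 at 0x40007000 places pgoff 0 at the SHMLBA-aligned
 * 0x40004000, so every page of the object keeps a fixed cache colour.
 */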
/* gap between mmap and stack */
#define MIN_GAP (128*1024*1024UL)
#define MAX_GAP ((TASK_SIZE)/6*5)
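/*
 * The stack gap used by mmap_base() below is RLIMIT_STACK clamped to
 * [MIN_GAP, MAX_GAP]: at least 128MB is kept free for stack growth
 * even under a tiny rlimit, and no more than 5/6 of TASK_SIZE is
 * sacrificed even under a huge one, preserving space for mappings.
 */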
static int mmap_is_legacy(void)
{
        if (current->personality & ADDR_COMPAT_LAYOUT)
                return 1;

        if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
                return 1;

        return sysctl_legacy_va_layout;
}
static unsigned long mmap_base(unsigned long rnd)
{
        unsigned long gap = rlimit(RLIMIT_STACK);

        if (gap < MIN_GAP)
                gap = MIN_GAP;
        else if (gap > MAX_GAP)
                gap = MAX_GAP;

        return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}
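/*
 * Example (assumed figures): with the common 8MB stack rlimit, gap is
 * raised to MIN_GAP (128MB), so the top-down mmap base ends up about
 * 128MB below TASK_SIZE, less the per-exec random offset.
 */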
/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches.  We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases, however
 * in the VIVT case, we optimise out the alignment rules.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long start_addr;
        int do_align = 0;
        int aliasing = cache_is_vipt_aliasing();
        /*
         * We only need to do colour alignment if either the I or D
         * caches alias.
         */
        if (aliasing)
                do_align = filp || (flags & MAP_SHARED);
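        /*
         * File-backed mappings are colour-aligned even when private:
         * until copy-on-write the underlying page cache page may also
         * be mapped elsewhere, and all mappings of a physical page
         * must agree on its cache colour.
         */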
        /*
         * We enforce the MAP_FIXED case.
         */
        if (flags & MAP_FIXED) {
                if (aliasing && flags & MAP_SHARED &&
                    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
                        return -EINVAL;
                return addr;
        }
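        /*
         * E.g. (still assuming SHMLBA == 0x4000): a MAP_FIXED|MAP_SHARED
         * request at addr 0x40001000 with pgoff 0 is rejected with
         * -EINVAL, since (0x40001000 - 0) & 0x3fff == 0x1000 lands on
         * the wrong colour; addr 0x40004000 would be accepted.
         */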
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (addr) {
                if (do_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
        if (len > mm->cached_hole_size) {
                start_addr = addr = mm->free_area_cache;
        } else {
                start_addr = addr = mm->mmap_base;
                mm->cached_hole_size = 0;
        }
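        /*
         * free_area_cache remembers where the previous search ended
         * and cached_hole_size the largest hole seen, so successive
         * mmap() calls usually resume scanning rather than rewalking
         * the VMA list from mmap_base.
         */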
full_search:
        if (do_align)
                addr = COLOUR_ALIGN(addr, pgoff);
        else
                addr = PAGE_ALIGN(addr);
        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                /* At this point: (!vma || addr < vma->vm_end). */
                if (TASK_SIZE - len < addr) {
                        /*
                         * Start a new search - just in case we missed
                         * some holes.
                         */
                        if (start_addr != TASK_UNMAPPED_BASE) {
                                start_addr = addr = TASK_UNMAPPED_BASE;
                                mm->cached_hole_size = 0;
                                goto full_search;
                        }
                        return -ENOMEM;
                }
                if (!vma || addr + len <= vma->vm_start) {
                        /*
                         * Remember the place where we stopped the search:
                         */
                        mm->free_area_cache = addr + len;
                        return addr;
                }
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;
                addr = vma->vm_end;
                if (do_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
        }
}
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                        const unsigned long len, const unsigned long pgoff,
                        const unsigned long flags)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        int do_align = 0;
        int aliasing = cache_is_vipt_aliasing();
        /*
         * We only need to do colour alignment if either the I or D
         * caches alias.
         */
        if (aliasing)
                do_align = filp || (flags & MAP_SHARED);
        /* requested length too big for entire address space */
        if (len > TASK_SIZE)
                return -ENOMEM;
        if (flags & MAP_FIXED) {
                if (aliasing && flags & MAP_SHARED &&
                    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
                        return -EINVAL;
                return addr;
        }
        /* requesting a specific address */
        if (addr) {
                if (do_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                                (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
        /* check if free_area_cache is useful for us */
        if (len <= mm->cached_hole_size) {
                mm->cached_hole_size = 0;
                mm->free_area_cache = mm->mmap_base;
        }
        /* either no address requested or can't fit in requested address hole */
        addr = mm->free_area_cache;
        if (do_align) {
                unsigned long base = COLOUR_ALIGN_DOWN(addr - len, pgoff);
                addr = base + len;
        }
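        /*
         * The alignment is applied to the would-be start of the
         * mapping (addr - len): the start is pulled down to a fitting
         * colour and len added back, so the candidate region still
         * ends at or below the cached hint.
         */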
        /* make sure it can fit in the remaining address space */
        if (addr > len) {
                vma = find_vma(mm, addr-len);
                if (!vma || addr <= vma->vm_start)
                        /* remember the address as a hint for next time */
                        return (mm->free_area_cache = addr-len);
        }
        if (mm->mmap_base < len)
                goto bottomup;

        addr = mm->mmap_base - len;
        if (do_align)
                addr = COLOUR_ALIGN_DOWN(addr, pgoff);

        do {
                /*
                 * Lookup failure means no vma is above this address,
                 * else if new region fits below vma->vm_start,
                 * return with success:
                 */
                vma = find_vma(mm, addr);
                if (!vma || addr+len <= vma->vm_start)
                        /* remember the address as a hint for next time */
                        return (mm->free_area_cache = addr);
                /* remember the largest hole we saw so far */
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;
                /* try just below the current vma->vm_start */
                addr = vma->vm_start - len;
                if (do_align)
                        addr = COLOUR_ALIGN_DOWN(addr, pgoff);
        } while (len < vma->vm_start);
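        /*
         * The loop keeps trying the next lower hole while there is
         * still room for len bytes below the current vma; once
         * vma->vm_start <= len nothing more can fit beneath it and we
         * fall through to the bottom-up fallback.
         */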
bottomup:
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        mm->cached_hole_size = ~0UL;
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
        /*
         * Restore the topdown base:
         */
        mm->free_area_cache = mm->mmap_base;
        mm->cached_hole_size = ~0UL;

        return addr;
}
void arch_pick_mmap_layout(struct mm_struct *mm)
{
        unsigned long random_factor = 0UL;
        /* 8 bits of randomness in 20 address space bits */
        if ((current->flags & PF_RANDOMIZE) &&
            !(current->personality & ADDR_NO_RANDOMIZE))
                random_factor = (get_random_int() % (1 << 8)) << PAGE_SHIFT;
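        /*
         * 1 << 8 == 256 page-sized slots; with 4K pages (assuming
         * PAGE_SHIFT == 12) this randomises the base over a 1MB
         * window, i.e. 8 random bits within 20 address bits.
         */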
        if (mmap_is_legacy()) {
                mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
                mm->get_unmapped_area = arch_get_unmapped_area;
                mm->unmap_area = arch_unmap_area;
        } else {
                mm->mmap_base = mmap_base(random_factor);
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
                mm->unmap_area = arch_unmap_area_topdown;
        }
}
/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(unsigned long addr, size_t size)
{
        if (addr < PHYS_OFFSET)
                return 0;
        if (addr + size > __pa(high_memory - 1) + 1)
                return 0;

        return 1;
}
/*
 * We don't use supersection mappings for mmap() on /dev/mem, which
 * means that we can't map the memory area above the 4G barrier into
 * userspace.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
        return !(pfn + (size >> PAGE_SHIFT) > 0x00100000);
}
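/*
 * 0x00100000 pfns x 4K pages (assuming PAGE_SHIFT == 12) is exactly
 * the 4GB boundary, so any range whose pages extend past pfn
 * 0x00100000 is rejected.
 */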
#ifdef CONFIG_STRICT_DEVMEM

#include <linux/ioport.h>
/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain
 * address is valid. The argument is a physical page number.
 * We mimic x86 here by disallowing access to system RAM as well as
 * device-exclusive MMIO regions. This effectively disables read()/write()
 * on /dev/mem.
 */
int devmem_is_allowed(unsigned long pfn)
{
        if (iomem_is_exclusive(pfn << PAGE_SHIFT))
                return 0;
        if (!page_is_ram(pfn))
                return 1;
        return 0;
}

#endif