/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2011 Wind River Systems,
 *   written by Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/sched.h>

unsigned long shm_align_mask = PAGE_SIZE - 1;   /* Sane caches */
EXPORT_SYMBOL(shm_align_mask);
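/*
 * Note: PAGE_SIZE - 1 is the safe default for CPUs whose caches cannot
 * alias; on cores with a virtually indexed, aliasing D-cache the cache
 * setup code is expected to widen this mask at boot so that the colour
 * alignment below actually separates the aliases.
 */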
/* gap between mmap and stack */
#define MIN_GAP (128*1024*1024UL)
#define MAX_GAP ((TASK_SIZE)/6*5)
static int mmap_is_legacy(void)
{
        if (current->personality & ADDR_COMPAT_LAYOUT)
                return 1;

        if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
                return 1;

        return sysctl_legacy_va_layout;
}
static unsigned long mmap_base(unsigned long rnd)
{
        unsigned long gap = rlimit(RLIMIT_STACK);

        if (gap < MIN_GAP)
                gap = MIN_GAP;
        else if (gap > MAX_GAP)
                gap = MAX_GAP;

        return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}
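/*
 * COLOUR_ALIGN_DOWN() and COLOUR_ALIGN() adjust an address down or up to the
 * nearest boundary whose cache colour matches the colour implied by the file
 * offset (pgoff), so shared mappings cannot create virtual aliases in the
 * D-cache.
 */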
static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
                                              unsigned long pgoff)
{
        unsigned long base = addr & ~shm_align_mask;
        unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

        if (base + off <= addr)
                return base + off;

        return base - off;
}
#define COLOUR_ALIGN(addr, pgoff)                               \
        ((((addr) + shm_align_mask) & ~shm_align_mask) +        \
         (((pgoff) << PAGE_SHIFT) & shm_align_mask))
enum mmap_allocation_direction {UP, DOWN};
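/*
 * Common body for both allocation directions: UP does the classic bottom-up
 * search starting at mm->mmap_base, DOWN searches top-down below
 * mm->mmap_base using the free_area_cache/cached_hole_size hints and falls
 * back to the bottom-up allocator when no hole is found.
 */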
static unsigned long arch_get_unmapped_area_common(struct file *filp,
        unsigned long addr0, unsigned long len, unsigned long pgoff,
        unsigned long flags, enum mmap_allocation_direction dir)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long addr = addr0;
        int do_color_align;

        if (unlikely(len > TASK_SIZE))
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                /* Even MAP_FIXED mappings must reside within TASK_SIZE */
                if (TASK_SIZE - len < addr)
                        return -EINVAL;

                /*
                 * We do not accept a shared mapping if it would violate
                 * cache aliasing constraints.
                 */
                if ((flags & MAP_SHARED) &&
                    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
                        return -EINVAL;
                return addr;
        }

        do_color_align = 0;
        if (filp || (flags & MAP_SHARED))
                do_color_align = 1;

        /* requesting a specific address */
        if (addr) {
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

        if (dir == UP) {
                addr = mm->mmap_base;
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
                        /* At this point:  (!vma || addr < vma->vm_end). */
                        if (TASK_SIZE - len < addr)
                                return -ENOMEM;
                        if (!vma || addr + len <= vma->vm_start)
                                return addr;
                        addr = vma->vm_end;
                        if (do_color_align)
                                addr = COLOUR_ALIGN(addr, pgoff);
                }
        } else {
                /* check if free_area_cache is useful for us */
                if (len <= mm->cached_hole_size) {
                        mm->cached_hole_size = 0;
                        mm->free_area_cache = mm->mmap_base;
                }

                /*
                 * either no address requested, or the mapping can't fit into
                 * the requested address hole
                 */
                addr = mm->free_area_cache;
                if (do_color_align) {
                        unsigned long base =
                                COLOUR_ALIGN_DOWN(addr - len, pgoff);
                        addr = base + len;
                }

                /* make sure it can fit in the remaining address space */
                if (likely(addr > len)) {
                        vma = find_vma(mm, addr - len);
                        if (!vma || addr <= vma->vm_start) {
                                /* cache the address as a hint for next time */
                                return mm->free_area_cache = addr - len;
                        }
                }

                if (unlikely(mm->mmap_base < len))
                        goto bottomup;

                addr = mm->mmap_base - len;
                if (do_color_align)
                        addr = COLOUR_ALIGN_DOWN(addr, pgoff);

                do {
                        /*
                         * Lookup failure means no vma is above this address,
                         * else if new region fits below vma->vm_start,
                         * return with success:
                         */
                        vma = find_vma(mm, addr);
                        if (likely(!vma || addr + len <= vma->vm_start)) {
                                /* cache the address as a hint for next time */
                                return mm->free_area_cache = addr;
                        }

                        /* remember the largest hole we saw so far */
                        if (addr + mm->cached_hole_size < vma->vm_start)
                                mm->cached_hole_size = vma->vm_start - addr;

                        /* try just below the current vma->vm_start */
                        addr = vma->vm_start - len;
                        if (do_color_align)
                                addr = COLOUR_ALIGN_DOWN(addr, pgoff);
                } while (likely(len < vma->vm_start));

bottomup:
                /*
                 * A failed mmap() very likely causes application failure,
                 * so fall back to the bottom-up function here.  This scenario
                 * can happen with large stack limits and large mmap()
                 * allocations.
                 */
                mm->cached_hole_size = ~0UL;
                mm->free_area_cache = TASK_UNMAPPED_BASE;
                addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);

                /*
                 * Restore the topdown base:
                 */
                mm->free_area_cache = mm->mmap_base;
                mm->cached_hole_size = ~0UL;

                return addr;
        }
}
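/*
 * The two wrappers below are the hooks that arch_pick_mmap_layout() installs
 * into mm->get_unmapped_area, selecting the allocation direction.
 */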
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
        unsigned long len, unsigned long pgoff, unsigned long flags)
{
        return arch_get_unmapped_area_common(filp,
                        addr0, len, pgoff, flags, UP);
}
/*
 * There is no need to export this but sched.h declares the function as
 * extern so making it static here results in an error.
 */
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
        unsigned long addr0, unsigned long len, unsigned long pgoff,
        unsigned long flags)
{
        return arch_get_unmapped_area_common(filp,
                        addr0, len, pgoff, flags, DOWN);
}
void arch_pick_mmap_layout(struct mm_struct *mm)
{
        unsigned long random_factor = 0UL;

        if (current->flags & PF_RANDOMIZE) {
                random_factor = get_random_int();
                random_factor = random_factor << PAGE_SHIFT;
                if (TASK_IS_32BIT_ADDR)
                        random_factor &= 0xfffffful;
                else
                        random_factor &= 0xffffffful;
        }

        if (mmap_is_legacy()) {
                mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
                mm->get_unmapped_area = arch_get_unmapped_area;
                mm->unmap_area = arch_unmap_area;
        } else {
                mm->mmap_base = mmap_base(random_factor);
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
                mm->unmap_area = arch_unmap_area_topdown;
        }
}
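/*
 * brk_rnd() provides the page-aligned random offset used by
 * arch_randomize_brk(): roughly 8MB of range for 32-bit and 256MB for
 * 64-bit address spaces, per the masks below.
 */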
static inline unsigned long brk_rnd(void)
{
        unsigned long rnd = get_random_int();

        rnd = rnd << PAGE_SHIFT;
        /* 8MB for 32bit, 256MB for 64bit */
        if (TASK_IS_32BIT_ADDR)
                rnd = rnd & 0x7ffffful;
        else
                rnd = rnd & 0xffffffful;

        return rnd;
}
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        unsigned long base = mm->brk;
        unsigned long ret;

        ret = PAGE_ALIGN(base + brk_rnd());