Before:
Heap randomisation test (PIE)         : 16 bits (guessed)
Main executable randomisation (PIE)   : 8 bits (guessed)

After:
Heap randomisation test (PIE)         : 19 bits (guessed)
Main executable randomisation (PIE)   : 12 bits (guessed)
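
The figures above read like paxtest output. As a rough standalone approximation (a hedged sketch, not part of the patch: SAMPLES and the "child" argv convention are inventions of this example), one can re-execute a helper, record where a fresh PROT_EXEC mapping lands in each new address space, and count the address bits that actually vary:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define SAMPLES 64

int main(int argc, char **argv)
{
	if (argc > 1 && strcmp(argv[1], "child") == 0) {
		/* Fresh exec => fresh layout; report one sample. */
		void *p = mmap(NULL, 4096, PROT_READ | PROT_EXEC,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		printf("%lx\n", (unsigned long)p);
		return 0;
	}

	unsigned long first = 0, diff = 0;
	for (int i = 0; i < SAMPLES; i++) {
		char cmd[256];
		unsigned long a;
		FILE *f;

		snprintf(cmd, sizeof(cmd), "%s child", argv[0]);
		f = popen(cmd, "r");		/* re-exec for a new layout */
		if (!f || fscanf(f, "%lx", &a) != 1)
			return 1;
		pclose(f);

		if (i == 0)
			first = a;
		diff |= a ^ first;		/* accumulate bits that changed */
	}
	printf("~%d bits of mmap PROT_EXEC randomisation\n",
	       __builtin_popcountl(diff));
	return 0;
}

paxtest does the equivalent with dedicated helper binaries; the "(guessed)" bit counts it reports are estimates of the same kind.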
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -397,6 +397,10 @@
 extern unsigned long
 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
 			unsigned long, unsigned long);
+
+extern unsigned long
+arch_get_unmapped_exec_area(struct file *, unsigned long, unsigned long,
+			unsigned long, unsigned long);
 extern unsigned long
 arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
 			unsigned long len, unsigned long pgoff,
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -28,6 +28,7 @@
 #include <linux/perf_event.h>
 #include <linux/audit.h>
 #include <linux/khugepaged.h>
+#include <linux/random.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
@@ -1000,7 +1001,8 @@
 	/* Obtain the address to map to. we verify (or select) it and ensure
 	 * that it represents a valid section of the address space.
 	 */
-	addr = get_unmapped_area(file, addr, len, pgoff, flags);
+	addr = get_unmapped_area_prot(file, addr, len, pgoff, flags,
+					prot & PROT_EXEC);
 	if (addr & ~PAGE_MASK)
 		return addr;
@@ -1552,8 +1554,8 @@
 }
 
 unsigned long
-get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
-		unsigned long pgoff, unsigned long flags)
+get_unmapped_area_prot(struct file *file, unsigned long addr, unsigned long len,
+		unsigned long pgoff, unsigned long flags, int exec)
 {
 	unsigned long (*get_area)(struct file *, unsigned long,
 				  unsigned long, unsigned long, unsigned long);
@@ -1566,7 +1568,11 @@
 	if (len > TASK_SIZE)
 		return -ENOMEM;
 
-	get_area = current->mm->get_unmapped_area;
+	if (exec && current->mm->get_unmapped_exec_area)
+		get_area = current->mm->get_unmapped_exec_area;
+	else
+		get_area = current->mm->get_unmapped_area;
+
 	if (file && file->f_op && file->f_op->get_unmapped_area)
 		get_area = file->f_op->get_unmapped_area;
 	addr = get_area(file, addr, len, pgoff, flags);
@@ -1580,8 +1586,83 @@
 
 	return arch_rebalance_pgtables(addr, len);
 }
+EXPORT_SYMBOL(get_unmapped_area_prot);
+
+static bool should_randomize(void)
+{
+	return (current->flags & PF_RANDOMIZE) &&
+		!(current->personality & ADDR_NO_RANDOMIZE);
+}
+
+#define SHLIB_BASE	0x00110000
+
+unsigned long
+arch_get_unmapped_exec_area(struct file *filp, unsigned long addr0,
+		unsigned long len0, unsigned long pgoff, unsigned long flags)
+{
+	unsigned long addr = addr0, len = len0;
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	unsigned long tmp;
+
+	if (len > TASK_SIZE)
+		return -ENOMEM;
+
+	if (flags & MAP_FIXED)
+		return addr;
+
+	if (!addr)
+		addr = !should_randomize() ? SHLIB_BASE :
+			randomize_range(SHLIB_BASE, 0x01000000, len);
+
+	if (addr) {
+		addr = PAGE_ALIGN(addr);
+		vma = find_vma(mm, addr);
+		if (TASK_SIZE - len >= addr &&
+		    (!vma || addr + len <= vma->vm_start))
+			return addr;
+	}
+
+	addr = SHLIB_BASE;
+	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+		/* At this point: (!vma || addr < vma->vm_end). */
+		if (TASK_SIZE - len < addr)
+			return -ENOMEM;
+
+		if (!vma || addr + len <= vma->vm_start) {
+			/*
+			 * Must not let a PROT_EXEC mapping get into the
+			 * brk area:
+			 */
+			if (addr + len > mm->brk)
+				goto failed;
+
+			/*
+			 * Up until the brk area we randomize addresses
+			 * as much as possible:
+			 */
+			if (addr >= 0x01000000 && should_randomize()) {
+				tmp = randomize_range(0x01000000,
					PAGE_ALIGN(max(mm->start_brk,
					(unsigned long)0x08000000)), len);
+				vma = find_vma(mm, tmp);
+				if (TASK_SIZE - len >= tmp &&
+				    (!vma || tmp + len <= vma->vm_start))
+					return tmp;
+			}
+			/*
+			 * Ok, randomization didn't work out - return
+			 * the result of the linear search:
+			 */
+			return addr;
+		}
+		addr = vma->vm_end;
+	}
+
+failed:
+	return current->mm->get_unmapped_area(filp, addr0, len0, pgoff, flags);
+}
 
-EXPORT_SYMBOL(get_unmapped_area);
 
 /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
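
The policy above is easy to observe from userspace. A minimal sketch, assuming a 32-bit kernel carrying this patch on a CPU without NX (on anything else it just prints ordinary top-down addresses): anonymous PROT_EXEC mappings should land in the low "ascii armor" region (below 0x01000000, so every such address contains a NUL byte and is awkward to smuggle in via string overflows), while PROT_READ|PROT_WRITE mappings keep using the normal mmap base:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	for (int i = 0; i < 4; i++) {
		void *x = mmap(NULL, 4096, PROT_READ | PROT_EXEC,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		void *w = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		printf("exec %p   data %p\n", x, w);
	}
	return 0;
}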
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -124,13 +124,19 @@ static unsigned long mmap_legacy_base(void)
  */
 void arch_pick_mmap_layout(struct mm_struct *mm)
 {
 	if (mmap_is_legacy()) {
 		mm->mmap_base = mmap_legacy_base();
 		mm->get_unmapped_area = arch_get_unmapped_area;
 		mm->unmap_area = arch_unmap_area;
 	} else {
 		mm->mmap_base = mmap_base();
 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+#ifdef CONFIG_X86_32
+		if (!(current->personality & READ_IMPLIES_EXEC)
+		    && !(__supported_pte_mask & _PAGE_NX)
+		    && mmap_is_ia32())
+			mm->get_unmapped_exec_area = arch_get_unmapped_exec_area;
+#endif
 		mm->unmap_area = arch_unmap_area_topdown;
 	}
 }
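
Note the hook is only installed when the CPU cannot enforce NX in hardware: the !(__supported_pte_mask & _PAGE_NX) test, plus a 32-bit task without the READ_IMPLIES_EXEC personality. A quick userspace check of the hardware half of that condition (a sketch; it merely parses the flags line the kernel exposes in /proc/cpuinfo):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[4096];
	FILE *f = fopen("/proc/cpuinfo", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		if (strncmp(line, "flags", 5))
			continue;
		int nx = 0;
		/* Flags are space-separated tokens; look for "nx". */
		for (char *t = strtok(line, " \t\n"); t; t = strtok(NULL, " \t\n"))
			if (!strcmp(t, "nx"))
				nx = 1;
		printf("nx flag %s\n", nx ? "present" : "absent");
		break;
	}
	fclose(f);
	return 0;
}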
--- a/arch/x86/vdso/vdso32-setup.c
+++ b/arch/x86/vdso/vdso32-setup.c
@@ -331,7 +331,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	if (compat)
 		addr = VDSO_HIGH_BASE;
 	else {
-		addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
+		addr = get_unmapped_area_prot(NULL, 0, PAGE_SIZE, 0, 0, 1);
 		if (IS_ERR_VALUE(addr)) {
 			ret = addr;
 			goto up_fail;
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1263,7 +1263,13 @@ extern int install_special_mapping(struct mm_struct *mm,
 				   unsigned long addr, unsigned long len,
 				   unsigned long flags, struct page **pages);
 
-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
+extern unsigned long get_unmapped_area_prot(struct file *, unsigned long, unsigned long, unsigned long, unsigned long, int);
+
+static inline unsigned long get_unmapped_area(struct file *file, unsigned long addr,
+		unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+	return get_unmapped_area_prot(file, addr, len, pgoff, flags, 0);
+}
 
 extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 	unsigned long len, unsigned long prot,
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -227,6 +227,9 @@ struct mm_struct {
 	unsigned long (*get_unmapped_area) (struct file *filp,
 				unsigned long addr, unsigned long len,
 				unsigned long pgoff, unsigned long flags);
+	unsigned long (*get_unmapped_exec_area) (struct file *filp,
+				unsigned long addr, unsigned long len,
+				unsigned long pgoff, unsigned long flags);
 	void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
 #endif
 	unsigned long mmap_base;		/* base of mmap area */
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -487,10 +487,10 @@ unsigned long do_mremap(unsigned long addr,
 		if (vma->vm_flags & VM_MAYSHARE)
 			map_flags |= MAP_SHARED;
 
-		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
+		new_addr = get_unmapped_area_prot(vma->vm_file, 0, new_len,
 					vma->vm_pgoff +
 					((addr - vma->vm_start) >> PAGE_SHIFT),
-					map_flags);
+					map_flags, vma->vm_flags & VM_EXEC);
 		if (new_addr & ~PAGE_MASK) {
 			ret = new_addr;
 			goto out;
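
Passing VM_EXEC here matters when a mapping has to move: if in-place growth is blocked, mremap() relocates the VMA, and without the exec bit the new home would be picked by the ordinary top-down allocator, letting an executable mapping escape the exec area. A hedged userspace sketch of that move path:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	void *p = mmap(NULL, 4096, PROT_READ | PROT_EXEC,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	/* Occupy the next page so mremap() cannot grow in place. */
	mmap((char *)p + 4096, 4096, PROT_NONE,
	     MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

	void *q = mremap(p, 4096, 2 * 4096, MREMAP_MAYMOVE);
	printf("moved %p -> %p\n", p, q);
	return 0;
}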
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 57d1868..29c0c35 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -669,6 +669,16 @@ unsigned long arch_align_stack(unsigned long sp)
 unsigned long arch_randomize_brk(struct mm_struct *mm)
 {
 	unsigned long range_end = mm->brk + 0x02000000;
-	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+	unsigned long bump = 0;
+#ifdef CONFIG_X86_32
+	/* In the case of NX emulation, shove the brk segment way out of the
+	   way of the exec randomization area, since it can collide with
+	   future allocations if not. */
+	if ((mm->get_unmapped_exec_area == arch_get_unmapped_exec_area) &&
+	    (mm->brk < 0x08000000)) {
+		bump = (TASK_SIZE/6);
+	}
+#endif
+	return bump + (randomize_range(mm->brk, range_end, 0) ? : mm->brk);
 }
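
For scale, a back-of-the-envelope check of the bump, assuming the usual 32-bit TASK_SIZE of 0xC0000000 (3 GiB):

	bump = TASK_SIZE / 6 = 0xC0000000 / 6 = 0x20000000   (512 MiB)

So a brk that started below 0x08000000 (the top of the exec randomization window) is relocated to 0x20000000 or above, comfortably clear of future PROT_EXEC placements.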