Merge branch 'fixes' of http://ftp.arm.linux.org.uk/pub/linux/arm/kernel/git-cur...
[linux-2.6/kvm.git] / arch / x86 / vdso / vma.c
blob316fbca3490e98afe758d7a4e535eaecd4482a8f
1 /*
2 * Set up the VMAs to tell the VM about the vDSO.
3 * Copyright 2007 Andi Kleen, SUSE Labs.
4 * Subject to the GPL, v.2
5 */
6 #include <linux/mm.h>
7 #include <linux/err.h>
8 #include <linux/sched.h>
9 #include <linux/slab.h>
10 #include <linux/init.h>
11 #include <linux/random.h>
12 #include <linux/elf.h>
13 #include <asm/vsyscall.h>
14 #include <asm/vgtod.h>
15 #include <asm/proto.h>
16 #include <asm/vdso.h>
17 #include <asm/page.h>
/* Runtime knob: nonzero means new processes get the vDSO mapped
 * (toggled by the "vdso=" command line option, see vdso_setup()). */
unsigned int __read_mostly vdso_enabled = 1;

/* Start/end of the vDSO ELF image; symbols provided by the linker script. */
extern char vdso_start[], vdso_end[];
extern unsigned short vdso_sync_cpuid;

/* Page array backing the vDSO mapping; filled in by init_vdso(). */
extern struct page *vdso_pages[];
/* Image size rounded up to whole pages; computed once in init_vdso(). */
static unsigned vdso_size;
27 static void __init patch_vdso(void *vdso, size_t len)
29 Elf64_Ehdr *hdr = vdso;
30 Elf64_Shdr *sechdrs, *alt_sec = 0;
31 char *secstrings;
32 void *alt_data;
33 int i;
35 BUG_ON(len < sizeof(Elf64_Ehdr));
36 BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);
38 sechdrs = (void *)hdr + hdr->e_shoff;
39 secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
41 for (i = 1; i < hdr->e_shnum; i++) {
42 Elf64_Shdr *shdr = &sechdrs[i];
43 if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
44 alt_sec = shdr;
45 goto found;
49 /* If we get here, it's probably a bug. */
50 pr_warning("patch_vdso: .altinstructions not found\n");
51 return; /* nothing to patch */
53 found:
54 alt_data = (void *)hdr + alt_sec->sh_offset;
55 apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
58 static int __init init_vdso(void)
60 int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
61 int i;
63 patch_vdso(vdso_start, vdso_end - vdso_start);
65 vdso_size = npages << PAGE_SHIFT;
66 for (i = 0; i < npages; i++)
67 vdso_pages[i] = virt_to_page(vdso_start + i*PAGE_SIZE);
69 return 0;
71 subsys_initcall(init_vdso);
73 struct linux_binprm;
75 /* Put the vdso above the (randomized) stack with another randomized offset.
76 This way there is no hole in the middle of address space.
77 To save memory make sure it is still in the same PTE as the stack top.
78 This doesn't give that many random bits */
79 static unsigned long vdso_addr(unsigned long start, unsigned len)
81 unsigned long addr, end;
82 unsigned offset;
83 end = (start + PMD_SIZE - 1) & PMD_MASK;
84 if (end >= TASK_SIZE_MAX)
85 end = TASK_SIZE_MAX;
86 end -= len;
87 /* This loses some more bits than a modulo, but is cheaper */
88 offset = get_random_int() & (PTRS_PER_PTE - 1);
89 addr = start + (offset << PAGE_SHIFT);
90 if (addr >= end)
91 addr = end;
92 return addr;
95 /* Setup a VMA at program startup for the vsyscall page.
96 Not called for compat tasks */
97 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
99 struct mm_struct *mm = current->mm;
100 unsigned long addr;
101 int ret;
103 if (!vdso_enabled)
104 return 0;
106 down_write(&mm->mmap_sem);
107 addr = vdso_addr(mm->start_stack, vdso_size);
108 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
109 if (IS_ERR_VALUE(addr)) {
110 ret = addr;
111 goto up_fail;
114 current->mm->context.vdso = (void *)addr;
116 ret = install_special_mapping(mm, addr, vdso_size,
117 VM_READ|VM_EXEC|
118 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
119 VM_ALWAYSDUMP,
120 vdso_pages);
121 if (ret) {
122 current->mm->context.vdso = NULL;
123 goto up_fail;
126 up_fail:
127 up_write(&mm->mmap_sem);
128 return ret;
131 static __init int vdso_setup(char *s)
133 vdso_enabled = simple_strtoul(s, NULL, 0);
134 return 0;
136 __setup("vdso=", vdso_setup);