arch/s390/kernel/vdso.c

/*
 * vdso setup for s390
 *
 * Copyright IBM Corp. 2008
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/bootmem.h>

#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/vdso.h>

#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
extern char vdso32_start, vdso32_end;
static void *vdso32_kbase = &vdso32_start;
static unsigned int vdso32_pages;
static struct page **vdso32_pagelist;
#endif

#ifdef CONFIG_64BIT
extern char vdso64_start, vdso64_end;
static void *vdso64_kbase = &vdso64_start;
static unsigned int vdso64_pages;
static struct page **vdso64_pagelist;
#endif /* CONFIG_64BIT */

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = 1;

static int __init vdso_setup(char *s)
{
        vdso_enabled = simple_strtoul(s, NULL, 0);
        return 1;
}
__setup("vdso=", vdso_setup);

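/*
 * Usage note: "vdso=" accepts a plain number, so booting with "vdso=0"
 * clears vdso_enabled, which skips the page list setup in vdso_init()
 * below and makes arch_setup_additional_pages() a no-op for every exec.
 */
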
/*
 * The vdso data page
 */
static union {
        struct vdso_data        data;
        u8                      page[PAGE_SIZE];
} vdso_data_store __attribute__((__section__(".data.page_aligned")));
struct vdso_data *vdso_data = &vdso_data_store.data;

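/*
 * The union pads vdso_data out to a full, page-aligned page: vdso_init()
 * appends virt_to_page(vdso_data) as the last entry of the vDSO page
 * lists, so this page is mapped read-only into every process while the
 * kernel keeps updating it through its writable kernel mapping.
 */
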
/*
 * Setup vdso data page.
 */
static void vdso_init_data(struct vdso_data *vd)
{
        unsigned int facility_list;

        facility_list = stfl();
        vd->ectg_available = switch_amode && (facility_list & 1);
}

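/*
 * stfl() returns the first word of the installed-facility bits; the low
 * bit tested here is, as the field name suggests, the availability bit
 * for the ECTG (extract CPU time) instruction, which the 64 bit vDSO code
 * can use to read per-thread CPU time without a system call. It is only
 * advertised when switch_amode is active, because that path relies on the
 * per-CPU vDSO data set up in vdso_alloc_per_cpu() below.
 */
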
#ifdef CONFIG_64BIT
/*
 * Setup per cpu vdso data page.
 */
static void vdso_init_per_cpu_data(int cpu, struct vdso_per_cpu_data *vpcd)
{
}

/*
 * Allocate/free per cpu vdso data.
 */
#ifdef CONFIG_64BIT
#define SEGMENT_ORDER   2
#else
#define SEGMENT_ORDER   1
#endif

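/*
 * vdso_alloc_per_cpu() below builds a tiny private address space per CPU:
 * a segment table whose first entry points to a single page table, whose
 * first entry in turn maps one zeroed page (the per-CPU vDSO data) as
 * read-only. The access-list words stored behind the page table entries
 * and the "paste" array in the lowcore let the vDSO reach that page via
 * access-register translation while user space runs in a separate
 * address space (switch_amode).
 */
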
int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore)
{
        unsigned long segment_table, page_table, page_frame;
        u32 *psal, *aste;
        int i;

        lowcore->vdso_per_cpu_data = __LC_PASTE;

        if (!switch_amode || !vdso_enabled)
                return 0;

        segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
        page_table = get_zeroed_page(GFP_KERNEL | GFP_DMA);
        page_frame = get_zeroed_page(GFP_KERNEL);
        if (!segment_table || !page_table || !page_frame)
                goto out;

        clear_table((unsigned long *) segment_table, _SEGMENT_ENTRY_EMPTY,
                    PAGE_SIZE << SEGMENT_ORDER);
        clear_table((unsigned long *) page_table, _PAGE_TYPE_EMPTY,
                    256*sizeof(unsigned long));

        *(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
        *(unsigned long *) page_table = _PAGE_RO + page_frame;

        psal = (u32 *) (page_table + 256*sizeof(unsigned long));
        aste = psal + 32;

        for (i = 4; i < 32; i += 4)
                psal[i] = 0x80000000;

        lowcore->paste[4] = (u32)(addr_t) psal;
        psal[0] = 0x20000000;
        psal[2] = (u32)(addr_t) aste;
        *(unsigned long *) (aste + 2) = segment_table +
                _ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT;
        aste[4] = (u32)(addr_t) psal;
        lowcore->vdso_per_cpu_data = page_frame;

        vdso_init_per_cpu_data(cpu, (struct vdso_per_cpu_data *) page_frame);
        return 0;

out:
        free_page(page_frame);
        free_page(page_table);
        free_pages(segment_table, SEGMENT_ORDER);
        return -ENOMEM;
}

void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore)
{
        unsigned long segment_table, page_table, page_frame;
        u32 *psal, *aste;

        if (!switch_amode || !vdso_enabled)
                return;

        psal = (u32 *)(addr_t) lowcore->paste[4];
        aste = (u32 *)(addr_t) psal[2];
        segment_table = *(unsigned long *)(aste + 2) & PAGE_MASK;
        page_table = *(unsigned long *) segment_table;
        page_frame = *(unsigned long *) page_table;

        free_page(page_frame);
        free_page(page_table);
        free_pages(segment_table, SEGMENT_ORDER);
}

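/*
 * The two helpers below point control register 5 at the lowcore "paste"
 * array on every CPU. With CR5 set up this way, access-register
 * translation of the vDSO's per-CPU accesses ends up at the access-list
 * and segment tables built in vdso_alloc_per_cpu(). This is only done
 * when the kernel runs with a switched address-space mode and the vDSO
 * is enabled, mirroring the checks in the allocation path.
 */
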
static void __vdso_init_cr5(void *dummy)
{
        unsigned long cr5;

        cr5 = offsetof(struct _lowcore, paste);
        __ctl_load(cr5, 5, 5);
}

static void vdso_init_cr5(void)
{
        if (switch_amode && vdso_enabled)
                on_each_cpu(__vdso_init_cr5, NULL, 1);
}
#endif /* CONFIG_64BIT */

/*
 * This is called from binfmt_elf, we create the special vma for the
 * vDSO and insert it into the mm struct tree
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        struct mm_struct *mm = current->mm;
        struct page **vdso_pagelist;
        unsigned long vdso_pages;
        unsigned long vdso_base;
        int rc;

        if (!vdso_enabled)
                return 0;
        /*
         * Only map the vdso for dynamically linked elf binaries.
         */
        if (!uses_interp)
                return 0;

        vdso_base = mm->mmap_base;
#ifdef CONFIG_64BIT
        vdso_pagelist = vdso64_pagelist;
        vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT
        if (test_thread_flag(TIF_31BIT)) {
                vdso_pagelist = vdso32_pagelist;
                vdso_pages = vdso32_pages;
        }
#endif
#else
        vdso_pagelist = vdso32_pagelist;
        vdso_pages = vdso32_pages;
#endif

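        /*
         * From here on vdso_pagelist/vdso_pages describe the 64 bit vDSO,
         * except for 31 bit compat tasks (TIF_31BIT set), which get the
         * 32 bit image instead; a pure 32 bit kernel always uses the
         * 32 bit image.
         */
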
        /*
         * vDSO has a problem and was disabled, just don't "enable" it for
         * the process
         */
        if (vdso_pages == 0)
                return 0;

        current->mm->context.vdso_base = 0;

        /*
         * pick a base address for the vDSO in process space. We try to put
         * it at vdso_base which is the "natural" base for it, but we might
         * fail and end up putting it elsewhere.
         */
        down_write(&mm->mmap_sem);
        vdso_base = get_unmapped_area(NULL, vdso_base,
                                      vdso_pages << PAGE_SHIFT, 0, 0);
        if (IS_ERR_VALUE(vdso_base)) {
                rc = vdso_base;
                goto out_up;
        }

        /*
         * our vma flags don't have VM_WRITE so by default, the process
         * isn't allowed to write those pages.
         * gdb can break that with ptrace interface, and thus trigger COW
         * on those pages but it's then your responsibility to never do that
         * on the "data" page of the vDSO or you'll stop getting kernel
         * updates and your nice userland gettimeofday will be totally dead.
         * It's fine to use that for setting breakpoints in the vDSO code
         * pages though.
         *
         * Make sure the vDSO gets into every core dump.
         * Dumping its contents makes post-mortem fully interpretable later
         * without matching up the same kernel and hardware config to see
         * what PC values meant.
         */
        rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
                                     VM_READ|VM_EXEC|
                                     VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
                                     VM_ALWAYSDUMP,
                                     vdso_pagelist);
        if (rc)
                goto out_up;

        /* Put vDSO base into mm struct */
        current->mm->context.vdso_base = vdso_base;

        up_write(&mm->mmap_sem);
        return 0;

out_up:
        up_write(&mm->mmap_sem);
        return rc;
}

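/*
 * How user space finds the mapping: the base saved in context.vdso_base
 * is handed to the new program through the ELF auxiliary vector
 * (AT_SYSINFO_EHDR, via the arch's ARCH_DLINFO definition), and glibc
 * resolves the vDSO symbols from that in-memory ELF image. A rough
 * user-space sketch with modern glibc, illustrative names only:
 *
 *      Elf64_Ehdr *vdso_ehdr =
 *              (Elf64_Ehdr *) getauxval(AT_SYSINFO_EHDR);
 */
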
const char *arch_vma_name(struct vm_area_struct *vma)
{
        if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base)
                return "[vdso]";
        return NULL;
}

static int __init vdso_init(void)
{
        int i;

        if (!vdso_enabled)
                return 0;
        vdso_init_data(vdso_data);
#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
        /* Calculate the size of the 32 bit vDSO */
        vdso32_pages = ((&vdso32_end - &vdso32_start
                         + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

        /* Make sure pages are in the correct state */
        vdso32_pagelist = kzalloc(sizeof(struct page *) * (vdso32_pages + 1),
                                  GFP_KERNEL);
        BUG_ON(vdso32_pagelist == NULL);
        for (i = 0; i < vdso32_pages - 1; i++) {
                struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
                ClearPageReserved(pg);
                get_page(pg);
                vdso32_pagelist[i] = pg;
        }
        vdso32_pagelist[vdso32_pages - 1] = virt_to_page(vdso_data);
        vdso32_pagelist[vdso32_pages] = NULL;
#endif

#ifdef CONFIG_64BIT
        /* Calculate the size of the 64 bit vDSO */
        vdso64_pages = ((&vdso64_end - &vdso64_start
                         + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

        /* Make sure pages are in the correct state */
        vdso64_pagelist = kzalloc(sizeof(struct page *) * (vdso64_pages + 1),
                                  GFP_KERNEL);
        BUG_ON(vdso64_pagelist == NULL);
        for (i = 0; i < vdso64_pages - 1; i++) {
                struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
                ClearPageReserved(pg);
                get_page(pg);
                vdso64_pagelist[i] = pg;
        }
        vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
        vdso64_pagelist[vdso64_pages] = NULL;
#ifndef CONFIG_SMP
        if (vdso_alloc_per_cpu(0, &S390_lowcore))
                BUG();
#endif
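        /*
         * On SMP kernels the per-CPU vDSO data is expected to be set up
         * together with each CPU's lowcore in the SMP bring-up code, which
         * is why only the !CONFIG_SMP case calls vdso_alloc_per_cpu() here
         * for the boot CPU's S390_lowcore.
         */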
        vdso_init_cr5();
#endif /* CONFIG_64BIT */

        get_page(virt_to_page(vdso_data));

        smp_wmb();

        return 0;
}
arch_initcall(vdso_init);

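/*
 * s390 has no separate gate area or vsyscall page, so the generic
 * gate-area hooks below simply report that nothing is mapped there; the
 * vDSO is covered by the special mapping installed above instead.
 */
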
int in_gate_area_no_task(unsigned long addr)
{
        return 0;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
        return 0;
}

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
        return NULL;
}