arch/x86/kernel/sysenter_32.c
/*
 * (C) Copyright 2002 Linus Torvalds
 * Portions based on the vdso-randomization code from exec-shield:
 * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
 *
 * This file contains the needed initializations to support sysenter.
 */
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/thread_info.h>
#include <linux/sched.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/elf.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/module.h>
#include <asm/cpufeature.h>
#include <asm/msr.h>
#include <asm/pgtable.h>
#include <asm/unistd.h>
#include <asm/elf.h>
#include <asm/tlbflush.h>
enum {
        VDSO_DISABLED = 0,
        VDSO_ENABLED = 1,
        VDSO_COMPAT = 2,
};

#ifdef CONFIG_COMPAT_VDSO
#define VDSO_DEFAULT    VDSO_COMPAT
#else
#define VDSO_DEFAULT    VDSO_ENABLED
#endif
/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = VDSO_DEFAULT;

EXPORT_SYMBOL_GPL(vdso_enabled);
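/*
 * Parse the "vdso=" boot parameter: 0 disables the vDSO, 1 maps it at
 * a randomized address, 2 pins it at the legacy fixmap address.
 */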
static int __init vdso_setup(char *s)
{
        vdso_enabled = simple_strtoul(s, NULL, 0);

        return 1;
}

__setup("vdso=", vdso_setup);
extern asmlinkage void sysenter_entry(void);
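/*
 * Rebase every defined symbol in an ELF32 symbol table of the vDSO
 * image by VDSO_HIGH_BASE, skipping undefined and absolute symbols.
 */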
static __init void reloc_symtab(Elf32_Ehdr *ehdr,
                                unsigned offset, unsigned size)
{
        Elf32_Sym *sym = (void *)ehdr + offset;
        unsigned nsym = size / sizeof(*sym);
        unsigned i;

        for (i = 0; i < nsym; i++, sym++) {
                if (sym->st_shndx == SHN_UNDEF ||
                    sym->st_shndx == SHN_ABS)
                        continue;  /* skip */

                if (sym->st_shndx > SHN_LORESERVE) {
                        printk(KERN_INFO "VDSO: unexpected st_shndx %x\n",
                               sym->st_shndx);
                        continue;
                }

                switch (ELF_ST_TYPE(sym->st_info)) {
                case STT_OBJECT:
                case STT_FUNC:
                case STT_SECTION:
                case STT_FILE:
                        sym->st_value += VDSO_HIGH_BASE;
                }
        }
}
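/*
 * Rebase the pointer-valued entries of the PT_DYNAMIC segment.  Which
 * d_un values are addresses depends on d_tag, so tags are classified
 * explicitly and anything unrecognized is logged.
 */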
static __init void reloc_dyn(Elf32_Ehdr *ehdr, unsigned offset)
{
        Elf32_Dyn *dyn = (void *)ehdr + offset;

        for (; dyn->d_tag != DT_NULL; dyn++)
                switch (dyn->d_tag) {
                case DT_PLTGOT:
                case DT_HASH:
                case DT_STRTAB:
                case DT_SYMTAB:
                case DT_RELA:
                case DT_INIT:
                case DT_FINI:
                case DT_REL:
                case DT_DEBUG:
                case DT_JMPREL:
                case DT_VERSYM:
                case DT_VERDEF:
                case DT_VERNEED:
                case DT_ADDRRNGLO ... DT_ADDRRNGHI:
                        /* definitely pointers needing relocation */
                        dyn->d_un.d_ptr += VDSO_HIGH_BASE;
                        break;

                case DT_ENCODING ... OLD_DT_LOOS-1:
                case DT_LOOS ... DT_HIOS-1:
                        /* Tags above DT_ENCODING are pointers if
                           they're even */
                        if (dyn->d_tag >= DT_ENCODING &&
                            (dyn->d_tag & 1) == 0)
                                dyn->d_un.d_ptr += VDSO_HIGH_BASE;
                        break;

                case DT_VERDEFNUM:
                case DT_VERNEEDNUM:
                case DT_FLAGS_1:
                case DT_RELACOUNT:
                case DT_RELCOUNT:
                case DT_VALRNGLO ... DT_VALRNGHI:
                        /* definitely not pointers */
                        break;

                case OLD_DT_LOOS ... DT_LOOS-1:
                case DT_HIOS ... DT_VALRNGLO-1:
                default:
                        if (dyn->d_tag > DT_ENCODING)
                                printk(KERN_INFO "VDSO: unexpected DT_tag %x\n",
                                       dyn->d_tag);
                        break;
                }
}
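/*
 * Sanity-check the embedded vDSO image and rebase its entry point,
 * program headers, allocated sections, dynamic tags and symbols so it
 * is valid at VDSO_HIGH_BASE rather than its link-time address.
 */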
static __init void relocate_vdso(Elf32_Ehdr *ehdr)
{
        Elf32_Phdr *phdr;
        Elf32_Shdr *shdr;
        int i;

        BUG_ON(memcmp(ehdr->e_ident, ELFMAG, 4) != 0 ||
               !elf_check_arch(ehdr) ||
               ehdr->e_type != ET_DYN);

        ehdr->e_entry += VDSO_HIGH_BASE;

        /* rebase phdrs */
        phdr = (void *)ehdr + ehdr->e_phoff;
        for (i = 0; i < ehdr->e_phnum; i++) {
                phdr[i].p_vaddr += VDSO_HIGH_BASE;

                /* relocate dynamic stuff */
                if (phdr[i].p_type == PT_DYNAMIC)
                        reloc_dyn(ehdr, phdr[i].p_offset);
        }

        /* rebase sections */
        shdr = (void *)ehdr + ehdr->e_shoff;
        for (i = 0; i < ehdr->e_shnum; i++) {
                if (!(shdr[i].sh_flags & SHF_ALLOC))
                        continue;

                shdr[i].sh_addr += VDSO_HIGH_BASE;

                if (shdr[i].sh_type == SHT_SYMTAB ||
                    shdr[i].sh_type == SHT_DYNSYM)
                        reloc_symtab(ehdr, shdr[i].sh_offset,
                                     shdr[i].sh_size);
        }
}
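/*
 * Program the SYSENTER MSRs on the current CPU so that the sysenter
 * instruction enters the kernel at sysenter_entry with the stack
 * taken from the TSS.  A no-op on CPUs without the SEP feature.
 */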
void enable_sep_cpu(void)
{
        int cpu = get_cpu();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);

        if (!boot_cpu_has(X86_FEATURE_SEP)) {
                put_cpu();
                return;
        }

        tss->x86_tss.ss1 = __KERNEL_CS;
        tss->x86_tss.esp1 = sizeof(struct tss_struct) + (unsigned long) tss;
        wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
        wrmsr(MSR_IA32_SYSENTER_ESP, tss->x86_tss.esp1, 0);
        wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) sysenter_entry, 0);
        put_cpu();
}
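/*
 * Static "gate" VMA describing the fixmap-mapped compat vDSO; it is
 * not tied to any mm and is only reported for tasks whose vDSO sits
 * at the fixed compat address.
 */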
static struct vm_area_struct gate_vma;

static int __init gate_vma_init(void)
{
        gate_vma.vm_mm = NULL;
        gate_vma.vm_start = FIXADDR_USER_START;
        gate_vma.vm_end = FIXADDR_USER_END;
        gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
        gate_vma.vm_page_prot = __P101;
        /*
         * Make sure the vDSO gets into every core dump.
         * Dumping its contents makes post-mortem fully interpretable later
         * without matching up the same kernel and hardware config to see
         * what PC values meant.
         */
        gate_vma.vm_flags |= VM_ALWAYSDUMP;
        return 0;
}
/*
 * These symbols are defined by vsyscall.o to mark the bounds
 * of the ELF DSO images included therein.
 */
extern const char vsyscall_int80_start, vsyscall_int80_end;
extern const char vsyscall_sysenter_start, vsyscall_sysenter_end;
static struct page *syscall_pages[1];
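/*
 * Map or unmap the compat vDSO at its fixmap address by flipping the
 * fixmap PTE between PAGE_READONLY_EXEC and PAGE_NONE, then flush the
 * TLB everywhere so no stale translation survives.
 */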
static void map_compat_vdso(int map)
{
        static int vdso_mapped;

        if (map == vdso_mapped)
                return;

        vdso_mapped = map;

        __set_fixmap(FIX_VDSO, page_to_pfn(syscall_pages[0]) << PAGE_SHIFT,
                     map ? PAGE_READONLY_EXEC : PAGE_NONE);

        /* flush stray tlbs */
        flush_tlb_all();
}
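/*
 * Build the vDSO image at boot: copy in the sysenter-based DSO when
 * the CPU supports SEP, the int80-based one otherwise, and relocate
 * the result for the high fixmap address.
 */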
int __init sysenter_setup(void)
{
        void *syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
        const void *vsyscall;
        size_t vsyscall_len;

        syscall_pages[0] = virt_to_page(syscall_page);

        gate_vma_init();

        printk("Compat vDSO mapped to %08lx.\n", __fix_to_virt(FIX_VDSO));

        if (!boot_cpu_has(X86_FEATURE_SEP)) {
                vsyscall = &vsyscall_int80_start;
                vsyscall_len = &vsyscall_int80_end - &vsyscall_int80_start;
        } else {
                vsyscall = &vsyscall_sysenter_start;
                vsyscall_len = &vsyscall_sysenter_end - &vsyscall_sysenter_start;
        }

        memcpy(syscall_page, vsyscall, vsyscall_len);
        relocate_vdso(syscall_page);

        return 0;
}
/* Defined in vsyscall-sysenter.S */
extern void SYSENTER_RETURN;
/* Setup a VMA at program startup for the vsyscall page */
int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr;
        int ret = 0;
        bool compat;

        down_write(&mm->mmap_sem);

        /* Test compat mode once here, in case someone
           changes it via sysctl */
        compat = (vdso_enabled == VDSO_COMPAT);

        map_compat_vdso(compat);

        if (compat)
                addr = VDSO_HIGH_BASE;
        else {
                addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
                if (IS_ERR_VALUE(addr)) {
                        ret = addr;
                        goto up_fail;
                }

                /*
                 * MAYWRITE to allow gdb to COW and set breakpoints
                 *
                 * Make sure the vDSO gets into every core dump.
                 * Dumping its contents makes post-mortem fully
                 * interpretable later without matching up the same
                 * kernel and hardware config to see what PC values
                 * meant.
                 */
                ret = install_special_mapping(mm, addr, PAGE_SIZE,
                                              VM_READ|VM_EXEC|
                                              VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
                                              VM_ALWAYSDUMP,
                                              syscall_pages);

                if (ret)
                        goto up_fail;
        }

        current->mm->context.vdso = (void *)addr;
        current_thread_info()->sysenter_return =
                (void *)VDSO_SYM(&SYSENTER_RETURN);

up_fail:
        up_write(&mm->mmap_sem);

        return ret;
}
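/* Report the vDSO mapping as "[vdso]" in /proc/<pid>/maps. */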
const char *arch_vma_name(struct vm_area_struct *vma)
{
        if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
                return "[vdso]";
        return NULL;
}
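/*
 * The gate VMA exists only for tasks whose vDSO was pinned at the
 * compat fixmap address; randomized vDSOs are ordinary VMAs and need
 * no gate.
 */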
struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
        struct mm_struct *mm = tsk->mm;

        /* Check to see if this task was created in compat vdso mode */
        if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
                return &gate_vma;
        return NULL;
}
int in_gate_area(struct task_struct *task, unsigned long addr)
{
        const struct vm_area_struct *vma = get_gate_vma(task);

        return vma && addr >= vma->vm_start && addr < vma->vm_end;
}
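/*
 * Task-less variant: without an mm we cannot tell whether the compat
 * vDSO is mapped, so report no gate area.
 */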
int in_gate_area_no_task(unsigned long addr)
{
        return 0;
}