/*
 * linux/fs/binfmt_elf.c
 *
 * These are the functions used to load ELF format executables as used
 * on SVr4 machines.  Information on the format may be found in the book
 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
 * Tools".
 *
 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/a.out.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/binfmts.h>
#include <linux/string.h>
#include <linux/file.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/personality.h>
#include <linux/elfcore.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/smp.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/utsname.h>
#include <asm/uaccess.h>
#include <asm/param.h>
#include <asm/page.h>

static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs);
static int load_elf_library(struct file *);
static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
				int, int, unsigned long);

/*
 * If we don't support core dumping, then supply a NULL so we
 * don't even try.
 */
#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
#else
#define elf_core_dump	NULL
#endif

#if ELF_EXEC_PAGESIZE > PAGE_SIZE
#define ELF_MIN_ALIGN	ELF_EXEC_PAGESIZE
#else
#define ELF_MIN_ALIGN	PAGE_SIZE
#endif

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS	0
#endif
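
/*
 * ELF_PAGESTART rounds an address down to an ELF page boundary,
 * ELF_PAGEOFFSET extracts the offset within that page, and
 * ELF_PAGEALIGN rounds up.  For example, with a 4K ELF page size,
 * ELF_PAGESTART(0x08048123) == 0x08048000, ELF_PAGEOFFSET(0x08048123)
 * == 0x123, and ELF_PAGEALIGN(0x08048123) == 0x08049000.
 */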
#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))

static struct linux_binfmt elf_format = {
	.module		= THIS_MODULE,
	.load_binary	= load_elf_binary,
	.load_shlib	= load_elf_library,
	.core_dump	= elf_core_dump,
	.min_coredump	= ELF_EXEC_PAGESIZE,
	.hasvdso	= 1
};

#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)

static int set_brk(unsigned long start, unsigned long end)
{
	start = ELF_PAGEALIGN(start);
	end = ELF_PAGEALIGN(end);
	if (end > start) {
		unsigned long addr;
		down_write(&current->mm->mmap_sem);
		addr = do_brk(start, end - start);
		up_write(&current->mm->mmap_sem);
		if (BAD_ADDR(addr))
			return addr;
	}
	current->mm->start_brk = current->mm->brk = end;
	return 0;
}

/* We need to explicitly zero any fractional pages
   after the data section (i.e. bss).  This would
   contain the junk from the file that should not
   be in memory
 */
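
/*
 * For example, with ELF_MIN_ALIGN == 4096, padzero(0x0804a123) clears the
 * 0xedd bytes from 0x0804a123 up to the 0x0804b000 page boundary.
 */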
static int padzero(unsigned long elf_bss)
{
	unsigned long nbyte;

	nbyte = ELF_PAGEOFFSET(elf_bss);
	if (nbyte) {
		nbyte = ELF_MIN_ALIGN - nbyte;
		if (clear_user((void __user *) elf_bss, nbyte))
			return -EFAULT;
	}
	return 0;
}

/* Let's use some macros to make this stack manipulation a little clearer */
#ifdef CONFIG_STACK_GROWSUP
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
#define STACK_ROUND(sp, items) \
	((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ \
	elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \
	old_sp; })
#else
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
#define STACK_ROUND(sp, items) \
	(((unsigned long) (sp - items)) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
#endif
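
/*
 * On a downward-growing stack, STACK_ALLOC() decrements sp by len and
 * returns the new (lower) address, so the object lives at the returned
 * pointer; with CONFIG_STACK_GROWSUP the old sp is returned instead and
 * sp advances past the object.  STACK_ROUND keeps the final stack
 * pointer 16-byte aligned, as the common ABIs expect at process entry.
 */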
static int
create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
		int interp_aout, unsigned long load_addr,
		unsigned long interp_load_addr)
{
	unsigned long p = bprm->p;
	int argc = bprm->argc;
	int envc = bprm->envc;
	elf_addr_t __user *argv;
	elf_addr_t __user *envp;
	elf_addr_t __user *sp;
	elf_addr_t __user *u_platform;
	const char *k_platform = ELF_PLATFORM;
	int items;
	elf_addr_t *elf_info;
	int ei_index = 0;
	struct task_struct *tsk = current;
	struct vm_area_struct *vma;

	/*
	 * In some cases (e.g. Hyper-Threading), we want to avoid L1
	 * evictions by the processes running on the same package. One
	 * thing we can do is to shuffle the initial stack for them.
	 */

	p = arch_align_stack(p);

	/*
	 * If this architecture has a platform capability string, copy it
	 * to userspace.  In some cases (Sparc), this info is impossible
	 * for userspace to get any other way, in others (i386) it is
	 * merely difficult.
	 */
	u_platform = NULL;
	if (k_platform) {
		size_t len = strlen(k_platform) + 1;

		u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (__copy_to_user(u_platform, k_platform, len))
			return -EFAULT;
	}

	/* Create the ELF interpreter info */
	elf_info = (elf_addr_t *)current->mm->saved_auxv;
	/* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */
#define NEW_AUX_ENT(id, val) \
	do { \
		elf_info[ei_index++] = id; \
		elf_info[ei_index++] = val; \
	} while (0)

#ifdef ARCH_DLINFO
	/*
	 * ARCH_DLINFO must come first so PPC can do its special alignment of
	 * AUXV.
	 * update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT() in
	 * ARCH_DLINFO changes
	 */
	ARCH_DLINFO;
#endif
	NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
	NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
	NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
	NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
	NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
	NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
	NEW_AUX_ENT(AT_BASE, interp_load_addr);
	NEW_AUX_ENT(AT_FLAGS, 0);
	NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
	NEW_AUX_ENT(AT_UID, tsk->uid);
	NEW_AUX_ENT(AT_EUID, tsk->euid);
	NEW_AUX_ENT(AT_GID, tsk->gid);
	NEW_AUX_ENT(AT_EGID, tsk->egid);
	NEW_AUX_ENT(AT_SECURE, security_bprm_secureexec(bprm));
	if (k_platform) {
		NEW_AUX_ENT(AT_PLATFORM,
			    (elf_addr_t)(unsigned long)u_platform);
	}
	if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
		NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
	}
#undef NEW_AUX_ENT
	/* AT_NULL is zero; clear the rest too */
	memset(&elf_info[ei_index], 0,
	       sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);

	/* And advance past the AT_NULL entry.  */
	ei_index += 2;

	sp = STACK_ADD(p, ei_index);

	items = (argc + 1) + (envc + 1);
	if (interp_aout) {
		items += 3; /* a.out interpreters require argv & envp too */
	} else {
		items += 1; /* ELF interpreters only put argc on the stack */
	}
	bprm->p = STACK_ROUND(sp, items);

	/* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
	sp = (elf_addr_t __user *)bprm->p - items - ei_index;
	bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
#else
	sp = (elf_addr_t __user *)bprm->p;
#endif


	/*
	 * Grow the stack manually; some architectures have a limit on how
	 * far ahead a user-space access may be in order to grow the stack.
	 */
	vma = find_extend_vma(current->mm, bprm->p);
	if (!vma)
		return -EFAULT;

	/* Now, let's put argc (and argv, envp if appropriate) on the stack */
	if (__put_user(argc, sp++))
		return -EFAULT;
	if (interp_aout) {
		argv = sp + 2;
		envp = argv + argc + 1;
		if (__put_user((elf_addr_t)(unsigned long)argv, sp++) ||
		    __put_user((elf_addr_t)(unsigned long)envp, sp++))
			return -EFAULT;
	} else {
		argv = sp;
		envp = argv + argc + 1;
	}

	/* Populate argv and envp */
	p = current->mm->arg_end = current->mm->arg_start;
	while (argc-- > 0) {
		size_t len;
		if (__put_user((elf_addr_t)p, argv++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return 0;
		p += len;
	}
	if (__put_user(0, argv))
		return -EFAULT;
	current->mm->arg_end = current->mm->env_start = p;
	while (envc-- > 0) {
		size_t len;
		if (__put_user((elf_addr_t)p, envp++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return 0;
		p += len;
	}
	if (__put_user(0, envp))
		return -EFAULT;
	current->mm->env_end = p;

	/* Put the elf_info on the stack in the right place.  */
	sp = (elf_addr_t __user *)envp + 1;
	if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
		return -EFAULT;
	return 0;
}
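
/*
 * The resulting layout at bprm->p (lowest address first, on a
 * downward-growing stack) is the classic SVr4/ELF startup contract:
 *
 *	argc, argv[0..argc-1], NULL, envp[0..envc-1], NULL,
 *	auxv id/value pairs terminated by AT_NULL
 *
 * with the argument and environment strings themselves (already copied
 * in by the generic exec code) living above the vectors.
 */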

#ifndef elf_map

static unsigned long elf_map(struct file *filep, unsigned long addr,
		struct elf_phdr *eppnt, int prot, int type,
		unsigned long total_size)
{
	unsigned long map_addr;
	unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
	unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
	addr = ELF_PAGESTART(addr);
	size = ELF_PAGEALIGN(size);

	/* mmap() will return -EINVAL if given a zero size, but a
	 * segment with zero filesize is perfectly valid */
	if (!size)
		return addr;

	down_write(&current->mm->mmap_sem);
	/*
	* total_size is the size of the ELF (interpreter) image.
	* The _first_ mmap needs to know the full size, otherwise
	* randomization might put this image into an overlapping
	* position with the ELF binary image. (since size < total_size)
	* So we first map the 'big' image - and unmap the remainder at
	* the end. (which unmap is needed for ELF images with holes.)
	*/
	if (total_size) {
		total_size = ELF_PAGEALIGN(total_size);
		map_addr = do_mmap(filep, addr, total_size, prot, type, off);
		if (!BAD_ADDR(map_addr))
			do_munmap(current->mm, map_addr+size, total_size-size);
	} else
		map_addr = do_mmap(filep, addr, size, prot, type, off);

	up_write(&current->mm->mmap_sem);
	return(map_addr);
}

#endif /* !elf_map */
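
/*
 * Span covered by all PT_LOAD segments of an image, from the page-aligned
 * start of the first to the end of the last.  elf_map() above uses this
 * to reserve the whole range on the first mapping, so that a randomized
 * base cannot land later segments on top of an existing mapping.
 */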
static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
{
	int i, first_idx = -1, last_idx = -1;

	for (i = 0; i < nr; i++) {
		if (cmds[i].p_type == PT_LOAD) {
			last_idx = i;
			if (first_idx == -1)
				first_idx = i;
		}
	}

	if (first_idx == -1)
		return 0;

	return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
				ELF_PAGESTART(cmds[first_idx].p_vaddr);
}

/* This is much more generalized than the library routine read function,
   so we keep this separate.  Technically the library read function
   is only provided so that we can read a.out libraries that have
   an ELF header */

static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
		struct file *interpreter, unsigned long *interp_map_addr,
		unsigned long no_base)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long load_addr = 0;
	int load_addr_set = 0;
	unsigned long last_bss = 0, elf_bss = 0;
	unsigned long error = ~0UL;
	unsigned long total_size;
	int retval, i, size;

	/* First of all, some simple consistency checks */
	if (interp_elf_ex->e_type != ET_EXEC &&
	    interp_elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(interp_elf_ex))
		goto out;
	if (!interpreter->f_op || !interpreter->f_op->mmap)
		goto out;

	/*
	 * If the size of this structure has changed, then punt, since
	 * we will be doing the wrong thing.
	 */
	if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
		goto out;
	if (interp_elf_ex->e_phnum < 1 ||
	    interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;

	/* Now read in all of the header information */
	size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
	if (size > ELF_MIN_ALIGN)
		goto out;
	elf_phdata = kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(interpreter, interp_elf_ex->e_phoff,
			     (char *)elf_phdata, size);
	error = -EIO;
	if (retval != size) {
		if (retval < 0)
			error = retval;
		goto out_close;
	}

	total_size = total_mapping_size(elf_phdata, interp_elf_ex->e_phnum);
	if (!total_size) {
		error = -EINVAL;
		goto out_close;
	}

	eppnt = elf_phdata;
	for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
		if (eppnt->p_type == PT_LOAD) {
			int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
			int elf_prot = 0;
			unsigned long vaddr = 0;
			unsigned long k, map_addr;

			if (eppnt->p_flags & PF_R)
				elf_prot = PROT_READ;
			if (eppnt->p_flags & PF_W)
				elf_prot |= PROT_WRITE;
			if (eppnt->p_flags & PF_X)
				elf_prot |= PROT_EXEC;
			vaddr = eppnt->p_vaddr;
			if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
				elf_type |= MAP_FIXED;
			else if (no_base && interp_elf_ex->e_type == ET_DYN)
				load_addr = -vaddr;

			map_addr = elf_map(interpreter, load_addr + vaddr,
					   eppnt, elf_prot, elf_type, total_size);
			total_size = 0;
			if (!*interp_map_addr)
				*interp_map_addr = map_addr;
			error = map_addr;
			if (BAD_ADDR(map_addr))
				goto out_close;

			if (!load_addr_set &&
			    interp_elf_ex->e_type == ET_DYN) {
				load_addr = map_addr - ELF_PAGESTART(vaddr);
				load_addr_set = 1;
			}

			/*
			 * Check to see if the section's size will overflow the
			 * allowed task size. Note that p_filesz must always be
			 * <= p_memsz so it's only necessary to check p_memsz.
			 */
			k = load_addr + eppnt->p_vaddr;
			if (BAD_ADDR(k) ||
			    eppnt->p_filesz > eppnt->p_memsz ||
			    eppnt->p_memsz > TASK_SIZE ||
			    TASK_SIZE - eppnt->p_memsz < k) {
				error = -ENOMEM;
				goto out_close;
			}

			/*
			 * Find the end of the file mapping for this phdr, and
			 * keep track of the largest address we see for this.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
			if (k > elf_bss)
				elf_bss = k;

			/*
			 * Do the same thing for the memory mapping - between
			 * elf_bss and last_bss is the bss section.
			 */
			k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
			if (k > last_bss)
				last_bss = k;
		}
	}

	/*
	 * Now fill out the bss section.  First pad the last page up
	 * to the page boundary, and then perform a mmap to make sure
	 * that there are zero-mapped pages up to and including the
	 * last bss page.
	 */
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out_close;
	}

	/* What we have mapped so far */
	elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);

	/* Map the last of the bss segment */
	if (last_bss > elf_bss) {
		down_write(&current->mm->mmap_sem);
		error = do_brk(elf_bss, last_bss - elf_bss);
		up_write(&current->mm->mmap_sem);
		if (BAD_ADDR(error))
			goto out_close;
	}

	error = load_addr;

out_close:
	kfree(elf_phdata);
out:
	return error;
}

static unsigned long load_aout_interp(struct exec *interp_ex,
		struct file *interpreter)
{
	unsigned long text_data, elf_entry = ~0UL;
	char __user * addr;
	loff_t offset;

	current->mm->end_code = interp_ex->a_text;
	text_data = interp_ex->a_text + interp_ex->a_data;
	current->mm->end_data = text_data;
	current->mm->brk = interp_ex->a_bss + text_data;

	switch (N_MAGIC(*interp_ex)) {
	case OMAGIC:
		offset = 32;
		addr = (char __user *)0;
		break;
	case ZMAGIC:
	case QMAGIC:
		offset = N_TXTOFF(*interp_ex);
		addr = (char __user *)N_TXTADDR(*interp_ex);
		break;
	default:
		goto out;
	}

	down_write(&current->mm->mmap_sem);
	do_brk(0, text_data);
	up_write(&current->mm->mmap_sem);
	if (!interpreter->f_op || !interpreter->f_op->read)
		goto out;
	if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
		goto out;
	flush_icache_range((unsigned long)addr,
			   (unsigned long)addr + text_data);

	down_write(&current->mm->mmap_sem);
	do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
		interp_ex->a_bss);
	up_write(&current->mm->mmap_sem);
	elf_entry = interp_ex->a_entry;

out:
	return elf_entry;
}

/*
 * These are the functions used to load ELF style executables and shared
 * libraries.  There is no binary dependent code anywhere else.
 */

#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
#define INTERPRETER_ELF 2

#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))	/* 8MB of VA */
#endif
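
/*
 * STACK_RND_MASK masks a random page count: with 4K pages the default is
 * 0x7ff pages, i.e. up to 0x7ff << 12 = 8MB of stack-top jitter.  The
 * mask shrinks as PAGE_SHIFT grows, keeping the byte range the same.
 */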
static unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned int random_variable = 0;

	if ((current->flags & PF_RANDOMIZE) &&
		!(current->personality & ADDR_NO_RANDOMIZE)) {
		random_variable = get_random_int() & STACK_RND_MASK;
		random_variable <<= PAGE_SHIFT;
	}
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top) + random_variable;
#else
	return PAGE_ALIGN(stack_top) - random_variable;
#endif
}
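
/*
 * load_elf_binary() proceeds roughly in these steps: validate the ELF
 * header and program headers, find and open PT_INTERP (the dynamic
 * linker) if any, honour PT_GNU_STACK, flush the old executable (the
 * point of no return), set up the randomized stack, mmap each PT_LOAD
 * segment (choosing load_bias for ET_DYN images), set up the brk, map
 * the interpreter, and finally build the argc/argv/envp/auxv tables and
 * start the new thread.
 */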
static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
{
	struct file *interpreter = NULL; /* to shut gcc up */
	unsigned long load_addr = 0, load_bias = 0;
	int load_addr_set = 0;
	char * elf_interpreter = NULL;
	unsigned int interpreter_type = INTERPRETER_NONE;
	unsigned char ibcs2_interpreter = 0;
	unsigned long error;
	struct elf_phdr *elf_ppnt, *elf_phdata;
	unsigned long elf_bss, elf_brk;
	int elf_exec_fileno;
	int retval, i;
	unsigned int size;
	unsigned long elf_entry;
	unsigned long interp_load_addr = 0;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long reloc_func_desc = 0;
	char passed_fileno[6];
	struct files_struct *files;
	int executable_stack = EXSTACK_DEFAULT;
	unsigned long def_flags = 0;
	struct {
		struct elfhdr elf_ex;
		struct elfhdr interp_elf_ex;
		struct exec interp_ex;
	} *loc;

	loc = kmalloc(sizeof(*loc), GFP_KERNEL);
	if (!loc) {
		retval = -ENOMEM;
		goto out_ret;
	}

	/* Get the exec-header */
	loc->elf_ex = *((struct elfhdr *)bprm->buf);

	retval = -ENOEXEC;
	/* First of all, some simple consistency checks */
	if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(&loc->elf_ex))
		goto out;
	if (!bprm->file->f_op || !bprm->file->f_op->mmap)
		goto out;

	/* Now read in all of the header information */
	if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr))
		goto out;
	if (loc->elf_ex.e_phnum < 1 ||
	    loc->elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;
	size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr);
	retval = -ENOMEM;
	elf_phdata = kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(bprm->file, loc->elf_ex.e_phoff,
			     (char *)elf_phdata, size);
	if (retval != size) {
		if (retval >= 0)
			retval = -EIO;
		goto out_free_ph;
	}

	files = current->files; /* Refcounted so ok */
	retval = unshare_files();
	if (retval < 0)
		goto out_free_ph;
	if (files == current->files) {
		put_files_struct(files);
		files = NULL;
	}

	/* exec will make our files private anyway, but for the a.out
	   loader stuff we need to do it earlier */
	retval = get_unused_fd();
	if (retval < 0)
		goto out_free_fh;
	get_file(bprm->file);
	fd_install(elf_exec_fileno = retval, bprm->file);

	elf_ppnt = elf_phdata;
	elf_bss = 0;
	elf_brk = 0;

	start_code = ~0UL;
	end_code = 0;
	start_data = 0;
	end_data = 0;

	for (i = 0; i < loc->elf_ex.e_phnum; i++) {
		if (elf_ppnt->p_type == PT_INTERP) {
			/* This is the program interpreter used for
			 * shared libraries - for now assume that this
			 * is an a.out format binary
			 */
			retval = -ENOEXEC;
			if (elf_ppnt->p_filesz > PATH_MAX ||
			    elf_ppnt->p_filesz < 2)
				goto out_free_file;

			retval = -ENOMEM;
			elf_interpreter = kmalloc(elf_ppnt->p_filesz,
						  GFP_KERNEL);
			if (!elf_interpreter)
				goto out_free_file;

			retval = kernel_read(bprm->file, elf_ppnt->p_offset,
					     elf_interpreter,
					     elf_ppnt->p_filesz);
			if (retval != elf_ppnt->p_filesz) {
				if (retval >= 0)
					retval = -EIO;
				goto out_free_interp;
			}
			/* make sure path is NULL terminated */
			retval = -ENOEXEC;
			if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
				goto out_free_interp;

			/* If the program interpreter is one of these two,
			 * then assume an iBCS2 image. Otherwise assume
			 * a native linux image.
			 */
			if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
			    strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0)
				ibcs2_interpreter = 1;

			/*
			 * The early SET_PERSONALITY here is so that the lookup
			 * for the interpreter happens in the namespace of the
			 * to-be-execed image.  SET_PERSONALITY can select an
			 * alternate root.
			 *
			 * However, SET_PERSONALITY is NOT allowed to switch
			 * this task into the new image's memory mapping
			 * policy - that is, TASK_SIZE must still evaluate to
			 * that which is appropriate to the execing application.
			 * This is because exit_mmap() needs to have TASK_SIZE
			 * evaluate to the size of the old image.
			 *
			 * So if (say) a 64-bit application is execing a 32-bit
			 * application it is the architecture's responsibility
			 * to defer changing the value of TASK_SIZE until the
			 * switch really is going to happen - do this in
			 * flush_thread().	- akpm
			 */
			SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);

			interpreter = open_exec(elf_interpreter);
			retval = PTR_ERR(interpreter);
			if (IS_ERR(interpreter))
				goto out_free_interp;

			/*
			 * If the binary is not readable then enforce
			 * mm->dumpable = 0 regardless of the interpreter's
			 * permissions.
			 */
			if (file_permission(interpreter, MAY_READ) < 0)
				bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;

			retval = kernel_read(interpreter, 0, bprm->buf,
					     BINPRM_BUF_SIZE);
			if (retval != BINPRM_BUF_SIZE) {
				if (retval >= 0)
					retval = -EIO;
				goto out_free_dentry;
			}

			/* Get the exec headers */
			loc->interp_ex = *((struct exec *)bprm->buf);
			loc->interp_elf_ex = *((struct elfhdr *)bprm->buf);
			break;
		}
		elf_ppnt++;
	}

	elf_ppnt = elf_phdata;
	for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
		if (elf_ppnt->p_type == PT_GNU_STACK) {
			if (elf_ppnt->p_flags & PF_X)
				executable_stack = EXSTACK_ENABLE_X;
			else
				executable_stack = EXSTACK_DISABLE_X;
			break;
		}

	/* Some simple consistency checks for the interpreter */
	if (elf_interpreter) {
		static int warn;
		interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;

		/* Now figure out which format our binary is */
		if ((N_MAGIC(loc->interp_ex) != OMAGIC) &&
		    (N_MAGIC(loc->interp_ex) != ZMAGIC) &&
		    (N_MAGIC(loc->interp_ex) != QMAGIC))
			interpreter_type = INTERPRETER_ELF;

		if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
			interpreter_type &= ~INTERPRETER_ELF;

		if (interpreter_type == INTERPRETER_AOUT && warn < 10) {
			printk(KERN_WARNING "a.out ELF interpreter %s is "
				"deprecated and will not be supported "
				"after Linux 2.6.25\n", elf_interpreter);
			warn++;
		}

		retval = -ELIBBAD;
		if (!interpreter_type)
			goto out_free_dentry;

		/* Make sure only one type was selected */
		if ((interpreter_type & INTERPRETER_ELF) &&
		     interpreter_type != INTERPRETER_ELF) {
			// FIXME - ratelimit this before re-enabling
			// printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
			interpreter_type = INTERPRETER_ELF;
		}
		/* Verify the interpreter has a valid arch */
		if ((interpreter_type == INTERPRETER_ELF) &&
		    !elf_check_arch(&loc->interp_elf_ex))
			goto out_free_dentry;
	} else {
		/* Executables without an interpreter also need a personality */
		SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
	}

	/* OK, we are done with that, now set up the arg stuff,
	   and then start this sucker up */
	if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) {
		char *passed_p = passed_fileno;
		sprintf(passed_fileno, "%d", elf_exec_fileno);

		if (elf_interpreter) {
			retval = copy_strings_kernel(1, &passed_p, bprm);
			if (retval)
				goto out_free_dentry;
			bprm->argc++;
		}
	}

	/* Flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	if (retval)
		goto out_free_dentry;

	/* Discard our unneeded old files struct */
	if (files) {
		put_files_struct(files);
		files = NULL;
	}

	/* OK, This is the point of no return */
	current->flags &= ~PF_FORKNOEXEC;
	current->mm->def_flags = def_flags;

	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
	   may depend on the personality.  */
	SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
	if (elf_read_implies_exec(loc->elf_ex, executable_stack))
		current->personality |= READ_IMPLIES_EXEC;

	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		current->flags |= PF_RANDOMIZE;
	arch_pick_mmap_layout(current->mm);

	/* Do this so that we can load the interpreter, if need be.  We will
	   change some of these later */
	current->mm->free_area_cache = current->mm->mmap_base;
	current->mm->cached_hole_size = 0;
	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
				 executable_stack);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		goto out_free_dentry;
	}

	current->mm->start_stack = bprm->p;

	/* Now we do a little grungy work by mmaping the ELF image into
	   the correct location in memory. */
	for (i = 0, elf_ppnt = elf_phdata;
	     i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
		int elf_prot = 0, elf_flags;
		unsigned long k, vaddr;

		if (elf_ppnt->p_type != PT_LOAD)
			continue;

		if (unlikely (elf_brk > elf_bss)) {
			unsigned long nbyte;

			/* There was a PT_LOAD segment with p_memsz > p_filesz
			   before this one. Map anonymous pages, if needed,
			   and clear the area.  */
			retval = set_brk (elf_bss + load_bias,
					  elf_brk + load_bias);
			if (retval) {
				send_sig(SIGKILL, current, 0);
				goto out_free_dentry;
			}
			nbyte = ELF_PAGEOFFSET(elf_bss);
			if (nbyte) {
				nbyte = ELF_MIN_ALIGN - nbyte;
				if (nbyte > elf_brk - elf_bss)
					nbyte = elf_brk - elf_bss;
				if (clear_user((void __user *)elf_bss +
							load_bias, nbyte)) {
					/*
					 * This bss-zeroing can fail if the ELF
					 * file specifies odd protections. So
					 * we don't check the return value
					 */
				}
			}
		}

		if (elf_ppnt->p_flags & PF_R)
			elf_prot |= PROT_READ;
		if (elf_ppnt->p_flags & PF_W)
			elf_prot |= PROT_WRITE;
		if (elf_ppnt->p_flags & PF_X)
			elf_prot |= PROT_EXEC;

		elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;

		vaddr = elf_ppnt->p_vaddr;
		if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
			elf_flags |= MAP_FIXED;
		} else if (loc->elf_ex.e_type == ET_DYN) {
			/* Try and get dynamic programs out of the way of the
			 * default mmap base, as well as whatever program they
			 * might try to exec.  This is because the brk will
			 * follow the loader, and is not movable.  */
#ifdef CONFIG_X86
			load_bias = 0;
#else
			load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
#endif
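			/*
			 * With load_bias == 0 on x86, the first mapping is not
			 * MAP_FIXED, so the kernel picks the address itself;
			 * under PF_RANDOMIZE that base is randomized, giving
			 * PIE executables a random load address.  Other
			 * architectures still load at the fixed
			 * ELF_ET_DYN_BASE.
			 */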
		}

		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
				elf_prot, elf_flags, 0);
		if (BAD_ADDR(error)) {
			send_sig(SIGKILL, current, 0);
			retval = IS_ERR((void *)error) ?
				PTR_ERR((void*)error) : -EINVAL;
			goto out_free_dentry;
		}

		if (!load_addr_set) {
			load_addr_set = 1;
			load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
			if (loc->elf_ex.e_type == ET_DYN) {
				load_bias += error -
					     ELF_PAGESTART(load_bias + vaddr);
				load_addr += load_bias;
				reloc_func_desc = load_bias;
			}
		}
		k = elf_ppnt->p_vaddr;
		if (k < start_code)
			start_code = k;
		if (start_data < k)
			start_data = k;

		/*
		 * Check to see if the section's size will overflow the
		 * allowed task size. Note that p_filesz must always be
		 * <= p_memsz so it is only necessary to check p_memsz.
		 */
		if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
		    elf_ppnt->p_memsz > TASK_SIZE ||
		    TASK_SIZE - elf_ppnt->p_memsz < k) {
			/* set_brk can never work. Avoid overflows. */
			send_sig(SIGKILL, current, 0);
			retval = -EINVAL;
			goto out_free_dentry;
		}

		k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;

		if (k > elf_bss)
			elf_bss = k;
		if ((elf_ppnt->p_flags & PF_X) && end_code < k)
			end_code = k;
		if (end_data < k)
			end_data = k;
		k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
		if (k > elf_brk)
			elf_brk = k;
	}

	loc->elf_ex.e_entry += load_bias;
	elf_bss += load_bias;
	elf_brk += load_bias;
	start_code += load_bias;
	end_code += load_bias;
	start_data += load_bias;
	end_data += load_bias;

	/* Calling set_brk effectively mmaps the pages that we need
	 * for the bss and break sections.  We must do this before
	 * mapping in the interpreter, to make sure it doesn't wind
	 * up getting placed where the bss needs to go.
	 */
	retval = set_brk(elf_bss, elf_brk);
	if (retval) {
		send_sig(SIGKILL, current, 0);
		goto out_free_dentry;
	}
	if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
		send_sig(SIGSEGV, current, 0);
		retval = -EFAULT; /* Nobody gets to see this, but.. */
		goto out_free_dentry;
	}

	if (elf_interpreter) {
		if (interpreter_type == INTERPRETER_AOUT) {
			elf_entry = load_aout_interp(&loc->interp_ex,
						     interpreter);
		} else {
			unsigned long uninitialized_var(interp_map_addr);

			elf_entry = load_elf_interp(&loc->interp_elf_ex,
						    interpreter,
						    &interp_map_addr,
						    load_bias);
			if (!IS_ERR((void *)elf_entry)) {
				/*
				 * load_elf_interp() returns relocation
				 * adjustment
				 */
				interp_load_addr = elf_entry;
				elf_entry += loc->interp_elf_ex.e_entry;
			}
		}
		if (BAD_ADDR(elf_entry)) {
			force_sig(SIGSEGV, current);
			retval = IS_ERR((void *)elf_entry) ?
					(int)elf_entry : -EINVAL;
			goto out_free_dentry;
		}
		reloc_func_desc = interp_load_addr;

		allow_write_access(interpreter);
		fput(interpreter);
		kfree(elf_interpreter);
	} else {
		elf_entry = loc->elf_ex.e_entry;
		if (BAD_ADDR(elf_entry)) {
			force_sig(SIGSEGV, current);
			retval = -EINVAL;
			goto out_free_dentry;
		}
	}

	kfree(elf_phdata);

	if (interpreter_type != INTERPRETER_AOUT)
		sys_close(elf_exec_fileno);

	set_binfmt(&elf_format);

#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
	retval = arch_setup_additional_pages(bprm, executable_stack);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		goto out;
	}
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */

	compute_creds(bprm);
	current->flags &= ~PF_FORKNOEXEC;
	retval = create_elf_tables(bprm, &loc->elf_ex,
			  (interpreter_type == INTERPRETER_AOUT),
			  load_addr, interp_load_addr);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		goto out;
	}
	/* N.B. passed_fileno might not be initialized? */
	if (interpreter_type == INTERPRETER_AOUT)
		current->mm->arg_start += strlen(passed_fileno) + 1;
	current->mm->end_code = end_code;
	current->mm->start_code = start_code;
	current->mm->start_data = start_data;
	current->mm->end_data = end_data;
	current->mm->start_stack = bprm->p;

#ifdef arch_randomize_brk
	if (current->flags & PF_RANDOMIZE)
		current->mm->brk = current->mm->start_brk =
			arch_randomize_brk(current->mm);
#endif
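	/*
	 * Randomizing the brk start as well keeps the heap from sitting at
	 * a fixed offset from the executable image; for PIE binaries the
	 * brk area directly follows the binary, so this gives the heap
	 * jitter independent of the image base.
	 */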

	if (current->personality & MMAP_PAGE_ZERO) {
		/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
		   and some applications "depend" upon this behavior.
		   Since we do not have the power to recompile these, we
		   emulate the SVr4 behavior. Sigh. */
		down_write(&current->mm->mmap_sem);
		error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE, 0);
		up_write(&current->mm->mmap_sem);
	}

#ifdef ELF_PLAT_INIT
	/*
	 * The ABI may specify that certain registers be set up in special
	 * ways (on i386 %edx is the address of a DT_FINI function, for
	 * example).  In addition, it may also specify (eg, PowerPC64 ELF)
	 * that the e_entry field is the address of the function descriptor
	 * for the startup routine, rather than the address of the startup
	 * routine itself.  This macro performs whatever initialization to
	 * the regs structure is required as well as any relocations to the
	 * function descriptor entries when executing dynamically linked apps.
	 */
	ELF_PLAT_INIT(regs, reloc_func_desc);
#endif

	start_thread(regs, elf_entry, bprm->p);
	if (unlikely(current->ptrace & PT_PTRACED)) {
		if (current->ptrace & PT_TRACE_EXEC)
			ptrace_notify((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
		else
			send_sig(SIGTRAP, current, 0);
	}
	retval = 0;
out:
	kfree(loc);
out_ret:
	return retval;

	/* error cleanup */
out_free_dentry:
	allow_write_access(interpreter);
	if (interpreter)
		fput(interpreter);
out_free_interp:
	kfree(elf_interpreter);
out_free_file:
	sys_close(elf_exec_fileno);
out_free_fh:
	if (files)
		reset_files_struct(current, files);
out_free_ph:
	kfree(elf_phdata);
	goto out;
}

/* This is really simpleminded and specialized - we are loading an
   a.out library that is given an ELF header. */
static int load_elf_library(struct file *file)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long elf_bss, bss, len;
	int retval, error, i, j;
	struct elfhdr elf_ex;

	error = -ENOEXEC;
	retval = kernel_read(file, 0, (char *)&elf_ex, sizeof(elf_ex));
	if (retval != sizeof(elf_ex))
		goto out;

	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	/* First of all, some simple consistency checks */
	if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
	    !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
		goto out;

	/* Now read in all of the header information */

	j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
	/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */

	error = -ENOMEM;
	elf_phdata = kmalloc(j, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	eppnt = elf_phdata;
	error = -ENOEXEC;
	retval = kernel_read(file, elf_ex.e_phoff, (char *)eppnt, j);
	if (retval != j)
		goto out_free_ph;

	for (j = 0, i = 0; i < elf_ex.e_phnum; i++)
		if ((eppnt + i)->p_type == PT_LOAD)
			j++;
	if (j != 1)
		goto out_free_ph;

	while (eppnt->p_type != PT_LOAD)
		eppnt++;

	/* Now use mmap to map the library into memory. */
	down_write(&current->mm->mmap_sem);
	error = do_mmap(file,
			ELF_PAGESTART(eppnt->p_vaddr),
			(eppnt->p_filesz +
			 ELF_PAGEOFFSET(eppnt->p_vaddr)),
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
			(eppnt->p_offset -
			 ELF_PAGEOFFSET(eppnt->p_vaddr)));
	up_write(&current->mm->mmap_sem);
	if (error != ELF_PAGESTART(eppnt->p_vaddr))
		goto out_free_ph;

	elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out_free_ph;
	}

	len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr +
			    ELF_MIN_ALIGN - 1);
	bss = eppnt->p_memsz + eppnt->p_vaddr;
	if (bss > len) {
		down_write(&current->mm->mmap_sem);
		do_brk(len, bss - len);
		up_write(&current->mm->mmap_sem);
	}
	error = 0;

out_free_ph:
	kfree(elf_phdata);
out:
	return error;
}

/*
 * Note that some platforms still use traditional core dumps and not
 * the ELF core dump.  Each platform can select it as appropriate.
 */
#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)

/*
 * ELF core dumper
 *
 * Modelled on fs/exec.c:aout_core_dump()
 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
 */
/*
 * These are the only things you should do on a core-file: use only these
 * functions to write out all the necessary info.
 */
static int dump_write(struct file *file, const void *addr, int nr)
{
	return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
}

static int dump_seek(struct file *file, loff_t off)
{
	if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
		if (file->f_op->llseek(file, off, SEEK_CUR) < 0)
			return 0;
	} else {
		char *buf = (char *)get_zeroed_page(GFP_KERNEL);
		if (!buf)
			return 0;
		while (off > 0) {
			unsigned long n = off;

			if (n > PAGE_SIZE)
				n = PAGE_SIZE;
			if (!dump_write(file, buf, n))
				return 0;
			off -= n;
		}
		free_page((unsigned long)buf);
	}
	return 1;
}

/*
 * Decide what to dump of a segment, part, all or none.
 */
static unsigned long vma_dump_size(struct vm_area_struct *vma,
				   unsigned long mm_flags)
{
	/* The vma can be set up to tell us the answer directly.  */
	if (vma->vm_flags & VM_ALWAYSDUMP)
		goto whole;

	/* Do not dump I/O mapped devices or special mappings */
	if (vma->vm_flags & (VM_IO | VM_RESERVED))
		return 0;

#define FILTER(type)	(mm_flags & (1UL << MMF_DUMP_##type))

	/* By default, dump shared memory if mapped from an anonymous file. */
	if (vma->vm_flags & VM_SHARED) {
		if (vma->vm_file->f_path.dentry->d_inode->i_nlink == 0 ?
		    FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED))
			goto whole;
		return 0;
	}

	/* Dump segments that have been written to.  */
	if (vma->anon_vma && FILTER(ANON_PRIVATE))
		goto whole;
	if (vma->vm_file == NULL)
		return 0;

	if (FILTER(MAPPED_PRIVATE))
		goto whole;

	/*
	 * If this looks like the beginning of a DSO or executable mapping,
	 * check for an ELF header.  If we find one, dump the first page to
	 * aid in determining what was mapped here.
	 */
	if (FILTER(ELF_HEADERS) && vma->vm_file != NULL && vma->vm_pgoff == 0) {
		u32 __user *header = (u32 __user *) vma->vm_start;
		u32 word;
		/*
		 * Doing it this way gets the constant folded by GCC.
		 */
		union {
			u32 cmp;
			char elfmag[SELFMAG];
		} magic;
		BUILD_BUG_ON(SELFMAG != sizeof word);
		magic.elfmag[EI_MAG0] = ELFMAG0;
		magic.elfmag[EI_MAG1] = ELFMAG1;
		magic.elfmag[EI_MAG2] = ELFMAG2;
		magic.elfmag[EI_MAG3] = ELFMAG3;
		if (get_user(word, header) == 0 && word == magic.cmp)
			return PAGE_SIZE;
	}

#undef	FILTER

	return 0;

whole:
	return vma->vm_end - vma->vm_start;
}

/* An ELF note in memory */
struct memelfnote
{
	const char *name;
	int type;
	unsigned int datasz;
	void *data;
};
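
/*
 * An ELF note on disk is a struct elf_note header (n_namesz, n_descsz,
 * n_type) followed by the name and then the descriptor, each padded to
 * a 4-byte boundary; notesize() and writenote() below implement exactly
 * that layout.
 */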
static int notesize(struct memelfnote *en)
{
	int sz;

	sz = sizeof(struct elf_note);
	sz += roundup(strlen(en->name) + 1, 4);
	sz += roundup(en->datasz, 4);

	return sz;
}

#define DUMP_WRITE(addr, nr, foffset)	\
	do { if (!dump_write(file, (addr), (nr))) return 0; *foffset += (nr); } while(0)

static int alignfile(struct file *file, loff_t *foffset)
{
	static const char buf[4] = { 0, };
	DUMP_WRITE(buf, roundup(*foffset, 4) - *foffset, foffset);
	return 1;
}

static int writenote(struct memelfnote *men, struct file *file,
			loff_t *foffset)
{
	struct elf_note en;
	en.n_namesz = strlen(men->name) + 1;
	en.n_descsz = men->datasz;
	en.n_type = men->type;

	DUMP_WRITE(&en, sizeof(en), foffset);
	DUMP_WRITE(men->name, en.n_namesz, foffset);
	if (!alignfile(file, foffset))
		return 0;
	DUMP_WRITE(men->data, men->datasz, foffset);
	if (!alignfile(file, foffset))
		return 0;

	return 1;
}
#undef DUMP_WRITE

#define DUMP_WRITE(addr, nr)	\
	if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
		goto end_coredump;
#define DUMP_SEEK(off) \
	if (!dump_seek(file, (off))) \
		goto end_coredump;
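
/*
 * This second DUMP_WRITE aborts the dump once the running `size' would
 * exceed the caller-supplied `limit' (the core file size limit) or a
 * write fails; DUMP_SEEK aborts on a failed seek.  Either way the core
 * file simply ends up truncated, as the comment above elf_core_dump()
 * notes.
 */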
static void fill_elf_header(struct elfhdr *elf, int segs)
{
	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS] = ELF_CLASS;
	elf->e_ident[EI_DATA] = ELF_DATA;
	elf->e_ident[EI_VERSION] = EV_CURRENT;
	elf->e_ident[EI_OSABI] = ELF_OSABI;
	memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);

	elf->e_type = ET_CORE;
	elf->e_machine = ELF_ARCH;
	elf->e_version = EV_CURRENT;
	elf->e_entry = 0;
	elf->e_phoff = sizeof(struct elfhdr);
	elf->e_shoff = 0;
	elf->e_flags = ELF_CORE_EFLAGS;
	elf->e_ehsize = sizeof(struct elfhdr);
	elf->e_phentsize = sizeof(struct elf_phdr);
	elf->e_phnum = segs;
	elf->e_shentsize = 0;
	elf->e_shnum = 0;
	elf->e_shstrndx = 0;
	return;
}

static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
{
	phdr->p_type = PT_NOTE;
	phdr->p_offset = offset;
	phdr->p_vaddr = 0;
	phdr->p_paddr = 0;
	phdr->p_filesz = sz;
	phdr->p_memsz = 0;
	phdr->p_flags = 0;
	phdr->p_align = 0;
	return;
}

static void fill_note(struct memelfnote *note, const char *name, int type,
		unsigned int sz, void *data)
{
	note->name = name;
	note->type = type;
	note->datasz = sz;
	note->data = data;
	return;
}

/*
 * fill up all the fields in prstatus from the given task struct, except
 * registers which need to be filled up separately.
 */
static void fill_prstatus(struct elf_prstatus *prstatus,
		struct task_struct *p, long signr)
{
	prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
	prstatus->pr_sigpend = p->pending.signal.sig[0];
	prstatus->pr_sighold = p->blocked.sig[0];
	prstatus->pr_pid = task_pid_vnr(p);
	prstatus->pr_ppid = task_pid_vnr(p->real_parent);
	prstatus->pr_pgrp = task_pgrp_vnr(p);
	prstatus->pr_sid = task_session_vnr(p);
	if (thread_group_leader(p)) {
		/*
		 * This is the record for the group leader.  Add in the
		 * cumulative times of previous dead threads.  This total
		 * won't include the time of each live thread whose state
		 * is included in the core dump.  The final total reported
		 * to our parent process when it calls wait4 will include
		 * those sums as well as the little bit more time it takes
		 * this and each other thread to finish dying after the
		 * core dump synchronization phase.
		 */
		cputime_to_timeval(cputime_add(p->utime, p->signal->utime),
				   &prstatus->pr_utime);
		cputime_to_timeval(cputime_add(p->stime, p->signal->stime),
				   &prstatus->pr_stime);
	} else {
		cputime_to_timeval(p->utime, &prstatus->pr_utime);
		cputime_to_timeval(p->stime, &prstatus->pr_stime);
	}
	cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
	cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
}

static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
		       struct mm_struct *mm)
{
	unsigned int i, len;

	/* first copy the parameters from user space */
	memset(psinfo, 0, sizeof(struct elf_prpsinfo));

	len = mm->arg_end - mm->arg_start;
	if (len >= ELF_PRARGSZ)
		len = ELF_PRARGSZ-1;
	if (copy_from_user(&psinfo->pr_psargs,
			   (const char __user *)mm->arg_start, len))
		return -EFAULT;
	for(i = 0; i < len; i++)
		if (psinfo->pr_psargs[i] == 0)
			psinfo->pr_psargs[i] = ' ';
	psinfo->pr_psargs[len] = 0;

	psinfo->pr_pid = task_pid_vnr(p);
	psinfo->pr_ppid = task_pid_vnr(p->real_parent);
	psinfo->pr_pgrp = task_pgrp_vnr(p);
	psinfo->pr_sid = task_session_vnr(p);

	i = p->state ? ffz(~p->state) + 1 : 0;
	psinfo->pr_state = i;
	psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
	psinfo->pr_zomb = psinfo->pr_sname == 'Z';
	psinfo->pr_nice = task_nice(p);
	psinfo->pr_flag = p->flags;
	SET_UID(psinfo->pr_uid, p->uid);
	SET_GID(psinfo->pr_gid, p->gid);
	strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));

	return 0;
}

/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status
{
	struct list_head list;
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	elf_fpregset_t fpu;		/* NT_PRFPREG */
	struct task_struct *thread;
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t xfpu;		/* ELF_CORE_XFPREG_TYPE */
#endif
	struct memelfnote notes[3];
	int num_notes;
};

/*
 * In order to add the specific thread information for the elf file format,
 * we need to keep a linked list of every thread's pr_status and then create
 * a single section for them in the final core file.
 */
static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
{
	int sz = 0;
	struct task_struct *p = t->thread;
	t->num_notes = 0;

	fill_prstatus(&t->prstatus, p, signr);
	elf_core_copy_task_regs(p, &t->prstatus.pr_reg);

	fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
		  &(t->prstatus));
	t->num_notes++;
	sz += notesize(&t->notes[0]);

	if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL,
								&t->fpu))) {
		fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
			  &(t->fpu));
		t->num_notes++;
		sz += notesize(&t->notes[1]);
	}

#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
		fill_note(&t->notes[2], "LINUX", ELF_CORE_XFPREG_TYPE,
			  sizeof(t->xfpu), &t->xfpu);
		t->num_notes++;
		sz += notesize(&t->notes[2]);
	}
#endif
	return sz;
}

static struct vm_area_struct *first_vma(struct task_struct *tsk,
					struct vm_area_struct *gate_vma)
{
	struct vm_area_struct *ret = tsk->mm->mmap;

	if (ret)
		return ret;
	return gate_vma;
}
/*
 * Helper function for iterating across a vma list.  It ensures that the caller
 * will visit `gate_vma' prior to terminating the search.
 */
static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
					struct vm_area_struct *gate_vma)
{
	struct vm_area_struct *ret;

	ret = this_vma->vm_next;
	if (ret)
		return ret;
	if (this_vma == gate_vma)
		return NULL;
	return gate_vma;
}

/*
 * Actual dumper
 *
 * This is a two-pass process; first we find the offsets of the bits,
 * and then they are actually written out.  If we run out of core limit
 * we just truncate.
 */
static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit)
{
#define	NUM_NOTES	6
	int has_dumped = 0;
	mm_segment_t fs;
	int segs;
	size_t size = 0;
	int i;
	struct vm_area_struct *vma, *gate_vma;
	struct elfhdr *elf = NULL;
	loff_t offset = 0, dataoff, foffset;
	int numnote;
	struct memelfnote *notes = NULL;
	struct elf_prstatus *prstatus = NULL;	/* NT_PRSTATUS */
	struct elf_prpsinfo *psinfo = NULL;	/* NT_PRPSINFO */
	struct task_struct *g, *p;
	LIST_HEAD(thread_list);
	struct list_head *t;
	elf_fpregset_t *fpu = NULL;
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t *xfpu = NULL;
#endif
	int thread_status_size = 0;
	elf_addr_t *auxv;
	unsigned long mm_flags;

	/*
	 * We no longer stop all VM operations.
	 *
	 * This is because those processes that could possibly change map_count
	 * or the mmap / vma pages are now blocked in do_exit on current
	 * finishing this core dump.
	 *
	 * Only ptrace can touch these memory addresses, but it doesn't change
	 * the map_count or the pages allocated. So no possibility of crashing
	 * exists while dumping the mm->vm_next areas to the core file.
	 */

	/* alloc memory for large data structures: too large to be on stack */
	elf = kmalloc(sizeof(*elf), GFP_KERNEL);
	if (!elf)
		goto cleanup;
	prstatus = kmalloc(sizeof(*prstatus), GFP_KERNEL);
	if (!prstatus)
		goto cleanup;
	psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
	if (!psinfo)
		goto cleanup;
	notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL);
	if (!notes)
		goto cleanup;
	fpu = kmalloc(sizeof(*fpu), GFP_KERNEL);
	if (!fpu)
		goto cleanup;
#ifdef ELF_CORE_COPY_XFPREGS
	xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL);
	if (!xfpu)
		goto cleanup;
#endif

	if (signr) {
		struct elf_thread_status *tmp;
		rcu_read_lock();
		do_each_thread(g,p)
			if (current->mm == p->mm && current != p) {
				tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
				if (!tmp) {
					rcu_read_unlock();
					goto cleanup;
				}
				tmp->thread = p;
				list_add(&tmp->list, &thread_list);
			}
		while_each_thread(g,p);
		rcu_read_unlock();
		list_for_each(t, &thread_list) {
			struct elf_thread_status *tmp;
			int sz;

			tmp = list_entry(t, struct elf_thread_status, list);
			sz = elf_dump_thread_status(signr, tmp);
			thread_status_size += sz;
		}
	}
	/* now collect the dump for the current */
	memset(prstatus, 0, sizeof(*prstatus));
	fill_prstatus(prstatus, current, signr);
	elf_core_copy_regs(&prstatus->pr_reg, regs);

	segs = current->mm->map_count;
#ifdef ELF_CORE_EXTRA_PHDRS
	segs += ELF_CORE_EXTRA_PHDRS;
#endif

	gate_vma = get_gate_vma(current);
	if (gate_vma != NULL)
		segs++;

	/* Set up header */
	fill_elf_header(elf, segs + 1);	/* including notes section */

	has_dumped = 1;
	current->flags |= PF_DUMPCORE;

	/*
	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	 */

	fill_note(notes + 0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);
	fill_psinfo(psinfo, current->group_leader, current->mm);
	fill_note(notes + 1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);

	numnote = 2;

	auxv = (elf_addr_t *)current->mm->saved_auxv;

	i = 0;
	do
		i += 2;
	while (auxv[i - 2] != AT_NULL);
	fill_note(&notes[numnote++], "CORE", NT_AUXV,
		  i * sizeof(elf_addr_t), auxv);

	/* Try to dump the FPU. */
	if ((prstatus->pr_fpvalid =
			elf_core_copy_task_fpregs(current, regs, fpu)))
		fill_note(notes + numnote++,
			  "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(current, xfpu))
		fill_note(notes + numnote++,
			  "LINUX", ELF_CORE_XFPREG_TYPE, sizeof(*xfpu), xfpu);
#endif

	fs = get_fs();
	set_fs(KERNEL_DS);

	DUMP_WRITE(elf, sizeof(*elf));
	offset += sizeof(*elf);				/* Elf header */
	offset += (segs + 1) * sizeof(struct elf_phdr); /* Program headers */
	foffset = offset;

	/* Write notes phdr entry */
	{
		struct elf_phdr phdr;
		int sz = 0;

		for (i = 0; i < numnote; i++)
			sz += notesize(notes + i);

		sz += thread_status_size;

		sz += elf_coredump_extra_notes_size();

		fill_elf_note_phdr(&phdr, sz, offset);
		offset += sz;
		DUMP_WRITE(&phdr, sizeof(phdr));
	}

	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);

	/*
	 * We must use the same mm->flags while dumping core to avoid
	 * inconsistency between the program headers and bodies, otherwise an
	 * unusable core file can be generated.
	 */
	mm_flags = current->mm->flags;

	/* Write program headers for segments dump */
	for (vma = first_vma(current, gate_vma); vma != NULL;
			vma = next_vma(vma, gate_vma)) {
		struct elf_phdr phdr;

		phdr.p_type = PT_LOAD;
		phdr.p_offset = offset;
		phdr.p_vaddr = vma->vm_start;
		phdr.p_paddr = 0;
		phdr.p_filesz = vma_dump_size(vma, mm_flags);
		phdr.p_memsz = vma->vm_end - vma->vm_start;
		offset += phdr.p_filesz;
		phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
		if (vma->vm_flags & VM_WRITE)
			phdr.p_flags |= PF_W;
		if (vma->vm_flags & VM_EXEC)
			phdr.p_flags |= PF_X;
		phdr.p_align = ELF_EXEC_PAGESIZE;

		DUMP_WRITE(&phdr, sizeof(phdr));
	}

#ifdef ELF_CORE_WRITE_EXTRA_PHDRS
	ELF_CORE_WRITE_EXTRA_PHDRS;
#endif

	/* write out the notes section */
	for (i = 0; i < numnote; i++)
		if (!writenote(notes + i, file, &foffset))
			goto end_coredump;

	if (elf_coredump_extra_notes_write(file, &foffset))
		goto end_coredump;

	/* write out the thread status notes section */
	list_for_each(t, &thread_list) {
		struct elf_thread_status *tmp =
				list_entry(t, struct elf_thread_status, list);

		for (i = 0; i < tmp->num_notes; i++)
			if (!writenote(&tmp->notes[i], file, &foffset))
				goto end_coredump;
	}

	/* Align to page */
	DUMP_SEEK(dataoff - foffset);

	for (vma = first_vma(current, gate_vma); vma != NULL;
			vma = next_vma(vma, gate_vma)) {
		unsigned long addr;
		unsigned long end;

		end = vma->vm_start + vma_dump_size(vma, mm_flags);

		for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
			struct page *page;
			struct vm_area_struct *vma;

			if (get_user_pages(current, current->mm, addr, 1, 0, 1,
						&page, &vma) <= 0) {
				DUMP_SEEK(PAGE_SIZE);
			} else {
				if (page == ZERO_PAGE(0)) {
					if (!dump_seek(file, PAGE_SIZE)) {
						page_cache_release(page);
						goto end_coredump;
					}
				} else {
					void *kaddr;
					flush_cache_page(vma, addr,
							 page_to_pfn(page));
					kaddr = kmap(page);
					if ((size += PAGE_SIZE) > limit ||
					    !dump_write(file, kaddr,
						PAGE_SIZE)) {
						kunmap(page);
						page_cache_release(page);
						goto end_coredump;
					}
					kunmap(page);
				}
				page_cache_release(page);
			}
		}
	}

#ifdef ELF_CORE_WRITE_EXTRA_DATA
	ELF_CORE_WRITE_EXTRA_DATA;
#endif

end_coredump:
	set_fs(fs);

cleanup:
	while (!list_empty(&thread_list)) {
		struct list_head *tmp = thread_list.next;
		list_del(tmp);
		kfree(list_entry(tmp, struct elf_thread_status, list));
	}

	kfree(elf);
	kfree(prstatus);
	kfree(psinfo);
	kfree(notes);
	kfree(fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	kfree(xfpu);
#endif
	return has_dumped;
#undef NUM_NOTES
}

#endif		/* USE_ELF_CORE_DUMP */

static int __init init_elf_binfmt(void)
{
	return register_binfmt(&elf_format);
}

static void __exit exit_elf_binfmt(void)
{
	/* Remove the ELF loader. */
	unregister_binfmt(&elf_format);
}

core_initcall(init_elf_binfmt);
module_exit(exit_elf_binfmt);
MODULE_LICENSE("GPL");