/*
 * linux/fs/binfmt_elf.c
 *
 * These are the functions used to load ELF format executables as used
 * on SVr4 machines.  Information on the format may be found in the book
 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
 * Tools".
 *
 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/a.out.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/binfmts.h>
#include <linux/string.h>
#include <linux/file.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/personality.h>
#include <linux/elfcore.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/uaccess.h>
#include <asm/param.h>
#include <asm/page.h>

static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs);
static int load_elf_library(struct file *);
static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int);
extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);

#ifndef elf_addr_t
#define elf_addr_t unsigned long
#endif

/*
 * If we don't support core dumping, then supply a NULL so we
 * don't even try.
 */
#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file);
#else
#define elf_core_dump	NULL
#endif

#if ELF_EXEC_PAGESIZE > PAGE_SIZE
#define ELF_MIN_ALIGN	ELF_EXEC_PAGESIZE
#else
#define ELF_MIN_ALIGN	PAGE_SIZE
#endif

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS	0
#endif

#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))

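/*
 * Worked example, assuming ELF_MIN_ALIGN is 0x1000: for _v == 0x0804a123,
 * ELF_PAGESTART yields 0x0804a000, ELF_PAGEOFFSET yields 0x123 and
 * ELF_PAGEALIGN rounds up to 0x0804b000.
 */
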
static struct linux_binfmt elf_format = {
		.module		= THIS_MODULE,
		.load_binary	= load_elf_binary,
		.load_shlib	= load_elf_library,
		.core_dump	= elf_core_dump,
		.min_coredump	= ELF_EXEC_PAGESIZE
};

#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)

static int set_brk(unsigned long start, unsigned long end)
{
        start = ELF_PAGEALIGN(start);
        end = ELF_PAGEALIGN(end);
        if (end > start) {
                unsigned long addr;
                down_write(&current->mm->mmap_sem);
                addr = do_brk(start, end - start);
                up_write(&current->mm->mmap_sem);
                if (BAD_ADDR(addr))
                        return addr;
        }
        current->mm->start_brk = current->mm->brk = end;
        return 0;
}

/* We need to explicitly zero any fractional pages
   after the data section (i.e. bss).  This would
   contain the junk from the file that should not
   be in memory
 */
static int padzero(unsigned long elf_bss)
{
        unsigned long nbyte;

        nbyte = ELF_PAGEOFFSET(elf_bss);
        if (nbyte) {
                nbyte = ELF_MIN_ALIGN - nbyte;
                if (clear_user((void __user *) elf_bss, nbyte))
                        return -EFAULT;
        }
        return 0;
}

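/*
 * Example, assuming 4K pages: for elf_bss == 0x0804a123, ELF_PAGEOFFSET
 * is 0x123, so the 0x1000 - 0x123 = 0xedd bytes from elf_bss to the end
 * of its page are cleared; a page-aligned elf_bss clears nothing.
 */
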
/* Let's use some macros to make this stack manipulation a little clearer */
#ifdef CONFIG_STACK_GROWSUP
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
#define STACK_ROUND(sp, items) \
	((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ \
	elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \
	old_sp; })
#else
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
#define STACK_ROUND(sp, items) \
	(((unsigned long) (sp - items)) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
#endif

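/*
 * Note the direction: on grow-down stacks STACK_ALLOC moves sp down by len
 * and yields the new (lower) address, while with CONFIG_STACK_GROWSUP it
 * yields the old sp and advances past the allocation; either way the caller
 * gets the start of a len-byte region it may write to.
 */
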
static int
create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
                int interp_aout, unsigned long load_addr,
                unsigned long interp_load_addr)
{
        unsigned long p = bprm->p;
        int argc = bprm->argc;
        int envc = bprm->envc;
        elf_addr_t __user *argv;
        elf_addr_t __user *envp;
        elf_addr_t __user *sp;
        elf_addr_t __user *u_platform;
        const char *k_platform = ELF_PLATFORM;
        int items;
        elf_addr_t *elf_info;
        int ei_index = 0;
        struct task_struct *tsk = current;

        /*
         * If this architecture has a platform capability string, copy it
         * to userspace.  In some cases (Sparc), this info is impossible
         * for userspace to get any other way, in others (i386) it is
         * merely difficult.
         */
        u_platform = NULL;
        if (k_platform) {
                size_t len = strlen(k_platform) + 1;

                /*
                 * In some cases (e.g. Hyper-Threading), we want to avoid L1
                 * evictions by the processes running on the same package. One
                 * thing we can do is to shuffle the initial stack for them.
                 */
                p = arch_align_stack(p);

                u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
                if (__copy_to_user(u_platform, k_platform, len))
                        return -EFAULT;
        }

        /* Create the ELF interpreter info */
        elf_info = (elf_addr_t *)current->mm->saved_auxv;
#define NEW_AUX_ENT(id, val) \
        do { \
                elf_info[ei_index++] = id; \
                elf_info[ei_index++] = val; \
        } while (0)

#ifdef ARCH_DLINFO
        /*
         * ARCH_DLINFO must come first so PPC can do its special alignment of
         * AUXV.
         */
        ARCH_DLINFO;
#endif
        NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
        NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
        NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
        NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
        NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
        NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
        NEW_AUX_ENT(AT_BASE, interp_load_addr);
        NEW_AUX_ENT(AT_FLAGS, 0);
        NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
        NEW_AUX_ENT(AT_UID, tsk->uid);
        NEW_AUX_ENT(AT_EUID, tsk->euid);
        NEW_AUX_ENT(AT_GID, tsk->gid);
        NEW_AUX_ENT(AT_EGID, tsk->egid);
        NEW_AUX_ENT(AT_SECURE, security_bprm_secureexec(bprm));
        if (k_platform) {
                NEW_AUX_ENT(AT_PLATFORM,
                            (elf_addr_t)(unsigned long)u_platform);
        }
        if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
                NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
        }
#undef NEW_AUX_ENT

        /* AT_NULL is zero; clear the rest too */
        memset(&elf_info[ei_index], 0,
               sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);

        /* And advance past the AT_NULL entry.  */
        ei_index += 2;

        sp = STACK_ADD(p, ei_index);

        items = (argc + 1) + (envc + 1);
        if (interp_aout) {
                items += 3; /* a.out interpreters require argv & envp too */
        } else {
                items += 1; /* ELF interpreters only put argc on the stack */
        }
        bprm->p = STACK_ROUND(sp, items);

        /* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
        sp = (elf_addr_t __user *)bprm->p - items - ei_index;
        bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
#else
        sp = (elf_addr_t __user *)bprm->p;
#endif

        /* Now, let's put argc (and argv, envp if appropriate) on the stack */
        if (__put_user(argc, sp++))
                return -EFAULT;
        if (interp_aout) {
                argv = sp + 2;
                envp = argv + argc + 1;
                __put_user((elf_addr_t)(unsigned long)argv, sp++);
                __put_user((elf_addr_t)(unsigned long)envp, sp++);
        } else {
                argv = sp;
                envp = argv + argc + 1;
        }

        /* Populate argv and envp */
        p = current->mm->arg_end = current->mm->arg_start;
        while (argc-- > 0) {
                size_t len;
                __put_user((elf_addr_t)p, argv++);
                len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
                if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
                        return 0;
                p += len;
        }
        if (__put_user(0, argv))
                return -EFAULT;
        current->mm->arg_end = current->mm->env_start = p;
        while (envc-- > 0) {
                size_t len;
                __put_user((elf_addr_t)p, envp++);
                len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
                if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
                        return 0;
                p += len;
        }
        if (__put_user(0, envp))
                return -EFAULT;
        current->mm->env_end = p;

        /* Put the elf_info on the stack in the right place.  */
        sp = (elf_addr_t __user *)envp + 1;
        if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
                return -EFAULT;
        return 0;
}

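/*
 * The resulting initial stack, in the usual ELF-interpreter case and from
 * the lowest address written above upward, is:
 *
 *	argc
 *	argv[0] ... argv[argc-1], NULL
 *	envp[0] ... envp[envc-1], NULL
 *	auxv pairs (a_type, a_val), ending with the AT_NULL pair
 *
 * with the argument and environment strings themselves (and the platform
 * string) sitting higher up, where copy_strings() and STACK_ALLOC put them.
 */
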
#ifndef elf_map

static unsigned long elf_map(struct file *filep, unsigned long addr,
                struct elf_phdr *eppnt, int prot, int type)
{
        unsigned long map_addr;
        unsigned long pageoffset = ELF_PAGEOFFSET(eppnt->p_vaddr);

        down_write(&current->mm->mmap_sem);
        /* mmap() will return -EINVAL if given a zero size, but a
         * segment with zero filesize is perfectly valid */
        if (eppnt->p_filesz + pageoffset)
                map_addr = do_mmap(filep, ELF_PAGESTART(addr),
                                   eppnt->p_filesz + pageoffset, prot, type,
                                   eppnt->p_offset - pageoffset);
        else
                map_addr = ELF_PAGESTART(addr);
        up_write(&current->mm->mmap_sem);
        return(map_addr);
}

#endif /* !elf_map */

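/*
 * Example of the offset arithmetic above (assuming 4K pages): a segment
 * with p_vaddr 0x08048100, p_offset 0x100 and p_filesz 0x1000 has
 * pageoffset 0x100, so it is mapped at address 0x08048000 from file
 * offset 0 with length 0x1100; address and file offset are backed up by
 * the same amount, which keeps the file data at the requested p_vaddr.
 */
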
/* This is much more generalized than the library routine read function,
   so we keep this separate.  Technically the library read function
   is only provided so that we can read a.out libraries that have
   an ELF header */
static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
                struct file *interpreter, unsigned long *interp_load_addr)
{
        struct elf_phdr *elf_phdata;
        struct elf_phdr *eppnt;
        unsigned long load_addr = 0;
        int load_addr_set = 0;
        unsigned long last_bss = 0, elf_bss = 0;
        unsigned long error = ~0UL;
        int retval, i, size;

        /* First of all, some simple consistency checks */
        if (interp_elf_ex->e_type != ET_EXEC &&
            interp_elf_ex->e_type != ET_DYN)
                goto out;
        if (!elf_check_arch(interp_elf_ex))
                goto out;
        if (!interpreter->f_op || !interpreter->f_op->mmap)
                goto out;

        /*
         * If the size of this structure has changed, then punt, since
         * we will be doing the wrong thing.
         */
        if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
                goto out;
        if (interp_elf_ex->e_phnum < 1 ||
            interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
                goto out;

        /* Now read in all of the header information */
        size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
        if (size > ELF_MIN_ALIGN)
                goto out;
        elf_phdata = kmalloc(size, GFP_KERNEL);
        if (!elf_phdata)
                goto out;

        retval = kernel_read(interpreter, interp_elf_ex->e_phoff,
                             (char *)elf_phdata, size);
        error = -EIO;
        if (retval != size) {
                if (retval < 0)
                        error = retval;
                goto out_close;
        }

        eppnt = elf_phdata;
        for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
                if (eppnt->p_type == PT_LOAD) {
                        int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
                        int elf_prot = 0;
                        unsigned long vaddr = 0;
                        unsigned long k, map_addr;

                        if (eppnt->p_flags & PF_R)
                                elf_prot = PROT_READ;
                        if (eppnt->p_flags & PF_W)
                                elf_prot |= PROT_WRITE;
                        if (eppnt->p_flags & PF_X)
                                elf_prot |= PROT_EXEC;
                        vaddr = eppnt->p_vaddr;
                        if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
                                elf_type |= MAP_FIXED;

                        map_addr = elf_map(interpreter, load_addr + vaddr,
                                           eppnt, elf_prot, elf_type);
                        error = map_addr;
                        if (BAD_ADDR(map_addr))
                                goto out_close;

                        if (!load_addr_set &&
                            interp_elf_ex->e_type == ET_DYN) {
                                load_addr = map_addr - ELF_PAGESTART(vaddr);
                                load_addr_set = 1;
                        }

                        /*
                         * Check to see if the section's size will overflow the
                         * allowed task size. Note that p_filesz must always be
                         * <= p_memsz so it's only necessary to check p_memsz.
                         */
                        k = load_addr + eppnt->p_vaddr;
                        if (BAD_ADDR(k) ||
                            eppnt->p_filesz > eppnt->p_memsz ||
                            eppnt->p_memsz > TASK_SIZE ||
                            TASK_SIZE - eppnt->p_memsz < k) {
                                error = -ENOMEM;
                                goto out_close;
                        }

                        /*
                         * Find the end of the file mapping for this phdr, and
                         * keep track of the largest address we see for this.
                         */
                        k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
                        if (k > elf_bss)
                                elf_bss = k;

                        /*
                         * Do the same thing for the memory mapping - between
                         * elf_bss and last_bss is the bss section.
                         */
                        k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
                        if (k > last_bss)
                                last_bss = k;
                }
        }

        /*
         * Now fill out the bss section.  First pad the last page up
         * to the page boundary, and then perform a mmap to make sure
         * that there are zero-mapped pages up to and including the
         * last bss page.
         */
        if (padzero(elf_bss)) {
                error = -EFAULT;
                goto out_close;
        }

        /* What we have mapped so far */
        elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);

        /* Map the last of the bss segment */
        if (last_bss > elf_bss) {
                down_write(&current->mm->mmap_sem);
                error = do_brk(elf_bss, last_bss - elf_bss);
                up_write(&current->mm->mmap_sem);
                if (BAD_ADDR(error))
                        goto out_close;
        }

        *interp_load_addr = load_addr;
        error = ((unsigned long)interp_elf_ex->e_entry) + load_addr;

out_close:
        kfree(elf_phdata);
out:
        return error;
}

static unsigned long load_aout_interp(struct exec *interp_ex,
                struct file *interpreter)
{
        unsigned long text_data, elf_entry = ~0UL;
        char __user * addr;
        loff_t offset;

        current->mm->end_code = interp_ex->a_text;
        text_data = interp_ex->a_text + interp_ex->a_data;
        current->mm->end_data = text_data;
        current->mm->brk = interp_ex->a_bss + text_data;

        switch (N_MAGIC(*interp_ex)) {
        case OMAGIC:
                offset = 32;
                addr = (char __user *)0;
                break;
        case ZMAGIC:
        case QMAGIC:
                offset = N_TXTOFF(*interp_ex);
                addr = (char __user *)N_TXTADDR(*interp_ex);
                break;
        default:
                goto out;
        }

        down_write(&current->mm->mmap_sem);
        do_brk(0, text_data);
        up_write(&current->mm->mmap_sem);
        if (!interpreter->f_op || !interpreter->f_op->read)
                goto out;
        if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
                goto out;
        flush_icache_range((unsigned long)addr,
                           (unsigned long)addr + text_data);

        down_write(&current->mm->mmap_sem);
        do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
                interp_ex->a_bss);
        up_write(&current->mm->mmap_sem);
        elf_entry = interp_ex->a_entry;

out:
        return elf_entry;
}

/*
 * These are the functions used to load ELF style executables and shared
 * libraries.  There is no binary dependent code anywhere else.
 */

#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
#define INTERPRETER_ELF 2

#ifndef STACK_RND_MASK
#define STACK_RND_MASK 0x7ff		/* with 4K pages 8MB of VA */
#endif

static unsigned long randomize_stack_top(unsigned long stack_top)
{
        unsigned int random_variable = 0;

        if ((current->flags & PF_RANDOMIZE) &&
                !(current->personality & ADDR_NO_RANDOMIZE)) {
                random_variable = get_random_int() & STACK_RND_MASK;
                random_variable <<= PAGE_SHIFT;
        }
#ifdef CONFIG_STACK_GROWSUP
        return PAGE_ALIGN(stack_top) + random_variable;
#else
        return PAGE_ALIGN(stack_top) - random_variable;
#endif
}

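/*
 * Worked example: with the default STACK_RND_MASK of 0x7ff and 4K pages
 * (PAGE_SHIFT == 12), random_variable is at most 0x7ff << 12 == 0x7ff000,
 * so the page-aligned stack top is shifted by up to 8MB minus one page,
 * in page-sized steps.
 */
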
static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
{
        struct file *interpreter = NULL; /* to shut gcc up */
        unsigned long load_addr = 0, load_bias = 0;
        int load_addr_set = 0;
        char * elf_interpreter = NULL;
        unsigned int interpreter_type = INTERPRETER_NONE;
        unsigned char ibcs2_interpreter = 0;
        unsigned long error;
        struct elf_phdr *elf_ppnt, *elf_phdata;
        unsigned long elf_bss, elf_brk;
        int elf_exec_fileno;
        int retval, i;
        unsigned int size;
        unsigned long elf_entry, interp_load_addr = 0;
        unsigned long start_code, end_code, start_data, end_data;
        unsigned long reloc_func_desc = 0;
        char passed_fileno[6];
        struct files_struct *files;
        int have_pt_gnu_stack, executable_stack = EXSTACK_DEFAULT;
        unsigned long def_flags = 0;
        struct {
                struct elfhdr elf_ex;
                struct elfhdr interp_elf_ex;
                struct exec interp_ex;
        } *loc;

        loc = kmalloc(sizeof(*loc), GFP_KERNEL);
        if (!loc) {
                retval = -ENOMEM;
                goto out_ret;
        }

        /* Get the exec-header */
        loc->elf_ex = *((struct elfhdr *)bprm->buf);

        retval = -ENOEXEC;
        /* First of all, some simple consistency checks */
        if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
                goto out;

        if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
                goto out;
        if (!elf_check_arch(&loc->elf_ex))
                goto out;
        if (!bprm->file->f_op || !bprm->file->f_op->mmap)
                goto out;

        /* Now read in all of the header information */
        if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr))
                goto out;
        if (loc->elf_ex.e_phnum < 1 ||
            loc->elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
                goto out;
        size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr);
        retval = -ENOMEM;
        elf_phdata = kmalloc(size, GFP_KERNEL);
        if (!elf_phdata)
                goto out;

        retval = kernel_read(bprm->file, loc->elf_ex.e_phoff,
                             (char *)elf_phdata, size);
        if (retval != size) {
                if (retval >= 0)
                        retval = -EIO;
                goto out_free_ph;
        }

        files = current->files; /* Refcounted so ok */
        retval = unshare_files();
        if (retval < 0)
                goto out_free_ph;
        if (files == current->files) {
                put_files_struct(files);
                files = NULL;
        }

        /* exec will make our files private anyway, but for the a.out
           loader stuff we need to do it earlier */
        retval = get_unused_fd();
        if (retval < 0)
                goto out_free_fh;
        get_file(bprm->file);
        fd_install(elf_exec_fileno = retval, bprm->file);

        elf_ppnt = elf_phdata;
        elf_bss = 0;
        elf_brk = 0;

        start_code = ~0UL;
        end_code = 0;
        start_data = 0;
        end_data = 0;

        for (i = 0; i < loc->elf_ex.e_phnum; i++) {
                if (elf_ppnt->p_type == PT_INTERP) {
                        /* This is the program interpreter used for
                         * shared libraries - for now assume that this
                         * is an a.out format binary
                         */
                        retval = -ENOEXEC;
                        if (elf_ppnt->p_filesz > PATH_MAX ||
                            elf_ppnt->p_filesz < 2)
                                goto out_free_file;

                        retval = -ENOMEM;
                        elf_interpreter = kmalloc(elf_ppnt->p_filesz,
                                                  GFP_KERNEL);
                        if (!elf_interpreter)
                                goto out_free_file;

                        retval = kernel_read(bprm->file, elf_ppnt->p_offset,
                                             elf_interpreter,
                                             elf_ppnt->p_filesz);
                        if (retval != elf_ppnt->p_filesz) {
                                if (retval >= 0)
                                        retval = -EIO;
                                goto out_free_interp;
                        }
                        /* make sure path is NULL terminated */
                        retval = -ENOEXEC;
                        if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
                                goto out_free_interp;

                        /* If the program interpreter is one of these two,
                         * then assume an iBCS2 image. Otherwise assume
                         * a native linux image.
                         */
                        if (strcmp(elf_interpreter, "/usr/lib/libc.so.1") == 0 ||
                            strcmp(elf_interpreter, "/usr/lib/ld.so.1") == 0)
                                ibcs2_interpreter = 1;

                        /*
                         * The early SET_PERSONALITY here is so that the lookup
                         * for the interpreter happens in the namespace of the
                         * to-be-execed image.  SET_PERSONALITY can select an
                         * alternate root.
                         *
                         * However, SET_PERSONALITY is NOT allowed to switch
                         * this task into the new image's memory mapping
                         * policy - that is, TASK_SIZE must still evaluate to
                         * that which is appropriate to the execing application.
                         * This is because exit_mmap() needs to have TASK_SIZE
                         * evaluate to the size of the old image.
                         *
                         * So if (say) a 64-bit application is execing a 32-bit
                         * application it is the architecture's responsibility
                         * to defer changing the value of TASK_SIZE until the
                         * switch really is going to happen - do this in
                         * flush_thread().      - akpm
                         */
                        SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);

                        interpreter = open_exec(elf_interpreter);
                        retval = PTR_ERR(interpreter);
                        if (IS_ERR(interpreter))
                                goto out_free_interp;
                        retval = kernel_read(interpreter, 0, bprm->buf,
                                             BINPRM_BUF_SIZE);
                        if (retval != BINPRM_BUF_SIZE) {
                                if (retval >= 0)
                                        retval = -EIO;
                                goto out_free_dentry;
                        }

                        /* Get the exec headers */
                        loc->interp_ex = *((struct exec *)bprm->buf);
                        loc->interp_elf_ex = *((struct elfhdr *)bprm->buf);
                        break;
                }
                elf_ppnt++;
        }

        elf_ppnt = elf_phdata;
        for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
                if (elf_ppnt->p_type == PT_GNU_STACK) {
                        if (elf_ppnt->p_flags & PF_X)
                                executable_stack = EXSTACK_ENABLE_X;
                        else
                                executable_stack = EXSTACK_DISABLE_X;
                        break;
                }
        have_pt_gnu_stack = (i < loc->elf_ex.e_phnum);

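        /*
         * So PF_X on a PT_GNU_STACK header requests an executable stack and
         * its absence requests a non-executable one, while a binary with no
         * PT_GNU_STACK header at all (have_pt_gnu_stack is false) stays at
         * EXSTACK_DEFAULT and leaves the decision to the architecture.
         */
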
        /* Some simple consistency checks for the interpreter */
        if (elf_interpreter) {
                interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;

                /* Now figure out which format our binary is */
                if ((N_MAGIC(loc->interp_ex) != OMAGIC) &&
                    (N_MAGIC(loc->interp_ex) != ZMAGIC) &&
                    (N_MAGIC(loc->interp_ex) != QMAGIC))
                        interpreter_type = INTERPRETER_ELF;

                if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
                        interpreter_type &= ~INTERPRETER_ELF;

                retval = -ELIBBAD;
                if (!interpreter_type)
                        goto out_free_dentry;

                /* Make sure only one type was selected */
                if ((interpreter_type & INTERPRETER_ELF) &&
                     interpreter_type != INTERPRETER_ELF) {
                        // FIXME - ratelimit this before re-enabling
                        // printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
                        interpreter_type = INTERPRETER_ELF;
                }
                /* Verify the interpreter has a valid arch */
                if ((interpreter_type == INTERPRETER_ELF) &&
                    !elf_check_arch(&loc->interp_elf_ex))
                        goto out_free_dentry;
        } else {
                /* Executables without an interpreter also need a personality */
                SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
        }

        /* OK, we are done with that, now set up the arg stuff,
           and then start this sucker up */
        if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) {
                char *passed_p = passed_fileno;
                sprintf(passed_fileno, "%d", elf_exec_fileno);

                if (elf_interpreter) {
                        retval = copy_strings_kernel(1, &passed_p, bprm);
                        if (retval)
                                goto out_free_dentry;
                        bprm->argc++;
                }
        }

        /* Flush all traces of the currently running executable */
        retval = flush_old_exec(bprm);
        if (retval)
                goto out_free_dentry;

        /* Discard our unneeded old files struct */
        if (files) {
                put_files_struct(files);
                files = NULL;
        }

        /* OK, This is the point of no return */
        current->mm->start_data = 0;
        current->mm->end_data = 0;
        current->mm->end_code = 0;
        current->mm->mmap = NULL;
        current->flags &= ~PF_FORKNOEXEC;
        current->mm->def_flags = def_flags;

        /* Do this immediately, since STACK_TOP as used in setup_arg_pages
           may depend on the personality.  */
        SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
        if (elf_read_implies_exec(loc->elf_ex, executable_stack))
                current->personality |= READ_IMPLIES_EXEC;

        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                current->flags |= PF_RANDOMIZE;
        arch_pick_mmap_layout(current->mm);

        /* Do this so that we can load the interpreter, if need be.  We will
           change some of these later */
        current->mm->free_area_cache = current->mm->mmap_base;
        current->mm->cached_hole_size = 0;
        retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
                                 executable_stack);
        if (retval < 0) {
                send_sig(SIGKILL, current, 0);
                goto out_free_dentry;
        }

        current->mm->start_stack = bprm->p;

        /* Now we do a little grungy work by mmaping the ELF image into
           the correct location in memory.  At this point, we assume that
           the image should be loaded at fixed address, not at a variable
           address. */
        for (i = 0, elf_ppnt = elf_phdata;
             i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
                int elf_prot = 0, elf_flags;
                unsigned long k, vaddr;

                if (elf_ppnt->p_type != PT_LOAD)
                        continue;

                if (unlikely (elf_brk > elf_bss)) {
                        unsigned long nbyte;

                        /* There was a PT_LOAD segment with p_memsz > p_filesz
                           before this one. Map anonymous pages, if needed,
                           and clear the area.  */
                        retval = set_brk (elf_bss + load_bias,
                                          elf_brk + load_bias);
                        if (retval) {
                                send_sig(SIGKILL, current, 0);
                                goto out_free_dentry;
                        }
                        nbyte = ELF_PAGEOFFSET(elf_bss);
                        if (nbyte) {
                                nbyte = ELF_MIN_ALIGN - nbyte;
                                if (nbyte > elf_brk - elf_bss)
                                        nbyte = elf_brk - elf_bss;
                                if (clear_user((void __user *)elf_bss +
                                                        load_bias, nbyte)) {
                                        /*
                                         * This bss-zeroing can fail if the ELF
                                         * file specifies odd protections. So
                                         * we don't check the return value
                                         */
                                }
                        }
                }

                if (elf_ppnt->p_flags & PF_R)
                        elf_prot |= PROT_READ;
                if (elf_ppnt->p_flags & PF_W)
                        elf_prot |= PROT_WRITE;
                if (elf_ppnt->p_flags & PF_X)
                        elf_prot |= PROT_EXEC;

                elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;

                vaddr = elf_ppnt->p_vaddr;
                if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
                        elf_flags |= MAP_FIXED;
                } else if (loc->elf_ex.e_type == ET_DYN) {
                        /* Try and get dynamic programs out of the way of the
                         * default mmap base, as well as whatever program they
                         * might try to exec.  This is because the brk will
                         * follow the loader, and is not movable.  */
                        load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
                }

                error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
                                elf_prot, elf_flags);
                if (BAD_ADDR(error)) {
                        send_sig(SIGKILL, current, 0);
                        goto out_free_dentry;
                }

                if (!load_addr_set) {
                        load_addr_set = 1;
                        load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
                        if (loc->elf_ex.e_type == ET_DYN) {
                                load_bias += error -
                                             ELF_PAGESTART(load_bias + vaddr);
                                load_addr += load_bias;
                                reloc_func_desc = load_bias;
                        }
                }
                k = elf_ppnt->p_vaddr;
                if (k < start_code)
                        start_code = k;
                if (start_data < k)
                        start_data = k;

                /*
                 * Check to see if the section's size will overflow the
                 * allowed task size. Note that p_filesz must always be
                 * <= p_memsz so it is only necessary to check p_memsz.
                 */
                if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
                    elf_ppnt->p_memsz > TASK_SIZE ||
                    TASK_SIZE - elf_ppnt->p_memsz < k) {
                        /* set_brk can never work. Avoid overflows. */
                        send_sig(SIGKILL, current, 0);
                        goto out_free_dentry;
                }

                k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;

                if (k > elf_bss)
                        elf_bss = k;
                if ((elf_ppnt->p_flags & PF_X) && end_code < k)
                        end_code = k;
                if (end_data < k)
                        end_data = k;
                k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
                if (k > elf_brk)
                        elf_brk = k;
        }

        loc->elf_ex.e_entry += load_bias;
        elf_bss += load_bias;
        elf_brk += load_bias;
        start_code += load_bias;
        end_code += load_bias;
        start_data += load_bias;
        end_data += load_bias;

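        /*
         * For ET_EXEC binaries load_bias stayed 0 above, so these
         * adjustments are no-ops; for ET_DYN binaries the entry point, bss,
         * brk and the code/data bounds are all shifted to where the first
         * PT_LOAD segment actually landed.
         */
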
        /* Calling set_brk effectively mmaps the pages that we need
         * for the bss and break sections.  We must do this before
         * mapping in the interpreter, to make sure it doesn't wind
         * up getting placed where the bss needs to go.
         */
        retval = set_brk(elf_bss, elf_brk);
        if (retval) {
                send_sig(SIGKILL, current, 0);
                goto out_free_dentry;
        }
        if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
                send_sig(SIGSEGV, current, 0);
                retval = -EFAULT; /* Nobody gets to see this, but.. */
                goto out_free_dentry;
        }

        if (elf_interpreter) {
                if (interpreter_type == INTERPRETER_AOUT)
                        elf_entry = load_aout_interp(&loc->interp_ex,
                                                     interpreter);
                else
                        elf_entry = load_elf_interp(&loc->interp_elf_ex,
                                                    interpreter,
                                                    &interp_load_addr);
                if (BAD_ADDR(elf_entry)) {
                        force_sig(SIGSEGV, current);
                        retval = IS_ERR((void *)elf_entry) ?
                                        (int)elf_entry : -EINVAL;
                        goto out_free_dentry;
                }
                reloc_func_desc = interp_load_addr;

                allow_write_access(interpreter);
                fput(interpreter);
                kfree(elf_interpreter);
        } else {
                elf_entry = loc->elf_ex.e_entry;
                if (BAD_ADDR(elf_entry)) {
                        force_sig(SIGSEGV, current);
                        retval = -EINVAL;
                        goto out_free_dentry;
                }
        }

        kfree(elf_phdata);

        if (interpreter_type != INTERPRETER_AOUT)
                sys_close(elf_exec_fileno);

        set_binfmt(&elf_format);

#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
        retval = arch_setup_additional_pages(bprm, executable_stack);
        if (retval < 0) {
                send_sig(SIGKILL, current, 0);
                goto out;
        }
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */

        compute_creds(bprm);
        current->flags &= ~PF_FORKNOEXEC;
        create_elf_tables(bprm, &loc->elf_ex,
                          (interpreter_type == INTERPRETER_AOUT),
                          load_addr, interp_load_addr);
        /* N.B. passed_fileno might not be initialized? */
        if (interpreter_type == INTERPRETER_AOUT)
                current->mm->arg_start += strlen(passed_fileno) + 1;
        current->mm->end_code = end_code;
        current->mm->start_code = start_code;
        current->mm->start_data = start_data;
        current->mm->end_data = end_data;
        current->mm->start_stack = bprm->p;

        if (current->personality & MMAP_PAGE_ZERO) {
                /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
                   and some applications "depend" upon this behavior.
                   Since we do not have the power to recompile these, we
                   emulate the SVr4 behavior. Sigh. */
                down_write(&current->mm->mmap_sem);
                error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
                                MAP_FIXED | MAP_PRIVATE, 0);
                up_write(&current->mm->mmap_sem);
        }

#ifdef ELF_PLAT_INIT
        /*
         * The ABI may specify that certain registers be set up in special
         * ways (on i386 %edx is the address of a DT_FINI function, for
         * example.  In addition, it may also specify (eg, PowerPC64 ELF)
         * that the e_entry field is the address of the function descriptor
         * for the startup routine, rather than the address of the startup
         * routine itself.  This macro performs whatever initialization to
         * the regs structure is required as well as any relocations to the
         * function descriptor entries when executing dynamically linked apps.
         */
        ELF_PLAT_INIT(regs, reloc_func_desc);
#endif

        start_thread(regs, elf_entry, bprm->p);
        if (unlikely(current->ptrace & PT_PTRACED)) {
                if (current->ptrace & PT_TRACE_EXEC)
                        ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
                else
                        send_sig(SIGTRAP, current, 0);
        }
        retval = 0;
out:
        kfree(loc);
out_ret:
        return retval;

        /* error cleanup */
out_free_dentry:
        allow_write_access(interpreter);
        if (interpreter)
                fput(interpreter);
out_free_interp:
        kfree(elf_interpreter);
out_free_file:
        sys_close(elf_exec_fileno);
out_free_fh:
        if (files)
                reset_files_struct(current, files);
out_free_ph:
        kfree(elf_phdata);
        goto out;
}

/* This is really simpleminded and specialized - we are loading an
   a.out library that is given an ELF header. */
static int load_elf_library(struct file *file)
{
        struct elf_phdr *elf_phdata;
        struct elf_phdr *eppnt;
        unsigned long elf_bss, bss, len;
        int retval, error, i, j;
        struct elfhdr elf_ex;

        error = -ENOEXEC;
        retval = kernel_read(file, 0, (char *)&elf_ex, sizeof(elf_ex));
        if (retval != sizeof(elf_ex))
                goto out;

        if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
                goto out;

        /* First of all, some simple consistency checks */
        if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
            !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
                goto out;

        /* Now read in all of the header information */

        j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
        /* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */

        error = -ENOMEM;
        elf_phdata = kmalloc(j, GFP_KERNEL);
        if (!elf_phdata)
                goto out;

        eppnt = elf_phdata;
        error = -ENOEXEC;
        retval = kernel_read(file, elf_ex.e_phoff, (char *)eppnt, j);
        if (retval != j)
                goto out_free_ph;

        for (j = 0, i = 0; i < elf_ex.e_phnum; i++)
                if ((eppnt + i)->p_type == PT_LOAD)
                        j++;
        if (j != 1)
                goto out_free_ph;

        while (eppnt->p_type != PT_LOAD)
                eppnt++;

        /* Now use mmap to map the library into memory. */
        down_write(&current->mm->mmap_sem);
        error = do_mmap(file,
                        ELF_PAGESTART(eppnt->p_vaddr),
                        (eppnt->p_filesz +
                         ELF_PAGEOFFSET(eppnt->p_vaddr)),
                        PROT_READ | PROT_WRITE | PROT_EXEC,
                        MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
                        (eppnt->p_offset -
                         ELF_PAGEOFFSET(eppnt->p_vaddr)));
        up_write(&current->mm->mmap_sem);
        if (error != ELF_PAGESTART(eppnt->p_vaddr))
                goto out_free_ph;

        elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
        if (padzero(elf_bss)) {
                error = -EFAULT;
                goto out_free_ph;
        }

        len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr +
                            ELF_MIN_ALIGN - 1);
        bss = eppnt->p_memsz + eppnt->p_vaddr;
        if (bss > len) {
                down_write(&current->mm->mmap_sem);
                do_brk(len, bss - len);
                up_write(&current->mm->mmap_sem);
        }
        error = 0;

out_free_ph:
        kfree(elf_phdata);
out:
        return error;
}

/*
 * Note that some platforms still use traditional core dumps and not
 * the ELF core dump.  Each platform can select it as appropriate.
 */
#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)

/*
 * ELF core dumper
 *
 * Modelled on fs/exec.c:aout_core_dump()
 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
 */

/*
 * These are the only things you should do on a core-file: use only these
 * functions to write out all the necessary info.
 */
static int dump_write(struct file *file, const void *addr, int nr)
{
        return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
}

static int dump_seek(struct file *file, loff_t off)
{
        if (file->f_op->llseek) {
                if (file->f_op->llseek(file, off, 0) != off)
                        return 0;
        } else
                file->f_pos = off;
        return 1;
}

/*
 * Decide whether a segment is worth dumping; default is yes to be
 * sure (missing info is worse than too much; etc).
 * Personally I'd include everything, and use the coredump limit...
 *
 * I think we should skip something. But I am not sure how. H.J.
 */
static int maydump(struct vm_area_struct *vma)
{
        /* Do not dump I/O mapped devices or special mappings */
        if (vma->vm_flags & (VM_IO | VM_RESERVED))
                return 0;

        /* Dump shared memory only if mapped from an anonymous file. */
        if (vma->vm_flags & VM_SHARED)
                return vma->vm_file->f_dentry->d_inode->i_nlink == 0;

        /* If it hasn't been written to, don't write it out */
        if (!vma->anon_vma)
                return 0;

        return 1;
}

/* An ELF note in memory */
struct memelfnote
{
        const char *name;
        int type;
        unsigned int datasz;
        void *data;
};

static int notesize(struct memelfnote *en)
{
        int sz;

        sz = sizeof(struct elf_note);
        sz += roundup(strlen(en->name) + 1, 4);
        sz += roundup(en->datasz, 4);

        return sz;
}

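/*
 * Example: a note named "CORE" (5 bytes including the NUL, padded to 8)
 * carrying a prstatus payload occupies
 * sizeof(struct elf_note) + 8 + roundup(sizeof(struct elf_prstatus), 4)
 * bytes in the core file.
 */
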
#define DUMP_WRITE(addr, nr)	\
	do { if (!dump_write(file, (addr), (nr))) return 0; } while(0)
#define DUMP_SEEK(off)	\
	do { if (!dump_seek(file, (off))) return 0; } while(0)

static int writenote(struct memelfnote *men, struct file *file)
{
        struct elf_note en;

        en.n_namesz = strlen(men->name) + 1;
        en.n_descsz = men->datasz;
        en.n_type = men->type;

        DUMP_WRITE(&en, sizeof(en));
        DUMP_WRITE(men->name, en.n_namesz);
        /* XXX - cast from long long to long to avoid need for libgcc.a */
        DUMP_SEEK(roundup((unsigned long)file->f_pos, 4));	/* XXX */
        DUMP_WRITE(men->data, men->datasz);
        DUMP_SEEK(roundup((unsigned long)file->f_pos, 4));	/* XXX */

        return 1;
}
#undef DUMP_WRITE
#undef DUMP_SEEK

#define DUMP_WRITE(addr, nr)	\
	if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
		goto end_coredump;
#define DUMP_SEEK(off)	\
	if (!dump_seek(file, (off))) \
		goto end_coredump;

static void fill_elf_header(struct elfhdr *elf, int segs)
{
        memcpy(elf->e_ident, ELFMAG, SELFMAG);
        elf->e_ident[EI_CLASS] = ELF_CLASS;
        elf->e_ident[EI_DATA] = ELF_DATA;
        elf->e_ident[EI_VERSION] = EV_CURRENT;
        elf->e_ident[EI_OSABI] = ELF_OSABI;
        memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);

        elf->e_type = ET_CORE;
        elf->e_machine = ELF_ARCH;
        elf->e_version = EV_CURRENT;
        elf->e_entry = 0;
        elf->e_phoff = sizeof(struct elfhdr);
        elf->e_shoff = 0;
        elf->e_flags = ELF_CORE_EFLAGS;
        elf->e_ehsize = sizeof(struct elfhdr);
        elf->e_phentsize = sizeof(struct elf_phdr);
        elf->e_phnum = segs;
        elf->e_shentsize = 0;
        elf->e_shnum = 0;
        elf->e_shstrndx = 0;
        return;
}

static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
{
        phdr->p_type = PT_NOTE;
        phdr->p_offset = offset;
        phdr->p_vaddr = 0;
        phdr->p_paddr = 0;
        phdr->p_filesz = sz;
        phdr->p_memsz = 0;
        phdr->p_flags = 0;
        phdr->p_align = 0;
        return;
}

static void fill_note(struct memelfnote *note, const char *name, int type,
                unsigned int sz, void *data)
{
        note->name = name;
        note->type = type;
        note->datasz = sz;
        note->data = data;
        return;
}

/*
 * fill up all the fields in prstatus from the given task struct, except
 * registers which need to be filled up separately.
 */
static void fill_prstatus(struct elf_prstatus *prstatus,
                struct task_struct *p, long signr)
{
        prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
        prstatus->pr_sigpend = p->pending.signal.sig[0];
        prstatus->pr_sighold = p->blocked.sig[0];
        prstatus->pr_pid = p->pid;
        prstatus->pr_ppid = p->parent->pid;
        prstatus->pr_pgrp = process_group(p);
        prstatus->pr_sid = p->signal->session;
        if (thread_group_leader(p)) {
                /*
                 * This is the record for the group leader.  Add in the
                 * cumulative times of previous dead threads.  This total
                 * won't include the time of each live thread whose state
                 * is included in the core dump.  The final total reported
                 * to our parent process when it calls wait4 will include
                 * those sums as well as the little bit more time it takes
                 * this and each other thread to finish dying after the
                 * core dump synchronization phase.
                 */
                cputime_to_timeval(cputime_add(p->utime, p->signal->utime),
                                   &prstatus->pr_utime);
                cputime_to_timeval(cputime_add(p->stime, p->signal->stime),
                                   &prstatus->pr_stime);
        } else {
                cputime_to_timeval(p->utime, &prstatus->pr_utime);
                cputime_to_timeval(p->stime, &prstatus->pr_stime);
        }
        cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
        cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
}

static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
                       struct mm_struct *mm)
{
        unsigned int i, len;

        /* first copy the parameters from user space */
        memset(psinfo, 0, sizeof(struct elf_prpsinfo));

        len = mm->arg_end - mm->arg_start;
        if (len >= ELF_PRARGSZ)
                len = ELF_PRARGSZ-1;
        if (copy_from_user(&psinfo->pr_psargs,
                           (const char __user *)mm->arg_start, len))
                return -EFAULT;
        for (i = 0; i < len; i++)
                if (psinfo->pr_psargs[i] == 0)
                        psinfo->pr_psargs[i] = ' ';
        psinfo->pr_psargs[len] = 0;

        psinfo->pr_pid = p->pid;
        psinfo->pr_ppid = p->parent->pid;
        psinfo->pr_pgrp = process_group(p);
        psinfo->pr_sid = p->signal->session;

        i = p->state ? ffz(~p->state) + 1 : 0;
        psinfo->pr_state = i;
        psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
        psinfo->pr_zomb = psinfo->pr_sname == 'Z';
        psinfo->pr_nice = task_nice(p);
        psinfo->pr_flag = p->flags;
        SET_UID(psinfo->pr_uid, p->uid);
        SET_GID(psinfo->pr_gid, p->gid);
        strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));

        return 0;
}

/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status
{
        struct list_head list;
        struct elf_prstatus prstatus;	/* NT_PRSTATUS */
        elf_fpregset_t fpu;		/* NT_PRFPREG */
        struct task_struct *thread;
#ifdef ELF_CORE_COPY_XFPREGS
        elf_fpxregset_t xfpu;		/* NT_PRXFPREG */
#endif
        struct memelfnote notes[3];
        int num_notes;
};

/*
 * In order to add the specific thread information for the elf file format,
 * we need to keep a linked list of every thread's pr_status and then create
 * a single section for them in the final core file.
 */
static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
{
        int sz = 0;
        struct task_struct *p = t->thread;
        t->num_notes = 0;

        fill_prstatus(&t->prstatus, p, signr);
        elf_core_copy_task_regs(p, &t->prstatus.pr_reg);

        fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
                  &(t->prstatus));
        t->num_notes++;
        sz += notesize(&t->notes[0]);

        if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL,
                                                                &t->fpu))) {
                fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
                          &(t->fpu));
                t->num_notes++;
                sz += notesize(&t->notes[1]);
        }

#ifdef ELF_CORE_COPY_XFPREGS
        if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
                fill_note(&t->notes[2], "LINUX", NT_PRXFPREG, sizeof(t->xfpu),
                          &t->xfpu);
                t->num_notes++;
                sz += notesize(&t->notes[2]);
        }
#endif
        return sz;
}

/*
 * Actual dumper
 *
 * This is a two-pass process; first we find the offsets of the bits,
 * and then they are actually written out.  If we run out of core limit
 * we just truncate.
 */
static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file)
{
#define NUM_NOTES 6
        int has_dumped = 0;
        mm_segment_t fs;
        int segs;
        size_t size = 0;
        int i;
        struct vm_area_struct *vma;
        struct elfhdr *elf = NULL;
        loff_t offset = 0, dataoff;
        unsigned long limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
        int numnote;
        struct memelfnote *notes = NULL;
        struct elf_prstatus *prstatus = NULL;	/* NT_PRSTATUS */
        struct elf_prpsinfo *psinfo = NULL;	/* NT_PRPSINFO */
        struct task_struct *g, *p;
        LIST_HEAD(thread_list);
        struct list_head *t;
        elf_fpregset_t *fpu = NULL;
#ifdef ELF_CORE_COPY_XFPREGS
        elf_fpxregset_t *xfpu = NULL;
#endif
        int thread_status_size = 0;
        elf_addr_t *auxv;

        /*
         * We no longer stop all VM operations.
         *
         * This is because those processes that could possibly change map_count
         * or the mmap / vma pages are now blocked in do_exit on current
         * finishing this core dump.
         *
         * Only ptrace can touch these memory addresses, but it doesn't change
         * the map_count or the pages allocated. So no possibility of crashing
         * exists while dumping the mm->vm_next areas to the core file.
         */

        /* alloc memory for large data structures: too large to be on stack */
        elf = kmalloc(sizeof(*elf), GFP_KERNEL);
        if (!elf)
                goto cleanup;
        prstatus = kmalloc(sizeof(*prstatus), GFP_KERNEL);
        if (!prstatus)
                goto cleanup;
        psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
        if (!psinfo)
                goto cleanup;
        notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL);
        if (!notes)
                goto cleanup;
        fpu = kmalloc(sizeof(*fpu), GFP_KERNEL);
        if (!fpu)
                goto cleanup;
#ifdef ELF_CORE_COPY_XFPREGS
        xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL);
        if (!xfpu)
                goto cleanup;
#endif

        if (signr) {
                struct elf_thread_status *tmp;
                rcu_read_lock();
                do_each_thread(g,p)
                        if (current->mm == p->mm && current != p) {
                                tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
                                if (!tmp) {
                                        rcu_read_unlock();
                                        goto cleanup;
                                }
                                tmp->thread = p;
                                list_add(&tmp->list, &thread_list);
                        }
                while_each_thread(g,p);
                rcu_read_unlock();
                list_for_each(t, &thread_list) {
                        struct elf_thread_status *tmp;
                        int sz;

                        tmp = list_entry(t, struct elf_thread_status, list);
                        sz = elf_dump_thread_status(signr, tmp);
                        thread_status_size += sz;
                }
        }

        /* now collect the dump for the current */
        memset(prstatus, 0, sizeof(*prstatus));
        fill_prstatus(prstatus, current, signr);
        elf_core_copy_regs(&prstatus->pr_reg, regs);

        segs = current->mm->map_count;
#ifdef ELF_CORE_EXTRA_PHDRS
        segs += ELF_CORE_EXTRA_PHDRS;
#endif

        /* Set up header */
        fill_elf_header(elf, segs + 1);	/* including notes section */

        has_dumped = 1;
        current->flags |= PF_DUMPCORE;

        /*
         * Set up the notes in similar form to SVR4 core dumps made
         * with info from their /proc.
         */

        fill_note(notes + 0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);
        fill_psinfo(psinfo, current->group_leader, current->mm);
        fill_note(notes + 1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);

        numnote = 2;

        auxv = (elf_addr_t *)current->mm->saved_auxv;

        i = 0;
        do
                i += 2;
        while (auxv[i - 2] != AT_NULL);
        fill_note(&notes[numnote++], "CORE", NT_AUXV,
                  i * sizeof(elf_addr_t), auxv);

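        /*
         * The do/while above counts saved_auxv in (a_type, a_val) pairs and
         * stops only after scanning the AT_NULL pair, so the terminator
         * itself is included in the NT_AUXV note.
         */
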
        /* Try to dump the FPU. */
        if ((prstatus->pr_fpvalid =
                        elf_core_copy_task_fpregs(current, regs, fpu)))
                fill_note(notes + numnote++,
                          "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
#ifdef ELF_CORE_COPY_XFPREGS
        if (elf_core_copy_task_xfpregs(current, xfpu))
                fill_note(notes + numnote++,
                          "LINUX", NT_PRXFPREG, sizeof(*xfpu), xfpu);
#endif

        fs = get_fs();
        set_fs(KERNEL_DS);

        DUMP_WRITE(elf, sizeof(*elf));
        offset += sizeof(*elf);				/* Elf header */
        offset += (segs+1) * sizeof(struct elf_phdr);	/* Program headers */

        /* Write notes phdr entry */
        {
                struct elf_phdr phdr;
                int sz = 0;

                for (i = 0; i < numnote; i++)
                        sz += notesize(notes + i);

                sz += thread_status_size;

                fill_elf_note_phdr(&phdr, sz, offset);
                offset += sz;
                DUMP_WRITE(&phdr, sizeof(phdr));
        }

        /* Page-align dumped data */
        dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);

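        /*
         * Layout so far: ELF header, then segs+1 program headers, then the
         * notes. Rounding dataoff/offset up here means the memory segments
         * written below start on an ELF_EXEC_PAGESIZE boundary, matching
         * the p_align recorded in their program headers.
         */
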
        /* Write program headers for segments dump */
        for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
                struct elf_phdr phdr;
                size_t sz;

                sz = vma->vm_end - vma->vm_start;

                phdr.p_type = PT_LOAD;
                phdr.p_offset = offset;
                phdr.p_vaddr = vma->vm_start;
                phdr.p_paddr = 0;
                phdr.p_filesz = maydump(vma) ? sz : 0;
                phdr.p_memsz = sz;
                offset += phdr.p_filesz;
                phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
                if (vma->vm_flags & VM_WRITE)
                        phdr.p_flags |= PF_W;
                if (vma->vm_flags & VM_EXEC)
                        phdr.p_flags |= PF_X;
                phdr.p_align = ELF_EXEC_PAGESIZE;

                DUMP_WRITE(&phdr, sizeof(phdr));
        }

#ifdef ELF_CORE_WRITE_EXTRA_PHDRS
        ELF_CORE_WRITE_EXTRA_PHDRS;
#endif

        /* write out the notes section */
        for (i = 0; i < numnote; i++)
                if (!writenote(notes + i, file))
                        goto end_coredump;

        /* write out the thread status notes section */
        list_for_each(t, &thread_list) {
                struct elf_thread_status *tmp =
                                list_entry(t, struct elf_thread_status, list);

                for (i = 0; i < tmp->num_notes; i++)
                        if (!writenote(&tmp->notes[i], file))
                                goto end_coredump;
        }

        DUMP_SEEK(dataoff);

        for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
                unsigned long addr;

                if (!maydump(vma))
                        continue;

                for (addr = vma->vm_start;
                     addr < vma->vm_end;
                     addr += PAGE_SIZE) {
                        struct page *page;
                        struct vm_area_struct *vma;

                        if (get_user_pages(current, current->mm, addr, 1, 0, 1,
                                                &page, &vma) <= 0) {
                                DUMP_SEEK(file->f_pos + PAGE_SIZE);
                        } else {
                                if (page == ZERO_PAGE(addr)) {
                                        DUMP_SEEK(file->f_pos + PAGE_SIZE);
                                } else {
                                        void *kaddr;
                                        flush_cache_page(vma, addr,
                                                         page_to_pfn(page));
                                        kaddr = kmap(page);
                                        if ((size += PAGE_SIZE) > limit ||
                                            !dump_write(file, kaddr,
                                                        PAGE_SIZE)) {
                                                kunmap(page);
                                                page_cache_release(page);
                                                goto end_coredump;
                                        }
                                        kunmap(page);
                                }
                                page_cache_release(page);
                        }
                }
        }

#ifdef ELF_CORE_WRITE_EXTRA_DATA
        ELF_CORE_WRITE_EXTRA_DATA;
#endif

        if (file->f_pos != offset) {
                /* Sanity check */
                printk(KERN_WARNING
                       "elf_core_dump: file->f_pos (%Ld) != offset (%Ld)\n",
                       file->f_pos, offset);
        }

end_coredump:
        set_fs(fs);

cleanup:
        while (!list_empty(&thread_list)) {
                struct list_head *tmp = thread_list.next;
                list_del(tmp);
                kfree(list_entry(tmp, struct elf_thread_status, list));
        }

        kfree(elf);
        kfree(prstatus);
        kfree(psinfo);
        kfree(notes);
        kfree(fpu);
#ifdef ELF_CORE_COPY_XFPREGS
        kfree(xfpu);
#endif
        return has_dumped;
#undef NUM_NOTES
}

#endif		/* USE_ELF_CORE_DUMP */

static int __init init_elf_binfmt(void)
{
        return register_binfmt(&elf_format);
}

static void __exit exit_elf_binfmt(void)
{
        /* Remove the COFF and ELF loaders. */
        unregister_binfmt(&elf_format);
}

core_initcall(init_elf_binfmt);
module_exit(exit_elf_binfmt);
MODULE_LICENSE("GPL");