/*
 * linux/fs/binfmt_elf.c
 *
 * These are the functions used to load ELF format executables as used
 * on SVr4 machines.  Information on the format may be found in the book
 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
 * Tools".
 *
 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/a.out.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/binfmts.h>
#include <linux/string.h>
#include <linux/file.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/personality.h>
#include <linux/elfcore.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/random.h>

#include <asm/uaccess.h>
#include <asm/param.h>
#include <asm/page.h>

#include <linux/elf.h>
static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs);
static int load_elf_library(struct file*);
static unsigned long elf_map (struct file *, unsigned long, struct elf_phdr *, int, int);
extern int dump_fpu (struct pt_regs *, elf_fpregset_t *);

#ifndef elf_addr_t
#define elf_addr_t unsigned long
#endif

/*
 * If we don't support core dumping, then supply a NULL so we
 * don't even try.
 */
#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file);
#else
#define elf_core_dump	NULL
#endif
#if ELF_EXEC_PAGESIZE > PAGE_SIZE
# define ELF_MIN_ALIGN	ELF_EXEC_PAGESIZE
#else
# define ELF_MIN_ALIGN	PAGE_SIZE
#endif

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS	0
#endif

#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
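
/*
 * Illustrative note (not from the original source): assuming
 * ELF_MIN_ALIGN == 4096 (0x1000), the macros above behave as follows:
 *
 *	ELF_PAGESTART(0x08048123)  == 0x08048000  (round down to page)
 *	ELF_PAGEOFFSET(0x08048123) == 0x00000123  (offset within page)
 *	ELF_PAGEALIGN(0x08048123)  == 0x08049000  (round up to page)
 */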
static struct linux_binfmt elf_format = {
	.module		= THIS_MODULE,
	.load_binary	= load_elf_binary,
	.load_shlib	= load_elf_library,
	.core_dump	= elf_core_dump,
	.min_coredump	= ELF_EXEC_PAGESIZE
};

#define BAD_ADDR(x) ((unsigned long)(x) > TASK_SIZE)
static int set_brk(unsigned long start, unsigned long end)
{
	start = ELF_PAGEALIGN(start);
	end = ELF_PAGEALIGN(end);
	if (end > start) {
		unsigned long addr;
		down_write(&current->mm->mmap_sem);
		addr = do_brk(start, end - start);
		up_write(&current->mm->mmap_sem);
		if (BAD_ADDR(addr))
			return addr;
	}
	current->mm->start_brk = current->mm->brk = end;
	return 0;
}
/* We need to explicitly zero any fractional pages
   after the data section (i.e. bss).  This would
   contain the junk from the file that should not
   be in memory
 */
static int padzero(unsigned long elf_bss)
{
	unsigned long nbyte;

	nbyte = ELF_PAGEOFFSET(elf_bss);
	if (nbyte) {
		nbyte = ELF_MIN_ALIGN - nbyte;
		if (clear_user((void __user *) elf_bss, nbyte))
			return -EFAULT;
	}
	return 0;
}
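
/*
 * Worked example (illustrative, assuming ELF_MIN_ALIGN == 4096): for
 * elf_bss == 0x0804a123, ELF_PAGEOFFSET() gives 0x123, so padzero()
 * clears the 0xedd bytes from 0x0804a123 up to the page boundary at
 * 0x0804b000.  If elf_bss is already page-aligned, nothing is cleared.
 */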
/* Let's use some macros to make this stack manipulation a little clearer */
#ifdef CONFIG_STACK_GROWSUP
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
#define STACK_ROUND(sp, items) \
	((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; old_sp; })
#else
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
#define STACK_ROUND(sp, items) \
	(((unsigned long) (sp - items)) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
#endif
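
/*
 * Illustrative example (not from the original source), for the
 * grows-down case with 4-byte elf_addr_t and sp == 0xbfff1000:
 *
 *	STACK_ADD(sp, 2)   == 0xbfff0ff8  (room for 2 entries below sp)
 *	STACK_ROUND(sp, 3) == 0xbfff0ff0  (3 entries, 16-byte aligned)
 *	STACK_ALLOC(sp, 7) moves sp down 7 bytes and yields 0xbfff0ff9
 */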
static int
create_elf_tables(struct linux_binprm *bprm, struct elfhdr * exec,
		int interp_aout, unsigned long load_addr,
		unsigned long interp_load_addr)
{
	unsigned long p = bprm->p;
	int argc = bprm->argc;
	int envc = bprm->envc;
	elf_addr_t __user *argv;
	elf_addr_t __user *envp;
	elf_addr_t __user *sp;
	elf_addr_t __user *u_platform;
	const char *k_platform = ELF_PLATFORM;
	int items;
	elf_addr_t *elf_info;
	int ei_index = 0;
	struct task_struct *tsk = current;

	/*
	 * If this architecture has a platform capability string, copy it
	 * to userspace.  In some cases (Sparc), this info is impossible
	 * for userspace to get any other way, in others (i386) it is
	 * merely difficult.
	 */
	u_platform = NULL;
	if (k_platform) {
		size_t len = strlen(k_platform) + 1;

		/*
		 * In some cases (e.g. Hyper-Threading), we want to avoid L1
		 * evictions by the processes running on the same package. One
		 * thing we can do is to shuffle the initial stack for them.
		 */
		p = arch_align_stack(p);

		u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (__copy_to_user(u_platform, k_platform, len))
			return -EFAULT;
	}
	/* Create the ELF interpreter info */
	elf_info = (elf_addr_t *) current->mm->saved_auxv;
#define NEW_AUX_ENT(id, val) \
	do { elf_info[ei_index++] = id; elf_info[ei_index++] = val; } while (0)

#ifdef ARCH_DLINFO
	/*
	 * ARCH_DLINFO must come first so PPC can do its special alignment of
	 * AUXV.
	 */
	ARCH_DLINFO;
#endif
	NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
	NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
	NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
	NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
	NEW_AUX_ENT(AT_PHENT, sizeof (struct elf_phdr));
	NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
	NEW_AUX_ENT(AT_BASE, interp_load_addr);
	NEW_AUX_ENT(AT_FLAGS, 0);
	NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
	NEW_AUX_ENT(AT_UID, (elf_addr_t) tsk->uid);
	NEW_AUX_ENT(AT_EUID, (elf_addr_t) tsk->euid);
	NEW_AUX_ENT(AT_GID, (elf_addr_t) tsk->gid);
	NEW_AUX_ENT(AT_EGID, (elf_addr_t) tsk->egid);
	NEW_AUX_ENT(AT_SECURE, (elf_addr_t) security_bprm_secureexec(bprm));
	if (k_platform) {
		NEW_AUX_ENT(AT_PLATFORM, (elf_addr_t)(unsigned long)u_platform);
	}
	if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
		NEW_AUX_ENT(AT_EXECFD, (elf_addr_t) bprm->interp_data);
	}
#undef NEW_AUX_ENT

	/* AT_NULL is zero; clear the rest too */
	memset(&elf_info[ei_index], 0,
	       sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);

	/* And advance past the AT_NULL entry.  */
	ei_index += 2;
	sp = STACK_ADD(p, ei_index);

	items = (argc + 1) + (envc + 1);
	if (interp_aout) {
		items += 3; /* a.out interpreters require argv & envp too */
	} else {
		items += 1; /* ELF interpreters only put argc on the stack */
	}
	bprm->p = STACK_ROUND(sp, items);

	/* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
	sp = (elf_addr_t __user *)bprm->p - items - ei_index;
	bprm->exec = (unsigned long) sp; /* XXX: PARISC HACK */
#else
	sp = (elf_addr_t __user *)bprm->p;
#endif

	/* Now, let's put argc (and argv, envp if appropriate) on the stack */
	if (__put_user(argc, sp++))
		return -EFAULT;
	if (interp_aout) {
		argv = sp + 2;
		envp = argv + argc + 1;
		__put_user((elf_addr_t)(unsigned long)argv, sp++);
		__put_user((elf_addr_t)(unsigned long)envp, sp++);
	} else {
		argv = sp;
		envp = argv + argc + 1;
	}
	/* Populate argv and envp */
	p = current->mm->arg_end = current->mm->arg_start;
	while (argc-- > 0) {
		size_t len;
		__put_user((elf_addr_t)p, argv++);
		len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
			return 0;
		p += len;
	}
	if (__put_user(0, argv))
		return -EFAULT;
	current->mm->arg_end = current->mm->env_start = p;
	while (envc-- > 0) {
		size_t len;
		__put_user((elf_addr_t)p, envp++);
		len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
			return 0;
		p += len;
	}
	if (__put_user(0, envp))
		return -EFAULT;
	current->mm->env_end = p;

	/* Put the elf_info on the stack in the right place.  */
	sp = (elf_addr_t __user *)envp + 1;
	if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
		return -EFAULT;
	return 0;
}
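
/*
 * Sketch (illustrative, not from the original source) of the initial
 * stack that create_elf_tables() builds for the grows-down case, from
 * the final sp towards higher addresses:
 *
 *	[ argc ]
 *	[ argv[0] ... argv[argc-1] ] [ NULL ]
 *	[ envp[0] ... ]              [ NULL ]
 *	[ auxv: (a_type, a_val) pairs, ending with (AT_NULL, 0) ]
 *	[ argument/environment strings and the platform string ]
 *
 * This matches the System V ABI process-startup layout; the exact
 * padding depends on STACK_ROUND and arch_align_stack() above.
 */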
#ifndef elf_map

static unsigned long elf_map(struct file *filep, unsigned long addr,
			     struct elf_phdr *eppnt, int prot, int type)
{
	unsigned long map_addr;
	unsigned long pageoffset = ELF_PAGEOFFSET(eppnt->p_vaddr);

	down_write(&current->mm->mmap_sem);
	/* mmap() will return -EINVAL if given a zero size, but a
	 * segment with zero filesize is perfectly valid */
	if (eppnt->p_filesz + pageoffset)
		map_addr = do_mmap(filep, ELF_PAGESTART(addr),
				   eppnt->p_filesz + pageoffset, prot, type,
				   eppnt->p_offset - pageoffset);
	else
		map_addr = ELF_PAGESTART(addr);
	up_write(&current->mm->mmap_sem);
	return(map_addr);
}

#endif /* !elf_map */
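
/*
 * Worked example (illustrative): for a phdr with p_vaddr == 0x08048134
 * and p_offset == 0x134, pageoffset is 0x134, so the do_mmap() call
 * maps from ELF_PAGESTART(addr) == 0x08048000 with file offset 0 - the
 * address and the file offset are rounded down together, preserving
 * the in-page position of the segment data.
 */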
/* This is much more generalized than the library routine read function,
   so we keep this separate.  Technically the library read function
   is only provided so that we can read a.out libraries that have
   an ELF header */
static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
				     struct file * interpreter,
				     unsigned long *interp_load_addr)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long load_addr = 0;
	int load_addr_set = 0;
	unsigned long last_bss = 0, elf_bss = 0;
	unsigned long error = ~0UL;
	int retval, i, size;
	/* First of all, some simple consistency checks */
	if (interp_elf_ex->e_type != ET_EXEC &&
	    interp_elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(interp_elf_ex))
		goto out;
	if (!interpreter->f_op || !interpreter->f_op->mmap)
		goto out;

	/*
	 * If the size of this structure has changed, then punt, since
	 * we will be doing the wrong thing.
	 */
	if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
		goto out;
	if (interp_elf_ex->e_phnum < 1 ||
	    interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;

	/* Now read in all of the header information */
	size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
	if (size > ELF_MIN_ALIGN)
		goto out;
	elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(interpreter, interp_elf_ex->e_phoff, (char *)elf_phdata, size);
	error = -EIO;
	if (retval != size) {
		if (retval < 0)
			error = retval;
		goto out_close;
	}
	eppnt = elf_phdata;
	for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
		if (eppnt->p_type == PT_LOAD) {
			int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
			int elf_prot = 0;
			unsigned long vaddr = 0;
			unsigned long k, map_addr;

			if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
			if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
			if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
			vaddr = eppnt->p_vaddr;
			if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
				elf_type |= MAP_FIXED;

			map_addr = elf_map(interpreter, load_addr + vaddr, eppnt, elf_prot, elf_type);
			error = map_addr;
			if (BAD_ADDR(map_addr))
				goto out_close;

			if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
				load_addr = map_addr - ELF_PAGESTART(vaddr);
				load_addr_set = 1;
			}

			/*
			 * Check to see if the section's size will overflow the
			 * allowed task size. Note that p_filesz must always be
			 * <= p_memsz so it is only necessary to check p_memsz.
			 */
			k = load_addr + eppnt->p_vaddr;
			if (k > TASK_SIZE || eppnt->p_filesz > eppnt->p_memsz ||
			    eppnt->p_memsz > TASK_SIZE || TASK_SIZE - eppnt->p_memsz < k) {
				error = -ENOMEM;
				goto out_close;
			}

			/*
			 * Find the end of the file mapping for this phdr, and keep
			 * track of the largest address we see for this.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
			if (k > elf_bss)
				elf_bss = k;

			/*
			 * Do the same thing for the memory mapping - between
			 * elf_bss and last_bss is the bss section.
			 */
			k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
			if (k > last_bss)
				last_bss = k;
		}
	}
	/*
	 * Now fill out the bss section.  First pad the last page up
	 * to the page boundary, and then perform a mmap to make sure
	 * that there are zero-mapped pages up to and including the
	 * last bss page.
	 */
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out_close;
	}

	elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);	/* What we have mapped so far */

	/* Map the last of the bss segment */
	if (last_bss > elf_bss) {
		down_write(&current->mm->mmap_sem);
		error = do_brk(elf_bss, last_bss - elf_bss);
		up_write(&current->mm->mmap_sem);
		if (BAD_ADDR(error))
			goto out_close;
	}

	*interp_load_addr = load_addr;
	error = ((unsigned long) interp_elf_ex->e_entry) + load_addr;

out_close:
	kfree(elf_phdata);
out:
	return error;
}
static unsigned long load_aout_interp(struct exec * interp_ex,
				      struct file * interpreter)
{
	unsigned long text_data, elf_entry = ~0UL;
	char __user * addr;
	loff_t offset;

	current->mm->end_code = interp_ex->a_text;
	text_data = interp_ex->a_text + interp_ex->a_data;
	current->mm->end_data = text_data;
	current->mm->brk = interp_ex->a_bss + text_data;

	switch (N_MAGIC(*interp_ex)) {
	case OMAGIC:
		offset = 32;
		addr = (char __user *)0;
		break;
	case ZMAGIC:
	case QMAGIC:
		offset = N_TXTOFF(*interp_ex);
		addr = (char __user *) N_TXTADDR(*interp_ex);
		break;
	default:
		goto out;
	}

	down_write(&current->mm->mmap_sem);
	do_brk(0, text_data);
	up_write(&current->mm->mmap_sem);
	if (!interpreter->f_op || !interpreter->f_op->read)
		goto out;
	if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
		goto out;
	flush_icache_range((unsigned long)addr,
			   (unsigned long)addr + text_data);

	down_write(&current->mm->mmap_sem);
	do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
		interp_ex->a_bss);
	up_write(&current->mm->mmap_sem);
	elf_entry = interp_ex->a_entry;

out:
	return elf_entry;
}
/*
 * These are the functions used to load ELF style executables and shared
 * libraries.  There is no binary dependent code anywhere else.
 */

#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
#define INTERPRETER_ELF 2
static unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned int random_variable = 0;

	if (current->flags & PF_RANDOMIZE)
		random_variable = get_random_int() % (8*1024*1024);
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top + random_variable);
#else
	return PAGE_ALIGN(stack_top - random_variable);
#endif
}
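
/*
 * Note (illustrative): with PF_RANDOMIZE set, the stack top is shifted
 * by up to 8 MiB and page-aligned; e.g. on a grows-down stack with
 * STACK_TOP == 0xc0000000 the result lands in [0xbf800000, 0xc0000000].
 */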
static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
{
	struct file *interpreter = NULL; /* to shut gcc up */
	unsigned long load_addr = 0, load_bias = 0;
	int load_addr_set = 0;
	char * elf_interpreter = NULL;
	unsigned int interpreter_type = INTERPRETER_NONE;
	unsigned char ibcs2_interpreter = 0;
	unsigned long error;
	struct elf_phdr * elf_ppnt, *elf_phdata;
	unsigned long elf_bss, elf_brk;
	int elf_exec_fileno;
	int retval, i;
	unsigned int size;
	unsigned long elf_entry, interp_load_addr = 0;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long reloc_func_desc = 0;
	char passed_fileno[6];
	struct files_struct *files;
	int have_pt_gnu_stack, executable_stack = EXSTACK_DEFAULT;
	unsigned long def_flags = 0;
	struct {
		struct elfhdr elf_ex;
		struct elfhdr interp_elf_ex;
		struct exec interp_ex;
	} *loc;

	loc = kmalloc(sizeof(*loc), GFP_KERNEL);
	if (!loc) {
		retval = -ENOMEM;
		goto out_ret;
	}
	/* Get the exec-header */
	loc->elf_ex = *((struct elfhdr *) bprm->buf);

	retval = -ENOEXEC;
	/* First of all, some simple consistency checks */
	if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(&loc->elf_ex))
		goto out;
	if (!bprm->file->f_op || !bprm->file->f_op->mmap)
		goto out;

	/* Now read in all of the header information */
	if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr))
		goto out;
	if (loc->elf_ex.e_phnum < 1 ||
	    loc->elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;
	size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr);
	retval = -ENOMEM;
	elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(bprm->file, loc->elf_ex.e_phoff, (char *) elf_phdata, size);
	if (retval != size) {
		if (retval >= 0)
			retval = -EIO;
		goto out_free_ph;
	}
	files = current->files;	/* Refcounted so ok */
	retval = unshare_files();
	if (retval < 0)
		goto out_free_ph;
	if (files == current->files) {
		put_files_struct(files);
		files = NULL;
	}

	/* exec will make our files private anyway, but for the a.out
	   loader stuff we need to do it earlier */
	retval = get_unused_fd();
	if (retval < 0)
		goto out_free_fh;
	get_file(bprm->file);
	fd_install(elf_exec_fileno = retval, bprm->file);

	elf_ppnt = elf_phdata;
	elf_bss = 0;
	elf_brk = 0;

	start_code = ~0UL;
	end_code = 0;
	start_data = 0;
	end_data = 0;
	for (i = 0; i < loc->elf_ex.e_phnum; i++) {
		if (elf_ppnt->p_type == PT_INTERP) {
			/* This is the program interpreter used for
			 * shared libraries - for now assume that this
			 * is an a.out format binary
			 */
			retval = -ENOEXEC;
			if (elf_ppnt->p_filesz > PATH_MAX ||
			    elf_ppnt->p_filesz < 2)
				goto out_free_file;

			retval = -ENOMEM;
			elf_interpreter = kmalloc(elf_ppnt->p_filesz,
						  GFP_KERNEL);
			if (!elf_interpreter)
				goto out_free_file;

			retval = kernel_read(bprm->file, elf_ppnt->p_offset,
					     elf_interpreter,
					     elf_ppnt->p_filesz);
			if (retval != elf_ppnt->p_filesz) {
				if (retval >= 0)
					retval = -EIO;
				goto out_free_interp;
			}
			/* make sure path is NULL terminated */
			retval = -ENOEXEC;
			if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
				goto out_free_interp;

			/* If the program interpreter is one of these two,
			 * then assume an iBCS2 image. Otherwise assume
			 * a native linux image.
			 */
			if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
			    strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0)
				ibcs2_interpreter = 1;

			/*
			 * The early SET_PERSONALITY here is so that the lookup
			 * for the interpreter happens in the namespace of the
			 * to-be-execed image.  SET_PERSONALITY can select an
			 * alternate root.
			 *
			 * However, SET_PERSONALITY is NOT allowed to switch
			 * this task into the new image's memory mapping
			 * policy - that is, TASK_SIZE must still evaluate to
			 * that which is appropriate to the execing application.
			 * This is because exit_mmap() needs to have TASK_SIZE
			 * evaluate to the size of the old image.
			 *
			 * So if (say) a 64-bit application is execing a 32-bit
			 * application it is the architecture's responsibility
			 * to defer changing the value of TASK_SIZE until the
			 * switch really is going to happen - do this in
			 * flush_thread().	- akpm
			 */
			SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);

			interpreter = open_exec(elf_interpreter);
			retval = PTR_ERR(interpreter);
			if (IS_ERR(interpreter))
				goto out_free_interp;
			retval = kernel_read(interpreter, 0, bprm->buf, BINPRM_BUF_SIZE);
			if (retval != BINPRM_BUF_SIZE) {
				if (retval >= 0)
					retval = -EIO;
				goto out_free_dentry;
			}

			/* Get the exec headers */
			loc->interp_ex = *((struct exec *) bprm->buf);
			loc->interp_elf_ex = *((struct elfhdr *) bprm->buf);
			break;
		}
		elf_ppnt++;
	}
	elf_ppnt = elf_phdata;
	for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
		if (elf_ppnt->p_type == PT_GNU_STACK) {
			if (elf_ppnt->p_flags & PF_X)
				executable_stack = EXSTACK_ENABLE_X;
			else
				executable_stack = EXSTACK_DISABLE_X;
			break;
		}
	have_pt_gnu_stack = (i < loc->elf_ex.e_phnum);
	/* Some simple consistency checks for the interpreter */
	if (elf_interpreter) {
		interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;

		/* Now figure out which format our binary is */
		if ((N_MAGIC(loc->interp_ex) != OMAGIC) &&
		    (N_MAGIC(loc->interp_ex) != ZMAGIC) &&
		    (N_MAGIC(loc->interp_ex) != QMAGIC))
			interpreter_type = INTERPRETER_ELF;

		if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
			interpreter_type &= ~INTERPRETER_ELF;

		retval = -ELIBBAD;
		if (!interpreter_type)
			goto out_free_dentry;

		/* Make sure only one type was selected */
		if ((interpreter_type & INTERPRETER_ELF) &&
		     interpreter_type != INTERPRETER_ELF) {
			// FIXME - ratelimit this before re-enabling
			// printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
			interpreter_type = INTERPRETER_ELF;
		}
		/* Verify the interpreter has a valid arch */
		if ((interpreter_type == INTERPRETER_ELF) &&
		    !elf_check_arch(&loc->interp_elf_ex))
			goto out_free_dentry;
	} else {
		/* Executables without an interpreter also need a personality */
		SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
	}
	/* OK, we are done with that, now set up the arg stuff,
	   and then start this sucker up */
	if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) {
		char *passed_p = passed_fileno;
		sprintf(passed_fileno, "%d", elf_exec_fileno);

		if (elf_interpreter) {
			retval = copy_strings_kernel(1, &passed_p, bprm);
			if (retval)
				goto out_free_dentry;
			bprm->argc++;
		}
	}

	/* Flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	if (retval)
		goto out_free_dentry;

	/* Discard our unneeded old files struct */
	if (files) {
		steal_locks(files);
		put_files_struct(files);
		files = NULL;
	}
	/* OK, This is the point of no return */
	current->mm->start_data = 0;
	current->mm->end_data = 0;
	current->mm->end_code = 0;
	current->mm->mmap = NULL;
	current->flags &= ~PF_FORKNOEXEC;
	current->mm->def_flags = def_flags;

	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
	   may depend on the personality.  */
	SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
	if (elf_read_implies_exec(loc->elf_ex, executable_stack))
		current->personality |= READ_IMPLIES_EXEC;

	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		current->flags |= PF_RANDOMIZE;
	arch_pick_mmap_layout(current->mm);

	/* Do this so that we can load the interpreter, if need be.  We will
	   change some of these later */
	current->mm->free_area_cache = current->mm->mmap_base;
	current->mm->cached_hole_size = 0;
	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
				 executable_stack);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		goto out_free_dentry;
	}

	current->mm->start_stack = bprm->p;
	/* Now we do a little grungy work by mmaping the ELF image into
	   the correct location in memory.  At this point, we assume that
	   the image should be loaded at fixed address, not at a variable
	   address. */
	for (i = 0, elf_ppnt = elf_phdata; i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
		int elf_prot = 0, elf_flags;
		unsigned long k, vaddr;

		if (elf_ppnt->p_type != PT_LOAD)
			continue;

		if (unlikely (elf_brk > elf_bss)) {
			unsigned long nbyte;

			/* There was a PT_LOAD segment with p_memsz > p_filesz
			   before this one. Map anonymous pages, if needed,
			   and clear the area.  */
			retval = set_brk (elf_bss + load_bias,
					  elf_brk + load_bias);
			if (retval) {
				send_sig(SIGKILL, current, 0);
				goto out_free_dentry;
			}
			nbyte = ELF_PAGEOFFSET(elf_bss);
			if (nbyte) {
				nbyte = ELF_MIN_ALIGN - nbyte;
				if (nbyte > elf_brk - elf_bss)
					nbyte = elf_brk - elf_bss;
				if (clear_user((void __user *)elf_bss +
							load_bias, nbyte)) {
					/*
					 * This bss-zeroing can fail if the ELF
					 * file specifies odd protections. So
					 * we don't check the return value
					 */
				}
			}
		}

		if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
		if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
		if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;

		elf_flags = MAP_PRIVATE|MAP_DENYWRITE|MAP_EXECUTABLE;

		vaddr = elf_ppnt->p_vaddr;
		if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
			elf_flags |= MAP_FIXED;
		} else if (loc->elf_ex.e_type == ET_DYN) {
			/* Try and get dynamic programs out of the way of the default mmap
			   base, as well as whatever program they might try to exec.  This
			   is because the brk will follow the loader, and is not movable.  */
			load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
		}

		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags);
		if (BAD_ADDR(error)) {
			send_sig(SIGKILL, current, 0);
			goto out_free_dentry;
		}

		if (!load_addr_set) {
			load_addr_set = 1;
			load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
			if (loc->elf_ex.e_type == ET_DYN) {
				load_bias += error -
					     ELF_PAGESTART(load_bias + vaddr);
				load_addr += load_bias;
				reloc_func_desc = load_bias;
			}
		}
		k = elf_ppnt->p_vaddr;
		if (k < start_code) start_code = k;
		if (start_data < k) start_data = k;

		/*
		 * Check to see if the section's size will overflow the
		 * allowed task size. Note that p_filesz must always be
		 * <= p_memsz so it is only necessary to check p_memsz.
		 */
		if (k > TASK_SIZE || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
		    elf_ppnt->p_memsz > TASK_SIZE ||
		    TASK_SIZE - elf_ppnt->p_memsz < k) {
			/* set_brk can never work.  Avoid overflows.  */
			send_sig(SIGKILL, current, 0);
			goto out_free_dentry;
		}

		k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;

		if (k > elf_bss)
			elf_bss = k;
		if ((elf_ppnt->p_flags & PF_X) && end_code < k)
			end_code = k;
		if (end_data < k)
			end_data = k;
		k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
		if (k > elf_brk)
			elf_brk = k;
	}
	loc->elf_ex.e_entry += load_bias;
	elf_bss += load_bias;
	elf_brk += load_bias;
	start_code += load_bias;
	end_code += load_bias;
	start_data += load_bias;
	end_data += load_bias;

	/* Calling set_brk effectively mmaps the pages that we need
	 * for the bss and break sections.  We must do this before
	 * mapping in the interpreter, to make sure it doesn't wind
	 * up getting placed where the bss needs to go.
	 */
	retval = set_brk(elf_bss, elf_brk);
	if (retval) {
		send_sig(SIGKILL, current, 0);
		goto out_free_dentry;
	}
	if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
		send_sig(SIGSEGV, current, 0);
		retval = -EFAULT; /* Nobody gets to see this, but.. */
		goto out_free_dentry;
	}
	if (elf_interpreter) {
		if (interpreter_type == INTERPRETER_AOUT)
			elf_entry = load_aout_interp(&loc->interp_ex,
						     interpreter);
		else
			elf_entry = load_elf_interp(&loc->interp_elf_ex,
						    interpreter,
						    &interp_load_addr);
		if (BAD_ADDR(elf_entry)) {
			printk(KERN_ERR "Unable to load interpreter %.128s\n",
				elf_interpreter);
			force_sig(SIGSEGV, current);
			retval = -ENOEXEC; /* Nobody gets to see this, but.. */
			goto out_free_dentry;
		}
		reloc_func_desc = interp_load_addr;

		allow_write_access(interpreter);
		fput(interpreter);
		kfree(elf_interpreter);
	} else {
		elf_entry = loc->elf_ex.e_entry;
	}

	kfree(elf_phdata);

	if (interpreter_type != INTERPRETER_AOUT)
		sys_close(elf_exec_fileno);

	set_binfmt(&elf_format);
#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
	retval = arch_setup_additional_pages(bprm, executable_stack);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		goto out;
	}
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */

	compute_creds(bprm);
	current->flags &= ~PF_FORKNOEXEC;
	create_elf_tables(bprm, &loc->elf_ex, (interpreter_type == INTERPRETER_AOUT),
			load_addr, interp_load_addr);
	/* N.B. passed_fileno might not be initialized? */
	if (interpreter_type == INTERPRETER_AOUT)
		current->mm->arg_start += strlen(passed_fileno) + 1;
	current->mm->end_code = end_code;
	current->mm->start_code = start_code;
	current->mm->start_data = start_data;
	current->mm->end_data = end_data;
	current->mm->start_stack = bprm->p;

	if (current->personality & MMAP_PAGE_ZERO) {
		/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
		   and some applications "depend" upon this behavior.
		   Since we do not have the power to recompile these, we
		   emulate the SVr4 behavior.  Sigh.  */
		down_write(&current->mm->mmap_sem);
		error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE, 0);
		up_write(&current->mm->mmap_sem);
	}
#ifdef ELF_PLAT_INIT
	/*
	 * The ABI may specify that certain registers be set up in special
	 * ways (on i386 %edx is the address of a DT_FINI function, for
	 * example).  In addition, it may also specify (eg, PowerPC64 ELF)
	 * that the e_entry field is the address of the function descriptor
	 * for the startup routine, rather than the address of the startup
	 * routine itself.  This macro performs whatever initialization to
	 * the regs structure is required as well as any relocations to the
	 * function descriptor entries when executing dynamically linked apps.
	 */
	ELF_PLAT_INIT(regs, reloc_func_desc);
#endif

	start_thread(regs, elf_entry, bprm->p);
	if (unlikely(current->ptrace & PT_PTRACED)) {
		if (current->ptrace & PT_TRACE_EXEC)
			ptrace_notify((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
		else
			send_sig(SIGTRAP, current, 0);
	}
	retval = 0;
out:
	kfree(loc);
out_ret:
	return retval;
	/* error cleanup */
out_free_dentry:
	allow_write_access(interpreter);
	if (interpreter)
		fput(interpreter);
out_free_interp:
	kfree(elf_interpreter);
out_free_file:
	sys_close(elf_exec_fileno);
out_free_fh:
	if (files) {
		put_files_struct(current->files);
		current->files = files;
	}
out_free_ph:
	kfree(elf_phdata);
	goto out;
}
/* This is really simpleminded and specialized - we are loading an
   a.out library that is given an ELF header. */
static int load_elf_library(struct file *file)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long elf_bss, bss, len;
	int retval, error, i, j;
	struct elfhdr elf_ex;

	error = -ENOEXEC;
	retval = kernel_read(file, 0, (char *) &elf_ex, sizeof(elf_ex));
	if (retval != sizeof(elf_ex))
		goto out;

	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	/* First of all, some simple consistency checks */
	if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
	    !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
		goto out;
	/* Now read in all of the header information */
	j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
	/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */

	error = -ENOMEM;
	elf_phdata = kmalloc(j, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	eppnt = elf_phdata;
	error = -ENOEXEC;
	retval = kernel_read(file, elf_ex.e_phoff, (char *)eppnt, j);
	if (retval != j)
		goto out_free_ph;

	for (j = 0, i = 0; i < elf_ex.e_phnum; i++)
		if ((eppnt + i)->p_type == PT_LOAD)
			j++;
	if (j != 1)
		goto out_free_ph;

	while (eppnt->p_type != PT_LOAD)
		eppnt++;
	/* Now use mmap to map the library into memory. */
	down_write(&current->mm->mmap_sem);
	error = do_mmap(file,
			ELF_PAGESTART(eppnt->p_vaddr),
			(eppnt->p_filesz +
			 ELF_PAGEOFFSET(eppnt->p_vaddr)),
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
			(eppnt->p_offset -
			 ELF_PAGEOFFSET(eppnt->p_vaddr)));
	up_write(&current->mm->mmap_sem);
	if (error != ELF_PAGESTART(eppnt->p_vaddr))
		goto out_free_ph;

	elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out_free_ph;
	}

	len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr + ELF_MIN_ALIGN - 1);
	bss = eppnt->p_memsz + eppnt->p_vaddr;
	if (bss > len) {
		down_write(&current->mm->mmap_sem);
		do_brk(len, bss - len);
		up_write(&current->mm->mmap_sem);
	}
	error = 0;

out_free_ph:
	kfree(elf_phdata);
out:
	return error;
}
/*
 * Note that some platforms still use traditional core dumps and not
 * the ELF core dump.  Each platform can select it as appropriate.
 */
#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)

/*
 * ELF core dumper
 *
 * Modelled on fs/exec.c:aout_core_dump()
 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
 */

/*
 * These are the only things you should do on a core-file: use only these
 * functions to write out all the necessary info.
 */
static int dump_write(struct file *file, const void *addr, int nr)
{
	return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
}

static int dump_seek(struct file *file, loff_t off)
{
	if (file->f_op->llseek) {
		if (file->f_op->llseek(file, off, 0) != off)
			return 0;
	} else
		file->f_pos = off;
	return 1;
}
/*
 * Decide whether a segment is worth dumping; default is yes to be
 * sure (missing info is worse than too much; etc).
 * Personally I'd include everything, and use the coredump limit...
 *
 * I think we should skip something. But I am not sure how. H.J.
 */
static int maydump(struct vm_area_struct *vma)
{
	/* Do not dump I/O mapped devices or special mappings */
	if (vma->vm_flags & (VM_IO | VM_RESERVED))
		return 0;

	/* Dump shared memory only if mapped from an anonymous file. */
	if (vma->vm_flags & VM_SHARED)
		return vma->vm_file->f_dentry->d_inode->i_nlink == 0;

	/* If it hasn't been written to, don't write it out */
	if (!vma->anon_vma)
		return 0;

	return 1;
}
#define roundup(x, y)  ((((x)+((y)-1))/(y))*(y))

/* An ELF note in memory */
struct memelfnote
{
	const char *name;
	int type;
	unsigned int datasz;
	void *data;
};

static int notesize(struct memelfnote *en)
{
	int sz;

	sz = sizeof(struct elf_note);
	sz += roundup(strlen(en->name) + 1, 4);
	sz += roundup(en->datasz, 4);

	return sz;
}
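
/*
 * Worked example (illustrative): a note named "CORE" has namesz
 * strlen("CORE") + 1 == 5, padded to 8; so its total size is
 * sizeof(struct elf_note) + 8 + roundup(datasz, 4), matching the
 * header/name/descriptor layout that writenote() emits below.
 */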
#define DUMP_WRITE(addr, nr)	\
	do { if (!dump_write(file, (addr), (nr))) return 0; } while(0)
#define DUMP_SEEK(off)	\
	do { if (!dump_seek(file, (off))) return 0; } while(0)

static int writenote(struct memelfnote *men, struct file *file)
{
	struct elf_note en;

	en.n_namesz = strlen(men->name) + 1;
	en.n_descsz = men->datasz;
	en.n_type = men->type;

	DUMP_WRITE(&en, sizeof(en));
	DUMP_WRITE(men->name, en.n_namesz);
	/* XXX - cast from long long to long to avoid need for libgcc.a */
	DUMP_SEEK(roundup((unsigned long)file->f_pos, 4));	/* XXX */
	DUMP_WRITE(men->data, men->datasz);
	DUMP_SEEK(roundup((unsigned long)file->f_pos, 4));	/* XXX */

	return 1;
}
#undef DUMP_WRITE
#undef DUMP_SEEK
#define DUMP_WRITE(addr, nr)	\
	if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
		goto end_coredump;
#define DUMP_SEEK(off)	\
	if (!dump_seek(file, (off))) \
		goto end_coredump;
static void fill_elf_header(struct elfhdr *elf, int segs)
{
	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS] = ELF_CLASS;
	elf->e_ident[EI_DATA] = ELF_DATA;
	elf->e_ident[EI_VERSION] = EV_CURRENT;
	elf->e_ident[EI_OSABI] = ELF_OSABI;
	memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);

	elf->e_type = ET_CORE;
	elf->e_machine = ELF_ARCH;
	elf->e_version = EV_CURRENT;
	elf->e_entry = 0;
	elf->e_phoff = sizeof(struct elfhdr);
	elf->e_shoff = 0;
	elf->e_flags = ELF_CORE_EFLAGS;
	elf->e_ehsize = sizeof(struct elfhdr);
	elf->e_phentsize = sizeof(struct elf_phdr);
	elf->e_phnum = segs;
	elf->e_shentsize = 0;
	elf->e_shnum = 0;
	elf->e_shstrndx = 0;
	return;
}
static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
{
	phdr->p_type = PT_NOTE;
	phdr->p_offset = offset;
	phdr->p_vaddr = 0;
	phdr->p_paddr = 0;
	phdr->p_filesz = sz;
	phdr->p_memsz = 0;
	phdr->p_flags = 0;
	phdr->p_align = 0;
	return;
}

static void fill_note(struct memelfnote *note, const char *name, int type,
		unsigned int sz, void *data)
{
	note->name = name;
	note->type = type;
	note->datasz = sz;
	note->data = data;
	return;
}
/*
 * fill up all the fields in prstatus from the given task struct, except
 * registers which need to be filled up separately.
 */
static void fill_prstatus(struct elf_prstatus *prstatus,
			  struct task_struct *p, long signr)
{
	prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
	prstatus->pr_sigpend = p->pending.signal.sig[0];
	prstatus->pr_sighold = p->blocked.sig[0];
	prstatus->pr_pid = p->pid;
	prstatus->pr_ppid = p->parent->pid;
	prstatus->pr_pgrp = process_group(p);
	prstatus->pr_sid = p->signal->session;
	if (thread_group_leader(p)) {
		/*
		 * This is the record for the group leader.  Add in the
		 * cumulative times of previous dead threads.  This total
		 * won't include the time of each live thread whose state
		 * is included in the core dump.  The final total reported
		 * to our parent process when it calls wait4 will include
		 * those sums as well as the little bit more time it takes
		 * this and each other thread to finish dying after the
		 * core dump synchronization phase.
		 */
		cputime_to_timeval(cputime_add(p->utime, p->signal->utime),
				   &prstatus->pr_utime);
		cputime_to_timeval(cputime_add(p->stime, p->signal->stime),
				   &prstatus->pr_stime);
	} else {
		cputime_to_timeval(p->utime, &prstatus->pr_utime);
		cputime_to_timeval(p->stime, &prstatus->pr_stime);
	}
	cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
	cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
}
static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
		       struct mm_struct *mm)
{
	unsigned int i, len;

	/* first copy the parameters from user space */
	memset(psinfo, 0, sizeof(struct elf_prpsinfo));

	len = mm->arg_end - mm->arg_start;
	if (len >= ELF_PRARGSZ)
		len = ELF_PRARGSZ-1;
	if (copy_from_user(&psinfo->pr_psargs,
			   (const char __user *)mm->arg_start, len))
		return -EFAULT;
	for (i = 0; i < len; i++)
		if (psinfo->pr_psargs[i] == 0)
			psinfo->pr_psargs[i] = ' ';
	psinfo->pr_psargs[len] = 0;

	psinfo->pr_pid = p->pid;
	psinfo->pr_ppid = p->parent->pid;
	psinfo->pr_pgrp = process_group(p);
	psinfo->pr_sid = p->signal->session;

	i = p->state ? ffz(~p->state) + 1 : 0;
	psinfo->pr_state = i;
	psinfo->pr_sname = (i < 0 || i > 5) ? '.' : "RSDTZW"[i];
	psinfo->pr_zomb = psinfo->pr_sname == 'Z';
	psinfo->pr_nice = task_nice(p);
	psinfo->pr_flag = p->flags;
	SET_UID(psinfo->pr_uid, p->uid);
	SET_GID(psinfo->pr_gid, p->gid);
	strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));

	return 0;
}
/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status
{
	struct list_head list;
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	elf_fpregset_t fpu;		/* NT_PRFPREG */
	struct task_struct *thread;
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t xfpu;		/* NT_PRXFPREG */
#endif
	struct memelfnote notes[3];
	int num_notes;
};
/*
 * In order to add the specific thread information for the elf file format,
 * we need to keep a linked list of every thread's pr_status and then
 * create a single section for them in the final core file.
 */
static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
{
	int sz = 0;
	struct task_struct *p = t->thread;
	t->num_notes = 0;

	fill_prstatus(&t->prstatus, p, signr);
	elf_core_copy_task_regs(p, &t->prstatus.pr_reg);

	fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus), &(t->prstatus));
	t->num_notes++;
	sz += notesize(&t->notes[0]);

	if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL, &t->fpu))) {
		fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu), &(t->fpu));
		t->num_notes++;
		sz += notesize(&t->notes[1]);
	}

#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
		fill_note(&t->notes[2], "LINUX", NT_PRXFPREG, sizeof(t->xfpu), &t->xfpu);
		t->num_notes++;
		sz += notesize(&t->notes[2]);
	}
#endif
	return sz;
}
/*
 * Actual dumper
 *
 * This is a two-pass process; first we find the offsets of the bits,
 * and then they are actually written out.  If we run out of core limit
 * we just truncate.
 */
static int elf_core_dump(long signr, struct pt_regs * regs, struct file * file)
{
#define	NUM_NOTES	6
	int has_dumped = 0;
	mm_segment_t fs;
	int segs;
	size_t size = 0;
	int i;
	struct vm_area_struct *vma;
	struct elfhdr *elf = NULL;
	off_t offset = 0, dataoff;
	unsigned long limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
	int numnote;
	struct memelfnote *notes = NULL;
	struct elf_prstatus *prstatus = NULL;	/* NT_PRSTATUS */
	struct elf_prpsinfo *psinfo = NULL;	/* NT_PRPSINFO */
	struct task_struct *g, *p;
	LIST_HEAD(thread_list);
	struct list_head *t;
	elf_fpregset_t *fpu = NULL;
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t *xfpu = NULL;
#endif
	int thread_status_size = 0;
	elf_addr_t *auxv;

	/*
	 * We no longer stop all VM operations.
	 *
	 * This is because those processes that could possibly change map_count
	 * or the mmap / vma pages are now blocked in do_exit on current
	 * finishing this core dump.
	 *
	 * Only ptrace can touch these memory addresses, but it doesn't change
	 * the map_count or the pages allocated. So no possibility of crashing
	 * exists while dumping the mm->vm_next areas to the core file.
	 */

	/* alloc memory for large data structures: too large to be on stack */
	elf = kmalloc(sizeof(*elf), GFP_KERNEL);
	if (!elf)
		goto cleanup;
	prstatus = kmalloc(sizeof(*prstatus), GFP_KERNEL);
	if (!prstatus)
		goto cleanup;
	psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
	if (!psinfo)
		goto cleanup;
	notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL);
	if (!notes)
		goto cleanup;
	fpu = kmalloc(sizeof(*fpu), GFP_KERNEL);
	if (!fpu)
		goto cleanup;
#ifdef ELF_CORE_COPY_XFPREGS
	xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL);
	if (!xfpu)
		goto cleanup;
#endif
	if (signr) {
		struct elf_thread_status *tmp;
		read_lock(&tasklist_lock);
		do_each_thread(g,p)
			if (current->mm == p->mm && current != p) {
				tmp = kmalloc(sizeof(*tmp), GFP_ATOMIC);
				if (!tmp) {
					read_unlock(&tasklist_lock);
					goto cleanup;
				}
				memset(tmp, 0, sizeof(*tmp));
				INIT_LIST_HEAD(&tmp->list);
				tmp->thread = p;
				list_add(&tmp->list, &thread_list);
			}
		while_each_thread(g,p);
		read_unlock(&tasklist_lock);
		list_for_each(t, &thread_list) {
			struct elf_thread_status *tmp;
			int sz;

			tmp = list_entry(t, struct elf_thread_status, list);
			sz = elf_dump_thread_status(signr, tmp);
			thread_status_size += sz;
		}
	}

	/* now collect the dump for the current */
	memset(prstatus, 0, sizeof(*prstatus));
	fill_prstatus(prstatus, current, signr);
	elf_core_copy_regs(&prstatus->pr_reg, regs);
	segs = current->mm->map_count;
#ifdef ELF_CORE_EXTRA_PHDRS
	segs += ELF_CORE_EXTRA_PHDRS;
#endif

	/* Set up header */
	fill_elf_header(elf, segs+1);	/* including notes section */

	has_dumped = 1;
	current->flags |= PF_DUMPCORE;

	/*
	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	 */

	fill_note(notes +0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);

	fill_psinfo(psinfo, current->group_leader, current->mm);
	fill_note(notes +1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);

	numnote = 2;

	auxv = (elf_addr_t *) current->mm->saved_auxv;

	i = 0;
	do
		i += 2;
	while (auxv[i - 2] != AT_NULL);
	fill_note(&notes[numnote++], "CORE", NT_AUXV,
		  i * sizeof (elf_addr_t), auxv);

	/* Try to dump the FPU. */
	if ((prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs, fpu)))
		fill_note(notes + numnote++,
			  "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(current, xfpu))
		fill_note(notes + numnote++,
			  "LINUX", NT_PRXFPREG, sizeof(*xfpu), xfpu);
#endif
	fs = get_fs();
	set_fs(KERNEL_DS);

	DUMP_WRITE(elf, sizeof(*elf));
	offset += sizeof(*elf);				/* Elf header */
	offset += (segs+1) * sizeof(struct elf_phdr);	/* Program headers */

	/* Write notes phdr entry */
	{
		struct elf_phdr phdr;
		int sz = 0;

		for (i = 0; i < numnote; i++)
			sz += notesize(notes + i);

		sz += thread_status_size;

		fill_elf_note_phdr(&phdr, sz, offset);
		offset += sz;
		DUMP_WRITE(&phdr, sizeof(phdr));
	}

	/* Page-align dumped data */
	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
	/* Write program headers for segments dump */
	for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
		struct elf_phdr phdr;
		size_t sz;

		sz = vma->vm_end - vma->vm_start;

		phdr.p_type = PT_LOAD;
		phdr.p_offset = offset;
		phdr.p_vaddr = vma->vm_start;
		phdr.p_paddr = 0;
		phdr.p_filesz = maydump(vma) ? sz : 0;
		phdr.p_memsz = sz;
		offset += phdr.p_filesz;
		phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
		if (vma->vm_flags & VM_WRITE)
			phdr.p_flags |= PF_W;
		if (vma->vm_flags & VM_EXEC)
			phdr.p_flags |= PF_X;
		phdr.p_align = ELF_EXEC_PAGESIZE;

		DUMP_WRITE(&phdr, sizeof(phdr));
	}

#ifdef ELF_CORE_WRITE_EXTRA_PHDRS
	ELF_CORE_WRITE_EXTRA_PHDRS;
#endif
	/* write out the notes section */
	for (i = 0; i < numnote; i++)
		if (!writenote(notes + i, file))
			goto end_coredump;

	/* write out the thread status notes section */
	list_for_each(t, &thread_list) {
		struct elf_thread_status *tmp = list_entry(t, struct elf_thread_status, list);
		for (i = 0; i < tmp->num_notes; i++)
			if (!writenote(&tmp->notes[i], file))
				goto end_coredump;
	}

	DUMP_SEEK(dataoff);
	for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
		unsigned long addr;

		if (!maydump(vma))
			continue;

		for (addr = vma->vm_start;
		     addr < vma->vm_end;
		     addr += PAGE_SIZE) {
			struct page* page;
			struct vm_area_struct *vma;

			if (get_user_pages(current, current->mm, addr, 1, 0, 1,
						&page, &vma) <= 0) {
				DUMP_SEEK(file->f_pos + PAGE_SIZE);
			} else {
				if (page == ZERO_PAGE(addr)) {
					DUMP_SEEK(file->f_pos + PAGE_SIZE);
				} else {
					void *kaddr;
					flush_cache_page(vma, addr, page_to_pfn(page));
					kaddr = kmap(page);
					if ((size += PAGE_SIZE) > limit ||
					    !dump_write(file, kaddr,
							PAGE_SIZE)) {
						kunmap(page);
						page_cache_release(page);
						goto end_coredump;
					}
					kunmap(page);
				}
				page_cache_release(page);
			}
		}
	}

#ifdef ELF_CORE_WRITE_EXTRA_DATA
	ELF_CORE_WRITE_EXTRA_DATA;
#endif
	if ((off_t)file->f_pos != offset) {
		/* Sanity check */
		printk(KERN_WARNING "elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
		       (off_t)file->f_pos, offset);
	}

end_coredump:
	set_fs(fs);

cleanup:
	while (!list_empty(&thread_list)) {
		struct list_head *tmp = thread_list.next;
		list_del(tmp);
		kfree(list_entry(tmp, struct elf_thread_status, list));
	}

	kfree(elf);
	kfree(prstatus);
	kfree(psinfo);
	kfree(notes);
	kfree(fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	kfree(xfpu);
#endif
	return has_dumped;
#undef NUM_NOTES
}

#endif		/* USE_ELF_CORE_DUMP */
static int __init init_elf_binfmt(void)
{
	return register_binfmt(&elf_format);
}

static void __exit exit_elf_binfmt(void)
{
	/* Remove the COFF and ELF loaders. */
	unregister_binfmt(&elf_format);
}

core_initcall(init_elf_binfmt);
module_exit(exit_elf_binfmt);

MODULE_LICENSE("GPL");