/*
 * linux/fs/binfmt_elf.c
 *
 * These are the functions used to load ELF format executables as used
 * on SVr4 machines.  Information on the format may be found in the book
 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
 * Tools".
 *
 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/a.out.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/binfmts.h>
#include <linux/string.h>
#include <linux/file.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/personality.h>
#include <linux/elfcore.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/random.h>

#include <asm/uaccess.h>
#include <asm/param.h>
#include <asm/page.h>

#include <linux/elf.h>

static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs);
static int load_elf_library(struct file *);
static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *, int, int);
extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);

#ifndef elf_addr_t
#define elf_addr_t unsigned long
#endif

/*
 * If we don't support core dumping, then supply a NULL so we
 * don't even try.
 */
#ifdef USE_ELF_CORE_DUMP
static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file);
#else
#define elf_core_dump NULL
#endif

#if ELF_EXEC_PAGESIZE > PAGE_SIZE
# define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
#else
# define ELF_MIN_ALIGN PAGE_SIZE
#endif

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS 0
#endif

#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
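
/*
 * Worked example for the macros above, assuming 4 KiB pages
 * (ELF_MIN_ALIGN == 0x1000):
 *   ELF_PAGESTART(0x12345)  == 0x12000  (round down to page start)
 *   ELF_PAGEOFFSET(0x12345) == 0x345    (offset within the page)
 *   ELF_PAGEALIGN(0x12345)  == 0x13000  (round up to next page)
 */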

static struct linux_binfmt elf_format = {
	.module		= THIS_MODULE,
	.load_binary	= load_elf_binary,
	.load_shlib	= load_elf_library,
	.core_dump	= elf_core_dump,
	.min_coredump	= ELF_EXEC_PAGESIZE
};

#define BAD_ADDR(x) ((unsigned long)(x) > TASK_SIZE)

static int set_brk(unsigned long start, unsigned long end)
{
	start = ELF_PAGEALIGN(start);
	end = ELF_PAGEALIGN(end);
	if (end > start) {
		unsigned long addr;
		down_write(&current->mm->mmap_sem);
		addr = do_brk(start, end - start);
		up_write(&current->mm->mmap_sem);
		if (BAD_ADDR(addr))
			return addr;
	}
	current->mm->start_brk = current->mm->brk = end;
	return 0;
}

/* We need to explicitly zero any fractional pages
   after the data section (i.e. bss).  This would
   contain the junk from the file that should not
   be in memory
 */
static int padzero(unsigned long elf_bss)
{
	unsigned long nbyte;

	nbyte = ELF_PAGEOFFSET(elf_bss);
	if (nbyte) {
		nbyte = ELF_MIN_ALIGN - nbyte;
		if (clear_user((void __user *) elf_bss, nbyte))
			return -EFAULT;
	}
	return 0;
}

/* Let's use some macros to make this stack manipulation a little clearer */
#ifdef CONFIG_STACK_GROWSUP
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
#define STACK_ROUND(sp, items) \
	((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; old_sp; })
#else
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
#define STACK_ROUND(sp, items) \
	(((unsigned long) (sp - items)) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
#endif
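
/*
 * Rough sketch of the userspace stack that create_elf_tables() below
 * builds with these macros (downward-growing case, low addresses
 * first): argc, then the argv[] pointers terminated by NULL, then the
 * envp[] pointers terminated by NULL, then the auxv id/value pairs
 * ending with AT_NULL; the whole block is rounded to 16 bytes.
 */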

static int
create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
		int interp_aout, unsigned long load_addr,
		unsigned long interp_load_addr)
{
	unsigned long p = bprm->p;
	int argc = bprm->argc;
	int envc = bprm->envc;
	elf_addr_t __user *argv;
	elf_addr_t __user *envp;
	elf_addr_t __user *sp;
	elf_addr_t __user *u_platform;
	const char *k_platform = ELF_PLATFORM;
	int items;
	elf_addr_t *elf_info;
	int ei_index = 0;
	struct task_struct *tsk = current;

	/*
	 * If this architecture has a platform capability string, copy it
	 * to userspace.  In some cases (Sparc), this info is impossible
	 * for userspace to get any other way, in others (i386) it is
	 * merely difficult.
	 */
	u_platform = NULL;
	if (k_platform) {
		size_t len = strlen(k_platform) + 1;

		/*
		 * In some cases (e.g. Hyper-Threading), we want to avoid L1
		 * evictions by the processes running on the same package. One
		 * thing we can do is to shuffle the initial stack for them.
		 */
		p = arch_align_stack(p);

		u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (__copy_to_user(u_platform, k_platform, len))
			return -EFAULT;
	}

	/* Create the ELF interpreter info */
	elf_info = (elf_addr_t *) current->mm->saved_auxv;
#define NEW_AUX_ENT(id, val) \
	do { elf_info[ei_index++] = id; elf_info[ei_index++] = val; } while (0)

#ifdef ARCH_DLINFO
	/*
	 * ARCH_DLINFO must come first so PPC can do its special alignment of
	 * AUXV.
	 */
	ARCH_DLINFO;
#endif
	NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
	NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
	NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
	NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
	NEW_AUX_ENT(AT_PHENT, sizeof (struct elf_phdr));
	NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
	NEW_AUX_ENT(AT_BASE, interp_load_addr);
	NEW_AUX_ENT(AT_FLAGS, 0);
	NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
	NEW_AUX_ENT(AT_UID, (elf_addr_t) tsk->uid);
	NEW_AUX_ENT(AT_EUID, (elf_addr_t) tsk->euid);
	NEW_AUX_ENT(AT_GID, (elf_addr_t) tsk->gid);
	NEW_AUX_ENT(AT_EGID, (elf_addr_t) tsk->egid);
	NEW_AUX_ENT(AT_SECURE, (elf_addr_t) security_bprm_secureexec(bprm));
	if (k_platform) {
		NEW_AUX_ENT(AT_PLATFORM, (elf_addr_t)(unsigned long)u_platform);
	}
	if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
		NEW_AUX_ENT(AT_EXECFD, (elf_addr_t) bprm->interp_data);
	}
#undef NEW_AUX_ENT

	/* AT_NULL is zero; clear the rest too */
	memset(&elf_info[ei_index], 0,
	       sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);

	/* And advance past the AT_NULL entry.  */
	ei_index += 2;

	sp = STACK_ADD(p, ei_index);

	items = (argc + 1) + (envc + 1);
	if (interp_aout) {
		items += 3; /* a.out interpreters require argv & envp too */
	} else {
		items += 1; /* ELF interpreters only put argc on the stack */
	}
	bprm->p = STACK_ROUND(sp, items);

	/* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
	sp = (elf_addr_t __user *)bprm->p - items - ei_index;
	bprm->exec = (unsigned long) sp; /* XXX: PARISC HACK */
#else
	sp = (elf_addr_t __user *)bprm->p;
#endif

	/* Now, let's put argc (and argv, envp if appropriate) on the stack */
	if (__put_user(argc, sp++))
		return -EFAULT;
	if (interp_aout) {
		argv = sp + 2;
		envp = argv + argc + 1;
		__put_user((elf_addr_t)(unsigned long)argv, sp++);
		__put_user((elf_addr_t)(unsigned long)envp, sp++);
	} else {
		argv = sp;
		envp = argv + argc + 1;
	}

	/* Populate argv and envp */
	p = current->mm->arg_end = current->mm->arg_start;
	while (argc-- > 0) {
		size_t len;
		__put_user((elf_addr_t)p, argv++);
		len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
			return 0;
		p += len;
	}
	if (__put_user(0, argv))
		return -EFAULT;
	current->mm->arg_end = current->mm->env_start = p;
	while (envc-- > 0) {
		size_t len;
		__put_user((elf_addr_t)p, envp++);
		len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
			return 0;
		p += len;
	}
	if (__put_user(0, envp))
		return -EFAULT;
	current->mm->env_end = p;

	/* Put the elf_info on the stack in the right place.  */
	sp = (elf_addr_t __user *)envp + 1;
	if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
		return -EFAULT;
	return 0;
}
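
/*
 * The generic elf_map() below is used unless the architecture supplies
 * its own. Note how both the target address and the file offset are
 * rounded down by the same ELF_PAGEOFFSET(p_vaddr), so the segment's
 * in-page offset is preserved while the mapping itself starts on an
 * ELF_MIN_ALIGN boundary, as do_mmap() requires.
 */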
#ifndef elf_map

static unsigned long elf_map(struct file *filep, unsigned long addr,
			struct elf_phdr *eppnt, int prot, int type)
{
	unsigned long map_addr;

	down_write(&current->mm->mmap_sem);
	map_addr = do_mmap(filep, ELF_PAGESTART(addr),
			   eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr), prot, type,
			   eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr));
	up_write(&current->mm->mmap_sem);
	return(map_addr);
}

#endif /* !elf_map */

/* This is much more generalized than the library routine read function,
   so we keep this separate.  Technically the library read function
   is only provided so that we can read a.out libraries that have
   an ELF header */
static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
				     struct file *interpreter,
				     unsigned long *interp_load_addr)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long load_addr = 0;
	int load_addr_set = 0;
	unsigned long last_bss = 0, elf_bss = 0;
	unsigned long error = ~0UL;
	int retval, i, size;

	/* First of all, some simple consistency checks */
	if (interp_elf_ex->e_type != ET_EXEC &&
	    interp_elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(interp_elf_ex))
		goto out;
	if (!interpreter->f_op || !interpreter->f_op->mmap)
		goto out;

	/*
	 * If the size of this structure has changed, then punt, since
	 * we will be doing the wrong thing.
	 */
	if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
		goto out;
	if (interp_elf_ex->e_phnum < 1 ||
	    interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;

	/* Now read in all of the header information */

	size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
	if (size > ELF_MIN_ALIGN)
		goto out;
	elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(interpreter, interp_elf_ex->e_phoff, (char *)elf_phdata, size);
	error = -EIO;
	if (retval != size) {
		if (retval < 0)
			error = retval;
		goto out_close;
	}

	eppnt = elf_phdata;
	for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
		if (eppnt->p_type == PT_LOAD) {
			int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
			int elf_prot = 0;
			unsigned long vaddr = 0;
			unsigned long k, map_addr;

			if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
			if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
			if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
			vaddr = eppnt->p_vaddr;
			if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
				elf_type |= MAP_FIXED;

			map_addr = elf_map(interpreter, load_addr + vaddr, eppnt, elf_prot, elf_type);
			error = map_addr;
			if (BAD_ADDR(map_addr))
				goto out_close;

			if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
				load_addr = map_addr - ELF_PAGESTART(vaddr);
				load_addr_set = 1;
			}

			/*
			 * Check to see if the section's size will overflow the
			 * allowed task size. Note that p_filesz must always be
			 * <= p_memsz so it is only necessary to check p_memsz.
			 */
			k = load_addr + eppnt->p_vaddr;
			if (k > TASK_SIZE || eppnt->p_filesz > eppnt->p_memsz ||
			    eppnt->p_memsz > TASK_SIZE || TASK_SIZE - eppnt->p_memsz < k) {
				error = -ENOMEM;
				goto out_close;
			}

			/*
			 * Find the end of the file mapping for this phdr, and keep
			 * track of the largest address we see for this.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
			if (k > elf_bss)
				elf_bss = k;

			/*
			 * Do the same thing for the memory mapping - between
			 * elf_bss and last_bss is the bss section.
			 */
			k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
			if (k > last_bss)
				last_bss = k;
		}
	}

	/*
	 * Now fill out the bss section.  First pad the last page up
	 * to the page boundary, and then perform a mmap to make sure
	 * that there are zero-mapped pages up to and including the
	 * last bss page.
	 */
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out_close;
	}

	elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);	/* What we have mapped so far */

	/* Map the last of the bss segment */
	if (last_bss > elf_bss) {
		down_write(&current->mm->mmap_sem);
		error = do_brk(elf_bss, last_bss - elf_bss);
		up_write(&current->mm->mmap_sem);
		if (BAD_ADDR(error))
			goto out_close;
	}

	*interp_load_addr = load_addr;
	error = ((unsigned long) interp_elf_ex->e_entry) + load_addr;

out_close:
	kfree(elf_phdata);
out:
	return error;
}

static unsigned long load_aout_interp(struct exec *interp_ex,
				      struct file *interpreter)
{
	unsigned long text_data, elf_entry = ~0UL;
	char __user *addr;
	loff_t offset;

	current->mm->end_code = interp_ex->a_text;
	text_data = interp_ex->a_text + interp_ex->a_data;
	current->mm->end_data = text_data;
	current->mm->brk = interp_ex->a_bss + text_data;

	switch (N_MAGIC(*interp_ex)) {
	case OMAGIC:
		offset = 32;
		addr = (char __user *)0;
		break;
	case ZMAGIC:
	case QMAGIC:
		offset = N_TXTOFF(*interp_ex);
		addr = (char __user *) N_TXTADDR(*interp_ex);
		break;
	default:
		goto out;
	}

	down_write(&current->mm->mmap_sem);
	do_brk(0, text_data);
	up_write(&current->mm->mmap_sem);
	if (!interpreter->f_op || !interpreter->f_op->read)
		goto out;
	if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
		goto out;
	flush_icache_range((unsigned long)addr,
			   (unsigned long)addr + text_data);

	down_write(&current->mm->mmap_sem);
	do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
		interp_ex->a_bss);
	up_write(&current->mm->mmap_sem);
	elf_entry = interp_ex->a_entry;

out:
	return elf_entry;
}

/*
 * These are the functions used to load ELF style executables and shared
 * libraries.  There is no binary dependent code anywhere else.
 */

#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
#define INTERPRETER_ELF 2

static unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned int random_variable = 0;

	if (current->flags & PF_RANDOMIZE)
		random_variable = get_random_int() % (8*1024*1024);
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top + random_variable);
#else
	return PAGE_ALIGN(stack_top - random_variable);
#endif
}
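
/*
 * With PF_RANDOMIZE set this shifts the stack base by up to 8 MiB,
 * page-aligned: downward from stack_top normally, upward on
 * CONFIG_STACK_GROWSUP architectures.
 */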

static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
{
	struct file *interpreter = NULL; /* to shut gcc up */
	unsigned long load_addr = 0, load_bias = 0;
	int load_addr_set = 0;
	char *elf_interpreter = NULL;
	unsigned int interpreter_type = INTERPRETER_NONE;
	unsigned char ibcs2_interpreter = 0;
	unsigned long error;
	struct elf_phdr *elf_ppnt, *elf_phdata;
	unsigned long elf_bss, elf_brk;
	int elf_exec_fileno;
	int retval, i;
	unsigned int size;
	unsigned long elf_entry, interp_load_addr = 0;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long reloc_func_desc = 0;
	char passed_fileno[6];
	struct files_struct *files;
	int have_pt_gnu_stack, executable_stack = EXSTACK_DEFAULT;
	unsigned long def_flags = 0;
	struct {
		struct elfhdr elf_ex;
		struct elfhdr interp_elf_ex;
		struct exec interp_ex;
	} *loc;

	loc = kmalloc(sizeof(*loc), GFP_KERNEL);
	if (!loc) {
		retval = -ENOMEM;
		goto out_ret;
	}

	/* Get the exec-header */
	loc->elf_ex = *((struct elfhdr *) bprm->buf);

	retval = -ENOEXEC;
	/* First of all, some simple consistency checks */
	if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(&loc->elf_ex))
		goto out;
	if (!bprm->file->f_op || !bprm->file->f_op->mmap)
		goto out;

	/* Now read in all of the header information */
	if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr))
		goto out;
	if (loc->elf_ex.e_phnum < 1 ||
	    loc->elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;
	size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr);
	retval = -ENOMEM;
	elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(bprm->file, loc->elf_ex.e_phoff, (char *) elf_phdata, size);
	if (retval != size) {
		if (retval >= 0)
			retval = -EIO;
		goto out_free_ph;
	}

	files = current->files;	/* Refcounted so ok */
	retval = unshare_files();
	if (retval < 0)
		goto out_free_ph;
	if (files == current->files) {
		put_files_struct(files);
		files = NULL;
	}

	/* exec will make our files private anyway, but for the a.out
	   loader stuff we need to do it earlier */
	retval = get_unused_fd();
	if (retval < 0)
		goto out_free_fh;
	get_file(bprm->file);
	fd_install(elf_exec_fileno = retval, bprm->file);

	elf_ppnt = elf_phdata;
	elf_bss = 0;
	elf_brk = 0;

	start_code = ~0UL;
	end_code = 0;
	start_data = 0;
	end_data = 0;

	for (i = 0; i < loc->elf_ex.e_phnum; i++) {
		if (elf_ppnt->p_type == PT_INTERP) {
			/* This is the program interpreter used for
			 * shared libraries - for now assume that this
			 * is an a.out format binary
			 */
			retval = -ENOEXEC;
			if (elf_ppnt->p_filesz > PATH_MAX ||
			    elf_ppnt->p_filesz < 2)
				goto out_free_file;

			retval = -ENOMEM;
			elf_interpreter = (char *) kmalloc(elf_ppnt->p_filesz,
							   GFP_KERNEL);
			if (!elf_interpreter)
				goto out_free_file;

			retval = kernel_read(bprm->file, elf_ppnt->p_offset,
					     elf_interpreter,
					     elf_ppnt->p_filesz);
			if (retval != elf_ppnt->p_filesz) {
				if (retval >= 0)
					retval = -EIO;
				goto out_free_interp;
			}
			/* make sure path is NULL terminated */
			retval = -ENOEXEC;
			if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
				goto out_free_interp;

			/* If the program interpreter is one of these two,
			 * then assume an iBCS2 image. Otherwise assume
			 * a native linux image.
			 */
			if (strcmp(elf_interpreter, "/usr/lib/libc.so.1") == 0 ||
			    strcmp(elf_interpreter, "/usr/lib/ld.so.1") == 0)
				ibcs2_interpreter = 1;

			/*
			 * The early SET_PERSONALITY here is so that the lookup
			 * for the interpreter happens in the namespace of the
			 * to-be-execed image.  SET_PERSONALITY can select an
			 * alternate root.
			 *
			 * However, SET_PERSONALITY is NOT allowed to switch
			 * this task into the new image's memory mapping
			 * policy - that is, TASK_SIZE must still evaluate to
			 * that which is appropriate to the execing application.
			 * This is because exit_mmap() needs to have TASK_SIZE
			 * evaluate to the size of the old image.
			 *
			 * So if (say) a 64-bit application is execing a 32-bit
			 * application it is the architecture's responsibility
			 * to defer changing the value of TASK_SIZE until the
			 * switch really is going to happen - do this in
			 * flush_thread().	- akpm
			 */
			SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);

			interpreter = open_exec(elf_interpreter);
			retval = PTR_ERR(interpreter);
			if (IS_ERR(interpreter))
				goto out_free_interp;
			retval = kernel_read(interpreter, 0, bprm->buf, BINPRM_BUF_SIZE);
			if (retval != BINPRM_BUF_SIZE) {
				if (retval >= 0)
					retval = -EIO;
				goto out_free_dentry;
			}

			/* Get the exec headers */
			loc->interp_ex = *((struct exec *) bprm->buf);
			loc->interp_elf_ex = *((struct elfhdr *) bprm->buf);
			break;
		}
		elf_ppnt++;
	}

	elf_ppnt = elf_phdata;
	for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
		if (elf_ppnt->p_type == PT_GNU_STACK) {
			if (elf_ppnt->p_flags & PF_X)
				executable_stack = EXSTACK_ENABLE_X;
			else
				executable_stack = EXSTACK_DISABLE_X;
			break;
		}
	have_pt_gnu_stack = (i < loc->elf_ex.e_phnum);
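
	/* Absent a PT_GNU_STACK header, executable_stack stays
	   EXSTACK_DEFAULT and elf_read_implies_exec() below lets the
	   architecture decide whether readable means executable. */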

	/* Some simple consistency checks for the interpreter */
	if (elf_interpreter) {
		interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;

		/* Now figure out which format our binary is */
		if ((N_MAGIC(loc->interp_ex) != OMAGIC) &&
		    (N_MAGIC(loc->interp_ex) != ZMAGIC) &&
		    (N_MAGIC(loc->interp_ex) != QMAGIC))
			interpreter_type = INTERPRETER_ELF;

		if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
			interpreter_type &= ~INTERPRETER_ELF;

		retval = -ELIBBAD;
		if (!interpreter_type)
			goto out_free_dentry;

		/* Make sure only one type was selected */
		if ((interpreter_type & INTERPRETER_ELF) &&
		     interpreter_type != INTERPRETER_ELF) {
			// FIXME - ratelimit this before re-enabling
			// printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
			interpreter_type = INTERPRETER_ELF;
		}
		/* Verify the interpreter has a valid arch */
		if ((interpreter_type == INTERPRETER_ELF) &&
		    !elf_check_arch(&loc->interp_elf_ex))
			goto out_free_dentry;
	} else {
		/* Executables without an interpreter also need a personality */
		SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
	}

	/* OK, we are done with that, now set up the arg stuff,
	   and then start this sucker up */
	if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) {
		char *passed_p = passed_fileno;
		sprintf(passed_fileno, "%d", elf_exec_fileno);

		if (elf_interpreter) {
			retval = copy_strings_kernel(1, &passed_p, bprm);
			if (retval)
				goto out_free_dentry;
			bprm->argc++;
		}
	}

	/* Flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	if (retval)
		goto out_free_dentry;

	/* Discard our unneeded old files struct */
	if (files) {
		steal_locks(files);
		put_files_struct(files);
		files = NULL;
	}

	/* OK, This is the point of no return */
	current->mm->start_data = 0;
	current->mm->end_data = 0;
	current->mm->end_code = 0;
	current->mm->mmap = NULL;
	current->flags &= ~PF_FORKNOEXEC;
	current->mm->def_flags = def_flags;

	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
	   may depend on the personality.  */
	SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
	if (elf_read_implies_exec(loc->elf_ex, executable_stack))
		current->personality |= READ_IMPLIES_EXEC;

	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		current->flags |= PF_RANDOMIZE;
	arch_pick_mmap_layout(current->mm);

	/* Do this so that we can load the interpreter, if need be.  We will
	   change some of these later */
	set_mm_counter(current->mm, rss, 0);
	current->mm->free_area_cache = current->mm->mmap_base;
	current->mm->cached_hole_size = 0;
	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
				 executable_stack);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		goto out_free_dentry;
	}

	current->mm->start_stack = bprm->p;

	/* Now we do a little grungy work by mmaping the ELF image into
	   the correct location in memory.  At this point, we assume that
	   the image should be loaded at fixed address, not at a variable
	   address. */
	for (i = 0, elf_ppnt = elf_phdata; i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
		int elf_prot = 0, elf_flags;
		unsigned long k, vaddr;

		if (elf_ppnt->p_type != PT_LOAD)
			continue;

		if (unlikely(elf_brk > elf_bss)) {
			unsigned long nbyte;

			/* There was a PT_LOAD segment with p_memsz > p_filesz
			   before this one. Map anonymous pages, if needed,
			   and clear the area.  */
			retval = set_brk(elf_bss + load_bias,
					 elf_brk + load_bias);
			if (retval) {
				send_sig(SIGKILL, current, 0);
				goto out_free_dentry;
			}
			nbyte = ELF_PAGEOFFSET(elf_bss);
			if (nbyte) {
				nbyte = ELF_MIN_ALIGN - nbyte;
				if (nbyte > elf_brk - elf_bss)
					nbyte = elf_brk - elf_bss;
				if (clear_user((void __user *)elf_bss +
							load_bias, nbyte)) {
					/*
					 * This bss-zeroing can fail if the ELF
					 * file specifies odd protections. So
					 * we don't check the return value
					 */
				}
			}
		}

		if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
		if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
		if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;

		elf_flags = MAP_PRIVATE|MAP_DENYWRITE|MAP_EXECUTABLE;

		vaddr = elf_ppnt->p_vaddr;
		if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
			elf_flags |= MAP_FIXED;
		} else if (loc->elf_ex.e_type == ET_DYN) {
			/* Try and get dynamic programs out of the way of the default mmap
			   base, as well as whatever program they might try to exec.  This
			   is because the brk will follow the loader, and is not movable.  */
			load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
		}

		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags);
		if (BAD_ADDR(error)) {
			send_sig(SIGKILL, current, 0);
			goto out_free_dentry;
		}

		if (!load_addr_set) {
			load_addr_set = 1;
			load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
			if (loc->elf_ex.e_type == ET_DYN) {
				load_bias += error -
					     ELF_PAGESTART(load_bias + vaddr);
				load_addr += load_bias;
				reloc_func_desc = load_bias;
			}
		}
		k = elf_ppnt->p_vaddr;
		if (k < start_code) start_code = k;
		if (start_data < k) start_data = k;

		/*
		 * Check to see if the section's size will overflow the
		 * allowed task size. Note that p_filesz must always be
		 * <= p_memsz so it is only necessary to check p_memsz.
		 */
		if (k > TASK_SIZE || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
		    elf_ppnt->p_memsz > TASK_SIZE ||
		    TASK_SIZE - elf_ppnt->p_memsz < k) {
			/* set_brk can never work.  Avoid overflows.  */
			send_sig(SIGKILL, current, 0);
			goto out_free_dentry;
		}

		k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;

		if (k > elf_bss)
			elf_bss = k;
		if ((elf_ppnt->p_flags & PF_X) && end_code < k)
			end_code = k;
		if (end_data < k)
			end_data = k;
		k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
		if (k > elf_brk)
			elf_brk = k;
	}

	loc->elf_ex.e_entry += load_bias;
	elf_bss += load_bias;
	elf_brk += load_bias;
	start_code += load_bias;
	end_code += load_bias;
	start_data += load_bias;
	end_data += load_bias;
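
	/* load_bias is zero for ET_EXEC images; for ET_DYN it is the
	   delta between where the first segment actually landed and its
	   p_vaddr, so everything derived from program header addresses
	   has just been rebased by it. */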

	/* Calling set_brk effectively mmaps the pages that we need
	 * for the bss and break sections.  We must do this before
	 * mapping in the interpreter, to make sure it doesn't wind
	 * up getting placed where the bss needs to go.
	 */
	retval = set_brk(elf_bss, elf_brk);
	if (retval) {
		send_sig(SIGKILL, current, 0);
		goto out_free_dentry;
	}
	if (padzero(elf_bss)) {
		send_sig(SIGSEGV, current, 0);
		retval = -EFAULT; /* Nobody gets to see this, but.. */
		goto out_free_dentry;
	}

	if (elf_interpreter) {
		if (interpreter_type == INTERPRETER_AOUT)
			elf_entry = load_aout_interp(&loc->interp_ex,
						     interpreter);
		else
			elf_entry = load_elf_interp(&loc->interp_elf_ex,
						    interpreter,
						    &interp_load_addr);
		if (BAD_ADDR(elf_entry)) {
			printk(KERN_ERR "Unable to load interpreter %.128s\n",
				elf_interpreter);
			force_sig(SIGSEGV, current);
			retval = -ENOEXEC; /* Nobody gets to see this, but.. */
			goto out_free_dentry;
		}
		reloc_func_desc = interp_load_addr;

		allow_write_access(interpreter);
		fput(interpreter);
		kfree(elf_interpreter);
	} else {
		elf_entry = loc->elf_ex.e_entry;
	}

	kfree(elf_phdata);

	if (interpreter_type != INTERPRETER_AOUT)
		sys_close(elf_exec_fileno);

	set_binfmt(&elf_format);

#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
	retval = arch_setup_additional_pages(bprm, executable_stack);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		goto out;
	}
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */

	compute_creds(bprm);
	current->flags &= ~PF_FORKNOEXEC;
	create_elf_tables(bprm, &loc->elf_ex, (interpreter_type == INTERPRETER_AOUT),
			load_addr, interp_load_addr);
	/* N.B. passed_fileno might not be initialized? */
	if (interpreter_type == INTERPRETER_AOUT)
		current->mm->arg_start += strlen(passed_fileno) + 1;
	current->mm->end_code = end_code;
	current->mm->start_code = start_code;
	current->mm->start_data = start_data;
	current->mm->end_data = end_data;
	current->mm->start_stack = bprm->p;

	if (current->personality & MMAP_PAGE_ZERO) {
		/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
		   and some applications "depend" upon this behavior.
		   Since we do not have the power to recompile these, we
		   emulate the SVr4 behavior.  Sigh.  */
		down_write(&current->mm->mmap_sem);
		error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE, 0);
		up_write(&current->mm->mmap_sem);
	}

#ifdef ELF_PLAT_INIT
	/*
	 * The ABI may specify that certain registers be set up in special
	 * ways (on i386 %edx is the address of a DT_FINI function, for
	 * example.  In addition, it may also specify (eg, PowerPC64 ELF)
	 * that the e_entry field is the address of the function descriptor
	 * for the startup routine, rather than the address of the startup
	 * routine itself.  This macro performs whatever initialization to
	 * the regs structure is required as well as any relocations to the
	 * function descriptor entries when executing dynamically linked apps.
	 */
	ELF_PLAT_INIT(regs, reloc_func_desc);
#endif

	start_thread(regs, elf_entry, bprm->p);
	if (unlikely(current->ptrace & PT_PTRACED)) {
		if (current->ptrace & PT_TRACE_EXEC)
			ptrace_notify((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
		else
			send_sig(SIGTRAP, current, 0);
	}
	retval = 0;
out:
	kfree(loc);
out_ret:
	return retval;

	/* error cleanup */
out_free_dentry:
	allow_write_access(interpreter);
	if (interpreter)
		fput(interpreter);
out_free_interp:
	if (elf_interpreter)
		kfree(elf_interpreter);
out_free_file:
	sys_close(elf_exec_fileno);
out_free_fh:
	if (files) {
		put_files_struct(current->files);
		current->files = files;
	}
out_free_ph:
	kfree(elf_phdata);
	goto out;
}

/* This is really simpleminded and specialized - we are loading an
   a.out library that is given an ELF header. */
static int load_elf_library(struct file *file)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long elf_bss, bss, len;
	int retval, error, i, j;
	struct elfhdr elf_ex;

	error = -ENOEXEC;
	retval = kernel_read(file, 0, (char *) &elf_ex, sizeof(elf_ex));
	if (retval != sizeof(elf_ex))
		goto out;

	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	/* First of all, some simple consistency checks */
	if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
	    !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
		goto out;

	/* Now read in all of the header information */

	j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
	/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */

	error = -ENOMEM;
	elf_phdata = kmalloc(j, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	eppnt = elf_phdata;
	error = -ENOEXEC;
	retval = kernel_read(file, elf_ex.e_phoff, (char *)eppnt, j);
	if (retval != j)
		goto out_free_ph;

	for (j = 0, i = 0; i < elf_ex.e_phnum; i++)
		if ((eppnt + i)->p_type == PT_LOAD)
			j++;
	if (j != 1)
		goto out_free_ph;

	while (eppnt->p_type != PT_LOAD)
		eppnt++;

	/* Now use mmap to map the library into memory. */
	down_write(&current->mm->mmap_sem);
	error = do_mmap(file,
			ELF_PAGESTART(eppnt->p_vaddr),
			(eppnt->p_filesz +
			 ELF_PAGEOFFSET(eppnt->p_vaddr)),
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
			(eppnt->p_offset -
			 ELF_PAGEOFFSET(eppnt->p_vaddr)));
	up_write(&current->mm->mmap_sem);
	if (error != ELF_PAGESTART(eppnt->p_vaddr))
		goto out_free_ph;

	elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out_free_ph;
	}

	len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr + ELF_MIN_ALIGN - 1);
	bss = eppnt->p_memsz + eppnt->p_vaddr;
	if (bss > len) {
		down_write(&current->mm->mmap_sem);
		do_brk(len, bss - len);
		up_write(&current->mm->mmap_sem);
	}
	error = 0;

out_free_ph:
	kfree(elf_phdata);
out:
	return error;
}

/*
 * Note that some platforms still use traditional core dumps and not
 * the ELF core dump.  Each platform can select it as appropriate.
 */
#ifdef USE_ELF_CORE_DUMP

/*
 * ELF core dumper
 *
 * Modelled on fs/exec.c:aout_core_dump()
 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
 */

/*
 * These are the only things you should do on a core-file: use only these
 * functions to write out all the necessary info.
 */
static int dump_write(struct file *file, const void *addr, int nr)
{
	return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
}

static int dump_seek(struct file *file, loff_t off)
{
	if (file->f_op->llseek) {
		if (file->f_op->llseek(file, off, 0) != off)
			return 0;
	} else
		file->f_pos = off;
	return 1;
}

/*
 * Decide whether a segment is worth dumping; default is yes to be
 * sure (missing info is worse than too much; etc).
 * Personally I'd include everything, and use the coredump limit...
 *
 * I think we should skip something. But I am not sure how. H.J.
 */
static int maydump(struct vm_area_struct *vma)
{
	/* Do not dump I/O mapped devices or special mappings */
	if (vma->vm_flags & (VM_IO | VM_RESERVED))
		return 0;

	/* Dump shared memory only if mapped from an anonymous file. */
	if (vma->vm_flags & VM_SHARED)
		return vma->vm_file->f_dentry->d_inode->i_nlink == 0;

	/* If it hasn't been written to, don't write it out */
	if (!vma->anon_vma)
		return 0;

	return 1;
}

#define roundup(x, y)  ((((x)+((y)-1))/(y))*(y))

/* An ELF note in memory */
struct memelfnote
{
	const char *name;
	int type;
	unsigned int datasz;
	void *data;
};

static int notesize(struct memelfnote *en)
{
	int sz;

	sz = sizeof(struct elf_note);
	sz += roundup(strlen(en->name) + 1, 4);
	sz += roundup(en->datasz, 4);

	return sz;
}
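
/*
 * On-disk note layout that notesize() accounts for: the elf_note
 * header (n_namesz, n_descsz, n_type), then the name including its
 * NUL, then the descriptor data, with name and descriptor each padded
 * to a 4-byte boundary - exactly what writenote() below emits.
 */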

#define DUMP_WRITE(addr, nr)	\
	do { if (!dump_write(file, (addr), (nr))) return 0; } while(0)
#define DUMP_SEEK(off)	\
	do { if (!dump_seek(file, (off))) return 0; } while(0)

static int writenote(struct memelfnote *men, struct file *file)
{
	struct elf_note en;

	en.n_namesz = strlen(men->name) + 1;
	en.n_descsz = men->datasz;
	en.n_type = men->type;

	DUMP_WRITE(&en, sizeof(en));
	DUMP_WRITE(men->name, en.n_namesz);
	/* XXX - cast from long long to long to avoid need for libgcc.a */
	DUMP_SEEK(roundup((unsigned long)file->f_pos, 4));	/* XXX */
	DUMP_WRITE(men->data, men->datasz);
	DUMP_SEEK(roundup((unsigned long)file->f_pos, 4));	/* XXX */

	return 1;
}
#undef DUMP_WRITE
#undef DUMP_SEEK

#define DUMP_WRITE(addr, nr)	\
	if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
		goto end_coredump;
#define DUMP_SEEK(off)	\
	if (!dump_seek(file, (off))) \
		goto end_coredump;
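
/*
 * Note the two definitions of DUMP_WRITE/DUMP_SEEK: the pair above
 * (since #undef'd) makes writenote() return 0 on failure, while in
 * this second pair DUMP_WRITE also charges the bytes written against
 * the RLIMIT_CORE limit, and both bail out to end_coredump from
 * within elf_core_dump() itself.
 */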

static inline void fill_elf_header(struct elfhdr *elf, int segs)
{
	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS] = ELF_CLASS;
	elf->e_ident[EI_DATA] = ELF_DATA;
	elf->e_ident[EI_VERSION] = EV_CURRENT;
	elf->e_ident[EI_OSABI] = ELF_OSABI;
	memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);

	elf->e_type = ET_CORE;
	elf->e_machine = ELF_ARCH;
	elf->e_version = EV_CURRENT;
	elf->e_entry = 0;
	elf->e_phoff = sizeof(struct elfhdr);
	elf->e_shoff = 0;
	elf->e_flags = ELF_CORE_EFLAGS;
	elf->e_ehsize = sizeof(struct elfhdr);
	elf->e_phentsize = sizeof(struct elf_phdr);
	elf->e_phnum = segs;
	elf->e_shentsize = 0;
	elf->e_shnum = 0;
	elf->e_shstrndx = 0;
	return;
}

static inline void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
{
	phdr->p_type = PT_NOTE;
	phdr->p_offset = offset;
	phdr->p_vaddr = 0;
	phdr->p_paddr = 0;
	phdr->p_filesz = sz;
	phdr->p_memsz = 0;
	phdr->p_flags = 0;
	phdr->p_align = 0;
	return;
}

static void fill_note(struct memelfnote *note, const char *name, int type,
		unsigned int sz, void *data)
{
	note->name = name;
	note->type = type;
	note->datasz = sz;
	note->data = data;
	return;
}

/*
 * fill up all the fields in prstatus from the given task struct, except
 * registers which need to be filled up separately.
 */
static void fill_prstatus(struct elf_prstatus *prstatus,
			struct task_struct *p, long signr)
{
	prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
	prstatus->pr_sigpend = p->pending.signal.sig[0];
	prstatus->pr_sighold = p->blocked.sig[0];
	prstatus->pr_pid = p->pid;
	prstatus->pr_ppid = p->parent->pid;
	prstatus->pr_pgrp = process_group(p);
	prstatus->pr_sid = p->signal->session;
	if (thread_group_leader(p)) {
		/*
		 * This is the record for the group leader.  Add in the
		 * cumulative times of previous dead threads.  This total
		 * won't include the time of each live thread whose state
		 * is included in the core dump.  The final total reported
		 * to our parent process when it calls wait4 will include
		 * those sums as well as the little bit more time it takes
		 * this and each other thread to finish dying after the
		 * core dump synchronization phase.
		 */
		cputime_to_timeval(cputime_add(p->utime, p->signal->utime),
				   &prstatus->pr_utime);
		cputime_to_timeval(cputime_add(p->stime, p->signal->stime),
				   &prstatus->pr_stime);
	} else {
		cputime_to_timeval(p->utime, &prstatus->pr_utime);
		cputime_to_timeval(p->stime, &prstatus->pr_stime);
	}
	cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
	cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
}

static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
		       struct mm_struct *mm)
{
	unsigned int i, len;

	/* first copy the parameters from user space */
	memset(psinfo, 0, sizeof(struct elf_prpsinfo));

	len = mm->arg_end - mm->arg_start;
	if (len >= ELF_PRARGSZ)
		len = ELF_PRARGSZ-1;
	if (copy_from_user(&psinfo->pr_psargs,
			   (const char __user *)mm->arg_start, len))
		return -EFAULT;
	for (i = 0; i < len; i++)
		if (psinfo->pr_psargs[i] == 0)
			psinfo->pr_psargs[i] = ' ';
	psinfo->pr_psargs[len] = 0;

	psinfo->pr_pid = p->pid;
	psinfo->pr_ppid = p->parent->pid;
	psinfo->pr_pgrp = process_group(p);
	psinfo->pr_sid = p->signal->session;

	i = p->state ? ffz(~p->state) + 1 : 0;
	psinfo->pr_state = i;
	psinfo->pr_sname = (i < 0 || i > 5) ? '.' : "RSDTZW"[i];
	psinfo->pr_zomb = psinfo->pr_sname == 'Z';
	psinfo->pr_nice = task_nice(p);
	psinfo->pr_flag = p->flags;
	SET_UID(psinfo->pr_uid, p->uid);
	SET_GID(psinfo->pr_gid, p->gid);
	strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));

	return 0;
}

/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status
{
	struct list_head list;
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	elf_fpregset_t fpu;		/* NT_PRFPREG */
	struct task_struct *thread;
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t xfpu;		/* NT_PRXFPREG */
#endif
	struct memelfnote notes[3];
	int num_notes;
};

/*
 * In order to add the specific thread information for the elf file format,
 * we need to keep a linked list of every thread's pr_status and then
 * create a single section for them in the final core file.
 */
static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
{
	int sz = 0;
	struct task_struct *p = t->thread;
	t->num_notes = 0;

	fill_prstatus(&t->prstatus, p, signr);
	elf_core_copy_task_regs(p, &t->prstatus.pr_reg);

	fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus), &(t->prstatus));
	t->num_notes++;
	sz += notesize(&t->notes[0]);

	if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL, &t->fpu))) {
		fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu), &(t->fpu));
		t->num_notes++;
		sz += notesize(&t->notes[1]);
	}

#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
		fill_note(&t->notes[2], "LINUX", NT_PRXFPREG, sizeof(t->xfpu), &t->xfpu);
		t->num_notes++;
		sz += notesize(&t->notes[2]);
	}
#endif
	return sz;
}

/*
 * Actual dumper
 *
 * This is a two-pass process; first we find the offsets of the bits,
 * and then they are actually written out.  If we run out of core limit
 * we just truncate.
 */
static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file)
{
#define	NUM_NOTES	6
	int has_dumped = 0;
	mm_segment_t fs;
	int segs;
	size_t size = 0;
	int i;
	struct vm_area_struct *vma;
	struct elfhdr *elf = NULL;
	off_t offset = 0, dataoff;
	unsigned long limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
	int numnote;
	struct memelfnote *notes = NULL;
	struct elf_prstatus *prstatus = NULL;	/* NT_PRSTATUS */
	struct elf_prpsinfo *psinfo = NULL;	/* NT_PRPSINFO */
	struct task_struct *g, *p;
	LIST_HEAD(thread_list);
	struct list_head *t;
	elf_fpregset_t *fpu = NULL;
#ifdef	ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t *xfpu = NULL;
#endif
	int thread_status_size = 0;
	elf_addr_t *auxv;

	/*
	 * We no longer stop all VM operations.
	 *
	 * This is because those processes that could possibly change map_count
	 * or the mmap / vma pages are now blocked in do_exit on current
	 * finishing this core dump.
	 *
	 * Only ptrace can touch these memory addresses, but it doesn't change
	 * the map_count or the pages allocated.  So no possibility of crashing
	 * exists while dumping the mm->vm_next areas to the core file.
	 */

	/* alloc memory for large data structures: too large to be on stack */
	elf = kmalloc(sizeof(*elf), GFP_KERNEL);
	if (!elf)
		goto cleanup;
	prstatus = kmalloc(sizeof(*prstatus), GFP_KERNEL);
	if (!prstatus)
		goto cleanup;
	psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
	if (!psinfo)
		goto cleanup;
	notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL);
	if (!notes)
		goto cleanup;
	fpu = kmalloc(sizeof(*fpu), GFP_KERNEL);
	if (!fpu)
		goto cleanup;
#ifdef	ELF_CORE_COPY_XFPREGS
	xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL);
	if (!xfpu)
		goto cleanup;
#endif

	if (signr) {
		struct elf_thread_status *tmp;
		read_lock(&tasklist_lock);
		do_each_thread(g,p)
			if (current->mm == p->mm && current != p) {
				tmp = kmalloc(sizeof(*tmp), GFP_ATOMIC);
				if (!tmp) {
					read_unlock(&tasklist_lock);
					goto cleanup;
				}
				memset(tmp, 0, sizeof(*tmp));
				INIT_LIST_HEAD(&tmp->list);
				tmp->thread = p;
				list_add(&tmp->list, &thread_list);
			}
		while_each_thread(g,p);
		read_unlock(&tasklist_lock);
		list_for_each(t, &thread_list) {
			struct elf_thread_status *tmp;
			int sz;

			tmp = list_entry(t, struct elf_thread_status, list);
			sz = elf_dump_thread_status(signr, tmp);
			thread_status_size += sz;
		}
	}
	/* now collect the dump for the current */
	memset(prstatus, 0, sizeof(*prstatus));
	fill_prstatus(prstatus, current, signr);
	elf_core_copy_regs(&prstatus->pr_reg, regs);

	segs = current->mm->map_count;
#ifdef ELF_CORE_EXTRA_PHDRS
	segs += ELF_CORE_EXTRA_PHDRS;
#endif

	/* Set up header */
	fill_elf_header(elf, segs+1);	/* including notes section */

	has_dumped = 1;
	current->flags |= PF_DUMPCORE;

	/*
	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	 */

	fill_note(notes + 0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);

	fill_psinfo(psinfo, current->group_leader, current->mm);
	fill_note(notes + 1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);

	fill_note(notes + 2, "CORE", NT_TASKSTRUCT, sizeof(*current), current);

	numnote = 3;

	auxv = (elf_addr_t *) current->mm->saved_auxv;

	i = 0;
	do
		i += 2;
	while (auxv[i - 2] != AT_NULL);
	fill_note(&notes[numnote++], "CORE", NT_AUXV,
		  i * sizeof (elf_addr_t), auxv);

	/* Try to dump the FPU. */
	if ((prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs, fpu)))
		fill_note(notes + numnote++,
			  "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(current, xfpu))
		fill_note(notes + numnote++,
			  "LINUX", NT_PRXFPREG, sizeof(*xfpu), xfpu);
#endif
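
	/*
	 * Notes collected for the dumping task so far: NT_PRSTATUS,
	 * NT_PRPSINFO, NT_TASKSTRUCT and NT_AUXV, plus NT_PRFPREG and,
	 * where available, NT_PRXFPREG - at most NUM_NOTES (6) entries
	 * in notes[].
	 */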

	fs = get_fs();
	set_fs(KERNEL_DS);

	DUMP_WRITE(elf, sizeof(*elf));
	offset += sizeof(*elf);				/* Elf header */
	offset += (segs+1) * sizeof(struct elf_phdr);	/* Program headers */

	/* Write notes phdr entry */
	{
		struct elf_phdr phdr;
		int sz = 0;

		for (i = 0; i < numnote; i++)
			sz += notesize(notes + i);

		sz += thread_status_size;

		fill_elf_note_phdr(&phdr, sz, offset);
		offset += sz;
		DUMP_WRITE(&phdr, sizeof(phdr));
	}

	/* Page-align dumped data */
	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);

	/* Write program headers for segments dump */
	for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
		struct elf_phdr phdr;
		size_t sz;

		sz = vma->vm_end - vma->vm_start;

		phdr.p_type = PT_LOAD;
		phdr.p_offset = offset;
		phdr.p_vaddr = vma->vm_start;
		phdr.p_paddr = 0;
		phdr.p_filesz = maydump(vma) ? sz : 0;
		phdr.p_memsz = sz;
		offset += phdr.p_filesz;
		phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
		if (vma->vm_flags & VM_WRITE) phdr.p_flags |= PF_W;
		if (vma->vm_flags & VM_EXEC) phdr.p_flags |= PF_X;
		phdr.p_align = ELF_EXEC_PAGESIZE;

		DUMP_WRITE(&phdr, sizeof(phdr));
	}

#ifdef ELF_CORE_WRITE_EXTRA_PHDRS
	ELF_CORE_WRITE_EXTRA_PHDRS;
#endif
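
	/*
	 * At this point the ELF header, the note phdr and one PT_LOAD
	 * phdr per vma have been written; `offset` has tracked where
	 * each piece of data will land, with the segment contents
	 * page-aligned starting at dataoff.
	 */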

	/* write out the notes section */
	for (i = 0; i < numnote; i++)
		if (!writenote(notes + i, file))
			goto end_coredump;

	/* write out the thread status notes section */
	list_for_each(t, &thread_list) {
		struct elf_thread_status *tmp = list_entry(t, struct elf_thread_status, list);
		for (i = 0; i < tmp->num_notes; i++)
			if (!writenote(&tmp->notes[i], file))
				goto end_coredump;
	}

	DUMP_SEEK(dataoff);

	for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
		unsigned long addr;

		if (!maydump(vma))
			continue;

		for (addr = vma->vm_start;
		     addr < vma->vm_end;
		     addr += PAGE_SIZE) {
			struct page *page;
			struct vm_area_struct *vma;

			if (get_user_pages(current, current->mm, addr, 1, 0, 1,
						&page, &vma) <= 0) {
				DUMP_SEEK(file->f_pos + PAGE_SIZE);
			} else {
				if (page == ZERO_PAGE(addr)) {
					DUMP_SEEK(file->f_pos + PAGE_SIZE);
				} else {
					void *kaddr;
					flush_cache_page(vma, addr, page_to_pfn(page));
					kaddr = kmap(page);
					if ((size += PAGE_SIZE) > limit ||
					    !dump_write(file, kaddr,
							PAGE_SIZE)) {
						kunmap(page);
						page_cache_release(page);
						goto end_coredump;
					}
					kunmap(page);
				}
				page_cache_release(page);
			}
		}
	}

#ifdef ELF_CORE_WRITE_EXTRA_DATA
	ELF_CORE_WRITE_EXTRA_DATA;
#endif

	if ((off_t) file->f_pos != offset) {
		/* Sanity check */
		printk("elf_core_dump: file->f_pos (%ld) != offset (%ld)\n",
		       (off_t) file->f_pos, offset);
	}

end_coredump:
	set_fs(fs);

cleanup:
	while (!list_empty(&thread_list)) {
		struct list_head *tmp = thread_list.next;
		list_del(tmp);
		kfree(list_entry(tmp, struct elf_thread_status, list));
	}

	kfree(elf);
	kfree(prstatus);
	kfree(psinfo);
	kfree(notes);
	kfree(fpu);
#ifdef	ELF_CORE_COPY_XFPREGS
	kfree(xfpu);
#endif
	return has_dumped;
#undef NUM_NOTES
}

#endif		/* USE_ELF_CORE_DUMP */

static int __init init_elf_binfmt(void)
{
	return register_binfmt(&elf_format);
}

static void __exit exit_elf_binfmt(void)
{
	/* Remove the COFF and ELF loaders. */
	unregister_binfmt(&elf_format);
}

core_initcall(init_elf_binfmt);
module_exit(exit_elf_binfmt);
MODULE_LICENSE("GPL");