/*
 * linux/fs/binfmt_elf.c
 *
 * These are the functions used to load ELF format executables as used
 * on SVr4 machines.  Information on the format may be found in the book
 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
 * Tools".
 *
 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/a.out.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/binfmts.h>
#include <linux/string.h>
#include <linux/file.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/personality.h>
#include <linux/elfcore.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/uaccess.h>
#include <asm/param.h>
#include <asm/page.h>
static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs);
static int load_elf_library(struct file *);
static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
			     int, int);
/*
 * If we don't support core dumping, then supply a NULL so we
 * don't even try.
 */
#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file);
#else
#define elf_core_dump	NULL
#endif
#if ELF_EXEC_PAGESIZE > PAGE_SIZE
#define ELF_MIN_ALIGN	ELF_EXEC_PAGESIZE
#else
#define ELF_MIN_ALIGN	PAGE_SIZE
#endif

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS	0
#endif

#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
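/*
 * Illustrative note (not part of the original source): assuming
 * ELF_MIN_ALIGN == 4096 (0x1000), a virtual address of 0x08048123
 * works out as
 *
 *   ELF_PAGESTART(0x08048123)  == 0x08048000
 *   ELF_PAGEOFFSET(0x08048123) == 0x00000123
 *   ELF_PAGEALIGN(0x08048123)  == 0x08049000
 *
 * i.e. round down to the containing ELF page, extract the offset
 * within it, and round up to the next ELF page boundary.
 */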
static struct linux_binfmt elf_format = {
	.module		= THIS_MODULE,
	.load_binary	= load_elf_binary,
	.load_shlib	= load_elf_library,
	.core_dump	= elf_core_dump,
	.min_coredump	= ELF_EXEC_PAGESIZE,
};

#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)
static int set_brk(unsigned long start, unsigned long end)
{
	start = ELF_PAGEALIGN(start);
	end = ELF_PAGEALIGN(end);
	if (end > start) {
		unsigned long addr;

		down_write(&current->mm->mmap_sem);
		addr = do_brk(start, end - start);
		up_write(&current->mm->mmap_sem);
		if (BAD_ADDR(addr))
			return addr;
	}
	current->mm->start_brk = current->mm->brk = end;
	return 0;
}
/* We need to explicitly zero any fractional pages
   after the data section (i.e. bss).  This would
   contain the junk from the file that should not
   be in memory
 */
static int padzero(unsigned long elf_bss)
{
	unsigned long nbyte;

	nbyte = ELF_PAGEOFFSET(elf_bss);
	if (nbyte) {
		nbyte = ELF_MIN_ALIGN - nbyte;
		if (clear_user((void __user *) elf_bss, nbyte))
			return -EFAULT;
	}
	return 0;
}
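/*
 * Illustrative note (not part of the original source): if the bss ends
 * at 0x0804a123 and ELF_MIN_ALIGN is 4096, ELF_PAGEOFFSET() yields
 * 0x123, so padzero() clears the remaining 0x1000 - 0x123 = 0xedd bytes
 * of that page, wiping whatever file data the final partial page
 * mapping dragged in.
 */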
/* Let's use some macros to make this stack manipulation a little clearer */
#ifdef CONFIG_STACK_GROWSUP
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
#define STACK_ROUND(sp, items) \
	((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ \
	elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \
	old_sp; })
#else
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
#define STACK_ROUND(sp, items) \
	(((unsigned long) (sp - items)) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
#endif
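/*
 * Illustrative note (not part of the original source): on the usual
 * downward-growing stack, STACK_ALLOC() simply moves the stack pointer
 * down by 'len' bytes and returns the new (lower) address, STACK_ADD()
 * reserves room for 'items' pointer-sized slots below 'sp', and
 * STACK_ROUND() masks the result down to a 16-byte boundary as the ABI
 * expects.  The CONFIG_STACK_GROWSUP variants mirror the same logic in
 * the opposite direction.
 */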
static int
create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
		int interp_aout, unsigned long load_addr,
		unsigned long interp_load_addr)
{
	unsigned long p = bprm->p;
	int argc = bprm->argc;
	int envc = bprm->envc;
	elf_addr_t __user *argv;
	elf_addr_t __user *envp;
	elf_addr_t __user *sp;
	elf_addr_t __user *u_platform;
	const char *k_platform = ELF_PLATFORM;
	int items;
	elf_addr_t *elf_info;
	int ei_index = 0;
	struct task_struct *tsk = current;

	/*
	 * If this architecture has a platform capability string, copy it
	 * to userspace.  In some cases (Sparc), this info is impossible
	 * for userspace to get any other way, in others (i386) it is
	 * merely difficult.
	 */
	u_platform = NULL;
	if (k_platform) {
		size_t len = strlen(k_platform) + 1;

		/*
		 * In some cases (e.g. Hyper-Threading), we want to avoid L1
		 * evictions by the processes running on the same package. One
		 * thing we can do is to shuffle the initial stack for them.
		 */
		p = arch_align_stack(p);

		u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (__copy_to_user(u_platform, k_platform, len))
			return -EFAULT;
	}

	/* Create the ELF interpreter info */
	elf_info = (elf_addr_t *)current->mm->saved_auxv;
#define NEW_AUX_ENT(id, val) \
	do { \
		elf_info[ei_index++] = id; \
		elf_info[ei_index++] = val; \
	} while (0)
#ifdef ARCH_DLINFO
	/*
	 * ARCH_DLINFO must come first so PPC can do its special alignment of
	 * AUXV.
	 */
	ARCH_DLINFO;
#endif
	NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
	NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
	NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
	NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
	NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
	NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
	NEW_AUX_ENT(AT_BASE, interp_load_addr);
	NEW_AUX_ENT(AT_FLAGS, 0);
	NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
	NEW_AUX_ENT(AT_UID, tsk->uid);
	NEW_AUX_ENT(AT_EUID, tsk->euid);
	NEW_AUX_ENT(AT_GID, tsk->gid);
	NEW_AUX_ENT(AT_EGID, tsk->egid);
	NEW_AUX_ENT(AT_SECURE, security_bprm_secureexec(bprm));
	if (k_platform) {
		NEW_AUX_ENT(AT_PLATFORM,
			    (elf_addr_t)(unsigned long)u_platform);
	}
	if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
		NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
	}
#undef NEW_AUX_ENT

	/* AT_NULL is zero; clear the rest too */
	memset(&elf_info[ei_index], 0,
	       sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);
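	/*
	 * Illustrative note (not part of the original source): at this
	 * point saved_auxv holds a sequence of (id, value) pairs, e.g.
	 *
	 *   AT_HWCAP, <hwcap bits>, AT_PAGESZ, 4096, ..., AT_NULL, 0
	 *
	 * and the memset above guarantees the terminating AT_NULL pair,
	 * since AT_NULL is defined as zero.
	 */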
	/* And advance past the AT_NULL entry.  */
	ei_index += 2;

	sp = STACK_ADD(p, ei_index);

	items = (argc + 1) + (envc + 1);
	if (interp_aout) {
		items += 3; /* a.out interpreters require argv & envp too */
	} else {
		items += 1; /* ELF interpreters only put argc on the stack */
	}
	bprm->p = STACK_ROUND(sp, items);

	/* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
	sp = (elf_addr_t __user *)bprm->p - items - ei_index;
	bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
#else
	sp = (elf_addr_t __user *)bprm->p;
#endif

	/* Now, let's put argc (and argv, envp if appropriate) on the stack */
	if (__put_user(argc, sp++))
		return -EFAULT;
	if (interp_aout) {
		argv = sp + 2;
		envp = argv + argc + 1;
		if (__put_user((elf_addr_t)(unsigned long)argv, sp++) ||
		    __put_user((elf_addr_t)(unsigned long)envp, sp++))
			return -EFAULT;
	} else {
		argv = sp;
		envp = argv + argc + 1;
	}

	/* Populate argv and envp */
	p = current->mm->arg_end = current->mm->arg_start;
	while (argc-- > 0) {
		size_t len;
		if (__put_user((elf_addr_t)p, argv++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
			return 0;
		p += len;
	}
	if (__put_user(0, argv))
		return -EFAULT;
	current->mm->arg_end = current->mm->env_start = p;
	while (envc-- > 0) {
		size_t len;
		if (__put_user((elf_addr_t)p, envp++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
			return 0;
		p += len;
	}
	if (__put_user(0, envp))
		return -EFAULT;
	current->mm->env_end = p;

	/* Put the elf_info on the stack in the right place.  */
	sp = (elf_addr_t __user *)envp + 1;
	if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
		return -EFAULT;
	return 0;
}
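/*
 * Illustrative note (not part of the original source): for a normal ELF
 * interpreter the initial stack built above, from low to high addresses,
 * is laid out roughly as
 *
 *   argc | argv[0..argc-1] | NULL | envp[0..envc-1] | NULL | auxv pairs
 *
 * followed by the argument/environment strings themselves, which is the
 * layout the SVr4/ELF ABI expects at process startup.
 */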
#ifndef elf_map

static unsigned long elf_map(struct file *filep, unsigned long addr,
		struct elf_phdr *eppnt, int prot, int type)
{
	unsigned long map_addr;
	unsigned long pageoffset = ELF_PAGEOFFSET(eppnt->p_vaddr);

	down_write(&current->mm->mmap_sem);
	/* mmap() will return -EINVAL if given a zero size, but a
	 * segment with zero filesize is perfectly valid */
	if (eppnt->p_filesz + pageoffset)
		map_addr = do_mmap(filep, ELF_PAGESTART(addr),
				   eppnt->p_filesz + pageoffset, prot, type,
				   eppnt->p_offset - pageoffset);
	else
		map_addr = ELF_PAGESTART(addr);
	up_write(&current->mm->mmap_sem);
	return(map_addr);
}

#endif /* !elf_map */
/* This is much more generalized than the library routine read function,
   so we keep this separate.  Technically the library read function
   is only provided so that we can read a.out libraries that have
   an ELF header */

static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
		struct file *interpreter, unsigned long *interp_load_addr)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long load_addr = 0;
	int load_addr_set = 0;
	unsigned long last_bss = 0, elf_bss = 0;
	unsigned long error = ~0UL;
	int retval, i, size;

	/* First of all, some simple consistency checks */
	if (interp_elf_ex->e_type != ET_EXEC &&
	    interp_elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(interp_elf_ex))
		goto out;
	if (!interpreter->f_op || !interpreter->f_op->mmap)
		goto out;

	/*
	 * If the size of this structure has changed, then punt, since
	 * we will be doing the wrong thing.
	 */
	if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
		goto out;
	if (interp_elf_ex->e_phnum < 1 ||
	    interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;

	/* Now read in all of the header information */
	size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
	if (size > ELF_MIN_ALIGN)
		goto out;
	elf_phdata = kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(interpreter, interp_elf_ex->e_phoff,
			     (char *)elf_phdata, size);
	error = -EIO;
	if (retval != size) {
		if (retval < 0)
			error = retval;
		goto out_close;
	}

	eppnt = elf_phdata;
	for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
		if (eppnt->p_type == PT_LOAD) {
			int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
			int elf_prot = 0;
			unsigned long vaddr = 0;
			unsigned long k, map_addr;

			if (eppnt->p_flags & PF_R)
				elf_prot = PROT_READ;
			if (eppnt->p_flags & PF_W)
				elf_prot |= PROT_WRITE;
			if (eppnt->p_flags & PF_X)
				elf_prot |= PROT_EXEC;
			vaddr = eppnt->p_vaddr;
			if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
				elf_type |= MAP_FIXED;

			map_addr = elf_map(interpreter, load_addr + vaddr,
					   eppnt, elf_prot, elf_type);
			error = map_addr;
			if (BAD_ADDR(map_addr))
				goto out_close;

			if (!load_addr_set &&
			    interp_elf_ex->e_type == ET_DYN) {
				load_addr = map_addr - ELF_PAGESTART(vaddr);
				load_addr_set = 1;
			}

			/*
			 * Check to see if the section's size will overflow the
			 * allowed task size. Note that p_filesz must always be
			 * <= p_memsz so it's only necessary to check p_memsz.
			 */
			k = load_addr + eppnt->p_vaddr;
			if (BAD_ADDR(k) ||
			    eppnt->p_filesz > eppnt->p_memsz ||
			    eppnt->p_memsz > TASK_SIZE ||
			    TASK_SIZE - eppnt->p_memsz < k) {
				error = -ENOMEM;
				goto out_close;
			}

			/*
			 * Find the end of the file mapping for this phdr, and
			 * keep track of the largest address we see for this.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
			if (k > elf_bss)
				elf_bss = k;

			/*
			 * Do the same thing for the memory mapping - between
			 * elf_bss and last_bss is the bss section.
			 */
			k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
			if (k > last_bss)
				last_bss = k;
		}
	}

	/*
	 * Now fill out the bss section.  First pad the last page up
	 * to the page boundary, and then perform a mmap to make sure
	 * that there are zero-mapped pages up to and including the
	 * last bss page.
	 */
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out_close;
	}

	/* What we have mapped so far */
	elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);

	/* Map the last of the bss segment */
	if (last_bss > elf_bss) {
		down_write(&current->mm->mmap_sem);
		error = do_brk(elf_bss, last_bss - elf_bss);
		up_write(&current->mm->mmap_sem);
		if (BAD_ADDR(error))
			goto out_close;
	}

	*interp_load_addr = load_addr;
	error = ((unsigned long)interp_elf_ex->e_entry) + load_addr;

out_close:
	kfree(elf_phdata);
out:
	return error;
}
static unsigned long load_aout_interp(struct exec *interp_ex,
		struct file *interpreter)
{
	unsigned long text_data, elf_entry = ~0UL;
	char __user *addr;
	loff_t offset;

	current->mm->end_code = interp_ex->a_text;
	text_data = interp_ex->a_text + interp_ex->a_data;
	current->mm->end_data = text_data;
	current->mm->brk = interp_ex->a_bss + text_data;

	switch (N_MAGIC(*interp_ex)) {
	case OMAGIC:
		offset = 32;
		addr = (char __user *)0;
		break;
	case ZMAGIC:
	case QMAGIC:
		offset = N_TXTOFF(*interp_ex);
		addr = (char __user *)N_TXTADDR(*interp_ex);
		break;
	default:
		goto out;
	}

	down_write(&current->mm->mmap_sem);
	do_brk(0, text_data);
	up_write(&current->mm->mmap_sem);
	if (!interpreter->f_op || !interpreter->f_op->read)
		goto out;
	if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
		goto out;

	flush_icache_range((unsigned long)addr,
			   (unsigned long)addr + text_data);

	down_write(&current->mm->mmap_sem);
	do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
		interp_ex->a_bss);
	up_write(&current->mm->mmap_sem);
	elf_entry = interp_ex->a_entry;

out:
	return elf_entry;
}
/*
 * These are the functions used to load ELF style executables and shared
 * libraries.  There is no binary dependent code anywhere else.
 */

#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
#define INTERPRETER_ELF 2

#ifndef STACK_RND_MASK
#define STACK_RND_MASK 0x7ff		/* with 4K pages 8MB of VA */
#endif
static unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned int random_variable = 0;

	if ((current->flags & PF_RANDOMIZE) &&
	    !(current->personality & ADDR_NO_RANDOMIZE)) {
		random_variable = get_random_int() & STACK_RND_MASK;
		random_variable <<= PAGE_SHIFT;
	}
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top) + random_variable;
#else
	return PAGE_ALIGN(stack_top) - random_variable;
#endif
}
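/*
 * Illustrative note (not part of the original source): with 4K pages
 * (PAGE_SHIFT == 12) and the default STACK_RND_MASK of 0x7ff, the
 * random offset is at most 0x7ff << 12 = 0x7ff000 bytes, i.e. just
 * under 8 MB of virtual address space below (or above, for
 * CONFIG_STACK_GROWSUP) the page-aligned stack top.
 */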
static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
{
	struct file *interpreter = NULL; /* to shut gcc up */
	unsigned long load_addr = 0, load_bias = 0;
	int load_addr_set = 0;
	char * elf_interpreter = NULL;
	unsigned int interpreter_type = INTERPRETER_NONE;
	unsigned char ibcs2_interpreter = 0;
	unsigned long error;
	struct elf_phdr *elf_ppnt, *elf_phdata;
	unsigned long elf_bss, elf_brk;
	int elf_exec_fileno;
	int retval, i;
	unsigned int size;
	unsigned long elf_entry, interp_load_addr = 0;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long reloc_func_desc = 0;
	char passed_fileno[6];
	struct files_struct *files;
	int executable_stack = EXSTACK_DEFAULT;
	unsigned long def_flags = 0;
	struct {
		struct elfhdr elf_ex;
		struct elfhdr interp_elf_ex;
		struct exec interp_ex;
	} *loc;

	loc = kmalloc(sizeof(*loc), GFP_KERNEL);
	if (!loc) {
		retval = -ENOMEM;
		goto out_ret;
	}

	/* Get the exec-header */
	loc->elf_ex = *((struct elfhdr *)bprm->buf);
	retval = -ENOEXEC;
	/* First of all, some simple consistency checks */
	if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(&loc->elf_ex))
		goto out;
	if (!bprm->file->f_op || !bprm->file->f_op->mmap)
		goto out;

	/* Now read in all of the header information */
	if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr))
		goto out;
	if (loc->elf_ex.e_phnum < 1 ||
	    loc->elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;
	size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr);
	retval = -ENOMEM;
	elf_phdata = kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(bprm->file, loc->elf_ex.e_phoff,
			     (char *)elf_phdata, size);
	if (retval != size) {
		if (retval >= 0)
			retval = -EIO;
		goto out_free_ph;
	}
	files = current->files;	/* Refcounted so ok */
	retval = unshare_files();
	if (retval < 0)
		goto out_free_ph;
	if (files == current->files) {
		put_files_struct(files);
		files = NULL;
	}

	/* exec will make our files private anyway, but for the a.out
	   loader stuff we need to do it earlier */
	retval = get_unused_fd();
	if (retval < 0)
		goto out_free_fh;
	get_file(bprm->file);
	fd_install(elf_exec_fileno = retval, bprm->file);

	elf_ppnt = elf_phdata;
	elf_bss = 0;
	elf_brk = 0;

	start_code = ~0UL;
	end_code = 0;
	start_data = 0;
	end_data = 0;
	for (i = 0; i < loc->elf_ex.e_phnum; i++) {
		if (elf_ppnt->p_type == PT_INTERP) {
			/* This is the program interpreter used for
			 * shared libraries - for now assume that this
			 * is an a.out format binary
			 */
			retval = -ENOEXEC;
			if (elf_ppnt->p_filesz > PATH_MAX ||
			    elf_ppnt->p_filesz < 2)
				goto out_free_file;

			retval = -ENOMEM;
			elf_interpreter = kmalloc(elf_ppnt->p_filesz,
						  GFP_KERNEL);
			if (!elf_interpreter)
				goto out_free_file;

			retval = kernel_read(bprm->file, elf_ppnt->p_offset,
					     elf_interpreter,
					     elf_ppnt->p_filesz);
			if (retval != elf_ppnt->p_filesz) {
				if (retval >= 0)
					retval = -EIO;
				goto out_free_interp;
			}
			/* make sure path is NULL terminated */
			retval = -ENOEXEC;
			if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
				goto out_free_interp;

			/* If the program interpreter is one of these two,
			 * then assume an iBCS2 image. Otherwise assume
			 * a native linux image.
			 */
			if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
			    strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0)
				ibcs2_interpreter = 1;

			/*
			 * The early SET_PERSONALITY here is so that the lookup
			 * for the interpreter happens in the namespace of the
			 * to-be-execed image.  SET_PERSONALITY can select an
			 * alternate root.
			 *
			 * However, SET_PERSONALITY is NOT allowed to switch
			 * this task into the new images's memory mapping
			 * policy - that is, TASK_SIZE must still evaluate to
			 * that which is appropriate to the execing application.
			 * This is because exit_mmap() needs to have TASK_SIZE
			 * evaluate to the size of the old image.
			 *
			 * So if (say) a 64-bit application is execing a 32-bit
			 * application it is the architecture's responsibility
			 * to defer changing the value of TASK_SIZE until the
			 * switch really is going to happen - do this in
			 * flush_thread().	- akpm
			 */
			SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);

			interpreter = open_exec(elf_interpreter);
			retval = PTR_ERR(interpreter);
			if (IS_ERR(interpreter))
				goto out_free_interp;
			retval = kernel_read(interpreter, 0, bprm->buf,
					     BINPRM_BUF_SIZE);
			if (retval != BINPRM_BUF_SIZE) {
				if (retval >= 0)
					retval = -EIO;
				goto out_free_dentry;
			}

			/* Get the exec headers */
			loc->interp_ex = *((struct exec *)bprm->buf);
			loc->interp_elf_ex = *((struct elfhdr *)bprm->buf);
			break;
		}
		elf_ppnt++;
	}
	elf_ppnt = elf_phdata;
	for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
		if (elf_ppnt->p_type == PT_GNU_STACK) {
			if (elf_ppnt->p_flags & PF_X)
				executable_stack = EXSTACK_ENABLE_X;
			else
				executable_stack = EXSTACK_DISABLE_X;
			break;
		}
	/* Some simple consistency checks for the interpreter */
	if (elf_interpreter) {
		interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;

		/* Now figure out which format our binary is */
		if ((N_MAGIC(loc->interp_ex) != OMAGIC) &&
		    (N_MAGIC(loc->interp_ex) != ZMAGIC) &&
		    (N_MAGIC(loc->interp_ex) != QMAGIC))
			interpreter_type = INTERPRETER_ELF;

		if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
			interpreter_type &= ~INTERPRETER_ELF;

		retval = -ELIBBAD;
		if (!interpreter_type)
			goto out_free_dentry;

		/* Make sure only one type was selected */
		if ((interpreter_type & INTERPRETER_ELF) &&
		     interpreter_type != INTERPRETER_ELF) {
			// FIXME - ratelimit this before re-enabling
			// printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
			interpreter_type = INTERPRETER_ELF;
		}
		/* Verify the interpreter has a valid arch */
		if ((interpreter_type == INTERPRETER_ELF) &&
		    !elf_check_arch(&loc->interp_elf_ex))
			goto out_free_dentry;
	} else {
		/* Executables without an interpreter also need a personality */
		SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
	}
	/* OK, we are done with that, now set up the arg stuff,
	   and then start this sucker up */
	if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) {
		char *passed_p = passed_fileno;
		sprintf(passed_fileno, "%d", elf_exec_fileno);

		if (elf_interpreter) {
			retval = copy_strings_kernel(1, &passed_p, bprm);
			if (retval)
				goto out_free_dentry;
			bprm->argc++;
		}
	}
	/* Flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	if (retval)
		goto out_free_dentry;

	/* Discard our unneeded old files struct */
	if (files) {
		put_files_struct(files);
		files = NULL;
	}

	/* OK, This is the point of no return */
	current->mm->start_data = 0;
	current->mm->end_data = 0;
	current->mm->end_code = 0;
	current->mm->mmap = NULL;
	current->flags &= ~PF_FORKNOEXEC;
	current->mm->def_flags = def_flags;

	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
	   may depend on the personality.  */
	SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
	if (elf_read_implies_exec(loc->elf_ex, executable_stack))
		current->personality |= READ_IMPLIES_EXEC;

	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		current->flags |= PF_RANDOMIZE;
	arch_pick_mmap_layout(current->mm);

	/* Do this so that we can load the interpreter, if need be.  We will
	   change some of these later */
	current->mm->free_area_cache = current->mm->mmap_base;
	current->mm->cached_hole_size = 0;
	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
				 executable_stack);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		goto out_free_dentry;
	}

	current->mm->start_stack = bprm->p;
	/* Now we do a little grungy work by mmaping the ELF image into
	   the correct location in memory.  At this point, we assume that
	   the image should be loaded at fixed address, not at a variable
	   address. */
	for (i = 0, elf_ppnt = elf_phdata;
	     i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
		int elf_prot = 0, elf_flags;
		unsigned long k, vaddr;

		if (elf_ppnt->p_type != PT_LOAD)
			continue;

		if (unlikely (elf_brk > elf_bss)) {
			unsigned long nbyte;

			/* There was a PT_LOAD segment with p_memsz > p_filesz
			   before this one. Map anonymous pages, if needed,
			   and clear the area.  */
			retval = set_brk (elf_bss + load_bias,
					  elf_brk + load_bias);
			if (retval) {
				send_sig(SIGKILL, current, 0);
				goto out_free_dentry;
			}
			nbyte = ELF_PAGEOFFSET(elf_bss);
			if (nbyte) {
				nbyte = ELF_MIN_ALIGN - nbyte;
				if (nbyte > elf_brk - elf_bss)
					nbyte = elf_brk - elf_bss;
				if (clear_user((void __user *)elf_bss +
							load_bias, nbyte)) {
					/*
					 * This bss-zeroing can fail if the ELF
					 * file specifies odd protections. So
					 * we don't check the return value
					 */
				}
			}
		}

		if (elf_ppnt->p_flags & PF_R)
			elf_prot |= PROT_READ;
		if (elf_ppnt->p_flags & PF_W)
			elf_prot |= PROT_WRITE;
		if (elf_ppnt->p_flags & PF_X)
			elf_prot |= PROT_EXEC;

		elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;

		vaddr = elf_ppnt->p_vaddr;
		if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
			elf_flags |= MAP_FIXED;
		} else if (loc->elf_ex.e_type == ET_DYN) {
			/* Try and get dynamic programs out of the way of the
			 * default mmap base, as well as whatever program they
			 * might try to exec.  This is because the brk will
			 * follow the loader, and is not movable.  */
			load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
		}

		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
				elf_prot, elf_flags);
		if (BAD_ADDR(error)) {
			send_sig(SIGKILL, current, 0);
			goto out_free_dentry;
		}

		if (!load_addr_set) {
			load_addr_set = 1;
			load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
			if (loc->elf_ex.e_type == ET_DYN) {
				load_bias += error -
					     ELF_PAGESTART(load_bias + vaddr);
				load_addr += load_bias;
				reloc_func_desc = load_bias;
			}
		}
		k = elf_ppnt->p_vaddr;
		if (k < start_code)
			start_code = k;
		if (start_data < k)
			start_data = k;

		/*
		 * Check to see if the section's size will overflow the
		 * allowed task size. Note that p_filesz must always be
		 * <= p_memsz so it is only necessary to check p_memsz.
		 */
		if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
		    elf_ppnt->p_memsz > TASK_SIZE ||
		    TASK_SIZE - elf_ppnt->p_memsz < k) {
			/* set_brk can never work. Avoid overflows. */
			send_sig(SIGKILL, current, 0);
			goto out_free_dentry;
		}

		k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;

		if (k > elf_bss)
			elf_bss = k;
		if ((elf_ppnt->p_flags & PF_X) && end_code < k)
			end_code = k;
		if (end_data < k)
			end_data = k;
		k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
		if (k > elf_brk)
			elf_brk = k;
	}

	loc->elf_ex.e_entry += load_bias;
	elf_bss += load_bias;
	elf_brk += load_bias;
	start_code += load_bias;
	end_code += load_bias;
	start_data += load_bias;
	end_data += load_bias;
	/* Calling set_brk effectively mmaps the pages that we need
	 * for the bss and break sections.  We must do this before
	 * mapping in the interpreter, to make sure it doesn't wind
	 * up getting placed where the bss needs to go.
	 */
	retval = set_brk(elf_bss, elf_brk);
	if (retval) {
		send_sig(SIGKILL, current, 0);
		goto out_free_dentry;
	}
	if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
		send_sig(SIGSEGV, current, 0);
		retval = -EFAULT; /* Nobody gets to see this, but.. */
		goto out_free_dentry;
	}
	if (elf_interpreter) {
		if (interpreter_type == INTERPRETER_AOUT)
			elf_entry = load_aout_interp(&loc->interp_ex,
						     interpreter);
		else
			elf_entry = load_elf_interp(&loc->interp_elf_ex,
						    interpreter,
						    &interp_load_addr);
		if (BAD_ADDR(elf_entry)) {
			force_sig(SIGSEGV, current);
			retval = IS_ERR((void *)elf_entry) ?
					(int)elf_entry : -EINVAL;
			goto out_free_dentry;
		}
		reloc_func_desc = interp_load_addr;

		allow_write_access(interpreter);
		fput(interpreter);
		kfree(elf_interpreter);
	} else {
		elf_entry = loc->elf_ex.e_entry;
		if (BAD_ADDR(elf_entry)) {
			force_sig(SIGSEGV, current);
			retval = -EINVAL;
			goto out_free_dentry;
		}
	}

	kfree(elf_phdata);

	if (interpreter_type != INTERPRETER_AOUT)
		sys_close(elf_exec_fileno);
	set_binfmt(&elf_format);

#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
	retval = arch_setup_additional_pages(bprm, executable_stack);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		goto out;
	}
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */

	compute_creds(bprm);
	current->flags &= ~PF_FORKNOEXEC;
	create_elf_tables(bprm, &loc->elf_ex,
			  (interpreter_type == INTERPRETER_AOUT),
			  load_addr, interp_load_addr);
	/* N.B. passed_fileno might not be initialized? */
	if (interpreter_type == INTERPRETER_AOUT)
		current->mm->arg_start += strlen(passed_fileno) + 1;
	current->mm->end_code = end_code;
	current->mm->start_code = start_code;
	current->mm->start_data = start_data;
	current->mm->end_data = end_data;
	current->mm->start_stack = bprm->p;

	if (current->personality & MMAP_PAGE_ZERO) {
		/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
		   and some applications "depend" upon this behavior.
		   Since we do not have the power to recompile these, we
		   emulate the SVr4 behavior. Sigh. */
		down_write(&current->mm->mmap_sem);
		error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE, 0);
		up_write(&current->mm->mmap_sem);
	}
#ifdef ELF_PLAT_INIT
	/*
	 * The ABI may specify that certain registers be set up in special
	 * ways (on i386 %edx is the address of a DT_FINI function, for
	 * example).  In addition, it may also specify (eg, PowerPC64 ELF)
	 * that the e_entry field is the address of the function descriptor
	 * for the startup routine, rather than the address of the startup
	 * routine itself.  This macro performs whatever initialization to
	 * the regs structure is required as well as any relocations to the
	 * function descriptor entries when executing dynamically linked apps.
	 */
	ELF_PLAT_INIT(regs, reloc_func_desc);
#endif

	start_thread(regs, elf_entry, bprm->p);
	if (unlikely(current->ptrace & PT_PTRACED)) {
		if (current->ptrace & PT_TRACE_EXEC)
			ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
		else
			send_sig(SIGTRAP, current, 0);
	}
	retval = 0;
out:
	kfree(loc);
out_ret:
	return retval;

	/* error cleanup */
out_free_dentry:
	allow_write_access(interpreter);
	if (interpreter)
		fput(interpreter);
out_free_interp:
	kfree(elf_interpreter);
out_free_file:
	sys_close(elf_exec_fileno);
out_free_fh:
	if (files)
		reset_files_struct(current, files);
out_free_ph:
	kfree(elf_phdata);
	goto out;
}
/* This is really simpleminded and specialized - we are loading an
   a.out library that is given an ELF header. */
static int load_elf_library(struct file *file)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long elf_bss, bss, len;
	int retval, error, i, j;
	struct elfhdr elf_ex;

	error = -ENOEXEC;
	retval = kernel_read(file, 0, (char *)&elf_ex, sizeof(elf_ex));
	if (retval != sizeof(elf_ex))
		goto out;

	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	/* First of all, some simple consistency checks */
	if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
	    !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
		goto out;

	/* Now read in all of the header information */

	j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
	/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */

	error = -ENOMEM;
	elf_phdata = kmalloc(j, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	eppnt = elf_phdata;
	error = -ENOEXEC;
	retval = kernel_read(file, elf_ex.e_phoff, (char *)eppnt, j);
	if (retval != j)
		goto out_free_ph;

	for (j = 0, i = 0; i < elf_ex.e_phnum; i++)
		if ((eppnt + i)->p_type == PT_LOAD)
			j++;
	if (j != 1)
		goto out_free_ph;

	while (eppnt->p_type != PT_LOAD)
		eppnt++;

	/* Now use mmap to map the library into memory. */
	down_write(&current->mm->mmap_sem);
	error = do_mmap(file,
			ELF_PAGESTART(eppnt->p_vaddr),
			(eppnt->p_filesz +
			 ELF_PAGEOFFSET(eppnt->p_vaddr)),
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
			(eppnt->p_offset -
			 ELF_PAGEOFFSET(eppnt->p_vaddr)));
	up_write(&current->mm->mmap_sem);
	if (error != ELF_PAGESTART(eppnt->p_vaddr))
		goto out_free_ph;

	elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out_free_ph;
	}

	len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr +
			    ELF_MIN_ALIGN - 1);
	bss = eppnt->p_memsz + eppnt->p_vaddr;
	if (bss > len) {
		down_write(&current->mm->mmap_sem);
		do_brk(len, bss - len);
		up_write(&current->mm->mmap_sem);
	}
	error = 0;

out_free_ph:
	kfree(elf_phdata);
out:
	return error;
}
/*
 * Note that some platforms still use traditional core dumps and not
 * the ELF core dump.  Each platform can select it as appropriate.
 */
#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)

/*
 * ELF core dumper
 *
 * Modelled on fs/exec.c:aout_core_dump()
 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
 */
/*
 * These are the only things you should do on a core-file: use only these
 * functions to write out all the necessary info.
 */
static int dump_write(struct file *file, const void *addr, int nr)
{
	return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
}
static int dump_seek(struct file *file, loff_t off)
{
	if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
		if (file->f_op->llseek(file, off, SEEK_CUR) < 0)
			return 0;
	} else {
		char *buf = (char *)get_zeroed_page(GFP_KERNEL);
		if (!buf)
			return 0;
		while (off > 0) {
			unsigned long n = off;
			if (n > PAGE_SIZE)
				n = PAGE_SIZE;
			if (!dump_write(file, buf, n))
				return 0;
			off -= n;
		}
		free_page((unsigned long)buf);
	}
	return 1;
}
/*
 * Decide whether a segment is worth dumping; default is yes to be
 * sure (missing info is worse than too much; etc).
 * Personally I'd include everything, and use the coredump limit...
 *
 * I think we should skip something. But I am not sure how. H.J.
 */
static int maydump(struct vm_area_struct *vma)
{
	/* The vma can be set up to tell us the answer directly.  */
	if (vma->vm_flags & VM_ALWAYSDUMP)
		return 1;

	/* Do not dump I/O mapped devices or special mappings */
	if (vma->vm_flags & (VM_IO | VM_RESERVED))
		return 0;

	/* Dump shared memory only if mapped from an anonymous file. */
	if (vma->vm_flags & VM_SHARED)
		return vma->vm_file->f_path.dentry->d_inode->i_nlink == 0;

	/* If it hasn't been written to, don't write it out */
	if (!vma->anon_vma)
		return 0;

	return 1;
}
/* An ELF note in memory */
struct memelfnote
{
	const char *name;
	int type;
	unsigned int datasz;
	void *data;
};

static int notesize(struct memelfnote *en)
{
	int sz;

	sz = sizeof(struct elf_note);
	sz += roundup(strlen(en->name) + 1, 4);
	sz += roundup(en->datasz, 4);

	return sz;
}
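/*
 * Illustrative note (not part of the original source): a note named
 * "CORE" carrying an elf_prstatus payload costs sizeof(struct elf_note)
 * for the header, plus strlen("CORE") + 1 = 5 rounded up to 8 bytes for
 * the name, plus the payload size rounded up to a 4-byte boundary,
 * which is exactly what writenote() emits below.
 */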
#define DUMP_WRITE(addr, nr, foffset)	\
	do { if (!dump_write(file, (addr), (nr))) return 0; *foffset += (nr); } while(0)

static int alignfile(struct file *file, loff_t *foffset)
{
	static const char buf[4] = { 0, };
	DUMP_WRITE(buf, roundup(*foffset, 4) - *foffset, foffset);
	return 1;
}

static int writenote(struct memelfnote *men, struct file *file,
			loff_t *foffset)
{
	struct elf_note en;
	en.n_namesz = strlen(men->name) + 1;
	en.n_descsz = men->datasz;
	en.n_type = men->type;

	DUMP_WRITE(&en, sizeof(en), foffset);
	DUMP_WRITE(men->name, en.n_namesz, foffset);
	if (!alignfile(file, foffset))
		return 0;
	DUMP_WRITE(men->data, men->datasz, foffset);
	if (!alignfile(file, foffset))
		return 0;

	return 1;
}
#undef DUMP_WRITE
#define DUMP_WRITE(addr, nr)	\
	if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
		goto end_coredump;
#define DUMP_SEEK(off)	\
	if (!dump_seek(file, (off))) \
		goto end_coredump;

static void fill_elf_header(struct elfhdr *elf, int segs)
{
	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS] = ELF_CLASS;
	elf->e_ident[EI_DATA] = ELF_DATA;
	elf->e_ident[EI_VERSION] = EV_CURRENT;
	elf->e_ident[EI_OSABI] = ELF_OSABI;
	memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);

	elf->e_type = ET_CORE;
	elf->e_machine = ELF_ARCH;
	elf->e_version = EV_CURRENT;
	elf->e_entry = 0;
	elf->e_phoff = sizeof(struct elfhdr);
	elf->e_shoff = 0;
	elf->e_flags = ELF_CORE_EFLAGS;
	elf->e_ehsize = sizeof(struct elfhdr);
	elf->e_phentsize = sizeof(struct elf_phdr);
	elf->e_phnum = segs;
	elf->e_shentsize = 0;
	elf->e_shnum = 0;
	elf->e_shstrndx = 0;
	return;
}
static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
{
	phdr->p_type = PT_NOTE;
	phdr->p_offset = offset;
	phdr->p_vaddr = 0;
	phdr->p_paddr = 0;
	phdr->p_filesz = sz;
	phdr->p_memsz = 0;
	phdr->p_flags = 0;
	phdr->p_align = 0;
	return;
}

static void fill_note(struct memelfnote *note, const char *name, int type,
		unsigned int sz, void *data)
{
	note->name = name;
	note->type = type;
	note->datasz = sz;
	note->data = data;
	return;
}
/*
 * fill up all the fields in prstatus from the given task struct, except
 * registers which need to be filled up separately.
 */
static void fill_prstatus(struct elf_prstatus *prstatus,
		struct task_struct *p, long signr)
{
	prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
	prstatus->pr_sigpend = p->pending.signal.sig[0];
	prstatus->pr_sighold = p->blocked.sig[0];
	prstatus->pr_pid = p->pid;
	prstatus->pr_ppid = p->parent->pid;
	prstatus->pr_pgrp = process_group(p);
	prstatus->pr_sid = process_session(p);
	if (thread_group_leader(p)) {
		/*
		 * This is the record for the group leader.  Add in the
		 * cumulative times of previous dead threads.  This total
		 * won't include the time of each live thread whose state
		 * is included in the core dump.  The final total reported
		 * to our parent process when it calls wait4 will include
		 * those sums as well as the little bit more time it takes
		 * this and each other thread to finish dying after the
		 * core dump synchronization phase.
		 */
		cputime_to_timeval(cputime_add(p->utime, p->signal->utime),
				   &prstatus->pr_utime);
		cputime_to_timeval(cputime_add(p->stime, p->signal->stime),
				   &prstatus->pr_stime);
	} else {
		cputime_to_timeval(p->utime, &prstatus->pr_utime);
		cputime_to_timeval(p->stime, &prstatus->pr_stime);
	}
	cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
	cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
}
static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
		       struct mm_struct *mm)
{
	unsigned int i, len;

	/* first copy the parameters from user space */
	memset(psinfo, 0, sizeof(struct elf_prpsinfo));

	len = mm->arg_end - mm->arg_start;
	if (len >= ELF_PRARGSZ)
		len = ELF_PRARGSZ-1;
	if (copy_from_user(&psinfo->pr_psargs,
			   (const char __user *)mm->arg_start, len))
		return -EFAULT;
	for (i = 0; i < len; i++)
		if (psinfo->pr_psargs[i] == 0)
			psinfo->pr_psargs[i] = ' ';
	psinfo->pr_psargs[len] = 0;

	psinfo->pr_pid = p->pid;
	psinfo->pr_ppid = p->parent->pid;
	psinfo->pr_pgrp = process_group(p);
	psinfo->pr_sid = process_session(p);

	i = p->state ? ffz(~p->state) + 1 : 0;
	psinfo->pr_state = i;
	psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
	psinfo->pr_zomb = psinfo->pr_sname == 'Z';
	psinfo->pr_nice = task_nice(p);
	psinfo->pr_flag = p->flags;
	SET_UID(psinfo->pr_uid, p->uid);
	SET_GID(psinfo->pr_gid, p->gid);
	strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));

	return 0;
}
/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status
{
	struct list_head list;
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	elf_fpregset_t fpu;		/* NT_PRFPREG */
	struct task_struct *thread;
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t xfpu;		/* NT_PRXFPREG */
#endif
	struct memelfnote notes[3];
	int num_notes;
};
/*
 * In order to add the specific thread information for the elf file format,
 * we need to keep a linked list of every threads pr_status and then create
 * a single section for them in the final core file.
 */
static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
{
	int sz = 0;
	struct task_struct *p = t->thread;
	t->num_notes = 0;

	fill_prstatus(&t->prstatus, p, signr);
	elf_core_copy_task_regs(p, &t->prstatus.pr_reg);

	fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
		  &(t->prstatus));
	t->num_notes++;
	sz += notesize(&t->notes[0]);

	if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL,
								&t->fpu))) {
		fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
			  &(t->fpu));
		t->num_notes++;
		sz += notesize(&t->notes[1]);
	}

#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
		fill_note(&t->notes[2], "LINUX", NT_PRXFPREG, sizeof(t->xfpu),
			  &t->xfpu);
		t->num_notes++;
		sz += notesize(&t->notes[2]);
	}
#endif
	return sz;
}
/*
 * Actual dumper
 *
 * This is a two-pass process; first we find the offsets of the bits,
 * and then they are actually written out.  If we run out of core limit
 * we just truncate.
 */
static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file)
{
#define	NUM_NOTES	6
	int has_dumped = 0;
	mm_segment_t fs;
	int segs;
	size_t size = 0;
	int i;
	struct vm_area_struct *vma;
	struct elfhdr *elf = NULL;
	loff_t offset = 0, dataoff, foffset;
	unsigned long limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
	int numnote;
	struct memelfnote *notes = NULL;
	struct elf_prstatus *prstatus = NULL;	/* NT_PRSTATUS */
	struct elf_prpsinfo *psinfo = NULL;	/* NT_PRPSINFO */
	struct task_struct *g, *p;
	LIST_HEAD(thread_list);
	struct list_head *t;
	elf_fpregset_t *fpu = NULL;
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t *xfpu = NULL;
#endif
	int thread_status_size = 0;
	elf_addr_t *auxv;
	/*
	 * We no longer stop all VM operations.
	 *
	 * This is because those processes that could possibly change map_count
	 * or the mmap / vma pages are now blocked in do_exit on current
	 * finishing this core dump.
	 *
	 * Only ptrace can touch these memory addresses, but it doesn't change
	 * the map_count or the pages allocated. So no possibility of crashing
	 * exists while dumping the mm->vm_next areas to the core file.
	 */

	/* alloc memory for large data structures: too large to be on stack */
	elf = kmalloc(sizeof(*elf), GFP_KERNEL);
	if (!elf)
		goto cleanup;
	prstatus = kmalloc(sizeof(*prstatus), GFP_KERNEL);
	if (!prstatus)
		goto cleanup;
	psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
	if (!psinfo)
		goto cleanup;
	notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL);
	if (!notes)
		goto cleanup;
	fpu = kmalloc(sizeof(*fpu), GFP_KERNEL);
	if (!fpu)
		goto cleanup;
#ifdef ELF_CORE_COPY_XFPREGS
	xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL);
	if (!xfpu)
		goto cleanup;
#endif
	if (signr) {
		struct elf_thread_status *tmp;
		rcu_read_lock();
		do_each_thread(g,p)
			if (current->mm == p->mm && current != p) {
				tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
				if (!tmp) {
					rcu_read_unlock();
					goto cleanup;
				}
				tmp->thread = p;
				list_add(&tmp->list, &thread_list);
			}
		while_each_thread(g,p);
		rcu_read_unlock();
		list_for_each(t, &thread_list) {
			struct elf_thread_status *tmp;
			int sz;

			tmp = list_entry(t, struct elf_thread_status, list);
			sz = elf_dump_thread_status(signr, tmp);
			thread_status_size += sz;
		}
	}
	/* now collect the dump for the current */
	memset(prstatus, 0, sizeof(*prstatus));
	fill_prstatus(prstatus, current, signr);
	elf_core_copy_regs(&prstatus->pr_reg, regs);

	segs = current->mm->map_count;
#ifdef ELF_CORE_EXTRA_PHDRS
	segs += ELF_CORE_EXTRA_PHDRS;
#endif

	/* Set up header */
	fill_elf_header(elf, segs + 1);	/* including notes section */

	has_dumped = 1;
	current->flags |= PF_DUMPCORE;
	/*
	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	 */

	fill_note(notes + 0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);
	fill_psinfo(psinfo, current->group_leader, current->mm);
	fill_note(notes + 1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);

	numnote = 2;

	auxv = (elf_addr_t *)current->mm->saved_auxv;

	i = 0;
	do
		i += 2;
	while (auxv[i - 2] != AT_NULL);
	fill_note(&notes[numnote++], "CORE", NT_AUXV,
		  i * sizeof(elf_addr_t), auxv);

	/* Try to dump the FPU. */
	if ((prstatus->pr_fpvalid =
			elf_core_copy_task_fpregs(current, regs, fpu)))
		fill_note(notes + numnote++,
			  "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(current, xfpu))
		fill_note(notes + numnote++,
			  "LINUX", NT_PRXFPREG, sizeof(*xfpu), xfpu);
#endif

	fs = get_fs();
	set_fs(KERNEL_DS);
	DUMP_WRITE(elf, sizeof(*elf));
	offset += sizeof(*elf);				/* Elf header */
	offset += (segs + 1) * sizeof(struct elf_phdr);	/* Program headers */
	foffset = offset;

	/* Write notes phdr entry */
	{
		struct elf_phdr phdr;
		int sz = 0;

		for (i = 0; i < numnote; i++)
			sz += notesize(notes + i);

		sz += thread_status_size;

#ifdef ELF_CORE_WRITE_EXTRA_NOTES
		sz += ELF_CORE_EXTRA_NOTES_SIZE;
#endif

		fill_elf_note_phdr(&phdr, sz, offset);
		offset += sz;
		DUMP_WRITE(&phdr, sizeof(phdr));
	}

	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
	/* Write program headers for segments dump */
	for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
		struct elf_phdr phdr;
		size_t sz;

		sz = vma->vm_end - vma->vm_start;

		phdr.p_type = PT_LOAD;
		phdr.p_offset = offset;
		phdr.p_vaddr = vma->vm_start;
		phdr.p_paddr = 0;
		phdr.p_filesz = maydump(vma) ? sz : 0;
		phdr.p_memsz = sz;
		offset += phdr.p_filesz;
		phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
		if (vma->vm_flags & VM_WRITE)
			phdr.p_flags |= PF_W;
		if (vma->vm_flags & VM_EXEC)
			phdr.p_flags |= PF_X;
		phdr.p_align = ELF_EXEC_PAGESIZE;

		DUMP_WRITE(&phdr, sizeof(phdr));
	}

#ifdef ELF_CORE_WRITE_EXTRA_PHDRS
	ELF_CORE_WRITE_EXTRA_PHDRS;
#endif
	/* write out the notes section */
	for (i = 0; i < numnote; i++)
		if (!writenote(notes + i, file, &foffset))
			goto end_coredump;

#ifdef ELF_CORE_WRITE_EXTRA_NOTES
	ELF_CORE_WRITE_EXTRA_NOTES;
#endif

	/* write out the thread status notes section */
	list_for_each(t, &thread_list) {
		struct elf_thread_status *tmp =
				list_entry(t, struct elf_thread_status, list);

		for (i = 0; i < tmp->num_notes; i++)
			if (!writenote(&tmp->notes[i], file, &foffset))
				goto end_coredump;
	}

	/* Align to page */
	DUMP_SEEK(dataoff - foffset);
	for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
		unsigned long addr;

		if (!maydump(vma))
			continue;

		for (addr = vma->vm_start;
		     addr < vma->vm_end;
		     addr += PAGE_SIZE) {
			struct page *page;
			struct vm_area_struct *vma;

			if (get_user_pages(current, current->mm, addr, 1, 0, 1,
						&page, &vma) <= 0) {
				DUMP_SEEK(PAGE_SIZE);
			} else {
				if (page == ZERO_PAGE(addr)) {
					DUMP_SEEK(PAGE_SIZE);
				} else {
					void *kaddr;
					flush_cache_page(vma, addr,
							 page_to_pfn(page));
					kaddr = kmap(page);
					if ((size += PAGE_SIZE) > limit ||
					    !dump_write(file, kaddr,
							PAGE_SIZE)) {
						kunmap(page);
						page_cache_release(page);
						goto end_coredump;
					}
					kunmap(page);
				}
				page_cache_release(page);
			}
		}
	}

#ifdef ELF_CORE_WRITE_EXTRA_DATA
	ELF_CORE_WRITE_EXTRA_DATA;
#endif
end_coredump:
	set_fs(fs);

cleanup:
	while (!list_empty(&thread_list)) {
		struct list_head *tmp = thread_list.next;
		list_del(tmp);
		kfree(list_entry(tmp, struct elf_thread_status, list));
	}

	kfree(elf);
	kfree(prstatus);
	kfree(psinfo);
	kfree(notes);
	kfree(fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	kfree(xfpu);
#endif
	return has_dumped;
#undef NUM_NOTES
}

#endif		/* USE_ELF_CORE_DUMP */
static int __init init_elf_binfmt(void)
{
	return register_binfmt(&elf_format);
}

static void __exit exit_elf_binfmt(void)
{
	/* Remove the COFF and ELF loaders. */
	unregister_binfmt(&elf_format);
}

core_initcall(init_elf_binfmt);
module_exit(exit_elf_binfmt);
MODULE_LICENSE("GPL");