/*
 * linux/fs/binfmt_elf.c
 *
 * These are the functions used to load ELF format executables as used
 * on SVr4 machines.  Information on the format may be found in the book
 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
 * Tools".
 *
 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/a.out.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/binfmts.h>
#include <linux/string.h>
#include <linux/file.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/personality.h>
#include <linux/elfcore.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/uaccess.h>
#include <asm/param.h>
static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs);
static int load_elf_library(struct file *);
static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
				int, int);
/*
 * If we don't support core dumping, then supply a NULL so we
 * don't even try.
 */
#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file);
#else
#define elf_core_dump	NULL
#endif
#if ELF_EXEC_PAGESIZE > PAGE_SIZE
#define ELF_MIN_ALIGN	ELF_EXEC_PAGESIZE
#else
#define ELF_MIN_ALIGN	PAGE_SIZE
#endif

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS	0
#endif
#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
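/*
 * Editor's note - a worked example, not part of the original source.
 * Assuming ELF_MIN_ALIGN is 0x1000 (4K), then for v = 0x08049234:
 *
 *   ELF_PAGESTART(v)  = 0x08049234 & ~0xfff           = 0x08049000
 *   ELF_PAGEOFFSET(v) = 0x08049234 &  0xfff           = 0x234
 *   ELF_PAGEALIGN(v)  = (0x08049234 + 0xfff) & ~0xfff = 0x0804a000
 *
 * i.e. PAGESTART rounds down to the containing page, PAGEOFFSET keeps the
 * offset within that page, and PAGEALIGN rounds up to the next page boundary.
 */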
static struct linux_binfmt elf_format = {
	.module		= THIS_MODULE,
	.load_binary	= load_elf_binary,
	.load_shlib	= load_elf_library,
	.core_dump	= elf_core_dump,
	.min_coredump	= ELF_EXEC_PAGESIZE,
};

#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)
static int set_brk(unsigned long start, unsigned long end)
{
	start = ELF_PAGEALIGN(start);
	end = ELF_PAGEALIGN(end);
	if (end > start) {
		unsigned long addr;
		down_write(&current->mm->mmap_sem);
		addr = do_brk(start, end - start);
		up_write(&current->mm->mmap_sem);
		if (BAD_ADDR(addr))
			return addr;
	}
	current->mm->start_brk = current->mm->brk = end;
	return 0;
}
/* We need to explicitly zero any fractional pages
   after the data section (i.e. bss).  This would
   contain the junk from the file that should not
   be in memory.
 */
static int padzero(unsigned long elf_bss)
{
	unsigned long nbyte;

	nbyte = ELF_PAGEOFFSET(elf_bss);
	if (nbyte) {
		nbyte = ELF_MIN_ALIGN - nbyte;
		if (clear_user((void __user *) elf_bss, nbyte))
			return -EFAULT;
	}
	return 0;
}
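/*
 * Editor's note - an illustration, not part of the original source.
 * If the mapped file data ends at elf_bss = 0x0804a123 and ELF_MIN_ALIGN is
 * 0x1000, padzero() clears the remaining 0xedd bytes of that page
 * (0x0804a123..0x0804afff), so stale bytes mapped in from the file never
 * leak into the zero-initialised bss the program expects.
 */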
/* Let's use some macros to make this stack manipulation a little clearer */
#ifdef CONFIG_STACK_GROWSUP
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
#define STACK_ROUND(sp, items) \
	((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ \
	elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \
	old_sp; })
#else
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
#define STACK_ROUND(sp, items) \
	(((unsigned long) (sp - items)) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
#endif
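/*
 * Editor's note - an illustration of the stack-grows-down variants above,
 * not part of the original source.  With sp = 0xbffff800:
 *
 *   STACK_ALLOC(sp, 16)   decrements sp to 0xbffff7f0 and yields that
 *                         address (space is carved out below the old sp);
 *   STACK_ADD(sp, 4)      points 4 elf_addr_t slots below sp;
 *   STACK_ROUND(sp, 4)    is that address rounded down to a 16-byte
 *                         boundary, as the ABI stack alignment requires.
 */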
static int
create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
		int interp_aout, unsigned long load_addr,
		unsigned long interp_load_addr)
{
	unsigned long p = bprm->p;
	int argc = bprm->argc;
	int envc = bprm->envc;
	elf_addr_t __user *argv;
	elf_addr_t __user *envp;
	elf_addr_t __user *sp;
	elf_addr_t __user *u_platform;
	const char *k_platform = ELF_PLATFORM;
	int items;
	elf_addr_t *elf_info;
	int ei_index = 0;
	struct task_struct *tsk = current;

	/*
	 * If this architecture has a platform capability string, copy it
	 * to userspace.  In some cases (Sparc), this info is impossible
	 * for userspace to get any other way, in others (i386) it is
	 * merely difficult.
	 */
	u_platform = NULL;
	if (k_platform) {
		size_t len = strlen(k_platform) + 1;

		/*
		 * In some cases (e.g. Hyper-Threading), we want to avoid L1
		 * evictions by the processes running on the same package. One
		 * thing we can do is to shuffle the initial stack for them.
		 */
		p = arch_align_stack(p);

		u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (__copy_to_user(u_platform, k_platform, len))
			return -EFAULT;
	}

	/* Create the ELF interpreter info */
	elf_info = (elf_addr_t *)current->mm->saved_auxv;
#define NEW_AUX_ENT(id, val) \
	do { \
		elf_info[ei_index++] = id; \
		elf_info[ei_index++] = val; \
	} while (0)

#ifdef ARCH_DLINFO
	/*
	 * ARCH_DLINFO must come first so PPC can do its special alignment of
	 * AUXV.
	 */
	ARCH_DLINFO;
#endif
	NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
	NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
	NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
	NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
	NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
	NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
	NEW_AUX_ENT(AT_BASE, interp_load_addr);
	NEW_AUX_ENT(AT_FLAGS, 0);
	NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
	NEW_AUX_ENT(AT_UID, tsk->uid);
	NEW_AUX_ENT(AT_EUID, tsk->euid);
	NEW_AUX_ENT(AT_GID, tsk->gid);
	NEW_AUX_ENT(AT_EGID, tsk->egid);
	NEW_AUX_ENT(AT_SECURE, security_bprm_secureexec(bprm));
	if (k_platform) {
		NEW_AUX_ENT(AT_PLATFORM,
			    (elf_addr_t)(unsigned long)u_platform);
	}
	if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
		NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
	}
#undef NEW_AUX_ENT
	/* AT_NULL is zero; clear the rest too */
	memset(&elf_info[ei_index], 0,
	       sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);

	/* And advance past the AT_NULL entry. */
	ei_index += 2;

	sp = STACK_ADD(p, ei_index);

	items = (argc + 1) + (envc + 1);
	if (interp_aout) {
		items += 3; /* a.out interpreters require argv & envp too */
	} else {
		items += 1; /* ELF interpreters only put argc on the stack */
	}
	bprm->p = STACK_ROUND(sp, items);

	/* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
	sp = (elf_addr_t __user *)bprm->p - items - ei_index;
	bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
#else
	sp = (elf_addr_t __user *)bprm->p;
#endif

	/* Now, let's put argc (and argv, envp if appropriate) on the stack */
	if (__put_user(argc, sp++))
		return -EFAULT;
	if (interp_aout) {
		argv = sp + 2;
		envp = argv + argc + 1;
		if (__put_user((elf_addr_t)(unsigned long)argv, sp++) ||
		    __put_user((elf_addr_t)(unsigned long)envp, sp++))
			return -EFAULT;
	} else {
		argv = sp;
		envp = argv + argc + 1;
	}

	/* Populate argv and envp */
	p = current->mm->arg_end = current->mm->arg_start;
	while (argc-- > 0) {
		size_t len;
		if (__put_user((elf_addr_t)p, argv++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
			return 0;
		p += len;
	}
	if (__put_user(0, argv))
		return -EFAULT;
	current->mm->arg_end = current->mm->env_start = p;
	while (envc-- > 0) {
		size_t len;
		if (__put_user((elf_addr_t)p, envp++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, PAGE_SIZE*MAX_ARG_PAGES);
		if (!len || len > PAGE_SIZE*MAX_ARG_PAGES)
			return 0;
		p += len;
	}
	if (__put_user(0, envp))
		return -EFAULT;
	current->mm->env_end = p;

	/* Put the elf_info on the stack in the right place.  */
	sp = (elf_addr_t __user *)envp + 1;
	if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
		return -EFAULT;
	return 0;
}
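/*
 * Editor's note - a sketch of the result, not part of the original source.
 * On a stack-grows-down machine, create_elf_tables() leaves the new
 * program's stack looking roughly like this, from low to high addresses:
 *
 *   argc
 *   argv[0] ... argv[argc-1], NULL
 *   envp[0] ... envp[envc-1], NULL
 *   auxv pairs (AT_HWCAP, AT_PAGESZ, ..., terminated by AT_NULL)
 *   platform string, argument and environment strings
 *
 * bprm->p points at argc, which is where start_thread() will aim the user
 * stack pointer.
 */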
#ifndef elf_map

static unsigned long elf_map(struct file *filep, unsigned long addr,
		struct elf_phdr *eppnt, int prot, int type)
{
	unsigned long map_addr;
	unsigned long pageoffset = ELF_PAGEOFFSET(eppnt->p_vaddr);

	down_write(&current->mm->mmap_sem);
	/* mmap() will return -EINVAL if given a zero size, but a
	 * segment with zero filesize is perfectly valid */
	if (eppnt->p_filesz + pageoffset)
		map_addr = do_mmap(filep, ELF_PAGESTART(addr),
				   eppnt->p_filesz + pageoffset, prot, type,
				   eppnt->p_offset - pageoffset);
	else
		map_addr = ELF_PAGESTART(addr);
	up_write(&current->mm->mmap_sem);
	return map_addr;
}

#endif /* !elf_map */
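/*
 * Editor's note - a worked example, not part of the original source,
 * assuming ELF_MIN_ALIGN = 0x1000.  For a PT_LOAD entry with
 * p_vaddr = 0x08048134, p_offset = 0x134 and p_filesz = 0x1000,
 * elf_map() computes pageoffset = 0x134 and issues
 *
 *   do_mmap(filep, 0x08048000, 0x1134, prot, type, 0x0);
 *
 * i.e. the address and the file offset are rounded down by the same amount,
 * so the segment keeps its offset within the page.
 */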
/* This is much more generalized than the library routine read function,
   so we keep this separate.  Technically the library read function
   is only provided so that we can read a.out libraries that have
   an ELF header */

static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
		struct file *interpreter, unsigned long *interp_load_addr)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long load_addr = 0;
	int load_addr_set = 0;
	unsigned long last_bss = 0, elf_bss = 0;
	unsigned long error = ~0UL;
	int retval, i, size;

	/* First of all, some simple consistency checks */
	if (interp_elf_ex->e_type != ET_EXEC &&
	    interp_elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(interp_elf_ex))
		goto out;
	if (!interpreter->f_op || !interpreter->f_op->mmap)
		goto out;

	/*
	 * If the size of this structure has changed, then punt, since
	 * we will be doing the wrong thing.
	 */
	if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
		goto out;
	if (interp_elf_ex->e_phnum < 1 ||
	    interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;

	/* Now read in all of the header information */
	size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
	if (size > ELF_MIN_ALIGN)
		goto out;
	elf_phdata = kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(interpreter, interp_elf_ex->e_phoff,
			     (char *)elf_phdata, size);
	error = -EIO;
	if (retval != size) {
		if (retval < 0)
			error = retval;
		goto out_close;
	}

	eppnt = elf_phdata;
	for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
		if (eppnt->p_type == PT_LOAD) {
			int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
			int elf_prot = 0;
			unsigned long vaddr = 0;
			unsigned long k, map_addr;

			if (eppnt->p_flags & PF_R)
				elf_prot = PROT_READ;
			if (eppnt->p_flags & PF_W)
				elf_prot |= PROT_WRITE;
			if (eppnt->p_flags & PF_X)
				elf_prot |= PROT_EXEC;
			vaddr = eppnt->p_vaddr;
			if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
				elf_type |= MAP_FIXED;

			map_addr = elf_map(interpreter, load_addr + vaddr,
					   eppnt, elf_prot, elf_type);
			error = map_addr;
			if (BAD_ADDR(map_addr))
				goto out_close;

			if (!load_addr_set &&
			    interp_elf_ex->e_type == ET_DYN) {
				load_addr = map_addr - ELF_PAGESTART(vaddr);
				load_addr_set = 1;
			}

			/*
			 * Check to see if the section's size will overflow the
			 * allowed task size. Note that p_filesz must always be
			 * <= p_memsz so it's only necessary to check p_memsz.
			 */
			k = load_addr + eppnt->p_vaddr;
			if (BAD_ADDR(k) ||
			    eppnt->p_filesz > eppnt->p_memsz ||
			    eppnt->p_memsz > TASK_SIZE ||
			    TASK_SIZE - eppnt->p_memsz < k) {
				error = -ENOMEM;
				goto out_close;
			}

			/*
			 * Find the end of the file mapping for this phdr, and
			 * keep track of the largest address we see for this.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
			if (k > elf_bss)
				elf_bss = k;

			/*
			 * Do the same thing for the memory mapping - between
			 * elf_bss and last_bss is the bss section.
			 */
			k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
			if (k > last_bss)
				last_bss = k;
		}
	}

	/*
	 * Now fill out the bss section. First pad the last page up
	 * to the page boundary, and then perform a mmap to make sure
	 * that there are zero-mapped pages up to and including the
	 * last bss page.
	 */
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out_close;
	}

	/* What we have mapped so far */
	elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);

	/* Map the last of the bss segment */
	if (last_bss > elf_bss) {
		down_write(&current->mm->mmap_sem);
		error = do_brk(elf_bss, last_bss - elf_bss);
		up_write(&current->mm->mmap_sem);
		if (BAD_ADDR(error))
			goto out_close;
	}

	*interp_load_addr = load_addr;
	error = ((unsigned long)interp_elf_ex->e_entry) + load_addr;

out_close:
	kfree(elf_phdata);
out:
	return error;
}
static unsigned long load_aout_interp(struct exec *interp_ex,
		struct file *interpreter)
{
	unsigned long text_data, elf_entry = ~0UL;
	char __user * addr;
	loff_t offset;

	current->mm->end_code = interp_ex->a_text;
	text_data = interp_ex->a_text + interp_ex->a_data;
	current->mm->end_data = text_data;
	current->mm->brk = interp_ex->a_bss + text_data;

	switch (N_MAGIC(*interp_ex)) {
	case OMAGIC:
		offset = 32;
		addr = (char __user *)0;
		break;
	case ZMAGIC:
	case QMAGIC:
		offset = N_TXTOFF(*interp_ex);
		addr = (char __user *)N_TXTADDR(*interp_ex);
		break;
	default:
		goto out;
	}

	down_write(&current->mm->mmap_sem);
	do_brk(0, text_data);
	up_write(&current->mm->mmap_sem);
	if (!interpreter->f_op || !interpreter->f_op->read)
		goto out;
	if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
		goto out;
	flush_icache_range((unsigned long)addr,
			   (unsigned long)addr + text_data);

	down_write(&current->mm->mmap_sem);
	do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
		interp_ex->a_bss);
	up_write(&current->mm->mmap_sem);
	elf_entry = interp_ex->a_entry;

out:
	return elf_entry;
}
/*
 * These are the functions used to load ELF style executables and shared
 * libraries.  There is no binary dependent code anywhere else.
 */

#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
#define INTERPRETER_ELF 2

#ifndef STACK_RND_MASK
#define STACK_RND_MASK 0x7ff		/* with 4K pages 8MB of VA */
#endif
static unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned int random_variable = 0;

	if ((current->flags & PF_RANDOMIZE) &&
		!(current->personality & ADDR_NO_RANDOMIZE)) {
		random_variable = get_random_int() & STACK_RND_MASK;
		random_variable <<= PAGE_SHIFT;
	}
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top) + random_variable;
#else
	return PAGE_ALIGN(stack_top) - random_variable;
#endif
}
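/*
 * Editor's note - an illustration, not part of the original source.
 * With the default STACK_RND_MASK of 0x7ff and 4K pages, the offset is
 * (get_random_int() & 0x7ff) << 12, i.e. 0 to 0x7ff000 bytes (just under
 * 8MB of VA), subtracted from - or, with CONFIG_STACK_GROWSUP, added to -
 * the page-aligned stack top.
 */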
static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
{
	struct file *interpreter = NULL; /* to shut gcc up */
	unsigned long load_addr = 0, load_bias = 0;
	int load_addr_set = 0;
	char *elf_interpreter = NULL;
	unsigned int interpreter_type = INTERPRETER_NONE;
	unsigned char ibcs2_interpreter = 0;
	unsigned long error;
	struct elf_phdr *elf_ppnt, *elf_phdata;
	unsigned long elf_bss, elf_brk;
	int elf_exec_fileno;
	int retval, i;
	unsigned int size;
	unsigned long elf_entry, interp_load_addr = 0;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long reloc_func_desc = 0;
	char passed_fileno[6];
	struct files_struct *files;
	int executable_stack = EXSTACK_DEFAULT;
	unsigned long def_flags = 0;
	struct {
		struct elfhdr elf_ex;
		struct elfhdr interp_elf_ex;
		struct exec interp_ex;
	} *loc;

	loc = kmalloc(sizeof(*loc), GFP_KERNEL);
	if (!loc) {
		retval = -ENOMEM;
		goto out_ret;
	}

	/* Get the exec-header */
	loc->elf_ex = *((struct elfhdr *)bprm->buf);

	retval = -ENOEXEC;
	/* First of all, some simple consistency checks */
	if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(&loc->elf_ex))
		goto out;
	if (!bprm->file->f_op || !bprm->file->f_op->mmap)
		goto out;

	/* Now read in all of the header information */
	if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr))
		goto out;
	if (loc->elf_ex.e_phnum < 1 ||
	    loc->elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;
	size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr);
	retval = -ENOMEM;
	elf_phdata = kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(bprm->file, loc->elf_ex.e_phoff,
			     (char *)elf_phdata, size);
	if (retval != size) {
		if (retval >= 0)
			retval = -EIO;
		goto out_free_ph;
	}

	files = current->files; /* Refcounted so ok */
	retval = unshare_files();
	if (retval < 0)
		goto out_free_ph;
	if (files == current->files) {
		put_files_struct(files);
		files = NULL;
	}

	/* exec will make our files private anyway, but for the a.out
	   loader stuff we need to do it earlier */
	retval = get_unused_fd();
	if (retval < 0)
		goto out_free_fh;
	get_file(bprm->file);
	fd_install(elf_exec_fileno = retval, bprm->file);

	elf_ppnt = elf_phdata;
	elf_bss = 0;
	elf_brk = 0;

	start_code = ~0UL;
	end_code = 0;
	start_data = 0;
	end_data = 0;

	for (i = 0; i < loc->elf_ex.e_phnum; i++) {
		if (elf_ppnt->p_type == PT_INTERP) {
			/* This is the program interpreter used for
			 * shared libraries - for now assume that this
			 * is an a.out format binary
			 */
			retval = -ENOEXEC;
			if (elf_ppnt->p_filesz > PATH_MAX ||
			    elf_ppnt->p_filesz < 2)
				goto out_free_file;

			retval = -ENOMEM;
			elf_interpreter = kmalloc(elf_ppnt->p_filesz,
						  GFP_KERNEL);
			if (!elf_interpreter)
				goto out_free_file;

			retval = kernel_read(bprm->file, elf_ppnt->p_offset,
					     elf_interpreter,
					     elf_ppnt->p_filesz);
			if (retval != elf_ppnt->p_filesz) {
				if (retval >= 0)
					retval = -EIO;
				goto out_free_interp;
			}
			/* make sure path is NULL terminated */
			retval = -ENOEXEC;
			if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
				goto out_free_interp;

			/* If the program interpreter is one of these two,
			 * then assume an iBCS2 image. Otherwise assume
			 * a native linux image.
			 */
			if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
			    strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0)
				ibcs2_interpreter = 1;

			/*
			 * The early SET_PERSONALITY here is so that the lookup
			 * for the interpreter happens in the namespace of the
			 * to-be-execed image.  SET_PERSONALITY can select an
			 * alternate root.
			 *
			 * However, SET_PERSONALITY is NOT allowed to switch
			 * this task into the new image's memory mapping
			 * policy - that is, TASK_SIZE must still evaluate to
			 * that which is appropriate to the execing application.
			 * This is because exit_mmap() needs to have TASK_SIZE
			 * evaluate to the size of the old image.
			 *
			 * So if (say) a 64-bit application is execing a 32-bit
			 * application it is the architecture's responsibility
			 * to defer changing the value of TASK_SIZE until the
			 * switch really is going to happen - do this in
			 * flush_thread().	- akpm
			 */
			SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);

			interpreter = open_exec(elf_interpreter);
			retval = PTR_ERR(interpreter);
			if (IS_ERR(interpreter))
				goto out_free_interp;

			/*
			 * If the binary is not readable then enforce
			 * mm->dumpable = 0 regardless of the interpreter's
			 * permissions.
			 */
			if (file_permission(interpreter, MAY_READ) < 0)
				bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;

			retval = kernel_read(interpreter, 0, bprm->buf,
					     BINPRM_BUF_SIZE);
			if (retval != BINPRM_BUF_SIZE) {
				if (retval >= 0)
					retval = -EIO;
				goto out_free_dentry;
			}

			/* Get the exec headers */
			loc->interp_ex = *((struct exec *)bprm->buf);
			loc->interp_elf_ex = *((struct elfhdr *)bprm->buf);
			break;
		}
		elf_ppnt++;
	}

	elf_ppnt = elf_phdata;
	for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
		if (elf_ppnt->p_type == PT_GNU_STACK) {
			if (elf_ppnt->p_flags & PF_X)
				executable_stack = EXSTACK_ENABLE_X;
			else
				executable_stack = EXSTACK_DISABLE_X;
			break;
		}

	/* Some simple consistency checks for the interpreter */
	if (elf_interpreter) {
		interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;

		/* Now figure out which format our binary is */
		if ((N_MAGIC(loc->interp_ex) != OMAGIC) &&
		    (N_MAGIC(loc->interp_ex) != ZMAGIC) &&
		    (N_MAGIC(loc->interp_ex) != QMAGIC))
			interpreter_type = INTERPRETER_ELF;

		if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
			interpreter_type &= ~INTERPRETER_ELF;

		retval = -ELIBBAD;
		if (!interpreter_type)
			goto out_free_dentry;

		/* Make sure only one type was selected */
		if ((interpreter_type & INTERPRETER_ELF) &&
		     interpreter_type != INTERPRETER_ELF) {
			// FIXME - ratelimit this before re-enabling
			// printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
			interpreter_type = INTERPRETER_ELF;
		}
		/* Verify the interpreter has a valid arch */
		if ((interpreter_type == INTERPRETER_ELF) &&
		    !elf_check_arch(&loc->interp_elf_ex))
			goto out_free_dentry;
	} else {
		/* Executables without an interpreter also need a personality */
		SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
	}

	/* OK, we are done with that, now set up the arg stuff,
	   and then start this sucker up */
	if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) {
		char *passed_p = passed_fileno;
		sprintf(passed_fileno, "%d", elf_exec_fileno);

		if (elf_interpreter) {
			retval = copy_strings_kernel(1, &passed_p, bprm);
			if (retval)
				goto out_free_dentry;
			bprm->argc++;
		}
	}

	/* Flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	if (retval)
		goto out_free_dentry;

	/* Discard our unneeded old files struct */
	if (files) {
		put_files_struct(files);
		files = NULL;
	}

	/* OK, This is the point of no return */
	current->mm->start_data = 0;
	current->mm->end_data = 0;
	current->mm->end_code = 0;
	current->mm->mmap = NULL;
	current->flags &= ~PF_FORKNOEXEC;
	current->mm->def_flags = def_flags;

	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
	   may depend on the personality.  */
	SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
	if (elf_read_implies_exec(loc->elf_ex, executable_stack))
		current->personality |= READ_IMPLIES_EXEC;

	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		current->flags |= PF_RANDOMIZE;
	arch_pick_mmap_layout(current->mm);

	/* Do this so that we can load the interpreter, if need be.  We will
	   change some of these later */
	current->mm->free_area_cache = current->mm->mmap_base;
	current->mm->cached_hole_size = 0;
	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
				 executable_stack);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		goto out_free_dentry;
	}

	current->mm->start_stack = bprm->p;

	/* Now we do a little grungy work by mmaping the ELF image into
	   the correct location in memory.  At this point, we assume that
	   the image should be loaded at fixed address, not at a variable
	   address. */
	for (i = 0, elf_ppnt = elf_phdata;
	     i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
		int elf_prot = 0, elf_flags;
		unsigned long k, vaddr;

		if (elf_ppnt->p_type != PT_LOAD)
			continue;

		if (unlikely (elf_brk > elf_bss)) {
			unsigned long nbyte;

			/* There was a PT_LOAD segment with p_memsz > p_filesz
			   before this one. Map anonymous pages, if needed,
			   and clear the area.  */
			retval = set_brk (elf_bss + load_bias,
					  elf_brk + load_bias);
			if (retval) {
				send_sig(SIGKILL, current, 0);
				goto out_free_dentry;
			}
			nbyte = ELF_PAGEOFFSET(elf_bss);
			if (nbyte) {
				nbyte = ELF_MIN_ALIGN - nbyte;
				if (nbyte > elf_brk - elf_bss)
					nbyte = elf_brk - elf_bss;
				if (clear_user((void __user *)elf_bss +
							load_bias, nbyte)) {
					/*
					 * This bss-zeroing can fail if the ELF
					 * file specifies odd protections. So
					 * we don't check the return value
					 */
				}
			}
		}

		if (elf_ppnt->p_flags & PF_R)
			elf_prot |= PROT_READ;
		if (elf_ppnt->p_flags & PF_W)
			elf_prot |= PROT_WRITE;
		if (elf_ppnt->p_flags & PF_X)
			elf_prot |= PROT_EXEC;

		elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;

		vaddr = elf_ppnt->p_vaddr;
		if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
			elf_flags |= MAP_FIXED;
		} else if (loc->elf_ex.e_type == ET_DYN) {
			/* Try and get dynamic programs out of the way of the
			 * default mmap base, as well as whatever program they
			 * might try to exec.  This is because the brk will
			 * follow the loader, and is not movable.  */
			load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
		}

		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
				elf_prot, elf_flags);
		if (BAD_ADDR(error)) {
			send_sig(SIGKILL, current, 0);
			goto out_free_dentry;
		}

		if (!load_addr_set) {
			load_addr_set = 1;
			load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
			if (loc->elf_ex.e_type == ET_DYN) {
				load_bias += error -
					     ELF_PAGESTART(load_bias + vaddr);
				load_addr += load_bias;
				reloc_func_desc = load_bias;
			}
		}
		k = elf_ppnt->p_vaddr;
		if (k < start_code)
			start_code = k;
		if (start_data < k)
			start_data = k;

		/*
		 * Check to see if the section's size will overflow the
		 * allowed task size. Note that p_filesz must always be
		 * <= p_memsz so it is only necessary to check p_memsz.
		 */
		if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
		    elf_ppnt->p_memsz > TASK_SIZE ||
		    TASK_SIZE - elf_ppnt->p_memsz < k) {
			/* set_brk can never work. Avoid overflows. */
			send_sig(SIGKILL, current, 0);
			goto out_free_dentry;
		}

		k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;

		if (k > elf_bss)
			elf_bss = k;
		if ((elf_ppnt->p_flags & PF_X) && end_code < k)
			end_code = k;
		if (end_data < k)
			end_data = k;
		k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
		if (k > elf_brk)
			elf_brk = k;
	}

	loc->elf_ex.e_entry += load_bias;
	elf_bss += load_bias;
	elf_brk += load_bias;
	start_code += load_bias;
	end_code += load_bias;
	start_data += load_bias;
	end_data += load_bias;

	/* Calling set_brk effectively mmaps the pages that we need
	 * for the bss and break sections.  We must do this before
	 * mapping in the interpreter, to make sure it doesn't wind
	 * up getting placed where the bss needs to go.
	 */
	retval = set_brk(elf_bss, elf_brk);
	if (retval) {
		send_sig(SIGKILL, current, 0);
		goto out_free_dentry;
	}
	if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
		send_sig(SIGSEGV, current, 0);
		retval = -EFAULT; /* Nobody gets to see this, but.. */
		goto out_free_dentry;
	}

	if (elf_interpreter) {
		if (interpreter_type == INTERPRETER_AOUT)
			elf_entry = load_aout_interp(&loc->interp_ex,
						     interpreter);
		else
			elf_entry = load_elf_interp(&loc->interp_elf_ex,
						    interpreter,
						    &interp_load_addr);
		if (BAD_ADDR(elf_entry)) {
			force_sig(SIGSEGV, current);
			retval = IS_ERR((void *)elf_entry) ?
					(int)elf_entry : -EINVAL;
			goto out_free_dentry;
		}
		reloc_func_desc = interp_load_addr;

		allow_write_access(interpreter);
		fput(interpreter);
		kfree(elf_interpreter);
	} else {
		elf_entry = loc->elf_ex.e_entry;
		if (BAD_ADDR(elf_entry)) {
			force_sig(SIGSEGV, current);
			retval = -EINVAL;
			goto out_free_dentry;
		}
	}

	kfree(elf_phdata);

	if (interpreter_type != INTERPRETER_AOUT)
		sys_close(elf_exec_fileno);

	set_binfmt(&elf_format);

#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
	retval = arch_setup_additional_pages(bprm, executable_stack);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		goto out;
	}
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */

	compute_creds(bprm);
	current->flags &= ~PF_FORKNOEXEC;
	create_elf_tables(bprm, &loc->elf_ex,
			  (interpreter_type == INTERPRETER_AOUT),
			  load_addr, interp_load_addr);
	/* N.B. passed_fileno might not be initialized? */
	if (interpreter_type == INTERPRETER_AOUT)
		current->mm->arg_start += strlen(passed_fileno) + 1;
	current->mm->end_code = end_code;
	current->mm->start_code = start_code;
	current->mm->start_data = start_data;
	current->mm->end_data = end_data;
	current->mm->start_stack = bprm->p;

	if (current->personality & MMAP_PAGE_ZERO) {
		/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
		   and some applications "depend" upon this behavior.
		   Since we do not have the power to recompile these, we
		   emulate the SVr4 behavior. Sigh. */
		down_write(&current->mm->mmap_sem);
		error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE, 0);
		up_write(&current->mm->mmap_sem);
	}

#ifdef ELF_PLAT_INIT
	/*
	 * The ABI may specify that certain registers be set up in special
	 * ways (on i386 %edx is the address of a DT_FINI function, for
	 * example.  In addition, it may also specify (eg, PowerPC64 ELF)
	 * that the e_entry field is the address of the function descriptor
	 * for the startup routine, rather than the address of the startup
	 * routine itself.  This macro performs whatever initialization to
	 * the regs structure is required as well as any relocations to the
	 * function descriptor entries when executing dynamically linked apps.
	 */
	ELF_PLAT_INIT(regs, reloc_func_desc);
#endif

	start_thread(regs, elf_entry, bprm->p);
	if (unlikely(current->ptrace & PT_PTRACED)) {
		if (current->ptrace & PT_TRACE_EXEC)
			ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
		else
			send_sig(SIGTRAP, current, 0);
	}
	retval = 0;
out:
	kfree(loc);
out_ret:
	return retval;

	/* error cleanup */
out_free_dentry:
	allow_write_access(interpreter);
	if (interpreter)
		fput(interpreter);
out_free_interp:
	kfree(elf_interpreter);
out_free_file:
	sys_close(elf_exec_fileno);
out_free_fh:
	if (files)
		reset_files_struct(current, files);
out_free_ph:
	kfree(elf_phdata);
	goto out;
}
/* This is really simpleminded and specialized - we are loading an
   a.out library that is given an ELF header. */
static int load_elf_library(struct file *file)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long elf_bss, bss, len;
	int retval, error, i, j;
	struct elfhdr elf_ex;

	error = -ENOEXEC;
	retval = kernel_read(file, 0, (char *)&elf_ex, sizeof(elf_ex));
	if (retval != sizeof(elf_ex))
		goto out;

	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	/* First of all, some simple consistency checks */
	if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
	    !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
		goto out;

	/* Now read in all of the header information */

	j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
	/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */

	error = -ENOMEM;
	elf_phdata = kmalloc(j, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	eppnt = elf_phdata;
	error = -ENOEXEC;
	retval = kernel_read(file, elf_ex.e_phoff, (char *)eppnt, j);
	if (retval != j)
		goto out_free_ph;

	for (j = 0, i = 0; i < elf_ex.e_phnum; i++)
		if ((eppnt + i)->p_type == PT_LOAD)
			j++;
	if (j != 1)
		goto out_free_ph;

	while (eppnt->p_type != PT_LOAD)
		eppnt++;

	/* Now use mmap to map the library into memory. */
	down_write(&current->mm->mmap_sem);
	error = do_mmap(file,
			ELF_PAGESTART(eppnt->p_vaddr),
			(eppnt->p_filesz +
			 ELF_PAGEOFFSET(eppnt->p_vaddr)),
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
			(eppnt->p_offset -
			 ELF_PAGEOFFSET(eppnt->p_vaddr)));
	up_write(&current->mm->mmap_sem);
	if (error != ELF_PAGESTART(eppnt->p_vaddr))
		goto out_free_ph;

	elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out_free_ph;
	}

	len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr +
			    ELF_MIN_ALIGN - 1);
	bss = eppnt->p_memsz + eppnt->p_vaddr;
	if (bss > len) {
		down_write(&current->mm->mmap_sem);
		do_brk(len, bss - len);
		up_write(&current->mm->mmap_sem);
	}
	error = 0;

out_free_ph:
	kfree(elf_phdata);
out:
	return error;
}
/*
 * Note that some platforms still use traditional core dumps and not
 * the ELF core dump.  Each platform can select it as appropriate.
 */
#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)

/*
 * ELF core dumper
 *
 * Modelled on fs/exec.c:aout_core_dump()
 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
 */
/*
 * These are the only things you should do on a core-file: use only these
 * functions to write out all the necessary info.
 */
static int dump_write(struct file *file, const void *addr, int nr)
{
	return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
}
static int dump_seek(struct file *file, loff_t off)
{
	if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
		if (file->f_op->llseek(file, off, SEEK_CUR) < 0)
			return 0;
	} else {
		char *buf = (char *)get_zeroed_page(GFP_KERNEL);
		if (!buf)
			return 0;
		while (off > 0) {
			unsigned long n = off;
			if (n > PAGE_SIZE)
				n = PAGE_SIZE;
			if (!dump_write(file, buf, n))
				return 0;
			off -= n;
		}
		free_page((unsigned long)buf);
	}
	return 1;
}
/*
 * Decide whether a segment is worth dumping; default is yes to be
 * sure (missing info is worse than too much; etc).
 * Personally I'd include everything, and use the coredump limit...
 *
 * I think we should skip something. But I am not sure how. H.J.
 */
static int maydump(struct vm_area_struct *vma)
{
	/* The vma can be set up to tell us the answer directly.  */
	if (vma->vm_flags & VM_ALWAYSDUMP)
		return 1;

	/* Do not dump I/O mapped devices or special mappings */
	if (vma->vm_flags & (VM_IO | VM_RESERVED))
		return 0;

	/* Dump shared memory only if mapped from an anonymous file. */
	if (vma->vm_flags & VM_SHARED)
		return vma->vm_file->f_path.dentry->d_inode->i_nlink == 0;

	/* If it hasn't been written to, don't write it out */
	if (!vma->anon_vma)
		return 0;

	return 1;
}
/* An ELF note in memory */
struct memelfnote
{
	const char *name;
	int type;
	unsigned int datasz;
	void *data;
};

static int notesize(struct memelfnote *en)
{
	int sz;

	sz = sizeof(struct elf_note);
	sz += roundup(strlen(en->name) + 1, 4);
	sz += roundup(en->datasz, 4);

	return sz;
}
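/*
 * Editor's note - a worked example, not part of the original source.
 * For the NT_PRSTATUS note emitted below, notesize() yields
 *
 *   sizeof(struct elf_note)                      the n_namesz/n_descsz/n_type
 *                                                header
 *   + roundup(strlen("CORE") + 1, 4)  =  8       the name, padded to 4 bytes
 *   + roundup(sizeof(struct elf_prstatus), 4)    the payload, padded
 *
 * which matches the on-disk layout that writenote() writes out.
 */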
#define DUMP_WRITE(addr, nr, foffset)	\
	do { if (!dump_write(file, (addr), (nr))) return 0; *foffset += (nr); } while(0)

static int alignfile(struct file *file, loff_t *foffset)
{
	static const char buf[4] = { 0, };
	DUMP_WRITE(buf, roundup(*foffset, 4) - *foffset, foffset);
	return 1;
}

static int writenote(struct memelfnote *men, struct file *file,
			loff_t *foffset)
{
	struct elf_note en;
	en.n_namesz = strlen(men->name) + 1;
	en.n_descsz = men->datasz;
	en.n_type = men->type;

	DUMP_WRITE(&en, sizeof(en), foffset);
	DUMP_WRITE(men->name, en.n_namesz, foffset);
	if (!alignfile(file, foffset))
		return 0;
	DUMP_WRITE(men->data, men->datasz, foffset);
	if (!alignfile(file, foffset))
		return 0;

	return 1;
}
#undef DUMP_WRITE
#define DUMP_WRITE(addr, nr)	\
	if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
		goto end_coredump;
#define DUMP_SEEK(off)	\
	if (!dump_seek(file, (off))) \
		goto end_coredump;
static void fill_elf_header(struct elfhdr *elf, int segs)
{
	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS] = ELF_CLASS;
	elf->e_ident[EI_DATA] = ELF_DATA;
	elf->e_ident[EI_VERSION] = EV_CURRENT;
	elf->e_ident[EI_OSABI] = ELF_OSABI;
	memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);

	elf->e_type = ET_CORE;
	elf->e_machine = ELF_ARCH;
	elf->e_version = EV_CURRENT;
	elf->e_entry = 0;
	elf->e_phoff = sizeof(struct elfhdr);
	elf->e_shoff = 0;
	elf->e_flags = ELF_CORE_EFLAGS;
	elf->e_ehsize = sizeof(struct elfhdr);
	elf->e_phentsize = sizeof(struct elf_phdr);
	elf->e_phnum = segs;
	elf->e_shentsize = 0;
	elf->e_shnum = 0;
	elf->e_shstrndx = 0;
	return;
}
static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
{
	phdr->p_type = PT_NOTE;
	phdr->p_offset = offset;
	phdr->p_vaddr = 0;
	phdr->p_paddr = 0;
	phdr->p_filesz = sz;
	phdr->p_memsz = 0;
	phdr->p_flags = 0;
	phdr->p_align = 0;
	return;
}
static void fill_note(struct memelfnote *note, const char *name, int type,
		unsigned int sz, void *data)
{
	note->name = name;
	note->type = type;
	note->datasz = sz;
	note->data = data;
	return;
}
/*
 * fill up all the fields in prstatus from the given task struct, except
 * registers which need to be filled up separately.
 */
static void fill_prstatus(struct elf_prstatus *prstatus,
		struct task_struct *p, long signr)
{
	prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
	prstatus->pr_sigpend = p->pending.signal.sig[0];
	prstatus->pr_sighold = p->blocked.sig[0];
	prstatus->pr_pid = p->pid;
	prstatus->pr_ppid = p->parent->pid;
	prstatus->pr_pgrp = process_group(p);
	prstatus->pr_sid = process_session(p);
	if (thread_group_leader(p)) {
		/*
		 * This is the record for the group leader.  Add in the
		 * cumulative times of previous dead threads.  This total
		 * won't include the time of each live thread whose state
		 * is included in the core dump.  The final total reported
		 * to our parent process when it calls wait4 will include
		 * those sums as well as the little bit more time it takes
		 * this and each other thread to finish dying after the
		 * core dump synchronization phase.
		 */
		cputime_to_timeval(cputime_add(p->utime, p->signal->utime),
				   &prstatus->pr_utime);
		cputime_to_timeval(cputime_add(p->stime, p->signal->stime),
				   &prstatus->pr_stime);
	} else {
		cputime_to_timeval(p->utime, &prstatus->pr_utime);
		cputime_to_timeval(p->stime, &prstatus->pr_stime);
	}
	cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
	cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
}
static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
		       struct mm_struct *mm)
{
	unsigned int i, len;

	/* first copy the parameters from user space */
	memset(psinfo, 0, sizeof(struct elf_prpsinfo));

	len = mm->arg_end - mm->arg_start;
	if (len >= ELF_PRARGSZ)
		len = ELF_PRARGSZ-1;
	if (copy_from_user(&psinfo->pr_psargs,
			   (const char __user *)mm->arg_start, len))
		return -EFAULT;
	for (i = 0; i < len; i++)
		if (psinfo->pr_psargs[i] == 0)
			psinfo->pr_psargs[i] = ' ';
	psinfo->pr_psargs[len] = 0;

	psinfo->pr_pid = p->pid;
	psinfo->pr_ppid = p->parent->pid;
	psinfo->pr_pgrp = process_group(p);
	psinfo->pr_sid = process_session(p);

	i = p->state ? ffz(~p->state) + 1 : 0;
	psinfo->pr_state = i;
	psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
	psinfo->pr_zomb = psinfo->pr_sname == 'Z';
	psinfo->pr_nice = task_nice(p);
	psinfo->pr_flag = p->flags;
	SET_UID(psinfo->pr_uid, p->uid);
	SET_GID(psinfo->pr_gid, p->gid);
	strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));

	return 0;
}
/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status
{
	struct list_head list;
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	elf_fpregset_t fpu;		/* NT_PRFPREG */
	struct task_struct *thread;
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t xfpu;		/* NT_PRXFPREG */
#endif
	struct memelfnote notes[3];
	int num_notes;
};
/*
 * In order to add the specific thread information for the elf file format,
 * we need to keep a linked list of every thread's pr_status and then create
 * a single section for them in the final core file.
 */
static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
{
	int sz = 0;
	struct task_struct *p = t->thread;
	t->num_notes = 0;

	fill_prstatus(&t->prstatus, p, signr);
	elf_core_copy_task_regs(p, &t->prstatus.pr_reg);

	fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
		  &(t->prstatus));
	t->num_notes++;
	sz += notesize(&t->notes[0]);

	if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL,
								&t->fpu))) {
		fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
			  &(t->fpu));
		t->num_notes++;
		sz += notesize(&t->notes[1]);
	}

#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
		fill_note(&t->notes[2], "LINUX", NT_PRXFPREG, sizeof(t->xfpu),
			  &t->xfpu);
		t->num_notes++;
		sz += notesize(&t->notes[2]);
	}
#endif
	return sz;
}
static struct vm_area_struct *first_vma(struct task_struct *tsk,
					struct vm_area_struct *gate_vma)
{
	struct vm_area_struct *ret = tsk->mm->mmap;

	if (ret)
		return ret;
	return gate_vma;
}

/*
 * Helper function for iterating across a vma list.  It ensures that the caller
 * will visit `gate_vma' prior to terminating the search.
 */
static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
					struct vm_area_struct *gate_vma)
{
	struct vm_area_struct *ret;

	ret = this_vma->vm_next;
	if (ret)
		return ret;
	if (this_vma == gate_vma)
		return NULL;
	return gate_vma;
}
/*
 * Actual dumper
 *
 * This is a two-pass process; first we find the offsets of the bits,
 * and then they are actually written out.  If we run out of core limit
 * we just truncate.
 */
static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file)
{
#define	NUM_NOTES	6
	int has_dumped = 0;
	mm_segment_t fs;
	int segs;
	size_t size = 0;
	int i;
	struct vm_area_struct *vma, *gate_vma;
	struct elfhdr *elf = NULL;
	loff_t offset = 0, dataoff, foffset;
	unsigned long limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
	int numnote;
	struct memelfnote *notes = NULL;
	struct elf_prstatus *prstatus = NULL;	/* NT_PRSTATUS */
	struct elf_prpsinfo *psinfo = NULL;	/* NT_PRPSINFO */
	struct task_struct *g, *p;
	LIST_HEAD(thread_list);
	struct list_head *t;
	elf_fpregset_t *fpu = NULL;
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t *xfpu = NULL;
#endif
	int thread_status_size = 0;
	elf_addr_t *auxv;

	/*
	 * We no longer stop all VM operations.
	 *
	 * This is because those processes that could possibly change map_count
	 * or the mmap / vma pages are now blocked in do_exit on current
	 * finishing this core dump.
	 *
	 * Only ptrace can touch these memory addresses, but it doesn't change
	 * the map_count or the pages allocated. So no possibility of crashing
	 * exists while dumping the mm->vm_next areas to the core file.
	 */

	/* alloc memory for large data structures: too large to be on stack */
	elf = kmalloc(sizeof(*elf), GFP_KERNEL);
	if (!elf)
		goto cleanup;
	prstatus = kmalloc(sizeof(*prstatus), GFP_KERNEL);
	if (!prstatus)
		goto cleanup;
	psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
	if (!psinfo)
		goto cleanup;
	notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL);
	if (!notes)
		goto cleanup;
	fpu = kmalloc(sizeof(*fpu), GFP_KERNEL);
	if (!fpu)
		goto cleanup;
#ifdef ELF_CORE_COPY_XFPREGS
	xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL);
	if (!xfpu)
		goto cleanup;
#endif

	if (signr) {
		struct elf_thread_status *tmp;
		rcu_read_lock();
		do_each_thread(g,p)
			if (current->mm == p->mm && current != p) {
				tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
				if (!tmp) {
					rcu_read_unlock();
					goto cleanup;
				}
				tmp->thread = p;
				list_add(&tmp->list, &thread_list);
			}
		while_each_thread(g,p);
		rcu_read_unlock();
		list_for_each(t, &thread_list) {
			struct elf_thread_status *tmp;
			int sz;

			tmp = list_entry(t, struct elf_thread_status, list);
			sz = elf_dump_thread_status(signr, tmp);
			thread_status_size += sz;
		}
	}
	/* now collect the dump for the current */
	memset(prstatus, 0, sizeof(*prstatus));
	fill_prstatus(prstatus, current, signr);
	elf_core_copy_regs(&prstatus->pr_reg, regs);

	segs = current->mm->map_count;
#ifdef ELF_CORE_EXTRA_PHDRS
	segs += ELF_CORE_EXTRA_PHDRS;
#endif

	gate_vma = get_gate_vma(current);
	if (gate_vma != NULL)
		segs++;

	/* Set up header */
	fill_elf_header(elf, segs + 1);	/* including notes section */

	has_dumped = 1;
	current->flags |= PF_DUMPCORE;

	/*
	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	 */

	fill_note(notes + 0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);
	fill_psinfo(psinfo, current->group_leader, current->mm);
	fill_note(notes + 1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);

	numnote = 2;

	auxv = (elf_addr_t *)current->mm->saved_auxv;

	i = 0;
	do
		i += 2;
	while (auxv[i - 2] != AT_NULL);
	fill_note(&notes[numnote++], "CORE", NT_AUXV,
		  i * sizeof(elf_addr_t), auxv);

	/* Try to dump the FPU. */
	if ((prstatus->pr_fpvalid =
	     elf_core_copy_task_fpregs(current, regs, fpu)))
		fill_note(notes + numnote++,
			  "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(current, xfpu))
		fill_note(notes + numnote++,
			  "LINUX", NT_PRXFPREG, sizeof(*xfpu), xfpu);
#endif

	fs = get_fs();
	set_fs(KERNEL_DS);

	DUMP_WRITE(elf, sizeof(*elf));
	offset += sizeof(*elf);				/* Elf header */
	offset += (segs + 1) * sizeof(struct elf_phdr);	/* Program headers */
	foffset = offset;

	/* Write notes phdr entry */
	{
		struct elf_phdr phdr;
		int sz = 0;

		for (i = 0; i < numnote; i++)
			sz += notesize(notes + i);

		sz += thread_status_size;

#ifdef ELF_CORE_WRITE_EXTRA_NOTES
		sz += ELF_CORE_EXTRA_NOTES_SIZE;
#endif

		fill_elf_note_phdr(&phdr, sz, offset);
		offset += sz;
		DUMP_WRITE(&phdr, sizeof(phdr));
	}

	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);

	/* Write program headers for segments dump */
	for (vma = first_vma(current, gate_vma); vma != NULL;
			vma = next_vma(vma, gate_vma)) {
		struct elf_phdr phdr;
		size_t sz;

		sz = vma->vm_end - vma->vm_start;

		phdr.p_type = PT_LOAD;
		phdr.p_offset = offset;
		phdr.p_vaddr = vma->vm_start;
		phdr.p_paddr = 0;
		phdr.p_filesz = maydump(vma) ? sz : 0;
		phdr.p_memsz = sz;
		offset += phdr.p_filesz;
		phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
		if (vma->vm_flags & VM_WRITE)
			phdr.p_flags |= PF_W;
		if (vma->vm_flags & VM_EXEC)
			phdr.p_flags |= PF_X;
		phdr.p_align = ELF_EXEC_PAGESIZE;

		DUMP_WRITE(&phdr, sizeof(phdr));
	}

#ifdef ELF_CORE_WRITE_EXTRA_PHDRS
	ELF_CORE_WRITE_EXTRA_PHDRS;
#endif

	/* write out the notes section */
	for (i = 0; i < numnote; i++)
		if (!writenote(notes + i, file, &foffset))
			goto end_coredump;

#ifdef ELF_CORE_WRITE_EXTRA_NOTES
	ELF_CORE_WRITE_EXTRA_NOTES;
#endif

	/* write out the thread status notes section */
	list_for_each(t, &thread_list) {
		struct elf_thread_status *tmp =
				list_entry(t, struct elf_thread_status, list);

		for (i = 0; i < tmp->num_notes; i++)
			if (!writenote(&tmp->notes[i], file, &foffset))
				goto end_coredump;
	}

	/* Align to page */
	DUMP_SEEK(dataoff - foffset);

	for (vma = first_vma(current, gate_vma); vma != NULL;
			vma = next_vma(vma, gate_vma)) {
		unsigned long addr;

		if (!maydump(vma))
			continue;

		for (addr = vma->vm_start;
		     addr < vma->vm_end;
		     addr += PAGE_SIZE) {
			struct page *page;
			struct vm_area_struct *vma;

			if (get_user_pages(current, current->mm, addr, 1, 0, 1,
						&page, &vma) <= 0) {
				DUMP_SEEK(PAGE_SIZE);
			} else {
				if (page == ZERO_PAGE(addr)) {
					DUMP_SEEK(PAGE_SIZE);
				} else {
					void *kaddr;
					flush_cache_page(vma, addr,
							 page_to_pfn(page));
					kaddr = kmap(page);
					if ((size += PAGE_SIZE) > limit ||
					    !dump_write(file, kaddr,
							PAGE_SIZE)) {
						kunmap(page);
						page_cache_release(page);
						goto end_coredump;
					}
					kunmap(page);
				}
				page_cache_release(page);
			}
		}
	}

#ifdef ELF_CORE_WRITE_EXTRA_DATA
	ELF_CORE_WRITE_EXTRA_DATA;
#endif

end_coredump:
	set_fs(fs);

cleanup:
	while (!list_empty(&thread_list)) {
		struct list_head *tmp = thread_list.next;
		list_del(tmp);
		kfree(list_entry(tmp, struct elf_thread_status, list));
	}

	kfree(elf);
	kfree(prstatus);
	kfree(psinfo);
	kfree(notes);
	kfree(fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	kfree(xfpu);
#endif
	return has_dumped;
#undef NUM_NOTES
}

#endif		/* USE_ELF_CORE_DUMP */
static int __init init_elf_binfmt(void)
{
	return register_binfmt(&elf_format);
}

static void __exit exit_elf_binfmt(void)
{
	/* Remove the COFF and ELF loaders. */
	unregister_binfmt(&elf_format);
}

core_initcall(init_elf_binfmt);
module_exit(exit_elf_binfmt);
MODULE_LICENSE("GPL");