/*
 * linux/fs/binfmt_elf.c
 *
 * These are the functions used to load ELF format executables as used
 * on SVr4 machines.  Information on the format may be found in the book
 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
 * Tools".
 *
 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/a.out.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/binfmts.h>
#include <linux/string.h>
#include <linux/file.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/personality.h>
#include <linux/elfcore.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/smp.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/utsname.h>
#include <asm/uaccess.h>
#include <asm/param.h>
#include <asm/page.h>
static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs);
static int load_elf_library(struct file *);
static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
                             int, int);
/*
 * If we don't support core dumping, then supply a NULL so we
 * don't even try.
 */
#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file,
                         unsigned long limit);
#else
#define elf_core_dump   NULL
#endif
#if ELF_EXEC_PAGESIZE > PAGE_SIZE
#define ELF_MIN_ALIGN   ELF_EXEC_PAGESIZE
#else
#define ELF_MIN_ALIGN   PAGE_SIZE
#endif
#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS 0
#endif
#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
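/*
 * Worked example of the arithmetic above, assuming ELF_MIN_ALIGN == 4096:
 * for _v == 0x08049234, ELF_PAGESTART gives 0x08049000 (round down to the
 * page), ELF_PAGEOFFSET gives 0x234 (offset within the page), and
 * ELF_PAGEALIGN gives 0x0804a000 (round up to the next page).
 */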
static struct linux_binfmt elf_format = {
        .module         = THIS_MODULE,
        .load_binary    = load_elf_binary,
        .load_shlib     = load_elf_library,
        .core_dump      = elf_core_dump,
        .min_coredump   = ELF_EXEC_PAGESIZE,
        .hasvdso        = 1
};

#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)
static int set_brk(unsigned long start, unsigned long end)
{
        start = ELF_PAGEALIGN(start);
        end = ELF_PAGEALIGN(end);
        if (end > start) {
                unsigned long addr;
                down_write(&current->mm->mmap_sem);
                addr = do_brk(start, end - start);
                up_write(&current->mm->mmap_sem);
                if (BAD_ADDR(addr))
                        return addr;
        }
        current->mm->start_brk = current->mm->brk = end;
        return 0;
}
/* We need to explicitly zero any fractional pages
   after the data section (i.e. bss).  This would
   contain the junk from the file that should not
   be in memory
 */
static int padzero(unsigned long elf_bss)
{
        unsigned long nbyte;

        nbyte = ELF_PAGEOFFSET(elf_bss);
        if (nbyte) {
                nbyte = ELF_MIN_ALIGN - nbyte;
                if (clear_user((void __user *) elf_bss, nbyte))
                        return -EFAULT;
        }
        return 0;
}
/* Let's use some macros to make this stack manipulation a little clearer */
#ifdef CONFIG_STACK_GROWSUP
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
#define STACK_ROUND(sp, items) \
        ((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ \
        elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \
        old_sp; })
#else
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
#define STACK_ROUND(sp, items) \
        (((unsigned long) (sp - items)) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
#endif
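/*
 * Note that STACK_ROUND keeps the resulting stack pointer 16-byte aligned
 * in both growth directions; several ABIs (the x86-64 psABI, for one)
 * require at least this alignment at process entry.
 */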
static int
create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
                int interp_aout, unsigned long load_addr,
                unsigned long interp_load_addr)
{
        unsigned long p = bprm->p;
        int argc = bprm->argc;
        int envc = bprm->envc;
        elf_addr_t __user *argv;
        elf_addr_t __user *envp;
        elf_addr_t __user *sp;
        elf_addr_t __user *u_platform;
        const char *k_platform = ELF_PLATFORM;
        int items;
        elf_addr_t *elf_info;
        int ei_index = 0;
        struct task_struct *tsk = current;
        struct vm_area_struct *vma;

        /*
         * In some cases (e.g. Hyper-Threading), we want to avoid L1
         * evictions by the processes running on the same package. One
         * thing we can do is to shuffle the initial stack for them.
         */

        p = arch_align_stack(p);
        /*
         * If this architecture has a platform capability string, copy it
         * to userspace.  In some cases (Sparc), this info is impossible
         * for userspace to get any other way, in others (i386) it is
         * merely difficult.
         */
        u_platform = NULL;
        if (k_platform) {
                size_t len = strlen(k_platform) + 1;

                u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
                if (__copy_to_user(u_platform, k_platform, len))
                        return -EFAULT;
        }
        /* Create the ELF interpreter info */
        elf_info = (elf_addr_t *)current->mm->saved_auxv;
        /* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */
#define NEW_AUX_ENT(id, val) \
        do { \
                elf_info[ei_index++] = id; \
                elf_info[ei_index++] = val; \
        } while (0)
#ifdef ARCH_DLINFO
        /*
         * ARCH_DLINFO must come first so PPC can do its special alignment of
         * AUXV.
         * update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT() in
         * ARCH_DLINFO changes
         */
        ARCH_DLINFO;
#endif
        NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
        NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
        NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
        NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
        NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
        NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
        NEW_AUX_ENT(AT_BASE, interp_load_addr);
        NEW_AUX_ENT(AT_FLAGS, 0);
        NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
        NEW_AUX_ENT(AT_UID, tsk->uid);
        NEW_AUX_ENT(AT_EUID, tsk->euid);
        NEW_AUX_ENT(AT_GID, tsk->gid);
        NEW_AUX_ENT(AT_EGID, tsk->egid);
        NEW_AUX_ENT(AT_SECURE, security_bprm_secureexec(bprm));
        if (k_platform) {
                NEW_AUX_ENT(AT_PLATFORM,
                            (elf_addr_t)(unsigned long)u_platform);
        }
        if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
                NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
        }
#undef NEW_AUX_ENT
        /* AT_NULL is zero; clear the rest too */
        memset(&elf_info[ei_index], 0,
               sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);
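        /*
         * Note that the auxv pairs are built in mm->saved_auxv rather than
         * directly on the stack: the saved copy is later exported via
         * /proc/<pid>/auxv and reused by the NT_AUXV note in elf_core_dump()
         * below.
         */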
        /* And advance past the AT_NULL entry. */
        ei_index += 2;

        sp = STACK_ADD(p, ei_index);

        items = (argc + 1) + (envc + 1);
        if (interp_aout) {
                items += 3; /* a.out interpreters require argv & envp too */
        } else {
                items += 1; /* ELF interpreters only put argc on the stack */
        }
        bprm->p = STACK_ROUND(sp, items);
        /* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
        sp = (elf_addr_t __user *)bprm->p - items - ei_index;
        bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
#else
        sp = (elf_addr_t __user *)bprm->p;
#endif
        /*
         * Grow the stack manually; some architectures have a limit on how
         * far ahead a user-space access may be in order to grow the stack.
         */
        vma = find_extend_vma(current->mm, bprm->p);
        if (!vma)
                return -EFAULT;

        /* Now, let's put argc (and argv, envp if appropriate) on the stack */
        if (__put_user(argc, sp++))
                return -EFAULT;
        if (interp_aout) {
                argv = sp + 2;
                envp = argv + argc + 1;
                if (__put_user((elf_addr_t)(unsigned long)argv, sp++) ||
                    __put_user((elf_addr_t)(unsigned long)envp, sp++))
                        return -EFAULT;
        } else {
                argv = sp;
                envp = argv + argc + 1;
        }
        /* Populate argv and envp */
        p = current->mm->arg_end = current->mm->arg_start;
        while (argc-- > 0) {
                size_t len;
                if (__put_user((elf_addr_t)p, argv++))
                        return -EFAULT;
                len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
                if (!len || len > MAX_ARG_STRLEN)
                        return -EINVAL;
                p += len;
        }
        if (__put_user(0, argv))
                return -EFAULT;
        current->mm->arg_end = current->mm->env_start = p;
        while (envc-- > 0) {
                size_t len;
                if (__put_user((elf_addr_t)p, envp++))
                        return -EFAULT;
                len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
                if (!len || len > MAX_ARG_STRLEN)
                        return -EINVAL;
                p += len;
        }
        if (__put_user(0, envp))
                return -EFAULT;
        current->mm->env_end = p;
        /* Put the elf_info on the stack in the right place.  */
        sp = (elf_addr_t __user *)envp + 1;
        if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
                return -EFAULT;
        return 0;
}
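/*
 * On return from create_elf_tables() the new stack looks like this, from
 * the lowest address upward, one machine word (elf_addr_t) per slot:
 *
 *      argc
 *      argv[0] .. argv[argc-1], NULL
 *      envp[0] .. envp[envc-1], NULL
 *      auxv pairs (a_type, a_val), terminated by an AT_NULL pair
 *
 * followed by the argument and environment strings themselves.
 */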
#ifndef elf_map

static unsigned long elf_map(struct file *filep, unsigned long addr,
                struct elf_phdr *eppnt, int prot, int type)
{
        unsigned long map_addr;
        unsigned long pageoffset = ELF_PAGEOFFSET(eppnt->p_vaddr);

        down_write(&current->mm->mmap_sem);
        /* mmap() will return -EINVAL if given a zero size, but a
         * segment with zero filesize is perfectly valid */
        if (eppnt->p_filesz + pageoffset)
                map_addr = do_mmap(filep, ELF_PAGESTART(addr),
                                   eppnt->p_filesz + pageoffset, prot, type,
                                   eppnt->p_offset - pageoffset);
        else
                map_addr = ELF_PAGESTART(addr);
        up_write(&current->mm->mmap_sem);
        return map_addr;
}

#endif /* !elf_map */
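/*
 * Worked example for elf_map(): a phdr with p_vaddr 0x08049234 and
 * p_offset 0x1234 has pageoffset 0x234, so the segment is mapped starting
 * at address 0x08049000 from file offset 0x1000.  This only works because
 * the ELF spec requires p_vaddr and p_offset to be congruent modulo the
 * page size for loadable segments.
 */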
/* This is much more generalized than the library routine read function,
   so we keep this separate.  Technically the library read function
   is only provided so that we can read a.out libraries that have
   an ELF header */

static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
                struct file *interpreter, unsigned long *interp_load_addr)
{
        struct elf_phdr *elf_phdata;
        struct elf_phdr *eppnt;
        unsigned long load_addr = 0;
        int load_addr_set = 0;
        unsigned long last_bss = 0, elf_bss = 0;
        unsigned long error = ~0UL;
        int retval, i, size;
        /* First of all, some simple consistency checks */
        if (interp_elf_ex->e_type != ET_EXEC &&
            interp_elf_ex->e_type != ET_DYN)
                goto out;
        if (!elf_check_arch(interp_elf_ex))
                goto out;
        if (!interpreter->f_op || !interpreter->f_op->mmap)
                goto out;

        /*
         * If the size of this structure has changed, then punt, since
         * we will be doing the wrong thing.
         */
        if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
                goto out;
        if (interp_elf_ex->e_phnum < 1 ||
            interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
                goto out;

        /* Now read in all of the header information */
        size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
        if (size > ELF_MIN_ALIGN)
                goto out;
        elf_phdata = kmalloc(size, GFP_KERNEL);
        if (!elf_phdata)
                goto out;

        retval = kernel_read(interpreter, interp_elf_ex->e_phoff,
                             (char *)elf_phdata, size);
        error = -EIO;
        if (retval != size) {
                if (retval < 0)
                        error = retval;
                goto out_close;
        }
        eppnt = elf_phdata;
        for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
                if (eppnt->p_type == PT_LOAD) {
                        int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
                        int elf_prot = 0;
                        unsigned long vaddr = 0;
                        unsigned long k, map_addr;

                        if (eppnt->p_flags & PF_R)
                                elf_prot = PROT_READ;
                        if (eppnt->p_flags & PF_W)
                                elf_prot |= PROT_WRITE;
                        if (eppnt->p_flags & PF_X)
                                elf_prot |= PROT_EXEC;
                        vaddr = eppnt->p_vaddr;
                        if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
                                elf_type |= MAP_FIXED;

                        map_addr = elf_map(interpreter, load_addr + vaddr,
                                           eppnt, elf_prot, elf_type);
                        error = map_addr;
                        if (BAD_ADDR(map_addr))
                                goto out_close;

                        if (!load_addr_set &&
                            interp_elf_ex->e_type == ET_DYN) {
                                load_addr = map_addr - ELF_PAGESTART(vaddr);
                                load_addr_set = 1;
                        }
                        /*
                         * Check to see if the section's size will overflow the
                         * allowed task size. Note that p_filesz must always be
                         * <= p_memsz so it's only necessary to check p_memsz.
                         */
                        k = load_addr + eppnt->p_vaddr;
                        if (BAD_ADDR(k) ||
                            eppnt->p_filesz > eppnt->p_memsz ||
                            eppnt->p_memsz > TASK_SIZE ||
                            TASK_SIZE - eppnt->p_memsz < k) {
                                error = -ENOMEM;
                                goto out_close;
                        }

                        /*
                         * Find the end of the file mapping for this phdr, and
                         * keep track of the largest address we see for this.
                         */
                        k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
                        if (k > elf_bss)
                                elf_bss = k;

                        /*
                         * Do the same thing for the memory mapping - between
                         * elf_bss and last_bss is the bss section.
                         */
                        k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
                        if (k > last_bss)
                                last_bss = k;
                }
        }
        /*
         * Now fill out the bss section.  First pad the last page up
         * to the page boundary, and then perform a mmap to make sure
         * that there are zero-mapped pages up to and including the
         * last bss page.
         */
        if (padzero(elf_bss)) {
                error = -EFAULT;
                goto out_close;
        }

        /* What we have mapped so far */
        elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);

        /* Map the last of the bss segment */
        if (last_bss > elf_bss) {
                down_write(&current->mm->mmap_sem);
                error = do_brk(elf_bss, last_bss - elf_bss);
                up_write(&current->mm->mmap_sem);
                if (BAD_ADDR(error))
                        goto out_close;
        }

        *interp_load_addr = load_addr;
        error = ((unsigned long)interp_elf_ex->e_entry) + load_addr;

out_close:
        kfree(elf_phdata);
out:
        return error;
}
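/*
 * Note how an ET_DYN interpreter is placed: the first PT_LOAD segment is
 * mapped without MAP_FIXED, so mmap picks a free range; load_addr then
 * records the bias between the link-time vaddrs and where the mapping
 * actually landed, and every later segment is mapped MAP_FIXED relative
 * to that bias.  The entry point returned above is relocated the same way.
 */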
static unsigned long load_aout_interp(struct exec *interp_ex,
                struct file *interpreter)
{
        unsigned long text_data, elf_entry = ~0UL;
        char __user *addr;
        loff_t offset;

        current->mm->end_code = interp_ex->a_text;
        text_data = interp_ex->a_text + interp_ex->a_data;
        current->mm->end_data = text_data;
        current->mm->brk = interp_ex->a_bss + text_data;

        switch (N_MAGIC(*interp_ex)) {
        case OMAGIC:
                offset = 32;
                addr = (char __user *)0;
                break;
        case ZMAGIC:
        case QMAGIC:
                offset = N_TXTOFF(*interp_ex);
                addr = (char __user *)N_TXTADDR(*interp_ex);
                break;
        default:
                goto out;
        }

        down_write(&current->mm->mmap_sem);
        do_brk(0, text_data);
        up_write(&current->mm->mmap_sem);
        if (!interpreter->f_op || !interpreter->f_op->read)
                goto out;
        if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
                goto out;
        flush_icache_range((unsigned long)addr,
                           (unsigned long)addr + text_data);

        down_write(&current->mm->mmap_sem);
        do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
                interp_ex->a_bss);
        up_write(&current->mm->mmap_sem);
        elf_entry = interp_ex->a_entry;

out:
        return elf_entry;
}
/*
 * These are the functions used to load ELF style executables and shared
 * libraries.  There is no binary dependent code anywhere else.
 */

#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
#define INTERPRETER_ELF 2
#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))     /* 8MB of VA */
#endif
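/*
 * With 4K pages (PAGE_SHIFT == 12) the mask is 0x7ff, and since the random
 * value below is shifted left by PAGE_SHIFT, the stack top moves by up to
 * 0x7ff << 12 bytes - just under the 8MB of VA the comment above refers to.
 */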
static unsigned long randomize_stack_top(unsigned long stack_top)
{
        unsigned int random_variable = 0;

        if ((current->flags & PF_RANDOMIZE) &&
            !(current->personality & ADDR_NO_RANDOMIZE)) {
                random_variable = get_random_int() & STACK_RND_MASK;
                random_variable <<= PAGE_SHIFT;
        }
#ifdef CONFIG_STACK_GROWSUP
        return PAGE_ALIGN(stack_top) + random_variable;
#else
        return PAGE_ALIGN(stack_top) - random_variable;
#endif
}
static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
{
        struct file *interpreter = NULL; /* to shut gcc up */
        unsigned long load_addr = 0, load_bias = 0;
        int load_addr_set = 0;
        char *elf_interpreter = NULL;
        unsigned int interpreter_type = INTERPRETER_NONE;
        unsigned char ibcs2_interpreter = 0;
        unsigned long error;
        struct elf_phdr *elf_ppnt, *elf_phdata;
        unsigned long elf_bss, elf_brk;
        int elf_exec_fileno;
        int retval, i;
        unsigned int size;
        unsigned long elf_entry, interp_load_addr = 0;
        unsigned long start_code, end_code, start_data, end_data;
        unsigned long reloc_func_desc = 0;
        char passed_fileno[6];
        struct files_struct *files;
        int executable_stack = EXSTACK_DEFAULT;
        unsigned long def_flags = 0;
        struct {
                struct elfhdr elf_ex;
                struct elfhdr interp_elf_ex;
                struct exec interp_ex;
        } *loc;

        loc = kmalloc(sizeof(*loc), GFP_KERNEL);
        if (!loc) {
                retval = -ENOMEM;
                goto out_ret;
        }

        /* Get the exec-header */
        loc->elf_ex = *((struct elfhdr *)bprm->buf);

        retval = -ENOEXEC;
        /* First of all, some simple consistency checks */
        if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
                goto out;

        if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
                goto out;
        if (!elf_check_arch(&loc->elf_ex))
                goto out;
        if (!bprm->file->f_op || !bprm->file->f_op->mmap)
                goto out;

        /* Now read in all of the header information */
        if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr))
                goto out;
        if (loc->elf_ex.e_phnum < 1 ||
            loc->elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
                goto out;
        size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr);
        retval = -ENOMEM;
        elf_phdata = kmalloc(size, GFP_KERNEL);
        if (!elf_phdata)
                goto out;

        retval = kernel_read(bprm->file, loc->elf_ex.e_phoff,
                             (char *)elf_phdata, size);
        if (retval != size) {
                if (retval >= 0)
                        retval = -EIO;
                goto out_free_ph;
        }
        files = current->files; /* Refcounted so ok */
        retval = unshare_files();
        if (retval < 0)
                goto out_free_ph;
        if (files == current->files) {
                put_files_struct(files);
                files = NULL;
        }

        /* exec will make our files private anyway, but for the a.out
           loader stuff we need to do it earlier */
        retval = get_unused_fd();
        if (retval < 0)
                goto out_free_fh;
        get_file(bprm->file);
        fd_install(elf_exec_fileno = retval, bprm->file);
        elf_ppnt = elf_phdata;
        elf_bss = 0;
        elf_brk = 0;

        start_code = ~0UL;
        end_code = 0;
        start_data = 0;
        end_data = 0;

        for (i = 0; i < loc->elf_ex.e_phnum; i++) {
                if (elf_ppnt->p_type == PT_INTERP) {
                        /* This is the program interpreter used for
                         * shared libraries - for now assume that this
                         * is an a.out format binary
                         */
                        retval = -ENOEXEC;
                        if (elf_ppnt->p_filesz > PATH_MAX ||
                            elf_ppnt->p_filesz < 2)
                                goto out_free_file;

                        retval = -ENOMEM;
                        elf_interpreter = kmalloc(elf_ppnt->p_filesz,
                                                  GFP_KERNEL);
                        if (!elf_interpreter)
                                goto out_free_file;

                        retval = kernel_read(bprm->file, elf_ppnt->p_offset,
                                             elf_interpreter,
                                             elf_ppnt->p_filesz);
                        if (retval != elf_ppnt->p_filesz) {
                                if (retval >= 0)
                                        retval = -EIO;
                                goto out_free_interp;
                        }
                        /* make sure path is NULL terminated */
                        retval = -ENOEXEC;
                        if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
                                goto out_free_interp;
                        /* If the program interpreter is one of these two,
                         * then assume an iBCS2 image. Otherwise assume
                         * a native linux image.
                         */
                        if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
                            strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0)
                                ibcs2_interpreter = 1;

                        /*
                         * The early SET_PERSONALITY here is so that the lookup
                         * for the interpreter happens in the namespace of the
                         * to-be-execed image.  SET_PERSONALITY can select an
                         * alternate root.
                         *
                         * However, SET_PERSONALITY is NOT allowed to switch
                         * this task into the new image's memory mapping
                         * policy - that is, TASK_SIZE must still evaluate to
                         * that which is appropriate to the execing application.
                         * This is because exit_mmap() needs to have TASK_SIZE
                         * evaluate to the size of the old image.
                         *
                         * So if (say) a 64-bit application is execing a 32-bit
                         * application it is the architecture's responsibility
                         * to defer changing the value of TASK_SIZE until the
                         * switch really is going to happen - do this in
                         * flush_thread().      - akpm
                         */
                        SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);

                        interpreter = open_exec(elf_interpreter);
                        retval = PTR_ERR(interpreter);
                        if (IS_ERR(interpreter))
                                goto out_free_interp;
                        /*
                         * If the binary is not readable then enforce
                         * mm->dumpable = 0 regardless of the interpreter's
                         * permissions.
                         */
                        if (file_permission(interpreter, MAY_READ) < 0)
                                bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;

                        retval = kernel_read(interpreter, 0, bprm->buf,
                                             BINPRM_BUF_SIZE);
                        if (retval != BINPRM_BUF_SIZE) {
                                if (retval >= 0)
                                        retval = -EIO;
                                goto out_free_dentry;
                        }

                        /* Get the exec headers */
                        loc->interp_ex = *((struct exec *)bprm->buf);
                        loc->interp_elf_ex = *((struct elfhdr *)bprm->buf);
                        break;
                }
                elf_ppnt++;
        }
        elf_ppnt = elf_phdata;
        for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
                if (elf_ppnt->p_type == PT_GNU_STACK) {
                        if (elf_ppnt->p_flags & PF_X)
                                executable_stack = EXSTACK_ENABLE_X;
                        else
                                executable_stack = EXSTACK_DISABLE_X;
                        break;
                }
        /* Some simple consistency checks for the interpreter */
        if (elf_interpreter) {
                static int warn;
                interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;

                /* Now figure out which format our binary is */
                if ((N_MAGIC(loc->interp_ex) != OMAGIC) &&
                    (N_MAGIC(loc->interp_ex) != ZMAGIC) &&
                    (N_MAGIC(loc->interp_ex) != QMAGIC))
                        interpreter_type = INTERPRETER_ELF;

                if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
                        interpreter_type &= ~INTERPRETER_ELF;

                if (interpreter_type == INTERPRETER_AOUT && warn < 10) {
                        printk(KERN_WARNING "a.out ELF interpreter %s is "
                                "deprecated and will not be supported "
                                "after Linux 2.6.25\n", elf_interpreter);
                        warn++;
                }

                retval = -ELIBBAD;
                if (!interpreter_type)
                        goto out_free_dentry;

                /* Make sure only one type was selected */
                if ((interpreter_type & INTERPRETER_ELF) &&
                     interpreter_type != INTERPRETER_ELF) {
                        // FIXME - ratelimit this before re-enabling
                        // printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
                        interpreter_type = INTERPRETER_ELF;
                }
                /* Verify the interpreter has a valid arch */
                if ((interpreter_type == INTERPRETER_ELF) &&
                    !elf_check_arch(&loc->interp_elf_ex))
                        goto out_free_dentry;
        } else {
                /* Executables without an interpreter also need a personality */
                SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
        }
        /* OK, we are done with that, now set up the arg stuff,
           and then start this sucker up */
        if ((!bprm->sh_bang) && (interpreter_type == INTERPRETER_AOUT)) {
                char *passed_p = passed_fileno;
                sprintf(passed_fileno, "%d", elf_exec_fileno);

                if (elf_interpreter) {
                        retval = copy_strings_kernel(1, &passed_p, bprm);
                        if (retval)
                                goto out_free_dentry;
                        bprm->argc++;
                }
        }

        /* Flush all traces of the currently running executable */
        retval = flush_old_exec(bprm);
        if (retval)
                goto out_free_dentry;

        /* Discard our unneeded old files struct */
        if (files) {
                put_files_struct(files);
                files = NULL;
        }
        /* OK, This is the point of no return */
        current->flags &= ~PF_FORKNOEXEC;
        current->mm->def_flags = def_flags;

        /* Do this immediately, since STACK_TOP as used in setup_arg_pages
           may depend on the personality.  */
        SET_PERSONALITY(loc->elf_ex, ibcs2_interpreter);
        if (elf_read_implies_exec(loc->elf_ex, executable_stack))
                current->personality |= READ_IMPLIES_EXEC;

        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                current->flags |= PF_RANDOMIZE;
        arch_pick_mmap_layout(current->mm);
        /* Do this so that we can load the interpreter, if need be.  We will
           change some of these later */
        current->mm->free_area_cache = current->mm->mmap_base;
        current->mm->cached_hole_size = 0;
        retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
                                 executable_stack);
        if (retval < 0) {
                send_sig(SIGKILL, current, 0);
                goto out_free_dentry;
        }

        current->mm->start_stack = bprm->p;
        /* Now we do a little grungy work by mmaping the ELF image into
           the correct location in memory.  At this point, we assume that
           the image should be loaded at fixed address, not at a variable
           address. */
        for (i = 0, elf_ppnt = elf_phdata;
             i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
                int elf_prot = 0, elf_flags;
                unsigned long k, vaddr;

                if (elf_ppnt->p_type != PT_LOAD)
                        continue;
                if (unlikely (elf_brk > elf_bss)) {
                        unsigned long nbyte;

                        /* There was a PT_LOAD segment with p_memsz > p_filesz
                           before this one. Map anonymous pages, if needed,
                           and clear the area.  */
                        retval = set_brk (elf_bss + load_bias,
                                          elf_brk + load_bias);
                        if (retval) {
                                send_sig(SIGKILL, current, 0);
                                goto out_free_dentry;
                        }
                        nbyte = ELF_PAGEOFFSET(elf_bss);
                        if (nbyte) {
                                nbyte = ELF_MIN_ALIGN - nbyte;
                                if (nbyte > elf_brk - elf_bss)
                                        nbyte = elf_brk - elf_bss;
                                if (clear_user((void __user *)elf_bss +
                                                        load_bias, nbyte)) {
                                        /*
                                         * This bss-zeroing can fail if the ELF
                                         * file specifies odd protections. So
                                         * we don't check the return value
                                         */
                                }
                        }
                }
->p_flags
& PF_R
)
868 elf_prot
|= PROT_READ
;
869 if (elf_ppnt
->p_flags
& PF_W
)
870 elf_prot
|= PROT_WRITE
;
871 if (elf_ppnt
->p_flags
& PF_X
)
872 elf_prot
|= PROT_EXEC
;
874 elf_flags
= MAP_PRIVATE
| MAP_DENYWRITE
| MAP_EXECUTABLE
;
                vaddr = elf_ppnt->p_vaddr;
                if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
                        elf_flags |= MAP_FIXED;
                } else if (loc->elf_ex.e_type == ET_DYN) {
                        /* Try and get dynamic programs out of the way of the
                         * default mmap base, as well as whatever program they
                         * might try to exec.  This is because the brk will
                         * follow the loader, and is not movable.  */
                        load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
                }

                error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
                                elf_prot, elf_flags);
                if (BAD_ADDR(error)) {
                        send_sig(SIGKILL, current, 0);
                        retval = IS_ERR((void *)error) ?
                                PTR_ERR((void*)error) : -EINVAL;
                        goto out_free_dentry;
                }
                if (!load_addr_set) {
                        load_addr_set = 1;
                        load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
                        if (loc->elf_ex.e_type == ET_DYN) {
                                load_bias += error -
                                             ELF_PAGESTART(load_bias + vaddr);
                                load_addr += load_bias;
                                reloc_func_desc = load_bias;
                        }
                }
                k = elf_ppnt->p_vaddr;
                if (k < start_code)
                        start_code = k;
                if (start_data < k)
                        start_data = k;
                /*
                 * Check to see if the section's size will overflow the
                 * allowed task size. Note that p_filesz must always be
                 * <= p_memsz so it is only necessary to check p_memsz.
                 */
                if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
                    elf_ppnt->p_memsz > TASK_SIZE ||
                    TASK_SIZE - elf_ppnt->p_memsz < k) {
                        /* set_brk can never work. Avoid overflows. */
                        send_sig(SIGKILL, current, 0);
                        retval = -EINVAL;
                        goto out_free_dentry;
                }

                k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;

                if (k > elf_bss)
                        elf_bss = k;
                if ((elf_ppnt->p_flags & PF_X) && end_code < k)
                        end_code = k;
                if (end_data < k)
                        end_data = k;
                k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
                if (k > elf_brk)
                        elf_brk = k;
        }
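        /*
         * At this point load_bias holds the delta between link-time and
         * runtime addresses (zero for an ET_EXEC binary), so every
         * link-time quantity gathered in the loop above is now relocated
         * by it.
         */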
        loc->elf_ex.e_entry += load_bias;
        elf_bss += load_bias;
        elf_brk += load_bias;
        start_code += load_bias;
        end_code += load_bias;
        start_data += load_bias;
        end_data += load_bias;
        /* Calling set_brk effectively mmaps the pages that we need
         * for the bss and break sections.  We must do this before
         * mapping in the interpreter, to make sure it doesn't wind
         * up getting placed where the bss needs to go.
         */
        retval = set_brk(elf_bss, elf_brk);
        if (retval) {
                send_sig(SIGKILL, current, 0);
                goto out_free_dentry;
        }
        if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
                send_sig(SIGSEGV, current, 0);
                retval = -EFAULT; /* Nobody gets to see this, but.. */
                goto out_free_dentry;
        }
        if (elf_interpreter) {
                if (interpreter_type == INTERPRETER_AOUT)
                        elf_entry = load_aout_interp(&loc->interp_ex,
                                                     interpreter);
                else
                        elf_entry = load_elf_interp(&loc->interp_elf_ex,
                                                    interpreter,
                                                    &interp_load_addr);
                if (BAD_ADDR(elf_entry)) {
                        force_sig(SIGSEGV, current);
                        retval = IS_ERR((void *)elf_entry) ?
                                        (int)elf_entry : -EINVAL;
                        goto out_free_dentry;
                }
                reloc_func_desc = interp_load_addr;

                allow_write_access(interpreter);
                fput(interpreter);
                kfree(elf_interpreter);
        } else {
                elf_entry = loc->elf_ex.e_entry;
                if (BAD_ADDR(elf_entry)) {
                        force_sig(SIGSEGV, current);
                        retval = -EINVAL;
                        goto out_free_dentry;
                }
        }
        kfree(elf_phdata);

        if (interpreter_type != INTERPRETER_AOUT)
                sys_close(elf_exec_fileno);

        set_binfmt(&elf_format);

#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
        retval = arch_setup_additional_pages(bprm, executable_stack);
        if (retval < 0) {
                send_sig(SIGKILL, current, 0);
                goto out;
        }
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */
        compute_creds(bprm);
        current->flags &= ~PF_FORKNOEXEC;
        retval = create_elf_tables(bprm, &loc->elf_ex,
                          (interpreter_type == INTERPRETER_AOUT),
                          load_addr, interp_load_addr);
        if (retval < 0) {
                send_sig(SIGKILL, current, 0);
                goto out;
        }
        /* N.B. passed_fileno might not be initialized? */
        if (interpreter_type == INTERPRETER_AOUT)
                current->mm->arg_start += strlen(passed_fileno) + 1;
        current->mm->end_code = end_code;
        current->mm->start_code = start_code;
        current->mm->start_data = start_data;
        current->mm->end_data = end_data;
        current->mm->start_stack = bprm->p;
        if (current->personality & MMAP_PAGE_ZERO) {
                /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
                   and some applications "depend" upon this behavior.
                   Since we do not have the power to recompile these, we
                   emulate the SVr4 behavior. Sigh. */
                down_write(&current->mm->mmap_sem);
                error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
                                MAP_FIXED | MAP_PRIVATE, 0);
                up_write(&current->mm->mmap_sem);
        }
#ifdef ELF_PLAT_INIT
        /*
         * The ABI may specify that certain registers be set up in special
         * ways (on i386 %edx is the address of a DT_FINI function, for
         * example).  In addition, it may also specify (eg, PowerPC64 ELF)
         * that the e_entry field is the address of the function descriptor
         * for the startup routine, rather than the address of the startup
         * routine itself.  This macro performs whatever initialization to
         * the regs structure is required as well as any relocations to the
         * function descriptor entries when executing dynamically linked apps.
         */
        ELF_PLAT_INIT(regs, reloc_func_desc);
#endif
        start_thread(regs, elf_entry, bprm->p);
        if (unlikely(current->ptrace & PT_PTRACED)) {
                if (current->ptrace & PT_TRACE_EXEC)
                        ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
                else
                        send_sig(SIGTRAP, current, 0);
        }
        retval = 0;
out:
        kfree(loc);
out_ret:
        return retval;

        /* error cleanup */
out_free_dentry:
        allow_write_access(interpreter);
        if (interpreter)
                fput(interpreter);
out_free_interp:
        kfree(elf_interpreter);
out_free_file:
        sys_close(elf_exec_fileno);
out_free_fh:
        if (files)
                reset_files_struct(current, files);
out_free_ph:
        kfree(elf_phdata);
        goto out;
}
/* This is really simpleminded and specialized - we are loading an
   a.out library that is given an ELF header. */
static int load_elf_library(struct file *file)
{
        struct elf_phdr *elf_phdata;
        struct elf_phdr *eppnt;
        unsigned long elf_bss, bss, len;
        int retval, error, i, j;
        struct elfhdr elf_ex;

        error = -ENOEXEC;
        retval = kernel_read(file, 0, (char *)&elf_ex, sizeof(elf_ex));
        if (retval != sizeof(elf_ex))
                goto out;

        if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
                goto out;

        /* First of all, some simple consistency checks */
        if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
            !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
                goto out;
        /* Now read in all of the header information */

        j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
        /* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */

        error = -ENOMEM;
        elf_phdata = kmalloc(j, GFP_KERNEL);
        if (!elf_phdata)
                goto out;

        eppnt = elf_phdata;
        error = -ENOEXEC;
        retval = kernel_read(file, elf_ex.e_phoff, (char *)eppnt, j);
        if (retval != j)
                goto out_free_ph;

        for (j = 0, i = 0; i < elf_ex.e_phnum; i++)
                if ((eppnt + i)->p_type == PT_LOAD)
                        j++;
        if (j != 1)
                goto out_free_ph;

        while (eppnt->p_type != PT_LOAD)
                eppnt++;
1128 down_write(¤t
->mm
->mmap_sem
);
1129 error
= do_mmap(file
,
1130 ELF_PAGESTART(eppnt
->p_vaddr
),
1132 ELF_PAGEOFFSET(eppnt
->p_vaddr
)),
1133 PROT_READ
| PROT_WRITE
| PROT_EXEC
,
1134 MAP_FIXED
| MAP_PRIVATE
| MAP_DENYWRITE
,
1136 ELF_PAGEOFFSET(eppnt
->p_vaddr
)));
1137 up_write(¤t
->mm
->mmap_sem
);
1138 if (error
!= ELF_PAGESTART(eppnt
->p_vaddr
))
1141 elf_bss
= eppnt
->p_vaddr
+ eppnt
->p_filesz
;
1142 if (padzero(elf_bss
)) {
1147 len
= ELF_PAGESTART(eppnt
->p_filesz
+ eppnt
->p_vaddr
+
1149 bss
= eppnt
->p_memsz
+ eppnt
->p_vaddr
;
1151 down_write(¤t
->mm
->mmap_sem
);
1152 do_brk(len
, bss
- len
);
1153 up_write(¤t
->mm
->mmap_sem
);
/*
 * Note that some platforms still use traditional core dumps and not
 * the ELF core dump.  Each platform can select it as appropriate.
 */
#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)

/*
 * ELF core dumper
 *
 * Modelled on fs/exec.c:aout_core_dump()
 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
 */
/*
 * These are the only things you should do on a core-file: use only these
 * functions to write out all the necessary info.
 */
static int dump_write(struct file *file, const void *addr, int nr)
{
        return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
}
static int dump_seek(struct file *file, loff_t off)
{
        if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
                if (file->f_op->llseek(file, off, SEEK_CUR) < 0)
                        return 0;
        } else {
                char *buf = (char *)get_zeroed_page(GFP_KERNEL);
                if (!buf)
                        return 0;
                while (off > 0) {
                        unsigned long n = off;
                        if (n > PAGE_SIZE)
                                n = PAGE_SIZE;
                        if (!dump_write(file, buf, n))
                                return 0;
                        off -= n;
                }
                free_page((unsigned long)buf);
        }
        return 1;
}
/*
 * Decide what to dump of a segment, part, all or none.
 */
static unsigned long vma_dump_size(struct vm_area_struct *vma,
                                   unsigned long mm_flags)
{
        /* The vma can be set up to tell us the answer directly.  */
        if (vma->vm_flags & VM_ALWAYSDUMP)
                goto whole;

        /* Do not dump I/O mapped devices or special mappings */
        if (vma->vm_flags & (VM_IO | VM_RESERVED))
                return 0;

#define FILTER(type)    (mm_flags & (1UL << MMF_DUMP_##type))

        /* By default, dump shared memory if mapped from an anonymous file. */
        if (vma->vm_flags & VM_SHARED) {
                if (vma->vm_file->f_path.dentry->d_inode->i_nlink == 0 ?
                    FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED))
                        goto whole;
                return 0;
        }

        /* Dump segments that have been written to.  */
        if (vma->anon_vma && FILTER(ANON_PRIVATE))
                goto whole;
        if (vma->vm_file == NULL)
                return 0;

        if (FILTER(MAPPED_PRIVATE))
                goto whole;

        /*
         * If this looks like the beginning of a DSO or executable mapping,
         * check for an ELF header.  If we find one, dump the first page to
         * aid in determining what was mapped here.
         */
        if (FILTER(ELF_HEADERS) && vma->vm_file != NULL && vma->vm_pgoff == 0) {
                u32 __user *header = (u32 __user *) vma->vm_start;
                u32 word;
                /*
                 * Doing it this way gets the constant folded by GCC.
                 */
                union {
                        u32 cmp;
                        char elfmag[SELFMAG];
                } magic;
                BUILD_BUG_ON(SELFMAG != sizeof word);
                magic.elfmag[EI_MAG0] = ELFMAG0;
                magic.elfmag[EI_MAG1] = ELFMAG1;
                magic.elfmag[EI_MAG2] = ELFMAG2;
                magic.elfmag[EI_MAG3] = ELFMAG3;
                if (get_user(word, header) == 0 && word == magic.cmp)
                        return PAGE_SIZE;
        }

#undef  FILTER

        return 0;

whole:
        return vma->vm_end - vma->vm_start;
}
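/*
 * The MMF_DUMP_* bits tested by FILTER() above live in mm->flags and are
 * controlled from userspace through /proc/<pid>/coredump_filter, so what
 * ends up in the core file is a per-process policy decision rather than
 * something hardcoded here.
 */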
/* An ELF note in memory */
struct memelfnote
{
        const char *name;
        int type;
        unsigned int datasz;
        void *data;
};

static int notesize(struct memelfnote *en)
{
        int sz;

        sz = sizeof(struct elf_note);
        sz += roundup(strlen(en->name) + 1, 4);
        sz += roundup(en->datasz, 4);

        return sz;
}
#define DUMP_WRITE(addr, nr, foffset)   \
        do { if (!dump_write(file, (addr), (nr))) return 0; *foffset += (nr); } while(0)

static int alignfile(struct file *file, loff_t *foffset)
{
        static const char buf[4] = { 0, };
        DUMP_WRITE(buf, roundup(*foffset, 4) - *foffset, foffset);
        return 1;
}
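/*
 * The padding that alignfile() emits matches the on-disk ELF note format:
 * within a PT_NOTE segment, both the name string and the descriptor data
 * of each note are padded out to a 4-byte boundary.
 */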
static int writenote(struct memelfnote *men, struct file *file,
                        loff_t *foffset)
{
        struct elf_note en;
        en.n_namesz = strlen(men->name) + 1;
        en.n_descsz = men->datasz;
        en.n_type = men->type;

        DUMP_WRITE(&en, sizeof(en), foffset);
        DUMP_WRITE(men->name, en.n_namesz, foffset);
        if (!alignfile(file, foffset))
                return 0;
        DUMP_WRITE(men->data, men->datasz, foffset);
        if (!alignfile(file, foffset))
                return 0;

        return 1;
}
#undef DUMP_WRITE

#define DUMP_WRITE(addr, nr)    \
        if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
                goto end_coredump;
#define DUMP_SEEK(off)  \
        if (!dump_seek(file, (off))) \
                goto end_coredump;
static void fill_elf_header(struct elfhdr *elf, int segs)
{
        memcpy(elf->e_ident, ELFMAG, SELFMAG);
        elf->e_ident[EI_CLASS] = ELF_CLASS;
        elf->e_ident[EI_DATA] = ELF_DATA;
        elf->e_ident[EI_VERSION] = EV_CURRENT;
        elf->e_ident[EI_OSABI] = ELF_OSABI;
        memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);

        elf->e_type = ET_CORE;
        elf->e_machine = ELF_ARCH;
        elf->e_version = EV_CURRENT;
        elf->e_entry = 0;
        elf->e_phoff = sizeof(struct elfhdr);
        elf->e_shoff = 0;
        elf->e_flags = ELF_CORE_EFLAGS;
        elf->e_ehsize = sizeof(struct elfhdr);
        elf->e_phentsize = sizeof(struct elf_phdr);
        elf->e_phnum = segs;
        elf->e_shentsize = 0;
        elf->e_shnum = 0;
        elf->e_shstrndx = 0;
        return;
}
static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
{
        phdr->p_type = PT_NOTE;
        phdr->p_offset = offset;
        phdr->p_vaddr = 0;
        phdr->p_paddr = 0;
        phdr->p_filesz = sz;
        phdr->p_memsz = 0;
        phdr->p_flags = 0;
        phdr->p_align = 0;
        return;
}

static void fill_note(struct memelfnote *note, const char *name, int type,
                unsigned int sz, void *data)
{
        note->name = name;
        note->type = type;
        note->datasz = sz;
        note->data = data;
        return;
}
/*
 * fill up all the fields in prstatus from the given task struct, except
 * registers which need to be filled up separately.
 */
static void fill_prstatus(struct elf_prstatus *prstatus,
                struct task_struct *p, long signr)
{
        prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
        prstatus->pr_sigpend = p->pending.signal.sig[0];
        prstatus->pr_sighold = p->blocked.sig[0];
        prstatus->pr_pid = task_pid_vnr(p);
        prstatus->pr_ppid = task_pid_vnr(p->parent);
        prstatus->pr_pgrp = task_pgrp_vnr(p);
        prstatus->pr_sid = task_session_vnr(p);
        if (thread_group_leader(p)) {
                /*
                 * This is the record for the group leader.  Add in the
                 * cumulative times of previous dead threads.  This total
                 * won't include the time of each live thread whose state
                 * is included in the core dump.  The final total reported
                 * to our parent process when it calls wait4 will include
                 * those sums as well as the little bit more time it takes
                 * this and each other thread to finish dying after the
                 * core dump synchronization phase.
                 */
                cputime_to_timeval(cputime_add(p->utime, p->signal->utime),
                                   &prstatus->pr_utime);
                cputime_to_timeval(cputime_add(p->stime, p->signal->stime),
                                   &prstatus->pr_stime);
        } else {
                cputime_to_timeval(p->utime, &prstatus->pr_utime);
                cputime_to_timeval(p->stime, &prstatus->pr_stime);
        }
        cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
        cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
}
static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
                       struct mm_struct *mm)
{
        unsigned int i, len;

        /* first copy the parameters from user space */
        memset(psinfo, 0, sizeof(struct elf_prpsinfo));

        len = mm->arg_end - mm->arg_start;
        if (len >= ELF_PRARGSZ)
                len = ELF_PRARGSZ-1;
        if (copy_from_user(&psinfo->pr_psargs,
                           (const char __user *)mm->arg_start, len))
                return -EFAULT;
        for (i = 0; i < len; i++)
                if (psinfo->pr_psargs[i] == 0)
                        psinfo->pr_psargs[i] = ' ';
        psinfo->pr_psargs[len] = 0;

        psinfo->pr_pid = task_pid_vnr(p);
        psinfo->pr_ppid = task_pid_vnr(p->parent);
        psinfo->pr_pgrp = task_pgrp_vnr(p);
        psinfo->pr_sid = task_session_vnr(p);

        i = p->state ? ffz(~p->state) + 1 : 0;
        psinfo->pr_state = i;
        psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
        psinfo->pr_zomb = psinfo->pr_sname == 'Z';
        psinfo->pr_nice = task_nice(p);
        psinfo->pr_flag = p->flags;
        SET_UID(psinfo->pr_uid, p->uid);
        SET_GID(psinfo->pr_gid, p->gid);
        strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));

        return 0;
}
/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status
{
        struct list_head list;
        struct elf_prstatus prstatus;   /* NT_PRSTATUS */
        elf_fpregset_t fpu;             /* NT_PRFPREG */
        struct task_struct *thread;
#ifdef ELF_CORE_COPY_XFPREGS
        elf_fpxregset_t xfpu;           /* ELF_CORE_XFPREG_TYPE */
#endif
        struct memelfnote notes[3];
        int num_notes;
};
/*
 * In order to add the specific thread information for the elf file format,
 * we need to keep a linked list of every threads pr_status and then create
 * a single section for them in the final core file.
 */
static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
{
        int sz = 0;
        struct task_struct *p = t->thread;
        t->num_notes = 0;

        fill_prstatus(&t->prstatus, p, signr);
        elf_core_copy_task_regs(p, &t->prstatus.pr_reg);

        fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
                  &(t->prstatus));
        t->num_notes++;
        sz += notesize(&t->notes[0]);

        if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL,
                                                                &t->fpu))) {
                fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
                          &(t->fpu));
                t->num_notes++;
                sz += notesize(&t->notes[1]);
        }

#ifdef ELF_CORE_COPY_XFPREGS
        if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
                fill_note(&t->notes[2], "LINUX", ELF_CORE_XFPREG_TYPE,
                          sizeof(t->xfpu), &t->xfpu);
                t->num_notes++;
                sz += notesize(&t->notes[2]);
        }
#endif
        return sz;
}
static struct vm_area_struct *first_vma(struct task_struct *tsk,
                                        struct vm_area_struct *gate_vma)
{
        struct vm_area_struct *ret = tsk->mm->mmap;

        if (ret)
                return ret;
        return gate_vma;
}
/*
 * Helper function for iterating across a vma list.  It ensures that the
 * caller will visit `gate_vma' prior to terminating the search.
 */
static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
                                        struct vm_area_struct *gate_vma)
{
        struct vm_area_struct *ret;

        ret = this_vma->vm_next;
        if (ret)
                return ret;
        if (this_vma == gate_vma)
                return NULL;
        return gate_vma;
}
/*
 * Actual dumper
 *
 * This is a two-pass process; first we find the offsets of the bits,
 * and then they are actually written out.  If we run out of core limit
 * we just truncate.
 */
static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit)
{
#define NUM_NOTES       6
        int has_dumped = 0;
        mm_segment_t fs;
        int segs;
        size_t size = 0;
        int i;
        struct vm_area_struct *vma, *gate_vma;
        struct elfhdr *elf = NULL;
        loff_t offset = 0, dataoff, foffset;
        int numnote;
        struct memelfnote *notes = NULL;
        struct elf_prstatus *prstatus = NULL;   /* NT_PRSTATUS */
        struct elf_prpsinfo *psinfo = NULL;     /* NT_PRPSINFO */
        struct task_struct *g, *p;
        LIST_HEAD(thread_list);
        struct list_head *t;
        elf_fpregset_t *fpu = NULL;
#ifdef ELF_CORE_COPY_XFPREGS
        elf_fpxregset_t *xfpu = NULL;
#endif
        int thread_status_size = 0;
        elf_addr_t *auxv;
        unsigned long mm_flags;
        /*
         * We no longer stop all VM operations.
         *
         * This is because those processes that could possibly change map_count
         * or the mmap / vma pages are now blocked in do_exit on current
         * finishing this core dump.
         *
         * Only ptrace can touch these memory addresses, but it doesn't change
         * the map_count or the pages allocated. So no possibility of crashing
         * exists while dumping the mm->vm_next areas to the core file.
         */
        /* alloc memory for large data structures: too large to be on stack */
        elf = kmalloc(sizeof(*elf), GFP_KERNEL);
        if (!elf)
                goto cleanup;
        prstatus = kmalloc(sizeof(*prstatus), GFP_KERNEL);
        if (!prstatus)
                goto cleanup;
        psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
        if (!psinfo)
                goto cleanup;
        notes = kmalloc(NUM_NOTES * sizeof(struct memelfnote), GFP_KERNEL);
        if (!notes)
                goto cleanup;
        fpu = kmalloc(sizeof(*fpu), GFP_KERNEL);
        if (!fpu)
                goto cleanup;
#ifdef ELF_CORE_COPY_XFPREGS
        xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL);
        if (!xfpu)
                goto cleanup;
#endif
        if (signr) {
                struct elf_thread_status *tmp;
                rcu_read_lock();
                do_each_thread(g,p)
                        if (current->mm == p->mm && current != p) {
                                tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
                                if (!tmp) {
                                        rcu_read_unlock();
                                        goto cleanup;
                                }
                                tmp->thread = p;
                                list_add(&tmp->list, &thread_list);
                        }
                while_each_thread(g,p);
                rcu_read_unlock();

                list_for_each(t, &thread_list) {
                        struct elf_thread_status *tmp;
                        int sz;

                        tmp = list_entry(t, struct elf_thread_status, list);
                        sz = elf_dump_thread_status(signr, tmp);
                        thread_status_size += sz;
                }
        }
        /* now collect the dump for the current */
        memset(prstatus, 0, sizeof(*prstatus));
        fill_prstatus(prstatus, current, signr);
        elf_core_copy_regs(&prstatus->pr_reg, regs);
        segs = current->mm->map_count;
#ifdef ELF_CORE_EXTRA_PHDRS
        segs += ELF_CORE_EXTRA_PHDRS;
#endif

        gate_vma = get_gate_vma(current);
        if (gate_vma != NULL)
                segs++;

        /* Set up header */
        fill_elf_header(elf, segs + 1); /* including notes section */

        has_dumped = 1;
        current->flags |= PF_DUMPCORE;
        /*
         * Set up the notes in similar form to SVR4 core dumps made
         * with info from their /proc.
         */

        fill_note(notes + 0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus);
        fill_psinfo(psinfo, current->group_leader, current->mm);
        fill_note(notes + 1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);

        numnote = 2;
        auxv = (elf_addr_t *)current->mm->saved_auxv;

        i = 0;
        do
                i += 2;
        while (auxv[i - 2] != AT_NULL);
        fill_note(&notes[numnote++], "CORE", NT_AUXV,
                  i * sizeof(elf_addr_t), auxv);
        /* Try to dump the FPU. */
        if ((prstatus->pr_fpvalid =
             elf_core_copy_task_fpregs(current, regs, fpu)))
                fill_note(notes + numnote++,
                          "CORE", NT_PRFPREG, sizeof(*fpu), fpu);
#ifdef ELF_CORE_COPY_XFPREGS
        if (elf_core_copy_task_xfpregs(current, xfpu))
                fill_note(notes + numnote++,
                          "LINUX", ELF_CORE_XFPREG_TYPE, sizeof(*xfpu), xfpu);
#endif
        fs = get_fs();
        set_fs(KERNEL_DS);

        DUMP_WRITE(elf, sizeof(*elf));
        offset += sizeof(*elf);                         /* Elf header */
        offset += (segs + 1) * sizeof(struct elf_phdr); /* Program headers */
        foffset = offset;

        /* Write notes phdr entry */
        {
                struct elf_phdr phdr;
                int sz = 0;

                for (i = 0; i < numnote; i++)
                        sz += notesize(notes + i);

                sz += thread_status_size;

                sz += elf_coredump_extra_notes_size();

                fill_elf_note_phdr(&phdr, sz, offset);
                offset += sz;
                DUMP_WRITE(&phdr, sizeof(phdr));
        }

        dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
        /*
         * We must use the same mm->flags while dumping core to avoid
         * inconsistency between the program headers and bodies, otherwise an
         * unusable core file can be generated.
         */
        mm_flags = current->mm->flags;
        /* Write program headers for segments dump */
        for (vma = first_vma(current, gate_vma); vma != NULL;
                        vma = next_vma(vma, gate_vma)) {
                struct elf_phdr phdr;

                phdr.p_type = PT_LOAD;
                phdr.p_offset = offset;
                phdr.p_vaddr = vma->vm_start;
                phdr.p_paddr = 0;
                phdr.p_filesz = vma_dump_size(vma, mm_flags);
                phdr.p_memsz = vma->vm_end - vma->vm_start;
                offset += phdr.p_filesz;
                phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
                if (vma->vm_flags & VM_WRITE)
                        phdr.p_flags |= PF_W;
                if (vma->vm_flags & VM_EXEC)
                        phdr.p_flags |= PF_X;
                phdr.p_align = ELF_EXEC_PAGESIZE;

                DUMP_WRITE(&phdr, sizeof(phdr));
        }
#ifdef ELF_CORE_WRITE_EXTRA_PHDRS
        ELF_CORE_WRITE_EXTRA_PHDRS;
#endif
        /* write out the notes section */
        for (i = 0; i < numnote; i++)
                if (!writenote(notes + i, file, &foffset))
                        goto end_coredump;

        if (elf_coredump_extra_notes_write(file, &foffset))
                goto end_coredump;
        /* write out the thread status notes section */
        list_for_each(t, &thread_list) {
                struct elf_thread_status *tmp =
                                list_entry(t, struct elf_thread_status, list);

                for (i = 0; i < tmp->num_notes; i++)
                        if (!writenote(&tmp->notes[i], file, &foffset))
                                goto end_coredump;
        }

        /* Align to page */
        DUMP_SEEK(dataoff - foffset);
        for (vma = first_vma(current, gate_vma); vma != NULL;
                        vma = next_vma(vma, gate_vma)) {
                unsigned long addr;
                unsigned long end;

                end = vma->vm_start + vma_dump_size(vma, mm_flags);

                for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
                        struct page *page;
                        struct vm_area_struct *vma;

                        if (get_user_pages(current, current->mm, addr, 1, 0, 1,
                                                &page, &vma) <= 0) {
                                DUMP_SEEK(PAGE_SIZE);
                        } else {
                                if (page == ZERO_PAGE(0)) {
                                        if (!dump_seek(file, PAGE_SIZE)) {
                                                page_cache_release(page);
                                                goto end_coredump;
                                        }
                                } else {
                                        void *kaddr;
                                        flush_cache_page(vma, addr,
                                                         page_to_pfn(page));
                                        kaddr = kmap(page);
                                        if ((size += PAGE_SIZE) > limit ||
                                            !dump_write(file, kaddr,
                                                        PAGE_SIZE)) {
                                                kunmap(page);
                                                page_cache_release(page);
                                                goto end_coredump;
                                        }
                                        kunmap(page);
                                }
                                page_cache_release(page);
                        }
                }
        }
#ifdef ELF_CORE_WRITE_EXTRA_DATA
        ELF_CORE_WRITE_EXTRA_DATA;
#endif

end_coredump:
        set_fs(fs);
cleanup:
        while (!list_empty(&thread_list)) {
                struct list_head *tmp = thread_list.next;
                list_del(tmp);
                kfree(list_entry(tmp, struct elf_thread_status, list));
        }

        kfree(elf);
        kfree(prstatus);
        kfree(psinfo);
        kfree(notes);
        kfree(fpu);
#ifdef ELF_CORE_COPY_XFPREGS
        kfree(xfpu);
#endif
        return has_dumped;
#undef NUM_NOTES
}

#endif          /* USE_ELF_CORE_DUMP */
static int __init init_elf_binfmt(void)
{
        return register_binfmt(&elf_format);
}

static void __exit exit_elf_binfmt(void)
{
        /* Remove the ELF loader. */
        unregister_binfmt(&elf_format);
}

core_initcall(init_elf_binfmt);
module_exit(exit_elf_binfmt);
MODULE_LICENSE("GPL");