/*
 * linux/fs/binfmt_elf.c
 *
 * These are the functions used to load ELF format executables as used
 * on SVr4 machines.  Information on the format may be found in the book
 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
 * Tools".
 *
 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/binfmts.h>
#include <linux/string.h>
#include <linux/file.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/personality.h>
#include <linux/elfcore.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/smp.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/utsname.h>
#include <asm/uaccess.h>
#include <asm/param.h>
#include <asm/page.h>
static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs);
static int load_elf_library(struct file *);
static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
                                int, int, unsigned long);
/*
 * If we don't support core dumping, then supply a NULL so we
 * don't even try.
 */
#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
#else
#define elf_core_dump   NULL
#endif
#if ELF_EXEC_PAGESIZE > PAGE_SIZE
#define ELF_MIN_ALIGN   ELF_EXEC_PAGESIZE
#else
#define ELF_MIN_ALIGN   PAGE_SIZE
#endif
#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS 0
#endif
#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
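
/*
 * Worked example (illustrative, assuming ELF_MIN_ALIGN == 4096):
 *      ELF_PAGESTART(0x12345)  == 0x12000      (round down to page start)
 *      ELF_PAGEOFFSET(0x12345) == 0x345        (offset within the page)
 *      ELF_PAGEALIGN(0x12345)  == 0x13000      (round up to page boundary)
 */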
static struct linux_binfmt elf_format = {
        .module         = THIS_MODULE,
        .load_binary    = load_elf_binary,
        .load_shlib     = load_elf_library,
        .core_dump      = elf_core_dump,
        .min_coredump   = ELF_EXEC_PAGESIZE,
        .hasvdso        = 1
};

#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)
static int set_brk(unsigned long start, unsigned long end)
{
        start = ELF_PAGEALIGN(start);
        end = ELF_PAGEALIGN(end);
        if (end > start) {
                unsigned long addr;
                down_write(&current->mm->mmap_sem);
                addr = do_brk(start, end - start);
                up_write(&current->mm->mmap_sem);
                if (BAD_ADDR(addr))
                        return addr;
        }
        current->mm->start_brk = current->mm->brk = end;
        return 0;
}
/* We need to explicitly zero any fractional pages
   after the data section (i.e. bss).  This would
   contain the junk from the file that should not
   be in memory
 */
static int padzero(unsigned long elf_bss)
{
        unsigned long nbyte;

        nbyte = ELF_PAGEOFFSET(elf_bss);
        if (nbyte) {
                nbyte = ELF_MIN_ALIGN - nbyte;
                if (clear_user((void __user *) elf_bss, nbyte))
                        return -EFAULT;
        }
        return 0;
}
/* Let's use some macros to make this stack manipulation a little clearer */
#ifdef CONFIG_STACK_GROWSUP
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
#define STACK_ROUND(sp, items) \
        ((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ \
        elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \
        old_sp; })
#else
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
#define STACK_ROUND(sp, items) \
        (((unsigned long) (sp - items)) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
#endif
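
/*
 * Illustrative note: on a downward-growing stack, STACK_ALLOC(sp, 16)
 * moves sp down by 16 bytes and evaluates to the new (lower) address,
 * while the CONFIG_STACK_GROWSUP variant returns the old sp and bumps
 * it upward - either way the caller gets a fresh block of `len' bytes
 * on the stack.
 */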
#ifndef ELF_BASE_PLATFORM
/*
 * AT_BASE_PLATFORM indicates the "real" hardware/microarchitecture.
 * If the arch defines ELF_BASE_PLATFORM (in asm/elf.h), the value
 * will be copied to the user stack in the same manner as AT_PLATFORM.
 */
#define ELF_BASE_PLATFORM NULL
#endif
static int
create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
                unsigned long load_addr, unsigned long interp_load_addr)
{
        unsigned long p = bprm->p;
        int argc = bprm->argc;
        int envc = bprm->envc;
        elf_addr_t __user *argv;
        elf_addr_t __user *envp;
        elf_addr_t __user *sp;
        elf_addr_t __user *u_platform;
        elf_addr_t __user *u_base_platform;
        elf_addr_t __user *u_rand_bytes;
        const char *k_platform = ELF_PLATFORM;
        const char *k_base_platform = ELF_BASE_PLATFORM;
        unsigned char k_rand_bytes[16];
        int items;
        elf_addr_t *elf_info;
        int ei_index = 0;
        const struct cred *cred = current_cred();
        struct vm_area_struct *vma;

        /*
         * In some cases (e.g. Hyper-Threading), we want to avoid L1
         * evictions by the processes running on the same package. One
         * thing we can do is to shuffle the initial stack for them.
         */

        p = arch_align_stack(p);

        /*
         * If this architecture has a platform capability string, copy it
         * to userspace.  In some cases (Sparc), this info is impossible
         * for userspace to get any other way, in others (i386) it is
         * merely difficult.
         */
        u_platform = NULL;
        if (k_platform) {
                size_t len = strlen(k_platform) + 1;

                u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
                if (__copy_to_user(u_platform, k_platform, len))
                        return -EFAULT;
        }

        /*
         * If this architecture has a "base" platform capability
         * string, copy it to userspace.
         */
        u_base_platform = NULL;
        if (k_base_platform) {
                size_t len = strlen(k_base_platform) + 1;

                u_base_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
                if (__copy_to_user(u_base_platform, k_base_platform, len))
                        return -EFAULT;
        }

        /*
         * Generate 16 random bytes for userspace PRNG seeding.
         */
        get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
        u_rand_bytes = (elf_addr_t __user *)
                       STACK_ALLOC(p, sizeof(k_rand_bytes));
        if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
                return -EFAULT;

        /* Create the ELF interpreter info */
        elf_info = (elf_addr_t *)current->mm->saved_auxv;
        /* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */
#define NEW_AUX_ENT(id, val) \
        do { \
                elf_info[ei_index++] = id; \
                elf_info[ei_index++] = val; \
        } while (0)

#ifdef ARCH_DLINFO
        /*
         * ARCH_DLINFO must come first so PPC can do its special alignment of
         * AUXV.
         * update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT() in
         * ARCH_DLINFO changes
         */
        ARCH_DLINFO;
#endif
        NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
        NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
        NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
        NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
        NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
        NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
        NEW_AUX_ENT(AT_BASE, interp_load_addr);
        NEW_AUX_ENT(AT_FLAGS, 0);
        NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
        NEW_AUX_ENT(AT_UID, cred->uid);
        NEW_AUX_ENT(AT_EUID, cred->euid);
        NEW_AUX_ENT(AT_GID, cred->gid);
        NEW_AUX_ENT(AT_EGID, cred->egid);
        NEW_AUX_ENT(AT_SECURE, security_bprm_secureexec(bprm));
        NEW_AUX_ENT(AT_RANDOM, (elf_addr_t)(unsigned long)u_rand_bytes);
        NEW_AUX_ENT(AT_EXECFN, bprm->exec);
        if (k_platform) {
                NEW_AUX_ENT(AT_PLATFORM,
                            (elf_addr_t)(unsigned long)u_platform);
        }
        if (k_base_platform) {
                NEW_AUX_ENT(AT_BASE_PLATFORM,
                            (elf_addr_t)(unsigned long)u_base_platform);
        }
        if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
                NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
        }
#undef NEW_AUX_ENT
        /* AT_NULL is zero; clear the rest too */
        memset(&elf_info[ei_index], 0,
               sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);

        /* And advance past the AT_NULL entry. */
        ei_index += 2;

        sp = STACK_ADD(p, ei_index);

        items = (argc + 1) + (envc + 1) + 1;
        bprm->p = STACK_ROUND(sp, items);

        /* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
        sp = (elf_addr_t __user *)bprm->p - items - ei_index;
        bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
#else
        sp = (elf_addr_t __user *)bprm->p;
#endif

        /*
         * Grow the stack manually; some architectures have a limit on how
         * far ahead a user-space access may be in order to grow the stack.
         */
        vma = find_extend_vma(current->mm, bprm->p);
        if (!vma)
                return -EFAULT;

        /* Now, let's put argc (and argv, envp if appropriate) on the stack */
        if (__put_user(argc, sp++))
                return -EFAULT;
        argv = sp;
        envp = argv + argc + 1;

        /* Populate argv and envp */
        p = current->mm->arg_end = current->mm->arg_start;
        while (argc-- > 0) {
                size_t len;
                if (__put_user((elf_addr_t)p, argv++))
                        return -EFAULT;
                len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
                if (!len || len > MAX_ARG_STRLEN)
                        return -EINVAL;
                p += len;
        }
        if (__put_user(0, argv))
                return -EFAULT;
        current->mm->arg_end = current->mm->env_start = p;
        while (envc-- > 0) {
                size_t len;
                if (__put_user((elf_addr_t)p, envp++))
                        return -EFAULT;
                len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
                if (!len || len > MAX_ARG_STRLEN)
                        return -EINVAL;
                p += len;
        }
        if (__put_user(0, envp))
                return -EFAULT;
        current->mm->env_end = p;

        /* Put the elf_info on the stack in the right place. */
        sp = (elf_addr_t __user *)envp + 1;
        if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
                return -EFAULT;
        return 0;
}
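
/*
 * Illustrative sketch of the initial stack create_elf_tables() leaves
 * behind (downward-growing stack; the argument/environment strings were
 * copied higher up by the exec core, and the STACK_ALLOC'd platform
 * strings and AT_RANDOM bytes sit just below them):
 *
 *      sp ->   argc
 *              argv[0] .. argv[argc-1], NULL
 *              envp[0] .. envp[envc-1], NULL
 *              auxv id/value pairs, terminated by AT_NULL
 */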
#ifndef elf_map

static unsigned long elf_map(struct file *filep, unsigned long addr,
                struct elf_phdr *eppnt, int prot, int type,
                unsigned long total_size)
{
        unsigned long map_addr;
        unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
        unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
        addr = ELF_PAGESTART(addr);
        size = ELF_PAGEALIGN(size);

        /* mmap() will return -EINVAL if given a zero size, but a
         * segment with zero filesize is perfectly valid */
        if (!size)
                return addr;

        down_write(&current->mm->mmap_sem);
        /*
         * total_size is the size of the ELF (interpreter) image.
         * The _first_ mmap needs to know the full size, otherwise
         * randomization might put this image into an overlapping
         * position with the ELF binary image. (since size < total_size)
         * So we first map the 'big' image - and unmap the remainder at
         * the end. (which unmap is needed for ELF images with holes.)
         */
        if (total_size) {
                total_size = ELF_PAGEALIGN(total_size);
                map_addr = do_mmap(filep, addr, total_size, prot, type, off);
                if (!BAD_ADDR(map_addr))
                        do_munmap(current->mm, map_addr+size, total_size-size);
        } else
                map_addr = do_mmap(filep, addr, size, prot, type, off);

        up_write(&current->mm->mmap_sem);
        return(map_addr);
}

#endif /* !elf_map */
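
/*
 * Illustrative example: for a three-page interpreter image whose first
 * PT_LOAD covers a single page, the first elf_map() call maps all three
 * pages (so a randomized base reserves the whole range), then unmaps the
 * trailing two pages; later segments are mapped MAP_FIXED into the hole.
 */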
static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
{
        int i, first_idx = -1, last_idx = -1;

        for (i = 0; i < nr; i++) {
                if (cmds[i].p_type == PT_LOAD) {
                        last_idx = i;
                        if (first_idx == -1)
                                first_idx = i;
                }
        }
        if (first_idx == -1)
                return 0;

        return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
                                ELF_PAGESTART(cmds[first_idx].p_vaddr);
}
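
/*
 * Worked example (illustrative): for PT_LOAD segments at p_vaddr 0
 * (p_memsz 0x1000) and p_vaddr 0x10000 (p_memsz 0x2345), the result is
 * 0x10000 + 0x2345 - ELF_PAGESTART(0) == 0x12345: the span from the
 * page start of the first segment to the end of the last one.
 */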
/* This is much more generalized than the library routine read function,
   so we keep this separate.  Technically the library read function
   is only provided so that we can read a.out libraries that have
   an ELF header */

static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
                struct file *interpreter, unsigned long *interp_map_addr,
                unsigned long no_base)
{
        struct elf_phdr *elf_phdata;
        struct elf_phdr *eppnt;
        unsigned long load_addr = 0;
        int load_addr_set = 0;
        unsigned long last_bss = 0, elf_bss = 0;
        unsigned long error = ~0UL;
        unsigned long total_size;
        int retval, i, size;

        /* First of all, some simple consistency checks */
        if (interp_elf_ex->e_type != ET_EXEC &&
            interp_elf_ex->e_type != ET_DYN)
                goto out;
        if (!elf_check_arch(interp_elf_ex))
                goto out;
        if (!interpreter->f_op || !interpreter->f_op->mmap)
                goto out;

        /*
         * If the size of this structure has changed, then punt, since
         * we will be doing the wrong thing.
         */
        if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
                goto out;
        if (interp_elf_ex->e_phnum < 1 ||
                interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
                goto out;

        /* Now read in all of the header information */
        size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
        if (size > ELF_MIN_ALIGN)
                goto out;
        elf_phdata = kmalloc(size, GFP_KERNEL);
        if (!elf_phdata)
                goto out;

        retval = kernel_read(interpreter, interp_elf_ex->e_phoff,
                             (char *)elf_phdata, size);
        error = -EIO;
        if (retval != size) {
                if (retval < 0)
                        error = retval;
                goto out_close;
        }

        total_size = total_mapping_size(elf_phdata, interp_elf_ex->e_phnum);
        if (!total_size) {
                error = -EINVAL;
                goto out_close;
        }

        eppnt = elf_phdata;
        for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
                if (eppnt->p_type == PT_LOAD) {
                        int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
                        int elf_prot = 0;
                        unsigned long vaddr = 0;
                        unsigned long k, map_addr;

                        if (eppnt->p_flags & PF_R)
                                elf_prot = PROT_READ;
                        if (eppnt->p_flags & PF_W)
                                elf_prot |= PROT_WRITE;
                        if (eppnt->p_flags & PF_X)
                                elf_prot |= PROT_EXEC;
                        vaddr = eppnt->p_vaddr;
                        if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
                                elf_type |= MAP_FIXED;
                        else if (no_base && interp_elf_ex->e_type == ET_DYN)
                                load_addr = -vaddr;

                        map_addr = elf_map(interpreter, load_addr + vaddr,
                                        eppnt, elf_prot, elf_type, total_size);
                        total_size = 0;
                        if (!*interp_map_addr)
                                *interp_map_addr = map_addr;
                        error = map_addr;
                        if (BAD_ADDR(map_addr))
                                goto out_close;

                        if (!load_addr_set &&
                            interp_elf_ex->e_type == ET_DYN) {
                                load_addr = map_addr - ELF_PAGESTART(vaddr);
                                load_addr_set = 1;
                        }

                        /*
                         * Check to see if the section's size will overflow the
                         * allowed task size. Note that p_filesz must always be
                         * <= p_memsz so it's only necessary to check p_memsz.
                         */
                        k = load_addr + eppnt->p_vaddr;
                        if (BAD_ADDR(k) ||
                            eppnt->p_filesz > eppnt->p_memsz ||
                            eppnt->p_memsz > TASK_SIZE ||
                            TASK_SIZE - eppnt->p_memsz < k) {
                                error = -ENOMEM;
                                goto out_close;
                        }

                        /*
                         * Find the end of the file mapping for this phdr, and
                         * keep track of the largest address we see for this.
                         */
                        k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
                        if (k > elf_bss)
                                elf_bss = k;

                        /*
                         * Do the same thing for the memory mapping - between
                         * elf_bss and last_bss is the bss section.
                         */
                        k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
                        if (k > last_bss)
                                last_bss = k;
                }
        }

        /*
         * Now fill out the bss section.  First pad the last page up
         * to the page boundary, and then perform a mmap to make sure
         * that there are zero-mapped pages up to and including the
         * last bss page.
         */
        if (padzero(elf_bss)) {
                error = -EFAULT;
                goto out_close;
        }

        /* What we have mapped so far */
        elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);

        /* Map the last of the bss segment */
        if (last_bss > elf_bss) {
                down_write(&current->mm->mmap_sem);
                error = do_brk(elf_bss, last_bss - elf_bss);
                up_write(&current->mm->mmap_sem);
                if (BAD_ADDR(error))
                        goto out_close;
        }

        error = load_addr;

out_close:
        kfree(elf_phdata);
out:
        return error;
}
/*
 * These are the functions used to load ELF style executables and shared
 * libraries.  There is no binary dependent code anywhere else.
 */

#define INTERPRETER_NONE 0
#define INTERPRETER_ELF 2

#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))     /* 8MB of VA */
#endif
static unsigned long randomize_stack_top(unsigned long stack_top)
{
        unsigned int random_variable = 0;

        if ((current->flags & PF_RANDOMIZE) &&
                !(current->personality & ADDR_NO_RANDOMIZE)) {
                random_variable = get_random_int() & STACK_RND_MASK;
                random_variable <<= PAGE_SHIFT;
        }
#ifdef CONFIG_STACK_GROWSUP
        return PAGE_ALIGN(stack_top) + random_variable;
#else
        return PAGE_ALIGN(stack_top) - random_variable;
#endif
}
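
/*
 * Worked example (illustrative): with 4K pages (PAGE_SHIFT == 12) the
 * default STACK_RND_MASK is 0x7ff, so random_variable ranges over
 * 0..0x7ff000 - the stack top moves by up to 8MB-4K in page-sized steps.
 */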
static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
{
        struct file *interpreter = NULL; /* to shut gcc up */
        unsigned long load_addr = 0, load_bias = 0;
        int load_addr_set = 0;
        char * elf_interpreter = NULL;
        unsigned long error;
        struct elf_phdr *elf_ppnt, *elf_phdata;
        unsigned long elf_bss, elf_brk;
        int elf_exec_fileno;
        int retval, i;
        unsigned long size;
        unsigned long elf_entry;
        unsigned long interp_load_addr = 0;
        unsigned long start_code, end_code, start_data, end_data;
        unsigned long reloc_func_desc = 0;
        int executable_stack = EXSTACK_DEFAULT;
        unsigned long def_flags = 0;
        struct {
                struct elfhdr elf_ex;
                struct elfhdr interp_elf_ex;
        } *loc;

        loc = kmalloc(sizeof(*loc), GFP_KERNEL);
        if (!loc) {
                retval = -ENOMEM;
                goto out_ret;
        }

        /* Get the exec-header */
        loc->elf_ex = *((struct elfhdr *)bprm->buf);

        retval = -ENOEXEC;
        /* First of all, some simple consistency checks */
        if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
                goto out;

        if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
                goto out;
        if (!elf_check_arch(&loc->elf_ex))
                goto out;
        if (!bprm->file->f_op || !bprm->file->f_op->mmap)
                goto out;

        /* Now read in all of the header information */
        if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr))
                goto out;
        if (loc->elf_ex.e_phnum < 1 ||
                loc->elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
                goto out;
        size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr);
        retval = -ENOMEM;
        elf_phdata = kmalloc(size, GFP_KERNEL);
        if (!elf_phdata)
                goto out;

        retval = kernel_read(bprm->file, loc->elf_ex.e_phoff,
                             (char *)elf_phdata, size);
        if (retval != size) {
                if (retval >= 0)
                        retval = -EIO;
                goto out_free_ph;
        }

        retval = get_unused_fd();
        if (retval < 0)
                goto out_free_ph;
        get_file(bprm->file);
        fd_install(elf_exec_fileno = retval, bprm->file);

        elf_ppnt = elf_phdata;
        elf_bss = 0;
        elf_brk = 0;

        start_code = ~0UL;
        end_code = 0;
        start_data = 0;
        end_data = 0;

        for (i = 0; i < loc->elf_ex.e_phnum; i++) {
                if (elf_ppnt->p_type == PT_INTERP) {
                        /* This is the program interpreter used for
                         * shared libraries - for now assume that this
                         * is an a.out format binary
                         */
                        retval = -ENOEXEC;
                        if (elf_ppnt->p_filesz > PATH_MAX ||
                            elf_ppnt->p_filesz < 2)
                                goto out_free_file;

                        retval = -ENOMEM;
                        elf_interpreter = kmalloc(elf_ppnt->p_filesz,
                                                  GFP_KERNEL);
                        if (!elf_interpreter)
                                goto out_free_file;

                        retval = kernel_read(bprm->file, elf_ppnt->p_offset,
                                             elf_interpreter,
                                             elf_ppnt->p_filesz);
                        if (retval != elf_ppnt->p_filesz) {
                                if (retval >= 0)
                                        retval = -EIO;
                                goto out_free_interp;
                        }
                        /* make sure path is NULL terminated */
                        retval = -ENOEXEC;
                        if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
                                goto out_free_interp;

                        /*
                         * The early SET_PERSONALITY here is so that the lookup
                         * for the interpreter happens in the namespace of the
                         * to-be-execed image.  SET_PERSONALITY can select an
                         * alternate root.
                         *
                         * However, SET_PERSONALITY is NOT allowed to switch
                         * this task into the new image's memory mapping
                         * policy - that is, TASK_SIZE must still evaluate to
                         * that which is appropriate to the execing application.
                         * This is because exit_mmap() needs to have TASK_SIZE
                         * evaluate to the size of the old image.
                         *
                         * So if (say) a 64-bit application is execing a 32-bit
                         * application it is the architecture's responsibility
                         * to defer changing the value of TASK_SIZE until the
                         * switch really is going to happen - do this in
                         * flush_thread().      - akpm
                         */
                        SET_PERSONALITY(loc->elf_ex);

                        interpreter = open_exec(elf_interpreter);
                        retval = PTR_ERR(interpreter);
                        if (IS_ERR(interpreter))
                                goto out_free_interp;

                        /*
                         * If the binary is not readable then enforce
                         * mm->dumpable = 0 regardless of the interpreter's
                         * permissions.
                         */
                        if (file_permission(interpreter, MAY_READ) < 0)
                                bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;

                        retval = kernel_read(interpreter, 0, bprm->buf,
                                             BINPRM_BUF_SIZE);
                        if (retval != BINPRM_BUF_SIZE) {
                                if (retval >= 0)
                                        retval = -EIO;
                                goto out_free_dentry;
                        }

                        /* Get the exec headers */
                        loc->interp_elf_ex = *((struct elfhdr *)bprm->buf);
                        break;
                }
                elf_ppnt++;
        }

        elf_ppnt = elf_phdata;
        for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
                if (elf_ppnt->p_type == PT_GNU_STACK) {
                        if (elf_ppnt->p_flags & PF_X)
                                executable_stack = EXSTACK_ENABLE_X;
                        else
                                executable_stack = EXSTACK_DISABLE_X;
                        break;
                }

        /* Some simple consistency checks for the interpreter */
        if (elf_interpreter) {
                retval = -ELIBBAD;
                /* Not an ELF interpreter */
                if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
                        goto out_free_dentry;
                /* Verify the interpreter has a valid arch */
                if (!elf_check_arch(&loc->interp_elf_ex))
                        goto out_free_dentry;
        } else {
                /* Executables without an interpreter also need a personality */
                SET_PERSONALITY(loc->elf_ex);
        }

        /* Flush all traces of the currently running executable */
        retval = flush_old_exec(bprm);
        if (retval)
                goto out_free_dentry;

        /* OK, This is the point of no return */
        current->flags &= ~PF_FORKNOEXEC;
        current->mm->def_flags = def_flags;

        /* Do this immediately, since STACK_TOP as used in setup_arg_pages
           may depend on the personality.  */
        SET_PERSONALITY(loc->elf_ex);
        if (elf_read_implies_exec(loc->elf_ex, executable_stack))
                current->personality |= READ_IMPLIES_EXEC;

        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                current->flags |= PF_RANDOMIZE;
        arch_pick_mmap_layout(current->mm);

        /* Do this so that we can load the interpreter, if need be.  We will
           change some of these later */
        current->mm->free_area_cache = current->mm->mmap_base;
        current->mm->cached_hole_size = 0;
        retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
                                 executable_stack);
        if (retval < 0) {
                send_sig(SIGKILL, current, 0);
                goto out_free_dentry;
        }

        current->mm->start_stack = bprm->p;

        /* Now we do a little grungy work by mmaping the ELF image into
           the correct location in memory. */
        for(i = 0, elf_ppnt = elf_phdata;
            i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
                int elf_prot = 0, elf_flags;
                unsigned long k, vaddr;

                if (elf_ppnt->p_type != PT_LOAD)
                        continue;

                if (unlikely (elf_brk > elf_bss)) {
                        unsigned long nbyte;

                        /* There was a PT_LOAD segment with p_memsz > p_filesz
                           before this one. Map anonymous pages, if needed,
                           and clear the area.  */
                        retval = set_brk (elf_bss + load_bias,
                                          elf_brk + load_bias);
                        if (retval) {
                                send_sig(SIGKILL, current, 0);
                                goto out_free_dentry;
                        }
                        nbyte = ELF_PAGEOFFSET(elf_bss);
                        if (nbyte) {
                                nbyte = ELF_MIN_ALIGN - nbyte;
                                if (nbyte > elf_brk - elf_bss)
                                        nbyte = elf_brk - elf_bss;
                                if (clear_user((void __user *)elf_bss +
                                                        load_bias, nbyte)) {
                                        /*
                                         * This bss-zeroing can fail if the ELF
                                         * file specifies odd protections. So
                                         * we don't check the return value
                                         */
                                }
                        }
                }

                if (elf_ppnt->p_flags & PF_R)
                        elf_prot |= PROT_READ;
                if (elf_ppnt->p_flags & PF_W)
                        elf_prot |= PROT_WRITE;
                if (elf_ppnt->p_flags & PF_X)
                        elf_prot |= PROT_EXEC;

                elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;

                vaddr = elf_ppnt->p_vaddr;
                if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
                        elf_flags |= MAP_FIXED;
                } else if (loc->elf_ex.e_type == ET_DYN) {
                        /* Try and get dynamic programs out of the way of the
                         * default mmap base, as well as whatever program they
                         * might try to exec.  This is because the brk will
                         * follow the loader, and is not movable.  */
#ifdef CONFIG_X86
                        load_bias = 0;
#else
                        load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
#endif
                }

                error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
                                elf_prot, elf_flags, 0);
                if (BAD_ADDR(error)) {
                        send_sig(SIGKILL, current, 0);
                        retval = IS_ERR((void *)error) ?
                                PTR_ERR((void*)error) : -EINVAL;
                        goto out_free_dentry;
                }

                if (!load_addr_set) {
                        load_addr_set = 1;
                        load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
                        if (loc->elf_ex.e_type == ET_DYN) {
                                load_bias += error -
                                             ELF_PAGESTART(load_bias + vaddr);
                                load_addr += load_bias;
                                reloc_func_desc = load_bias;
                        }
                }
                k = elf_ppnt->p_vaddr;
                if (k < start_code)
                        start_code = k;
                if (start_data < k)
                        start_data = k;

                /*
                 * Check to see if the section's size will overflow the
                 * allowed task size. Note that p_filesz must always be
                 * <= p_memsz so it is only necessary to check p_memsz.
                 */
                if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
                    elf_ppnt->p_memsz > TASK_SIZE ||
                    TASK_SIZE - elf_ppnt->p_memsz < k) {
                        /* set_brk can never work. Avoid overflows. */
                        send_sig(SIGKILL, current, 0);
                        retval = -EINVAL;
                        goto out_free_dentry;
                }

                k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;

                if (k > elf_bss)
                        elf_bss = k;
                if ((elf_ppnt->p_flags & PF_X) && end_code < k)
                        end_code = k;
                if (end_data < k)
                        end_data = k;
                k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
                if (k > elf_brk)
                        elf_brk = k;
        }

        loc->elf_ex.e_entry += load_bias;
        elf_bss += load_bias;
        elf_brk += load_bias;
        start_code += load_bias;
        end_code += load_bias;
        start_data += load_bias;
        end_data += load_bias;

        /* Calling set_brk effectively mmaps the pages that we need
         * for the bss and break sections.  We must do this before
         * mapping in the interpreter, to make sure it doesn't wind
         * up getting placed where the bss needs to go.
         */
        retval = set_brk(elf_bss, elf_brk);
        if (retval) {
                send_sig(SIGKILL, current, 0);
                goto out_free_dentry;
        }
        if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
                send_sig(SIGSEGV, current, 0);
                retval = -EFAULT; /* Nobody gets to see this, but.. */
                goto out_free_dentry;
        }

        if (elf_interpreter) {
                unsigned long uninitialized_var(interp_map_addr);

                elf_entry = load_elf_interp(&loc->interp_elf_ex,
                                            interpreter,
                                            &interp_map_addr,
                                            load_bias);
                if (!IS_ERR((void *)elf_entry)) {
                        /*
                         * load_elf_interp() returns relocation
                         * adjustment
                         */
                        interp_load_addr = elf_entry;
                        elf_entry += loc->interp_elf_ex.e_entry;
                }
                if (BAD_ADDR(elf_entry)) {
                        force_sig(SIGSEGV, current);
                        retval = IS_ERR((void *)elf_entry) ?
                                        (int)elf_entry : -EINVAL;
                        goto out_free_dentry;
                }
                reloc_func_desc = interp_load_addr;

                allow_write_access(interpreter);
                fput(interpreter);
                kfree(elf_interpreter);
        } else {
                elf_entry = loc->elf_ex.e_entry;
                if (BAD_ADDR(elf_entry)) {
                        force_sig(SIGSEGV, current);
                        retval = -EINVAL;
                        goto out_free_dentry;
                }
        }

        kfree(elf_phdata);

        sys_close(elf_exec_fileno);

        set_binfmt(&elf_format);

#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
        retval = arch_setup_additional_pages(bprm, !!elf_interpreter);
        if (retval < 0) {
                send_sig(SIGKILL, current, 0);
                goto out;
        }
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */

        install_exec_creds(bprm);
        current->flags &= ~PF_FORKNOEXEC;
        retval = create_elf_tables(bprm, &loc->elf_ex,
                          load_addr, interp_load_addr);
        if (retval < 0) {
                send_sig(SIGKILL, current, 0);
                goto out;
        }
        /* N.B. passed_fileno might not be initialized? */
        current->mm->end_code = end_code;
        current->mm->start_code = start_code;
        current->mm->start_data = start_data;
        current->mm->end_data = end_data;
        current->mm->start_stack = bprm->p;

#ifdef arch_randomize_brk
        if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1))
                current->mm->brk = current->mm->start_brk =
                        arch_randomize_brk(current->mm);
#endif

        if (current->personality & MMAP_PAGE_ZERO) {
                /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
                   and some applications "depend" upon this behavior.
                   Since we do not have the power to recompile these, we
                   emulate the SVr4 behavior. Sigh. */
                down_write(&current->mm->mmap_sem);
                error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
                                MAP_FIXED | MAP_PRIVATE, 0);
                up_write(&current->mm->mmap_sem);
        }

#ifdef ELF_PLAT_INIT
        /*
         * The ABI may specify that certain registers be set up in special
         * ways (on i386 %edx is the address of a DT_FINI function, for
         * example.  In addition, it may also specify (eg, PowerPC64 ELF)
         * that the e_entry field is the address of the function descriptor
         * for the startup routine, rather than the address of the startup
         * routine itself.  This macro performs whatever initialization to
         * the regs structure is required as well as any relocations to the
         * function descriptor entries when executing dynamically linked apps.
         */
        ELF_PLAT_INIT(regs, reloc_func_desc);
#endif

        start_thread(regs, elf_entry, bprm->p);
        retval = 0;
out:
        kfree(loc);
out_ret:
        return retval;

        /* error cleanup */
out_free_dentry:
        allow_write_access(interpreter);
        if (interpreter)
                fput(interpreter);
out_free_interp:
        kfree(elf_interpreter);
out_free_file:
        sys_close(elf_exec_fileno);
out_free_ph:
        kfree(elf_phdata);
        goto out;
}
/* This is really simpleminded and specialized - we are loading an
   a.out library that is given an ELF header. */
static int load_elf_library(struct file *file)
{
        struct elf_phdr *elf_phdata;
        struct elf_phdr *eppnt;
        unsigned long elf_bss, bss, len;
        int retval, error, i, j;
        struct elfhdr elf_ex;

        error = -ENOEXEC;
        retval = kernel_read(file, 0, (char *)&elf_ex, sizeof(elf_ex));
        if (retval != sizeof(elf_ex))
                goto out;

        if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
                goto out;

        /* First of all, some simple consistency checks */
        if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
            !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
                goto out;

        /* Now read in all of the header information */

        j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
        /* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */

        error = -ENOMEM;
        elf_phdata = kmalloc(j, GFP_KERNEL);
        if (!elf_phdata)
                goto out;

        eppnt = elf_phdata;
        error = -ENOEXEC;
        retval = kernel_read(file, elf_ex.e_phoff, (char *)eppnt, j);
        if (retval != j)
                goto out_free_ph;

        for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
                if ((eppnt + i)->p_type == PT_LOAD)
                        j++;
        if (j != 1)
                goto out_free_ph;

        while (eppnt->p_type != PT_LOAD)
                eppnt++;

        /* Now use mmap to map the library into memory. */
        down_write(&current->mm->mmap_sem);
        error = do_mmap(file,
                        ELF_PAGESTART(eppnt->p_vaddr),
                        (eppnt->p_filesz +
                         ELF_PAGEOFFSET(eppnt->p_vaddr)),
                        PROT_READ | PROT_WRITE | PROT_EXEC,
                        MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
                        (eppnt->p_offset -
                         ELF_PAGEOFFSET(eppnt->p_vaddr)));
        up_write(&current->mm->mmap_sem);
        if (error != ELF_PAGESTART(eppnt->p_vaddr))
                goto out_free_ph;

        elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
        if (padzero(elf_bss)) {
                error = -EFAULT;
                goto out_free_ph;
        }

        len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr +
                            ELF_MIN_ALIGN - 1);
        bss = eppnt->p_memsz + eppnt->p_vaddr;
        if (bss > len) {
                down_write(&current->mm->mmap_sem);
                do_brk(len, bss - len);
                up_write(&current->mm->mmap_sem);
        }
        error = 0;

out_free_ph:
        kfree(elf_phdata);
out:
        return error;
}
/*
 * Note that some platforms still use traditional core dumps and not
 * the ELF core dump.  Each platform can select it as appropriate.
 */
#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)

/*
 * ELF core dumper
 *
 * Modelled on fs/exec.c:aout_core_dump()
 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
 */
/*
 * These are the only things you should do on a core-file: use only these
 * functions to write out all the necessary info.
 */
static int dump_write(struct file *file, const void *addr, int nr)
{
        return file->f_op->write(file, addr, nr, &file->f_pos) == nr;
}

static int dump_seek(struct file *file, loff_t off)
{
        if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
                if (file->f_op->llseek(file, off, SEEK_CUR) < 0)
                        return 0;
        } else {
                char *buf = (char *)get_zeroed_page(GFP_KERNEL);
                if (!buf)
                        return 0;
                while (off > 0) {
                        unsigned long n = off;
                        if (n > PAGE_SIZE)
                                n = PAGE_SIZE;
                        if (!dump_write(file, buf, n))
                                return 0;
                        off -= n;
                }
                free_page((unsigned long)buf);
        }
        return 1;
}
/*
 * Decide what to dump of a segment, part, all or none.
 */
static unsigned long vma_dump_size(struct vm_area_struct *vma,
                                   unsigned long mm_flags)
{
#define FILTER(type)    (mm_flags & (1UL << MMF_DUMP_##type))
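
        /*
         * Note (informational): the MMF_DUMP_* bits tested here live in
         * mm->flags and are tunable from userspace through
         * /proc/<pid>/coredump_filter, so the decisions below follow the
         * per-process core dump filter.
         */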
        /* The vma can be set up to tell us the answer directly.  */
        if (vma->vm_flags & VM_ALWAYSDUMP)
                goto whole;

        /* Hugetlb memory check */
        if (vma->vm_flags & VM_HUGETLB) {
                if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED))
                        goto whole;
                if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
                        goto whole;
        }

        /* Do not dump I/O mapped devices or special mappings */
        if (vma->vm_flags & (VM_IO | VM_RESERVED))
                return 0;

        /* By default, dump shared memory if mapped from an anonymous file. */
        if (vma->vm_flags & VM_SHARED) {
                if (vma->vm_file->f_path.dentry->d_inode->i_nlink == 0 ?
                    FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED))
                        goto whole;
                return 0;
        }

        /* Dump segments that have been written to.  */
        if (vma->anon_vma && FILTER(ANON_PRIVATE))
                goto whole;
        if (vma->vm_file == NULL)
                return 0;

        if (FILTER(MAPPED_PRIVATE))
                goto whole;

        /*
         * If this looks like the beginning of a DSO or executable mapping,
         * check for an ELF header.  If we find one, dump the first page to
         * aid in determining what was mapped here.
         */
        if (FILTER(ELF_HEADERS) && vma->vm_file != NULL && vma->vm_pgoff == 0) {
                u32 __user *header = (u32 __user *) vma->vm_start;
                u32 word;
                /*
                 * Doing it this way gets the constant folded by GCC.
                 */
                union {
                        u32 cmp;
                        char elfmag[SELFMAG];
                } magic;
                BUILD_BUG_ON(SELFMAG != sizeof word);
                magic.elfmag[EI_MAG0] = ELFMAG0;
                magic.elfmag[EI_MAG1] = ELFMAG1;
                magic.elfmag[EI_MAG2] = ELFMAG2;
                magic.elfmag[EI_MAG3] = ELFMAG3;
                if (get_user(word, header) == 0 && word == magic.cmp)
                        return PAGE_SIZE;
        }

#undef  FILTER

        return 0;

whole:
        return vma->vm_end - vma->vm_start;
}
/* An ELF note in memory */
struct memelfnote
{
        const char *name;
        int type;
        unsigned int datasz;
        void *data;
};

static int notesize(struct memelfnote *en)
{
        int sz;

        sz = sizeof(struct elf_note);
        sz += roundup(strlen(en->name) + 1, 4);
        sz += roundup(en->datasz, 4);

        return sz;
}
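
/*
 * On-disk note layout produced from a memelfnote (illustrative):
 *
 *      struct elf_note { n_namesz, n_descsz, n_type }
 *      name bytes (NUL included), padded to a 4-byte boundary
 *      desc bytes, padded to a 4-byte boundary
 *
 * e.g. a "CORE" note (5 name bytes -> 8 padded) with a 300-byte desc
 * comes to 12 + 8 + 300 = 320 bytes, which is what notesize() returns.
 */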
#define DUMP_WRITE(addr, nr, foffset)   \
        do { if (!dump_write(file, (addr), (nr))) return 0; *foffset += (nr); } while(0)

static int alignfile(struct file *file, loff_t *foffset)
{
        static const char buf[4] = { 0, };
        DUMP_WRITE(buf, roundup(*foffset, 4) - *foffset, foffset);
        return 1;
}

static int writenote(struct memelfnote *men, struct file *file,
                        loff_t *foffset)
{
        struct elf_note en;
        en.n_namesz = strlen(men->name) + 1;
        en.n_descsz = men->datasz;
        en.n_type = men->type;

        DUMP_WRITE(&en, sizeof(en), foffset);
        DUMP_WRITE(men->name, en.n_namesz, foffset);
        if (!alignfile(file, foffset))
                return 0;
        DUMP_WRITE(men->data, men->datasz, foffset);
        if (!alignfile(file, foffset))
                return 0;

        return 1;
}
#undef DUMP_WRITE
#define DUMP_WRITE(addr, nr)    \
        if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
                goto end_coredump;
#define DUMP_SEEK(off)  \
        if (!dump_seek(file, (off))) \
                goto end_coredump;
static void fill_elf_header(struct elfhdr *elf, int segs,
                            u16 machine, u32 flags, u8 osabi)
{
        memset(elf, 0, sizeof(*elf));

        memcpy(elf->e_ident, ELFMAG, SELFMAG);
        elf->e_ident[EI_CLASS] = ELF_CLASS;
        elf->e_ident[EI_DATA] = ELF_DATA;
        elf->e_ident[EI_VERSION] = EV_CURRENT;
        elf->e_ident[EI_OSABI] = ELF_OSABI;

        elf->e_type = ET_CORE;
        elf->e_machine = machine;
        elf->e_version = EV_CURRENT;
        elf->e_phoff = sizeof(struct elfhdr);
        elf->e_flags = flags;
        elf->e_ehsize = sizeof(struct elfhdr);
        elf->e_phentsize = sizeof(struct elf_phdr);
        elf->e_phnum = segs;

        return;
}
static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
{
        phdr->p_type = PT_NOTE;
        phdr->p_offset = offset;
        phdr->p_vaddr = 0;
        phdr->p_paddr = 0;
        phdr->p_filesz = sz;
        phdr->p_memsz = 0;
        phdr->p_flags = 0;
        phdr->p_align = 0;
        return;
}
static void fill_note(struct memelfnote *note, const char *name, int type,
                unsigned int sz, void *data)
{
        note->name = name;
        note->type = type;
        note->datasz = sz;
        note->data = data;
        return;
}
/*
 * fill up all the fields in prstatus from the given task struct, except
 * registers which need to be filled up separately.
 */
static void fill_prstatus(struct elf_prstatus *prstatus,
                struct task_struct *p, long signr)
{
        prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
        prstatus->pr_sigpend = p->pending.signal.sig[0];
        prstatus->pr_sighold = p->blocked.sig[0];
        prstatus->pr_pid = task_pid_vnr(p);
        prstatus->pr_ppid = task_pid_vnr(p->real_parent);
        prstatus->pr_pgrp = task_pgrp_vnr(p);
        prstatus->pr_sid = task_session_vnr(p);
        if (thread_group_leader(p)) {
                struct task_cputime cputime;

                /*
                 * This is the record for the group leader.  It shows the
                 * group-wide total, not its individual thread total.
                 */
                thread_group_cputime(p, &cputime);
                cputime_to_timeval(cputime.utime, &prstatus->pr_utime);
                cputime_to_timeval(cputime.stime, &prstatus->pr_stime);
        } else {
                cputime_to_timeval(p->utime, &prstatus->pr_utime);
                cputime_to_timeval(p->stime, &prstatus->pr_stime);
        }
        cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
        cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
}
static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
                       struct mm_struct *mm)
{
        const struct cred *cred;
        unsigned int i, len;

        /* first copy the parameters from user space */
        memset(psinfo, 0, sizeof(struct elf_prpsinfo));

        len = mm->arg_end - mm->arg_start;
        if (len >= ELF_PRARGSZ)
                len = ELF_PRARGSZ-1;
        if (copy_from_user(&psinfo->pr_psargs,
                           (const char __user *)mm->arg_start, len))
                return -EFAULT;
        for(i = 0; i < len; i++)
                if (psinfo->pr_psargs[i] == 0)
                        psinfo->pr_psargs[i] = ' ';
        psinfo->pr_psargs[len] = 0;

        psinfo->pr_pid = task_pid_vnr(p);
        psinfo->pr_ppid = task_pid_vnr(p->real_parent);
        psinfo->pr_pgrp = task_pgrp_vnr(p);
        psinfo->pr_sid = task_session_vnr(p);

        i = p->state ? ffz(~p->state) + 1 : 0;
        psinfo->pr_state = i;
        psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
        psinfo->pr_zomb = psinfo->pr_sname == 'Z';
        psinfo->pr_nice = task_nice(p);
        psinfo->pr_flag = p->flags;
        rcu_read_lock();
        cred = __task_cred(p);
        SET_UID(psinfo->pr_uid, cred->uid);
        SET_GID(psinfo->pr_gid, cred->gid);
        rcu_read_unlock();
        strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));

        return 0;
}
*note
, struct mm_struct
*mm
)
1416 elf_addr_t
*auxv
= (elf_addr_t
*) mm
->saved_auxv
;
1420 while (auxv
[i
- 2] != AT_NULL
);
1421 fill_note(note
, "CORE", NT_AUXV
, i
* sizeof(elf_addr_t
), auxv
);
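
/*
 * Worked example (illustrative): if saved_auxv holds
 * { AT_PAGESZ, 4096, AT_NULL, 0 }, the loop above stops with i == 4,
 * so the note covers both pairs including the AT_NULL terminator.
 */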
#ifdef CORE_DUMP_USE_REGSET
#include <linux/regset.h>

struct elf_thread_core_info {
        struct elf_thread_core_info *next;
        struct task_struct *task;
        struct elf_prstatus prstatus;
        struct memelfnote notes[0];
};

struct elf_note_info {
        struct elf_thread_core_info *thread;
        struct memelfnote psinfo;
        struct memelfnote auxv;
        size_t size;
        int thread_notes;
};
/*
 * When a regset has a writeback hook, we call it on each thread before
 * dumping user memory.  On register window machines, this makes sure the
 * user memory backing the register data is up to date before we read it.
 */
static void do_thread_regset_writeback(struct task_struct *task,
                                       const struct user_regset *regset)
{
        if (regset->writeback)
                regset->writeback(task, regset, 1);
}
static int fill_thread_core_info(struct elf_thread_core_info *t,
                                 const struct user_regset_view *view,
                                 long signr, size_t *total)
{
        unsigned int i;

        /*
         * NT_PRSTATUS is the one special case, because the regset data
         * goes into the pr_reg field inside the note contents, rather
         * than being the whole note contents.  We fill the rest in here.
         * We assume that regset 0 is NT_PRSTATUS.
         */
        fill_prstatus(&t->prstatus, t->task, signr);
        (void) view->regsets[0].get(t->task, &view->regsets[0],
                                    0, sizeof(t->prstatus.pr_reg),
                                    &t->prstatus.pr_reg, NULL);

        fill_note(&t->notes[0], "CORE", NT_PRSTATUS,
                  sizeof(t->prstatus), &t->prstatus);
        *total += notesize(&t->notes[0]);

        do_thread_regset_writeback(t->task, &view->regsets[0]);

        /*
         * Each other regset might generate a note too.  For each regset
         * that has no core_note_type or is inactive, we leave t->notes[i]
         * all zero and we'll know to skip writing it later.
         */
        for (i = 1; i < view->n; ++i) {
                const struct user_regset *regset = &view->regsets[i];
                do_thread_regset_writeback(t->task, regset);
                if (regset->core_note_type &&
                    (!regset->active || regset->active(t->task, regset))) {
                        int ret;
                        size_t size = regset->n * regset->size;
                        void *data = kmalloc(size, GFP_KERNEL);
                        if (unlikely(!data))
                                return 0;
                        ret = regset->get(t->task, regset,
                                          0, size, data, NULL);
                        if (unlikely(ret))
                                kfree(data);
                        else {
                                if (regset->core_note_type != NT_PRFPREG)
                                        fill_note(&t->notes[i], "LINUX",
                                                  regset->core_note_type,
                                                  size, data);
                                else {
                                        t->prstatus.pr_fpvalid = 1;
                                        fill_note(&t->notes[i], "CORE",
                                                  NT_PRFPREG, size, data);
                                }
                                *total += notesize(&t->notes[i]);
                        }
                }
        }

        return 1;
}
static int fill_note_info(struct elfhdr *elf, int phdrs,
                          struct elf_note_info *info,
                          long signr, struct pt_regs *regs)
{
        struct task_struct *dump_task = current;
        const struct user_regset_view *view = task_user_regset_view(dump_task);
        struct elf_thread_core_info *t;
        struct elf_prpsinfo *psinfo;
        struct core_thread *ct;
        unsigned int i;

        info->size = 0;
        info->thread = NULL;

        psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
        fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);

        if (psinfo == NULL)
                return 0;

        /*
         * Figure out how many notes we're going to need for each thread.
         */
        info->thread_notes = 0;
        for (i = 0; i < view->n; ++i)
                if (view->regsets[i].core_note_type != 0)
                        ++info->thread_notes;

        /*
         * Sanity check.  We rely on regset 0 being in NT_PRSTATUS,
         * since it is our one special case.
         */
        if (unlikely(info->thread_notes == 0) ||
            unlikely(view->regsets[0].core_note_type != NT_PRSTATUS)) {
                WARN_ON(1);
                return 0;
        }

        /*
         * Initialize the ELF file header.
         */
        fill_elf_header(elf, phdrs,
                        view->e_machine, view->e_flags, view->ei_osabi);

        /*
         * Allocate a structure for each thread.
         */
        for (ct = &dump_task->mm->core_state->dumper; ct; ct = ct->next) {
                t = kzalloc(offsetof(struct elf_thread_core_info,
                                     notes[info->thread_notes]),
                            GFP_KERNEL);
                if (unlikely(!t))
                        return 0;

                t->task = ct->task;
                if (ct->task == dump_task || !info->thread) {
                        t->next = info->thread;
                        info->thread = t;
                } else {
                        /*
                         * Make sure to keep the original task at
                         * the head of the list.
                         */
                        t->next = info->thread->next;
                        info->thread->next = t;
                }
        }

        /*
         * Now fill in each thread's information.
         */
        for (t = info->thread; t != NULL; t = t->next)
                if (!fill_thread_core_info(t, view, signr, &info->size))
                        return 0;

        /*
         * Fill in the two process-wide notes.
         */
        fill_psinfo(psinfo, dump_task->group_leader, dump_task->mm);
        info->size += notesize(&info->psinfo);

        fill_auxv_note(&info->auxv, current->mm);
        info->size += notesize(&info->auxv);

        return 1;
}
static size_t get_note_info_size(struct elf_note_info *info)
{
        return info->size;
}
/*
 * Write all the notes for each thread.  When writing the first thread, the
 * process-wide notes are interleaved after the first thread-specific note.
 */
static int write_note_info(struct elf_note_info *info,
                           struct file *file, loff_t *foffset)
{
        bool first = 1;
        struct elf_thread_core_info *t = info->thread;

        do {
                int i;

                if (!writenote(&t->notes[0], file, foffset))
                        return 0;

                if (first && !writenote(&info->psinfo, file, foffset))
                        return 0;
                if (first && !writenote(&info->auxv, file, foffset))
                        return 0;

                for (i = 1; i < info->thread_notes; ++i)
                        if (t->notes[i].data &&
                            !writenote(&t->notes[i], file, foffset))
                                return 0;

                first = 0;
                t = t->next;
        } while (t);

        return 1;
}
static void free_note_info(struct elf_note_info *info)
{
        struct elf_thread_core_info *threads = info->thread;
        while (threads) {
                unsigned int i;
                struct elf_thread_core_info *t = threads;
                threads = t->next;
                WARN_ON(t->notes[0].data && t->notes[0].data != &t->prstatus);
                for (i = 1; i < info->thread_notes; ++i)
                        kfree(t->notes[i].data);
                kfree(t);
        }
        kfree(info->psinfo.data);
}

#else
/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status
{
        struct list_head list;
        struct elf_prstatus prstatus;   /* NT_PRSTATUS */
        elf_fpregset_t fpu;             /* NT_PRFPREG */
        struct task_struct *thread;
#ifdef ELF_CORE_COPY_XFPREGS
        elf_fpxregset_t xfpu;           /* ELF_CORE_XFPREG_TYPE */
#endif
        struct memelfnote notes[3];
        int num_notes;
};
/*
 * In order to add the specific thread information for the elf file format,
 * we need to keep a linked list of every threads pr_status and then create
 * a single section for them in the final core file.
 */
static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
{
        int sz = 0;
        struct task_struct *p = t->thread;
        t->num_notes = 0;

        fill_prstatus(&t->prstatus, p, signr);
        elf_core_copy_task_regs(p, &t->prstatus.pr_reg);

        fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
                  &(t->prstatus));
        t->num_notes++;
        sz += notesize(&t->notes[0]);

        if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL,
                                                                &t->fpu))) {
                fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
                          &(t->fpu));
                t->num_notes++;
                sz += notesize(&t->notes[1]);
        }

#ifdef ELF_CORE_COPY_XFPREGS
        if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
                fill_note(&t->notes[2], "LINUX", ELF_CORE_XFPREG_TYPE,
                          sizeof(t->xfpu), &t->xfpu);
                t->num_notes++;
                sz += notesize(&t->notes[2]);
        }
#endif
        return sz;
}
{
1709 struct memelfnote
*notes
;
1710 struct elf_prstatus
*prstatus
; /* NT_PRSTATUS */
1711 struct elf_prpsinfo
*psinfo
; /* NT_PRPSINFO */
1712 struct list_head thread_list
;
1713 elf_fpregset_t
*fpu
;
1714 #ifdef ELF_CORE_COPY_XFPREGS
1715 elf_fpxregset_t
*xfpu
;
1717 int thread_status_size
;
1721 static int fill_note_info(struct elfhdr
*elf
, int phdrs
,
1722 struct elf_note_info
*info
,
1723 long signr
, struct pt_regs
*regs
)
1726 struct list_head
*t
;
1729 info
->prstatus
= NULL
;
1730 info
->psinfo
= NULL
;
1732 #ifdef ELF_CORE_COPY_XFPREGS
1735 INIT_LIST_HEAD(&info
->thread_list
);
1737 info
->notes
= kmalloc(NUM_NOTES
* sizeof(struct memelfnote
),
1741 info
->psinfo
= kmalloc(sizeof(*info
->psinfo
), GFP_KERNEL
);
1744 info
->prstatus
= kmalloc(sizeof(*info
->prstatus
), GFP_KERNEL
);
1745 if (!info
->prstatus
)
1747 info
->fpu
= kmalloc(sizeof(*info
->fpu
), GFP_KERNEL
);
1750 #ifdef ELF_CORE_COPY_XFPREGS
1751 info
->xfpu
= kmalloc(sizeof(*info
->xfpu
), GFP_KERNEL
);
1756 info
->thread_status_size
= 0;
1758 struct core_thread
*ct
;
1759 struct elf_thread_status
*ets
;
1761 for (ct
= current
->mm
->core_state
->dumper
.next
;
1762 ct
; ct
= ct
->next
) {
1763 ets
= kzalloc(sizeof(*ets
), GFP_KERNEL
);
1767 ets
->thread
= ct
->task
;
1768 list_add(&ets
->list
, &info
->thread_list
);
1771 list_for_each(t
, &info
->thread_list
) {
1774 ets
= list_entry(t
, struct elf_thread_status
, list
);
1775 sz
= elf_dump_thread_status(signr
, ets
);
1776 info
->thread_status_size
+= sz
;
1779 /* now collect the dump for the current */
1780 memset(info
->prstatus
, 0, sizeof(*info
->prstatus
));
1781 fill_prstatus(info
->prstatus
, current
, signr
);
1782 elf_core_copy_regs(&info
->prstatus
->pr_reg
, regs
);
1785 fill_elf_header(elf
, phdrs
, ELF_ARCH
, ELF_CORE_EFLAGS
, ELF_OSABI
);
1788 * Set up the notes in similar form to SVR4 core dumps made
1789 * with info from their /proc.
1792 fill_note(info
->notes
+ 0, "CORE", NT_PRSTATUS
,
1793 sizeof(*info
->prstatus
), info
->prstatus
);
1794 fill_psinfo(info
->psinfo
, current
->group_leader
, current
->mm
);
1795 fill_note(info
->notes
+ 1, "CORE", NT_PRPSINFO
,
1796 sizeof(*info
->psinfo
), info
->psinfo
);
1800 fill_auxv_note(&info
->notes
[info
->numnote
++], current
->mm
);
1802 /* Try to dump the FPU. */
1803 info
->prstatus
->pr_fpvalid
= elf_core_copy_task_fpregs(current
, regs
,
1805 if (info
->prstatus
->pr_fpvalid
)
1806 fill_note(info
->notes
+ info
->numnote
++,
1807 "CORE", NT_PRFPREG
, sizeof(*info
->fpu
), info
->fpu
);
1808 #ifdef ELF_CORE_COPY_XFPREGS
1809 if (elf_core_copy_task_xfpregs(current
, info
->xfpu
))
1810 fill_note(info
->notes
+ info
->numnote
++,
1811 "LINUX", ELF_CORE_XFPREG_TYPE
,
1812 sizeof(*info
->xfpu
), info
->xfpu
);
static size_t get_note_info_size(struct elf_note_info *info)
{
        int sz = 0;
        int i;

        for (i = 0; i < info->numnote; i++)
                sz += notesize(info->notes + i);

        sz += info->thread_status_size;

        return sz;
}
static int write_note_info(struct elf_note_info *info,
                           struct file *file, loff_t *foffset)
{
        int i;
        struct list_head *t;

        for (i = 0; i < info->numnote; i++)
                if (!writenote(info->notes + i, file, foffset))
                        return 0;

        /* write out the thread status notes section */
        list_for_each(t, &info->thread_list) {
                struct elf_thread_status *tmp =
                                list_entry(t, struct elf_thread_status, list);

                for (i = 0; i < tmp->num_notes; i++)
                        if (!writenote(&tmp->notes[i], file, foffset))
                                return 0;
        }

        return 1;
}
static void free_note_info(struct elf_note_info *info)
{
        while (!list_empty(&info->thread_list)) {
                struct list_head *tmp = info->thread_list.next;
                list_del(tmp);
                kfree(list_entry(tmp, struct elf_thread_status, list));
        }

        kfree(info->prstatus);
        kfree(info->psinfo);
        kfree(info->notes);
        kfree(info->fpu);
#ifdef ELF_CORE_COPY_XFPREGS
        kfree(info->xfpu);
#endif
}

#endif /* CORE_DUMP_USE_REGSET */
static struct vm_area_struct *first_vma(struct task_struct *tsk,
                                        struct vm_area_struct *gate_vma)
{
        struct vm_area_struct *ret = tsk->mm->mmap;

        if (ret)
                return ret;
        return gate_vma;
}

/*
 * Helper function for iterating across a vma list.  It ensures that the caller
 * will visit `gate_vma' prior to terminating the search.
 */
static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
                                        struct vm_area_struct *gate_vma)
{
        struct vm_area_struct *ret;

        ret = this_vma->vm_next;
        if (ret)
                return ret;
        if (this_vma == gate_vma)
                return NULL;
        return gate_vma;
}
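
/*
 * Resulting core file layout (illustrative):
 *
 *      ELF header
 *      program headers: one PT_NOTE entry plus one PT_LOAD per vma
 *      note data (prstatus, prpsinfo, auxv, FPU and per-thread notes)
 *      padding up to ELF_EXEC_PAGESIZE
 *      the dumped pages of each vma, in program-header order
 */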
/*
 * Actual dumper
 *
 * This is a two-pass process; first we find the offsets of the bits,
 * and then they are actually written out.  If we run out of core limit
 * we just truncate.
 */
static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit)
{
        int has_dumped = 0;
        mm_segment_t fs;
        int segs;
        size_t size = 0;
        struct vm_area_struct *vma, *gate_vma;
        struct elfhdr *elf = NULL;
        loff_t offset = 0, dataoff, foffset;
        unsigned long mm_flags;
        struct elf_note_info info;

        /*
         * We no longer stop all VM operations.
         *
         * This is because those processes that could possibly change map_count
         * or the mmap / vma pages are now blocked in do_exit on current
         * finishing this core dump.
         *
         * Only ptrace can touch these memory addresses, but it doesn't change
         * the map_count or the pages allocated. So no possibility of crashing
         * exists while dumping the mm->vm_next areas to the core file.
         */

        /* alloc memory for large data structures: too large to be on stack */
        elf = kmalloc(sizeof(*elf), GFP_KERNEL);
        if (!elf)
                goto out;

        segs = current->mm->map_count;
#ifdef ELF_CORE_EXTRA_PHDRS
        segs += ELF_CORE_EXTRA_PHDRS;
#endif

        gate_vma = get_gate_vma(current);
        if (gate_vma != NULL)
                segs++;

        /*
         * Collect all the non-memory information about the process for the
         * notes.  This also sets up the file header.
         */
        if (!fill_note_info(elf, segs + 1, /* including notes section */
                            &info, signr, regs))
                goto cleanup;

        has_dumped = 1;
        current->flags |= PF_DUMPCORE;

        fs = get_fs();
        set_fs(KERNEL_DS);

        DUMP_WRITE(elf, sizeof(*elf));
        offset += sizeof(*elf);                         /* Elf header */
        offset += (segs + 1) * sizeof(struct elf_phdr); /* Program headers */
        foffset = offset;

        /* Write notes phdr entry */
        {
                struct elf_phdr phdr;
                size_t sz = get_note_info_size(&info);

                sz += elf_coredump_extra_notes_size();

                fill_elf_note_phdr(&phdr, sz, offset);
                offset += sz;
                DUMP_WRITE(&phdr, sizeof(phdr));
        }

        dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);

        /*
         * We must use the same mm->flags while dumping core to avoid
         * inconsistency between the program headers and bodies, otherwise an
         * unusable core file can be generated.
         */
        mm_flags = current->mm->flags;

        /* Write program headers for segments dump */
        for (vma = first_vma(current, gate_vma); vma != NULL;
                        vma = next_vma(vma, gate_vma)) {
                struct elf_phdr phdr;

                phdr.p_type = PT_LOAD;
                phdr.p_offset = offset;
                phdr.p_vaddr = vma->vm_start;
                phdr.p_paddr = 0;
                phdr.p_filesz = vma_dump_size(vma, mm_flags);
                phdr.p_memsz = vma->vm_end - vma->vm_start;
                offset += phdr.p_filesz;
                phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
                if (vma->vm_flags & VM_WRITE)
                        phdr.p_flags |= PF_W;
                if (vma->vm_flags & VM_EXEC)
                        phdr.p_flags |= PF_X;
                phdr.p_align = ELF_EXEC_PAGESIZE;

                DUMP_WRITE(&phdr, sizeof(phdr));
        }

#ifdef ELF_CORE_WRITE_EXTRA_PHDRS
        ELF_CORE_WRITE_EXTRA_PHDRS;
#endif

        /* write out the notes section */
        if (!write_note_info(&info, file, &foffset))
                goto end_coredump;

        if (elf_coredump_extra_notes_write(file, &foffset))
                goto end_coredump;

        /* Align to page */
        DUMP_SEEK(dataoff - foffset);

        for (vma = first_vma(current, gate_vma); vma != NULL;
                        vma = next_vma(vma, gate_vma)) {
                unsigned long addr;
                unsigned long end;

                end = vma->vm_start + vma_dump_size(vma, mm_flags);

                for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
                        struct page *page;
                        struct vm_area_struct *tmp_vma;

                        if (get_user_pages(current, current->mm, addr, 1, 0, 1,
                                                &page, &tmp_vma) <= 0) {
                                DUMP_SEEK(PAGE_SIZE);
                        } else {
                                if (page == ZERO_PAGE(0)) {
                                        if (!dump_seek(file, PAGE_SIZE)) {
                                                page_cache_release(page);
                                                goto end_coredump;
                                        }
                                } else {
                                        void *kaddr;
                                        flush_cache_page(tmp_vma, addr,
                                                         page_to_pfn(page));
                                        kaddr = kmap(page);
                                        if ((size += PAGE_SIZE) > limit ||
                                            !dump_write(file, kaddr,
                                            PAGE_SIZE)) {
                                                kunmap(page);
                                                page_cache_release(page);
                                                goto end_coredump;
                                        }
                                        kunmap(page);
                                }
                                page_cache_release(page);
                        }
                }
        }

#ifdef ELF_CORE_WRITE_EXTRA_DATA
        ELF_CORE_WRITE_EXTRA_DATA;
#endif

end_coredump:
        set_fs(fs);

cleanup:
        free_note_info(&info);
        kfree(elf);
out:
        return has_dumped;
}

#endif          /* USE_ELF_CORE_DUMP */
static int __init init_elf_binfmt(void)
{
        return register_binfmt(&elf_format);
}

static void __exit exit_elf_binfmt(void)
{
        /* Remove the ELF loader. */
        unregister_binfmt(&elf_format);
}

core_initcall(init_elf_binfmt);
module_exit(exit_elf_binfmt);
MODULE_LICENSE("GPL");