1 /* This is the Linux kernel elf-loading code, ported into user space */
18 #define ELF_START_MMAP 0x80000000
21 * This is used to ensure we don't load something for the wrong architecture.
23 #define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )
26 * These are used to set parameters in the core dumps.
28 #define ELF_CLASS ELFCLASS32
29 #define ELF_DATA ELFDATA2LSB
30 #define ELF_ARCH EM_386
32 /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
33 starts %edx contains a pointer to a function which might be
34 registered using `atexit'. This provides a means for the
35 dynamic linker to call DT_FINI functions for shared libraries
36 that have been loaded before the code runs.
38 A value of 0 tells we have no such handler. */
39 #define ELF_PLAT_INIT(_r) _r->edx = 0
41 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
43 regs
->esp
= infop
->start_stack
;
44 regs
->eip
= infop
->entry
;
47 #define USE_ELF_CORE_DUMP
48 #define ELF_EXEC_PAGESIZE 4096
54 #define ELF_START_MMAP 0x80000000
56 #define elf_check_arch(x) ( (x) == EM_ARM )
58 #define ELF_CLASS ELFCLASS32
59 #ifdef TARGET_WORDS_BIGENDIAN
60 #define ELF_DATA ELFDATA2MSB
62 #define ELF_DATA ELFDATA2LSB
64 #define ELF_ARCH EM_ARM
66 #define ELF_PLAT_INIT(_r) _r->ARM_r0 = 0
68 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
70 target_long
*stack
= (void *)infop
->start_stack
;
71 memset(regs
, 0, sizeof(*regs
));
72 regs
->ARM_cpsr
= 0x10;
73 regs
->ARM_pc
= infop
->entry
;
74 regs
->ARM_sp
= infop
->start_stack
;
75 regs
->ARM_r2
= tswapl(stack
[2]); /* envp */
76 regs
->ARM_r1
= tswapl(stack
[1]); /* argv */
77 /* XXX: it seems that r0 is zeroed after ! */
78 // regs->ARM_r0 = tswapl(stack[0]); /* argc */
81 #define USE_ELF_CORE_DUMP
82 #define ELF_EXEC_PAGESIZE 4096
88 #define ELF_START_MMAP 0x80000000
90 #define elf_check_arch(x) ( (x) == EM_SPARC )
92 #define ELF_CLASS ELFCLASS32
93 #define ELF_DATA ELFDATA2MSB
94 #define ELF_ARCH EM_SPARC
97 #define ELF_PLAT_INIT(_r)
99 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
101 regs
->u_regs
[0] = infop
->entry
;
102 regs
->u_regs
[1] = infop
->start_stack
;
110 * MAX_ARG_PAGES defines the number of pages allocated for arguments
111 * and envelope for the new program. 32 should suffice, this gives
112 * a maximum env+arg of 128kB w/4KB pages!
114 #define MAX_ARG_PAGES 32
117 * This structure is used to hold the arguments that are
118 * used when loading binaries.
120 struct linux_binprm
{
122 unsigned long page
[MAX_ARG_PAGES
];
128 char * filename
; /* Name of binary */
129 unsigned long loader
, exec
;
130 int dont_iput
; /* binfmt handler has put inode */
135 unsigned int a_info
; /* Use macros N_MAGIC, etc for access */
136 unsigned int a_text
; /* length of text, in bytes */
137 unsigned int a_data
; /* length of data, in bytes */
138 unsigned int a_bss
; /* length of uninitialized data area, in bytes */
139 unsigned int a_syms
; /* length of symbol table data in file, in bytes */
140 unsigned int a_entry
; /* start address */
141 unsigned int a_trsize
; /* length of relocation info for text, in bytes */
142 unsigned int a_drsize
; /* length of relocation info for data, in bytes */
146 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
152 /* max code+data+bss space allocated to elf interpreter */
153 #define INTERP_MAP_SIZE (32 * 1024 * 1024)
155 /* max code+data+bss+brk space allocated to ET_DYN executables */
156 #define ET_DYN_MAP_SIZE (128 * 1024 * 1024)
158 /* from personality.h */
160 /* Flags for bug emulation. These occupy the top three bytes. */
161 #define STICKY_TIMEOUTS 0x4000000
162 #define WHOLE_SECONDS 0x2000000
164 /* Personality types. These go in the low byte. Avoid using the top bit,
165 * it will conflict with error returns.
167 #define PER_MASK (0x00ff)
168 #define PER_LINUX (0x0000)
169 #define PER_SVR4 (0x0001 | STICKY_TIMEOUTS)
170 #define PER_SVR3 (0x0002 | STICKY_TIMEOUTS)
171 #define PER_SCOSVR3 (0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS)
172 #define PER_WYSEV386 (0x0004 | STICKY_TIMEOUTS)
173 #define PER_ISCR4 (0x0005 | STICKY_TIMEOUTS)
174 #define PER_BSD (0x0006)
175 #define PER_XENIX (0x0007 | STICKY_TIMEOUTS)
177 /* Necessary parameters */
180 #define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
181 #define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
182 #define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
184 #define INTERPRETER_NONE 0
185 #define INTERPRETER_AOUT 1
186 #define INTERPRETER_ELF 2
188 #define DLINFO_ITEMS 12
/* Minimal user-space stand-ins for the kernel's user-access helpers.
 * Both macros are fully parenthesized so they expand safely inside
 * larger expressions (e.g. `while (get_user(tmp++))`): the argument
 * is wrapped everywhere it appears, and the whole get_user expansion
 * is wrapped so the cast binds as a unit at the call site.
 */
#define put_user(x,ptr) (void)(*(ptr) = (typeof(*(ptr)))(x))
#define get_user(ptr) ((typeof(*(ptr)))(*(ptr)))
193 static inline void memcpy_fromfs(void * to
, const void * from
, unsigned long n
)
198 static inline void memcpy_tofs(void * to
, const void * from
, unsigned long n
)
203 extern unsigned long x86_stack_size
;
205 static int load_aout_interp(void * exptr
, int interp_fd
);
/* Byte-swap every multi-byte field of an ELF32 file header in place,
 * converting it between target and host endianness. The e_ident bytes
 * need no swapping and are left untouched.
 */
static void bswap_ehdr(Elf32_Ehdr *ehdr)
{
    bswap16s(&ehdr->e_type);      /* Object file type */
    bswap16s(&ehdr->e_machine);   /* Architecture */
    bswap32s(&ehdr->e_version);   /* Object file version */
    bswap32s(&ehdr->e_entry);     /* Entry point virtual address */
    bswap32s(&ehdr->e_phoff);     /* Program header table file offset */
    bswap32s(&ehdr->e_shoff);     /* Section header table file offset */
    bswap32s(&ehdr->e_flags);     /* Processor-specific flags */
    bswap16s(&ehdr->e_ehsize);    /* ELF header size in bytes */
    bswap16s(&ehdr->e_phentsize); /* Program header table entry size */
    bswap16s(&ehdr->e_phnum);     /* Program header table entry count */
    bswap16s(&ehdr->e_shentsize); /* Section header table entry size */
    bswap16s(&ehdr->e_shnum);     /* Section header table entry count */
    bswap16s(&ehdr->e_shstrndx);  /* Section header string table index */
}
/* Byte-swap every field of an ELF32 program header in place,
 * converting it between target and host endianness.
 */
static void bswap_phdr(Elf32_Phdr *phdr)
{
    bswap32s(&phdr->p_type);   /* Segment type */
    bswap32s(&phdr->p_offset); /* Segment file offset */
    bswap32s(&phdr->p_vaddr);  /* Segment virtual address */
    bswap32s(&phdr->p_paddr);  /* Segment physical address */
    bswap32s(&phdr->p_filesz); /* Segment size in file */
    bswap32s(&phdr->p_memsz);  /* Segment size in memory */
    bswap32s(&phdr->p_flags);  /* Segment flags */
    bswap32s(&phdr->p_align);  /* Segment alignment */
}
/* Byte-swap every field of an ELF32 section header in place,
 * converting it between target and host endianness.
 */
static void bswap_shdr(Elf32_Shdr *shdr)
{
    bswap32s(&shdr->sh_name);
    bswap32s(&shdr->sh_type);
    bswap32s(&shdr->sh_flags);
    bswap32s(&shdr->sh_addr);
    bswap32s(&shdr->sh_offset);
    bswap32s(&shdr->sh_size);
    bswap32s(&shdr->sh_link);
    bswap32s(&shdr->sh_info);
    bswap32s(&shdr->sh_addralign);
    bswap32s(&shdr->sh_entsize);
}
/* Byte-swap the multi-byte fields of an ELF32 symbol table entry in
 * place, converting it between target and host endianness. The
 * single-byte st_info/st_other fields need no swapping.
 */
static void bswap_sym(Elf32_Sym *sym)
{
    bswap32s(&sym->st_name);
    bswap32s(&sym->st_value);
    bswap32s(&sym->st_size);
    bswap16s(&sym->st_shndx);
}
260 static void * get_free_page(void)
264 /* User-space version of kernel get_free_page. Returns a page-aligned
265 * page-sized chunk of memory.
267 retval
= (void *)target_mmap(0, host_page_size
, PROT_READ
|PROT_WRITE
,
268 MAP_PRIVATE
|MAP_ANONYMOUS
, -1, 0);
270 if((long)retval
== -1) {
271 perror("get_free_page");
279 static void free_page(void * pageaddr
)
281 target_munmap((unsigned long)pageaddr
, host_page_size
);
285 * 'copy_string()' copies argument/envelope strings from user
286 * memory to free pages in kernel mem. These are in a format ready
287 * to be put directly into the top of new user memory.
290 static unsigned long copy_strings(int argc
,char ** argv
,unsigned long *page
,
293 char *tmp
, *tmp1
, *pag
= NULL
;
297 return 0; /* bullet-proofing */
300 if (!(tmp1
= tmp
= get_user(argv
+argc
))) {
301 fprintf(stderr
, "VFS: argc is wrong");
304 while (get_user(tmp
++));
306 if (p
< len
) { /* this shouldn't happen - 128kB */
312 offset
= p
% TARGET_PAGE_SIZE
;
313 if (!(pag
= (char *) page
[p
/TARGET_PAGE_SIZE
]) &&
314 !(pag
= (char *) page
[p
/TARGET_PAGE_SIZE
] =
315 (unsigned long *) get_free_page())) {
319 if (len
== 0 || offset
== 0) {
320 *(pag
+ offset
) = get_user(tmp
);
323 int bytes_to_copy
= (len
> offset
) ? offset
: len
;
324 tmp
-= bytes_to_copy
;
326 offset
-= bytes_to_copy
;
327 len
-= bytes_to_copy
;
328 memcpy_fromfs(pag
+ offset
, tmp
, bytes_to_copy
+ 1);
335 static int in_group_p(gid_t g
)
337 /* return TRUE if we're in the specified group, FALSE otherwise */
340 gid_t grouplist
[NGROUPS
];
342 ngroup
= getgroups(NGROUPS
, grouplist
);
343 for(i
= 0; i
< ngroup
; i
++) {
344 if(grouplist
[i
] == g
) {
351 static int count(char ** vec
)
355 for(i
= 0; *vec
; i
++) {
362 static int prepare_binprm(struct linux_binprm
*bprm
)
366 int retval
, id_change
;
368 if(fstat(bprm
->fd
, &st
) < 0) {
373 if(!S_ISREG(mode
)) { /* Must be regular file */
376 if(!(mode
& 0111)) { /* Must have at least one execute bit set */
380 bprm
->e_uid
= geteuid();
381 bprm
->e_gid
= getegid();
386 bprm
->e_uid
= st
.st_uid
;
387 if(bprm
->e_uid
!= geteuid()) {
394 * If setgid is set but no group execute bit then this
395 * is a candidate for mandatory locking, not a setgid
398 if ((mode
& (S_ISGID
| S_IXGRP
)) == (S_ISGID
| S_IXGRP
)) {
399 bprm
->e_gid
= st
.st_gid
;
400 if (!in_group_p(bprm
->e_gid
)) {
405 memset(bprm
->buf
, 0, sizeof(bprm
->buf
));
406 retval
= lseek(bprm
->fd
, 0L, SEEK_SET
);
408 retval
= read(bprm
->fd
, bprm
->buf
, 128);
411 perror("prepare_binprm");
413 /* return(-errno); */
420 unsigned long setup_arg_pages(unsigned long p
, struct linux_binprm
* bprm
,
421 struct image_info
* info
)
423 unsigned long stack_base
, size
, error
;
426 /* Create enough stack to hold everything. If we don't use
427 * it for args, we'll use it for something else...
429 size
= x86_stack_size
;
430 if (size
< MAX_ARG_PAGES
*TARGET_PAGE_SIZE
)
431 size
= MAX_ARG_PAGES
*TARGET_PAGE_SIZE
;
432 error
= target_mmap(0,
433 size
+ host_page_size
,
434 PROT_READ
| PROT_WRITE
,
435 MAP_PRIVATE
| MAP_ANONYMOUS
,
441 /* we reserve one extra page at the top of the stack as guard */
442 target_mprotect(error
+ size
, host_page_size
, PROT_NONE
);
444 stack_base
= error
+ size
- MAX_ARG_PAGES
*TARGET_PAGE_SIZE
;
448 bprm
->loader
+= stack_base
;
450 bprm
->exec
+= stack_base
;
452 for (i
= 0 ; i
< MAX_ARG_PAGES
; i
++) {
456 memcpy((void *)stack_base
, (void *)bprm
->page
[i
], TARGET_PAGE_SIZE
);
457 free_page((void *)bprm
->page
[i
]);
459 stack_base
+= TARGET_PAGE_SIZE
;
464 static void set_brk(unsigned long start
, unsigned long end
)
466 /* page-align the start and end addresses... */
467 start
= HOST_PAGE_ALIGN(start
);
468 end
= HOST_PAGE_ALIGN(end
);
471 if(target_mmap(start
, end
- start
,
472 PROT_READ
| PROT_WRITE
| PROT_EXEC
,
473 MAP_FIXED
| MAP_PRIVATE
| MAP_ANONYMOUS
, -1, 0) == -1) {
474 perror("cannot mmap brk");
480 /* We need to explicitly zero any fractional pages after the data
481 section (i.e. bss). This would contain the junk from the file that
482 should not be in memory. */
483 static void padzero(unsigned long elf_bss
)
488 /* XXX: this is really a hack : if the real host page size is
489 smaller than the target page size, some pages after the end
490 of the file may not be mapped. A better fix would be to
491 patch target_mmap(), but it is more complicated as the file
492 size must be known */
493 if (real_host_page_size
< host_page_size
) {
494 unsigned long end_addr
, end_addr1
;
495 end_addr1
= (elf_bss
+ real_host_page_size
- 1) &
496 ~(real_host_page_size
- 1);
497 end_addr
= HOST_PAGE_ALIGN(elf_bss
);
498 if (end_addr1
< end_addr
) {
499 mmap((void *)end_addr1
, end_addr
- end_addr1
,
500 PROT_READ
|PROT_WRITE
|PROT_EXEC
,
501 MAP_FIXED
|MAP_PRIVATE
|MAP_ANONYMOUS
, -1, 0);
505 nbyte
= elf_bss
& (host_page_size
-1);
507 nbyte
= host_page_size
- nbyte
;
508 fpnt
= (char *) elf_bss
;
515 static unsigned int * create_elf_tables(char *p
, int argc
, int envc
,
516 struct elfhdr
* exec
,
517 unsigned long load_addr
,
518 unsigned long load_bias
,
519 unsigned long interp_load_addr
, int ibcs
,
520 struct image_info
*info
)
522 target_ulong
*argv
, *envp
, *dlinfo
;
526 * Force 16 byte alignment here for generality.
528 sp
= (unsigned int *) (~15UL & (unsigned long) p
);
529 sp
-= DLINFO_ITEMS
*2;
536 put_user(tswapl((target_ulong
)envp
),--sp
);
537 put_user(tswapl((target_ulong
)argv
),--sp
);
540 #define NEW_AUX_ENT(id, val) \
541 put_user (tswapl(id), dlinfo++); \
542 put_user (tswapl(val), dlinfo++)
544 NEW_AUX_ENT (AT_PHDR
, (target_ulong
)(load_addr
+ exec
->e_phoff
));
545 NEW_AUX_ENT (AT_PHENT
, (target_ulong
)(sizeof (struct elf_phdr
)));
546 NEW_AUX_ENT (AT_PHNUM
, (target_ulong
)(exec
->e_phnum
));
547 NEW_AUX_ENT (AT_PAGESZ
, (target_ulong
)(TARGET_PAGE_SIZE
));
548 NEW_AUX_ENT (AT_BASE
, (target_ulong
)(interp_load_addr
));
549 NEW_AUX_ENT (AT_FLAGS
, (target_ulong
)0);
550 NEW_AUX_ENT (AT_ENTRY
, load_bias
+ exec
->e_entry
);
551 NEW_AUX_ENT (AT_UID
, (target_ulong
) getuid());
552 NEW_AUX_ENT (AT_EUID
, (target_ulong
) geteuid());
553 NEW_AUX_ENT (AT_GID
, (target_ulong
) getgid());
554 NEW_AUX_ENT (AT_EGID
, (target_ulong
) getegid());
555 NEW_AUX_ENT (AT_NULL
, 0);
558 put_user(tswapl(argc
),--sp
);
559 info
->arg_start
= (unsigned int)((unsigned long)p
& 0xffffffff);
561 put_user(tswapl((target_ulong
)p
),argv
++);
562 while (get_user(p
++)) /* nothing */ ;
565 info
->arg_end
= info
->env_start
= (unsigned int)((unsigned long)p
& 0xffffffff);
567 put_user(tswapl((target_ulong
)p
),envp
++);
568 while (get_user(p
++)) /* nothing */ ;
571 info
->env_end
= (unsigned int)((unsigned long)p
& 0xffffffff);
577 static unsigned long load_elf_interp(struct elfhdr
* interp_elf_ex
,
579 unsigned long *interp_load_addr
)
581 struct elf_phdr
*elf_phdata
= NULL
;
582 struct elf_phdr
*eppnt
;
583 unsigned long load_addr
= 0;
584 int load_addr_set
= 0;
586 unsigned long last_bss
, elf_bss
;
595 bswap_ehdr(interp_elf_ex
);
597 /* First of all, some simple consistency checks */
598 if ((interp_elf_ex
->e_type
!= ET_EXEC
&&
599 interp_elf_ex
->e_type
!= ET_DYN
) ||
600 !elf_check_arch(interp_elf_ex
->e_machine
)) {
605 /* Now read in all of the header information */
607 if (sizeof(struct elf_phdr
) * interp_elf_ex
->e_phnum
> TARGET_PAGE_SIZE
)
610 elf_phdata
= (struct elf_phdr
*)
611 malloc(sizeof(struct elf_phdr
) * interp_elf_ex
->e_phnum
);
617 * If the size of this structure has changed, then punt, since
618 * we will be doing the wrong thing.
620 if (interp_elf_ex
->e_phentsize
!= sizeof(struct elf_phdr
)) {
625 retval
= lseek(interpreter_fd
, interp_elf_ex
->e_phoff
, SEEK_SET
);
627 retval
= read(interpreter_fd
,
629 sizeof(struct elf_phdr
) * interp_elf_ex
->e_phnum
);
632 perror("load_elf_interp");
639 for (i
=0; i
<interp_elf_ex
->e_phnum
; i
++, eppnt
++) {
644 if (interp_elf_ex
->e_type
== ET_DYN
) {
645 /* in order to avoid harcoding the interpreter load
646 address in qemu, we allocate a big enough memory zone */
647 error
= target_mmap(0, INTERP_MAP_SIZE
,
648 PROT_NONE
, MAP_PRIVATE
| MAP_ANON
,
659 for(i
=0; i
<interp_elf_ex
->e_phnum
; i
++, eppnt
++)
660 if (eppnt
->p_type
== PT_LOAD
) {
661 int elf_type
= MAP_PRIVATE
| MAP_DENYWRITE
;
663 unsigned long vaddr
= 0;
666 if (eppnt
->p_flags
& PF_R
) elf_prot
= PROT_READ
;
667 if (eppnt
->p_flags
& PF_W
) elf_prot
|= PROT_WRITE
;
668 if (eppnt
->p_flags
& PF_X
) elf_prot
|= PROT_EXEC
;
669 if (interp_elf_ex
->e_type
== ET_EXEC
|| load_addr_set
) {
670 elf_type
|= MAP_FIXED
;
671 vaddr
= eppnt
->p_vaddr
;
673 error
= target_mmap(load_addr
+TARGET_ELF_PAGESTART(vaddr
),
674 eppnt
->p_filesz
+ TARGET_ELF_PAGEOFFSET(eppnt
->p_vaddr
),
678 eppnt
->p_offset
- TARGET_ELF_PAGEOFFSET(eppnt
->p_vaddr
));
680 if (error
> -1024UL) {
682 close(interpreter_fd
);
687 if (!load_addr_set
&& interp_elf_ex
->e_type
== ET_DYN
) {
693 * Find the end of the file mapping for this phdr, and keep
694 * track of the largest address we see for this.
696 k
= load_addr
+ eppnt
->p_vaddr
+ eppnt
->p_filesz
;
697 if (k
> elf_bss
) elf_bss
= k
;
700 * Do the same thing for the memory mapping - between
701 * elf_bss and last_bss is the bss section.
703 k
= load_addr
+ eppnt
->p_memsz
+ eppnt
->p_vaddr
;
704 if (k
> last_bss
) last_bss
= k
;
707 /* Now use mmap to map the library into memory. */
709 close(interpreter_fd
);
712 * Now fill out the bss section. First pad the last page up
713 * to the page boundary, and then perform a mmap to make sure
714 * that there are zeromapped pages up to and including the last
718 elf_bss
= TARGET_ELF_PAGESTART(elf_bss
+ host_page_size
- 1); /* What we have mapped so far */
720 /* Map the last of the bss segment */
721 if (last_bss
> elf_bss
) {
722 target_mmap(elf_bss
, last_bss
-elf_bss
,
723 PROT_READ
|PROT_WRITE
|PROT_EXEC
,
724 MAP_FIXED
|MAP_PRIVATE
|MAP_ANONYMOUS
, -1, 0);
728 *interp_load_addr
= load_addr
;
729 return ((unsigned long) interp_elf_ex
->e_entry
) + load_addr
;
732 /* Best attempt to load symbols from this ELF object. */
733 static void load_symbols(struct elfhdr
*hdr
, int fd
)
736 struct elf_shdr sechdr
, symtab
, strtab
;
739 lseek(fd
, hdr
->e_shoff
, SEEK_SET
);
740 for (i
= 0; i
< hdr
->e_shnum
; i
++) {
741 if (read(fd
, &sechdr
, sizeof(sechdr
)) != sizeof(sechdr
))
746 if (sechdr
.sh_type
== SHT_SYMTAB
) {
748 lseek(fd
, hdr
->e_shoff
749 + sizeof(sechdr
) * sechdr
.sh_link
, SEEK_SET
);
750 if (read(fd
, &strtab
, sizeof(strtab
))
759 return; /* Shouldn't happen... */
762 /* Now know where the strtab and symtab are. Snarf them. */
763 disas_symtab
= malloc(symtab
.sh_size
);
764 disas_strtab
= strings
= malloc(strtab
.sh_size
);
765 if (!disas_symtab
|| !disas_strtab
)
768 lseek(fd
, symtab
.sh_offset
, SEEK_SET
);
769 if (read(fd
, disas_symtab
, symtab
.sh_size
) != symtab
.sh_size
)
773 for (i
= 0; i
< symtab
.sh_size
/ sizeof(struct elf_sym
); i
++)
774 bswap_sym(disas_symtab
+ sizeof(struct elf_sym
)*i
);
777 lseek(fd
, strtab
.sh_offset
, SEEK_SET
);
778 if (read(fd
, strings
, strtab
.sh_size
) != strtab
.sh_size
)
780 disas_num_syms
= symtab
.sh_size
/ sizeof(struct elf_sym
);
783 static int load_elf_binary(struct linux_binprm
* bprm
, struct target_pt_regs
* regs
,
784 struct image_info
* info
)
786 struct elfhdr elf_ex
;
787 struct elfhdr interp_elf_ex
;
788 struct exec interp_ex
;
789 int interpreter_fd
= -1; /* avoid warning */
790 unsigned long load_addr
, load_bias
;
791 int load_addr_set
= 0;
792 unsigned int interpreter_type
= INTERPRETER_NONE
;
793 unsigned char ibcs2_interpreter
;
795 unsigned long mapped_addr
;
796 struct elf_phdr
* elf_ppnt
;
797 struct elf_phdr
*elf_phdata
;
798 unsigned long elf_bss
, k
, elf_brk
;
800 char * elf_interpreter
;
801 unsigned long elf_entry
, interp_load_addr
= 0;
803 unsigned long start_code
, end_code
, end_data
;
804 unsigned long elf_stack
;
805 char passed_fileno
[6];
807 ibcs2_interpreter
= 0;
811 elf_ex
= *((struct elfhdr
*) bprm
->buf
); /* exec-header */
816 if (elf_ex
.e_ident
[0] != 0x7f ||
817 strncmp(&elf_ex
.e_ident
[1], "ELF",3) != 0) {
821 /* First of all, some simple consistency checks */
822 if ((elf_ex
.e_type
!= ET_EXEC
&& elf_ex
.e_type
!= ET_DYN
) ||
823 (! elf_check_arch(elf_ex
.e_machine
))) {
827 /* Now read in all of the header information */
828 elf_phdata
= (struct elf_phdr
*)malloc(elf_ex
.e_phentsize
*elf_ex
.e_phnum
);
829 if (elf_phdata
== NULL
) {
833 retval
= lseek(bprm
->fd
, elf_ex
.e_phoff
, SEEK_SET
);
835 retval
= read(bprm
->fd
, (char *) elf_phdata
,
836 elf_ex
.e_phentsize
* elf_ex
.e_phnum
);
840 perror("load_elf_binary");
847 elf_ppnt
= elf_phdata
;
848 for (i
=0; i
<elf_ex
.e_phnum
; i
++, elf_ppnt
++) {
849 bswap_phdr(elf_ppnt
);
852 elf_ppnt
= elf_phdata
;
859 elf_interpreter
= NULL
;
864 for(i
=0;i
< elf_ex
.e_phnum
; i
++) {
865 if (elf_ppnt
->p_type
== PT_INTERP
) {
866 if ( elf_interpreter
!= NULL
)
869 free(elf_interpreter
);
874 /* This is the program interpreter used for
875 * shared libraries - for now assume that this
876 * is an a.out format binary
879 elf_interpreter
= (char *)malloc(elf_ppnt
->p_filesz
);
881 if (elf_interpreter
== NULL
) {
887 retval
= lseek(bprm
->fd
, elf_ppnt
->p_offset
, SEEK_SET
);
889 retval
= read(bprm
->fd
, elf_interpreter
, elf_ppnt
->p_filesz
);
892 perror("load_elf_binary2");
896 /* If the program interpreter is one of these two,
897 then assume an iBCS2 image. Otherwise assume
898 a native linux image. */
900 /* JRP - Need to add X86 lib dir stuff here... */
902 if (strcmp(elf_interpreter
,"/usr/lib/libc.so.1") == 0 ||
903 strcmp(elf_interpreter
,"/usr/lib/ld.so.1") == 0) {
904 ibcs2_interpreter
= 1;
908 printf("Using ELF interpreter %s\n", elf_interpreter
);
911 retval
= open(path(elf_interpreter
), O_RDONLY
);
913 interpreter_fd
= retval
;
916 perror(elf_interpreter
);
918 /* retval = -errno; */
923 retval
= lseek(interpreter_fd
, 0, SEEK_SET
);
925 retval
= read(interpreter_fd
,bprm
->buf
,128);
929 interp_ex
= *((struct exec
*) bprm
->buf
); /* aout exec-header */
930 interp_elf_ex
=*((struct elfhdr
*) bprm
->buf
); /* elf exec-header */
933 perror("load_elf_binary3");
936 free(elf_interpreter
);
944 /* Some simple consistency checks for the interpreter */
945 if (elf_interpreter
){
946 interpreter_type
= INTERPRETER_ELF
| INTERPRETER_AOUT
;
948 /* Now figure out which format our binary is */
949 if ((N_MAGIC(interp_ex
) != OMAGIC
) && (N_MAGIC(interp_ex
) != ZMAGIC
) &&
950 (N_MAGIC(interp_ex
) != QMAGIC
)) {
951 interpreter_type
= INTERPRETER_ELF
;
954 if (interp_elf_ex
.e_ident
[0] != 0x7f ||
955 strncmp(&interp_elf_ex
.e_ident
[1], "ELF",3) != 0) {
956 interpreter_type
&= ~INTERPRETER_ELF
;
959 if (!interpreter_type
) {
960 free(elf_interpreter
);
967 /* OK, we are done with that, now set up the arg stuff,
968 and then start this sucker up */
970 if (!bprm
->sh_bang
) {
973 if (interpreter_type
== INTERPRETER_AOUT
) {
974 sprintf(passed_fileno
, "%d", bprm
->fd
);
975 passed_p
= passed_fileno
;
977 if (elf_interpreter
) {
978 bprm
->p
= copy_strings(1,&passed_p
,bprm
->page
,bprm
->p
);
983 if (elf_interpreter
) {
984 free(elf_interpreter
);
992 /* OK, This is the point of no return */
995 info
->start_mmap
= (unsigned long)ELF_START_MMAP
;
997 elf_entry
= (unsigned long) elf_ex
.e_entry
;
999 /* Do this so that we can load the interpreter, if need be. We will
1000 change some of these later */
1002 bprm
->p
= setup_arg_pages(bprm
->p
, bprm
, info
);
1003 info
->start_stack
= bprm
->p
;
1005 /* Now we do a little grungy work by mmaping the ELF image into
1006 * the correct location in memory. At this point, we assume that
1007 * the image should be loaded at fixed address, not at a variable
1011 for(i
= 0, elf_ppnt
= elf_phdata
; i
< elf_ex
.e_phnum
; i
++, elf_ppnt
++) {
1014 unsigned long error
;
1016 if (elf_ppnt
->p_type
!= PT_LOAD
)
1019 if (elf_ppnt
->p_flags
& PF_R
) elf_prot
|= PROT_READ
;
1020 if (elf_ppnt
->p_flags
& PF_W
) elf_prot
|= PROT_WRITE
;
1021 if (elf_ppnt
->p_flags
& PF_X
) elf_prot
|= PROT_EXEC
;
1022 elf_flags
= MAP_PRIVATE
| MAP_DENYWRITE
;
1023 if (elf_ex
.e_type
== ET_EXEC
|| load_addr_set
) {
1024 elf_flags
|= MAP_FIXED
;
1025 } else if (elf_ex
.e_type
== ET_DYN
) {
1026 /* Try and get dynamic programs out of the way of the default mmap
1027 base, as well as whatever program they might try to exec. This
1028 is because the brk will follow the loader, and is not movable. */
1029 /* NOTE: for qemu, we do a big mmap to get enough space
1030 without harcoding any address */
1031 error
= target_mmap(0, ET_DYN_MAP_SIZE
,
1032 PROT_NONE
, MAP_PRIVATE
| MAP_ANON
,
1038 load_bias
= TARGET_ELF_PAGESTART(error
- elf_ppnt
->p_vaddr
);
1041 error
= target_mmap(TARGET_ELF_PAGESTART(load_bias
+ elf_ppnt
->p_vaddr
),
1042 (elf_ppnt
->p_filesz
+
1043 TARGET_ELF_PAGEOFFSET(elf_ppnt
->p_vaddr
)),
1045 (MAP_FIXED
| MAP_PRIVATE
| MAP_DENYWRITE
),
1047 (elf_ppnt
->p_offset
-
1048 TARGET_ELF_PAGEOFFSET(elf_ppnt
->p_vaddr
)));
1054 #ifdef LOW_ELF_STACK
1055 if (TARGET_ELF_PAGESTART(elf_ppnt
->p_vaddr
) < elf_stack
)
1056 elf_stack
= TARGET_ELF_PAGESTART(elf_ppnt
->p_vaddr
);
1059 if (!load_addr_set
) {
1061 load_addr
= elf_ppnt
->p_vaddr
- elf_ppnt
->p_offset
;
1062 if (elf_ex
.e_type
== ET_DYN
) {
1063 load_bias
+= error
-
1064 TARGET_ELF_PAGESTART(load_bias
+ elf_ppnt
->p_vaddr
);
1065 load_addr
+= load_bias
;
1068 k
= elf_ppnt
->p_vaddr
;
1071 k
= elf_ppnt
->p_vaddr
+ elf_ppnt
->p_filesz
;
1074 if ((elf_ppnt
->p_flags
& PF_X
) && end_code
< k
)
1078 k
= elf_ppnt
->p_vaddr
+ elf_ppnt
->p_memsz
;
1079 if (k
> elf_brk
) elf_brk
= k
;
1082 elf_entry
+= load_bias
;
1083 elf_bss
+= load_bias
;
1084 elf_brk
+= load_bias
;
1085 start_code
+= load_bias
;
1086 end_code
+= load_bias
;
1087 // start_data += load_bias;
1088 end_data
+= load_bias
;
1090 if (elf_interpreter
) {
1091 if (interpreter_type
& 1) {
1092 elf_entry
= load_aout_interp(&interp_ex
, interpreter_fd
);
1094 else if (interpreter_type
& 2) {
1095 elf_entry
= load_elf_interp(&interp_elf_ex
, interpreter_fd
,
1099 close(interpreter_fd
);
1100 free(elf_interpreter
);
1102 if (elf_entry
== ~0UL) {
1103 printf("Unable to load interpreter\n");
1113 load_symbols(&elf_ex
, bprm
->fd
);
1115 if (interpreter_type
!= INTERPRETER_AOUT
) close(bprm
->fd
);
1116 info
->personality
= (ibcs2_interpreter
? PER_SVR4
: PER_LINUX
);
1118 #ifdef LOW_ELF_STACK
1119 info
->start_stack
= bprm
->p
= elf_stack
- 4;
1121 bprm
->p
= (unsigned long)
1122 create_elf_tables((char *)bprm
->p
,
1126 load_addr
, load_bias
,
1128 (interpreter_type
== INTERPRETER_AOUT
? 0 : 1),
1130 if (interpreter_type
== INTERPRETER_AOUT
)
1131 info
->arg_start
+= strlen(passed_fileno
) + 1;
1132 info
->start_brk
= info
->brk
= elf_brk
;
1133 info
->end_code
= end_code
;
1134 info
->start_code
= start_code
;
1135 info
->end_data
= end_data
;
1136 info
->start_stack
= bprm
->p
;
1138 /* Calling set_brk effectively mmaps the pages that we need for the bss and break
1140 set_brk(elf_bss
, elf_brk
);
1145 printf("(start_brk) %x\n" , info
->start_brk
);
1146 printf("(end_code) %x\n" , info
->end_code
);
1147 printf("(start_code) %x\n" , info
->start_code
);
1148 printf("(end_data) %x\n" , info
->end_data
);
1149 printf("(start_stack) %x\n" , info
->start_stack
);
1150 printf("(brk) %x\n" , info
->brk
);
1153 if ( info
->personality
== PER_SVR4
)
1155 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
1156 and some applications "depend" upon this behavior.
1157 Since we do not have the power to recompile these, we
1158 emulate the SVr4 behavior. Sigh. */
1159 mapped_addr
= target_mmap(0, host_page_size
, PROT_READ
| PROT_EXEC
,
1160 MAP_FIXED
| MAP_PRIVATE
, -1, 0);
1163 #ifdef ELF_PLAT_INIT
1165 * The ABI may specify that certain registers be set up in special
1166 * ways (on i386 %edx is the address of a DT_FINI function, for
1167 * example. This macro performs whatever initialization to
1168 * the regs structure is required.
1170 ELF_PLAT_INIT(regs
);
1174 info
->entry
= elf_entry
;
1181 int elf_exec(const char * filename
, char ** argv
, char ** envp
,
1182 struct target_pt_regs
* regs
, struct image_info
*infop
)
1184 struct linux_binprm bprm
;
1188 bprm
.p
= TARGET_PAGE_SIZE
*MAX_ARG_PAGES
-sizeof(unsigned int);
1189 for (i
=0 ; i
<MAX_ARG_PAGES
; i
++) /* clear page-table */
1191 retval
= open(filename
, O_RDONLY
);
1195 /* return retval; */
1200 bprm
.filename
= (char *)filename
;
1205 bprm
.argc
= count(argv
);
1206 bprm
.envc
= count(envp
);
1208 retval
= prepare_binprm(&bprm
);
1211 bprm
.p
= copy_strings(1, &bprm
.filename
, bprm
.page
, bprm
.p
);
1213 bprm
.p
= copy_strings(bprm
.envc
,envp
,bprm
.page
,bprm
.p
);
1214 bprm
.p
= copy_strings(bprm
.argc
,argv
,bprm
.page
,bprm
.p
);
1221 retval
= load_elf_binary(&bprm
,regs
,infop
);
1224 /* success. Initialize important registers */
1225 init_thread(regs
, infop
);
1229 /* Something went wrong, return the inode and free the argument pages*/
1230 for (i
=0 ; i
<MAX_ARG_PAGES
; i
++) {
1231 free_page((void *)bprm
.page
[i
]);
1237 static int load_aout_interp(void * exptr
, int interp_fd
)
1239 printf("a.out interpreter not yet supported\n");