1 /* This is the Linux kernel elf-loading code, ported into user space */
15 /* this flag is ineffective under linux too, and should be deleted */
17 #define MAP_DENYWRITE 0
20 /* should probably go in elf.h */
27 #define ELF_PLATFORM get_elf_platform()
29 static const char *get_elf_platform(void)
31 static char elf_platform
[] = "i386";
32 int family
= (global_env
->cpuid_version
>> 8) & 0xff;
36 elf_platform
[1] = '0' + family
;
40 #define ELF_HWCAP get_elf_hwcap()
42 static uint32_t get_elf_hwcap(void)
44 return global_env
->cpuid_features
;
48 #define ELF_START_MMAP 0x2aaaaab000ULL
49 #define elf_check_arch(x) ( ((x) == ELF_ARCH) )
51 #define ELF_CLASS ELFCLASS64
52 #define ELF_DATA ELFDATA2LSB
53 #define ELF_ARCH EM_X86_64
55 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
58 regs
->rsp
= infop
->start_stack
;
59 regs
->rip
= infop
->entry
;
64 #define ELF_START_MMAP 0x80000000
67 * This is used to ensure we don't load something for the wrong architecture.
69 #define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )
72 * These are used to set parameters in the core dumps.
74 #define ELF_CLASS ELFCLASS32
75 #define ELF_DATA ELFDATA2LSB
76 #define ELF_ARCH EM_386
78 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
80 regs
->esp
= infop
->start_stack
;
81 regs
->eip
= infop
->entry
;
83 /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
84 starts %edx contains a pointer to a function which might be
85 registered using `atexit'. This provides a mean for the
86 dynamic linker to call DT_FINI functions for shared libraries
87 that have been loaded before the code runs.
89 A value of 0 tells we have no such handler. */
94 #define USE_ELF_CORE_DUMP
95 #define ELF_EXEC_PAGESIZE 4096
101 #define ELF_START_MMAP 0x80000000
103 #define elf_check_arch(x) ( (x) == EM_ARM )
105 #define ELF_CLASS ELFCLASS32
/* ARM may run either endianness; the #else/#endif lines were dropped by
   the extraction and are restored here. */
#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA        ELFDATA2MSB
#else
#define ELF_DATA        ELFDATA2LSB
#endif
111 #define ELF_ARCH EM_ARM
113 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
115 target_long stack
= infop
->start_stack
;
116 memset(regs
, 0, sizeof(*regs
));
117 regs
->ARM_cpsr
= 0x10;
118 if (infop
->entry
& 1)
119 regs
->ARM_cpsr
|= CPSR_T
;
120 regs
->ARM_pc
= infop
->entry
& 0xfffffffe;
121 regs
->ARM_sp
= infop
->start_stack
;
122 regs
->ARM_r2
= tgetl(stack
+ 8); /* envp */
123 regs
->ARM_r1
= tgetl(stack
+ 4); /* envp */
124 /* XXX: it seems that r0 is zeroed after ! */
126 /* For uClinux PIC binaries. */
127 regs
->ARM_r10
= infop
->start_data
;
130 #define USE_ELF_CORE_DUMP
131 #define ELF_EXEC_PAGESIZE 4096
/* ARM hardware-capability bits reported through AT_HWCAP (values match
   the Linux kernel's HWCAP_* for ARM).  The enum framing lines were
   dropped by the extraction and are restored here. */
enum
{
    ARM_HWCAP_ARM_SWP       = 1 << 0,
    ARM_HWCAP_ARM_HALF      = 1 << 1,
    ARM_HWCAP_ARM_THUMB     = 1 << 2,
    ARM_HWCAP_ARM_26BIT     = 1 << 3,
    ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
    ARM_HWCAP_ARM_FPA       = 1 << 5,
    ARM_HWCAP_ARM_VFP       = 1 << 6,
    ARM_HWCAP_ARM_EDSP      = 1 << 7,
};
145 #define ELF_HWCAP (ARM_HWCAP_ARM_SWP | ARM_HWCAP_ARM_HALF \
146 | ARM_HWCAP_ARM_THUMB | ARM_HWCAP_ARM_FAST_MULT \
147 | ARM_HWCAP_ARM_FPA | ARM_HWCAP_ARM_VFP)
152 #ifdef TARGET_SPARC64
154 #define ELF_START_MMAP 0x80000000
156 #define elf_check_arch(x) ( (x) == EM_SPARCV9 )
158 #define ELF_CLASS ELFCLASS64
159 #define ELF_DATA ELFDATA2MSB
160 #define ELF_ARCH EM_SPARCV9
162 #define STACK_BIAS 2047
164 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
167 regs
->pc
= infop
->entry
;
168 regs
->npc
= regs
->pc
+ 4;
170 regs
->u_regs
[14] = infop
->start_stack
- 16 * 8 - STACK_BIAS
;
174 #define ELF_START_MMAP 0x80000000
176 #define elf_check_arch(x) ( (x) == EM_SPARC )
178 #define ELF_CLASS ELFCLASS32
179 #define ELF_DATA ELFDATA2MSB
180 #define ELF_ARCH EM_SPARC
182 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
185 regs
->pc
= infop
->entry
;
186 regs
->npc
= regs
->pc
+ 4;
188 regs
->u_regs
[14] = infop
->start_stack
- 16 * 4;
196 #define ELF_START_MMAP 0x80000000
200 #define elf_check_arch(x) ( (x) == EM_PPC64 )
202 #define ELF_CLASS ELFCLASS64
206 #define elf_check_arch(x) ( (x) == EM_PPC )
208 #define ELF_CLASS ELFCLASS32
/* PPC byte order; the #else/#endif lines were dropped by the extraction
   and are restored here. */
#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA        ELFDATA2MSB
#else
#define ELF_DATA        ELFDATA2LSB
#endif
217 #define ELF_ARCH EM_PPC
220 * We need to put in some extra aux table entries to tell glibc what
221 * the cache block size is, so it can use the dcbz instruction safely.
223 #define AT_DCACHEBSIZE 19
224 #define AT_ICACHEBSIZE 20
225 #define AT_UCACHEBSIZE 21
226 /* A special ignored type value for PPC, for glibc compatibility. */
227 #define AT_IGNOREPPC 22
229 * The requirements here are:
230 * - keep the final alignment of sp (sp & 0xf)
231 * - make sure the 32-bit value at the first 16 byte aligned position of
232 * AUXV is greater than 16 for glibc compatibility.
233 * AT_IGNOREPPC is used for that.
234 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
235 * even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
237 #define DLINFO_ARCH_ITEMS 5
238 #define ARCH_DLINFO \
240 NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20); \
241 NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20); \
242 NEW_AUX_ENT(AT_UCACHEBSIZE, 0); \
244 * Now handle glibc compatibility. \
246 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
247 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
250 static inline void init_thread(struct target_pt_regs
*_regs
, struct image_info
*infop
)
252 target_ulong pos
= infop
->start_stack
;
255 target_ulong entry
, toc
;
258 _regs
->msr
= 1 << MSR_PR
; /* Set user mode */
259 _regs
->gpr
[1] = infop
->start_stack
;
261 entry
= ldq_raw(infop
->entry
) + infop
->load_addr
;
262 toc
= ldq_raw(infop
->entry
+ 8) + infop
->load_addr
;
264 infop
->entry
= entry
;
266 _regs
->nip
= infop
->entry
;
267 /* Note that isn't exactly what regular kernel does
268 * but this is what the ABI wants and is needed to allow
269 * execution of PPC BSD programs.
271 _regs
->gpr
[3] = tgetl(pos
);
272 pos
+= sizeof(target_ulong
);
274 for (tmp
= 1; tmp
!= 0; pos
+= sizeof(target_ulong
))
279 #define USE_ELF_CORE_DUMP
280 #define ELF_EXEC_PAGESIZE 4096
286 #define ELF_START_MMAP 0x80000000
288 #define elf_check_arch(x) ( (x) == EM_MIPS )
291 #define ELF_CLASS ELFCLASS64
293 #define ELF_CLASS ELFCLASS32
/* MIPS byte order; the #else/#endif lines were dropped by the extraction
   and are restored here. */
#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA        ELFDATA2MSB
#else
#define ELF_DATA        ELFDATA2LSB
#endif
300 #define ELF_ARCH EM_MIPS
302 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
304 regs
->cp0_status
= CP0St_UM
;
305 regs
->cp0_epc
= infop
->entry
;
306 regs
->regs
[29] = infop
->start_stack
;
309 #define USE_ELF_CORE_DUMP
310 #define ELF_EXEC_PAGESIZE 4096
312 #endif /* TARGET_MIPS */
316 #define ELF_START_MMAP 0x80000000
318 #define elf_check_arch(x) ( (x) == EM_SH )
320 #define ELF_CLASS ELFCLASS32
321 #define ELF_DATA ELFDATA2LSB
322 #define ELF_ARCH EM_SH
324 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
326 /* Check other registers XXXXX */
327 regs
->pc
= infop
->entry
;
328 regs
->regs
[15] = infop
->start_stack
;
331 #define USE_ELF_CORE_DUMP
332 #define ELF_EXEC_PAGESIZE 4096
338 #define ELF_START_MMAP 0x80000000
340 #define elf_check_arch(x) ( (x) == EM_68K )
342 #define ELF_CLASS ELFCLASS32
343 #define ELF_DATA ELFDATA2MSB
344 #define ELF_ARCH EM_68K
346 /* ??? Does this need to do anything?
347 #define ELF_PLAT_INIT(_r) */
349 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
351 regs
->usp
= infop
->start_stack
;
353 regs
->pc
= infop
->entry
;
356 #define USE_ELF_CORE_DUMP
357 #define ELF_EXEC_PAGESIZE 8192
363 #define ELF_START_MMAP (0x30000000000ULL)
365 #define elf_check_arch(x) ( (x) == ELF_ARCH )
367 #define ELF_CLASS ELFCLASS64
368 #define ELF_DATA ELFDATA2MSB
369 #define ELF_ARCH EM_ALPHA
371 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
373 regs
->pc
= infop
->entry
;
375 regs
->usp
= infop
->start_stack
;
376 regs
->unique
= infop
->start_data
; /* ? */
377 printf("Set unique value to " TARGET_FMT_lx
" (" TARGET_FMT_lx
")\n",
378 regs
->unique
, infop
->start_data
);
381 #define USE_ELF_CORE_DUMP
382 #define ELF_EXEC_PAGESIZE 8192
384 #endif /* TARGET_ALPHA */
387 #define ELF_PLATFORM (NULL)
/* Classic a.out executable header, used to probe whether the program
   interpreter is an a.out rather than an ELF binary.  The "struct exec
   {" and "};" framing lines were dropped by the extraction and are
   restored here. */
struct exec
{
    unsigned int a_info;   /* Use macros N_MAGIC, etc for access */
    unsigned int a_text;   /* length of text, in bytes */
    unsigned int a_data;   /* length of data, in bytes */
    unsigned int a_bss;    /* length of uninitialized data area, in bytes */
    unsigned int a_syms;   /* length of symbol table data in file, in bytes */
    unsigned int a_entry;  /* start address */
    unsigned int a_trsize; /* length of relocation info for text, in bytes */
    unsigned int a_drsize; /* length of relocation info for data, in bytes */
};
409 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
415 /* max code+data+bss space allocated to elf interpreter */
416 #define INTERP_MAP_SIZE (32 * 1024 * 1024)
418 /* max code+data+bss+brk space allocated to ET_DYN executables */
419 #define ET_DYN_MAP_SIZE (128 * 1024 * 1024)
421 /* from personality.h */
423 /* Flags for bug emulation. These occupy the top three bytes. */
424 #define STICKY_TIMEOUTS 0x4000000
425 #define WHOLE_SECONDS 0x2000000
427 /* Personality types. These go in the low byte. Avoid using the top bit,
428 * it will conflict with error returns.
430 #define PER_MASK (0x00ff)
431 #define PER_LINUX (0x0000)
432 #define PER_SVR4 (0x0001 | STICKY_TIMEOUTS)
433 #define PER_SVR3 (0x0002 | STICKY_TIMEOUTS)
434 #define PER_SCOSVR3 (0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS)
435 #define PER_WYSEV386 (0x0004 | STICKY_TIMEOUTS)
436 #define PER_ISCR4 (0x0005 | STICKY_TIMEOUTS)
437 #define PER_BSD (0x0006)
438 #define PER_XENIX (0x0007 | STICKY_TIMEOUTS)
440 /* Necessary parameters */
441 #define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
442 #define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
443 #define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
445 #define INTERPRETER_NONE 0
446 #define INTERPRETER_AOUT 1
447 #define INTERPRETER_ELF 2
449 #define DLINFO_ITEMS 12
/* Kernel-heritage shim: in user space "copying from user" is a plain
   memcpy.  The body line was dropped by the extraction and is restored. */
static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
{
    memcpy(to, from, n);
}
456 extern unsigned long x86_stack_size
;
458 static int load_aout_interp(void * exptr
, int interp_fd
);
461 static void bswap_ehdr(struct elfhdr
*ehdr
)
463 bswap16s(&ehdr
->e_type
); /* Object file type */
464 bswap16s(&ehdr
->e_machine
); /* Architecture */
465 bswap32s(&ehdr
->e_version
); /* Object file version */
466 bswaptls(&ehdr
->e_entry
); /* Entry point virtual address */
467 bswaptls(&ehdr
->e_phoff
); /* Program header table file offset */
468 bswaptls(&ehdr
->e_shoff
); /* Section header table file offset */
469 bswap32s(&ehdr
->e_flags
); /* Processor-specific flags */
470 bswap16s(&ehdr
->e_ehsize
); /* ELF header size in bytes */
471 bswap16s(&ehdr
->e_phentsize
); /* Program header table entry size */
472 bswap16s(&ehdr
->e_phnum
); /* Program header table entry count */
473 bswap16s(&ehdr
->e_shentsize
); /* Section header table entry size */
474 bswap16s(&ehdr
->e_shnum
); /* Section header table entry count */
475 bswap16s(&ehdr
->e_shstrndx
); /* Section header string table index */
478 static void bswap_phdr(struct elf_phdr
*phdr
)
480 bswap32s(&phdr
->p_type
); /* Segment type */
481 bswaptls(&phdr
->p_offset
); /* Segment file offset */
482 bswaptls(&phdr
->p_vaddr
); /* Segment virtual address */
483 bswaptls(&phdr
->p_paddr
); /* Segment physical address */
484 bswaptls(&phdr
->p_filesz
); /* Segment size in file */
485 bswaptls(&phdr
->p_memsz
); /* Segment size in memory */
486 bswap32s(&phdr
->p_flags
); /* Segment flags */
487 bswaptls(&phdr
->p_align
); /* Segment alignment */
490 static void bswap_shdr(struct elf_shdr
*shdr
)
492 bswap32s(&shdr
->sh_name
);
493 bswap32s(&shdr
->sh_type
);
494 bswaptls(&shdr
->sh_flags
);
495 bswaptls(&shdr
->sh_addr
);
496 bswaptls(&shdr
->sh_offset
);
497 bswaptls(&shdr
->sh_size
);
498 bswap32s(&shdr
->sh_link
);
499 bswap32s(&shdr
->sh_info
);
500 bswaptls(&shdr
->sh_addralign
);
501 bswaptls(&shdr
->sh_entsize
);
504 static void bswap_sym(struct elf_sym
*sym
)
506 bswap32s(&sym
->st_name
);
507 bswaptls(&sym
->st_value
);
508 bswaptls(&sym
->st_size
);
509 bswap16s(&sym
->st_shndx
);
514 * 'copy_elf_strings()' copies argument/envelope strings from user
515 * memory to free pages in kernel mem. These are in a format ready
516 * to be put directly into the top of new user memory.
519 static unsigned long copy_elf_strings(int argc
,char ** argv
, void **page
,
522 char *tmp
, *tmp1
, *pag
= NULL
;
526 return 0; /* bullet-proofing */
531 fprintf(stderr
, "VFS: argc is wrong");
537 if (p
< len
) { /* this shouldn't happen - 128kB */
543 offset
= p
% TARGET_PAGE_SIZE
;
544 pag
= (char *)page
[p
/TARGET_PAGE_SIZE
];
546 pag
= (char *)malloc(TARGET_PAGE_SIZE
);
547 memset(pag
, 0, TARGET_PAGE_SIZE
);
548 page
[p
/TARGET_PAGE_SIZE
] = pag
;
553 if (len
== 0 || offset
== 0) {
554 *(pag
+ offset
) = *tmp
;
557 int bytes_to_copy
= (len
> offset
) ? offset
: len
;
558 tmp
-= bytes_to_copy
;
560 offset
-= bytes_to_copy
;
561 len
-= bytes_to_copy
;
562 memcpy_fromfs(pag
+ offset
, tmp
, bytes_to_copy
+ 1);
569 unsigned long setup_arg_pages(target_ulong p
, struct linux_binprm
* bprm
,
570 struct image_info
* info
)
572 target_ulong stack_base
, size
, error
;
575 /* Create enough stack to hold everything. If we don't use
576 * it for args, we'll use it for something else...
578 size
= x86_stack_size
;
579 if (size
< MAX_ARG_PAGES
*TARGET_PAGE_SIZE
)
580 size
= MAX_ARG_PAGES
*TARGET_PAGE_SIZE
;
581 error
= target_mmap(0,
582 size
+ qemu_host_page_size
,
583 PROT_READ
| PROT_WRITE
,
584 MAP_PRIVATE
| MAP_ANONYMOUS
,
590 /* we reserve one extra page at the top of the stack as guard */
591 target_mprotect(error
+ size
, qemu_host_page_size
, PROT_NONE
);
593 stack_base
= error
+ size
- MAX_ARG_PAGES
*TARGET_PAGE_SIZE
;
596 for (i
= 0 ; i
< MAX_ARG_PAGES
; i
++) {
600 memcpy_to_target(stack_base
, bprm
->page
[i
], TARGET_PAGE_SIZE
);
603 stack_base
+= TARGET_PAGE_SIZE
;
608 static void set_brk(unsigned long start
, unsigned long end
)
610 /* page-align the start and end addresses... */
611 start
= HOST_PAGE_ALIGN(start
);
612 end
= HOST_PAGE_ALIGN(end
);
615 if(target_mmap(start
, end
- start
,
616 PROT_READ
| PROT_WRITE
| PROT_EXEC
,
617 MAP_FIXED
| MAP_PRIVATE
| MAP_ANONYMOUS
, -1, 0) == -1) {
618 perror("cannot mmap brk");
624 /* We need to explicitly zero any fractional pages after the data
625 section (i.e. bss). This would contain the junk from the file that
626 should not be in memory. */
627 static void padzero(unsigned long elf_bss
, unsigned long last_bss
)
631 if (elf_bss
>= last_bss
)
634 /* XXX: this is really a hack : if the real host page size is
635 smaller than the target page size, some pages after the end
636 of the file may not be mapped. A better fix would be to
637 patch target_mmap(), but it is more complicated as the file
638 size must be known */
639 if (qemu_real_host_page_size
< qemu_host_page_size
) {
640 unsigned long end_addr
, end_addr1
;
641 end_addr1
= (elf_bss
+ qemu_real_host_page_size
- 1) &
642 ~(qemu_real_host_page_size
- 1);
643 end_addr
= HOST_PAGE_ALIGN(elf_bss
);
644 if (end_addr1
< end_addr
) {
645 mmap((void *)end_addr1
, end_addr
- end_addr1
,
646 PROT_READ
|PROT_WRITE
|PROT_EXEC
,
647 MAP_FIXED
|MAP_PRIVATE
|MAP_ANONYMOUS
, -1, 0);
651 nbyte
= elf_bss
& (qemu_host_page_size
-1);
653 nbyte
= qemu_host_page_size
- nbyte
;
/* Build the initial guest stack image: platform string, 16-byte
 * alignment padding, the ELF auxiliary vector (AT_* entries), then the
 * argc/argv/envp pointer arrays via loader_build_argptr().
 *
 * NOTE(review): this span is a mangled extraction -- statements are
 * split across lines and several original lines (braces, sp/u_platform
 * setup, the #endif lines) were dropped.  Code is kept byte-identical;
 * comments only.
 */
662 static unsigned long create_elf_tables(target_ulong p
, int argc
, int envc
,
663 struct elfhdr
* exec
,
664 unsigned long load_addr
,
665 unsigned long load_bias
,
666 unsigned long interp_load_addr
, int ibcs
,
667 struct image_info
*info
)
671 target_ulong u_platform
;
672 const char *k_platform
;
/* n = size of one guest word; used to keep the stack word-aligned. */
673 const int n
= sizeof(target_ulong
);
677 k_platform
= ELF_PLATFORM
;
/* Copy the platform string onto the guest stack, word-aligned. */
679 size_t len
= strlen(k_platform
) + 1;
680 sp
-= (len
+ n
- 1) & ~(n
- 1);
682 memcpy_to_target(sp
, k_platform
, len
);
685 * Force 16 byte _final_ alignment here for generality.
687 sp
= sp
&~ (target_ulong
)15;
/* Count the words the auxv + argv/envp tables will occupy so the final
   sp lands on a 16-byte boundary. */
688 size
= (DLINFO_ITEMS
+ 1) * 2;
691 #ifdef DLINFO_ARCH_ITEMS
692 size
+= DLINFO_ARCH_ITEMS
* 2;
694 size
+= envc
+ argc
+ 2;
695 size
+= (!ibcs
? 3 : 1); /* argc itself */
698 sp
-= 16 - (size
& 15);
700 #define NEW_AUX_ENT(id, val) do { \
701 sp -= n; tputl(sp, val); \
702 sp -= n; tputl(sp, id); \
/* Entries are pushed downward, so AT_NULL goes first and ends up last. */
704 NEW_AUX_ENT (AT_NULL
, 0);
706 /* There must be exactly DLINFO_ITEMS entries here. */
707 NEW_AUX_ENT(AT_PHDR
, (target_ulong
)(load_addr
+ exec
->e_phoff
));
708 NEW_AUX_ENT(AT_PHENT
, (target_ulong
)(sizeof (struct elf_phdr
)));
709 NEW_AUX_ENT(AT_PHNUM
, (target_ulong
)(exec
->e_phnum
));
710 NEW_AUX_ENT(AT_PAGESZ
, (target_ulong
)(TARGET_PAGE_SIZE
));
711 NEW_AUX_ENT(AT_BASE
, (target_ulong
)(interp_load_addr
));
712 NEW_AUX_ENT(AT_FLAGS
, (target_ulong
)0);
713 NEW_AUX_ENT(AT_ENTRY
, load_bias
+ exec
->e_entry
);
714 NEW_AUX_ENT(AT_UID
, (target_ulong
) getuid());
715 NEW_AUX_ENT(AT_EUID
, (target_ulong
) geteuid());
716 NEW_AUX_ENT(AT_GID
, (target_ulong
) getgid());
717 NEW_AUX_ENT(AT_EGID
, (target_ulong
) getegid());
718 NEW_AUX_ENT(AT_HWCAP
, (target_ulong
) ELF_HWCAP
);
720 NEW_AUX_ENT(AT_PLATFORM
, u_platform
);
723 * ARCH_DLINFO must come last so platform specific code can enforce
724 * special alignment requirements on the AUXV if necessary (eg. PPC).
/* Finally lay down envp, argv and argc below the auxv. */
730 sp
= loader_build_argptr(envc
, argc
, sp
, p
, !ibcs
);
/* Load the ELF program interpreter (dynamic linker) from interpreter_fd:
 * validate its header, read its program headers, map each PT_LOAD
 * segment (allocating a free region first for ET_DYN), zero the bss
 * tail, and return the interpreter's relocated entry point (~0UL-style
 * failure paths are in lines dropped by the extraction).
 *
 * NOTE(review): mangled extraction -- braces, error-handling branches
 * and some declarations were dropped.  Code kept byte-identical;
 * comments only.
 */
735 static unsigned long load_elf_interp(struct elfhdr
* interp_elf_ex
,
737 unsigned long *interp_load_addr
)
739 struct elf_phdr
*elf_phdata
= NULL
;
740 struct elf_phdr
*eppnt
;
741 unsigned long load_addr
= 0;
742 int load_addr_set
= 0;
744 unsigned long last_bss
, elf_bss
;
/* Headers on disk are target-endian; swap to host order first. */
753 bswap_ehdr(interp_elf_ex
);
755 /* First of all, some simple consistency checks */
756 if ((interp_elf_ex
->e_type
!= ET_EXEC
&&
757 interp_elf_ex
->e_type
!= ET_DYN
) ||
758 !elf_check_arch(interp_elf_ex
->e_machine
)) {
763 /* Now read in all of the header information */
765 if (sizeof(struct elf_phdr
) * interp_elf_ex
->e_phnum
> TARGET_PAGE_SIZE
)
768 elf_phdata
= (struct elf_phdr
*)
769 malloc(sizeof(struct elf_phdr
) * interp_elf_ex
->e_phnum
);
775 * If the size of this structure has changed, then punt, since
776 * we will be doing the wrong thing.
778 if (interp_elf_ex
->e_phentsize
!= sizeof(struct elf_phdr
)) {
783 retval
= lseek(interpreter_fd
, interp_elf_ex
->e_phoff
, SEEK_SET
);
785 retval
= read(interpreter_fd
,
787 sizeof(struct elf_phdr
) * interp_elf_ex
->e_phnum
);
790 perror("load_elf_interp");
/* Swap each program header to host byte order. */
797 for (i
=0; i
<interp_elf_ex
->e_phnum
; i
++, eppnt
++) {
802 if (interp_elf_ex
->e_type
== ET_DYN
) {
803 /* in order to avoid hardcoding the interpreter load
804 address in qemu, we allocate a big enough memory zone */
805 error
= target_mmap(0, INTERP_MAP_SIZE
,
806 PROT_NONE
, MAP_PRIVATE
| MAP_ANON
,
/* Map every PT_LOAD segment at its (possibly biased) address. */
817 for(i
=0; i
<interp_elf_ex
->e_phnum
; i
++, eppnt
++)
818 if (eppnt
->p_type
== PT_LOAD
) {
819 int elf_type
= MAP_PRIVATE
| MAP_DENYWRITE
;
821 unsigned long vaddr
= 0;
824 if (eppnt
->p_flags
& PF_R
) elf_prot
= PROT_READ
;
825 if (eppnt
->p_flags
& PF_W
) elf_prot
|= PROT_WRITE
;
826 if (eppnt
->p_flags
& PF_X
) elf_prot
|= PROT_EXEC
;
827 if (interp_elf_ex
->e_type
== ET_EXEC
|| load_addr_set
) {
828 elf_type
|= MAP_FIXED
;
829 vaddr
= eppnt
->p_vaddr
;
831 error
= target_mmap(load_addr
+TARGET_ELF_PAGESTART(vaddr
),
832 eppnt
->p_filesz
+ TARGET_ELF_PAGEOFFSET(eppnt
->p_vaddr
),
836 eppnt
->p_offset
- TARGET_ELF_PAGEOFFSET(eppnt
->p_vaddr
));
840 close(interpreter_fd
);
/* First successful ET_DYN mapping fixes the interpreter's base. */
845 if (!load_addr_set
&& interp_elf_ex
->e_type
== ET_DYN
) {
851 * Find the end of the file mapping for this phdr, and keep
852 * track of the largest address we see for this.
854 k
= load_addr
+ eppnt
->p_vaddr
+ eppnt
->p_filesz
;
855 if (k
> elf_bss
) elf_bss
= k
;
858 * Do the same thing for the memory mapping - between
859 * elf_bss and last_bss is the bss section.
861 k
= load_addr
+ eppnt
->p_memsz
+ eppnt
->p_vaddr
;
862 if (k
> last_bss
) last_bss
= k
;
865 /* Now use mmap to map the library into memory. */
867 close(interpreter_fd
);
870 * Now fill out the bss section. First pad the last page up
871 * to the page boundary, and then perform a mmap to make sure
872 * that there are zeromapped pages up to and including the last
875 padzero(elf_bss
, last_bss
);
876 elf_bss
= TARGET_ELF_PAGESTART(elf_bss
+ qemu_host_page_size
- 1); /* What we have mapped so far */
878 /* Map the last of the bss segment */
879 if (last_bss
> elf_bss
) {
880 target_mmap(elf_bss
, last_bss
-elf_bss
,
881 PROT_READ
|PROT_WRITE
|PROT_EXEC
,
882 MAP_FIXED
|MAP_PRIVATE
|MAP_ANONYMOUS
, -1, 0);
886 *interp_load_addr
= load_addr
;
887 return ((unsigned long) interp_elf_ex
->e_entry
) + load_addr
;
890 /* Best attempt to load symbols from this ELF object. */
/* Best attempt to load symbols from this ELF object: scan the section
 * headers for SHT_SYMTAB, read the symbol and string tables, byte-swap
 * the symbols, and (on 64-bit targets) narrow them to elf32_sym because
 * the disassembler consumes 32-bit symbols.  Results are stored in a
 * syminfo record (allocation of `s` fields is partially dropped by the
 * extraction).
 *
 * NOTE(review): mangled extraction -- braces, #else/#endif lines and
 * some error paths were dropped.  Code kept byte-identical; comments
 * only.  Note read() results are compared against sh_size without
 * visible sign handling -- behavior kept as-is.
 */
891 static void load_symbols(struct elfhdr
*hdr
, int fd
)
894 struct elf_shdr sechdr
, symtab
, strtab
;
897 #if (ELF_CLASS == ELFCLASS64)
898 // Disas uses 32 bit symbols
899 struct elf32_sym
*syms32
= NULL
;
/* Walk the section header table looking for the symbol table. */
903 lseek(fd
, hdr
->e_shoff
, SEEK_SET
);
904 for (i
= 0; i
< hdr
->e_shnum
; i
++) {
905 if (read(fd
, &sechdr
, sizeof(sechdr
)) != sizeof(sechdr
))
910 if (sechdr
.sh_type
== SHT_SYMTAB
) {
/* sh_link of the symtab section indexes its string table. */
912 lseek(fd
, hdr
->e_shoff
913 + sizeof(sechdr
) * sechdr
.sh_link
, SEEK_SET
);
914 if (read(fd
, &strtab
, sizeof(strtab
))
923 return; /* Shouldn't happen... */
926 /* Now know where the strtab and symtab are.  Snarf them. */
927 s
= malloc(sizeof(*s
));
928 s
->disas_symtab
= malloc(symtab
.sh_size
);
929 #if (ELF_CLASS == ELFCLASS64)
930 syms32
= malloc(symtab
.sh_size
/ sizeof(struct elf_sym
)
931 * sizeof(struct elf32_sym
));
933 s
->disas_strtab
= strings
= malloc(strtab
.sh_size
);
934 if (!s
->disas_symtab
|| !s
->disas_strtab
)
937 lseek(fd
, symtab
.sh_offset
, SEEK_SET
);
938 if (read(fd
, s
->disas_symtab
, symtab
.sh_size
) != symtab
.sh_size
)
/* Swap each symbol; on 64-bit targets also narrow into syms32[]. */
941 for (i
= 0; i
< symtab
.sh_size
/ sizeof(struct elf_sym
); i
++) {
943 bswap_sym(s
->disas_symtab
+ sizeof(struct elf_sym
)*i
);
945 #if (ELF_CLASS == ELFCLASS64)
946 sym
= s
->disas_symtab
+ sizeof(struct elf_sym
)*i
;
947 syms32
[i
].st_name
= sym
->st_name
;
948 syms32
[i
].st_info
= sym
->st_info
;
949 syms32
[i
].st_other
= sym
->st_other
;
950 syms32
[i
].st_shndx
= sym
->st_shndx
;
951 syms32
[i
].st_value
= sym
->st_value
& 0xffffffff;
952 syms32
[i
].st_size
= sym
->st_size
& 0xffffffff;
956 #if (ELF_CLASS == ELFCLASS64)
/* Replace the 64-bit table with the narrowed 32-bit copy. */
957 free(s
->disas_symtab
);
958 s
->disas_symtab
= syms32
;
960 lseek(fd
, strtab
.sh_offset
, SEEK_SET
);
961 if (read(fd
, strings
, strtab
.sh_size
) != strtab
.sh_size
)
963 s
->disas_num_syms
= symtab
.sh_size
/ sizeof(struct elf_sym
);
/* Main ELF loader entry point: validate the exec header, copy
 * filename/envp/argv strings, read program headers, locate and open the
 * PT_INTERP interpreter (classifying it as a.out vs ELF), map the
 * PT_LOAD segments (with a load bias for ET_DYN), load the interpreter,
 * build the stack/auxv tables, set up brk/bss, and fill in *info for
 * the caller.  Returns via paths dropped by the extraction.
 *
 * NOTE(review): mangled extraction -- statements are split across lines
 * and many original lines (braces, error returns, #else/#endif) were
 * dropped.  Code kept byte-identical; comments only.
 */
968 int load_elf_binary(struct linux_binprm
* bprm
, struct target_pt_regs
* regs
,
969 struct image_info
* info
)
971 struct elfhdr elf_ex
;
972 struct elfhdr interp_elf_ex
;
973 struct exec interp_ex
;
974 int interpreter_fd
= -1; /* avoid warning */
975 unsigned long load_addr
, load_bias
;
976 int load_addr_set
= 0;
977 unsigned int interpreter_type
= INTERPRETER_NONE
;
978 unsigned char ibcs2_interpreter
;
980 unsigned long mapped_addr
;
981 struct elf_phdr
* elf_ppnt
;
982 struct elf_phdr
*elf_phdata
;
983 unsigned long elf_bss
, k
, elf_brk
;
985 char * elf_interpreter
;
986 unsigned long elf_entry
, interp_load_addr
= 0;
988 unsigned long start_code
, end_code
, end_data
;
989 unsigned long reloc_func_desc
= 0;
990 unsigned long elf_stack
;
991 char passed_fileno
[6];
993 ibcs2_interpreter
= 0;
997 elf_ex
= *((struct elfhdr
*) bprm
->buf
); /* exec-header */
1002 /* First of all, some simple consistency checks */
1003 if ((elf_ex
.e_type
!= ET_EXEC
&& elf_ex
.e_type
!= ET_DYN
) ||
1004 (! elf_check_arch(elf_ex
.e_machine
))) {
/* Stage the filename, environment and argument strings into pages. */
1008 bprm
->p
= copy_elf_strings(1, &bprm
->filename
, bprm
->page
, bprm
->p
);
1009 bprm
->p
= copy_elf_strings(bprm
->envc
,bprm
->envp
,bprm
->page
,bprm
->p
);
1010 bprm
->p
= copy_elf_strings(bprm
->argc
,bprm
->argv
,bprm
->page
,bprm
->p
);
1015 /* Now read in all of the header information */
1016 elf_phdata
= (struct elf_phdr
*)malloc(elf_ex
.e_phentsize
*elf_ex
.e_phnum
);
1017 if (elf_phdata
== NULL
) {
1021 retval
= lseek(bprm
->fd
, elf_ex
.e_phoff
, SEEK_SET
);
1023 retval
= read(bprm
->fd
, (char *) elf_phdata
,
1024 elf_ex
.e_phentsize
* elf_ex
.e_phnum
);
1028 perror("load_elf_binary");
/* Swap all program headers to host byte order. */
1035 elf_ppnt
= elf_phdata
;
1036 for (i
=0; i
<elf_ex
.e_phnum
; i
++, elf_ppnt
++) {
1037 bswap_phdr(elf_ppnt
);
1040 elf_ppnt
= elf_phdata
;
1047 elf_interpreter
= NULL
;
/* Scan for a PT_INTERP segment naming the dynamic linker. */
1052 for(i
=0;i
< elf_ex
.e_phnum
; i
++) {
1053 if (elf_ppnt
->p_type
== PT_INTERP
) {
1054 if ( elf_interpreter
!= NULL
)
1057 free(elf_interpreter
);
1062 /* This is the program interpreter used for
1063 * shared libraries - for now assume that this
1064 * is an a.out format binary
1067 elf_interpreter
= (char *)malloc(elf_ppnt
->p_filesz
);
1069 if (elf_interpreter
== NULL
) {
1075 retval
= lseek(bprm
->fd
, elf_ppnt
->p_offset
, SEEK_SET
);
1077 retval
= read(bprm
->fd
, elf_interpreter
, elf_ppnt
->p_filesz
);
1080 perror("load_elf_binary2");
1084 /* If the program interpreter is one of these two,
1085 then assume an iBCS2 image. Otherwise assume
1086 a native linux image. */
1088 /* JRP - Need to add X86 lib dir stuff here... */
1090 if (strcmp(elf_interpreter
,"/usr/lib/libc.so.1") == 0 ||
1091 strcmp(elf_interpreter
,"/usr/lib/ld.so.1") == 0) {
1092 ibcs2_interpreter
= 1;
1096 printf("Using ELF interpreter %s\n", elf_interpreter
);
/* path() redirects the interpreter path under the emulation prefix. */
1099 retval
= open(path(elf_interpreter
), O_RDONLY
);
1101 interpreter_fd
= retval
;
1104 perror(elf_interpreter
);
1106 /* retval = -errno; */
/* Read the interpreter's first 128 bytes to sniff its format. */
1111 retval
= lseek(interpreter_fd
, 0, SEEK_SET
);
1113 retval
= read(interpreter_fd
,bprm
->buf
,128);
1117 interp_ex
= *((struct exec
*) bprm
->buf
); /* aout exec-header */
1118 interp_elf_ex
=*((struct elfhdr
*) bprm
->buf
); /* elf exec-header */
1121 perror("load_elf_binary3");
1124 free(elf_interpreter
);
1132 /* Some simple consistency checks for the interpreter */
1133 if (elf_interpreter
){
1134 interpreter_type
= INTERPRETER_ELF
| INTERPRETER_AOUT
;
1136 /* Now figure out which format our binary is */
1137 if ((N_MAGIC(interp_ex
) != OMAGIC
) && (N_MAGIC(interp_ex
) != ZMAGIC
) &&
1138 (N_MAGIC(interp_ex
) != QMAGIC
)) {
1139 interpreter_type
= INTERPRETER_ELF
;
1142 if (interp_elf_ex
.e_ident
[0] != 0x7f ||
1143 strncmp(&interp_elf_ex
.e_ident
[1], "ELF",3) != 0) {
1144 interpreter_type
&= ~INTERPRETER_ELF
;
1147 if (!interpreter_type
) {
1148 free(elf_interpreter
);
1155 /* OK, we are done with that, now set up the arg stuff,
1156 and then start this sucker up */
/* a.out interpreters receive the open fd as a string argument. */
1161 if (interpreter_type
== INTERPRETER_AOUT
) {
1162 snprintf(passed_fileno
, sizeof(passed_fileno
), "%d", bprm
->fd
);
1163 passed_p
= passed_fileno
;
1165 if (elf_interpreter
) {
1166 bprm
->p
= copy_elf_strings(1,&passed_p
,bprm
->page
,bprm
->p
);
1171 if (elf_interpreter
) {
1172 free(elf_interpreter
);
1180 /* OK, This is the point of no return */
1183 info
->start_mmap
= (unsigned long)ELF_START_MMAP
;
1185 elf_entry
= (unsigned long) elf_ex
.e_entry
;
1187 /* Do this so that we can load the interpreter, if need be.  We will
1188 change some of these later */
1190 bprm
->p
= setup_arg_pages(bprm
->p
, bprm
, info
);
1191 info
->start_stack
= bprm
->p
;
1193 /* Now we do a little grungy work by mmaping the ELF image into
1194 * the correct location in memory.  At this point, we assume that
1195 * the image should be loaded at fixed address, not at a variable
/* Map each PT_LOAD segment of the main binary. */
1199 for(i
= 0, elf_ppnt
= elf_phdata
; i
< elf_ex
.e_phnum
; i
++, elf_ppnt
++) {
1202 unsigned long error
;
1204 if (elf_ppnt
->p_type
!= PT_LOAD
)
1207 if (elf_ppnt
->p_flags
& PF_R
) elf_prot
|= PROT_READ
;
1208 if (elf_ppnt
->p_flags
& PF_W
) elf_prot
|= PROT_WRITE
;
1209 if (elf_ppnt
->p_flags
& PF_X
) elf_prot
|= PROT_EXEC
;
1210 elf_flags
= MAP_PRIVATE
| MAP_DENYWRITE
;
1211 if (elf_ex
.e_type
== ET_EXEC
|| load_addr_set
) {
1212 elf_flags
|= MAP_FIXED
;
1213 } else if (elf_ex
.e_type
== ET_DYN
) {
1214 /* Try and get dynamic programs out of the way of the default mmap
1215 base, as well as whatever program they might try to exec.  This
1216 is because the brk will follow the loader, and is not movable.  */
1217 /* NOTE: for qemu, we do a big mmap to get enough space
1218 without hardcoding any address */
1219 error
= target_mmap(0, ET_DYN_MAP_SIZE
,
1220 PROT_NONE
, MAP_PRIVATE
| MAP_ANON
,
1226 load_bias
= TARGET_ELF_PAGESTART(error
- elf_ppnt
->p_vaddr
);
1229 error
= target_mmap(TARGET_ELF_PAGESTART(load_bias
+ elf_ppnt
->p_vaddr
),
1230 (elf_ppnt
->p_filesz
+
1231 TARGET_ELF_PAGEOFFSET(elf_ppnt
->p_vaddr
)),
1233 (MAP_FIXED
| MAP_PRIVATE
| MAP_DENYWRITE
),
1235 (elf_ppnt
->p_offset
-
1236 TARGET_ELF_PAGEOFFSET(elf_ppnt
->p_vaddr
)));
1242 #ifdef LOW_ELF_STACK
1243 if (TARGET_ELF_PAGESTART(elf_ppnt
->p_vaddr
) < elf_stack
)
1244 elf_stack
= TARGET_ELF_PAGESTART(elf_ppnt
->p_vaddr
);
/* First mapped segment fixes load_addr and (for ET_DYN) the bias. */
1247 if (!load_addr_set
) {
1249 load_addr
= elf_ppnt
->p_vaddr
- elf_ppnt
->p_offset
;
1250 if (elf_ex
.e_type
== ET_DYN
) {
1251 load_bias
+= error
-
1252 TARGET_ELF_PAGESTART(load_bias
+ elf_ppnt
->p_vaddr
);
1253 load_addr
+= load_bias
;
1254 reloc_func_desc
= load_bias
;
/* Track code/data/bss extents across segments. */
1257 k
= elf_ppnt
->p_vaddr
;
1260 k
= elf_ppnt
->p_vaddr
+ elf_ppnt
->p_filesz
;
1263 if ((elf_ppnt
->p_flags
& PF_X
) && end_code
< k
)
1267 k
= elf_ppnt
->p_vaddr
+ elf_ppnt
->p_memsz
;
1268 if (k
> elf_brk
) elf_brk
= k
;
/* Apply the load bias to every recorded address. */
1271 elf_entry
+= load_bias
;
1272 elf_bss
+= load_bias
;
1273 elf_brk
+= load_bias
;
1274 start_code
+= load_bias
;
1275 end_code
+= load_bias
;
1276 //    start_data += load_bias;
1277 end_data
+= load_bias
;
/* Hand control-transfer setup over to the interpreter loader. */
1279 if (elf_interpreter
) {
1280 if (interpreter_type
& 1) {
1281 elf_entry
= load_aout_interp(&interp_ex
, interpreter_fd
);
1283 else if (interpreter_type
& 2) {
1284 elf_entry
= load_elf_interp(&interp_elf_ex
, interpreter_fd
,
1287 reloc_func_desc
= interp_load_addr
;
1289 close(interpreter_fd
);
1290 free(elf_interpreter
);
1292 if (elf_entry
== ~0UL) {
1293 printf("Unable to load interpreter\n");
1303 load_symbols(&elf_ex
, bprm
->fd
);
1305 if (interpreter_type
!= INTERPRETER_AOUT
) close(bprm
->fd
);
1306 info
->personality
= (ibcs2_interpreter
? PER_SVR4
: PER_LINUX
);
1308 #ifdef LOW_ELF_STACK
1309 info
->start_stack
= bprm
->p
= elf_stack
- 4;
1311 bprm
->p
= create_elf_tables(bprm
->p
,
1315 load_addr
, load_bias
,
1317 (interpreter_type
== INTERPRETER_AOUT
? 0 : 1),
1319 info
->load_addr
= reloc_func_desc
;
1320 info
->start_brk
= info
->brk
= elf_brk
;
1321 info
->end_code
= end_code
;
1322 info
->start_code
= start_code
;
1323 info
->start_data
= end_code
;
1324 info
->end_data
= end_data
;
1325 info
->start_stack
= bprm
->p
;
1327 /* Calling set_brk effectively mmaps the pages that we need for the bss and break
1329 set_brk(elf_bss
, elf_brk
);
1331 padzero(elf_bss
, elf_brk
);
1334 printf("(start_brk) %x\n" , info
->start_brk
);
1335 printf("(end_code) %x\n" , info
->end_code
);
1336 printf("(start_code) %x\n" , info
->start_code
);
1337 printf("(end_data) %x\n" , info
->end_data
);
1338 printf("(start_stack) %x\n" , info
->start_stack
);
1339 printf("(brk) %x\n" , info
->brk
);
1342 if ( info
->personality
== PER_SVR4
)
1344 /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
1345 and some applications "depend" upon this behavior.
1346 Since we do not have the power to recompile these, we
1347 emulate the SVr4 behavior.  Sigh.  */
1348 mapped_addr
= target_mmap(0, qemu_host_page_size
, PROT_READ
| PROT_EXEC
,
1349 MAP_FIXED
| MAP_PRIVATE
, -1, 0);
1352 info
->entry
= elf_entry
;
/* Stub: loading an a.out-format program interpreter is not implemented.
   NOTE(review): braces and the return were dropped by the extraction;
   restored here -- confirm the original's return value upstream. */
static int load_aout_interp(void * exptr, int interp_fd)
{
    printf("a.out interpreter not yet supported\n");
    return(0);
}
/* Public wrapper around the per-target init_thread(): set up the initial
   CPU register state for the loaded image. */
void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    init_thread(regs, infop);
}