/* This is the Linux kernel elf-loading code, ported into user space */

/* from personality.h */

/*
 * Flags for bug emulation.
 *
 * These occupy the top three bytes.
 */
enum {
    ADDR_NO_RANDOMIZE  = 0x0040000,  /* disable randomization of VA space */
    FDPIC_FUNCPTRS     = 0x0080000,  /* userspace function ptrs point to
                                        descriptors (signal handling) */
    MMAP_PAGE_ZERO     = 0x0100000,
    ADDR_COMPAT_LAYOUT = 0x0200000,
    READ_IMPLIES_EXEC  = 0x0400000,
    ADDR_LIMIT_32BIT   = 0x0800000,
    SHORT_INODE        = 0x1000000,
    WHOLE_SECONDS      = 0x2000000,
    STICKY_TIMEOUTS    = 0x4000000,
    ADDR_LIMIT_3GB     = 0x8000000,
};

/*
 * Personality types.
 *
 * These go in the low byte.  Avoid using the top bit, it will
 * conflict with error returns.
 */
enum {
    PER_LINUX       = 0x0000,
    PER_LINUX_32BIT = 0x0000 | ADDR_LIMIT_32BIT,
    PER_LINUX_FDPIC = 0x0000 | FDPIC_FUNCPTRS,
    PER_SVR4        = 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
    PER_SVR3        = 0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_SCOSVR3     = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS | SHORT_INODE,
    PER_OSR5        = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
    PER_WYSEV386    = 0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_ISCR4       = 0x0005 | STICKY_TIMEOUTS,
    PER_SUNOS       = 0x0006 | STICKY_TIMEOUTS,
    PER_XENIX       = 0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_LINUX32     = 0x0008,
    PER_LINUX32_3GB = 0x0008 | ADDR_LIMIT_3GB,
    PER_IRIX32      = 0x0009 | STICKY_TIMEOUTS,  /* IRIX5 32-bit */
    PER_IRIXN32     = 0x000a | STICKY_TIMEOUTS,  /* IRIX6 new 32-bit */
    PER_IRIX64      = 0x000b | STICKY_TIMEOUTS,  /* IRIX6 64-bit */
    PER_SOLARIS     = 0x000d | STICKY_TIMEOUTS,
    PER_UW7         = 0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
    PER_OSF4        = 0x000f,                    /* OSF/1 v4 */
    PER_MASK        = 0x00ff,
};

/*
 * Return the base personality without flags.
 */
#define personality(pers) (pers & PER_MASK)
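/*
 * Example (illustrative): PER_SVR4 above expands to 0x0001 | STICKY_TIMEOUTS |
 * MMAP_PAGE_ZERO, so personality(PER_SVR4) strips the bug-emulation flags and
 * yields the base value 0x0001.  The SPARC init_thread() below uses the macro
 * this way when comparing against PER_LINUX32.
 */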
/* this flag is ineffective under linux too, should be deleted */
#ifndef MAP_DENYWRITE
#define MAP_DENYWRITE 0
#endif

/* should probably go in elf.h */

#ifdef TARGET_I386

#define ELF_PLATFORM get_elf_platform()

static const char *get_elf_platform(void)
{
    static char elf_platform[] = "i386";
    int family = (thread_env->cpuid_version >> 8) & 0xff;
    if (family > 6)
        family = 6;
    if (family >= 3)
        elf_platform[1] = '0' + family;
    return elf_platform;
}

#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    return thread_env->cpuid_features;
}
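/*
 * Example (illustrative): on a guest CPU whose CPUID version reports family 6,
 * get_elf_platform() rewrites the static buffer to "i686", and
 * get_elf_hwcap() passes the guest CPU's feature bits straight through; they
 * end up as AT_PLATFORM and AT_HWCAP in the auxiliary vector built by
 * create_elf_tables() below.
 */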
#ifdef TARGET_X86_64

#define ELF_START_MMAP 0x2aaaaab000ULL
#define elf_check_arch(x) ( ((x) == ELF_ARCH) )

#define ELF_CLASS      ELFCLASS64
#define ELF_DATA       ELFDATA2LSB
#define ELF_ARCH       EM_X86_64

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->rsp = infop->start_stack;
    regs->rip = infop->entry;
}

#else

#define ELF_START_MMAP 0x80000000

/*
 * This is used to ensure we don't load something for the wrong architecture.
 */
#define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )

/*
 * These are used to set parameters in the core dumps.
 */
#define ELF_CLASS      ELFCLASS32
#define ELF_DATA       ELFDATA2LSB
#define ELF_ARCH       EM_386

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->esp = infop->start_stack;
    regs->eip = infop->entry;

    /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
       starts %edx contains a pointer to a function which might be
       registered using `atexit'.  This provides a means for the
       dynamic linker to call DT_FINI functions for shared libraries
       that have been loaded before the code runs.

       A value of 0 tells us that we have no such handler.  */
    regs->edx = 0;
}

#endif /* TARGET_X86_64 */

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE      4096

#endif /* TARGET_I386 */
#ifdef TARGET_ARM

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_ARM )

#define ELF_CLASS      ELFCLASS32
#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA       ELFDATA2MSB
#else
#define ELF_DATA       ELFDATA2LSB
#endif
#define ELF_ARCH       EM_ARM

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    abi_long stack = infop->start_stack;
    memset(regs, 0, sizeof(*regs));
    regs->ARM_cpsr = 0x10;
    if (infop->entry & 1)
        regs->ARM_cpsr |= CPSR_T;
    regs->ARM_pc = infop->entry & 0xfffffffe;
    regs->ARM_sp = infop->start_stack;
    /* FIXME - what to do for failure of get_user()? */
    get_user_ual(regs->ARM_r2, stack + 8); /* envp */
    get_user_ual(regs->ARM_r1, stack + 4); /* argv */
    /* XXX: it seems that r0 is zeroed after ! */
    regs->ARM_r0 = 0;
    /* For uClinux PIC binaries.  */
    /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
    regs->ARM_r10 = infop->start_data;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE      4096

enum
{
    ARM_HWCAP_ARM_SWP       = 1 << 0,
    ARM_HWCAP_ARM_HALF      = 1 << 1,
    ARM_HWCAP_ARM_THUMB     = 1 << 2,
    ARM_HWCAP_ARM_26BIT     = 1 << 3,
    ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
    ARM_HWCAP_ARM_FPA       = 1 << 5,
    ARM_HWCAP_ARM_VFP       = 1 << 6,
    ARM_HWCAP_ARM_EDSP      = 1 << 7,
};

#define ELF_HWCAP (ARM_HWCAP_ARM_SWP | ARM_HWCAP_ARM_HALF              \
                   | ARM_HWCAP_ARM_THUMB | ARM_HWCAP_ARM_FAST_MULT     \
                   | ARM_HWCAP_ARM_FPA | ARM_HWCAP_ARM_VFP)
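/*
 * Note: with the bit assignments above, this ELF_HWCAP mask evaluates to
 * 0x77 (SWP, HALF, THUMB, FAST_MULT, FPA and VFP set; 26BIT and EDSP clear).
 */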
#endif /* TARGET_ARM */

#ifdef TARGET_SPARC
#ifdef TARGET_SPARC64

#define ELF_START_MMAP 0x80000000

#ifndef TARGET_ABI32
#define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
#else
#define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )
#endif

#define ELF_CLASS      ELFCLASS64
#define ELF_DATA       ELFDATA2MSB
#define ELF_ARCH       EM_SPARCV9

#define STACK_BIAS     2047

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
#ifdef TARGET_ABI32
    regs->u_regs[14] = infop->start_stack - 16 * 4;
#else
    if (personality(infop->personality) == PER_LINUX32)
        regs->u_regs[14] = infop->start_stack - 16 * 4;
    else
        regs->u_regs[14] = infop->start_stack - 16 * 8 - STACK_BIAS;
#endif
}

#else

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_SPARC )

#define ELF_CLASS      ELFCLASS32
#define ELF_DATA       ELFDATA2MSB
#define ELF_ARCH       EM_SPARC

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
    regs->u_regs[14] = infop->start_stack - 16 * 4;
}

#endif
#endif /* TARGET_SPARC */
#ifdef TARGET_PPC

#define ELF_START_MMAP 0x80000000

#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)

#define elf_check_arch(x) ( (x) == EM_PPC64 )

#define ELF_CLASS      ELFCLASS64

#else

#define elf_check_arch(x) ( (x) == EM_PPC )

#define ELF_CLASS      ELFCLASS32

#endif

#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA       ELFDATA2MSB
#else
#define ELF_DATA       ELFDATA2LSB
#endif
#define ELF_ARCH       EM_PPC

/*
 * We need to put in some extra aux table entries to tell glibc what
 * the cache block size is, so it can use the dcbz instruction safely.
 */
#define AT_DCACHEBSIZE 19
#define AT_ICACHEBSIZE 20
#define AT_UCACHEBSIZE 21
/* A special ignored type value for PPC, for glibc compatibility.  */
#define AT_IGNOREPPC   22
/*
 * The requirements here are:
 * - keep the final alignment of sp (sp & 0xf)
 * - make sure the 32-bit value at the first 16 byte aligned position of
 *   AUXV is greater than 16 for glibc compatibility.
 *   AT_IGNOREPPC is used for that.
 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
 *   even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
 */
#define DLINFO_ARCH_ITEMS       5
#define ARCH_DLINFO                                     \
do {                                                    \
        NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20);              \
        NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20);              \
        NEW_AUX_ENT(AT_UCACHEBSIZE, 0);                 \
        /*                                              \
         * Now handle glibc compatibility.              \
         */                                             \
        NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);        \
        NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);        \
} while (0)
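/*
 * Note: DLINFO_ARCH_ITEMS must match the number of NEW_AUX_ENT() pairs the
 * macro above emits (five here); create_elf_tables() adds it when sizing the
 * auxiliary vector on the stack.  AT_IGNOREPPC (22, i.e. greater than 16) is
 * the value used to satisfy the glibc alignment requirement described in the
 * comment above.
 */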
static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
{
    abi_ulong pos = infop->start_stack;
    abi_ulong tmp;
#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
    abi_ulong entry, toc;
#endif

    _regs->gpr[1] = infop->start_stack;
#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
    entry = ldq_raw(infop->entry) + infop->load_addr;
    toc = ldq_raw(infop->entry + 8) + infop->load_addr;
    _regs->gpr[2] = toc;
    infop->entry = entry;
#endif
    _regs->nip = infop->entry;
    /* Note that this isn't exactly what the regular kernel does,
     * but it is what the ABI wants and is needed to allow
     * execution of PPC BSD programs.
     */
    /* FIXME - what to do for failure of get_user()? */
    get_user_ual(_regs->gpr[3], pos);
    pos += sizeof(abi_ulong);
    _regs->gpr[4] = pos;
    for (tmp = 1; tmp != 0; pos += sizeof(abi_ulong))
        get_user_ual(tmp, pos);
    _regs->gpr[5] = pos;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE      4096

#endif /* TARGET_PPC */
#ifdef TARGET_MIPS

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_MIPS )

#ifdef TARGET_MIPS64
#define ELF_CLASS      ELFCLASS64
#else
#define ELF_CLASS      ELFCLASS32
#endif
#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA       ELFDATA2MSB
#else
#define ELF_DATA       ELFDATA2LSB
#endif
#define ELF_ARCH       EM_MIPS

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->cp0_status = 2 << CP0St_KSU;
    regs->cp0_epc = infop->entry;
    regs->regs[29] = infop->start_stack;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE      4096

#endif /* TARGET_MIPS */
#ifdef TARGET_SH4

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_SH )

#define ELF_CLASS      ELFCLASS32
#define ELF_DATA       ELFDATA2LSB
#define ELF_ARCH       EM_SH

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    /* Check other registers XXXXX */
    regs->pc = infop->entry;
    regs->regs[15] = infop->start_stack;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE      4096

#endif /* TARGET_SH4 */
#ifdef TARGET_CRIS

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_CRIS )

#define ELF_CLASS      ELFCLASS32
#define ELF_DATA       ELFDATA2LSB
#define ELF_ARCH       EM_CRIS

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->erp = infop->entry;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE      8192

#endif /* TARGET_CRIS */
#ifdef TARGET_M68K

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_68K )

#define ELF_CLASS      ELFCLASS32
#define ELF_DATA       ELFDATA2MSB
#define ELF_ARCH       EM_68K

/* ??? Does this need to do anything?
#define ELF_PLAT_INIT(_r) */

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->usp = infop->start_stack;
    regs->pc = infop->entry;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE      8192

#endif /* TARGET_M68K */
#ifdef TARGET_ALPHA

#define ELF_START_MMAP (0x30000000000ULL)

#define elf_check_arch(x) ( (x) == ELF_ARCH )

#define ELF_CLASS      ELFCLASS64
#define ELF_DATA       ELFDATA2LSB      /* Alpha is little-endian */
#define ELF_ARCH       EM_ALPHA

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->usp = infop->start_stack;
    regs->unique = infop->start_data; /* ? */
    printf("Set unique value to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n",
           regs->unique, infop->start_data);
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE      8192

#endif /* TARGET_ALPHA */
#ifndef ELF_PLATFORM
#define ELF_PLATFORM (NULL)
#endif

#ifdef TARGET_ABI32
#undef ELF_CLASS
#define ELF_CLASS ELFCLASS32
#undef bswaptls
#define bswaptls(ptr) bswap32s(ptr)
#endif

#include "elf.h"

struct exec
{
    unsigned int a_info;   /* Use macros N_MAGIC, etc for access */
    unsigned int a_text;   /* length of text, in bytes */
    unsigned int a_data;   /* length of data, in bytes */
    unsigned int a_bss;    /* length of uninitialized data area, in bytes */
    unsigned int a_syms;   /* length of symbol table data in file, in bytes */
    unsigned int a_entry;  /* start address */
    unsigned int a_trsize; /* length of relocation info for text, in bytes */
    unsigned int a_drsize; /* length of relocation info for data, in bytes */
};

#define N_MAGIC(exec) ((exec).a_info & 0xffff)
#define OMAGIC 0407
#define NMAGIC 0410
#define ZMAGIC 0413
#define QMAGIC 0314
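/*
 * Note: N_MAGIC() extracts the a.out magic number from the low 16 bits of
 * a_info; load_elf_binary() checks it against OMAGIC/ZMAGIC/QMAGIC to decide
 * whether a requested program interpreter is really an a.out image rather
 * than an ELF one.
 */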
/* max code+data+bss space allocated to elf interpreter */
#define INTERP_MAP_SIZE (32 * 1024 * 1024)

/* max code+data+bss+brk space allocated to ET_DYN executables */
#define ET_DYN_MAP_SIZE (128 * 1024 * 1024)

/* Necessary parameters */
#define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
#define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
#define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
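/*
 * Example (illustrative, assuming a 4 KiB target page): for a segment with
 * p_vaddr 0x08049234, TARGET_ELF_PAGESTART() yields 0x08049000 and
 * TARGET_ELF_PAGEOFFSET() yields 0x234.  The mappings below start at the
 * page-aligned address and back the file offset up by the same amount, so
 * the p_vaddr/p_offset congruence required by mmap is preserved.
 */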
#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
#define INTERPRETER_ELF  2

#define DLINFO_ITEMS 12
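/*
 * Note: DLINFO_ITEMS counts the fixed auxv entries that create_elf_tables()
 * pushes; together with one extra slot for the AT_NULL terminator (and
 * DLINFO_ARCH_ITEMS on PPC) it determines how much stack space is reserved
 * for the auxiliary vector, each entry being an (id, value) pair.
 */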
static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
{
    memcpy(to, from, n);
}

extern unsigned long x86_stack_size;

static int load_aout_interp(void * exptr, int interp_fd);
static void bswap_ehdr(struct elfhdr *ehdr)
{
    bswap16s(&ehdr->e_type);            /* Object file type */
    bswap16s(&ehdr->e_machine);         /* Architecture */
    bswap32s(&ehdr->e_version);         /* Object file version */
    bswaptls(&ehdr->e_entry);           /* Entry point virtual address */
    bswaptls(&ehdr->e_phoff);           /* Program header table file offset */
    bswaptls(&ehdr->e_shoff);           /* Section header table file offset */
    bswap32s(&ehdr->e_flags);           /* Processor-specific flags */
    bswap16s(&ehdr->e_ehsize);          /* ELF header size in bytes */
    bswap16s(&ehdr->e_phentsize);       /* Program header table entry size */
    bswap16s(&ehdr->e_phnum);           /* Program header table entry count */
    bswap16s(&ehdr->e_shentsize);       /* Section header table entry size */
    bswap16s(&ehdr->e_shnum);           /* Section header table entry count */
    bswap16s(&ehdr->e_shstrndx);        /* Section header string table index */
}

static void bswap_phdr(struct elf_phdr *phdr)
{
    bswap32s(&phdr->p_type);            /* Segment type */
    bswaptls(&phdr->p_offset);          /* Segment file offset */
    bswaptls(&phdr->p_vaddr);           /* Segment virtual address */
    bswaptls(&phdr->p_paddr);           /* Segment physical address */
    bswaptls(&phdr->p_filesz);          /* Segment size in file */
    bswaptls(&phdr->p_memsz);           /* Segment size in memory */
    bswap32s(&phdr->p_flags);           /* Segment flags */
    bswaptls(&phdr->p_align);           /* Segment alignment */
}

static void bswap_shdr(struct elf_shdr *shdr)
{
    bswap32s(&shdr->sh_name);
    bswap32s(&shdr->sh_type);
    bswaptls(&shdr->sh_flags);
    bswaptls(&shdr->sh_addr);
    bswaptls(&shdr->sh_offset);
    bswaptls(&shdr->sh_size);
    bswap32s(&shdr->sh_link);
    bswap32s(&shdr->sh_info);
    bswaptls(&shdr->sh_addralign);
    bswaptls(&shdr->sh_entsize);
}

static void bswap_sym(struct elf_sym *sym)
{
    bswap32s(&sym->st_name);
    bswaptls(&sym->st_value);
    bswaptls(&sym->st_size);
    bswap16s(&sym->st_shndx);
}
/*
 * 'copy_elf_strings()' copies argument/environment strings from user
 * memory to free pages in kernel mem.  These are in a format ready
 * to be put directly into the top of new user memory.
 */
static abi_ulong copy_elf_strings(int argc, char **argv, void **page,
                                  abi_ulong p)
{
    char *tmp, *tmp1, *pag = NULL;
    int len, offset = 0;

    if (!p) {
        return 0;       /* bullet-proofing */
    }
    while (argc-- > 0) {
        tmp = argv[argc];
        if (!tmp) {
            fprintf(stderr, "VFS: argc is wrong");
            exit(-1);
        }
        tmp1 = tmp;
        while (*tmp++);
        len = tmp - tmp1;
        if (p < len) {  /* this shouldn't happen - 128kB */
            return 0;
        }
        while (len) {
            --p; --tmp; --len;
            if (--offset < 0) {
                offset = p % TARGET_PAGE_SIZE;
                pag = (char *)page[p/TARGET_PAGE_SIZE];
                if (!pag) {
                    pag = (char *)malloc(TARGET_PAGE_SIZE);
                    memset(pag, 0, TARGET_PAGE_SIZE);
                    page[p/TARGET_PAGE_SIZE] = pag;
                    if (!pag)
                        return 0;
                }
            }
            if (len == 0 || offset == 0) {
                *(pag + offset) = *tmp;
            }
            else {
                int bytes_to_copy = (len > offset) ? offset : len;
                tmp -= bytes_to_copy;
                p -= bytes_to_copy;
                offset -= bytes_to_copy;
                len -= bytes_to_copy;
                memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
            }
        }
    }
    return p;
}
static abi_ulong setup_arg_pages(abi_ulong p, struct linux_binprm *bprm,
                                 struct image_info *info)
{
    abi_ulong stack_base, size, error;
    int i;

    /* Create enough stack to hold everything.  If we don't use
     * it for args, we'll use it for something else...
     */
    size = x86_stack_size;
    if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE)
        size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
    error = target_mmap(0,
                        size + qemu_host_page_size,
                        PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS,
                        -1, 0);
    if (error == -1) {
        perror("stk mmap");
        exit(-1);
    }
    /* we reserve one extra page at the top of the stack as guard */
    target_mprotect(error + size, qemu_host_page_size, PROT_NONE);

    stack_base = error + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;
    p += stack_base;

    for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
        if (bprm->page[i]) {
            info->rss++;
            /* FIXME - check return value of memcpy_to_target() for failure */
            memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE);
            free(bprm->page[i]);
        }
        stack_base += TARGET_PAGE_SIZE;
    }
    return p;
}
static void set_brk(abi_ulong start, abi_ulong end)
{
    /* page-align the start and end addresses... */
    start = HOST_PAGE_ALIGN(start);
    end = HOST_PAGE_ALIGN(end);
    if (end <= start)
        return;
    if (target_mmap(start, end - start,
                    PROT_READ | PROT_WRITE | PROT_EXEC,
                    MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0) == -1) {
        perror("cannot mmap brk");
        exit(-1);
    }
}
/* We need to explicitly zero any fractional pages after the data
   section (i.e. bss).  Otherwise they would contain junk from the
   file that should not be in memory. */
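/*
 * Example (illustrative, assuming 4 KiB pages): if the data segment ends at
 * elf_bss = 0x0804a123, padzero() clears the bytes from 0x0804a123 up to the
 * end of that page (0x0804afff); whole pages beyond it are covered by the
 * anonymous mappings set up by the callers (set_brk() / load_elf_interp()).
 */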
static void padzero(abi_ulong elf_bss, abi_ulong last_bss)
{
    abi_ulong nbyte;

    if (elf_bss >= last_bss)
        return;

    /* XXX: this is really a hack : if the real host page size is
       smaller than the target page size, some pages after the end
       of the file may not be mapped. A better fix would be to
       patch target_mmap(), but it is more complicated as the file
       size must be known */
    if (qemu_real_host_page_size < qemu_host_page_size) {
        abi_ulong end_addr, end_addr1;
        end_addr1 = (elf_bss + qemu_real_host_page_size - 1) &
            ~(qemu_real_host_page_size - 1);
        end_addr = HOST_PAGE_ALIGN(elf_bss);
        if (end_addr1 < end_addr) {
            mmap((void *)g2h(end_addr1), end_addr - end_addr1,
                 PROT_READ|PROT_WRITE|PROT_EXEC,
                 MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
        }
    }

    nbyte = elf_bss & (qemu_host_page_size-1);
    if (nbyte) {
        nbyte = qemu_host_page_size - nbyte;
        do {
            /* FIXME - what to do if put_user() fails? */
            put_user_u8(0, elf_bss);
            elf_bss++;
        } while (--nbyte);
    }
}
static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
                                   struct elfhdr * exec,
                                   abi_ulong load_addr,
                                   abi_ulong load_bias,
                                   abi_ulong interp_load_addr, int ibcs,
                                   struct image_info *info)
{
    abi_ulong sp;
    int size;
    abi_ulong u_platform;
    const char *k_platform;
    const int n = sizeof(elf_addr_t);

    sp = p;
    u_platform = 0;
    k_platform = ELF_PLATFORM;
    if (k_platform) {
        size_t len = strlen(k_platform) + 1;
        sp -= (len + n - 1) & ~(n - 1);
        u_platform = sp;
        /* FIXME - check return value of memcpy_to_target() for failure */
        memcpy_to_target(sp, k_platform, len);
    }
    /*
     * Force 16 byte _final_ alignment here for generality.
     */
    sp = sp &~ (abi_ulong)15;
    size = (DLINFO_ITEMS + 1) * 2;
    if (k_platform)
        size += 2;
#ifdef DLINFO_ARCH_ITEMS
    size += DLINFO_ARCH_ITEMS * 2;
#endif
    size += envc + argc + 2;
    size += (!ibcs ? 3 : 1);    /* argc itself */
    size *= n;
    if (size & 15)
        sp -= 16 - (size & 15);

    /* This is correct because Linux defines
     * elf_addr_t as Elf32_Off / Elf64_Off
     */
#define NEW_AUX_ENT(id, val) do {               \
        sp -= n; put_user_ual(val, sp);         \
        sp -= n; put_user_ual(id, sp);          \
    } while(0)
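    /*
     * Note: the stack grows down, so each NEW_AUX_ENT() pushes the value
     * first and then the id; read upward from the final sp the vector
     * appears as (id, value) pairs, and the AT_NULL terminator pushed
     * first below ends up last.
     */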
    NEW_AUX_ENT (AT_NULL, 0);

    /* There must be exactly DLINFO_ITEMS entries here.  */
    NEW_AUX_ENT(AT_PHDR, (abi_ulong)(load_addr + exec->e_phoff));
    NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
    NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
    NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
    NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_load_addr));
    NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
    NEW_AUX_ENT(AT_ENTRY, load_bias + exec->e_entry);
    NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
    NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
    NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
    NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
    NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
    NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK));
    if (k_platform)
        NEW_AUX_ENT(AT_PLATFORM, u_platform);
#ifdef ARCH_DLINFO
    /*
     * ARCH_DLINFO must come last so platform specific code can enforce
     * special alignment requirements on the AUXV if necessary (eg. PPC).
     */
    ARCH_DLINFO;
#endif
#undef NEW_AUX_ENT

    sp = loader_build_argptr(envc, argc, sp, p, !ibcs);
    return sp;
}
static abi_ulong load_elf_interp(struct elfhdr * interp_elf_ex,
                                 int interpreter_fd,
                                 abi_ulong *interp_load_addr)
{
    struct elf_phdr *elf_phdata = NULL;
    struct elf_phdr *eppnt;
    abi_ulong load_addr = 0;
    int load_addr_set = 0;
    int retval;
    abi_ulong last_bss, elf_bss;
    abi_ulong error;
    int i;

    elf_bss = 0;
    last_bss = 0;
    error = 0;

    bswap_ehdr(interp_elf_ex);

    /* First of all, some simple consistency checks */
    if ((interp_elf_ex->e_type != ET_EXEC &&
         interp_elf_ex->e_type != ET_DYN) ||
        !elf_check_arch(interp_elf_ex->e_machine)) {
        return ~((abi_ulong)0UL);
    }

    /* Now read in all of the header information */

    if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE)
        return ~(abi_ulong)0UL;

    elf_phdata = (struct elf_phdr *)
        malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);

    if (!elf_phdata)
        return ~((abi_ulong)0UL);

    /*
     * If the size of this structure has changed, then punt, since
     * we will be doing the wrong thing.
     */
    if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
        free(elf_phdata);
        return ~((abi_ulong)0UL);
    }

    retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);
    if (retval >= 0) {
        retval = read(interpreter_fd,
                      (char *) elf_phdata,
                      sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
    }
    if (retval < 0) {
        perror("load_elf_interp");
        exit(-1);
    }

    eppnt = elf_phdata;
    for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
        bswap_phdr(eppnt);
    }

    if (interp_elf_ex->e_type == ET_DYN) {
        /* in order to avoid hardcoding the interpreter load
           address in qemu, we allocate a big enough memory zone */
        error = target_mmap(0, INTERP_MAP_SIZE,
                            PROT_NONE, MAP_PRIVATE | MAP_ANON,
                            -1, 0);
        if (error == -1) {
            perror("mmap");
            exit(-1);
        }
        load_addr = error;
        load_addr_set = 1;
    }

    eppnt = elf_phdata;
    for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++)
        if (eppnt->p_type == PT_LOAD) {
            int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
            int elf_prot = 0;
            abi_ulong vaddr = 0;
            abi_ulong k;

            if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
            if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
            if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
            if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) {
                elf_type |= MAP_FIXED;
                vaddr = eppnt->p_vaddr;
            }
            error = target_mmap(load_addr+TARGET_ELF_PAGESTART(vaddr),
                                eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr),
                                elf_prot,
                                elf_type,
                                interpreter_fd,
                                eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr));

            if (error == -1) {
                /* Real error */
                close(interpreter_fd);
                free(elf_phdata);
                return ~((abi_ulong)0UL);
            }

            if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
                load_addr = error;
                load_addr_set = 1;
            }

            /*
             * Find the end of the file mapping for this phdr, and keep
             * track of the largest address we see for this.
             */
            k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
            if (k > elf_bss) elf_bss = k;

            /*
             * Do the same thing for the memory mapping - between
             * elf_bss and last_bss is the bss section.
             */
            k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
            if (k > last_bss) last_bss = k;
        }

    /* Now use mmap to map the library into memory. */

    close(interpreter_fd);

    /*
     * Now fill out the bss section.  First pad the last page up
     * to the page boundary, and then perform a mmap to make sure
     * that there are zeromapped pages up to and including the last
     * bss page.
     */
    padzero(elf_bss, last_bss);
    elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */

    /* Map the last of the bss segment */
    if (last_bss > elf_bss) {
        target_mmap(elf_bss, last_bss - elf_bss,
                    PROT_READ | PROT_WRITE | PROT_EXEC,
                    MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    }
    free(elf_phdata);

    *interp_load_addr = load_addr;
    return ((abi_ulong) interp_elf_ex->e_entry) + load_addr;
}
/* Best attempt to load symbols from this ELF object. */
static void load_symbols(struct elfhdr *hdr, int fd)
{
    unsigned int i;
    struct elf_shdr sechdr, symtab, strtab;
    char *strings;
    struct syminfo *s;
#if (ELF_CLASS == ELFCLASS64)
    // Disas uses 32 bit symbols
    struct elf32_sym *syms32 = NULL;
    struct elf_sym *sym;
#endif

    lseek(fd, hdr->e_shoff, SEEK_SET);
    for (i = 0; i < hdr->e_shnum; i++) {
        if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr))
            return;
        bswap_shdr(&sechdr);
        if (sechdr.sh_type == SHT_SYMTAB) {
            symtab = sechdr;
            lseek(fd, hdr->e_shoff
                  + sizeof(sechdr) * sechdr.sh_link, SEEK_SET);
            if (read(fd, &strtab, sizeof(strtab))
                != sizeof(strtab))
                return;
            bswap_shdr(&strtab);
            goto found;
        }
    }
    return; /* Shouldn't happen... */

 found:
    /* Now we know where the strtab and symtab are.  Snarf them. */
    s = malloc(sizeof(*s));
    s->disas_symtab = malloc(symtab.sh_size);
#if (ELF_CLASS == ELFCLASS64)
    syms32 = malloc(symtab.sh_size / sizeof(struct elf_sym)
                    * sizeof(struct elf32_sym));
#endif
    s->disas_strtab = strings = malloc(strtab.sh_size);
    if (!s->disas_symtab || !s->disas_strtab)
        return;

    lseek(fd, symtab.sh_offset, SEEK_SET);
    if (read(fd, s->disas_symtab, symtab.sh_size) != symtab.sh_size)
        return;

    for (i = 0; i < symtab.sh_size / sizeof(struct elf_sym); i++) {
        bswap_sym(s->disas_symtab + sizeof(struct elf_sym) * i);
#if (ELF_CLASS == ELFCLASS64)
        sym = s->disas_symtab + sizeof(struct elf_sym) * i;
        syms32[i].st_name  = sym->st_name;
        syms32[i].st_info  = sym->st_info;
        syms32[i].st_other = sym->st_other;
        syms32[i].st_shndx = sym->st_shndx;
        syms32[i].st_value = sym->st_value & 0xffffffff;
        syms32[i].st_size  = sym->st_size & 0xffffffff;
#endif
    }

#if (ELF_CLASS == ELFCLASS64)
    free(s->disas_symtab);
    s->disas_symtab = syms32;
#endif
    lseek(fd, strtab.sh_offset, SEEK_SET);
    if (read(fd, strings, strtab.sh_size) != strtab.sh_size)
        return;
    s->disas_num_syms = symtab.sh_size / sizeof(struct elf_sym);
    s->next = syminfos;
    syminfos = s;
}
int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
                    struct image_info * info)
{
    struct elfhdr elf_ex;
    struct elfhdr interp_elf_ex;
    struct exec interp_ex;
    int interpreter_fd = -1; /* avoid warning */
    abi_ulong load_addr, load_bias;
    int load_addr_set = 0;
    unsigned int interpreter_type = INTERPRETER_NONE;
    unsigned char ibcs2_interpreter;
    int i;
    abi_ulong mapped_addr;
    struct elf_phdr * elf_ppnt;
    struct elf_phdr *elf_phdata;
    abi_ulong elf_bss, k, elf_brk;
    int retval;
    char * elf_interpreter;
    abi_ulong elf_entry, interp_load_addr = 0;
    abi_ulong start_code, end_code, start_data, end_data;
    abi_ulong reloc_func_desc = 0;
    abi_ulong elf_stack;
    char passed_fileno[6];

    ibcs2_interpreter = 0;
    load_addr = 0;
    load_bias = 0;
    elf_ex = *((struct elfhdr *) bprm->buf);          /* exec-header */
    bswap_ehdr(&elf_ex);

    /* First of all, some simple consistency checks */
    if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
        (! elf_check_arch(elf_ex.e_machine))) {
        return -ENOEXEC;
    }

    bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
    bprm->p = copy_elf_strings(bprm->envc, bprm->envp, bprm->page, bprm->p);
    bprm->p = copy_elf_strings(bprm->argc, bprm->argv, bprm->page, bprm->p);

    /* Now read in all of the header information */
    elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize * elf_ex.e_phnum);
    if (elf_phdata == NULL) {
        return -ENOMEM;
    }

    retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET);
    if (retval >= 0) {
        retval = read(bprm->fd, (char *) elf_phdata,
                      elf_ex.e_phentsize * elf_ex.e_phnum);
    }
    if (retval < 0) {
        perror("load_elf_binary");
        exit(-1);
    }

    elf_ppnt = elf_phdata;
    for (i = 0; i < elf_ex.e_phnum; i++, elf_ppnt++) {
        bswap_phdr(elf_ppnt);
    }
    elf_ppnt = elf_phdata;

    elf_bss = 0;
    elf_brk = 0;

    elf_stack = ~((abi_ulong)0UL);
    elf_interpreter = NULL;
    start_code = ~((abi_ulong)0UL);
    end_code = 0;
    start_data = 0;
    end_data = 0;
    interp_ex.a_info = 0;

    for (i = 0; i < elf_ex.e_phnum; i++) {
        if (elf_ppnt->p_type == PT_INTERP) {
            if (elf_interpreter != NULL) {
                free(elf_phdata);
                free(elf_interpreter);
                close(bprm->fd);
                return -EINVAL;
            }

            /* This is the program interpreter used for
             * shared libraries - for now assume that this
             * is an a.out format binary.
             */
            elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);

            if (elf_interpreter == NULL) {
                free(elf_phdata);
                close(bprm->fd);
                return -ENOMEM;
            }

            retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET);
            if (retval >= 0) {
                retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz);
            }
            if (retval < 0) {
                perror("load_elf_binary2");
                exit(-1);
            }

            /* If the program interpreter is one of these two,
               then assume an iBCS2 image.  Otherwise assume
               a native linux image. */

            /* JRP - Need to add X86 lib dir stuff here... */

            if (strcmp(elf_interpreter, "/usr/lib/libc.so.1") == 0 ||
                strcmp(elf_interpreter, "/usr/lib/ld.so.1") == 0) {
                ibcs2_interpreter = 1;
            }

#if 0
            printf("Using ELF interpreter %s\n", elf_interpreter);
#endif
            if (retval >= 0) {
                retval = open(path(elf_interpreter), O_RDONLY);
                if (retval >= 0) {
                    interpreter_fd = retval;
                }
                else {
                    perror(elf_interpreter);
                    exit(-1);
                    /* retval = -errno; */
                }
            }

            if (retval >= 0) {
                retval = lseek(interpreter_fd, 0, SEEK_SET);
                if (retval >= 0) {
                    retval = read(interpreter_fd, bprm->buf, 128);
                }
            }
            if (retval >= 0) {
                interp_ex = *((struct exec *) bprm->buf); /* aout exec-header */
                interp_elf_ex = *((struct elfhdr *) bprm->buf); /* elf exec-header */
            }
            if (retval < 0) {
                perror("load_elf_binary3");
                exit(-1);
            }
        }
        elf_ppnt++;
    }

    /* Some simple consistency checks for the interpreter */
    if (elf_interpreter) {
        interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;

        /* Now figure out which format our binary is */
        if ((N_MAGIC(interp_ex) != OMAGIC) && (N_MAGIC(interp_ex) != ZMAGIC) &&
            (N_MAGIC(interp_ex) != QMAGIC)) {
            interpreter_type = INTERPRETER_ELF;
        }

        if (interp_elf_ex.e_ident[0] != 0x7f ||
            strncmp((char *)&interp_elf_ex.e_ident[1], "ELF", 3) != 0) {
            interpreter_type &= ~INTERPRETER_ELF;
        }

        if (!interpreter_type) {
            free(elf_interpreter);
            free(elf_phdata);
            close(bprm->fd);
            return -ELIBBAD;
        }
    }

    /* OK, we are done with that, now set up the arg stuff,
       and then start this sucker up */

    {
        char * passed_p;

        if (interpreter_type == INTERPRETER_AOUT) {
            snprintf(passed_fileno, sizeof(passed_fileno), "%d", bprm->fd);
            passed_p = passed_fileno;

            if (elf_interpreter) {
                bprm->p = copy_elf_strings(1, &passed_p, bprm->page, bprm->p);
                bprm->argc++;
            }
        }
        if (!bprm->p) {
            if (elf_interpreter) {
                free(elf_interpreter);
            }
            free(elf_phdata);
            close(bprm->fd);
            return -E2BIG;
        }
    }

    /* OK, This is the point of no return */
    info->end_data = 0;
    info->end_code = 0;
    info->start_mmap = (abi_ulong)ELF_START_MMAP;
    info->mmap = 0;
    elf_entry = (abi_ulong) elf_ex.e_entry;

    /* Do this so that we can load the interpreter, if need be.  We will
       change some of these later */
    info->rss = 0;
    bprm->p = setup_arg_pages(bprm->p, bprm, info);
    info->start_stack = bprm->p;

    /* Now we do a little grungy work by mmapping the ELF image into
     * the correct location in memory.  At this point, we assume that
     * the image should be loaded at a fixed address, not at a variable
     * address.
     */
    for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
        int elf_prot = 0;
        int elf_flags = 0;
        abi_ulong error;

        if (elf_ppnt->p_type != PT_LOAD)
            continue;

        if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
        if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
        if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
        elf_flags = MAP_PRIVATE | MAP_DENYWRITE;
        if (elf_ex.e_type == ET_EXEC || load_addr_set) {
            elf_flags |= MAP_FIXED;
        } else if (elf_ex.e_type == ET_DYN) {
            /* Try and get dynamic programs out of the way of the default mmap
               base, as well as whatever program they might try to exec.  This
               is because the brk will follow the loader, and is not movable.  */
            /* NOTE: for qemu, we do a big mmap to get enough space
               without hardcoding any address */
            error = target_mmap(0, ET_DYN_MAP_SIZE,
                                PROT_NONE, MAP_PRIVATE | MAP_ANON,
                                -1, 0);
            if (error == -1) {
                perror("mmap");
                exit(-1);
            }
            load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr);
        }

        error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
                            (elf_ppnt->p_filesz +
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
                            elf_prot,
                            (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
                            bprm->fd,
                            (elf_ppnt->p_offset -
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
        if (error == -1) {
            perror("mmap");
            exit(-1);
        }

#ifdef LOW_ELF_STACK
        if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
            elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr);
#endif

        if (!load_addr_set) {
            load_addr_set = 1;
            load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
            if (elf_ex.e_type == ET_DYN) {
                load_bias += error -
                    TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
                load_addr += load_bias;
                reloc_func_desc = load_bias;
            }
        }
        k = elf_ppnt->p_vaddr;
        if (k < start_code)
            start_code = k;
        if (start_data < k)
            start_data = k;
        k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
        if (k > elf_bss)
            elf_bss = k;
        if ((elf_ppnt->p_flags & PF_X) && end_code < k)
            end_code = k;
        if (end_data < k)
            end_data = k;
        k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
        if (k > elf_brk) elf_brk = k;
    }

    elf_entry += load_bias;
    elf_bss += load_bias;
    elf_brk += load_bias;
    start_code += load_bias;
    end_code += load_bias;
    start_data += load_bias;
    end_data += load_bias;

    if (elf_interpreter) {
        if (interpreter_type & 1) {
            elf_entry = load_aout_interp(&interp_ex, interpreter_fd);
        }
        else if (interpreter_type & 2) {
            elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,
                                        &interp_load_addr);
        }
        reloc_func_desc = interp_load_addr;

        close(interpreter_fd);
        free(elf_interpreter);

        if (elf_entry == ~((abi_ulong)0UL)) {
            printf("Unable to load interpreter\n");
            free(elf_phdata);
            exit(-1);
            return 0;
        }
    }

    free(elf_phdata);

    load_symbols(&elf_ex, bprm->fd);

    if (interpreter_type != INTERPRETER_AOUT) close(bprm->fd);
    info->personality = (ibcs2_interpreter ? PER_SVR4 : PER_LINUX);

#ifdef LOW_ELF_STACK
    info->start_stack = bprm->p = elf_stack - 4;
#endif
    bprm->p = create_elf_tables(bprm->p,
                                bprm->argc,
                                bprm->envc,
                                &elf_ex,
                                load_addr, load_bias,
                                interp_load_addr,
                                (interpreter_type == INTERPRETER_AOUT ? 0 : 1),
                                info);
    info->load_addr = reloc_func_desc;
    info->start_brk = info->brk = elf_brk;
    info->end_code = end_code;
    info->start_code = start_code;
    info->start_data = start_data;
    info->end_data = end_data;
    info->start_stack = bprm->p;

    /* Calling set_brk effectively mmaps the pages that we need for the bss
       and break sections */
    set_brk(elf_bss, elf_brk);

    padzero(elf_bss, elf_brk);

#if 0
    printf("(start_brk) %x\n", info->start_brk);
    printf("(end_code) %x\n", info->end_code);
    printf("(start_code) %x\n", info->start_code);
    printf("(end_data) %x\n", info->end_data);
    printf("(start_stack) %x\n", info->start_stack);
    printf("(brk) %x\n", info->brk);
#endif

    if (info->personality == PER_SVR4) {
        /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
           and some applications "depend" upon this behavior.
           Since we do not have the power to recompile these, we
           emulate the SVr4 behavior.  Sigh.  */
        mapped_addr = target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
                                  MAP_FIXED | MAP_PRIVATE, -1, 0);
    }

    info->entry = elf_entry;

    return 0;
}
static int load_aout_interp(void * exptr, int interp_fd)
{
    printf("a.out interpreter not yet supported\n");
    return(0);
}

void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    init_thread(regs, infop);
}