1 /* This is the Linux kernel elf-loading code, ported into user space */
15 /* from personality.h */
18 * Flags for bug emulation.
20 * These occupy the top three bytes.
23 ADDR_NO_RANDOMIZE
= 0x0040000, /* disable randomization of VA space */
24 FDPIC_FUNCPTRS
= 0x0080000, /* userspace function ptrs point to descriptors
27 MMAP_PAGE_ZERO
= 0x0100000,
28 ADDR_COMPAT_LAYOUT
= 0x0200000,
29 READ_IMPLIES_EXEC
= 0x0400000,
30 ADDR_LIMIT_32BIT
= 0x0800000,
31 SHORT_INODE
= 0x1000000,
32 WHOLE_SECONDS
= 0x2000000,
33 STICKY_TIMEOUTS
= 0x4000000,
34 ADDR_LIMIT_3GB
= 0x8000000,
40 * These go in the low byte. Avoid using the top bit, it will
41 * conflict with error returns.
45 PER_LINUX_32BIT
= 0x0000 | ADDR_LIMIT_32BIT
,
46 PER_LINUX_FDPIC
= 0x0000 | FDPIC_FUNCPTRS
,
47 PER_SVR4
= 0x0001 | STICKY_TIMEOUTS
| MMAP_PAGE_ZERO
,
48 PER_SVR3
= 0x0002 | STICKY_TIMEOUTS
| SHORT_INODE
,
49 PER_SCOSVR3
= 0x0003 | STICKY_TIMEOUTS
|
50 WHOLE_SECONDS
| SHORT_INODE
,
51 PER_OSR5
= 0x0003 | STICKY_TIMEOUTS
| WHOLE_SECONDS
,
52 PER_WYSEV386
= 0x0004 | STICKY_TIMEOUTS
| SHORT_INODE
,
53 PER_ISCR4
= 0x0005 | STICKY_TIMEOUTS
,
55 PER_SUNOS
= 0x0006 | STICKY_TIMEOUTS
,
56 PER_XENIX
= 0x0007 | STICKY_TIMEOUTS
| SHORT_INODE
,
58 PER_LINUX32_3GB
= 0x0008 | ADDR_LIMIT_3GB
,
59 PER_IRIX32
= 0x0009 | STICKY_TIMEOUTS
,/* IRIX5 32-bit */
60 PER_IRIXN32
= 0x000a | STICKY_TIMEOUTS
,/* IRIX6 new 32-bit */
61 PER_IRIX64
= 0x000b | STICKY_TIMEOUTS
,/* IRIX6 64-bit */
63 PER_SOLARIS
= 0x000d | STICKY_TIMEOUTS
,
64 PER_UW7
= 0x000e | STICKY_TIMEOUTS
| MMAP_PAGE_ZERO
,
65 PER_OSF4
= 0x000f, /* OSF/1 v4 */
71 * Return the base personality without flags.
73 #define personality(pers) (pers & PER_MASK)
/* this flag is ineffective under Linux too, should be deleted */
77 #define MAP_DENYWRITE 0
80 /* should probably go in elf.h */
87 #define ELF_PLATFORM get_elf_platform()
89 static const char *get_elf_platform(void)
91 static char elf_platform
[] = "i386";
92 int family
= (global_env
->cpuid_version
>> 8) & 0xff;
96 elf_platform
[1] = '0' + family
;
100 #define ELF_HWCAP get_elf_hwcap()
102 static uint32_t get_elf_hwcap(void)
104 return global_env
->cpuid_features
;
108 #define ELF_START_MMAP 0x2aaaaab000ULL
109 #define elf_check_arch(x) ( ((x) == ELF_ARCH) )
111 #define ELF_CLASS ELFCLASS64
112 #define ELF_DATA ELFDATA2LSB
113 #define ELF_ARCH EM_X86_64
115 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
118 regs
->rsp
= infop
->start_stack
;
119 regs
->rip
= infop
->entry
;
124 #define ELF_START_MMAP 0x80000000
127 * This is used to ensure we don't load something for the wrong architecture.
129 #define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )
132 * These are used to set parameters in the core dumps.
134 #define ELF_CLASS ELFCLASS32
135 #define ELF_DATA ELFDATA2LSB
136 #define ELF_ARCH EM_386
138 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
140 regs
->esp
= infop
->start_stack
;
141 regs
->eip
= infop
->entry
;
143 /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
144 starts %edx contains a pointer to a function which might be
145 registered using `atexit'. This provides a mean for the
146 dynamic linker to call DT_FINI functions for shared libraries
147 that have been loaded before the code runs.
149 A value of 0 tells we have no such handler. */
154 #define USE_ELF_CORE_DUMP
155 #define ELF_EXEC_PAGESIZE 4096
161 #define ELF_START_MMAP 0x80000000
163 #define elf_check_arch(x) ( (x) == EM_ARM )
165 #define ELF_CLASS ELFCLASS32
166 #ifdef TARGET_WORDS_BIGENDIAN
167 #define ELF_DATA ELFDATA2MSB
169 #define ELF_DATA ELFDATA2LSB
171 #define ELF_ARCH EM_ARM
173 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
175 abi_long stack
= infop
->start_stack
;
176 memset(regs
, 0, sizeof(*regs
));
177 regs
->ARM_cpsr
= 0x10;
178 if (infop
->entry
& 1)
179 regs
->ARM_cpsr
|= CPSR_T
;
180 regs
->ARM_pc
= infop
->entry
& 0xfffffffe;
181 regs
->ARM_sp
= infop
->start_stack
;
182 /* FIXME - what to for failure of get_user()? */
183 get_user_ual(regs
->ARM_r2
, stack
+ 8); /* envp */
184 get_user_ual(regs
->ARM_r1
, stack
+ 4); /* envp */
185 /* XXX: it seems that r0 is zeroed after ! */
187 /* For uClinux PIC binaries. */
188 /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
189 regs
->ARM_r10
= infop
->start_data
;
192 #define USE_ELF_CORE_DUMP
193 #define ELF_EXEC_PAGESIZE 4096
197 ARM_HWCAP_ARM_SWP
= 1 << 0,
198 ARM_HWCAP_ARM_HALF
= 1 << 1,
199 ARM_HWCAP_ARM_THUMB
= 1 << 2,
200 ARM_HWCAP_ARM_26BIT
= 1 << 3,
201 ARM_HWCAP_ARM_FAST_MULT
= 1 << 4,
202 ARM_HWCAP_ARM_FPA
= 1 << 5,
203 ARM_HWCAP_ARM_VFP
= 1 << 6,
204 ARM_HWCAP_ARM_EDSP
= 1 << 7,
207 #define ELF_HWCAP (ARM_HWCAP_ARM_SWP | ARM_HWCAP_ARM_HALF \
208 | ARM_HWCAP_ARM_THUMB | ARM_HWCAP_ARM_FAST_MULT \
209 | ARM_HWCAP_ARM_FPA | ARM_HWCAP_ARM_VFP)
214 #ifdef TARGET_SPARC64
216 #define ELF_START_MMAP 0x80000000
219 #define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
221 #define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )
224 #define ELF_CLASS ELFCLASS64
225 #define ELF_DATA ELFDATA2MSB
226 #define ELF_ARCH EM_SPARCV9
228 #define STACK_BIAS 2047
230 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
235 regs
->pc
= infop
->entry
;
236 regs
->npc
= regs
->pc
+ 4;
239 regs
->u_regs
[14] = infop
->start_stack
- 16 * 4;
241 if (personality(infop
->personality
) == PER_LINUX32
)
242 regs
->u_regs
[14] = infop
->start_stack
- 16 * 4;
244 regs
->u_regs
[14] = infop
->start_stack
- 16 * 8 - STACK_BIAS
;
249 #define ELF_START_MMAP 0x80000000
251 #define elf_check_arch(x) ( (x) == EM_SPARC )
253 #define ELF_CLASS ELFCLASS32
254 #define ELF_DATA ELFDATA2MSB
255 #define ELF_ARCH EM_SPARC
257 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
260 regs
->pc
= infop
->entry
;
261 regs
->npc
= regs
->pc
+ 4;
263 regs
->u_regs
[14] = infop
->start_stack
- 16 * 4;
271 #define ELF_START_MMAP 0x80000000
273 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
275 #define elf_check_arch(x) ( (x) == EM_PPC64 )
277 #define ELF_CLASS ELFCLASS64
281 #define elf_check_arch(x) ( (x) == EM_PPC )
283 #define ELF_CLASS ELFCLASS32
287 #ifdef TARGET_WORDS_BIGENDIAN
288 #define ELF_DATA ELFDATA2MSB
290 #define ELF_DATA ELFDATA2LSB
292 #define ELF_ARCH EM_PPC
295 * We need to put in some extra aux table entries to tell glibc what
296 * the cache block size is, so it can use the dcbz instruction safely.
298 #define AT_DCACHEBSIZE 19
299 #define AT_ICACHEBSIZE 20
300 #define AT_UCACHEBSIZE 21
301 /* A special ignored type value for PPC, for glibc compatibility. */
302 #define AT_IGNOREPPC 22
304 * The requirements here are:
305 * - keep the final alignment of sp (sp & 0xf)
306 * - make sure the 32-bit value at the first 16 byte aligned position of
307 * AUXV is greater than 16 for glibc compatibility.
308 * AT_IGNOREPPC is used for that.
309 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
310 * even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
312 #define DLINFO_ARCH_ITEMS 5
313 #define ARCH_DLINFO \
315 NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20); \
316 NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20); \
317 NEW_AUX_ENT(AT_UCACHEBSIZE, 0); \
319 * Now handle glibc compatibility. \
321 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
322 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
325 static inline void init_thread(struct target_pt_regs
*_regs
, struct image_info
*infop
)
327 abi_ulong pos
= infop
->start_stack
;
329 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
330 abi_ulong entry
, toc
;
333 _regs
->gpr
[1] = infop
->start_stack
;
334 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
335 entry
= ldq_raw(infop
->entry
) + infop
->load_addr
;
336 toc
= ldq_raw(infop
->entry
+ 8) + infop
->load_addr
;
338 infop
->entry
= entry
;
340 _regs
->nip
= infop
->entry
;
/* Note that this isn't exactly what the regular kernel does
342 * but this is what the ABI wants and is needed to allow
343 * execution of PPC BSD programs.
/* FIXME - what to do on failure of get_user()? */
346 get_user_ual(_regs
->gpr
[3], pos
);
347 pos
+= sizeof(abi_ulong
);
349 for (tmp
= 1; tmp
!= 0; pos
+= sizeof(abi_ulong
))
354 #define USE_ELF_CORE_DUMP
355 #define ELF_EXEC_PAGESIZE 4096
361 #define ELF_START_MMAP 0x80000000
363 #define elf_check_arch(x) ( (x) == EM_MIPS )
366 #define ELF_CLASS ELFCLASS64
368 #define ELF_CLASS ELFCLASS32
370 #ifdef TARGET_WORDS_BIGENDIAN
371 #define ELF_DATA ELFDATA2MSB
373 #define ELF_DATA ELFDATA2LSB
375 #define ELF_ARCH EM_MIPS
377 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
379 regs
->cp0_status
= 2 << CP0St_KSU
;
380 regs
->cp0_epc
= infop
->entry
;
381 regs
->regs
[29] = infop
->start_stack
;
384 #define USE_ELF_CORE_DUMP
385 #define ELF_EXEC_PAGESIZE 4096
387 #endif /* TARGET_MIPS */
391 #define ELF_START_MMAP 0x80000000
393 #define elf_check_arch(x) ( (x) == EM_SH )
395 #define ELF_CLASS ELFCLASS32
396 #define ELF_DATA ELFDATA2LSB
397 #define ELF_ARCH EM_SH
399 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
401 /* Check other registers XXXXX */
402 regs
->pc
= infop
->entry
;
403 regs
->regs
[15] = infop
->start_stack
;
406 #define USE_ELF_CORE_DUMP
407 #define ELF_EXEC_PAGESIZE 4096
413 #define ELF_START_MMAP 0x80000000
415 #define elf_check_arch(x) ( (x) == EM_CRIS )
417 #define ELF_CLASS ELFCLASS32
418 #define ELF_DATA ELFDATA2LSB
419 #define ELF_ARCH EM_CRIS
421 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
423 regs
->erp
= infop
->entry
;
426 #define USE_ELF_CORE_DUMP
427 #define ELF_EXEC_PAGESIZE 8192
433 #define ELF_START_MMAP 0x80000000
435 #define elf_check_arch(x) ( (x) == EM_68K )
437 #define ELF_CLASS ELFCLASS32
438 #define ELF_DATA ELFDATA2MSB
439 #define ELF_ARCH EM_68K
441 /* ??? Does this need to do anything?
442 #define ELF_PLAT_INIT(_r) */
444 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
446 regs
->usp
= infop
->start_stack
;
448 regs
->pc
= infop
->entry
;
451 #define USE_ELF_CORE_DUMP
452 #define ELF_EXEC_PAGESIZE 8192
458 #define ELF_START_MMAP (0x30000000000ULL)
460 #define elf_check_arch(x) ( (x) == ELF_ARCH )
462 #define ELF_CLASS ELFCLASS64
463 #define ELF_DATA ELFDATA2MSB
464 #define ELF_ARCH EM_ALPHA
466 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
468 regs
->pc
= infop
->entry
;
470 regs
->usp
= infop
->start_stack
;
471 regs
->unique
= infop
->start_data
; /* ? */
472 printf("Set unique value to " TARGET_FMT_lx
" (" TARGET_FMT_lx
")\n",
473 regs
->unique
, infop
->start_data
);
476 #define USE_ELF_CORE_DUMP
477 #define ELF_EXEC_PAGESIZE 8192
479 #endif /* TARGET_ALPHA */
482 #define ELF_PLATFORM (NULL)
491 #define ELF_CLASS ELFCLASS32
493 #define bswaptls(ptr) bswap32s(ptr)
500 unsigned int a_info
; /* Use macros N_MAGIC, etc for access */
501 unsigned int a_text
; /* length of text, in bytes */
502 unsigned int a_data
; /* length of data, in bytes */
503 unsigned int a_bss
; /* length of uninitialized data area, in bytes */
504 unsigned int a_syms
; /* length of symbol table data in file, in bytes */
505 unsigned int a_entry
; /* start address */
506 unsigned int a_trsize
; /* length of relocation info for text, in bytes */
507 unsigned int a_drsize
; /* length of relocation info for data, in bytes */
511 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
517 /* max code+data+bss space allocated to elf interpreter */
518 #define INTERP_MAP_SIZE (32 * 1024 * 1024)
520 /* max code+data+bss+brk space allocated to ET_DYN executables */
521 #define ET_DYN_MAP_SIZE (128 * 1024 * 1024)
523 /* Necessary parameters */
524 #define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
525 #define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
526 #define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
528 #define INTERPRETER_NONE 0
529 #define INTERPRETER_AOUT 1
530 #define INTERPRETER_ELF 2
532 #define DLINFO_ITEMS 12
/*
 * Kernel-compat shim: in the kernel this copies bytes out of user space,
 * but in this user-space port source and destination share one address
 * space, so it reduces to a plain memcpy.  The excerpt showed only the
 * signature; this restores the standard one-line body.
 *
 * @to:   destination buffer (at least @n bytes)
 * @from: source buffer (at least @n bytes, must not overlap @to)
 * @n:    number of bytes to copy
 */
static inline void memcpy_fromfs(void *to, const void *from, unsigned long n)
{
    memcpy(to, from, n);
}
539 extern unsigned long x86_stack_size
;
541 static int load_aout_interp(void * exptr
, int interp_fd
);
544 static void bswap_ehdr(struct elfhdr
*ehdr
)
546 bswap16s(&ehdr
->e_type
); /* Object file type */
547 bswap16s(&ehdr
->e_machine
); /* Architecture */
548 bswap32s(&ehdr
->e_version
); /* Object file version */
549 bswaptls(&ehdr
->e_entry
); /* Entry point virtual address */
550 bswaptls(&ehdr
->e_phoff
); /* Program header table file offset */
551 bswaptls(&ehdr
->e_shoff
); /* Section header table file offset */
552 bswap32s(&ehdr
->e_flags
); /* Processor-specific flags */
553 bswap16s(&ehdr
->e_ehsize
); /* ELF header size in bytes */
554 bswap16s(&ehdr
->e_phentsize
); /* Program header table entry size */
555 bswap16s(&ehdr
->e_phnum
); /* Program header table entry count */
556 bswap16s(&ehdr
->e_shentsize
); /* Section header table entry size */
557 bswap16s(&ehdr
->e_shnum
); /* Section header table entry count */
558 bswap16s(&ehdr
->e_shstrndx
); /* Section header string table index */
561 static void bswap_phdr(struct elf_phdr
*phdr
)
563 bswap32s(&phdr
->p_type
); /* Segment type */
564 bswaptls(&phdr
->p_offset
); /* Segment file offset */
565 bswaptls(&phdr
->p_vaddr
); /* Segment virtual address */
566 bswaptls(&phdr
->p_paddr
); /* Segment physical address */
567 bswaptls(&phdr
->p_filesz
); /* Segment size in file */
568 bswaptls(&phdr
->p_memsz
); /* Segment size in memory */
569 bswap32s(&phdr
->p_flags
); /* Segment flags */
570 bswaptls(&phdr
->p_align
); /* Segment alignment */
573 static void bswap_shdr(struct elf_shdr
*shdr
)
575 bswap32s(&shdr
->sh_name
);
576 bswap32s(&shdr
->sh_type
);
577 bswaptls(&shdr
->sh_flags
);
578 bswaptls(&shdr
->sh_addr
);
579 bswaptls(&shdr
->sh_offset
);
580 bswaptls(&shdr
->sh_size
);
581 bswap32s(&shdr
->sh_link
);
582 bswap32s(&shdr
->sh_info
);
583 bswaptls(&shdr
->sh_addralign
);
584 bswaptls(&shdr
->sh_entsize
);
587 static void bswap_sym(struct elf_sym
*sym
)
589 bswap32s(&sym
->st_name
);
590 bswaptls(&sym
->st_value
);
591 bswaptls(&sym
->st_size
);
592 bswap16s(&sym
->st_shndx
);
597 * 'copy_elf_strings()' copies argument/envelope strings from user
598 * memory to free pages in kernel mem. These are in a format ready
599 * to be put directly into the top of new user memory.
602 static abi_ulong
copy_elf_strings(int argc
,char ** argv
, void **page
,
605 char *tmp
, *tmp1
, *pag
= NULL
;
609 return 0; /* bullet-proofing */
614 fprintf(stderr
, "VFS: argc is wrong");
620 if (p
< len
) { /* this shouldn't happen - 128kB */
626 offset
= p
% TARGET_PAGE_SIZE
;
627 pag
= (char *)page
[p
/TARGET_PAGE_SIZE
];
629 pag
= (char *)malloc(TARGET_PAGE_SIZE
);
630 memset(pag
, 0, TARGET_PAGE_SIZE
);
631 page
[p
/TARGET_PAGE_SIZE
] = pag
;
636 if (len
== 0 || offset
== 0) {
637 *(pag
+ offset
) = *tmp
;
640 int bytes_to_copy
= (len
> offset
) ? offset
: len
;
641 tmp
-= bytes_to_copy
;
643 offset
-= bytes_to_copy
;
644 len
-= bytes_to_copy
;
645 memcpy_fromfs(pag
+ offset
, tmp
, bytes_to_copy
+ 1);
652 static abi_ulong
setup_arg_pages(abi_ulong p
, struct linux_binprm
*bprm
,
653 struct image_info
*info
)
655 abi_ulong stack_base
, size
, error
;
658 /* Create enough stack to hold everything. If we don't use
659 * it for args, we'll use it for something else...
661 size
= x86_stack_size
;
662 if (size
< MAX_ARG_PAGES
*TARGET_PAGE_SIZE
)
663 size
= MAX_ARG_PAGES
*TARGET_PAGE_SIZE
;
664 error
= target_mmap(0,
665 size
+ qemu_host_page_size
,
666 PROT_READ
| PROT_WRITE
,
667 MAP_PRIVATE
| MAP_ANONYMOUS
,
673 /* we reserve one extra page at the top of the stack as guard */
674 target_mprotect(error
+ size
, qemu_host_page_size
, PROT_NONE
);
676 stack_base
= error
+ size
- MAX_ARG_PAGES
*TARGET_PAGE_SIZE
;
679 for (i
= 0 ; i
< MAX_ARG_PAGES
; i
++) {
682 /* FIXME - check return value of memcpy_to_target() for failure */
683 memcpy_to_target(stack_base
, bprm
->page
[i
], TARGET_PAGE_SIZE
);
686 stack_base
+= TARGET_PAGE_SIZE
;
691 static void set_brk(abi_ulong start
, abi_ulong end
)
693 /* page-align the start and end addresses... */
694 start
= HOST_PAGE_ALIGN(start
);
695 end
= HOST_PAGE_ALIGN(end
);
698 if(target_mmap(start
, end
- start
,
699 PROT_READ
| PROT_WRITE
| PROT_EXEC
,
700 MAP_FIXED
| MAP_PRIVATE
| MAP_ANONYMOUS
, -1, 0) == -1) {
701 perror("cannot mmap brk");
707 /* We need to explicitly zero any fractional pages after the data
708 section (i.e. bss). This would contain the junk from the file that
709 should not be in memory. */
710 static void padzero(abi_ulong elf_bss
, abi_ulong last_bss
)
714 if (elf_bss
>= last_bss
)
717 /* XXX: this is really a hack : if the real host page size is
718 smaller than the target page size, some pages after the end
719 of the file may not be mapped. A better fix would be to
720 patch target_mmap(), but it is more complicated as the file
721 size must be known */
722 if (qemu_real_host_page_size
< qemu_host_page_size
) {
723 abi_ulong end_addr
, end_addr1
;
724 end_addr1
= (elf_bss
+ qemu_real_host_page_size
- 1) &
725 ~(qemu_real_host_page_size
- 1);
726 end_addr
= HOST_PAGE_ALIGN(elf_bss
);
727 if (end_addr1
< end_addr
) {
728 mmap((void *)g2h(end_addr1
), end_addr
- end_addr1
,
729 PROT_READ
|PROT_WRITE
|PROT_EXEC
,
730 MAP_FIXED
|MAP_PRIVATE
|MAP_ANONYMOUS
, -1, 0);
734 nbyte
= elf_bss
& (qemu_host_page_size
-1);
736 nbyte
= qemu_host_page_size
- nbyte
;
738 /* FIXME - what to do if put_user() fails? */
739 put_user_u8(0, elf_bss
);
746 static abi_ulong
create_elf_tables(abi_ulong p
, int argc
, int envc
,
747 struct elfhdr
* exec
,
750 abi_ulong interp_load_addr
, int ibcs
,
751 struct image_info
*info
)
755 abi_ulong u_platform
;
756 const char *k_platform
;
757 const int n
= sizeof(elf_addr_t
);
761 k_platform
= ELF_PLATFORM
;
763 size_t len
= strlen(k_platform
) + 1;
764 sp
-= (len
+ n
- 1) & ~(n
- 1);
766 /* FIXME - check return value of memcpy_to_target() for failure */
767 memcpy_to_target(sp
, k_platform
, len
);
770 * Force 16 byte _final_ alignment here for generality.
772 sp
= sp
&~ (abi_ulong
)15;
773 size
= (DLINFO_ITEMS
+ 1) * 2;
776 #ifdef DLINFO_ARCH_ITEMS
777 size
+= DLINFO_ARCH_ITEMS
* 2;
779 size
+= envc
+ argc
+ 2;
780 size
+= (!ibcs
? 3 : 1); /* argc itself */
783 sp
-= 16 - (size
& 15);
785 /* This is correct because Linux defines
786 * elf_addr_t as Elf32_Off / Elf64_Off
788 #define NEW_AUX_ENT(id, val) do { \
789 sp -= n; put_user_ual(val, sp); \
790 sp -= n; put_user_ual(id, sp); \
793 NEW_AUX_ENT (AT_NULL
, 0);
795 /* There must be exactly DLINFO_ITEMS entries here. */
796 NEW_AUX_ENT(AT_PHDR
, (abi_ulong
)(load_addr
+ exec
->e_phoff
));
797 NEW_AUX_ENT(AT_PHENT
, (abi_ulong
)(sizeof (struct elf_phdr
)));
798 NEW_AUX_ENT(AT_PHNUM
, (abi_ulong
)(exec
->e_phnum
));
799 NEW_AUX_ENT(AT_PAGESZ
, (abi_ulong
)(TARGET_PAGE_SIZE
));
800 NEW_AUX_ENT(AT_BASE
, (abi_ulong
)(interp_load_addr
));
801 NEW_AUX_ENT(AT_FLAGS
, (abi_ulong
)0);
802 NEW_AUX_ENT(AT_ENTRY
, load_bias
+ exec
->e_entry
);
803 NEW_AUX_ENT(AT_UID
, (abi_ulong
) getuid());
804 NEW_AUX_ENT(AT_EUID
, (abi_ulong
) geteuid());
805 NEW_AUX_ENT(AT_GID
, (abi_ulong
) getgid());
806 NEW_AUX_ENT(AT_EGID
, (abi_ulong
) getegid());
807 NEW_AUX_ENT(AT_HWCAP
, (abi_ulong
) ELF_HWCAP
);
809 NEW_AUX_ENT(AT_PLATFORM
, u_platform
);
812 * ARCH_DLINFO must come last so platform specific code can enforce
813 * special alignment requirements on the AUXV if necessary (eg. PPC).
819 sp
= loader_build_argptr(envc
, argc
, sp
, p
, !ibcs
);
824 static abi_ulong
load_elf_interp(struct elfhdr
* interp_elf_ex
,
826 abi_ulong
*interp_load_addr
)
828 struct elf_phdr
*elf_phdata
= NULL
;
829 struct elf_phdr
*eppnt
;
830 abi_ulong load_addr
= 0;
831 int load_addr_set
= 0;
833 abi_ulong last_bss
, elf_bss
;
842 bswap_ehdr(interp_elf_ex
);
844 /* First of all, some simple consistency checks */
845 if ((interp_elf_ex
->e_type
!= ET_EXEC
&&
846 interp_elf_ex
->e_type
!= ET_DYN
) ||
847 !elf_check_arch(interp_elf_ex
->e_machine
)) {
848 return ~((abi_ulong
)0UL);
852 /* Now read in all of the header information */
854 if (sizeof(struct elf_phdr
) * interp_elf_ex
->e_phnum
> TARGET_PAGE_SIZE
)
855 return ~(abi_ulong
)0UL;
857 elf_phdata
= (struct elf_phdr
*)
858 malloc(sizeof(struct elf_phdr
) * interp_elf_ex
->e_phnum
);
861 return ~((abi_ulong
)0UL);
864 * If the size of this structure has changed, then punt, since
865 * we will be doing the wrong thing.
867 if (interp_elf_ex
->e_phentsize
!= sizeof(struct elf_phdr
)) {
869 return ~((abi_ulong
)0UL);
872 retval
= lseek(interpreter_fd
, interp_elf_ex
->e_phoff
, SEEK_SET
);
874 retval
= read(interpreter_fd
,
876 sizeof(struct elf_phdr
) * interp_elf_ex
->e_phnum
);
879 perror("load_elf_interp");
886 for (i
=0; i
<interp_elf_ex
->e_phnum
; i
++, eppnt
++) {
891 if (interp_elf_ex
->e_type
== ET_DYN
) {
892 /* in order to avoid hardcoding the interpreter load
893 address in qemu, we allocate a big enough memory zone */
894 error
= target_mmap(0, INTERP_MAP_SIZE
,
895 PROT_NONE
, MAP_PRIVATE
| MAP_ANON
,
906 for(i
=0; i
<interp_elf_ex
->e_phnum
; i
++, eppnt
++)
907 if (eppnt
->p_type
== PT_LOAD
) {
908 int elf_type
= MAP_PRIVATE
| MAP_DENYWRITE
;
913 if (eppnt
->p_flags
& PF_R
) elf_prot
= PROT_READ
;
914 if (eppnt
->p_flags
& PF_W
) elf_prot
|= PROT_WRITE
;
915 if (eppnt
->p_flags
& PF_X
) elf_prot
|= PROT_EXEC
;
916 if (interp_elf_ex
->e_type
== ET_EXEC
|| load_addr_set
) {
917 elf_type
|= MAP_FIXED
;
918 vaddr
= eppnt
->p_vaddr
;
920 error
= target_mmap(load_addr
+TARGET_ELF_PAGESTART(vaddr
),
921 eppnt
->p_filesz
+ TARGET_ELF_PAGEOFFSET(eppnt
->p_vaddr
),
925 eppnt
->p_offset
- TARGET_ELF_PAGEOFFSET(eppnt
->p_vaddr
));
929 close(interpreter_fd
);
931 return ~((abi_ulong
)0UL);
934 if (!load_addr_set
&& interp_elf_ex
->e_type
== ET_DYN
) {
940 * Find the end of the file mapping for this phdr, and keep
941 * track of the largest address we see for this.
943 k
= load_addr
+ eppnt
->p_vaddr
+ eppnt
->p_filesz
;
944 if (k
> elf_bss
) elf_bss
= k
;
947 * Do the same thing for the memory mapping - between
948 * elf_bss and last_bss is the bss section.
950 k
= load_addr
+ eppnt
->p_memsz
+ eppnt
->p_vaddr
;
951 if (k
> last_bss
) last_bss
= k
;
954 /* Now use mmap to map the library into memory. */
956 close(interpreter_fd
);
959 * Now fill out the bss section. First pad the last page up
960 * to the page boundary, and then perform a mmap to make sure
961 * that there are zeromapped pages up to and including the last
964 padzero(elf_bss
, last_bss
);
965 elf_bss
= TARGET_ELF_PAGESTART(elf_bss
+ qemu_host_page_size
- 1); /* What we have mapped so far */
967 /* Map the last of the bss segment */
968 if (last_bss
> elf_bss
) {
969 target_mmap(elf_bss
, last_bss
-elf_bss
,
970 PROT_READ
|PROT_WRITE
|PROT_EXEC
,
971 MAP_FIXED
|MAP_PRIVATE
|MAP_ANONYMOUS
, -1, 0);
975 *interp_load_addr
= load_addr
;
976 return ((abi_ulong
) interp_elf_ex
->e_entry
) + load_addr
;
979 /* Best attempt to load symbols from this ELF object. */
980 static void load_symbols(struct elfhdr
*hdr
, int fd
)
983 struct elf_shdr sechdr
, symtab
, strtab
;
986 #if (ELF_CLASS == ELFCLASS64)
987 // Disas uses 32 bit symbols
988 struct elf32_sym
*syms32
= NULL
;
992 lseek(fd
, hdr
->e_shoff
, SEEK_SET
);
993 for (i
= 0; i
< hdr
->e_shnum
; i
++) {
994 if (read(fd
, &sechdr
, sizeof(sechdr
)) != sizeof(sechdr
))
999 if (sechdr
.sh_type
== SHT_SYMTAB
) {
1001 lseek(fd
, hdr
->e_shoff
1002 + sizeof(sechdr
) * sechdr
.sh_link
, SEEK_SET
);
1003 if (read(fd
, &strtab
, sizeof(strtab
))
1007 bswap_shdr(&strtab
);
1012 return; /* Shouldn't happen... */
1015 /* Now know where the strtab and symtab are. Snarf them. */
1016 s
= malloc(sizeof(*s
));
1017 s
->disas_symtab
= malloc(symtab
.sh_size
);
1018 #if (ELF_CLASS == ELFCLASS64)
1019 syms32
= malloc(symtab
.sh_size
/ sizeof(struct elf_sym
)
1020 * sizeof(struct elf32_sym
));
1022 s
->disas_strtab
= strings
= malloc(strtab
.sh_size
);
1023 if (!s
->disas_symtab
|| !s
->disas_strtab
)
1026 lseek(fd
, symtab
.sh_offset
, SEEK_SET
);
1027 if (read(fd
, s
->disas_symtab
, symtab
.sh_size
) != symtab
.sh_size
)
1030 for (i
= 0; i
< symtab
.sh_size
/ sizeof(struct elf_sym
); i
++) {
1032 bswap_sym(s
->disas_symtab
+ sizeof(struct elf_sym
)*i
);
1034 #if (ELF_CLASS == ELFCLASS64)
1035 sym
= s
->disas_symtab
+ sizeof(struct elf_sym
)*i
;
1036 syms32
[i
].st_name
= sym
->st_name
;
1037 syms32
[i
].st_info
= sym
->st_info
;
1038 syms32
[i
].st_other
= sym
->st_other
;
1039 syms32
[i
].st_shndx
= sym
->st_shndx
;
1040 syms32
[i
].st_value
= sym
->st_value
& 0xffffffff;
1041 syms32
[i
].st_size
= sym
->st_size
& 0xffffffff;
1045 #if (ELF_CLASS == ELFCLASS64)
1046 free(s
->disas_symtab
);
1047 s
->disas_symtab
= syms32
;
1049 lseek(fd
, strtab
.sh_offset
, SEEK_SET
);
1050 if (read(fd
, strings
, strtab
.sh_size
) != strtab
.sh_size
)
1052 s
->disas_num_syms
= symtab
.sh_size
/ sizeof(struct elf_sym
);
1057 int load_elf_binary(struct linux_binprm
* bprm
, struct target_pt_regs
* regs
,
1058 struct image_info
* info
)
1060 struct elfhdr elf_ex
;
1061 struct elfhdr interp_elf_ex
;
1062 struct exec interp_ex
;
1063 int interpreter_fd
= -1; /* avoid warning */
1064 abi_ulong load_addr
, load_bias
;
1065 int load_addr_set
= 0;
1066 unsigned int interpreter_type
= INTERPRETER_NONE
;
1067 unsigned char ibcs2_interpreter
;
1069 abi_ulong mapped_addr
;
1070 struct elf_phdr
* elf_ppnt
;
1071 struct elf_phdr
*elf_phdata
;
1072 abi_ulong elf_bss
, k
, elf_brk
;
1074 char * elf_interpreter
;
1075 abi_ulong elf_entry
, interp_load_addr
= 0;
1077 abi_ulong start_code
, end_code
, start_data
, end_data
;
1078 abi_ulong reloc_func_desc
= 0;
1079 abi_ulong elf_stack
;
1080 char passed_fileno
[6];
1082 ibcs2_interpreter
= 0;
1086 elf_ex
= *((struct elfhdr
*) bprm
->buf
); /* exec-header */
1088 bswap_ehdr(&elf_ex
);
1091 /* First of all, some simple consistency checks */
1092 if ((elf_ex
.e_type
!= ET_EXEC
&& elf_ex
.e_type
!= ET_DYN
) ||
1093 (! elf_check_arch(elf_ex
.e_machine
))) {
1097 bprm
->p
= copy_elf_strings(1, &bprm
->filename
, bprm
->page
, bprm
->p
);
1098 bprm
->p
= copy_elf_strings(bprm
->envc
,bprm
->envp
,bprm
->page
,bprm
->p
);
1099 bprm
->p
= copy_elf_strings(bprm
->argc
,bprm
->argv
,bprm
->page
,bprm
->p
);
1104 /* Now read in all of the header information */
1105 elf_phdata
= (struct elf_phdr
*)malloc(elf_ex
.e_phentsize
*elf_ex
.e_phnum
);
1106 if (elf_phdata
== NULL
) {
1110 retval
= lseek(bprm
->fd
, elf_ex
.e_phoff
, SEEK_SET
);
1112 retval
= read(bprm
->fd
, (char *) elf_phdata
,
1113 elf_ex
.e_phentsize
* elf_ex
.e_phnum
);
1117 perror("load_elf_binary");
1124 elf_ppnt
= elf_phdata
;
1125 for (i
=0; i
<elf_ex
.e_phnum
; i
++, elf_ppnt
++) {
1126 bswap_phdr(elf_ppnt
);
1129 elf_ppnt
= elf_phdata
;
1135 elf_stack
= ~((abi_ulong
)0UL);
1136 elf_interpreter
= NULL
;
1137 start_code
= ~((abi_ulong
)0UL);
1142 for(i
=0;i
< elf_ex
.e_phnum
; i
++) {
1143 if (elf_ppnt
->p_type
== PT_INTERP
) {
1144 if ( elf_interpreter
!= NULL
)
1147 free(elf_interpreter
);
1152 /* This is the program interpreter used for
1153 * shared libraries - for now assume that this
1154 * is an a.out format binary
1157 elf_interpreter
= (char *)malloc(elf_ppnt
->p_filesz
);
1159 if (elf_interpreter
== NULL
) {
1165 retval
= lseek(bprm
->fd
, elf_ppnt
->p_offset
, SEEK_SET
);
1167 retval
= read(bprm
->fd
, elf_interpreter
, elf_ppnt
->p_filesz
);
1170 perror("load_elf_binary2");
1174 /* If the program interpreter is one of these two,
1175 then assume an iBCS2 image. Otherwise assume
1176 a native linux image. */
1178 /* JRP - Need to add X86 lib dir stuff here... */
1180 if (strcmp(elf_interpreter
,"/usr/lib/libc.so.1") == 0 ||
1181 strcmp(elf_interpreter
,"/usr/lib/ld.so.1") == 0) {
1182 ibcs2_interpreter
= 1;
1186 printf("Using ELF interpreter %s\n", elf_interpreter
);
1189 retval
= open(path(elf_interpreter
), O_RDONLY
);
1191 interpreter_fd
= retval
;
1194 perror(elf_interpreter
);
1196 /* retval = -errno; */
1201 retval
= lseek(interpreter_fd
, 0, SEEK_SET
);
1203 retval
= read(interpreter_fd
,bprm
->buf
,128);
1207 interp_ex
= *((struct exec
*) bprm
->buf
); /* aout exec-header */
1208 interp_elf_ex
=*((struct elfhdr
*) bprm
->buf
); /* elf exec-header */
1211 perror("load_elf_binary3");
1214 free(elf_interpreter
);
1222 /* Some simple consistency checks for the interpreter */
1223 if (elf_interpreter
){
1224 interpreter_type
= INTERPRETER_ELF
| INTERPRETER_AOUT
;
1226 /* Now figure out which format our binary is */
1227 if ((N_MAGIC(interp_ex
) != OMAGIC
) && (N_MAGIC(interp_ex
) != ZMAGIC
) &&
1228 (N_MAGIC(interp_ex
) != QMAGIC
)) {
1229 interpreter_type
= INTERPRETER_ELF
;
1232 if (interp_elf_ex
.e_ident
[0] != 0x7f ||
1233 strncmp(&interp_elf_ex
.e_ident
[1], "ELF",3) != 0) {
1234 interpreter_type
&= ~INTERPRETER_ELF
;
1237 if (!interpreter_type
) {
1238 free(elf_interpreter
);
1245 /* OK, we are done with that, now set up the arg stuff,
1246 and then start this sucker up */
1251 if (interpreter_type
== INTERPRETER_AOUT
) {
1252 snprintf(passed_fileno
, sizeof(passed_fileno
), "%d", bprm
->fd
);
1253 passed_p
= passed_fileno
;
1255 if (elf_interpreter
) {
1256 bprm
->p
= copy_elf_strings(1,&passed_p
,bprm
->page
,bprm
->p
);
1261 if (elf_interpreter
) {
1262 free(elf_interpreter
);
1270 /* OK, This is the point of no return */
1273 info
->start_mmap
= (abi_ulong
)ELF_START_MMAP
;
1275 elf_entry
= (abi_ulong
) elf_ex
.e_entry
;
1277 /* Do this so that we can load the interpreter, if need be. We will
1278 change some of these later */
1280 bprm
->p
= setup_arg_pages(bprm
->p
, bprm
, info
);
1281 info
->start_stack
= bprm
->p
;
1283 /* Now we do a little grungy work by mmaping the ELF image into
1284 * the correct location in memory. At this point, we assume that
1285 * the image should be loaded at fixed address, not at a variable
1289 for(i
= 0, elf_ppnt
= elf_phdata
; i
< elf_ex
.e_phnum
; i
++, elf_ppnt
++) {
1294 if (elf_ppnt
->p_type
!= PT_LOAD
)
1297 if (elf_ppnt
->p_flags
& PF_R
) elf_prot
|= PROT_READ
;
1298 if (elf_ppnt
->p_flags
& PF_W
) elf_prot
|= PROT_WRITE
;
1299 if (elf_ppnt
->p_flags
& PF_X
) elf_prot
|= PROT_EXEC
;
1300 elf_flags
= MAP_PRIVATE
| MAP_DENYWRITE
;
1301 if (elf_ex
.e_type
== ET_EXEC
|| load_addr_set
) {
1302 elf_flags
|= MAP_FIXED
;
1303 } else if (elf_ex
.e_type
== ET_DYN
) {
1304 /* Try and get dynamic programs out of the way of the default mmap
1305 base, as well as whatever program they might try to exec. This
1306 is because the brk will follow the loader, and is not movable. */
1307 /* NOTE: for qemu, we do a big mmap to get enough space
1308 without hardcoding any address */
1309 error
= target_mmap(0, ET_DYN_MAP_SIZE
,
1310 PROT_NONE
, MAP_PRIVATE
| MAP_ANON
,
1316 load_bias
= TARGET_ELF_PAGESTART(error
- elf_ppnt
->p_vaddr
);
1319 error
= target_mmap(TARGET_ELF_PAGESTART(load_bias
+ elf_ppnt
->p_vaddr
),
1320 (elf_ppnt
->p_filesz
+
1321 TARGET_ELF_PAGEOFFSET(elf_ppnt
->p_vaddr
)),
1323 (MAP_FIXED
| MAP_PRIVATE
| MAP_DENYWRITE
),
1325 (elf_ppnt
->p_offset
-
1326 TARGET_ELF_PAGEOFFSET(elf_ppnt
->p_vaddr
)));
1332 #ifdef LOW_ELF_STACK
1333 if (TARGET_ELF_PAGESTART(elf_ppnt
->p_vaddr
) < elf_stack
)
1334 elf_stack
= TARGET_ELF_PAGESTART(elf_ppnt
->p_vaddr
);
1337 if (!load_addr_set
) {
1339 load_addr
= elf_ppnt
->p_vaddr
- elf_ppnt
->p_offset
;
1340 if (elf_ex
.e_type
== ET_DYN
) {
1341 load_bias
+= error
-
1342 TARGET_ELF_PAGESTART(load_bias
+ elf_ppnt
->p_vaddr
);
1343 load_addr
+= load_bias
;
1344 reloc_func_desc
= load_bias
;
1347 k
= elf_ppnt
->p_vaddr
;
1352 k
= elf_ppnt
->p_vaddr
+ elf_ppnt
->p_filesz
;
1355 if ((elf_ppnt
->p_flags
& PF_X
) && end_code
< k
)
1359 k
= elf_ppnt
->p_vaddr
+ elf_ppnt
->p_memsz
;
1360 if (k
> elf_brk
) elf_brk
= k
;
1363 elf_entry
+= load_bias
;
1364 elf_bss
+= load_bias
;
1365 elf_brk
+= load_bias
;
1366 start_code
+= load_bias
;
1367 end_code
+= load_bias
;
1368 start_data
+= load_bias
;
1369 end_data
+= load_bias
;
1371 if (elf_interpreter
) {
1372 if (interpreter_type
& 1) {
1373 elf_entry
= load_aout_interp(&interp_ex
, interpreter_fd
);
1375 else if (interpreter_type
& 2) {
1376 elf_entry
= load_elf_interp(&interp_elf_ex
, interpreter_fd
,
1379 reloc_func_desc
= interp_load_addr
;
1381 close(interpreter_fd
);
1382 free(elf_interpreter
);
1384 if (elf_entry
== ~((abi_ulong
)0UL)) {
1385 printf("Unable to load interpreter\n");
1395 load_symbols(&elf_ex
, bprm
->fd
);
1397 if (interpreter_type
!= INTERPRETER_AOUT
) close(bprm
->fd
);
1398 info
->personality
= (ibcs2_interpreter
? PER_SVR4
: PER_LINUX
);
1400 #ifdef LOW_ELF_STACK
1401 info
->start_stack
= bprm
->p
= elf_stack
- 4;
1403 bprm
->p
= create_elf_tables(bprm
->p
,
1407 load_addr
, load_bias
,
1409 (interpreter_type
== INTERPRETER_AOUT
? 0 : 1),
1411 info
->load_addr
= reloc_func_desc
;
1412 info
->start_brk
= info
->brk
= elf_brk
;
1413 info
->end_code
= end_code
;
1414 info
->start_code
= start_code
;
1415 info
->start_data
= start_data
;
1416 info
->end_data
= end_data
;
1417 info
->start_stack
= bprm
->p
;
1419 /* Calling set_brk effectively mmaps the pages that we need for the bss and break
1421 set_brk(elf_bss
, elf_brk
);
1423 padzero(elf_bss
, elf_brk
);
1426 printf("(start_brk) %x\n" , info
->start_brk
);
1427 printf("(end_code) %x\n" , info
->end_code
);
1428 printf("(start_code) %x\n" , info
->start_code
);
1429 printf("(end_data) %x\n" , info
->end_data
);
1430 printf("(start_stack) %x\n" , info
->start_stack
);
1431 printf("(brk) %x\n" , info
->brk
);
1434 if ( info
->personality
== PER_SVR4
)
1436 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
1437 and some applications "depend" upon this behavior.
1438 Since we do not have the power to recompile these, we
1439 emulate the SVr4 behavior. Sigh. */
1440 mapped_addr
= target_mmap(0, qemu_host_page_size
, PROT_READ
| PROT_EXEC
,
1441 MAP_FIXED
| MAP_PRIVATE
, -1, 0);
1444 info
->entry
= elf_entry
;
/*
 * Stub: loading an a.out format interpreter is not implemented.
 *
 * Fix vs. the excerpted code: the function is declared to return int but
 * had no visible return statement, which is undefined behavior because
 * load_elf_binary() assigns the result to elf_entry.  Returning 0 keeps
 * the historical "not supported" behavior (entry stays unusable and the
 * caller's error path reports the failure).
 *
 * @exptr:     a.out exec header (unused)
 * @interp_fd: open fd of the interpreter (unused)
 */
static int load_aout_interp(void *exptr, int interp_fd)
{
    printf("a.out interpreter not yet supported\n");
    return 0;
}
/*
 * Public entry point for register initialization: forwards to the
 * architecture-specific init_thread() selected by the #ifdef blocks
 * above.
 */
void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    init_thread(regs, infop);
}