/* This is the Linux kernel elf-loading code, ported into user space */
/* from personality.h */

/*
 * Flags for bug emulation.
 *
 * These occupy the top three bytes.
 */
    ADDR_NO_RANDOMIZE  = 0x0040000,  /* disable randomization of VA space */
    FDPIC_FUNCPTRS     = 0x0080000,  /* userspace function ptrs point to descriptors */
    MMAP_PAGE_ZERO     = 0x0100000,
    ADDR_COMPAT_LAYOUT = 0x0200000,
    READ_IMPLIES_EXEC  = 0x0400000,
    ADDR_LIMIT_32BIT   = 0x0800000,
    SHORT_INODE        = 0x1000000,
    WHOLE_SECONDS      = 0x2000000,
    STICKY_TIMEOUTS    = 0x4000000,
    ADDR_LIMIT_3GB     = 0x8000000,
/*
 * These go in the low byte.  Avoid using the top bit, it will
 * conflict with error returns.
 */
    PER_LINUX_32BIT = 0x0000 | ADDR_LIMIT_32BIT,
    PER_LINUX_FDPIC = 0x0000 | FDPIC_FUNCPTRS,
    PER_SVR4        = 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
    PER_SVR3        = 0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_SCOSVR3     = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS | SHORT_INODE,
    PER_OSR5        = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
    PER_WYSEV386    = 0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_ISCR4       = 0x0005 | STICKY_TIMEOUTS,
    PER_SUNOS       = 0x0006 | STICKY_TIMEOUTS,
    PER_XENIX       = 0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_LINUX32_3GB = 0x0008 | ADDR_LIMIT_3GB,
    PER_IRIX32      = 0x0009 | STICKY_TIMEOUTS,  /* IRIX5 32-bit */
    PER_IRIXN32     = 0x000a | STICKY_TIMEOUTS,  /* IRIX6 new 32-bit */
    PER_IRIX64      = 0x000b | STICKY_TIMEOUTS,  /* IRIX6 64-bit */
    PER_SOLARIS     = 0x000d | STICKY_TIMEOUTS,
    PER_UW7         = 0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
    PER_OSF4        = 0x000f,                    /* OSF/1 v4 */
/*
 * Return the base personality without flags.
 */
#define personality(pers) (pers & PER_MASK)
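/*
 * Illustrative note (not part of the original source): personality() keeps
 * only the low-byte personality number and strips the bug-emulation flags
 * held in the upper bytes.  Assuming PER_MASK is 0x00ff as in the kernel,
 * a value built as PER_SVR4 (0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO)
 * masks back down to 0x0001:
 *
 *     assert(personality(PER_SVR4) == 0x0001);
 *     assert(personality(PER_UW7)  == 0x000e);
 */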
/* this flag is ineffective under Linux too, so it should be deleted */
#define MAP_DENYWRITE 0
/* should probably go in elf.h */

#define ELF_PLATFORM get_elf_platform()

static const char *get_elf_platform(void)
{
    static char elf_platform[] = "i386";
    int family = (global_env->cpuid_version >> 8) & 0xff;

    elf_platform[1] = '0' + family;
#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    return global_env->cpuid_features;
#define ELF_START_MMAP 0x2aaaaab000ULL

#define elf_check_arch(x) ( ((x) == ELF_ARCH) )

#define ELF_CLASS      ELFCLASS64
#define ELF_DATA       ELFDATA2LSB
#define ELF_ARCH       EM_X86_64
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->rsp = infop->start_stack;
    regs->rip = infop->entry;
#define ELF_START_MMAP 0x80000000

/*
 * This is used to ensure we don't load something for the wrong architecture.
 */
#define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )

/*
 * These are used to set parameters in the core dumps.
 */
#define ELF_CLASS      ELFCLASS32
#define ELF_DATA       ELFDATA2LSB
#define ELF_ARCH       EM_386
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->esp = infop->start_stack;
    regs->eip = infop->entry;
    /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
       starts %edx contains a pointer to a function which might be
       registered using `atexit'.  This provides a means for the
       dynamic linker to call DT_FINI functions for shared libraries
       that have been loaded before the code runs.

       A value of 0 tells us we have no such handler. */
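    /*
     * Illustrative sketch (not part of the original code): a guest C runtime
     * that honours this convention would do roughly the following in its
     * startup path before calling main(), where rtld_fini stands for the
     * value it received in %edx:
     *
     *     if (rtld_fini != NULL)
     *         atexit(rtld_fini);
     *
     * Leaving %edx at 0 therefore just means "no finalizer to register".
     */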
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_ARM )

#define ELF_CLASS      ELFCLASS32
#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA       ELFDATA2MSB
#else
#define ELF_DATA       ELFDATA2LSB
#endif
#define ELF_ARCH       EM_ARM
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    abi_long stack = infop->start_stack;

    memset(regs, 0, sizeof(*regs));
    regs->ARM_cpsr = 0x10;
    if (infop->entry & 1)
        regs->ARM_cpsr |= CPSR_T;
    regs->ARM_pc = infop->entry & 0xfffffffe;
    regs->ARM_sp = infop->start_stack;
    /* FIXME - what to do for failure of get_user()? */
    get_user_ual(regs->ARM_r2, stack + 8); /* envp */
    get_user_ual(regs->ARM_r1, stack + 4); /* argv */
    /* XXX: it seems that r0 is zeroed afterwards! */
    /* For uClinux PIC binaries.  */
    /* XXX: Linux does this only on ARM with no MMU (do we care?) */
    regs->ARM_r10 = infop->start_data;
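    /*
     * Illustrative note (not part of the original code): an ARM ELF entry
     * address with bit 0 set denotes Thumb code, which is why the code above
     * sets the T bit in CPSR and masks the low bit off before using the
     * address as the initial PC.  A minimal sketch of the same test:
     *
     *     if (entry & 1)
     *         cpsr |= CPSR_T;              // start in Thumb state
     *     pc = entry & ~(abi_ulong)1;      // PC itself must be halfword aligned
     *
     * where entry, cpsr and pc stand for the fields used above.
     */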
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
    ARM_HWCAP_ARM_SWP       = 1 << 0,
    ARM_HWCAP_ARM_HALF      = 1 << 1,
    ARM_HWCAP_ARM_THUMB     = 1 << 2,
    ARM_HWCAP_ARM_26BIT     = 1 << 3,
    ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
    ARM_HWCAP_ARM_FPA       = 1 << 5,
    ARM_HWCAP_ARM_VFP       = 1 << 6,
    ARM_HWCAP_ARM_EDSP      = 1 << 7,

#define ELF_HWCAP (ARM_HWCAP_ARM_SWP | ARM_HWCAP_ARM_HALF              \
                   | ARM_HWCAP_ARM_THUMB | ARM_HWCAP_ARM_FAST_MULT     \
                   | ARM_HWCAP_ARM_FPA | ARM_HWCAP_ARM_VFP)
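/*
 * Illustrative note (not part of the original code): ELF_HWCAP is what ends
 * up in the AT_HWCAP auxiliary vector entry built by create_elf_tables()
 * further down, so the guest C library can probe it (e.g. to decide whether
 * half-word loads, Thumb or VFP are available) without trapping on unknown
 * instructions.
 */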
#ifdef TARGET_SPARC64

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
#define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )

#define ELF_CLASS  ELFCLASS64
#define ELF_DATA   ELFDATA2MSB
#define ELF_ARCH   EM_SPARCV9

#define STACK_BIAS 2047
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
    regs->u_regs[14] = infop->start_stack - 16 * 4;
    if (personality(infop->personality) == PER_LINUX32)
        regs->u_regs[14] = infop->start_stack - 16 * 4;
    else
        regs->u_regs[14] = infop->start_stack - 16 * 8 - STACK_BIAS;
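    /*
     * Illustrative note (not part of the original code): the 64-bit SPARC
     * ABI keeps the stack pointer biased by 2047 bytes (STACK_BIAS above),
     * so the usable top of stack is u_regs[14] + STACK_BIAS.  The 16 * 8
     * term reserves the 16 register-window save slots of 8 bytes each,
     * mirroring the 16 * 4 bytes reserved in the 32-bit case.
     */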
#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_SPARC )

#define ELF_CLASS  ELFCLASS32
#define ELF_DATA   ELFDATA2MSB
#define ELF_ARCH   EM_SPARC

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
    regs->u_regs[14] = infop->start_stack - 16 * 4;
#define ELF_START_MMAP 0x80000000

#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)

#define elf_check_arch(x) ( (x) == EM_PPC64 )

#define ELF_CLASS  ELFCLASS64

#else

#define elf_check_arch(x) ( (x) == EM_PPC )

#define ELF_CLASS  ELFCLASS32

#endif

#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA   ELFDATA2MSB
#else
#define ELF_DATA   ELFDATA2LSB
#endif
#define ELF_ARCH   EM_PPC

/*
 * We need to put in some extra aux table entries to tell glibc what
 * the cache block size is, so it can use the dcbz instruction safely.
 */
#define AT_DCACHEBSIZE 19
#define AT_ICACHEBSIZE 20
#define AT_UCACHEBSIZE 21
/* A special ignored type value for PPC, for glibc compatibility.  */
#define AT_IGNOREPPC   22
/*
 * The requirements here are:
 * - keep the final alignment of sp (sp & 0xf)
 * - make sure the 32-bit value at the first 16 byte aligned position of
 *   AUXV is greater than 16 for glibc compatibility.
 *   AT_IGNOREPPC is used for that.
 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
 *   even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
 */
#define DLINFO_ARCH_ITEMS 5
#define ARCH_DLINFO                                     \
        NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20);              \
        NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20);              \
        NEW_AUX_ENT(AT_UCACHEBSIZE, 0);                 \
        /* Now handle glibc compatibility.  */          \
        NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);        \
        NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);
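/*
 * Illustrative note (not part of the original source): each NEW_AUX_ENT()
 * invocation pushes an (id, value) pair of abi_ulongs onto the guest stack,
 * so the five entries above account for DLINFO_ARCH_ITEMS * 2 words in the
 * sizing done by create_elf_tables().  Doubling the AT_IGNOREPPC entry
 * (id == value == 22) guarantees that whatever word lands on the first
 * 16-byte aligned position of the AUXV is greater than 16, which is exactly
 * the glibc compatibility requirement described in the comment above.
 */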
static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
{
    abi_ulong pos = infop->start_stack;
#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
    abi_ulong entry, toc;
#endif

    _regs->gpr[1] = infop->start_stack;
#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
    entry = ldq_raw(infop->entry) + infop->load_addr;
    toc = ldq_raw(infop->entry + 8) + infop->load_addr;
    infop->entry = entry;
#endif
    _regs->nip = infop->entry;
    /* Note that this isn't exactly what the regular kernel does,
     * but it is what the ABI wants and is needed to allow
     * execution of PPC BSD programs.  */
    /* FIXME - what to do for failure of get_user()? */
    get_user_ual(_regs->gpr[3], pos);
    pos += sizeof(abi_ulong);
    for (tmp = 1; tmp != 0; pos += sizeof(abi_ulong))
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_MIPS )

#define ELF_CLASS  ELFCLASS64
#define ELF_CLASS  ELFCLASS32

#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA   ELFDATA2MSB
#else
#define ELF_DATA   ELFDATA2LSB
#endif
#define ELF_ARCH   EM_MIPS
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->cp0_status = 2 << CP0St_KSU;
    regs->cp0_epc = infop->entry;
    regs->regs[29] = infop->start_stack;
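    /*
     * Illustrative note (not part of the original code): setting the CP0
     * Status KSU field to 2 puts the new thread in user mode, cp0_epc is the
     * address execution resumes at, and general register 29 is the MIPS ABI
     * stack pointer.
     */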
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096

#endif /* TARGET_MIPS */
#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_SH )

#define ELF_CLASS  ELFCLASS32
#define ELF_DATA   ELFDATA2LSB
#define ELF_ARCH   EM_SH
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    /* Check other registers XXXXX */
    regs->pc = infop->entry;
    regs->regs[15] = infop->start_stack;
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_CRIS )

#define ELF_CLASS  ELFCLASS32
#define ELF_DATA   ELFDATA2LSB
#define ELF_ARCH   EM_CRIS
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->erp = infop->entry;
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 8192
#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_68K )

#define ELF_CLASS  ELFCLASS32
#define ELF_DATA   ELFDATA2MSB
#define ELF_ARCH   EM_68K

/* ??? Does this need to do anything?
   #define ELF_PLAT_INIT(_r) */
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->usp = infop->start_stack;
    regs->pc = infop->entry;
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 8192
#define ELF_START_MMAP (0x30000000000ULL)

#define elf_check_arch(x) ( (x) == ELF_ARCH )

#define ELF_CLASS  ELFCLASS64
#define ELF_DATA   ELFDATA2MSB
#define ELF_ARCH   EM_ALPHA
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->usp = infop->start_stack;
    regs->unique = infop->start_data; /* ? */
    printf("Set unique value to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n",
           regs->unique, infop->start_data);
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 8192

#endif /* TARGET_ALPHA */
#define ELF_PLATFORM (NULL)

#define ELF_CLASS ELFCLASS32

#define bswaptls(ptr) bswap32s(ptr)
    unsigned int a_info;   /* Use macros N_MAGIC, etc for access */
    unsigned int a_text;   /* length of text, in bytes */
    unsigned int a_data;   /* length of data, in bytes */
    unsigned int a_bss;    /* length of uninitialized data area, in bytes */
    unsigned int a_syms;   /* length of symbol table data in file, in bytes */
    unsigned int a_entry;  /* start address */
    unsigned int a_trsize; /* length of relocation info for text, in bytes */
    unsigned int a_drsize; /* length of relocation info for data, in bytes */

#define N_MAGIC(exec) ((exec).a_info & 0xffff)
/* max code+data+bss space allocated to elf interpreter */
#define INTERP_MAP_SIZE (32 * 1024 * 1024)

/* max code+data+bss+brk space allocated to ET_DYN executables */
#define ET_DYN_MAP_SIZE (128 * 1024 * 1024)

/* Necessary parameters */
#define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
#define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
#define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
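/*
 * Illustrative note (not part of the original code): these two macros simply
 * split a guest virtual address into its page-aligned base and the offset
 * within that page.  For a 4096-byte TARGET_ELF_EXEC_PAGESIZE:
 *
 *     TARGET_ELF_PAGESTART(0x0804a123)  == 0x0804a000
 *     TARGET_ELF_PAGEOFFSET(0x0804a123) == 0x123
 *
 * and for any address v,
 *     TARGET_ELF_PAGESTART(v) + TARGET_ELF_PAGEOFFSET(v) == v.
 */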
#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
#define INTERPRETER_ELF  2

#define DLINFO_ITEMS 12
static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
extern unsigned long x86_stack_size;

static int load_aout_interp(void * exptr, int interp_fd);
static void bswap_ehdr(struct elfhdr *ehdr)
{
    bswap16s(&ehdr->e_type);            /* Object file type */
    bswap16s(&ehdr->e_machine);         /* Architecture */
    bswap32s(&ehdr->e_version);         /* Object file version */
    bswaptls(&ehdr->e_entry);           /* Entry point virtual address */
    bswaptls(&ehdr->e_phoff);           /* Program header table file offset */
    bswaptls(&ehdr->e_shoff);           /* Section header table file offset */
    bswap32s(&ehdr->e_flags);           /* Processor-specific flags */
    bswap16s(&ehdr->e_ehsize);          /* ELF header size in bytes */
    bswap16s(&ehdr->e_phentsize);       /* Program header table entry size */
    bswap16s(&ehdr->e_phnum);           /* Program header table entry count */
    bswap16s(&ehdr->e_shentsize);       /* Section header table entry size */
    bswap16s(&ehdr->e_shnum);           /* Section header table entry count */
    bswap16s(&ehdr->e_shstrndx);        /* Section header string table index */
static void bswap_phdr(struct elf_phdr *phdr)
{
    bswap32s(&phdr->p_type);            /* Segment type */
    bswaptls(&phdr->p_offset);          /* Segment file offset */
    bswaptls(&phdr->p_vaddr);           /* Segment virtual address */
    bswaptls(&phdr->p_paddr);           /* Segment physical address */
    bswaptls(&phdr->p_filesz);          /* Segment size in file */
    bswaptls(&phdr->p_memsz);           /* Segment size in memory */
    bswap32s(&phdr->p_flags);           /* Segment flags */
    bswaptls(&phdr->p_align);           /* Segment alignment */
static void bswap_shdr(struct elf_shdr *shdr)
{
    bswap32s(&shdr->sh_name);
    bswap32s(&shdr->sh_type);
    bswaptls(&shdr->sh_flags);
    bswaptls(&shdr->sh_addr);
    bswaptls(&shdr->sh_offset);
    bswaptls(&shdr->sh_size);
    bswap32s(&shdr->sh_link);
    bswap32s(&shdr->sh_info);
    bswaptls(&shdr->sh_addralign);
    bswaptls(&shdr->sh_entsize);
static void bswap_sym(struct elf_sym *sym)
{
    bswap32s(&sym->st_name);
    bswaptls(&sym->st_value);
    bswaptls(&sym->st_size);
    bswap16s(&sym->st_shndx);
/*
 * 'copy_elf_strings()' copies argument/environment strings from user
 * memory to free pages in kernel mem.  These are in a format ready
 * to be put directly into the top of new user memory.
 */
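/*
 * Illustrative note (not part of the original code): p is a byte offset that
 * starts at the top of the argument area and only ever moves downwards, so
 * each string is copied back to front, from its terminating NUL towards its
 * first character, into whatever page p currently falls in.  Pages are
 * allocated lazily the first time p crosses into them (the
 * malloc(TARGET_PAGE_SIZE) below).
 */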
static abi_ulong copy_elf_strings(int argc, char **argv, void **page,
    char *tmp, *tmp1, *pag = NULL;

        return 0;       /* bullet-proofing */

            fprintf(stderr, "VFS: argc is wrong");

        if (p < len) {  /* this shouldn't happen - 128kB */

            offset = p % TARGET_PAGE_SIZE;
            pag = (char *)page[p/TARGET_PAGE_SIZE];
                pag = (char *)malloc(TARGET_PAGE_SIZE);
                memset(pag, 0, TARGET_PAGE_SIZE);
                page[p/TARGET_PAGE_SIZE] = pag;
            if (len == 0 || offset == 0) {
                *(pag + offset) = *tmp;
                int bytes_to_copy = (len > offset) ? offset : len;
                tmp -= bytes_to_copy;
                offset -= bytes_to_copy;
                len -= bytes_to_copy;
                memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
static abi_ulong setup_arg_pages(abi_ulong p, struct linux_binprm *bprm,
                                 struct image_info *info)
{
    abi_ulong stack_base, size, error;

    /* Create enough stack to hold everything.  If we don't use
     * it for args, we'll use it for something else...
     */
    size = x86_stack_size;
    if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE)
        size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
    error = target_mmap(0,
                        size + qemu_host_page_size,
                        PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS,

    /* we reserve one extra page at the top of the stack as guard */
    target_mprotect(error + size, qemu_host_page_size, PROT_NONE);

    stack_base = error + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;

    for (i = 0; i < MAX_ARG_PAGES; i++) {
        /* FIXME - check return value of memcpy_to_target() for failure */
        memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE);
        stack_base += TARGET_PAGE_SIZE;
static void set_brk(abi_ulong start, abi_ulong end)
{
    /* page-align the start and end addresses... */
    start = HOST_PAGE_ALIGN(start);
    end = HOST_PAGE_ALIGN(end);

    if (target_mmap(start, end - start,
                    PROT_READ | PROT_WRITE | PROT_EXEC,
                    MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0) == -1) {
        perror("cannot mmap brk");
/* We need to explicitly zero any fractional pages after the data
   section (i.e. bss).  This would contain the junk from the file that
   should not be in memory. */
static void padzero(abi_ulong elf_bss, abi_ulong last_bss)
{
    if (elf_bss >= last_bss)
        return;

    /* XXX: this is really a hack: if the real host page size is
       smaller than the target page size, some pages after the end
       of the file may not be mapped.  A better fix would be to
       patch target_mmap(), but it is more complicated as the file
       size must be known. */
    if (qemu_real_host_page_size < qemu_host_page_size) {
        abi_ulong end_addr, end_addr1;
        end_addr1 = (elf_bss + qemu_real_host_page_size - 1) &
            ~(qemu_real_host_page_size - 1);
        end_addr = HOST_PAGE_ALIGN(elf_bss);
        if (end_addr1 < end_addr) {
            mmap((void *)g2h(end_addr1), end_addr - end_addr1,
                 PROT_READ|PROT_WRITE|PROT_EXEC,
                 MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
        }
    }

    nbyte = elf_bss & (qemu_host_page_size-1);
        nbyte = qemu_host_page_size - nbyte;
        /* FIXME - what to do if put_user() fails? */
        put_user_u8(0, elf_bss);
static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
                                   struct elfhdr * exec,
                                   abi_ulong interp_load_addr, int ibcs,
                                   struct image_info *info)
{
    abi_ulong u_platform;
    const char *k_platform;
    const int n = sizeof(elf_addr_t);

    k_platform = ELF_PLATFORM;
        size_t len = strlen(k_platform) + 1;
        sp -= (len + n - 1) & ~(n - 1);
        /* FIXME - check return value of memcpy_to_target() for failure */
        memcpy_to_target(sp, k_platform, len);

    /*
     * Force 16 byte _final_ alignment here for generality.
     */
    sp = sp &~ (abi_ulong)15;
    size = (DLINFO_ITEMS + 1) * 2;
#ifdef DLINFO_ARCH_ITEMS
    size += DLINFO_ARCH_ITEMS * 2;
#endif
    size += envc + argc + 2;
    size += (!ibcs ? 3 : 1);    /* argc itself */
    sp -= 16 - (size & 15);

    /* This is correct because Linux defines
     * elf_addr_t as Elf32_Off / Elf64_Off
     */
#define NEW_AUX_ENT(id, val) do {               \
        sp -= n; put_user_ual(val, sp);         \
        sp -= n; put_user_ual(id, sp);          \
    } while (0)

    NEW_AUX_ENT(AT_NULL, 0);

    /* There must be exactly DLINFO_ITEMS entries here.  */
    NEW_AUX_ENT(AT_PHDR, (abi_ulong)(load_addr + exec->e_phoff));
    NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
    NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
    NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
    NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_load_addr));
    NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
    NEW_AUX_ENT(AT_ENTRY, load_bias + exec->e_entry);
    NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
    NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
    NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
    NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
    NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
    NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK));
    NEW_AUX_ENT(AT_PLATFORM, u_platform);
    /*
     * ARCH_DLINFO must come last so platform specific code can enforce
     * special alignment requirements on the AUXV if necessary (eg. PPC).
     */

    sp = loader_build_argptr(envc, argc, sp, p, !ibcs);
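    /*
     * Illustrative note (not part of the original code): what ends up on the
     * guest stack is the standard SysV/Linux process start-up layout, from
     * the final sp upwards: argc, the argv[] pointer array, a NULL
     * terminator, the envp[] pointer array, another NULL, then the auxiliary
     * vector of (id, value) pairs built above and terminated by AT_NULL.
     * loader_build_argptr() fills in the argc/argv/envp part below the AUXV
     * that NEW_AUX_ENT() has already written.
     */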
static abi_ulong load_elf_interp(struct elfhdr * interp_elf_ex,
                                 abi_ulong *interp_load_addr)
{
    struct elf_phdr *elf_phdata = NULL;
    struct elf_phdr *eppnt;
    abi_ulong load_addr = 0;
    int load_addr_set = 0;
    abi_ulong last_bss, elf_bss;

    bswap_ehdr(interp_elf_ex);

    /* First of all, some simple consistency checks */
    if ((interp_elf_ex->e_type != ET_EXEC &&
         interp_elf_ex->e_type != ET_DYN) ||
        !elf_check_arch(interp_elf_ex->e_machine)) {
        return ~((abi_ulong)0UL);
    }

    /* Now read in all of the header information */

    if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE)
        return ~(abi_ulong)0UL;

    elf_phdata = (struct elf_phdr *)
        malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
        return ~((abi_ulong)0UL);

    /*
     * If the size of this structure has changed, then punt, since
     * we will be doing the wrong thing.
     */
    if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
        return ~((abi_ulong)0UL);
    }

    retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);
    retval = read(interpreter_fd,
                  sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
        perror("load_elf_interp");

    for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {

    if (interp_elf_ex->e_type == ET_DYN) {
        /* in order to avoid hardcoding the interpreter load
           address in qemu, we allocate a big enough memory zone */
        error = target_mmap(0, INTERP_MAP_SIZE,
                            PROT_NONE, MAP_PRIVATE | MAP_ANON,

    for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++)
        if (eppnt->p_type == PT_LOAD) {
            int elf_type = MAP_PRIVATE | MAP_DENYWRITE;

            if (eppnt->p_flags & PF_R) elf_prot =  PROT_READ;
            if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
            if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
            if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) {
                elf_type |= MAP_FIXED;
                vaddr = eppnt->p_vaddr;
            }
            error = target_mmap(load_addr+TARGET_ELF_PAGESTART(vaddr),
                                eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr),
                                eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr));
                close(interpreter_fd);
                return ~((abi_ulong)0UL);

            if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {

            /*
             * Find the end of the file mapping for this phdr, and keep
             * track of the largest address we see for this.
             */
            k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
            if (k > elf_bss) elf_bss = k;

            /*
             * Do the same thing for the memory mapping - between
             * elf_bss and last_bss is the bss section.
             */
            k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
            if (k > last_bss) last_bss = k;
        }

    /* Now use mmap to map the library into memory. */

    close(interpreter_fd);
    /*
     * Now fill out the bss section.  First pad the last page up
     * to the page boundary, and then perform a mmap to make sure
     * that there are zeromapped pages up to and including the last
     * bss page.
     */
    padzero(elf_bss, last_bss);
    elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */

    /* Map the last of the bss segment */
    if (last_bss > elf_bss) {
        target_mmap(elf_bss, last_bss-elf_bss,
                    PROT_READ|PROT_WRITE|PROT_EXEC,
                    MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    }

    *interp_load_addr = load_addr;
    return ((abi_ulong) interp_elf_ex->e_entry) + load_addr;
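/*
 * Illustrative note (not part of the original code): for an ET_DYN
 * interpreter the program header addresses are relative to whatever base the
 * big PROT_NONE reservation above happened to land at, so both the bss
 * bookkeeping and the returned entry point have load_addr added in; an
 * ET_EXEC interpreter keeps load_addr at 0 and runs at its linked addresses.
 */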
/* Best attempt to load symbols from this ELF object. */
static void load_symbols(struct elfhdr *hdr, int fd)
{
    struct elf_shdr sechdr, symtab, strtab;
#if (ELF_CLASS == ELFCLASS64)
    // Disas uses 32 bit symbols
    struct elf32_sym *syms32 = NULL;
#endif

    lseek(fd, hdr->e_shoff, SEEK_SET);
    for (i = 0; i < hdr->e_shnum; i++) {
        if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr))
        if (sechdr.sh_type == SHT_SYMTAB) {
            lseek(fd, hdr->e_shoff
                  + sizeof(sechdr) * sechdr.sh_link, SEEK_SET);
            if (read(fd, &strtab, sizeof(strtab))
            bswap_shdr(&strtab);

        return; /* Shouldn't happen... */
    /* Now we know where the strtab and symtab are.  Snarf them. */
    s = malloc(sizeof(*s));
    s->disas_symtab = malloc(symtab.sh_size);
#if (ELF_CLASS == ELFCLASS64)
    syms32 = malloc(symtab.sh_size / sizeof(struct elf_sym)
                    * sizeof(struct elf32_sym));
#endif
    s->disas_strtab = strings = malloc(strtab.sh_size);
    if (!s->disas_symtab || !s->disas_strtab)

    lseek(fd, symtab.sh_offset, SEEK_SET);
    if (read(fd, s->disas_symtab, symtab.sh_size) != symtab.sh_size)

    for (i = 0; i < symtab.sh_size / sizeof(struct elf_sym); i++) {
        bswap_sym(s->disas_symtab + sizeof(struct elf_sym)*i);
#if (ELF_CLASS == ELFCLASS64)
        sym = s->disas_symtab + sizeof(struct elf_sym)*i;
        syms32[i].st_name  = sym->st_name;
        syms32[i].st_info  = sym->st_info;
        syms32[i].st_other = sym->st_other;
        syms32[i].st_shndx = sym->st_shndx;
        syms32[i].st_value = sym->st_value & 0xffffffff;
        syms32[i].st_size  = sym->st_size & 0xffffffff;
#endif
    }

#if (ELF_CLASS == ELFCLASS64)
    free(s->disas_symtab);
    s->disas_symtab = syms32;
#endif
    lseek(fd, strtab.sh_offset, SEEK_SET);
    if (read(fd, strings, strtab.sh_size) != strtab.sh_size)

    s->disas_num_syms = symtab.sh_size / sizeof(struct elf_sym);
int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
                    struct image_info * info)
{
    struct elfhdr elf_ex;
    struct elfhdr interp_elf_ex;
    struct exec interp_ex;
    int interpreter_fd = -1;   /* avoid warning */
    abi_ulong load_addr, load_bias;
    int load_addr_set = 0;
    unsigned int interpreter_type = INTERPRETER_NONE;
    unsigned char ibcs2_interpreter;
    abi_ulong mapped_addr;
    struct elf_phdr * elf_ppnt;
    struct elf_phdr *elf_phdata;
    abi_ulong elf_bss, k, elf_brk;
    char * elf_interpreter;
    abi_ulong elf_entry, interp_load_addr = 0;
    abi_ulong start_code, end_code, start_data, end_data;
    abi_ulong reloc_func_desc = 0;
    abi_ulong elf_stack;
    char passed_fileno[6];

    ibcs2_interpreter = 0;

    elf_ex = *((struct elfhdr *) bprm->buf);    /* exec-header */
    bswap_ehdr(&elf_ex);

    /* First of all, some simple consistency checks */
    if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
        (! elf_check_arch(elf_ex.e_machine))) {

    bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
    bprm->p = copy_elf_strings(bprm->envc, bprm->envp, bprm->page, bprm->p);
    bprm->p = copy_elf_strings(bprm->argc, bprm->argv, bprm->page, bprm->p);

    /* Now read in all of the header information */
    elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize * elf_ex.e_phnum);
    if (elf_phdata == NULL) {

    retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET);
    retval = read(bprm->fd, (char *) elf_phdata,
                  elf_ex.e_phentsize * elf_ex.e_phnum);
        perror("load_elf_binary");

    elf_ppnt = elf_phdata;
    for (i = 0; i < elf_ex.e_phnum; i++, elf_ppnt++) {
        bswap_phdr(elf_ppnt);
    }
    elf_ppnt = elf_phdata;

    elf_stack = ~((abi_ulong)0UL);
    elf_interpreter = NULL;
    start_code = ~((abi_ulong)0UL);
    for (i = 0; i < elf_ex.e_phnum; i++) {
        if (elf_ppnt->p_type == PT_INTERP) {
            if (elf_interpreter != NULL)
                free(elf_interpreter);

            /* This is the program interpreter used for
             * shared libraries - for now assume that this
             * is an a.out format binary. */

            elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);

            if (elf_interpreter == NULL) {

            retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET);
            retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz);
                perror("load_elf_binary2");

            /* If the program interpreter is one of these two,
               then assume an iBCS2 image.  Otherwise assume
               a native linux image. */

            /* JRP - Need to add X86 lib dir stuff here... */

            if (strcmp(elf_interpreter, "/usr/lib/libc.so.1") == 0 ||
                strcmp(elf_interpreter, "/usr/lib/ld.so.1") == 0) {
                ibcs2_interpreter = 1;
            }

            printf("Using ELF interpreter %s\n", elf_interpreter);

            retval = open(path(elf_interpreter), O_RDONLY);
                interpreter_fd = retval;
                perror(elf_interpreter);
                /* retval = -errno; */
    retval = lseek(interpreter_fd, 0, SEEK_SET);
    retval = read(interpreter_fd, bprm->buf, 128);

    interp_ex = *((struct exec *) bprm->buf);       /* aout exec-header */
    interp_elf_ex = *((struct elfhdr *) bprm->buf); /* elf exec-header */
        perror("load_elf_binary3");
        free(elf_interpreter);

    /* Some simple consistency checks for the interpreter */
    if (elf_interpreter) {
        interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;

        /* Now figure out which format our binary is */
        if ((N_MAGIC(interp_ex) != OMAGIC) && (N_MAGIC(interp_ex) != ZMAGIC) &&
            (N_MAGIC(interp_ex) != QMAGIC)) {
            interpreter_type = INTERPRETER_ELF;
        }

        if (interp_elf_ex.e_ident[0] != 0x7f ||
            strncmp(&interp_elf_ex.e_ident[1], "ELF", 3) != 0) {
            interpreter_type &= ~INTERPRETER_ELF;
        }

        if (!interpreter_type) {
            free(elf_interpreter);
        }
    }

    /* OK, we are done with that, now set up the arg stuff,
       and then start this sucker up */

    if (interpreter_type == INTERPRETER_AOUT) {
        snprintf(passed_fileno, sizeof(passed_fileno), "%d", bprm->fd);
        passed_p = passed_fileno;

        if (elf_interpreter) {
            bprm->p = copy_elf_strings(1, &passed_p, bprm->page, bprm->p);
        }
    }
    if (elf_interpreter) {
        free(elf_interpreter);
    }
    /* OK, This is the point of no return */

    info->start_mmap = (abi_ulong)ELF_START_MMAP;

    elf_entry = (abi_ulong) elf_ex.e_entry;

    /* Do this so that we can load the interpreter, if need be.  We will
       change some of these later */
    bprm->p = setup_arg_pages(bprm->p, bprm, info);
    info->start_stack = bprm->p;

    /* Now we do a little grungy work by mmaping the ELF image into
     * the correct location in memory.  At this point, we assume that
     * the image should be loaded at a fixed address, not at a variable
     * address.
     */
    for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
        if (elf_ppnt->p_type != PT_LOAD)

        if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
        if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
        if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
        elf_flags = MAP_PRIVATE | MAP_DENYWRITE;
        if (elf_ex.e_type == ET_EXEC || load_addr_set) {
            elf_flags |= MAP_FIXED;
        } else if (elf_ex.e_type == ET_DYN) {
            /* Try and get dynamic programs out of the way of the default mmap
               base, as well as whatever program they might try to exec.  This
               is because the brk will follow the loader, and is not movable. */
            /* NOTE: for qemu, we do a big mmap to get enough space
               without hardcoding any address */
            error = target_mmap(0, ET_DYN_MAP_SIZE,
                                PROT_NONE, MAP_PRIVATE | MAP_ANON,
            load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr);

        error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
                            (elf_ppnt->p_filesz +
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
                            (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
                            (elf_ppnt->p_offset -
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
#ifdef LOW_ELF_STACK
        if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
            elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr);
#endif

        if (!load_addr_set) {
            load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
            if (elf_ex.e_type == ET_DYN) {
                load_bias += error -
                    TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
                load_addr += load_bias;
                reloc_func_desc = load_bias;
            }
        }
        k = elf_ppnt->p_vaddr;
        k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
        if ((elf_ppnt->p_flags & PF_X) && end_code < k)
            end_code = k;
        k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
        if (k > elf_brk) elf_brk = k;
    elf_entry += load_bias;
    elf_bss += load_bias;
    elf_brk += load_bias;
    start_code += load_bias;
    end_code += load_bias;
    start_data += load_bias;
    end_data += load_bias;

    if (elf_interpreter) {
        if (interpreter_type & 1) {
            elf_entry = load_aout_interp(&interp_ex, interpreter_fd);
        }
        else if (interpreter_type & 2) {
            elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,

            reloc_func_desc = interp_load_addr;

        close(interpreter_fd);
        free(elf_interpreter);

        if (elf_entry == ~((abi_ulong)0UL)) {
            printf("Unable to load interpreter\n");
    load_symbols(&elf_ex, bprm->fd);

    if (interpreter_type != INTERPRETER_AOUT) close(bprm->fd);
    info->personality = (ibcs2_interpreter ? PER_SVR4 : PER_LINUX);

#ifdef LOW_ELF_STACK
    info->start_stack = bprm->p = elf_stack - 4;
#endif
    bprm->p = create_elf_tables(bprm->p,
                                load_addr, load_bias,
                                (interpreter_type == INTERPRETER_AOUT ? 0 : 1),
    info->load_addr = reloc_func_desc;
    info->start_brk = info->brk = elf_brk;
    info->end_code = end_code;
    info->start_code = start_code;
    info->start_data = start_data;
    info->end_data = end_data;
    info->start_stack = bprm->p;
    /* Calling set_brk effectively mmaps the pages that we need for the bss
       and break area. */
    set_brk(elf_bss, elf_brk);

    padzero(elf_bss, elf_brk);
    printf("(start_brk) %x\n", info->start_brk);
    printf("(end_code) %x\n", info->end_code);
    printf("(start_code) %x\n", info->start_code);
    printf("(end_data) %x\n", info->end_data);
    printf("(start_stack) %x\n", info->start_stack);
    printf("(brk) %x\n", info->brk);
    if (info->personality == PER_SVR4) {
        /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
           and some applications "depend" upon this behavior.
           Since we do not have the power to recompile these, we
           emulate the SVr4 behavior.  Sigh. */
        mapped_addr = target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
                                  MAP_FIXED | MAP_PRIVATE, -1, 0);
    }

    info->entry = elf_entry;
static int load_aout_interp(void * exptr, int interp_fd)
{
    printf("a.out interpreter not yet supported\n");
void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    init_thread(regs, infop);