/* This is the Linux kernel elf-loading code, ported into user space */

/* from personality.h */

/*
 * Flags for bug emulation.
 *
 * These occupy the top three bytes.
 */
    ADDR_NO_RANDOMIZE  = 0x0040000,  /* disable randomization of VA space */
    FDPIC_FUNCPTRS     = 0x0080000,  /* userspace function ptrs point to descriptors */
    MMAP_PAGE_ZERO     = 0x0100000,
    ADDR_COMPAT_LAYOUT = 0x0200000,
    READ_IMPLIES_EXEC  = 0x0400000,
    ADDR_LIMIT_32BIT   = 0x0800000,
    SHORT_INODE        = 0x1000000,
    WHOLE_SECONDS      = 0x2000000,
    STICKY_TIMEOUTS    = 0x4000000,
    ADDR_LIMIT_3GB     = 0x8000000,
/*
 * These go in the low byte.  Avoid using the top bit, it will
 * conflict with error returns.
 */
    PER_LINUX_32BIT  = 0x0000 | ADDR_LIMIT_32BIT,
    PER_LINUX_FDPIC  = 0x0000 | FDPIC_FUNCPTRS,
    PER_SVR4         = 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
    PER_SVR3         = 0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_SCOSVR3      = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS | SHORT_INODE,
    PER_OSR5         = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
    PER_WYSEV386     = 0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_ISCR4        = 0x0005 | STICKY_TIMEOUTS,
    PER_SUNOS        = 0x0006 | STICKY_TIMEOUTS,
    PER_XENIX        = 0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_LINUX32_3GB  = 0x0008 | ADDR_LIMIT_3GB,
    PER_IRIX32       = 0x0009 | STICKY_TIMEOUTS,   /* IRIX5 32-bit */
    PER_IRIXN32      = 0x000a | STICKY_TIMEOUTS,   /* IRIX6 new 32-bit */
    PER_IRIX64       = 0x000b | STICKY_TIMEOUTS,   /* IRIX6 64-bit */
    PER_SOLARIS      = 0x000d | STICKY_TIMEOUTS,
    PER_UW7          = 0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
    PER_OSF4         = 0x000f,                     /* OSF/1 v4 */
/*
 * Return the base personality without flags.
 */
#define personality(pers)       (pers & PER_MASK)
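/*
 * Illustration (assuming PER_MASK covers the low byte, 0x00ff, as in the
 * kernel's personality.h): PER_SVR4 above is 0x0001 | STICKY_TIMEOUTS |
 * MMAP_PAGE_ZERO, so personality(PER_SVR4) strips the bug-emulation flags
 * and yields the bare identifier 0x0001.
 */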
/* This flag has no effect under Linux either; it should be deleted. */
#define MAP_DENYWRITE 0
/* should probably go in elf.h */

#define ELF_PLATFORM get_elf_platform()

static const char *get_elf_platform(void)
{
    static char elf_platform[] = "i386";
    int family = (thread_env->cpuid_version >> 8) & 0xff;
    elf_platform[1] = '0' + family;
    return elf_platform;
}

#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    return thread_env->cpuid_features;
}
#define ELF_START_MMAP 0x2aaaaab000ULL

#define elf_check_arch(x) ( ((x) == ELF_ARCH) )

#define ELF_CLASS      ELFCLASS64
#define ELF_DATA       ELFDATA2LSB
#define ELF_ARCH       EM_X86_64

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->rsp = infop->start_stack;
    regs->rip = infop->entry;
}
#define ELF_START_MMAP 0x80000000

/*
 * This is used to ensure we don't load something for the wrong architecture.
 */
#define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )

/*
 * These are used to set parameters in the core dumps.
 */
#define ELF_CLASS       ELFCLASS32
#define ELF_DATA        ELFDATA2LSB
#define ELF_ARCH        EM_386

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->esp = infop->start_stack;
    regs->eip = infop->entry;

    /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
       starts %edx contains a pointer to a function which might be
       registered using `atexit'.  This provides a means for the
       dynamic linker to call DT_FINI functions for shared libraries
       that have been loaded before the code runs.

       A value of 0 tells us we have no such handler.  */
    regs->edx = 0;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096
#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_ARM )

#define ELF_CLASS       ELFCLASS32
#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA        ELFDATA2MSB
#else
#define ELF_DATA        ELFDATA2LSB
#endif
#define ELF_ARCH        EM_ARM

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    abi_long stack = infop->start_stack;
    memset(regs, 0, sizeof(*regs));
    regs->ARM_cpsr = 0x10;
    if (infop->entry & 1)
        regs->ARM_cpsr |= CPSR_T;
    regs->ARM_pc = infop->entry & 0xfffffffe;
    regs->ARM_sp = infop->start_stack;
    /* FIXME - what to do on failure of get_user()? */
    get_user_ual(regs->ARM_r2, stack + 8); /* envp */
    get_user_ual(regs->ARM_r1, stack + 4); /* envp */
    /* XXX: it seems that r0 is zeroed afterwards! */
    /* For uClinux PIC binaries. */
    /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
    regs->ARM_r10 = infop->start_data;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

enum
{
    ARM_HWCAP_ARM_SWP       = 1 << 0,
    ARM_HWCAP_ARM_HALF      = 1 << 1,
    ARM_HWCAP_ARM_THUMB     = 1 << 2,
    ARM_HWCAP_ARM_26BIT     = 1 << 3,
    ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
    ARM_HWCAP_ARM_FPA       = 1 << 5,
    ARM_HWCAP_ARM_VFP       = 1 << 6,
    ARM_HWCAP_ARM_EDSP      = 1 << 7,
};

#define ELF_HWCAP (ARM_HWCAP_ARM_SWP | ARM_HWCAP_ARM_HALF          \
                   | ARM_HWCAP_ARM_THUMB | ARM_HWCAP_ARM_FAST_MULT \
                   | ARM_HWCAP_ARM_FPA | ARM_HWCAP_ARM_VFP)
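/*
 * Note: this constant is what the guest sees in its AT_HWCAP auxv entry;
 * create_elf_tables() further down passes ELF_HWCAP through unchanged, so
 * the emulated libc is told that SWP, half-word, Thumb, fast-multiply, FPA
 * and VFP support are present regardless of the host CPU.
 */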
#ifdef TARGET_SPARC64

#define ELF_START_MMAP 0x80000000

#ifndef TARGET_ABI32
#define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
#else
#define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )
#endif

#define ELF_CLASS   ELFCLASS64
#define ELF_DATA    ELFDATA2MSB
#define ELF_ARCH    EM_SPARCV9

#define STACK_BIAS  2047

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
#ifdef TARGET_ABI32
    regs->u_regs[14] = infop->start_stack - 16 * 4;
#else
    if (personality(infop->personality) == PER_LINUX32)
        regs->u_regs[14] = infop->start_stack - 16 * 4;
    else
        regs->u_regs[14] = infop->start_stack - 16 * 8 - STACK_BIAS;
#endif
}
#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_SPARC )

#define ELF_CLASS   ELFCLASS32
#define ELF_DATA    ELFDATA2MSB
#define ELF_ARCH    EM_SPARC

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
    regs->u_regs[14] = infop->start_stack - 16 * 4;
}
#define ELF_START_MMAP 0x80000000

#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)

#define elf_check_arch(x) ( (x) == EM_PPC64 )

#define ELF_CLASS       ELFCLASS64

#else

#define elf_check_arch(x) ( (x) == EM_PPC )

#define ELF_CLASS       ELFCLASS32

#endif

#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA        ELFDATA2MSB
#else
#define ELF_DATA        ELFDATA2LSB
#endif
#define ELF_ARCH        EM_PPC

/*
 * We need to put in some extra aux table entries to tell glibc what
 * the cache block size is, so it can use the dcbz instruction safely.
 */
#define AT_DCACHEBSIZE          19
#define AT_ICACHEBSIZE          20
#define AT_UCACHEBSIZE          21
/* A special ignored type value for PPC, for glibc compatibility.  */
#define AT_IGNOREPPC            22
/*
 * The requirements here are:
 * - keep the final alignment of sp (sp & 0xf)
 * - make sure the 32-bit value at the first 16 byte aligned position of
 *   AUXV is greater than 16 for glibc compatibility.
 *   AT_IGNOREPPC is used for that.
 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
 *   even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
 */
#define DLINFO_ARCH_ITEMS       5
#define ARCH_DLINFO                                     \
do {                                                    \
    NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20);                  \
    NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20);                  \
    NEW_AUX_ENT(AT_UCACHEBSIZE, 0);                     \
    /*                                                  \
     * Now handle glibc compatibility.                  \
     */                                                 \
    NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);            \
    NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);            \
} while (0)

static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
{
    abi_ulong pos = infop->start_stack;
    abi_ulong tmp;
#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
    abi_ulong entry, toc;
#endif

    _regs->gpr[1] = infop->start_stack;
#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
    entry = ldq_raw(infop->entry) + infop->load_addr;
    toc = ldq_raw(infop->entry + 8) + infop->load_addr;
    infop->entry = entry;
#endif
    _regs->nip = infop->entry;
    /* Note that this isn't exactly what the regular kernel does,
     * but it is what the ABI wants and is needed to allow
     * execution of PPC BSD programs.
     */
    /* FIXME - what to do on failure of get_user()? */
    get_user_ual(_regs->gpr[3], pos);
    pos += sizeof(abi_ulong);
    for (tmp = 1; tmp != 0; pos += sizeof(abi_ulong))

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096
#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_MIPS )

#ifdef TARGET_MIPS64
#define ELF_CLASS   ELFCLASS64
#else
#define ELF_CLASS   ELFCLASS32
#endif
#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA    ELFDATA2MSB
#else
#define ELF_DATA    ELFDATA2LSB
#endif
#define ELF_ARCH    EM_MIPS

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->cp0_status = 2 << CP0St_KSU;
    regs->cp0_epc = infop->entry;
    regs->regs[29] = infop->start_stack;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE   4096

#endif /* TARGET_MIPS */
#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_SH )

#define ELF_CLASS   ELFCLASS32
#define ELF_DATA    ELFDATA2LSB
#define ELF_ARCH    EM_SH

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    /* Check other registers XXXXX */
    regs->pc = infop->entry;
    regs->regs[15] = infop->start_stack;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE   4096
#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_CRIS )

#define ELF_CLASS   ELFCLASS32
#define ELF_DATA    ELFDATA2LSB
#define ELF_ARCH    EM_CRIS

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->erp = infop->entry;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE   8192
#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_68K )

#define ELF_CLASS   ELFCLASS32
#define ELF_DATA    ELFDATA2MSB
#define ELF_ARCH    EM_68K

/* ??? Does this need to do anything?
#define ELF_PLAT_INIT(_r) */

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->usp = infop->start_stack;
    regs->pc = infop->entry;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE   8192
#define ELF_START_MMAP (0x30000000000ULL)

#define elf_check_arch(x) ( (x) == ELF_ARCH )

#define ELF_CLASS   ELFCLASS64
#define ELF_DATA    ELFDATA2MSB
#define ELF_ARCH    EM_ALPHA

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->usp = infop->start_stack;
    regs->unique = infop->start_data; /* ? */
    printf("Set unique value to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n",
           regs->unique, infop->start_data);
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE   8192

#endif /* TARGET_ALPHA */
#define ELF_PLATFORM (NULL)

#define ELF_CLASS ELFCLASS32

#define bswaptls(ptr) bswap32s(ptr)

struct exec
{
    unsigned int a_info;   /* Use macros N_MAGIC, etc for access */
    unsigned int a_text;   /* length of text, in bytes */
    unsigned int a_data;   /* length of data, in bytes */
    unsigned int a_bss;    /* length of uninitialized data area, in bytes */
    unsigned int a_syms;   /* length of symbol table data in file, in bytes */
    unsigned int a_entry;  /* start address */
    unsigned int a_trsize; /* length of relocation info for text, in bytes */
    unsigned int a_drsize; /* length of relocation info for data, in bytes */
};

#define N_MAGIC(exec) ((exec).a_info & 0xffff)
/* max code+data+bss space allocated to elf interpreter */
#define INTERP_MAP_SIZE (32 * 1024 * 1024)

/* max code+data+bss+brk space allocated to ET_DYN executables */
#define ET_DYN_MAP_SIZE (128 * 1024 * 1024)

/* Necessary parameters */
#define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
#define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
#define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
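/*
 * Illustration (hypothetical address, assuming a 4 KiB TARGET_PAGE_SIZE):
 * for a segment virtual address of 0x08049234, TARGET_ELF_PAGESTART() yields
 * 0x08049000 and TARGET_ELF_PAGEOFFSET() yields 0x234, so a file mapping can
 * start on a page boundary while still placing the segment at the address
 * it asked for.
 */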
#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
#define INTERPRETER_ELF  2

#define DLINFO_ITEMS 12

static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
{
    memcpy(to, from, n);
}

static int load_aout_interp(void * exptr, int interp_fd);
static void bswap_ehdr(struct elfhdr *ehdr)
{
    bswap16s(&ehdr->e_type);            /* Object file type */
    bswap16s(&ehdr->e_machine);         /* Architecture */
    bswap32s(&ehdr->e_version);         /* Object file version */
    bswaptls(&ehdr->e_entry);           /* Entry point virtual address */
    bswaptls(&ehdr->e_phoff);           /* Program header table file offset */
    bswaptls(&ehdr->e_shoff);           /* Section header table file offset */
    bswap32s(&ehdr->e_flags);           /* Processor-specific flags */
    bswap16s(&ehdr->e_ehsize);          /* ELF header size in bytes */
    bswap16s(&ehdr->e_phentsize);       /* Program header table entry size */
    bswap16s(&ehdr->e_phnum);           /* Program header table entry count */
    bswap16s(&ehdr->e_shentsize);       /* Section header table entry size */
    bswap16s(&ehdr->e_shnum);           /* Section header table entry count */
    bswap16s(&ehdr->e_shstrndx);        /* Section header string table index */
}

static void bswap_phdr(struct elf_phdr *phdr)
{
    bswap32s(&phdr->p_type);            /* Segment type */
    bswaptls(&phdr->p_offset);          /* Segment file offset */
    bswaptls(&phdr->p_vaddr);           /* Segment virtual address */
    bswaptls(&phdr->p_paddr);           /* Segment physical address */
    bswaptls(&phdr->p_filesz);          /* Segment size in file */
    bswaptls(&phdr->p_memsz);           /* Segment size in memory */
    bswap32s(&phdr->p_flags);           /* Segment flags */
    bswaptls(&phdr->p_align);           /* Segment alignment */
}

static void bswap_shdr(struct elf_shdr *shdr)
{
    bswap32s(&shdr->sh_name);
    bswap32s(&shdr->sh_type);
    bswaptls(&shdr->sh_flags);
    bswaptls(&shdr->sh_addr);
    bswaptls(&shdr->sh_offset);
    bswaptls(&shdr->sh_size);
    bswap32s(&shdr->sh_link);
    bswap32s(&shdr->sh_info);
    bswaptls(&shdr->sh_addralign);
    bswaptls(&shdr->sh_entsize);
}

static void bswap_sym(struct elf_sym *sym)
{
    bswap32s(&sym->st_name);
    bswaptls(&sym->st_value);
    bswaptls(&sym->st_size);
    bswap16s(&sym->st_shndx);
}
/*
 * 'copy_elf_strings()' copies argument/environment strings from user
 * memory to free pages in kernel mem.  These are in a format ready
 * to be put directly into the top of new user memory.
 */
static abi_ulong copy_elf_strings(int argc, char **argv, void **page,
                                  abi_ulong p)
{
    char *tmp, *tmp1, *pag = NULL;

        return 0; /* bullet-proofing */

            fprintf(stderr, "VFS: argc is wrong");

        if (p < len) { /* this shouldn't happen - 128kB */

                offset = p % TARGET_PAGE_SIZE;
                pag = (char *)page[p/TARGET_PAGE_SIZE];

                    pag = (char *)malloc(TARGET_PAGE_SIZE);
                    memset(pag, 0, TARGET_PAGE_SIZE);
                    page[p/TARGET_PAGE_SIZE] = pag;

            if (len == 0 || offset == 0) {
                *(pag + offset) = *tmp;

                int bytes_to_copy = (len > offset) ? offset : len;
                tmp -= bytes_to_copy;
                offset -= bytes_to_copy;
                len -= bytes_to_copy;
                memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
static abi_ulong setup_arg_pages(abi_ulong p, struct linux_binprm *bprm,
                                 struct image_info *info)
{
    abi_ulong stack_base, size, error;

    /* Create enough stack to hold everything.  If we don't use
     * it for args, we'll use it for something else...
     */
    size = x86_stack_size;
    if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE)
        size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
    error = target_mmap(0,
                        size + qemu_host_page_size,
                        PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANON,
                        -1, 0);

    /* we reserve one extra page at the top of the stack as guard */
    target_mprotect(error + size, qemu_host_page_size, PROT_NONE);

    stack_base = error + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;

    for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
            /* FIXME - check return value of memcpy_to_target() for failure */
            memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE);
        stack_base += TARGET_PAGE_SIZE;
static void set_brk(abi_ulong start, abi_ulong end)
{
    /* page-align the start and end addresses... */
    start = HOST_PAGE_ALIGN(start);
    end = HOST_PAGE_ALIGN(end);

    if(target_mmap(start, end - start,
                   PROT_READ | PROT_WRITE | PROT_EXEC,
                   MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) == -1) {
        perror("cannot mmap brk");
/* We need to explicitly zero any fractional pages after the data
   section (i.e. bss).  This would contain the junk from the file that
   should not be in memory. */
static void padzero(abi_ulong elf_bss, abi_ulong last_bss)
{
    if (elf_bss >= last_bss)
        return;

    /* XXX: this is really a hack : if the real host page size is
       smaller than the target page size, some pages after the end
       of the file may not be mapped.  A better fix would be to
       patch target_mmap(), but it is more complicated as the file
       size must be known */
    if (qemu_real_host_page_size < qemu_host_page_size) {
        abi_ulong end_addr, end_addr1;
        end_addr1 = (elf_bss + qemu_real_host_page_size - 1) &
            ~(qemu_real_host_page_size - 1);
        end_addr = HOST_PAGE_ALIGN(elf_bss);
        if (end_addr1 < end_addr) {
            mmap((void *)g2h(end_addr1), end_addr - end_addr1,
                 PROT_READ|PROT_WRITE|PROT_EXEC,
                 MAP_FIXED|MAP_PRIVATE|MAP_ANON, -1, 0);
        }
    }

    nbyte = elf_bss & (qemu_host_page_size-1);
        nbyte = qemu_host_page_size - nbyte;
            /* FIXME - what to do if put_user() fails? */
            put_user_u8(0, elf_bss);
static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
                                   struct elfhdr * exec,
                                   abi_ulong load_addr,
                                   abi_ulong load_bias,
                                   abi_ulong interp_load_addr, int ibcs,
                                   struct image_info *info)
{
    abi_ulong u_platform;
    const char *k_platform;
    const int n = sizeof(elf_addr_t);

    k_platform = ELF_PLATFORM;
        size_t len = strlen(k_platform) + 1;
        sp -= (len + n - 1) & ~(n - 1);
        /* FIXME - check return value of memcpy_to_target() for failure */
        memcpy_to_target(sp, k_platform, len);
    /*
     * Force 16 byte _final_ alignment here for generality.
     */
    sp = sp &~ (abi_ulong)15;
    size = (DLINFO_ITEMS + 1) * 2;
#ifdef DLINFO_ARCH_ITEMS
    size += DLINFO_ARCH_ITEMS * 2;
#endif
    size += envc + argc + 2;
    size += (!ibcs ? 3 : 1);    /* argc itself */
    sp -= 16 - (size & 15);

    /* This is correct because Linux defines
     * elf_addr_t as Elf32_Off / Elf64_Off
     */
#define NEW_AUX_ENT(id, val) do {               \
        sp -= n; put_user_ual(val, sp);         \
        sp -= n; put_user_ual(id, sp);          \
    } while(0)

    NEW_AUX_ENT (AT_NULL, 0);
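    /*
     * Because NEW_AUX_ENT pushes the value and then the id while sp moves
     * downwards, entries appear in guest memory in the reverse of the order
     * they are emitted here: the AT_NULL pair above is pushed first, so it
     * ends up at the highest address and terminates the auxiliary vector
     * that the guest's libc walks upwards from the end of envp.
     */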
    /* There must be exactly DLINFO_ITEMS entries here.  */
    NEW_AUX_ENT(AT_PHDR, (abi_ulong)(load_addr + exec->e_phoff));
    NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
    NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
    NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
    NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_load_addr));
    NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
    NEW_AUX_ENT(AT_ENTRY, load_bias + exec->e_entry);
    NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
    NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
    NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
    NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
    NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
    NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK));
    NEW_AUX_ENT(AT_PLATFORM, u_platform);

    /*
     * ARCH_DLINFO must come last so platform specific code can enforce
     * special alignment requirements on the AUXV if necessary (eg. PPC).
     */

    sp = loader_build_argptr(envc, argc, sp, p, !ibcs);
static abi_ulong load_elf_interp(struct elfhdr * interp_elf_ex,
                                 int interpreter_fd,
                                 abi_ulong *interp_load_addr)
{
    struct elf_phdr *elf_phdata  =  NULL;
    struct elf_phdr *eppnt;
    abi_ulong load_addr = 0;
    int load_addr_set = 0;
    abi_ulong last_bss, elf_bss;

    bswap_ehdr(interp_elf_ex);

    /* First of all, some simple consistency checks */
    if ((interp_elf_ex->e_type != ET_EXEC &&
         interp_elf_ex->e_type != ET_DYN) ||
        !elf_check_arch(interp_elf_ex->e_machine)) {
        return ~((abi_ulong)0UL);
    }

    /* Now read in all of the header information */

    if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE)
        return ~(abi_ulong)0UL;

    elf_phdata = (struct elf_phdr *)
        malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);

        return ~((abi_ulong)0UL);

    /*
     * If the size of this structure has changed, then punt, since
     * we will be doing the wrong thing.
     */
    if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
        return ~((abi_ulong)0UL);
    }

    retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);

    retval = read(interpreter_fd, (char *) elf_phdata,
                  sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);

        perror("load_elf_interp");

    for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {

    if (interp_elf_ex->e_type == ET_DYN) {
        /* in order to avoid hardcoding the interpreter load
           address in qemu, we allocate a big enough memory zone */
        error = target_mmap(0, INTERP_MAP_SIZE,
                            PROT_NONE, MAP_PRIVATE | MAP_ANON,
                            -1, 0);

    for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++)
        if (eppnt->p_type == PT_LOAD) {
            int elf_type = MAP_PRIVATE | MAP_DENYWRITE;

            if (eppnt->p_flags & PF_R) elf_prot =  PROT_READ;
            if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
            if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
            if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) {
                elf_type |= MAP_FIXED;
                vaddr = eppnt->p_vaddr;
            }
            error = target_mmap(load_addr+TARGET_ELF_PAGESTART(vaddr),
                                eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr),
                                elf_prot,
                                elf_type,
                                interpreter_fd,
                                eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr));

                close(interpreter_fd);
                return ~((abi_ulong)0UL);

            if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {

            /*
             * Find the end of the file mapping for this phdr, and keep
             * track of the largest address we see for this.
             */
            k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
            if (k > elf_bss) elf_bss = k;

            /*
             * Do the same thing for the memory mapping - between
             * elf_bss and last_bss is the bss section.
             */
            k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
            if (k > last_bss) last_bss = k;
        }

    /* Now use mmap to map the library into memory. */

    close(interpreter_fd);

    /*
     * Now fill out the bss section.  First pad the last page up
     * to the page boundary, and then perform a mmap to make sure
     * that there are zeromapped pages up to and including the last
     * bss page.
     */
    padzero(elf_bss, last_bss);
    elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */

    /* Map the last of the bss segment */
    if (last_bss > elf_bss) {
        target_mmap(elf_bss, last_bss-elf_bss,
                    PROT_READ|PROT_WRITE|PROT_EXEC,
                    MAP_FIXED|MAP_PRIVATE|MAP_ANON, -1, 0);
    }

    *interp_load_addr = load_addr;
    return ((abi_ulong) interp_elf_ex->e_entry) + load_addr;
static int symfind(const void *s0, const void *s1)
{
    struct elf_sym *key = (struct elf_sym *)s0;
    struct elf_sym *sym = (struct elf_sym *)s1;

    if (key->st_value < sym->st_value) {
    } else if (key->st_value > sym->st_value + sym->st_size) {
static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr)
{
#if ELF_CLASS == ELFCLASS32
    struct elf_sym *syms = s->disas_symtab.elf32;
#else
    struct elf_sym *syms = s->disas_symtab.elf64;
#endif
    struct elf_sym key;
    struct elf_sym *sym;

    key.st_value = orig_addr;

    sym = bsearch(&key, syms, s->disas_num_syms, sizeof(*syms), symfind);
    if (sym != NULL) {
        return s->disas_strtab + sym->st_name;
    }
/* FIXME: This should use elf_ops.h */
static int symcmp(const void *s0, const void *s1)
{
    struct elf_sym *sym0 = (struct elf_sym *)s0;
    struct elf_sym *sym1 = (struct elf_sym *)s1;
    return (sym0->st_value < sym1->st_value)
        ? -1
        : ((sym0->st_value > sym1->st_value) ? 1 : 0);
}
/* Best attempt to load symbols from this ELF object. */
static void load_symbols(struct elfhdr *hdr, int fd)
{
    unsigned int i, nsyms;
    struct elf_shdr sechdr, symtab, strtab;
    struct elf_sym *syms;

    lseek(fd, hdr->e_shoff, SEEK_SET);
    for (i = 0; i < hdr->e_shnum; i++) {
        if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr))

        bswap_shdr(&sechdr);

        if (sechdr.sh_type == SHT_SYMTAB) {
            lseek(fd, hdr->e_shoff
                  + sizeof(sechdr) * sechdr.sh_link, SEEK_SET);
            if (read(fd, &strtab, sizeof(strtab))
                != sizeof(strtab))

            bswap_shdr(&strtab);

    return; /* Shouldn't happen... */

    /* Now know where the strtab and symtab are.  Snarf them. */
    s = malloc(sizeof(*s));
    syms = malloc(symtab.sh_size);

    s->disas_strtab = strings = malloc(strtab.sh_size);
    if (!s->disas_strtab)

    lseek(fd, symtab.sh_offset, SEEK_SET);
    if (read(fd, syms, symtab.sh_size) != symtab.sh_size)

    nsyms = symtab.sh_size / sizeof(struct elf_sym);

        bswap_sym(syms + i);

        // Throw away entries which we do not need.
        if (syms[i].st_shndx == SHN_UNDEF ||
            syms[i].st_shndx >= SHN_LORESERVE ||
            ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) {
            syms[i] = syms[nsyms];
        }
#if defined(TARGET_ARM) || defined (TARGET_MIPS)
        /* The bottom address bit marks a Thumb or MIPS16 symbol. */
        syms[i].st_value &= ~(target_ulong)1;
#endif

    syms = realloc(syms, nsyms * sizeof(*syms));

    qsort(syms, nsyms, sizeof(*syms), symcmp);

    lseek(fd, strtab.sh_offset, SEEK_SET);
    if (read(fd, strings, strtab.sh_size) != strtab.sh_size)

    s->disas_num_syms = nsyms;
#if ELF_CLASS == ELFCLASS32
    s->disas_symtab.elf32 = syms;
    s->lookup_symbol = (lookup_symbol_t)lookup_symbolxx;
#else
    s->disas_symtab.elf64 = syms;
    s->lookup_symbol = (lookup_symbol_t)lookup_symbolxx;
#endif
int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
                    struct image_info * info)
{
    struct elfhdr elf_ex;
    struct elfhdr interp_elf_ex;
    struct exec interp_ex;
    int interpreter_fd = -1; /* avoid warning */
    abi_ulong load_addr, load_bias;
    int load_addr_set = 0;
    unsigned int interpreter_type = INTERPRETER_NONE;
    unsigned char ibcs2_interpreter;
    abi_ulong mapped_addr;
    struct elf_phdr * elf_ppnt;
    struct elf_phdr *elf_phdata;
    abi_ulong elf_bss, k, elf_brk;
    char * elf_interpreter;
    abi_ulong elf_entry, interp_load_addr = 0;
    abi_ulong start_code, end_code, start_data, end_data;
    abi_ulong reloc_func_desc = 0;
    abi_ulong elf_stack;
    char passed_fileno[6];

    ibcs2_interpreter = 0;

    elf_ex = *((struct elfhdr *) bprm->buf);          /* exec-header */

    bswap_ehdr(&elf_ex);

    /* First of all, some simple consistency checks */
    if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
        (! elf_check_arch(elf_ex.e_machine))) {

    bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
    bprm->p = copy_elf_strings(bprm->envc, bprm->envp, bprm->page, bprm->p);
    bprm->p = copy_elf_strings(bprm->argc, bprm->argv, bprm->page, bprm->p);

    /* Now read in all of the header information */
    elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize*elf_ex.e_phnum);
    if (elf_phdata == NULL) {

    retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET);

    retval = read(bprm->fd, (char *) elf_phdata,
                  elf_ex.e_phentsize * elf_ex.e_phnum);

        perror("load_elf_binary");

    elf_ppnt = elf_phdata;
    for (i = 0; i < elf_ex.e_phnum; i++, elf_ppnt++) {
        bswap_phdr(elf_ppnt);
    }

    elf_ppnt = elf_phdata;

    elf_stack = ~((abi_ulong)0UL);
    elf_interpreter = NULL;
    start_code = ~((abi_ulong)0UL);

    interp_ex.a_info = 0;

    for (i = 0; i < elf_ex.e_phnum; i++) {
        if (elf_ppnt->p_type == PT_INTERP) {
            if ( elf_interpreter != NULL )
                free(elf_interpreter);

            /* This is the program interpreter used for
             * shared libraries - for now assume that this
             * is an a.out format binary
             */
            elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);

            if (elf_interpreter == NULL) {

            retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET);

            retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz);

                perror("load_elf_binary2");

            /* If the program interpreter is one of these two,
               then assume an iBCS2 image.  Otherwise assume
               a native linux image. */

            /* JRP - Need to add X86 lib dir stuff here... */

            if (strcmp(elf_interpreter, "/usr/lib/libc.so.1") == 0 ||
                strcmp(elf_interpreter, "/usr/lib/ld.so.1") == 0) {
                ibcs2_interpreter = 1;
            }

            printf("Using ELF interpreter %s\n", path(elf_interpreter));

            retval = open(path(elf_interpreter), O_RDONLY);
                interpreter_fd = retval;

                perror(elf_interpreter);
                /* retval = -errno; */

            retval = lseek(interpreter_fd, 0, SEEK_SET);

            retval = read(interpreter_fd, bprm->buf, 128);

            interp_ex = *((struct exec *) bprm->buf);       /* aout exec-header */
            interp_elf_ex = *((struct elfhdr *) bprm->buf); /* elf exec-header */

                perror("load_elf_binary3");

                free(elf_interpreter);
        }
    }

    /* Some simple consistency checks for the interpreter */
    if (elf_interpreter){
        interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;

        /* Now figure out which format our binary is */
        if ((N_MAGIC(interp_ex) != OMAGIC) && (N_MAGIC(interp_ex) != ZMAGIC) &&
            (N_MAGIC(interp_ex) != QMAGIC)) {
            interpreter_type = INTERPRETER_ELF;
        }

        if (interp_elf_ex.e_ident[0] != 0x7f ||
            strncmp((char *)&interp_elf_ex.e_ident[1], "ELF", 3) != 0) {
            interpreter_type &= ~INTERPRETER_ELF;
        }

        if (!interpreter_type) {
            free(elf_interpreter);

    /* OK, we are done with that, now set up the arg stuff,
       and then start this sucker up */

    if (interpreter_type == INTERPRETER_AOUT) {
        snprintf(passed_fileno, sizeof(passed_fileno), "%d", bprm->fd);
        passed_p = passed_fileno;

        if (elf_interpreter) {
            bprm->p = copy_elf_strings(1, &passed_p, bprm->page, bprm->p);
        }
    }

    if (elf_interpreter) {
        free(elf_interpreter);
    }

    /* OK, This is the point of no return */

    info->start_mmap = (abi_ulong)ELF_START_MMAP;

    elf_entry = (abi_ulong) elf_ex.e_entry;

#if defined(CONFIG_USE_GUEST_BASE)
    /*
     * If the user has not explicitly set guest_base, probe here whether we
     * should set it automatically.
     */
    if (!have_guest_base) {
        /*
         * Go through the ELF program header table and find out whether any
         * of the segments drop below our current mmap_min_addr and in that
         * case set guest_base to the corresponding address.
         */
        for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum;
             i++, elf_ppnt++) {
            if (elf_ppnt->p_type != PT_LOAD)
                continue;
            if (HOST_PAGE_ALIGN(elf_ppnt->p_vaddr) < mmap_min_addr) {
                guest_base = HOST_PAGE_ALIGN(mmap_min_addr);
            }
        }
    }
#endif /* CONFIG_USE_GUEST_BASE */

    /* Do this so that we can load the interpreter, if need be.  We will
       change some of these later */
    bprm->p = setup_arg_pages(bprm->p, bprm, info);
    info->start_stack = bprm->p;

    /* Now we do a little grungy work by mmaping the ELF image into
     * the correct location in memory.  At this point, we assume that
     * the image should be loaded at fixed address, not at a variable
     * address.
     */
    for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
        if (elf_ppnt->p_type != PT_LOAD)
            continue;

        if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
        if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
        if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
        elf_flags = MAP_PRIVATE | MAP_DENYWRITE;
        if (elf_ex.e_type == ET_EXEC || load_addr_set) {
            elf_flags |= MAP_FIXED;
        } else if (elf_ex.e_type == ET_DYN) {
            /* Try and get dynamic programs out of the way of the default mmap
               base, as well as whatever program they might try to exec.  This
               is because the brk will follow the loader, and is not movable. */
            /* NOTE: for qemu, we do a big mmap to get enough space
               without hardcoding any address */
            error = target_mmap(0, ET_DYN_MAP_SIZE,
                                PROT_NONE, MAP_PRIVATE | MAP_ANON,
                                -1, 0);
            load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr);
        }

        error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
                            (elf_ppnt->p_filesz +
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
                            elf_prot,
                            (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
                            bprm->fd,
                            (elf_ppnt->p_offset -
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));

#ifdef LOW_ELF_STACK
        if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
            elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr);
#endif

        if (!load_addr_set) {
            load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
            if (elf_ex.e_type == ET_DYN) {
                load_bias += error -
                    TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
                load_addr += load_bias;
                reloc_func_desc = load_bias;
            }
        }
        k = elf_ppnt->p_vaddr;

        k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
        if ((elf_ppnt->p_flags & PF_X) && end_code < k)
            end_code = k;

        k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
        if (k > elf_brk) elf_brk = k;
    }

    elf_entry += load_bias;
    elf_bss += load_bias;
    elf_brk += load_bias;
    start_code += load_bias;
    end_code += load_bias;
    start_data += load_bias;
    end_data += load_bias;

    if (elf_interpreter) {
        if (interpreter_type & 1) {
            elf_entry = load_aout_interp(&interp_ex, interpreter_fd);
        }
        else if (interpreter_type & 2) {
            elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,
                                        &interp_load_addr);
            reloc_func_desc = interp_load_addr;
        }

        close(interpreter_fd);
        free(elf_interpreter);

        if (elf_entry == ~((abi_ulong)0UL)) {
            printf("Unable to load interpreter\n");
        }
    }

    if (qemu_log_enabled())
        load_symbols(&elf_ex, bprm->fd);

    if (interpreter_type != INTERPRETER_AOUT) close(bprm->fd);
    info->personality = (ibcs2_interpreter ? PER_SVR4 : PER_LINUX);

#ifdef LOW_ELF_STACK
    info->start_stack = bprm->p = elf_stack - 4;
#endif
    bprm->p = create_elf_tables(bprm->p,
                                bprm->argc, bprm->envc, &elf_ex,
                                load_addr, load_bias,
                                interp_load_addr,
                                (interpreter_type == INTERPRETER_AOUT ? 0 : 1),
                                info);
    info->load_addr = reloc_func_desc;
    info->start_brk = info->brk = elf_brk;
    info->end_code = end_code;
    info->start_code = start_code;
    info->start_data = start_data;
    info->end_data = end_data;
    info->start_stack = bprm->p;

    /* Calling set_brk effectively mmaps the pages that we need for the bss
       and break sections */
    set_brk(elf_bss, elf_brk);

    padzero(elf_bss, elf_brk);

    printf("(start_brk) %x\n" , info->start_brk);
    printf("(end_code) %x\n" , info->end_code);
    printf("(start_code) %x\n" , info->start_code);
    printf("(end_data) %x\n" , info->end_data);
    printf("(start_stack) %x\n" , info->start_stack);
    printf("(brk) %x\n" , info->brk);

    if ( info->personality == PER_SVR4 )
    {
        /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
           and some applications "depend" upon this behavior.
           Since we do not have the power to recompile these, we
           emulate the SVr4 behavior.  Sigh. */
        mapped_addr = target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
                                  MAP_FIXED | MAP_PRIVATE, -1, 0);
    }

    info->entry = elf_entry;
static int load_aout_interp(void * exptr, int interp_fd)
{
    printf("a.out interpreter not yet supported\n");
void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    init_thread(regs, infop);
}