/* This is the Linux kernel elf-loading code, ported into user space */

#include "disas/disas.h"

/* from personality.h */

/*
 * Flags for bug emulation.
 *
 * These occupy the top three bytes.
 */
    ADDR_NO_RANDOMIZE =  0x0040000,  /* disable randomization of VA space */
    FDPIC_FUNCPTRS =     0x0080000,  /* userspace function ptrs point to descriptors */
    MMAP_PAGE_ZERO =     0x0100000,
    ADDR_COMPAT_LAYOUT = 0x0200000,
    READ_IMPLIES_EXEC =  0x0400000,
    ADDR_LIMIT_32BIT =   0x0800000,
    SHORT_INODE =        0x1000000,
    WHOLE_SECONDS =      0x2000000,
    STICKY_TIMEOUTS =    0x4000000,
    ADDR_LIMIT_3GB =     0x8000000,

/*
 * These go in the low byte.  Avoid using the top bit, it will
 * conflict with error returns.
 */
    PER_LINUX_32BIT =  0x0000 | ADDR_LIMIT_32BIT,
    PER_LINUX_FDPIC =  0x0000 | FDPIC_FUNCPTRS,
    PER_SVR4 =         0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
    PER_SVR3 =         0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_SCOSVR3 =      0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS | SHORT_INODE,
    PER_OSR5 =         0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
    PER_WYSEV386 =     0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_ISCR4 =        0x0005 | STICKY_TIMEOUTS,
    PER_SUNOS =        0x0006 | STICKY_TIMEOUTS,
    PER_XENIX =        0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_LINUX32_3GB =  0x0008 | ADDR_LIMIT_3GB,
    PER_IRIX32 =       0x0009 | STICKY_TIMEOUTS,      /* IRIX5 32-bit     */
    PER_IRIXN32 =      0x000a | STICKY_TIMEOUTS,      /* IRIX6 new 32-bit */
    PER_IRIX64 =       0x000b | STICKY_TIMEOUTS,      /* IRIX6 64-bit     */
    PER_SOLARIS =      0x000d | STICKY_TIMEOUTS,
    PER_UW7 =          0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
    PER_OSF4 =         0x000f,                        /* OSF/1 v4         */

/*
 * Return the base personality without flags.
 */
#define personality(pers) (pers & PER_MASK)
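
/*
 * Worked example (editor's note, not part of the original source): assuming
 * PER_MASK covers the low byte (0x00ff) as in the Linux personality headers,
 * PER_SVR4 above expands to 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO
 * = 0x4100001, and personality(PER_SVR4) masks the flag bits off again,
 * yielding the base personality 0x0001.
 */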
/* this flag is ineffective under Linux too, should be deleted */
#define MAP_DENYWRITE 0

/* should probably go in elf.h */

#define ELF_PLATFORM get_elf_platform()

static const char *get_elf_platform(void)
    static char elf_platform[] = "i386";
    int family = (thread_env->cpuid_version >> 8) & 0xff;
    elf_platform[1] = '0' + family;
#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
    return thread_env->cpuid_features;

#define ELF_START_MMAP 0x2aaaaab000ULL
#define elf_check_arch(x) ( ((x) == ELF_ARCH) )

#define ELF_CLASS      ELFCLASS64
#define ELF_DATA       ELFDATA2LSB
#define ELF_ARCH       EM_X86_64

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
    regs->rsp = infop->start_stack;
    regs->rip = infop->entry;
    if (bsd_type == target_freebsd) {
        regs->rdi = infop->start_stack;
#define ELF_START_MMAP 0x80000000

/*
 * This is used to ensure we don't load something for the wrong architecture.
 */
#define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )

/*
 * These are used to set parameters in the core dumps.
 */
#define ELF_CLASS      ELFCLASS32
#define ELF_DATA       ELFDATA2LSB
#define ELF_ARCH       EM_386

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
    regs->esp = infop->start_stack;
    regs->eip = infop->entry;
    /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
       starts, %edx contains a pointer to a function which might be
       registered using `atexit'.  This provides a means for the
       dynamic linker to call DT_FINI functions for shared libraries
       that have been loaded before the code runs.

       A value of 0 tells us that we have no such handler. */
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_ARM )

#define ELF_CLASS      ELFCLASS32
#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA       ELFDATA2MSB
#define ELF_DATA       ELFDATA2LSB
#define ELF_ARCH       EM_ARM

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
    abi_long stack = infop->start_stack;
    memset(regs, 0, sizeof(*regs));
    regs->ARM_cpsr = 0x10;
    if (infop->entry & 1)
        regs->ARM_cpsr |= CPSR_T;
    regs->ARM_pc = infop->entry & 0xfffffffe;
    regs->ARM_sp = infop->start_stack;
    /* FIXME - what to do for failure of get_user()? */
    get_user_ual(regs->ARM_r2, stack + 8); /* envp */
    get_user_ual(regs->ARM_r1, stack + 4); /* envp */
    /* XXX: it seems that r0 is zeroed after ! */
    /* For uClinux PIC binaries.  */
    /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
    regs->ARM_r10 = infop->start_data;

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096

    ARM_HWCAP_ARM_SWP       = 1 << 0,
    ARM_HWCAP_ARM_HALF      = 1 << 1,
    ARM_HWCAP_ARM_THUMB     = 1 << 2,
    ARM_HWCAP_ARM_26BIT     = 1 << 3,
    ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
    ARM_HWCAP_ARM_FPA       = 1 << 5,
    ARM_HWCAP_ARM_VFP       = 1 << 6,
    ARM_HWCAP_ARM_EDSP      = 1 << 7,

#define ELF_HWCAP (ARM_HWCAP_ARM_SWP | ARM_HWCAP_ARM_HALF              \
                   | ARM_HWCAP_ARM_THUMB | ARM_HWCAP_ARM_FAST_MULT     \
                   | ARM_HWCAP_ARM_FPA | ARM_HWCAP_ARM_VFP)
#ifdef TARGET_SPARC64

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
#define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )

#define ELF_CLASS      ELFCLASS64
#define ELF_DATA       ELFDATA2MSB
#define ELF_ARCH       EM_SPARCV9

#define STACK_BIAS 2047

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
    regs->u_regs[14] = infop->start_stack - 16 * 4;
    if (personality(infop->personality) == PER_LINUX32)
        regs->u_regs[14] = infop->start_stack - 16 * 4;
    regs->u_regs[14] = infop->start_stack - 16 * 8 - STACK_BIAS;
    if (bsd_type == target_freebsd) {
        regs->u_regs[8] = infop->start_stack;
        regs->u_regs[11] = infop->start_stack;
#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_SPARC )

#define ELF_CLASS      ELFCLASS32
#define ELF_DATA       ELFDATA2MSB
#define ELF_ARCH       EM_SPARC

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
    regs->u_regs[14] = infop->start_stack - 16 * 4;
#define ELF_START_MMAP 0x80000000

#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)

#define elf_check_arch(x) ( (x) == EM_PPC64 )

#define ELF_CLASS      ELFCLASS64

#define elf_check_arch(x) ( (x) == EM_PPC )

#define ELF_CLASS      ELFCLASS32

#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA       ELFDATA2MSB
#define ELF_DATA       ELFDATA2LSB
#define ELF_ARCH       EM_PPC

/*
 * We need to put in some extra aux table entries to tell glibc what
 * the cache block size is, so it can use the dcbz instruction safely.
 */
#define AT_DCACHEBSIZE 19
#define AT_ICACHEBSIZE 20
#define AT_UCACHEBSIZE 21
/* A special ignored type value for PPC, for glibc compatibility.  */
#define AT_IGNOREPPC 22
/*
 * The requirements here are:
 * - keep the final alignment of sp (sp & 0xf)
 * - make sure the 32-bit value at the first 16 byte aligned position of
 *   AUXV is greater than 16 for glibc compatibility.
 *   AT_IGNOREPPC is used for that.
 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
 *   even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
 */
#define DLINFO_ARCH_ITEMS 5
#define ARCH_DLINFO                                     \
        NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20);              \
        NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20);              \
        NEW_AUX_ENT(AT_UCACHEBSIZE, 0);                 \
        /* Now handle glibc compatibility. */           \
        NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);        \
        NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);        \
static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
    abi_ulong pos = infop->start_stack;
#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
    abi_ulong entry, toc;

    _regs->gpr[1] = infop->start_stack;
#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
    entry = ldq_raw(infop->entry) + infop->load_addr;
    toc = ldq_raw(infop->entry + 8) + infop->load_addr;
    infop->entry = entry;
    _regs->nip = infop->entry;
    /* Note that this isn't exactly what the regular kernel does,
     * but this is what the ABI wants and is needed to allow
     * execution of PPC BSD programs.
     */
    /* FIXME - what to do for failure of get_user()? */
    get_user_ual(_regs->gpr[3], pos);
    pos += sizeof(abi_ulong);
    for (tmp = 1; tmp != 0; pos += sizeof(abi_ulong))

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_MIPS )

#define ELF_CLASS      ELFCLASS64
#define ELF_CLASS      ELFCLASS32

#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA       ELFDATA2MSB
#define ELF_DATA       ELFDATA2LSB
#define ELF_ARCH       EM_MIPS

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
    regs->cp0_status = 2 << CP0St_KSU;
    regs->cp0_epc = infop->entry;
    regs->regs[29] = infop->start_stack;

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096

#endif /* TARGET_MIPS */
#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_SH )

#define ELF_CLASS      ELFCLASS32
#define ELF_DATA       ELFDATA2LSB
#define ELF_ARCH       EM_SH

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
    /* Check other registers XXXXX */
    regs->pc = infop->entry;
    regs->regs[15] = infop->start_stack;

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_CRIS )

#define ELF_CLASS      ELFCLASS32
#define ELF_DATA       ELFDATA2LSB
#define ELF_ARCH       EM_CRIS

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
    regs->erp = infop->entry;

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 8192
#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_68K )

#define ELF_CLASS      ELFCLASS32
#define ELF_DATA       ELFDATA2MSB
#define ELF_ARCH       EM_68K

/* ??? Does this need to do anything?
   #define ELF_PLAT_INIT(_r) */

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
    regs->usp = infop->start_stack;
    regs->pc = infop->entry;

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 8192
#define ELF_START_MMAP (0x30000000000ULL)

#define elf_check_arch(x) ( (x) == ELF_ARCH )

#define ELF_CLASS      ELFCLASS64
#define ELF_DATA       ELFDATA2MSB
#define ELF_ARCH       EM_ALPHA

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
    regs->pc = infop->entry;
    regs->usp = infop->start_stack;
    regs->unique = infop->start_data; /* ? */
    printf("Set unique value to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n",
           regs->unique, infop->start_data);

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 8192

#endif /* TARGET_ALPHA */
#define ELF_PLATFORM (NULL)

#define ELF_CLASS      ELFCLASS32

#define bswaptls(ptr) bswap32s(ptr)

    unsigned int a_info;    /* Use macros N_MAGIC, etc for access */
    unsigned int a_text;    /* length of text, in bytes */
    unsigned int a_data;    /* length of data, in bytes */
    unsigned int a_bss;     /* length of uninitialized data area, in bytes */
    unsigned int a_syms;    /* length of symbol table data in file, in bytes */
    unsigned int a_entry;   /* start address */
    unsigned int a_trsize;  /* length of relocation info for text, in bytes */
    unsigned int a_drsize;  /* length of relocation info for data, in bytes */

#define N_MAGIC(exec) ((exec).a_info & 0xffff)

/* max code+data+bss space allocated to elf interpreter */
#define INTERP_MAP_SIZE (32 * 1024 * 1024)

/* max code+data+bss+brk space allocated to ET_DYN executables */
#define ET_DYN_MAP_SIZE (128 * 1024 * 1024)

/* Necessary parameters */
#define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
#define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
#define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
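
/*
 * Illustrative arithmetic (editor's note, not part of the original source),
 * assuming a 4 KiB TARGET_PAGE_SIZE: for _v = 0x08049234,
 *   TARGET_ELF_PAGESTART(_v)  == 0x08049000   (containing page base)
 *   TARGET_ELF_PAGEOFFSET(_v) == 0x00000234   (offset within that page)
 * The loaders below map segments at TARGET_ELF_PAGESTART(vaddr) and
 * compensate by adding the page offset to the mapping length and
 * subtracting it from the file offset.
 */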
#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
#define INTERPRETER_ELF  2

#define DLINFO_ITEMS 12

static inline void memcpy_fromfs(void *to, const void *from, unsigned long n)

static int load_aout_interp(void *exptr, int interp_fd);
static void bswap_ehdr(struct elfhdr *ehdr)
    bswap16s(&ehdr->e_type);        /* Object file type */
    bswap16s(&ehdr->e_machine);     /* Architecture */
    bswap32s(&ehdr->e_version);     /* Object file version */
    bswaptls(&ehdr->e_entry);       /* Entry point virtual address */
    bswaptls(&ehdr->e_phoff);       /* Program header table file offset */
    bswaptls(&ehdr->e_shoff);       /* Section header table file offset */
    bswap32s(&ehdr->e_flags);       /* Processor-specific flags */
    bswap16s(&ehdr->e_ehsize);      /* ELF header size in bytes */
    bswap16s(&ehdr->e_phentsize);   /* Program header table entry size */
    bswap16s(&ehdr->e_phnum);       /* Program header table entry count */
    bswap16s(&ehdr->e_shentsize);   /* Section header table entry size */
    bswap16s(&ehdr->e_shnum);       /* Section header table entry count */
    bswap16s(&ehdr->e_shstrndx);    /* Section header string table index */

static void bswap_phdr(struct elf_phdr *phdr)
    bswap32s(&phdr->p_type);        /* Segment type */
    bswaptls(&phdr->p_offset);      /* Segment file offset */
    bswaptls(&phdr->p_vaddr);       /* Segment virtual address */
    bswaptls(&phdr->p_paddr);       /* Segment physical address */
    bswaptls(&phdr->p_filesz);      /* Segment size in file */
    bswaptls(&phdr->p_memsz);       /* Segment size in memory */
    bswap32s(&phdr->p_flags);       /* Segment flags */
    bswaptls(&phdr->p_align);       /* Segment alignment */

static void bswap_shdr(struct elf_shdr *shdr)
    bswap32s(&shdr->sh_name);
    bswap32s(&shdr->sh_type);
    bswaptls(&shdr->sh_flags);
    bswaptls(&shdr->sh_addr);
    bswaptls(&shdr->sh_offset);
    bswaptls(&shdr->sh_size);
    bswap32s(&shdr->sh_link);
    bswap32s(&shdr->sh_info);
    bswaptls(&shdr->sh_addralign);
    bswaptls(&shdr->sh_entsize);

static void bswap_sym(struct elf_sym *sym)
    bswap32s(&sym->st_name);
    bswaptls(&sym->st_value);
    bswaptls(&sym->st_size);
    bswap16s(&sym->st_shndx);
/*
 * 'copy_elf_strings()' copies argument/envelope strings from user
 * memory to free pages in kernel mem.  These are in a format ready
 * to be put directly into the top of new user memory.
 */
static abi_ulong copy_elf_strings(int argc, char **argv, void **page,
    char *tmp, *tmp1, *pag = NULL;

        return 0;       /* bullet-proofing */
            fprintf(stderr, "VFS: argc is wrong");
        if (p < len) {  /* this shouldn't happen - 128kB */
                offset = p % TARGET_PAGE_SIZE;
                pag = (char *)page[p/TARGET_PAGE_SIZE];
                    pag = g_try_malloc0(TARGET_PAGE_SIZE);
                    page[p/TARGET_PAGE_SIZE] = pag;
            if (len == 0 || offset == 0) {
                *(pag + offset) = *tmp;
                int bytes_to_copy = (len > offset) ? offset : len;
                tmp -= bytes_to_copy;
                offset -= bytes_to_copy;
                len -= bytes_to_copy;
                memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
static abi_ulong setup_arg_pages(abi_ulong p, struct linux_binprm *bprm,
                                 struct image_info *info)
    abi_ulong stack_base, size, error;

    /* Create enough stack to hold everything.  If we don't use
     * it for args, we'll use it for something else...
     */
    size = x86_stack_size;
    if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE)
        size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
    error = target_mmap(0,
                        size + qemu_host_page_size,
                        PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANON,

    /* we reserve one extra page at the top of the stack as guard */
    target_mprotect(error + size, qemu_host_page_size, PROT_NONE);

    stack_base = error + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;

    for (i = 0; i < MAX_ARG_PAGES; i++) {
        /* FIXME - check return value of memcpy_to_target() for failure */
        memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE);
        g_free(bprm->page[i]);
        stack_base += TARGET_PAGE_SIZE;
static void set_brk(abi_ulong start, abi_ulong end)
    /* page-align the start and end addresses... */
    start = HOST_PAGE_ALIGN(start);
    end = HOST_PAGE_ALIGN(end);

    if (target_mmap(start, end - start,
                    PROT_READ | PROT_WRITE | PROT_EXEC,
                    MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) == -1) {
        perror("cannot mmap brk");
/* We need to explicitly zero any fractional pages after the data
   section (i.e. bss).  These would otherwise contain junk from the
   file that should not be in memory. */
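
/*
 * Worked example (editor's illustration, not in the original source): with a
 * 4 KiB host page, if elf_bss ends at 0x0804a123 then the 0xedd bytes from
 * 0x0804a123 up to the page boundary at 0x0804b000 would still hold bytes
 * mapped from the file; padzero() below clears exactly that tail.
 */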
static void padzero(abi_ulong elf_bss, abi_ulong last_bss)

    if (elf_bss >= last_bss)

    /* XXX: this is really a hack: if the real host page size is
       smaller than the target page size, some pages after the end
       of the file may not be mapped.  A better fix would be to
       patch target_mmap(), but it is more complicated as the file
       size must be known. */
    if (qemu_real_host_page_size < qemu_host_page_size) {
        abi_ulong end_addr, end_addr1;
        end_addr1 = (elf_bss + qemu_real_host_page_size - 1) &
            ~(qemu_real_host_page_size - 1);
        end_addr = HOST_PAGE_ALIGN(elf_bss);
        if (end_addr1 < end_addr) {
            mmap((void *)g2h(end_addr1), end_addr - end_addr1,
                 PROT_READ|PROT_WRITE|PROT_EXEC,
                 MAP_FIXED|MAP_PRIVATE|MAP_ANON, -1, 0);

    nbyte = elf_bss & (qemu_host_page_size-1);
        nbyte = qemu_host_page_size - nbyte;
            /* FIXME - what to do if put_user() fails? */
            put_user_u8(0, elf_bss);
static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
                                   struct elfhdr *exec,
                                   abi_ulong interp_load_addr, int ibcs,
                                   struct image_info *info)
    abi_ulong u_platform;
    const char *k_platform;
    const int n = sizeof(elf_addr_t);

    k_platform = ELF_PLATFORM;
        size_t len = strlen(k_platform) + 1;
        sp -= (len + n - 1) & ~(n - 1);
        /* FIXME - check return value of memcpy_to_target() for failure */
        memcpy_to_target(sp, k_platform, len);
    /*
     * Force 16 byte _final_ alignment here for generality.
     */
    sp = sp &~ (abi_ulong)15;
    size = (DLINFO_ITEMS + 1) * 2;
#ifdef DLINFO_ARCH_ITEMS
    size += DLINFO_ARCH_ITEMS * 2;
    size += envc + argc + 2;
    size += (!ibcs ? 3 : 1);    /* argc itself */
    sp -= 16 - (size & 15);

    /* This is correct because Linux defines
     * elf_addr_t as Elf32_Off / Elf64_Off
     */
#define NEW_AUX_ENT(id, val) do {               \
        sp -= n; put_user_ual(val, sp);         \
        sp -= n; put_user_ual(id, sp);          \
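
/*
 * Sketch of the resulting layout (editor's note, not original code): each
 * NEW_AUX_ENT() pushes the value first and then the id, so the entry ends up
 * in guest memory as {id, value} at increasing addresses.  Because AT_NULL is
 * pushed before the other entries below, it sits at the highest address and
 * terminates the vector when the auxv is later read upwards from sp.
 */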
    NEW_AUX_ENT (AT_NULL, 0);

    /* There must be exactly DLINFO_ITEMS entries here.  */
    NEW_AUX_ENT(AT_PHDR, (abi_ulong)(load_addr + exec->e_phoff));
    NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
    NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
    NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
    NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_load_addr));
    NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
    NEW_AUX_ENT(AT_ENTRY, load_bias + exec->e_entry);
    NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
    NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
    NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
    NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
    NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
    NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK));
    NEW_AUX_ENT(AT_PLATFORM, u_platform);
    /*
     * ARCH_DLINFO must come last so platform specific code can enforce
     * special alignment requirements on the AUXV if necessary (eg. PPC).
     */

    sp = loader_build_argptr(envc, argc, sp, p, !ibcs);
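
/*
 * Editor's sketch of the resulting guest stack (not part of the original
 * source).  After create_elf_tables() the new program's stack pointer points
 * at, in order of increasing address: argc, the argv[] pointers, a NULL
 * terminator, the envp[] pointers, another NULL, and finally the auxv
 * {id, value} pairs built above, ending with AT_NULL.
 */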
static abi_ulong load_elf_interp(struct elfhdr *interp_elf_ex,
                                 abi_ulong *interp_load_addr)
    struct elf_phdr *elf_phdata = NULL;
    struct elf_phdr *eppnt;
    abi_ulong load_addr = 0;
    int load_addr_set = 0;
    abi_ulong last_bss, elf_bss;

        bswap_ehdr(interp_elf_ex);

    /* First of all, some simple consistency checks */
    if ((interp_elf_ex->e_type != ET_EXEC &&
         interp_elf_ex->e_type != ET_DYN) ||
        !elf_check_arch(interp_elf_ex->e_machine)) {
        return ~((abi_ulong)0UL);

    /* Now read in all of the header information */

    if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE)
        return ~(abi_ulong)0UL;

    elf_phdata = (struct elf_phdr *)
        malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);

        return ~((abi_ulong)0UL);

    /*
     * If the size of this structure has changed, then punt, since
     * we will be doing the wrong thing.
     */
    if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
        return ~((abi_ulong)0UL);

    retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);
        retval = read(interpreter_fd,
                      sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
        perror("load_elf_interp");
    for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {

    if (interp_elf_ex->e_type == ET_DYN) {
        /* in order to avoid hardcoding the interpreter load
           address in qemu, we allocate a big enough memory zone */
        error = target_mmap(0, INTERP_MAP_SIZE,
                            PROT_NONE, MAP_PRIVATE | MAP_ANON,

    for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++)
        if (eppnt->p_type == PT_LOAD) {
            int elf_type = MAP_PRIVATE | MAP_DENYWRITE;

            if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
            if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
            if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
            if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) {
                elf_type |= MAP_FIXED;
                vaddr = eppnt->p_vaddr;
            error = target_mmap(load_addr+TARGET_ELF_PAGESTART(vaddr),
                                eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr),
                                eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr));
                close(interpreter_fd);
                return ~((abi_ulong)0UL);

            if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {

            /*
             * Find the end of the file mapping for this phdr, and keep
             * track of the largest address we see for this.
             */
            k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
            if (k > elf_bss) elf_bss = k;

            /*
             * Do the same thing for the memory mapping - between
             * elf_bss and last_bss is the bss section.
             */
            k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
            if (k > last_bss) last_bss = k;

    /* Now use mmap to map the library into memory. */

    close(interpreter_fd);

    /*
     * Now fill out the bss section.  First pad the last page up
     * to the page boundary, and then perform a mmap to make sure
     * that there are zeromapped pages up to and including the last
     * bss page.
     */
    padzero(elf_bss, last_bss);
    elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */

    /* Map the last of the bss segment */
    if (last_bss > elf_bss) {
        target_mmap(elf_bss, last_bss-elf_bss,
                    PROT_READ|PROT_WRITE|PROT_EXEC,
                    MAP_FIXED|MAP_PRIVATE|MAP_ANON, -1, 0);

    *interp_load_addr = load_addr;
    return ((abi_ulong) interp_elf_ex->e_entry) + load_addr;
static int symfind(const void *s0, const void *s1)
    target_ulong addr = *(target_ulong *)s0;
    struct elf_sym *sym = (struct elf_sym *)s1;

    if (addr < sym->st_value) {
    } else if (addr >= sym->st_value + sym->st_size) {

static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr)
#if ELF_CLASS == ELFCLASS32
    struct elf_sym *syms = s->disas_symtab.elf32;
    struct elf_sym *syms = s->disas_symtab.elf64;

    struct elf_sym *sym;

    sym = bsearch(&orig_addr, syms, s->disas_num_syms, sizeof(*syms), symfind);
        return s->disas_strtab + sym->st_name;
/* FIXME: This should use elf_ops.h  */
static int symcmp(const void *s0, const void *s1)
    struct elf_sym *sym0 = (struct elf_sym *)s0;
    struct elf_sym *sym1 = (struct elf_sym *)s1;
    return (sym0->st_value < sym1->st_value)
        ? -1
        : ((sym0->st_value > sym1->st_value) ? 1 : 0);
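
/*
 * Editor's note (not part of the original source): symcmp() orders symbols by
 * ascending st_value, which is what load_symbols() relies on when it qsort()s
 * the table; symfind() is the matching bsearch() comparator, returning 0 when
 * the probed address falls inside [st_value, st_value + st_size).
 */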
/* Best attempt to load symbols from this ELF object. */
static void load_symbols(struct elfhdr *hdr, int fd)
    unsigned int i, nsyms;
    struct elf_shdr sechdr, symtab, strtab;
    struct elf_sym *syms, *new_syms;

    lseek(fd, hdr->e_shoff, SEEK_SET);
    for (i = 0; i < hdr->e_shnum; i++) {
        if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr))
        bswap_shdr(&sechdr);
        if (sechdr.sh_type == SHT_SYMTAB) {
            lseek(fd, hdr->e_shoff
                  + sizeof(sechdr) * sechdr.sh_link, SEEK_SET);
            if (read(fd, &strtab, sizeof(strtab))
            bswap_shdr(&strtab);

    return; /* Shouldn't happen... */

    /* Now know where the strtab and symtab are.  Snarf them. */
    s = malloc(sizeof(*s));
    syms = malloc(symtab.sh_size);

    s->disas_strtab = strings = malloc(strtab.sh_size);
    if (!s->disas_strtab) {

    lseek(fd, symtab.sh_offset, SEEK_SET);
    if (read(fd, syms, symtab.sh_size) != symtab.sh_size) {

    nsyms = symtab.sh_size / sizeof(struct elf_sym);
        bswap_sym(syms + i);
        // Throw away entries which we do not need.
        if (syms[i].st_shndx == SHN_UNDEF ||
            syms[i].st_shndx >= SHN_LORESERVE ||
            ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) {
            syms[i] = syms[nsyms];
#if defined(TARGET_ARM) || defined (TARGET_MIPS)
        /* The bottom address bit marks a Thumb or MIPS16 symbol.  */
        syms[i].st_value &= ~(target_ulong)1;

    /* Attempt to free the storage associated with the local symbols
       that we threw away.  Whether or not this has any effect on the
       memory allocation depends on the malloc implementation and how
       many symbols we managed to discard. */
    new_syms = realloc(syms, nsyms * sizeof(*syms));
    if (new_syms == NULL) {

    qsort(syms, nsyms, sizeof(*syms), symcmp);

    lseek(fd, strtab.sh_offset, SEEK_SET);
    if (read(fd, strings, strtab.sh_size) != strtab.sh_size) {

    s->disas_num_syms = nsyms;
#if ELF_CLASS == ELFCLASS32
    s->disas_symtab.elf32 = syms;
    s->lookup_symbol = (lookup_symbol_t)lookup_symbolxx;
    s->disas_symtab.elf64 = syms;
    s->lookup_symbol = (lookup_symbol_t)lookup_symbolxx;
int load_elf_binary(struct linux_binprm *bprm, struct target_pt_regs *regs,
                    struct image_info *info)
    struct elfhdr elf_ex;
    struct elfhdr interp_elf_ex;
    struct exec interp_ex;
    int interpreter_fd = -1;   /* avoid warning */
    abi_ulong load_addr, load_bias;
    int load_addr_set = 0;
    unsigned int interpreter_type = INTERPRETER_NONE;
    unsigned char ibcs2_interpreter;
    abi_ulong mapped_addr;
    struct elf_phdr *elf_ppnt;
    struct elf_phdr *elf_phdata;
    abi_ulong elf_bss, k, elf_brk;
    char *elf_interpreter;
    abi_ulong elf_entry, interp_load_addr = 0;
    abi_ulong start_code, end_code, start_data, end_data;
    abi_ulong reloc_func_desc = 0;
    abi_ulong elf_stack;
    char passed_fileno[6];

    ibcs2_interpreter = 0;
    elf_ex = *((struct elfhdr *) bprm->buf);          /* exec-header */
    bswap_ehdr(&elf_ex);

    /* First of all, some simple consistency checks */
    if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
        (! elf_check_arch(elf_ex.e_machine))) {

    bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
    bprm->p = copy_elf_strings(bprm->envc, bprm->envp, bprm->page, bprm->p);
    bprm->p = copy_elf_strings(bprm->argc, bprm->argv, bprm->page, bprm->p);

    /* Now read in all of the header information */
    elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize * elf_ex.e_phnum);
    if (elf_phdata == NULL) {

    retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET);
        retval = read(bprm->fd, (char *) elf_phdata,
                      elf_ex.e_phentsize * elf_ex.e_phnum);
        perror("load_elf_binary");
    elf_ppnt = elf_phdata;
    for (i = 0; i < elf_ex.e_phnum; i++, elf_ppnt++) {
        bswap_phdr(elf_ppnt);
    elf_ppnt = elf_phdata;

    elf_stack = ~((abi_ulong)0UL);
    elf_interpreter = NULL;
    start_code = ~((abi_ulong)0UL);

    interp_ex.a_info = 0;

    for (i = 0; i < elf_ex.e_phnum; i++) {
        if (elf_ppnt->p_type == PT_INTERP) {
            if (elf_interpreter != NULL)
                free(elf_interpreter);

            /* This is the program interpreter used for
             * shared libraries - for now assume that this
             * is an a.out format binary
             */
            elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);

            if (elf_interpreter == NULL) {

            retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET);
                retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz);
                perror("load_elf_binary2");

            /* If the program interpreter is one of these two,
               then assume an iBCS2 image.  Otherwise assume
               a native linux image. */

            /* JRP - Need to add X86 lib dir stuff here... */

            if (strcmp(elf_interpreter, "/usr/lib/libc.so.1") == 0 ||
                strcmp(elf_interpreter, "/usr/lib/ld.so.1") == 0) {
                ibcs2_interpreter = 1;

            printf("Using ELF interpreter %s\n", path(elf_interpreter));

            retval = open(path(elf_interpreter), O_RDONLY);
                interpreter_fd = retval;
                perror(elf_interpreter);
                /* retval = -errno; */

            retval = lseek(interpreter_fd, 0, SEEK_SET);
                retval = read(interpreter_fd, bprm->buf, 128);

            interp_ex = *((struct exec *) bprm->buf);       /* aout exec-header */
            interp_elf_ex = *((struct elfhdr *) bprm->buf); /* elf exec-header */
                perror("load_elf_binary3");
            free(elf_interpreter);
    /* Some simple consistency checks for the interpreter */
    if (elf_interpreter) {
        interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;

        /* Now figure out which format our binary is */
        if ((N_MAGIC(interp_ex) != OMAGIC) && (N_MAGIC(interp_ex) != ZMAGIC) &&
            (N_MAGIC(interp_ex) != QMAGIC)) {
            interpreter_type = INTERPRETER_ELF;

        if (interp_elf_ex.e_ident[0] != 0x7f ||
            strncmp((char *)&interp_elf_ex.e_ident[1], "ELF", 3) != 0) {
            interpreter_type &= ~INTERPRETER_ELF;

        if (!interpreter_type) {
            free(elf_interpreter);

    /* OK, we are done with that, now set up the arg stuff,
       and then start this sucker up */

    if (interpreter_type == INTERPRETER_AOUT) {
        snprintf(passed_fileno, sizeof(passed_fileno), "%d", bprm->fd);
        passed_p = passed_fileno;

        if (elf_interpreter) {
            bprm->p = copy_elf_strings(1, &passed_p, bprm->page, bprm->p);

    if (elf_interpreter) {
        free(elf_interpreter);

    /* OK, This is the point of no return */
    info->start_mmap = (abi_ulong)ELF_START_MMAP;

    elf_entry = (abi_ulong) elf_ex.e_entry;
#if defined(CONFIG_USE_GUEST_BASE)
    /*
     * In case the user has not explicitly set guest_base, probe here
     * whether we should set it automatically.
     */
    if (!have_guest_base) {
        /*
         * Go through the ELF program header table and find out whether
         * any of the segments drop below our current mmap_min_addr; in
         * that case set guest_base to the corresponding address.
         */
        for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum;
            if (elf_ppnt->p_type != PT_LOAD)
            if (HOST_PAGE_ALIGN(elf_ppnt->p_vaddr) < mmap_min_addr) {
                guest_base = HOST_PAGE_ALIGN(mmap_min_addr);
#endif /* CONFIG_USE_GUEST_BASE */

    /* Do this so that we can load the interpreter, if need be.  We will
       change some of these later */
    bprm->p = setup_arg_pages(bprm->p, bprm, info);
    info->start_stack = bprm->p;

    /* Now we do a little grungy work by mmaping the ELF image into
     * the correct location in memory.  At this point, we assume that
     * the image should be loaded at fixed address, not at a variable
     * address.
     */
    for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
        if (elf_ppnt->p_type != PT_LOAD)

        if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
        if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
        if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
        elf_flags = MAP_PRIVATE | MAP_DENYWRITE;
        if (elf_ex.e_type == ET_EXEC || load_addr_set) {
            elf_flags |= MAP_FIXED;
        } else if (elf_ex.e_type == ET_DYN) {
            /* Try and get dynamic programs out of the way of the default mmap
               base, as well as whatever program they might try to exec.  This
               is because the brk will follow the loader, and is not movable.  */
            /* NOTE: for qemu, we do a big mmap to get enough space
               without hardcoding any address */
            error = target_mmap(0, ET_DYN_MAP_SIZE,
                                PROT_NONE, MAP_PRIVATE | MAP_ANON,
            load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr);

        error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
                            (elf_ppnt->p_filesz +
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
                            (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
                            (elf_ppnt->p_offset -
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));

#ifdef LOW_ELF_STACK
        if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
            elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr);

        if (!load_addr_set) {
            load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
            if (elf_ex.e_type == ET_DYN) {
                load_bias += error -
                    TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
                load_addr += load_bias;
                reloc_func_desc = load_bias;

        k = elf_ppnt->p_vaddr;
        k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
        if ((elf_ppnt->p_flags & PF_X) && end_code < k)
        k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
        if (k > elf_brk) elf_brk = k;

    elf_entry += load_bias;
    elf_bss += load_bias;
    elf_brk += load_bias;
    start_code += load_bias;
    end_code += load_bias;
    start_data += load_bias;
    end_data += load_bias;
    if (elf_interpreter) {
        if (interpreter_type & 1) {
            elf_entry = load_aout_interp(&interp_ex, interpreter_fd);
        else if (interpreter_type & 2) {
            elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,
            reloc_func_desc = interp_load_addr;

        close(interpreter_fd);
        free(elf_interpreter);

        if (elf_entry == ~((abi_ulong)0UL)) {
            printf("Unable to load interpreter\n");

    if (qemu_log_enabled())
        load_symbols(&elf_ex, bprm->fd);

    if (interpreter_type != INTERPRETER_AOUT) close(bprm->fd);
    info->personality = (ibcs2_interpreter ? PER_SVR4 : PER_LINUX);

#ifdef LOW_ELF_STACK
    info->start_stack = bprm->p = elf_stack - 4;

    bprm->p = create_elf_tables(bprm->p,
                                load_addr, load_bias,
                                (interpreter_type == INTERPRETER_AOUT ? 0 : 1),

    info->load_addr = reloc_func_desc;
    info->start_brk = info->brk = elf_brk;
    info->end_code = end_code;
    info->start_code = start_code;
    info->start_data = start_data;
    info->end_data = end_data;
    info->start_stack = bprm->p;
    /* Calling set_brk effectively mmaps the pages that we need for the bss and break
       sections. */
    set_brk(elf_bss, elf_brk);

    padzero(elf_bss, elf_brk);

    printf("(start_brk) %x\n", info->start_brk);
    printf("(end_code) %x\n", info->end_code);
    printf("(start_code) %x\n", info->start_code);
    printf("(end_data) %x\n", info->end_data);
    printf("(start_stack) %x\n", info->start_stack);
    printf("(brk) %x\n", info->brk);

    if (info->personality == PER_SVR4)
        /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
           and some applications "depend" upon this behavior.
           Since we do not have the power to recompile these, we
           emulate the SVr4 behavior.  Sigh. */
        mapped_addr = target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
                                  MAP_FIXED | MAP_PRIVATE, -1, 0);

    info->entry = elf_entry;

static int load_aout_interp(void *exptr, int interp_fd)
    printf("a.out interpreter not yet supported\n");

void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
    init_thread(regs, infop);