/* This is the Linux kernel elf-loading code, ported into user space */

/* from personality.h */

/*
 * Flags for bug emulation.
 *
 * These occupy the top three bytes.
 */
enum {
    ADDR_NO_RANDOMIZE  = 0x0040000,  /* disable randomization of VA space */
    FDPIC_FUNCPTRS     = 0x0080000,  /* userspace function ptrs point to
                                        descriptors (signal handling) */
    MMAP_PAGE_ZERO     = 0x0100000,
    ADDR_COMPAT_LAYOUT = 0x0200000,
    READ_IMPLIES_EXEC  = 0x0400000,
    ADDR_LIMIT_32BIT   = 0x0800000,
    SHORT_INODE        = 0x1000000,
    WHOLE_SECONDS      = 0x2000000,
    STICKY_TIMEOUTS    = 0x4000000,
    ADDR_LIMIT_3GB     = 0x8000000,
};
/*
 * Personality types.
 *
 * These go in the low byte.  Avoid using the top bit, it will
 * conflict with error returns.
 */
enum {
    PER_LINUX       = 0x0000,
    PER_LINUX_32BIT = 0x0000 | ADDR_LIMIT_32BIT,
    PER_LINUX_FDPIC = 0x0000 | FDPIC_FUNCPTRS,
    PER_SVR4        = 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
    PER_SVR3        = 0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_SCOSVR3     = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS | SHORT_INODE,
    PER_OSR5        = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
    PER_WYSEV386    = 0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_ISCR4       = 0x0005 | STICKY_TIMEOUTS,
    PER_SUNOS       = 0x0006 | STICKY_TIMEOUTS,
    PER_XENIX       = 0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_LINUX32     = 0x0008,
    PER_LINUX32_3GB = 0x0008 | ADDR_LIMIT_3GB,
    PER_IRIX32      = 0x0009 | STICKY_TIMEOUTS,  /* IRIX5 32-bit */
    PER_IRIXN32     = 0x000a | STICKY_TIMEOUTS,  /* IRIX6 new 32-bit */
    PER_IRIX64      = 0x000b | STICKY_TIMEOUTS,  /* IRIX6 64-bit */
    PER_SOLARIS     = 0x000d | STICKY_TIMEOUTS,
    PER_UW7         = 0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
    PER_OSF4        = 0x000f,                    /* OSF/1 v4 */
    PER_MASK        = 0x00ff,
};
/*
 * Return the base personality without flags.
 */
#define personality(pers) (pers & PER_MASK)
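/* Illustrative note (not part of the original file): with the values above,
 * PER_SVR4 is 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO == 0x4100001, so
 * personality(PER_SVR4) yields 0x0001 while the bug-emulation flags stay in
 * the upper bytes. */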
/* this flag is ineffective under Linux too, should be deleted */
#define MAP_DENYWRITE 0
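/* Illustrative note (not part of the original file): because MAP_DENYWRITE is
 * defined as 0 here, OR-ing it into the mmap flags in load_elf_interp() and
 * load_elf_binary() below has no effect; it is kept only to mirror the
 * kernel sources this file was ported from. */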
/* should probably go in elf.h */

#ifdef TARGET_I386

#define ELF_PLATFORM get_elf_platform()

static const char *get_elf_platform(void)
{
    static char elf_platform[] = "i386";
    int family = (thread_env->cpuid_version >> 8) & 0xff;

    if (family > 6)
        family = 6;
    if (family >= 3)
        elf_platform[1] = '0' + family;
    return elf_platform;
}

#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    return thread_env->cpuid_features;
}
#ifdef TARGET_X86_64
#define ELF_START_MMAP 0x2aaaaab000ULL
#define elf_check_arch(x) ( ((x) == ELF_ARCH) )

#define ELF_CLASS   ELFCLASS64
#define ELF_DATA    ELFDATA2LSB
#define ELF_ARCH    EM_X86_64

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->rsp = infop->start_stack;
    regs->rip = infop->entry;
    if (bsd_type == target_freebsd) {
        regs->rdi = infop->start_stack;
    }
}
#else

#define ELF_START_MMAP 0x80000000

/*
 * This is used to ensure we don't load something for the wrong architecture.
 */
#define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )

/*
 * These are used to set parameters in the core dumps.
 */
#define ELF_CLASS   ELFCLASS32
#define ELF_DATA    ELFDATA2LSB
#define ELF_ARCH    EM_386

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->esp = infop->start_stack;
    regs->eip = infop->entry;

    /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
       starts %edx contains a pointer to a function which might be
       registered using `atexit'.  This provides a means for the
       dynamic linker to call DT_FINI functions for shared libraries
       that have been loaded before the code runs.

       A value of 0 tells us we have no such handler. */
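    /* Illustrative completion (not present in the extracted fragment): given
       the comment above, the loader is expected to clear %edx so the guest
       sees "no atexit handler registered". */
    regs->edx = 0;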
}
#endif /* TARGET_X86_64 */

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096

#endif /* TARGET_I386 */
#ifdef TARGET_ARM

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_ARM )

#define ELF_CLASS   ELFCLASS32
#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA    ELFDATA2MSB
#else
#define ELF_DATA    ELFDATA2LSB
#endif
#define ELF_ARCH    EM_ARM

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    abi_long stack = infop->start_stack;
    memset(regs, 0, sizeof(*regs));
    regs->ARM_cpsr = 0x10;
    if (infop->entry & 1)
        regs->ARM_cpsr |= CPSR_T;
    regs->ARM_pc = infop->entry & 0xfffffffe;
    regs->ARM_sp = infop->start_stack;
    /* FIXME - what to do on failure of get_user()? */
    get_user_ual(regs->ARM_r2, stack + 8); /* envp */
    get_user_ual(regs->ARM_r1, stack + 4); /* envp */
    /* XXX: it seems that r0 is zeroed after ! */
    /* For uClinux PIC binaries.  */
    /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
    regs->ARM_r10 = infop->start_data;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096

enum {
    ARM_HWCAP_ARM_SWP       = 1 << 0,
    ARM_HWCAP_ARM_HALF      = 1 << 1,
    ARM_HWCAP_ARM_THUMB     = 1 << 2,
    ARM_HWCAP_ARM_26BIT     = 1 << 3,
    ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
    ARM_HWCAP_ARM_FPA       = 1 << 5,
    ARM_HWCAP_ARM_VFP       = 1 << 6,
    ARM_HWCAP_ARM_EDSP      = 1 << 7,
};

#define ELF_HWCAP (ARM_HWCAP_ARM_SWP | ARM_HWCAP_ARM_HALF \
                   | ARM_HWCAP_ARM_THUMB | ARM_HWCAP_ARM_FAST_MULT \
                   | ARM_HWCAP_ARM_FPA | ARM_HWCAP_ARM_VFP)
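/* Illustrative note (not part of the original file): with the bit values
 * above, this ELF_HWCAP mask works out to SWP|HALF|THUMB|FAST_MULT|FPA|VFP
 * == 0x77, which is what a guest reads back from the AT_HWCAP auxv entry
 * filled in by create_elf_tables() below. */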
#ifdef TARGET_SPARC
#ifdef TARGET_SPARC64

#define ELF_START_MMAP 0x80000000

#ifndef TARGET_ABI32
#define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
#else
#define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )
#endif

#define ELF_CLASS   ELFCLASS64
#define ELF_DATA    ELFDATA2MSB
#define ELF_ARCH    EM_SPARCV9

#define STACK_BIAS  2047
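/* Illustrative note (not part of the original file): the SPARC V9 ABI keeps
 * the register value of %sp biased by -2047, i.e. the true stack address is
 * %sp + STACK_BIAS; that is why the 64-bit branch of init_thread() below
 * subtracts STACK_BIAS when seeding u_regs[14]. */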
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
#ifdef TARGET_ABI32
    regs->u_regs[14] = infop->start_stack - 16 * 4;
#else
    if (personality(infop->personality) == PER_LINUX32)
        regs->u_regs[14] = infop->start_stack - 16 * 4;
    else {
        regs->u_regs[14] = infop->start_stack - 16 * 8 - STACK_BIAS;
        if (bsd_type == target_freebsd) {
            regs->u_regs[8] = infop->start_stack;
            regs->u_regs[11] = infop->start_stack;
        }
    }
#endif
}
#else

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_SPARC )

#define ELF_CLASS   ELFCLASS32
#define ELF_DATA    ELFDATA2MSB
#define ELF_ARCH    EM_SPARC

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
    regs->u_regs[14] = infop->start_stack - 16 * 4;
}

#endif
#endif /* TARGET_SPARC */
#ifdef TARGET_PPC

#define ELF_START_MMAP 0x80000000

#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)

#define elf_check_arch(x) ( (x) == EM_PPC64 )

#define ELF_CLASS   ELFCLASS64

#else

#define elf_check_arch(x) ( (x) == EM_PPC )

#define ELF_CLASS   ELFCLASS32

#endif

#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA    ELFDATA2MSB
#else
#define ELF_DATA    ELFDATA2LSB
#endif
#define ELF_ARCH    EM_PPC

/*
 * We need to put in some extra aux table entries to tell glibc what
 * the cache block size is, so it can use the dcbz instruction safely.
 */
#define AT_DCACHEBSIZE 19
#define AT_ICACHEBSIZE 20
#define AT_UCACHEBSIZE 21
/* A special ignored type value for PPC, for glibc compatibility.  */
#define AT_IGNOREPPC   22
/*
 * The requirements here are:
 * - keep the final alignment of sp (sp & 0xf)
 * - make sure the 32-bit value at the first 16 byte aligned position of
 *   AUXV is greater than 16 for glibc compatibility.
 *   AT_IGNOREPPC is used for that.
 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
 *   even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
 */
#define DLINFO_ARCH_ITEMS 5
#define ARCH_DLINFO                             \
do {                                            \
    NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20);          \
    NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20);          \
    NEW_AUX_ENT(AT_UCACHEBSIZE, 0);             \
    /* Now handle glibc compatibility. */       \
    NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);    \
    NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);    \
} while (0)
static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
{
    abi_ulong pos = infop->start_stack;
    abi_ulong tmp;
#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
    abi_ulong entry, toc;
#endif

    _regs->gpr[1] = infop->start_stack;
#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
    entry = ldq_raw(infop->entry) + infop->load_addr;
    toc = ldq_raw(infop->entry + 8) + infop->load_addr;
    _regs->gpr[2] = toc;
    infop->entry = entry;
#endif
    _regs->nip = infop->entry;
    /* Note that this isn't exactly what the regular kernel does,
     * but it is what the ABI wants and is needed to allow
     * execution of PPC BSD programs.
     */
    /* FIXME - what to do on failure of get_user()? */
    get_user_ual(_regs->gpr[3], pos);
    pos += sizeof(abi_ulong);
    _regs->gpr[4] = pos;
    for (tmp = 1; tmp != 0; pos += sizeof(abi_ulong))
        tmp = ldl(pos);
    _regs->gpr[5] = pos;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096

#endif /* TARGET_PPC */
#ifdef TARGET_MIPS

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_MIPS )

#ifdef TARGET_MIPS64
#define ELF_CLASS   ELFCLASS64
#else
#define ELF_CLASS   ELFCLASS32
#endif
#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA    ELFDATA2MSB
#else
#define ELF_DATA    ELFDATA2LSB
#endif
#define ELF_ARCH    EM_MIPS

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->cp0_status = 2 << CP0St_KSU;
    regs->cp0_epc = infop->entry;
    regs->regs[29] = infop->start_stack;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096

#endif /* TARGET_MIPS */
#ifdef TARGET_SH4

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_SH )

#define ELF_CLASS   ELFCLASS32
#define ELF_DATA    ELFDATA2LSB
#define ELF_ARCH    EM_SH

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    /* Check other registers XXXXX */
    regs->pc = infop->entry;
    regs->regs[15] = infop->start_stack;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096

#endif
#ifdef TARGET_CRIS

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_CRIS )

#define ELF_CLASS   ELFCLASS32
#define ELF_DATA    ELFDATA2LSB
#define ELF_ARCH    EM_CRIS

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->erp = infop->entry;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 8192

#endif
#ifdef TARGET_M68K

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_68K )

#define ELF_CLASS   ELFCLASS32
#define ELF_DATA    ELFDATA2MSB
#define ELF_ARCH    EM_68K

/* ??? Does this need to do anything?
   #define ELF_PLAT_INIT(_r) */

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->usp = infop->start_stack;
    regs->pc = infop->entry;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 8192

#endif
#ifdef TARGET_ALPHA

#define ELF_START_MMAP (0x30000000000ULL)

#define elf_check_arch(x) ( (x) == ELF_ARCH )

#define ELF_CLASS   ELFCLASS64
#define ELF_DATA    ELFDATA2MSB
#define ELF_ARCH    EM_ALPHA

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->usp = infop->start_stack;
    regs->unique = infop->start_data; /* ? */
    printf("Set unique value to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n",
           regs->unique, infop->start_data);
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 8192

#endif /* TARGET_ALPHA */
#ifndef ELF_PLATFORM
#define ELF_PLATFORM (NULL)
#endif

#ifndef ELF_HWCAP
#define ELF_HWCAP 0
#endif

#ifdef TARGET_ABI32
#undef  ELF_CLASS
#define ELF_CLASS ELFCLASS32
#undef  bswaptls
#define bswaptls(ptr) bswap32s(ptr)
#endif

struct exec
{
    unsigned int a_info;   /* Use macros N_MAGIC, etc for access */
    unsigned int a_text;   /* length of text, in bytes */
    unsigned int a_data;   /* length of data, in bytes */
    unsigned int a_bss;    /* length of uninitialized data area, in bytes */
    unsigned int a_syms;   /* length of symbol table data in file, in bytes */
    unsigned int a_entry;  /* start address */
    unsigned int a_trsize; /* length of relocation info for text, in bytes */
    unsigned int a_drsize; /* length of relocation info for data, in bytes */
};

#define N_MAGIC(exec) ((exec).a_info & 0xffff)
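/* Illustrative note (not part of the original file): N_MAGIC() extracts the
 * low 16 bits of a_info; for a classic a.out interpreter this would be one
 * of the historical magic numbers, e.g. OMAGIC (0407), ZMAGIC (0413) or
 * QMAGIC (0314, all octal), which load_elf_binary() tests below. */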
/* max code+data+bss space allocated to elf interpreter */
#define INTERP_MAP_SIZE (32 * 1024 * 1024)

/* max code+data+bss+brk space allocated to ET_DYN executables */
#define ET_DYN_MAP_SIZE (128 * 1024 * 1024)

/* Necessary parameters */
#define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
#define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
#define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
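/* Illustrative note (not part of the original file): with a 4 KiB target
 * page size, TARGET_ELF_PAGESTART(0x08048123) is 0x08048000 and
 * TARGET_ELF_PAGEOFFSET(0x08048123) is 0x123; the loader mmaps from the
 * page start and re-adds the offset when computing segment addresses. */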
#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
#define INTERPRETER_ELF  2

#define DLINFO_ITEMS 12
static inline void memcpy_fromfs(void *to, const void *from, unsigned long n)
{
    memcpy(to, from, n);
}

static int load_aout_interp(void *exptr, int interp_fd);
#ifdef BSWAP_NEEDED
static void bswap_ehdr(struct elfhdr *ehdr)
{
    bswap16s(&ehdr->e_type);        /* Object file type */
    bswap16s(&ehdr->e_machine);     /* Architecture */
    bswap32s(&ehdr->e_version);     /* Object file version */
    bswaptls(&ehdr->e_entry);       /* Entry point virtual address */
    bswaptls(&ehdr->e_phoff);       /* Program header table file offset */
    bswaptls(&ehdr->e_shoff);       /* Section header table file offset */
    bswap32s(&ehdr->e_flags);       /* Processor-specific flags */
    bswap16s(&ehdr->e_ehsize);      /* ELF header size in bytes */
    bswap16s(&ehdr->e_phentsize);   /* Program header table entry size */
    bswap16s(&ehdr->e_phnum);       /* Program header table entry count */
    bswap16s(&ehdr->e_shentsize);   /* Section header table entry size */
    bswap16s(&ehdr->e_shnum);       /* Section header table entry count */
    bswap16s(&ehdr->e_shstrndx);    /* Section header string table index */
}

static void bswap_phdr(struct elf_phdr *phdr)
{
    bswap32s(&phdr->p_type);        /* Segment type */
    bswaptls(&phdr->p_offset);      /* Segment file offset */
    bswaptls(&phdr->p_vaddr);       /* Segment virtual address */
    bswaptls(&phdr->p_paddr);       /* Segment physical address */
    bswaptls(&phdr->p_filesz);      /* Segment size in file */
    bswaptls(&phdr->p_memsz);       /* Segment size in memory */
    bswap32s(&phdr->p_flags);       /* Segment flags */
    bswaptls(&phdr->p_align);       /* Segment alignment */
}

static void bswap_shdr(struct elf_shdr *shdr)
{
    bswap32s(&shdr->sh_name);
    bswap32s(&shdr->sh_type);
    bswaptls(&shdr->sh_flags);
    bswaptls(&shdr->sh_addr);
    bswaptls(&shdr->sh_offset);
    bswaptls(&shdr->sh_size);
    bswap32s(&shdr->sh_link);
    bswap32s(&shdr->sh_info);
    bswaptls(&shdr->sh_addralign);
    bswaptls(&shdr->sh_entsize);
}

static void bswap_sym(struct elf_sym *sym)
{
    bswap32s(&sym->st_name);
    bswaptls(&sym->st_value);
    bswaptls(&sym->st_size);
    bswap16s(&sym->st_shndx);
}
#endif
/*
 * 'copy_elf_strings()' copies argument/environment strings from user
 * memory to free pages in kernel mem.  These are in a format ready
 * to be put directly into the top of new user memory.
 */
static abi_ulong copy_elf_strings(int argc, char **argv, void **page,
                                  abi_ulong p)
{
    char *tmp, *tmp1, *pag = NULL;
    int len, offset = 0;

    if (!p) {
        return 0; /* bullet-proofing */
    }
    while (argc-- > 0) {
        tmp = argv[argc];
        if (!tmp) {
            fprintf(stderr, "VFS: argc is wrong");
            exit(-1);
        }
        tmp1 = tmp;
        while (*tmp++);
        len = tmp - tmp1;
        if (p < len) {  /* this shouldn't happen - 128kB */
            return 0;
        }
        while (len) {
            --p; --tmp; --len;
            if (--offset < 0) {
                offset = p % TARGET_PAGE_SIZE;
                pag = (char *)page[p / TARGET_PAGE_SIZE];
                if (!pag) {
                    pag = (char *)malloc(TARGET_PAGE_SIZE);
                    memset(pag, 0, TARGET_PAGE_SIZE);
                    page[p / TARGET_PAGE_SIZE] = pag;
                    if (!pag)
                        return 0;
                }
            }
            if (len == 0 || offset == 0) {
                *(pag + offset) = *tmp;
            } else {
                int bytes_to_copy = (len > offset) ? offset : len;
                tmp -= bytes_to_copy;
                p -= bytes_to_copy;
                offset -= bytes_to_copy;
                len -= bytes_to_copy;
                memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
            }
        }
    }
    return p;
}
static abi_ulong setup_arg_pages(abi_ulong p, struct linux_binprm *bprm,
                                 struct image_info *info)
{
    abi_ulong stack_base, size, error;
    int i;

    /* Create enough stack to hold everything.  If we don't use
     * it for args, we'll use it for something else...
     */
    size = x86_stack_size;
    if (size < MAX_ARG_PAGES * TARGET_PAGE_SIZE)
        size = MAX_ARG_PAGES * TARGET_PAGE_SIZE;
    error = target_mmap(0,
                        size + qemu_host_page_size,
                        PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANON,
                        -1, 0);
    if (error == -1) {
        perror("stk mmap");
        exit(-1);
    }
    /* we reserve one extra page at the top of the stack as guard */
    target_mprotect(error + size, qemu_host_page_size, PROT_NONE);

    stack_base = error + size - MAX_ARG_PAGES * TARGET_PAGE_SIZE;
    p += stack_base;

    for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
        if (bprm->page[i]) {
            /* FIXME - check return value of memcpy_to_target() for failure */
            memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE);
            free(bprm->page[i]);
        }
        stack_base += TARGET_PAGE_SIZE;
    }
    return p;
}
static void set_brk(abi_ulong start, abi_ulong end)
{
    /* page-align the start and end addresses... */
    start = HOST_PAGE_ALIGN(start);
    end = HOST_PAGE_ALIGN(end);
    if (end <= start)
        return;
    if (target_mmap(start, end - start,
                    PROT_READ | PROT_WRITE | PROT_EXEC,
                    MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) == -1) {
        perror("cannot mmap brk");
        exit(-1);
    }
}
/* We need to explicitly zero any fractional pages after the data
   section (i.e. bss).  This would contain the junk from the file that
   should not be in memory. */
static void padzero(abi_ulong elf_bss, abi_ulong last_bss)
{
    abi_ulong nbyte;

    if (elf_bss >= last_bss)
        return;

    /* XXX: this is really a hack: if the real host page size is
       smaller than the target page size, some pages after the end
       of the file may not be mapped.  A better fix would be to
       patch target_mmap(), but it is more complicated as the file
       size must be known. */
    if (qemu_real_host_page_size < qemu_host_page_size) {
        abi_ulong end_addr, end_addr1;
        end_addr1 = (elf_bss + qemu_real_host_page_size - 1) &
            ~(qemu_real_host_page_size - 1);
        end_addr = HOST_PAGE_ALIGN(elf_bss);
        if (end_addr1 < end_addr) {
            mmap((void *)g2h(end_addr1), end_addr - end_addr1,
                 PROT_READ | PROT_WRITE | PROT_EXEC,
                 MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0);
        }
    }

    nbyte = elf_bss & (qemu_host_page_size - 1);
    if (nbyte) {
        nbyte = qemu_host_page_size - nbyte;
        do {
            /* FIXME - what to do if put_user() fails? */
            put_user_u8(0, elf_bss);
            elf_bss++;
        } while (--nbyte);
    }
}
static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
                                   struct elfhdr *exec,
                                   abi_ulong load_addr,
                                   abi_ulong load_bias,
                                   abi_ulong interp_load_addr, int ibcs,
                                   struct image_info *info)
{
    abi_ulong sp;
    int size;
    abi_ulong u_platform;
    const char *k_platform;
    const int n = sizeof(elf_addr_t);

    sp = p;
    u_platform = 0;
    k_platform = ELF_PLATFORM;
    if (k_platform) {
        size_t len = strlen(k_platform) + 1;
        sp -= (len + n - 1) & ~(n - 1);
        u_platform = sp;
        /* FIXME - check return value of memcpy_to_target() for failure */
        memcpy_to_target(sp, k_platform, len);
    }
    /*
     * Force 16 byte _final_ alignment here for generality.
     */
    sp = sp & ~(abi_ulong)15;
    size = (DLINFO_ITEMS + 1) * 2;
    if (k_platform)
        size += 2;
#ifdef DLINFO_ARCH_ITEMS
    size += DLINFO_ARCH_ITEMS * 2;
#endif
    size += envc + argc + 2;
    size += (!ibcs ? 3 : 1);    /* argc itself */
    size *= n;
    if (size & 15)
        sp -= 16 - (size & 15);

    /* This is correct because Linux defines
     * elf_addr_t as Elf32_Off / Elf64_Off
     */
#define NEW_AUX_ENT(id, val) do {           \
        sp -= n; put_user_ual(val, sp);     \
        sp -= n; put_user_ual(id, sp);      \
    } while (0)
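    /* Illustrative note (not part of the original file): each NEW_AUX_ENT()
     * pushes the value first and then the id, so the pair ends up in memory
     * as { id, val } at increasing addresses.  After
     * NEW_AUX_ENT(AT_PAGESZ, TARGET_PAGE_SIZE), for example, *sp holds
     * AT_PAGESZ and *(sp + n) holds the page size. */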
    NEW_AUX_ENT(AT_NULL, 0);

    /* There must be exactly DLINFO_ITEMS entries here.  */
    NEW_AUX_ENT(AT_PHDR, (abi_ulong)(load_addr + exec->e_phoff));
    NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
    NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
    NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
    NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_load_addr));
    NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
    NEW_AUX_ENT(AT_ENTRY, load_bias + exec->e_entry);
    NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
    NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
    NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
    NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
    NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
    NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK));
    if (k_platform) {
        NEW_AUX_ENT(AT_PLATFORM, u_platform);
    }
#ifdef ARCH_DLINFO
    /*
     * ARCH_DLINFO must come last so platform specific code can enforce
     * special alignment requirements on the AUXV if necessary (eg. PPC).
     */
    ARCH_DLINFO;
#endif
#undef NEW_AUX_ENT

    sp = loader_build_argptr(envc, argc, sp, p, !ibcs);
    return sp;
}
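/* Illustrative note (not part of the original file): loader_build_argptr()
 * finishes the initial stack in the usual SysV layout below the auxv built
 * above: argc, then the argv[] pointers and a NULL, then the envp[] pointers
 * and a NULL, with the auxiliary vector (terminated by AT_NULL) at the
 * highest addresses. */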
static abi_ulong load_elf_interp(struct elfhdr *interp_elf_ex,
                                 int interpreter_fd,
                                 abi_ulong *interp_load_addr)
{
    struct elf_phdr *elf_phdata = NULL;
    struct elf_phdr *eppnt;
    abi_ulong load_addr = 0;
    int load_addr_set = 0;
    int retval;
    abi_ulong last_bss, elf_bss;
    abi_ulong error;
    int i;

    elf_bss = 0;
    last_bss = 0;
    error = 0;

#ifdef BSWAP_NEEDED
    bswap_ehdr(interp_elf_ex);
#endif
    /* First of all, some simple consistency checks */
    if ((interp_elf_ex->e_type != ET_EXEC &&
         interp_elf_ex->e_type != ET_DYN) ||
        !elf_check_arch(interp_elf_ex->e_machine)) {
        return ~((abi_ulong)0UL);
    }

    /* Now read in all of the header information */
    if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE)
        return ~(abi_ulong)0UL;

    elf_phdata = (struct elf_phdr *)
        malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
    if (!elf_phdata)
        return ~((abi_ulong)0UL);

    /*
     * If the size of this structure has changed, then punt, since
     * we will be doing the wrong thing.
     */
    if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
        free(elf_phdata);
        return ~((abi_ulong)0UL);
    }

    retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);
    if (retval >= 0) {
        retval = read(interpreter_fd,
                      (char *) elf_phdata,
                      sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
    }
    if (retval < 0) {
        perror("load_elf_interp");
        free(elf_phdata);
        return ~((abi_ulong)0UL);
    }
#ifdef BSWAP_NEEDED
    eppnt = elf_phdata;
    for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
        bswap_phdr(eppnt);
    }
#endif

    if (interp_elf_ex->e_type == ET_DYN) {
        /* in order to avoid hardcoding the interpreter load
           address in qemu, we allocate a big enough memory zone */
        error = target_mmap(0, INTERP_MAP_SIZE,
                            PROT_NONE, MAP_PRIVATE | MAP_ANON,
                            -1, 0);
        load_addr = error;
        load_addr_set = 1;
    }

    eppnt = elf_phdata;
    for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++)
        if (eppnt->p_type == PT_LOAD) {
            int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
            int elf_prot = 0;
            abi_ulong vaddr = 0;
            abi_ulong k;

            if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
            if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
            if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
            if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) {
                elf_type |= MAP_FIXED;
                vaddr = eppnt->p_vaddr;
            }
            error = target_mmap(load_addr + TARGET_ELF_PAGESTART(vaddr),
                                eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr),
                                elf_prot,
                                elf_type,
                                interpreter_fd,
                                eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr));
            if (error == -1) {
                /* Real error */
                close(interpreter_fd);
                free(elf_phdata);
                return ~((abi_ulong)0UL);
            }

            if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
                load_addr = error;
                load_addr_set = 1;
            }

            /*
             * Find the end of the file mapping for this phdr, and keep
             * track of the largest address we see for this.
             */
            k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
            if (k > elf_bss) elf_bss = k;

            /*
             * Do the same thing for the memory mapping - between
             * elf_bss and last_bss is the bss section.
             */
            k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
            if (k > last_bss) last_bss = k;
        }

    /* Now use mmap to map the library into memory. */
    close(interpreter_fd);

    /*
     * Now fill out the bss section.  First pad the last page up
     * to the page boundary, and then perform a mmap to make sure
     * that there are zeromapped pages up to and including the last
     * bss page.
     */
    padzero(elf_bss, last_bss);
    elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */

    /* Map the last of the bss segment */
    if (last_bss > elf_bss) {
        target_mmap(elf_bss, last_bss - elf_bss,
                    PROT_READ | PROT_WRITE | PROT_EXEC,
                    MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0);
    }
    free(elf_phdata);

    *interp_load_addr = load_addr;
    return ((abi_ulong) interp_elf_ex->e_entry) + load_addr;
}
static int symfind(const void *s0, const void *s1)
{
    struct elf_sym *key = (struct elf_sym *)s0;
    struct elf_sym *sym = (struct elf_sym *)s1;
    int result = 0;

    if (key->st_value < sym->st_value) {
        result = -1;
    } else if (key->st_value > sym->st_value + sym->st_size) {
        result = 1;
    }
    return result;
}

static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr)
{
#if ELF_CLASS == ELFCLASS32
    struct elf_sym *syms = s->disas_symtab.elf32;
#else
    struct elf_sym *syms = s->disas_symtab.elf64;
#endif

    /* binary search */
    struct elf_sym key;
    struct elf_sym *sym;

    key.st_value = orig_addr;

    sym = bsearch(&key, syms, s->disas_num_syms, sizeof(*syms), symfind);
    if (sym != NULL) {
        return s->disas_strtab + sym->st_name;
    }

    return "";
}
/* FIXME: This should use elf_ops.h  */
static int symcmp(const void *s0, const void *s1)
{
    struct elf_sym *sym0 = (struct elf_sym *)s0;
    struct elf_sym *sym1 = (struct elf_sym *)s1;
    return (sym0->st_value < sym1->st_value)
        ? -1
        : ((sym0->st_value > sym1->st_value) ? 1 : 0);
}

/* Best attempt to load symbols from this ELF object. */
static void load_symbols(struct elfhdr *hdr, int fd)
{
    unsigned int i, nsyms;
    struct elf_shdr sechdr, symtab, strtab;
    char *strings;
    struct syminfo *s;
    struct elf_sym *syms, *new_syms;

    lseek(fd, hdr->e_shoff, SEEK_SET);
    for (i = 0; i < hdr->e_shnum; i++) {
        if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr))
            return;
#ifdef BSWAP_NEEDED
        bswap_shdr(&sechdr);
#endif
        if (sechdr.sh_type == SHT_SYMTAB) {
            symtab = sechdr;
            lseek(fd, hdr->e_shoff
                  + sizeof(sechdr) * sechdr.sh_link, SEEK_SET);
            if (read(fd, &strtab, sizeof(strtab))
                != sizeof(strtab))
                return;
#ifdef BSWAP_NEEDED
            bswap_shdr(&strtab);
#endif
            goto found;
        }
    }
    return; /* Shouldn't happen... */

 found:
    /* Now we know where the strtab and symtab are.  Snarf them. */
    s = malloc(sizeof(*s));
    syms = malloc(symtab.sh_size);
    if (!syms) {
        free(s);
        return;
    }
    s->disas_strtab = strings = malloc(strtab.sh_size);
    if (!s->disas_strtab) {
        free(s);
        free(syms);
        return;
    }

    lseek(fd, symtab.sh_offset, SEEK_SET);
    if (read(fd, syms, symtab.sh_size) != symtab.sh_size) {
        free(s);
        free(syms);
        free(strings);
        return;
    }

    nsyms = symtab.sh_size / sizeof(struct elf_sym);

    i = 0;
    while (i < nsyms) {
#ifdef BSWAP_NEEDED
        bswap_sym(syms + i);
#endif
        // Throw away entries which we do not need.
        if (syms[i].st_shndx == SHN_UNDEF ||
            syms[i].st_shndx >= SHN_LORESERVE ||
            ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) {
            nsyms--;
            if (i < nsyms) {
                syms[i] = syms[nsyms];
            }
            continue;
        }
#if defined(TARGET_ARM) || defined (TARGET_MIPS)
        /* The bottom address bit marks a Thumb or MIPS16 symbol.  */
        syms[i].st_value &= ~(target_ulong)1;
#endif
        i++;
    }

    /* Attempt to free the storage associated with the local symbols
       that we threw away.  Whether or not this has any effect on the
       memory allocation depends on the malloc implementation and how
       many symbols we managed to discard. */
    new_syms = realloc(syms, nsyms * sizeof(*syms));
    if (new_syms == NULL) {
        free(s);
        free(syms);
        free(strings);
        return;
    }
    syms = new_syms;

    qsort(syms, nsyms, sizeof(*syms), symcmp);

    lseek(fd, strtab.sh_offset, SEEK_SET);
    if (read(fd, strings, strtab.sh_size) != strtab.sh_size) {
        free(s);
        free(syms);
        free(strings);
        return;
    }
    s->disas_num_syms = nsyms;
#if ELF_CLASS == ELFCLASS32
    s->disas_symtab.elf32 = syms;
    s->lookup_symbol = (lookup_symbol_t)lookup_symbolxx;
#else
    s->disas_symtab.elf64 = syms;
    s->lookup_symbol = (lookup_symbol_t)lookup_symbolxx;
#endif
}
int load_elf_binary(struct linux_binprm *bprm, struct target_pt_regs *regs,
                    struct image_info *info)
{
    struct elfhdr elf_ex;
    struct elfhdr interp_elf_ex;
    struct exec interp_ex;
    int interpreter_fd = -1; /* avoid warning */
    abi_ulong load_addr, load_bias;
    int load_addr_set = 0;
    unsigned int interpreter_type = INTERPRETER_NONE;
    unsigned char ibcs2_interpreter;
    int i;
    abi_ulong mapped_addr;
    struct elf_phdr *elf_ppnt;
    struct elf_phdr *elf_phdata;
    abi_ulong elf_bss, k, elf_brk;
    int retval;
    char *elf_interpreter;
    abi_ulong elf_entry, interp_load_addr = 0;
    abi_ulong start_code, end_code, start_data, end_data;
    abi_ulong reloc_func_desc = 0;
    abi_ulong elf_stack;
    char passed_fileno[6];

    ibcs2_interpreter = 0;
    elf_ex = *((struct elfhdr *) bprm->buf);    /* exec-header */
#ifdef BSWAP_NEEDED
    bswap_ehdr(&elf_ex);
#endif

    /* First of all, some simple consistency checks */
    if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
        (!elf_check_arch(elf_ex.e_machine))) {
        return -ENOEXEC;
    }

    bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
    bprm->p = copy_elf_strings(bprm->envc, bprm->envp, bprm->page, bprm->p);
    bprm->p = copy_elf_strings(bprm->argc, bprm->argv, bprm->page, bprm->p);
    if (!bprm->p) {
        return -E2BIG;
    }

    /* Now read in all of the header information */
    elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize * elf_ex.e_phnum);
    if (elf_phdata == NULL) {
        return -ENOMEM;
    }

    retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET);
    if (retval >= 0) {
        retval = read(bprm->fd, (char *)elf_phdata,
                      elf_ex.e_phentsize * elf_ex.e_phnum);
    }

    if (retval < 0) {
        perror("load_elf_binary");
        free(elf_phdata);
        return -errno;
    }
#ifdef BSWAP_NEEDED
    elf_ppnt = elf_phdata;
    for (i = 0; i < elf_ex.e_phnum; i++, elf_ppnt++) {
        bswap_phdr(elf_ppnt);
    }
#endif
    elf_ppnt = elf_phdata;

    elf_bss = 0;
    elf_brk = 0;

    elf_stack = ~((abi_ulong)0UL);
    elf_interpreter = NULL;
    start_code = ~((abi_ulong)0UL);
    end_code = 0;
    start_data = 0;
    end_data = 0;
    interp_ex.a_info = 0;
    for (i = 0; i < elf_ex.e_phnum; i++) {
        if (elf_ppnt->p_type == PT_INTERP) {
            if (elf_interpreter != NULL) {
                free(elf_phdata);
                free(elf_interpreter);
                close(bprm->fd);
                return -EINVAL;
            }

            /* This is the program interpreter used for
             * shared libraries - for now assume that this
             * is an a.out format binary
             */
            elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);
            if (elf_interpreter == NULL) {
                free(elf_phdata);
                close(bprm->fd);
                return -ENOMEM;
            }

            retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET);
            if (retval >= 0) {
                retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz);
            }
            if (retval < 0) {
                perror("load_elf_binary2");
                exit(-1);
            }

            /* If the program interpreter is one of these two,
               then assume an iBCS2 image.  Otherwise assume
               a native Linux image. */

            /* JRP - Need to add X86 lib dir stuff here... */

            if (strcmp(elf_interpreter, "/usr/lib/libc.so.1") == 0 ||
                strcmp(elf_interpreter, "/usr/lib/ld.so.1") == 0) {
                ibcs2_interpreter = 1;
            }

            printf("Using ELF interpreter %s\n", path(elf_interpreter));

            if (retval >= 0) {
                retval = open(path(elf_interpreter), O_RDONLY);
                if (retval >= 0) {
                    interpreter_fd = retval;
                } else {
                    perror(elf_interpreter);
                    exit(-1);
                    /* retval = -errno; */
                }
            }

            if (retval >= 0) {
                retval = lseek(interpreter_fd, 0, SEEK_SET);
                if (retval >= 0) {
                    retval = read(interpreter_fd, bprm->buf, 128);
                }
            }
            if (retval >= 0) {
                interp_ex = *((struct exec *)bprm->buf);        /* aout exec-header */
                interp_elf_ex = *((struct elfhdr *)bprm->buf);  /* elf exec-header */
            }
            if (retval < 0) {
                perror("load_elf_binary3");
                free(elf_phdata);
                free(elf_interpreter);
                close(bprm->fd);
                return -errno;
            }
        }
        elf_ppnt++;
    }
    /* Some simple consistency checks for the interpreter */
    if (elf_interpreter) {
        interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;

        /* Now figure out which format our binary is */
        if ((N_MAGIC(interp_ex) != OMAGIC) && (N_MAGIC(interp_ex) != ZMAGIC) &&
            (N_MAGIC(interp_ex) != QMAGIC)) {
            interpreter_type = INTERPRETER_ELF;
        }

        if (interp_elf_ex.e_ident[0] != 0x7f ||
            strncmp((char *)&interp_elf_ex.e_ident[1], "ELF", 3) != 0) {
            interpreter_type &= ~INTERPRETER_ELF;
        }

        if (!interpreter_type) {
            free(elf_interpreter);
            free(elf_phdata);
            close(bprm->fd);
            return -ELIBBAD;
        }
    }
    /* OK, we are done with that, now set up the arg stuff,
       and then start this sucker up */
    {
        char *passed_p;

        if (interpreter_type == INTERPRETER_AOUT) {
            snprintf(passed_fileno, sizeof(passed_fileno), "%d", bprm->fd);
            passed_p = passed_fileno;

            if (elf_interpreter) {
                bprm->p = copy_elf_strings(1, &passed_p, bprm->page, bprm->p);
            }
        }
        if (!bprm->p) {
            if (elf_interpreter) {
                free(elf_interpreter);
            }
            free(elf_phdata);
            close(bprm->fd);
            return -E2BIG;
        }
    }
    /* OK, This is the point of no return */
    info->start_mmap = (abi_ulong)ELF_START_MMAP;
    elf_entry = (abi_ulong) elf_ex.e_entry;

#if defined(CONFIG_USE_GUEST_BASE)
    /*
     * If the user has not explicitly set guest_base, probe here whether we
     * should set it automatically.
     */
    if (!have_guest_base) {
        /*
         * Go through the ELF program header table and find out whether
         * any of the segments drop below our current mmap_min_addr and
         * in that case set guest_base to the corresponding address.
         */
        for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum;
             i++, elf_ppnt++) {
            if (elf_ppnt->p_type != PT_LOAD)
                continue;
            if (HOST_PAGE_ALIGN(elf_ppnt->p_vaddr) < mmap_min_addr) {
                guest_base = HOST_PAGE_ALIGN(mmap_min_addr);
                break;
            }
        }
    }
#endif /* CONFIG_USE_GUEST_BASE */
    /* Do this so that we can load the interpreter, if need be.  We will
       change some of these later */
    bprm->p = setup_arg_pages(bprm->p, bprm, info);
    info->start_stack = bprm->p;
    /* Now we do a little grungy work by mmaping the ELF image into
     * the correct location in memory.  At this point, we assume that
     * the image should be loaded at a fixed address, not at a variable
     * address.
     */
    for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
        int elf_prot = 0;
        int elf_flags = 0;
        abi_ulong error;

        if (elf_ppnt->p_type != PT_LOAD)
            continue;

        if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
        if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
        if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
        elf_flags = MAP_PRIVATE | MAP_DENYWRITE;
        if (elf_ex.e_type == ET_EXEC || load_addr_set) {
            elf_flags |= MAP_FIXED;
        } else if (elf_ex.e_type == ET_DYN) {
            /* Try and get dynamic programs out of the way of the default mmap
               base, as well as whatever program they might try to exec.  This
               is because the brk will follow the loader, and is not movable. */
            /* NOTE: for qemu, we do a big mmap to get enough space
               without hardcoding any address */
            error = target_mmap(0, ET_DYN_MAP_SIZE,
                                PROT_NONE, MAP_PRIVATE | MAP_ANON,
                                -1, 0);
            load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr);
        }

        error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
                            (elf_ppnt->p_filesz +
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
                            elf_prot,
                            (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
                            bprm->fd,
                            (elf_ppnt->p_offset -
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));

#ifdef LOW_ELF_STACK
        if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
            elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr);
#endif

        if (!load_addr_set) {
            load_addr_set = 1;
            load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
            if (elf_ex.e_type == ET_DYN) {
                load_bias += error -
                    TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
                load_addr += load_bias;
                reloc_func_desc = load_bias;
            }
        }
        k = elf_ppnt->p_vaddr;
        if (k < start_code)
            start_code = k;
        if (start_data < k)
            start_data = k;
        k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
        if (k > elf_bss)
            elf_bss = k;
        if ((elf_ppnt->p_flags & PF_X) && end_code < k)
            end_code = k;
        if (end_data < k)
            end_data = k;
        k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
        if (k > elf_brk) elf_brk = k;
    }

    elf_entry += load_bias;
    elf_bss += load_bias;
    elf_brk += load_bias;
    start_code += load_bias;
    end_code += load_bias;
    start_data += load_bias;
    end_data += load_bias;
    if (elf_interpreter) {
        if (interpreter_type & 1) {
            elf_entry = load_aout_interp(&interp_ex, interpreter_fd);
        } else if (interpreter_type & 2) {
            elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,
                                        &interp_load_addr);
        }
        reloc_func_desc = interp_load_addr;

        close(interpreter_fd);
        free(elf_interpreter);

        if (elf_entry == ~((abi_ulong)0UL)) {
            printf("Unable to load interpreter\n");
            free(elf_phdata);
            exit(-1);
        }
    }
    if (qemu_log_enabled())
        load_symbols(&elf_ex, bprm->fd);

    if (interpreter_type != INTERPRETER_AOUT) close(bprm->fd);
    info->personality = (ibcs2_interpreter ? PER_SVR4 : PER_LINUX);
#ifdef LOW_ELF_STACK
    info->start_stack = bprm->p = elf_stack - 4;
#endif
    bprm->p = create_elf_tables(bprm->p,
                                bprm->argc,
                                bprm->envc,
                                &elf_ex,
                                load_addr, load_bias,
                                interp_load_addr,
                                (interpreter_type == INTERPRETER_AOUT ? 0 : 1),
                                info);
    info->load_addr = reloc_func_desc;
    info->start_brk = info->brk = elf_brk;
    info->end_code = end_code;
    info->start_code = start_code;
    info->start_data = start_data;
    info->end_data = end_data;
    info->start_stack = bprm->p;
    /* Calling set_brk effectively mmaps the pages that we need for the bss
       and break sections. */
    set_brk(elf_bss, elf_brk);

    padzero(elf_bss, elf_brk);
1545 printf("(start_brk) %x\n" , info
->start_brk
);
1546 printf("(end_code) %x\n" , info
->end_code
);
1547 printf("(start_code) %x\n" , info
->start_code
);
1548 printf("(end_data) %x\n" , info
->end_data
);
1549 printf("(start_stack) %x\n" , info
->start_stack
);
1550 printf("(brk) %x\n" , info
->brk
);
    if (info->personality == PER_SVR4) {
        /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
           and some applications "depend" upon this behavior.
           Since we do not have the power to recompile these, we
           emulate the SVr4 behavior.  Sigh. */
        mapped_addr = target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
                                  MAP_FIXED | MAP_PRIVATE, -1, 0);
    }

    info->entry = elf_entry;

    return 0;
}
static int load_aout_interp(void *exptr, int interp_fd)
{
    printf("a.out interpreter not yet supported\n");
    return 0;
}

void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    init_thread(regs, infop);
}