1 /* This is the Linux kernel elf-loading code, ported into user space */
3 #include "qemu/osdep.h"
7 #include "disas/disas.h"
19 /* from personality.h */
22 * Flags for bug emulation.
24 * These occupy the top three bytes.
27 ADDR_NO_RANDOMIZE
= 0x0040000, /* disable randomization of VA space */
28 FDPIC_FUNCPTRS
= 0x0080000, /* userspace function ptrs point to descriptors
31 MMAP_PAGE_ZERO
= 0x0100000,
32 ADDR_COMPAT_LAYOUT
= 0x0200000,
33 READ_IMPLIES_EXEC
= 0x0400000,
34 ADDR_LIMIT_32BIT
= 0x0800000,
35 SHORT_INODE
= 0x1000000,
36 WHOLE_SECONDS
= 0x2000000,
37 STICKY_TIMEOUTS
= 0x4000000,
38 ADDR_LIMIT_3GB
= 0x8000000,
44 * These go in the low byte. Avoid using the top bit, it will
45 * conflict with error returns.
49 PER_LINUX_32BIT
= 0x0000 | ADDR_LIMIT_32BIT
,
50 PER_LINUX_FDPIC
= 0x0000 | FDPIC_FUNCPTRS
,
51 PER_SVR4
= 0x0001 | STICKY_TIMEOUTS
| MMAP_PAGE_ZERO
,
52 PER_SVR3
= 0x0002 | STICKY_TIMEOUTS
| SHORT_INODE
,
53 PER_SCOSVR3
= 0x0003 | STICKY_TIMEOUTS
|
54 WHOLE_SECONDS
| SHORT_INODE
,
55 PER_OSR5
= 0x0003 | STICKY_TIMEOUTS
| WHOLE_SECONDS
,
56 PER_WYSEV386
= 0x0004 | STICKY_TIMEOUTS
| SHORT_INODE
,
57 PER_ISCR4
= 0x0005 | STICKY_TIMEOUTS
,
59 PER_SUNOS
= 0x0006 | STICKY_TIMEOUTS
,
60 PER_XENIX
= 0x0007 | STICKY_TIMEOUTS
| SHORT_INODE
,
62 PER_LINUX32_3GB
= 0x0008 | ADDR_LIMIT_3GB
,
63 PER_IRIX32
= 0x0009 | STICKY_TIMEOUTS
,/* IRIX5 32-bit */
64 PER_IRIXN32
= 0x000a | STICKY_TIMEOUTS
,/* IRIX6 new 32-bit */
65 PER_IRIX64
= 0x000b | STICKY_TIMEOUTS
,/* IRIX6 64-bit */
67 PER_SOLARIS
= 0x000d | STICKY_TIMEOUTS
,
68 PER_UW7
= 0x000e | STICKY_TIMEOUTS
| MMAP_PAGE_ZERO
,
69 PER_OSF4
= 0x000f, /* OSF/1 v4 */
75 * Return the base personality without flags.
77 #define personality(pers) (pers & PER_MASK)
79 /* this flag is uneffective under linux too, should be deleted */
81 #define MAP_DENYWRITE 0
84 /* should probably go in elf.h */
91 #define ELF_PLATFORM get_elf_platform()
93 static const char *get_elf_platform(void)
95 static char elf_platform
[] = "i386";
96 int family
= object_property_get_int(OBJECT(thread_cpu
), "family", NULL
);
100 elf_platform
[1] = '0' + family
;
104 #define ELF_HWCAP get_elf_hwcap()
106 static uint32_t get_elf_hwcap(void)
108 X86CPU
*cpu
= X86_CPU(thread_cpu
);
110 return cpu
->env
.features
[FEAT_1_EDX
];
114 #define ELF_START_MMAP 0x2aaaaab000ULL
115 #define elf_check_arch(x) ( ((x) == ELF_ARCH) )
117 #define ELF_CLASS ELFCLASS64
118 #define ELF_DATA ELFDATA2LSB
119 #define ELF_ARCH EM_X86_64
121 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
124 regs
->rsp
= infop
->start_stack
;
125 regs
->rip
= infop
->entry
;
126 if (bsd_type
== target_freebsd
) {
127 regs
->rdi
= infop
->start_stack
;
133 #define ELF_START_MMAP 0x80000000
136 * This is used to ensure we don't load something for the wrong architecture.
138 #define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )
141 * These are used to set parameters in the core dumps.
143 #define ELF_CLASS ELFCLASS32
144 #define ELF_DATA ELFDATA2LSB
145 #define ELF_ARCH EM_386
147 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
149 regs
->esp
= infop
->start_stack
;
150 regs
->eip
= infop
->entry
;
152 /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
153 starts %edx contains a pointer to a function which might be
154 registered using `atexit'. This provides a mean for the
155 dynamic linker to call DT_FINI functions for shared libraries
156 that have been loaded before the code runs.
158 A value of 0 tells we have no such handler. */
163 #define USE_ELF_CORE_DUMP
164 #define ELF_EXEC_PAGESIZE 4096
170 #define ELF_START_MMAP 0x80000000
172 #define elf_check_arch(x) ( (x) == EM_ARM )
174 #define ELF_CLASS ELFCLASS32
175 #ifdef TARGET_WORDS_BIGENDIAN
176 #define ELF_DATA ELFDATA2MSB
178 #define ELF_DATA ELFDATA2LSB
180 #define ELF_ARCH EM_ARM
182 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
184 abi_long stack
= infop
->start_stack
;
185 memset(regs
, 0, sizeof(*regs
));
186 regs
->ARM_cpsr
= 0x10;
187 if (infop
->entry
& 1)
188 regs
->ARM_cpsr
|= CPSR_T
;
189 regs
->ARM_pc
= infop
->entry
& 0xfffffffe;
190 regs
->ARM_sp
= infop
->start_stack
;
191 /* FIXME - what to for failure of get_user()? */
192 get_user_ual(regs
->ARM_r2
, stack
+ 8); /* envp */
193 get_user_ual(regs
->ARM_r1
, stack
+ 4); /* envp */
194 /* XXX: it seems that r0 is zeroed after ! */
196 /* For uClinux PIC binaries. */
197 /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
198 regs
->ARM_r10
= infop
->start_data
;
201 #define USE_ELF_CORE_DUMP
202 #define ELF_EXEC_PAGESIZE 4096
206 ARM_HWCAP_ARM_SWP
= 1 << 0,
207 ARM_HWCAP_ARM_HALF
= 1 << 1,
208 ARM_HWCAP_ARM_THUMB
= 1 << 2,
209 ARM_HWCAP_ARM_26BIT
= 1 << 3,
210 ARM_HWCAP_ARM_FAST_MULT
= 1 << 4,
211 ARM_HWCAP_ARM_FPA
= 1 << 5,
212 ARM_HWCAP_ARM_VFP
= 1 << 6,
213 ARM_HWCAP_ARM_EDSP
= 1 << 7,
216 #define ELF_HWCAP (ARM_HWCAP_ARM_SWP | ARM_HWCAP_ARM_HALF \
217 | ARM_HWCAP_ARM_THUMB | ARM_HWCAP_ARM_FAST_MULT \
218 | ARM_HWCAP_ARM_FPA | ARM_HWCAP_ARM_VFP)
223 #ifdef TARGET_SPARC64
225 #define ELF_START_MMAP 0x80000000
228 #define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
230 #define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )
233 #define ELF_CLASS ELFCLASS64
234 #define ELF_DATA ELFDATA2MSB
235 #define ELF_ARCH EM_SPARCV9
237 #define STACK_BIAS 2047
239 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
244 regs
->pc
= infop
->entry
;
245 regs
->npc
= regs
->pc
+ 4;
248 regs
->u_regs
[14] = infop
->start_stack
- 16 * 4;
250 if (personality(infop
->personality
) == PER_LINUX32
)
251 regs
->u_regs
[14] = infop
->start_stack
- 16 * 4;
253 regs
->u_regs
[14] = infop
->start_stack
- 16 * 8 - STACK_BIAS
;
254 if (bsd_type
== target_freebsd
) {
255 regs
->u_regs
[8] = infop
->start_stack
;
256 regs
->u_regs
[11] = infop
->start_stack
;
263 #define ELF_START_MMAP 0x80000000
265 #define elf_check_arch(x) ( (x) == EM_SPARC )
267 #define ELF_CLASS ELFCLASS32
268 #define ELF_DATA ELFDATA2MSB
269 #define ELF_ARCH EM_SPARC
271 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
274 regs
->pc
= infop
->entry
;
275 regs
->npc
= regs
->pc
+ 4;
277 regs
->u_regs
[14] = infop
->start_stack
- 16 * 4;
285 #define ELF_START_MMAP 0x80000000
287 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
289 #define elf_check_arch(x) ( (x) == EM_PPC64 )
291 #define ELF_CLASS ELFCLASS64
295 #define elf_check_arch(x) ( (x) == EM_PPC )
297 #define ELF_CLASS ELFCLASS32
301 #ifdef TARGET_WORDS_BIGENDIAN
302 #define ELF_DATA ELFDATA2MSB
304 #define ELF_DATA ELFDATA2LSB
306 #define ELF_ARCH EM_PPC
309 * We need to put in some extra aux table entries to tell glibc what
310 * the cache block size is, so it can use the dcbz instruction safely.
312 #define AT_DCACHEBSIZE 19
313 #define AT_ICACHEBSIZE 20
314 #define AT_UCACHEBSIZE 21
315 /* A special ignored type value for PPC, for glibc compatibility. */
316 #define AT_IGNOREPPC 22
318 * The requirements here are:
319 * - keep the final alignment of sp (sp & 0xf)
320 * - make sure the 32-bit value at the first 16 byte aligned position of
321 * AUXV is greater than 16 for glibc compatibility.
322 * AT_IGNOREPPC is used for that.
323 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
324 * even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
326 #define DLINFO_ARCH_ITEMS 5
327 #define ARCH_DLINFO \
329 NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20); \
330 NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20); \
331 NEW_AUX_ENT(AT_UCACHEBSIZE, 0); \
333 * Now handle glibc compatibility. \
335 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
336 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
339 static inline void init_thread(struct target_pt_regs
*_regs
, struct image_info
*infop
)
341 abi_ulong pos
= infop
->start_stack
;
343 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
344 abi_ulong entry
, toc
;
347 _regs
->gpr
[1] = infop
->start_stack
;
348 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
349 get_user_u64(entry
, infop
->entry
);
350 entry
+= infop
->load_addr
;
351 get_user_u64(toc
, infop
->entry
+ 8);
352 toc
+= infop
->load_addr
;
354 infop
->entry
= entry
;
356 _regs
->nip
= infop
->entry
;
357 /* Note that isn't exactly what regular kernel does
358 * but this is what the ABI wants and is needed to allow
359 * execution of PPC BSD programs.
361 /* FIXME - what to for failure of get_user()? */
362 get_user_ual(_regs
->gpr
[3], pos
);
363 pos
+= sizeof(abi_ulong
);
365 for (tmp
= 1; tmp
!= 0; pos
+= sizeof(abi_ulong
)) {
366 get_user_ual(tmp
, pos
);
371 #define USE_ELF_CORE_DUMP
372 #define ELF_EXEC_PAGESIZE 4096
378 #define ELF_START_MMAP 0x80000000
380 #define elf_check_arch(x) ( (x) == EM_MIPS )
383 #define ELF_CLASS ELFCLASS64
385 #define ELF_CLASS ELFCLASS32
387 #ifdef TARGET_WORDS_BIGENDIAN
388 #define ELF_DATA ELFDATA2MSB
390 #define ELF_DATA ELFDATA2LSB
392 #define ELF_ARCH EM_MIPS
394 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
396 regs
->cp0_status
= 2 << CP0St_KSU
;
397 regs
->cp0_epc
= infop
->entry
;
398 regs
->regs
[29] = infop
->start_stack
;
401 #define USE_ELF_CORE_DUMP
402 #define ELF_EXEC_PAGESIZE 4096
404 #endif /* TARGET_MIPS */
408 #define ELF_START_MMAP 0x80000000
410 #define elf_check_arch(x) ( (x) == EM_SH )
412 #define ELF_CLASS ELFCLASS32
413 #define ELF_DATA ELFDATA2LSB
414 #define ELF_ARCH EM_SH
416 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
418 /* Check other registers XXXXX */
419 regs
->pc
= infop
->entry
;
420 regs
->regs
[15] = infop
->start_stack
;
423 #define USE_ELF_CORE_DUMP
424 #define ELF_EXEC_PAGESIZE 4096
430 #define ELF_START_MMAP 0x80000000
432 #define elf_check_arch(x) ( (x) == EM_CRIS )
434 #define ELF_CLASS ELFCLASS32
435 #define ELF_DATA ELFDATA2LSB
436 #define ELF_ARCH EM_CRIS
438 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
440 regs
->erp
= infop
->entry
;
443 #define USE_ELF_CORE_DUMP
444 #define ELF_EXEC_PAGESIZE 8192
450 #define ELF_START_MMAP 0x80000000
452 #define elf_check_arch(x) ( (x) == EM_68K )
454 #define ELF_CLASS ELFCLASS32
455 #define ELF_DATA ELFDATA2MSB
456 #define ELF_ARCH EM_68K
458 /* ??? Does this need to do anything?
459 #define ELF_PLAT_INIT(_r) */
461 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
463 regs
->usp
= infop
->start_stack
;
465 regs
->pc
= infop
->entry
;
468 #define USE_ELF_CORE_DUMP
469 #define ELF_EXEC_PAGESIZE 8192
475 #define ELF_START_MMAP (0x30000000000ULL)
477 #define elf_check_arch(x) ( (x) == ELF_ARCH )
479 #define ELF_CLASS ELFCLASS64
480 #define ELF_DATA ELFDATA2MSB
481 #define ELF_ARCH EM_ALPHA
483 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
485 regs
->pc
= infop
->entry
;
487 regs
->usp
= infop
->start_stack
;
488 regs
->unique
= infop
->start_data
; /* ? */
489 printf("Set unique value to " TARGET_FMT_lx
" (" TARGET_FMT_lx
")\n",
490 regs
->unique
, infop
->start_data
);
493 #define USE_ELF_CORE_DUMP
494 #define ELF_EXEC_PAGESIZE 8192
496 #endif /* TARGET_ALPHA */
499 #define ELF_PLATFORM (NULL)
508 #define ELF_CLASS ELFCLASS32
510 #define bswaptls(ptr) bswap32s(ptr)
517 unsigned int a_info
; /* Use macros N_MAGIC, etc for access */
518 unsigned int a_text
; /* length of text, in bytes */
519 unsigned int a_data
; /* length of data, in bytes */
520 unsigned int a_bss
; /* length of uninitialized data area, in bytes */
521 unsigned int a_syms
; /* length of symbol table data in file, in bytes */
522 unsigned int a_entry
; /* start address */
523 unsigned int a_trsize
; /* length of relocation info for text, in bytes */
524 unsigned int a_drsize
; /* length of relocation info for data, in bytes */
528 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
534 /* max code+data+bss space allocated to elf interpreter */
535 #define INTERP_MAP_SIZE (32 * 1024 * 1024)
537 /* max code+data+bss+brk space allocated to ET_DYN executables */
538 #define ET_DYN_MAP_SIZE (128 * 1024 * 1024)
540 /* Necessary parameters */
541 #define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
542 #define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
543 #define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
545 #define INTERPRETER_NONE 0
546 #define INTERPRETER_AOUT 1
547 #define INTERPRETER_ELF 2
549 #define DLINFO_ITEMS 12
/* Linux-kernel-compat shim: in user space there is no user/kernel boundary,
 * so "copy from user segment" is a plain memcpy. */
static inline void memcpy_fromfs(void *to, const void *from, unsigned long n)
{
    memcpy(to, from, n);
}
556 static int load_aout_interp(void * exptr
, int interp_fd
);
559 static void bswap_ehdr(struct elfhdr
*ehdr
)
561 bswap16s(&ehdr
->e_type
); /* Object file type */
562 bswap16s(&ehdr
->e_machine
); /* Architecture */
563 bswap32s(&ehdr
->e_version
); /* Object file version */
564 bswaptls(&ehdr
->e_entry
); /* Entry point virtual address */
565 bswaptls(&ehdr
->e_phoff
); /* Program header table file offset */
566 bswaptls(&ehdr
->e_shoff
); /* Section header table file offset */
567 bswap32s(&ehdr
->e_flags
); /* Processor-specific flags */
568 bswap16s(&ehdr
->e_ehsize
); /* ELF header size in bytes */
569 bswap16s(&ehdr
->e_phentsize
); /* Program header table entry size */
570 bswap16s(&ehdr
->e_phnum
); /* Program header table entry count */
571 bswap16s(&ehdr
->e_shentsize
); /* Section header table entry size */
572 bswap16s(&ehdr
->e_shnum
); /* Section header table entry count */
573 bswap16s(&ehdr
->e_shstrndx
); /* Section header string table index */
576 static void bswap_phdr(struct elf_phdr
*phdr
)
578 bswap32s(&phdr
->p_type
); /* Segment type */
579 bswaptls(&phdr
->p_offset
); /* Segment file offset */
580 bswaptls(&phdr
->p_vaddr
); /* Segment virtual address */
581 bswaptls(&phdr
->p_paddr
); /* Segment physical address */
582 bswaptls(&phdr
->p_filesz
); /* Segment size in file */
583 bswaptls(&phdr
->p_memsz
); /* Segment size in memory */
584 bswap32s(&phdr
->p_flags
); /* Segment flags */
585 bswaptls(&phdr
->p_align
); /* Segment alignment */
588 static void bswap_shdr(struct elf_shdr
*shdr
)
590 bswap32s(&shdr
->sh_name
);
591 bswap32s(&shdr
->sh_type
);
592 bswaptls(&shdr
->sh_flags
);
593 bswaptls(&shdr
->sh_addr
);
594 bswaptls(&shdr
->sh_offset
);
595 bswaptls(&shdr
->sh_size
);
596 bswap32s(&shdr
->sh_link
);
597 bswap32s(&shdr
->sh_info
);
598 bswaptls(&shdr
->sh_addralign
);
599 bswaptls(&shdr
->sh_entsize
);
602 static void bswap_sym(struct elf_sym
*sym
)
604 bswap32s(&sym
->st_name
);
605 bswaptls(&sym
->st_value
);
606 bswaptls(&sym
->st_size
);
607 bswap16s(&sym
->st_shndx
);
612 * 'copy_elf_strings()' copies argument/envelope strings from user
613 * memory to free pages in kernel mem. These are in a format ready
614 * to be put directly into the top of new user memory.
617 static abi_ulong
copy_elf_strings(int argc
,char ** argv
, void **page
,
620 char *tmp
, *tmp1
, *pag
= NULL
;
624 return 0; /* bullet-proofing */
629 fprintf(stderr
, "VFS: argc is wrong");
635 if (p
< len
) { /* this shouldn't happen - 128kB */
641 offset
= p
% TARGET_PAGE_SIZE
;
642 pag
= (char *)page
[p
/TARGET_PAGE_SIZE
];
644 pag
= g_try_malloc0(TARGET_PAGE_SIZE
);
645 page
[p
/TARGET_PAGE_SIZE
] = pag
;
650 if (len
== 0 || offset
== 0) {
651 *(pag
+ offset
) = *tmp
;
654 int bytes_to_copy
= (len
> offset
) ? offset
: len
;
655 tmp
-= bytes_to_copy
;
657 offset
-= bytes_to_copy
;
658 len
-= bytes_to_copy
;
659 memcpy_fromfs(pag
+ offset
, tmp
, bytes_to_copy
+ 1);
666 static abi_ulong
setup_arg_pages(abi_ulong p
, struct linux_binprm
*bprm
,
667 struct image_info
*info
)
669 abi_ulong stack_base
, size
, error
;
672 /* Create enough stack to hold everything. If we don't use
673 * it for args, we'll use it for something else...
675 size
= x86_stack_size
;
676 if (size
< MAX_ARG_PAGES
*TARGET_PAGE_SIZE
)
677 size
= MAX_ARG_PAGES
*TARGET_PAGE_SIZE
;
678 error
= target_mmap(0,
679 size
+ qemu_host_page_size
,
680 PROT_READ
| PROT_WRITE
,
681 MAP_PRIVATE
| MAP_ANON
,
687 /* we reserve one extra page at the top of the stack as guard */
688 target_mprotect(error
+ size
, qemu_host_page_size
, PROT_NONE
);
690 stack_base
= error
+ size
- MAX_ARG_PAGES
*TARGET_PAGE_SIZE
;
693 for (i
= 0 ; i
< MAX_ARG_PAGES
; i
++) {
696 /* FIXME - check return value of memcpy_to_target() for failure */
697 memcpy_to_target(stack_base
, bprm
->page
[i
], TARGET_PAGE_SIZE
);
698 g_free(bprm
->page
[i
]);
700 stack_base
+= TARGET_PAGE_SIZE
;
705 static void set_brk(abi_ulong start
, abi_ulong end
)
707 /* page-align the start and end addresses... */
708 start
= HOST_PAGE_ALIGN(start
);
709 end
= HOST_PAGE_ALIGN(end
);
712 if(target_mmap(start
, end
- start
,
713 PROT_READ
| PROT_WRITE
| PROT_EXEC
,
714 MAP_FIXED
| MAP_PRIVATE
| MAP_ANON
, -1, 0) == -1) {
715 perror("cannot mmap brk");
721 /* We need to explicitly zero any fractional pages after the data
722 section (i.e. bss). This would contain the junk from the file that
723 should not be in memory. */
724 static void padzero(abi_ulong elf_bss
, abi_ulong last_bss
)
728 if (elf_bss
>= last_bss
)
731 /* XXX: this is really a hack : if the real host page size is
732 smaller than the target page size, some pages after the end
733 of the file may not be mapped. A better fix would be to
734 patch target_mmap(), but it is more complicated as the file
735 size must be known */
736 if (qemu_real_host_page_size
< qemu_host_page_size
) {
737 abi_ulong end_addr
, end_addr1
;
738 end_addr1
= REAL_HOST_PAGE_ALIGN(elf_bss
);
739 end_addr
= HOST_PAGE_ALIGN(elf_bss
);
740 if (end_addr1
< end_addr
) {
741 mmap((void *)g2h(end_addr1
), end_addr
- end_addr1
,
742 PROT_READ
|PROT_WRITE
|PROT_EXEC
,
743 MAP_FIXED
|MAP_PRIVATE
|MAP_ANON
, -1, 0);
747 nbyte
= elf_bss
& (qemu_host_page_size
-1);
749 nbyte
= qemu_host_page_size
- nbyte
;
751 /* FIXME - what to do if put_user() fails? */
752 put_user_u8(0, elf_bss
);
759 static abi_ulong
create_elf_tables(abi_ulong p
, int argc
, int envc
,
760 struct elfhdr
* exec
,
763 abi_ulong interp_load_addr
, int ibcs
,
764 struct image_info
*info
)
768 abi_ulong u_platform
;
769 const char *k_platform
;
770 const int n
= sizeof(elf_addr_t
);
774 k_platform
= ELF_PLATFORM
;
776 size_t len
= strlen(k_platform
) + 1;
777 sp
-= (len
+ n
- 1) & ~(n
- 1);
779 /* FIXME - check return value of memcpy_to_target() for failure */
780 memcpy_to_target(sp
, k_platform
, len
);
783 * Force 16 byte _final_ alignment here for generality.
785 sp
= sp
&~ (abi_ulong
)15;
786 size
= (DLINFO_ITEMS
+ 1) * 2;
789 #ifdef DLINFO_ARCH_ITEMS
790 size
+= DLINFO_ARCH_ITEMS
* 2;
792 size
+= envc
+ argc
+ 2;
793 size
+= (!ibcs
? 3 : 1); /* argc itself */
796 sp
-= 16 - (size
& 15);
798 /* This is correct because Linux defines
799 * elf_addr_t as Elf32_Off / Elf64_Off
801 #define NEW_AUX_ENT(id, val) do { \
802 sp -= n; put_user_ual(val, sp); \
803 sp -= n; put_user_ual(id, sp); \
806 NEW_AUX_ENT (AT_NULL
, 0);
808 /* There must be exactly DLINFO_ITEMS entries here. */
809 NEW_AUX_ENT(AT_PHDR
, (abi_ulong
)(load_addr
+ exec
->e_phoff
));
810 NEW_AUX_ENT(AT_PHENT
, (abi_ulong
)(sizeof (struct elf_phdr
)));
811 NEW_AUX_ENT(AT_PHNUM
, (abi_ulong
)(exec
->e_phnum
));
812 NEW_AUX_ENT(AT_PAGESZ
, (abi_ulong
)(TARGET_PAGE_SIZE
));
813 NEW_AUX_ENT(AT_BASE
, (abi_ulong
)(interp_load_addr
));
814 NEW_AUX_ENT(AT_FLAGS
, (abi_ulong
)0);
815 NEW_AUX_ENT(AT_ENTRY
, load_bias
+ exec
->e_entry
);
816 NEW_AUX_ENT(AT_UID
, (abi_ulong
) getuid());
817 NEW_AUX_ENT(AT_EUID
, (abi_ulong
) geteuid());
818 NEW_AUX_ENT(AT_GID
, (abi_ulong
) getgid());
819 NEW_AUX_ENT(AT_EGID
, (abi_ulong
) getegid());
820 NEW_AUX_ENT(AT_HWCAP
, (abi_ulong
) ELF_HWCAP
);
821 NEW_AUX_ENT(AT_CLKTCK
, (abi_ulong
) sysconf(_SC_CLK_TCK
));
823 NEW_AUX_ENT(AT_PLATFORM
, u_platform
);
826 * ARCH_DLINFO must come last so platform specific code can enforce
827 * special alignment requirements on the AUXV if necessary (eg. PPC).
833 sp
= loader_build_argptr(envc
, argc
, sp
, p
, !ibcs
);
838 static abi_ulong
load_elf_interp(struct elfhdr
* interp_elf_ex
,
840 abi_ulong
*interp_load_addr
)
842 struct elf_phdr
*elf_phdata
= NULL
;
843 struct elf_phdr
*eppnt
;
844 abi_ulong load_addr
= 0;
845 int load_addr_set
= 0;
847 abi_ulong last_bss
, elf_bss
;
856 bswap_ehdr(interp_elf_ex
);
858 /* First of all, some simple consistency checks */
859 if ((interp_elf_ex
->e_type
!= ET_EXEC
&&
860 interp_elf_ex
->e_type
!= ET_DYN
) ||
861 !elf_check_arch(interp_elf_ex
->e_machine
)) {
862 return ~((abi_ulong
)0UL);
866 /* Now read in all of the header information */
868 if (sizeof(struct elf_phdr
) * interp_elf_ex
->e_phnum
> TARGET_PAGE_SIZE
)
869 return ~(abi_ulong
)0UL;
871 elf_phdata
= (struct elf_phdr
*)
872 malloc(sizeof(struct elf_phdr
) * interp_elf_ex
->e_phnum
);
875 return ~((abi_ulong
)0UL);
878 * If the size of this structure has changed, then punt, since
879 * we will be doing the wrong thing.
881 if (interp_elf_ex
->e_phentsize
!= sizeof(struct elf_phdr
)) {
883 return ~((abi_ulong
)0UL);
886 retval
= lseek(interpreter_fd
, interp_elf_ex
->e_phoff
, SEEK_SET
);
888 retval
= read(interpreter_fd
,
890 sizeof(struct elf_phdr
) * interp_elf_ex
->e_phnum
);
893 perror("load_elf_interp");
900 for (i
=0; i
<interp_elf_ex
->e_phnum
; i
++, eppnt
++) {
905 if (interp_elf_ex
->e_type
== ET_DYN
) {
906 /* in order to avoid hardcoding the interpreter load
907 address in qemu, we allocate a big enough memory zone */
908 error
= target_mmap(0, INTERP_MAP_SIZE
,
909 PROT_NONE
, MAP_PRIVATE
| MAP_ANON
,
920 for(i
=0; i
<interp_elf_ex
->e_phnum
; i
++, eppnt
++)
921 if (eppnt
->p_type
== PT_LOAD
) {
922 int elf_type
= MAP_PRIVATE
| MAP_DENYWRITE
;
927 if (eppnt
->p_flags
& PF_R
) elf_prot
= PROT_READ
;
928 if (eppnt
->p_flags
& PF_W
) elf_prot
|= PROT_WRITE
;
929 if (eppnt
->p_flags
& PF_X
) elf_prot
|= PROT_EXEC
;
930 if (interp_elf_ex
->e_type
== ET_EXEC
|| load_addr_set
) {
931 elf_type
|= MAP_FIXED
;
932 vaddr
= eppnt
->p_vaddr
;
934 error
= target_mmap(load_addr
+TARGET_ELF_PAGESTART(vaddr
),
935 eppnt
->p_filesz
+ TARGET_ELF_PAGEOFFSET(eppnt
->p_vaddr
),
939 eppnt
->p_offset
- TARGET_ELF_PAGEOFFSET(eppnt
->p_vaddr
));
943 close(interpreter_fd
);
945 return ~((abi_ulong
)0UL);
948 if (!load_addr_set
&& interp_elf_ex
->e_type
== ET_DYN
) {
954 * Find the end of the file mapping for this phdr, and keep
955 * track of the largest address we see for this.
957 k
= load_addr
+ eppnt
->p_vaddr
+ eppnt
->p_filesz
;
958 if (k
> elf_bss
) elf_bss
= k
;
961 * Do the same thing for the memory mapping - between
962 * elf_bss and last_bss is the bss section.
964 k
= load_addr
+ eppnt
->p_memsz
+ eppnt
->p_vaddr
;
965 if (k
> last_bss
) last_bss
= k
;
968 /* Now use mmap to map the library into memory. */
970 close(interpreter_fd
);
973 * Now fill out the bss section. First pad the last page up
974 * to the page boundary, and then perform a mmap to make sure
975 * that there are zeromapped pages up to and including the last
978 padzero(elf_bss
, last_bss
);
979 elf_bss
= TARGET_ELF_PAGESTART(elf_bss
+ qemu_host_page_size
- 1); /* What we have mapped so far */
981 /* Map the last of the bss segment */
982 if (last_bss
> elf_bss
) {
983 target_mmap(elf_bss
, last_bss
-elf_bss
,
984 PROT_READ
|PROT_WRITE
|PROT_EXEC
,
985 MAP_FIXED
|MAP_PRIVATE
|MAP_ANON
, -1, 0);
989 *interp_load_addr
= load_addr
;
990 return ((abi_ulong
) interp_elf_ex
->e_entry
) + load_addr
;
993 static int symfind(const void *s0
, const void *s1
)
995 target_ulong addr
= *(target_ulong
*)s0
;
996 struct elf_sym
*sym
= (struct elf_sym
*)s1
;
998 if (addr
< sym
->st_value
) {
1000 } else if (addr
>= sym
->st_value
+ sym
->st_size
) {
1006 static const char *lookup_symbolxx(struct syminfo
*s
, target_ulong orig_addr
)
1008 #if ELF_CLASS == ELFCLASS32
1009 struct elf_sym
*syms
= s
->disas_symtab
.elf32
;
1011 struct elf_sym
*syms
= s
->disas_symtab
.elf64
;
1015 struct elf_sym
*sym
;
1017 sym
= bsearch(&orig_addr
, syms
, s
->disas_num_syms
, sizeof(*syms
), symfind
);
1019 return s
->disas_strtab
+ sym
->st_name
;
1025 /* FIXME: This should use elf_ops.h */
1026 static int symcmp(const void *s0
, const void *s1
)
1028 struct elf_sym
*sym0
= (struct elf_sym
*)s0
;
1029 struct elf_sym
*sym1
= (struct elf_sym
*)s1
;
1030 return (sym0
->st_value
< sym1
->st_value
)
1032 : ((sym0
->st_value
> sym1
->st_value
) ? 1 : 0);
1035 /* Best attempt to load symbols from this ELF object. */
1036 static void load_symbols(struct elfhdr
*hdr
, int fd
)
1038 unsigned int i
, nsyms
;
1039 struct elf_shdr sechdr
, symtab
, strtab
;
1042 struct elf_sym
*syms
, *new_syms
;
1044 lseek(fd
, hdr
->e_shoff
, SEEK_SET
);
1045 for (i
= 0; i
< hdr
->e_shnum
; i
++) {
1046 if (read(fd
, &sechdr
, sizeof(sechdr
)) != sizeof(sechdr
))
1049 bswap_shdr(&sechdr
);
1051 if (sechdr
.sh_type
== SHT_SYMTAB
) {
1053 lseek(fd
, hdr
->e_shoff
1054 + sizeof(sechdr
) * sechdr
.sh_link
, SEEK_SET
);
1055 if (read(fd
, &strtab
, sizeof(strtab
))
1059 bswap_shdr(&strtab
);
1064 return; /* Shouldn't happen... */
1067 /* Now know where the strtab and symtab are. Snarf them. */
1068 s
= malloc(sizeof(*s
));
1069 syms
= malloc(symtab
.sh_size
);
1074 s
->disas_strtab
= strings
= malloc(strtab
.sh_size
);
1075 if (!s
->disas_strtab
) {
1081 lseek(fd
, symtab
.sh_offset
, SEEK_SET
);
1082 if (read(fd
, syms
, symtab
.sh_size
) != symtab
.sh_size
) {
1089 nsyms
= symtab
.sh_size
/ sizeof(struct elf_sym
);
1094 bswap_sym(syms
+ i
);
1096 // Throw away entries which we do not need.
1097 if (syms
[i
].st_shndx
== SHN_UNDEF
||
1098 syms
[i
].st_shndx
>= SHN_LORESERVE
||
1099 ELF_ST_TYPE(syms
[i
].st_info
) != STT_FUNC
) {
1102 syms
[i
] = syms
[nsyms
];
1106 #if defined(TARGET_ARM) || defined (TARGET_MIPS)
1107 /* The bottom address bit marks a Thumb or MIPS16 symbol. */
1108 syms
[i
].st_value
&= ~(target_ulong
)1;
1113 /* Attempt to free the storage associated with the local symbols
1114 that we threw away. Whether or not this has any effect on the
1115 memory allocation depends on the malloc implementation and how
1116 many symbols we managed to discard. */
1117 new_syms
= realloc(syms
, nsyms
* sizeof(*syms
));
1118 if (new_syms
== NULL
) {
1126 qsort(syms
, nsyms
, sizeof(*syms
), symcmp
);
1128 lseek(fd
, strtab
.sh_offset
, SEEK_SET
);
1129 if (read(fd
, strings
, strtab
.sh_size
) != strtab
.sh_size
) {
1135 s
->disas_num_syms
= nsyms
;
1136 #if ELF_CLASS == ELFCLASS32
1137 s
->disas_symtab
.elf32
= syms
;
1138 s
->lookup_symbol
= (lookup_symbol_t
)lookup_symbolxx
;
1140 s
->disas_symtab
.elf64
= syms
;
1141 s
->lookup_symbol
= (lookup_symbol_t
)lookup_symbolxx
;
1147 int load_elf_binary(struct linux_binprm
* bprm
, struct target_pt_regs
* regs
,
1148 struct image_info
* info
)
1150 struct elfhdr elf_ex
;
1151 struct elfhdr interp_elf_ex
;
1152 struct exec interp_ex
;
1153 int interpreter_fd
= -1; /* avoid warning */
1154 abi_ulong load_addr
, load_bias
;
1155 int load_addr_set
= 0;
1156 unsigned int interpreter_type
= INTERPRETER_NONE
;
1157 unsigned char ibcs2_interpreter
;
1159 abi_ulong mapped_addr
;
1160 struct elf_phdr
* elf_ppnt
;
1161 struct elf_phdr
*elf_phdata
;
1162 abi_ulong elf_bss
, k
, elf_brk
;
1164 char * elf_interpreter
;
1165 abi_ulong elf_entry
, interp_load_addr
= 0;
1167 abi_ulong start_code
, end_code
, start_data
, end_data
;
1168 abi_ulong reloc_func_desc
= 0;
1169 abi_ulong elf_stack
;
1170 char passed_fileno
[6];
1172 ibcs2_interpreter
= 0;
1176 elf_ex
= *((struct elfhdr
*) bprm
->buf
); /* exec-header */
1178 bswap_ehdr(&elf_ex
);
1181 /* First of all, some simple consistency checks */
1182 if ((elf_ex
.e_type
!= ET_EXEC
&& elf_ex
.e_type
!= ET_DYN
) ||
1183 (! elf_check_arch(elf_ex
.e_machine
))) {
1187 bprm
->p
= copy_elf_strings(1, &bprm
->filename
, bprm
->page
, bprm
->p
);
1188 bprm
->p
= copy_elf_strings(bprm
->envc
,bprm
->envp
,bprm
->page
,bprm
->p
);
1189 bprm
->p
= copy_elf_strings(bprm
->argc
,bprm
->argv
,bprm
->page
,bprm
->p
);
1194 /* Now read in all of the header information */
1195 elf_phdata
= (struct elf_phdr
*)malloc(elf_ex
.e_phentsize
*elf_ex
.e_phnum
);
1196 if (elf_phdata
== NULL
) {
1200 retval
= lseek(bprm
->fd
, elf_ex
.e_phoff
, SEEK_SET
);
1202 retval
= read(bprm
->fd
, (char *) elf_phdata
,
1203 elf_ex
.e_phentsize
* elf_ex
.e_phnum
);
1207 perror("load_elf_binary");
1214 elf_ppnt
= elf_phdata
;
1215 for (i
=0; i
<elf_ex
.e_phnum
; i
++, elf_ppnt
++) {
1216 bswap_phdr(elf_ppnt
);
1219 elf_ppnt
= elf_phdata
;
1225 elf_stack
= ~((abi_ulong
)0UL);
1226 elf_interpreter
= NULL
;
1227 start_code
= ~((abi_ulong
)0UL);
1231 interp_ex
.a_info
= 0;
1233 for(i
=0;i
< elf_ex
.e_phnum
; i
++) {
1234 if (elf_ppnt
->p_type
== PT_INTERP
) {
1235 if ( elf_interpreter
!= NULL
)
1238 free(elf_interpreter
);
1243 /* This is the program interpreter used for
1244 * shared libraries - for now assume that this
1245 * is an a.out format binary
1248 elf_interpreter
= (char *)malloc(elf_ppnt
->p_filesz
);
1250 if (elf_interpreter
== NULL
) {
1256 retval
= lseek(bprm
->fd
, elf_ppnt
->p_offset
, SEEK_SET
);
1258 retval
= read(bprm
->fd
, elf_interpreter
, elf_ppnt
->p_filesz
);
1261 perror("load_elf_binary2");
1265 /* If the program interpreter is one of these two,
1266 then assume an iBCS2 image. Otherwise assume
1267 a native linux image. */
1269 /* JRP - Need to add X86 lib dir stuff here... */
1271 if (strcmp(elf_interpreter
,"/usr/lib/libc.so.1") == 0 ||
1272 strcmp(elf_interpreter
,"/usr/lib/ld.so.1") == 0) {
1273 ibcs2_interpreter
= 1;
1277 printf("Using ELF interpreter %s\n", path(elf_interpreter
));
1280 retval
= open(path(elf_interpreter
), O_RDONLY
);
1282 interpreter_fd
= retval
;
1285 perror(elf_interpreter
);
1287 /* retval = -errno; */
1292 retval
= lseek(interpreter_fd
, 0, SEEK_SET
);
1294 retval
= read(interpreter_fd
,bprm
->buf
,128);
1298 interp_ex
= *((struct exec
*) bprm
->buf
); /* aout exec-header */
1299 interp_elf_ex
= *((struct elfhdr
*) bprm
->buf
); /* elf exec-header */
1302 perror("load_elf_binary3");
1305 free(elf_interpreter
);
1313 /* Some simple consistency checks for the interpreter */
1314 if (elf_interpreter
){
1315 interpreter_type
= INTERPRETER_ELF
| INTERPRETER_AOUT
;
1317 /* Now figure out which format our binary is */
1318 if ((N_MAGIC(interp_ex
) != OMAGIC
) && (N_MAGIC(interp_ex
) != ZMAGIC
) &&
1319 (N_MAGIC(interp_ex
) != QMAGIC
)) {
1320 interpreter_type
= INTERPRETER_ELF
;
1323 if (interp_elf_ex
.e_ident
[0] != 0x7f ||
1324 strncmp((char *)&interp_elf_ex
.e_ident
[1], "ELF",3) != 0) {
1325 interpreter_type
&= ~INTERPRETER_ELF
;
1328 if (!interpreter_type
) {
1329 free(elf_interpreter
);
1336 /* OK, we are done with that, now set up the arg stuff,
1337 and then start this sucker up */
1342 if (interpreter_type
== INTERPRETER_AOUT
) {
1343 snprintf(passed_fileno
, sizeof(passed_fileno
), "%d", bprm
->fd
);
1344 passed_p
= passed_fileno
;
1346 if (elf_interpreter
) {
1347 bprm
->p
= copy_elf_strings(1,&passed_p
,bprm
->page
,bprm
->p
);
1352 free(elf_interpreter
);
1359 /* OK, This is the point of no return */
1362 info
->start_mmap
= (abi_ulong
)ELF_START_MMAP
;
1364 elf_entry
= (abi_ulong
) elf_ex
.e_entry
;
1367 * In case where user has not explicitly set the guest_base, we
1368 * probe here that should we set it automatically.
1370 if (!have_guest_base
) {
1372 * Go through ELF program header table and find out whether
1373 * any of the segments drop below our current mmap_min_addr and
1374 * in that case set guest_base to corresponding address.
1376 for (i
= 0, elf_ppnt
= elf_phdata
; i
< elf_ex
.e_phnum
;
1378 if (elf_ppnt
->p_type
!= PT_LOAD
)
1380 if (HOST_PAGE_ALIGN(elf_ppnt
->p_vaddr
) < mmap_min_addr
) {
1381 guest_base
= HOST_PAGE_ALIGN(mmap_min_addr
);
1387 /* Do this so that we can load the interpreter, if need be. We will
1388 change some of these later */
1390 bprm
->p
= setup_arg_pages(bprm
->p
, bprm
, info
);
1391 info
->start_stack
= bprm
->p
;
1393 /* Now we do a little grungy work by mmaping the ELF image into
1394 * the correct location in memory. At this point, we assume that
1395 * the image should be loaded at fixed address, not at a variable
1399 for(i
= 0, elf_ppnt
= elf_phdata
; i
< elf_ex
.e_phnum
; i
++, elf_ppnt
++) {
1404 if (elf_ppnt
->p_type
!= PT_LOAD
)
1407 if (elf_ppnt
->p_flags
& PF_R
) elf_prot
|= PROT_READ
;
1408 if (elf_ppnt
->p_flags
& PF_W
) elf_prot
|= PROT_WRITE
;
1409 if (elf_ppnt
->p_flags
& PF_X
) elf_prot
|= PROT_EXEC
;
1410 elf_flags
= MAP_PRIVATE
| MAP_DENYWRITE
;
1411 if (elf_ex
.e_type
== ET_EXEC
|| load_addr_set
) {
1412 elf_flags
|= MAP_FIXED
;
1413 } else if (elf_ex
.e_type
== ET_DYN
) {
1414 /* Try and get dynamic programs out of the way of the default mmap
1415 base, as well as whatever program they might try to exec. This
1416 is because the brk will follow the loader, and is not movable. */
1417 /* NOTE: for qemu, we do a big mmap to get enough space
1418 without hardcoding any address */
1419 error
= target_mmap(0, ET_DYN_MAP_SIZE
,
1420 PROT_NONE
, MAP_PRIVATE
| MAP_ANON
,
1426 load_bias
= TARGET_ELF_PAGESTART(error
- elf_ppnt
->p_vaddr
);
1429 error
= target_mmap(TARGET_ELF_PAGESTART(load_bias
+ elf_ppnt
->p_vaddr
),
1430 (elf_ppnt
->p_filesz
+
1431 TARGET_ELF_PAGEOFFSET(elf_ppnt
->p_vaddr
)),
1433 (MAP_FIXED
| MAP_PRIVATE
| MAP_DENYWRITE
),
1435 (elf_ppnt
->p_offset
-
1436 TARGET_ELF_PAGEOFFSET(elf_ppnt
->p_vaddr
)));
1442 #ifdef LOW_ELF_STACK
1443 if (TARGET_ELF_PAGESTART(elf_ppnt
->p_vaddr
) < elf_stack
)
1444 elf_stack
= TARGET_ELF_PAGESTART(elf_ppnt
->p_vaddr
);
1447 if (!load_addr_set
) {
1449 load_addr
= elf_ppnt
->p_vaddr
- elf_ppnt
->p_offset
;
1450 if (elf_ex
.e_type
== ET_DYN
) {
1451 load_bias
+= error
-
1452 TARGET_ELF_PAGESTART(load_bias
+ elf_ppnt
->p_vaddr
);
1453 load_addr
+= load_bias
;
1454 reloc_func_desc
= load_bias
;
1457 k
= elf_ppnt
->p_vaddr
;
1462 k
= elf_ppnt
->p_vaddr
+ elf_ppnt
->p_filesz
;
1465 if ((elf_ppnt
->p_flags
& PF_X
) && end_code
< k
)
1469 k
= elf_ppnt
->p_vaddr
+ elf_ppnt
->p_memsz
;
1470 if (k
> elf_brk
) elf_brk
= k
;
1473 elf_entry
+= load_bias
;
1474 elf_bss
+= load_bias
;
1475 elf_brk
+= load_bias
;
1476 start_code
+= load_bias
;
1477 end_code
+= load_bias
;
1478 start_data
+= load_bias
;
1479 end_data
+= load_bias
;
1481 if (elf_interpreter
) {
1482 if (interpreter_type
& 1) {
1483 elf_entry
= load_aout_interp(&interp_ex
, interpreter_fd
);
1485 else if (interpreter_type
& 2) {
1486 elf_entry
= load_elf_interp(&interp_elf_ex
, interpreter_fd
,
1489 reloc_func_desc
= interp_load_addr
;
1491 close(interpreter_fd
);
1492 free(elf_interpreter
);
1494 if (elf_entry
== ~((abi_ulong
)0UL)) {
1495 printf("Unable to load interpreter\n");
1504 if (qemu_log_enabled())
1505 load_symbols(&elf_ex
, bprm
->fd
);
1507 if (interpreter_type
!= INTERPRETER_AOUT
) close(bprm
->fd
);
1508 info
->personality
= (ibcs2_interpreter
? PER_SVR4
: PER_LINUX
);
1510 #ifdef LOW_ELF_STACK
1511 info
->start_stack
= bprm
->p
= elf_stack
- 4;
1513 bprm
->p
= create_elf_tables(bprm
->p
,
1517 load_addr
, load_bias
,
1519 (interpreter_type
== INTERPRETER_AOUT
? 0 : 1),
1521 info
->load_addr
= reloc_func_desc
;
1522 info
->start_brk
= info
->brk
= elf_brk
;
1523 info
->end_code
= end_code
;
1524 info
->start_code
= start_code
;
1525 info
->start_data
= start_data
;
1526 info
->end_data
= end_data
;
1527 info
->start_stack
= bprm
->p
;
1529 /* Calling set_brk effectively mmaps the pages that we need for the bss and break
1531 set_brk(elf_bss
, elf_brk
);
1533 padzero(elf_bss
, elf_brk
);
1536 printf("(start_brk) %x\n" , info
->start_brk
);
1537 printf("(end_code) %x\n" , info
->end_code
);
1538 printf("(start_code) %x\n" , info
->start_code
);
1539 printf("(end_data) %x\n" , info
->end_data
);
1540 printf("(start_stack) %x\n" , info
->start_stack
);
1541 printf("(brk) %x\n" , info
->brk
);
1544 if ( info
->personality
== PER_SVR4
)
1546 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
1547 and some applications "depend" upon this behavior.
1548 Since we do not have the power to recompile these, we
1549 emulate the SVr4 behavior. Sigh. */
1550 mapped_addr
= target_mmap(0, qemu_host_page_size
, PROT_READ
| PROT_EXEC
,
1551 MAP_FIXED
| MAP_PRIVATE
, -1, 0);
1554 info
->entry
= elf_entry
;
/*
 * Stub loader for a.out-format ELF interpreters (OMAGIC/ZMAGIC/QMAGIC).
 *
 * QEMU does not implement a.out interpreter loading; report that and
 * return -1, which after assignment to the caller's abi_ulong
 * elf_entry matches its ~((abi_ulong)0UL) "Unable to load interpreter"
 * check, instead of falling off the end of the function (UB for a
 * non-void function whose result is used) and letting execution jump
 * to an indeterminate entry point.
 *
 * exptr:     pointer to the interpreter's a.out exec header (unused).
 * interp_fd: open fd of the interpreter binary (unused).
 * Returns -1 always (a.out interpreters unsupported).
 */
static int load_aout_interp(void *exptr, int interp_fd)
{
    (void)exptr;      /* unused until a.out support exists */
    (void)interp_fd;  /* unused until a.out support exists */
    printf("a.out interpreter not yet supported\n");
    return -1;
}
/*
 * Final loader step: seed the new guest thread's CPU state.
 *
 * Thin wrapper around the per-target init_thread(), presumably filling
 * regs (entry PC, initial SP, arch-specific registers) from the
 * just-loaded image described by infop.
 * NOTE(review): init_thread() is target-specific and defined elsewhere;
 * the description above is inferred from its arguments — confirm against
 * the per-target implementation.
 */
void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    init_thread(regs, infop);
}