1 /* This is the Linux kernel elf-loading code, ported into user space */
13 #include "disas/disas.h"
24 /* from personality.h */
27 * Flags for bug emulation.
29 * These occupy the top three bytes.
32 ADDR_NO_RANDOMIZE
= 0x0040000, /* disable randomization of VA space */
33 FDPIC_FUNCPTRS
= 0x0080000, /* userspace function ptrs point to descriptors
36 MMAP_PAGE_ZERO
= 0x0100000,
37 ADDR_COMPAT_LAYOUT
= 0x0200000,
38 READ_IMPLIES_EXEC
= 0x0400000,
39 ADDR_LIMIT_32BIT
= 0x0800000,
40 SHORT_INODE
= 0x1000000,
41 WHOLE_SECONDS
= 0x2000000,
42 STICKY_TIMEOUTS
= 0x4000000,
43 ADDR_LIMIT_3GB
= 0x8000000,
49 * These go in the low byte. Avoid using the top bit, it will
50 * conflict with error returns.
54 PER_LINUX_32BIT
= 0x0000 | ADDR_LIMIT_32BIT
,
55 PER_LINUX_FDPIC
= 0x0000 | FDPIC_FUNCPTRS
,
56 PER_SVR4
= 0x0001 | STICKY_TIMEOUTS
| MMAP_PAGE_ZERO
,
57 PER_SVR3
= 0x0002 | STICKY_TIMEOUTS
| SHORT_INODE
,
58 PER_SCOSVR3
= 0x0003 | STICKY_TIMEOUTS
|
59 WHOLE_SECONDS
| SHORT_INODE
,
60 PER_OSR5
= 0x0003 | STICKY_TIMEOUTS
| WHOLE_SECONDS
,
61 PER_WYSEV386
= 0x0004 | STICKY_TIMEOUTS
| SHORT_INODE
,
62 PER_ISCR4
= 0x0005 | STICKY_TIMEOUTS
,
64 PER_SUNOS
= 0x0006 | STICKY_TIMEOUTS
,
65 PER_XENIX
= 0x0007 | STICKY_TIMEOUTS
| SHORT_INODE
,
67 PER_LINUX32_3GB
= 0x0008 | ADDR_LIMIT_3GB
,
68 PER_IRIX32
= 0x0009 | STICKY_TIMEOUTS
,/* IRIX5 32-bit */
69 PER_IRIXN32
= 0x000a | STICKY_TIMEOUTS
,/* IRIX6 new 32-bit */
70 PER_IRIX64
= 0x000b | STICKY_TIMEOUTS
,/* IRIX6 64-bit */
72 PER_SOLARIS
= 0x000d | STICKY_TIMEOUTS
,
73 PER_UW7
= 0x000e | STICKY_TIMEOUTS
| MMAP_PAGE_ZERO
,
74 PER_OSF4
= 0x000f, /* OSF/1 v4 */
80 * Return the base personality without flags.
82 #define personality(pers) (pers & PER_MASK)
84 /* this flag is ineffective under linux too, should be deleted */
86 #define MAP_DENYWRITE 0
89 /* should probably go in elf.h */
96 #define ELF_PLATFORM get_elf_platform()
98 static const char *get_elf_platform(void)
100 static char elf_platform
[] = "i386";
101 int family
= object_property_get_int(OBJECT(thread_cpu
), "family", NULL
);
105 elf_platform
[1] = '0' + family
;
109 #define ELF_HWCAP get_elf_hwcap()
111 static uint32_t get_elf_hwcap(void)
113 X86CPU
*cpu
= X86_CPU(thread_cpu
);
115 return cpu
->env
.features
[FEAT_1_EDX
];
119 #define ELF_START_MMAP 0x2aaaaab000ULL
120 #define elf_check_arch(x) ( ((x) == ELF_ARCH) )
122 #define ELF_CLASS ELFCLASS64
123 #define ELF_DATA ELFDATA2LSB
124 #define ELF_ARCH EM_X86_64
126 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
129 regs
->rsp
= infop
->start_stack
;
130 regs
->rip
= infop
->entry
;
131 if (bsd_type
== target_freebsd
) {
132 regs
->rdi
= infop
->start_stack
;
138 #define ELF_START_MMAP 0x80000000
141 * This is used to ensure we don't load something for the wrong architecture.
143 #define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )
146 * These are used to set parameters in the core dumps.
148 #define ELF_CLASS ELFCLASS32
149 #define ELF_DATA ELFDATA2LSB
150 #define ELF_ARCH EM_386
152 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
154 regs
->esp
= infop
->start_stack
;
155 regs
->eip
= infop
->entry
;
157 /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
158 starts %edx contains a pointer to a function which might be
159 registered using `atexit'. This provides a means for the
160 dynamic linker to call DT_FINI functions for shared libraries
161 that have been loaded before the code runs.
163 A value of 0 tells we have no such handler. */
168 #define USE_ELF_CORE_DUMP
169 #define ELF_EXEC_PAGESIZE 4096
175 #define ELF_START_MMAP 0x80000000
177 #define elf_check_arch(x) ( (x) == EM_ARM )
179 #define ELF_CLASS ELFCLASS32
180 #ifdef TARGET_WORDS_BIGENDIAN
181 #define ELF_DATA ELFDATA2MSB
183 #define ELF_DATA ELFDATA2LSB
185 #define ELF_ARCH EM_ARM
187 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
189 abi_long stack
= infop
->start_stack
;
190 memset(regs
, 0, sizeof(*regs
));
191 regs
->ARM_cpsr
= 0x10;
192 if (infop
->entry
& 1)
193 regs
->ARM_cpsr
|= CPSR_T
;
194 regs
->ARM_pc
= infop
->entry
& 0xfffffffe;
195 regs
->ARM_sp
= infop
->start_stack
;
196 /* FIXME - what to do for failure of get_user()? */
197 get_user_ual(regs
->ARM_r2
, stack
+ 8); /* envp */
198 get_user_ual(regs
->ARM_r1
, stack
+ 4); /* envp */
199 /* XXX: it seems that r0 is zeroed after ! */
201 /* For uClinux PIC binaries. */
202 /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
203 regs
->ARM_r10
= infop
->start_data
;
206 #define USE_ELF_CORE_DUMP
207 #define ELF_EXEC_PAGESIZE 4096
211 ARM_HWCAP_ARM_SWP
= 1 << 0,
212 ARM_HWCAP_ARM_HALF
= 1 << 1,
213 ARM_HWCAP_ARM_THUMB
= 1 << 2,
214 ARM_HWCAP_ARM_26BIT
= 1 << 3,
215 ARM_HWCAP_ARM_FAST_MULT
= 1 << 4,
216 ARM_HWCAP_ARM_FPA
= 1 << 5,
217 ARM_HWCAP_ARM_VFP
= 1 << 6,
218 ARM_HWCAP_ARM_EDSP
= 1 << 7,
221 #define ELF_HWCAP (ARM_HWCAP_ARM_SWP | ARM_HWCAP_ARM_HALF \
222 | ARM_HWCAP_ARM_THUMB | ARM_HWCAP_ARM_FAST_MULT \
223 | ARM_HWCAP_ARM_FPA | ARM_HWCAP_ARM_VFP)
228 #ifdef TARGET_SPARC64
230 #define ELF_START_MMAP 0x80000000
233 #define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
235 #define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )
238 #define ELF_CLASS ELFCLASS64
239 #define ELF_DATA ELFDATA2MSB
240 #define ELF_ARCH EM_SPARCV9
242 #define STACK_BIAS 2047
244 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
249 regs
->pc
= infop
->entry
;
250 regs
->npc
= regs
->pc
+ 4;
253 regs
->u_regs
[14] = infop
->start_stack
- 16 * 4;
255 if (personality(infop
->personality
) == PER_LINUX32
)
256 regs
->u_regs
[14] = infop
->start_stack
- 16 * 4;
258 regs
->u_regs
[14] = infop
->start_stack
- 16 * 8 - STACK_BIAS
;
259 if (bsd_type
== target_freebsd
) {
260 regs
->u_regs
[8] = infop
->start_stack
;
261 regs
->u_regs
[11] = infop
->start_stack
;
268 #define ELF_START_MMAP 0x80000000
270 #define elf_check_arch(x) ( (x) == EM_SPARC )
272 #define ELF_CLASS ELFCLASS32
273 #define ELF_DATA ELFDATA2MSB
274 #define ELF_ARCH EM_SPARC
276 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
279 regs
->pc
= infop
->entry
;
280 regs
->npc
= regs
->pc
+ 4;
282 regs
->u_regs
[14] = infop
->start_stack
- 16 * 4;
290 #define ELF_START_MMAP 0x80000000
292 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
294 #define elf_check_arch(x) ( (x) == EM_PPC64 )
296 #define ELF_CLASS ELFCLASS64
300 #define elf_check_arch(x) ( (x) == EM_PPC )
302 #define ELF_CLASS ELFCLASS32
306 #ifdef TARGET_WORDS_BIGENDIAN
307 #define ELF_DATA ELFDATA2MSB
309 #define ELF_DATA ELFDATA2LSB
311 #define ELF_ARCH EM_PPC
314 * We need to put in some extra aux table entries to tell glibc what
315 * the cache block size is, so it can use the dcbz instruction safely.
317 #define AT_DCACHEBSIZE 19
318 #define AT_ICACHEBSIZE 20
319 #define AT_UCACHEBSIZE 21
320 /* A special ignored type value for PPC, for glibc compatibility. */
321 #define AT_IGNOREPPC 22
323 * The requirements here are:
324 * - keep the final alignment of sp (sp & 0xf)
325 * - make sure the 32-bit value at the first 16 byte aligned position of
326 * AUXV is greater than 16 for glibc compatibility.
327 * AT_IGNOREPPC is used for that.
328 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
329 * even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
331 #define DLINFO_ARCH_ITEMS 5
332 #define ARCH_DLINFO \
334 NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20); \
335 NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20); \
336 NEW_AUX_ENT(AT_UCACHEBSIZE, 0); \
338 * Now handle glibc compatibility. \
340 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
341 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
344 static inline void init_thread(struct target_pt_regs
*_regs
, struct image_info
*infop
)
346 abi_ulong pos
= infop
->start_stack
;
348 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
349 abi_ulong entry
, toc
;
352 _regs
->gpr
[1] = infop
->start_stack
;
353 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
354 get_user_u64(entry
, infop
->entry
);
355 entry
+= infop
->load_addr
;
356 get_user_u64(toc
, infop
->entry
+ 8);
357 toc
+= infop
->load_addr
;
359 infop
->entry
= entry
;
361 _regs
->nip
= infop
->entry
;
362 /* Note that isn't exactly what regular kernel does
363 * but this is what the ABI wants and is needed to allow
364 * execution of PPC BSD programs.
366 /* FIXME - what to do for failure of get_user()? */
367 get_user_ual(_regs
->gpr
[3], pos
);
368 pos
+= sizeof(abi_ulong
);
370 for (tmp
= 1; tmp
!= 0; pos
+= sizeof(abi_ulong
)) {
371 get_user_ual(tmp
, pos
);
376 #define USE_ELF_CORE_DUMP
377 #define ELF_EXEC_PAGESIZE 4096
383 #define ELF_START_MMAP 0x80000000
385 #define elf_check_arch(x) ( (x) == EM_MIPS )
388 #define ELF_CLASS ELFCLASS64
390 #define ELF_CLASS ELFCLASS32
392 #ifdef TARGET_WORDS_BIGENDIAN
393 #define ELF_DATA ELFDATA2MSB
395 #define ELF_DATA ELFDATA2LSB
397 #define ELF_ARCH EM_MIPS
399 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
401 regs
->cp0_status
= 2 << CP0St_KSU
;
402 regs
->cp0_epc
= infop
->entry
;
403 regs
->regs
[29] = infop
->start_stack
;
406 #define USE_ELF_CORE_DUMP
407 #define ELF_EXEC_PAGESIZE 4096
409 #endif /* TARGET_MIPS */
413 #define ELF_START_MMAP 0x80000000
415 #define elf_check_arch(x) ( (x) == EM_SH )
417 #define ELF_CLASS ELFCLASS32
418 #define ELF_DATA ELFDATA2LSB
419 #define ELF_ARCH EM_SH
421 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
423 /* Check other registers XXXXX */
424 regs
->pc
= infop
->entry
;
425 regs
->regs
[15] = infop
->start_stack
;
428 #define USE_ELF_CORE_DUMP
429 #define ELF_EXEC_PAGESIZE 4096
435 #define ELF_START_MMAP 0x80000000
437 #define elf_check_arch(x) ( (x) == EM_CRIS )
439 #define ELF_CLASS ELFCLASS32
440 #define ELF_DATA ELFDATA2LSB
441 #define ELF_ARCH EM_CRIS
443 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
445 regs
->erp
= infop
->entry
;
448 #define USE_ELF_CORE_DUMP
449 #define ELF_EXEC_PAGESIZE 8192
455 #define ELF_START_MMAP 0x80000000
457 #define elf_check_arch(x) ( (x) == EM_68K )
459 #define ELF_CLASS ELFCLASS32
460 #define ELF_DATA ELFDATA2MSB
461 #define ELF_ARCH EM_68K
463 /* ??? Does this need to do anything?
464 #define ELF_PLAT_INIT(_r) */
466 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
468 regs
->usp
= infop
->start_stack
;
470 regs
->pc
= infop
->entry
;
473 #define USE_ELF_CORE_DUMP
474 #define ELF_EXEC_PAGESIZE 8192
480 #define ELF_START_MMAP (0x30000000000ULL)
482 #define elf_check_arch(x) ( (x) == ELF_ARCH )
484 #define ELF_CLASS ELFCLASS64
485 #define ELF_DATA ELFDATA2MSB
486 #define ELF_ARCH EM_ALPHA
488 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
490 regs
->pc
= infop
->entry
;
492 regs
->usp
= infop
->start_stack
;
493 regs
->unique
= infop
->start_data
; /* ? */
494 printf("Set unique value to " TARGET_FMT_lx
" (" TARGET_FMT_lx
")\n",
495 regs
->unique
, infop
->start_data
);
498 #define USE_ELF_CORE_DUMP
499 #define ELF_EXEC_PAGESIZE 8192
501 #endif /* TARGET_ALPHA */
504 #define ELF_PLATFORM (NULL)
513 #define ELF_CLASS ELFCLASS32
515 #define bswaptls(ptr) bswap32s(ptr)
522 unsigned int a_info
; /* Use macros N_MAGIC, etc for access */
523 unsigned int a_text
; /* length of text, in bytes */
524 unsigned int a_data
; /* length of data, in bytes */
525 unsigned int a_bss
; /* length of uninitialized data area, in bytes */
526 unsigned int a_syms
; /* length of symbol table data in file, in bytes */
527 unsigned int a_entry
; /* start address */
528 unsigned int a_trsize
; /* length of relocation info for text, in bytes */
529 unsigned int a_drsize
; /* length of relocation info for data, in bytes */
533 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
539 /* max code+data+bss space allocated to elf interpreter */
540 #define INTERP_MAP_SIZE (32 * 1024 * 1024)
542 /* max code+data+bss+brk space allocated to ET_DYN executables */
543 #define ET_DYN_MAP_SIZE (128 * 1024 * 1024)
545 /* Necessary parameters */
546 #define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
547 #define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
548 #define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
550 #define INTERPRETER_NONE 0
551 #define INTERPRETER_AOUT 1
552 #define INTERPRETER_ELF 2
554 #define DLINFO_ITEMS 12
556 static inline void memcpy_fromfs(void * to
, const void * from
, unsigned long n
)
561 static int load_aout_interp(void * exptr
, int interp_fd
);
564 static void bswap_ehdr(struct elfhdr
*ehdr
)
566 bswap16s(&ehdr
->e_type
); /* Object file type */
567 bswap16s(&ehdr
->e_machine
); /* Architecture */
568 bswap32s(&ehdr
->e_version
); /* Object file version */
569 bswaptls(&ehdr
->e_entry
); /* Entry point virtual address */
570 bswaptls(&ehdr
->e_phoff
); /* Program header table file offset */
571 bswaptls(&ehdr
->e_shoff
); /* Section header table file offset */
572 bswap32s(&ehdr
->e_flags
); /* Processor-specific flags */
573 bswap16s(&ehdr
->e_ehsize
); /* ELF header size in bytes */
574 bswap16s(&ehdr
->e_phentsize
); /* Program header table entry size */
575 bswap16s(&ehdr
->e_phnum
); /* Program header table entry count */
576 bswap16s(&ehdr
->e_shentsize
); /* Section header table entry size */
577 bswap16s(&ehdr
->e_shnum
); /* Section header table entry count */
578 bswap16s(&ehdr
->e_shstrndx
); /* Section header string table index */
581 static void bswap_phdr(struct elf_phdr
*phdr
)
583 bswap32s(&phdr
->p_type
); /* Segment type */
584 bswaptls(&phdr
->p_offset
); /* Segment file offset */
585 bswaptls(&phdr
->p_vaddr
); /* Segment virtual address */
586 bswaptls(&phdr
->p_paddr
); /* Segment physical address */
587 bswaptls(&phdr
->p_filesz
); /* Segment size in file */
588 bswaptls(&phdr
->p_memsz
); /* Segment size in memory */
589 bswap32s(&phdr
->p_flags
); /* Segment flags */
590 bswaptls(&phdr
->p_align
); /* Segment alignment */
593 static void bswap_shdr(struct elf_shdr
*shdr
)
595 bswap32s(&shdr
->sh_name
);
596 bswap32s(&shdr
->sh_type
);
597 bswaptls(&shdr
->sh_flags
);
598 bswaptls(&shdr
->sh_addr
);
599 bswaptls(&shdr
->sh_offset
);
600 bswaptls(&shdr
->sh_size
);
601 bswap32s(&shdr
->sh_link
);
602 bswap32s(&shdr
->sh_info
);
603 bswaptls(&shdr
->sh_addralign
);
604 bswaptls(&shdr
->sh_entsize
);
607 static void bswap_sym(struct elf_sym
*sym
)
609 bswap32s(&sym
->st_name
);
610 bswaptls(&sym
->st_value
);
611 bswaptls(&sym
->st_size
);
612 bswap16s(&sym
->st_shndx
);
617 * 'copy_elf_strings()' copies argument/envelope strings from user
618 * memory to free pages in kernel mem. These are in a format ready
619 * to be put directly into the top of new user memory.
622 static abi_ulong
copy_elf_strings(int argc
,char ** argv
, void **page
,
625 char *tmp
, *tmp1
, *pag
= NULL
;
629 return 0; /* bullet-proofing */
634 fprintf(stderr
, "VFS: argc is wrong");
640 if (p
< len
) { /* this shouldn't happen - 128kB */
646 offset
= p
% TARGET_PAGE_SIZE
;
647 pag
= (char *)page
[p
/TARGET_PAGE_SIZE
];
649 pag
= g_try_malloc0(TARGET_PAGE_SIZE
);
650 page
[p
/TARGET_PAGE_SIZE
] = pag
;
655 if (len
== 0 || offset
== 0) {
656 *(pag
+ offset
) = *tmp
;
659 int bytes_to_copy
= (len
> offset
) ? offset
: len
;
660 tmp
-= bytes_to_copy
;
662 offset
-= bytes_to_copy
;
663 len
-= bytes_to_copy
;
664 memcpy_fromfs(pag
+ offset
, tmp
, bytes_to_copy
+ 1);
671 static abi_ulong
setup_arg_pages(abi_ulong p
, struct linux_binprm
*bprm
,
672 struct image_info
*info
)
674 abi_ulong stack_base
, size
, error
;
677 /* Create enough stack to hold everything. If we don't use
678 * it for args, we'll use it for something else...
680 size
= x86_stack_size
;
681 if (size
< MAX_ARG_PAGES
*TARGET_PAGE_SIZE
)
682 size
= MAX_ARG_PAGES
*TARGET_PAGE_SIZE
;
683 error
= target_mmap(0,
684 size
+ qemu_host_page_size
,
685 PROT_READ
| PROT_WRITE
,
686 MAP_PRIVATE
| MAP_ANON
,
692 /* we reserve one extra page at the top of the stack as guard */
693 target_mprotect(error
+ size
, qemu_host_page_size
, PROT_NONE
);
695 stack_base
= error
+ size
- MAX_ARG_PAGES
*TARGET_PAGE_SIZE
;
698 for (i
= 0 ; i
< MAX_ARG_PAGES
; i
++) {
701 /* FIXME - check return value of memcpy_to_target() for failure */
702 memcpy_to_target(stack_base
, bprm
->page
[i
], TARGET_PAGE_SIZE
);
703 g_free(bprm
->page
[i
]);
705 stack_base
+= TARGET_PAGE_SIZE
;
710 static void set_brk(abi_ulong start
, abi_ulong end
)
712 /* page-align the start and end addresses... */
713 start
= HOST_PAGE_ALIGN(start
);
714 end
= HOST_PAGE_ALIGN(end
);
717 if(target_mmap(start
, end
- start
,
718 PROT_READ
| PROT_WRITE
| PROT_EXEC
,
719 MAP_FIXED
| MAP_PRIVATE
| MAP_ANON
, -1, 0) == -1) {
720 perror("cannot mmap brk");
726 /* We need to explicitly zero any fractional pages after the data
727 section (i.e. bss). This would contain the junk from the file that
728 should not be in memory. */
729 static void padzero(abi_ulong elf_bss
, abi_ulong last_bss
)
733 if (elf_bss
>= last_bss
)
736 /* XXX: this is really a hack : if the real host page size is
737 smaller than the target page size, some pages after the end
738 of the file may not be mapped. A better fix would be to
739 patch target_mmap(), but it is more complicated as the file
740 size must be known */
741 if (qemu_real_host_page_size
< qemu_host_page_size
) {
742 abi_ulong end_addr
, end_addr1
;
743 end_addr1
= (elf_bss
+ qemu_real_host_page_size
- 1) &
744 ~(qemu_real_host_page_size
- 1);
745 end_addr
= HOST_PAGE_ALIGN(elf_bss
);
746 if (end_addr1
< end_addr
) {
747 mmap((void *)g2h(end_addr1
), end_addr
- end_addr1
,
748 PROT_READ
|PROT_WRITE
|PROT_EXEC
,
749 MAP_FIXED
|MAP_PRIVATE
|MAP_ANON
, -1, 0);
753 nbyte
= elf_bss
& (qemu_host_page_size
-1);
755 nbyte
= qemu_host_page_size
- nbyte
;
757 /* FIXME - what to do if put_user() fails? */
758 put_user_u8(0, elf_bss
);
765 static abi_ulong
create_elf_tables(abi_ulong p
, int argc
, int envc
,
766 struct elfhdr
* exec
,
769 abi_ulong interp_load_addr
, int ibcs
,
770 struct image_info
*info
)
774 abi_ulong u_platform
;
775 const char *k_platform
;
776 const int n
= sizeof(elf_addr_t
);
780 k_platform
= ELF_PLATFORM
;
782 size_t len
= strlen(k_platform
) + 1;
783 sp
-= (len
+ n
- 1) & ~(n
- 1);
785 /* FIXME - check return value of memcpy_to_target() for failure */
786 memcpy_to_target(sp
, k_platform
, len
);
789 * Force 16 byte _final_ alignment here for generality.
791 sp
= sp
&~ (abi_ulong
)15;
792 size
= (DLINFO_ITEMS
+ 1) * 2;
795 #ifdef DLINFO_ARCH_ITEMS
796 size
+= DLINFO_ARCH_ITEMS
* 2;
798 size
+= envc
+ argc
+ 2;
799 size
+= (!ibcs
? 3 : 1); /* argc itself */
802 sp
-= 16 - (size
& 15);
804 /* This is correct because Linux defines
805 * elf_addr_t as Elf32_Off / Elf64_Off
807 #define NEW_AUX_ENT(id, val) do { \
808 sp -= n; put_user_ual(val, sp); \
809 sp -= n; put_user_ual(id, sp); \
812 NEW_AUX_ENT (AT_NULL
, 0);
814 /* There must be exactly DLINFO_ITEMS entries here. */
815 NEW_AUX_ENT(AT_PHDR
, (abi_ulong
)(load_addr
+ exec
->e_phoff
));
816 NEW_AUX_ENT(AT_PHENT
, (abi_ulong
)(sizeof (struct elf_phdr
)));
817 NEW_AUX_ENT(AT_PHNUM
, (abi_ulong
)(exec
->e_phnum
));
818 NEW_AUX_ENT(AT_PAGESZ
, (abi_ulong
)(TARGET_PAGE_SIZE
));
819 NEW_AUX_ENT(AT_BASE
, (abi_ulong
)(interp_load_addr
));
820 NEW_AUX_ENT(AT_FLAGS
, (abi_ulong
)0);
821 NEW_AUX_ENT(AT_ENTRY
, load_bias
+ exec
->e_entry
);
822 NEW_AUX_ENT(AT_UID
, (abi_ulong
) getuid());
823 NEW_AUX_ENT(AT_EUID
, (abi_ulong
) geteuid());
824 NEW_AUX_ENT(AT_GID
, (abi_ulong
) getgid());
825 NEW_AUX_ENT(AT_EGID
, (abi_ulong
) getegid());
826 NEW_AUX_ENT(AT_HWCAP
, (abi_ulong
) ELF_HWCAP
);
827 NEW_AUX_ENT(AT_CLKTCK
, (abi_ulong
) sysconf(_SC_CLK_TCK
));
829 NEW_AUX_ENT(AT_PLATFORM
, u_platform
);
832 * ARCH_DLINFO must come last so platform specific code can enforce
833 * special alignment requirements on the AUXV if necessary (eg. PPC).
839 sp
= loader_build_argptr(envc
, argc
, sp
, p
, !ibcs
);
844 static abi_ulong
load_elf_interp(struct elfhdr
* interp_elf_ex
,
846 abi_ulong
*interp_load_addr
)
848 struct elf_phdr
*elf_phdata
= NULL
;
849 struct elf_phdr
*eppnt
;
850 abi_ulong load_addr
= 0;
851 int load_addr_set
= 0;
853 abi_ulong last_bss
, elf_bss
;
862 bswap_ehdr(interp_elf_ex
);
864 /* First of all, some simple consistency checks */
865 if ((interp_elf_ex
->e_type
!= ET_EXEC
&&
866 interp_elf_ex
->e_type
!= ET_DYN
) ||
867 !elf_check_arch(interp_elf_ex
->e_machine
)) {
868 return ~((abi_ulong
)0UL);
872 /* Now read in all of the header information */
874 if (sizeof(struct elf_phdr
) * interp_elf_ex
->e_phnum
> TARGET_PAGE_SIZE
)
875 return ~(abi_ulong
)0UL;
877 elf_phdata
= (struct elf_phdr
*)
878 malloc(sizeof(struct elf_phdr
) * interp_elf_ex
->e_phnum
);
881 return ~((abi_ulong
)0UL);
884 * If the size of this structure has changed, then punt, since
885 * we will be doing the wrong thing.
887 if (interp_elf_ex
->e_phentsize
!= sizeof(struct elf_phdr
)) {
889 return ~((abi_ulong
)0UL);
892 retval
= lseek(interpreter_fd
, interp_elf_ex
->e_phoff
, SEEK_SET
);
894 retval
= read(interpreter_fd
,
896 sizeof(struct elf_phdr
) * interp_elf_ex
->e_phnum
);
899 perror("load_elf_interp");
906 for (i
=0; i
<interp_elf_ex
->e_phnum
; i
++, eppnt
++) {
911 if (interp_elf_ex
->e_type
== ET_DYN
) {
912 /* in order to avoid hardcoding the interpreter load
913 address in qemu, we allocate a big enough memory zone */
914 error
= target_mmap(0, INTERP_MAP_SIZE
,
915 PROT_NONE
, MAP_PRIVATE
| MAP_ANON
,
926 for(i
=0; i
<interp_elf_ex
->e_phnum
; i
++, eppnt
++)
927 if (eppnt
->p_type
== PT_LOAD
) {
928 int elf_type
= MAP_PRIVATE
| MAP_DENYWRITE
;
933 if (eppnt
->p_flags
& PF_R
) elf_prot
= PROT_READ
;
934 if (eppnt
->p_flags
& PF_W
) elf_prot
|= PROT_WRITE
;
935 if (eppnt
->p_flags
& PF_X
) elf_prot
|= PROT_EXEC
;
936 if (interp_elf_ex
->e_type
== ET_EXEC
|| load_addr_set
) {
937 elf_type
|= MAP_FIXED
;
938 vaddr
= eppnt
->p_vaddr
;
940 error
= target_mmap(load_addr
+TARGET_ELF_PAGESTART(vaddr
),
941 eppnt
->p_filesz
+ TARGET_ELF_PAGEOFFSET(eppnt
->p_vaddr
),
945 eppnt
->p_offset
- TARGET_ELF_PAGEOFFSET(eppnt
->p_vaddr
));
949 close(interpreter_fd
);
951 return ~((abi_ulong
)0UL);
954 if (!load_addr_set
&& interp_elf_ex
->e_type
== ET_DYN
) {
960 * Find the end of the file mapping for this phdr, and keep
961 * track of the largest address we see for this.
963 k
= load_addr
+ eppnt
->p_vaddr
+ eppnt
->p_filesz
;
964 if (k
> elf_bss
) elf_bss
= k
;
967 * Do the same thing for the memory mapping - between
968 * elf_bss and last_bss is the bss section.
970 k
= load_addr
+ eppnt
->p_memsz
+ eppnt
->p_vaddr
;
971 if (k
> last_bss
) last_bss
= k
;
974 /* Now use mmap to map the library into memory. */
976 close(interpreter_fd
);
979 * Now fill out the bss section. First pad the last page up
980 * to the page boundary, and then perform a mmap to make sure
981 * that there are zeromapped pages up to and including the last
984 padzero(elf_bss
, last_bss
);
985 elf_bss
= TARGET_ELF_PAGESTART(elf_bss
+ qemu_host_page_size
- 1); /* What we have mapped so far */
987 /* Map the last of the bss segment */
988 if (last_bss
> elf_bss
) {
989 target_mmap(elf_bss
, last_bss
-elf_bss
,
990 PROT_READ
|PROT_WRITE
|PROT_EXEC
,
991 MAP_FIXED
|MAP_PRIVATE
|MAP_ANON
, -1, 0);
995 *interp_load_addr
= load_addr
;
996 return ((abi_ulong
) interp_elf_ex
->e_entry
) + load_addr
;
999 static int symfind(const void *s0
, const void *s1
)
1001 target_ulong addr
= *(target_ulong
*)s0
;
1002 struct elf_sym
*sym
= (struct elf_sym
*)s1
;
1004 if (addr
< sym
->st_value
) {
1006 } else if (addr
>= sym
->st_value
+ sym
->st_size
) {
1012 static const char *lookup_symbolxx(struct syminfo
*s
, target_ulong orig_addr
)
1014 #if ELF_CLASS == ELFCLASS32
1015 struct elf_sym
*syms
= s
->disas_symtab
.elf32
;
1017 struct elf_sym
*syms
= s
->disas_symtab
.elf64
;
1021 struct elf_sym
*sym
;
1023 sym
= bsearch(&orig_addr
, syms
, s
->disas_num_syms
, sizeof(*syms
), symfind
);
1025 return s
->disas_strtab
+ sym
->st_name
;
1031 /* FIXME: This should use elf_ops.h */
1032 static int symcmp(const void *s0
, const void *s1
)
1034 struct elf_sym
*sym0
= (struct elf_sym
*)s0
;
1035 struct elf_sym
*sym1
= (struct elf_sym
*)s1
;
1036 return (sym0
->st_value
< sym1
->st_value
)
1038 : ((sym0
->st_value
> sym1
->st_value
) ? 1 : 0);
1041 /* Best attempt to load symbols from this ELF object. */
1042 static void load_symbols(struct elfhdr
*hdr
, int fd
)
1044 unsigned int i
, nsyms
;
1045 struct elf_shdr sechdr
, symtab
, strtab
;
1048 struct elf_sym
*syms
, *new_syms
;
1050 lseek(fd
, hdr
->e_shoff
, SEEK_SET
);
1051 for (i
= 0; i
< hdr
->e_shnum
; i
++) {
1052 if (read(fd
, &sechdr
, sizeof(sechdr
)) != sizeof(sechdr
))
1055 bswap_shdr(&sechdr
);
1057 if (sechdr
.sh_type
== SHT_SYMTAB
) {
1059 lseek(fd
, hdr
->e_shoff
1060 + sizeof(sechdr
) * sechdr
.sh_link
, SEEK_SET
);
1061 if (read(fd
, &strtab
, sizeof(strtab
))
1065 bswap_shdr(&strtab
);
1070 return; /* Shouldn't happen... */
1073 /* Now know where the strtab and symtab are. Snarf them. */
1074 s
= malloc(sizeof(*s
));
1075 syms
= malloc(symtab
.sh_size
);
1080 s
->disas_strtab
= strings
= malloc(strtab
.sh_size
);
1081 if (!s
->disas_strtab
) {
1087 lseek(fd
, symtab
.sh_offset
, SEEK_SET
);
1088 if (read(fd
, syms
, symtab
.sh_size
) != symtab
.sh_size
) {
1095 nsyms
= symtab
.sh_size
/ sizeof(struct elf_sym
);
1100 bswap_sym(syms
+ i
);
1102 // Throw away entries which we do not need.
1103 if (syms
[i
].st_shndx
== SHN_UNDEF
||
1104 syms
[i
].st_shndx
>= SHN_LORESERVE
||
1105 ELF_ST_TYPE(syms
[i
].st_info
) != STT_FUNC
) {
1108 syms
[i
] = syms
[nsyms
];
1112 #if defined(TARGET_ARM) || defined (TARGET_MIPS)
1113 /* The bottom address bit marks a Thumb or MIPS16 symbol. */
1114 syms
[i
].st_value
&= ~(target_ulong
)1;
1119 /* Attempt to free the storage associated with the local symbols
1120 that we threw away. Whether or not this has any effect on the
1121 memory allocation depends on the malloc implementation and how
1122 many symbols we managed to discard. */
1123 new_syms
= realloc(syms
, nsyms
* sizeof(*syms
));
1124 if (new_syms
== NULL
) {
1132 qsort(syms
, nsyms
, sizeof(*syms
), symcmp
);
1134 lseek(fd
, strtab
.sh_offset
, SEEK_SET
);
1135 if (read(fd
, strings
, strtab
.sh_size
) != strtab
.sh_size
) {
1141 s
->disas_num_syms
= nsyms
;
1142 #if ELF_CLASS == ELFCLASS32
1143 s
->disas_symtab
.elf32
= syms
;
1144 s
->lookup_symbol
= (lookup_symbol_t
)lookup_symbolxx
;
1146 s
->disas_symtab
.elf64
= syms
;
1147 s
->lookup_symbol
= (lookup_symbol_t
)lookup_symbolxx
;
1153 int load_elf_binary(struct linux_binprm
* bprm
, struct target_pt_regs
* regs
,
1154 struct image_info
* info
)
1156 struct elfhdr elf_ex
;
1157 struct elfhdr interp_elf_ex
;
1158 struct exec interp_ex
;
1159 int interpreter_fd
= -1; /* avoid warning */
1160 abi_ulong load_addr
, load_bias
;
1161 int load_addr_set
= 0;
1162 unsigned int interpreter_type
= INTERPRETER_NONE
;
1163 unsigned char ibcs2_interpreter
;
1165 abi_ulong mapped_addr
;
1166 struct elf_phdr
* elf_ppnt
;
1167 struct elf_phdr
*elf_phdata
;
1168 abi_ulong elf_bss
, k
, elf_brk
;
1170 char * elf_interpreter
;
1171 abi_ulong elf_entry
, interp_load_addr
= 0;
1173 abi_ulong start_code
, end_code
, start_data
, end_data
;
1174 abi_ulong reloc_func_desc
= 0;
1175 abi_ulong elf_stack
;
1176 char passed_fileno
[6];
1178 ibcs2_interpreter
= 0;
1182 elf_ex
= *((struct elfhdr
*) bprm
->buf
); /* exec-header */
1184 bswap_ehdr(&elf_ex
);
1187 /* First of all, some simple consistency checks */
1188 if ((elf_ex
.e_type
!= ET_EXEC
&& elf_ex
.e_type
!= ET_DYN
) ||
1189 (! elf_check_arch(elf_ex
.e_machine
))) {
1193 bprm
->p
= copy_elf_strings(1, &bprm
->filename
, bprm
->page
, bprm
->p
);
1194 bprm
->p
= copy_elf_strings(bprm
->envc
,bprm
->envp
,bprm
->page
,bprm
->p
);
1195 bprm
->p
= copy_elf_strings(bprm
->argc
,bprm
->argv
,bprm
->page
,bprm
->p
);
1200 /* Now read in all of the header information */
1201 elf_phdata
= (struct elf_phdr
*)malloc(elf_ex
.e_phentsize
*elf_ex
.e_phnum
);
1202 if (elf_phdata
== NULL
) {
1206 retval
= lseek(bprm
->fd
, elf_ex
.e_phoff
, SEEK_SET
);
1208 retval
= read(bprm
->fd
, (char *) elf_phdata
,
1209 elf_ex
.e_phentsize
* elf_ex
.e_phnum
);
1213 perror("load_elf_binary");
1220 elf_ppnt
= elf_phdata
;
1221 for (i
=0; i
<elf_ex
.e_phnum
; i
++, elf_ppnt
++) {
1222 bswap_phdr(elf_ppnt
);
1225 elf_ppnt
= elf_phdata
;
1231 elf_stack
= ~((abi_ulong
)0UL);
1232 elf_interpreter
= NULL
;
1233 start_code
= ~((abi_ulong
)0UL);
1237 interp_ex
.a_info
= 0;
1239 for(i
=0;i
< elf_ex
.e_phnum
; i
++) {
1240 if (elf_ppnt
->p_type
== PT_INTERP
) {
1241 if ( elf_interpreter
!= NULL
)
1244 free(elf_interpreter
);
1249 /* This is the program interpreter used for
1250 * shared libraries - for now assume that this
1251 * is an a.out format binary
1254 elf_interpreter
= (char *)malloc(elf_ppnt
->p_filesz
);
1256 if (elf_interpreter
== NULL
) {
1262 retval
= lseek(bprm
->fd
, elf_ppnt
->p_offset
, SEEK_SET
);
1264 retval
= read(bprm
->fd
, elf_interpreter
, elf_ppnt
->p_filesz
);
1267 perror("load_elf_binary2");
1271 /* If the program interpreter is one of these two,
1272 then assume an iBCS2 image. Otherwise assume
1273 a native linux image. */
1275 /* JRP - Need to add X86 lib dir stuff here... */
1277 if (strcmp(elf_interpreter
,"/usr/lib/libc.so.1") == 0 ||
1278 strcmp(elf_interpreter
,"/usr/lib/ld.so.1") == 0) {
1279 ibcs2_interpreter
= 1;
1283 printf("Using ELF interpreter %s\n", path(elf_interpreter
));
1286 retval
= open(path(elf_interpreter
), O_RDONLY
);
1288 interpreter_fd
= retval
;
1291 perror(elf_interpreter
);
1293 /* retval = -errno; */
1298 retval
= lseek(interpreter_fd
, 0, SEEK_SET
);
1300 retval
= read(interpreter_fd
,bprm
->buf
,128);
1304 interp_ex
= *((struct exec
*) bprm
->buf
); /* aout exec-header */
1305 interp_elf_ex
= *((struct elfhdr
*) bprm
->buf
); /* elf exec-header */
1308 perror("load_elf_binary3");
1311 free(elf_interpreter
);
1319 /* Some simple consistency checks for the interpreter */
1320 if (elf_interpreter
){
1321 interpreter_type
= INTERPRETER_ELF
| INTERPRETER_AOUT
;
1323 /* Now figure out which format our binary is */
1324 if ((N_MAGIC(interp_ex
) != OMAGIC
) && (N_MAGIC(interp_ex
) != ZMAGIC
) &&
1325 (N_MAGIC(interp_ex
) != QMAGIC
)) {
1326 interpreter_type
= INTERPRETER_ELF
;
1329 if (interp_elf_ex
.e_ident
[0] != 0x7f ||
1330 strncmp((char *)&interp_elf_ex
.e_ident
[1], "ELF",3) != 0) {
1331 interpreter_type
&= ~INTERPRETER_ELF
;
1334 if (!interpreter_type
) {
1335 free(elf_interpreter
);
1342 /* OK, we are done with that, now set up the arg stuff,
1343 and then start this sucker up */
1348 if (interpreter_type
== INTERPRETER_AOUT
) {
1349 snprintf(passed_fileno
, sizeof(passed_fileno
), "%d", bprm
->fd
);
1350 passed_p
= passed_fileno
;
1352 if (elf_interpreter
) {
1353 bprm
->p
= copy_elf_strings(1,&passed_p
,bprm
->page
,bprm
->p
);
1358 if (elf_interpreter
) {
1359 free(elf_interpreter
);
1367 /* OK, This is the point of no return */
1370 info
->start_mmap
= (abi_ulong
)ELF_START_MMAP
;
1372 elf_entry
= (abi_ulong
) elf_ex
.e_entry
;
1375 * In case where user has not explicitly set the guest_base, we
1376 * probe here that should we set it automatically.
1378 if (!have_guest_base
) {
1380 * Go through ELF program header table and find out whether
1381 * any of the segments drop below our current mmap_min_addr and
1382 * in that case set guest_base to corresponding address.
1384 for (i
= 0, elf_ppnt
= elf_phdata
; i
< elf_ex
.e_phnum
;
1386 if (elf_ppnt
->p_type
!= PT_LOAD
)
1388 if (HOST_PAGE_ALIGN(elf_ppnt
->p_vaddr
) < mmap_min_addr
) {
1389 guest_base
= HOST_PAGE_ALIGN(mmap_min_addr
);
1395 /* Do this so that we can load the interpreter, if need be. We will
1396 change some of these later */
1398 bprm
->p
= setup_arg_pages(bprm
->p
, bprm
, info
);
1399 info
->start_stack
= bprm
->p
;
1401 /* Now we do a little grungy work by mmaping the ELF image into
1402 * the correct location in memory. At this point, we assume that
1403 * the image should be loaded at fixed address, not at a variable
/* NOTE(review): garbled extraction -- stray original line numbers fused
 * onto the code and several argument lines missing; code kept
 * byte-identical, comments only added.
 * This is the main PT_LOAD mapping loop: each loadable segment is
 * target_mmap'd into the guest address space, and the code/data/brk
 * bookkeeping variables are updated as we go. */
1407 for(i
= 0, elf_ppnt
= elf_phdata
; i
< elf_ex
.e_phnum
; i
++, elf_ppnt
++) {
1412 if (elf_ppnt
->p_type
!= PT_LOAD
)
/* Translate the ELF segment permission bits (PF_*) into mmap
 * protection bits (PROT_*). */
1415 if (elf_ppnt
->p_flags
& PF_R
) elf_prot
|= PROT_READ
;
1416 if (elf_ppnt
->p_flags
& PF_W
) elf_prot
|= PROT_WRITE
;
1417 if (elf_ppnt
->p_flags
& PF_X
) elf_prot
|= PROT_EXEC
;
1418 elf_flags
= MAP_PRIVATE
| MAP_DENYWRITE
;
/* ET_EXEC images (and any segment after the first of an ET_DYN image)
 * must land at their exact address, hence MAP_FIXED. */
1419 if (elf_ex
.e_type
== ET_EXEC
|| load_addr_set
) {
1420 elf_flags
|= MAP_FIXED
;
1421 } else if (elf_ex
.e_type
== ET_DYN
) {
1422 /* Try and get dynamic programs out of the way of the default mmap
1423 base, as well as whatever program they might try to exec. This
1424 is because the brk will follow the loader, and is not movable. */
1425 /* NOTE: for qemu, we do a big mmap to get enough space
1426 without hardcoding any address */
/* Reserve a large PROT_NONE region and derive load_bias from where the
 * host placed it, page-aligned relative to the segment's p_vaddr. */
1427 error
= target_mmap(0, ET_DYN_MAP_SIZE
,
1428 PROT_NONE
, MAP_PRIVATE
| MAP_ANON
,
1434 load_bias
= TARGET_ELF_PAGESTART(error
- elf_ppnt
->p_vaddr
);
/* Map the file-backed part of the segment.  The size/offset are
 * adjusted by TARGET_ELF_PAGEOFFSET so the mapping starts on a target
 * page boundary while the file offset stays congruent. */
1437 error
= target_mmap(TARGET_ELF_PAGESTART(load_bias
+ elf_ppnt
->p_vaddr
),
1438 (elf_ppnt
->p_filesz
+
1439 TARGET_ELF_PAGEOFFSET(elf_ppnt
->p_vaddr
)),
1441 (MAP_FIXED
| MAP_PRIVATE
| MAP_DENYWRITE
),
1443 (elf_ppnt
->p_offset
-
1444 TARGET_ELF_PAGEOFFSET(elf_ppnt
->p_vaddr
)));
1450 #ifdef LOW_ELF_STACK
/* Optional: track the lowest mapped segment so the stack can be placed
 * below the image (LOW_ELF_STACK builds only). */
1451 if (TARGET_ELF_PAGESTART(elf_ppnt
->p_vaddr
) < elf_stack
)
1452 elf_stack
= TARGET_ELF_PAGESTART(elf_ppnt
->p_vaddr
);
/* First PT_LOAD segment fixes the image load address; for ET_DYN the
 * bias is folded in and remembered as reloc_func_desc for function
 * descriptor relocation. */
1455 if (!load_addr_set
) {
1457 load_addr
= elf_ppnt
->p_vaddr
- elf_ppnt
->p_offset
;
1458 if (elf_ex
.e_type
== ET_DYN
) {
1459 load_bias
+= error
-
1460 TARGET_ELF_PAGESTART(load_bias
+ elf_ppnt
->p_vaddr
);
1461 load_addr
+= load_bias
;
1462 reloc_func_desc
= load_bias
;
/* Update the running code/data/bss extents from this segment:
 * p_filesz bounds the file-backed part, p_memsz the in-memory part
 * (anything beyond filesz is bss, which grows elf_brk). */
1465 k
= elf_ppnt
->p_vaddr
;
1470 k
= elf_ppnt
->p_vaddr
+ elf_ppnt
->p_filesz
;
1473 if ((elf_ppnt
->p_flags
& PF_X
) && end_code
< k
)
1477 k
= elf_ppnt
->p_vaddr
+ elf_ppnt
->p_memsz
;
1478 if (k
> elf_brk
) elf_brk
= k
;
/* After the loop, slide every recorded address by the final load bias
 * (zero for ET_EXEC, nonzero for relocated ET_DYN images). */
1481 elf_entry
+= load_bias
;
1482 elf_bss
+= load_bias
;
1483 elf_brk
+= load_bias
;
1484 start_code
+= load_bias
;
1485 end_code
+= load_bias
;
1486 start_data
+= load_bias
;
1487 end_data
+= load_bias
;
/* NOTE(review): garbled extraction -- code kept byte-identical, comments
 * only added.
 * Load the program interpreter (dynamic linker) if one was requested.
 * interpreter_type is a bitmask: bit 0 = a.out (INTERPRETER_AOUT is
 * presumably 1 -- its definition is outside this window), bit 1 = ELF.
 * On success elf_entry is redirected to the interpreter's entry point. */
1489 if (elf_interpreter
) {
1490 if (interpreter_type
& 1) {
1491 elf_entry
= load_aout_interp(&interp_ex
, interpreter_fd
);
1493 else if (interpreter_type
& 2) {
1494 elf_entry
= load_elf_interp(&interp_elf_ex
, interpreter_fd
,
/* Remember where the interpreter landed for function-descriptor
 * relocation (relevant on descriptor-based ABIs). */
1497 reloc_func_desc
= interp_load_addr
;
/* Done with the interpreter file regardless of outcome. */
1499 close(interpreter_fd
);
1500 free(elf_interpreter
);
/* Both load_*_interp helpers signal failure with an all-ones address. */
1502 if (elf_entry
== ~((abi_ulong
)0UL)) {
1503 printf("Unable to load interpreter\n");
/* Pull in the guest symbol table for the debug log when logging is on. */
1512 if (qemu_log_enabled())
1513 load_symbols(&elf_ex
, bprm
->fd
);
/* a.out interpreters keep bprm->fd open (its number was passed to the
 * child above); everyone else closes it here. */
1515 if (interpreter_type
!= INTERPRETER_AOUT
) close(bprm
->fd
);
/* NOTE(review): garbled extraction -- stray original line numbers fused
 * onto the code, several create_elf_tables arguments missing; code kept
 * byte-identical, comments only added.
 * Final fixup: choose the emulated personality, build the ELF auxiliary
 * vector / argv / envp tables on the stack, publish the image layout in
 * *info, and map the SVR4 compatibility page if needed. */
1516 info
->personality
= (ibcs2_interpreter
? PER_SVR4
: PER_LINUX
);
1518 #ifdef LOW_ELF_STACK
1519 info
->start_stack
= bprm
->p
= elf_stack
- 4;
/* Push argc/argv/envp and the ELF auxv onto the new stack; bprm->p ends
 * up as the initial stack pointer.  (Several argument lines are missing
 * from this extraction.) */
1521 bprm
->p
= create_elf_tables(bprm
->p
,
1525 load_addr
, load_bias
,
1527 (interpreter_type
== INTERPRETER_AOUT
? 0 : 1),
/* Record the final image layout for the rest of the emulator. */
1529 info
->load_addr
= reloc_func_desc
;
1530 info
->start_brk
= info
->brk
= elf_brk
;
1531 info
->end_code
= end_code
;
1532 info
->start_code
= start_code
;
1533 info
->start_data
= start_data
;
1534 info
->end_data
= end_data
;
1535 info
->start_stack
= bprm
->p
;
1537 /* Calling set_brk effectively mmaps the pages that we need for the bss and break
1539 set_brk(elf_bss
, elf_brk
);
/* Zero the partial page between the end of file-backed data and the
 * start of the bss mapping. */
1541 padzero(elf_bss
, elf_brk
);
/* Debug dump of the computed layout (presumably under an #ifdef whose
 * lines are missing from this extraction).  NOTE(review): %x with
 * abi_ulong-sized fields looks like a format/argument size mismatch on
 * 64-bit targets -- worth confirming against the full file. */
1544 printf("(start_brk) %x\n" , info
->start_brk
);
1545 printf("(end_code) %x\n" , info
->end_code
);
1546 printf("(start_code) %x\n" , info
->start_code
);
1547 printf("(end_data) %x\n" , info
->end_data
);
1548 printf("(start_stack) %x\n" , info
->start_stack
);
1549 printf("(brk) %x\n" , info
->brk
);
1552 if ( info
->personality
== PER_SVR4
)
1554 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
1555 and some applications "depend" upon this behavior.
1556 Since we do not have the power to recompile these, we
1557 emulate the SVr4 behavior. Sigh. */
1558 mapped_addr
= target_mmap(0, qemu_host_page_size
, PROT_READ
| PROT_EXEC
,
1559 MAP_FIXED
| MAP_PRIVATE
, -1, 0);
/* Hand the (possibly interpreter-redirected) entry point to the caller. */
1562 info
->entry
= elf_entry
;
/*
 * load_aout_interp: stub for loading an a.out-format program interpreter.
 * Only prints a diagnostic -- a.out interpreters are not implemented.
 * NOTE(review): the return statement and closing brace are missing from
 * this garbled extraction (the embedded line numbers jump from 1569 to
 * 1573); code below kept byte-identical.
 */
1567 static int load_aout_interp(void * exptr
, int interp_fd
)
1569 printf("a.out interpreter not yet supported\n");
/*
 * do_init_thread: public wrapper that initializes the guest CPU register
 * state (regs) for a new thread from the loaded image description (infop)
 * by delegating to the per-target init_thread helper.
 * NOTE(review): the closing brace falls past the end of this extraction;
 * code below kept byte-identical.
 */
1573 void do_init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
1575 init_thread(regs
, infop
);