/* This is the Linux kernel elf-loading code, ported into user space */

#include "disas/disas.h"
/* from personality.h */

/*
 * Flags for bug emulation.
 *
 * These occupy the top three bytes.
 */
    ADDR_NO_RANDOMIZE =  0x0040000,      /* disable randomization of VA space */
    FDPIC_FUNCPTRS =     0x0080000,      /* userspace function ptrs point to descriptors */
    MMAP_PAGE_ZERO =     0x0100000,
    ADDR_COMPAT_LAYOUT = 0x0200000,
    READ_IMPLIES_EXEC =  0x0400000,
    ADDR_LIMIT_32BIT =   0x0800000,
    SHORT_INODE =        0x1000000,
    WHOLE_SECONDS =      0x2000000,
    STICKY_TIMEOUTS =    0x4000000,
    ADDR_LIMIT_3GB =     0x8000000,

/*
 * These go in the low byte.  Avoid using the top bit, it will
 * conflict with error returns.
 */
    PER_LINUX_32BIT =   0x0000 | ADDR_LIMIT_32BIT,
    PER_LINUX_FDPIC =   0x0000 | FDPIC_FUNCPTRS,
    PER_SVR4 =          0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
    PER_SVR3 =          0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_SCOSVR3 =       0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS | SHORT_INODE,
    PER_OSR5 =          0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
    PER_WYSEV386 =      0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_ISCR4 =         0x0005 | STICKY_TIMEOUTS,
    PER_SUNOS =         0x0006 | STICKY_TIMEOUTS,
    PER_XENIX =         0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_LINUX32_3GB =   0x0008 | ADDR_LIMIT_3GB,
    PER_IRIX32 =        0x0009 | STICKY_TIMEOUTS,   /* IRIX5 32-bit */
    PER_IRIXN32 =       0x000a | STICKY_TIMEOUTS,   /* IRIX6 new 32-bit */
    PER_IRIX64 =        0x000b | STICKY_TIMEOUTS,   /* IRIX6 64-bit */
    PER_SOLARIS =       0x000d | STICKY_TIMEOUTS,
    PER_UW7 =           0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
    PER_OSF4 =          0x000f,                     /* OSF/1 v4 */

/*
 * Return the base personality without flags.
 */
#define personality(pers) (pers & PER_MASK)
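/*
 * Added illustration (assumes PER_MASK covers only the low byte):
 * personality(PER_SVR4) evaluates to 0x0001, stripping the STICKY_TIMEOUTS
 * and MMAP_PAGE_ZERO bug-emulation bits that PER_SVR4 carries in its upper
 * bytes.
 */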
/* this flag is ineffective under Linux too, should be deleted */
#define MAP_DENYWRITE 0
/* should probably go in elf.h */

#define ELF_PLATFORM get_elf_platform()

static const char *get_elf_platform(void)
{
    static char elf_platform[] = "i386";
    int family = object_property_get_int(OBJECT(thread_cpu), "family", NULL);

    elf_platform[1] = '0' + family;
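    /*
     * Added note: with a CPU "family" property of, say, 6, the string is
     * rewritten in place to "i686"; the result is what the guest sees in
     * AT_PLATFORM below.
     */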
#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    X86CPU *cpu = X86_CPU(thread_cpu);

    return cpu->env.features[FEAT_1_EDX];
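    /*
     * Added note: FEAT_1_EDX is QEMU's cache of the CPUID leaf 1 EDX feature
     * bits, so AT_HWCAP reflects the emulated CPU model rather than the host.
     */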
#define ELF_START_MMAP 0x2aaaaab000ULL
#define elf_check_arch(x) ( ((x) == ELF_ARCH) )

#define ELF_CLASS      ELFCLASS64
#define ELF_DATA       ELFDATA2LSB
#define ELF_ARCH       EM_X86_64

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->rsp = infop->start_stack;
    regs->rip = infop->entry;
    if (bsd_type == target_freebsd) {
        regs->rdi = infop->start_stack;
#define ELF_START_MMAP 0x80000000

/*
 * This is used to ensure we don't load something for the wrong architecture.
 */
#define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )

/*
 * These are used to set parameters in the core dumps.
 */
#define ELF_CLASS      ELFCLASS32
#define ELF_DATA       ELFDATA2LSB
#define ELF_ARCH       EM_386

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->esp = infop->start_stack;
    regs->eip = infop->entry;

    /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
       starts %edx contains a pointer to a function which might be
       registered using `atexit'.  This provides a means for the
       dynamic linker to call DT_FINI functions for shared libraries
       that have been loaded before the code runs.

       A value of 0 tells us we have no such handler.  */

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_ARM )

#define ELF_CLASS      ELFCLASS32
#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA       ELFDATA2MSB
#else
#define ELF_DATA       ELFDATA2LSB
#endif
#define ELF_ARCH       EM_ARM

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    abi_long stack = infop->start_stack;

    memset(regs, 0, sizeof(*regs));
    regs->ARM_cpsr = 0x10;
    if (infop->entry & 1)
        regs->ARM_cpsr |= CPSR_T;
    regs->ARM_pc = infop->entry & 0xfffffffe;
    regs->ARM_sp = infop->start_stack;
    /* FIXME - what to do on failure of get_user()? */
    get_user_ual(regs->ARM_r2, stack + 8); /* envp */
    get_user_ual(regs->ARM_r1, stack + 4); /* envp */
    /* XXX: it seems that r0 is zeroed after ! */
    /* For uClinux PIC binaries. */
    /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
    regs->ARM_r10 = infop->start_data;

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
    ARM_HWCAP_ARM_SWP       = 1 << 0,
    ARM_HWCAP_ARM_HALF      = 1 << 1,
    ARM_HWCAP_ARM_THUMB     = 1 << 2,
    ARM_HWCAP_ARM_26BIT     = 1 << 3,
    ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
    ARM_HWCAP_ARM_FPA       = 1 << 5,
    ARM_HWCAP_ARM_VFP       = 1 << 6,
    ARM_HWCAP_ARM_EDSP      = 1 << 7,

#define ELF_HWCAP (ARM_HWCAP_ARM_SWP | ARM_HWCAP_ARM_HALF              \
                   | ARM_HWCAP_ARM_THUMB | ARM_HWCAP_ARM_FAST_MULT     \
                   | ARM_HWCAP_ARM_FPA | ARM_HWCAP_ARM_VFP)
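/*
 * Added note: unlike the x86 case above, this AT_HWCAP value is a fixed set
 * (SWP, half-word, Thumb, fast multiply, FPA, VFP) rather than being derived
 * from the emulated CPU's actual feature bits.
 */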
#ifdef TARGET_SPARC64

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
#define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )

#define ELF_CLASS      ELFCLASS64
#define ELF_DATA       ELFDATA2MSB
#define ELF_ARCH       EM_SPARCV9

#define STACK_BIAS 2047
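/*
 * Added note: the 64-bit SPARC ABI keeps %sp pointing 2047 bytes below the
 * actual register save area, which is why STACK_BIAS is subtracted from the
 * initial stack pointer written into u_regs[14] in init_thread() below.
 */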
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
    regs->u_regs[14] = infop->start_stack - 16 * 4;
    if (personality(infop->personality) == PER_LINUX32)
        regs->u_regs[14] = infop->start_stack - 16 * 4;
    else {
        regs->u_regs[14] = infop->start_stack - 16 * 8 - STACK_BIAS;
        if (bsd_type == target_freebsd) {
            regs->u_regs[8] = infop->start_stack;
            regs->u_regs[11] = infop->start_stack;
        }
    }
#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_SPARC )

#define ELF_CLASS      ELFCLASS32
#define ELF_DATA       ELFDATA2MSB
#define ELF_ARCH       EM_SPARC

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
    regs->u_regs[14] = infop->start_stack - 16 * 4;
#define ELF_START_MMAP 0x80000000

#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)

#define elf_check_arch(x) ( (x) == EM_PPC64 )

#define ELF_CLASS      ELFCLASS64

#else

#define elf_check_arch(x) ( (x) == EM_PPC )

#define ELF_CLASS      ELFCLASS32

#endif

#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA       ELFDATA2MSB
#else
#define ELF_DATA       ELFDATA2LSB
#endif
#define ELF_ARCH       EM_PPC

/*
 * We need to put in some extra aux table entries to tell glibc what
 * the cache block size is, so it can use the dcbz instruction safely.
 */
#define AT_DCACHEBSIZE 19
#define AT_ICACHEBSIZE 20
#define AT_UCACHEBSIZE 21
/* A special ignored type value for PPC, for glibc compatibility. */
#define AT_IGNOREPPC   22
/*
 * The requirements here are:
 * - keep the final alignment of sp (sp & 0xf)
 * - make sure the 32-bit value at the first 16 byte aligned position of
 *   AUXV is greater than 16 for glibc compatibility.
 *   AT_IGNOREPPC is used for that.
 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
 *   even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
 */
#define DLINFO_ARCH_ITEMS 5
#define ARCH_DLINFO                                     \
        NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20);              \
        NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20);              \
        NEW_AUX_ENT(AT_UCACHEBSIZE, 0);                 \
        /* Now handle glibc compatibility. */           \
        NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);        \
        NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);        \
static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
{
    abi_ulong pos = infop->start_stack;
#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
    abi_ulong entry, toc;
#endif

    _regs->gpr[1] = infop->start_stack;
#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
    get_user_u64(entry, infop->entry);
    entry += infop->load_addr;
    get_user_u64(toc, infop->entry + 8);
    toc += infop->load_addr;
    infop->entry = entry;
#endif
    _regs->nip = infop->entry;
    /* Note that this isn't exactly what the regular kernel does,
     * but it is what the ABI wants and is needed to allow
     * execution of PPC BSD programs.
     */
    /* FIXME - what to do on failure of get_user()? */
    get_user_ual(_regs->gpr[3], pos);
    pos += sizeof(abi_ulong);
    for (tmp = 1; tmp != 0; pos += sizeof(abi_ulong)) {
        get_user_ual(tmp, pos);

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_MIPS )

#define ELF_CLASS      ELFCLASS64
#define ELF_CLASS      ELFCLASS32
#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA       ELFDATA2MSB
#else
#define ELF_DATA       ELFDATA2LSB
#endif
#define ELF_ARCH       EM_MIPS

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->cp0_status = 2 << CP0St_KSU;
    regs->cp0_epc = infop->entry;
    regs->regs[29] = infop->start_stack;

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096

#endif /* TARGET_MIPS */
#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_SH )

#define ELF_CLASS      ELFCLASS32
#define ELF_DATA       ELFDATA2LSB
#define ELF_ARCH       EM_SH

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    /* Check other registers XXXXX */
    regs->pc = infop->entry;
    regs->regs[15] = infop->start_stack;

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_CRIS )

#define ELF_CLASS      ELFCLASS32
#define ELF_DATA       ELFDATA2LSB
#define ELF_ARCH       EM_CRIS

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->erp = infop->entry;

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 8192
#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_68K )

#define ELF_CLASS      ELFCLASS32
#define ELF_DATA       ELFDATA2MSB
#define ELF_ARCH       EM_68K

/* ??? Does this need to do anything?
   #define ELF_PLAT_INIT(_r) */

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->usp = infop->start_stack;
    regs->pc = infop->entry;

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 8192
#define ELF_START_MMAP (0x30000000000ULL)

#define elf_check_arch(x) ( (x) == ELF_ARCH )

#define ELF_CLASS      ELFCLASS64
#define ELF_DATA       ELFDATA2MSB
#define ELF_ARCH       EM_ALPHA

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->usp = infop->start_stack;
    regs->unique = infop->start_data; /* ? */
    printf("Set unique value to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n",
           regs->unique, infop->start_data);

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 8192

#endif /* TARGET_ALPHA */
#define ELF_PLATFORM (NULL)

#define ELF_CLASS ELFCLASS32
#define bswaptls(ptr) bswap32s(ptr)
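/*
 * Added note: bswaptls() swaps a field of the target's "long" size; the
 * ELFCLASS32 definition above is a 32-bit swap, and a 64-bit ELF class
 * presumably pairs with a 64-bit swap instead.
 */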
struct exec
{
    unsigned int a_info;   /* Use macros N_MAGIC, etc for access */
    unsigned int a_text;   /* length of text, in bytes */
    unsigned int a_data;   /* length of data, in bytes */
    unsigned int a_bss;    /* length of uninitialized data area, in bytes */
    unsigned int a_syms;   /* length of symbol table data in file, in bytes */
    unsigned int a_entry;  /* start address */
    unsigned int a_trsize; /* length of relocation info for text, in bytes */
    unsigned int a_drsize; /* length of relocation info for data, in bytes */
};

#define N_MAGIC(exec) ((exec).a_info & 0xffff)

/* max code+data+bss space allocated to elf interpreter */
#define INTERP_MAP_SIZE (32 * 1024 * 1024)

/* max code+data+bss+brk space allocated to ET_DYN executables */
#define ET_DYN_MAP_SIZE (128 * 1024 * 1024)

/* Necessary parameters */
#define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
#define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
#define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
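/*
 * Added example: with a 4 KiB TARGET_ELF_EXEC_PAGESIZE, an address such as
 * 0x08048123 gives TARGET_ELF_PAGESTART() == 0x08048000 and
 * TARGET_ELF_PAGEOFFSET() == 0x123; the loaders below mmap from the page
 * start and add the offset back when computing sizes and file offsets.
 */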
#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
#define INTERPRETER_ELF  2

#define DLINFO_ITEMS 12
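/*
 * Added note: DLINFO_ITEMS is the count of fixed auxiliary-vector entries
 * that create_elf_tables() budgets for when it sizes the initial stack area
 * (see the size computation there); if NEW_AUX_ENT entries are added or
 * removed, this constant has to be kept in sync.
 */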
static inline void memcpy_fromfs(void *to, const void *from, unsigned long n)
{
    memcpy(to, from, n);
}

static int load_aout_interp(void *exptr, int interp_fd);
static void bswap_ehdr(struct elfhdr *ehdr)
{
    bswap16s(&ehdr->e_type);            /* Object file type */
    bswap16s(&ehdr->e_machine);         /* Architecture */
    bswap32s(&ehdr->e_version);         /* Object file version */
    bswaptls(&ehdr->e_entry);           /* Entry point virtual address */
    bswaptls(&ehdr->e_phoff);           /* Program header table file offset */
    bswaptls(&ehdr->e_shoff);           /* Section header table file offset */
    bswap32s(&ehdr->e_flags);           /* Processor-specific flags */
    bswap16s(&ehdr->e_ehsize);          /* ELF header size in bytes */
    bswap16s(&ehdr->e_phentsize);       /* Program header table entry size */
    bswap16s(&ehdr->e_phnum);           /* Program header table entry count */
    bswap16s(&ehdr->e_shentsize);       /* Section header table entry size */
    bswap16s(&ehdr->e_shnum);           /* Section header table entry count */
    bswap16s(&ehdr->e_shstrndx);        /* Section header string table index */
}

static void bswap_phdr(struct elf_phdr *phdr)
{
    bswap32s(&phdr->p_type);            /* Segment type */
    bswaptls(&phdr->p_offset);          /* Segment file offset */
    bswaptls(&phdr->p_vaddr);           /* Segment virtual address */
    bswaptls(&phdr->p_paddr);           /* Segment physical address */
    bswaptls(&phdr->p_filesz);          /* Segment size in file */
    bswaptls(&phdr->p_memsz);           /* Segment size in memory */
    bswap32s(&phdr->p_flags);           /* Segment flags */
    bswaptls(&phdr->p_align);           /* Segment alignment */
}

static void bswap_shdr(struct elf_shdr *shdr)
{
    bswap32s(&shdr->sh_name);
    bswap32s(&shdr->sh_type);
    bswaptls(&shdr->sh_flags);
    bswaptls(&shdr->sh_addr);
    bswaptls(&shdr->sh_offset);
    bswaptls(&shdr->sh_size);
    bswap32s(&shdr->sh_link);
    bswap32s(&shdr->sh_info);
    bswaptls(&shdr->sh_addralign);
    bswaptls(&shdr->sh_entsize);
}

static void bswap_sym(struct elf_sym *sym)
{
    bswap32s(&sym->st_name);
    bswaptls(&sym->st_value);
    bswaptls(&sym->st_size);
    bswap16s(&sym->st_shndx);
}
/*
 * 'copy_elf_strings()' copies argument/envelope strings from user
 * memory to free pages in kernel mem.  These are in a format ready
 * to be put directly into the top of new user memory.
 */
static abi_ulong copy_elf_strings(int argc, char **argv, void **page,
                                  abi_ulong p)
{
    char *tmp, *tmp1, *pag = NULL;

        return 0; /* bullet-proofing */
        fprintf(stderr, "VFS: argc is wrong");
        if (p < len) {  /* this shouldn't happen - 128kB */
            offset = p % TARGET_PAGE_SIZE;
            pag = (char *)page[p / TARGET_PAGE_SIZE];
                pag = g_try_malloc0(TARGET_PAGE_SIZE);
                page[p / TARGET_PAGE_SIZE] = pag;
            if (len == 0 || offset == 0) {
                *(pag + offset) = *tmp;
                int bytes_to_copy = (len > offset) ? offset : len;
                tmp -= bytes_to_copy;
                offset -= bytes_to_copy;
                len -= bytes_to_copy;
                memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
static abi_ulong setup_arg_pages(abi_ulong p, struct linux_binprm *bprm,
                                 struct image_info *info)
{
    abi_ulong stack_base, size, error;

    /* Create enough stack to hold everything.  If we don't use
     * it for args, we'll use it for something else...
     */
    size = x86_stack_size;
    if (size < MAX_ARG_PAGES * TARGET_PAGE_SIZE)
        size = MAX_ARG_PAGES * TARGET_PAGE_SIZE;
    error = target_mmap(0,
                        size + qemu_host_page_size,
                        PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANON, -1, 0);

    /* we reserve one extra page at the top of the stack as guard */
    target_mprotect(error + size, qemu_host_page_size, PROT_NONE);

    stack_base = error + size - MAX_ARG_PAGES * TARGET_PAGE_SIZE;

    for (i = 0; i < MAX_ARG_PAGES; i++) {
        /* FIXME - check return value of memcpy_to_target() for failure */
        memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE);
        g_free(bprm->page[i]);
        stack_base += TARGET_PAGE_SIZE;
static void set_brk(abi_ulong start, abi_ulong end)
{
    /* page-align the start and end addresses... */
    start = HOST_PAGE_ALIGN(start);
    end = HOST_PAGE_ALIGN(end);
    if (target_mmap(start, end - start,
                    PROT_READ | PROT_WRITE | PROT_EXEC,
                    MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) == -1) {
        perror("cannot mmap brk");
/* We need to explicitly zero any fractional pages after the data
   section (i.e. bss).  This would contain the junk from the file that
   should not be in memory. */
static void padzero(abi_ulong elf_bss, abi_ulong last_bss)
{
    if (elf_bss >= last_bss)
        return;

    /* XXX: this is really a hack : if the real host page size is
       smaller than the target page size, some pages after the end
       of the file may not be mapped.  A better fix would be to
       patch target_mmap(), but it is more complicated as the file
       size must be known */
    if (qemu_real_host_page_size < qemu_host_page_size) {
        abi_ulong end_addr, end_addr1;
        end_addr1 = REAL_HOST_PAGE_ALIGN(elf_bss);
        end_addr = HOST_PAGE_ALIGN(elf_bss);
        if (end_addr1 < end_addr) {
            mmap((void *)g2h(end_addr1), end_addr - end_addr1,
                 PROT_READ | PROT_WRITE | PROT_EXEC,
                 MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0);

    nbyte = elf_bss & (qemu_host_page_size - 1);
        nbyte = qemu_host_page_size - nbyte;
        /* FIXME - what to do if put_user() fails? */
        put_user_u8(0, elf_bss);
static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
                                   struct elfhdr *exec,
                                   abi_ulong interp_load_addr, int ibcs,
                                   struct image_info *info)
{
    abi_ulong u_platform;
    const char *k_platform;
    const int n = sizeof(elf_addr_t);

    k_platform = ELF_PLATFORM;
        size_t len = strlen(k_platform) + 1;
        sp -= (len + n - 1) & ~(n - 1);
        /* FIXME - check return value of memcpy_to_target() for failure */
        memcpy_to_target(sp, k_platform, len);
    /*
     * Force 16 byte _final_ alignment here for generality.
     */
    sp = sp &~ (abi_ulong)15;
    size = (DLINFO_ITEMS + 1) * 2;
#ifdef DLINFO_ARCH_ITEMS
    size += DLINFO_ARCH_ITEMS * 2;
#endif
    size += envc + argc + 2;
    size += (!ibcs ? 3 : 1);    /* argc itself */
        sp -= 16 - (size & 15);

    /* This is correct because Linux defines
     * elf_addr_t as Elf32_Off / Elf64_Off
     */
#define NEW_AUX_ENT(id, val) do {               \
        sp -= n; put_user_ual(val, sp);         \
        sp -= n; put_user_ual(id, sp);          \
    } while (0)

    NEW_AUX_ENT (AT_NULL, 0);

    /* There must be exactly DLINFO_ITEMS entries here. */
    NEW_AUX_ENT(AT_PHDR, (abi_ulong)(load_addr + exec->e_phoff));
    NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
    NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
    NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
    NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_load_addr));
    NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
    NEW_AUX_ENT(AT_ENTRY, load_bias + exec->e_entry);
    NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
    NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
    NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
    NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
    NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
    NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK));
    NEW_AUX_ENT(AT_PLATFORM, u_platform);

    /*
     * ARCH_DLINFO must come last so platform specific code can enforce
     * special alignment requirements on the AUXV if necessary (eg. PPC).
     */
    sp = loader_build_argptr(envc, argc, sp, p, !ibcs);
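    /*
     * Added note (sketch of the usual layout, not spelled out in this file):
     * at this point the guest stack holds, from sp upwards, argc, the argv[]
     * pointers and a NULL, the envp[] pointers and a NULL, and then the
     * auxiliary vector built by NEW_AUX_ENT above, ending with AT_NULL.
     */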
static abi_ulong load_elf_interp(struct elfhdr *interp_elf_ex,
                                 abi_ulong *interp_load_addr)
{
    struct elf_phdr *elf_phdata = NULL;
    struct elf_phdr *eppnt;
    abi_ulong load_addr = 0;
    int load_addr_set = 0;
    abi_ulong last_bss, elf_bss;

    bswap_ehdr(interp_elf_ex);
    /* First of all, some simple consistency checks */
    if ((interp_elf_ex->e_type != ET_EXEC &&
         interp_elf_ex->e_type != ET_DYN) ||
        !elf_check_arch(interp_elf_ex->e_machine)) {
        return ~((abi_ulong)0UL);

    /* Now read in all of the header information */
    if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE)
        return ~(abi_ulong)0UL;

    elf_phdata = (struct elf_phdr *)
        malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
        return ~((abi_ulong)0UL);

    /*
     * If the size of this structure has changed, then punt, since
     * we will be doing the wrong thing.
     */
    if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
        return ~((abi_ulong)0UL);

    retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);
    retval = read(interpreter_fd,
                  sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
        perror("load_elf_interp");

    for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {

    if (interp_elf_ex->e_type == ET_DYN) {
        /* in order to avoid hardcoding the interpreter load
           address in qemu, we allocate a big enough memory zone */
        error = target_mmap(0, INTERP_MAP_SIZE,
                            PROT_NONE, MAP_PRIVATE | MAP_ANON,
                            -1, 0);

    for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++)
        if (eppnt->p_type == PT_LOAD) {
            int elf_type = MAP_PRIVATE | MAP_DENYWRITE;

            if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
            if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
            if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
            if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) {
                elf_type |= MAP_FIXED;
                vaddr = eppnt->p_vaddr;
            error = target_mmap(load_addr + TARGET_ELF_PAGESTART(vaddr),
                                eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr),
                                eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr));
                close(interpreter_fd);
                return ~((abi_ulong)0UL);

            if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {

            /*
             * Find the end of the file mapping for this phdr, and keep
             * track of the largest address we see for this.
             */
            k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
            if (k > elf_bss) elf_bss = k;

            /*
             * Do the same thing for the memory mapping - between
             * elf_bss and last_bss is the bss section.
             */
            k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
            if (k > last_bss) last_bss = k;

    /* Now use mmap to map the library into memory. */

    close(interpreter_fd);

    /*
     * Now fill out the bss section.  First pad the last page up
     * to the page boundary, and then perform a mmap to make sure
     * that there are zeromapped pages up to and including the last
     * bss page.
     */
    padzero(elf_bss, last_bss);
    elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */

    /* Map the last of the bss segment */
    if (last_bss > elf_bss) {
        target_mmap(elf_bss, last_bss - elf_bss,
                    PROT_READ | PROT_WRITE | PROT_EXEC,
                    MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0);

    *interp_load_addr = load_addr;
    return ((abi_ulong) interp_elf_ex->e_entry) + load_addr;
static int symfind(const void *s0, const void *s1)
{
    target_ulong addr = *(target_ulong *)s0;
    struct elf_sym *sym = (struct elf_sym *)s1;

    if (addr < sym->st_value) {
        return -1;
    } else if (addr >= sym->st_value + sym->st_size) {
        return 1;

static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr)
{
#if ELF_CLASS == ELFCLASS32
    struct elf_sym *syms = s->disas_symtab.elf32;
#else
    struct elf_sym *syms = s->disas_symtab.elf64;
#endif
    struct elf_sym *sym;

    sym = bsearch(&orig_addr, syms, s->disas_num_syms, sizeof(*syms), symfind);

    return s->disas_strtab + sym->st_name;
/* FIXME: This should use elf_ops.h */
static int symcmp(const void *s0, const void *s1)
{
    struct elf_sym *sym0 = (struct elf_sym *)s0;
    struct elf_sym *sym1 = (struct elf_sym *)s1;
    return (sym0->st_value < sym1->st_value)
        ? -1
        : ((sym0->st_value > sym1->st_value) ? 1 : 0);
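/*
 * Added note: load_symbols() below sorts the retained function symbols by
 * st_value with symcmp(), which is what allows lookup_symbolxx() above to
 * bsearch() them with symfind() and map a guest address back to a name.
 */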
/* Best attempt to load symbols from this ELF object. */
static void load_symbols(struct elfhdr *hdr, int fd)
{
    unsigned int i, nsyms;
    struct elf_shdr sechdr, symtab, strtab;
    struct elf_sym *syms, *new_syms;

    lseek(fd, hdr->e_shoff, SEEK_SET);
    for (i = 0; i < hdr->e_shnum; i++) {
        if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr))
        bswap_shdr(&sechdr);
        if (sechdr.sh_type == SHT_SYMTAB) {
            lseek(fd, hdr->e_shoff
                  + sizeof(sechdr) * sechdr.sh_link, SEEK_SET);
            if (read(fd, &strtab, sizeof(strtab))
            bswap_shdr(&strtab);

    return; /* Shouldn't happen... */

    /* Now know where the strtab and symtab are.  Snarf them. */
    s = malloc(sizeof(*s));
    syms = malloc(symtab.sh_size);

    s->disas_strtab = strings = malloc(strtab.sh_size);
    if (!s->disas_strtab) {

    lseek(fd, symtab.sh_offset, SEEK_SET);
    if (read(fd, syms, symtab.sh_size) != symtab.sh_size) {

    nsyms = symtab.sh_size / sizeof(struct elf_sym);
        bswap_sym(syms + i);
        // Throw away entries which we do not need.
        if (syms[i].st_shndx == SHN_UNDEF ||
            syms[i].st_shndx >= SHN_LORESERVE ||
            ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) {
            syms[i] = syms[nsyms];
#if defined(TARGET_ARM) || defined (TARGET_MIPS)
        /* The bottom address bit marks a Thumb or MIPS16 symbol. */
        syms[i].st_value &= ~(target_ulong)1;
#endif

    /* Attempt to free the storage associated with the local symbols
       that we threw away.  Whether or not this has any effect on the
       memory allocation depends on the malloc implementation and how
       many symbols we managed to discard. */
    new_syms = realloc(syms, nsyms * sizeof(*syms));
    if (new_syms == NULL) {

    qsort(syms, nsyms, sizeof(*syms), symcmp);

    lseek(fd, strtab.sh_offset, SEEK_SET);
    if (read(fd, strings, strtab.sh_size) != strtab.sh_size) {

    s->disas_num_syms = nsyms;
#if ELF_CLASS == ELFCLASS32
    s->disas_symtab.elf32 = syms;
    s->lookup_symbol = (lookup_symbol_t)lookup_symbolxx;
#else
    s->disas_symtab.elf64 = syms;
    s->lookup_symbol = (lookup_symbol_t)lookup_symbolxx;
#endif
int load_elf_binary(struct linux_binprm *bprm, struct target_pt_regs *regs,
                    struct image_info *info)
{
    struct elfhdr elf_ex;
    struct elfhdr interp_elf_ex;
    struct exec interp_ex;
    int interpreter_fd = -1; /* avoid warning */
    abi_ulong load_addr, load_bias;
    int load_addr_set = 0;
    unsigned int interpreter_type = INTERPRETER_NONE;
    unsigned char ibcs2_interpreter;
    abi_ulong mapped_addr;
    struct elf_phdr *elf_ppnt;
    struct elf_phdr *elf_phdata;
    abi_ulong elf_bss, k, elf_brk;
    char *elf_interpreter;
    abi_ulong elf_entry, interp_load_addr = 0;
    abi_ulong start_code, end_code, start_data, end_data;
    abi_ulong reloc_func_desc = 0;
    abi_ulong elf_stack;
    char passed_fileno[6];

    ibcs2_interpreter = 0;

    elf_ex = *((struct elfhdr *) bprm->buf);          /* exec-header */
    bswap_ehdr(&elf_ex);

    /* First of all, some simple consistency checks */
    if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
        (!elf_check_arch(elf_ex.e_machine))) {

    bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
    bprm->p = copy_elf_strings(bprm->envc, bprm->envp, bprm->page, bprm->p);
    bprm->p = copy_elf_strings(bprm->argc, bprm->argv, bprm->page, bprm->p);

    /* Now read in all of the header information */
    elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize * elf_ex.e_phnum);
    if (elf_phdata == NULL) {

    retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET);
    retval = read(bprm->fd, (char *)elf_phdata,
                  elf_ex.e_phentsize * elf_ex.e_phnum);
        perror("load_elf_binary");

    elf_ppnt = elf_phdata;
    for (i = 0; i < elf_ex.e_phnum; i++, elf_ppnt++) {
        bswap_phdr(elf_ppnt);

    elf_ppnt = elf_phdata;

    elf_stack = ~((abi_ulong)0UL);
    elf_interpreter = NULL;
    start_code = ~((abi_ulong)0UL);

    interp_ex.a_info = 0;

    for (i = 0; i < elf_ex.e_phnum; i++) {
        if (elf_ppnt->p_type == PT_INTERP) {
            if (elf_interpreter != NULL)
                free(elf_interpreter);

            /* This is the program interpreter used for
             * shared libraries - for now assume that this
             * is an a.out format binary.
             */
            elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);

            if (elf_interpreter == NULL) {

            retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET);
            retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz);
                perror("load_elf_binary2");

            /* If the program interpreter is one of these two,
               then assume an iBCS2 image.  Otherwise assume
               a native linux image. */

            /* JRP - Need to add X86 lib dir stuff here... */

            if (strcmp(elf_interpreter, "/usr/lib/libc.so.1") == 0 ||
                strcmp(elf_interpreter, "/usr/lib/ld.so.1") == 0) {
                ibcs2_interpreter = 1;

            printf("Using ELF interpreter %s\n", path(elf_interpreter));

            retval = open(path(elf_interpreter), O_RDONLY);
                interpreter_fd = retval;
                perror(elf_interpreter);
                /* retval = -errno; */

            retval = lseek(interpreter_fd, 0, SEEK_SET);
            retval = read(interpreter_fd, bprm->buf, 128);
            interp_ex = *((struct exec *)bprm->buf);       /* aout exec-header */
            interp_elf_ex = *((struct elfhdr *)bprm->buf); /* elf exec-header */
                perror("load_elf_binary3");
                free(elf_interpreter);

    /* Some simple consistency checks for the interpreter */
    if (elf_interpreter) {
        interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;

        /* Now figure out which format our binary is */
        if ((N_MAGIC(interp_ex) != OMAGIC) && (N_MAGIC(interp_ex) != ZMAGIC) &&
            (N_MAGIC(interp_ex) != QMAGIC)) {
            interpreter_type = INTERPRETER_ELF;

        if (interp_elf_ex.e_ident[0] != 0x7f ||
            strncmp((char *)&interp_elf_ex.e_ident[1], "ELF", 3) != 0) {
            interpreter_type &= ~INTERPRETER_ELF;

        if (!interpreter_type) {
            free(elf_interpreter);

    /* OK, we are done with that, now set up the arg stuff,
       and then start this sucker up */

    if (interpreter_type == INTERPRETER_AOUT) {
        snprintf(passed_fileno, sizeof(passed_fileno), "%d", bprm->fd);
        passed_p = passed_fileno;

        if (elf_interpreter) {
            bprm->p = copy_elf_strings(1, &passed_p, bprm->page, bprm->p);
        free(elf_interpreter);

    /* OK, this is the point of no return */

    info->start_mmap = (abi_ulong)ELF_START_MMAP;
    elf_entry = (abi_ulong) elf_ex.e_entry;

    /*
     * If the user has not explicitly set guest_base, probe here whether we
     * should set it automatically.
     */
    if (!have_guest_base) {
        /*
         * Go through the ELF program header table and find out whether
         * any of the segments drop below our current mmap_min_addr and
         * in that case set guest_base to the corresponding address.
         */
        for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum;
             i++, elf_ppnt++) {
            if (elf_ppnt->p_type != PT_LOAD)
            if (HOST_PAGE_ALIGN(elf_ppnt->p_vaddr) < mmap_min_addr) {
                guest_base = HOST_PAGE_ALIGN(mmap_min_addr);

    /* Do this so that we can load the interpreter, if need be.  We will
       change some of these later */
    bprm->p = setup_arg_pages(bprm->p, bprm, info);
    info->start_stack = bprm->p;

    /* Now we do a little grungy work by mmaping the ELF image into
     * the correct location in memory.  At this point, we assume that
     * the image should be loaded at a fixed address, not at a variable
     * address.
     */
    for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
        if (elf_ppnt->p_type != PT_LOAD)

        if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
        if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
        if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
        elf_flags = MAP_PRIVATE | MAP_DENYWRITE;
        if (elf_ex.e_type == ET_EXEC || load_addr_set) {
            elf_flags |= MAP_FIXED;
        } else if (elf_ex.e_type == ET_DYN) {
            /* Try and get dynamic programs out of the way of the default mmap
               base, as well as whatever program they might try to exec.  This
               is because the brk will follow the loader, and is not movable. */
            /* NOTE: for qemu, we do a big mmap to get enough space
               without hardcoding any address */
            error = target_mmap(0, ET_DYN_MAP_SIZE,
                                PROT_NONE, MAP_PRIVATE | MAP_ANON,
                                -1, 0);
            load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr);

        error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
                            (elf_ppnt->p_filesz +
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
                            (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
                            (elf_ppnt->p_offset -
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));

#ifdef LOW_ELF_STACK
        if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
            elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr);
#endif

        if (!load_addr_set) {
            load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
            if (elf_ex.e_type == ET_DYN) {
                load_bias += error -
                    TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
                load_addr += load_bias;
                reloc_func_desc = load_bias;

        k = elf_ppnt->p_vaddr;
        k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
        if ((elf_ppnt->p_flags & PF_X) && end_code < k)
        k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
        if (k > elf_brk) elf_brk = k;

    elf_entry += load_bias;
    elf_bss += load_bias;
    elf_brk += load_bias;
    start_code += load_bias;
    end_code += load_bias;
    start_data += load_bias;
    end_data += load_bias;

    if (elf_interpreter) {
        if (interpreter_type & 1) {
            elf_entry = load_aout_interp(&interp_ex, interpreter_fd);
        else if (interpreter_type & 2) {
            elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,
            reloc_func_desc = interp_load_addr;

        close(interpreter_fd);
        free(elf_interpreter);

        if (elf_entry == ~((abi_ulong)0UL)) {
            printf("Unable to load interpreter\n");

    if (qemu_log_enabled())
        load_symbols(&elf_ex, bprm->fd);

    if (interpreter_type != INTERPRETER_AOUT) close(bprm->fd);
    info->personality = (ibcs2_interpreter ? PER_SVR4 : PER_LINUX);

#ifdef LOW_ELF_STACK
    info->start_stack = bprm->p = elf_stack - 4;
#endif
    bprm->p = create_elf_tables(bprm->p,
                                load_addr, load_bias,
                                (interpreter_type == INTERPRETER_AOUT ? 0 : 1),
    info->load_addr = reloc_func_desc;
    info->start_brk = info->brk = elf_brk;
    info->end_code = end_code;
    info->start_code = start_code;
    info->start_data = start_data;
    info->end_data = end_data;
    info->start_stack = bprm->p;

    /* Calling set_brk effectively mmaps the pages that we need for the bss
       and break sections */
    set_brk(elf_bss, elf_brk);

    padzero(elf_bss, elf_brk);

    printf("(start_brk) %x\n", info->start_brk);
    printf("(end_code) %x\n", info->end_code);
    printf("(start_code) %x\n", info->start_code);
    printf("(end_data) %x\n", info->end_data);
    printf("(start_stack) %x\n", info->start_stack);
    printf("(brk) %x\n", info->brk);

    if (info->personality == PER_SVR4) {
        /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
           and some applications "depend" upon this behavior.
           Since we do not have the power to recompile these, we
           emulate the SVr4 behavior.  Sigh. */
        mapped_addr = target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
                                  MAP_FIXED | MAP_PRIVATE, -1, 0);

    info->entry = elf_entry;

static int load_aout_interp(void *exptr, int interp_fd)
{
    printf("a.out interpreter not yet supported\n");

void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    init_thread(regs, infop);