/* This is the Linux kernel elf-loading code, ported into user space */

/* from personality.h */

/*
 * Flags for bug emulation.
 * These occupy the top three bytes.
 */
    ADDR_NO_RANDOMIZE  = 0x0040000,  /* disable randomization of VA space */
    FDPIC_FUNCPTRS     = 0x0080000,  /* userspace function ptrs point to descriptors */
    MMAP_PAGE_ZERO     = 0x0100000,
    ADDR_COMPAT_LAYOUT = 0x0200000,
    READ_IMPLIES_EXEC  = 0x0400000,
    ADDR_LIMIT_32BIT   = 0x0800000,
    SHORT_INODE        = 0x1000000,
    WHOLE_SECONDS      = 0x2000000,
    STICKY_TIMEOUTS    = 0x4000000,
    ADDR_LIMIT_3GB     = 0x8000000,
/*
 * These go in the low byte.  Avoid using the top bit, it will
 * conflict with error returns.
 */
    PER_LINUX_32BIT = 0x0000 | ADDR_LIMIT_32BIT,
    PER_LINUX_FDPIC = 0x0000 | FDPIC_FUNCPTRS,
    PER_SVR4        = 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
    PER_SVR3        = 0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_SCOSVR3     = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS | SHORT_INODE,
    PER_OSR5        = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
    PER_WYSEV386    = 0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_ISCR4       = 0x0005 | STICKY_TIMEOUTS,
    PER_SUNOS       = 0x0006 | STICKY_TIMEOUTS,
    PER_XENIX       = 0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_LINUX32_3GB = 0x0008 | ADDR_LIMIT_3GB,
    PER_IRIX32      = 0x0009 | STICKY_TIMEOUTS,  /* IRIX5 32-bit */
    PER_IRIXN32     = 0x000a | STICKY_TIMEOUTS,  /* IRIX6 new 32-bit */
    PER_IRIX64      = 0x000b | STICKY_TIMEOUTS,  /* IRIX6 64-bit */
    PER_SOLARIS     = 0x000d | STICKY_TIMEOUTS,
    PER_UW7         = 0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
    PER_OSF4        = 0x000f,                    /* OSF/1 v4 */
/*
 * Return the base personality without flags.
 */
#define personality(pers)   (pers & PER_MASK)
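/*
 * Worked example (assuming PER_MASK masks off everything above the low
 * byte): PER_SVR4 is 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO, so
 * personality(PER_SVR4) drops the bug-emulation bits and yields the base
 * value 0x0001.
 */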
/* this flag is ineffective under linux too, should be deleted */
#define MAP_DENYWRITE 0
/* should probably go in elf.h */

#define ELF_PLATFORM get_elf_platform()

static const char *get_elf_platform(void)
    static char elf_platform[] = "i386";
    int family = (thread_env->cpuid_version >> 8) & 0xff;
    elf_platform[1] = '0' + family;

#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
    return thread_env->cpuid_features;
#define ELF_START_MMAP 0x2aaaaab000ULL
#define elf_check_arch(x) ( ((x) == ELF_ARCH) )

#define ELF_CLASS   ELFCLASS64
#define ELF_DATA    ELFDATA2LSB
#define ELF_ARCH    EM_X86_64

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
    regs->rsp = infop->start_stack;
    regs->rip = infop->entry;
    if (bsd_type == target_freebsd) {
        regs->rdi = infop->start_stack;
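        /*
         * FreeBSD also hands the initial stack address to the entry point
         * in %rdi, which is what the assignment above mirrors.
         */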
#define ELF_START_MMAP 0x80000000

/*
 * This is used to ensure we don't load something for the wrong architecture.
 */
#define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )

/*
 * These are used to set parameters in the core dumps.
 */
#define ELF_CLASS   ELFCLASS32
#define ELF_DATA    ELFDATA2LSB
#define ELF_ARCH    EM_386

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
    regs->esp = infop->start_stack;
    regs->eip = infop->entry;

    /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
       starts %edx contains a pointer to a function which might be
       registered using `atexit'.  This provides a means for the
       dynamic linker to call DT_FINI functions for shared libraries
       that have been loaded before the code runs.

       A value of 0 tells us we have no such handler. */

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_ARM )

#define ELF_CLASS   ELFCLASS32
#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA    ELFDATA2MSB
#define ELF_DATA    ELFDATA2LSB
#define ELF_ARCH    EM_ARM

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
    abi_long stack = infop->start_stack;
    memset(regs, 0, sizeof(*regs));
    regs->ARM_cpsr = 0x10;
    if (infop->entry & 1)
        regs->ARM_cpsr |= CPSR_T;
    regs->ARM_pc = infop->entry & 0xfffffffe;
    regs->ARM_sp = infop->start_stack;
    /* FIXME - what to do for failure of get_user()? */
    get_user_ual(regs->ARM_r2, stack + 8); /* envp */
    get_user_ual(regs->ARM_r1, stack + 4); /* envp */
    /* XXX: it seems that r0 is zeroed afterwards! */
    /* For uClinux PIC binaries.  */
    /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
    regs->ARM_r10 = infop->start_data;

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096

    ARM_HWCAP_ARM_SWP       = 1 << 0,
    ARM_HWCAP_ARM_HALF      = 1 << 1,
    ARM_HWCAP_ARM_THUMB     = 1 << 2,
    ARM_HWCAP_ARM_26BIT     = 1 << 3,
    ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
    ARM_HWCAP_ARM_FPA       = 1 << 5,
    ARM_HWCAP_ARM_VFP       = 1 << 6,
    ARM_HWCAP_ARM_EDSP      = 1 << 7,

#define ELF_HWCAP (ARM_HWCAP_ARM_SWP | ARM_HWCAP_ARM_HALF              \
                   | ARM_HWCAP_ARM_THUMB | ARM_HWCAP_ARM_FAST_MULT     \
                   | ARM_HWCAP_ARM_FPA | ARM_HWCAP_ARM_VFP)
#ifdef TARGET_SPARC64

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
#define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )

#define ELF_CLASS   ELFCLASS64
#define ELF_DATA    ELFDATA2MSB
#define ELF_ARCH    EM_SPARCV9

#define STACK_BIAS 2047
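/*
 * On 64-bit SPARC the ABI keeps the stack pointer biased 2047 bytes below
 * the actual top of the frame, which is why STACK_BIAS is subtracted from
 * the initial stack pointer in init_thread() below.
 */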
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
    regs->u_regs[14] = infop->start_stack - 16 * 4;
    if (personality(infop->personality) == PER_LINUX32)
        regs->u_regs[14] = infop->start_stack - 16 * 4;
        regs->u_regs[14] = infop->start_stack - 16 * 8 - STACK_BIAS;
        if (bsd_type == target_freebsd) {
            regs->u_regs[8] = infop->start_stack;
            regs->u_regs[11] = infop->start_stack;
#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_SPARC )

#define ELF_CLASS   ELFCLASS32
#define ELF_DATA    ELFDATA2MSB
#define ELF_ARCH    EM_SPARC

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
    regs->u_regs[14] = infop->start_stack - 16 * 4;
#define ELF_START_MMAP 0x80000000

#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)

#define elf_check_arch(x) ( (x) == EM_PPC64 )

#define ELF_CLASS   ELFCLASS64

#define elf_check_arch(x) ( (x) == EM_PPC )

#define ELF_CLASS   ELFCLASS32

#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA    ELFDATA2MSB
#define ELF_DATA    ELFDATA2LSB
#define ELF_ARCH    EM_PPC

/*
 * We need to put in some extra aux table entries to tell glibc what
 * the cache block size is, so it can use the dcbz instruction safely.
 */
#define AT_DCACHEBSIZE 19
#define AT_ICACHEBSIZE 20
#define AT_UCACHEBSIZE 21
/* A special ignored type value for PPC, for glibc compatibility.  */
#define AT_IGNOREPPC 22
/*
 * The requirements here are:
 * - keep the final alignment of sp (sp & 0xf)
 * - make sure the 32-bit value at the first 16 byte aligned position of
 *   AUXV is greater than 16 for glibc compatibility.
 *   AT_IGNOREPPC is used for that.
 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
 *   even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
 */
#define DLINFO_ARCH_ITEMS 5
#define ARCH_DLINFO                                     \
        NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20);              \
        NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20);              \
        NEW_AUX_ENT(AT_UCACHEBSIZE, 0);                 \
        /* Now handle glibc compatibility. */           \
        NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);        \
        NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);        \
static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
    abi_ulong pos = infop->start_stack;
#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
    abi_ulong entry, toc;

    _regs->gpr[1] = infop->start_stack;
#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
    entry = ldq_raw(infop->entry) + infop->load_addr;
    toc = ldq_raw(infop->entry + 8) + infop->load_addr;
    infop->entry = entry;
    _regs->nip = infop->entry;
    /* Note that this isn't exactly what the regular kernel does,
     * but it is what the ABI wants and is needed to allow
     * execution of PPC BSD programs.
     */
    /* FIXME - what to do for failure of get_user()? */
    get_user_ual(_regs->gpr[3], pos);
    pos += sizeof(abi_ulong);
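    /* The loop below advances pos past the argv pointer array on the guest
     * stack, up to and including its terminating NULL, which leaves pos at
     * the start of the environment block. */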
    for (tmp = 1; tmp != 0; pos += sizeof(abi_ulong))

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_MIPS )

#define ELF_CLASS   ELFCLASS64
#define ELF_CLASS   ELFCLASS32

#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA    ELFDATA2MSB
#define ELF_DATA    ELFDATA2LSB
#define ELF_ARCH    EM_MIPS

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
    regs->cp0_status = 2 << CP0St_KSU;
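    /* Setting the KSU field of the CP0 status register to 2 starts the
     * guest CPU in user mode. */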
    regs->cp0_epc = infop->entry;
    regs->regs[29] = infop->start_stack;

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096

#endif /* TARGET_MIPS */
#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_SH )

#define ELF_CLASS   ELFCLASS32
#define ELF_DATA    ELFDATA2LSB
#define ELF_ARCH    EM_SH

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
    /* Check other registers XXXXX */
    regs->pc = infop->entry;
    regs->regs[15] = infop->start_stack;

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_CRIS )

#define ELF_CLASS   ELFCLASS32
#define ELF_DATA    ELFDATA2LSB
#define ELF_ARCH    EM_CRIS

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
    regs->erp = infop->entry;

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 8192
#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_68K )

#define ELF_CLASS   ELFCLASS32
#define ELF_DATA    ELFDATA2MSB
#define ELF_ARCH    EM_68K

/* ??? Does this need to do anything?
   #define ELF_PLAT_INIT(_r) */

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
    regs->usp = infop->start_stack;
    regs->pc = infop->entry;

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 8192
#define ELF_START_MMAP (0x30000000000ULL)

#define elf_check_arch(x) ( (x) == ELF_ARCH )

#define ELF_CLASS   ELFCLASS64
#define ELF_DATA    ELFDATA2MSB
#define ELF_ARCH    EM_ALPHA

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
    regs->pc = infop->entry;
    regs->usp = infop->start_stack;
    regs->unique = infop->start_data; /* ? */
    printf("Set unique value to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n",
           regs->unique, infop->start_data);

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 8192

#endif /* TARGET_ALPHA */
#define ELF_PLATFORM (NULL)

#define ELF_CLASS   ELFCLASS32
#define bswaptls(ptr) bswap32s(ptr)

    unsigned int a_info;   /* Use macros N_MAGIC, etc for access */
    unsigned int a_text;   /* length of text, in bytes */
    unsigned int a_data;   /* length of data, in bytes */
    unsigned int a_bss;    /* length of uninitialized data area, in bytes */
    unsigned int a_syms;   /* length of symbol table data in file, in bytes */
    unsigned int a_entry;  /* start address */
    unsigned int a_trsize; /* length of relocation info for text, in bytes */
    unsigned int a_drsize; /* length of relocation info for data, in bytes */

#define N_MAGIC(exec) ((exec).a_info & 0xffff)

/* max code+data+bss space allocated to elf interpreter */
#define INTERP_MAP_SIZE (32 * 1024 * 1024)

/* max code+data+bss+brk space allocated to ET_DYN executables */
#define ET_DYN_MAP_SIZE (128 * 1024 * 1024)
/* Necessary parameters */
#define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
#define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
#define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
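/*
 * The two macros above round a guest virtual address down to its ELF page
 * boundary and extract the offset within that page; segment mappings below
 * are set up with page-aligned addresses plus this in-page offset.
 */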
#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
#define INTERPRETER_ELF 2

#define DLINFO_ITEMS 12

static inline void memcpy_fromfs(void *to, const void *from, unsigned long n)

static int load_aout_interp(void *exptr, int interp_fd);
static void bswap_ehdr(struct elfhdr *ehdr)
    bswap16s(&ehdr->e_type);            /* Object file type */
    bswap16s(&ehdr->e_machine);         /* Architecture */
    bswap32s(&ehdr->e_version);         /* Object file version */
    bswaptls(&ehdr->e_entry);           /* Entry point virtual address */
    bswaptls(&ehdr->e_phoff);           /* Program header table file offset */
    bswaptls(&ehdr->e_shoff);           /* Section header table file offset */
    bswap32s(&ehdr->e_flags);           /* Processor-specific flags */
    bswap16s(&ehdr->e_ehsize);          /* ELF header size in bytes */
    bswap16s(&ehdr->e_phentsize);       /* Program header table entry size */
    bswap16s(&ehdr->e_phnum);           /* Program header table entry count */
    bswap16s(&ehdr->e_shentsize);       /* Section header table entry size */
    bswap16s(&ehdr->e_shnum);           /* Section header table entry count */
    bswap16s(&ehdr->e_shstrndx);        /* Section header string table index */

static void bswap_phdr(struct elf_phdr *phdr)
    bswap32s(&phdr->p_type);            /* Segment type */
    bswaptls(&phdr->p_offset);          /* Segment file offset */
    bswaptls(&phdr->p_vaddr);           /* Segment virtual address */
    bswaptls(&phdr->p_paddr);           /* Segment physical address */
    bswaptls(&phdr->p_filesz);          /* Segment size in file */
    bswaptls(&phdr->p_memsz);           /* Segment size in memory */
    bswap32s(&phdr->p_flags);           /* Segment flags */
    bswaptls(&phdr->p_align);           /* Segment alignment */

static void bswap_shdr(struct elf_shdr *shdr)
    bswap32s(&shdr->sh_name);
    bswap32s(&shdr->sh_type);
    bswaptls(&shdr->sh_flags);
    bswaptls(&shdr->sh_addr);
    bswaptls(&shdr->sh_offset);
    bswaptls(&shdr->sh_size);
    bswap32s(&shdr->sh_link);
    bswap32s(&shdr->sh_info);
    bswaptls(&shdr->sh_addralign);
    bswaptls(&shdr->sh_entsize);

static void bswap_sym(struct elf_sym *sym)
    bswap32s(&sym->st_name);
    bswaptls(&sym->st_value);
    bswaptls(&sym->st_size);
    bswap16s(&sym->st_shndx);
/*
 * 'copy_elf_strings()' copies argument/environment strings from user
 * memory to free pages in kernel memory.  These are in a format ready
 * to be put directly into the top of new user memory.
 */
static abi_ulong copy_elf_strings(int argc, char **argv, void **page,
                                  abi_ulong p)
    char *tmp, *tmp1, *pag = NULL;

        return 0; /* bullet-proofing */

            fprintf(stderr, "VFS: argc is wrong");
        if (p < len) {  /* this shouldn't happen - 128kB */
        offset = p % TARGET_PAGE_SIZE;
        pag = (char *)page[p / TARGET_PAGE_SIZE];
            pag = (char *)malloc(TARGET_PAGE_SIZE);
            memset(pag, 0, TARGET_PAGE_SIZE);
            page[p / TARGET_PAGE_SIZE] = pag;
        if (len == 0 || offset == 0) {
            *(pag + offset) = *tmp;
            int bytes_to_copy = (len > offset) ? offset : len;
            tmp -= bytes_to_copy;
            offset -= bytes_to_copy;
            len -= bytes_to_copy;
            memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
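    /* Note: strings are packed downwards from the top of the argument
     * pages -- p, offset and tmp all count down -- and each
     * memcpy_fromfs() chunk fills the page back up to the previous
     * position. */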
static abi_ulong setup_arg_pages(abi_ulong p, struct linux_binprm *bprm,
                                 struct image_info *info)
    abi_ulong stack_base, size, error;

    /* Create enough stack to hold everything.  If we don't use
     * it for args, we'll use it for something else...
     */
    size = x86_stack_size;
    if (size < MAX_ARG_PAGES * TARGET_PAGE_SIZE)
        size = MAX_ARG_PAGES * TARGET_PAGE_SIZE;
    error = target_mmap(0,
                        size + qemu_host_page_size,
                        PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANON,

    /* we reserve one extra page at the top of the stack as guard */
    target_mprotect(error + size, qemu_host_page_size, PROT_NONE);

    stack_base = error + size - MAX_ARG_PAGES * TARGET_PAGE_SIZE;

    for (i = 0; i < MAX_ARG_PAGES; i++) {
        /* FIXME - check return value of memcpy_to_target() for failure */
        memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE);
        stack_base += TARGET_PAGE_SIZE;
static void set_brk(abi_ulong start, abi_ulong end)
    /* page-align the start and end addresses... */
    start = HOST_PAGE_ALIGN(start);
    end = HOST_PAGE_ALIGN(end);
    if (target_mmap(start, end - start,
                    PROT_READ | PROT_WRITE | PROT_EXEC,
                    MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) == -1) {
        perror("cannot mmap brk");
/* We need to explicitly zero any fractional pages after the data
   section (i.e. bss).  This would contain the junk from the file that
   should not be in memory. */
static void padzero(abi_ulong elf_bss, abi_ulong last_bss)
    if (elf_bss >= last_bss)

    /* XXX: this is really a hack : if the real host page size is
       smaller than the target page size, some pages after the end
       of the file may not be mapped.  A better fix would be to
       patch target_mmap(), but it is more complicated as the file
       size must be known */
    if (qemu_real_host_page_size < qemu_host_page_size) {
        abi_ulong end_addr, end_addr1;
        end_addr1 = (elf_bss + qemu_real_host_page_size - 1) &
            ~(qemu_real_host_page_size - 1);
        end_addr = HOST_PAGE_ALIGN(elf_bss);
        if (end_addr1 < end_addr) {
            mmap((void *)g2h(end_addr1), end_addr - end_addr1,
                 PROT_READ | PROT_WRITE | PROT_EXEC,
                 MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0);

    nbyte = elf_bss & (qemu_host_page_size - 1);
        nbyte = qemu_host_page_size - nbyte;
            /* FIXME - what to do if put_user() fails? */
            put_user_u8(0, elf_bss);
static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
                                   struct elfhdr *exec,
                                   abi_ulong interp_load_addr, int ibcs,
                                   struct image_info *info)
    abi_ulong u_platform;
    const char *k_platform;
    const int n = sizeof(elf_addr_t);

    k_platform = ELF_PLATFORM;
        size_t len = strlen(k_platform) + 1;
        sp -= (len + n - 1) & ~(n - 1);
        /* FIXME - check return value of memcpy_to_target() for failure */
        memcpy_to_target(sp, k_platform, len);
    /*
     * Force 16 byte _final_ alignment here for generality.
     */
    sp = sp & ~(abi_ulong)15;
    size = (DLINFO_ITEMS + 1) * 2;
#ifdef DLINFO_ARCH_ITEMS
    size += DLINFO_ARCH_ITEMS * 2;
    size += envc + argc + 2;
    size += (!ibcs ? 3 : 1);    /* argc itself */
    sp -= 16 - (size & 15);
    /* This is correct because Linux defines
     * elf_addr_t as Elf32_Off / Elf64_Off
     */
#define NEW_AUX_ENT(id, val) do {               \
        sp -= n; put_user_ual(val, sp);         \
        sp -= n; put_user_ual(id, sp);          \
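/*
 * The aux vector is built downwards: each entry pushes its value first and
 * its id second, so a pair reads as (id, value) at ascending addresses, and
 * the AT_NULL entry pushed first below ends up as the terminator at the high
 * end of the table.
 */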
    NEW_AUX_ENT(AT_NULL, 0);

    /* There must be exactly DLINFO_ITEMS entries here.  */
    NEW_AUX_ENT(AT_PHDR, (abi_ulong)(load_addr + exec->e_phoff));
    NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
    NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
    NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
    NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_load_addr));
    NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
    NEW_AUX_ENT(AT_ENTRY, load_bias + exec->e_entry);
    NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
    NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
    NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
    NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
    NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
    NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK));
    NEW_AUX_ENT(AT_PLATFORM, u_platform);
    /*
     * ARCH_DLINFO must come last so platform specific code can enforce
     * special alignment requirements on the AUXV if necessary (eg. PPC).
     */

    sp = loader_build_argptr(envc, argc, sp, p, !ibcs);
static abi_ulong load_elf_interp(struct elfhdr *interp_elf_ex,
                                 abi_ulong *interp_load_addr)
    struct elf_phdr *elf_phdata = NULL;
    struct elf_phdr *eppnt;
    abi_ulong load_addr = 0;
    int load_addr_set = 0;
    abi_ulong last_bss, elf_bss;

    bswap_ehdr(interp_elf_ex);

    /* First of all, some simple consistency checks */
    if ((interp_elf_ex->e_type != ET_EXEC &&
         interp_elf_ex->e_type != ET_DYN) ||
        !elf_check_arch(interp_elf_ex->e_machine)) {
        return ~((abi_ulong)0UL);

    /* Now read in all of the header information */

    if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE)
        return ~(abi_ulong)0UL;

    elf_phdata = (struct elf_phdr *)
        malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);

        return ~((abi_ulong)0UL);

    /*
     * If the size of this structure has changed, then punt, since
     * we will be doing the wrong thing.
     */
    if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
        return ~((abi_ulong)0UL);

    retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);

    retval = read(interpreter_fd,
                  sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
        perror("load_elf_interp");

    for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {

    if (interp_elf_ex->e_type == ET_DYN) {
        /* in order to avoid hardcoding the interpreter load
           address in qemu, we allocate a big enough memory zone */
        error = target_mmap(0, INTERP_MAP_SIZE,
                            PROT_NONE, MAP_PRIVATE | MAP_ANON,

    for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++)
        if (eppnt->p_type == PT_LOAD) {
            int elf_type = MAP_PRIVATE | MAP_DENYWRITE;

            if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
            if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
            if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
            if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) {
                elf_type |= MAP_FIXED;
                vaddr = eppnt->p_vaddr;
            error = target_mmap(load_addr + TARGET_ELF_PAGESTART(vaddr),
                                eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr),
                                eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr));

                close(interpreter_fd);
                return ~((abi_ulong)0UL);

            if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {

            /*
             * Find the end of the file mapping for this phdr, and keep
             * track of the largest address we see for this.
             */
            k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
            if (k > elf_bss) elf_bss = k;

            /*
             * Do the same thing for the memory mapping - between
             * elf_bss and last_bss is the bss section.
             */
            k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
            if (k > last_bss) last_bss = k;

    /* Now use mmap to map the library into memory. */

    close(interpreter_fd);

    /*
     * Now fill out the bss section.  First pad the last page up
     * to the page boundary, and then perform a mmap to make sure
     * that there are zeromapped pages up to and including the last
     * bss page.
     */
    padzero(elf_bss, last_bss);
    elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */

    /* Map the last of the bss segment */
    if (last_bss > elf_bss) {
        target_mmap(elf_bss, last_bss - elf_bss,
                    PROT_READ | PROT_WRITE | PROT_EXEC,
                    MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0);

    *interp_load_addr = load_addr;
    return ((abi_ulong) interp_elf_ex->e_entry) + load_addr;
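/*
 * bsearch() comparator used by lookup_symbolxx(): it treats the probe as a
 * match when its address falls within [st_value, st_value + st_size] of the
 * candidate symbol, so the search returns the symbol containing the address
 * rather than requiring an exact match.
 */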
static int symfind(const void *s0, const void *s1)
    struct elf_sym *key = (struct elf_sym *)s0;
    struct elf_sym *sym = (struct elf_sym *)s1;
    if (key->st_value < sym->st_value) {
    } else if (key->st_value > sym->st_value + sym->st_size) {
static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr)
#if ELF_CLASS == ELFCLASS32
    struct elf_sym *syms = s->disas_symtab.elf32;
    struct elf_sym *syms = s->disas_symtab.elf64;

    struct elf_sym *sym;

    key.st_value = orig_addr;

    sym = bsearch(&key, syms, s->disas_num_syms, sizeof(*syms), symfind);
        return s->disas_strtab + sym->st_name;
/* FIXME: This should use elf_ops.h  */
static int symcmp(const void *s0, const void *s1)
    struct elf_sym *sym0 = (struct elf_sym *)s0;
    struct elf_sym *sym1 = (struct elf_sym *)s1;
    return (sym0->st_value < sym1->st_value)
        : ((sym0->st_value > sym1->st_value) ? 1 : 0);
/* Best attempt to load symbols from this ELF object. */
static void load_symbols(struct elfhdr *hdr, int fd)
    unsigned int i, nsyms;
    struct elf_shdr sechdr, symtab, strtab;
    struct elf_sym *syms;

    lseek(fd, hdr->e_shoff, SEEK_SET);
    for (i = 0; i < hdr->e_shnum; i++) {
        if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr))
        bswap_shdr(&sechdr);
        if (sechdr.sh_type == SHT_SYMTAB) {
            lseek(fd, hdr->e_shoff
                  + sizeof(sechdr) * sechdr.sh_link, SEEK_SET);
            if (read(fd, &strtab, sizeof(strtab))
            bswap_shdr(&strtab);

    return; /* Shouldn't happen... */

    /* Now know where the strtab and symtab are.  Snarf them. */
    s = malloc(sizeof(*s));
    syms = malloc(symtab.sh_size);

    s->disas_strtab = strings = malloc(strtab.sh_size);
    if (!s->disas_strtab)

    lseek(fd, symtab.sh_offset, SEEK_SET);
    if (read(fd, syms, symtab.sh_size) != symtab.sh_size)

    nsyms = symtab.sh_size / sizeof(struct elf_sym);
        bswap_sym(syms + i);
        /* Throw away entries which we do not need. */
        if (syms[i].st_shndx == SHN_UNDEF ||
            syms[i].st_shndx >= SHN_LORESERVE ||
            ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) {
            syms[i] = syms[nsyms];
#if defined(TARGET_ARM) || defined(TARGET_MIPS)
        /* The bottom address bit marks a Thumb or MIPS16 symbol.  */
        syms[i].st_value &= ~(target_ulong)1;

    syms = realloc(syms, nsyms * sizeof(*syms));

    qsort(syms, nsyms, sizeof(*syms), symcmp);

    lseek(fd, strtab.sh_offset, SEEK_SET);
    if (read(fd, strings, strtab.sh_size) != strtab.sh_size)

    s->disas_num_syms = nsyms;
#if ELF_CLASS == ELFCLASS32
    s->disas_symtab.elf32 = syms;
    s->lookup_symbol = (lookup_symbol_t)lookup_symbolxx;
    s->disas_symtab.elf64 = syms;
    s->lookup_symbol = (lookup_symbol_t)lookup_symbolxx;
int load_elf_binary(struct linux_binprm *bprm, struct target_pt_regs *regs,
                    struct image_info *info)
    struct elfhdr elf_ex;
    struct elfhdr interp_elf_ex;
    struct exec interp_ex;
    int interpreter_fd = -1;   /* avoid warning */
    abi_ulong load_addr, load_bias;
    int load_addr_set = 0;
    unsigned int interpreter_type = INTERPRETER_NONE;
    unsigned char ibcs2_interpreter;
    abi_ulong mapped_addr;
    struct elf_phdr *elf_ppnt;
    struct elf_phdr *elf_phdata;
    abi_ulong elf_bss, k, elf_brk;
    char *elf_interpreter;
    abi_ulong elf_entry, interp_load_addr = 0;
    abi_ulong start_code, end_code, start_data, end_data;
    abi_ulong reloc_func_desc = 0;
    abi_ulong elf_stack;
    char passed_fileno[6];
    ibcs2_interpreter = 0;

    elf_ex = *((struct elfhdr *) bprm->buf);    /* exec-header */
    bswap_ehdr(&elf_ex);

    /* First of all, some simple consistency checks */
    if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
        (!elf_check_arch(elf_ex.e_machine))) {

    bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
    bprm->p = copy_elf_strings(bprm->envc, bprm->envp, bprm->page, bprm->p);
    bprm->p = copy_elf_strings(bprm->argc, bprm->argv, bprm->page, bprm->p);
    /* Now read in all of the header information */
    elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize * elf_ex.e_phnum);
    if (elf_phdata == NULL) {

    retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET);

    retval = read(bprm->fd, (char *)elf_phdata,
                  elf_ex.e_phentsize * elf_ex.e_phnum);
        perror("load_elf_binary");

    elf_ppnt = elf_phdata;
    for (i = 0; i < elf_ex.e_phnum; i++, elf_ppnt++) {
        bswap_phdr(elf_ppnt);

    elf_ppnt = elf_phdata;
    elf_stack = ~((abi_ulong)0UL);
    elf_interpreter = NULL;
    start_code = ~((abi_ulong)0UL);

    interp_ex.a_info = 0;

    for (i = 0; i < elf_ex.e_phnum; i++) {
        if (elf_ppnt->p_type == PT_INTERP) {
            if (elf_interpreter != NULL)
                free(elf_interpreter);

            /* This is the program interpreter used for
             * shared libraries - for now assume that this
             * is an a.out format binary
             */
            elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);

            if (elf_interpreter == NULL) {

            retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET);

            retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz);
                perror("load_elf_binary2");

            /* If the program interpreter is one of these two,
               then assume an iBCS2 image.  Otherwise assume
               a native linux image. */

            /* JRP - Need to add X86 lib dir stuff here... */

            if (strcmp(elf_interpreter, "/usr/lib/libc.so.1") == 0 ||
                strcmp(elf_interpreter, "/usr/lib/ld.so.1") == 0) {
                ibcs2_interpreter = 1;

            printf("Using ELF interpreter %s\n", path(elf_interpreter));

            retval = open(path(elf_interpreter), O_RDONLY);
                interpreter_fd = retval;

                perror(elf_interpreter);
                /* retval = -errno; */

            retval = lseek(interpreter_fd, 0, SEEK_SET);

            retval = read(interpreter_fd, bprm->buf, 128);

            interp_ex = *((struct exec *)bprm->buf);        /* aout exec-header */
            interp_elf_ex = *((struct elfhdr *)bprm->buf);  /* elf exec-header */
                perror("load_elf_binary3");
                free(elf_interpreter);
    /* Some simple consistency checks for the interpreter */
    if (elf_interpreter) {
        interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;

        /* Now figure out which format our binary is */
        if ((N_MAGIC(interp_ex) != OMAGIC) && (N_MAGIC(interp_ex) != ZMAGIC) &&
            (N_MAGIC(interp_ex) != QMAGIC)) {
            interpreter_type = INTERPRETER_ELF;

        if (interp_elf_ex.e_ident[0] != 0x7f ||
            strncmp((char *)&interp_elf_ex.e_ident[1], "ELF", 3) != 0) {
            interpreter_type &= ~INTERPRETER_ELF;

        if (!interpreter_type) {
            free(elf_interpreter);
    /* OK, we are done with that, now set up the arg stuff,
       and then start this sucker up */

    if (interpreter_type == INTERPRETER_AOUT) {
        snprintf(passed_fileno, sizeof(passed_fileno), "%d", bprm->fd);
        passed_p = passed_fileno;

        if (elf_interpreter) {
            bprm->p = copy_elf_strings(1, &passed_p, bprm->page, bprm->p);

    if (elf_interpreter) {
        free(elf_interpreter);

    /* OK, This is the point of no return */

    info->start_mmap = (abi_ulong)ELF_START_MMAP;

    elf_entry = (abi_ulong) elf_ex.e_entry;
#if defined(CONFIG_USE_GUEST_BASE)
    /*
     * If the user has not explicitly set guest_base, probe here whether we
     * should set it automatically.
     */
    if (!have_guest_base) {
        /*
         * Go through the ELF program header table and find out whether
         * any of the segments drop below our current mmap_min_addr; in
         * that case set guest_base to a corresponding address.
         */
        for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum;
            if (elf_ppnt->p_type != PT_LOAD)
            if (HOST_PAGE_ALIGN(elf_ppnt->p_vaddr) < mmap_min_addr) {
                guest_base = HOST_PAGE_ALIGN(mmap_min_addr);
#endif /* CONFIG_USE_GUEST_BASE */
    /* Do this so that we can load the interpreter, if need be.  We will
       change some of these later */

    bprm->p = setup_arg_pages(bprm->p, bprm, info);
    info->start_stack = bprm->p;

    /* Now we do a little grungy work by mmaping the ELF image into
     * the correct location in memory.  At this point, we assume that
     * the image should be loaded at a fixed address, not at a variable
     * address.
     */
= 0, elf_ppnt
= elf_phdata
; i
< elf_ex
.e_phnum
; i
++, elf_ppnt
++) {
1388 if (elf_ppnt
->p_type
!= PT_LOAD
)
1391 if (elf_ppnt
->p_flags
& PF_R
) elf_prot
|= PROT_READ
;
1392 if (elf_ppnt
->p_flags
& PF_W
) elf_prot
|= PROT_WRITE
;
1393 if (elf_ppnt
->p_flags
& PF_X
) elf_prot
|= PROT_EXEC
;
1394 elf_flags
= MAP_PRIVATE
| MAP_DENYWRITE
;
1395 if (elf_ex
.e_type
== ET_EXEC
|| load_addr_set
) {
1396 elf_flags
|= MAP_FIXED
;
1397 } else if (elf_ex
.e_type
== ET_DYN
) {
1398 /* Try and get dynamic programs out of the way of the default mmap
1399 base, as well as whatever program they might try to exec. This
1400 is because the brk will follow the loader, and is not movable. */
1401 /* NOTE: for qemu, we do a big mmap to get enough space
1402 without hardcoding any address */
1403 error
= target_mmap(0, ET_DYN_MAP_SIZE
,
1404 PROT_NONE
, MAP_PRIVATE
| MAP_ANON
,
1410 load_bias
= TARGET_ELF_PAGESTART(error
- elf_ppnt
->p_vaddr
);
        error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
                            (elf_ppnt->p_filesz +
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
                            (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
                            (elf_ppnt->p_offset -
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));

#ifdef LOW_ELF_STACK
        if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
            elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr);
        if (!load_addr_set) {
            load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
            if (elf_ex.e_type == ET_DYN) {
                load_bias += error -
                    TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
                load_addr += load_bias;
                reloc_func_desc = load_bias;
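                /* load_bias is the delta between the address the ET_DYN image
                 * asked for (its p_vaddr) and where target_mmap() actually
                 * placed it; load_addr then tracks the run-time address
                 * corresponding to file offset zero of the first segment. */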
        k = elf_ppnt->p_vaddr;
        k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
        if ((elf_ppnt->p_flags & PF_X) && end_code < k)
        k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
        if (k > elf_brk) elf_brk = k;

    elf_entry += load_bias;
    elf_bss += load_bias;
    elf_brk += load_bias;
    start_code += load_bias;
    end_code += load_bias;
    start_data += load_bias;
    end_data += load_bias;
    if (elf_interpreter) {
        if (interpreter_type & 1) {
            elf_entry = load_aout_interp(&interp_ex, interpreter_fd);
        else if (interpreter_type & 2) {
            elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,
            reloc_func_desc = interp_load_addr;

        close(interpreter_fd);
        free(elf_interpreter);

        if (elf_entry == ~((abi_ulong)0UL)) {
            printf("Unable to load interpreter\n");

    if (qemu_log_enabled())
        load_symbols(&elf_ex, bprm->fd);
    if (interpreter_type != INTERPRETER_AOUT) close(bprm->fd);
    info->personality = (ibcs2_interpreter ? PER_SVR4 : PER_LINUX);

#ifdef LOW_ELF_STACK
    info->start_stack = bprm->p = elf_stack - 4;

    bprm->p = create_elf_tables(bprm->p,
                                load_addr, load_bias,
                                (interpreter_type == INTERPRETER_AOUT ? 0 : 1),

    info->load_addr = reloc_func_desc;
    info->start_brk = info->brk = elf_brk;
    info->end_code = end_code;
    info->start_code = start_code;
    info->start_data = start_data;
    info->end_data = end_data;
    info->start_stack = bprm->p;

    /* Calling set_brk effectively mmaps the pages that we need for the bss
       and break sections. */
    set_brk(elf_bss, elf_brk);

    padzero(elf_bss, elf_brk);
    printf("(start_brk) %x\n", info->start_brk);
    printf("(end_code) %x\n", info->end_code);
    printf("(start_code) %x\n", info->start_code);
    printf("(end_data) %x\n", info->end_data);
    printf("(start_stack) %x\n", info->start_stack);
    printf("(brk) %x\n", info->brk);
    if (info->personality == PER_SVR4) {
        /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
           and some applications "depend" upon this behavior.
           Since we do not have the power to recompile these, we
           emulate the SVr4 behavior.  Sigh. */
        mapped_addr = target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
                                  MAP_FIXED | MAP_PRIVATE, -1, 0);

    info->entry = elf_entry;

static int load_aout_interp(void *exptr, int interp_fd)
    printf("a.out interpreter not yet supported\n");

void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
    init_thread(regs, infop);