1 /* This is the Linux kernel elf-loading code, ported into user space */
24 /* from personality.h */
27 * Flags for bug emulation.
29 * These occupy the top three bytes.
32 ADDR_NO_RANDOMIZE
= 0x0040000, /* disable randomization of VA space */
33 FDPIC_FUNCPTRS
= 0x0080000, /* userspace function ptrs point to descriptors
36 MMAP_PAGE_ZERO
= 0x0100000,
37 ADDR_COMPAT_LAYOUT
= 0x0200000,
38 READ_IMPLIES_EXEC
= 0x0400000,
39 ADDR_LIMIT_32BIT
= 0x0800000,
40 SHORT_INODE
= 0x1000000,
41 WHOLE_SECONDS
= 0x2000000,
42 STICKY_TIMEOUTS
= 0x4000000,
43 ADDR_LIMIT_3GB
= 0x8000000,
49 * These go in the low byte. Avoid using the top bit, it will
50 * conflict with error returns.
54 PER_LINUX_32BIT
= 0x0000 | ADDR_LIMIT_32BIT
,
55 PER_LINUX_FDPIC
= 0x0000 | FDPIC_FUNCPTRS
,
56 PER_SVR4
= 0x0001 | STICKY_TIMEOUTS
| MMAP_PAGE_ZERO
,
57 PER_SVR3
= 0x0002 | STICKY_TIMEOUTS
| SHORT_INODE
,
58 PER_SCOSVR3
= 0x0003 | STICKY_TIMEOUTS
|
59 WHOLE_SECONDS
| SHORT_INODE
,
60 PER_OSR5
= 0x0003 | STICKY_TIMEOUTS
| WHOLE_SECONDS
,
61 PER_WYSEV386
= 0x0004 | STICKY_TIMEOUTS
| SHORT_INODE
,
62 PER_ISCR4
= 0x0005 | STICKY_TIMEOUTS
,
64 PER_SUNOS
= 0x0006 | STICKY_TIMEOUTS
,
65 PER_XENIX
= 0x0007 | STICKY_TIMEOUTS
| SHORT_INODE
,
67 PER_LINUX32_3GB
= 0x0008 | ADDR_LIMIT_3GB
,
68 PER_IRIX32
= 0x0009 | STICKY_TIMEOUTS
,/* IRIX5 32-bit */
69 PER_IRIXN32
= 0x000a | STICKY_TIMEOUTS
,/* IRIX6 new 32-bit */
70 PER_IRIX64
= 0x000b | STICKY_TIMEOUTS
,/* IRIX6 64-bit */
72 PER_SOLARIS
= 0x000d | STICKY_TIMEOUTS
,
73 PER_UW7
= 0x000e | STICKY_TIMEOUTS
| MMAP_PAGE_ZERO
,
74 PER_OSF4
= 0x000f, /* OSF/1 v4 */
80 * Return the base personality without flags.
82 #define personality(pers) (pers & PER_MASK)
84 /* this flag is ineffective under Linux too, should be deleted */
86 #define MAP_DENYWRITE 0
89 /* should probably go in elf.h */
96 #define ELF_PLATFORM get_elf_platform()
/*
 * Build the AT_PLATFORM string for an i386 guest ("i386".."i?86"):
 * the family digit is taken from bits 8..11 of the emulated CPUID
 * version word.  NOTE(review): the extraction elided the clamping of
 * 'family' and the return statement (original lines 102..107);
 * presumably family is clamped to a sane range before use -- confirm
 * against the full source.
 */
98 static const char *get_elf_platform(void)
100 static char elf_platform
[] = "i386";
/* CPU family is carried in bits 8..15 of cpuid_version (mask keeps 8 bits). */
101 int family
= (thread_env
->cpuid_version
>> 8) & 0xff;
/* Patch the family digit into the static "i386" template, e.g. -> "i586". */
105 elf_platform
[1] = '0' + family
;
109 #define ELF_HWCAP get_elf_hwcap()
/*
 * AT_HWCAP value for x86 guests: expose the emulated CPUID feature
 * bits of the current thread's CPU state directly.
 */
111 static uint32_t get_elf_hwcap(void)
113 return thread_env
->cpuid_features
;
117 #define ELF_START_MMAP 0x2aaaaab000ULL
118 #define elf_check_arch(x) ( ((x) == ELF_ARCH) )
120 #define ELF_CLASS ELFCLASS64
121 #define ELF_DATA ELFDATA2LSB
122 #define ELF_ARCH EM_X86_64
/*
 * Set up the initial x86_64 user-mode register state for a freshly
 * loaded image: stack pointer and instruction pointer only.
 */
124 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
/* %rsp <- top of the newly built argument/environment stack. */
127 regs
->rsp
= infop
->start_stack
;
/* %rip <- ELF entry point. */
128 regs
->rip
= infop
->entry
;
133 #define ELF_START_MMAP 0x80000000
136 * This is used to ensure we don't load something for the wrong architecture.
138 #define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )
141 * These are used to set parameters in the core dumps.
143 #define ELF_CLASS ELFCLASS32
144 #define ELF_DATA ELFDATA2LSB
145 #define ELF_ARCH EM_386
/*
 * Set up the initial i386 user-mode register state for a freshly
 * loaded image: stack pointer and instruction pointer only.
 */
147 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
/* %esp <- top of the newly built argument/environment stack. */
149 regs
->esp
= infop
->start_stack
;
/* %eip <- ELF entry point. */
150 regs
->eip
= infop
->entry
;
152 /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
153 starts %edx contains a pointer to a function which might be
154 registered using `atexit'. This provides a means for the
155 dynamic linker to call DT_FINI functions for shared libraries
156 that have been loaded before the code runs.
158 A value of 0 tells we have no such handler. */
163 #define USE_ELF_CORE_DUMP
164 #define ELF_EXEC_PAGESIZE 4096
170 #define ELF_START_MMAP 0x80000000
172 #define elf_check_arch(x) ( (x) == EM_ARM )
174 #define ELF_CLASS ELFCLASS32
175 #ifdef TARGET_WORDS_BIGENDIAN
176 #define ELF_DATA ELFDATA2MSB
178 #define ELF_DATA ELFDATA2LSB
180 #define ELF_ARCH EM_ARM
/*
 * Set up the initial ARM user-mode register state.  Clears all
 * registers, enters user mode, honours the Thumb bit in the entry
 * address, and pre-loads r1/r2 from the stack per the ARM ABI.
 */
182 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
184 abi_long stack
= infop
->start_stack
;
/* Start from a clean register file. */
185 memset(regs
, 0, sizeof(*regs
));
/* 0x10 = ARM user mode in the CPSR mode field. */
186 regs
->ARM_cpsr
= 0x10;
/* A set low bit in the entry address means a Thumb entry point. */
187 if (infop
->entry
& 1)
188 regs
->ARM_cpsr
|= CPSR_T
;
/* Drop the Thumb bit before using the address as the PC. */
189 regs
->ARM_pc
= infop
->entry
& 0xfffffffe;
190 regs
->ARM_sp
= infop
->start_stack
;
191 /* FIXME - what to do on failure of get_user()? */
192 get_user_ual(regs
->ARM_r2
, stack
+ 8); /* envp */
193 get_user_ual(regs
->ARM_r1
, stack
+ 4); /* argv (NOTE(review): was mislabelled envp) */
194 /* XXX: it seems that r0 is zeroed after ! */
196 /* For uClinux PIC binaries. */
197 /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
/* r10 conventionally holds the data segment base for no-MMU PIC code. */
198 regs
->ARM_r10
= infop
->start_data
;
201 #define USE_ELF_CORE_DUMP
202 #define ELF_EXEC_PAGESIZE 4096
206 ARM_HWCAP_ARM_SWP
= 1 << 0,
207 ARM_HWCAP_ARM_HALF
= 1 << 1,
208 ARM_HWCAP_ARM_THUMB
= 1 << 2,
209 ARM_HWCAP_ARM_26BIT
= 1 << 3,
210 ARM_HWCAP_ARM_FAST_MULT
= 1 << 4,
211 ARM_HWCAP_ARM_FPA
= 1 << 5,
212 ARM_HWCAP_ARM_VFP
= 1 << 6,
213 ARM_HWCAP_ARM_EDSP
= 1 << 7,
216 #define ELF_HWCAP (ARM_HWCAP_ARM_SWP | ARM_HWCAP_ARM_HALF \
217 | ARM_HWCAP_ARM_THUMB | ARM_HWCAP_ARM_FAST_MULT \
218 | ARM_HWCAP_ARM_FPA | ARM_HWCAP_ARM_VFP)
223 #ifdef TARGET_SPARC64
225 #define ELF_START_MMAP 0x80000000
228 #define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
230 #define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )
233 #define ELF_CLASS ELFCLASS64
234 #define ELF_DATA ELFDATA2MSB
235 #define ELF_ARCH EM_SPARCV9
237 #define STACK_BIAS 2047
/*
 * Set up the initial SPARC64 user-mode register state: PC/nPC pair
 * and the stack pointer (%o6 == u_regs[14]) below the register
 * window save area.  NOTE(review): the extraction elided the
 * TARGET_ABI32 conditionals (original lines 246..254) that select
 * between the assignments below -- confirm against the full source.
 */
239 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
244 regs
->pc
= infop
->entry
;
/* Delay-slot architecture: nPC is the following instruction. */
245 regs
->npc
= regs
->pc
+ 4;
/* 32-bit ABI: reserve a 16-word register window save area. */
248 regs
->u_regs
[14] = infop
->start_stack
- 16 * 4;
/* PER_LINUX32 personality also gets the 32-bit stack layout. */
250 if (personality(infop
->personality
) == PER_LINUX32
)
251 regs
->u_regs
[14] = infop
->start_stack
- 16 * 4;
/* 64-bit ABI: 16 doublewords plus the V9 stack bias. */
253 regs
->u_regs
[14] = infop
->start_stack
- 16 * 8 - STACK_BIAS
;
258 #define ELF_START_MMAP 0x80000000
260 #define elf_check_arch(x) ( (x) == EM_SPARC )
262 #define ELF_CLASS ELFCLASS32
263 #define ELF_DATA ELFDATA2MSB
264 #define ELF_ARCH EM_SPARC
/*
 * Set up the initial SPARC (32-bit) user-mode register state:
 * PC/nPC pair and stack pointer below the register window save area.
 */
266 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
269 regs
->pc
= infop
->entry
;
/* Delay-slot architecture: nPC is the following instruction. */
270 regs
->npc
= regs
->pc
+ 4;
/* %o6/%sp (u_regs[14]) sits below the 16-word window save area. */
272 regs
->u_regs
[14] = infop
->start_stack
- 16 * 4;
280 #define ELF_START_MMAP 0x80000000
282 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
284 #define elf_check_arch(x) ( (x) == EM_PPC64 )
286 #define ELF_CLASS ELFCLASS64
290 #define elf_check_arch(x) ( (x) == EM_PPC )
292 #define ELF_CLASS ELFCLASS32
296 #ifdef TARGET_WORDS_BIGENDIAN
297 #define ELF_DATA ELFDATA2MSB
299 #define ELF_DATA ELFDATA2LSB
301 #define ELF_ARCH EM_PPC
303 /* Feature masks for the Aux Vector Hardware Capabilities (AT_HWCAP).
304 See arch/powerpc/include/asm/cputable.h. */
306 PPC_FEATURE_32
= 0x80000000,
307 PPC_FEATURE_64
= 0x40000000,
308 PPC_FEATURE_601_INSTR
= 0x20000000,
309 PPC_FEATURE_HAS_ALTIVEC
= 0x10000000,
310 PPC_FEATURE_HAS_FPU
= 0x08000000,
311 PPC_FEATURE_HAS_MMU
= 0x04000000,
312 PPC_FEATURE_HAS_4xxMAC
= 0x02000000,
313 PPC_FEATURE_UNIFIED_CACHE
= 0x01000000,
314 PPC_FEATURE_HAS_SPE
= 0x00800000,
315 PPC_FEATURE_HAS_EFP_SINGLE
= 0x00400000,
316 PPC_FEATURE_HAS_EFP_DOUBLE
= 0x00200000,
317 PPC_FEATURE_NO_TB
= 0x00100000,
318 PPC_FEATURE_POWER4
= 0x00080000,
319 PPC_FEATURE_POWER5
= 0x00040000,
320 PPC_FEATURE_POWER5_PLUS
= 0x00020000,
321 PPC_FEATURE_CELL
= 0x00010000,
322 PPC_FEATURE_BOOKE
= 0x00008000,
323 PPC_FEATURE_SMT
= 0x00004000,
324 PPC_FEATURE_ICACHE_SNOOP
= 0x00002000,
325 PPC_FEATURE_ARCH_2_05
= 0x00001000,
326 PPC_FEATURE_PA6T
= 0x00000800,
327 PPC_FEATURE_HAS_DFP
= 0x00000400,
328 PPC_FEATURE_POWER6_EXT
= 0x00000200,
329 PPC_FEATURE_ARCH_2_06
= 0x00000100,
330 PPC_FEATURE_HAS_VSX
= 0x00000080,
331 PPC_FEATURE_PSERIES_PERFMON_COMPAT
= 0x00000040,
333 PPC_FEATURE_TRUE_LE
= 0x00000002,
334 PPC_FEATURE_PPC_LE
= 0x00000001,
337 #define ELF_HWCAP get_elf_hwcap()
/*
 * AT_HWCAP value for PPC guests: translate the emulated CPU's
 * insns_flags bits into the PPC_FEATURE_* bits glibc expects.
 */
339 static uint32_t get_elf_hwcap(void)
341 CPUState
*e
= thread_env
;
342 uint32_t features
= 0;
344 /* We don't have to be terribly complete here; the high points are
345 Altivec/FP/SPE support. Anything else is just a bonus. */
/* Map one insns_flags bit to one hwcap bit; arguments are evaluated once. */
346 #define GET_FEATURE(flag, feature) \
347 do {if (e->insns_flags & flag) features |= feature; } while(0)
348 GET_FEATURE(PPC_64B
, PPC_FEATURE_64
);
349 GET_FEATURE(PPC_FLOAT
, PPC_FEATURE_HAS_FPU
);
350 GET_FEATURE(PPC_ALTIVEC
, PPC_FEATURE_HAS_ALTIVEC
);
351 GET_FEATURE(PPC_SPE
, PPC_FEATURE_HAS_SPE
);
352 GET_FEATURE(PPC_SPE_SINGLE
, PPC_FEATURE_HAS_EFP_SINGLE
);
353 GET_FEATURE(PPC_SPE_DOUBLE
, PPC_FEATURE_HAS_EFP_DOUBLE
);
354 GET_FEATURE(PPC_BOOKE
, PPC_FEATURE_BOOKE
);
355 GET_FEATURE(PPC_405_MAC
, PPC_FEATURE_HAS_4xxMAC
);
362 * We need to put in some extra aux table entries to tell glibc what
363 * the cache block size is, so it can use the dcbz instruction safely.
365 #define AT_DCACHEBSIZE 19
366 #define AT_ICACHEBSIZE 20
367 #define AT_UCACHEBSIZE 21
368 /* A special ignored type value for PPC, for glibc compatibility. */
369 #define AT_IGNOREPPC 22
371 * The requirements here are:
372 * - keep the final alignment of sp (sp & 0xf)
373 * - make sure the 32-bit value at the first 16 byte aligned position of
374 * AUXV is greater than 16 for glibc compatibility.
375 * AT_IGNOREPPC is used for that.
376 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
377 * even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
379 #define DLINFO_ARCH_ITEMS 5
380 #define ARCH_DLINFO \
382 NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20); \
383 NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20); \
384 NEW_AUX_ENT(AT_UCACHEBSIZE, 0); \
386 * Now handle glibc compatibility. \
388 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
389 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
/*
 * Set up the initial PPC user-mode register state.  On 64-bit
 * (non-ABI32) the entry address names an OPD function descriptor,
 * so the real entry point and TOC are loaded from guest memory
 * first.  r1 is the stack pointer, r3 receives argc per the ABI.
 */
392 static inline void init_thread(struct target_pt_regs
*_regs
, struct image_info
*infop
)
394 abi_ulong pos
= infop
->start_stack
;
396 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
397 abi_ulong entry
, toc
;
400 _regs
->gpr
[1] = infop
->start_stack
;
401 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
/* Dereference the function descriptor: word 0 = code address... */
402 entry
= ldq_raw(infop
->entry
) + infop
->load_addr
;
/* ...word 1 = TOC base (goes to r2; assignment elided by extraction). */
403 toc
= ldq_raw(infop
->entry
+ 8) + infop
->load_addr
;
405 infop
->entry
= entry
;
407 _regs
->nip
= infop
->entry
;
408 /* Note that this isn't exactly what the regular kernel does
409 * but this is what the ABI wants and is needed to allow
410 * execution of PPC BSD programs.
412 /* FIXME - what to do on failure of get_user()? */
/* r3 <- argc, read from the top of the new stack. */
413 get_user_ual(_regs
->gpr
[3], pos
);
414 pos
+= sizeof(abi_ulong
);
/* Skip over argv entries until the NULL terminator (tmp decl elided). */
416 for (tmp
= 1; tmp
!= 0; pos
+= sizeof(abi_ulong
))
421 #define USE_ELF_CORE_DUMP
422 #define ELF_EXEC_PAGESIZE 4096
428 #define ELF_START_MMAP 0x80000000
430 #define elf_check_arch(x) ( (x) == EM_MIPS )
433 #define ELF_CLASS ELFCLASS64
435 #define ELF_CLASS ELFCLASS32
437 #ifdef TARGET_WORDS_BIGENDIAN
438 #define ELF_DATA ELFDATA2MSB
440 #define ELF_DATA ELFDATA2LSB
442 #define ELF_ARCH EM_MIPS
/*
 * Set up the initial MIPS user-mode register state: user mode in
 * CP0 status, entry point in EPC, stack pointer in $29.
 */
444 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
/* KSU field = 2 selects user mode. */
446 regs
->cp0_status
= 2 << CP0St_KSU
;
447 regs
->cp0_epc
= infop
->entry
;
/* $29 is the MIPS stack pointer. */
448 regs
->regs
[29] = infop
->start_stack
;
451 #define USE_ELF_CORE_DUMP
452 #define ELF_EXEC_PAGESIZE 4096
454 #endif /* TARGET_MIPS */
458 #define ELF_START_MMAP 0x80000000
460 #define elf_check_arch(x) ( (x) == EM_SH )
462 #define ELF_CLASS ELFCLASS32
463 #define ELF_DATA ELFDATA2LSB
464 #define ELF_ARCH EM_SH
/*
 * Set up the initial SH4 user-mode register state: entry point in
 * PC, stack pointer in r15.
 */
466 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
468 /* Check other registers XXXXX */
469 regs
->pc
= infop
->entry
;
/* r15 is the SH stack pointer. */
470 regs
->regs
[15] = infop
->start_stack
;
473 #define USE_ELF_CORE_DUMP
474 #define ELF_EXEC_PAGESIZE 4096
480 #define ELF_START_MMAP 0x80000000
482 #define elf_check_arch(x) ( (x) == EM_CRIS )
484 #define ELF_CLASS ELFCLASS32
485 #define ELF_DATA ELFDATA2LSB
486 #define ELF_ARCH EM_CRIS
/*
 * Set up the initial CRIS user-mode register state: only ERP (the
 * exception return pointer, i.e. resume address) is set here.
 */
488 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
490 regs
->erp
= infop
->entry
;
493 #define USE_ELF_CORE_DUMP
494 #define ELF_EXEC_PAGESIZE 8192
500 #define ELF_START_MMAP 0x80000000
502 #define elf_check_arch(x) ( (x) == EM_68K )
504 #define ELF_CLASS ELFCLASS32
505 #define ELF_DATA ELFDATA2MSB
506 #define ELF_ARCH EM_68K
508 /* ??? Does this need to do anything?
509 #define ELF_PLAT_INIT(_r) */
/*
 * Set up the initial m68k user-mode register state: user stack
 * pointer and program counter.
 */
511 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
513 regs
->usp
= infop
->start_stack
;
515 regs
->pc
= infop
->entry
;
518 #define USE_ELF_CORE_DUMP
519 #define ELF_EXEC_PAGESIZE 8192
525 #define ELF_START_MMAP (0x30000000000ULL)
527 #define elf_check_arch(x) ( (x) == ELF_ARCH )
529 #define ELF_CLASS ELFCLASS64
530 #define ELF_DATA ELFDATA2MSB
531 #define ELF_ARCH EM_ALPHA
/*
 * Set up the initial Alpha user-mode register state: PC, user stack
 * pointer, and the per-thread "unique" value.
 */
533 static inline void init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
535 regs
->pc
= infop
->entry
;
537 regs
->usp
= infop
->start_stack
;
/* NOTE(review): start_data as the unique value is itself marked
 * questionable below; confirm intended semantics. */
538 regs
->unique
= infop
->start_data
; /* ? */
/* NOTE(review): leftover debug output on every exec -- candidate for
 * removal or demotion to a debug-only log. */
539 printf("Set unique value to " TARGET_FMT_lx
" (" TARGET_FMT_lx
")\n",
540 regs
->unique
, infop
->start_data
);
543 #define USE_ELF_CORE_DUMP
544 #define ELF_EXEC_PAGESIZE 8192
546 #endif /* TARGET_ALPHA */
549 #define ELF_PLATFORM (NULL)
558 #define ELF_CLASS ELFCLASS32
560 #define bswaptls(ptr) bswap32s(ptr)
567 unsigned int a_info
; /* Use macros N_MAGIC, etc for access */
568 unsigned int a_text
; /* length of text, in bytes */
569 unsigned int a_data
; /* length of data, in bytes */
570 unsigned int a_bss
; /* length of uninitialized data area, in bytes */
571 unsigned int a_syms
; /* length of symbol table data in file, in bytes */
572 unsigned int a_entry
; /* start address */
573 unsigned int a_trsize
; /* length of relocation info for text, in bytes */
574 unsigned int a_drsize
; /* length of relocation info for data, in bytes */
578 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
584 /* max code+data+bss space allocated to elf interpreter */
585 #define INTERP_MAP_SIZE (32 * 1024 * 1024)
587 /* max code+data+bss+brk space allocated to ET_DYN executables */
588 #define ET_DYN_MAP_SIZE (128 * 1024 * 1024)
590 /* Necessary parameters */
591 #define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
592 #define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
593 #define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
595 #define INTERPRETER_NONE 0
596 #define INTERPRETER_AOUT 1
597 #define INTERPRETER_ELF 2
599 #define DLINFO_ITEMS 12
/*
 * Kernel-compatibility shim: in the original kernel this copied from
 * user space; here both spaces are the host process.  NOTE(review):
 * the body (original lines 602..604) was elided by extraction --
 * presumably just a memcpy; confirm against the full source.
 */
601 static inline void memcpy_fromfs(void * to
, const void * from
, unsigned long n
)
/* Forward declaration: loader for a.out-format ELF interpreters. */
606 static int load_aout_interp(void * exptr
, int interp_fd
);
/*
 * Byte-swap every multi-byte field of an ELF file header in place
 * (used when target and host endianness differ; bswaptls matches the
 * target word size).
 */
609 static void bswap_ehdr(struct elfhdr
*ehdr
)
611 bswap16s(&ehdr
->e_type
); /* Object file type */
612 bswap16s(&ehdr
->e_machine
); /* Architecture */
613 bswap32s(&ehdr
->e_version
); /* Object file version */
614 bswaptls(&ehdr
->e_entry
); /* Entry point virtual address */
615 bswaptls(&ehdr
->e_phoff
); /* Program header table file offset */
616 bswaptls(&ehdr
->e_shoff
); /* Section header table file offset */
617 bswap32s(&ehdr
->e_flags
); /* Processor-specific flags */
618 bswap16s(&ehdr
->e_ehsize
); /* ELF header size in bytes */
619 bswap16s(&ehdr
->e_phentsize
); /* Program header table entry size */
620 bswap16s(&ehdr
->e_phnum
); /* Program header table entry count */
621 bswap16s(&ehdr
->e_shentsize
); /* Section header table entry size */
622 bswap16s(&ehdr
->e_shnum
); /* Section header table entry count */
623 bswap16s(&ehdr
->e_shstrndx
); /* Section header string table index */
/* Byte-swap every multi-byte field of an ELF program header in place. */
626 static void bswap_phdr(struct elf_phdr
*phdr
)
628 bswap32s(&phdr
->p_type
); /* Segment type */
629 bswaptls(&phdr
->p_offset
); /* Segment file offset */
630 bswaptls(&phdr
->p_vaddr
); /* Segment virtual address */
631 bswaptls(&phdr
->p_paddr
); /* Segment physical address */
632 bswaptls(&phdr
->p_filesz
); /* Segment size in file */
633 bswaptls(&phdr
->p_memsz
); /* Segment size in memory */
634 bswap32s(&phdr
->p_flags
); /* Segment flags */
635 bswaptls(&phdr
->p_align
); /* Segment alignment */
/* Byte-swap every multi-byte field of an ELF section header in place. */
638 static void bswap_shdr(struct elf_shdr
*shdr
)
640 bswap32s(&shdr
->sh_name
);
641 bswap32s(&shdr
->sh_type
);
642 bswaptls(&shdr
->sh_flags
);
643 bswaptls(&shdr
->sh_addr
);
644 bswaptls(&shdr
->sh_offset
);
645 bswaptls(&shdr
->sh_size
);
646 bswap32s(&shdr
->sh_link
);
647 bswap32s(&shdr
->sh_info
);
648 bswaptls(&shdr
->sh_addralign
);
649 bswaptls(&shdr
->sh_entsize
);
/* Byte-swap the multi-byte fields of an ELF symbol table entry in place. */
652 static void bswap_sym(struct elf_sym
*sym
)
654 bswap32s(&sym
->st_name
);
655 bswaptls(&sym
->st_value
);
656 bswaptls(&sym
->st_size
);
657 bswap16s(&sym
->st_shndx
);
662 * 'copy_elf_strings()' copies argument/envelope strings from user
663 * memory to free pages in kernel mem. These are in a format ready
664 * to be put directly into the top of new user memory.
/*
 * Copy argc argv-style strings into the MAX_ARG_PAGES page array
 * used to build the new process stack, filling pages top-down from
 * position p and returning the updated position.  NOTE(review):
 * several control-flow lines (loop heads, returns) were elided by
 * extraction; the flow described here is inferred from the visible
 * fragments -- confirm against the full source.
 */
667 static abi_ulong
copy_elf_strings(int argc
,char ** argv
, void **page
,
670 char *tmp
, *tmp1
, *pag
= NULL
;
674 return 0; /* bullet-proofing */
679 fprintf(stderr
, "VFS: argc is wrong");
685 if (p
< len
) { /* this shouldn't happen - 128kB */
/* Locate (or lazily allocate) the page that position p falls in. */
691 offset
= p
% TARGET_PAGE_SIZE
;
692 pag
= (char *)page
[p
/TARGET_PAGE_SIZE
];
/* NOTE(review): malloc result is used without a NULL check here. */
694 pag
= (char *)malloc(TARGET_PAGE_SIZE
);
695 memset(pag
, 0, TARGET_PAGE_SIZE
);
696 page
[p
/TARGET_PAGE_SIZE
] = pag
;
/* Single-byte tail (or page boundary): copy one char at a time. */
701 if (len
== 0 || offset
== 0) {
702 *(pag
+ offset
) = *tmp
;
/* Bulk path: copy as much of the string as fits in this page. */
705 int bytes_to_copy
= (len
> offset
) ? offset
: len
;
706 tmp
-= bytes_to_copy
;
708 offset
-= bytes_to_copy
;
709 len
-= bytes_to_copy
;
710 memcpy_fromfs(pag
+ offset
, tmp
, bytes_to_copy
+ 1);
/*
 * Map the guest stack and copy the prepared argument pages into its
 * top.  Reserves one guard page above the stack and returns the new
 * stack position (return/error-path lines elided by extraction --
 * NOTE(review): confirm the target_mmap failure check survives in
 * the full source).
 */
717 static abi_ulong
setup_arg_pages(abi_ulong p
, struct linux_binprm
*bprm
,
718 struct image_info
*info
)
720 abi_ulong stack_base
, size
, error
;
723 /* Create enough stack to hold everything. If we don't use
724 * it for args, we'll use it for something else...
726 size
= x86_stack_size
;
/* Never map less than the argument area itself. */
727 if (size
< MAX_ARG_PAGES
*TARGET_PAGE_SIZE
)
728 size
= MAX_ARG_PAGES
*TARGET_PAGE_SIZE
;
/* One extra host page on top becomes the guard page below. */
729 error
= target_mmap(0,
730 size
+ qemu_host_page_size
,
731 PROT_READ
| PROT_WRITE
,
732 MAP_PRIVATE
| MAP_ANONYMOUS
,
738 /* we reserve one extra page at the top of the stack as guard */
739 target_mprotect(error
+ size
, qemu_host_page_size
, PROT_NONE
);
741 stack_base
= error
+ size
- MAX_ARG_PAGES
*TARGET_PAGE_SIZE
;
/* Copy each prepared argument page into the freshly mapped stack. */
744 for (i
= 0 ; i
< MAX_ARG_PAGES
; i
++) {
747 /* FIXME - check return value of memcpy_to_target() for failure */
748 memcpy_to_target(stack_base
, bprm
->page
[i
], TARGET_PAGE_SIZE
);
751 stack_base
+= TARGET_PAGE_SIZE
;
/*
 * Map anonymous zeroed memory over [start, end) (page-aligned) for
 * the program break / bss area; aborts the load on mmap failure.
 */
756 static void set_brk(abi_ulong start
, abi_ulong end
)
758 /* page-align the start and end addresses... */
759 start
= HOST_PAGE_ALIGN(start
);
760 end
= HOST_PAGE_ALIGN(end
);
/* MAP_FIXED: deliberately replaces anything already mapped there. */
763 if(target_mmap(start
, end
- start
,
764 PROT_READ
| PROT_WRITE
| PROT_EXEC
,
765 MAP_FIXED
| MAP_PRIVATE
| MAP_ANONYMOUS
, -1, 0) == -1) {
766 perror("cannot mmap brk");
772 /* We need to explicitly zero any fractional pages after the data
773 section (i.e. bss). This would contain the junk from the file that
774 should not be in memory. */
/*
 * Zero the fractional page between elf_bss and the next page
 * boundary, mapping extra anonymous pages first when the host page
 * size is smaller than the target's.  NOTE(review): the zeroing loop
 * around put_user_u8 was elided by extraction -- confirm the loop
 * bounds against the full source.
 */
775 static void padzero(abi_ulong elf_bss
, abi_ulong last_bss
)
/* Nothing to pad if bss starts at or past its end. */
779 if (elf_bss
>= last_bss
)
782 /* XXX: this is really a hack : if the real host page size is
783 smaller than the target page size, some pages after the end
784 of the file may not be mapped. A better fix would be to
785 patch target_mmap(), but it is more complicated as the file
786 size must be known */
787 if (qemu_real_host_page_size
< qemu_host_page_size
) {
788 abi_ulong end_addr
, end_addr1
;
/* Round elf_bss up to the next real host page... */
789 end_addr1
= (elf_bss
+ qemu_real_host_page_size
- 1) &
790 ~(qemu_real_host_page_size
- 1);
/* ...and to the next target-sized page; map the gap anonymously. */
791 end_addr
= HOST_PAGE_ALIGN(elf_bss
);
792 if (end_addr1
< end_addr
) {
793 mmap((void *)g2h(end_addr1
), end_addr
- end_addr1
,
794 PROT_READ
|PROT_WRITE
|PROT_EXEC
,
795 MAP_FIXED
|MAP_PRIVATE
|MAP_ANONYMOUS
, -1, 0);
/* Bytes remaining in the partially used final page. */
799 nbyte
= elf_bss
& (qemu_host_page_size
-1);
801 nbyte
= qemu_host_page_size
- nbyte
;
803 /* FIXME - what to do if put_user() fails? */
804 put_user_u8(0, elf_bss
);
811 static abi_ulong
create_elf_tables(abi_ulong p
, int argc
, int envc
,
812 struct elfhdr
* exec
,
815 abi_ulong interp_load_addr
, int ibcs
,
816 struct image_info
*info
)
820 abi_ulong u_platform
;
821 const char *k_platform
;
822 const int n
= sizeof(elf_addr_t
);
826 k_platform
= ELF_PLATFORM
;
828 size_t len
= strlen(k_platform
) + 1;
829 sp
-= (len
+ n
- 1) & ~(n
- 1);
831 /* FIXME - check return value of memcpy_to_target() for failure */
832 memcpy_to_target(sp
, k_platform
, len
);
835 * Force 16 byte _final_ alignment here for generality.
837 sp
= sp
&~ (abi_ulong
)15;
838 size
= (DLINFO_ITEMS
+ 1) * 2;
841 #ifdef DLINFO_ARCH_ITEMS
842 size
+= DLINFO_ARCH_ITEMS
* 2;
844 size
+= envc
+ argc
+ 2;
845 size
+= (!ibcs
? 3 : 1); /* argc itself */
848 sp
-= 16 - (size
& 15);
850 /* This is correct because Linux defines
851 * elf_addr_t as Elf32_Off / Elf64_Off
853 #define NEW_AUX_ENT(id, val) do { \
854 sp -= n; put_user_ual(val, sp); \
855 sp -= n; put_user_ual(id, sp); \
858 NEW_AUX_ENT (AT_NULL
, 0);
860 /* There must be exactly DLINFO_ITEMS entries here. */
861 NEW_AUX_ENT(AT_PHDR
, (abi_ulong
)(load_addr
+ exec
->e_phoff
));
862 NEW_AUX_ENT(AT_PHENT
, (abi_ulong
)(sizeof (struct elf_phdr
)));
863 NEW_AUX_ENT(AT_PHNUM
, (abi_ulong
)(exec
->e_phnum
));
864 NEW_AUX_ENT(AT_PAGESZ
, (abi_ulong
)(TARGET_PAGE_SIZE
));
865 NEW_AUX_ENT(AT_BASE
, (abi_ulong
)(interp_load_addr
));
866 NEW_AUX_ENT(AT_FLAGS
, (abi_ulong
)0);
867 NEW_AUX_ENT(AT_ENTRY
, load_bias
+ exec
->e_entry
);
868 NEW_AUX_ENT(AT_UID
, (abi_ulong
) getuid());
869 NEW_AUX_ENT(AT_EUID
, (abi_ulong
) geteuid());
870 NEW_AUX_ENT(AT_GID
, (abi_ulong
) getgid());
871 NEW_AUX_ENT(AT_EGID
, (abi_ulong
) getegid());
872 NEW_AUX_ENT(AT_HWCAP
, (abi_ulong
) ELF_HWCAP
);
873 NEW_AUX_ENT(AT_CLKTCK
, (abi_ulong
) sysconf(_SC_CLK_TCK
));
875 NEW_AUX_ENT(AT_PLATFORM
, u_platform
);
878 * ARCH_DLINFO must come last so platform specific code can enforce
879 * special alignment requirements on the AUXV if necessary (eg. PPC).
885 sp
= loader_build_argptr(envc
, argc
, sp
, p
, !ibcs
);
890 static abi_ulong
load_elf_interp(struct elfhdr
* interp_elf_ex
,
892 abi_ulong
*interp_load_addr
)
894 struct elf_phdr
*elf_phdata
= NULL
;
895 struct elf_phdr
*eppnt
;
896 abi_ulong load_addr
= 0;
897 int load_addr_set
= 0;
899 abi_ulong last_bss
, elf_bss
;
908 bswap_ehdr(interp_elf_ex
);
910 /* First of all, some simple consistency checks */
911 if ((interp_elf_ex
->e_type
!= ET_EXEC
&&
912 interp_elf_ex
->e_type
!= ET_DYN
) ||
913 !elf_check_arch(interp_elf_ex
->e_machine
)) {
914 return ~((abi_ulong
)0UL);
918 /* Now read in all of the header information */
920 if (sizeof(struct elf_phdr
) * interp_elf_ex
->e_phnum
> TARGET_PAGE_SIZE
)
921 return ~(abi_ulong
)0UL;
923 elf_phdata
= (struct elf_phdr
*)
924 malloc(sizeof(struct elf_phdr
) * interp_elf_ex
->e_phnum
);
927 return ~((abi_ulong
)0UL);
930 * If the size of this structure has changed, then punt, since
931 * we will be doing the wrong thing.
933 if (interp_elf_ex
->e_phentsize
!= sizeof(struct elf_phdr
)) {
935 return ~((abi_ulong
)0UL);
938 retval
= lseek(interpreter_fd
, interp_elf_ex
->e_phoff
, SEEK_SET
);
940 retval
= read(interpreter_fd
,
942 sizeof(struct elf_phdr
) * interp_elf_ex
->e_phnum
);
945 perror("load_elf_interp");
952 for (i
=0; i
<interp_elf_ex
->e_phnum
; i
++, eppnt
++) {
957 if (interp_elf_ex
->e_type
== ET_DYN
) {
958 /* in order to avoid hardcoding the interpreter load
959 address in qemu, we allocate a big enough memory zone */
960 error
= target_mmap(0, INTERP_MAP_SIZE
,
961 PROT_NONE
, MAP_PRIVATE
| MAP_ANON
,
972 for(i
=0; i
<interp_elf_ex
->e_phnum
; i
++, eppnt
++)
973 if (eppnt
->p_type
== PT_LOAD
) {
974 int elf_type
= MAP_PRIVATE
| MAP_DENYWRITE
;
979 if (eppnt
->p_flags
& PF_R
) elf_prot
= PROT_READ
;
980 if (eppnt
->p_flags
& PF_W
) elf_prot
|= PROT_WRITE
;
981 if (eppnt
->p_flags
& PF_X
) elf_prot
|= PROT_EXEC
;
982 if (interp_elf_ex
->e_type
== ET_EXEC
|| load_addr_set
) {
983 elf_type
|= MAP_FIXED
;
984 vaddr
= eppnt
->p_vaddr
;
986 error
= target_mmap(load_addr
+TARGET_ELF_PAGESTART(vaddr
),
987 eppnt
->p_filesz
+ TARGET_ELF_PAGEOFFSET(eppnt
->p_vaddr
),
991 eppnt
->p_offset
- TARGET_ELF_PAGEOFFSET(eppnt
->p_vaddr
));
995 close(interpreter_fd
);
997 return ~((abi_ulong
)0UL);
1000 if (!load_addr_set
&& interp_elf_ex
->e_type
== ET_DYN
) {
1006 * Find the end of the file mapping for this phdr, and keep
1007 * track of the largest address we see for this.
1009 k
= load_addr
+ eppnt
->p_vaddr
+ eppnt
->p_filesz
;
1010 if (k
> elf_bss
) elf_bss
= k
;
1013 * Do the same thing for the memory mapping - between
1014 * elf_bss and last_bss is the bss section.
1016 k
= load_addr
+ eppnt
->p_memsz
+ eppnt
->p_vaddr
;
1017 if (k
> last_bss
) last_bss
= k
;
1020 /* Now use mmap to map the library into memory. */
1022 close(interpreter_fd
);
1025 * Now fill out the bss section. First pad the last page up
1026 * to the page boundary, and then perform a mmap to make sure
1027 * that there are zeromapped pages up to and including the last
1030 padzero(elf_bss
, last_bss
);
1031 elf_bss
= TARGET_ELF_PAGESTART(elf_bss
+ qemu_host_page_size
- 1); /* What we have mapped so far */
1033 /* Map the last of the bss segment */
1034 if (last_bss
> elf_bss
) {
1035 target_mmap(elf_bss
, last_bss
-elf_bss
,
1036 PROT_READ
|PROT_WRITE
|PROT_EXEC
,
1037 MAP_FIXED
|MAP_PRIVATE
|MAP_ANONYMOUS
, -1, 0);
1041 *interp_load_addr
= load_addr
;
1042 return ((abi_ulong
) interp_elf_ex
->e_entry
) + load_addr
;
/*
 * bsearch() comparator for address->symbol lookup: the key holds the
 * address being resolved in st_value; a symbol matches when the
 * address falls within [st_value, st_value + st_size].  NOTE(review):
 * the -1 / +1 / 0 return lines were elided by extraction.
 */
1045 static int symfind(const void *s0
, const void *s1
)
1047 struct elf_sym
*key
= (struct elf_sym
*)s0
;
1048 struct elf_sym
*sym
= (struct elf_sym
*)s1
;
/* Address below the symbol's start: search lower half. */
1050 if (key
->st_value
< sym
->st_value
) {
/* Address past the symbol's end: search upper half. */
1052 } else if (key
->st_value
> sym
->st_value
+ sym
->st_size
) {
/*
 * Resolve a guest address to a symbol name using binary search over
 * the sorted symbol table (see symfind/symcmp).  NOTE(review): the
 * not-found branch (original lines ~1073..1077) was elided by
 * extraction; presumably it returns an empty string -- confirm.
 */
1058 static const char *lookup_symbolxx(struct syminfo
*s
, target_ulong orig_addr
)
/* Pick the 32- or 64-bit symbol table matching the build's ELF class. */
1060 #if ELF_CLASS == ELFCLASS32
1061 struct elf_sym
*syms
= s
->disas_symtab
.elf32
;
1063 struct elf_sym
*syms
= s
->disas_symtab
.elf64
;
1068 struct elf_sym
*sym
;
/* The search key carries only the address being resolved. */
1070 key
.st_value
= orig_addr
;
1072 sym
= bsearch(&key
, syms
, s
->disas_num_syms
, sizeof(*syms
), symfind
);
/* st_name is an offset into the string table. */
1074 return s
->disas_strtab
+ sym
->st_name
;
1080 /* FIXME: This should use elf_ops.h */
/*
 * qsort() comparator ordering symbols by ascending st_value, so the
 * table can later be binary-searched by symfind().
 */
1081 static int symcmp(const void *s0
, const void *s1
)
1083 struct elf_sym
*sym0
= (struct elf_sym
*)s0
;
1084 struct elf_sym
*sym1
= (struct elf_sym
*)s1
;
/* Three-way compare on st_value (middle "? -1" line elided by extraction). */
1085 return (sym0
->st_value
< sym1
->st_value
)
1087 : ((sym0
->st_value
> sym1
->st_value
) ? 1 : 0);
1090 /* Best attempt to load symbols from this ELF object. */
1091 static void load_symbols(struct elfhdr
*hdr
, int fd
)
1093 unsigned int i
, nsyms
;
1094 struct elf_shdr sechdr
, symtab
, strtab
;
1097 struct elf_sym
*syms
;
1099 lseek(fd
, hdr
->e_shoff
, SEEK_SET
);
1100 for (i
= 0; i
< hdr
->e_shnum
; i
++) {
1101 if (read(fd
, &sechdr
, sizeof(sechdr
)) != sizeof(sechdr
))
1104 bswap_shdr(&sechdr
);
1106 if (sechdr
.sh_type
== SHT_SYMTAB
) {
1108 lseek(fd
, hdr
->e_shoff
1109 + sizeof(sechdr
) * sechdr
.sh_link
, SEEK_SET
);
1110 if (read(fd
, &strtab
, sizeof(strtab
))
1114 bswap_shdr(&strtab
);
1119 return; /* Shouldn't happen... */
1122 /* Now know where the strtab and symtab are. Snarf them. */
1123 s
= malloc(sizeof(*s
));
1124 syms
= malloc(symtab
.sh_size
);
1127 s
->disas_strtab
= strings
= malloc(strtab
.sh_size
);
1128 if (!s
->disas_strtab
)
1131 lseek(fd
, symtab
.sh_offset
, SEEK_SET
);
1132 if (read(fd
, syms
, symtab
.sh_size
) != symtab
.sh_size
)
1135 nsyms
= symtab
.sh_size
/ sizeof(struct elf_sym
);
1140 bswap_sym(syms
+ i
);
1142 // Throw away entries which we do not need.
1143 if (syms
[i
].st_shndx
== SHN_UNDEF
||
1144 syms
[i
].st_shndx
>= SHN_LORESERVE
||
1145 ELF_ST_TYPE(syms
[i
].st_info
) != STT_FUNC
) {
1148 syms
[i
] = syms
[nsyms
];
1152 #if defined(TARGET_ARM) || defined (TARGET_MIPS)
1153 /* The bottom address bit marks a Thumb or MIPS16 symbol. */
1154 syms
[i
].st_value
&= ~(target_ulong
)1;
1158 syms
= realloc(syms
, nsyms
* sizeof(*syms
));
1160 qsort(syms
, nsyms
, sizeof(*syms
), symcmp
);
1162 lseek(fd
, strtab
.sh_offset
, SEEK_SET
);
1163 if (read(fd
, strings
, strtab
.sh_size
) != strtab
.sh_size
)
1165 s
->disas_num_syms
= nsyms
;
1166 #if ELF_CLASS == ELFCLASS32
1167 s
->disas_symtab
.elf32
= syms
;
1168 s
->lookup_symbol
= lookup_symbolxx
;
1170 s
->disas_symtab
.elf64
= syms
;
1171 s
->lookup_symbol
= lookup_symbolxx
;
1177 int load_elf_binary(struct linux_binprm
* bprm
, struct target_pt_regs
* regs
,
1178 struct image_info
* info
)
1180 struct elfhdr elf_ex
;
1181 struct elfhdr interp_elf_ex
;
1182 struct exec interp_ex
;
1183 int interpreter_fd
= -1; /* avoid warning */
1184 abi_ulong load_addr
, load_bias
;
1185 int load_addr_set
= 0;
1186 unsigned int interpreter_type
= INTERPRETER_NONE
;
1187 unsigned char ibcs2_interpreter
;
1189 abi_ulong mapped_addr
;
1190 struct elf_phdr
* elf_ppnt
;
1191 struct elf_phdr
*elf_phdata
;
1192 abi_ulong elf_bss
, k
, elf_brk
;
1194 char * elf_interpreter
;
1195 abi_ulong elf_entry
, interp_load_addr
= 0;
1197 abi_ulong start_code
, end_code
, start_data
, end_data
;
1198 abi_ulong reloc_func_desc
= 0;
1199 abi_ulong elf_stack
;
1200 char passed_fileno
[6];
1202 ibcs2_interpreter
= 0;
1206 elf_ex
= *((struct elfhdr
*) bprm
->buf
); /* exec-header */
1208 bswap_ehdr(&elf_ex
);
1211 /* First of all, some simple consistency checks */
1212 if ((elf_ex
.e_type
!= ET_EXEC
&& elf_ex
.e_type
!= ET_DYN
) ||
1213 (! elf_check_arch(elf_ex
.e_machine
))) {
1217 bprm
->p
= copy_elf_strings(1, &bprm
->filename
, bprm
->page
, bprm
->p
);
1218 bprm
->p
= copy_elf_strings(bprm
->envc
,bprm
->envp
,bprm
->page
,bprm
->p
);
1219 bprm
->p
= copy_elf_strings(bprm
->argc
,bprm
->argv
,bprm
->page
,bprm
->p
);
1224 /* Now read in all of the header information */
1225 elf_phdata
= (struct elf_phdr
*)malloc(elf_ex
.e_phentsize
*elf_ex
.e_phnum
);
1226 if (elf_phdata
== NULL
) {
1230 retval
= lseek(bprm
->fd
, elf_ex
.e_phoff
, SEEK_SET
);
1232 retval
= read(bprm
->fd
, (char *) elf_phdata
,
1233 elf_ex
.e_phentsize
* elf_ex
.e_phnum
);
1237 perror("load_elf_binary");
1244 elf_ppnt
= elf_phdata
;
1245 for (i
=0; i
<elf_ex
.e_phnum
; i
++, elf_ppnt
++) {
1246 bswap_phdr(elf_ppnt
);
1249 elf_ppnt
= elf_phdata
;
1255 elf_stack
= ~((abi_ulong
)0UL);
1256 elf_interpreter
= NULL
;
1257 start_code
= ~((abi_ulong
)0UL);
1261 interp_ex
.a_info
= 0;
1263 for(i
=0;i
< elf_ex
.e_phnum
; i
++) {
1264 if (elf_ppnt
->p_type
== PT_INTERP
) {
1265 if ( elf_interpreter
!= NULL
)
1268 free(elf_interpreter
);
1273 /* This is the program interpreter used for
1274 * shared libraries - for now assume that this
1275 * is an a.out format binary
1278 elf_interpreter
= (char *)malloc(elf_ppnt
->p_filesz
);
1280 if (elf_interpreter
== NULL
) {
1286 retval
= lseek(bprm
->fd
, elf_ppnt
->p_offset
, SEEK_SET
);
1288 retval
= read(bprm
->fd
, elf_interpreter
, elf_ppnt
->p_filesz
);
1291 perror("load_elf_binary2");
1295 /* If the program interpreter is one of these two,
1296 then assume an iBCS2 image. Otherwise assume
1297 a native linux image. */
1299 /* JRP - Need to add X86 lib dir stuff here... */
1301 if (strcmp(elf_interpreter
,"/usr/lib/libc.so.1") == 0 ||
1302 strcmp(elf_interpreter
,"/usr/lib/ld.so.1") == 0) {
1303 ibcs2_interpreter
= 1;
1307 printf("Using ELF interpreter %s\n", elf_interpreter
);
1310 retval
= open(path(elf_interpreter
), O_RDONLY
);
1312 interpreter_fd
= retval
;
1315 perror(elf_interpreter
);
1317 /* retval = -errno; */
1322 retval
= lseek(interpreter_fd
, 0, SEEK_SET
);
1324 retval
= read(interpreter_fd
,bprm
->buf
,128);
1328 interp_ex
= *((struct exec
*) bprm
->buf
); /* aout exec-header */
1329 interp_elf_ex
=*((struct elfhdr
*) bprm
->buf
); /* elf exec-header */
1332 perror("load_elf_binary3");
1335 free(elf_interpreter
);
/*
 * Interpreter format classification: start by assuming the interpreter
 * could be either format, then strike out whichever magic checks fail.
 * (Fragments garbled by extraction; the fused leading integers are
 * original-file line numbers, not code.)
 */
1343 /* Some simple consistency checks for the interpreter */
1344 if (elf_interpreter
){
1345 interpreter_type
= INTERPRETER_ELF
| INTERPRETER_AOUT
;
1347 /* Now figure out which format our binary is */
/* No recognised a.out magic (OMAGIC/ZMAGIC/QMAGIC) => ELF only. */
1348 if ((N_MAGIC(interp_ex
) != OMAGIC
) && (N_MAGIC(interp_ex
) != ZMAGIC
) &&
1349 (N_MAGIC(interp_ex
) != QMAGIC
)) {
1350 interpreter_type
= INTERPRETER_ELF
;
/* No "\177ELF" signature in e_ident => clear the ELF bit. */
1353 if (interp_elf_ex
.e_ident
[0] != 0x7f ||
1354 strncmp((char *)&interp_elf_ex
.e_ident
[1], "ELF",3) != 0) {
1355 interpreter_type
&= ~INTERPRETER_ELF
;
/* Neither format matched: the interpreter is unusable.
 * NOTE(review): the error-return after this free is among the lines
 * dropped by the extraction. */
1358 if (!interpreter_type
) {
1359 free(elf_interpreter
);
/*
 * Argument setup and the "point of no return": from here on the old
 * process image is being replaced, so failures can no longer unwind
 * back into the caller's address space.  (Fragments garbled by
 * extraction; fused leading integers are original-file line numbers.)
 */
1366 /* OK, we are done with that, now set up the arg stuff,
1367 and then start this sucker up */
/* For an a.out interpreter, the binary's fd number is rendered as a
 * decimal string and copied onto the argument stack -- presumably so
 * the interpreter can reopen the image by fd; confirm against the
 * a.out loader. */
1372 if (interpreter_type
== INTERPRETER_AOUT
) {
1373 snprintf(passed_fileno
, sizeof(passed_fileno
), "%d", bprm
->fd
);
1374 passed_p
= passed_fileno
;
1376 if (elf_interpreter
) {
1377 bprm
->p
= copy_elf_strings(1,&passed_p
,bprm
->page
,bprm
->p
);
/* NOTE(review): this second elf_interpreter check and free belong to a
 * failure path whose surrounding lines were dropped by the extraction. */
1382 if (elf_interpreter
) {
1383 free(elf_interpreter
);
1391 /* OK, This is the point of no return */
1394 info
->start_mmap
= (abi_ulong
)ELF_START_MMAP
;
/* Default entry point from the ELF header; rebased by load_bias below. */
1396 elf_entry
= (abi_ulong
) elf_ex
.e_entry
;
1398 /* Do this so that we can load the interpreter, if need be. We will
1399 change some of these later */
/* Lay out the initial stack/argument pages and record the stack top. */
1401 bprm
->p
= setup_arg_pages(bprm
->p
, bprm
, info
);
1402 info
->start_stack
= bprm
->p
;
/*
 * PT_LOAD mapping loop: mmap each loadable segment at its (possibly
 * biased) target address and track the code/data/bss extents.
 * NOTE(review): several lines (the skip for non-PT_LOAD entries, error
 * checks after target_mmap, part of the start_code/end_data bookkeeping)
 * were dropped by the extraction; the fused leading integers are
 * original-file line numbers, not code.
 */
1404 /* Now we do a little grungy work by mmaping the ELF image into
1405 * the correct location in memory. At this point, we assume that
1406 * the image should be loaded at fixed address, not at a variable
1410 for(i
= 0, elf_ppnt
= elf_phdata
; i
< elf_ex
.e_phnum
; i
++, elf_ppnt
++) {
/* Only PT_LOAD segments are mapped. */
1415 if (elf_ppnt
->p_type
!= PT_LOAD
)
/* Translate the segment's p_flags into mmap protections. */
1418 if (elf_ppnt
->p_flags
& PF_R
) elf_prot
|= PROT_READ
;
1419 if (elf_ppnt
->p_flags
& PF_W
) elf_prot
|= PROT_WRITE
;
1420 if (elf_ppnt
->p_flags
& PF_X
) elf_prot
|= PROT_EXEC
;
1421 elf_flags
= MAP_PRIVATE
| MAP_DENYWRITE
;
/* ET_EXEC (or any segment after the first) maps at a fixed address;
 * a fresh ET_DYN image first reserves a large anonymous region so its
 * load bias cannot collide with the default mmap base or the brk. */
1422 if (elf_ex
.e_type
== ET_EXEC
|| load_addr_set
) {
1423 elf_flags
|= MAP_FIXED
;
1424 } else if (elf_ex
.e_type
== ET_DYN
) {
1425 /* Try and get dynamic programs out of the way of the default mmap
1426 base, as well as whatever program they might try to exec. This
1427 is because the brk will follow the loader, and is not movable. */
1428 /* NOTE: for qemu, we do a big mmap to get enough space
1429 without hardcoding any address */
1430 error
= target_mmap(0, ET_DYN_MAP_SIZE
,
1431 PROT_NONE
, MAP_PRIVATE
| MAP_ANON
,
1437 load_bias
= TARGET_ELF_PAGESTART(error
- elf_ppnt
->p_vaddr
);
/* Map the file-backed part of the segment, page-aligning the start and
 * compensating the file offset by the same in-page offset. */
1440 error
= target_mmap(TARGET_ELF_PAGESTART(load_bias
+ elf_ppnt
->p_vaddr
),
1441 (elf_ppnt
->p_filesz
+
1442 TARGET_ELF_PAGEOFFSET(elf_ppnt
->p_vaddr
)),
1444 (MAP_FIXED
| MAP_PRIVATE
| MAP_DENYWRITE
),
1446 (elf_ppnt
->p_offset
-
1447 TARGET_ELF_PAGEOFFSET(elf_ppnt
->p_vaddr
)));
1453 #ifdef LOW_ELF_STACK
/* Optionally lower the stack limit to the lowest mapped segment. */
1454 if (TARGET_ELF_PAGESTART(elf_ppnt
->p_vaddr
) < elf_stack
)
1455 elf_stack
= TARGET_ELF_PAGESTART(elf_ppnt
->p_vaddr
);
/* The first mapped segment fixes load_addr and, for ET_DYN, the final
 * load_bias derived from where the reservation actually landed. */
1458 if (!load_addr_set
) {
1460 load_addr
= elf_ppnt
->p_vaddr
- elf_ppnt
->p_offset
;
1461 if (elf_ex
.e_type
== ET_DYN
) {
1462 load_bias
+= error
-
1463 TARGET_ELF_PAGESTART(load_bias
+ elf_ppnt
->p_vaddr
);
1464 load_addr
+= load_bias
;
1465 reloc_func_desc
= load_bias
;
/* Extent tracking: k walks vaddr, vaddr+filesz, vaddr+memsz in turn to
 * update the code end and the brk (end of bss). */
1468 k
= elf_ppnt
->p_vaddr
;
1473 k
= elf_ppnt
->p_vaddr
+ elf_ppnt
->p_filesz
;
1476 if ((elf_ppnt
->p_flags
& PF_X
) && end_code
< k
)
1480 k
= elf_ppnt
->p_vaddr
+ elf_ppnt
->p_memsz
;
1481 if (k
> elf_brk
) elf_brk
= k
;
/* Rebase every recorded address by the chosen bias (presumably zero
 * for ET_EXEC -- the initialisation is not visible in this chunk). */
1484 elf_entry
+= load_bias
;
1485 elf_bss
+= load_bias
;
1486 elf_brk
+= load_bias
;
1487 start_code
+= load_bias
;
1488 end_code
+= load_bias
;
1489 start_data
+= load_bias
;
1490 end_data
+= load_bias
;
/*
 * Hand the dynamic linker to the matching loader: bit 0 of
 * interpreter_type selects the a.out path, bit 1 the ELF path (set by
 * the classification code earlier in this function).  Either way the
 * interpreter fd and path buffer are released afterwards, and an
 * all-ones elf_entry signals failure.  (Fragments garbled by
 * extraction; fused leading integers are original-file line numbers.)
 */
1492 if (elf_interpreter
) {
1493 if (interpreter_type
& 1) {
1494 elf_entry
= load_aout_interp(&interp_ex
, interpreter_fd
);
1496 else if (interpreter_type
& 2) {
1497 elf_entry
= load_elf_interp(&interp_elf_ex
, interpreter_fd
,
1500 reloc_func_desc
= interp_load_addr
;
1502 close(interpreter_fd
);
1503 free(elf_interpreter
);
/* ~0 is the interpreter loaders' error sentinel.
 * NOTE(review): the exit/cleanup after this printf is among the lines
 * dropped by the extraction. */
1505 if (elf_entry
== ~((abi_ulong
)0UL)) {
1506 printf("Unable to load interpreter\n");
/* Pull in the guest symbol table only when logging is active. */
1515 if (qemu_log_enabled())
1516 load_symbols(&elf_ex
, bprm
->fd
);
/* The a.out-interpreter case keeps bprm->fd open (its number was passed
 * on the argument stack earlier); every other case is done with it. */
1518 if (interpreter_type
!= INTERPRETER_AOUT
) close(bprm
->fd
);
/* iBCS2 binaries run under the SVR4 personality, others plain Linux. */
1519 info
->personality
= (ibcs2_interpreter
? PER_SVR4
: PER_LINUX
);
/*
 * Final image bookkeeping: build the ELF startup tables on the stack,
 * publish the computed layout in *info, map the bss/brk area, and
 * reproduce the SVR4 read-only page-zero quirk when that personality
 * was selected.  (Fragments garbled by extraction; fused leading
 * integers are original-file line numbers, not code.)
 */
1521 #ifdef LOW_ELF_STACK
1522 info
->start_stack
= bprm
->p
= elf_stack
- 4;
/* create_elf_tables presumably pushes argc/argv/envp and the ELF
 * auxiliary vector; the 0/1 flag tells it whether a (non-a.out)
 * interpreter is present.  Several of its arguments are among the
 * dropped lines. */
1524 bprm
->p
= create_elf_tables(bprm
->p
,
1528 load_addr
, load_bias
,
1530 (interpreter_type
== INTERPRETER_AOUT
? 0 : 1),
1532 info
->load_addr
= reloc_func_desc
;
1533 info
->start_brk
= info
->brk
= elf_brk
;
1534 info
->end_code
= end_code
;
1535 info
->start_code
= start_code
;
1536 info
->start_data
= start_data
;
1537 info
->end_data
= end_data
;
1538 info
->start_stack
= bprm
->p
;
1540 /* Calling set_brk effectively mmaps the pages that we need for the bss and break
1542 set_brk(elf_bss
, elf_brk
);
/* Zero the tail of the last file-backed page so stale file bytes do not
 * leak into the bss. */
1544 padzero(elf_bss
, elf_brk
);
/* Debug dump of the computed layout.
 * NOTE(review): %x does not match abi_ulong on 64-bit targets -- these
 * look like leftover debug printfs; confirm before relying on them. */
1547 printf("(start_brk) %x\n" , info
->start_brk
);
1548 printf("(end_code) %x\n" , info
->end_code
);
1549 printf("(start_code) %x\n" , info
->start_code
);
1550 printf("(end_data) %x\n" , info
->end_data
);
1551 printf("(start_stack) %x\n" , info
->start_stack
);
1552 printf("(brk) %x\n" , info
->brk
);
1555 if ( info
->personality
== PER_SVR4
)
1557 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
1558 and some applications "depend" upon this behavior.
1559 Since we do not have the power to recompile these, we
1560 emulate the SVr4 behavior. Sigh. */
1561 mapped_addr
= target_mmap(0, qemu_host_page_size
, PROT_READ
| PROT_EXEC
,
1562 MAP_FIXED
| MAP_PRIVATE
, -1, 0);
/* The guest entry point the caller will start executing at. */
1565 info
->entry
= elf_entry
;
/*
 * Stub loader for an a.out-format ELF interpreter.
 *
 * a.out interpreter images are not supported by this loader, so report
 * the fact and return -1.  The caller stores the result into an
 * abi_ulong elf_entry and treats ~((abi_ulong)0UL) as failure;
 * (abi_ulong)-1 matches that sentinel.  As visible in the garbled
 * source, the function previously fell off the end of a non-void
 * function (undefined value), which would have let the caller proceed
 * with a bogus entry point instead of failing cleanly.
 *
 * exptr:     pointer to the a.out exec header (unused by the stub).
 * interp_fd: open fd for the interpreter image (unused; caller closes it).
 *
 * Returns -1 always (unsupported).
 */
static int load_aout_interp(void *exptr, int interp_fd)
{
    (void)exptr;
    (void)interp_fd;
    printf("a.out interpreter not yet supported\n");
    return -1;
}
/*
 * do_init_thread: thin architecture-neutral wrapper that forwards the
 * initial guest register setup to the per-target init_thread() helper,
 * passing along the image layout recorded in *infop during loading.
 * NOTE(review): the opening/closing braces of this function lie in
 * lines dropped by the extraction; only the signature and the
 * forwarding call are visible here, and the fused leading integers are
 * original-file line numbers, not code.
 */
1576 void do_init_thread(struct target_pt_regs
*regs
, struct image_info
*infop
)
1578 init_thread(regs
, infop
);