bsd-user/elfload.c (qemu/ar7.git)
1 /* This is the Linux kernel elf-loading code, ported into user space */
3 #include "qemu/osdep.h"
4 #include <sys/mman.h>
6 #include "qemu.h"
7 #include "disas/disas.h"
8 #include "qemu/path.h"
10 #ifdef _ARCH_PPC64
11 #undef ARCH_DLINFO
12 #undef ELF_PLATFORM
13 #undef ELF_HWCAP
14 #undef ELF_CLASS
15 #undef ELF_DATA
16 #undef ELF_ARCH
17 #endif
19 /* from personality.h */
22 * Flags for bug emulation.
24 * These occupy the top three bytes.
26 enum {
27 ADDR_NO_RANDOMIZE = 0x0040000, /* disable randomization of VA space */
28 FDPIC_FUNCPTRS = 0x0080000, /* userspace function ptrs point to descriptors
29 * (signal handling)
31 MMAP_PAGE_ZERO = 0x0100000,
32 ADDR_COMPAT_LAYOUT = 0x0200000,
33 READ_IMPLIES_EXEC = 0x0400000,
34 ADDR_LIMIT_32BIT = 0x0800000,
35 SHORT_INODE = 0x1000000,
36 WHOLE_SECONDS = 0x2000000,
37 STICKY_TIMEOUTS = 0x4000000,
38 ADDR_LIMIT_3GB = 0x8000000,
42 * Personality types.
44 * These go in the low byte. Avoid using the top bit, it will
45 * conflict with error returns.
47 enum {
48 PER_LINUX = 0x0000,
49 PER_LINUX_32BIT = 0x0000 | ADDR_LIMIT_32BIT,
50 PER_LINUX_FDPIC = 0x0000 | FDPIC_FUNCPTRS,
51 PER_SVR4 = 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
52 PER_SVR3 = 0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
53 PER_SCOSVR3 = 0x0003 | STICKY_TIMEOUTS |
54 WHOLE_SECONDS | SHORT_INODE,
55 PER_OSR5 = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
56 PER_WYSEV386 = 0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
57 PER_ISCR4 = 0x0005 | STICKY_TIMEOUTS,
58 PER_BSD = 0x0006,
59 PER_SUNOS = 0x0006 | STICKY_TIMEOUTS,
60 PER_XENIX = 0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
61 PER_LINUX32 = 0x0008,
62 PER_LINUX32_3GB = 0x0008 | ADDR_LIMIT_3GB,
63 PER_IRIX32 = 0x0009 | STICKY_TIMEOUTS,/* IRIX5 32-bit */
64 PER_IRIXN32 = 0x000a | STICKY_TIMEOUTS,/* IRIX6 new 32-bit */
65 PER_IRIX64 = 0x000b | STICKY_TIMEOUTS,/* IRIX6 64-bit */
66 PER_RISCOS = 0x000c,
67 PER_SOLARIS = 0x000d | STICKY_TIMEOUTS,
68 PER_UW7 = 0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
69 PER_OSF4 = 0x000f, /* OSF/1 v4 */
70 PER_HPUX = 0x0010,
71 PER_MASK = 0x00ff,
75 * Return the base personality without flags.
77 #define personality(pers) (pers & PER_MASK)
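/*
 * Illustrative sketch, not part of the original loader: personality values
 * carry the base type in the low byte and the bug-emulation flags above it,
 * so personality() strips the flags.  The helper name is made up and the
 * block is guarded out of the build.
 */
#if 0
static int personality_example(void)
{
    /*
     * PER_SVR4 also carries STICKY_TIMEOUTS and MMAP_PAGE_ZERO, but the
     * macro reduces it to the base value 0x0001; likewise PER_LINUX32_3GB
     * reduces to plain PER_LINUX32.  This always returns 1.
     */
    return personality(PER_SVR4) == 0x0001 &&
           personality(PER_LINUX32_3GB) == PER_LINUX32;
}
#endif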
79 /* this flag is ineffective under Linux too, should be deleted */
80 #ifndef MAP_DENYWRITE
81 #define MAP_DENYWRITE 0
82 #endif
84 /* should probably go in elf.h */
85 #ifndef ELIBBAD
86 #define ELIBBAD 80
87 #endif
89 #ifdef TARGET_I386
91 #define ELF_PLATFORM get_elf_platform()
93 static const char *get_elf_platform(void)
95 static char elf_platform[] = "i386";
96 int family = object_property_get_int(OBJECT(thread_cpu), "family", NULL);
97 if (family > 6)
98 family = 6;
99 if (family >= 3)
100 elf_platform[1] = '0' + family;
101 return elf_platform;
104 #define ELF_HWCAP get_elf_hwcap()
106 static uint32_t get_elf_hwcap(void)
108 X86CPU *cpu = X86_CPU(thread_cpu);
110 return cpu->env.features[FEAT_1_EDX];
113 #ifdef TARGET_X86_64
114 #define ELF_START_MMAP 0x2aaaaab000ULL
115 #define elf_check_arch(x) ( ((x) == ELF_ARCH) )
117 #define ELF_CLASS ELFCLASS64
118 #define ELF_DATA ELFDATA2LSB
119 #define ELF_ARCH EM_X86_64
121 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
123 regs->rax = 0;
124 regs->rsp = infop->start_stack;
125 regs->rip = infop->entry;
126 if (bsd_type == target_freebsd) {
127 regs->rdi = infop->start_stack;
131 #else
133 #define ELF_START_MMAP 0x80000000
136 * This is used to ensure we don't load something for the wrong architecture.
138 #define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )
141 * These are used to set parameters in the core dumps.
143 #define ELF_CLASS ELFCLASS32
144 #define ELF_DATA ELFDATA2LSB
145 #define ELF_ARCH EM_386
147 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
149 regs->esp = infop->start_stack;
150 regs->eip = infop->entry;
152 /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
153 starts %edx contains a pointer to a function which might be
154 registered using `atexit'. This provides a means for the
155 dynamic linker to call DT_FINI functions for shared libraries
156 that have been loaded before the code runs.
158 A value of 0 tells us that we have no such handler. */
159 regs->edx = 0;
161 #endif
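/*
 * Illustrative sketch, not part of the loader: a guest-side view of the
 * %edx convention described above.  The function name and shape are
 * assumptions; a real guest startup would register the DT_FINI handler the
 * dynamic linker left in %edx (when non-zero) with atexit().  Guarded out
 * of the build.
 */
#if 0
#include <stdlib.h>
static void guest_start_example(void (*rtld_fini)(void))
{
    if (rtld_fini) {
        atexit(rtld_fini);  /* run the dynamic linker's DT_FINI hooks at exit */
    }
}
#endif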
163 #define USE_ELF_CORE_DUMP
164 #define ELF_EXEC_PAGESIZE 4096
166 #endif
168 #ifdef TARGET_ARM
170 #define ELF_START_MMAP 0x80000000
172 #define elf_check_arch(x) ( (x) == EM_ARM )
174 #define ELF_CLASS ELFCLASS32
175 #ifdef TARGET_WORDS_BIGENDIAN
176 #define ELF_DATA ELFDATA2MSB
177 #else
178 #define ELF_DATA ELFDATA2LSB
179 #endif
180 #define ELF_ARCH EM_ARM
182 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
184 abi_long stack = infop->start_stack;
185 memset(regs, 0, sizeof(*regs));
186 regs->ARM_cpsr = 0x10;
187 if (infop->entry & 1)
188 regs->ARM_cpsr |= CPSR_T;
189 regs->ARM_pc = infop->entry & 0xfffffffe;
190 regs->ARM_sp = infop->start_stack;
191 /* FIXME - what to do for failure of get_user()? */
192 get_user_ual(regs->ARM_r2, stack + 8); /* envp */
193 get_user_ual(regs->ARM_r1, stack + 4); /* argv */
194 /* XXX: it seems that r0 is zeroed afterwards! */
195 regs->ARM_r0 = 0;
196 /* For uClinux PIC binaries. */
197 /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
198 regs->ARM_r10 = infop->start_data;
201 #define USE_ELF_CORE_DUMP
202 #define ELF_EXEC_PAGESIZE 4096
204 enum
206 ARM_HWCAP_ARM_SWP = 1 << 0,
207 ARM_HWCAP_ARM_HALF = 1 << 1,
208 ARM_HWCAP_ARM_THUMB = 1 << 2,
209 ARM_HWCAP_ARM_26BIT = 1 << 3,
210 ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
211 ARM_HWCAP_ARM_FPA = 1 << 5,
212 ARM_HWCAP_ARM_VFP = 1 << 6,
213 ARM_HWCAP_ARM_EDSP = 1 << 7,
216 #define ELF_HWCAP (ARM_HWCAP_ARM_SWP | ARM_HWCAP_ARM_HALF \
217 | ARM_HWCAP_ARM_THUMB | ARM_HWCAP_ARM_FAST_MULT \
218 | ARM_HWCAP_ARM_FPA | ARM_HWCAP_ARM_VFP)
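/*
 * Illustrative sketch (assumption, guarded out of the build): these bits are
 * what the guest receives through AT_HWCAP, and a guest C library would test
 * them before using the corresponding instructions, e.g. VFP.
 */
#if 0
static int guest_has_vfp_example(uint32_t hwcap)
{
    return (hwcap & ARM_HWCAP_ARM_VFP) != 0;
}
#endif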
220 #endif
222 #ifdef TARGET_SPARC
223 #ifdef TARGET_SPARC64
225 #define ELF_START_MMAP 0x80000000
227 #ifndef TARGET_ABI32
228 #define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
229 #else
230 #define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )
231 #endif
233 #define ELF_CLASS ELFCLASS64
234 #define ELF_DATA ELFDATA2MSB
235 #define ELF_ARCH EM_SPARCV9
237 #define STACK_BIAS 2047
239 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
241 #ifndef TARGET_ABI32
242 regs->tstate = 0;
243 #endif
244 regs->pc = infop->entry;
245 regs->npc = regs->pc + 4;
246 regs->y = 0;
247 #ifdef TARGET_ABI32
248 regs->u_regs[14] = infop->start_stack - 16 * 4;
249 #else
250 if (personality(infop->personality) == PER_LINUX32)
251 regs->u_regs[14] = infop->start_stack - 16 * 4;
252 else {
253 regs->u_regs[14] = infop->start_stack - 16 * 8 - STACK_BIAS;
254 if (bsd_type == target_freebsd) {
255 regs->u_regs[8] = infop->start_stack;
256 regs->u_regs[11] = infop->start_stack;
259 #endif
262 #else
263 #define ELF_START_MMAP 0x80000000
265 #define elf_check_arch(x) ( (x) == EM_SPARC )
267 #define ELF_CLASS ELFCLASS32
268 #define ELF_DATA ELFDATA2MSB
269 #define ELF_ARCH EM_SPARC
271 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
273 regs->psr = 0;
274 regs->pc = infop->entry;
275 regs->npc = regs->pc + 4;
276 regs->y = 0;
277 regs->u_regs[14] = infop->start_stack - 16 * 4;
280 #endif
281 #endif
283 #ifdef TARGET_PPC
285 #define ELF_START_MMAP 0x80000000
287 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
289 #define elf_check_arch(x) ( (x) == EM_PPC64 )
291 #define ELF_CLASS ELFCLASS64
293 #else
295 #define elf_check_arch(x) ( (x) == EM_PPC )
297 #define ELF_CLASS ELFCLASS32
299 #endif
301 #ifdef TARGET_WORDS_BIGENDIAN
302 #define ELF_DATA ELFDATA2MSB
303 #else
304 #define ELF_DATA ELFDATA2LSB
305 #endif
306 #define ELF_ARCH EM_PPC
309 * We need to put in some extra aux table entries to tell glibc what
310 * the cache block size is, so it can use the dcbz instruction safely.
312 #define AT_DCACHEBSIZE 19
313 #define AT_ICACHEBSIZE 20
314 #define AT_UCACHEBSIZE 21
315 /* A special ignored type value for PPC, for glibc compatibility. */
316 #define AT_IGNOREPPC 22
318 * The requirements here are:
319 * - keep the final alignment of sp (sp & 0xf)
320 * - make sure the 32-bit value at the first 16 byte aligned position of
321 * AUXV is greater than 16 for glibc compatibility.
322 * AT_IGNOREPPC is used for that.
323 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
324 * even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
326 #define DLINFO_ARCH_ITEMS 5
327 #define ARCH_DLINFO \
328 do { \
329 NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20); \
330 NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20); \
331 NEW_AUX_ENT(AT_UCACHEBSIZE, 0); \
332 /* \
333 * Now handle glibc compatibility. \
334 */ \
335 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
336 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
337 } while (0)
339 static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
341 abi_ulong pos = infop->start_stack;
342 abi_ulong tmp;
343 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
344 abi_ulong entry, toc;
345 #endif
347 _regs->gpr[1] = infop->start_stack;
348 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
349 get_user_u64(entry, infop->entry);
350 entry += infop->load_addr;
351 get_user_u64(toc, infop->entry + 8);
352 toc += infop->load_addr;
353 _regs->gpr[2] = toc;
354 infop->entry = entry;
355 #endif
356 _regs->nip = infop->entry;
357 /* Note that this isn't exactly what the regular kernel does,
358 * but this is what the ABI wants and is needed to allow
359 * execution of PPC BSD programs. */
361 /* FIXME - what to do for failure of get_user()? */
362 get_user_ual(_regs->gpr[3], pos);
363 pos += sizeof(abi_ulong);
364 _regs->gpr[4] = pos;
365 for (tmp = 1; tmp != 0; pos += sizeof(abi_ulong)) {
366 get_user_ual(tmp, pos);
368 _regs->gpr[5] = pos;
371 #define USE_ELF_CORE_DUMP
372 #define ELF_EXEC_PAGESIZE 4096
374 #endif
376 #ifdef TARGET_MIPS
378 #define ELF_START_MMAP 0x80000000
380 #define elf_check_arch(x) ( (x) == EM_MIPS )
382 #ifdef TARGET_MIPS64
383 #define ELF_CLASS ELFCLASS64
384 #else
385 #define ELF_CLASS ELFCLASS32
386 #endif
387 #ifdef TARGET_WORDS_BIGENDIAN
388 #define ELF_DATA ELFDATA2MSB
389 #else
390 #define ELF_DATA ELFDATA2LSB
391 #endif
392 #define ELF_ARCH EM_MIPS
394 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
396 regs->cp0_status = 2 << CP0St_KSU;
397 regs->cp0_epc = infop->entry;
398 regs->regs[29] = infop->start_stack;
401 #define USE_ELF_CORE_DUMP
402 #define ELF_EXEC_PAGESIZE 4096
404 #endif /* TARGET_MIPS */
406 #ifdef TARGET_SH4
408 #define ELF_START_MMAP 0x80000000
410 #define elf_check_arch(x) ( (x) == EM_SH )
412 #define ELF_CLASS ELFCLASS32
413 #define ELF_DATA ELFDATA2LSB
414 #define ELF_ARCH EM_SH
416 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
418 /* Check other registers XXXXX */
419 regs->pc = infop->entry;
420 regs->regs[15] = infop->start_stack;
423 #define USE_ELF_CORE_DUMP
424 #define ELF_EXEC_PAGESIZE 4096
426 #endif
428 #ifdef TARGET_CRIS
430 #define ELF_START_MMAP 0x80000000
432 #define elf_check_arch(x) ( (x) == EM_CRIS )
434 #define ELF_CLASS ELFCLASS32
435 #define ELF_DATA ELFDATA2LSB
436 #define ELF_ARCH EM_CRIS
438 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
440 regs->erp = infop->entry;
443 #define USE_ELF_CORE_DUMP
444 #define ELF_EXEC_PAGESIZE 8192
446 #endif
448 #ifdef TARGET_M68K
450 #define ELF_START_MMAP 0x80000000
452 #define elf_check_arch(x) ( (x) == EM_68K )
454 #define ELF_CLASS ELFCLASS32
455 #define ELF_DATA ELFDATA2MSB
456 #define ELF_ARCH EM_68K
458 /* ??? Does this need to do anything?
459 #define ELF_PLAT_INIT(_r) */
461 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
463 regs->usp = infop->start_stack;
464 regs->sr = 0;
465 regs->pc = infop->entry;
468 #define USE_ELF_CORE_DUMP
469 #define ELF_EXEC_PAGESIZE 8192
471 #endif
473 #ifdef TARGET_ALPHA
475 #define ELF_START_MMAP (0x30000000000ULL)
477 #define elf_check_arch(x) ( (x) == ELF_ARCH )
479 #define ELF_CLASS ELFCLASS64
480 #define ELF_DATA ELFDATA2MSB
481 #define ELF_ARCH EM_ALPHA
483 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
485 regs->pc = infop->entry;
486 regs->ps = 8;
487 regs->usp = infop->start_stack;
488 regs->unique = infop->start_data; /* ? */
489 printf("Set unique value to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n",
490 regs->unique, infop->start_data);
493 #define USE_ELF_CORE_DUMP
494 #define ELF_EXEC_PAGESIZE 8192
496 #endif /* TARGET_ALPHA */
498 #ifndef ELF_PLATFORM
499 #define ELF_PLATFORM (NULL)
500 #endif
502 #ifndef ELF_HWCAP
503 #define ELF_HWCAP 0
504 #endif
506 #ifdef TARGET_ABI32
507 #undef ELF_CLASS
508 #define ELF_CLASS ELFCLASS32
509 #undef bswaptls
510 #define bswaptls(ptr) bswap32s(ptr)
511 #endif
513 #include "elf.h"
515 struct exec
517 unsigned int a_info; /* Use macros N_MAGIC, etc for access */
518 unsigned int a_text; /* length of text, in bytes */
519 unsigned int a_data; /* length of data, in bytes */
520 unsigned int a_bss; /* length of uninitialized data area, in bytes */
521 unsigned int a_syms; /* length of symbol table data in file, in bytes */
522 unsigned int a_entry; /* start address */
523 unsigned int a_trsize; /* length of relocation info for text, in bytes */
524 unsigned int a_drsize; /* length of relocation info for data, in bytes */
528 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
529 #define OMAGIC 0407
530 #define NMAGIC 0410
531 #define ZMAGIC 0413
532 #define QMAGIC 0314
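/*
 * Illustrative sketch, not part of the loader: the interpreter detection
 * later in this file reads the interpreter's first bytes both as an a.out
 * 'struct exec' and as an ELF header.  N_MAGIC() classifies the a.out
 * magics above, while "\177ELF" identifies the ELF case.  The helper names
 * are assumptions; the block is guarded out of the build.
 */
#if 0
static int looks_like_aout(const struct exec *ex)
{
    unsigned int m = N_MAGIC(*ex);
    return m == OMAGIC || m == NMAGIC || m == ZMAGIC || m == QMAGIC;
}

static int looks_like_elf(const unsigned char *e_ident)
{
    return e_ident[0] == 0x7f &&
           strncmp((const char *)&e_ident[1], "ELF", 3) == 0;
}
#endif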
534 /* max code+data+bss space allocated to elf interpreter */
535 #define INTERP_MAP_SIZE (32 * 1024 * 1024)
537 /* max code+data+bss+brk space allocated to ET_DYN executables */
538 #define ET_DYN_MAP_SIZE (128 * 1024 * 1024)
540 /* Necessary parameters */
541 #define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
542 #define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
543 #define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
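/*
 * Illustrative sketch (assumption, guarded out of the build): the two macros
 * above split a guest virtual address into an ELF-page-aligned base and the
 * offset within that page, and the two parts always recompose to the
 * original address.
 */
#if 0
static int elf_page_split_example(void)
{
    abi_ulong v = 0x0804a123;
    abi_ulong start = TARGET_ELF_PAGESTART(v);  /* 0x0804a000 when the ELF page size is 4K */
    abi_ulong off   = TARGET_ELF_PAGEOFFSET(v); /* 0x123 in the same case */
    return start + off == v;                    /* always 1: the split is lossless */
}
#endif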
545 #define INTERPRETER_NONE 0
546 #define INTERPRETER_AOUT 1
547 #define INTERPRETER_ELF 2
549 #define DLINFO_ITEMS 12
551 static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
553 memcpy(to, from, n);
556 static int load_aout_interp(void * exptr, int interp_fd);
558 #ifdef BSWAP_NEEDED
559 static void bswap_ehdr(struct elfhdr *ehdr)
561 bswap16s(&ehdr->e_type); /* Object file type */
562 bswap16s(&ehdr->e_machine); /* Architecture */
563 bswap32s(&ehdr->e_version); /* Object file version */
564 bswaptls(&ehdr->e_entry); /* Entry point virtual address */
565 bswaptls(&ehdr->e_phoff); /* Program header table file offset */
566 bswaptls(&ehdr->e_shoff); /* Section header table file offset */
567 bswap32s(&ehdr->e_flags); /* Processor-specific flags */
568 bswap16s(&ehdr->e_ehsize); /* ELF header size in bytes */
569 bswap16s(&ehdr->e_phentsize); /* Program header table entry size */
570 bswap16s(&ehdr->e_phnum); /* Program header table entry count */
571 bswap16s(&ehdr->e_shentsize); /* Section header table entry size */
572 bswap16s(&ehdr->e_shnum); /* Section header table entry count */
573 bswap16s(&ehdr->e_shstrndx); /* Section header string table index */
576 static void bswap_phdr(struct elf_phdr *phdr)
578 bswap32s(&phdr->p_type); /* Segment type */
579 bswaptls(&phdr->p_offset); /* Segment file offset */
580 bswaptls(&phdr->p_vaddr); /* Segment virtual address */
581 bswaptls(&phdr->p_paddr); /* Segment physical address */
582 bswaptls(&phdr->p_filesz); /* Segment size in file */
583 bswaptls(&phdr->p_memsz); /* Segment size in memory */
584 bswap32s(&phdr->p_flags); /* Segment flags */
585 bswaptls(&phdr->p_align); /* Segment alignment */
588 static void bswap_shdr(struct elf_shdr *shdr)
590 bswap32s(&shdr->sh_name);
591 bswap32s(&shdr->sh_type);
592 bswaptls(&shdr->sh_flags);
593 bswaptls(&shdr->sh_addr);
594 bswaptls(&shdr->sh_offset);
595 bswaptls(&shdr->sh_size);
596 bswap32s(&shdr->sh_link);
597 bswap32s(&shdr->sh_info);
598 bswaptls(&shdr->sh_addralign);
599 bswaptls(&shdr->sh_entsize);
602 static void bswap_sym(struct elf_sym *sym)
604 bswap32s(&sym->st_name);
605 bswaptls(&sym->st_value);
606 bswaptls(&sym->st_size);
607 bswap16s(&sym->st_shndx);
609 #endif
612 * 'copy_elf_strings()' copies argument/environment strings from user
613 * memory to free pages in kernel mem. These are in a format ready
614 * to be put directly into the top of new user memory.
617 static abi_ulong copy_elf_strings(int argc,char ** argv, void **page,
618 abi_ulong p)
620 char *tmp, *tmp1, *pag = NULL;
621 int len, offset = 0;
623 if (!p) {
624 return 0; /* bullet-proofing */
626 while (argc-- > 0) {
627 tmp = argv[argc];
628 if (!tmp) {
629 fprintf(stderr, "VFS: argc is wrong");
630 exit(-1);
632 tmp1 = tmp;
633 while (*tmp++);
634 len = tmp - tmp1;
635 if (p < len) { /* this shouldn't happen - 128kB */
636 return 0;
638 while (len) {
639 --p; --tmp; --len;
640 if (--offset < 0) {
641 offset = p % TARGET_PAGE_SIZE;
642 pag = (char *)page[p/TARGET_PAGE_SIZE];
643 if (!pag) {
644 pag = g_try_malloc0(TARGET_PAGE_SIZE);
645 page[p/TARGET_PAGE_SIZE] = pag;
646 if (!pag)
647 return 0;
650 if (len == 0 || offset == 0) {
651 *(pag + offset) = *tmp;
653 else {
654 int bytes_to_copy = (len > offset) ? offset : len;
655 tmp -= bytes_to_copy;
656 p -= bytes_to_copy;
657 offset -= bytes_to_copy;
658 len -= bytes_to_copy;
659 memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
663 return p;
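/*
 * Illustrative sketch (assumption, guarded out of the build): the strings are
 * written from the top of a MAX_ARG_PAGES-page scratch area with 'p' counting
 * down, so the byte at offset 'p' lives in page[p / TARGET_PAGE_SIZE] at
 * offset p % TARGET_PAGE_SIZE.  The helper name is made up for illustration.
 */
#if 0
static char peek_arg_byte_example(void **page, abi_ulong p)
{
    char *pag = (char *)page[p / TARGET_PAGE_SIZE];
    return pag ? pag[p % TARGET_PAGE_SIZE] : 0;  /* 0 if that page was never allocated */
}
#endif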
666 static abi_ulong setup_arg_pages(abi_ulong p, struct linux_binprm *bprm,
667 struct image_info *info)
669 abi_ulong stack_base, size, error;
670 int i;
672 /* Create enough stack to hold everything. If we don't use
673 * it for args, we'll use it for something else...
675 size = x86_stack_size;
676 if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE)
677 size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
678 error = target_mmap(0,
679 size + qemu_host_page_size,
680 PROT_READ | PROT_WRITE,
681 MAP_PRIVATE | MAP_ANON,
682 -1, 0);
683 if (error == -1) {
684 perror("stk mmap");
685 exit(-1);
687 /* we reserve one extra page at the top of the stack as guard */
688 target_mprotect(error + size, qemu_host_page_size, PROT_NONE);
690 stack_base = error + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;
691 p += stack_base;
693 for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
694 if (bprm->page[i]) {
695 info->rss++;
696 /* FIXME - check return value of memcpy_to_target() for failure */
697 memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE);
698 g_free(bprm->page[i]);
700 stack_base += TARGET_PAGE_SIZE;
702 return p;
705 static void set_brk(abi_ulong start, abi_ulong end)
707 /* page-align the start and end addresses... */
708 start = HOST_PAGE_ALIGN(start);
709 end = HOST_PAGE_ALIGN(end);
710 if (end <= start)
711 return;
712 if(target_mmap(start, end - start,
713 PROT_READ | PROT_WRITE | PROT_EXEC,
714 MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) == -1) {
715 perror("cannot mmap brk");
716 exit(-1);
721 /* We need to explicitly zero any fractional pages after the data
722 section (i.e. bss). This would contain the junk from the file that
723 should not be in memory. */
724 static void padzero(abi_ulong elf_bss, abi_ulong last_bss)
726 abi_ulong nbyte;
728 if (elf_bss >= last_bss)
729 return;
731 /* XXX: this is really a hack: if the real host page size is
732 smaller than the target page size, some pages after the end
733 of the file may not be mapped. A better fix would be to
734 patch target_mmap(), but it is more complicated as the file
735 size must be known */
736 if (qemu_real_host_page_size < qemu_host_page_size) {
737 abi_ulong end_addr, end_addr1;
738 end_addr1 = REAL_HOST_PAGE_ALIGN(elf_bss);
739 end_addr = HOST_PAGE_ALIGN(elf_bss);
740 if (end_addr1 < end_addr) {
741 mmap((void *)g2h(end_addr1), end_addr - end_addr1,
742 PROT_READ|PROT_WRITE|PROT_EXEC,
743 MAP_FIXED|MAP_PRIVATE|MAP_ANON, -1, 0);
747 nbyte = elf_bss & (qemu_host_page_size-1);
748 if (nbyte) {
749 nbyte = qemu_host_page_size - nbyte;
750 do {
751 /* FIXME - what to do if put_user() fails? */
752 put_user_u8(0, elf_bss);
753 elf_bss++;
754 } while (--nbyte);
759 static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
760 struct elfhdr * exec,
761 abi_ulong load_addr,
762 abi_ulong load_bias,
763 abi_ulong interp_load_addr, int ibcs,
764 struct image_info *info)
766 abi_ulong sp;
767 int size;
768 abi_ulong u_platform;
769 const char *k_platform;
770 const int n = sizeof(elf_addr_t);
772 sp = p;
773 u_platform = 0;
774 k_platform = ELF_PLATFORM;
775 if (k_platform) {
776 size_t len = strlen(k_platform) + 1;
777 sp -= (len + n - 1) & ~(n - 1);
778 u_platform = sp;
779 /* FIXME - check return value of memcpy_to_target() for failure */
780 memcpy_to_target(sp, k_platform, len);
783 * Force 16 byte _final_ alignment here for generality.
785 sp = sp &~ (abi_ulong)15;
786 size = (DLINFO_ITEMS + 1) * 2;
787 if (k_platform)
788 size += 2;
789 #ifdef DLINFO_ARCH_ITEMS
790 size += DLINFO_ARCH_ITEMS * 2;
791 #endif
792 size += envc + argc + 2;
793 size += (!ibcs ? 3 : 1); /* argc itself */
794 size *= n;
795 if (size & 15)
796 sp -= 16 - (size & 15);
798 /* This is correct because Linux defines
799 * elf_addr_t as Elf32_Off / Elf64_Off
801 #define NEW_AUX_ENT(id, val) do { \
802 sp -= n; put_user_ual(val, sp); \
803 sp -= n; put_user_ual(id, sp); \
804 } while(0)
806 NEW_AUX_ENT (AT_NULL, 0);
808 /* There must be exactly DLINFO_ITEMS entries here. */
809 NEW_AUX_ENT(AT_PHDR, (abi_ulong)(load_addr + exec->e_phoff));
810 NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
811 NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
812 NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
813 NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_load_addr));
814 NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
815 NEW_AUX_ENT(AT_ENTRY, load_bias + exec->e_entry);
816 NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
817 NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
818 NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
819 NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
820 NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
821 NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK));
822 if (k_platform)
823 NEW_AUX_ENT(AT_PLATFORM, u_platform);
824 #ifdef ARCH_DLINFO
826 * ARCH_DLINFO must come last so platform specific code can enforce
827 * special alignment requirements on the AUXV if necessary (eg. PPC).
829 ARCH_DLINFO;
830 #endif
831 #undef NEW_AUX_ENT
833 sp = loader_build_argptr(envc, argc, sp, p, !ibcs);
834 return sp;
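/*
 * Illustrative sketch of the resulting guest stack image (assumption, not
 * loader code): in the layout built here without the optional extra
 * argv/envp pointer words, the stack top holds argc, then argv[] and a NULL,
 * then envp[] and a NULL, then (type, value) auxv pairs ending with AT_NULL.
 * A guest program walking its own stack image could fetch an auxv entry
 * roughly like this; the helper is hypothetical and guarded out of the build.
 */
#if 0
static abi_ulong guest_getauxval_example(const abi_ulong *sp, abi_ulong type)
{
    abi_ulong argc = *sp++;
    sp += argc + 1;                 /* skip argv[0..argc-1] and its NULL */
    while (*sp++) {                 /* skip envp[] up to and past its NULL */
        continue;
    }
    for (; sp[0] != AT_NULL; sp += 2) {
        if (sp[0] == type) {
            return sp[1];           /* e.g. AT_PAGESZ, AT_ENTRY, ... */
        }
    }
    return 0;
}
#endif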
838 static abi_ulong load_elf_interp(struct elfhdr * interp_elf_ex,
839 int interpreter_fd,
840 abi_ulong *interp_load_addr)
842 struct elf_phdr *elf_phdata = NULL;
843 struct elf_phdr *eppnt;
844 abi_ulong load_addr = 0;
845 int load_addr_set = 0;
846 int retval;
847 abi_ulong last_bss, elf_bss;
848 abi_ulong error;
849 int i;
851 elf_bss = 0;
852 last_bss = 0;
853 error = 0;
855 #ifdef BSWAP_NEEDED
856 bswap_ehdr(interp_elf_ex);
857 #endif
858 /* First of all, some simple consistency checks */
859 if ((interp_elf_ex->e_type != ET_EXEC &&
860 interp_elf_ex->e_type != ET_DYN) ||
861 !elf_check_arch(interp_elf_ex->e_machine)) {
862 return ~((abi_ulong)0UL);
866 /* Now read in all of the header information */
868 if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE)
869 return ~(abi_ulong)0UL;
871 elf_phdata = (struct elf_phdr *)
872 malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
874 if (!elf_phdata)
875 return ~((abi_ulong)0UL);
878 * If the size of this structure has changed, then punt, since
879 * we will be doing the wrong thing.
881 if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
882 free(elf_phdata);
883 return ~((abi_ulong)0UL);
886 retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);
887 if(retval >= 0) {
888 retval = read(interpreter_fd,
889 (char *) elf_phdata,
890 sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
892 if (retval < 0) {
893 perror("load_elf_interp");
894 exit(-1);
895 free (elf_phdata);
896 return retval;
898 #ifdef BSWAP_NEEDED
899 eppnt = elf_phdata;
900 for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
901 bswap_phdr(eppnt);
903 #endif
905 if (interp_elf_ex->e_type == ET_DYN) {
906 /* in order to avoid hardcoding the interpreter load
907 address in qemu, we allocate a big enough memory zone */
908 error = target_mmap(0, INTERP_MAP_SIZE,
909 PROT_NONE, MAP_PRIVATE | MAP_ANON,
910 -1, 0);
911 if (error == -1) {
912 perror("mmap");
913 exit(-1);
915 load_addr = error;
916 load_addr_set = 1;
919 eppnt = elf_phdata;
920 for(i=0; i<interp_elf_ex->e_phnum; i++, eppnt++)
921 if (eppnt->p_type == PT_LOAD) {
922 int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
923 int elf_prot = 0;
924 abi_ulong vaddr = 0;
925 abi_ulong k;
927 if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
928 if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
929 if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
930 if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) {
931 elf_type |= MAP_FIXED;
932 vaddr = eppnt->p_vaddr;
934 error = target_mmap(load_addr+TARGET_ELF_PAGESTART(vaddr),
935 eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr),
936 elf_prot,
937 elf_type,
938 interpreter_fd,
939 eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr));
941 if (error == -1) {
942 /* Real error */
943 close(interpreter_fd);
944 free(elf_phdata);
945 return ~((abi_ulong)0UL);
948 if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
949 load_addr = error;
950 load_addr_set = 1;
954 * Find the end of the file mapping for this phdr, and keep
955 * track of the largest address we see for this.
957 k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
958 if (k > elf_bss) elf_bss = k;
961 * Do the same thing for the memory mapping - between
962 * elf_bss and last_bss is the bss section.
964 k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
965 if (k > last_bss) last_bss = k;
968 /* Now use mmap to map the library into memory. */
970 close(interpreter_fd);
973 * Now fill out the bss section. First pad the last page up
974 * to the page boundary, and then perform a mmap to make sure
975 * that there are zeromapped pages up to and including the last
976 * bss page.
978 padzero(elf_bss, last_bss);
979 elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */
981 /* Map the last of the bss segment */
982 if (last_bss > elf_bss) {
983 target_mmap(elf_bss, last_bss-elf_bss,
984 PROT_READ|PROT_WRITE|PROT_EXEC,
985 MAP_FIXED|MAP_PRIVATE|MAP_ANON, -1, 0);
987 free(elf_phdata);
989 *interp_load_addr = load_addr;
990 return ((abi_ulong) interp_elf_ex->e_entry) + load_addr;
993 static int symfind(const void *s0, const void *s1)
995 target_ulong addr = *(target_ulong *)s0;
996 struct elf_sym *sym = (struct elf_sym *)s1;
997 int result = 0;
998 if (addr < sym->st_value) {
999 result = -1;
1000 } else if (addr >= sym->st_value + sym->st_size) {
1001 result = 1;
1003 return result;
1006 static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr)
1008 #if ELF_CLASS == ELFCLASS32
1009 struct elf_sym *syms = s->disas_symtab.elf32;
1010 #else
1011 struct elf_sym *syms = s->disas_symtab.elf64;
1012 #endif
1014 // binary search
1015 struct elf_sym *sym;
1017 sym = bsearch(&orig_addr, syms, s->disas_num_syms, sizeof(*syms), symfind);
1018 if (sym != NULL) {
1019 return s->disas_strtab + sym->st_name;
1022 return "";
1025 /* FIXME: This should use elf_ops.h */
1026 static int symcmp(const void *s0, const void *s1)
1028 struct elf_sym *sym0 = (struct elf_sym *)s0;
1029 struct elf_sym *sym1 = (struct elf_sym *)s1;
1030 return (sym0->st_value < sym1->st_value)
1031 ? -1
1032 : ((sym0->st_value > sym1->st_value) ? 1 : 0);
1035 /* Best attempt to load symbols from this ELF object. */
1036 static void load_symbols(struct elfhdr *hdr, int fd)
1038 unsigned int i, nsyms;
1039 struct elf_shdr sechdr, symtab, strtab;
1040 char *strings;
1041 struct syminfo *s;
1042 struct elf_sym *syms, *new_syms;
1044 lseek(fd, hdr->e_shoff, SEEK_SET);
1045 for (i = 0; i < hdr->e_shnum; i++) {
1046 if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr))
1047 return;
1048 #ifdef BSWAP_NEEDED
1049 bswap_shdr(&sechdr);
1050 #endif
1051 if (sechdr.sh_type == SHT_SYMTAB) {
1052 symtab = sechdr;
1053 lseek(fd, hdr->e_shoff
1054 + sizeof(sechdr) * sechdr.sh_link, SEEK_SET);
1055 if (read(fd, &strtab, sizeof(strtab))
1056 != sizeof(strtab))
1057 return;
1058 #ifdef BSWAP_NEEDED
1059 bswap_shdr(&strtab);
1060 #endif
1061 goto found;
1064 return; /* Shouldn't happen... */
1066 found:
1067 /* Now know where the strtab and symtab are. Snarf them. */
1068 s = malloc(sizeof(*s));
1069 syms = malloc(symtab.sh_size);
1070 if (!syms) {
1071 free(s);
1072 return;
1074 s->disas_strtab = strings = malloc(strtab.sh_size);
1075 if (!s->disas_strtab) {
1076 free(s);
1077 free(syms);
1078 return;
1081 lseek(fd, symtab.sh_offset, SEEK_SET);
1082 if (read(fd, syms, symtab.sh_size) != symtab.sh_size) {
1083 free(s);
1084 free(syms);
1085 free(strings);
1086 return;
1089 nsyms = symtab.sh_size / sizeof(struct elf_sym);
1091 i = 0;
1092 while (i < nsyms) {
1093 #ifdef BSWAP_NEEDED
1094 bswap_sym(syms + i);
1095 #endif
1096 // Throw away entries which we do not need.
1097 if (syms[i].st_shndx == SHN_UNDEF ||
1098 syms[i].st_shndx >= SHN_LORESERVE ||
1099 ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) {
1100 nsyms--;
1101 if (i < nsyms) {
1102 syms[i] = syms[nsyms];
1104 continue;
1106 #if defined(TARGET_ARM) || defined (TARGET_MIPS)
1107 /* The bottom address bit marks a Thumb or MIPS16 symbol. */
1108 syms[i].st_value &= ~(target_ulong)1;
1109 #endif
1110 i++;
1113 /* Attempt to free the storage associated with the local symbols
1114 that we threw away. Whether or not this has any effect on the
1115 memory allocation depends on the malloc implementation and how
1116 many symbols we managed to discard. */
1117 new_syms = realloc(syms, nsyms * sizeof(*syms));
1118 if (new_syms == NULL) {
1119 free(s);
1120 free(syms);
1121 free(strings);
1122 return;
1124 syms = new_syms;
1126 qsort(syms, nsyms, sizeof(*syms), symcmp);
1128 lseek(fd, strtab.sh_offset, SEEK_SET);
1129 if (read(fd, strings, strtab.sh_size) != strtab.sh_size) {
1130 free(s);
1131 free(syms);
1132 free(strings);
1133 return;
1135 s->disas_num_syms = nsyms;
1136 #if ELF_CLASS == ELFCLASS32
1137 s->disas_symtab.elf32 = syms;
1138 s->lookup_symbol = (lookup_symbol_t)lookup_symbolxx;
1139 #else
1140 s->disas_symtab.elf64 = syms;
1141 s->lookup_symbol = (lookup_symbol_t)lookup_symbolxx;
1142 #endif
1143 s->next = syminfos;
1144 syminfos = s;
1147 int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
1148 struct image_info * info)
1150 struct elfhdr elf_ex;
1151 struct elfhdr interp_elf_ex;
1152 struct exec interp_ex;
1153 int interpreter_fd = -1; /* avoid warning */
1154 abi_ulong load_addr, load_bias;
1155 int load_addr_set = 0;
1156 unsigned int interpreter_type = INTERPRETER_NONE;
1157 unsigned char ibcs2_interpreter;
1158 int i;
1159 abi_ulong mapped_addr;
1160 struct elf_phdr * elf_ppnt;
1161 struct elf_phdr *elf_phdata;
1162 abi_ulong elf_bss, k, elf_brk;
1163 int retval;
1164 char * elf_interpreter;
1165 abi_ulong elf_entry, interp_load_addr = 0;
1166 int status;
1167 abi_ulong start_code, end_code, start_data, end_data;
1168 abi_ulong reloc_func_desc = 0;
1169 abi_ulong elf_stack;
1170 char passed_fileno[6];
1172 ibcs2_interpreter = 0;
1173 status = 0;
1174 load_addr = 0;
1175 load_bias = 0;
1176 elf_ex = *((struct elfhdr *) bprm->buf); /* exec-header */
1177 #ifdef BSWAP_NEEDED
1178 bswap_ehdr(&elf_ex);
1179 #endif
1181 /* First of all, some simple consistency checks */
1182 if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
1183 (! elf_check_arch(elf_ex.e_machine))) {
1184 return -ENOEXEC;
1187 bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
1188 bprm->p = copy_elf_strings(bprm->envc,bprm->envp,bprm->page,bprm->p);
1189 bprm->p = copy_elf_strings(bprm->argc,bprm->argv,bprm->page,bprm->p);
1190 if (!bprm->p) {
1191 retval = -E2BIG;
1194 /* Now read in all of the header information */
1195 elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize*elf_ex.e_phnum);
1196 if (elf_phdata == NULL) {
1197 return -ENOMEM;
1200 retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET);
1201 if(retval > 0) {
1202 retval = read(bprm->fd, (char *) elf_phdata,
1203 elf_ex.e_phentsize * elf_ex.e_phnum);
1206 if (retval < 0) {
1207 perror("load_elf_binary");
1208 exit(-1);
1209 free (elf_phdata);
1210 return -errno;
1213 #ifdef BSWAP_NEEDED
1214 elf_ppnt = elf_phdata;
1215 for (i=0; i<elf_ex.e_phnum; i++, elf_ppnt++) {
1216 bswap_phdr(elf_ppnt);
1218 #endif
1219 elf_ppnt = elf_phdata;
1221 elf_bss = 0;
1222 elf_brk = 0;
1225 elf_stack = ~((abi_ulong)0UL);
1226 elf_interpreter = NULL;
1227 start_code = ~((abi_ulong)0UL);
1228 end_code = 0;
1229 start_data = 0;
1230 end_data = 0;
1231 interp_ex.a_info = 0;
1233 for(i=0;i < elf_ex.e_phnum; i++) {
1234 if (elf_ppnt->p_type == PT_INTERP) {
1235 if ( elf_interpreter != NULL )
1237 free (elf_phdata);
1238 free(elf_interpreter);
1239 close(bprm->fd);
1240 return -EINVAL;
1243 /* This is the program interpreter used for
1244 * shared libraries - for now assume that this
1245 * is an a.out format binary
1248 elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);
1250 if (elf_interpreter == NULL) {
1251 free (elf_phdata);
1252 close(bprm->fd);
1253 return -ENOMEM;
1256 retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET);
1257 if(retval >= 0) {
1258 retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz);
1260 if(retval < 0) {
1261 perror("load_elf_binary2");
1262 exit(-1);
1265 /* If the program interpreter is one of these two,
1266 then assume an iBCS2 image. Otherwise assume
1267 a native linux image. */
1269 /* JRP - Need to add X86 lib dir stuff here... */
1271 if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
1272 strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0) {
1273 ibcs2_interpreter = 1;
1276 #if 0
1277 printf("Using ELF interpreter %s\n", path(elf_interpreter));
1278 #endif
1279 if (retval >= 0) {
1280 retval = open(path(elf_interpreter), O_RDONLY);
1281 if(retval >= 0) {
1282 interpreter_fd = retval;
1284 else {
1285 perror(elf_interpreter);
1286 exit(-1);
1287 /* retval = -errno; */
1291 if (retval >= 0) {
1292 retval = lseek(interpreter_fd, 0, SEEK_SET);
1293 if(retval >= 0) {
1294 retval = read(interpreter_fd,bprm->buf,128);
1297 if (retval >= 0) {
1298 interp_ex = *((struct exec *) bprm->buf); /* aout exec-header */
1299 interp_elf_ex = *((struct elfhdr *) bprm->buf); /* elf exec-header */
1301 if (retval < 0) {
1302 perror("load_elf_binary3");
1303 exit(-1);
1304 free (elf_phdata);
1305 free(elf_interpreter);
1306 close(bprm->fd);
1307 return retval;
1310 elf_ppnt++;
1313 /* Some simple consistency checks for the interpreter */
1314 if (elf_interpreter){
1315 interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
1317 /* Now figure out which format our binary is */
1318 if ((N_MAGIC(interp_ex) != OMAGIC) && (N_MAGIC(interp_ex) != ZMAGIC) &&
1319 (N_MAGIC(interp_ex) != QMAGIC)) {
1320 interpreter_type = INTERPRETER_ELF;
1323 if (interp_elf_ex.e_ident[0] != 0x7f ||
1324 strncmp((char *)&interp_elf_ex.e_ident[1], "ELF",3) != 0) {
1325 interpreter_type &= ~INTERPRETER_ELF;
1328 if (!interpreter_type) {
1329 free(elf_interpreter);
1330 free(elf_phdata);
1331 close(bprm->fd);
1332 return -ELIBBAD;
1336 /* OK, we are done with that, now set up the arg stuff,
1337 and then start this sucker up */
1340 char * passed_p;
1342 if (interpreter_type == INTERPRETER_AOUT) {
1343 snprintf(passed_fileno, sizeof(passed_fileno), "%d", bprm->fd);
1344 passed_p = passed_fileno;
1346 if (elf_interpreter) {
1347 bprm->p = copy_elf_strings(1,&passed_p,bprm->page,bprm->p);
1348 bprm->argc++;
1351 if (!bprm->p) {
1352 free(elf_interpreter);
1353 free (elf_phdata);
1354 close(bprm->fd);
1355 return -E2BIG;
1359 /* OK, This is the point of no return */
1360 info->end_data = 0;
1361 info->end_code = 0;
1362 info->start_mmap = (abi_ulong)ELF_START_MMAP;
1363 info->mmap = 0;
1364 elf_entry = (abi_ulong) elf_ex.e_entry;
1367 * If the user has not explicitly set the guest_base, we probe here
1368 * whether we should set it automatically.
1370 if (!have_guest_base) {
1372 * Go through the ELF program header table and find out whether
1373 * any of the segments drops below our current mmap_min_addr and,
1374 * in that case, set guest_base to the corresponding address.
1376 for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum;
1377 i++, elf_ppnt++) {
1378 if (elf_ppnt->p_type != PT_LOAD)
1379 continue;
1380 if (HOST_PAGE_ALIGN(elf_ppnt->p_vaddr) < mmap_min_addr) {
1381 guest_base = HOST_PAGE_ALIGN(mmap_min_addr);
1382 break;
1387 /* Do this so that we can load the interpreter, if need be. We will
1388 change some of these later */
1389 info->rss = 0;
1390 bprm->p = setup_arg_pages(bprm->p, bprm, info);
1391 info->start_stack = bprm->p;
1393 /* Now we do a little grungy work by mmapping the ELF image into
1394 * the correct location in memory. At this point, we assume that
1395 * the image should be loaded at fixed address, not at a variable
1396 * address.
1399 for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
1400 int elf_prot = 0;
1401 int elf_flags = 0;
1402 abi_ulong error;
1404 if (elf_ppnt->p_type != PT_LOAD)
1405 continue;
1407 if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
1408 if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
1409 if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
1410 elf_flags = MAP_PRIVATE | MAP_DENYWRITE;
1411 if (elf_ex.e_type == ET_EXEC || load_addr_set) {
1412 elf_flags |= MAP_FIXED;
1413 } else if (elf_ex.e_type == ET_DYN) {
1414 /* Try and get dynamic programs out of the way of the default mmap
1415 base, as well as whatever program they might try to exec. This
1416 is because the brk will follow the loader, and is not movable. */
1417 /* NOTE: for qemu, we do a big mmap to get enough space
1418 without hardcoding any address */
1419 error = target_mmap(0, ET_DYN_MAP_SIZE,
1420 PROT_NONE, MAP_PRIVATE | MAP_ANON,
1421 -1, 0);
1422 if (error == -1) {
1423 perror("mmap");
1424 exit(-1);
1426 load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr);
1429 error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
1430 (elf_ppnt->p_filesz +
1431 TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
1432 elf_prot,
1433 (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
1434 bprm->fd,
1435 (elf_ppnt->p_offset -
1436 TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
1437 if (error == -1) {
1438 perror("mmap");
1439 exit(-1);
1442 #ifdef LOW_ELF_STACK
1443 if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
1444 elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr);
1445 #endif
1447 if (!load_addr_set) {
1448 load_addr_set = 1;
1449 load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
1450 if (elf_ex.e_type == ET_DYN) {
1451 load_bias += error -
1452 TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
1453 load_addr += load_bias;
1454 reloc_func_desc = load_bias;
1457 k = elf_ppnt->p_vaddr;
1458 if (k < start_code)
1459 start_code = k;
1460 if (start_data < k)
1461 start_data = k;
1462 k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
1463 if (k > elf_bss)
1464 elf_bss = k;
1465 if ((elf_ppnt->p_flags & PF_X) && end_code < k)
1466 end_code = k;
1467 if (end_data < k)
1468 end_data = k;
1469 k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
1470 if (k > elf_brk) elf_brk = k;
1473 elf_entry += load_bias;
1474 elf_bss += load_bias;
1475 elf_brk += load_bias;
1476 start_code += load_bias;
1477 end_code += load_bias;
1478 start_data += load_bias;
1479 end_data += load_bias;
1481 if (elf_interpreter) {
1482 if (interpreter_type & 1) {
1483 elf_entry = load_aout_interp(&interp_ex, interpreter_fd);
1485 else if (interpreter_type & 2) {
1486 elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,
1487 &interp_load_addr);
1489 reloc_func_desc = interp_load_addr;
1491 close(interpreter_fd);
1492 free(elf_interpreter);
1494 if (elf_entry == ~((abi_ulong)0UL)) {
1495 printf("Unable to load interpreter\n");
1496 free(elf_phdata);
1497 exit(-1);
1498 return 0;
1502 free(elf_phdata);
1504 if (qemu_log_enabled())
1505 load_symbols(&elf_ex, bprm->fd);
1507 if (interpreter_type != INTERPRETER_AOUT) close(bprm->fd);
1508 info->personality = (ibcs2_interpreter ? PER_SVR4 : PER_LINUX);
1510 #ifdef LOW_ELF_STACK
1511 info->start_stack = bprm->p = elf_stack - 4;
1512 #endif
1513 bprm->p = create_elf_tables(bprm->p,
1514 bprm->argc,
1515 bprm->envc,
1516 &elf_ex,
1517 load_addr, load_bias,
1518 interp_load_addr,
1519 (interpreter_type == INTERPRETER_AOUT ? 0 : 1),
1520 info);
1521 info->load_addr = reloc_func_desc;
1522 info->start_brk = info->brk = elf_brk;
1523 info->end_code = end_code;
1524 info->start_code = start_code;
1525 info->start_data = start_data;
1526 info->end_data = end_data;
1527 info->start_stack = bprm->p;
1529 /* Calling set_brk effectively mmaps the pages that we need for the bss and break
1530 sections */
1531 set_brk(elf_bss, elf_brk);
1533 padzero(elf_bss, elf_brk);
1535 #if 0
1536 printf("(start_brk) %x\n" , info->start_brk);
1537 printf("(end_code) %x\n" , info->end_code);
1538 printf("(start_code) %x\n" , info->start_code);
1539 printf("(end_data) %x\n" , info->end_data);
1540 printf("(start_stack) %x\n" , info->start_stack);
1541 printf("(brk) %x\n" , info->brk);
1542 #endif
1544 if ( info->personality == PER_SVR4 )
1546 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
1547 and some applications "depend" upon this behavior.
1548 Since we do not have the power to recompile these, we
1549 emulate the SVr4 behavior. Sigh. */
1550 mapped_addr = target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
1551 MAP_FIXED | MAP_PRIVATE, -1, 0);
1554 info->entry = elf_entry;
1556 return 0;
1559 static int load_aout_interp(void * exptr, int interp_fd)
1561 printf("a.out interpreter not yet supported\n");
1562 return(0);
1565 void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
1567 init_thread(regs, infop);