/* This is the Linux kernel elf-loading code, ported into user space */

#include <stdio.h>
#include <sys/types.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <errno.h>
#include <unistd.h>
#include <sys/mman.h>
#include <stdlib.h>
#include <string.h>

#include "qemu.h"
#include "disas.h"

/* this flag is ineffective under Linux too, and should be deleted */
#ifndef MAP_DENYWRITE
#define MAP_DENYWRITE 0
#endif

/* should probably go in elf.h */
#ifndef ELIBBAD
#define ELIBBAD 80
#endif

#ifdef TARGET_I386

#define ELF_START_MMAP 0x80000000

/*
 * This is used to ensure we don't load something for the wrong architecture.
 */
#define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )

/*
 * These are used to set parameters in the core dumps.
 */
#define ELF_CLASS       ELFCLASS32
#define ELF_DATA        ELFDATA2LSB
#define ELF_ARCH        EM_386

/* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
   starts %edx contains a pointer to a function which might be
   registered using `atexit'.  This provides a means for the
   dynamic linker to call DT_FINI functions for shared libraries
   that have been loaded before the code runs.

   A value of 0 means we have no such handler. */
#define ELF_PLAT_INIT(_r)   _r->edx = 0

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->esp = infop->start_stack;
    regs->eip = infop->entry;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE   4096

#endif

#ifdef TARGET_ARM

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_ARM )

#define ELF_CLASS   ELFCLASS32
#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA    ELFDATA2MSB
#else
#define ELF_DATA    ELFDATA2LSB
#endif
#define ELF_ARCH    EM_ARM

#define ELF_PLAT_INIT(_r)   _r->ARM_r0 = 0

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    target_long *stack = (void *)infop->start_stack;
    memset(regs, 0, sizeof(*regs));
    regs->ARM_cpsr = 0x10;
    regs->ARM_pc = infop->entry;
    regs->ARM_sp = infop->start_stack;
    regs->ARM_r2 = tswapl(stack[2]); /* envp */
    regs->ARM_r1 = tswapl(stack[1]); /* argv */
    /* XXX: it seems that r0 is zeroed after ! */
    //    regs->ARM_r0 = tswapl(stack[0]); /* argc */
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE   4096

#endif

#ifdef TARGET_SPARC
#ifdef TARGET_SPARC64

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_SPARC )

#define ELF_CLASS   ELFCLASS64
#define ELF_DATA    ELFDATA2MSB
#define ELF_ARCH    EM_SPARC

/*XXX*/
#define ELF_PLAT_INIT(_r)

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->tstate = 0;
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
    regs->y = 0;
    regs->u_regs[14] = infop->start_stack - 16 * 4;
}

#else
#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_SPARC )

#define ELF_CLASS   ELFCLASS32
#define ELF_DATA    ELFDATA2MSB
#define ELF_ARCH    EM_SPARC

/*XXX*/
#define ELF_PLAT_INIT(_r)

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->psr = 0;
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
    regs->y = 0;
    regs->u_regs[14] = infop->start_stack - 16 * 4;
}

#endif
#endif

#ifdef TARGET_PPC

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_PPC )

#define ELF_CLASS   ELFCLASS32
#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA    ELFDATA2MSB
#else
#define ELF_DATA    ELFDATA2LSB
#endif
#define ELF_ARCH    EM_PPC

/* Note that this isn't exactly what the regular kernel does,
 * but it is what the ABI wants and is needed to allow
 * execution of PPC BSD programs.
 */
#define ELF_PLAT_INIT(_r) \
do { \
    target_ulong *pos = (target_ulong *)bprm->p, tmp = 1; \
    _r->gpr[3] = bprm->argc; \
    _r->gpr[4] = (unsigned long)++pos; \
    for (; tmp != 0; pos++) \
        tmp = *pos; \
    _r->gpr[5] = (unsigned long)pos; \
} while (0)

/*
 * We need to put in some extra aux table entries to tell glibc what
 * the cache block size is, so it can use the dcbz instruction safely.
 */
#define AT_DCACHEBSIZE          19
#define AT_ICACHEBSIZE          20
#define AT_UCACHEBSIZE          21
/* A special ignored type value for PPC, for glibc compatibility.  */
#define AT_IGNOREPPC            22
/*
 * The requirements here are:
 * - keep the final alignment of sp (sp & 0xf)
 * - make sure the 32-bit value at the first 16 byte aligned position of
 *   AUXV is greater than 16 for glibc compatibility.
 *   AT_IGNOREPPC is used for that.
 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
 *   even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
 */
#define DLINFO_ARCH_ITEMS       3
#define ARCH_DLINFO                                     \
do {                                                    \
        sp -= DLINFO_ARCH_ITEMS * 2;                    \
        NEW_AUX_ENT(0, AT_DCACHEBSIZE, 0x20);           \
        NEW_AUX_ENT(1, AT_ICACHEBSIZE, 0x20);           \
        NEW_AUX_ENT(2, AT_UCACHEBSIZE, 0);              \
        /*                                              \
         * Now handle glibc compatibility.              \
         */                                             \
        sp -= 2*2;                                      \
        NEW_AUX_ENT(0, AT_IGNOREPPC, AT_IGNOREPPC);     \
        NEW_AUX_ENT(1, AT_IGNOREPPC, AT_IGNOREPPC);     \
} while (0)

static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
{
    _regs->msr = 1 << MSR_PR; /* Set user mode */
    _regs->gpr[1] = infop->start_stack;
    _regs->nip = infop->entry;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE   4096

#endif

#include "elf.h"

/*
 * MAX_ARG_PAGES defines the number of pages allocated for arguments
 * and envelope for the new program. 32 should suffice, this gives
 * a maximum env+arg of 128kB w/4KB pages!
 */
#define MAX_ARG_PAGES 32

/*
 * This structure is used to hold the arguments that are
 * used when loading binaries.
 */
struct linux_binprm {
        char buf[128];
        unsigned long page[MAX_ARG_PAGES];
        unsigned long p;
        int sh_bang;
        int fd;
        int e_uid, e_gid;
        int argc, envc;
        char * filename;        /* Name of binary */
        unsigned long loader, exec;
        int dont_iput;          /* binfmt handler has put inode */
};

struct exec
{
    unsigned int a_info;   /* Use macros N_MAGIC, etc for access */
    unsigned int a_text;   /* length of text, in bytes */
    unsigned int a_data;   /* length of data, in bytes */
    unsigned int a_bss;    /* length of uninitialized data area, in bytes */
    unsigned int a_syms;   /* length of symbol table data in file, in bytes */
    unsigned int a_entry;  /* start address */
    unsigned int a_trsize; /* length of relocation info for text, in bytes */
    unsigned int a_drsize; /* length of relocation info for data, in bytes */
};

#define N_MAGIC(exec) ((exec).a_info & 0xffff)
#define OMAGIC 0407
#define NMAGIC 0410
#define ZMAGIC 0413
#define QMAGIC 0314

/* max code+data+bss space allocated to elf interpreter */
#define INTERP_MAP_SIZE (32 * 1024 * 1024)

/* max code+data+bss+brk space allocated to ET_DYN executables */
#define ET_DYN_MAP_SIZE (128 * 1024 * 1024)

/* from personality.h */

/* Flags for bug emulation. These occupy the top three bytes. */
#define STICKY_TIMEOUTS         0x4000000
#define WHOLE_SECONDS           0x2000000

/* Personality types. These go in the low byte. Avoid using the top bit,
 * it will conflict with error returns.
 */
#define PER_MASK                (0x00ff)
#define PER_LINUX               (0x0000)
#define PER_SVR4                (0x0001 | STICKY_TIMEOUTS)
#define PER_SVR3                (0x0002 | STICKY_TIMEOUTS)
#define PER_SCOSVR3             (0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS)
#define PER_WYSEV386            (0x0004 | STICKY_TIMEOUTS)
#define PER_ISCR4               (0x0005 | STICKY_TIMEOUTS)
#define PER_BSD                 (0x0006)
#define PER_XENIX               (0x0007 | STICKY_TIMEOUTS)

/* Necessary parameters */
#define NGROUPS 32

#define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
#define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
#define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))

#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
#define INTERPRETER_ELF 2

#define DLINFO_ITEMS 11

static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
{
    memcpy(to, from, n);
}

extern unsigned long x86_stack_size;

static int load_aout_interp(void * exptr, int interp_fd);

#ifdef BSWAP_NEEDED
static void bswap_ehdr(struct elfhdr *ehdr)
{
    bswap16s(&ehdr->e_type);        /* Object file type */
    bswap16s(&ehdr->e_machine);     /* Architecture */
    bswap32s(&ehdr->e_version);     /* Object file version */
    bswaptls(&ehdr->e_entry);       /* Entry point virtual address */
    bswaptls(&ehdr->e_phoff);       /* Program header table file offset */
    bswaptls(&ehdr->e_shoff);       /* Section header table file offset */
    bswap32s(&ehdr->e_flags);       /* Processor-specific flags */
    bswap16s(&ehdr->e_ehsize);      /* ELF header size in bytes */
    bswap16s(&ehdr->e_phentsize);   /* Program header table entry size */
    bswap16s(&ehdr->e_phnum);       /* Program header table entry count */
    bswap16s(&ehdr->e_shentsize);   /* Section header table entry size */
    bswap16s(&ehdr->e_shnum);       /* Section header table entry count */
    bswap16s(&ehdr->e_shstrndx);    /* Section header string table index */
}

static void bswap_phdr(struct elf_phdr *phdr)
{
    bswap32s(&phdr->p_type);        /* Segment type */
    bswaptls(&phdr->p_offset);      /* Segment file offset */
    bswaptls(&phdr->p_vaddr);       /* Segment virtual address */
    bswaptls(&phdr->p_paddr);       /* Segment physical address */
    bswaptls(&phdr->p_filesz);      /* Segment size in file */
    bswaptls(&phdr->p_memsz);       /* Segment size in memory */
    bswap32s(&phdr->p_flags);       /* Segment flags */
    bswaptls(&phdr->p_align);       /* Segment alignment */
}

static void bswap_shdr(struct elf_shdr *shdr)
{
    bswap32s(&shdr->sh_name);
    bswap32s(&shdr->sh_type);
    bswaptls(&shdr->sh_flags);
    bswaptls(&shdr->sh_addr);
    bswaptls(&shdr->sh_offset);
    bswaptls(&shdr->sh_size);
    bswap32s(&shdr->sh_link);
    bswap32s(&shdr->sh_info);
    bswaptls(&shdr->sh_addralign);
    bswaptls(&shdr->sh_entsize);
}

static void bswap_sym(Elf32_Sym *sym)
{
    bswap32s(&sym->st_name);
    bswap32s(&sym->st_value);
    bswap32s(&sym->st_size);
    bswap16s(&sym->st_shndx);
}
#endif

static void * get_free_page(void)
{
    void *retval;

    /* User-space version of kernel get_free_page.  Returns a page-aligned
     * page-sized chunk of memory.
     */
    retval = (void *)target_mmap(0, qemu_host_page_size, PROT_READ|PROT_WRITE,
                                 MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);

    if ((long)retval == -1) {
        perror("get_free_page");
        exit(-1);
    }
    else {
        return(retval);
    }
}

static void free_page(void * pageaddr)
{
    target_munmap((unsigned long)pageaddr, qemu_host_page_size);
}

/*
 * 'copy_strings()' copies argument/envelope strings from user
 * memory to free pages in kernel mem. These are in a format ready
 * to be put directly into the top of new user memory.
 */
static unsigned long copy_strings(int argc, char ** argv, unsigned long *page,
                                  unsigned long p)
{
    char *tmp, *tmp1, *pag = NULL;
    int len, offset = 0;

    if (!p) {
        return 0;       /* bullet-proofing */
    }
    while (argc-- > 0) {
        tmp = argv[argc];
        if (!tmp) {
            fprintf(stderr, "VFS: argc is wrong");
            exit(-1);
        }
        tmp1 = tmp;
        while (*tmp++);
        len = tmp - tmp1;
        if (p < len) {  /* this shouldn't happen - 128kB */
            return 0;
        }
        while (len) {
            --p; --tmp; --len;
            if (--offset < 0) {
                offset = p % TARGET_PAGE_SIZE;
                pag = (char *) page[p/TARGET_PAGE_SIZE];
                if (!pag) {
                    pag = (char *)get_free_page();
                    page[p/TARGET_PAGE_SIZE] = (unsigned long)pag;
                    if (!pag)
                        return 0;
                }
            }
            if (len == 0 || offset == 0) {
                *(pag + offset) = *tmp;
            }
            else {
                int bytes_to_copy = (len > offset) ? offset : len;
                tmp -= bytes_to_copy;
                p -= bytes_to_copy;
                offset -= bytes_to_copy;
                len -= bytes_to_copy;
                memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
            }
        }
    }
    return p;
}

static int in_group_p(gid_t g)
{
    /* return TRUE if we're in the specified group, FALSE otherwise */
    int ngroup;
    int i;
    gid_t grouplist[NGROUPS];

    ngroup = getgroups(NGROUPS, grouplist);
    for (i = 0; i < ngroup; i++) {
        if (grouplist[i] == g) {
            return 1;
        }
    }
    return 0;
}

static int count(char ** vec)
{
    int i;

    for (i = 0; *vec; i++) {
        vec++;
    }

    return(i);
}

static int prepare_binprm(struct linux_binprm *bprm)
{
    struct stat st;
    int mode;
    int retval, id_change;

    if (fstat(bprm->fd, &st) < 0) {
        return(-errno);
    }

    mode = st.st_mode;
    if (!S_ISREG(mode)) {       /* Must be regular file */
        return(-EACCES);
    }
    if (!(mode & 0111)) {       /* Must have at least one execute bit set */
        return(-EACCES);
    }

    bprm->e_uid = geteuid();
    bprm->e_gid = getegid();
    id_change = 0;

    /* Set-uid? */
    if (mode & S_ISUID) {
        bprm->e_uid = st.st_uid;
        if (bprm->e_uid != geteuid()) {
            id_change = 1;
        }
    }

    /*
     * If setgid is set but no group execute bit then this
     * is a candidate for mandatory locking, not a setgid
     * executable.
     */
    if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
        bprm->e_gid = st.st_gid;
        if (!in_group_p(bprm->e_gid)) {
            id_change = 1;
        }
    }

    memset(bprm->buf, 0, sizeof(bprm->buf));
    retval = lseek(bprm->fd, 0L, SEEK_SET);
    if (retval >= 0) {
        retval = read(bprm->fd, bprm->buf, 128);
    }
    if (retval < 0) {
        perror("prepare_binprm");
        exit(-1);
        /* return(-errno); */
    }
    else {
        return(retval);
    }
}

unsigned long setup_arg_pages(unsigned long p, struct linux_binprm * bprm,
                              struct image_info * info)
{
    unsigned long stack_base, size, error;
    int i;

    /* Create enough stack to hold everything.  If we don't use
     * it for args, we'll use it for something else...
     */
    size = x86_stack_size;
    if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE)
        size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
    error = target_mmap(0,
                        size + qemu_host_page_size,
                        PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS,
                        -1, 0);
    if (error == -1) {
        perror("stk mmap");
        exit(-1);
    }
    /* we reserve one extra page at the top of the stack as guard */
    target_mprotect(error + size, qemu_host_page_size, PROT_NONE);

    stack_base = error + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;
    p += stack_base;

    if (bprm->loader) {
        bprm->loader += stack_base;
    }
    bprm->exec += stack_base;

    for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
        if (bprm->page[i]) {
            info->rss++;

            memcpy((void *)stack_base, (void *)bprm->page[i], TARGET_PAGE_SIZE);
            free_page((void *)bprm->page[i]);
        }
        stack_base += TARGET_PAGE_SIZE;
    }
    return p;
}

static void set_brk(unsigned long start, unsigned long end)
{
    /* page-align the start and end addresses... */
    start = HOST_PAGE_ALIGN(start);
    end = HOST_PAGE_ALIGN(end);
    if (end <= start)
        return;
    if (target_mmap(start, end - start,
                    PROT_READ | PROT_WRITE | PROT_EXEC,
                    MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0) == -1) {
        perror("cannot mmap brk");
        exit(-1);
    }
}


/* We need to explicitly zero any fractional pages after the data
   section (i.e. bss).  This would contain the junk from the file that
   should not be in memory. */
static void padzero(unsigned long elf_bss)
{
    unsigned long nbyte;
    char *fpnt;

    /* XXX: this is really a hack : if the real host page size is
       smaller than the target page size, some pages after the end
       of the file may not be mapped. A better fix would be to
       patch target_mmap(), but it is more complicated as the file
       size must be known */
    if (qemu_real_host_page_size < qemu_host_page_size) {
        unsigned long end_addr, end_addr1;
        end_addr1 = (elf_bss + qemu_real_host_page_size - 1) &
            ~(qemu_real_host_page_size - 1);
        end_addr = HOST_PAGE_ALIGN(elf_bss);
        if (end_addr1 < end_addr) {
            mmap((void *)end_addr1, end_addr - end_addr1,
                 PROT_READ|PROT_WRITE|PROT_EXEC,
                 MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
        }
    }

    nbyte = elf_bss & (qemu_host_page_size-1);
    if (nbyte) {
        nbyte = qemu_host_page_size - nbyte;
        fpnt = (char *) elf_bss;
        do {
            *fpnt++ = 0;
        } while (--nbyte);
    }
}

static unsigned int * create_elf_tables(char *p, int argc, int envc,
                                        struct elfhdr * exec,
                                        unsigned long load_addr,
                                        unsigned long load_bias,
                                        unsigned long interp_load_addr, int ibcs,
                                        struct image_info *info)
{
    target_ulong *argv, *envp;
    target_ulong *sp, *csp;
    int v;

    /*
     * Force 16 byte _final_ alignment here for generality.
     */
    sp = (unsigned int *) (~15UL & (unsigned long) p);
    csp = sp;
    csp -= (DLINFO_ITEMS + 1) * 2;
#ifdef DLINFO_ARCH_ITEMS
    csp -= DLINFO_ARCH_ITEMS*2;
#endif
    csp -= envc+1;
    csp -= argc+1;
    csp -= (!ibcs ? 3 : 1);     /* argc itself */
    if ((unsigned long)csp & 15UL)
        sp -= ((unsigned long)csp & 15UL) / sizeof(*sp);

#define NEW_AUX_ENT(nr, id, val) \
    put_user (id, sp + (nr * 2)); \
    put_user (val, sp + (nr * 2 + 1))
    sp -= 2;
    NEW_AUX_ENT (0, AT_NULL, 0);

    sp -= DLINFO_ITEMS*2;
    NEW_AUX_ENT( 0, AT_PHDR, (target_ulong)(load_addr + exec->e_phoff));
    NEW_AUX_ENT( 1, AT_PHENT, (target_ulong)(sizeof (struct elf_phdr)));
    NEW_AUX_ENT( 2, AT_PHNUM, (target_ulong)(exec->e_phnum));
    NEW_AUX_ENT( 3, AT_PAGESZ, (target_ulong)(TARGET_PAGE_SIZE));
    NEW_AUX_ENT( 4, AT_BASE, (target_ulong)(interp_load_addr));
    NEW_AUX_ENT( 5, AT_FLAGS, (target_ulong)0);
    NEW_AUX_ENT( 6, AT_ENTRY, load_bias + exec->e_entry);
    NEW_AUX_ENT( 7, AT_UID, (target_ulong) getuid());
    NEW_AUX_ENT( 8, AT_EUID, (target_ulong) geteuid());
    NEW_AUX_ENT( 9, AT_GID, (target_ulong) getgid());
    NEW_AUX_ENT(10, AT_EGID, (target_ulong) getegid()); /* last of the DLINFO_ITEMS (11) entries, indices 0..10 */
#ifdef ARCH_DLINFO
    /*
     * ARCH_DLINFO must come last so platform specific code can enforce
     * special alignment requirements on the AUXV if necessary (eg. PPC).
     */
    ARCH_DLINFO;
#endif
#undef NEW_AUX_ENT

    sp -= envc+1;
    envp = sp;
    sp -= argc+1;
    argv = sp;
    if (!ibcs) {
        put_user((target_ulong)envp,--sp);
        put_user((target_ulong)argv,--sp);
    }
    put_user(argc,--sp);
    info->arg_start = (unsigned int)((unsigned long)p & 0xffffffff);
    while (argc-->0) {
        put_user((target_ulong)p,argv++);
        do {
            get_user(v, p);
            p++;
        } while (v != 0);
    }
    put_user(0,argv);
    info->arg_end = info->env_start = (unsigned int)((unsigned long)p & 0xffffffff);
    while (envc-->0) {
        put_user((target_ulong)p,envp++);
        do {
            get_user(v, p);
            p++;
        } while (v != 0);
    }
    put_user(0,envp);
    info->env_end = (unsigned int)((unsigned long)p & 0xffffffff);
    return sp;
}

static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
                                     int interpreter_fd,
                                     unsigned long *interp_load_addr)
{
    struct elf_phdr *elf_phdata = NULL;
    struct elf_phdr *eppnt;
    unsigned long load_addr = 0;
    int load_addr_set = 0;
    int retval;
    unsigned long last_bss, elf_bss;
    unsigned long error;
    int i;

    elf_bss = 0;
    last_bss = 0;
    error = 0;

#ifdef BSWAP_NEEDED
    bswap_ehdr(interp_elf_ex);
#endif
    /* First of all, some simple consistency checks */
    if ((interp_elf_ex->e_type != ET_EXEC &&
         interp_elf_ex->e_type != ET_DYN) ||
        !elf_check_arch(interp_elf_ex->e_machine)) {
        return ~0UL;
    }

    /* Now read in all of the header information */

    if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE)
        return ~0UL;

    elf_phdata = (struct elf_phdr *)
        malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);

    if (!elf_phdata)
        return ~0UL;

    /*
     * If the size of this structure has changed, then punt, since
     * we will be doing the wrong thing.
     */
    if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
        free(elf_phdata);
        return ~0UL;
    }

    retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);
    if (retval >= 0) {
        retval = read(interpreter_fd,
                      (char *) elf_phdata,
                      sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
    }
    if (retval < 0) {
        perror("load_elf_interp");
        exit(-1);
        free(elf_phdata);
        return retval;
    }
#ifdef BSWAP_NEEDED
    eppnt = elf_phdata;
    for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
        bswap_phdr(eppnt);
    }
#endif

    if (interp_elf_ex->e_type == ET_DYN) {
        /* in order to avoid hardcoding the interpreter load
           address in qemu, we allocate a big enough memory zone */
        error = target_mmap(0, INTERP_MAP_SIZE,
                            PROT_NONE, MAP_PRIVATE | MAP_ANON,
                            -1, 0);
        if (error == -1) {
            perror("mmap");
            exit(-1);
        }
        load_addr = error;
        load_addr_set = 1;
    }

    eppnt = elf_phdata;
    for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++)
        if (eppnt->p_type == PT_LOAD) {
            int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
            int elf_prot = 0;
            unsigned long vaddr = 0;
            unsigned long k;

            if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
            if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
            if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
            if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) {
                elf_type |= MAP_FIXED;
                vaddr = eppnt->p_vaddr;
            }
            error = target_mmap(load_addr+TARGET_ELF_PAGESTART(vaddr),
                                eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr),
                                elf_prot,
                                elf_type,
                                interpreter_fd,
                                eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr));

            if (error > -1024UL) {
                /* Real error */
                close(interpreter_fd);
                free(elf_phdata);
                return ~0UL;
            }

            if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
                load_addr = error;
                load_addr_set = 1;
            }

            /*
             * Find the end of the file mapping for this phdr, and keep
             * track of the largest address we see for this.
             */
            k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
            if (k > elf_bss) elf_bss = k;

            /*
             * Do the same thing for the memory mapping - between
             * elf_bss and last_bss is the bss section.
             */
            k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
            if (k > last_bss) last_bss = k;
        }

    /* Now use mmap to map the library into memory. */

    close(interpreter_fd);

    /*
     * Now fill out the bss section.  First pad the last page up
     * to the page boundary, and then perform a mmap to make sure
     * that there are zeromapped pages up to and including the last
     * bss page.
     */
    padzero(elf_bss);
    elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */

    /* Map the last of the bss segment */
    if (last_bss > elf_bss) {
        target_mmap(elf_bss, last_bss-elf_bss,
                    PROT_READ|PROT_WRITE|PROT_EXEC,
                    MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    }
    free(elf_phdata);

    *interp_load_addr = load_addr;
    return ((unsigned long) interp_elf_ex->e_entry) + load_addr;
}

/* Best attempt to load symbols from this ELF object. */
static void load_symbols(struct elfhdr *hdr, int fd)
{
    unsigned int i;
    struct elf_shdr sechdr, symtab, strtab;
    char *strings;
    struct syminfo *s;

    lseek(fd, hdr->e_shoff, SEEK_SET);
    for (i = 0; i < hdr->e_shnum; i++) {
        if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr))
            return;
#ifdef BSWAP_NEEDED
        bswap_shdr(&sechdr);
#endif
        if (sechdr.sh_type == SHT_SYMTAB) {
            symtab = sechdr;
            lseek(fd, hdr->e_shoff
                  + sizeof(sechdr) * sechdr.sh_link, SEEK_SET);
            if (read(fd, &strtab, sizeof(strtab))
                != sizeof(strtab))
                return;
#ifdef BSWAP_NEEDED
            bswap_shdr(&strtab);
#endif
            goto found;
        }
    }
    return; /* Shouldn't happen... */

 found:
    /* Now know where the strtab and symtab are.  Snarf them. */
    s = malloc(sizeof(*s));
    s->disas_symtab = malloc(symtab.sh_size);
    s->disas_strtab = strings = malloc(strtab.sh_size);
    if (!s->disas_symtab || !s->disas_strtab)
        return;

    lseek(fd, symtab.sh_offset, SEEK_SET);
    if (read(fd, s->disas_symtab, symtab.sh_size) != symtab.sh_size)
        return;

#ifdef BSWAP_NEEDED
    for (i = 0; i < symtab.sh_size / sizeof(struct elf_sym); i++)
        bswap_sym(s->disas_symtab + sizeof(struct elf_sym)*i);
#endif

    lseek(fd, strtab.sh_offset, SEEK_SET);
    if (read(fd, strings, strtab.sh_size) != strtab.sh_size)
        return;
    s->disas_num_syms = symtab.sh_size / sizeof(struct elf_sym);
    s->next = syminfos;
    syminfos = s;
}

static int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
                           struct image_info * info)
{
    struct elfhdr elf_ex;
    struct elfhdr interp_elf_ex;
    struct exec interp_ex;
    int interpreter_fd = -1; /* avoid warning */
    unsigned long load_addr, load_bias;
    int load_addr_set = 0;
    unsigned int interpreter_type = INTERPRETER_NONE;
    unsigned char ibcs2_interpreter;
    int i;
    unsigned long mapped_addr;
    struct elf_phdr * elf_ppnt;
    struct elf_phdr *elf_phdata;
    unsigned long elf_bss, k, elf_brk;
    int retval;
    char * elf_interpreter;
    unsigned long elf_entry, interp_load_addr = 0;
    int status;
    unsigned long start_code, end_code, end_data;
    unsigned long elf_stack;
    char passed_fileno[6];

    ibcs2_interpreter = 0;
    status = 0;
    load_addr = 0;
    load_bias = 0;
    elf_ex = *((struct elfhdr *) bprm->buf);          /* exec-header */
#ifdef BSWAP_NEEDED
    bswap_ehdr(&elf_ex);
#endif

    if (elf_ex.e_ident[0] != 0x7f ||
        strncmp(&elf_ex.e_ident[1], "ELF", 3) != 0) {
        return -ENOEXEC;
    }

    /* First of all, some simple consistency checks */
    if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
        (! elf_check_arch(elf_ex.e_machine))) {
        return -ENOEXEC;
    }

    /* Now read in all of the header information */
    elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize*elf_ex.e_phnum);
    if (elf_phdata == NULL) {
        return -ENOMEM;
    }

    retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET);
    if (retval > 0) {
        retval = read(bprm->fd, (char *) elf_phdata,
                      elf_ex.e_phentsize * elf_ex.e_phnum);
    }

    if (retval < 0) {
        perror("load_elf_binary");
        exit(-1);
        free(elf_phdata);
        return -errno;
    }

#ifdef BSWAP_NEEDED
    elf_ppnt = elf_phdata;
    for (i = 0; i < elf_ex.e_phnum; i++, elf_ppnt++) {
        bswap_phdr(elf_ppnt);
    }
#endif
    elf_ppnt = elf_phdata;

    elf_bss = 0;
    elf_brk = 0;

    elf_stack = ~0UL;
    elf_interpreter = NULL;
    start_code = ~0UL;
    end_code = 0;
    end_data = 0;

    for (i = 0; i < elf_ex.e_phnum; i++) {
        if (elf_ppnt->p_type == PT_INTERP) {
            if (elf_interpreter != NULL)
            {
                free(elf_phdata);
                free(elf_interpreter);
                close(bprm->fd);
                return -EINVAL;
            }

            /* This is the program interpreter used for
             * shared libraries - for now assume that this
             * is an a.out format binary
             */

            elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);

            if (elf_interpreter == NULL) {
                free(elf_phdata);
                close(bprm->fd);
                return -ENOMEM;
            }

            retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET);
            if (retval >= 0) {
                retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz);
            }
            if (retval < 0) {
                perror("load_elf_binary2");
                exit(-1);
            }

            /* If the program interpreter is one of these two,
               then assume an iBCS2 image. Otherwise assume
               a native linux image. */

            /* JRP - Need to add X86 lib dir stuff here... */

            if (strcmp(elf_interpreter, "/usr/lib/libc.so.1") == 0 ||
                strcmp(elf_interpreter, "/usr/lib/ld.so.1") == 0) {
                ibcs2_interpreter = 1;
            }

#if 0
            printf("Using ELF interpreter %s\n", elf_interpreter);
#endif
            if (retval >= 0) {
                retval = open(path(elf_interpreter), O_RDONLY);
                if (retval >= 0) {
                    interpreter_fd = retval;
                }
                else {
                    perror(elf_interpreter);
                    exit(-1);
                    /* retval = -errno; */
                }
            }

            if (retval >= 0) {
                retval = lseek(interpreter_fd, 0, SEEK_SET);
                if (retval >= 0) {
                    retval = read(interpreter_fd, bprm->buf, 128);
                }
            }
            if (retval >= 0) {
                interp_ex = *((struct exec *) bprm->buf); /* aout exec-header */
                interp_elf_ex = *((struct elfhdr *) bprm->buf); /* elf exec-header */
            }
            if (retval < 0) {
                perror("load_elf_binary3");
                exit(-1);
                free(elf_phdata);
                free(elf_interpreter);
                close(bprm->fd);
                return retval;
            }
        }
        elf_ppnt++;
    }

    /* Some simple consistency checks for the interpreter */
    if (elf_interpreter) {
        interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;

        /* Now figure out which format our binary is */
        if ((N_MAGIC(interp_ex) != OMAGIC) && (N_MAGIC(interp_ex) != ZMAGIC) &&
            (N_MAGIC(interp_ex) != QMAGIC)) {
            interpreter_type = INTERPRETER_ELF;
        }

        if (interp_elf_ex.e_ident[0] != 0x7f ||
            strncmp(&interp_elf_ex.e_ident[1], "ELF", 3) != 0) {
            interpreter_type &= ~INTERPRETER_ELF;
        }

        if (!interpreter_type) {
            free(elf_interpreter);
            free(elf_phdata);
            close(bprm->fd);
            return -ELIBBAD;
        }
    }

    /* OK, we are done with that, now set up the arg stuff,
       and then start this sucker up */

    if (!bprm->sh_bang) {
        char * passed_p;

        if (interpreter_type == INTERPRETER_AOUT) {
            snprintf(passed_fileno, sizeof(passed_fileno), "%d", bprm->fd);
            passed_p = passed_fileno;

            if (elf_interpreter) {
                bprm->p = copy_strings(1, &passed_p, bprm->page, bprm->p);
                bprm->argc++;
            }
        }
        if (!bprm->p) {
            if (elf_interpreter) {
                free(elf_interpreter);
            }
            free(elf_phdata);
            close(bprm->fd);
            return -E2BIG;
        }
    }

    /* OK, this is the point of no return */
    info->end_data = 0;
    info->end_code = 0;
    info->start_mmap = (unsigned long)ELF_START_MMAP;
    info->mmap = 0;
    elf_entry = (unsigned long) elf_ex.e_entry;

    /* Do this so that we can load the interpreter, if need be.  We will
       change some of these later */
    info->rss = 0;
    bprm->p = setup_arg_pages(bprm->p, bprm, info);
    info->start_stack = bprm->p;

    /* Now we do a little grungy work by mmaping the ELF image into
     * the correct location in memory.  At this point, we assume that
     * the image should be loaded at fixed address, not at a variable
     * address.
     */

    for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
        int elf_prot = 0;
        int elf_flags = 0;
        unsigned long error;

        if (elf_ppnt->p_type != PT_LOAD)
            continue;

        if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
        if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
        if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
        elf_flags = MAP_PRIVATE | MAP_DENYWRITE;
        if (elf_ex.e_type == ET_EXEC || load_addr_set) {
            elf_flags |= MAP_FIXED;
        } else if (elf_ex.e_type == ET_DYN) {
            /* Try and get dynamic programs out of the way of the default mmap
               base, as well as whatever program they might try to exec.  This
               is because the brk will follow the loader, and is not movable. */
            /* NOTE: for qemu, we do a big mmap to get enough space
               without hardcoding any address */
            error = target_mmap(0, ET_DYN_MAP_SIZE,
                                PROT_NONE, MAP_PRIVATE | MAP_ANON,
                                -1, 0);
            if (error == -1) {
                perror("mmap");
                exit(-1);
            }
            load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr);
        }

        error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
                            (elf_ppnt->p_filesz +
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
                            elf_prot,
                            (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
                            bprm->fd,
                            (elf_ppnt->p_offset -
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
        if (error == -1) {
            perror("mmap");
            exit(-1);
        }

#ifdef LOW_ELF_STACK
        if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
            elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr);
#endif

        if (!load_addr_set) {
            load_addr_set = 1;
            load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
            if (elf_ex.e_type == ET_DYN) {
                load_bias += error -
                    TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
                load_addr += load_bias;
            }
        }
        k = elf_ppnt->p_vaddr;
        if (k < start_code)
            start_code = k;
        k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
        if (k > elf_bss)
            elf_bss = k;
        if ((elf_ppnt->p_flags & PF_X) && end_code < k)
            end_code = k;
        if (end_data < k)
            end_data = k;
        k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
        if (k > elf_brk) elf_brk = k;
    }

    elf_entry += load_bias;
    elf_bss += load_bias;
    elf_brk += load_bias;
    start_code += load_bias;
    end_code += load_bias;
    //    start_data += load_bias;
    end_data += load_bias;

    if (elf_interpreter) {
        if (interpreter_type & 1) {
            elf_entry = load_aout_interp(&interp_ex, interpreter_fd);
        }
        else if (interpreter_type & 2) {
            elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,
                                        &interp_load_addr);
        }

        close(interpreter_fd);
        free(elf_interpreter);

        if (elf_entry == ~0UL) {
            printf("Unable to load interpreter\n");
            free(elf_phdata);
            exit(-1);
            return 0;
        }
    }

    free(elf_phdata);

    if (loglevel)
        load_symbols(&elf_ex, bprm->fd);

    if (interpreter_type != INTERPRETER_AOUT) close(bprm->fd);
    info->personality = (ibcs2_interpreter ? PER_SVR4 : PER_LINUX);

#ifdef LOW_ELF_STACK
    info->start_stack = bprm->p = elf_stack - 4;
#endif
    bprm->p = (unsigned long)
        create_elf_tables((char *)bprm->p,
                          bprm->argc,
                          bprm->envc,
                          &elf_ex,
                          load_addr, load_bias,
                          interp_load_addr,
                          (interpreter_type == INTERPRETER_AOUT ? 0 : 1),
                          info);
    if (interpreter_type == INTERPRETER_AOUT)
        info->arg_start += strlen(passed_fileno) + 1;
    info->start_brk = info->brk = elf_brk;
    info->end_code = end_code;
    info->start_code = start_code;
    info->end_data = end_data;
    info->start_stack = bprm->p;

    /* Calling set_brk effectively mmaps the pages that we need for the bss and break
       sections */
    set_brk(elf_bss, elf_brk);

    padzero(elf_bss);

#if 0
    printf("(start_brk) %x\n" , info->start_brk);
    printf("(end_code) %x\n" , info->end_code);
    printf("(start_code) %x\n" , info->start_code);
    printf("(end_data) %x\n" , info->end_data);
    printf("(start_stack) %x\n" , info->start_stack);
    printf("(brk) %x\n" , info->brk);
#endif

    if ( info->personality == PER_SVR4 )
    {
            /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
               and some applications "depend" upon this behavior.
               Since we do not have the power to recompile these, we
               emulate the SVr4 behavior.  Sigh.  */
            mapped_addr = target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
                                      MAP_FIXED | MAP_PRIVATE, -1, 0);
    }

#ifdef ELF_PLAT_INIT
    /*
     * The ABI may specify that certain registers be set up in special
     * ways (on i386 %edx is the address of a DT_FINI function, for
     * example).  This macro performs whatever initialization to
     * the regs structure is required.
     */
    ELF_PLAT_INIT(regs);
#endif

    info->entry = elf_entry;

    return 0;
}

int elf_exec(const char * filename, char ** argv, char ** envp,
             struct target_pt_regs * regs, struct image_info *infop)
{
    struct linux_binprm bprm;
    int retval;
    int i;

    bprm.p = TARGET_PAGE_SIZE*MAX_ARG_PAGES-sizeof(unsigned int);
    for (i = 0 ; i < MAX_ARG_PAGES ; i++)       /* clear page-table */
        bprm.page[i] = 0;
    retval = open(filename, O_RDONLY);
    if (retval < 0)
        return retval;
    bprm.fd = retval;
    bprm.filename = (char *)filename;
    bprm.sh_bang = 0;
    bprm.loader = 0;
    bprm.exec = 0;
    bprm.dont_iput = 0;
    bprm.argc = count(argv);
    bprm.envc = count(envp);

    retval = prepare_binprm(&bprm);

    if (retval >= 0) {
        bprm.p = copy_strings(1, &bprm.filename, bprm.page, bprm.p);
        bprm.exec = bprm.p;
        bprm.p = copy_strings(bprm.envc, envp, bprm.page, bprm.p);
        bprm.p = copy_strings(bprm.argc, argv, bprm.page, bprm.p);
        if (!bprm.p) {
            retval = -E2BIG;
        }
    }

    if (retval >= 0) {
        retval = load_elf_binary(&bprm, regs, infop);
    }
    if (retval >= 0) {
        /* success.  Initialize important registers */
        init_thread(regs, infop);
        return retval;
    }

    /* Something went wrong, return the inode and free the argument pages */
    for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
        free_page((void *)bprm.page[i]);
    }
    return(retval);
}

static int load_aout_interp(void * exptr, int interp_fd)
{
    printf("a.out interpreter not yet supported\n");
    return(0);
}