/* This is the Linux kernel elf-loading code, ported into user space */
#include <stdio.h>
#include <sys/types.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <errno.h>
#include <unistd.h>
#include <sys/mman.h>
#include <stdlib.h>
#include <string.h>

#include "qemu.h"
#include "disas.h"
/* this flag is ineffective under Linux too and should be deleted */
#ifndef MAP_DENYWRITE
#define MAP_DENYWRITE 0
#endif

/* should probably go in elf.h */
#ifndef ELIBBAD
#define ELIBBAD 80
#endif
#ifdef TARGET_I386

#define ELF_PLATFORM get_elf_platform()

static const char *get_elf_platform(void)
{
    static char elf_platform[] = "i386";
    int family = (global_env->cpuid_version >> 8) & 0xff;
    if (family > 6)
        family = 6;
    if (family >= 3)
        elf_platform[1] = '0' + family;
    return elf_platform;
}

#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    return global_env->cpuid_features;
}

#define ELF_START_MMAP 0x80000000
/*
 * This is used to ensure we don't load something for the wrong architecture.
 */
#define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )

/*
 * These are used to set parameters in the core dumps.
 */
#define ELF_CLASS       ELFCLASS32
#define ELF_DATA        ELFDATA2LSB
#define ELF_ARCH        EM_386
/* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
   starts %edx contains a pointer to a function which might be
   registered using `atexit'.  This provides a means for the
   dynamic linker to call DT_FINI functions for shared libraries
   that have been loaded before the code runs.

   A value of 0 tells us we have no such handler. */
#define ELF_PLAT_INIT(_r)       _r->edx = 0
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->esp = infop->start_stack;
    regs->eip = infop->entry;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

#endif
#ifdef TARGET_ARM

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_ARM )

#define ELF_CLASS       ELFCLASS32
#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA        ELFDATA2MSB
#else
#define ELF_DATA        ELFDATA2LSB
#endif
#define ELF_ARCH        EM_ARM

#define ELF_PLAT_INIT(_r)       _r->ARM_r0 = 0
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    target_long stack = infop->start_stack;
    memset(regs, 0, sizeof(*regs));
    regs->ARM_cpsr = 0x10;
    if (infop->entry & 1)
        regs->ARM_cpsr |= CPSR_T;
    regs->ARM_pc = infop->entry & 0xfffffffe;
    regs->ARM_sp = infop->start_stack;
    regs->ARM_r2 = tgetl(stack + 8); /* envp */
    regs->ARM_r1 = tgetl(stack + 4); /* argv */
    /* XXX: it seems that r0 is zeroed after ! */
    // regs->ARM_r0 = tgetl(stack); /* argc */
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096
enum
{
    ARM_HWCAP_ARM_SWP       = 1 << 0,
    ARM_HWCAP_ARM_HALF      = 1 << 1,
    ARM_HWCAP_ARM_THUMB     = 1 << 2,
    ARM_HWCAP_ARM_26BIT     = 1 << 3,
    ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
    ARM_HWCAP_ARM_FPA       = 1 << 5,
    ARM_HWCAP_ARM_VFP       = 1 << 6,
    ARM_HWCAP_ARM_EDSP      = 1 << 7,
};

#define ELF_HWCAP (ARM_HWCAP_ARM_SWP | ARM_HWCAP_ARM_HALF \
                   | ARM_HWCAP_ARM_THUMB | ARM_HWCAP_ARM_FAST_MULT \
                   | ARM_HWCAP_ARM_FPA | ARM_HWCAP_ARM_VFP)

#endif
#ifdef TARGET_SPARC
#ifdef TARGET_SPARC64

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_SPARC )

#define ELF_CLASS       ELFCLASS64
#define ELF_DATA        ELFDATA2MSB
#define ELF_ARCH        EM_SPARC

/*XXX*/
#define ELF_PLAT_INIT(_r)

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->tstate = 0;
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
    regs->y = 0;
    regs->u_regs[14] = infop->start_stack - 16 * 4;
}

#else
#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_SPARC )

#define ELF_CLASS       ELFCLASS32
#define ELF_DATA        ELFDATA2MSB
#define ELF_ARCH        EM_SPARC

/*XXX*/
#define ELF_PLAT_INIT(_r)

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->psr = 0;
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
    regs->y = 0;
    regs->u_regs[14] = infop->start_stack - 16 * 4;
}

#endif
#endif
#ifdef TARGET_PPC

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_PPC )

#define ELF_CLASS       ELFCLASS32
#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA        ELFDATA2MSB
#else
#define ELF_DATA        ELFDATA2LSB
#endif
#define ELF_ARCH        EM_PPC
/* Note that this isn't exactly what the regular kernel does,
 * but this is what the ABI wants and is needed to allow
 * execution of PPC BSD programs.
 */
#define ELF_PLAT_INIT(_r) \
do { \
    target_ulong *pos = (target_ulong *)bprm->p, tmp = 1; \
    _r->gpr[3] = bprm->argc; \
    _r->gpr[4] = (unsigned long)++pos; \
    for (; tmp != 0; pos++) \
        tmp = ldl(pos); \
    _r->gpr[5] = (unsigned long)pos; \
} while (0)
/*
 * We need to put in some extra aux table entries to tell glibc what
 * the cache block size is, so it can use the dcbz instruction safely.
 */
#define AT_DCACHEBSIZE          19
#define AT_ICACHEBSIZE          20
#define AT_UCACHEBSIZE          21
/* A special ignored type value for PPC, for glibc compatibility. */
#define AT_IGNOREPPC            22
/*
 * The requirements here are:
 * - keep the final alignment of sp (sp & 0xf)
 * - make sure the 32-bit value at the first 16 byte aligned position of
 *   AUXV is greater than 16 for glibc compatibility.
 *   AT_IGNOREPPC is used for that.
 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
 *   even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
 */
#define DLINFO_ARCH_ITEMS       5
#define ARCH_DLINFO                                     \
do {                                                    \
    NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20);                  \
    NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20);                  \
    NEW_AUX_ENT(AT_UCACHEBSIZE, 0);                     \
    /*                                                  \
     * Now handle glibc compatibility.                  \
     */                                                 \
    NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);            \
    NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);            \
} while (0)
static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
{
    _regs->msr = 1 << MSR_PR; /* Set user mode */
    _regs->gpr[1] = infop->start_stack;
    _regs->nip = infop->entry;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

#endif
#ifdef TARGET_MIPS

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_MIPS )

#define ELF_CLASS       ELFCLASS32
#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA        ELFDATA2MSB
#else
#define ELF_DATA        ELFDATA2LSB
#endif
#define ELF_ARCH        EM_MIPS

#define ELF_PLAT_INIT(_r)

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->cp0_status = CP0St_UM;
    regs->cp0_epc = infop->entry;
    regs->regs[29] = infop->start_stack;
}

#endif /* TARGET_MIPS */
#ifdef TARGET_SH4

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_SH )

#define ELF_CLASS       ELFCLASS32
#define ELF_DATA        ELFDATA2LSB
#define ELF_ARCH        EM_SH

#define ELF_PLAT_INIT(_r)  /* XXXXX */

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    /* Check other registers XXXXX */
    regs->pc = infop->entry;
    regs->regs[15] = infop->start_stack - 16 * 4;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

#endif
#ifndef ELF_PLATFORM
#define ELF_PLATFORM (NULL)
#endif

#ifndef ELF_HWCAP
#define ELF_HWCAP 0
#endif

#include "elf.h"
/*
 * MAX_ARG_PAGES defines the number of pages allocated for arguments
 * and envelope for the new program. 32 should suffice, this gives
 * a maximum env+arg of 128kB w/4KB pages!
 */
#define MAX_ARG_PAGES 32
/*
 * This structure is used to hold the arguments that are
 * used when loading binaries.
 */
struct linux_binprm {
    char buf[128];
    void *page[MAX_ARG_PAGES];
    unsigned long p;
    int sh_bang;
    int fd;
    int e_uid, e_gid;
    int argc, envc;
    char *filename;             /* Name of binary */
    unsigned long loader, exec;
    int dont_iput;              /* binfmt handler has put inode */
};
struct exec
{
    unsigned int a_info;        /* Use macros N_MAGIC, etc for access */
    unsigned int a_text;        /* length of text, in bytes */
    unsigned int a_data;        /* length of data, in bytes */
    unsigned int a_bss;         /* length of uninitialized data area, in bytes */
    unsigned int a_syms;        /* length of symbol table data in file, in bytes */
    unsigned int a_entry;       /* start address */
    unsigned int a_trsize;      /* length of relocation info for text, in bytes */
    unsigned int a_drsize;      /* length of relocation info for data, in bytes */
};
#define N_MAGIC(exec)   ((exec).a_info & 0xffff)
#define OMAGIC 0407
#define NMAGIC 0410
#define ZMAGIC 0413
#define QMAGIC 0314

/* max code+data+bss space allocated to elf interpreter */
#define INTERP_MAP_SIZE (32 * 1024 * 1024)

/* max code+data+bss+brk space allocated to ET_DYN executables */
#define ET_DYN_MAP_SIZE (128 * 1024 * 1024)
/* from personality.h */

/* Flags for bug emulation. These occupy the top three bytes. */
#define STICKY_TIMEOUTS         0x4000000
#define WHOLE_SECONDS           0x2000000

/* Personality types. These go in the low byte. Avoid using the top bit,
 * it will conflict with error returns.
 */
#define PER_MASK                (0x00ff)
#define PER_LINUX               (0x0000)
#define PER_SVR4                (0x0001 | STICKY_TIMEOUTS)
#define PER_SVR3                (0x0002 | STICKY_TIMEOUTS)
#define PER_SCOSVR3             (0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS)
#define PER_WYSEV386            (0x0004 | STICKY_TIMEOUTS)
#define PER_ISCR4               (0x0005 | STICKY_TIMEOUTS)
#define PER_BSD                 (0x0006)
#define PER_XENIX               (0x0007 | STICKY_TIMEOUTS)

/* Necessary parameters */
#define NGROUPS 32
#define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
#define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
#define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))

#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
#define INTERPRETER_ELF 2

#define DLINFO_ITEMS 12

static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
{
    memcpy(to, from, n);
}

extern unsigned long x86_stack_size;

static int load_aout_interp(void * exptr, int interp_fd);
#ifdef BSWAP_NEEDED
static void bswap_ehdr(struct elfhdr *ehdr)
{
    bswap16s(&ehdr->e_type);            /* Object file type */
    bswap16s(&ehdr->e_machine);         /* Architecture */
    bswap32s(&ehdr->e_version);         /* Object file version */
    bswaptls(&ehdr->e_entry);           /* Entry point virtual address */
    bswaptls(&ehdr->e_phoff);           /* Program header table file offset */
    bswaptls(&ehdr->e_shoff);           /* Section header table file offset */
    bswap32s(&ehdr->e_flags);           /* Processor-specific flags */
    bswap16s(&ehdr->e_ehsize);          /* ELF header size in bytes */
    bswap16s(&ehdr->e_phentsize);       /* Program header table entry size */
    bswap16s(&ehdr->e_phnum);           /* Program header table entry count */
    bswap16s(&ehdr->e_shentsize);       /* Section header table entry size */
    bswap16s(&ehdr->e_shnum);           /* Section header table entry count */
    bswap16s(&ehdr->e_shstrndx);        /* Section header string table index */
}

static void bswap_phdr(struct elf_phdr *phdr)
{
    bswap32s(&phdr->p_type);            /* Segment type */
    bswaptls(&phdr->p_offset);          /* Segment file offset */
    bswaptls(&phdr->p_vaddr);           /* Segment virtual address */
    bswaptls(&phdr->p_paddr);           /* Segment physical address */
    bswaptls(&phdr->p_filesz);          /* Segment size in file */
    bswaptls(&phdr->p_memsz);           /* Segment size in memory */
    bswap32s(&phdr->p_flags);           /* Segment flags */
    bswaptls(&phdr->p_align);           /* Segment alignment */
}

static void bswap_shdr(struct elf_shdr *shdr)
{
    bswap32s(&shdr->sh_name);
    bswap32s(&shdr->sh_type);
    bswaptls(&shdr->sh_flags);
    bswaptls(&shdr->sh_addr);
    bswaptls(&shdr->sh_offset);
    bswaptls(&shdr->sh_size);
    bswap32s(&shdr->sh_link);
    bswap32s(&shdr->sh_info);
    bswaptls(&shdr->sh_addralign);
    bswaptls(&shdr->sh_entsize);
}

static void bswap_sym(Elf32_Sym *sym)
{
    bswap32s(&sym->st_name);
    bswap32s(&sym->st_value);
    bswap32s(&sym->st_size);
    bswap16s(&sym->st_shndx);
}
#endif
/*
 * 'copy_strings()' copies argument/envelope strings from user
 * memory to free pages in kernel mem. These are in a format ready
 * to be put directly into the top of new user memory.
 */
static unsigned long copy_strings(int argc, char ** argv, void **page,
                                  unsigned long p)
{
    char *tmp, *tmp1, *pag = NULL;
    int len, offset = 0;

    if (!p) {
        return 0;       /* bullet-proofing */
    }
    while (argc-- > 0) {
        tmp = argv[argc];
        if (!tmp) {
            fprintf(stderr, "VFS: argc is wrong");
            exit(-1);
        }
        tmp1 = tmp;
        while (*tmp++);
        len = tmp - tmp1;
        if (p < len) {  /* this shouldn't happen - 128kB */
            return 0;
        }
        while (len) {
            --p; --tmp; --len;
            if (--offset < 0) {
                offset = p % TARGET_PAGE_SIZE;
                pag = (char *)page[p/TARGET_PAGE_SIZE];
                if (!pag) {
                    pag = (char *)malloc(TARGET_PAGE_SIZE);
                    page[p/TARGET_PAGE_SIZE] = pag;
                    if (!pag)
                        return 0;
                }
            }
            if (len == 0 || offset == 0) {
                *(pag + offset) = *tmp;
            }
            else {
                int bytes_to_copy = (len > offset) ? offset : len;
                tmp -= bytes_to_copy;
                p -= bytes_to_copy;
                offset -= bytes_to_copy;
                len -= bytes_to_copy;
                memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
            }
        }
    }
    return p;
}
static int in_group_p(gid_t g)
{
    /* return TRUE if we're in the specified group, FALSE otherwise */
    int ngroup;
    int i;
    gid_t grouplist[NGROUPS];

    ngroup = getgroups(NGROUPS, grouplist);
    for(i = 0; i < ngroup; i++) {
        if(grouplist[i] == g) {
            return 1;
        }
    }
    return 0;
}
static int count(char ** vec)
{
    int i;

    for(i = 0; *vec; i++) {
        vec++;
    }

    return(i);
}
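
/* Fill in the linux_binprm structure from the executable's file
   descriptor: check that it is a regular, executable file, compute the
   effective uid/gid (honouring set-uid/set-gid bits), and read the
   first 128 bytes of the file into bprm->buf. */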
static int prepare_binprm(struct linux_binprm *bprm)
{
    struct stat st;
    int mode;
    int retval, id_change;

    if(fstat(bprm->fd, &st) < 0) {
        return(-errno);
    }

    mode = st.st_mode;
    if(!S_ISREG(mode)) {        /* Must be regular file */
        return(-EACCES);
    }
    if(!(mode & 0111)) {        /* Must have at least one execute bit set */
        return(-EACCES);
    }

    bprm->e_uid = geteuid();
    bprm->e_gid = getegid();
    id_change = 0;

    /* Set-uid? */
    if(mode & S_ISUID) {
        bprm->e_uid = st.st_uid;
        if(bprm->e_uid != geteuid()) {
            id_change = 1;
        }
    }

    /* Set-gid? */
    /*
     * If setgid is set but no group execute bit then this
     * is a candidate for mandatory locking, not a setgid
     * executable.
     */
    if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
        bprm->e_gid = st.st_gid;
        if (!in_group_p(bprm->e_gid)) {
            id_change = 1;
        }
    }

    memset(bprm->buf, 0, sizeof(bprm->buf));
    retval = lseek(bprm->fd, 0L, SEEK_SET);
    if(retval >= 0) {
        retval = read(bprm->fd, bprm->buf, 128);
    }
    if(retval < 0) {
        perror("prepare_binprm");
        exit(-1);
        /* return(-errno); */
    }
    else {
        return(retval);
    }
}
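
/* Copy a block of data from host memory into the target's address
   space, using lock_user()/unlock_user() to obtain a writable host
   pointer for the target range. */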
static inline void memcpy_to_target(target_ulong dest, const void *src,
                                    unsigned long len)
{
    void *host_ptr;

    host_ptr = lock_user(dest, len, 0);
    memcpy(host_ptr, src, len);
    unlock_user(host_ptr, dest, 1);
}
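
/* Allocate the target stack, reserve a guard page above it, and copy
   the argument/environment pages collected in bprm->page[] to the top
   of the new stack.  Returns the adjusted value of p. */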
unsigned long setup_arg_pages(target_ulong p, struct linux_binprm * bprm,
                              struct image_info * info)
{
    target_ulong stack_base, size, error;
    int i;

    /* Create enough stack to hold everything.  If we don't use
     * it for args, we'll use it for something else...
     */
    size = x86_stack_size;
    if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE)
        size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
    error = target_mmap(0,
                        size + qemu_host_page_size,
                        PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS,
                        -1, 0);
    if (error == -1) {
        perror("stk mmap");
        exit(-1);
    }
    /* we reserve one extra page at the top of the stack as guard */
    target_mprotect(error + size, qemu_host_page_size, PROT_NONE);

    stack_base = error + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;
    p += stack_base;

    if (bprm->loader) {
        bprm->loader += stack_base;
    }
    bprm->exec += stack_base;

    for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
        if (bprm->page[i]) {
            info->rss++;

            memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE);
            free(bprm->page[i]);
        }
        stack_base += TARGET_PAGE_SIZE;
    }
    return p;
}
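
/* Map anonymous, zero-filled memory for the region between start and
   end (used for the bss and brk areas). */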
static void set_brk(unsigned long start, unsigned long end)
{
    /* page-align the start and end addresses... */
    start = HOST_PAGE_ALIGN(start);
    end = HOST_PAGE_ALIGN(end);
    if (end <= start)
        return;
    if(target_mmap(start, end - start,
                   PROT_READ | PROT_WRITE | PROT_EXEC,
                   MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0) == -1) {
        perror("cannot mmap brk");
        exit(-1);
    }
}
/* We need to explicitly zero any fractional pages after the data
   section (i.e. bss).  This would contain the junk from the file that
   should not be in memory. */
static void padzero(unsigned long elf_bss)
{
    unsigned long nbyte;

    /* XXX: this is really a hack: if the real host page size is
       smaller than the target page size, some pages after the end
       of the file may not be mapped. A better fix would be to
       patch target_mmap(), but it is more complicated as the file
       size must be known */
    if (qemu_real_host_page_size < qemu_host_page_size) {
        unsigned long end_addr, end_addr1;
        end_addr1 = (elf_bss + qemu_real_host_page_size - 1) &
            ~(qemu_real_host_page_size - 1);
        end_addr = HOST_PAGE_ALIGN(elf_bss);
        if (end_addr1 < end_addr) {
            mmap((void *)end_addr1, end_addr - end_addr1,
                 PROT_READ|PROT_WRITE|PROT_EXEC,
                 MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
        }
    }

    nbyte = elf_bss & (qemu_host_page_size-1);
    if (nbyte) {
        nbyte = qemu_host_page_size - nbyte;
        do {
            tput8(elf_bss, 0);
            elf_bss++;
        } while (--nbyte);
    }
}
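
/* Build the initial stack image seen by the new program: the optional
   platform string, the ELF auxiliary vector, then envp, argv and argc.
   Returns the resulting stack pointer. */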
static unsigned long create_elf_tables(target_ulong p, int argc, int envc,
                                       struct elfhdr * exec,
                                       unsigned long load_addr,
                                       unsigned long load_bias,
                                       unsigned long interp_load_addr, int ibcs,
                                       struct image_info *info)
{
    target_ulong argv, envp;
    target_ulong sp;
    int size;
    target_ulong u_platform;
    const char *k_platform;
    const int n = sizeof(target_ulong);

    sp = p;
    u_platform = 0;
    k_platform = ELF_PLATFORM;
    if (k_platform) {
        size_t len = strlen(k_platform) + 1;
        sp -= (len + n - 1) & ~(n - 1);
        u_platform = sp;
        memcpy_to_target(sp, k_platform, len);
    }
    /*
     * Force 16 byte _final_ alignment here for generality.
     */
    sp = sp &~ (target_ulong)15;
    size = (DLINFO_ITEMS + 1) * 2;
    if (k_platform)
        size += 2;
#ifdef DLINFO_ARCH_ITEMS
    size += DLINFO_ARCH_ITEMS * 2;
#endif
    size += envc + argc + 2;
    size += (!ibcs ? 3 : 1);    /* argc itself */
    size *= n;
    if (size & 15)
        sp -= 16 - (size & 15);

#define NEW_AUX_ENT(id, val) do { \
            sp -= n; tputl(sp, val); \
            sp -= n; tputl(sp, id); \
        } while(0)
    NEW_AUX_ENT (AT_NULL, 0);

    /* There must be exactly DLINFO_ITEMS entries here. */
    NEW_AUX_ENT(AT_PHDR, (target_ulong)(load_addr + exec->e_phoff));
    NEW_AUX_ENT(AT_PHENT, (target_ulong)(sizeof (struct elf_phdr)));
    NEW_AUX_ENT(AT_PHNUM, (target_ulong)(exec->e_phnum));
    NEW_AUX_ENT(AT_PAGESZ, (target_ulong)(TARGET_PAGE_SIZE));
    NEW_AUX_ENT(AT_BASE, (target_ulong)(interp_load_addr));
    NEW_AUX_ENT(AT_FLAGS, (target_ulong)0);
    NEW_AUX_ENT(AT_ENTRY, load_bias + exec->e_entry);
    NEW_AUX_ENT(AT_UID, (target_ulong) getuid());
    NEW_AUX_ENT(AT_EUID, (target_ulong) geteuid());
    NEW_AUX_ENT(AT_GID, (target_ulong) getgid());
    NEW_AUX_ENT(AT_EGID, (target_ulong) getegid());
    NEW_AUX_ENT(AT_HWCAP, (target_ulong) ELF_HWCAP);
    if (k_platform)
        NEW_AUX_ENT(AT_PLATFORM, u_platform);
#ifdef ARCH_DLINFO
    /*
     * ARCH_DLINFO must come last so platform specific code can enforce
     * special alignment requirements on the AUXV if necessary (eg. PPC).
     */
    ARCH_DLINFO;
#endif
#undef NEW_AUX_ENT

    sp -= (envc + 1) * n;
    envp = sp;
    sp -= (argc + 1) * n;
    argv = sp;
    if (!ibcs) {
        sp -= n; tputl(sp, envp);
        sp -= n; tputl(sp, argv);
    }
    sp -= n; tputl(sp, argc);
    info->arg_start = p;
    while (argc-- > 0) {
        tputl(argv, p); argv += n;
        p += target_strlen(p) + 1;
    }
    tputl(argv, 0);
    info->arg_end = info->env_start = p;
    while (envc-- > 0) {
        tputl(envp, p); envp += n;
        p += target_strlen(p) + 1;
    }
    tputl(envp, 0);
    info->env_end = p;
    return sp;
}
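
/* Map the ELF interpreter (dynamic linker) into the target address
   space, zero its bss, and return its entry point, or ~0UL on error. */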
static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
                                     int interpreter_fd,
                                     unsigned long *interp_load_addr)
{
    struct elf_phdr *elf_phdata = NULL;
    struct elf_phdr *eppnt;
    unsigned long load_addr = 0;
    int load_addr_set = 0;
    int retval;
    unsigned long last_bss, elf_bss;
    unsigned long error;
    int i;

    elf_bss = 0;
    last_bss = 0;
    error = 0;

#ifdef BSWAP_NEEDED
    bswap_ehdr(interp_elf_ex);
#endif
    /* First of all, some simple consistency checks */
    if ((interp_elf_ex->e_type != ET_EXEC &&
         interp_elf_ex->e_type != ET_DYN) ||
        !elf_check_arch(interp_elf_ex->e_machine)) {
        return ~0UL;
    }

    /* Now read in all of the header information */

    if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE)
        return ~0UL;

    elf_phdata = (struct elf_phdr *)
        malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);

    if (!elf_phdata)
        return ~0UL;

    /*
     * If the size of this structure has changed, then punt, since
     * we will be doing the wrong thing.
     */
    if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
        free(elf_phdata);
        return ~0UL;
    }

    retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);
    if(retval >= 0) {
        retval = read(interpreter_fd,
                      (char *) elf_phdata,
                      sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
    }
    if (retval < 0) {
        perror("load_elf_interp");
        exit(-1);
        free (elf_phdata);
        return retval;
    }
#ifdef BSWAP_NEEDED
    eppnt = elf_phdata;
    for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
        bswap_phdr(eppnt);
    }
#endif

    if (interp_elf_ex->e_type == ET_DYN) {
        /* in order to avoid hardcoding the interpreter load
           address in qemu, we allocate a big enough memory zone */
        error = target_mmap(0, INTERP_MAP_SIZE,
                            PROT_NONE, MAP_PRIVATE | MAP_ANON,
                            -1, 0);
        if (error == -1) {
            perror("mmap");
            exit(-1);
        }
        load_addr = error;
        load_addr_set = 1;
    }

    eppnt = elf_phdata;
    for(i=0; i<interp_elf_ex->e_phnum; i++, eppnt++)
        if (eppnt->p_type == PT_LOAD) {
            int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
            int elf_prot = 0;
            unsigned long vaddr = 0;
            unsigned long k;

            if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
            if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
            if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
            if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) {
                elf_type |= MAP_FIXED;
                vaddr = eppnt->p_vaddr;
            }
            error = target_mmap(load_addr+TARGET_ELF_PAGESTART(vaddr),
                                eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr),
                                elf_prot,
                                elf_type,
                                interpreter_fd,
                                eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr));

            if (error == -1) {
                /* Real error */
                close(interpreter_fd);
                free(elf_phdata);
                return ~0UL;
            }

            if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
                load_addr = error;
                load_addr_set = 1;
            }

            /*
             * Find the end of the file mapping for this phdr, and keep
             * track of the largest address we see for this.
             */
            k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
            if (k > elf_bss) elf_bss = k;

            /*
             * Do the same thing for the memory mapping - between
             * elf_bss and last_bss is the bss section.
             */
            k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
            if (k > last_bss) last_bss = k;
        }

    /* Now use mmap to map the library into memory. */

    close(interpreter_fd);

    /*
     * Now fill out the bss section.  First pad the last page up
     * to the page boundary, and then perform a mmap to make sure
     * that there are zeromapped pages up to and including the last
     * bss page.
     */
    padzero(elf_bss);
    elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */

    /* Map the last of the bss segment */
    if (last_bss > elf_bss) {
        target_mmap(elf_bss, last_bss-elf_bss,
                    PROT_READ|PROT_WRITE|PROT_EXEC,
                    MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    }
    free(elf_phdata);

    *interp_load_addr = load_addr;
    return ((unsigned long) interp_elf_ex->e_entry) + load_addr;
}
/* Best attempt to load symbols from this ELF object. */
static void load_symbols(struct elfhdr *hdr, int fd)
{
    unsigned int i;
    struct elf_shdr sechdr, symtab, strtab;
    char *strings;
    struct syminfo *s;

    lseek(fd, hdr->e_shoff, SEEK_SET);
    for (i = 0; i < hdr->e_shnum; i++) {
        if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr))
            return;
#ifdef BSWAP_NEEDED
        bswap_shdr(&sechdr);
#endif
        if (sechdr.sh_type == SHT_SYMTAB) {
            symtab = sechdr;
            lseek(fd, hdr->e_shoff
                  + sizeof(sechdr) * sechdr.sh_link, SEEK_SET);
            if (read(fd, &strtab, sizeof(strtab))
                != sizeof(strtab))
                return;
#ifdef BSWAP_NEEDED
            bswap_shdr(&strtab);
#endif
            goto found;
        }
    }
    return; /* Shouldn't happen... */

 found:
    /* Now know where the strtab and symtab are.  Snarf them. */
    s = malloc(sizeof(*s));
    s->disas_symtab = malloc(symtab.sh_size);
    s->disas_strtab = strings = malloc(strtab.sh_size);
    if (!s->disas_symtab || !s->disas_strtab)
        return;

    lseek(fd, symtab.sh_offset, SEEK_SET);
    if (read(fd, s->disas_symtab, symtab.sh_size) != symtab.sh_size)
        return;

#ifdef BSWAP_NEEDED
    for (i = 0; i < symtab.sh_size / sizeof(struct elf_sym); i++)
        bswap_sym(s->disas_symtab + sizeof(struct elf_sym)*i);
#endif

    lseek(fd, strtab.sh_offset, SEEK_SET);
    if (read(fd, strings, strtab.sh_size) != strtab.sh_size)
        return;
    s->disas_num_syms = symtab.sh_size / sizeof(struct elf_sym);
    s->next = syminfos;
    syminfos = s;
}
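
/* Load the main ELF executable described by bprm: map its PT_LOAD
   segments, load the program interpreter if one is requested, set up
   the stack and auxiliary vector, and fill in the image_info fields
   used later by the emulator. */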
static int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
                           struct image_info * info)
{
    struct elfhdr elf_ex;
    struct elfhdr interp_elf_ex;
    struct exec interp_ex;
    int interpreter_fd = -1; /* avoid warning */
    unsigned long load_addr, load_bias;
    int load_addr_set = 0;
    unsigned int interpreter_type = INTERPRETER_NONE;
    unsigned char ibcs2_interpreter;
    int i;
    unsigned long mapped_addr;
    struct elf_phdr * elf_ppnt;
    struct elf_phdr *elf_phdata;
    unsigned long elf_bss, k, elf_brk;
    int retval;
    char * elf_interpreter;
    unsigned long elf_entry, interp_load_addr = 0;
    int status;
    unsigned long start_code, end_code, end_data;
    unsigned long elf_stack;
    char passed_fileno[6];

    ibcs2_interpreter = 0;
    status = 0;
    load_addr = 0;
    load_bias = 0;
    elf_ex = *((struct elfhdr *) bprm->buf);          /* exec-header */
#ifdef BSWAP_NEEDED
    bswap_ehdr(&elf_ex);
#endif
    if (elf_ex.e_ident[0] != 0x7f ||
        strncmp(&elf_ex.e_ident[1], "ELF", 3) != 0) {
        return -ENOEXEC;
    }

    /* First of all, some simple consistency checks */
    if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
        (! elf_check_arch(elf_ex.e_machine))) {
        return -ENOEXEC;
    }

    /* Now read in all of the header information */
    elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize * elf_ex.e_phnum);
    if (elf_phdata == NULL) {
        return -ENOMEM;
    }

    retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET);
    if(retval > 0) {
        retval = read(bprm->fd, (char *) elf_phdata,
                      elf_ex.e_phentsize * elf_ex.e_phnum);
    }

    if (retval < 0) {
        perror("load_elf_binary");
        exit(-1);
        free (elf_phdata);
        return -errno;
    }

#ifdef BSWAP_NEEDED
    elf_ppnt = elf_phdata;
    for (i=0; i<elf_ex.e_phnum; i++, elf_ppnt++) {
        bswap_phdr(elf_ppnt);
    }
#endif
    elf_ppnt = elf_phdata;

    elf_bss = 0;
    elf_brk = 0;

    elf_stack = ~0UL;
    elf_interpreter = NULL;
    start_code = ~0UL;
    end_code = 0;
    end_data = 0;
    for(i=0;i < elf_ex.e_phnum; i++) {
        if (elf_ppnt->p_type == PT_INTERP) {
            if ( elf_interpreter != NULL )
            {
                free (elf_phdata);
                free(elf_interpreter);
                close(bprm->fd);
                return -EINVAL;
            }

            /* This is the program interpreter used for
             * shared libraries - for now assume that this
             * is an a.out format binary
             */

            elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);

            if (elf_interpreter == NULL) {
                free (elf_phdata);
                close(bprm->fd);
                return -ENOMEM;
            }

            retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET);
            if(retval >= 0) {
                retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz);
            }
            if(retval < 0) {
                perror("load_elf_binary2");
                exit(-1);
            }

            /* If the program interpreter is one of these two,
               then assume an iBCS2 image. Otherwise assume
               a native linux image. */

            /* JRP - Need to add X86 lib dir stuff here... */

            if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
                strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0) {
                ibcs2_interpreter = 1;
            }

#if 0
            printf("Using ELF interpreter %s\n", elf_interpreter);
#endif
            if (retval >= 0) {
                retval = open(path(elf_interpreter), O_RDONLY);
                if(retval >= 0) {
                    interpreter_fd = retval;
                }
                else {
                    perror(elf_interpreter);
                    exit(-1);
                    /* retval = -errno; */
                }
            }

            if (retval >= 0) {
                retval = lseek(interpreter_fd, 0, SEEK_SET);
                if(retval >= 0) {
                    retval = read(interpreter_fd, bprm->buf, 128);
                }
            }
            if (retval >= 0) {
                interp_ex = *((struct exec *) bprm->buf);       /* aout exec-header */
                interp_elf_ex = *((struct elfhdr *) bprm->buf); /* elf exec-header */
            }
            if (retval < 0) {
                perror("load_elf_binary3");
                exit(-1);
                free (elf_phdata);
                free(elf_interpreter);
                close(bprm->fd);
                return retval;
            }
        }
        elf_ppnt++;
    }
    /* Some simple consistency checks for the interpreter */
    if (elf_interpreter) {
        interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;

        /* Now figure out which format our binary is */
        if ((N_MAGIC(interp_ex) != OMAGIC) && (N_MAGIC(interp_ex) != ZMAGIC) &&
            (N_MAGIC(interp_ex) != QMAGIC)) {
            interpreter_type = INTERPRETER_ELF;
        }

        if (interp_elf_ex.e_ident[0] != 0x7f ||
            strncmp(&interp_elf_ex.e_ident[1], "ELF", 3) != 0) {
            interpreter_type &= ~INTERPRETER_ELF;
        }

        if (!interpreter_type) {
            free(elf_interpreter);
            free(elf_phdata);
            close(bprm->fd);
            return -ELIBBAD;
        }
    }
    /* OK, we are done with that, now set up the arg stuff,
       and then start this sucker up */

    if (!bprm->sh_bang) {
        char * passed_p;

        if (interpreter_type == INTERPRETER_AOUT) {
            snprintf(passed_fileno, sizeof(passed_fileno), "%d", bprm->fd);
            passed_p = passed_fileno;

            if (elf_interpreter) {
                bprm->p = copy_strings(1, &passed_p, bprm->page, bprm->p);
                bprm->argc++;
            }
        }
        if (!bprm->p) {
            if (elf_interpreter) {
                free(elf_interpreter);
            }
            free (elf_phdata);
            close(bprm->fd);
            return -E2BIG;
        }
    }
    /* OK, This is the point of no return */
    info->end_data = 0;
    info->end_code = 0;
    info->start_mmap = (unsigned long)ELF_START_MMAP;
    info->mmap = 0;
    elf_entry = (unsigned long) elf_ex.e_entry;

    /* Do this so that we can load the interpreter, if need be.  We will
       change some of these later */
    info->rss = 0;
    bprm->p = setup_arg_pages(bprm->p, bprm, info);
    info->start_stack = bprm->p;

    /* Now we do a little grungy work by mmaping the ELF image into
     * the correct location in memory.  At this point, we assume that
     * the image should be loaded at fixed address, not at a variable
     * address.
     */
    for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
        int elf_prot = 0;
        int elf_flags = 0;
        unsigned long error;

        if (elf_ppnt->p_type != PT_LOAD)
            continue;

        if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
        if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
        if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
        elf_flags = MAP_PRIVATE | MAP_DENYWRITE;
        if (elf_ex.e_type == ET_EXEC || load_addr_set) {
            elf_flags |= MAP_FIXED;
        } else if (elf_ex.e_type == ET_DYN) {
            /* Try and get dynamic programs out of the way of the default mmap
               base, as well as whatever program they might try to exec.  This
               is because the brk will follow the loader, and is not movable. */
            /* NOTE: for qemu, we do a big mmap to get enough space
               without hardcoding any address */
            error = target_mmap(0, ET_DYN_MAP_SIZE,
                                PROT_NONE, MAP_PRIVATE | MAP_ANON,
                                -1, 0);
            if (error == -1) {
                perror("mmap");
                exit(-1);
            }
            load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr);
        }

        error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
                            (elf_ppnt->p_filesz +
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
                            elf_prot,
                            (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
                            bprm->fd,
                            (elf_ppnt->p_offset -
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
        if (error == -1) {
            perror("mmap");
            exit(-1);
        }

#ifdef LOW_ELF_STACK
        if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
            elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr);
#endif

        if (!load_addr_set) {
            load_addr_set = 1;
            load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
            if (elf_ex.e_type == ET_DYN) {
                load_bias += error -
                    TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
                load_addr += load_bias;
            }
        }
        k = elf_ppnt->p_vaddr;
        if (k < start_code)
            start_code = k;
        k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
        if (k > elf_bss)
            elf_bss = k;
        if ((elf_ppnt->p_flags & PF_X) && end_code < k)
            end_code = k;
        if (end_data < k)
            end_data = k;
        k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
        if (k > elf_brk) elf_brk = k;
    }

    elf_entry += load_bias;
    elf_bss += load_bias;
    elf_brk += load_bias;
    start_code += load_bias;
    end_code += load_bias;
    // start_data += load_bias;
    end_data += load_bias;
    if (elf_interpreter) {
        if (interpreter_type & 1) {
            elf_entry = load_aout_interp(&interp_ex, interpreter_fd);
        }
        else if (interpreter_type & 2) {
            elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,
                                        &interp_load_addr);
        }

        close(interpreter_fd);
        free(elf_interpreter);

        if (elf_entry == ~0UL) {
            printf("Unable to load interpreter\n");
            free(elf_phdata);
            exit(-1);
            return 0;
        }
    }

    free(elf_phdata);

    if (loglevel)
        load_symbols(&elf_ex, bprm->fd);

    if (interpreter_type != INTERPRETER_AOUT) close(bprm->fd);
    info->personality = (ibcs2_interpreter ? PER_SVR4 : PER_LINUX);

#ifdef LOW_ELF_STACK
    info->start_stack = bprm->p = elf_stack - 4;
#endif
    bprm->p = create_elf_tables(bprm->p,
                                bprm->argc,
                                bprm->envc,
                                &elf_ex,
                                load_addr, load_bias,
                                interp_load_addr,
                                (interpreter_type == INTERPRETER_AOUT ? 0 : 1),
                                info);
    if (interpreter_type == INTERPRETER_AOUT)
        info->arg_start += strlen(passed_fileno) + 1;
    info->start_brk = info->brk = elf_brk;
    info->end_code = end_code;
    info->start_code = start_code;
    info->end_data = end_data;
    info->start_stack = bprm->p;

    /* Calling set_brk effectively mmaps the pages that we need for the bss and break
       sections */
    set_brk(elf_bss, elf_brk);

    padzero(elf_bss);

#if 0
    printf("(start_brk) %x\n" , info->start_brk);
    printf("(end_code) %x\n" , info->end_code);
    printf("(start_code) %x\n" , info->start_code);
    printf("(end_data) %x\n" , info->end_data);
    printf("(start_stack) %x\n" , info->start_stack);
    printf("(brk) %x\n" , info->brk);
#endif
    if ( info->personality == PER_SVR4 )
    {
        /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
           and some applications "depend" upon this behavior.
           Since we do not have the power to recompile these, we
           emulate the SVr4 behavior.  Sigh. */
        mapped_addr = target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
                                  MAP_FIXED | MAP_PRIVATE, -1, 0);
    }
#ifdef ELF_PLAT_INIT
    /*
     * The ABI may specify that certain registers be set up in special
     * ways (on i386 %edx is the address of a DT_FINI function, for
     * example).  This macro performs whatever initialization to
     * the regs structure is required.
     */
    ELF_PLAT_INIT(regs);
#endif

    info->entry = elf_entry;

    return 0;
}
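
/* Top-level entry point: open the executable, prepare the binprm,
   copy the filename/environment/argument strings, load the ELF image
   and finally initialize the CPU registers for the new image. */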
int elf_exec(const char * filename, char ** argv, char ** envp,
             struct target_pt_regs * regs, struct image_info *infop)
{
    struct linux_binprm bprm;
    int retval;
    int i;

    bprm.p = TARGET_PAGE_SIZE*MAX_ARG_PAGES - sizeof(unsigned int);
    for (i=0 ; i<MAX_ARG_PAGES ; i++)       /* clear page-table */
        bprm.page[i] = 0;
    retval = open(filename, O_RDONLY);
    if (retval < 0)
        return retval;
    bprm.fd = retval;
    bprm.filename = (char *)filename;
    bprm.sh_bang = 0;
    bprm.loader = 0;
    bprm.exec = 0;
    bprm.dont_iput = 0;
    bprm.argc = count(argv);
    bprm.envc = count(envp);

    retval = prepare_binprm(&bprm);

    if(retval >= 0) {
        bprm.p = copy_strings(1, &bprm.filename, bprm.page, bprm.p);
        bprm.exec = bprm.p;
        bprm.p = copy_strings(bprm.envc, envp, bprm.page, bprm.p);
        bprm.p = copy_strings(bprm.argc, argv, bprm.page, bprm.p);
        if (!bprm.p) {
            retval = -E2BIG;
        }
    }

    if(retval >= 0) {
        retval = load_elf_binary(&bprm, regs, infop);
    }
    if(retval >= 0) {
        /* success.  Initialize important registers */
        init_thread(regs, infop);
        return retval;
    }

    /* Something went wrong, return the inode and free the argument pages */
    for (i=0 ; i<MAX_ARG_PAGES ; i++) {
        free(bprm.page[i]);
    }
    return(retval);
}
static int load_aout_interp(void * exptr, int interp_fd)
{
    printf("a.out interpreter not yet supported\n");
    return(0);
}