/* This is the Linux kernel elf-loading code, ported into user space */

#include <stdio.h>
#include <sys/types.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <errno.h>
#include <unistd.h>
#include <sys/mman.h>
#include <stdlib.h>
#include <string.h>

#include "qemu.h"
#include "disas.h"
/* this flag is ineffective under Linux too, it should be deleted */
#ifndef MAP_DENYWRITE
#define MAP_DENYWRITE 0
#endif

/* should probably go in elf.h */
#ifndef ELIBBAD
#define ELIBBAD 80
#endif
#ifdef TARGET_I386

#define ELF_PLATFORM get_elf_platform()

static const char *get_elf_platform(void)
{
    static char elf_platform[] = "i386";
    int family = (global_env->cpuid_version >> 8) & 0xff;
    if (family > 6)
        family = 6;
    if (family >= 3)
        elf_platform[1] = '0' + family;
    return elf_platform;
}
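/* For example, a family-6 CPU yields the platform string "i686" (exported
   to the guest via AT_PLATFORM below); families below 3 keep the default
   "i386" string. */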
#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    return global_env->cpuid_features;
}

#define ELF_START_MMAP 0x80000000

/*
 * This is used to ensure we don't load something for the wrong architecture.
 */
#define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )

/*
 * These are used to set parameters in the core dumps.
 */
#define ELF_CLASS       ELFCLASS32
#define ELF_DATA        ELFDATA2LSB
#define ELF_ARCH        EM_386

/* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
   starts %edx contains a pointer to a function which might be
   registered using `atexit'.  This provides a means for the
   dynamic linker to call DT_FINI functions for shared libraries
   that have been loaded before the code runs.

   A value of 0 tells us there is no such handler.  */
#define ELF_PLAT_INIT(_r)       _r->edx = 0

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->esp = infop->start_stack;
    regs->eip = infop->entry;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

#endif
#ifdef TARGET_ARM

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_ARM )

#define ELF_CLASS       ELFCLASS32
#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA        ELFDATA2MSB
#else
#define ELF_DATA        ELFDATA2LSB
#endif
#define ELF_ARCH        EM_ARM

#define ELF_PLAT_INIT(_r)       _r->ARM_r0 = 0

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    target_long *stack = (void *)infop->start_stack;
    memset(regs, 0, sizeof(*regs));
    regs->ARM_cpsr = 0x10;
    if (infop->entry & 1)
        regs->ARM_cpsr |= CPSR_T;
    regs->ARM_pc = infop->entry & 0xfffffffe;
    regs->ARM_sp = infop->start_stack;
    regs->ARM_r2 = tswapl(stack[2]); /* envp */
    regs->ARM_r1 = tswapl(stack[1]); /* argv */
    /* XXX: it seems that r0 is zeroed afterwards anyway! */
    // regs->ARM_r0 = tswapl(stack[0]); /* argc */
}
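/* stack[] here is the block built later by create_elf_tables(): stack[0]
   holds argc and the words above it belong to the argument/environment
   vectors, so r1 and r2 pick up pointers from that area. */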
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

enum
{
    ARM_HWCAP_ARM_SWP       = 1 << 0,
    ARM_HWCAP_ARM_HALF      = 1 << 1,
    ARM_HWCAP_ARM_THUMB     = 1 << 2,
    ARM_HWCAP_ARM_26BIT     = 1 << 3,
    ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
    ARM_HWCAP_ARM_FPA       = 1 << 5,
    ARM_HWCAP_ARM_VFP       = 1 << 6,
    ARM_HWCAP_ARM_EDSP      = 1 << 7,
};

#define ELF_HWCAP (ARM_HWCAP_ARM_SWP | ARM_HWCAP_ARM_HALF              \
                   | ARM_HWCAP_ARM_THUMB | ARM_HWCAP_ARM_FAST_MULT     \
                   | ARM_HWCAP_ARM_FPA | ARM_HWCAP_ARM_VFP)

#endif
#ifdef TARGET_SPARC
#ifdef TARGET_SPARC64

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_SPARC )

#define ELF_CLASS       ELFCLASS64
#define ELF_DATA        ELFDATA2MSB
#define ELF_ARCH        EM_SPARC

/*XXX*/
#define ELF_PLAT_INIT(_r)

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->tstate = 0;
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
    regs->y = 0;
    regs->u_regs[14] = infop->start_stack - 16 * 4;
}

#else
#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_SPARC )

#define ELF_CLASS       ELFCLASS32
#define ELF_DATA        ELFDATA2MSB
#define ELF_ARCH        EM_SPARC

/*XXX*/
#define ELF_PLAT_INIT(_r)

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->psr = 0;
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
    regs->y = 0;
    regs->u_regs[14] = infop->start_stack - 16 * 4;
}

#endif
#endif
#ifdef TARGET_PPC

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_PPC )

#define ELF_CLASS       ELFCLASS32
#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA        ELFDATA2MSB
#else
#define ELF_DATA        ELFDATA2LSB
#endif
#define ELF_ARCH        EM_PPC

/* Note that this isn't exactly what the regular kernel does,
 * but it is what the ABI wants and it is needed to allow
 * execution of PPC BSD programs.
 */
#define ELF_PLAT_INIT(_r)                                       \
do {                                                            \
    target_ulong *pos = (target_ulong *)bprm->p, tmp = 1;      \
    _r->gpr[3] = bprm->argc;                                    \
    _r->gpr[4] = (unsigned long)++pos;                          \
    for (; tmp != 0; pos++)                                     \
        tmp = *pos;                                             \
    _r->gpr[5] = (unsigned long)pos;                            \
} while (0)
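/* The loop above walks past the argv pointer array: pos starts at the argc
   word, gpr[4] gets the address of argv[0], and pos is advanced until the
   NULL terminator has been read, so gpr[5] ends up pointing at envp. */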
/*
 * We need to put in some extra aux table entries to tell glibc what
 * the cache block size is, so it can use the dcbz instruction safely.
 */
#define AT_DCACHEBSIZE          19
#define AT_ICACHEBSIZE          20
#define AT_UCACHEBSIZE          21
/* A special ignored type value for PPC, for glibc compatibility.  */
#define AT_IGNOREPPC            22
/*
 * The requirements here are:
 * - keep the final alignment of sp (sp & 0xf)
 * - make sure the 32-bit value at the first 16 byte aligned position of
 *   AUXV is greater than 16 for glibc compatibility.
 *   AT_IGNOREPPC is used for that.
 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
 *   even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
 */
#define DLINFO_ARCH_ITEMS       5
#define ARCH_DLINFO                                     \
do {                                                    \
    NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20);                  \
    NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20);                  \
    NEW_AUX_ENT(AT_UCACHEBSIZE, 0);                     \
    /* Now handle glibc compatibility.  */              \
    NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);            \
    NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);            \
} while (0)
static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
{
    _regs->msr = 1 << MSR_PR; /* Set user mode */
    _regs->gpr[1] = infop->start_stack;
    _regs->nip = infop->entry;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

#endif
#ifdef TARGET_MIPS

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_MIPS )

#define ELF_CLASS       ELFCLASS32
#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA        ELFDATA2MSB
#else
#define ELF_DATA        ELFDATA2LSB
#endif
#define ELF_ARCH        EM_MIPS

#define ELF_PLAT_INIT(_r)

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->cp0_status = CP0St_UM;
    regs->cp0_epc = infop->entry;
    regs->regs[29] = infop->start_stack;
}

#endif /* TARGET_MIPS */
#ifndef ELF_PLATFORM
#define ELF_PLATFORM (NULL)
#endif

#ifndef ELF_HWCAP
#define ELF_HWCAP 0
#endif

#include "elf.h"
/*
 * MAX_ARG_PAGES defines the number of pages allocated for arguments
 * and environment for the new program. 32 should suffice, this gives
 * a maximum env+arg of 128kB with 4KB pages!
 */
#define MAX_ARG_PAGES 32

/*
 * This structure is used to hold the arguments that are
 * used when loading binaries.
 */
struct linux_binprm {
    char buf[128];
    unsigned long page[MAX_ARG_PAGES];
    unsigned long p;
    int sh_bang;
    int fd;
    int e_uid, e_gid;
    int argc, envc;
    char *filename;             /* Name of binary */
    unsigned long loader, exec;
    int dont_iput;              /* binfmt handler has put inode */
};
struct exec
{
    unsigned int a_info;        /* Use macros N_MAGIC, etc for access */
    unsigned int a_text;        /* length of text, in bytes */
    unsigned int a_data;        /* length of data, in bytes */
    unsigned int a_bss;         /* length of uninitialized data area, in bytes */
    unsigned int a_syms;        /* length of symbol table data in file, in bytes */
    unsigned int a_entry;       /* start address */
    unsigned int a_trsize;      /* length of relocation info for text, in bytes */
    unsigned int a_drsize;      /* length of relocation info for data, in bytes */
};

/* a.out magic numbers, as encoded in a_info */
#define N_MAGIC(exec) ((exec).a_info & 0xffff)
#define OMAGIC 0407     /* old impure format */
#define NMAGIC 0410     /* read-only text */
#define ZMAGIC 0413     /* demand-paged */
#define QMAGIC 0314     /* demand-paged, header in text */
/* max code+data+bss space allocated to elf interpreter */
#define INTERP_MAP_SIZE (32 * 1024 * 1024)

/* max code+data+bss+brk space allocated to ET_DYN executables */
#define ET_DYN_MAP_SIZE (128 * 1024 * 1024)

/* from personality.h */

/* Flags for bug emulation. These occupy the top three bytes. */
#define STICKY_TIMEOUTS         0x4000000
#define WHOLE_SECONDS           0x2000000

/* Personality types. These go in the low byte. Avoid using the top bit,
 * it will conflict with error returns.
 */
#define PER_MASK                (0x00ff)
#define PER_LINUX               (0x0000)
#define PER_SVR4                (0x0001 | STICKY_TIMEOUTS)
#define PER_SVR3                (0x0002 | STICKY_TIMEOUTS)
#define PER_SCOSVR3             (0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS)
#define PER_WYSEV386            (0x0004 | STICKY_TIMEOUTS)
#define PER_ISCR4               (0x0005 | STICKY_TIMEOUTS)
#define PER_BSD                 (0x0006)
#define PER_XENIX               (0x0007 | STICKY_TIMEOUTS)

/* Necessary parameters */
#define NGROUPS 32

#define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
#define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
#define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
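/* Worked example (assuming TARGET_ELF_EXEC_PAGESIZE == 4096):
 *   TARGET_ELF_PAGESTART(0x08049234)  == 0x08049000
 *   TARGET_ELF_PAGEOFFSET(0x08049234) == 0x234
 * i.e. a virtual address is split into its page base and in-page offset. */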
#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
#define INTERPRETER_ELF 2

#define DLINFO_ITEMS 12

static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
{
    memcpy(to, from, n);
}

extern unsigned long x86_stack_size;

static int load_aout_interp(void * exptr, int interp_fd);
#ifdef BSWAP_NEEDED
static void bswap_ehdr(struct elfhdr *ehdr)
{
    bswap16s(&ehdr->e_type);            /* Object file type */
    bswap16s(&ehdr->e_machine);         /* Architecture */
    bswap32s(&ehdr->e_version);         /* Object file version */
    bswaptls(&ehdr->e_entry);           /* Entry point virtual address */
    bswaptls(&ehdr->e_phoff);           /* Program header table file offset */
    bswaptls(&ehdr->e_shoff);           /* Section header table file offset */
    bswap32s(&ehdr->e_flags);           /* Processor-specific flags */
    bswap16s(&ehdr->e_ehsize);          /* ELF header size in bytes */
    bswap16s(&ehdr->e_phentsize);       /* Program header table entry size */
    bswap16s(&ehdr->e_phnum);           /* Program header table entry count */
    bswap16s(&ehdr->e_shentsize);       /* Section header table entry size */
    bswap16s(&ehdr->e_shnum);           /* Section header table entry count */
    bswap16s(&ehdr->e_shstrndx);        /* Section header string table index */
}

static void bswap_phdr(struct elf_phdr *phdr)
{
    bswap32s(&phdr->p_type);            /* Segment type */
    bswaptls(&phdr->p_offset);          /* Segment file offset */
    bswaptls(&phdr->p_vaddr);           /* Segment virtual address */
    bswaptls(&phdr->p_paddr);           /* Segment physical address */
    bswaptls(&phdr->p_filesz);          /* Segment size in file */
    bswaptls(&phdr->p_memsz);           /* Segment size in memory */
    bswap32s(&phdr->p_flags);           /* Segment flags */
    bswaptls(&phdr->p_align);           /* Segment alignment */
}

static void bswap_shdr(struct elf_shdr *shdr)
{
    bswap32s(&shdr->sh_name);
    bswap32s(&shdr->sh_type);
    bswaptls(&shdr->sh_flags);
    bswaptls(&shdr->sh_addr);
    bswaptls(&shdr->sh_offset);
    bswaptls(&shdr->sh_size);
    bswap32s(&shdr->sh_link);
    bswap32s(&shdr->sh_info);
    bswaptls(&shdr->sh_addralign);
    bswaptls(&shdr->sh_entsize);
}

static void bswap_sym(Elf32_Sym *sym)
{
    bswap32s(&sym->st_name);
    bswap32s(&sym->st_value);
    bswap32s(&sym->st_size);
    bswap16s(&sym->st_shndx);
}
#endif
static void * get_free_page(void)
{
    void *retval;

    /* User-space version of kernel get_free_page.  Returns a page-aligned
     * page-sized chunk of memory.
     */
    retval = (void *)target_mmap(0, qemu_host_page_size, PROT_READ|PROT_WRITE,
                                 MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);

    if ((long)retval == -1) {
        perror("get_free_page");
        exit(-1);
    }
    else {
        return retval;
    }
}

static void free_page(void * pageaddr)
{
    target_munmap((unsigned long)pageaddr, qemu_host_page_size);
}
/*
 * 'copy_strings()' copies argument/environment strings from user
 * memory to free pages in kernel mem. These are in a format ready
 * to be put directly into the top of new user memory.
 */
static unsigned long copy_strings(int argc, char **argv, unsigned long *page,
                                  unsigned long p)
{
    char *tmp, *tmp1, *pag = NULL;
    int len, offset = 0;

    if (!p) {
        return 0;       /* bullet-proofing */
    }
    while (argc-- > 0) {
        tmp = argv[argc];
        if (!tmp) {
            fprintf(stderr, "VFS: argc is wrong");
            exit(-1);
        }
        tmp1 = tmp;
        while (*tmp++);
        len = tmp - tmp1;
        if (p < len) {  /* this shouldn't happen - 128kB */
            return 0;
        }
        while (len) {
            --p; --tmp; --len;
            if (--offset < 0) {
                offset = p % TARGET_PAGE_SIZE;
                pag = (char *)page[p / TARGET_PAGE_SIZE];
                if (!pag) {
                    pag = (char *)get_free_page();
                    page[p / TARGET_PAGE_SIZE] = (unsigned long)pag;
                    if (!pag)
                        return 0;
                }
            }
            if (len == 0 || offset == 0) {
                *(pag + offset) = *tmp;
            }
            else {
                int bytes_to_copy = (len > offset) ? offset : len;
                tmp -= bytes_to_copy;
                p -= bytes_to_copy;
                offset -= bytes_to_copy;
                len -= bytes_to_copy;
                memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
            }
        }
    }
    return p;
}
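/* copy_strings() fills the argument area from the top down: strings are
   copied starting with the last argument and working towards lower
   addresses, allocating pages of the area lazily with get_free_page().
   The returned p is the offset of the lowest byte written and is passed
   back in for the next batch of strings. */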
static int in_group_p(gid_t g)
{
    /* return TRUE if we're in the specified group, FALSE otherwise */
    int ngroup;
    int i;
    gid_t grouplist[NGROUPS];

    ngroup = getgroups(NGROUPS, grouplist);
    for (i = 0; i < ngroup; i++) {
        if (grouplist[i] == g) {
            return 1;
        }
    }
    return 0;
}

static int count(char ** vec)
{
    int i;

    for (i = 0; *vec; i++) {
        vec++;
    }

    return i;
}
static int prepare_binprm(struct linux_binprm *bprm)
{
    struct stat st;
    int mode;
    int retval, id_change;

    if (fstat(bprm->fd, &st) < 0) {
        return -errno;
    }

    mode = st.st_mode;
    if (!S_ISREG(mode)) {       /* Must be regular file */
        return -EACCES;
    }
    if (!(mode & 0111)) {       /* Must have at least one execute bit set */
        return -EACCES;
    }

    bprm->e_uid = geteuid();
    bprm->e_gid = getegid();
    id_change = 0;

    /* Set-uid? */
    if (mode & S_ISUID) {
        bprm->e_uid = st.st_uid;
        if (bprm->e_uid != geteuid()) {
            id_change = 1;
        }
    }

    /* Set-gid? */
    /*
     * If setgid is set but no group execute bit then this
     * is a candidate for mandatory locking, not a setgid
     * executable.
     */
    if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
        bprm->e_gid = st.st_gid;
        if (!in_group_p(bprm->e_gid)) {
            id_change = 1;
        }
    }

    memset(bprm->buf, 0, sizeof(bprm->buf));
    retval = lseek(bprm->fd, 0L, SEEK_SET);
    if (retval >= 0) {
        retval = read(bprm->fd, bprm->buf, 128);
    }
    if (retval < 0) {
        perror("prepare_binprm");
        exit(-1);
        /* return(-errno); */
    }
    else {
        return retval;
    }
}
unsigned long setup_arg_pages(unsigned long p, struct linux_binprm * bprm,
                              struct image_info * info)
{
    unsigned long stack_base, size, error;
    int i;

    /* Create enough stack to hold everything.  If we don't use
     * it for args, we'll use it for something else...
     */
    size = x86_stack_size;
    if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE)
        size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
    error = target_mmap(0,
                        size + qemu_host_page_size,
                        PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS,
                        -1, 0);
    if (error == -1) {
        perror("stk mmap");
        exit(-1);
    }
    /* we reserve one extra page at the top of the stack as guard */
    target_mprotect(error + size, qemu_host_page_size, PROT_NONE);

    stack_base = error + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;
    p += stack_base;

    if (bprm->loader) {
        bprm->loader += stack_base;
    }
    bprm->exec += stack_base;

    for (i = 0; i < MAX_ARG_PAGES; i++) {
        if (bprm->page[i]) {
            info->rss++;

            memcpy((void *)stack_base, (void *)bprm->page[i], TARGET_PAGE_SIZE);
            free_page((void *)bprm->page[i]);
        }
        stack_base += TARGET_PAGE_SIZE;
    }
    return p;
}
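/* Resulting layout, roughly: one anonymous mapping of `size' bytes for the
   stack with a single PROT_NONE guard page above it; the MAX_ARG_PAGES
   argument pages end up at the top of that mapping, and the returned p is
   now an absolute address inside them rather than an offset. */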
static void set_brk(unsigned long start, unsigned long end)
{
    /* page-align the start and end addresses... */
    start = HOST_PAGE_ALIGN(start);
    end = HOST_PAGE_ALIGN(end);
    if (end <= start)
        return;
    if (target_mmap(start, end - start,
                    PROT_READ | PROT_WRITE | PROT_EXEC,
                    MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0) == -1) {
        perror("cannot mmap brk");
        exit(-1);
    }
}
/* We need to explicitly zero any fractional pages after the data
   section (i.e. bss).  This would contain the junk from the file that
   should not be in memory. */
static void padzero(unsigned long elf_bss)
{
    unsigned long nbyte;
    char *fpnt;

    /* XXX: this is really a hack : if the real host page size is
       smaller than the target page size, some pages after the end
       of the file may not be mapped. A better fix would be to
       patch target_mmap(), but it is more complicated as the file
       size must be known */
    if (qemu_real_host_page_size < qemu_host_page_size) {
        unsigned long end_addr, end_addr1;
        end_addr1 = (elf_bss + qemu_real_host_page_size - 1) &
            ~(qemu_real_host_page_size - 1);
        end_addr = HOST_PAGE_ALIGN(elf_bss);
        if (end_addr1 < end_addr) {
            mmap((void *)end_addr1, end_addr - end_addr1,
                 PROT_READ|PROT_WRITE|PROT_EXEC,
                 MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
        }
    }

    nbyte = elf_bss & (qemu_host_page_size - 1);
    if (nbyte) {
        nbyte = qemu_host_page_size - nbyte;
        fpnt = (char *)elf_bss;
        do {
            *fpnt++ = 0;
        } while (--nbyte);
    }
}
static unsigned int * create_elf_tables(char *p, int argc, int envc,
                                        struct elfhdr * exec,
                                        unsigned long load_addr,
                                        unsigned long load_bias,
                                        unsigned long interp_load_addr, int ibcs,
                                        struct image_info *info)
{
    target_ulong *argv, *envp;
    target_ulong *sp, *csp;
    target_ulong *u_platform;
    const char *k_platform;
    int v;

    /*
     * Force 16 byte _final_ alignment here for generality.
     */
    sp = (unsigned int *) (~15UL & (unsigned long) p);
    u_platform = NULL;
    k_platform = ELF_PLATFORM;
    if (k_platform) {
        size_t len = strlen(k_platform) + 1;
        sp -= (len + sizeof(target_ulong) - 1) / sizeof(target_ulong);
        u_platform = (target_ulong *)sp;
        __copy_to_user(u_platform, k_platform, len);
    }
    csp = sp;
    csp -= (DLINFO_ITEMS + 1) * 2;
    if (k_platform)
        csp -= 2;
#ifdef DLINFO_ARCH_ITEMS
    csp -= DLINFO_ARCH_ITEMS*2;
#endif
    csp -= envc+1;
    csp -= argc+1;
    csp -= (!ibcs ? 3 : 1);     /* argc itself */
    if ((unsigned long)csp & 15UL)
        sp -= ((unsigned long)csp & 15UL) / sizeof(*sp);

#define NEW_AUX_ENT(id, val) \
        sp -= 2; \
        put_user (id, sp); \
        put_user (val, sp + 1)
    NEW_AUX_ENT (AT_NULL, 0);
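    /* Each NEW_AUX_ENT pushes one (id, value) pair downwards in memory, so
       AT_NULL, pushed first, ends up as the terminating entry at the high
       end of the auxiliary vector that the guest scans upwards. */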
    /* There must be exactly DLINFO_ITEMS entries here.  */
    NEW_AUX_ENT(AT_PHDR, (target_ulong)(load_addr + exec->e_phoff));
    NEW_AUX_ENT(AT_PHENT, (target_ulong)(sizeof (struct elf_phdr)));
    NEW_AUX_ENT(AT_PHNUM, (target_ulong)(exec->e_phnum));
    NEW_AUX_ENT(AT_PAGESZ, (target_ulong)(TARGET_PAGE_SIZE));
    NEW_AUX_ENT(AT_BASE, (target_ulong)(interp_load_addr));
    NEW_AUX_ENT(AT_FLAGS, (target_ulong)0);
    NEW_AUX_ENT(AT_ENTRY, load_bias + exec->e_entry);
    NEW_AUX_ENT(AT_UID, (target_ulong) getuid());
    NEW_AUX_ENT(AT_EUID, (target_ulong) geteuid());
    NEW_AUX_ENT(AT_GID, (target_ulong) getgid());
    NEW_AUX_ENT(AT_EGID, (target_ulong) getegid());
    NEW_AUX_ENT(AT_HWCAP, (target_ulong) ELF_HWCAP);
    if (k_platform)
        NEW_AUX_ENT(AT_PLATFORM, (target_ulong) u_platform);
#ifdef ARCH_DLINFO
    /*
     * ARCH_DLINFO must come last so platform specific code can enforce
     * special alignment requirements on the AUXV if necessary (eg. PPC).
     */
    ARCH_DLINFO;
#endif
#undef NEW_AUX_ENT

    sp -= envc+1;
    envp = sp;
    sp -= argc+1;
    argv = sp;
    if (!ibcs) {
        put_user((target_ulong)envp,--sp);
        put_user((target_ulong)argv,--sp);
    }
    put_user(argc,--sp);
    info->arg_start = (unsigned int)((unsigned long)p & 0xffffffff);
    while (argc-->0) {
        put_user((target_ulong)p,argv++);
        do {
            get_user(v, p);
            p++;
        } while (v != 0);
    }
    put_user(0,argv);
    info->arg_end = info->env_start = (unsigned int)((unsigned long)p & 0xffffffff);
    while (envc-->0) {
        put_user((target_ulong)p,envp++);
        do {
            get_user(v, p);
            p++;
        } while (v != 0);
    }
    put_user(0,envp);
    info->env_end = (unsigned int)((unsigned long)p & 0xffffffff);
    return sp;
}
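/*
 * Rough picture of what create_elf_tables() leaves on the new stack, from
 * the returned sp upwards (the usual case, ibcs != 0):
 *
 *   sp -> argc
 *         argv[0] .. argv[argc-1], NULL
 *         envp[0] .. envp[envc-1], NULL
 *         auxiliary vector of (id, value) pairs, terminated by AT_NULL
 *         optional platform string and the argument/environment strings
 *
 * When ibcs is 0 (a.out interpreter) two extra words holding the addresses
 * of the argv and envp arrays sit between argc and the argv array.
 */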
static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
                                     int interpreter_fd,
                                     unsigned long *interp_load_addr)
{
    struct elf_phdr *elf_phdata = NULL;
    struct elf_phdr *eppnt;
    unsigned long load_addr = 0;
    int load_addr_set = 0;
    int retval;
    unsigned long last_bss, elf_bss;
    unsigned long error;
    int i;

    elf_bss = 0;
    last_bss = 0;
    error = 0;

#ifdef BSWAP_NEEDED
    bswap_ehdr(interp_elf_ex);
#endif
    /* First of all, some simple consistency checks */
    if ((interp_elf_ex->e_type != ET_EXEC &&
         interp_elf_ex->e_type != ET_DYN) ||
        !elf_check_arch(interp_elf_ex->e_machine)) {
        return ~0UL;
    }

    /* Now read in all of the header information */

    if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE)
        return ~0UL;

    elf_phdata = (struct elf_phdr *)
        malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);

    if (!elf_phdata)
        return ~0UL;

    /*
     * If the size of this structure has changed, then punt, since
     * we will be doing the wrong thing.
     */
    if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
        free(elf_phdata);
        return ~0UL;
    }

    retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);
    if (retval >= 0) {
        retval = read(interpreter_fd,
                      (char *) elf_phdata,
                      sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
    }
    if (retval < 0) {
        perror("load_elf_interp");
        exit(-1);
        free(elf_phdata);
        return retval;
    }
#ifdef BSWAP_NEEDED
    eppnt = elf_phdata;
    for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
        bswap_phdr(eppnt);
    }
#endif
    if (interp_elf_ex->e_type == ET_DYN) {
        /* in order to avoid hardcoding the interpreter load
           address in qemu, we allocate a big enough memory zone */
        error = target_mmap(0, INTERP_MAP_SIZE,
                            PROT_NONE, MAP_PRIVATE | MAP_ANON,
                            -1, 0);
        if (error == -1) {
            perror("mmap");
            exit(-1);
        }
        load_addr = error;
        load_addr_set = 1;
    }

    eppnt = elf_phdata;
    for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++)
        if (eppnt->p_type == PT_LOAD) {
            int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
            int elf_prot = 0;
            unsigned long vaddr = 0;
            unsigned long k;

            if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
            if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
            if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
            if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) {
                elf_type |= MAP_FIXED;
                vaddr = eppnt->p_vaddr;
            }
            error = target_mmap(load_addr+TARGET_ELF_PAGESTART(vaddr),
                                eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr),
                                elf_prot,
                                elf_type,
                                interpreter_fd,
                                eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr));

            if (error == -1) {
                /* Real error */
                close(interpreter_fd);
                free(elf_phdata);
                return ~0UL;
            }

            if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
                load_addr = error;
                load_addr_set = 1;
            }

            /*
             * Find the end of the file mapping for this phdr, and keep
             * track of the largest address we see for this.
             */
            k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
            if (k > elf_bss) elf_bss = k;

            /*
             * Do the same thing for the memory mapping - between
             * elf_bss and last_bss is the bss section.
             */
            k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
            if (k > last_bss) last_bss = k;
        }

    /* Now use mmap to map the library into memory. */

    close(interpreter_fd);

    /*
     * Now fill out the bss section.  First pad the last page up
     * to the page boundary, and then perform a mmap to make sure
     * that there are zeromapped pages up to and including the last
     * bss page.
     */
    padzero(elf_bss);
    elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */

    /* Map the last of the bss segment */
    if (last_bss > elf_bss) {
        target_mmap(elf_bss, last_bss - elf_bss,
                    PROT_READ|PROT_WRITE|PROT_EXEC,
                    MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    }
    free(elf_phdata);

    *interp_load_addr = load_addr;
    return ((unsigned long) interp_elf_ex->e_entry) + load_addr;
}
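/* On success the value returned above is the interpreter's entry point
   relocated by the chosen load_addr (also reported through
   *interp_load_addr); failure is signalled by returning ~0UL, which the
   caller treats as "unable to load interpreter". */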
/* Best attempt to load symbols from this ELF object. */
static void load_symbols(struct elfhdr *hdr, int fd)
{
    unsigned int i;
    struct elf_shdr sechdr, symtab, strtab;
    char *strings;
    struct syminfo *s;

    lseek(fd, hdr->e_shoff, SEEK_SET);
    for (i = 0; i < hdr->e_shnum; i++) {
        if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr))
            return;
#ifdef BSWAP_NEEDED
        bswap_shdr(&sechdr);
#endif
        if (sechdr.sh_type == SHT_SYMTAB) {
            symtab = sechdr;
            lseek(fd, hdr->e_shoff
                  + sizeof(sechdr) * sechdr.sh_link, SEEK_SET);
            if (read(fd, &strtab, sizeof(strtab))
                != sizeof(strtab))
                return;
#ifdef BSWAP_NEEDED
            bswap_shdr(&strtab);
#endif
            goto found;
        }
    }
    return; /* Shouldn't happen... */

 found:
    /* Now we know where the strtab and symtab are.  Snarf them. */
    s = malloc(sizeof(*s));
    s->disas_symtab = malloc(symtab.sh_size);
    s->disas_strtab = strings = malloc(strtab.sh_size);
    if (!s->disas_symtab || !s->disas_strtab)
        return;

    lseek(fd, symtab.sh_offset, SEEK_SET);
    if (read(fd, s->disas_symtab, symtab.sh_size) != symtab.sh_size)
        return;

#ifdef BSWAP_NEEDED
    for (i = 0; i < symtab.sh_size / sizeof(struct elf_sym); i++)
        bswap_sym(s->disas_symtab + sizeof(struct elf_sym)*i);
#endif

    lseek(fd, strtab.sh_offset, SEEK_SET);
    if (read(fd, strings, strtab.sh_size) != strtab.sh_size)
        return;
    s->disas_num_syms = symtab.sh_size / sizeof(struct elf_sym);
    s->next = syminfos;
    syminfos = s;
}
static int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
                           struct image_info * info)
{
    struct elfhdr elf_ex;
    struct elfhdr interp_elf_ex;
    struct exec interp_ex;
    int interpreter_fd = -1; /* avoid warning */
    unsigned long load_addr, load_bias;
    int load_addr_set = 0;
    unsigned int interpreter_type = INTERPRETER_NONE;
    unsigned char ibcs2_interpreter;
    int i;
    unsigned long mapped_addr;
    struct elf_phdr * elf_ppnt;
    struct elf_phdr *elf_phdata;
    unsigned long elf_bss, k, elf_brk;
    int retval;
    char * elf_interpreter;
    unsigned long elf_entry, interp_load_addr = 0;
    int status;
    unsigned long start_code, end_code, end_data;
    unsigned long elf_stack;
    char passed_fileno[6];

    ibcs2_interpreter = 0;
    status = 0;
    load_addr = 0;
    load_bias = 0;
    elf_ex = *((struct elfhdr *) bprm->buf);          /* exec-header */
#ifdef BSWAP_NEEDED
    bswap_ehdr(&elf_ex);
#endif

    if (elf_ex.e_ident[0] != 0x7f ||
        strncmp(&elf_ex.e_ident[1], "ELF", 3) != 0) {
        return -ENOEXEC;
    }

    /* First of all, some simple consistency checks */
    if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
        (!elf_check_arch(elf_ex.e_machine))) {
        return -ENOEXEC;
    }

    /* Now read in all of the header information */
    elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize * elf_ex.e_phnum);
    if (elf_phdata == NULL) {
        return -ENOMEM;
    }

    retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET);
    if (retval > 0) {
        retval = read(bprm->fd, (char *)elf_phdata,
                      elf_ex.e_phentsize * elf_ex.e_phnum);
    }

    if (retval < 0) {
        perror("load_elf_binary");
        exit(-1);
        free(elf_phdata);
        return -errno;
    }

#ifdef BSWAP_NEEDED
    elf_ppnt = elf_phdata;
    for (i = 0; i < elf_ex.e_phnum; i++, elf_ppnt++) {
        bswap_phdr(elf_ppnt);
    }
#endif
    elf_ppnt = elf_phdata;

    elf_bss = 0;
    elf_brk = 0;

    elf_stack = ~0UL;
    elf_interpreter = NULL;
    start_code = ~0UL;
    end_code = 0;
    end_data = 0;
    for (i = 0; i < elf_ex.e_phnum; i++) {
        if (elf_ppnt->p_type == PT_INTERP) {
            if (elf_interpreter != NULL) {
                free(elf_phdata);
                free(elf_interpreter);
                close(bprm->fd);
                return -EINVAL;
            }

            /* This is the program interpreter used for
             * shared libraries - for now assume that this
             * is an a.out format binary
             */

            elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);

            if (elf_interpreter == NULL) {
                free(elf_phdata);
                close(bprm->fd);
                return -ENOMEM;
            }

            retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET);
            if (retval >= 0) {
                retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz);
            }
            if (retval < 0) {
                perror("load_elf_binary2");
                exit(-1);
            }

            /* If the program interpreter is one of these two,
               then assume an iBCS2 image.  Otherwise assume
               a native linux image. */

            /* JRP - Need to add X86 lib dir stuff here... */

            if (strcmp(elf_interpreter, "/usr/lib/libc.so.1") == 0 ||
                strcmp(elf_interpreter, "/usr/lib/ld.so.1") == 0) {
                ibcs2_interpreter = 1;
            }

#if 0
            printf("Using ELF interpreter %s\n", elf_interpreter);
#endif
            if (retval >= 0) {
                retval = open(path(elf_interpreter), O_RDONLY);
                if (retval >= 0) {
                    interpreter_fd = retval;
                }
                else {
                    perror(elf_interpreter);
                    exit(-1);
                    /* retval = -errno; */
                }
            }

            if (retval >= 0) {
                retval = lseek(interpreter_fd, 0, SEEK_SET);
                if (retval >= 0) {
                    retval = read(interpreter_fd, bprm->buf, 128);
                }
            }
            if (retval >= 0) {
                interp_ex = *((struct exec *)bprm->buf);       /* aout exec-header */
                interp_elf_ex = *((struct elfhdr *)bprm->buf); /* elf exec-header */
            }
            if (retval < 0) {
                perror("load_elf_binary3");
                exit(-1);
                free(elf_phdata);
                free(elf_interpreter);
                close(bprm->fd);
                return retval;
            }
        }
        elf_ppnt++;
    }
    /* Some simple consistency checks for the interpreter */
    if (elf_interpreter) {
        interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;

        /* Now figure out which format our binary is */
        if ((N_MAGIC(interp_ex) != OMAGIC) && (N_MAGIC(interp_ex) != ZMAGIC) &&
            (N_MAGIC(interp_ex) != QMAGIC)) {
            interpreter_type = INTERPRETER_ELF;
        }

        if (interp_elf_ex.e_ident[0] != 0x7f ||
            strncmp(&interp_elf_ex.e_ident[1], "ELF", 3) != 0) {
            interpreter_type &= ~INTERPRETER_ELF;
        }

        if (!interpreter_type) {
            free(elf_interpreter);
            free(elf_phdata);
            close(bprm->fd);
            return -ELIBBAD;
        }
    }

    /* OK, we are done with that, now set up the arg stuff,
       and then start this sucker up */

    if (!bprm->sh_bang) {
        char *passed_p;

        if (interpreter_type == INTERPRETER_AOUT) {
            snprintf(passed_fileno, sizeof(passed_fileno), "%d", bprm->fd);
            passed_p = passed_fileno;

            if (elf_interpreter) {
                bprm->p = copy_strings(1, &passed_p, bprm->page, bprm->p);
                bprm->argc++;
            }
        }
        if (!bprm->p) {
            if (elf_interpreter) {
                free(elf_interpreter);
            }
            free(elf_phdata);
            close(bprm->fd);
            return -E2BIG;
        }
    }
    /* OK, this is the point of no return */
    info->end_data = 0;
    info->end_code = 0;
    info->start_mmap = (unsigned long)ELF_START_MMAP;
    info->mmap = 0;
    elf_entry = (unsigned long) elf_ex.e_entry;

    /* Do this so that we can load the interpreter, if need be.  We will
       change some of these later */
    info->rss = 0;
    bprm->p = setup_arg_pages(bprm->p, bprm, info);
    info->start_stack = bprm->p;

    /* Now we do a little grungy work by mmaping the ELF image into
     * the correct location in memory.  At this point, we assume that
     * the image should be loaded at a fixed address, not at a variable
     * address.
     */
    for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
        int elf_prot = 0;
        int elf_flags = 0;
        unsigned long error;

        if (elf_ppnt->p_type != PT_LOAD)
            continue;

        if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
        if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
        if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
        elf_flags = MAP_PRIVATE | MAP_DENYWRITE;
        if (elf_ex.e_type == ET_EXEC || load_addr_set) {
            elf_flags |= MAP_FIXED;
        } else if (elf_ex.e_type == ET_DYN) {
            /* Try and get dynamic programs out of the way of the default mmap
               base, as well as whatever program they might try to exec.  This
               is because the brk will follow the loader, and is not movable. */
            /* NOTE: for qemu, we do a big mmap to get enough space
               without hardcoding any address */
            error = target_mmap(0, ET_DYN_MAP_SIZE,
                                PROT_NONE, MAP_PRIVATE | MAP_ANON,
                                -1, 0);
            if (error == -1) {
                perror("mmap");
                exit(-1);
            }
            load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr);
        }

        error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
                            (elf_ppnt->p_filesz +
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
                            elf_prot,
                            (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
                            bprm->fd,
                            (elf_ppnt->p_offset -
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
        if (error == -1) {
            perror("mmap");
            exit(-1);
        }

#ifdef LOW_ELF_STACK
        if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
            elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr);
#endif

        if (!load_addr_set) {
            load_addr_set = 1;
            load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
            if (elf_ex.e_type == ET_DYN) {
                load_bias += error -
                    TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
                load_addr += load_bias;
            }
        }
        k = elf_ppnt->p_vaddr;
        if (k < start_code)
            start_code = k;
        k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
        if (k > elf_bss)
            elf_bss = k;
        if ((elf_ppnt->p_flags & PF_X) && end_code < k)
            end_code = k;
        if (end_data < k)
            end_data = k;
        k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
        if (k > elf_brk) elf_brk = k;
    }
    elf_entry += load_bias;
    elf_bss += load_bias;
    elf_brk += load_bias;
    start_code += load_bias;
    end_code += load_bias;
    // start_data += load_bias;
    end_data += load_bias;

    if (elf_interpreter) {
        if (interpreter_type & 1) {
            elf_entry = load_aout_interp(&interp_ex, interpreter_fd);
        }
        else if (interpreter_type & 2) {
            elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,
                                        &interp_load_addr);
        }

        close(interpreter_fd);
        free(elf_interpreter);

        if (elf_entry == ~0UL) {
            printf("Unable to load interpreter\n");
            free(elf_phdata);
            exit(-1);
            return 0;
        }
    }
    free(elf_phdata);

    if (loglevel)
        load_symbols(&elf_ex, bprm->fd);

    if (interpreter_type != INTERPRETER_AOUT)
        close(bprm->fd);
    info->personality = (ibcs2_interpreter ? PER_SVR4 : PER_LINUX);

#ifdef LOW_ELF_STACK
    info->start_stack = bprm->p = elf_stack - 4;
#endif
    bprm->p = (unsigned long)
        create_elf_tables((char *)bprm->p,
                          bprm->argc,
                          bprm->envc,
                          &elf_ex,
                          load_addr, load_bias,
                          interp_load_addr,
                          (interpreter_type == INTERPRETER_AOUT ? 0 : 1),
                          info);
    if (interpreter_type == INTERPRETER_AOUT)
        info->arg_start += strlen(passed_fileno) + 1;
    info->start_brk = info->brk = elf_brk;
    info->end_code = end_code;
    info->start_code = start_code;
    info->end_data = end_data;
    info->start_stack = bprm->p;

    /* Calling set_brk effectively mmaps the pages that we need for the bss
       and break sections */
    set_brk(elf_bss, elf_brk);

    padzero(elf_bss);

#if 0
    printf("(start_brk) %x\n", info->start_brk);
    printf("(end_code) %x\n", info->end_code);
    printf("(start_code) %x\n", info->start_code);
    printf("(end_data) %x\n", info->end_data);
    printf("(start_stack) %x\n", info->start_stack);
    printf("(brk) %x\n", info->brk);
#endif

    if (info->personality == PER_SVR4) {
        /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
           and some applications "depend" upon this behavior.
           Since we do not have the power to recompile these, we
           emulate the SVr4 behavior.  Sigh. */
        mapped_addr = target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
                                  MAP_FIXED | MAP_PRIVATE, -1, 0);
    }

#ifdef ELF_PLAT_INIT
    /*
     * The ABI may specify that certain registers be set up in special
     * ways (on i386 %edx is the address of a DT_FINI function, for
     * example).  This macro performs whatever initialization to
     * the regs structure is required.
     */
    ELF_PLAT_INIT(regs);
#endif

    info->entry = elf_entry;

    return 0;
}
int elf_exec(const char * filename, char ** argv, char ** envp,
             struct target_pt_regs * regs, struct image_info *infop)
{
    struct linux_binprm bprm;
    int retval;
    int i;

    bprm.p = TARGET_PAGE_SIZE*MAX_ARG_PAGES-sizeof(unsigned int);
    for (i = 0; i < MAX_ARG_PAGES; i++)         /* clear page-table */
        bprm.page[i] = 0;
    retval = open(filename, O_RDONLY);
    if (retval < 0)
        return retval;
    bprm.fd = retval;
    bprm.filename = (char *)filename;
    bprm.sh_bang = 0;
    bprm.loader = 0;
    bprm.exec = 0;
    bprm.dont_iput = 0;
    bprm.argc = count(argv);
    bprm.envc = count(envp);

    retval = prepare_binprm(&bprm);

    if (retval >= 0) {
        bprm.p = copy_strings(1, &bprm.filename, bprm.page, bprm.p);
        bprm.exec = bprm.p;
        bprm.p = copy_strings(bprm.envc, envp, bprm.page, bprm.p);
        bprm.p = copy_strings(bprm.argc, argv, bprm.page, bprm.p);
        if (!bprm.p) {
            retval = -E2BIG;
        }
    }

    if (retval >= 0) {
        retval = load_elf_binary(&bprm, regs, infop);
    }
    if (retval >= 0) {
        /* success.  Initialize important registers */
        init_thread(regs, infop);
        return retval;
    }

    /* Something went wrong, return the inode and free the argument pages */
    for (i = 0; i < MAX_ARG_PAGES; i++) {
        free_page((void *)bprm.page[i]);
    }
    return retval;
}
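/*
 * Typical use, as a sketch only (the real call site lives in the linux-user
 * startup code and is not shown in this file):
 *
 *   struct target_pt_regs regs;
 *   struct image_info info;
 *   memset(&regs, 0, sizeof(regs));
 *   if (elf_exec(filename, argv, envp, &regs, &info) < 0) {
 *       // report the error and bail out
 *   }
 *   // regs now holds the initial CPU state, info describes the image layout
 */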
static int load_aout_interp(void * exptr, int interp_fd)
{
    printf("a.out interpreter not yet supported\n");
    return 0;
}