[qemu/ar7.git] / bsd-user / elfload.c
/*
 * ELF loading code
 *
 * Copyright (c) 2013 Stacey D. Son
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "qemu.h"
#include "disas/disas.h"
#include "qemu/path.h"

static abi_ulong target_auxents;   /* Where the AUX entries are in target */
static size_t target_auxents_sz;   /* Size of AUX entries including AT_NULL */

#include "target_arch_reg.h"
#include "target_os_elf.h"
#include "target_os_stack.h"
#include "target_os_thread.h"
#include "target_os_user.h"

abi_ulong target_stksiz;
abi_ulong target_stkbas;

static int elf_core_dump(int signr, CPUArchState *env);
static int load_elf_sections(const struct elfhdr *hdr, struct elf_phdr *phdr,
    int fd, abi_ulong rbase, abi_ulong *baddrp);

static inline void memcpy_fromfs(void *to, const void *from, unsigned long n)
{
    memcpy(to, from, n);
}
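
/*
 * Byte-swap helpers: when host and target endianness differ
 * (BSWAP_NEEDED), the ELF structures read from the file are converted
 * to host byte order before use; otherwise these are no-op stubs.
 */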
#ifdef BSWAP_NEEDED
static void bswap_ehdr(struct elfhdr *ehdr)
{
    bswap16s(&ehdr->e_type);        /* Object file type */
    bswap16s(&ehdr->e_machine);     /* Architecture */
    bswap32s(&ehdr->e_version);     /* Object file version */
    bswaptls(&ehdr->e_entry);       /* Entry point virtual address */
    bswaptls(&ehdr->e_phoff);       /* Program header table file offset */
    bswaptls(&ehdr->e_shoff);       /* Section header table file offset */
    bswap32s(&ehdr->e_flags);       /* Processor-specific flags */
    bswap16s(&ehdr->e_ehsize);      /* ELF header size in bytes */
    bswap16s(&ehdr->e_phentsize);   /* Program header table entry size */
    bswap16s(&ehdr->e_phnum);       /* Program header table entry count */
    bswap16s(&ehdr->e_shentsize);   /* Section header table entry size */
    bswap16s(&ehdr->e_shnum);       /* Section header table entry count */
    bswap16s(&ehdr->e_shstrndx);    /* Section header string table index */
}

static void bswap_phdr(struct elf_phdr *phdr, int phnum)
{
    int i;

    for (i = 0; i < phnum; i++, phdr++) {
        bswap32s(&phdr->p_type);    /* Segment type */
        bswap32s(&phdr->p_flags);   /* Segment flags */
        bswaptls(&phdr->p_offset);  /* Segment file offset */
        bswaptls(&phdr->p_vaddr);   /* Segment virtual address */
        bswaptls(&phdr->p_paddr);   /* Segment physical address */
        bswaptls(&phdr->p_filesz);  /* Segment size in file */
        bswaptls(&phdr->p_memsz);   /* Segment size in memory */
        bswaptls(&phdr->p_align);   /* Segment alignment */
    }
}

static void bswap_shdr(struct elf_shdr *shdr, int shnum)
{
    int i;

    for (i = 0; i < shnum; i++, shdr++) {
        bswap32s(&shdr->sh_name);
        bswap32s(&shdr->sh_type);
        bswaptls(&shdr->sh_flags);
        bswaptls(&shdr->sh_addr);
        bswaptls(&shdr->sh_offset);
        bswaptls(&shdr->sh_size);
        bswap32s(&shdr->sh_link);
        bswap32s(&shdr->sh_info);
        bswaptls(&shdr->sh_addralign);
        bswaptls(&shdr->sh_entsize);
    }
}

static void bswap_sym(struct elf_sym *sym)
{
    bswap32s(&sym->st_name);
    bswaptls(&sym->st_value);
    bswaptls(&sym->st_size);
    bswap16s(&sym->st_shndx);
}

static void bswap_note(struct elf_note *en)
{
    bswap32s(&en->n_namesz);
    bswap32s(&en->n_descsz);
    bswap32s(&en->n_type);
}

#else /* ! BSWAP_NEEDED */

static void bswap_ehdr(struct elfhdr *ehdr) { }
static void bswap_phdr(struct elf_phdr *phdr, int phnum) { }
static void bswap_shdr(struct elf_shdr *shdr, int shnum) { }
static void bswap_sym(struct elf_sym *sym) { }
static void bswap_note(struct elf_note *en) { }

#endif /* ! BSWAP_NEEDED */
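
/* elf_core_dump(), declared above, is expected to be provided by elfcore.c. */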
#include "elfcore.c"

/*
 * 'copy_elf_strings()' copies argument/environment strings from user
 * memory to free pages in kernel memory. These are in a format ready
 * to be put directly into the top of new user memory.
 */
static abi_ulong copy_elf_strings(int argc, char **argv, void **page,
                                  abi_ulong p)
{
    char *tmp, *tmp1, *pag = NULL;
    int len, offset = 0;

    if (!p) {
        return 0;       /* bullet-proofing */
    }
    while (argc-- > 0) {
        tmp = argv[argc];
        if (!tmp) {
            fprintf(stderr, "VFS: argc is wrong");
            exit(-1);
        }
        tmp1 = tmp;
        while (*tmp++) {
            continue;
        }
        len = tmp - tmp1;
        if (p < len) {  /* this shouldn't happen - 128kB */
            return 0;
        }
        while (len) {
            --p; --tmp; --len;
            if (--offset < 0) {
                offset = p % TARGET_PAGE_SIZE;
                pag = page[p / TARGET_PAGE_SIZE];
                if (!pag) {
                    pag = g_try_malloc0(TARGET_PAGE_SIZE);
                    page[p / TARGET_PAGE_SIZE] = pag;
                    if (!pag) {
                        return 0;
                    }
                }
            }
            if (len == 0 || offset == 0) {
                *(pag + offset) = *tmp;
            } else {
                int bytes_to_copy = (len > offset) ? offset : len;
                tmp -= bytes_to_copy;
                p -= bytes_to_copy;
                offset -= bytes_to_copy;
                len -= bytes_to_copy;
                memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
            }
        }
    }
    return p;
}
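
/*
 * Reserve the guest stack region with a guard page above it and lay
 * out the initial stack contents via setup_initial_stack().
 */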
static void setup_arg_pages(struct bsd_binprm *bprm, struct image_info *info,
                            abi_ulong *stackp, abi_ulong *stringp)
{
    abi_ulong stack_base, size;
    abi_long addr;

    /*
     * Create enough stack to hold everything. If we don't use it for args,
     * we'll use it for something else...
     */
    size = target_dflssiz;
    stack_base = TARGET_USRSTACK - size;
    addr = target_mmap(stack_base, size + qemu_host_page_size,
                       PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
    if (addr == -1) {
        perror("stk mmap");
        exit(-1);
    }
    /* we reserve one extra page at the top of the stack as guard */
    target_mprotect(addr + size, qemu_host_page_size, PROT_NONE);

    target_stksiz = size;
    target_stkbas = addr;

    if (setup_initial_stack(bprm, stackp, stringp) != 0) {
        perror("stk setup");
        exit(-1);
    }
}
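
/*
 * Map zero-filled anonymous memory over the page-aligned range
 * [start, end), used here for the bss/brk area of a loaded segment.
 */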
static void set_brk(abi_ulong start, abi_ulong end)
{
    /* page-align the start and end addresses... */
    start = HOST_PAGE_ALIGN(start);
    end = HOST_PAGE_ALIGN(end);
    if (end <= start) {
        return;
    }
    if (target_mmap(start, end - start, PROT_READ | PROT_WRITE | PROT_EXEC,
                    MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) == -1) {
        perror("cannot mmap brk");
        exit(-1);
    }
}

/*
 * We need to explicitly zero any fractional pages after the data
 * section (i.e. bss). These would otherwise contain junk from the
 * file that should not be in memory.
 */
static void padzero(abi_ulong elf_bss, abi_ulong last_bss)
{
    abi_ulong nbyte;

    if (elf_bss >= last_bss) {
        return;
    }

    /*
     * XXX: this is really a hack: if the real host page size is
     * smaller than the target page size, some pages after the end
     * of the file may not be mapped. A better fix would be to
     * patch target_mmap(), but it is more complicated as the file
     * size must be known.
     */
    if (qemu_real_host_page_size() < qemu_host_page_size) {
        abi_ulong end_addr, end_addr1;
        end_addr1 = REAL_HOST_PAGE_ALIGN(elf_bss);
        end_addr = HOST_PAGE_ALIGN(elf_bss);
        if (end_addr1 < end_addr) {
            mmap((void *)g2h_untagged(end_addr1), end_addr - end_addr1,
                 PROT_READ | PROT_WRITE | PROT_EXEC,
                 MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0);
        }
    }

    nbyte = elf_bss & (qemu_host_page_size - 1);
    if (nbyte) {
        nbyte = qemu_host_page_size - nbyte;
        do {
            /* FIXME - what to do if put_user() fails? */
            put_user_u8(0, elf_bss);
            elf_bss++;
        } while (--nbyte);
    }
}
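
/*
 * Map the ELF interpreter (runtime linker) from interpreter_fd.
 * Returns the interpreter's relocated entry point, or ~0 if the header
 * fails the consistency checks; the base it was loaded at is reported
 * through *interp_load_addr.
 */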
static abi_ulong load_elf_interp(struct elfhdr *interp_elf_ex,
                                 int interpreter_fd,
                                 abi_ulong *interp_load_addr)
{
    struct elf_phdr *elf_phdata = NULL;
    abi_ulong rbase;
    int retval;
    abi_ulong baddr, error;

    error = 0;

    bswap_ehdr(interp_elf_ex);
    /* First of all, some simple consistency checks */
    if ((interp_elf_ex->e_type != ET_EXEC && interp_elf_ex->e_type != ET_DYN) ||
        !elf_check_arch(interp_elf_ex->e_machine)) {
        return ~((abi_ulong)0UL);
    }

    /* Now read in all of the header information */
    if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE) {
        return ~(abi_ulong)0UL;
    }

    elf_phdata = (struct elf_phdr *) malloc(sizeof(struct elf_phdr) *
                                            interp_elf_ex->e_phnum);

    if (!elf_phdata) {
        return ~((abi_ulong)0UL);
    }

    /*
     * If the size of this structure has changed, then punt, since
     * we will be doing the wrong thing.
     */
    if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
        free(elf_phdata);
        return ~((abi_ulong)0UL);
    }

    retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);
    if (retval >= 0) {
        retval = read(interpreter_fd, (char *) elf_phdata,
                      sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
    }
    if (retval < 0) {
        perror("load_elf_interp");
        exit(-1);
        free(elf_phdata);
        return retval;
    }

    bswap_phdr(elf_phdata, interp_elf_ex->e_phnum);

    rbase = 0;
    if (interp_elf_ex->e_type == ET_DYN) {
        /*
         * In order to avoid hardcoding the interpreter load
         * address in qemu, we allocate a big enough memory zone.
         */
        rbase = target_mmap(0, INTERP_MAP_SIZE, PROT_NONE,
                            MAP_PRIVATE | MAP_ANON, -1, 0);
        if (rbase == -1) {
            perror("mmap");
            exit(-1);
        }
    }

    error = load_elf_sections(interp_elf_ex, elf_phdata, interpreter_fd, rbase,
                              &baddr);
    if (error != 0) {
        perror("load_elf_sections");
        exit(-1);
    }

    /* Now use mmap to map the library into memory. */
    close(interpreter_fd);
    free(elf_phdata);

    *interp_load_addr = baddr;
    return ((abi_ulong) interp_elf_ex->e_entry) + rbase;
}
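
/*
 * bsearch() comparator used by lookup_symbolxx(): compares a guest
 * address against a symbol's [st_value, st_value + st_size) range.
 */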
static int symfind(const void *s0, const void *s1)
{
    target_ulong addr = *(target_ulong *)s0;
    struct elf_sym *sym = (struct elf_sym *)s1;
    int result = 0;

    if (addr < sym->st_value) {
        result = -1;
    } else if (addr >= sym->st_value + sym->st_size) {
        result = 1;
    }
    return result;
}
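
/*
 * Resolve a guest address to a symbol name for the disassembler;
 * returns the empty string when no loaded symbol covers the address.
 */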
static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr)
{
#if ELF_CLASS == ELFCLASS32
    struct elf_sym *syms = s->disas_symtab.elf32;
#else
    struct elf_sym *syms = s->disas_symtab.elf64;
#endif

    /* binary search */
    struct elf_sym *sym;

    sym = bsearch(&orig_addr, syms, s->disas_num_syms, sizeof(*syms), symfind);
    if (sym != NULL) {
        return s->disas_strtab + sym->st_name;
    }

    return "";
}
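
/* qsort() comparator ordering symbols by ascending st_value. */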
/* FIXME: This should use elf_ops.h */
static int symcmp(const void *s0, const void *s1)
{
    struct elf_sym *sym0 = (struct elf_sym *)s0;
    struct elf_sym *sym1 = (struct elf_sym *)s1;
    return (sym0->st_value < sym1->st_value) ? -1 :
        ((sym0->st_value > sym1->st_value) ? 1 : 0);
}

/* Best attempt to load symbols from this ELF object. */
static void load_symbols(struct elfhdr *hdr, int fd)
{
    unsigned int i, nsyms;
    struct elf_shdr sechdr, symtab, strtab;
    char *strings;
    struct syminfo *s;
    struct elf_sym *syms, *new_syms;

    lseek(fd, hdr->e_shoff, SEEK_SET);
    for (i = 0; i < hdr->e_shnum; i++) {
        if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr)) {
            return;
        }
        bswap_shdr(&sechdr, 1);
        if (sechdr.sh_type == SHT_SYMTAB) {
            symtab = sechdr;
            lseek(fd, hdr->e_shoff + sizeof(sechdr) * sechdr.sh_link,
                  SEEK_SET);
            if (read(fd, &strtab, sizeof(strtab)) != sizeof(strtab)) {
                return;
            }
            bswap_shdr(&strtab, 1);
            goto found;
        }
    }
    return; /* Shouldn't happen... */

 found:
    /* Now we know where the strtab and symtab are.  Snarf them. */
    s = malloc(sizeof(*s));
    syms = malloc(symtab.sh_size);
    if (!syms) {
        free(s);
        return;
    }
    s->disas_strtab = strings = malloc(strtab.sh_size);
    if (!s->disas_strtab) {
        free(s);
        free(syms);
        return;
    }

    lseek(fd, symtab.sh_offset, SEEK_SET);
    if (read(fd, syms, symtab.sh_size) != symtab.sh_size) {
        free(s);
        free(syms);
        free(strings);
        return;
    }

    nsyms = symtab.sh_size / sizeof(struct elf_sym);

    i = 0;
    while (i < nsyms) {
        bswap_sym(syms + i);
        /* Throw away entries which we do not need. */
        if (syms[i].st_shndx == SHN_UNDEF ||
            syms[i].st_shndx >= SHN_LORESERVE ||
            ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) {
            nsyms--;
            if (i < nsyms) {
                syms[i] = syms[nsyms];
            }
            continue;
        }
#if defined(TARGET_ARM) || defined(TARGET_MIPS)
        /* The bottom address bit marks a Thumb or MIPS16 symbol. */
        syms[i].st_value &= ~(target_ulong)1;
#endif
        i++;
    }

    /*
     * Attempt to free the storage associated with the local symbols
     * that we threw away.  Whether or not this has any effect on the
     * memory allocation depends on the malloc implementation and how
     * many symbols we managed to discard.
     */
    new_syms = realloc(syms, nsyms * sizeof(*syms));
    if (new_syms == NULL) {
        free(s);
        free(syms);
        free(strings);
        return;
    }
    syms = new_syms;

    qsort(syms, nsyms, sizeof(*syms), symcmp);

    lseek(fd, strtab.sh_offset, SEEK_SET);
    if (read(fd, strings, strtab.sh_size) != strtab.sh_size) {
        free(s);
        free(syms);
        free(strings);
        return;
    }
    s->disas_num_syms = nsyms;
#if ELF_CLASS == ELFCLASS32
    s->disas_symtab.elf32 = syms;
    s->lookup_symbol = (lookup_symbol_t)lookup_symbolxx;
#else
    s->disas_symtab.elf64 = syms;
    s->lookup_symbol = (lookup_symbol_t)lookup_symbolxx;
#endif
    s->next = syminfos;
    syminfos = s;
}

/* Check the ELF header and see if this is a target ELF binary. */
int is_target_elf_binary(int fd)
{
    uint8_t buf[128];
    struct elfhdr elf_ex;

    if (lseek(fd, 0L, SEEK_SET) < 0) {
        return 0;
    }
    if (read(fd, buf, sizeof(buf)) < 0) {
        return 0;
    }

    elf_ex = *((struct elfhdr *)buf);
    bswap_ehdr(&elf_ex);

    if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
        (!elf_check_arch(elf_ex.e_machine))) {
        return 0;
    } else {
        return 1;
    }
}
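
/*
 * mmap each PT_LOAD segment of 'hdr' from 'fd' at rbase + p_vaddr with
 * protections derived from its p_flags, zero-filling any bss beyond
 * p_filesz.  The page-aligned base of the first PT_LOAD segment is
 * returned through *baddrp.
 */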
static int
load_elf_sections(const struct elfhdr *hdr, struct elf_phdr *phdr, int fd,
                  abi_ulong rbase, abi_ulong *baddrp)
{
    struct elf_phdr *elf_ppnt;
    abi_ulong baddr;
    int i;
    bool first;

    /*
     * Now we do a little grungy work by mmapping the ELF image into
     * the correct location in memory. At this point, we assume that
     * the image should be loaded at a fixed address, not at a variable
     * address.
     */
    first = true;
    for (i = 0, elf_ppnt = phdr; i < hdr->e_phnum; i++, elf_ppnt++) {
        int elf_prot = 0;
        abi_ulong error;

        /* XXX Skip memsz == 0. */
        if (elf_ppnt->p_type != PT_LOAD) {
            continue;
        }

        if (elf_ppnt->p_flags & PF_R) {
            elf_prot |= PROT_READ;
        }
        if (elf_ppnt->p_flags & PF_W) {
            elf_prot |= PROT_WRITE;
        }
        if (elf_ppnt->p_flags & PF_X) {
            elf_prot |= PROT_EXEC;
        }

        error = target_mmap(TARGET_ELF_PAGESTART(rbase + elf_ppnt->p_vaddr),
                            (elf_ppnt->p_filesz +
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
                            elf_prot,
                            (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
                            fd,
                            (elf_ppnt->p_offset -
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
        if (error == -1) {
            perror("mmap");
            exit(-1);
        } else if (elf_ppnt->p_memsz != elf_ppnt->p_filesz) {
            abi_ulong start_bss, end_bss;

            start_bss = rbase + elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
            end_bss = rbase + elf_ppnt->p_vaddr + elf_ppnt->p_memsz;

            /*
             * Calling set_brk effectively mmaps the pages that we need for the
             * bss and break sections.
             */
            set_brk(start_bss, end_bss);
            padzero(start_bss, end_bss);
        }

        if (first) {
            baddr = TARGET_ELF_PAGESTART(rbase + elf_ppnt->p_vaddr);
            first = false;
        }
    }

    if (baddrp != NULL) {
        *baddrp = baddr;
    }
    return 0;
}
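
/*
 * Load the main ELF image described by bprm: copy the argument and
 * environment strings, map the PT_LOAD segments, load the PT_INTERP
 * interpreter if present, build the initial stack and ELF auxiliary
 * vector, and fill in 'info'.  Returns 0 on success or a negative
 * errno-style value on failure.
 */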
int load_elf_binary(struct bsd_binprm *bprm, struct target_pt_regs *regs,
                    struct image_info *info)
{
    struct elfhdr elf_ex;
    struct elfhdr interp_elf_ex;
    int interpreter_fd = -1; /* avoid warning */
    abi_ulong load_addr;
    int i;
    struct elf_phdr *elf_ppnt;
    struct elf_phdr *elf_phdata;
    abi_ulong elf_brk;
    int error, retval;
    char *elf_interpreter;
    abi_ulong baddr, elf_entry, et_dyn_addr, interp_load_addr = 0;
    abi_ulong reloc_func_desc = 0;

    load_addr = 0;
    elf_ex = *((struct elfhdr *) bprm->buf);    /* exec-header */
    bswap_ehdr(&elf_ex);

    /* First of all, some simple consistency checks */
    if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
        (!elf_check_arch(elf_ex.e_machine))) {
        return -ENOEXEC;
    }

    bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
    bprm->p = copy_elf_strings(bprm->envc, bprm->envp, bprm->page, bprm->p);
    bprm->p = copy_elf_strings(bprm->argc, bprm->argv, bprm->page, bprm->p);
    if (!bprm->p) {
        retval = -E2BIG;
    }

    /* Now read in all of the header information */
    elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize * elf_ex.e_phnum);
    if (elf_phdata == NULL) {
        return -ENOMEM;
    }

    retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET);
    if (retval > 0) {
        retval = read(bprm->fd, (char *)elf_phdata,
                      elf_ex.e_phentsize * elf_ex.e_phnum);
    }

    if (retval < 0) {
        perror("load_elf_binary");
        exit(-1);
        free(elf_phdata);
        return -errno;
    }

    bswap_phdr(elf_phdata, elf_ex.e_phnum);
    elf_ppnt = elf_phdata;

    elf_brk = 0;

    elf_interpreter = NULL;
    for (i = 0; i < elf_ex.e_phnum; i++) {
        if (elf_ppnt->p_type == PT_INTERP) {
            if (elf_interpreter != NULL) {
                free(elf_phdata);
                free(elf_interpreter);
                close(bprm->fd);
                return -EINVAL;
            }

            elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);
            if (elf_interpreter == NULL) {
                free(elf_phdata);
                close(bprm->fd);
                return -ENOMEM;
            }

            retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET);
            if (retval >= 0) {
                retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz);
            }
            if (retval < 0) {
                perror("load_elf_binary2");
                exit(-1);
            }

            if (retval >= 0) {
                retval = open(path(elf_interpreter), O_RDONLY);
                if (retval >= 0) {
                    interpreter_fd = retval;
                } else {
                    perror(elf_interpreter);
                    exit(-1);
                    /* retval = -errno; */
                }
            }

            if (retval >= 0) {
                retval = lseek(interpreter_fd, 0, SEEK_SET);
                if (retval >= 0) {
                    retval = read(interpreter_fd, bprm->buf, 128);
                }
            }
            if (retval >= 0) {
                interp_elf_ex = *((struct elfhdr *) bprm->buf);
            }
            if (retval < 0) {
                perror("load_elf_binary3");
                exit(-1);
                free(elf_phdata);
                free(elf_interpreter);
                close(bprm->fd);
                return retval;
            }
        }
        elf_ppnt++;
    }

    /* Some simple consistency checks for the interpreter */
    if (elf_interpreter) {
        if (interp_elf_ex.e_ident[0] != 0x7f ||
            strncmp((char *)&interp_elf_ex.e_ident[1], "ELF", 3) != 0) {
            free(elf_interpreter);
            free(elf_phdata);
            close(bprm->fd);
            return -ELIBBAD;
        }
    }

    /*
     * OK, we are done with that, now set up the arg stuff, and then start this
     * sucker up
     */
    if (!bprm->p) {
        free(elf_interpreter);
        free(elf_phdata);
        close(bprm->fd);
        return -E2BIG;
    }

    /* OK, this is the point of no return */
    info->end_data = 0;
    info->end_code = 0;
    info->start_mmap = (abi_ulong)ELF_START_MMAP;
    info->mmap = 0;
    elf_entry = (abi_ulong) elf_ex.e_entry;

    /* XXX Join this with PT_INTERP search? */
    baddr = 0;
    for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
        if (elf_ppnt->p_type != PT_LOAD) {
            continue;
        }
        baddr = elf_ppnt->p_vaddr;
        break;
    }

    et_dyn_addr = 0;
    if (elf_ex.e_type == ET_DYN && baddr == 0) {
        et_dyn_addr = ELF_ET_DYN_LOAD_ADDR;
    }

    /*
     * Do this so that we can load the interpreter, if need be. We will
     * change some of these later.
     */
    info->rss = 0;
    setup_arg_pages(bprm, info, &bprm->p, &bprm->stringp);
    info->start_stack = bprm->p;

    info->elf_flags = elf_ex.e_flags;

    error = load_elf_sections(&elf_ex, elf_phdata, bprm->fd, et_dyn_addr,
                              &load_addr);
    for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
        if (elf_ppnt->p_type != PT_LOAD) {
            continue;
        }
        if (elf_ppnt->p_memsz > elf_ppnt->p_filesz) {
            elf_brk = MAX(elf_brk, et_dyn_addr + elf_ppnt->p_vaddr +
                          elf_ppnt->p_memsz);
        }
    }
    if (error != 0) {
        perror("load_elf_sections");
        exit(-1);
    }

    if (elf_interpreter) {
        elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,
                                    &interp_load_addr);
        reloc_func_desc = interp_load_addr;

        close(interpreter_fd);
        free(elf_interpreter);

        if (elf_entry == ~((abi_ulong)0UL)) {
            printf("Unable to load interpreter\n");
            free(elf_phdata);
            exit(-1);
            return 0;
        }
    } else {
        interp_load_addr = et_dyn_addr;
        elf_entry += interp_load_addr;
    }

    free(elf_phdata);

    if (qemu_log_enabled()) {
        load_symbols(&elf_ex, bprm->fd);
    }

    close(bprm->fd);

    bprm->p = target_create_elf_tables(bprm->p, bprm->argc, bprm->envc,
                                       bprm->stringp, &elf_ex, load_addr,
                                       et_dyn_addr, interp_load_addr, info);
    info->load_addr = reloc_func_desc;
    info->start_brk = info->brk = elf_brk;
    info->start_stack = bprm->p;
    info->load_bias = 0;

    info->entry = elf_entry;

#ifdef USE_ELF_CORE_DUMP
    bprm->core_dump = &elf_core_dump;
#else
    bprm->core_dump = NULL;
#endif

    return 0;
}
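
/* Set up the initial guest register state via the per-target target_thread_init(). */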
void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    target_thread_init(regs, infop);
}