/*
 * Copyright (c) 2013 Stacey D. Son
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "qemu.h"
#include "disas/disas.h"
#include "qemu/path.h"

static abi_ulong target_auxents;    /* Where the AUX entries are in target */
static size_t target_auxents_sz;    /* Size of AUX entries including AT_NULL */

#include "target_arch_reg.h"
#include "target_os_elf.h"
#include "target_os_stack.h"
#include "target_os_thread.h"
#include "target_os_user.h"

abi_ulong target_stksiz;
abi_ulong target_stkbas;

static int elf_core_dump(int signr, CPUArchState *env);
static int load_elf_sections(const struct elfhdr *hdr, struct elf_phdr *phdr,
                             int fd, abi_ulong rbase, abi_ulong *baddrp);

static inline void memcpy_fromfs(void *to, const void *from, unsigned long n)
{
    memcpy(to, from, n);
}

#ifdef BSWAP_NEEDED

static void bswap_ehdr(struct elfhdr *ehdr)
{
    bswap16s(&ehdr->e_type);        /* Object file type */
    bswap16s(&ehdr->e_machine);     /* Architecture */
    bswap32s(&ehdr->e_version);     /* Object file version */
    bswaptls(&ehdr->e_entry);       /* Entry point virtual address */
    bswaptls(&ehdr->e_phoff);       /* Program header table file offset */
    bswaptls(&ehdr->e_shoff);       /* Section header table file offset */
    bswap32s(&ehdr->e_flags);       /* Processor-specific flags */
    bswap16s(&ehdr->e_ehsize);      /* ELF header size in bytes */
    bswap16s(&ehdr->e_phentsize);   /* Program header table entry size */
    bswap16s(&ehdr->e_phnum);       /* Program header table entry count */
    bswap16s(&ehdr->e_shentsize);   /* Section header table entry size */
    bswap16s(&ehdr->e_shnum);       /* Section header table entry count */
    bswap16s(&ehdr->e_shstrndx);    /* Section header string table index */
}

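/*
 * Note (assumption): bswaptls() is taken here to swap an ELF-word-sized
 * field, 32-bit for ELFCLASS32 and 64-bit for ELFCLASS64, so the same
 * helpers serve both ELF classes without separate code paths.
 */
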
static void bswap_phdr(struct elf_phdr *phdr, int phnum)
{
    int i;

    for (i = 0; i < phnum; i++, phdr++) {
        bswap32s(&phdr->p_type);        /* Segment type */
        bswap32s(&phdr->p_flags);       /* Segment flags */
        bswaptls(&phdr->p_offset);      /* Segment file offset */
        bswaptls(&phdr->p_vaddr);       /* Segment virtual address */
        bswaptls(&phdr->p_paddr);       /* Segment physical address */
        bswaptls(&phdr->p_filesz);      /* Segment size in file */
        bswaptls(&phdr->p_memsz);       /* Segment size in memory */
        bswaptls(&phdr->p_align);       /* Segment alignment */
    }
}

static void bswap_shdr(struct elf_shdr *shdr, int shnum)
{
    int i;

    for (i = 0; i < shnum; i++, shdr++) {
        bswap32s(&shdr->sh_name);
        bswap32s(&shdr->sh_type);
        bswaptls(&shdr->sh_flags);
        bswaptls(&shdr->sh_addr);
        bswaptls(&shdr->sh_offset);
        bswaptls(&shdr->sh_size);
        bswap32s(&shdr->sh_link);
        bswap32s(&shdr->sh_info);
        bswaptls(&shdr->sh_addralign);
        bswaptls(&shdr->sh_entsize);
    }
}

static void bswap_sym(struct elf_sym *sym)
{
    bswap32s(&sym->st_name);
    bswaptls(&sym->st_value);
    bswaptls(&sym->st_size);
    bswap16s(&sym->st_shndx);
}

static void bswap_note(struct elf_note *en)
{
    bswap32s(&en->n_namesz);
    bswap32s(&en->n_descsz);
    bswap32s(&en->n_type);
}

#else /* ! BSWAP_NEEDED */

static void bswap_ehdr(struct elfhdr *ehdr) { }
static void bswap_phdr(struct elf_phdr *phdr, int phnum) { }
static void bswap_shdr(struct elf_shdr *shdr, int shnum) { }
static void bswap_sym(struct elf_sym *sym) { }
static void bswap_note(struct elf_note *en) { }

#endif /* ! BSWAP_NEEDED */

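/*
 * BSWAP_NEEDED is expected to be defined by the per-target headers only when
 * host and target byte order differ; when it is not, the empty stubs above
 * keep the callers unconditional and compile away to nothing.
 */
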
/*
 * 'copy_elf_strings()' copies argument/environment strings from user
 * memory to free pages in kernel mem.  These are in a format ready
 * to be put directly into the top of new user memory.
 */
static abi_ulong copy_elf_strings(int argc, char **argv, void **page,
                                  abi_ulong p)
{
    char *tmp, *tmp1, *pag = NULL;
    int len, offset = 0;

    if (!p) {
        return 0;       /* bullet-proofing */
    }

    while (argc-- > 0) {
        tmp = argv[argc];
        if (!tmp) {
            fprintf(stderr, "VFS: argc is wrong");
            exit(-1);
        }
        tmp1 = tmp;
        while (*tmp++) {
            continue;
        }
        len = tmp - tmp1;
        if (p < len) {  /* this shouldn't happen - 128kB */
            return 0;
        }
        while (len) {
            --p; --tmp; --len;
            if (--offset < 0) {
                offset = p % TARGET_PAGE_SIZE;
                pag = page[p / TARGET_PAGE_SIZE];
                if (!pag) {
                    pag = g_try_malloc0(TARGET_PAGE_SIZE);
                    page[p / TARGET_PAGE_SIZE] = pag;
                    if (!pag) {
                        return 0;
                    }
                }
            }
            if (len == 0 || offset == 0) {
                *(pag + offset) = *tmp;
            } else {
                int bytes_to_copy = (len > offset) ? offset : len;
                tmp -= bytes_to_copy;
                p -= bytes_to_copy;
                offset -= bytes_to_copy;
                len -= bytes_to_copy;
                memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
            }
        }
    }
    return p;
}

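/*
 * Packing sketch (illustrative values only): with TARGET_PAGE_SIZE = 4096
 * and p = 0x2000 on entry, the string "abc" (4 bytes with its NUL) lands at
 * offsets 0xffc..0xfff of page[1] and the function returns p = 0x1ffc, ready
 * for the next, lower-addressed string.
 */
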
static void setup_arg_pages(struct bsd_binprm *bprm, struct image_info *info,
                            abi_ulong *stackp, abi_ulong *stringp)
{
    abi_ulong stack_base, size;
    abi_ulong addr;

    /*
     * Create enough stack to hold everything.  If we don't use it for args,
     * we'll use it for something else...
     */
    size = target_dflssiz;
    stack_base = TARGET_USRSTACK - size;
    addr = target_mmap(stack_base, size + qemu_host_page_size,
                       PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
    if (addr == -1) {
        perror("stk mmap");
        exit(-1);
    }
    /* we reserve one extra page at the top of the stack as guard */
    target_mprotect(addr + size, qemu_host_page_size, PROT_NONE);

    target_stksiz = size;
    target_stkbas = addr;

    if (setup_initial_stack(bprm, stackp, stringp) != 0) {
        perror("stk setup");
        exit(-1);
    }
}

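/*
 * The stack proper is the first target_dflssiz bytes of the mapping; the
 * extra host page mapped above it (at addr + size) is immediately
 * re-protected PROT_NONE so that a guest access just past the stack top
 * faults instead of silently touching a neighbouring mapping.
 */
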
static void set_brk(abi_ulong start, abi_ulong end)
{
    /* page-align the start and end addresses... */
    start = HOST_PAGE_ALIGN(start);
    end = HOST_PAGE_ALIGN(end);
    if (end <= start) {
        return;
    }
    if (target_mmap(start, end - start, PROT_READ | PROT_WRITE | PROT_EXEC,
                    MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) == -1) {
        perror("cannot mmap brk");
        exit(-1);
    }
}

/*
 * We need to explicitly zero any fractional pages after the data
 * section (i.e. bss).  This would contain the junk from the file that
 * should not be in memory.
 */
static void padzero(abi_ulong elf_bss, abi_ulong last_bss)
{
    abi_ulong nbyte;

    if (elf_bss >= last_bss) {
        return;
    }

    /*
     * XXX: this is really a hack: if the real host page size is
     * smaller than the target page size, some pages after the end
     * of the file may not be mapped.  A better fix would be to
     * patch target_mmap(), but it is more complicated as the file
     * size must be known.
     */
    if (qemu_real_host_page_size() < qemu_host_page_size) {
        abi_ulong end_addr, end_addr1;
        end_addr1 = REAL_HOST_PAGE_ALIGN(elf_bss);
        end_addr = HOST_PAGE_ALIGN(elf_bss);
        if (end_addr1 < end_addr) {
            mmap((void *)g2h_untagged(end_addr1), end_addr - end_addr1,
                 PROT_READ | PROT_WRITE | PROT_EXEC,
                 MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0);
        }
    }

    nbyte = elf_bss & (qemu_host_page_size - 1);
    if (nbyte) {
        nbyte = qemu_host_page_size - nbyte;
        do {
            /* FIXME - what to do if put_user() fails? */
            put_user_u8(0, elf_bss);
            elf_bss++;
        } while (--nbyte);
    }
}

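/*
 * Worked example (illustrative values): with qemu_host_page_size = 4096 and
 * elf_bss = 0x5234, nbyte is first 0x234 and then 4096 - 0x234 = 0xdcc, so
 * bytes 0x5234..0x5fff are zeroed one at a time, clearing whatever file tail
 * was mapped into that final partial page.
 */
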
static abi_ulong load_elf_interp(struct elfhdr *interp_elf_ex,
                                 int interpreter_fd,
                                 abi_ulong *interp_load_addr)
{
    struct elf_phdr *elf_phdata = NULL;
    abi_ulong rbase;
    int retval;
    abi_ulong baddr, error;

    bswap_ehdr(interp_elf_ex);

    /* First of all, some simple consistency checks */
    if ((interp_elf_ex->e_type != ET_EXEC && interp_elf_ex->e_type != ET_DYN) ||
        !elf_check_arch(interp_elf_ex->e_machine)) {
        return ~((abi_ulong)0UL);
    }

    /* Now read in all of the header information */
    if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE) {
        return ~(abi_ulong)0UL;
    }

    elf_phdata = (struct elf_phdr *) malloc(sizeof(struct elf_phdr) *
                                            interp_elf_ex->e_phnum);
    if (!elf_phdata) {
        return ~((abi_ulong)0UL);
    }

    /*
     * If the size of this structure has changed, then punt, since
     * we will be doing the wrong thing.
     */
    if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
        free(elf_phdata);
        return ~((abi_ulong)0UL);
    }

    retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);
    if (retval >= 0) {
        retval = read(interpreter_fd, (char *) elf_phdata,
                      sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
    }
    if (retval < 0) {
        perror("load_elf_interp");
        free(elf_phdata);
        exit(-1);
    }

    bswap_phdr(elf_phdata, interp_elf_ex->e_phnum);

    rbase = 0;
    if (interp_elf_ex->e_type == ET_DYN) {
        /*
         * In order to avoid hardcoding the interpreter load
         * address in qemu, we allocate a big enough memory zone.
         */
        rbase = target_mmap(0, INTERP_MAP_SIZE, PROT_NONE,
                            MAP_PRIVATE | MAP_ANON, -1, 0);
        if (rbase == -1) {
            perror("mmap");
            exit(-1);
        }
    }

    error = load_elf_sections(interp_elf_ex, elf_phdata, interpreter_fd, rbase,
                              &baddr);
    if (error != 0) {
        perror("load_elf_sections");
        exit(-1);
    }

    /* Now use mmap to map the library into memory. */
    close(interpreter_fd);
    free(elf_phdata);

    *interp_load_addr = baddr;
    return ((abi_ulong) interp_elf_ex->e_entry) + rbase;
}

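/*
 * For an ET_DYN interpreter the PROT_NONE reservation above only chooses a
 * base (rbase); load_elf_sections() then maps the PT_LOAD segments over it
 * with MAP_FIXED, and the entry point returned to the caller is slid by the
 * same rbase.
 */
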
static int symfind(const void *s0, const void *s1)
{
    target_ulong addr = *(target_ulong *)s0;
    struct elf_sym *sym = (struct elf_sym *)s1;
    int result = 0;

    if (addr < sym->st_value) {
        result = -1;
    } else if (addr >= sym->st_value + sym->st_size) {
        result = 1;
    }
    return result;
}

static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr)
{
#if ELF_CLASS == ELFCLASS32
    struct elf_sym *syms = s->disas_symtab.elf32;
#else
    struct elf_sym *syms = s->disas_symtab.elf64;
#endif
    struct elf_sym *sym;

    sym = bsearch(&orig_addr, syms, s->disas_num_syms, sizeof(*syms), symfind);
    if (sym != NULL) {
        return s->disas_strtab + sym->st_name;
    }

    return "";
}

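/*
 * symfind() compares the address against the half-open interval
 * [st_value, st_value + st_size) rather than a single key, so the bsearch()
 * above finds the function whose range contains orig_addr, not only exact
 * matches.
 */
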
/* FIXME: This should use elf_ops.h */
static int symcmp(const void *s0, const void *s1)
{
    struct elf_sym *sym0 = (struct elf_sym *)s0;
    struct elf_sym *sym1 = (struct elf_sym *)s1;
    return (sym0->st_value < sym1->st_value) ? -1 :
        ((sym0->st_value > sym1->st_value) ? 1 : 0);
}

/* Best attempt to load symbols from this ELF object. */
static void load_symbols(struct elfhdr *hdr, int fd)
{
    unsigned int i, nsyms;
    struct elf_shdr sechdr, symtab, strtab;
    char *strings;
    struct syminfo *s;
    struct elf_sym *syms, *new_syms;

    lseek(fd, hdr->e_shoff, SEEK_SET);
    for (i = 0; i < hdr->e_shnum; i++) {
        if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr)) {
            return;
        }
        bswap_shdr(&sechdr, 1);
        if (sechdr.sh_type == SHT_SYMTAB) {
            symtab = sechdr;
            lseek(fd, hdr->e_shoff + sizeof(sechdr) * sechdr.sh_link,
                  SEEK_SET);
            if (read(fd, &strtab, sizeof(strtab)) != sizeof(strtab)) {
                return;
            }
            bswap_shdr(&strtab, 1);
            goto found;
        }
    }
    return; /* Shouldn't happen... */

found:
    /* Now know where the strtab and symtab are.  Snarf them. */
    s = malloc(sizeof(*s));
    syms = malloc(symtab.sh_size);
    if (!syms) {
        free(s);
        return;
    }
    s->disas_strtab = strings = malloc(strtab.sh_size);
    if (!s->disas_strtab) {
        free(s);
        free(syms);
        return;
    }

    lseek(fd, symtab.sh_offset, SEEK_SET);
    if (read(fd, syms, symtab.sh_size) != symtab.sh_size) {
        free(s);
        free(syms);
        free(strings);
        return;
    }

    nsyms = symtab.sh_size / sizeof(struct elf_sym);

    i = 0;
    while (i < nsyms) {
        bswap_sym(syms + i);
        /* Throw away entries which we do not need. */
        if (syms[i].st_shndx == SHN_UNDEF ||
            syms[i].st_shndx >= SHN_LORESERVE ||
            ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) {
            nsyms--;
            if (i < nsyms) {
                syms[i] = syms[nsyms];
            }
            continue;
        }
#if defined(TARGET_ARM) || defined(TARGET_MIPS)
        /* The bottom address bit marks a Thumb or MIPS16 symbol. */
        syms[i].st_value &= ~(target_ulong)1;
#endif
        i++;
    }

    /*
     * Attempt to free the storage associated with the local symbols
     * that we threw away.  Whether or not this has any effect on the
     * memory allocation depends on the malloc implementation and how
     * many symbols we managed to discard.
     */
    new_syms = realloc(syms, nsyms * sizeof(*syms));
    if (new_syms == NULL) {
        free(s);
        free(syms);
        free(strings);
        return;
    }
    syms = new_syms;

    qsort(syms, nsyms, sizeof(*syms), symcmp);

    lseek(fd, strtab.sh_offset, SEEK_SET);
    if (read(fd, strings, strtab.sh_size) != strtab.sh_size) {
        free(s);
        free(syms);
        free(strings);
        return;
    }
    s->disas_num_syms = nsyms;
#if ELF_CLASS == ELFCLASS32
    s->disas_symtab.elf32 = syms;
    s->lookup_symbol = (lookup_symbol_t)lookup_symbolxx;
#else
    s->disas_symtab.elf64 = syms;
    s->lookup_symbol = (lookup_symbol_t)lookup_symbolxx;
#endif
    s->next = syminfos;
    syminfos = s;
}

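/*
 * The discard loop above compacts the table in place: a rejected entry is
 * overwritten by the current last entry while nsyms shrinks, so no second
 * pass or extra allocation is needed before the realloc()/qsort() that
 * follow.
 */
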
/* Check the elf header and see if this a target elf binary. */
int is_target_elf_binary(int fd)
{
    struct elfhdr elf_ex;
    char buf[128];

    if (lseek(fd, 0L, SEEK_SET) < 0) {
        return 0;
    }
    if (read(fd, buf, sizeof(buf)) < 0) {
        return 0;
    }

    elf_ex = *((struct elfhdr *)buf);
    bswap_ehdr(&elf_ex);

    if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
        (!elf_check_arch(elf_ex.e_machine))) {
        return 0;
    }
    return 1;
}

static int
load_elf_sections(const struct elfhdr *hdr, struct elf_phdr *phdr, int fd,
                  abi_ulong rbase, abi_ulong *baddrp)
{
    struct elf_phdr *elf_ppnt;
    abi_ulong baddr = 0;
    abi_ulong error;
    int i;

    /*
     * Now we do a little grungy work by mmaping the ELF image into
     * the correct location in memory.  At this point, we assume that
     * the image should be loaded at fixed address, not at a variable
     * address.
     */
    for (i = 0, elf_ppnt = phdr; i < hdr->e_phnum; i++, elf_ppnt++) {
        int elf_prot = 0;

        /* XXX Skip memsz == 0. */
        if (elf_ppnt->p_type != PT_LOAD) {
            continue;
        }

        if (elf_ppnt->p_flags & PF_R) {
            elf_prot |= PROT_READ;
        }
        if (elf_ppnt->p_flags & PF_W) {
            elf_prot |= PROT_WRITE;
        }
        if (elf_ppnt->p_flags & PF_X) {
            elf_prot |= PROT_EXEC;
        }

        error = target_mmap(TARGET_ELF_PAGESTART(rbase + elf_ppnt->p_vaddr),
                            (elf_ppnt->p_filesz +
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
                            elf_prot,
                            (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
                            fd,
                            (elf_ppnt->p_offset -
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
        if (error == -1) {
            perror("mmap");
            exit(-1);
        } else if (elf_ppnt->p_memsz != elf_ppnt->p_filesz) {
            abi_ulong start_bss, end_bss;

            start_bss = rbase + elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
            end_bss = rbase + elf_ppnt->p_vaddr + elf_ppnt->p_memsz;

            /*
             * Calling set_brk effectively mmaps the pages that we need for the
             * bss and break sections.
             */
            set_brk(start_bss, end_bss);
            padzero(start_bss, end_bss);
        }

        if (baddr == 0) {
            baddr = TARGET_ELF_PAGESTART(rbase + elf_ppnt->p_vaddr);
        }
    }

    if (baddrp != NULL) {
        *baddrp = baddr;
    }
    return 0;
}

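/*
 * Mapping math sketch (illustrative numbers, 4 KiB target pages): for a
 * PT_LOAD with p_vaddr = 0x10f00, p_offset = 0xf00 and p_filesz = 0x2200,
 * TARGET_ELF_PAGEOFFSET(p_vaddr) is 0xf00, so the segment is mapped at
 * TARGET_ELF_PAGESTART(rbase + 0x10f00) = rbase + 0x10000 for 0x3100 bytes
 * from file offset 0, keeping the file offset and the virtual address
 * congruent modulo the page size as mmap() requires.
 */
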
int load_elf_binary(struct bsd_binprm *bprm, struct target_pt_regs *regs,
                    struct image_info *info)
{
    struct elfhdr elf_ex;
    struct elfhdr interp_elf_ex;
    int interpreter_fd = -1; /* avoid warning */
    abi_ulong load_addr = 0;
    int i;
    struct elf_phdr *elf_ppnt;
    struct elf_phdr *elf_phdata;
    abi_ulong elf_brk = 0;
    int error, retval;
    char *elf_interpreter;
    abi_ulong baddr, elf_entry, et_dyn_addr, interp_load_addr = 0;
    abi_ulong reloc_func_desc = 0;

    elf_ex = *((struct elfhdr *) bprm->buf);    /* exec-header */
    bswap_ehdr(&elf_ex);

    /* First of all, some simple consistency checks */
    if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
        (!elf_check_arch(elf_ex.e_machine))) {
        return -ENOEXEC;
    }

    bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
    bprm->p = copy_elf_strings(bprm->envc, bprm->envp, bprm->page, bprm->p);
    bprm->p = copy_elf_strings(bprm->argc, bprm->argv, bprm->page, bprm->p);

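    /*
     * The strings are staged into temporary host pages hanging off
     * bprm->page[]; setup_initial_stack(), reached via setup_arg_pages()
     * below, is expected to copy them onto the freshly mapped guest stack.
     */
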
    /* Now read in all of the header information */
    elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize * elf_ex.e_phnum);
    if (elf_phdata == NULL) {
        return -ENOMEM;
    }

    retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET);
    if (retval >= 0) {
        retval = read(bprm->fd, (char *)elf_phdata,
                      elf_ex.e_phentsize * elf_ex.e_phnum);
    }
    if (retval < 0) {
        perror("load_elf_binary");
        free(elf_phdata);
        return -errno;
    }

    bswap_phdr(elf_phdata, elf_ex.e_phnum);
    elf_ppnt = elf_phdata;

    /* Scan the program headers for an ELF interpreter (PT_INTERP). */
    elf_interpreter = NULL;
    for (i = 0; i < elf_ex.e_phnum; i++) {
        if (elf_ppnt->p_type == PT_INTERP) {
            if (elf_interpreter != NULL) {
                free(elf_phdata);
                free(elf_interpreter);
                return -EINVAL;
            }

            elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);
            if (elf_interpreter == NULL) {
                free(elf_phdata);
                return -ENOMEM;
            }

            retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET);
            if (retval >= 0) {
                retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz);
            }
            if (retval < 0) {
                perror("load_elf_binary2");
                exit(-1);
            }

            retval = open(path(elf_interpreter), O_RDONLY);
            if (retval >= 0) {
                interpreter_fd = retval;
            } else {
                perror(elf_interpreter);
                exit(-1);
                /* retval = -errno; */
            }

            retval = lseek(interpreter_fd, 0, SEEK_SET);
            if (retval >= 0) {
                retval = read(interpreter_fd, bprm->buf, 128);
            }
            if (retval >= 0) {
                interp_elf_ex = *((struct elfhdr *) bprm->buf);
            }
            if (retval < 0) {
                perror("load_elf_binary3");
                free(elf_interpreter);
                free(elf_phdata);
                return retval;
            }
        }
        elf_ppnt++;
    }

    /* Some simple consistency checks for the interpreter */
    if (elf_interpreter) {
        if (interp_elf_ex.e_ident[0] != 0x7f ||
            strncmp((char *)&interp_elf_ex.e_ident[1], "ELF", 3) != 0) {
            free(elf_interpreter);
            free(elf_phdata);
            return -ENOEXEC;
        }
    }

    /*
     * OK, we are done with that, now set up the arg stuff, and then start
     * this sucker up.
     */
    if (!bprm->p) {
        free(elf_interpreter);
        free(elf_phdata);
        return -E2BIG;
    }

    /* OK, This is the point of no return */
    info->start_mmap = (abi_ulong)ELF_START_MMAP;
    elf_entry = (abi_ulong) elf_ex.e_entry;

    /* XXX Join this with PT_INTERP search? */
    baddr = 0;
    for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
        if (elf_ppnt->p_type != PT_LOAD) {
            continue;
        }
        baddr = elf_ppnt->p_vaddr;
        break;
    }

    et_dyn_addr = 0;
    if (elf_ex.e_type == ET_DYN && baddr == 0) {
        et_dyn_addr = ELF_ET_DYN_LOAD_ADDR;
    }

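    /*
     * A PIE (ET_DYN with its first PT_LOAD at vaddr 0) gets a fixed non-zero
     * base, ELF_ET_DYN_LOAD_ADDR, so its segments are not mapped at guest
     * address 0; an ET_DYN image with a non-zero p_vaddr keeps its own
     * addresses and et_dyn_addr stays 0.
     */
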
    /*
     * Do this so that we can load the interpreter, if need be.  We will
     * change some of these later.
     */
    setup_arg_pages(bprm, info, &bprm->p, &bprm->stringp);
    info->start_stack = bprm->p;

    info->elf_flags = elf_ex.e_flags;

    error = load_elf_sections(&elf_ex, elf_phdata, bprm->fd, et_dyn_addr,
                              &load_addr);
    for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
        if (elf_ppnt->p_type != PT_LOAD) {
            continue;
        }
        if (elf_ppnt->p_memsz > elf_ppnt->p_filesz) {
            elf_brk = MAX(elf_brk, et_dyn_addr + elf_ppnt->p_vaddr +
                          elf_ppnt->p_memsz);
        }
    }
    if (error != 0) {
        perror("load_elf_sections");
        exit(-1);
    }

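    /*
     * elf_brk ends up at the highest et_dyn_addr + p_vaddr + p_memsz of any
     * PT_LOAD whose memory image is larger than its file image, and becomes
     * the program's initial break below.
     */
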
    if (elf_interpreter) {
        elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,
                                    &interp_load_addr);
        reloc_func_desc = interp_load_addr;

        close(interpreter_fd);
        free(elf_interpreter);

        if (elf_entry == ~((abi_ulong)0UL)) {
            printf("Unable to load interpreter\n");
            free(elf_phdata);
            exit(-1);
        }
    } else {
        interp_load_addr = et_dyn_addr;
        elf_entry += interp_load_addr;
    }

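    /*
     * With an interpreter, execution starts at the dynamic linker's entry
     * point (already slid by its own load base inside load_elf_interp());
     * otherwise the binary's own e_entry is slid by et_dyn_addr, which is
     * non-zero only in the PIE case handled above.
     */
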
    free(elf_phdata);

    if (qemu_log_enabled()) {
        load_symbols(&elf_ex, bprm->fd);
    }

    bprm->p = target_create_elf_tables(bprm->p, bprm->argc, bprm->envc,
                                       bprm->stringp, &elf_ex, load_addr,
                                       et_dyn_addr, interp_load_addr, info);
    info->load_addr = reloc_func_desc;
    info->start_brk = info->brk = elf_brk;
    info->start_stack = bprm->p;
    info->entry = elf_entry;

#ifdef USE_ELF_CORE_DUMP
    bprm->core_dump = &elf_core_dump;
#else
    bprm->core_dump = NULL;
#endif

    return 0;
}

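/*
 * target_create_elf_tables() is expected to push argc/argv/envp and the ELF
 * auxiliary vector onto the guest stack and return the new stack pointer,
 * which is why bprm->p and info->start_stack are refreshed from its return
 * value above.
 */
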
void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    target_thread_init(regs, infop);
}