/*
 * Copyright (c) 2013 Stacey D. Son
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "disas/disas.h"
#include "qemu/path.h"
static abi_ulong target_auxents;   /* Where the AUX entries are in target */
static size_t target_auxents_sz;   /* Size of AUX entries including AT_NULL */
#include "target_arch_reg.h"
#include "target_os_elf.h"
#include "target_os_stack.h"
#include "target_os_thread.h"
#include "target_os_user.h"
abi_ulong target_stksiz;
abi_ulong target_stkbas;
static int elf_core_dump(int signr, CPUArchState *env);
static int load_elf_sections(const struct elfhdr *hdr, struct elf_phdr *phdr,
                             int fd, abi_ulong rbase, abi_ulong *baddrp);
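
/*
 * memcpy_fromfs() is a plain memcpy wrapper; the "fromfs" name is a holdover
 * from the Linux-style loader code this file is based on, where it meant
 * "copy from the user (fs segment) address space".
 */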
static inline void memcpy_fromfs(void *to, const void *from, unsigned long n)
{
    memcpy(to, from, n);
}
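
/*
 * Byte-swapping helpers: when target and host endianness differ
 * (BSWAP_NEEDED), every field of the ELF structures read from the file must
 * be converted to host byte order before use. bswaptls() swaps a
 * target-long-sized field, so the same code handles ELFCLASS32 and
 * ELFCLASS64 layouts.
 */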
#ifdef BSWAP_NEEDED
static void bswap_ehdr(struct elfhdr *ehdr)
{
    bswap16s(&ehdr->e_type);            /* Object file type */
    bswap16s(&ehdr->e_machine);         /* Architecture */
    bswap32s(&ehdr->e_version);         /* Object file version */
    bswaptls(&ehdr->e_entry);           /* Entry point virtual address */
    bswaptls(&ehdr->e_phoff);           /* Program header table file offset */
    bswaptls(&ehdr->e_shoff);           /* Section header table file offset */
    bswap32s(&ehdr->e_flags);           /* Processor-specific flags */
    bswap16s(&ehdr->e_ehsize);          /* ELF header size in bytes */
    bswap16s(&ehdr->e_phentsize);       /* Program header table entry size */
    bswap16s(&ehdr->e_phnum);           /* Program header table entry count */
    bswap16s(&ehdr->e_shentsize);       /* Section header table entry size */
    bswap16s(&ehdr->e_shnum);           /* Section header table entry count */
    bswap16s(&ehdr->e_shstrndx);        /* Section header string table index */
}
static void bswap_phdr(struct elf_phdr *phdr, int phnum)
{
    int i;

    for (i = 0; i < phnum; i++, phdr++) {
        bswap32s(&phdr->p_type);        /* Segment type */
        bswap32s(&phdr->p_flags);       /* Segment flags */
        bswaptls(&phdr->p_offset);      /* Segment file offset */
        bswaptls(&phdr->p_vaddr);       /* Segment virtual address */
        bswaptls(&phdr->p_paddr);       /* Segment physical address */
        bswaptls(&phdr->p_filesz);      /* Segment size in file */
        bswaptls(&phdr->p_memsz);       /* Segment size in memory */
        bswaptls(&phdr->p_align);       /* Segment alignment */
    }
}
static void bswap_shdr(struct elf_shdr *shdr, int shnum)
{
    int i;

    for (i = 0; i < shnum; i++, shdr++) {
        bswap32s(&shdr->sh_name);
        bswap32s(&shdr->sh_type);
        bswaptls(&shdr->sh_flags);
        bswaptls(&shdr->sh_addr);
        bswaptls(&shdr->sh_offset);
        bswaptls(&shdr->sh_size);
        bswap32s(&shdr->sh_link);
        bswap32s(&shdr->sh_info);
        bswaptls(&shdr->sh_addralign);
        bswaptls(&shdr->sh_entsize);
    }
}
static void bswap_sym(struct elf_sym *sym)
{
    bswap32s(&sym->st_name);
    bswaptls(&sym->st_value);
    bswaptls(&sym->st_size);
    bswap16s(&sym->st_shndx);
}
static void bswap_note(struct elf_note *en)
{
    bswap32s(&en->n_namesz);
    bswap32s(&en->n_descsz);
    bswap32s(&en->n_type);
}
#else /* ! BSWAP_NEEDED */

static void bswap_ehdr(struct elfhdr *ehdr) { }
static void bswap_phdr(struct elf_phdr *phdr, int phnum) { }
static void bswap_shdr(struct elf_shdr *shdr, int shnum) { }
static void bswap_sym(struct elf_sym *sym) { }
static void bswap_note(struct elf_note *en) { }

#endif /* ! BSWAP_NEEDED */
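
/*
 * With matching endianness the stubs above compile to empty functions, so
 * the loader code below can call the bswap helpers unconditionally.
 */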
/*
 * 'copy_elf_strings()' copies argument/environment strings from user
 * memory to free pages in kernel mem. These are in a format ready
 * to be put directly into the top of new user memory.
 */
static abi_ulong copy_elf_strings(int argc, char **argv, void **page,
                                  abi_ulong p)
{
    char *tmp, *tmp1, *pag = NULL;
        return 0;       /* bullet-proofing */

            fprintf(stderr, "VFS: argc is wrong");

        if (p < len) {  /* this shouldn't happen - 128kB */

                offset = p % TARGET_PAGE_SIZE;
                pag = page[p / TARGET_PAGE_SIZE];

                    pag = g_try_malloc0(TARGET_PAGE_SIZE);
                    page[p / TARGET_PAGE_SIZE] = pag;

            if (len == 0 || offset == 0) {
                *(pag + offset) = *tmp;

                int bytes_to_copy = (len > offset) ? offset : len;
                tmp -= bytes_to_copy;

                offset -= bytes_to_copy;
                len -= bytes_to_copy;
                memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
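
/*
 * setup_arg_pages() reserves the guest stack region (plus one guard page
 * that is mprotected to PROT_NONE), records its base and size in
 * target_stkbas/target_stksiz, and then lets setup_initial_stack() lay out
 * the initial stack contents.
 */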
static void setup_arg_pages(struct bsd_binprm *bprm, struct image_info *info,
                            abi_ulong *stackp, abi_ulong *stringp)
{
    abi_ulong stack_base, size;
    abi_ulong addr;

    /*
     * Create enough stack to hold everything. If we don't use it for args,
     * we'll use it for something else...
     */
    size = target_dflssiz;
    stack_base = TARGET_USRSTACK - size;
    addr = target_mmap(stack_base, size + qemu_host_page_size,
                       PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);

    /* we reserve one extra page at the top of the stack as guard */
    target_mprotect(addr + size, qemu_host_page_size, PROT_NONE);

    target_stksiz = size;
    target_stkbas = addr;

    if (setup_initial_stack(bprm, stackp, stringp) != 0) {
        exit(-1);
    }
}
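
/*
 * set_brk() maps zero-filled anonymous memory over the page-aligned range
 * from start to end; it is used below to materialise the bss/brk area that
 * follows a segment's file data.
 */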
static void set_brk(abi_ulong start, abi_ulong end)
{
    /* page-align the start and end addresses... */
    start = HOST_PAGE_ALIGN(start);
    end = HOST_PAGE_ALIGN(end);

    if (target_mmap(start, end - start, PROT_READ | PROT_WRITE | PROT_EXEC,
                    MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) == -1) {
        perror("cannot mmap brk");
        exit(-1);
    }
}
/*
 * We need to explicitly zero any fractional pages after the data
 * section (i.e. bss). This would contain the junk from the file that
 * should not be in memory.
 */
static void padzero(abi_ulong elf_bss, abi_ulong last_bss)
{
    abi_ulong nbyte;

    if (elf_bss >= last_bss) {
        return;
    }

    /*
     * XXX: this is really a hack: if the real host page size is
     * smaller than the target page size, some pages after the end
     * of the file may not be mapped. A better fix would be to
     * patch target_mmap(), but it is more complicated as the file
     * size must be known.
     */
    if (qemu_real_host_page_size() < qemu_host_page_size) {
        abi_ulong end_addr, end_addr1;
        end_addr1 = REAL_HOST_PAGE_ALIGN(elf_bss);
        end_addr = HOST_PAGE_ALIGN(elf_bss);
        if (end_addr1 < end_addr) {
            mmap((void *)g2h_untagged(end_addr1), end_addr - end_addr1,
                 PROT_READ | PROT_WRITE | PROT_EXEC,
                 MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0);
        }
    }

    nbyte = elf_bss & (qemu_host_page_size - 1);
    if (nbyte) {
        nbyte = qemu_host_page_size - nbyte;
        do {
            /* FIXME - what to do if put_user() fails? */
            put_user_u8(0, elf_bss);
            elf_bss++;
        } while (--nbyte);
    }
}
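
/*
 * Load the ELF interpreter (the runtime linker): read its program headers
 * and map its segments much like the main binary's. For an ET_DYN
 * interpreter a memory zone is reserved first so its load address does not
 * have to be hardcoded. On success the load address is reported through
 * *interp_load_addr and the relocated entry point is returned; on failure
 * ~(abi_ulong)0 is returned.
 */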
static abi_ulong load_elf_interp(struct elfhdr *interp_elf_ex,
                                 int interpreter_fd,
                                 abi_ulong *interp_load_addr)
{
    struct elf_phdr *elf_phdata = NULL;
    abi_ulong rbase;
    int retval;
    abi_ulong baddr, error;

    bswap_ehdr(interp_elf_ex);

    /* First of all, some simple consistency checks */
    if ((interp_elf_ex->e_type != ET_EXEC && interp_elf_ex->e_type != ET_DYN) ||
        !elf_check_arch(interp_elf_ex->e_machine)) {
        return ~((abi_ulong)0UL);
    }

    /* Now read in all of the header information */
    if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE) {
        return ~(abi_ulong)0UL;
    }

    elf_phdata = (struct elf_phdr *) malloc(sizeof(struct elf_phdr) *
                                            interp_elf_ex->e_phnum);
    if (!elf_phdata) {
        return ~((abi_ulong)0UL);
    }

    /*
     * If the size of this structure has changed, then punt, since
     * we will be doing the wrong thing.
     */
    if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
        free(elf_phdata);
        return ~((abi_ulong)0UL);
    }

    retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);
    if (retval >= 0) {
        retval = read(interpreter_fd, (char *) elf_phdata,
                      sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
    }
    if (retval < 0) {
        perror("load_elf_interp");
        exit(-1);
    }

    bswap_phdr(elf_phdata, interp_elf_ex->e_phnum);

    rbase = 0;
    if (interp_elf_ex->e_type == ET_DYN) {
        /*
         * In order to avoid hardcoding the interpreter load
         * address in qemu, we allocate a big enough memory zone.
         */
        rbase = target_mmap(0, INTERP_MAP_SIZE, PROT_NONE,
                            MAP_PRIVATE | MAP_ANON, -1, 0);
    }

    error = load_elf_sections(interp_elf_ex, elf_phdata, interpreter_fd, rbase,
                              &baddr);
    if (error != 0) {
        perror("load_elf_sections");
        exit(-1);
    }

    /* Now use mmap to map the library into memory. */
    close(interpreter_fd);
    free(elf_phdata);

    *interp_load_addr = baddr;
    return ((abi_ulong) interp_elf_ex->e_entry) + rbase;
}
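
/*
 * bsearch() comparator used by lookup_symbolxx(): the key is an address and
 * each element is an elf_sym, so a symbol matches when the address falls
 * inside [st_value, st_value + st_size).
 */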
static int symfind(const void *s0, const void *s1)
{
    struct elf_sym *sym = (struct elf_sym *)s1;
    __typeof(sym->st_value) addr = *(uint64_t *)s0;
    int result = 0;

    if (addr < sym->st_value) {
        result = -1;
    } else if (addr >= sym->st_value + sym->st_size) {
        result = 1;
    }
    return result;
}
static const char *lookup_symbolxx(struct syminfo *s, uint64_t orig_addr)
{
#if ELF_CLASS == ELFCLASS32
    struct elf_sym *syms = s->disas_symtab.elf32;
#else
    struct elf_sym *syms = s->disas_symtab.elf64;
#endif
    struct elf_sym *sym;

    sym = bsearch(&orig_addr, syms, s->disas_num_syms, sizeof(*syms), symfind);
    if (sym != NULL) {
        return s->disas_strtab + sym->st_name;
    }

    return "";
}
/* FIXME: This should use elf_ops.h */
static int symcmp(const void *s0, const void *s1)
{
    struct elf_sym *sym0 = (struct elf_sym *)s0;
    struct elf_sym *sym1 = (struct elf_sym *)s1;

    return (sym0->st_value < sym1->st_value) ? -1 :
           ((sym0->st_value > sym1->st_value) ? 1 : 0);
}
/* Best attempt to load symbols from this ELF object. */
static void load_symbols(struct elfhdr *hdr, int fd)
{
    unsigned int i, nsyms;
    struct elf_shdr sechdr, symtab, strtab;
    char *strings;
    struct syminfo *s;
    struct elf_sym *syms, *new_syms;

    lseek(fd, hdr->e_shoff, SEEK_SET);
    for (i = 0; i < hdr->e_shnum; i++) {
        if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr)) {
            return;
        }
        bswap_shdr(&sechdr, 1);
        if (sechdr.sh_type == SHT_SYMTAB) {
            symtab = sechdr;
            lseek(fd, hdr->e_shoff + sizeof(sechdr) * sechdr.sh_link,
                  SEEK_SET);
            if (read(fd, &strtab, sizeof(strtab)) != sizeof(strtab)) {
                return;
            }
            bswap_shdr(&strtab, 1);
            goto found;
        }
    }
    return; /* Shouldn't happen... */

 found:
    /* Now know where the strtab and symtab are. Snarf them. */
    s = malloc(sizeof(*s));
    syms = malloc(symtab.sh_size);
    if (!syms) {
        free(s);
        return;
    }
    s->disas_strtab = strings = malloc(strtab.sh_size);
    if (!s->disas_strtab) {
        free(s);
        free(syms);
        return;
    }

    lseek(fd, symtab.sh_offset, SEEK_SET);
    if (read(fd, syms, symtab.sh_size) != symtab.sh_size) {
        free(s);
        free(syms);
        free(strings);
        return;
    }

    nsyms = symtab.sh_size / sizeof(struct elf_sym);

    i = 0;
    while (i < nsyms) {
        bswap_sym(syms + i);
        /* Throw away entries which we do not need. */
        if (syms[i].st_shndx == SHN_UNDEF ||
            syms[i].st_shndx >= SHN_LORESERVE ||
            ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) {
            nsyms--;
            if (i < nsyms) {
                syms[i] = syms[nsyms];
            }
            continue;
        }
#if defined(TARGET_ARM) || defined(TARGET_MIPS)
        /* The bottom address bit marks a Thumb or MIPS16 symbol. */
        syms[i].st_value &= ~(target_ulong)1;
#endif
        i++;
    }

    /*
     * Attempt to free the storage associated with the local symbols
     * that we threw away. Whether or not this has any effect on the
     * memory allocation depends on the malloc implementation and how
     * many symbols we managed to discard.
     */
    new_syms = realloc(syms, nsyms * sizeof(*syms));
    if (new_syms == NULL) {
        free(s);
        free(syms);
        free(strings);
        return;
    }
    syms = new_syms;
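
    /*
     * Keep the surviving symbols sorted by address so that lookup_symbolxx()
     * can binary-search them with symfind().
     */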
    qsort(syms, nsyms, sizeof(*syms), symcmp);

    lseek(fd, strtab.sh_offset, SEEK_SET);
    if (read(fd, strings, strtab.sh_size) != strtab.sh_size) {
        free(s);
        free(syms);
        free(strings);
        return;
    }
    s->disas_num_syms = nsyms;
#if ELF_CLASS == ELFCLASS32
    s->disas_symtab.elf32 = syms;
    s->lookup_symbol = (lookup_symbol_t)lookup_symbolxx;
#else
    s->disas_symtab.elf64 = syms;
    s->lookup_symbol = (lookup_symbol_t)lookup_symbolxx;
#endif
    s->next = syminfos;
    syminfos = s;
}
/* Check the elf header and see if this is a target elf binary. */
int is_target_elf_binary(int fd)
{
    struct elfhdr elf_ex;
    char buf[128];

    if (lseek(fd, 0L, SEEK_SET) < 0) {
        return 0;
    }
    if (read(fd, buf, sizeof(buf)) < 0) {
        return 0;
    }

    elf_ex = *((struct elfhdr *)buf);
    bswap_ehdr(&elf_ex);

    if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
        (!elf_check_arch(elf_ex.e_machine))) {
        return 0;
    }
    return 1;
}
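
/*
 * Map every PT_LOAD segment of the image at rbase + p_vaddr with the
 * protections requested by its p_flags, and zero-fill any bss area where
 * p_memsz exceeds p_filesz. The base address of the mapping is reported
 * through *baddrp.
 */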
static int
load_elf_sections(const struct elfhdr *hdr, struct elf_phdr *phdr, int fd,
                  abi_ulong rbase, abi_ulong *baddrp)
{
    struct elf_phdr *elf_ppnt;
    abi_ulong baddr;
    abi_ulong error;
    int i;

    /*
     * Now we do a little grungy work by mmaping the ELF image into
     * the correct location in memory. At this point, we assume that
     * the image should be loaded at fixed address, not at a variable
     * address.
     */
    baddr = 0;
    for (i = 0, elf_ppnt = phdr; i < hdr->e_phnum; i++, elf_ppnt++) {
        int elf_prot = 0;

        /* XXX Skip memsz == 0. */
        if (elf_ppnt->p_type != PT_LOAD) {
            continue;
        }

        if (elf_ppnt->p_flags & PF_R) {
            elf_prot |= PROT_READ;
        }
        if (elf_ppnt->p_flags & PF_W) {
            elf_prot |= PROT_WRITE;
        }
        if (elf_ppnt->p_flags & PF_X) {
            elf_prot |= PROT_EXEC;
        }

        error = target_mmap(TARGET_ELF_PAGESTART(rbase + elf_ppnt->p_vaddr),
                            (elf_ppnt->p_filesz +
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
                            elf_prot,
                            (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
                            fd,
                            (elf_ppnt->p_offset -
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
        if (error == -1) {
            perror("mmap");
            exit(-1);
        } else if (elf_ppnt->p_memsz != elf_ppnt->p_filesz) {
            abi_ulong start_bss, end_bss;

            start_bss = rbase + elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
            end_bss = rbase + elf_ppnt->p_vaddr + elf_ppnt->p_memsz;

            /*
             * Calling set_brk effectively mmaps the pages that we need for the
             * bss and break sections.
             */
            set_brk(start_bss, end_bss);
            padzero(start_bss, end_bss);
        }

        if (baddr == 0) {
            baddr = TARGET_ELF_PAGESTART(rbase + elf_ppnt->p_vaddr);
        }
    }

    if (baddrp != NULL) {
        *baddrp = baddr;
    }
    return 0;
}
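
/*
 * Main loader entry point: copy the argument and environment strings, map
 * the executable's PT_LOAD segments, load the PT_INTERP interpreter if one
 * is present, build the initial stack and ELF auxiliary vector, and fill in
 * *info (entry point, stack, load addresses) for the caller.
 */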
int load_elf_binary(struct bsd_binprm *bprm, struct target_pt_regs *regs,
                    struct image_info *info)
{
    struct elfhdr elf_ex;
    struct elfhdr interp_elf_ex;
    int interpreter_fd = -1;    /* avoid warning */
    int i, retval, error;
    struct elf_phdr *elf_ppnt;
    struct elf_phdr *elf_phdata;
    abi_ulong load_addr = 0;
    abi_ulong elf_brk = 0;
    char *elf_interpreter;
    abi_ulong baddr, elf_entry, et_dyn_addr, interp_load_addr = 0;
    abi_ulong reloc_func_desc = 0;

    elf_ex = *((struct elfhdr *) bprm->buf);    /* exec-header */
    bswap_ehdr(&elf_ex);

    /* First of all, some simple consistency checks */
    if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
        (!elf_check_arch(elf_ex.e_machine))) {
        return -ENOEXEC;
    }

    bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
    bprm->p = copy_elf_strings(bprm->envc, bprm->envp, bprm->page, bprm->p);
    bprm->p = copy_elf_strings(bprm->argc, bprm->argv, bprm->page, bprm->p);

    /* Now read in all of the header information */
    elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize * elf_ex.e_phnum);
    if (elf_phdata == NULL) {
        return -ENOMEM;
    }

    retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET);
    if (retval >= 0) {
        retval = read(bprm->fd, (char *)elf_phdata,
                      elf_ex.e_phentsize * elf_ex.e_phnum);
    }
    if (retval < 0) {
        perror("load_elf_binary");
        exit(-1);
    }

    bswap_phdr(elf_phdata, elf_ex.e_phnum);
    elf_ppnt = elf_phdata;

    elf_interpreter = NULL;
    for (i = 0; i < elf_ex.e_phnum; i++) {
        if (elf_ppnt->p_type == PT_INTERP) {
            if (elf_interpreter != NULL) {
                free(elf_phdata);
                free(elf_interpreter);
                exit(-1);
            }

            elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);
            if (elf_interpreter == NULL) {
                free(elf_phdata);
                exit(-1);
            }

            retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET);
            if (retval >= 0) {
                retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz);
            }
            if (retval < 0) {
                perror("load_elf_binary2");
                exit(-1);
            }

            retval = open(path(elf_interpreter), O_RDONLY);
            if (retval >= 0) {
                interpreter_fd = retval;
            } else {
                perror(elf_interpreter);
                exit(-1);
                /* retval = -errno; */
            }

            retval = lseek(interpreter_fd, 0, SEEK_SET);
            if (retval >= 0) {
                retval = read(interpreter_fd, bprm->buf, 128);
            }
            if (retval >= 0) {
                interp_elf_ex = *((struct elfhdr *) bprm->buf);
            }
            if (retval < 0) {
                perror("load_elf_binary3");
                exit(-1);
                free(elf_interpreter);
            }
        }
        elf_ppnt++;
    }

    /* Some simple consistency checks for the interpreter */
    if (elf_interpreter) {
        if (interp_elf_ex.e_ident[0] != 0x7f ||
            strncmp((char *)&interp_elf_ex.e_ident[1], "ELF", 3) != 0) {
            free(elf_interpreter);

    /*
     * OK, we are done with that, now set up the arg stuff, and then start this
     * sucker up.
     */

            free(elf_interpreter);

    /* OK, This is the point of no return */

    elf_entry = (abi_ulong) elf_ex.e_entry;

    /* XXX Join this with PT_INTERP search? */
    baddr = 0;
    for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
        if (elf_ppnt->p_type != PT_LOAD) {
            continue;
        }
        baddr = elf_ppnt->p_vaddr;
        break;
    }

    if (elf_ex.e_type == ET_DYN && baddr == 0) {
        et_dyn_addr = ELF_ET_DYN_LOAD_ADDR;
    } else {
        et_dyn_addr = 0;
    }

    /*
     * Do this so that we can load the interpreter, if need be. We will
     * change some of these later
     */
    setup_arg_pages(bprm, info, &bprm->p, &bprm->stringp);
    info->start_stack = bprm->p;

    info->elf_flags = elf_ex.e_flags;

    error = load_elf_sections(&elf_ex, elf_phdata, bprm->fd, et_dyn_addr,
                              &load_addr);
    for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
        if (elf_ppnt->p_type != PT_LOAD) {
            continue;
        }
        if (elf_ppnt->p_memsz > elf_ppnt->p_filesz)
            elf_brk = MAX(elf_brk, et_dyn_addr + elf_ppnt->p_vaddr +
                          elf_ppnt->p_memsz);
    }
    if (error != 0) {
        perror("load_elf_sections");
        exit(-1);
    }

    if (elf_interpreter) {
        elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,
                                    &interp_load_addr);
        reloc_func_desc = interp_load_addr;

        close(interpreter_fd);
        free(elf_interpreter);

        if (elf_entry == ~((abi_ulong)0UL)) {
            printf("Unable to load interpreter\n");
            free(elf_phdata);
            exit(-1);
        }
    } else {
        interp_load_addr = et_dyn_addr;
        elf_entry += interp_load_addr;
    }

    if (qemu_log_enabled()) {
        load_symbols(&elf_ex, bprm->fd);
    }

    bprm->p = target_create_elf_tables(bprm->p, bprm->argc, bprm->envc,
                                       bprm->stringp, &elf_ex, load_addr,
                                       et_dyn_addr, interp_load_addr, info);
    info->load_addr = reloc_func_desc;

    info->start_stack = bprm->p;

    info->entry = elf_entry;

#ifdef USE_ELF_CORE_DUMP
    bprm->core_dump = &elf_core_dump;
#else
    bprm->core_dump = NULL;
#endif

    return 0;
}
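
/*
 * Hand the freshly loaded image over to the per-target code:
 * target_thread_init() sets up the initial register state (program counter,
 * stack pointer and so on) from *infop.
 */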
void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    target_thread_init(regs, infop);
}