/*-
 * Copyright (c) 1998 Michael Smith <msmith@freebsd.org>
 * Copyright (c) 1998 Peter Wemm <peter@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/linker.h>
#include <sys/module.h>
#include <sys/stdint.h>

#include <machine/elf.h>

#include "bootstrap.h"
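
/*
 * COPYOUT() reads bytes back from the staging area through the
 * architecture-specific copyout routine supplied in archsw.
 */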
#define COPYOUT(s,d,l)	archsw.arch_copyout((vm_offset_t)(s), d, l)

#if defined(__i386__) && __ELF_WORD_SIZE == 64
#define ELF_TARG_CLASS	ELFCLASS64
#define ELF_TARG_MACH	EM_X86_64
#endif

typedef struct elf_file
{
    /* ... */
} *elf_file_t;

static int __elfN(loadimage)(struct preloaded_file *mp, elf_file_t ef,
    u_int64_t loadaddr);
static int __elfN(lookup_symbol)(struct preloaded_file *mp, elf_file_t ef,
    const char *name, Elf_Sym *sym);
static int __elfN(reloc_ptr)(struct preloaded_file *mp, elf_file_t ef,
    Elf_Addr p, void *val, size_t len);
static int __elfN(parse_modmetadata)(struct preloaded_file *mp, elf_file_t ef,
    Elf_Addr p_start, Elf_Addr p_end);
static symaddr_fn __elfN(symaddr);
static char *fake_modname(const char *name);

const char *__elfN(kerneltype) = "elf kernel";
const char *__elfN(moduletype) = "elf module";

u_int64_t __elfN(relocation_offset) = 0;

static int
__elfN(load_elf_header)(char *filename, elf_file_t ef)
{
    ssize_t	bytes_read;
    Elf_Ehdr	*ehdr;
    int		err;

    /*
     * Open the image, read and validate the ELF header
     */
    if (filename == NULL)	/* can't handle nameless */
	return (EFTYPE);
    if ((ef->fd = open(filename, O_RDONLY)) == -1)
	return (errno);
    ef->firstpage = malloc(PAGE_SIZE);
    if (ef->firstpage == NULL) {
	close(ef->fd);
	return (ENOMEM);
    }
    bytes_read = read(ef->fd, ef->firstpage, PAGE_SIZE);
    ef->firstlen = (size_t)bytes_read;
    if (bytes_read < 0 || ef->firstlen <= sizeof(Elf_Ehdr)) {
	err = EFTYPE;		/* could be EIO, but may be small file */
	goto error;
    }
    ehdr = ef->ehdr = (Elf_Ehdr *)ef->firstpage;

    /* Is it ELF? */
    if (!IS_ELF(*ehdr)) {
	err = EFTYPE;
	goto error;
    }

    if (ehdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||	/* Layout ? */
	ehdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
	ehdr->e_ident[EI_VERSION] != EV_CURRENT ||	/* Version ? */
	ehdr->e_version != EV_CURRENT ||
	ehdr->e_machine != ELF_TARG_MACH) {		/* Machine ? */
	err = EFTYPE;
	goto error;
    }

    return (0);

error:
    if (ef->firstpage != NULL) {
	free(ef->firstpage);
	ef->firstpage = NULL;
    }
    if (ef->fd != -1) {
	close(ef->fd);
	ef->fd = -1;
    }
    return (err);
}

/*
 * Attempt to load the file (file) as an ELF module.  It will be stored at
 * (dest), and a pointer to a module structure describing the loaded object
 * will be saved in (result).
 */
int
__elfN(loadfile)(char *filename, u_int64_t dest, struct preloaded_file **result)
{
    return (__elfN(loadfile_raw)(filename, dest, result, 0));
}

int
__elfN(loadfile_raw)(char *filename, u_int64_t dest,
    struct preloaded_file **result, int multiboot)
{
    struct preloaded_file	*fp, *kfp;
    struct elf_file		ef;
    Elf_Ehdr			*ehdr;
    int				err;

    fp = NULL;
    bzero(&ef, sizeof(struct elf_file));

    err = __elfN(load_elf_header)(filename, &ef);
    if (err != 0)
	return (err);

    ehdr = ef.ehdr;

    /*
     * Check to see what sort of module we are.
     */
    kfp = file_findfile(NULL, __elfN(kerneltype));
#ifdef __powerpc__
    /*
     * Kernels can be ET_DYN, so just assume the first loaded object is the
     * kernel. This assumption will be checked later.
     */
    if (kfp == NULL)
	ef.kernel = 1;
#endif

    if (ef.kernel || ehdr->e_type == ET_EXEC) {
	/* Looks like a kernel */
	if (kfp != NULL) {
	    printf("elf" __XSTRING(__ELF_WORD_SIZE) "_loadfile: kernel already loaded\n");
	    err = EPERM;
	    goto oerr;
	}
	/*
	 * Calculate destination address based on kernel entrypoint.
	 *
	 * For ARM, the destination address is independent of any values in the
	 * elf header (an ARM kernel can be loaded at any 2MB boundary), so we
	 * leave dest set to the value calculated by archsw.arch_loadaddr() and
	 * passed in to this function.
	 */
#ifndef __arm__
	if (ehdr->e_type == ET_EXEC)
	    dest = (ehdr->e_entry & ~PAGE_MASK);
#endif
	if ((ehdr->e_entry & ~PAGE_MASK) == 0) {
	    printf("elf" __XSTRING(__ELF_WORD_SIZE) "_loadfile: not a kernel (maybe static binary?)\n");
	    err = EPERM;
	    goto oerr;
	}
	ef.kernel = 1;

    } else if (ehdr->e_type == ET_DYN) {
	/* Looks like a kld module */
	if (multiboot != 0) {
	    printf("elf" __XSTRING(__ELF_WORD_SIZE) "_loadfile: can't load module as multiboot\n");
	    err = EPERM;
	    goto oerr;
	}
	if (kfp == NULL) {
	    printf("elf" __XSTRING(__ELF_WORD_SIZE) "_loadfile: can't load module before kernel\n");
	    err = EPERM;
	    goto oerr;
	}
	if (strcmp(__elfN(kerneltype), kfp->f_type)) {
	    printf("elf" __XSTRING(__ELF_WORD_SIZE) "_loadfile: can't load module with kernel type '%s'\n", kfp->f_type);
	    err = EPERM;
	    goto oerr;
	}
	/* Looks OK, go ahead */

    } else {
	err = EFTYPE;
	goto oerr;
    }

    if (archsw.arch_loadaddr != NULL)
	dest = archsw.arch_loadaddr(LOAD_ELF, ehdr, dest);
    else
	dest = roundup(dest, PAGE_SIZE);

    /*
     * Ok, we think we should handle this.
     */
    fp = file_alloc();
    if (fp == NULL) {
	printf("elf" __XSTRING(__ELF_WORD_SIZE) "_loadfile: cannot allocate module info\n");
	err = EPERM;
	goto out;
    }
    if (ef.kernel == 1 && multiboot == 0)
	setenv("kernelname", filename, 1);
    fp->f_name = strdup(filename);
    if (multiboot == 0) {
	fp->f_type = strdup(ef.kernel ?
	    __elfN(kerneltype) : __elfN(moduletype));
    } else if (multiboot == 1) {
	fp->f_type = strdup("elf multiboot kernel");
    } else {
	fp->f_type = strdup("elf multiboot2 kernel");
    }

#ifdef ELF_VERBOSE
    printf("%s entry at 0x%jx\n", filename, (uintmax_t)ehdr->e_entry);
#else
    printf("%s ", filename);
#endif

    fp->f_size = __elfN(loadimage)(fp, &ef, dest);
    if (fp->f_size == 0 || fp->f_addr == 0)
	goto ioerr;

    /* save exec header as metadata */
    file_addmetadata(fp, MODINFOMD_ELFHDR, sizeof(*ehdr), ehdr);

    /* Load OK, return module pointer */
    *result = (struct preloaded_file *)fp;
    err = 0;
    goto out;

ioerr:
    err = EIO;
oerr:
    file_discard(fp);
out:
    if (ef.firstpage != NULL)
	free(ef.firstpage);
    if (ef.fd != -1)
	close(ef.fd);
    return (err);
}

/*
 * With the file (fd) open on the image, and (ehdr) containing
 * the Elf header, load the image at (off)
 */
static int
__elfN(loadimage)(struct preloaded_file *fp, elf_file_t ef, u_int64_t off)
{
    int		i;
    u_int	j;
    Elf_Ehdr	*ehdr;
    Elf_Phdr	*phdr, *php;
    Elf_Shdr	*shdr;
    char	*shstr;
    int		ret;
    vm_offset_t	firstaddr;
    vm_offset_t	lastaddr;
    size_t	chunk;
    ssize_t	result;
    Elf_Addr	ssym, esym;
    Elf_Dyn	*dp;
    Elf_Addr	adp;
    Elf_Addr	ctors;
    int		ndp;
    int		symstrindex;
    int		symtabindex;
    Elf_Size	size;
    u_int	fpcopy;
    Elf_Sym	sym;
    Elf_Addr	p_start, p_end;

    dp = NULL;
    shdr = NULL;
    ret = 0;
    firstaddr = lastaddr = 0;
    ehdr = ef->ehdr;
    if (ehdr->e_type == ET_EXEC) {
#if defined(__i386__) || defined(__amd64__)
#if __ELF_WORD_SIZE == 64
	off = - (off & 0xffffffffff000000ull);	/* x86_64 relocates after locore */
#else
	off = - (off & 0xff000000u);	/* i386 relocates after locore */
#endif
#elif defined(__powerpc__)
	/*
	 * On the purely virtual memory machines like e500, the kernel is
	 * linked against its final VA range, which is most often not
	 * available at the loader stage, but only after kernel initializes
	 * and completes its VM settings. In such cases we cannot use p_vaddr
	 * field directly to load ELF segments, but put them at some
	 * 'load-time' locations.
	 */
	if (off & 0xf0000000u) {
	    off = -(off & 0xf0000000u);
	    /*
	     * XXX the physical load address should not be hardcoded. Note
	     * that the Book-E kernel assumes that it's loaded at a 16MB
	     * boundary for now...
	     */
	    off += 0x01000000;
	    ehdr->e_entry += off;
#ifdef ELF_VERBOSE
	    printf("Converted entry 0x%08x\n", ehdr->e_entry);
#endif
	} else
	    off = 0;
#elif defined(__arm__) && !defined(EFI)
	/*
	 * The elf headers in arm kernels specify virtual addresses in all
	 * header fields, even the ones that should be physical addresses.
	 * We assume the entry point is in the first page, and masking the page
	 * offset will leave us with the virtual address the kernel was linked
	 * at.  We subtract that from the load offset, making 'off' into the
	 * value which, when added to a virtual address in an elf header,
	 * translates it to a physical address.  We do the va->pa conversion on
	 * the entry point address in the header now, so that later we can
	 * launch the kernel by just jumping to that address.
	 *
	 * When booting from UEFI the copyin and copyout functions handle
	 * adjusting the location relative to the first virtual address.
	 * Because of this there is no need to adjust the offset or entry
	 * point address as these will both be handled by the efi code.
	 */
	off -= ehdr->e_entry & ~PAGE_MASK;
	ehdr->e_entry += off;
#ifdef ELF_VERBOSE
	printf("ehdr->e_entry 0x%08x, va<->pa off %llx\n", ehdr->e_entry, off);
#endif
#else
	off = 0;	/* other archs use direct mapped kernels */
#endif
    }

    if (ehdr->e_ident[EI_OSABI] == ELFOSABI_SOLARIS) {
	/* use entry address from header */
	fp->f_addr = ehdr->e_entry;
    }

    __elfN(relocation_offset) = off;

    if ((ehdr->e_phoff + ehdr->e_phnum * sizeof(*phdr)) > ef->firstlen) {
	printf("elf" __XSTRING(__ELF_WORD_SIZE) "_loadimage: program header not within first page\n");
	goto out;
    }
    phdr = (Elf_Phdr *)(ef->firstpage + ehdr->e_phoff);
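
    /*
     * Walk the program headers and stage every PT_LOAD segment.  For
     * ELFOSABI_SOLARIS images the physical address (p_paddr) selects the
     * destination; otherwise the virtual address (p_vaddr) is used, in
     * both cases biased by 'off'.
     */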
    for (i = 0; i < ehdr->e_phnum; i++) {
	/* We want to load PT_LOAD segments only.. */
	if (phdr[i].p_type != PT_LOAD)
	    continue;

#ifdef ELF_VERBOSE
	if (ehdr->e_ident[EI_OSABI] == ELFOSABI_SOLARIS) {
	    printf("Segment: 0x%lx@0x%lx -> 0x%lx-0x%lx",
		(long)phdr[i].p_filesz, (long)phdr[i].p_offset,
		(long)(phdr[i].p_paddr + off),
		(long)(phdr[i].p_paddr + off + phdr[i].p_memsz - 1));
	} else {
	    printf("Segment: 0x%lx@0x%lx -> 0x%lx-0x%lx",
		(long)phdr[i].p_filesz, (long)phdr[i].p_offset,
		(long)(phdr[i].p_vaddr + off),
		(long)(phdr[i].p_vaddr + off + phdr[i].p_memsz - 1));
	}
#else
	if ((phdr[i].p_flags & PF_W) == 0) {
	    printf("text=0x%lx ", (long)phdr[i].p_filesz);
	} else {
	    printf("data=0x%lx", (long)phdr[i].p_filesz);
	    if (phdr[i].p_filesz < phdr[i].p_memsz)
		printf("+0x%lx", (long)(phdr[i].p_memsz - phdr[i].p_filesz));
	    printf(" ");
	}
#endif
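
	/*
	 * The first PAGE_SIZE bytes of the file are already in memory
	 * (ef->firstpage), so copy any part of the segment that falls in
	 * that window with arch_copyin and read the remainder from the
	 * file with kern_pread.
	 */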
	fpcopy = 0;
	if (ef->firstlen > phdr[i].p_offset) {
	    fpcopy = ef->firstlen - phdr[i].p_offset;
	    if (ehdr->e_ident[EI_OSABI] == ELFOSABI_SOLARIS) {
		archsw.arch_copyin(ef->firstpage + phdr[i].p_offset,
		    phdr[i].p_paddr + off, fpcopy);
	    } else {
		archsw.arch_copyin(ef->firstpage + phdr[i].p_offset,
		    phdr[i].p_vaddr + off, fpcopy);
	    }
	}
	if (phdr[i].p_filesz > fpcopy) {
	    if (ehdr->e_ident[EI_OSABI] == ELFOSABI_SOLARIS) {
		if (kern_pread(ef->fd, phdr[i].p_paddr + off + fpcopy,
		    phdr[i].p_filesz - fpcopy,
		    phdr[i].p_offset + fpcopy) != 0) {
		    printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
			"_loadimage: read failed\n");
		    goto out;
		}
	    } else {
		if (kern_pread(ef->fd, phdr[i].p_vaddr + off + fpcopy,
		    phdr[i].p_filesz - fpcopy,
		    phdr[i].p_offset + fpcopy) != 0) {
		    printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
			"_loadimage: read failed\n");
		    goto out;
		}
	    }
	}

	/* clear space from oversized segments; eg: bss */
	if (phdr[i].p_filesz < phdr[i].p_memsz) {
#ifdef ELF_VERBOSE
	    if (ehdr->e_ident[EI_OSABI] == ELFOSABI_SOLARIS) {
		printf(" (bss: 0x%lx-0x%lx)",
		    (long)(phdr[i].p_paddr + off + phdr[i].p_filesz),
		    (long)(phdr[i].p_paddr + off + phdr[i].p_memsz - 1));
	    } else {
		printf(" (bss: 0x%lx-0x%lx)",
		    (long)(phdr[i].p_vaddr + off + phdr[i].p_filesz),
		    (long)(phdr[i].p_vaddr + off + phdr[i].p_memsz - 1));
	    }
#endif
	    if (ehdr->e_ident[EI_OSABI] == ELFOSABI_SOLARIS) {
		kern_bzero(phdr[i].p_paddr + off + phdr[i].p_filesz,
		    phdr[i].p_memsz - phdr[i].p_filesz);
	    } else {
		kern_bzero(phdr[i].p_vaddr + off + phdr[i].p_filesz,
		    phdr[i].p_memsz - phdr[i].p_filesz);
	    }
	}

	if (archsw.arch_loadseg != NULL)
	    archsw.arch_loadseg(ehdr, phdr + i, off);

	if (ehdr->e_ident[EI_OSABI] == ELFOSABI_SOLARIS) {
	    if (firstaddr == 0 || firstaddr > (phdr[i].p_paddr + off))
		firstaddr = phdr[i].p_paddr + off;
	    if (lastaddr == 0 ||
		lastaddr < (phdr[i].p_paddr + off + phdr[i].p_memsz))
		lastaddr = phdr[i].p_paddr + off + phdr[i].p_memsz;
	} else {
	    if (firstaddr == 0 || firstaddr > (phdr[i].p_vaddr + off))
		firstaddr = phdr[i].p_vaddr + off;
	    if (lastaddr == 0 ||
		lastaddr < (phdr[i].p_vaddr + off + phdr[i].p_memsz))
		lastaddr = phdr[i].p_vaddr + off + phdr[i].p_memsz;
	}
    }
    lastaddr = roundup(lastaddr, sizeof(long));

    /*
     * Get the section headers.  We need this for finding the .ctors
     * section as well as for loading any symbols.  Both may be hard
     * to do if reading from a .gz file as it involves seeking.  I
     * think the rule is going to have to be that you must strip a
     * file to remove symbols before gzipping it.
     */
    chunk = ehdr->e_shnum * ehdr->e_shentsize;
    if (chunk == 0 || ehdr->e_shoff == 0)
	goto nosyms;
    shdr = alloc_pread(ef->fd, ehdr->e_shoff, chunk);
    if (shdr == NULL) {
	printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
	    "_loadimage: failed to read section headers");
	goto nosyms;
    }
    file_addmetadata(fp, MODINFOMD_SHDR, chunk, shdr);

    /*
     * Read the section string table and look for the .ctors section.
     * We need to tell the kernel where it is so that it can call the
     * ctors.
     */
    chunk = shdr[ehdr->e_shstrndx].sh_size;
    if (chunk) {
	shstr = alloc_pread(ef->fd, shdr[ehdr->e_shstrndx].sh_offset, chunk);
	if (shstr) {
	    for (i = 0; i < ehdr->e_shnum; i++) {
		if (strcmp(shstr + shdr[i].sh_name, ".ctors") != 0)
		    continue;
		ctors = shdr[i].sh_addr;
		file_addmetadata(fp, MODINFOMD_CTORS_ADDR, sizeof(ctors),
		    &ctors);
		size = shdr[i].sh_size;
		file_addmetadata(fp, MODINFOMD_CTORS_SIZE, sizeof(size),
		    &size);
		break;
	    }
	    free(shstr);
	}
    }

    /*
     * Now load any symbols.
     */
    symtabindex = -1;
    symstrindex = -1;
    for (i = 0; i < ehdr->e_shnum; i++) {
	if (shdr[i].sh_type != SHT_SYMTAB)
	    continue;
	for (j = 0; j < ehdr->e_phnum; j++) {
	    if (phdr[j].p_type != PT_LOAD)
		continue;
	    if (shdr[i].sh_offset >= phdr[j].p_offset &&
		(shdr[i].sh_offset + shdr[i].sh_size <=
		 phdr[j].p_offset + phdr[j].p_filesz)) {
		shdr[i].sh_offset = 0;
		shdr[i].sh_size = 0;
		break;
	    }
	}
	if (shdr[i].sh_offset == 0 || shdr[i].sh_size == 0)
	    continue;		/* already loaded in a PT_LOAD above */
	/* Save it for loading below */
	symtabindex = i;
	symstrindex = shdr[i].sh_link;
    }
    if (symtabindex < 0 || symstrindex < 0)
	goto nosyms;

    /* Ok, committed to a load. */
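
    /*
     * Each of the symbol table and its string table is staged as an
     * Elf_Size length word followed by the raw section contents; ssym
     * and esym bracket the whole staged region for the kernel.
     */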
    ssym = lastaddr;
    for (i = symtabindex; i >= 0; i = symstrindex) {
#ifdef ELF_VERBOSE
	char	*secname;

	switch (shdr[i].sh_type) {
	case SHT_SYMTAB:		/* Symbol table */
	    secname = "symtab";
	    break;
	case SHT_STRTAB:		/* String table */
	    secname = "strtab";
	    break;
	default:
	    secname = "WHOA!!";
	    break;
	}
#endif
	size = shdr[i].sh_size;
	archsw.arch_copyin(&size, lastaddr, sizeof(size));
	lastaddr += sizeof(size);

#ifdef ELF_VERBOSE
	printf("\n%s: 0x%jx@0x%jx -> 0x%jx-0x%jx", secname,
	    (uintmax_t)shdr[i].sh_size, (uintmax_t)shdr[i].sh_offset,
	    (uintmax_t)lastaddr, (uintmax_t)(lastaddr + shdr[i].sh_size));
#else
	if (i == symstrindex)
	    printf("|");
	printf("0x%lx+0x%lx", (long)sizeof(size), (long)size);
#endif

	if (lseek(ef->fd, (off_t)shdr[i].sh_offset, SEEK_SET) == -1) {
	    printf("\nelf" __XSTRING(__ELF_WORD_SIZE) "_loadimage: could not seek for symbols - skipped!");
	    lastaddr = ssym;
	    ssym = 0;
	    goto nosyms;
	}
	result = archsw.arch_readin(ef->fd, lastaddr, shdr[i].sh_size);
	if (result < 0 || (size_t)result != shdr[i].sh_size) {
	    printf("\nelf" __XSTRING(__ELF_WORD_SIZE) "_loadimage: could not read symbols - skipped! (%ju != %ju)", (uintmax_t)result,
		(uintmax_t)shdr[i].sh_size);
	    lastaddr = ssym;
	    ssym = 0;
	    goto nosyms;
	}
	/* Reset offsets relative to ssym */
	lastaddr += shdr[i].sh_size;
	lastaddr = roundup(lastaddr, sizeof(size));
	if (i == symtabindex)
	    symtabindex = -1;
	else if (i == symstrindex)
	    symstrindex = -1;
    }
    esym = lastaddr;

    file_addmetadata(fp, MODINFOMD_SSYM, sizeof(ssym), &ssym);
    file_addmetadata(fp, MODINFOMD_ESYM, sizeof(esym), &esym);

nosyms:
    printf("\n");

    ret = lastaddr - firstaddr;
    if (ehdr->e_ident[EI_OSABI] != ELFOSABI_SOLARIS)
	fp->f_addr = firstaddr;
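
    /*
     * Find the PT_DYNAMIC header so the dynamic table can be used to
     * locate the hash, symbol and string tables needed below.
     */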
    php = NULL;
    for (i = 0; i < ehdr->e_phnum; i++) {
	if (phdr[i].p_type == PT_DYNAMIC) {
	    php = phdr + i;
	    adp = php->p_vaddr;
	    file_addmetadata(fp, MODINFOMD_DYNAMIC, sizeof(adp), &adp);
	    break;
	}
    }

    if (php == NULL)	/* this is bad, we cannot get to symbols or _DYNAMIC */
	goto out;

    ndp = php->p_filesz / sizeof(Elf_Dyn);
    if (ndp == 0)
	goto out;
    dp = malloc(php->p_filesz);
    if (dp == NULL)
	goto out;
    if (ehdr->e_ident[EI_OSABI] == ELFOSABI_SOLARIS)
	archsw.arch_copyout(php->p_paddr + off, dp, php->p_filesz);
    else
	archsw.arch_copyout(php->p_vaddr + off, dp, php->p_filesz);

    ef->strsz = 0;
    for (i = 0; i < ndp; i++) {
	if (dp[i].d_tag == 0)
	    break;
	switch (dp[i].d_tag) {
	case DT_HASH:
	    ef->hashtab = (Elf_Hashelt *)(uintptr_t)(dp[i].d_un.d_ptr + off);
	    break;
	case DT_STRTAB:
	    ef->strtab = (char *)(uintptr_t)(dp[i].d_un.d_ptr + off);
	    break;
	case DT_STRSZ:
	    ef->strsz = dp[i].d_un.d_val;
	    break;
	case DT_SYMTAB:
	    ef->symtab = (Elf_Sym *)(uintptr_t)(dp[i].d_un.d_ptr + off);
	    break;
	case DT_REL:
	    ef->rel = (Elf_Rel *)(uintptr_t)(dp[i].d_un.d_ptr + off);
	    break;
	case DT_RELSZ:
	    ef->relsz = dp[i].d_un.d_val;
	    break;
	case DT_RELA:
	    ef->rela = (Elf_Rela *)(uintptr_t)(dp[i].d_un.d_ptr + off);
	    break;
	case DT_RELASZ:
	    ef->relasz = dp[i].d_un.d_val;
	    break;
	default:
	    break;
	}
    }
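
    /*
     * The DT_HASH table begins with the bucket and chain counts,
     * followed by the bucket array and then the chain array; record
     * where each part lives so lookup_symbol() can walk it.
     */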
    if (ef->hashtab == NULL || ef->symtab == NULL ||
	ef->strtab == NULL || ef->strsz == 0)
	goto out;
    COPYOUT(ef->hashtab, &ef->nbuckets, sizeof(ef->nbuckets));
    COPYOUT(ef->hashtab + 1, &ef->nchains, sizeof(ef->nchains));
    ef->buckets = ef->hashtab + 2;
    ef->chains = ef->buckets + ef->nbuckets;

    if (__elfN(lookup_symbol)(fp, ef, "__start_set_modmetadata_set", &sym) != 0)
	goto out;
    p_start = sym.st_value + ef->off;
    if (__elfN(lookup_symbol)(fp, ef, "__stop_set_modmetadata_set", &sym) != 0)
	goto out;
    p_end = sym.st_value + ef->off;

    if (__elfN(parse_modmetadata)(fp, ef, p_start, p_end) == 0)
	goto out;

    if (ef->kernel)		/* kernel must not depend on anything */
	goto out;

out:
    if (dp != NULL)
	free(dp);
    if (shdr != NULL)
	free(shdr);
    return (ret);
}

static char invalid_name[] = "bad";
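
/*
 * Derive a default module name from a file name: strip any directory
 * prefix and any trailing extension, falling back to "bad" for names
 * that reduce to nothing useful.
 */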
static char *
fake_modname(const char *name)
{
    const char *sp, *ep;
    char *fp;
    size_t len;

    sp = strrchr(name, '/');
    if (sp)
	sp++;
    else
	sp = name;
    ep = strrchr(name, '.');
    if (ep) {
	if (ep == name) {
	    sp = invalid_name;
	    ep = invalid_name + sizeof(invalid_name) - 1;
	}
    } else
	ep = name + strlen(name);
    len = ep - sp;
    fp = malloc(len + 1);
    if (fp == NULL)
	return (NULL);
    memcpy(fp, sp, len);
    fp[len] = '\0';
    return (fp);
}
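
/*
 * When the loader's word size differs from that of the image being
 * inspected (as selected by the __ELF_WORD_SIZE conditionals below),
 * mod_metadata records in the image carry pointers of the image's
 * width, so they are read through these fixed-width shadow structures
 * and converted to the native layout in parse_modmetadata().
 */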
#if (defined(__i386__) || defined(__powerpc__)) && __ELF_WORD_SIZE == 64
struct mod_metadata64 {
    int		md_version;	/* structure version MDTV_* */
    int		md_type;	/* type of entry MDT_* */
    u_int64_t	md_data;	/* specific data */
    u_int64_t	md_cval;	/* common string label */
};
#endif

#if defined(__amd64__) && __ELF_WORD_SIZE == 32
struct mod_metadata32 {
    int		md_version;	/* structure version MDTV_* */
    int		md_type;	/* type of entry MDT_* */
    u_int32_t	md_data;	/* specific data */
    u_int32_t	md_cval;	/* common string label */
};
#endif

int
__elfN(load_modmetadata)(struct preloaded_file *fp, u_int64_t dest)
{
    struct elf_file	ef;
    int			err, i, j;
    Elf_Shdr		*sh_meta, *shdr = NULL;
    Elf_Shdr		*sh_data[2];
    char		*shstrtab = NULL;
    size_t		size;
    Elf_Addr		p_start, p_end;

    bzero(&ef, sizeof(struct elf_file));
    ef.fd = -1;

    err = __elfN(load_elf_header)(fp->f_name, &ef);
    if (err != 0)
	goto out;

    if (ef.kernel == 1 || ef.ehdr->e_type == ET_EXEC) {
	ef.kernel = 1;
    } else if (ef.ehdr->e_type != ET_DYN) {
	err = EFTYPE;
	goto out;
    }

    size = ef.ehdr->e_shnum * ef.ehdr->e_shentsize;
    shdr = alloc_pread(ef.fd, ef.ehdr->e_shoff, size);
    if (shdr == NULL) {
	err = ENOMEM;
	goto out;
    }

    /* Load shstrtab. */
    shstrtab = alloc_pread(ef.fd, shdr[ef.ehdr->e_shstrndx].sh_offset,
	shdr[ef.ehdr->e_shstrndx].sh_size);
    if (shstrtab == NULL) {
	printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
	    "load_modmetadata: unable to load shstrtab\n");
	err = EFTYPE;
	goto out;
    }

    /* Find set_modmetadata_set and data sections. */
    sh_data[0] = sh_data[1] = sh_meta = NULL;
    for (i = 0, j = 0; i < ef.ehdr->e_shnum; i++) {
	if (strcmp(&shstrtab[shdr[i].sh_name],
	    "set_modmetadata_set") == 0) {
	    sh_meta = &shdr[i];
	}
	if ((strcmp(&shstrtab[shdr[i].sh_name], ".data") == 0) ||
	    (strcmp(&shstrtab[shdr[i].sh_name], ".rodata") == 0)) {
	    sh_data[j++] = &shdr[i];
	}
    }
    if (sh_meta == NULL || sh_data[0] == NULL || sh_data[1] == NULL) {
	printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
	    "load_modmetadata: unable to find set_modmetadata_set or data sections\n");
	err = EFTYPE;
	goto out;
    }
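
    /*
     * Stage the metadata set and then the two data sections at 'dest';
     * ef.off records the bias between their link-time addresses and the
     * staging area so that pointers found in the set can be followed by
     * parse_modmetadata().
     */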

    /* Load set_modmetadata_set into memory */
    err = kern_pread(ef.fd, dest, sh_meta->sh_size, sh_meta->sh_offset);
    if (err != 0) {
	printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
	    "load_modmetadata: unable to load set_modmetadata_set: %d\n", err);
	goto out;
    }
    p_start = dest;
    p_end = dest + sh_meta->sh_size;
    dest += sh_meta->sh_size;

    /* Load data sections into memory. */
    err = kern_pread(ef.fd, dest, sh_data[0]->sh_size,
	sh_data[0]->sh_offset);
    if (err != 0) {
	printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
	    "load_modmetadata: unable to load data: %d\n", err);
	goto out;
    }

    /*
     * We have to increment the dest, so that the offset is the same into
     * both the .rodata and .data sections.
     */
    ef.off = -(sh_data[0]->sh_addr - dest);
    dest += (sh_data[1]->sh_addr - sh_data[0]->sh_addr);

    err = kern_pread(ef.fd, dest, sh_data[1]->sh_size,
	sh_data[1]->sh_offset);
    if (err != 0) {
	printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
	    "load_modmetadata: unable to load data: %d\n", err);
	goto out;
    }

    err = __elfN(parse_modmetadata)(fp, &ef, p_start, p_end);
    if (err != 0) {
	printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
	    "load_modmetadata: unable to parse metadata: %d\n", err);
	goto out;
    }

out:
    if (shstrtab != NULL)
	free(shstrtab);
    if (shdr != NULL)
	free(shdr);
    if (ef.firstpage != NULL)
	free(ef.firstpage);
    if (ef.fd != -1)
	close(ef.fd);
    return (err);
}

static int
__elfN(parse_modmetadata)(struct preloaded_file *fp, elf_file_t ef,
    Elf_Addr p_start, Elf_Addr p_end)
{
    struct mod_metadata md;
#if (defined(__i386__) || defined(__powerpc__)) && __ELF_WORD_SIZE == 64
    struct mod_metadata64 md64;
#elif defined(__amd64__) && __ELF_WORD_SIZE == 32
    struct mod_metadata32 md32;
#endif
    struct mod_depend *mdepend;
    struct mod_version mver;
    char *s;
    int error, modcnt, minfolen;
    Elf_Addr v, p;
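
    /*
     * The modmetadata set is an array of Elf_Addr pointers, one per
     * mod_metadata record; walk it, relocating each pointer before the
     * record it refers to is copied out and examined.
     */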
    modcnt = 0;
    p = p_start;
    while (p < p_end) {
	COPYOUT(p, &v, sizeof(v));
	error = __elfN(reloc_ptr)(fp, ef, p, &v, sizeof(v));
	if (error == EOPNOTSUPP)
	    v += ef->off;
	else if (error != 0)
	    return (error);
#if (defined(__i386__) || defined(__powerpc__)) && __ELF_WORD_SIZE == 64
	COPYOUT(v, &md64, sizeof(md64));
	error = __elfN(reloc_ptr)(fp, ef, v, &md64, sizeof(md64));
	if (error == EOPNOTSUPP) {
	    md64.md_cval += ef->off;
	    md64.md_data += ef->off;
	} else if (error != 0)
	    return (error);
	md.md_version = md64.md_version;
	md.md_type = md64.md_type;
	md.md_cval = (const char *)(uintptr_t)md64.md_cval;
	md.md_data = (void *)(uintptr_t)md64.md_data;
#elif defined(__amd64__) && __ELF_WORD_SIZE == 32
	COPYOUT(v, &md32, sizeof(md32));
	error = __elfN(reloc_ptr)(fp, ef, v, &md32, sizeof(md32));
	if (error == EOPNOTSUPP) {
	    md32.md_cval += ef->off;
	    md32.md_data += ef->off;
	} else if (error != 0)
	    return (error);
	md.md_version = md32.md_version;
	md.md_type = md32.md_type;
	md.md_cval = (const char *)(uintptr_t)md32.md_cval;
	md.md_data = (void *)(uintptr_t)md32.md_data;
#else
	COPYOUT(v, &md, sizeof(md));
	error = __elfN(reloc_ptr)(fp, ef, v, &md, sizeof(md));
	if (error == EOPNOTSUPP) {
	    md.md_cval += ef->off;
	    md.md_data = (void *)((uintptr_t)md.md_data + (uintptr_t)ef->off);
	} else if (error != 0)
	    return (error);
#endif
	p += sizeof(Elf_Addr);
	switch (md.md_type) {
	case MDT_DEPEND:
	    if (ef->kernel)	/* kernel must not depend on anything */
		break;
	    s = strdupout((vm_offset_t)md.md_cval);
	    minfolen = sizeof(*mdepend) + strlen(s) + 1;
	    mdepend = malloc(minfolen);
	    if (mdepend == NULL)
		return (ENOMEM);
	    COPYOUT((vm_offset_t)md.md_data, mdepend, sizeof(*mdepend));
	    strcpy((char *)(mdepend + 1), s);
	    free(s);
	    file_addmetadata(fp, MODINFOMD_DEPLIST, minfolen, mdepend);
	    free(mdepend);
	    break;
	case MDT_VERSION:
	    s = strdupout((vm_offset_t)md.md_cval);
	    COPYOUT((vm_offset_t)md.md_data, &mver, sizeof(mver));
	    file_addmodule(fp, s, mver.mv_version, NULL);
	    free(s);
	    modcnt++;
	    break;
	}
    }
    if (modcnt == 0) {
	s = fake_modname(fp->f_name);
	file_addmodule(fp, s, 1, NULL);
	free(s);
    }
    return (0);
}
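
/*
 * Standard System V ABI ELF hash function, used with the DT_HASH table
 * read in loadimage().
 */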
static unsigned int
elf_hash(const char *name)
{
    const unsigned char *p = (const unsigned char *) name;
    unsigned int h = 0;
    unsigned int g;

    while (*p != '\0') {
	h = (h << 4) + *p++;
	if ((g = h & 0xf0000000) != 0)
	    h ^= g >> 24;
	h &= ~g;
    }
    return (h);
}

static const char __elfN(bad_symtable)[] = "elf" __XSTRING(__ELF_WORD_SIZE)
    "_lookup_symbol: corrupt symbol table\n";
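
/*
 * Look 'name' up in the image's DT_HASH table: hash the name, pick a
 * bucket, then follow the chain, comparing each candidate against the
 * loaded string table via COPYOUT/strdupout.
 */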
static int
__elfN(lookup_symbol)(struct preloaded_file *fp __unused, elf_file_t ef,
    const char *name, Elf_Sym *symp)
{
    Elf_Hashelt symnum;
    Elf_Sym sym;
    char *strp;
    unsigned long hash;

    hash = elf_hash(name);
    COPYOUT(&ef->buckets[hash % ef->nbuckets], &symnum, sizeof(symnum));

    while (symnum != STN_UNDEF) {
	if (symnum >= ef->nchains) {
	    printf(__elfN(bad_symtable));
	    return (ENOENT);
	}

	COPYOUT(ef->symtab + symnum, &sym, sizeof(sym));
	if (sym.st_name == 0) {
	    printf(__elfN(bad_symtable));
	    return (ENOENT);
	}

	strp = strdupout((vm_offset_t)(ef->strtab + sym.st_name));
	if (strcmp(name, strp) == 0) {
	    free(strp);
	    if (sym.st_shndx != SHN_UNDEF ||
		(sym.st_value != 0 &&
		 ELF_ST_TYPE(sym.st_info) == STT_FUNC)) {
		*symp = sym;
		return (0);
	    } else
		return (ENOENT);
	}
	free(strp);
	COPYOUT(&ef->chains[symnum], &symnum, sizeof(symnum));
    }
    return (ENOENT);
}

/*
 * Apply any intra-module relocations to the value.  p is the load address
 * of the value and val/len is the value to be modified.  This does NOT modify
 * the image in-place, because this is done by kern_linker later on.
 *
 * Returns EOPNOTSUPP if no relocation method is supplied.
 */
static int
__elfN(reloc_ptr)(struct preloaded_file *mp, elf_file_t ef,
    Elf_Addr p, void *val, size_t len)
{
    size_t n;
    Elf_Rela a;
    Elf_Rel r;
    int error;

    /*
     * The kernel is already relocated, but we still want to apply
     * offset adjustments.
     */
    if (ef->kernel)
	return (EOPNOTSUPP);

    for (n = 0; n < ef->relsz / sizeof(r); n++) {
	COPYOUT(ef->rel + n, &r, sizeof(r));

	error = __elfN(reloc)(ef, __elfN(symaddr), &r, ELF_RELOC_REL,
	    ef->off, p, val, len);
	if (error != 0)
	    return (error);
    }
    for (n = 0; n < ef->relasz / sizeof(a); n++) {
	COPYOUT(ef->rela + n, &a, sizeof(a));

	error = __elfN(reloc)(ef, __elfN(symaddr), &a, ELF_RELOC_RELA,
	    ef->off, p, val, len);
	if (error != 0)
	    return (error);
    }

    return (0);
}

static Elf_Addr
__elfN(symaddr)(struct elf_file *ef __unused, Elf_Size symidx __unused)
{

    /* Symbol lookup by index not required here. */
    return (0);
}