2 * Copyright (c) 1998 Michael Smith <msmith@freebsd.org>
3 * Copyright (c) 1998 Peter Wemm <peter@freebsd.org>
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
31 #include <sys/param.h>
33 #include <sys/linker.h>
34 #include <sys/module.h>
35 #include <sys/stdint.h>
37 #include <machine/elf.h>
42 #include "bootstrap.h"
44 #define COPYOUT(s,d,l) archsw.arch_copyout((vm_offset_t)(s), d, l)
46 #if defined(__i386__) && __ELF_WORD_SIZE == 64
49 #define ELF_TARG_CLASS ELFCLASS64
50 #define ELF_TARG_MACH EM_X86_64
53 typedef struct elf_file
{
75 static int __elfN(loadimage
)(struct preloaded_file
*mp
, elf_file_t ef
, u_int64_t loadaddr
);
76 static int __elfN(lookup_symbol
)(struct preloaded_file
*mp
, elf_file_t ef
, const char* name
, Elf_Sym
* sym
);
77 static int __elfN(reloc_ptr
)(struct preloaded_file
*mp
, elf_file_t ef
,
78 Elf_Addr p
, void *val
, size_t len
);
79 static int __elfN(parse_modmetadata
)(struct preloaded_file
*mp
, elf_file_t ef
,
80 Elf_Addr p_start
, Elf_Addr p_end
);
81 static symaddr_fn
__elfN(symaddr
);
82 static char *fake_modname(const char *name
);
84 const char *__elfN(kerneltype
) = "elf kernel";
85 const char *__elfN(moduletype
) = "elf module";
87 u_int64_t
__elfN(relocation_offset
) = 0;
90 __elfN(load_elf_header
)(char *filename
, elf_file_t ef
)
97 * Open the image, read and validate the ELF header
99 if (filename
== NULL
) /* can't handle nameless */
101 if ((ef
->fd
= open(filename
, O_RDONLY
)) == -1)
103 ef
->firstpage
= malloc(PAGE_SIZE
);
104 if (ef
->firstpage
== NULL
) {
108 bytes_read
= read(ef
->fd
, ef
->firstpage
, PAGE_SIZE
);
109 ef
->firstlen
= (size_t)bytes_read
;
110 if (bytes_read
< 0 || ef
->firstlen
<= sizeof(Elf_Ehdr
)) {
111 err
= EFTYPE
; /* could be EIO, but may be small file */
114 ehdr
= ef
->ehdr
= (Elf_Ehdr
*)ef
->firstpage
;
117 if (!IS_ELF(*ehdr
)) {
121 if (ehdr
->e_ident
[EI_CLASS
] != ELF_TARG_CLASS
|| /* Layout ? */
122 ehdr
->e_ident
[EI_DATA
] != ELF_TARG_DATA
||
123 ehdr
->e_ident
[EI_VERSION
] != EV_CURRENT
|| /* Version ? */
124 ehdr
->e_version
!= EV_CURRENT
||
125 ehdr
->e_machine
!= ELF_TARG_MACH
) { /* Machine ? */
133 if (ef
->firstpage
!= NULL
) {
135 ef
->firstpage
= NULL
;
145 * Attempt to load the file (file) as an ELF module. It will be stored at
146 * (dest), and a pointer to a module structure describing the loaded object
147 * will be saved in (result).
150 __elfN(loadfile
)(char *filename
, u_int64_t dest
, struct preloaded_file
**result
)
152 return (__elfN(loadfile_raw
)(filename
, dest
, result
, 0));
156 __elfN(loadfile_raw
)(char *filename
, u_int64_t dest
,
157 struct preloaded_file
**result
, int multiboot
)
159 struct preloaded_file
*fp
, *kfp
;
165 bzero(&ef
, sizeof(struct elf_file
));
168 err
= __elfN(load_elf_header
)(filename
, &ef
);
175 * Check to see what sort of module we are.
177 kfp
= file_findfile(NULL
, __elfN(kerneltype
));
180 * Kernels can be ET_DYN, so just assume the first loaded object is the
181 * kernel. This assumption will be checked later.
186 if (ef
.kernel
|| ehdr
->e_type
== ET_EXEC
) {
187 /* Looks like a kernel */
189 printf("elf" __XSTRING(__ELF_WORD_SIZE
) "_loadfile: kernel already loaded\n");
194 * Calculate destination address based on kernel entrypoint.
196 * For ARM, the destination address is independent of any values in the
197 * elf header (an ARM kernel can be loaded at any 2MB boundary), so we
198 * leave dest set to the value calculated by archsw.arch_loadaddr() and
199 * passed in to this function.
202 if (ehdr
->e_type
== ET_EXEC
)
203 dest
= (ehdr
->e_entry
& ~PAGE_MASK
);
205 if ((ehdr
->e_entry
& ~PAGE_MASK
) == 0) {
206 printf("elf" __XSTRING(__ELF_WORD_SIZE
) "_loadfile: not a kernel (maybe static binary?)\n");
212 } else if (ehdr
->e_type
== ET_DYN
) {
213 /* Looks like a kld module */
214 if (multiboot
!= 0) {
215 printf("elf" __XSTRING(__ELF_WORD_SIZE
) "_loadfile: can't load module as multiboot\n");
220 printf("elf" __XSTRING(__ELF_WORD_SIZE
) "_loadfile: can't load module before kernel\n");
224 if (strcmp(__elfN(kerneltype
), kfp
->f_type
)) {
225 printf("elf" __XSTRING(__ELF_WORD_SIZE
) "_loadfile: can't load module with kernel type '%s'\n", kfp
->f_type
);
229 /* Looks OK, go ahead */
237 if (archsw
.arch_loadaddr
!= NULL
)
238 dest
= archsw
.arch_loadaddr(LOAD_ELF
, ehdr
, dest
);
240 dest
= roundup(dest
, PAGE_SIZE
);
243 * Ok, we think we should handle this.
247 printf("elf" __XSTRING(__ELF_WORD_SIZE
) "_loadfile: cannot allocate module info\n");
251 if (ef
.kernel
== 1 && multiboot
== 0)
252 setenv("kernelname", filename
, 1);
253 fp
->f_name
= strdup(filename
);
255 fp
->f_type
= strdup(ef
.kernel
?
256 __elfN(kerneltype
) : __elfN(moduletype
));
258 fp
->f_type
= strdup("elf multiboot kernel");
262 printf("%s entry at 0x%jx\n", filename
, (uintmax_t)ehdr
->e_entry
);
264 printf("%s ", filename
);
267 fp
->f_size
= __elfN(loadimage
)(fp
, &ef
, dest
);
268 if (fp
->f_size
== 0 || fp
->f_addr
== 0)
271 /* save exec header as metadata */
272 file_addmetadata(fp
, MODINFOMD_ELFHDR
, sizeof(*ehdr
), ehdr
);
274 /* Load OK, return module pointer */
275 *result
= (struct preloaded_file
*)fp
;
292 * With the file (fd) open on the image, and (ehdr) containing
293 * the Elf header, load the image at (off)
296 __elfN(loadimage
)(struct preloaded_file
*fp
, elf_file_t ef
, u_int64_t off
)
301 Elf_Phdr
*phdr
, *php
;
305 vm_offset_t firstaddr
;
306 vm_offset_t lastaddr
;
319 Elf_Addr p_start
, p_end
;
324 firstaddr
= lastaddr
= 0;
326 if (ehdr
->e_type
== ET_EXEC
) {
327 #if defined(__i386__) || defined(__amd64__)
328 #if __ELF_WORD_SIZE == 64
329 off
= - (off
& 0xffffffffff000000ull
);/* x86_64 relocates after locore */
331 off
= - (off
& 0xff000000u
); /* i386 relocates after locore */
333 #elif defined(__powerpc__)
335 * On the purely virtual memory machines like e500, the kernel is
336 * linked against its final VA range, which is most often not
337 * available at the loader stage, but only after kernel initializes
338 * and completes its VM settings. In such cases we cannot use p_vaddr
339 * field directly to load ELF segments, but put them at some
340 * 'load-time' locations.
342 if (off
& 0xf0000000u
) {
343 off
= -(off
& 0xf0000000u
);
345 * XXX the physical load address should not be hardcoded. Note
346 * that the Book-E kernel assumes that it's loaded at a 16MB
347 * boundary for now...
350 ehdr
->e_entry
+= off
;
352 printf("Converted entry 0x%08x\n", ehdr
->e_entry
);
356 #elif defined(__arm__) && !defined(EFI)
358 * The elf headers in arm kernels specify virtual addresses in all
359 * header fields, even the ones that should be physical addresses.
360 * We assume the entry point is in the first page, and masking the page
361 * offset will leave us with the virtual address the kernel was linked
362 * at. We subtract that from the load offset, making 'off' into the
363 * value which, when added to a virtual address in an elf header,
364 * translates it to a physical address. We do the va->pa conversion on
365 * the entry point address in the header now, so that later we can
366 * launch the kernel by just jumping to that address.
368 * When booting from UEFI the copyin and copyout functions handle
369 * adjusting the location relative to the first virtual address.
370 * Because of this there is no need to adjust the offset or entry
371 * point address as these will both be handled by the efi code.
373 off
-= ehdr
->e_entry
& ~PAGE_MASK
;
374 ehdr
->e_entry
+= off
;
376 printf("ehdr->e_entry 0x%08x, va<->pa off %llx\n", ehdr
->e_entry
, off
);
379 off
= 0; /* other archs use direct mapped kernels */
384 if (ehdr
->e_ident
[EI_OSABI
] == ELFOSABI_SOLARIS
) {
385 /* use entry address from header */
386 fp
->f_addr
= ehdr
->e_entry
;
390 __elfN(relocation_offset
) = off
;
392 if ((ehdr
->e_phoff
+ ehdr
->e_phnum
* sizeof(*phdr
)) > ef
->firstlen
) {
393 printf("elf" __XSTRING(__ELF_WORD_SIZE
) "_loadimage: program header not within first page\n");
396 phdr
= (Elf_Phdr
*)(ef
->firstpage
+ ehdr
->e_phoff
);
398 for (i
= 0; i
< ehdr
->e_phnum
; i
++) {
399 /* We want to load PT_LOAD segments only.. */
400 if (phdr
[i
].p_type
!= PT_LOAD
)
404 if (ehdr
->e_ident
[EI_OSABI
] == ELFOSABI_SOLARIS
) {
405 printf("Segment: 0x%lx@0x%lx -> 0x%lx-0x%lx",
406 (long)phdr
[i
].p_filesz
, (long)phdr
[i
].p_offset
,
407 (long)(phdr
[i
].p_paddr
+ off
),
408 (long)(phdr
[i
].p_paddr
+ off
+ phdr
[i
].p_memsz
- 1));
410 printf("Segment: 0x%lx@0x%lx -> 0x%lx-0x%lx",
411 (long)phdr
[i
].p_filesz
, (long)phdr
[i
].p_offset
,
412 (long)(phdr
[i
].p_vaddr
+ off
),
413 (long)(phdr
[i
].p_vaddr
+ off
+ phdr
[i
].p_memsz
- 1));
416 if ((phdr
[i
].p_flags
& PF_W
) == 0) {
417 printf("text=0x%lx ", (long)phdr
[i
].p_filesz
);
419 printf("data=0x%lx", (long)phdr
[i
].p_filesz
);
420 if (phdr
[i
].p_filesz
< phdr
[i
].p_memsz
)
421 printf("+0x%lx", (long)(phdr
[i
].p_memsz
-phdr
[i
].p_filesz
));
426 if (ef
->firstlen
> phdr
[i
].p_offset
) {
427 fpcopy
= ef
->firstlen
- phdr
[i
].p_offset
;
428 if (ehdr
->e_ident
[EI_OSABI
] == ELFOSABI_SOLARIS
) {
429 archsw
.arch_copyin(ef
->firstpage
+ phdr
[i
].p_offset
,
430 phdr
[i
].p_paddr
+ off
, fpcopy
);
432 archsw
.arch_copyin(ef
->firstpage
+ phdr
[i
].p_offset
,
433 phdr
[i
].p_vaddr
+ off
, fpcopy
);
436 if (phdr
[i
].p_filesz
> fpcopy
) {
437 if (ehdr
->e_ident
[EI_OSABI
] == ELFOSABI_SOLARIS
) {
438 if (kern_pread(ef
->fd
, phdr
[i
].p_paddr
+ off
+ fpcopy
,
439 phdr
[i
].p_filesz
- fpcopy
,
440 phdr
[i
].p_offset
+ fpcopy
) != 0) {
441 printf("\nelf" __XSTRING(__ELF_WORD_SIZE
)
442 "_loadimage: read failed\n");
446 if (kern_pread(ef
->fd
, phdr
[i
].p_vaddr
+ off
+ fpcopy
,
447 phdr
[i
].p_filesz
- fpcopy
,
448 phdr
[i
].p_offset
+ fpcopy
) != 0) {
449 printf("\nelf" __XSTRING(__ELF_WORD_SIZE
)
450 "_loadimage: read failed\n");
455 /* clear space from oversized segments; eg: bss */
456 if (phdr
[i
].p_filesz
< phdr
[i
].p_memsz
) {
458 if (ehdr
->e_ident
[EI_OSABI
] == ELFOSABI_SOLARIS
) {
459 printf(" (bss: 0x%lx-0x%lx)",
460 (long)(phdr
[i
].p_paddr
+ off
+ phdr
[i
].p_filesz
),
461 (long)(phdr
[i
].p_paddr
+ off
+ phdr
[i
].p_memsz
- 1));
463 printf(" (bss: 0x%lx-0x%lx)",
464 (long)(phdr
[i
].p_vaddr
+ off
+ phdr
[i
].p_filesz
),
465 (long)(phdr
[i
].p_vaddr
+ off
+ phdr
[i
].p_memsz
- 1));
469 if (ehdr
->e_ident
[EI_OSABI
] == ELFOSABI_SOLARIS
) {
470 kern_bzero(phdr
[i
].p_paddr
+ off
+ phdr
[i
].p_filesz
,
471 phdr
[i
].p_memsz
- phdr
[i
].p_filesz
);
473 kern_bzero(phdr
[i
].p_vaddr
+ off
+ phdr
[i
].p_filesz
,
474 phdr
[i
].p_memsz
- phdr
[i
].p_filesz
);
481 if (archsw
.arch_loadseg
!= NULL
)
482 archsw
.arch_loadseg(ehdr
, phdr
+ i
, off
);
484 if (ehdr
->e_ident
[EI_OSABI
] == ELFOSABI_SOLARIS
) {
485 if (firstaddr
== 0 || firstaddr
> (phdr
[i
].p_paddr
+ off
))
486 firstaddr
= phdr
[i
].p_paddr
+ off
;
488 lastaddr
< (phdr
[i
].p_paddr
+ off
+ phdr
[i
].p_memsz
))
489 lastaddr
= phdr
[i
].p_paddr
+ off
+ phdr
[i
].p_memsz
;
491 if (firstaddr
== 0 || firstaddr
> (phdr
[i
].p_vaddr
+ off
))
492 firstaddr
= phdr
[i
].p_vaddr
+ off
;
494 lastaddr
< (phdr
[i
].p_vaddr
+ off
+ phdr
[i
].p_memsz
))
495 lastaddr
= phdr
[i
].p_vaddr
+ off
+ phdr
[i
].p_memsz
;
498 lastaddr
= roundup(lastaddr
, sizeof(long));
501 * Get the section headers. We need this for finding the .ctors
502 * section as well as for loading any symbols. Both may be hard
503 * to do if reading from a .gz file as it involves seeking. I
504 * think the rule is going to have to be that you must strip a
505 * file to remove symbols before gzipping it.
507 chunk
= ehdr
->e_shnum
* ehdr
->e_shentsize
;
508 if (chunk
== 0 || ehdr
->e_shoff
== 0)
510 shdr
= alloc_pread(ef
->fd
, ehdr
->e_shoff
, chunk
);
512 printf("\nelf" __XSTRING(__ELF_WORD_SIZE
)
513 "_loadimage: failed to read section headers");
516 file_addmetadata(fp
, MODINFOMD_SHDR
, chunk
, shdr
);
519 * Read the section string table and look for the .ctors section.
520 * We need to tell the kernel where it is so that it can call the
523 chunk
= shdr
[ehdr
->e_shstrndx
].sh_size
;
525 shstr
= alloc_pread(ef
->fd
, shdr
[ehdr
->e_shstrndx
].sh_offset
, chunk
);
527 for (i
= 0; i
< ehdr
->e_shnum
; i
++) {
528 if (strcmp(shstr
+ shdr
[i
].sh_name
, ".ctors") != 0)
530 ctors
= shdr
[i
].sh_addr
;
531 file_addmetadata(fp
, MODINFOMD_CTORS_ADDR
, sizeof(ctors
),
533 size
= shdr
[i
].sh_size
;
534 file_addmetadata(fp
, MODINFOMD_CTORS_SIZE
, sizeof(size
),
543 * Now load any symbols.
547 for (i
= 0; i
< ehdr
->e_shnum
; i
++) {
548 if (shdr
[i
].sh_type
!= SHT_SYMTAB
)
550 for (j
= 0; j
< ehdr
->e_phnum
; j
++) {
551 if (phdr
[j
].p_type
!= PT_LOAD
)
553 if (shdr
[i
].sh_offset
>= phdr
[j
].p_offset
&&
554 (shdr
[i
].sh_offset
+ shdr
[i
].sh_size
<=
555 phdr
[j
].p_offset
+ phdr
[j
].p_filesz
)) {
556 shdr
[i
].sh_offset
= 0;
561 if (shdr
[i
].sh_offset
== 0 || shdr
[i
].sh_size
== 0)
562 continue; /* already loaded in a PT_LOAD above */
563 /* Save it for loading below */
565 symstrindex
= shdr
[i
].sh_link
;
567 if (symtabindex
< 0 || symstrindex
< 0)
570 /* Ok, committed to a load. */
575 for (i
= symtabindex
; i
>= 0; i
= symstrindex
) {
579 switch(shdr
[i
].sh_type
) {
580 case SHT_SYMTAB
: /* Symbol table */
583 case SHT_STRTAB
: /* String table */
592 size
= shdr
[i
].sh_size
;
593 archsw
.arch_copyin(&size
, lastaddr
, sizeof(size
));
594 lastaddr
+= sizeof(size
);
597 printf("\n%s: 0x%jx@0x%jx -> 0x%jx-0x%jx", secname
,
598 (uintmax_t)shdr
[i
].sh_size
, (uintmax_t)shdr
[i
].sh_offset
,
599 (uintmax_t)lastaddr
, (uintmax_t)(lastaddr
+ shdr
[i
].sh_size
));
601 if (i
== symstrindex
)
603 printf("0x%lx+0x%lx", (long)sizeof(size
), (long)size
);
606 if (lseek(ef
->fd
, (off_t
)shdr
[i
].sh_offset
, SEEK_SET
) == -1) {
607 printf("\nelf" __XSTRING(__ELF_WORD_SIZE
) "_loadimage: could not seek for symbols - skipped!");
612 result
= archsw
.arch_readin(ef
->fd
, lastaddr
, shdr
[i
].sh_size
);
613 if (result
< 0 || (size_t)result
!= shdr
[i
].sh_size
) {
614 printf("\nelf" __XSTRING(__ELF_WORD_SIZE
) "_loadimage: could not read symbols - skipped! (%ju != %ju)", (uintmax_t)result
,
615 (uintmax_t)shdr
[i
].sh_size
);
620 /* Reset offsets relative to ssym */
621 lastaddr
+= shdr
[i
].sh_size
;
622 lastaddr
= roundup(lastaddr
, sizeof(size
));
623 if (i
== symtabindex
)
625 else if (i
== symstrindex
)
633 file_addmetadata(fp
, MODINFOMD_SSYM
, sizeof(ssym
), &ssym
);
634 file_addmetadata(fp
, MODINFOMD_ESYM
, sizeof(esym
), &esym
);
639 ret
= lastaddr
- firstaddr
;
640 if (ehdr
->e_ident
[EI_OSABI
] != ELFOSABI_SOLARIS
)
641 fp
->f_addr
= firstaddr
;
644 for (i
= 0; i
< ehdr
->e_phnum
; i
++) {
645 if (phdr
[i
].p_type
== PT_DYNAMIC
) {
648 file_addmetadata(fp
, MODINFOMD_DYNAMIC
, sizeof(adp
), &adp
);
653 if (php
== NULL
) /* this is bad, we cannot get to symbols or _DYNAMIC */
656 ndp
= php
->p_filesz
/ sizeof(Elf_Dyn
);
659 dp
= malloc(php
->p_filesz
);
662 if (ehdr
->e_ident
[EI_OSABI
] == ELFOSABI_SOLARIS
)
663 archsw
.arch_copyout(php
->p_paddr
+ off
, dp
, php
->p_filesz
);
665 archsw
.arch_copyout(php
->p_vaddr
+ off
, dp
, php
->p_filesz
);
668 for (i
= 0; i
< ndp
; i
++) {
669 if (dp
[i
].d_tag
== 0)
671 switch (dp
[i
].d_tag
) {
673 ef
->hashtab
= (Elf_Hashelt
*)(uintptr_t)(dp
[i
].d_un
.d_ptr
+ off
);
676 ef
->strtab
= (char *)(uintptr_t)(dp
[i
].d_un
.d_ptr
+ off
);
679 ef
->strsz
= dp
[i
].d_un
.d_val
;
682 ef
->symtab
= (Elf_Sym
*)(uintptr_t)(dp
[i
].d_un
.d_ptr
+ off
);
685 ef
->rel
= (Elf_Rel
*)(uintptr_t)(dp
[i
].d_un
.d_ptr
+ off
);
688 ef
->relsz
= dp
[i
].d_un
.d_val
;
691 ef
->rela
= (Elf_Rela
*)(uintptr_t)(dp
[i
].d_un
.d_ptr
+ off
);
694 ef
->relasz
= dp
[i
].d_un
.d_val
;
700 if (ef
->hashtab
== NULL
|| ef
->symtab
== NULL
||
701 ef
->strtab
== NULL
|| ef
->strsz
== 0)
703 COPYOUT(ef
->hashtab
, &ef
->nbuckets
, sizeof(ef
->nbuckets
));
704 COPYOUT(ef
->hashtab
+ 1, &ef
->nchains
, sizeof(ef
->nchains
));
705 ef
->buckets
= ef
->hashtab
+ 2;
706 ef
->chains
= ef
->buckets
+ ef
->nbuckets
;
708 if (__elfN(lookup_symbol
)(fp
, ef
, "__start_set_modmetadata_set", &sym
) != 0)
710 p_start
= sym
.st_value
+ ef
->off
;
711 if (__elfN(lookup_symbol
)(fp
, ef
, "__stop_set_modmetadata_set", &sym
) != 0)
713 p_end
= sym
.st_value
+ ef
->off
;
715 if (__elfN(parse_modmetadata
)(fp
, ef
, p_start
, p_end
) == 0)
718 if (ef
->kernel
) /* kernel must not depend on anything */
729 static char invalid_name
[] = "bad";
732 fake_modname(const char *name
)
738 sp
= strrchr(name
, '/');
743 ep
= strrchr(name
, '.');
747 ep
= invalid_name
+ sizeof(invalid_name
) - 1;
750 ep
= name
+ strlen(name
);
752 fp
= malloc(len
+ 1);
760 #if (defined(__i386__) || defined(__powerpc__)) && __ELF_WORD_SIZE == 64
761 struct mod_metadata64
{
762 int md_version
; /* structure version MDTV_* */
763 int md_type
; /* type of entry MDT_* */
764 u_int64_t md_data
; /* specific data */
765 u_int64_t md_cval
; /* common string label */
768 #if defined(__amd64__) && __ELF_WORD_SIZE == 32
769 struct mod_metadata32
{
770 int md_version
; /* structure version MDTV_* */
771 int md_type
; /* type of entry MDT_* */
772 u_int32_t md_data
; /* specific data */
773 u_int32_t md_cval
; /* common string label */
778 __elfN(load_modmetadata
)(struct preloaded_file
*fp
, u_int64_t dest
)
782 Elf_Shdr
*sh_meta
, *shdr
= NULL
;
783 Elf_Shdr
*sh_data
[2];
784 char *shstrtab
= NULL
;
786 Elf_Addr p_start
, p_end
;
788 bzero(&ef
, sizeof(struct elf_file
));
791 err
= __elfN(load_elf_header
)(fp
->f_name
, &ef
);
795 if (ef
.kernel
== 1 || ef
.ehdr
->e_type
== ET_EXEC
) {
797 } else if (ef
.ehdr
->e_type
!= ET_DYN
) {
802 size
= ef
.ehdr
->e_shnum
* ef
.ehdr
->e_shentsize
;
803 shdr
= alloc_pread(ef
.fd
, ef
.ehdr
->e_shoff
, size
);
810 shstrtab
= alloc_pread(ef
.fd
, shdr
[ef
.ehdr
->e_shstrndx
].sh_offset
,
811 shdr
[ef
.ehdr
->e_shstrndx
].sh_size
);
812 if (shstrtab
== NULL
) {
813 printf("\nelf" __XSTRING(__ELF_WORD_SIZE
)
814 "load_modmetadata: unable to load shstrtab\n");
819 /* Find set_modmetadata_set and data sections. */
820 sh_data
[0] = sh_data
[1] = sh_meta
= NULL
;
821 for (i
= 0, j
= 0; i
< ef
.ehdr
->e_shnum
; i
++) {
822 if (strcmp(&shstrtab
[shdr
[i
].sh_name
],
823 "set_modmetadata_set") == 0) {
826 if ((strcmp(&shstrtab
[shdr
[i
].sh_name
], ".data") == 0) ||
827 (strcmp(&shstrtab
[shdr
[i
].sh_name
], ".rodata") == 0)) {
828 sh_data
[j
++] = &shdr
[i
];
831 if (sh_meta
== NULL
|| sh_data
[0] == NULL
|| sh_data
[1] == NULL
) {
832 printf("\nelf" __XSTRING(__ELF_WORD_SIZE
)
833 "load_modmetadata: unable to find set_modmetadata_set or data sections\n");
838 /* Load set_modmetadata_set into memory */
839 err
= kern_pread(ef
.fd
, dest
, sh_meta
->sh_size
, sh_meta
->sh_offset
);
841 printf("\nelf" __XSTRING(__ELF_WORD_SIZE
)
842 "load_modmetadata: unable to load set_modmetadata_set: %d\n", err
);
846 p_end
= dest
+ sh_meta
->sh_size
;
847 dest
+= sh_meta
->sh_size
;
849 /* Load data sections into memory. */
850 err
= kern_pread(ef
.fd
, dest
, sh_data
[0]->sh_size
,
851 sh_data
[0]->sh_offset
);
853 printf("\nelf" __XSTRING(__ELF_WORD_SIZE
)
854 "load_modmetadata: unable to load data: %d\n", err
);
859 * We have to increment the dest, so that the offset is the same into
860 * both the .rodata and .data sections.
862 ef
.off
= -(sh_data
[0]->sh_addr
- dest
);
863 dest
+= (sh_data
[1]->sh_addr
- sh_data
[0]->sh_addr
);
865 err
= kern_pread(ef
.fd
, dest
, sh_data
[1]->sh_size
,
866 sh_data
[1]->sh_offset
);
868 printf("\nelf" __XSTRING(__ELF_WORD_SIZE
)
869 "load_modmetadata: unable to load data: %d\n", err
);
873 err
= __elfN(parse_modmetadata
)(fp
, &ef
, p_start
, p_end
);
875 printf("\nelf" __XSTRING(__ELF_WORD_SIZE
)
876 "load_modmetadata: unable to parse metadata: %d\n", err
);
881 if (shstrtab
!= NULL
)
885 if (ef
.firstpage
!= NULL
)
893 __elfN(parse_modmetadata
)(struct preloaded_file
*fp
, elf_file_t ef
,
894 Elf_Addr p_start
, Elf_Addr p_end
)
896 struct mod_metadata md
;
897 #if (defined(__i386__) || defined(__powerpc__)) && __ELF_WORD_SIZE == 64
898 struct mod_metadata64 md64
;
899 #elif defined(__amd64__) && __ELF_WORD_SIZE == 32
900 struct mod_metadata32 md32
;
902 struct mod_depend
*mdepend
;
903 struct mod_version mver
;
905 int error
, modcnt
, minfolen
;
911 COPYOUT(p
, &v
, sizeof(v
));
912 error
= __elfN(reloc_ptr
)(fp
, ef
, p
, &v
, sizeof(v
));
913 if (error
== EOPNOTSUPP
)
917 #if (defined(__i386__) || defined(__powerpc__)) && __ELF_WORD_SIZE == 64
918 COPYOUT(v
, &md64
, sizeof(md64
));
919 error
= __elfN(reloc_ptr
)(fp
, ef
, v
, &md64
, sizeof(md64
));
920 if (error
== EOPNOTSUPP
) {
921 md64
.md_cval
+= ef
->off
;
922 md64
.md_data
+= ef
->off
;
923 } else if (error
!= 0)
925 md
.md_version
= md64
.md_version
;
926 md
.md_type
= md64
.md_type
;
927 md
.md_cval
= (const char *)(uintptr_t)md64
.md_cval
;
928 md
.md_data
= (void *)(uintptr_t)md64
.md_data
;
929 #elif defined(__amd64__) && __ELF_WORD_SIZE == 32
930 COPYOUT(v
, &md32
, sizeof(md32
));
931 error
= __elfN(reloc_ptr
)(fp
, ef
, v
, &md32
, sizeof(md32
));
932 if (error
== EOPNOTSUPP
) {
933 md32
.md_cval
+= ef
->off
;
934 md32
.md_data
+= ef
->off
;
935 } else if (error
!= 0)
937 md
.md_version
= md32
.md_version
;
938 md
.md_type
= md32
.md_type
;
939 md
.md_cval
= (const char *)(uintptr_t)md32
.md_cval
;
940 md
.md_data
= (void *)(uintptr_t)md32
.md_data
;
942 COPYOUT(v
, &md
, sizeof(md
));
943 error
= __elfN(reloc_ptr
)(fp
, ef
, v
, &md
, sizeof(md
));
944 if (error
== EOPNOTSUPP
) {
945 md
.md_cval
+= ef
->off
;
946 md
.md_data
= (void *)((uintptr_t)md
.md_data
+ (uintptr_t)ef
->off
);
947 } else if (error
!= 0)
950 p
+= sizeof(Elf_Addr
);
953 if (ef
->kernel
) /* kernel must not depend on anything */
955 s
= strdupout((vm_offset_t
)md
.md_cval
);
956 minfolen
= sizeof(*mdepend
) + strlen(s
) + 1;
957 mdepend
= malloc(minfolen
);
960 COPYOUT((vm_offset_t
)md
.md_data
, mdepend
, sizeof(*mdepend
));
961 strcpy((char*)(mdepend
+ 1), s
);
963 file_addmetadata(fp
, MODINFOMD_DEPLIST
, minfolen
, mdepend
);
967 s
= strdupout((vm_offset_t
)md
.md_cval
);
968 COPYOUT((vm_offset_t
)md
.md_data
, &mver
, sizeof(mver
));
969 file_addmodule(fp
, s
, mver
.mv_version
, NULL
);
976 s
= fake_modname(fp
->f_name
);
977 file_addmodule(fp
, s
, 1, NULL
);
984 elf_hash(const char *name
)
986 const unsigned char *p
= (const unsigned char *) name
;
992 if ((g
= h
& 0xf0000000) != 0)
999 static const char __elfN(bad_symtable
)[] = "elf" __XSTRING(__ELF_WORD_SIZE
) "_lookup_symbol: corrupt symbol table\n";
1001 __elfN(lookup_symbol
)(struct preloaded_file
*fp __unused
, elf_file_t ef
,
1002 const char* name
, Elf_Sym
*symp
)
1009 hash
= elf_hash(name
);
1010 COPYOUT(&ef
->buckets
[hash
% ef
->nbuckets
], &symnum
, sizeof(symnum
));
1012 while (symnum
!= STN_UNDEF
) {
1013 if (symnum
>= ef
->nchains
) {
1014 printf(__elfN(bad_symtable
));
1018 COPYOUT(ef
->symtab
+ symnum
, &sym
, sizeof(sym
));
1019 if (sym
.st_name
== 0) {
1020 printf(__elfN(bad_symtable
));
1024 strp
= strdupout((vm_offset_t
)(ef
->strtab
+ sym
.st_name
));
1025 if (strcmp(name
, strp
) == 0) {
1027 if (sym
.st_shndx
!= SHN_UNDEF
||
1028 (sym
.st_value
!= 0 &&
1029 ELF_ST_TYPE(sym
.st_info
) == STT_FUNC
)) {
1036 COPYOUT(&ef
->chains
[symnum
], &symnum
, sizeof(symnum
));
1042 * Apply any intra-module relocations to the value. p is the load address
1043 * of the value and val/len is the value to be modified. This does NOT modify
1044 * the image in-place, because this is done by kern_linker later on.
1046 * Returns EOPNOTSUPP if no relocation method is supplied.
1049 __elfN(reloc_ptr
)(struct preloaded_file
*mp
, elf_file_t ef
,
1050 Elf_Addr p
, void *val
, size_t len
)
1059 * The kernel is already relocated, but we still want to apply
1060 * offset adjustments.
1063 return (EOPNOTSUPP
);
1065 for (n
= 0; n
< ef
->relsz
/ sizeof(r
); n
++) {
1066 COPYOUT(ef
->rel
+ n
, &r
, sizeof(r
));
1068 error
= __elfN(reloc
)(ef
, __elfN(symaddr
), &r
, ELF_RELOC_REL
,
1069 ef
->off
, p
, val
, len
);
1073 for (n
= 0; n
< ef
->relasz
/ sizeof(a
); n
++) {
1074 COPYOUT(ef
->rela
+ n
, &a
, sizeof(a
));
1076 error
= __elfN(reloc
)(ef
, __elfN(symaddr
), &a
, ELF_RELOC_RELA
,
1077 ef
->off
, p
, val
, len
);
1086 __elfN(symaddr
)(struct elf_file
*ef __unused
, Elf_Size symidx __unused
)
1088 /* Symbol lookup by index not required here. */