/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
 * You can obtain one at http://mozilla.org/MPL/2.0/. */

#include <cstring>
#include <sys/mman.h>
#include <vector>
#include <dlfcn.h>
#include "CustomElf.h"
#include "Mappable.h"
#include "Logging.h"

using namespace Elf;
using namespace mozilla;

#ifndef PAGE_SIZE
#define PAGE_SIZE 4096
#endif

#ifndef PAGE_MASK
#define PAGE_MASK (~ (PAGE_SIZE - 1))
#endif
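
/* Fallback definitions for platforms whose headers don't provide them.
 * With PAGE_SIZE == 4096, the idiom used below,
 *   (addr + PAGE_SIZE - 1) & PAGE_MASK
 * rounds addr up to the next page boundary (e.g. 0x1234 -> 0x2000), while
 * addr & PAGE_MASK rounds it down (0x1234 -> 0x1000). */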

/* TODO: Fill ElfLoader::Singleton.lastError on errors. */

/* Function used to report library mappings from the custom linker to Gecko
 * crash reporter */
#ifdef ANDROID
extern "C" {
  void report_mapping(char *name, void *base, uint32_t len, uint32_t offset);
}
#else
#define report_mapping(...)
#endif

const Ehdr *Ehdr::validate(const void *buf)
{
  if (!buf || buf == MAP_FAILED)
    return NULL;

  const Ehdr *ehdr = reinterpret_cast<const Ehdr *>(buf);

  /* Only support ELF executables or libraries for the host system */
  if (memcmp(ELFMAG, &ehdr->e_ident, SELFMAG) ||
      ehdr->e_ident[EI_CLASS] != ELFCLASS ||
      ehdr->e_ident[EI_DATA] != ELFDATA ||
      ehdr->e_ident[EI_VERSION] != 1 ||
      (ehdr->e_ident[EI_OSABI] != ELFOSABI && ehdr->e_ident[EI_OSABI] != ELFOSABI_NONE) ||
#ifdef EI_ABIVERSION
      ehdr->e_ident[EI_ABIVERSION] != ELFABIVERSION ||
#endif
      (ehdr->e_type != ET_EXEC && ehdr->e_type != ET_DYN) ||
      ehdr->e_machine != ELFMACHINE ||
      ehdr->e_version != 1 ||
      ehdr->e_phentsize != sizeof(Phdr))
    return NULL;

  return ehdr;
}

namespace {

void debug_phdr(const char *type, const Phdr *phdr)
{
  debug("%s @0x%08" PRIxAddr " ("
        "filesz: 0x%08" PRIxAddr ", "
        "memsz: 0x%08" PRIxAddr ", "
        "offset: 0x%08" PRIxAddr ", "
        "flags: %c%c%c)",
        type, phdr->p_vaddr, phdr->p_filesz, phdr->p_memsz,
        phdr->p_offset, phdr->p_flags & PF_R ? 'r' : '-',
        phdr->p_flags & PF_W ? 'w' : '-', phdr->p_flags & PF_X ? 'x' : '-');
}

} /* anonymous namespace */

/**
 * RAII wrapper for a mapping of the first page off a Mappable object.
 * This calls Mappable::munmap instead of system munmap.
 */
class Mappable1stPagePtr: public GenericMappedPtr<Mappable1stPagePtr> {
public:
  Mappable1stPagePtr(Mappable *mappable)
  : GenericMappedPtr<Mappable1stPagePtr>(
      mappable->mmap(NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, 0), PAGE_SIZE)
  , mappable(mappable)
  {
    /* Ensure the content of this page */
    mappable->ensure(*this);
  }

private:
  friend class GenericMappedPtr<Mappable1stPagePtr>;
  void munmap(void *buf, size_t length) {
    mappable->munmap(buf, length);
  }

  Mappable *mappable;
};
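
/* Note: CustomElf::Load reads both the ELF header and the program header
 * table through this single-page mapping, so they are assumed to fit within
 * the first PAGE_SIZE bytes of the file. This holds for typical ELF objects,
 * where the program header table immediately follows the ELF header. */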

TemporaryRef<LibHandle>
CustomElf::Load(Mappable *mappable, const char *path, int flags)
{
  debug("CustomElf::Load(\"%s\", 0x%x) = ...", path, flags);
  if (!mappable)
    return NULL;
  /* Keeping a RefPtr to the CustomElf ensures the appropriate resources
   * are freed when returning NULL early */
  RefPtr<CustomElf> elf = new CustomElf(mappable, path);
  /* Map the first page of the Elf object to access Elf and program headers */
  Mappable1stPagePtr ehdr_raw(mappable);
  if (ehdr_raw == MAP_FAILED)
    return NULL;

  const Ehdr *ehdr = Ehdr::validate(ehdr_raw);
  if (!ehdr)
    return NULL;

  /* Scan Elf Program Headers and gather some information about them */
  std::vector<const Phdr *> pt_loads;
  Addr min_vaddr = (Addr) -1; // We want to find the lowest and highest
  Addr max_vaddr = 0;         // virtual addresses used by this Elf.
  const Phdr *dyn = NULL;

  const Phdr *first_phdr = reinterpret_cast<const Phdr *>(
                           reinterpret_cast<const char *>(ehdr) + ehdr->e_phoff);
  const Phdr *end_phdr = &first_phdr[ehdr->e_phnum];

  for (const Phdr *phdr = first_phdr; phdr < end_phdr; phdr++) {
    switch (phdr->p_type) {
      case PT_LOAD:
        debug_phdr("PT_LOAD", phdr);
        pt_loads.push_back(phdr);
        if (phdr->p_vaddr < min_vaddr)
          min_vaddr = phdr->p_vaddr;
        if (max_vaddr < phdr->p_vaddr + phdr->p_memsz)
          max_vaddr = phdr->p_vaddr + phdr->p_memsz;
        break;
      case PT_DYNAMIC:
        debug_phdr("PT_DYNAMIC", phdr);
        if (!dyn) {
          dyn = phdr;
        } else {
          log("%s: Multiple PT_DYNAMIC segments detected", elf->GetPath());
          return NULL;
        }
        break;
      case PT_TLS:
        debug_phdr("PT_TLS", phdr);
        if (phdr->p_memsz) {
          log("%s: TLS is not supported", elf->GetPath());
          return NULL;
        }
        break;
      case PT_GNU_STACK:
        debug_phdr("PT_GNU_STACK", phdr);
// Skip on Android until bug 706116 is fixed
#ifndef ANDROID
        if (phdr->p_flags & PF_X) {
          log("%s: Executable stack is not supported", elf->GetPath());
          return NULL;
        }
#endif
        break;
      default:
        debug("%s: Warning: program header type #%d not handled",
              elf->GetPath(), phdr->p_type);
    }
  }

  if (min_vaddr != 0) {
    log("%s: Unsupported minimal virtual address: 0x%08" PRIxAddr,
        elf->GetPath(), min_vaddr);
    return NULL;
  }
  if (!dyn) {
    log("%s: No PT_DYNAMIC segment found", elf->GetPath());
    return NULL;
  }

  /* Reserve enough memory to map the complete virtual address space for this
   * library.
   * As we are using the base address from here to mmap something else with
   * MAP_FIXED | MAP_SHARED, we need to make sure these mmaps will work. For
   * instance, on armv6, MAP_SHARED mappings require a 16k alignment, but mmap
   * MAP_PRIVATE only returns a 4k aligned address. So we first get a base
   * address with MAP_SHARED, which guarantees the kernel returns an address
   * that we'll be able to use with MAP_FIXED, and then remap MAP_PRIVATE at
   * the same address, because of some bad side effects of keeping it as
   * MAP_SHARED. */
  elf->base.Assign(mmap(NULL, max_vaddr, PROT_NONE, MAP_SHARED | MAP_ANONYMOUS,
                        -1, 0), max_vaddr);
  if ((elf->base == MAP_FAILED) ||
      (mmap(elf->base, max_vaddr, PROT_NONE,
            MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) != elf->base)) {
    log("%s: Failed to mmap", elf->GetPath());
    return NULL;
  }

  /* Load and initialize library */
  for (std::vector<const Phdr *>::iterator it = pt_loads.begin();
       it < pt_loads.end(); ++it)
    if (!elf->LoadSegment(*it))
      return NULL;

  /* We're not going to mmap anymore */
  mappable->finalize();

  report_mapping(const_cast<char *>(elf->GetName()), elf->base,
                 (max_vaddr + PAGE_SIZE - 1) & PAGE_MASK, 0);

  elf->l_addr = elf->base;
  elf->l_name = elf->GetPath();
  elf->l_ld = elf->GetPtr<Dyn>(dyn->p_vaddr);
  ElfLoader::Singleton.Register(elf);

  if (!elf->InitDyn(dyn))
    return NULL;

  elf->stats("oneLibLoaded");
  debug("CustomElf::Load(\"%s\", 0x%x) = %p", path, flags,
        static_cast<void *>(elf));
  return elf;
}

CustomElf::~CustomElf()
{
  debug("CustomElf::~CustomElf(%p [\"%s\"])",
        reinterpret_cast<void *>(this), GetPath());
  CallFini();
  /* Normally, __cxa_finalize is called by the .fini function. However,
   * Android NDK before r6b doesn't do that. Our wrapped cxa_finalize only
   * calls destructors once, so call it in all cases. */
  ElfLoader::__wrap_cxa_finalize(this);
  delete mappable;
  ElfLoader::Singleton.Forget(this);
}

namespace {

/**
 * Hash function for symbol lookup, as defined in the ELF standard for System V
 */
unsigned long
ElfHash(const char *symbol)
{
  const unsigned char *sym = reinterpret_cast<const unsigned char *>(symbol);
  unsigned long h = 0, g;
  while (*sym) {
    h = (h << 4) + *sym++;
    if ((g = h & 0xf0000000))
      h ^= g >> 24;
    h &= ~g;
  }
  return h;
}

} /* anonymous namespace */

void *
CustomElf::GetSymbolPtr(const char *symbol) const
{
  return GetSymbolPtr(symbol, ElfHash(symbol));
}

void *
CustomElf::GetSymbolPtr(const char *symbol, unsigned long hash) const
{
  const Sym *sym = GetSymbol(symbol, hash);
  void *ptr = NULL;
  if (sym && sym->st_shndx != SHN_UNDEF)
    ptr = GetPtr(sym->st_value);
  debug("CustomElf::GetSymbolPtr(%p [\"%s\"], \"%s\") = %p",
        reinterpret_cast<const void *>(this), GetPath(), symbol, ptr);
  return ptr;
}

void *
CustomElf::GetSymbolPtrInDeps(const char *symbol) const
{
  /* Resolve dlopen and related functions to point to ours */
  if (symbol[0] == 'd' && symbol[1] == 'l') {
    if (strcmp(symbol + 2, "open") == 0)
      return FunctionPtr(__wrap_dlopen);
    if (strcmp(symbol + 2, "error") == 0)
      return FunctionPtr(__wrap_dlerror);
    if (strcmp(symbol + 2, "close") == 0)
      return FunctionPtr(__wrap_dlclose);
    if (strcmp(symbol + 2, "sym") == 0)
      return FunctionPtr(__wrap_dlsym);
    if (strcmp(symbol + 2, "addr") == 0)
      return FunctionPtr(__wrap_dladdr);
    if (strcmp(symbol + 2, "_iterate_phdr") == 0)
      return FunctionPtr(__wrap_dl_iterate_phdr);
  } else if (symbol[0] == '_' && symbol[1] == '_') {
    /* Resolve a few C++ ABI specific functions to point to ours */
#ifdef __ARM_EABI__
    if (strcmp(symbol + 2, "aeabi_atexit") == 0)
      return FunctionPtr(&ElfLoader::__wrap_aeabi_atexit);
#else
    if (strcmp(symbol + 2, "cxa_atexit") == 0)
      return FunctionPtr(&ElfLoader::__wrap_cxa_atexit);
#endif
    if (strcmp(symbol + 2, "cxa_finalize") == 0)
      return FunctionPtr(&ElfLoader::__wrap_cxa_finalize);
    if (strcmp(symbol + 2, "dso_handle") == 0)
      return const_cast<CustomElf *>(this);
    if (strcmp(symbol + 2, "moz_linker_stats") == 0)
      return FunctionPtr(&ElfLoader::stats);
  }

  void *sym;
  /* Search for the symbol in the main program. Note this also tries all the
   * libraries the system linker loaded with RTLD_GLOBAL. Unfortunately, that
   * doesn't work with bionic, but its linker doesn't normally search the main
   * binary anyway. Moreover, on Android, the main binary is dalvik. */
#ifdef __GLIBC__
  sym = dlsym(RTLD_DEFAULT, symbol);
  debug("dlsym(RTLD_DEFAULT, \"%s\") = %p", symbol, sym);
  if (sym)
    return sym;
#endif

  /* Then search for the symbol in our dependencies. Since we already searched
   * in libraries the system linker loaded, skip those (on glibc systems). We
   * also assume the symbol is to be found in one of the dependent libraries
   * directly, not in their own dependent libraries. Building libraries with
   * --no-allow-shlib-undefined ensures such indirect symbol dependencies
   * don't happen. */
  unsigned long hash = ElfHash(symbol);
  for (std::vector<RefPtr<LibHandle> >::const_iterator it = dependencies.begin();
       it < dependencies.end(); ++it) {
    if (!(*it)->IsSystemElf()) {
      sym = reinterpret_cast<CustomElf *>((*it).get())->GetSymbolPtr(symbol, hash);
#ifndef __GLIBC__
    } else {
      sym = (*it)->GetSymbolPtr(symbol);
#endif
    }
    if (sym)
      return sym;
  }
  return NULL;
}

const Sym *
CustomElf::GetSymbol(const char *symbol, unsigned long hash) const
{
  /* Search for the symbol using the buckets and chains tables.
   * The hash computed from the symbol name gives an index in the buckets
   * table. The corresponding value in the buckets table is an index in the
   * symbols table and in the chains table.
   * If the corresponding symbol in the symbols table matches, we're done.
   * Otherwise, the corresponding value in the chains table is a new index
   * in both tables, whose corresponding symbol is tested, and so on. */
  size_t bucket = hash % buckets.numElements();
  for (size_t y = buckets[bucket]; y != STN_UNDEF; y = chains[y]) {
    if (strcmp(symbol, strtab.GetStringAt(symtab[y].st_name)))
      continue;
    return &symtab[y];
  }
  return NULL;
}

bool
CustomElf::Contains(void *addr) const
{
  return base.Contains(addr);
}

void
CustomElf::stats(const char *when) const
{
  mappable->stats(when, GetPath());
}

bool
CustomElf::LoadSegment(const Phdr *pt_load) const
{
  if (pt_load->p_type != PT_LOAD) {
    debug("%s: Elf::LoadSegment only takes PT_LOAD program headers", GetPath());
    return false;
  }

  int prot = ((pt_load->p_flags & PF_X) ? PROT_EXEC : 0) |
             ((pt_load->p_flags & PF_W) ? PROT_WRITE : 0) |
             ((pt_load->p_flags & PF_R) ? PROT_READ : 0);

  /* Mmap at page boundary */
  Addr align = PAGE_SIZE;
  void *mapped, *where;
  do {
    Addr align_offset = pt_load->p_vaddr & (align - 1);
    where = GetPtr(pt_load->p_vaddr - align_offset);
    debug("%s: Loading segment @%p %c%c%c", GetPath(), where,
          prot & PROT_READ ? 'r' : '-',
          prot & PROT_WRITE ? 'w' : '-',
          prot & PROT_EXEC ? 'x' : '-');
    mapped = mappable->mmap(where, pt_load->p_filesz + align_offset,
                            prot, MAP_PRIVATE | MAP_FIXED,
                            pt_load->p_offset - align_offset);
    if ((mapped != MAP_FAILED) || (pt_load->p_vaddr == 0) ||
        (pt_load->p_align == align))
      break;
    /* The virtual address space for the library is properly aligned at
     * 16k on ARMv6 (see CustomElf::Load), and so is the first segment
     * (p_vaddr == 0). But subsequent segments may not be 16k aligned
     * and fail to mmap. In such a case, try to mmap again at the p_align
     * boundary instead of the page boundary. */
    debug("%s: Failed to mmap, retrying", GetPath());
    align = pt_load->p_align;
  } while (1);

  if (mapped != where) {
    if (mapped == MAP_FAILED) {
      log("%s: Failed to mmap", GetPath());
    } else {
      log("%s: Didn't map at the expected location (wanted: %p, got: %p)",
          GetPath(), where, mapped);
    }
    return false;
  }

  /* Ensure the availability of all pages within the mapping if on-demand
   * decompression is disabled (MOZ_LINKER_ONDEMAND=0 or signal handler not
   * registered). */
  const char *ondemand = getenv("MOZ_LINKER_ONDEMAND");
  if (!ElfLoader::Singleton.hasRegisteredHandler() ||
      (ondemand && !strncmp(ondemand, "0", 2 /* Including '\0' */))) {
    for (Addr off = 0; off < pt_load->p_filesz; off += PAGE_SIZE) {
      mappable->ensure(reinterpret_cast<char *>(mapped) + off);
    }
  }

  /* When p_memsz is greater than p_filesz, we need to have nulled out memory
   * after p_filesz and before p_memsz.
   * Mappable::mmap already guarantees that after p_filesz and up to the end
   * of the page p_filesz is in, memory is nulled out.
   * Above the end of that page, and up to p_memsz, we already have nulled out
   * memory because we mapped anonymous memory on the whole library virtual
   * address space. We just need to adjust the protection flags of this
   * anonymous memory. */
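  /* Worked example, assuming 4k pages: with p_vaddr = 0x1000,
   * p_filesz = 0x234 and p_memsz = 0x2000, file_end is 0x1234, next_page
   * is 0x2000 and mem_end is 0x3000. The range [0x1234, 0x2000) was zeroed
   * by Mappable::mmap, and [0x2000, 0x3000) is still part of the anonymous
   * PROT_NONE reservation, so it only needs its protection changed below. */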
  if (pt_load->p_memsz > pt_load->p_filesz) {
    Addr file_end = pt_load->p_vaddr + pt_load->p_filesz;
    Addr mem_end = pt_load->p_vaddr + pt_load->p_memsz;
    Addr next_page = (file_end + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
    if (mem_end > next_page) {
      if (mprotect(GetPtr(next_page), mem_end - next_page, prot) < 0) {
        log("%s: Failed to mprotect", GetPath());
        return false;
      }
    }
  }
  return true;
}

namespace {

void debug_dyn(const char *type, const Dyn *dyn)
{
  debug("%s 0x%08" PRIxAddr, type, dyn->d_un.d_val);
}

} /* anonymous namespace */

bool
CustomElf::InitDyn(const Phdr *pt_dyn)
{
  /* Scan PT_DYNAMIC segment and gather some information */
  const Dyn *first_dyn = GetPtr<Dyn>(pt_dyn->p_vaddr);
  const Dyn *end_dyn = GetPtr<Dyn>(pt_dyn->p_vaddr + pt_dyn->p_filesz);
  std::vector<Word> dt_needed;
  size_t symnum = 0;
  for (const Dyn *dyn = first_dyn; dyn < end_dyn && dyn->d_tag; dyn++) {
    switch (dyn->d_tag) {
      case DT_NEEDED:
        debug_dyn("DT_NEEDED", dyn);
        dt_needed.push_back(dyn->d_un.d_val);
        break;
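      /* The SysV DT_HASH table starts with two words, nbucket and nchain
       * (nchain also equals the number of symbol table entries), followed by
       * nbucket bucket entries and nchain chain entries; the DT_HASH case
       * below reads that header accordingly. */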
      case DT_HASH:
        {
          debug_dyn("DT_HASH", dyn);
          const Word *hash_table_header = GetPtr<Word>(dyn->d_un.d_ptr);
          symnum = hash_table_header[1];
          buckets.Init(&hash_table_header[2], hash_table_header[0]);
          chains.Init(&*buckets.end());
        }
        break;
      case DT_STRTAB:
        debug_dyn("DT_STRTAB", dyn);
        strtab.Init(GetPtr(dyn->d_un.d_ptr));
        break;
      case DT_SYMTAB:
        debug_dyn("DT_SYMTAB", dyn);
        symtab.Init(GetPtr(dyn->d_un.d_ptr));
        break;
      case DT_SYMENT:
        debug_dyn("DT_SYMENT", dyn);
        if (dyn->d_un.d_val != sizeof(Sym)) {
          log("%s: Unsupported DT_SYMENT", GetPath());
          return false;
        }
        break;
      case DT_TEXTREL:
        log("%s: Text relocations are not supported", GetPath());
        return false;
      case DT_STRSZ: /* Ignored */
        debug_dyn("DT_STRSZ", dyn);
        break;
      case UNSUPPORTED_RELOC():
      case UNSUPPORTED_RELOC(SZ):
      case UNSUPPORTED_RELOC(ENT):
        log("%s: Unsupported relocations", GetPath());
        return false;
      case RELOC():
        debug_dyn(STR_RELOC(), dyn);
        relocations.Init(GetPtr(dyn->d_un.d_ptr));
        break;
      case RELOC(SZ):
        debug_dyn(STR_RELOC(SZ), dyn);
        relocations.InitSize(dyn->d_un.d_val);
        break;
      case RELOC(ENT):
        debug_dyn(STR_RELOC(ENT), dyn);
        if (dyn->d_un.d_val != sizeof(Reloc)) {
          log("%s: Unsupported DT_RELENT", GetPath());
          return false;
        }
        break;
      case DT_JMPREL:
        debug_dyn("DT_JMPREL", dyn);
        jumprels.Init(GetPtr(dyn->d_un.d_ptr));
        break;
      case DT_PLTRELSZ:
        debug_dyn("DT_PLTRELSZ", dyn);
        jumprels.InitSize(dyn->d_un.d_val);
        break;
      case DT_PLTGOT:
        debug_dyn("DT_PLTGOT", dyn);
        break;
      case DT_INIT:
        debug_dyn("DT_INIT", dyn);
        init = dyn->d_un.d_ptr;
        break;
      case DT_INIT_ARRAY:
        debug_dyn("DT_INIT_ARRAY", dyn);
        init_array.Init(GetPtr(dyn->d_un.d_ptr));
        break;
      case DT_INIT_ARRAYSZ:
        debug_dyn("DT_INIT_ARRAYSZ", dyn);
        init_array.InitSize(dyn->d_un.d_val);
        break;
      case DT_FINI:
        debug_dyn("DT_FINI", dyn);
        fini = dyn->d_un.d_ptr;
        break;
      case DT_FINI_ARRAY:
        debug_dyn("DT_FINI_ARRAY", dyn);
        fini_array.Init(GetPtr(dyn->d_un.d_ptr));
        break;
      case DT_FINI_ARRAYSZ:
        debug_dyn("DT_FINI_ARRAYSZ", dyn);
        fini_array.InitSize(dyn->d_un.d_val);
        break;
      case DT_PLTREL:
        if (dyn->d_un.d_val != RELOC()) {
          log("%s: Error: DT_PLTREL is not " STR_RELOC(), GetPath());
          return false;
        }
        break;
      case DT_FLAGS:
        {
          Addr flags = dyn->d_un.d_val;
          /* Treat as a DT_TEXTREL tag */
          if (flags & DF_TEXTREL) {
            log("%s: Text relocations are not supported", GetPath());
            return false;
          }
          /* We can treat this like having a DT_SYMBOLIC tag */
          flags &= ~DF_SYMBOLIC;
          if (flags)
            log("%s: Warning: unhandled flags #%" PRIxAddr, GetPath(), flags);
        }
        break;
      case DT_SONAME: /* Should match GetName(), but doesn't matter */
      case DT_SYMBOLIC: /* Indicates internal symbols should be looked up in
                         * the library itself first instead of the executable,
                         * which is actually what this linker does by default */
      case RELOC(COUNT): /* Indicates how many relocations are relative, which
                          * is usually used to skip relocations on prelinked
                          * libraries. They are not supported anyway. */
      case UNSUPPORTED_RELOC(COUNT): /* This should error out, but it doesn't
                                      * really matter. */
      case DT_FLAGS_1: /* Additional linker-internal flags that we don't care
                        * about. See DF_1_* values in src/include/elf/common.h
                        * in binutils. */
      case DT_VERSYM: /* DT_VER* entries are used for symbol versioning, which */
      case DT_VERDEF: /* this linker doesn't support yet. */
      case DT_VERDEFNUM:
      case DT_VERNEED:
      case DT_VERNEEDNUM:
        /* Ignored */
        break;
      default:
        log("%s: Warning: dynamic header type #%" PRIxAddr " not handled",
            GetPath(), dyn->d_tag);
    }
  }

  if (!buckets || !symnum) {
    log("%s: Missing or broken DT_HASH", GetPath());
    return false;
  }
  if (!strtab) {
    log("%s: Missing DT_STRTAB", GetPath());
    return false;
  }
  if (!symtab) {
    log("%s: Missing DT_SYMTAB", GetPath());
    return false;
  }

  /* Load dependent libraries */
  for (size_t i = 0; i < dt_needed.size(); i++) {
    const char *name = strtab.GetStringAt(dt_needed[i]);
    RefPtr<LibHandle> handle =
      ElfLoader::Singleton.Load(name, RTLD_GLOBAL | RTLD_LAZY, this);
    if (!handle)
      return false;
    dependencies.push_back(handle);
  }

  /* Finish initialization */
  return Relocate() && RelocateJumps() && CallInit();
}

bool
CustomElf::Relocate()
{
  debug("Relocate %s @%p", GetPath(), static_cast<void *>(base));
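  /* Reminder: with REL-style relocations the addend is stored at the
   * relocation target itself, while RELA-style relocations carry it in
   * r_addend; Reloc::GetAddend() is assumed to abstract over whichever
   * flavor this architecture uses. */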
  for (Array<Reloc>::iterator rel = relocations.begin();
       rel < relocations.end(); ++rel) {
    /* Location of the relocation */
    void *ptr = GetPtr(rel->r_offset);

    /* R_*_RELATIVE relocations apply directly at the given location */
    if (ELF_R_TYPE(rel->r_info) == R_RELATIVE) {
      *(void **) ptr = GetPtr(rel->GetAddend(base));
      continue;
    }
    /* Other relocation types need a symbol resolution */
    const Sym sym = symtab[ELF_R_SYM(rel->r_info)];
    void *symptr;
    if (sym.st_shndx != SHN_UNDEF) {
      symptr = GetPtr(sym.st_value);
    } else {
      /* TODO: avoid symbol resolution when it's the same symbol as last
       * iteration */
      /* TODO: handle symbol resolving to NULL vs. being undefined. */
      symptr = GetSymbolPtrInDeps(strtab.GetStringAt(sym.st_name));
    }

    if (symptr == NULL)
      log("%s: Warning: relocation to NULL @0x%08" PRIxAddr,
          GetPath(), rel->r_offset);

    /* Apply relocation */
    switch (ELF_R_TYPE(rel->r_info)) {
      case R_GLOB_DAT:
        /* R_*_GLOB_DAT relocations simply use the symbol value */
        *(void **) ptr = symptr;
        break;
      case R_ABS:
        /* R_*_ABS* relocations add the relocation addend to the symbol value */
        *(const char **) ptr = (const char *)symptr + rel->GetAddend(base);
        break;
      default:
        log("%s: Unsupported relocation type: 0x%" PRIxAddr,
            GetPath(), ELF_R_TYPE(rel->r_info));
        return false;
    }
  }
  return true;
}

bool
CustomElf::RelocateJumps()
{
  /* TODO: Dynamic symbol resolution */
  for (Array<Reloc>::iterator rel = jumprels.begin();
       rel < jumprels.end(); ++rel) {
    /* Location of the relocation */
    void *ptr = GetPtr(rel->r_offset);

    /* Only R_*_JMP_SLOT relocations are expected */
    if (ELF_R_TYPE(rel->r_info) != R_JMP_SLOT) {
      log("%s: Jump relocation type mismatch", GetPath());
      return false;
    }

    /* TODO: Avoid code duplication with the relocations above */
    const Sym sym = symtab[ELF_R_SYM(rel->r_info)];
    void *symptr;
    if (sym.st_shndx != SHN_UNDEF)
      symptr = GetPtr(sym.st_value);
    else
      symptr = GetSymbolPtrInDeps(strtab.GetStringAt(sym.st_name));

    if (symptr == NULL) {
      log("%s: %s: relocation to NULL @0x%08" PRIxAddr " for symbol \"%s\"",
          GetPath(),
          (ELF_ST_BIND(sym.st_info) == STB_WEAK) ? "Warning" : "Error",
          rel->r_offset, strtab.GetStringAt(sym.st_name));
      if (ELF_ST_BIND(sym.st_info) != STB_WEAK)
        return false;
    }
    /* Apply relocation */
    *(void **) ptr = symptr;
  }
  return true;
}

bool
CustomElf::CallInit()
{
  if (init)
    CallFunction(init);

  for (Array<void *>::iterator it = init_array.begin();
       it < init_array.end(); ++it) {
    /* Android x86 NDK wrongly puts 0xffffffff in INIT_ARRAY */
    if (*it && *it != reinterpret_cast<void *>(-1))
      CallFunction(*it);
  }
  initialized = true;
  return true;
}
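
/* Destructors run in the reverse order of constructors: the DT_FINI_ARRAY
 * entries are called in reverse order, then the DT_FINI function, mirroring
 * CallInit above. This matches the ordering conventionally used by system
 * linkers. */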

void
CustomElf::CallFini()
{
  if (!initialized)
    return;
  for (Array<void *>::reverse_iterator it = fini_array.rbegin();
       it < fini_array.rend(); ++it) {
    /* Android x86 NDK wrongly puts 0xffffffff in FINI_ARRAY */
    if (*it && *it != reinterpret_cast<void *>(-1))
      CallFunction(*it);
  }
  if (fini)
    CallFunction(fini);
}

Mappable *
CustomElf::GetMappable() const
{
  if (!mappable)
    return NULL;
  if (mappable->GetKind() == Mappable::MAPPABLE_EXTRACT_FILE)
    return mappable;
  return ElfLoader::GetMappableFromPath(GetPath());
}