264.3.102
[darwin-xtools.git] / ld64 / src / ld / OutputFile.cpp
blob1d708da079aea22a21a90a6e143887a366ec771d
1 /* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-*
3 * Copyright (c) 2009-2011 Apple Inc. All rights reserved.
5 * @APPLE_LICENSE_HEADER_START@
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. Please obtain a copy of the License at
11 * http://www.opensource.apple.com/apsl/ and read it before using this
12 * file.
14 * The Original Code and all software distributed under the License are
15 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
16 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
17 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
19 * Please see the License for the specific language governing rights and
20 * limitations under the License.
22 * @APPLE_LICENSE_HEADER_END@
26 #include <stdlib.h>
27 #include <sys/types.h>
28 #include <sys/stat.h>
29 #include <sys/mman.h>
30 #include <sys/sysctl.h>
31 #include <sys/param.h>
32 #include <sys/mount.h>
33 #include <fcntl.h>
34 #include <errno.h>
35 #include <limits.h>
36 #include <unistd.h>
37 #include <mach/mach_time.h>
38 #include <mach/vm_statistics.h>
39 #include <mach/mach_init.h>
40 #include <mach/mach_host.h>
41 #include <uuid/uuid.h>
42 #include <dlfcn.h>
43 #include <mach-o/dyld.h>
44 #include <mach-o/fat.h>
46 #include <string>
47 #include <map>
48 #include <set>
49 #include <string>
50 #include <vector>
51 #include <list>
52 #include <algorithm>
53 #include <unordered_set>
54 #include <utility>
56 #include <CommonCrypto/CommonDigest.h>
57 #include <AvailabilityMacros.h>
59 #include "MachOTrie.hpp"
61 #include "Options.h"
63 #include "OutputFile.h"
64 #include "Architectures.hpp"
65 #include "HeaderAndLoadCommands.hpp"
66 #include "LinkEdit.hpp"
67 #include "LinkEditClassic.hpp"
69 namespace ld {
70 namespace tool {
72 uint32_t sAdrpNA = 0;
73 uint32_t sAdrpNoped = 0;
74 uint32_t sAdrpNotNoped = 0;
// Constructor: capture the command-line options and derive, once, which
// link-edit pieces this output file will need (the _has* flags).  All
// section and atom pointers start NULL and are populated later by
// addLoadCommands()/addLinkEdit().
OutputFile::OutputFile(const Options& opts)
	:
		usesWeakExternalSymbols(false), overridesWeakExternalSymbols(false),
		_noReExportedDylibs(false), pieDisabled(false), hasDataInCode(false),
		headerAndLoadCommandsSection(NULL),
		rebaseSection(NULL), bindingSection(NULL), weakBindingSection(NULL),
		lazyBindingSection(NULL), exportSection(NULL),
		splitSegInfoSection(NULL), functionStartsSection(NULL),
		dataInCodeSection(NULL), optimizationHintsSection(NULL),
		symbolTableSection(NULL), stringPoolSection(NULL),
		localRelocationsSection(NULL), externalRelocationsSection(NULL),
		sectionRelocationsSection(NULL),
		indirectSymbolTableSection(NULL),
		_options(opts),
		// which linkedit blobs are emitted is fully determined by the options
		_hasDyldInfo(opts.makeCompressedDyldInfo()),
		_hasSymbolTable(true),
		// section relocations exist only in relocatable .o output
		_hasSectionRelocations(opts.outputKind() == Options::kObjectFile),
		_hasSplitSegInfo(opts.sharedRegionEligible()),
		_hasFunctionStartsInfo(opts.addFunctionStarts()),
		_hasDataInCodeInfo(opts.addDataInCodeInfo()),
		_hasDynamicSymbolTable(true),
		// classic local/external relocations are used only when compressed
		// dyld info is not being generated
		_hasLocalRelocations(!opts.makeCompressedDyldInfo()),
		_hasExternalRelocations(!opts.makeCompressedDyldInfo()),
		_hasOptimizationHints(opts.outputKind() == Options::kObjectFile),
		_encryptedTEXTstartOffset(0),
		_encryptedTEXTendOffset(0),
		_localSymbolsStartIndex(0),
		_localSymbolsCount(0),
		_globalSymbolsStartIndex(0),
		_globalSymbolsCount(0),
		_importSymbolsStartIndex(0),
		_importSymbolsCount(0),
		_sectionsRelocationsAtom(NULL),
		_localRelocsAtom(NULL),
		_externalRelocsAtom(NULL),
		_symbolTableAtom(NULL),
		_indirectSymbolTableAtom(NULL),
		_rebasingInfoAtom(NULL),
		_bindingInfoAtom(NULL),
		_lazyBindingInfoAtom(NULL),
		_weakBindingInfoAtom(NULL),
		_exportInfoAtom(NULL),
		_splitSegInfoAtom(NULL),
		_functionStartsAtom(NULL),
		_dataInCodeAtom(NULL),
		_optimizationHintsAtom(NULL)
{
}
// Debug helper: dump every FinalSection (address, size, alignment, file
// offset) to stderr, optionally listing each atom within the section, and
// then the install paths of all dylibs being linked against.
void OutputFile::dumpAtomsBySection(ld::Internal& state, bool printAtoms)
{
	fprintf(stderr, "SORTED:\n");
	for (std::vector<ld::Internal::FinalSection*>::iterator it = state.sections.begin(); it != state.sections.end(); ++it) {
		fprintf(stderr, "final section %p %s/%s %s start addr=0x%08llX, size=0x%08llX, alignment=%02d, fileOffset=0x%08llX\n",
				(*it), (*it)->segmentName(), (*it)->sectionName(), (*it)->isSectionHidden() ? "(hidden)" : "",
				(*it)->address, (*it)->size, (*it)->alignment, (*it)->fileOffset);
		if ( printAtoms ) {
			std::vector<const ld::Atom*>& atoms = (*it)->atoms;
			for (std::vector<const ld::Atom*>::iterator ait = atoms.begin(); ait != atoms.end(); ++ait) {
				fprintf(stderr, " %p (0x%04llX) %s\n", *ait, (*ait)->size(), (*ait)->name());
			}
		}
	}
	fprintf(stderr, "DYLIBS:\n");
	for (std::vector<ld::dylib::File*>::iterator it=state.dylibs.begin(); it != state.dylibs.end(); ++it )
		fprintf(stderr, " %s\n", (*it)->installPath());
}
// Main entry point: run every phase of producing the output file, in order.
// The ordering matters: layout (load commands, linkedit sections, padding,
// file offsets, atom addresses) must be complete before the symbol table and
// linkedit blobs are encoded, and everything must be encoded before the
// bytes are written out.
void OutputFile::write(ld::Internal& state)
{
	this->buildDylibOrdinalMapping(state);
	this->addLoadCommands(state);
	this->addLinkEdit(state);
	state.setSectionSizesAndAlignments();
	this->setLoadCommandsPadding(state);
	_fileSize = state.assignFileOffsets();
	this->assignAtomAddresses(state);
	this->synthesizeDebugNotes(state);
	this->buildSymbolTable(state);
	this->generateLinkEditInfo(state);
	// two split-seg encodings exist; v2 is selected by the options
	if ( _options.sharedRegionEncodingV2() )
		this->makeSplitSegInfoV2(state);
	else
		this->makeSplitSegInfo(state);
	this->updateLINKEDITAddresses(state);
	//this->dumpAtomsBySection(state, false);
	this->writeOutputFile(state);
	this->writeMapFile(state);
}
// Find which segment contains 'addr'.  Sections are ordered by segment, so a
// segment is the run of consecutive sections sharing a segment name; when the
// name changes, the just-finished segment's [first.address, last.address+size)
// range is tested.  On success *start/*end receive that range and *index the
// zero-based segment ordinal.  Returns false if no segment contains addr.
// NOTE(review): the final segment in the list is never tested by this loop
// (the range check only fires on a name change) — confirm callers only probe
// addresses in earlier segments.
bool OutputFile::findSegment(ld::Internal& state, uint64_t addr, uint64_t* start, uint64_t* end, uint32_t* index)
{
	uint32_t segIndex = 0;
	ld::Internal::FinalSection* segFirstSection = NULL;
	ld::Internal::FinalSection* lastSection = NULL;
	for (std::vector<ld::Internal::FinalSection*>::iterator it = state.sections.begin(); it != state.sections.end(); ++it) {
		ld::Internal::FinalSection* sect = *it;
		if ( (segFirstSection == NULL ) || strcmp(segFirstSection->segmentName(), sect->segmentName()) != 0 ) {
			if ( segFirstSection != NULL ) {
				//fprintf(stderr, "findSegment(0x%llX) seg changed to %s\n", addr, sect->segmentName());
				if ( (addr >= segFirstSection->address) && (addr < lastSection->address+lastSection->size) ) {
					*start = segFirstSection->address;
					*end = lastSection->address+lastSection->size;
					*index = segIndex;
					return true;
				}
				++segIndex;
			}
			segFirstSection = sect;
		}
		lastSection = sect;
	}
	return false;
}
// Give every atom its final address by setting the start address of the
// section that contains it (an atom's finalAddress() is section start plus
// its own section offset).  Import proxies and absolute symbols get a zero
// section base so their finalAddress() is 0 / the absolute value; linkedit
// sections are deferred until updateLINKEDITAddresses().
void OutputFile::assignAtomAddresses(ld::Internal& state)
{
	const bool log = false;
	if ( log ) fprintf(stderr, "assignAtomAddresses()\n");
	for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		if ( log ) fprintf(stderr, " section=%s/%s\n", sect->segmentName(), sect->sectionName());
		for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
			const ld::Atom* atom = *ait;
			switch ( sect-> type() ) {
				case ld::Section::typeImportProxies:
					// want finalAddress() of all proxy atoms to be zero
					(const_cast<ld::Atom*>(atom))->setSectionStartAddress(0);
					break;
				case ld::Section::typeAbsoluteSymbols:
					// want finalAddress() of all absolute atoms to be value of abs symbol
					(const_cast<ld::Atom*>(atom))->setSectionStartAddress(0);
					break;
				case ld::Section::typeLinkEdit:
					// linkedit layout is assigned later
					break;
				default:
					(const_cast<ld::Atom*>(atom))->setSectionStartAddress(sect->address);
					if ( log ) fprintf(stderr, " atom=%p, addr=0x%08llX, name=%s\n", atom, atom->finalAddress(), atom->name());
					break;
			}
		}
	}
}
// Encode every linkedit blob this output needs (their sizes are unknown until
// encoded), then re-lay-out all typeLinkEdit sections: assign each atom a
// section offset honoring its alignment/modulus, and pack the linkedit
// sections contiguously in both address space and file space.  Finally,
// recompute _fileSize from the last section.
void OutputFile::updateLINKEDITAddresses(ld::Internal& state)
{
	if ( _options.makeCompressedDyldInfo() ) {
		// build dyld rebasing info
		assert(_rebasingInfoAtom != NULL);
		_rebasingInfoAtom->encode();

		// build dyld binding info
		assert(_bindingInfoAtom != NULL);
		_bindingInfoAtom->encode();

		// build dyld lazy binding info
		assert(_lazyBindingInfoAtom != NULL);
		_lazyBindingInfoAtom->encode();

		// build dyld weak binding info
		assert(_weakBindingInfoAtom != NULL);
		_weakBindingInfoAtom->encode();

		// build dyld export info
		assert(_exportInfoAtom != NULL);
		_exportInfoAtom->encode();
	}

	if ( _options.sharedRegionEligible() ) {
		// build split seg info
		assert(_splitSegInfoAtom != NULL);
		_splitSegInfoAtom->encode();
	}

	if ( _options.addFunctionStarts() ) {
		// build function starts info
		assert(_functionStartsAtom != NULL);
		_functionStartsAtom->encode();
	}

	if ( _options.addDataInCodeInfo() ) {
		// build data-in-code info
		assert(_dataInCodeAtom != NULL);
		_dataInCodeAtom->encode();
	}

	if ( _hasOptimizationHints ) {
		// build linker-optimization-hint info
		assert(_optimizationHintsAtom != NULL);
		_optimizationHintsAtom->encode();
	}

	// build classic symbol table
	assert(_symbolTableAtom != NULL);
	_symbolTableAtom->encode();
	assert(_indirectSymbolTableAtom != NULL);
	_indirectSymbolTableAtom->encode();

	// add relocations to .o files
	if ( _options.outputKind() == Options::kObjectFile ) {
		assert(_sectionsRelocationsAtom != NULL);
		_sectionsRelocationsAtom->encode();
	}

	if ( ! _options.makeCompressedDyldInfo() ) {
		// build external relocations
		assert(_externalRelocsAtom != NULL);
		_externalRelocsAtom->encode();
		// build local relocations
		assert(_localRelocsAtom != NULL);
		_localRelocsAtom->encode();
	}

	// update address and file offsets now that linkedit content has been generated
	uint64_t curLinkEditAddress = 0;
	uint64_t curLinkEditfileOffset = 0;
	for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		if ( sect->type() != ld::Section::typeLinkEdit )
			continue;
		// first linkedit section anchors the running address/offset cursors
		if ( curLinkEditAddress == 0 ) {
			curLinkEditAddress = sect->address;
			curLinkEditfileOffset = sect->fileOffset;
		}
		uint16_t maxAlignment = 0;
		uint64_t offset = 0;
		for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
			const ld::Atom* atom = *ait;
			//fprintf(stderr, "setting linkedit atom offset for %s\n", atom->name());
			if ( atom->alignment().powerOf2 > maxAlignment )
				maxAlignment = atom->alignment().powerOf2;
			// calculate section offset for this atom: advance 'offset' until it
			// satisfies the atom's (modulus mod 2^powerOf2) alignment constraint
			uint64_t alignment = 1 << atom->alignment().powerOf2;
			uint64_t currentModulus = (offset % alignment);
			uint64_t requiredModulus = atom->alignment().modulus;
			if ( currentModulus != requiredModulus ) {
				if ( requiredModulus > currentModulus )
					offset += requiredModulus-currentModulus;
				else
					offset += requiredModulus+alignment-currentModulus;
			}
			(const_cast<ld::Atom*>(atom))->setSectionOffset(offset);
			(const_cast<ld::Atom*>(atom))->setSectionStartAddress(curLinkEditAddress);
			offset += atom->size();
		}
		sect->size = offset;
		// section alignment is that of a contained atom with the greatest alignment
		sect->alignment = maxAlignment;
		sect->address = curLinkEditAddress;
		sect->fileOffset = curLinkEditfileOffset;
		curLinkEditAddress += sect->size;
		curLinkEditfileOffset += sect->size;
	}

	_fileSize = state.sections.back()->fileOffset + state.sections.back()->size;
}
// Compute how much padding to place after the mach header + load commands
// (room for post-processing tools such as install_name_tool to grow load
// commands), and grow headerAndLoadCommandsSection by that amount.
void OutputFile::setLoadCommandsPadding(ld::Internal& state)
{
	// In other sections, any extra space is put and end of segment.
	// In __TEXT segment, any extra space is put after load commands to allow post-processing of load commands
	// Do a reverse layout of __TEXT segment to determine padding size and adjust section size
	uint64_t paddingSize = 0;
	switch ( _options.outputKind() ) {
		case Options::kDyld:
			// dyld itself has special padding requirements. We want the beginning __text section to start at a stable address
			assert(strcmp(state.sections[1]->sectionName(),"__text") == 0);
			state.sections[1]->alignment = 12; // page align __text
			break;
		case Options::kObjectFile:
			// mach-o .o files need no padding between load commands and first section
			// but leave enough room that the object file could be signed
			paddingSize = 32;
			break;
		case Options::kPreload:
			// mach-o MH_PRELOAD files need no padding between load commands and first section
			paddingSize = 0;
			// NOTE(review): there is no 'break' here, so kPreload falls through
			// into the default computation below, which overwrites paddingSize —
			// confirm this fall-through is intentional.
		default:
			// work backwards from end of segment and lay out sections so that extra room goes to padding atom
			uint64_t addr = 0;
			uint64_t textSegPageSize = _options.segPageSize("__TEXT");
			// shared-region-eligible iOS 8+ images packed in 16KB pages can use 4KB sub-paging
			if ( _options.sharedRegionEligible() && (_options.iOSVersionMin() >= ld::iOS_8_0) && (textSegPageSize == 0x4000) )
				textSegPageSize = 0x1000;
			for (std::vector<ld::Internal::FinalSection*>::reverse_iterator it = state.sections.rbegin(); it != state.sections.rend(); ++it) {
				ld::Internal::FinalSection* sect = *it;
				if ( strcmp(sect->segmentName(), "__TEXT") != 0 )
					continue;
				if ( sect == headerAndLoadCommandsSection ) {
					addr -= headerAndLoadCommandsSection->size;
					// leftover distance to the page boundary becomes the padding
					paddingSize = addr % textSegPageSize;
					break;
				}
				addr -= sect->size;
				addr = addr & (0 - (1 << sect->alignment));
			}

			// if command line requires more padding than this
			uint32_t minPad = _options.minimumHeaderPad();
			if ( _options.maxMminimumHeaderPad() ) {
				// -headerpad_max_install_names means there should be room for every path load command to grow to 1204 bytes
				uint32_t altMin = _dylibsToLoad.size() * MAXPATHLEN;
				if ( _options.outputKind() == Options::kDynamicLibrary )
					altMin += MAXPATHLEN;
				if ( altMin > minPad )
					minPad = altMin;
			}
			if ( paddingSize < minPad ) {
				// round the shortfall up to whole segment-alignment pages
				int extraPages = (minPad - paddingSize + _options.segmentAlignment() - 1)/_options.segmentAlignment();
				paddingSize += extraPages * _options.segmentAlignment();
			}

			if ( _options.makeEncryptable() ) {
				// load commands must be on a separate non-encrypted page
				int loadCommandsPage = (headerAndLoadCommandsSection->size + minPad)/_options.segmentAlignment();
				int textPage = (headerAndLoadCommandsSection->size + paddingSize)/_options.segmentAlignment();
				if ( loadCommandsPage == textPage ) {
					paddingSize += _options.segmentAlignment();
					textPage += 1;
				}
				// remember start for later use by load command
				_encryptedTEXTstartOffset = textPage*_options.segmentAlignment();
			}
			break;
	}
	// add padding to size of section
	headerAndLoadCommandsSection->size += paddingSize;
}
409 uint64_t OutputFile::pageAlign(uint64_t addr)
411 const uint64_t alignment = _options.segmentAlignment();
412 return ((addr+alignment-1) & (-alignment));
415 uint64_t OutputFile::pageAlign(uint64_t addr, uint64_t pageSize)
417 return ((addr+pageSize-1) & (-pageSize));
420 static const char* makeName(const ld::Atom& atom)
422 static char buffer[4096];
423 switch ( atom.symbolTableInclusion() ) {
424 case ld::Atom::symbolTableNotIn:
425 case ld::Atom::symbolTableNotInFinalLinkedImages:
426 sprintf(buffer, "%s@0x%08llX", atom.name(), atom.objectAddress());
427 break;
428 case ld::Atom::symbolTableIn:
429 case ld::Atom::symbolTableInAndNeverStrip:
430 case ld::Atom::symbolTableInAsAbsolute:
431 case ld::Atom::symbolTableInWithRandomAutoStripLabel:
432 strlcpy(buffer, atom.name(), 4096);
433 break;
435 return buffer;
438 static const char* referenceTargetAtomName(ld::Internal& state, const ld::Fixup* ref)
440 switch ( ref->binding ) {
441 case ld::Fixup::bindingNone:
442 return "NO BINDING";
443 case ld::Fixup::bindingByNameUnbound:
444 return (char*)(ref->u.target);
445 case ld::Fixup::bindingByContentBound:
446 case ld::Fixup::bindingDirectlyBound:
447 return makeName(*((ld::Atom*)(ref->u.target)));
448 case ld::Fixup::bindingsIndirectlyBound:
449 return makeName(*state.indirectBindingTable[ref->u.bindingIndex]);
451 return "BAD BINDING";
454 bool OutputFile::targetIsThumb(ld::Internal& state, const ld::Fixup* fixup)
456 switch ( fixup->binding ) {
457 case ld::Fixup::bindingByContentBound:
458 case ld::Fixup::bindingDirectlyBound:
459 return fixup->u.target->isThumb();
460 case ld::Fixup::bindingsIndirectlyBound:
461 return state.indirectBindingTable[fixup->u.bindingIndex]->isThumb();
462 default:
463 break;
465 throw "unexpected binding";
// Return the final address of a fixup's target, also returning the target
// atom via 'target'.  With classic (non-compressed) relocations, a fixup
// whose addend lives entirely in the instruction content does not use the
// target address at all, so zero is returned for it.
uint64_t OutputFile::addressOf(const ld::Internal& state, const ld::Fixup* fixup, const ld::Atom** target)
{
	if ( !_options.makeCompressedDyldInfo() ) {
		// For external relocations the classic mach-o format
		// has addend only stored in the content. That means
		// that the address of the target is not used.
		if ( fixup->contentAddendOnly )
			return 0;
	}
	switch ( fixup->binding ) {
		case ld::Fixup::bindingNone:
			throw "unexpected bindingNone";
		case ld::Fixup::bindingByNameUnbound:
			throw "unexpected bindingByNameUnbound";
		case ld::Fixup::bindingByContentBound:
		case ld::Fixup::bindingDirectlyBound:
			*target = fixup->u.target;
			return (*target)->finalAddress();
		case ld::Fixup::bindingsIndirectlyBound:
			*target = state.indirectBindingTable[fixup->u.bindingIndex];
#ifndef NDEBUG
			// debug-build sanity check: target must already have an address
			if ( ! (*target)->finalAddressMode() ) {
				throwf("reference to symbol (which has not been assigned an address) %s", (*target)->name());
			}
#endif
			return (*target)->finalAddress();
	}
	throw "unexpected binding";
}
498 uint64_t OutputFile::sectionOffsetOf(const ld::Internal& state, const ld::Fixup* fixup)
500 const ld::Atom* target = NULL;
501 switch ( fixup->binding ) {
502 case ld::Fixup::bindingNone:
503 throw "unexpected bindingNone";
504 case ld::Fixup::bindingByNameUnbound:
505 throw "unexpected bindingByNameUnbound";
506 case ld::Fixup::bindingByContentBound:
507 case ld::Fixup::bindingDirectlyBound:
508 target = fixup->u.target;
509 break;
510 case ld::Fixup::bindingsIndirectlyBound:
511 target = state.indirectBindingTable[fixup->u.bindingIndex];
512 break;
514 assert(target != NULL);
516 uint64_t targetAddress = target->finalAddress();
517 for (std::vector<ld::Internal::FinalSection*>::const_iterator it = state.sections.begin(); it != state.sections.end(); ++it) {
518 const ld::Internal::FinalSection* sect = *it;
519 if ( (sect->address <= targetAddress) && (targetAddress < (sect->address+sect->size)) )
520 return targetAddress - sect->address;
522 throw "section not found for section offset";
// Return the offset of a fixup's target within the thread-local-variable
// template (__thread_data/__thread_bss region).
// NOTE(review): the offset is computed relative to the FIRST section of type
// typeTLVInitialValues/typeTLVZeroFill encountered — this assumes the TLV
// sections are contiguous and ordered with initial-values first; confirm that
// layout guarantee elsewhere in the linker.
uint64_t OutputFile::tlvTemplateOffsetOf(const ld::Internal& state, const ld::Fixup* fixup)
{
	const ld::Atom* target = NULL;
	switch ( fixup->binding ) {
		case ld::Fixup::bindingNone:
			throw "unexpected bindingNone";
		case ld::Fixup::bindingByNameUnbound:
			throw "unexpected bindingByNameUnbound";
		case ld::Fixup::bindingByContentBound:
		case ld::Fixup::bindingDirectlyBound:
			target = fixup->u.target;
			break;
		case ld::Fixup::bindingsIndirectlyBound:
			target = state.indirectBindingTable[fixup->u.bindingIndex];
			break;
	}
	assert(target != NULL);

	for (std::vector<ld::Internal::FinalSection*>::const_iterator it = state.sections.begin(); it != state.sections.end(); ++it) {
		const ld::Internal::FinalSection* sect = *it;
		switch ( sect->type() ) {
			case ld::Section::typeTLVInitialValues:
			case ld::Section::typeTLVZeroFill:
				return target->finalAddress() - sect->address;
			default:
				break;
		}
	}
	throw "section not found for tlvTemplateOffsetOf";
}
558 void OutputFile::printSectionLayout(ld::Internal& state)
560 // show layout of final image
561 fprintf(stderr, "final section layout:\n");
562 for (std::vector<ld::Internal::FinalSection*>::iterator it = state.sections.begin(); it != state.sections.end(); ++it) {
563 if ( (*it)->isSectionHidden() )
564 continue;
565 fprintf(stderr, " %s/%s addr=0x%08llX, size=0x%08llX, fileOffset=0x%08llX, type=%d\n",
566 (*it)->segmentName(), (*it)->sectionName(),
567 (*it)->address, (*it)->size, (*it)->fileOffset, (*it)->type());
572 void OutputFile::rangeCheck8(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
574 if ( (displacement > 127) || (displacement < -128) ) {
575 // show layout of final image
576 printSectionLayout(state);
578 const ld::Atom* target;
579 throwf("8-bit reference out of range (%lld max is +/-127B): from %s (0x%08llX) to %s (0x%08llX)",
580 displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
581 addressOf(state, fixup, &target));
585 void OutputFile::rangeCheck16(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
587 const int64_t thirtyTwoKLimit = 0x00007FFF;
588 if ( (displacement > thirtyTwoKLimit) || (displacement < (-thirtyTwoKLimit)) ) {
589 // show layout of final image
590 printSectionLayout(state);
592 const ld::Atom* target;
593 throwf("16-bit reference out of range (%lld max is +/-32KB): from %s (0x%08llX) to %s (0x%08llX)",
594 displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
595 addressOf(state, fixup, &target));
599 void OutputFile::rangeCheckBranch32(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
601 const int64_t twoGigLimit = 0x7FFFFFFF;
602 if ( (displacement > twoGigLimit) || (displacement < (-twoGigLimit)) ) {
603 // show layout of final image
604 printSectionLayout(state);
606 const ld::Atom* target;
607 throwf("32-bit branch out of range (%lld max is +/-2GB): from %s (0x%08llX) to %s (0x%08llX)",
608 displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
609 addressOf(state, fixup, &target));
// Verify a 32-bit absolute address fits in 4GB.  On 32-bit archs (arm/i386)
// the assembler loses sign information, so an over-range value is only a
// warning (and suppressed entirely for -preload/-static outputs); on other
// archs it is a hard error.
void OutputFile::rangeCheckAbsolute32(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
{
	const int64_t fourGigLimit = 0xFFFFFFFF;
	if ( displacement > fourGigLimit ) {
		// <rdar://problem/9610466> cannot enforce 32-bit range checks on 32-bit archs because assembler loses sign information
		//  .long _foo - 0xC0000000
		// is encoded in mach-o the same as:
		//  .long _foo + 0x40000000
		// so if _foo lays out to 0xC0000100, the first is ok, but the second is not.
		if ( (_options.architecture() == CPU_TYPE_ARM) || (_options.architecture() == CPU_TYPE_I386) ) {
			// Unlikely userland code does funky stuff like this, so warn for them, but not warn for -preload or -static
			if ( (_options.outputKind() != Options::kPreload) && (_options.outputKind() != Options::kStaticExecutable) ) {
				warning("32-bit absolute address out of range (0x%08llX max is 4GB): from %s + 0x%08X (0x%08llX) to 0x%08llX",
						displacement, atom->name(), fixup->offsetInAtom, atom->finalAddress(), displacement);
			}
			return;
		}
		// show layout of final image
		printSectionLayout(state);

		const ld::Atom* target;
		if ( fixup->binding == ld::Fixup::bindingNone )
			throwf("32-bit absolute address out of range (0x%08llX max is 4GB): from %s + 0x%08X (0x%08llX) to 0x%08llX",
					displacement, atom->name(), fixup->offsetInAtom, atom->finalAddress(), displacement);
		else
			throwf("32-bit absolute address out of range (0x%08llX max is 4GB): from %s + 0x%08X (0x%08llX) to %s (0x%08llX)",
					displacement, atom->name(), fixup->offsetInAtom, atom->finalAddress(), referenceTargetAtomName(state, fixup),
					addressOf(state, fixup, &target));
	}
}
646 void OutputFile::rangeCheckRIP32(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
648 const int64_t twoGigLimit = 0x7FFFFFFF;
649 if ( (displacement > twoGigLimit) || (displacement < (-twoGigLimit)) ) {
650 // show layout of final image
651 printSectionLayout(state);
653 const ld::Atom* target;
654 throwf("32-bit RIP relative reference out of range (%lld max is +/-4GB): from %s (0x%08llX) to %s (0x%08llX)",
655 displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
656 addressOf(state, fixup, &target));
660 void OutputFile::rangeCheckARM12(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
662 if ( (displacement > 4092LL) || (displacement < (-4092LL)) ) {
663 // show layout of final image
664 printSectionLayout(state);
666 const ld::Atom* target;
667 throwf("ARM ldr 12-bit displacement out of range (%lld max is +/-4096B): from %s (0x%08llX) to %s (0x%08llX)",
668 displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
669 addressOf(state, fixup, &target));
673 bool OutputFile::checkArmBranch24Displacement(int64_t displacement)
675 return ( (displacement < 33554428LL) && (displacement > (-33554432LL)) );
678 void OutputFile::rangeCheckARMBranch24(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
680 if ( checkArmBranch24Displacement(displacement) )
681 return;
683 // show layout of final image
684 printSectionLayout(state);
686 const ld::Atom* target;
687 throwf("b/bl/blx ARM branch out of range (%lld max is +/-32MB): from %s (0x%08llX) to %s (0x%08llX)",
688 displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
689 addressOf(state, fixup, &target));
692 bool OutputFile::checkThumbBranch22Displacement(int64_t displacement)
694 // thumb2 supports +/- 16MB displacement
695 if ( _options.preferSubArchitecture() && _options.archSupportsThumb2() ) {
696 if ( (displacement > 16777214LL) || (displacement < (-16777216LL)) ) {
697 return false;
700 else {
701 // thumb1 supports +/- 4MB displacement
702 if ( (displacement > 4194302LL) || (displacement < (-4194304LL)) ) {
703 return false;
706 return true;
709 void OutputFile::rangeCheckThumbBranch22(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
711 if ( checkThumbBranch22Displacement(displacement) )
712 return;
714 // show layout of final image
715 printSectionLayout(state);
717 const ld::Atom* target;
718 if ( _options.preferSubArchitecture() && _options.archSupportsThumb2() ) {
719 throwf("b/bl/blx thumb2 branch out of range (%lld max is +/-16MB): from %s (0x%08llX) to %s (0x%08llX)",
720 displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
721 addressOf(state, fixup, &target));
723 else {
724 throwf("b/bl/blx thumb1 branch out of range (%lld max is +/-4MB): from %s (0x%08llX) to %s (0x%08llX)",
725 displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
726 addressOf(state, fixup, &target));
731 void OutputFile::rangeCheckARM64Branch26(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
733 const int64_t bl_128MegLimit = 0x07FFFFFF;
734 if ( (displacement > bl_128MegLimit) || (displacement < (-bl_128MegLimit)) ) {
735 // show layout of final image
736 printSectionLayout(state);
738 const ld::Atom* target;
739 throwf("b(l) ARM64 branch out of range (%lld max is +/-128MB): from %s (0x%08llX) to %s (0x%08llX)",
740 displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
741 addressOf(state, fixup, &target));
745 void OutputFile::rangeCheckARM64Page21(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
747 const int64_t adrp_4GigLimit = 0x100000000ULL;
748 if ( (displacement > adrp_4GigLimit) || (displacement < (-adrp_4GigLimit)) ) {
749 // show layout of final image
750 printSectionLayout(state);
752 const ld::Atom* target;
753 throwf("ARM64 ADRP out of range (%lld max is +/-4GB): from %s (0x%08llX) to %s (0x%08llX)",
754 displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
755 addressOf(state, fixup, &target));
// Byte-order accessors: typed 16/32/64-bit loads and stores through the
// endian-swapping template helpers, in both little- and big-endian flavors.
// NOTE(review): 'loc' is reinterpreted as a wider integer pointer, so these
// rely on unaligned access being permitted on the host — confirm for all
// build targets.
uint16_t OutputFile::get16LE(uint8_t* loc) { return LittleEndian::get16(*(uint16_t*)loc); }
void     OutputFile::set16LE(uint8_t* loc, uint16_t value) { LittleEndian::set16(*(uint16_t*)loc, value); }

uint32_t OutputFile::get32LE(uint8_t* loc) { return LittleEndian::get32(*(uint32_t*)loc); }
void     OutputFile::set32LE(uint8_t* loc, uint32_t value) { LittleEndian::set32(*(uint32_t*)loc, value); }

uint64_t OutputFile::get64LE(uint8_t* loc) { return LittleEndian::get64(*(uint64_t*)loc); }
void     OutputFile::set64LE(uint8_t* loc, uint64_t value) { LittleEndian::set64(*(uint64_t*)loc, value); }

uint16_t OutputFile::get16BE(uint8_t* loc) { return BigEndian::get16(*(uint16_t*)loc); }
void     OutputFile::set16BE(uint8_t* loc, uint16_t value) { BigEndian::set16(*(uint16_t*)loc, value); }

uint32_t OutputFile::get32BE(uint8_t* loc) { return BigEndian::get32(*(uint32_t*)loc); }
void     OutputFile::set32BE(uint8_t* loc, uint32_t value) { BigEndian::set32(*(uint32_t*)loc, value); }

uint64_t OutputFile::get64BE(uint8_t* loc) { return BigEndian::get64(*(uint64_t*)loc); }
void     OutputFile::set64BE(uint8_t* loc, uint64_t value) { BigEndian::set64(*(uint64_t*)loc, value); }
778 #if SUPPORT_ARCH_arm64
// Encoding of the AArch64 NOP instruction (HINT #0).
static uint32_t makeNOP()
{
	return 0xD503201F;
}
// Kind of sign extension a load instruction performs on the loaded value.
enum SignExtension { signedNot, signed32, signed64 };

// Decoded fields of an AArch64 load/store instruction, used by the ADRP
// optimization code below to parse and re-encode instructions.
struct LoadStoreInfo {
	uint32_t	reg;          // destination/source register number
	uint32_t	baseReg;      // base address register number
	uint32_t	offset;       // after scaling
	uint32_t	size;         // 1,2,4,8, or 16
	bool		isStore;
	bool		isFloat;      // if destReg is FP/SIMD
	SignExtension	signEx;   // if load is sign extended
};
// Assemble an AArch64 LDR (literal) instruction: load 'info.reg' from a
// PC-relative address.  Only loads (never stores) of 4/8/16 bytes have a
// literal form, and the delta must be within +/-1MB and 4-byte aligned.
static uint32_t makeLDR_literal(const LoadStoreInfo& info, uint64_t targetAddress, uint64_t instructionAddress)
{
	int64_t delta = targetAddress - instructionAddress;
	assert(delta < 1024*1024);      // literal form reaches only +/-1MB
	assert(delta > -1024*1024);
	assert((info.reg & 0xFFFFFFE0) == 0);
	assert((targetAddress & 0x3) == 0);
	assert((instructionAddress & 0x3) == 0);
	assert(!info.isStore);
	// delta/4 goes in bits 5..23: (delta >> 2) << 5 == delta << 3
	uint32_t imm19 = (delta << 3) & 0x00FFFFE0;
	uint32_t instruction = 0;
	switch ( info.size ) {
		case 4:
			if ( info.isFloat ) {
				assert(info.signEx == signedNot);
				instruction = 0x1C000000;       // LDR S-reg, literal
			}
			else {
				if ( info.signEx == signed64 )
					instruction = 0x98000000;   // LDRSW X-reg, literal
				else
					instruction = 0x18000000;   // LDR W-reg, literal
			}
			break;
		case 8:
			assert(info.signEx == signedNot);
			instruction = info.isFloat ? 0x5C000000 : 0x58000000;   // LDR D-reg / X-reg
			break;
		case 16:
			assert(info.signEx == signedNot);
			instruction = 0x9C000000;           // LDR Q-reg, literal
			break;
		default:
			assert(0 && "invalid load size for literal");
	}
	return (instruction | imm19 | info.reg);
}
// Assemble an AArch64 ADR instruction: destReg <- PC-relative address of
// 'targetAddress'.  The +/-1MB byte delta is split per the ARMv8 encoding
// into a 2-bit low field (immlo, bits 29-30) and 19-bit high field
// (immhi, bits 5-23).
static uint32_t makeADR(uint32_t destReg, uint64_t targetAddress, uint64_t instructionAddress)
{
	assert((destReg & 0xFFFFFFE0) == 0);
	assert((instructionAddress & 0x3) == 0);
	const int64_t delta = targetAddress - instructionAddress;
	assert(delta < 1024*1024);
	assert(delta > -1024*1024);
	const uint32_t immhi = (delta & 0x001FFFFC) << 3;
	const uint32_t immlo = (delta & 0x00000003) << 29;
	const uint32_t opcode = 0x10000000;   // ADR
	return opcode | immhi | immlo | destReg;
}
// Assemble an AArch64 LDR/STR (unsigned immediate) instruction from decoded
// fields.  The access size selects the 'size' bits and the scaling applied
// to the immediate offset; 'opc' encodes load-vs-store and sign extension.
// 16-byte accesses are valid only for FP/SIMD registers.
static uint32_t makeLoadOrStore(const LoadStoreInfo& info)
{
	uint32_t instruction = 0x39000000;   // base opcode for LDR/STR (unsigned imm)
	if ( info.isFloat )
		instruction |= 0x04000000;       // V bit: FP/SIMD register file
	instruction |= info.reg;
	instruction |= (info.baseReg << 5);
	uint32_t sizeBits = 0;
	uint32_t opcBits = 0;
	uint32_t imm12Bits = 0;
	switch ( info.size ) {
		case 1:
			sizeBits = 0;
			imm12Bits = info.offset;     // byte access: no scaling
			if ( info.isStore ) {
				opcBits = 0;
			}
			else {
				switch ( info.signEx ) {
					case signedNot:
						opcBits = 1;
						break;
					case signed32:
						opcBits = 3;
						break;
					case signed64:
						opcBits = 2;
						break;
				}
			}
			break;
		case 2:
			sizeBits = 1;
			assert((info.offset % 2) == 0);
			imm12Bits = info.offset/2;   // halfword access: offset scaled by 2
			if ( info.isStore ) {
				opcBits = 0;
			}
			else {
				switch ( info.signEx ) {
					case signedNot:
						opcBits = 1;
						break;
					case signed32:
						opcBits = 3;
						break;
					case signed64:
						opcBits = 2;
						break;
				}
			}
			break;
		case 4:
			sizeBits = 2;
			assert((info.offset % 4) == 0);
			imm12Bits = info.offset/4;   // word access: offset scaled by 4
			if ( info.isStore ) {
				opcBits = 0;
			}
			else {
				switch ( info.signEx ) {
					case signedNot:
						opcBits = 1;
						break;
					case signed32:
						assert(0 && "cannot use signed32 with 32-bit load/store");
						break;
					case signed64:
						opcBits = 2;
						break;
				}
			}
			break;
		case 8:
			sizeBits = 3;
			assert((info.offset % 8) == 0);
			imm12Bits = info.offset/8;   // doubleword access: offset scaled by 8
			if ( info.isStore ) {
				opcBits = 0;
			}
			else {
				opcBits = 1;
				assert(info.signEx == signedNot);
			}
			break;
		case 16:
			sizeBits = 0;                // 128-bit: size=00 with opc high bit set
			assert((info.offset % 16) == 0);
			imm12Bits = info.offset/16;
			assert(info.isFloat);        // only FP/SIMD regs are 16 bytes wide
			if ( info.isStore ) {
				opcBits = 2;
			}
			else {
				opcBits = 3;
			}
			break;
		default:
			assert(0 && "bad load/store size");
			break;
	}
	assert(imm12Bits < 4096);
	return (instruction | (sizeBits << 30) | (opcBits << 22) | (imm12Bits << 10));
}
951 static bool parseLoadOrStore(uint32_t instruction, LoadStoreInfo& info)
953 if ( (instruction & 0x3B000000) != 0x39000000 )
954 return false;
955 info.isFloat = ( (instruction & 0x04000000) != 0 );
956 info.reg = (instruction & 0x1F);
957 info.baseReg = ((instruction>>5) & 0x1F);
958 switch (instruction & 0xC0C00000) {
959 case 0x00000000:
960 info.size = 1;
961 info.isStore = true;
962 info.signEx = signedNot;
963 break;
964 case 0x00400000:
965 info.size = 1;
966 info.isStore = false;
967 info.signEx = signedNot;
968 break;
969 case 0x00800000:
970 if ( info.isFloat ) {
971 info.size = 16;
972 info.isStore = true;
973 info.signEx = signedNot;
975 else {
976 info.size = 1;
977 info.isStore = false;
978 info.signEx = signed64;
980 break;
981 case 0x00C00000:
982 if ( info.isFloat ) {
983 info.size = 16;
984 info.isStore = false;
985 info.signEx = signedNot;
987 else {
988 info.size = 1;
989 info.isStore = false;
990 info.signEx = signed32;
992 break;
993 case 0x40000000:
994 info.size = 2;
995 info.isStore = true;
996 info.signEx = signedNot;
997 break;
998 case 0x40400000:
999 info.size = 2;
1000 info.isStore = false;
1001 info.signEx = signedNot;
1002 break;
1003 case 0x40800000:
1004 info.size = 2;
1005 info.isStore = false;
1006 info.signEx = signed64;
1007 break;
1008 case 0x40C00000:
1009 info.size = 2;
1010 info.isStore = false;
1011 info.signEx = signed32;
1012 break;
1013 case 0x80000000:
1014 info.size = 4;
1015 info.isStore = true;
1016 info.signEx = signedNot;
1017 break;
1018 case 0x80400000:
1019 info.size = 4;
1020 info.isStore = false;
1021 info.signEx = signedNot;
1022 break;
1023 case 0x80800000:
1024 info.size = 4;
1025 info.isStore = false;
1026 info.signEx = signed64;
1027 break;
1028 case 0xC0000000:
1029 info.size = 8;
1030 info.isStore = true;
1031 info.signEx = signedNot;
1032 break;
1033 case 0xC0400000:
1034 info.size = 8;
1035 info.isStore = false;
1036 info.signEx = signedNot;
1037 break;
1038 default:
1039 return false;
1041 info.offset = ((instruction >> 10) & 0x0FFF) * info.size;
1042 return true;
// Result of decoding an ADRP instruction.
struct AdrpInfo {
	uint32_t	destReg;
};

// Returns true (and fills in info) if the instruction is an arm64 ADRP;
// info is left untouched otherwise.
static bool parseADRP(uint32_t instruction, AdrpInfo& info)
{
	const bool isAdrp = ( (instruction & 0x9F000000) == 0x90000000 );
	if ( isAdrp )
		info.destReg = (instruction & 0x1F);
	return isAdrp;
}
// Result of decoding an ADD-immediate instruction.
struct AddInfo {
	uint32_t	destReg;
	uint32_t	srcReg;
	uint32_t	addend;
};

// Returns true (and fills in info) if the instruction is a 64-bit arm64
// ADD immediate with no shift; info is left untouched otherwise.
static bool parseADD(uint32_t instruction, AddInfo& info)
{
	if ( (instruction & 0xFFC00000) == 0x91000000 ) {
		info.destReg = (instruction & 0x1F);
		info.srcReg  = ((instruction >> 10) >> 0 & 0) | ((instruction >> 5) & 0x1F);
		info.addend  = ((instruction >> 10) & 0xFFF);
		return true;
	}
	return false;
}
1075 #if 0
// Encodes an arm64 LDR (immediate, unsigned scaled offset) instruction from
// the given LoadStoreInfo.  (This function is currently compiled out via the
// surrounding #if 0.)
static uint32_t makeLDR_scaledOffset(const LoadStoreInfo& info)
{
	assert((info.reg & 0xFFFFFFE0) == 0);		// Rt must be a 5-bit register number
	assert((info.baseReg & 0xFFFFFFE0) == 0);	// Rn must be a 5-bit register number
	// NOTE(review): this condition looks suspicious -- a float load would be
	// expected to require signedNot rather than forbid it; confirm before
	// re-enabling this #if 0 code.
	assert(!info.isFloat || (info.signEx != signedNot));
	uint32_t sizeBits = 0;
	uint32_t opcBits = 1;
	uint32_t vBit = info.isFloat;
	// opc field (bits 23-22) encodes the sign-extension behavior for loads
	switch ( info.signEx ) {
		case signedNot:
			opcBits = 1;
			break;
		case signed32:
			opcBits = 3;
			break;
		case signed64:
			opcBits = 2;
			break;
		default:
			assert(0 && "bad SignExtension runtime value");
	}
	// size field (bits 31-30) selects the access width
	switch ( info.size ) {
		case 1:
			sizeBits = 0;
			break;
		case 2:
			sizeBits = 1;
			break;
		case 4:
			sizeBits = 2;
			break;
		case 8:
			sizeBits = 3;
			break;
		case 16:
			// 128-bit access: size=00 with V=1 and opc bit 1 set
			sizeBits = 0;
			vBit = 1;
			opcBits = 3;
			break;
		default:
			assert(0 && "invalid load size for literal");
	}
	// the byte offset is stored scaled by the access size in the imm12 field
	assert((info.offset % info.size) == 0);
	uint32_t scaledOffset = info.offset/info.size;
	assert(scaledOffset < 4096);
	return (0x39000000 | (sizeBits<<30) | (vBit<<26) | (opcBits<<22) | (scaledOffset<<10) | (info.baseReg<<5) | info.reg);
}
// Encodes an arm64 LDR (literal) instruction loading a pc-relative value
// into destReg.  loadSize (4, 8, or 16 bytes) selects the opcode; the target
// must be 4-byte aligned and within +/-1MB of the instruction.  (Currently
// compiled out via the surrounding #if 0.)
static uint32_t makeLDR_literal(uint32_t destReg, uint32_t loadSize, bool isFloat, uint64_t targetAddress, uint64_t instructionAddress)
{
	const int64_t displacement = targetAddress - instructionAddress;
	assert(displacement < 1024*1024);
	assert(displacement > -1024*1024);
	assert((destReg & 0xFFFFFFE0) == 0);
	assert((targetAddress & 0x3) == 0);
	assert((instructionAddress & 0x3) == 0);
	// imm19 field (bits 23-5) holds displacement/4
	const uint32_t imm19Field = ((uint32_t)(displacement << 3)) & 0x00FFFFE0;
	uint32_t opcode = 0;
	switch ( loadSize ) {
		case 4:
			opcode = isFloat ? 0x1C000000 : 0x18000000;		// LDR S / LDR W
			break;
		case 8:
			opcode = isFloat ? 0x5C000000 : 0x58000000;		// LDR D / LDR X
			break;
		case 16:
			opcode = 0x9C000000;							// LDR Q
			break;
		default:
			assert(0 && "invalid load size for literal");
	}
	return (opcode | imm19Field | destReg);
}
// Decodes an arm64 LDR (immediate, unsigned offset) instruction: extracts
// the access size in bytes, the destination register, the V (FP/SIMD) bit,
// and the byte offset.  Returns true only for load encodings of this form;
// the out parameters are written unconditionally.  (Currently compiled out
// via the surrounding #if 0.)
static bool ldrInfo(uint32_t instruction, uint8_t* size, uint8_t* destReg, bool* v, uint32_t* scaledOffset)
{
	*v = ( (instruction & 0x04000000) != 0 );
	*destReg = (instruction & 0x1F);
	const uint32_t imm12 = ((instruction >> 10) & 0x00000FFF);
	uint8_t accessSize;
	switch ( instruction >> 30 ) {
		case 0:
			// vector and byte LDR have the same "size" bits; opc bit 23
			// differentiates the 16-byte vector form from the byte form
			accessSize = ((instruction & 0x00800000) == 0) ? 1 : 16;
			break;
		case 1:
			accessSize = 2;
			break;
		case 2:
			accessSize = 4;
			break;
		default:	// case 3
			accessSize = 8;
			break;
	}
	*size = accessSize;
	*scaledOffset = imm12 * accessSize;
	return ((instruction & 0x3B400000) == 0x39400000);
}
1183 #endif
// Returns true when addr2 is within +/-1MB of addr1 -- the reach of arm64
// ADR and LDR-literal instructions.
static bool withinOneMeg(uint64_t addr1, uint64_t addr2) {
	const int64_t distance = (int64_t)(addr2 - addr1);
	return ( (distance > -1024*1024) && (distance < 1024*1024) );
}
1189 #endif // SUPPORT_ARCH_arm64
1191 void OutputFile::setInfo(ld::Internal& state, const ld::Atom* atom, uint8_t* buffer, const std::map<uint32_t, const Fixup*>& usedByHints,
1192 uint32_t offsetInAtom, uint32_t delta, InstructionInfo* info)
1194 info->offsetInAtom = offsetInAtom + delta;
1195 std::map<uint32_t, const Fixup*>::const_iterator pos = usedByHints.find(info->offsetInAtom);
1196 if ( (pos != usedByHints.end()) && (pos->second != NULL) ) {
1197 info->fixup = pos->second;
1198 info->targetAddress = addressOf(state, info->fixup, &info->target);
1199 if ( info->fixup->clusterSize != ld::Fixup::k1of1 ) {
1200 assert(info->fixup->firstInCluster());
1201 const ld::Fixup* nextFixup = info->fixup + 1;
1202 if ( nextFixup->kind == ld::Fixup::kindAddAddend ) {
1203 info->targetAddress += nextFixup->u.addend;
1205 else {
1206 assert(0 && "expected addend");
1210 else {
1211 info->fixup = NULL;
1212 info->targetAddress = 0;
1213 info->target = NULL;
1215 info->instructionContent = &buffer[info->offsetInAtom];
1216 info->instructionAddress = atom->finalAddress() + info->offsetInAtom;
1217 info->instruction = get32LE(info->instructionContent);
1220 #if SUPPORT_ARCH_arm64
1221 static bool isPageKind(const ld::Fixup* fixup, bool mustBeGOT=false)
1223 if ( fixup == NULL )
1224 return false;
1225 const ld::Fixup* f;
1226 switch ( fixup->kind ) {
1227 case ld::Fixup::kindStoreTargetAddressARM64Page21:
1228 return !mustBeGOT;
1229 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21:
1230 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21:
1231 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPage21:
1232 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPage21:
1233 return true;
1234 case ld::Fixup::kindSetTargetAddress:
1235 f = fixup;
1236 do {
1237 ++f;
1238 } while ( ! f->lastInCluster() );
1239 switch (f->kind ) {
1240 case ld::Fixup::kindStoreARM64Page21:
1241 return !mustBeGOT;
1242 case ld::Fixup::kindStoreARM64GOTLoadPage21:
1243 case ld::Fixup::kindStoreARM64GOTLeaPage21:
1244 case ld::Fixup::kindStoreARM64TLVPLoadPage21:
1245 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPage21:
1246 return true;
1247 default:
1248 break;
1250 break;
1251 default:
1252 break;
1254 return false;
1257 static bool isPageOffsetKind(const ld::Fixup* fixup, bool mustBeGOT=false)
1259 if ( fixup == NULL )
1260 return false;
1261 const ld::Fixup* f;
1262 switch ( fixup->kind ) {
1263 case ld::Fixup::kindStoreTargetAddressARM64PageOff12:
1264 return !mustBeGOT;
1265 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12:
1266 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12:
1267 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPageOff12:
1268 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPageOff12:
1269 return true;
1270 case ld::Fixup::kindSetTargetAddress:
1271 f = fixup;
1272 do {
1273 ++f;
1274 } while ( ! f->lastInCluster() );
1275 switch (f->kind ) {
1276 case ld::Fixup::kindStoreARM64PageOff12:
1277 return !mustBeGOT;
1278 case ld::Fixup::kindStoreARM64GOTLoadPageOff12:
1279 case ld::Fixup::kindStoreARM64GOTLeaPageOff12:
1280 case ld::Fixup::kindStoreARM64TLVPLoadPageOff12:
1281 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPageOff12:
1282 return true;
1283 default:
1284 break;
1286 break;
1287 default:
1288 break;
1290 return false;
1292 #endif // SUPPORT_ARCH_arm64
// Validates a precondition of a linker optimization hint (LOH).  On failure
// the hint is non-fatal: a warning naming the failed condition is emitted and
// the enclosing switch case is abandoned via 'break' -- this macro relies on
// being expanded inside the hint-processing switch in applyFixUps.
#define LOH_ASSERT(cond) \
	if ( !(cond) ) { \
		warning("ignoring linker optimization hint at %s+0x%X because " #cond, atom->name(), fit->offsetInAtom); \
		break; \
	}
1301 void OutputFile::applyFixUps(ld::Internal& state, uint64_t mhAddress, const ld::Atom* atom, uint8_t* buffer)
1303 //fprintf(stderr, "applyFixUps() on %s\n", atom->name());
1304 int64_t accumulator = 0;
1305 const ld::Atom* toTarget = NULL;
1306 const ld::Atom* fromTarget;
1307 int64_t delta;
1308 uint32_t instruction;
1309 uint32_t newInstruction;
1310 bool is_bl;
1311 bool is_blx;
1312 bool is_b;
1313 bool thumbTarget = false;
1314 std::map<uint32_t, const Fixup*> usedByHints;
1315 for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
1316 uint8_t* fixUpLocation = &buffer[fit->offsetInAtom];
1317 ld::Fixup::LOH_arm64 lohExtra;
1318 switch ( (ld::Fixup::Kind)(fit->kind) ) {
1319 case ld::Fixup::kindNone:
1320 case ld::Fixup::kindNoneFollowOn:
1321 case ld::Fixup::kindNoneGroupSubordinate:
1322 case ld::Fixup::kindNoneGroupSubordinateFDE:
1323 case ld::Fixup::kindNoneGroupSubordinateLSDA:
1324 case ld::Fixup::kindNoneGroupSubordinatePersonality:
1325 break;
1326 case ld::Fixup::kindSetTargetAddress:
1327 accumulator = addressOf(state, fit, &toTarget);
1328 thumbTarget = targetIsThumb(state, fit);
1329 if ( thumbTarget )
1330 accumulator |= 1;
1331 if ( fit->contentAddendOnly || fit->contentDetlaToAddendOnly )
1332 accumulator = 0;
1333 break;
1334 case ld::Fixup::kindSubtractTargetAddress:
1335 delta = addressOf(state, fit, &fromTarget);
1336 if ( ! fit->contentAddendOnly )
1337 accumulator -= delta;
1338 break;
1339 case ld::Fixup::kindAddAddend:
1340 if ( ! fit->contentIgnoresAddend ) {
1341 // <rdar://problem/8342028> ARM main executables main contain .long constants pointing
1342 // into themselves such as jump tables. These .long should not have thumb bit set
1343 // even though the target is a thumb instruction. We can tell it is an interior pointer
1344 // because we are processing an addend.
1345 if ( thumbTarget && (toTarget == atom) && ((int32_t)fit->u.addend > 0) ) {
1346 accumulator &= (-2);
1347 //warning("removing thumb bit from intra-atom pointer in %s %s+0x%0X",
1348 // atom->section().sectionName(), atom->name(), fit->offsetInAtom);
1350 accumulator += fit->u.addend;
1352 break;
1353 case ld::Fixup::kindSubtractAddend:
1354 accumulator -= fit->u.addend;
1355 break;
1356 case ld::Fixup::kindSetTargetImageOffset:
1357 accumulator = addressOf(state, fit, &toTarget) - mhAddress;
1358 thumbTarget = targetIsThumb(state, fit);
1359 if ( thumbTarget )
1360 accumulator |= 1;
1361 break;
1362 case ld::Fixup::kindSetTargetSectionOffset:
1363 accumulator = sectionOffsetOf(state, fit);
1364 break;
1365 case ld::Fixup::kindSetTargetTLVTemplateOffset:
1366 accumulator = tlvTemplateOffsetOf(state, fit);
1367 break;
1368 case ld::Fixup::kindStore8:
1369 *fixUpLocation += accumulator;
1370 break;
1371 case ld::Fixup::kindStoreLittleEndian16:
1372 set16LE(fixUpLocation, accumulator);
1373 break;
1374 case ld::Fixup::kindStoreLittleEndianLow24of32:
1375 set32LE(fixUpLocation, (get32LE(fixUpLocation) & 0xFF000000) | (accumulator & 0x00FFFFFF) );
1376 break;
1377 case ld::Fixup::kindStoreLittleEndian32:
1378 rangeCheckAbsolute32(accumulator, state, atom, fit);
1379 set32LE(fixUpLocation, accumulator);
1380 break;
1381 case ld::Fixup::kindStoreLittleEndian64:
1382 set64LE(fixUpLocation, accumulator);
1383 break;
1384 case ld::Fixup::kindStoreBigEndian16:
1385 set16BE(fixUpLocation, accumulator);
1386 break;
1387 case ld::Fixup::kindStoreBigEndianLow24of32:
1388 set32BE(fixUpLocation, (get32BE(fixUpLocation) & 0xFF000000) | (accumulator & 0x00FFFFFF) );
1389 break;
1390 case ld::Fixup::kindStoreBigEndian32:
1391 rangeCheckAbsolute32(accumulator, state, atom, fit);
1392 set32BE(fixUpLocation, accumulator);
1393 break;
1394 case ld::Fixup::kindStoreBigEndian64:
1395 set64BE(fixUpLocation, accumulator);
1396 break;
1397 case ld::Fixup::kindStoreX86PCRel8:
1398 case ld::Fixup::kindStoreX86BranchPCRel8:
1399 if ( fit->contentAddendOnly )
1400 delta = accumulator;
1401 else
1402 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 1);
1403 rangeCheck8(delta, state, atom, fit);
1404 *fixUpLocation = delta;
1405 break;
1406 case ld::Fixup::kindStoreX86PCRel16:
1407 if ( fit->contentAddendOnly )
1408 delta = accumulator;
1409 else
1410 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 2);
1411 rangeCheck16(delta, state, atom, fit);
1412 set16LE(fixUpLocation, delta);
1413 break;
1414 case ld::Fixup::kindStoreX86BranchPCRel32:
1415 if ( fit->contentAddendOnly )
1416 delta = accumulator;
1417 else
1418 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 4);
1419 rangeCheckBranch32(delta, state, atom, fit);
1420 set32LE(fixUpLocation, delta);
1421 break;
1422 case ld::Fixup::kindStoreX86PCRel32GOTLoad:
1423 case ld::Fixup::kindStoreX86PCRel32GOT:
1424 case ld::Fixup::kindStoreX86PCRel32:
1425 case ld::Fixup::kindStoreX86PCRel32TLVLoad:
1426 if ( fit->contentAddendOnly )
1427 delta = accumulator;
1428 else
1429 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 4);
1430 rangeCheckRIP32(delta, state, atom, fit);
1431 set32LE(fixUpLocation, delta);
1432 break;
1433 case ld::Fixup::kindStoreX86PCRel32_1:
1434 if ( fit->contentAddendOnly )
1435 delta = accumulator - 1;
1436 else
1437 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 5);
1438 rangeCheckRIP32(delta, state, atom, fit);
1439 set32LE(fixUpLocation, delta);
1440 break;
1441 case ld::Fixup::kindStoreX86PCRel32_2:
1442 if ( fit->contentAddendOnly )
1443 delta = accumulator - 2;
1444 else
1445 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 6);
1446 rangeCheckRIP32(delta, state, atom, fit);
1447 set32LE(fixUpLocation, delta);
1448 break;
1449 case ld::Fixup::kindStoreX86PCRel32_4:
1450 if ( fit->contentAddendOnly )
1451 delta = accumulator - 4;
1452 else
1453 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 8);
1454 rangeCheckRIP32(delta, state, atom, fit);
1455 set32LE(fixUpLocation, delta);
1456 break;
1457 case ld::Fixup::kindStoreX86Abs32TLVLoad:
1458 set32LE(fixUpLocation, accumulator);
1459 break;
1460 case ld::Fixup::kindStoreX86Abs32TLVLoadNowLEA:
1461 assert(_options.outputKind() != Options::kObjectFile);
1462 // TLV entry was optimized away, change movl instruction to a leal
1463 if ( fixUpLocation[-1] != 0xA1 )
1464 throw "TLV load reloc does not point to a movl instruction";
1465 fixUpLocation[-1] = 0xB8;
1466 set32LE(fixUpLocation, accumulator);
1467 break;
1468 case ld::Fixup::kindStoreX86PCRel32GOTLoadNowLEA:
1469 assert(_options.outputKind() != Options::kObjectFile);
1470 // GOT entry was optimized away, change movq instruction to a leaq
1471 if ( fixUpLocation[-2] != 0x8B )
1472 throw "GOT load reloc does not point to a movq instruction";
1473 fixUpLocation[-2] = 0x8D;
1474 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 4);
1475 rangeCheckRIP32(delta, state, atom, fit);
1476 set32LE(fixUpLocation, delta);
1477 break;
1478 case ld::Fixup::kindStoreX86PCRel32TLVLoadNowLEA:
1479 assert(_options.outputKind() != Options::kObjectFile);
1480 // TLV entry was optimized away, change movq instruction to a leaq
1481 if ( fixUpLocation[-2] != 0x8B )
1482 throw "TLV load reloc does not point to a movq instruction";
1483 fixUpLocation[-2] = 0x8D;
1484 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 4);
1485 rangeCheckRIP32(delta, state, atom, fit);
1486 set32LE(fixUpLocation, delta);
1487 break;
1488 case ld::Fixup::kindStoreTargetAddressARMLoad12:
1489 accumulator = addressOf(state, fit, &toTarget);
1490 // fall into kindStoreARMLoad12 case
1491 case ld::Fixup::kindStoreARMLoad12:
1492 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 8);
1493 rangeCheckARM12(delta, state, atom, fit);
1494 instruction = get32LE(fixUpLocation);
1495 if ( delta >= 0 ) {
1496 newInstruction = instruction & 0xFFFFF000;
1497 newInstruction |= ((uint32_t)delta & 0xFFF);
1499 else {
1500 newInstruction = instruction & 0xFF7FF000;
1501 newInstruction |= ((uint32_t)(-delta) & 0xFFF);
1503 set32LE(fixUpLocation, newInstruction);
1504 break;
1505 case ld::Fixup::kindDtraceExtra:
1506 break;
1507 case ld::Fixup::kindStoreX86DtraceCallSiteNop:
1508 if ( _options.outputKind() != Options::kObjectFile ) {
1509 // change call site to a NOP
1510 fixUpLocation[-1] = 0x90; // 1-byte nop
1511 fixUpLocation[0] = 0x0F; // 4-byte nop
1512 fixUpLocation[1] = 0x1F;
1513 fixUpLocation[2] = 0x40;
1514 fixUpLocation[3] = 0x00;
1516 break;
1517 case ld::Fixup::kindStoreX86DtraceIsEnableSiteClear:
1518 if ( _options.outputKind() != Options::kObjectFile ) {
1519 // change call site to a clear eax
1520 fixUpLocation[-1] = 0x33; // xorl eax,eax
1521 fixUpLocation[0] = 0xC0;
1522 fixUpLocation[1] = 0x90; // 1-byte nop
1523 fixUpLocation[2] = 0x90; // 1-byte nop
1524 fixUpLocation[3] = 0x90; // 1-byte nop
1526 break;
1527 case ld::Fixup::kindStoreARMDtraceCallSiteNop:
1528 if ( _options.outputKind() != Options::kObjectFile ) {
1529 // change call site to a NOP
1530 set32LE(fixUpLocation, 0xE1A00000);
1532 break;
1533 case ld::Fixup::kindStoreARMDtraceIsEnableSiteClear:
1534 if ( _options.outputKind() != Options::kObjectFile ) {
1535 // change call site to 'eor r0, r0, r0'
1536 set32LE(fixUpLocation, 0xE0200000);
1538 break;
1539 case ld::Fixup::kindStoreThumbDtraceCallSiteNop:
1540 if ( _options.outputKind() != Options::kObjectFile ) {
1541 // change 32-bit blx call site to two thumb NOPs
1542 set32LE(fixUpLocation, 0x46C046C0);
1544 break;
1545 case ld::Fixup::kindStoreThumbDtraceIsEnableSiteClear:
1546 if ( _options.outputKind() != Options::kObjectFile ) {
1547 // change 32-bit blx call site to 'nop', 'eor r0, r0'
1548 set32LE(fixUpLocation, 0x46C04040);
1550 break;
1551 case ld::Fixup::kindStoreARM64DtraceCallSiteNop:
1552 if ( _options.outputKind() != Options::kObjectFile ) {
1553 // change call site to a NOP
1554 set32LE(fixUpLocation, 0xD503201F);
1556 break;
1557 case ld::Fixup::kindStoreARM64DtraceIsEnableSiteClear:
1558 if ( _options.outputKind() != Options::kObjectFile ) {
1559 // change call site to 'MOVZ X0,0'
1560 set32LE(fixUpLocation, 0xD2800000);
1562 break;
1563 case ld::Fixup::kindLazyTarget:
1564 case ld::Fixup::kindIslandTarget:
1565 break;
1566 case ld::Fixup::kindSetLazyOffset:
1567 assert(fit->binding == ld::Fixup::bindingDirectlyBound);
1568 accumulator = this->lazyBindingInfoOffsetForLazyPointerAddress(fit->u.target->finalAddress());
1569 break;
1570 case ld::Fixup::kindDataInCodeStartData:
1571 case ld::Fixup::kindDataInCodeStartJT8:
1572 case ld::Fixup::kindDataInCodeStartJT16:
1573 case ld::Fixup::kindDataInCodeStartJT32:
1574 case ld::Fixup::kindDataInCodeStartJTA32:
1575 case ld::Fixup::kindDataInCodeEnd:
1576 break;
1577 case ld::Fixup::kindLinkerOptimizationHint:
1578 // expand table of address/offsets used by hints
1579 lohExtra.addend = fit->u.addend;
1580 usedByHints[fit->offsetInAtom + (lohExtra.info.delta1 << 2)] = NULL;
1581 if ( lohExtra.info.count > 0 )
1582 usedByHints[fit->offsetInAtom + (lohExtra.info.delta2 << 2)] = NULL;
1583 if ( lohExtra.info.count > 1 )
1584 usedByHints[fit->offsetInAtom + (lohExtra.info.delta3 << 2)] = NULL;
1585 if ( lohExtra.info.count > 2 )
1586 usedByHints[fit->offsetInAtom + (lohExtra.info.delta4 << 2)] = NULL;
1587 break;
1588 case ld::Fixup::kindStoreTargetAddressLittleEndian32:
1589 accumulator = addressOf(state, fit, &toTarget);
1590 thumbTarget = targetIsThumb(state, fit);
1591 if ( thumbTarget )
1592 accumulator |= 1;
1593 if ( fit->contentAddendOnly )
1594 accumulator = 0;
1595 rangeCheckAbsolute32(accumulator, state, atom, fit);
1596 set32LE(fixUpLocation, accumulator);
1597 break;
1598 case ld::Fixup::kindStoreTargetAddressLittleEndian64:
1599 accumulator = addressOf(state, fit, &toTarget);
1600 if ( fit->contentAddendOnly )
1601 accumulator = 0;
1602 set64LE(fixUpLocation, accumulator);
1603 break;
1604 case ld::Fixup::kindStoreTargetAddressBigEndian32:
1605 accumulator = addressOf(state, fit, &toTarget);
1606 if ( fit->contentAddendOnly )
1607 accumulator = 0;
1608 set32BE(fixUpLocation, accumulator);
1609 break;
1610 case ld::Fixup::kindStoreTargetAddressBigEndian64:
1611 accumulator = addressOf(state, fit, &toTarget);
1612 if ( fit->contentAddendOnly )
1613 accumulator = 0;
1614 set64BE(fixUpLocation, accumulator);
1615 break;
1616 case ld::Fixup::kindSetTargetTLVTemplateOffsetLittleEndian32:
1617 accumulator = tlvTemplateOffsetOf(state, fit);
1618 set32LE(fixUpLocation, accumulator);
1619 break;
1620 case ld::Fixup::kindSetTargetTLVTemplateOffsetLittleEndian64:
1621 accumulator = tlvTemplateOffsetOf(state, fit);
1622 set64LE(fixUpLocation, accumulator);
1623 break;
1624 case ld::Fixup::kindStoreTargetAddressX86PCRel32:
1625 case ld::Fixup::kindStoreTargetAddressX86BranchPCRel32:
1626 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad:
1627 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad:
1628 accumulator = addressOf(state, fit, &toTarget);
1629 if ( fit->contentDetlaToAddendOnly )
1630 accumulator = 0;
1631 if ( fit->contentAddendOnly )
1632 delta = 0;
1633 else
1634 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 4);
1635 rangeCheckRIP32(delta, state, atom, fit);
1636 set32LE(fixUpLocation, delta);
1637 break;
1638 case ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoad:
1639 set32LE(fixUpLocation, accumulator);
1640 break;
1641 case ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoadNowLEA:
1642 // TLV entry was optimized away, change movl instruction to a leal
1643 if ( fixUpLocation[-1] != 0xA1 )
1644 throw "TLV load reloc does not point to a movl <abs-address>,<reg> instruction";
1645 fixUpLocation[-1] = 0xB8;
1646 accumulator = addressOf(state, fit, &toTarget);
1647 set32LE(fixUpLocation, accumulator);
1648 break;
1649 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA:
1650 // GOT entry was optimized away, change movq instruction to a leaq
1651 if ( fixUpLocation[-2] != 0x8B )
1652 throw "GOT load reloc does not point to a movq instruction";
1653 fixUpLocation[-2] = 0x8D;
1654 accumulator = addressOf(state, fit, &toTarget);
1655 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 4);
1656 rangeCheckRIP32(delta, state, atom, fit);
1657 set32LE(fixUpLocation, delta);
1658 break;
1659 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA:
1660 // TLV entry was optimized away, change movq instruction to a leaq
1661 if ( fixUpLocation[-2] != 0x8B )
1662 throw "TLV load reloc does not point to a movq instruction";
1663 fixUpLocation[-2] = 0x8D;
1664 accumulator = addressOf(state, fit, &toTarget);
1665 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 4);
1666 rangeCheckRIP32(delta, state, atom, fit);
1667 set32LE(fixUpLocation, delta);
1668 break;
1669 case ld::Fixup::kindStoreTargetAddressARMBranch24:
1670 accumulator = addressOf(state, fit, &toTarget);
1671 thumbTarget = targetIsThumb(state, fit);
1672 if ( toTarget->contentType() == ld::Atom::typeBranchIsland ) {
1673 // Branching to island. If ultimate target is in range, branch there directly.
1674 for (ld::Fixup::iterator islandfit = toTarget->fixupsBegin(), end=toTarget->fixupsEnd(); islandfit != end; ++islandfit) {
1675 if ( islandfit->kind == ld::Fixup::kindIslandTarget ) {
1676 const ld::Atom* islandTarget = NULL;
1677 uint64_t islandTargetAddress = addressOf(state, islandfit, &islandTarget);
1678 delta = islandTargetAddress - (atom->finalAddress() + fit->offsetInAtom + 4);
1679 if ( checkArmBranch24Displacement(delta) ) {
1680 toTarget = islandTarget;
1681 accumulator = islandTargetAddress;
1682 thumbTarget = targetIsThumb(state, islandfit);
1684 break;
1688 if ( thumbTarget )
1689 accumulator |= 1;
1690 if ( fit->contentDetlaToAddendOnly )
1691 accumulator = 0;
1692 // fall into kindStoreARMBranch24 case
1693 case ld::Fixup::kindStoreARMBranch24:
1694 // The pc added will be +8 from the pc
1695 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 8);
1696 rangeCheckARMBranch24(delta, state, atom, fit);
1697 instruction = get32LE(fixUpLocation);
1698 // Make sure we are calling arm with bl, thumb with blx
1699 is_bl = ((instruction & 0xFF000000) == 0xEB000000);
1700 is_blx = ((instruction & 0xFE000000) == 0xFA000000);
1701 is_b = !is_blx && ((instruction & 0x0F000000) == 0x0A000000);
1702 if ( (is_bl | is_blx) && thumbTarget ) {
1703 uint32_t opcode = 0xFA000000; // force to be blx
1704 uint32_t disp = (uint32_t)(delta >> 2) & 0x00FFFFFF;
1705 uint32_t h_bit = (uint32_t)(delta << 23) & 0x01000000;
1706 newInstruction = opcode | h_bit | disp;
1708 else if ( (is_bl | is_blx) && !thumbTarget ) {
1709 uint32_t opcode = 0xEB000000; // force to be bl
1710 uint32_t disp = (uint32_t)(delta >> 2) & 0x00FFFFFF;
1711 newInstruction = opcode | disp;
1713 else if ( is_b && thumbTarget ) {
1714 if ( fit->contentDetlaToAddendOnly )
1715 newInstruction = (instruction & 0xFF000000) | ((uint32_t)(delta >> 2) & 0x00FFFFFF);
1716 else
1717 throwf("no pc-rel bx arm instruction. Can't fix up branch to %s in %s",
1718 referenceTargetAtomName(state, fit), atom->name());
1720 else if ( !is_bl && !is_blx && thumbTarget ) {
1721 throwf("don't know how to convert instruction %x referencing %s to thumb",
1722 instruction, referenceTargetAtomName(state, fit));
1724 else {
1725 newInstruction = (instruction & 0xFF000000) | ((uint32_t)(delta >> 2) & 0x00FFFFFF);
1727 set32LE(fixUpLocation, newInstruction);
1728 break;
1729 case ld::Fixup::kindStoreTargetAddressThumbBranch22:
1730 accumulator = addressOf(state, fit, &toTarget);
1731 thumbTarget = targetIsThumb(state, fit);
1732 if ( toTarget->contentType() == ld::Atom::typeBranchIsland ) {
1733 // branching to island, so see if ultimate target is in range
1734 // and if so branch to ultimate target instead.
1735 for (ld::Fixup::iterator islandfit = toTarget->fixupsBegin(), end=toTarget->fixupsEnd(); islandfit != end; ++islandfit) {
1736 if ( islandfit->kind == ld::Fixup::kindIslandTarget ) {
1737 const ld::Atom* islandTarget = NULL;
1738 uint64_t islandTargetAddress = addressOf(state, islandfit, &islandTarget);
1739 if ( !fit->contentDetlaToAddendOnly ) {
1740 if ( targetIsThumb(state, islandfit) ) {
1741 // Thumb to thumb branch, we will be generating a bl instruction.
1742 // Delta is always even, so mask out thumb bit in target.
1743 islandTargetAddress &= -2ULL;
1745 else {
1746 // Target is not thumb, we will be generating a blx instruction
1747 // Since blx cannot have the low bit set, set bit[1] of the target to
1748 // bit[1] of the base address, so that the difference is a multiple of
1749 // 4 bytes.
1750 islandTargetAddress &= -3ULL;
1751 islandTargetAddress |= ((atom->finalAddress() + fit->offsetInAtom ) & 2LL);
1754 delta = islandTargetAddress - (atom->finalAddress() + fit->offsetInAtom + 4);
1755 if ( checkThumbBranch22Displacement(delta) ) {
1756 toTarget = islandTarget;
1757 accumulator = islandTargetAddress;
1758 thumbTarget = targetIsThumb(state, islandfit);
1760 break;
1764 if ( thumbTarget )
1765 accumulator |= 1;
1766 if ( fit->contentDetlaToAddendOnly )
1767 accumulator = 0;
1768 // fall into kindStoreThumbBranch22 case
1769 case ld::Fixup::kindStoreThumbBranch22:
1770 instruction = get32LE(fixUpLocation);
1771 is_bl = ((instruction & 0xD000F800) == 0xD000F000);
1772 is_blx = ((instruction & 0xD000F800) == 0xC000F000);
1773 is_b = ((instruction & 0xD000F800) == 0x9000F000);
1774 if ( !fit->contentDetlaToAddendOnly ) {
1775 if ( thumbTarget ) {
1776 // Thumb to thumb branch, we will be generating a bl instruction.
1777 // Delta is always even, so mask out thumb bit in target.
1778 accumulator &= -2ULL;
1780 else {
1781 // Target is not thumb, we will be generating a blx instruction
1782 // Since blx cannot have the low bit set, set bit[1] of the target to
1783 // bit[1] of the base address, so that the difference is a multiple of
1784 // 4 bytes.
1785 accumulator &= -3ULL;
1786 accumulator |= ((atom->finalAddress() + fit->offsetInAtom ) & 2LL);
1789 // The pc added will be +4 from the pc
1790 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 4);
1791 // <rdar://problem/16652542> support bl in very large .o files
1792 if ( fit->contentDetlaToAddendOnly ) {
1793 while ( delta < (-16777216LL) )
1794 delta += 0x2000000;
1796 rangeCheckThumbBranch22(delta, state, atom, fit);
1797 if ( _options.preferSubArchitecture() && _options.archSupportsThumb2() ) {
1798 // The instruction is really two instructions:
1799 // The lower 16 bits are the first instruction, which contains the high
1800 // 11 bits of the displacement.
1801 // The upper 16 bits are the second instruction, which contains the low
1802 // 11 bits of the displacement, as well as differentiating bl and blx.
1803 uint32_t s = (uint32_t)(delta >> 24) & 0x1;
1804 uint32_t i1 = (uint32_t)(delta >> 23) & 0x1;
1805 uint32_t i2 = (uint32_t)(delta >> 22) & 0x1;
1806 uint32_t imm10 = (uint32_t)(delta >> 12) & 0x3FF;
1807 uint32_t imm11 = (uint32_t)(delta >> 1) & 0x7FF;
1808 uint32_t j1 = (i1 == s);
1809 uint32_t j2 = (i2 == s);
1810 if ( is_bl ) {
1811 if ( thumbTarget )
1812 instruction = 0xD000F000; // keep bl
1813 else
1814 instruction = 0xC000F000; // change to blx
1816 else if ( is_blx ) {
1817 if ( thumbTarget )
1818 instruction = 0xD000F000; // change to bl
1819 else
1820 instruction = 0xC000F000; // keep blx
1822 else if ( is_b ) {
1823 instruction = 0x9000F000; // keep b
1824 if ( !thumbTarget && !fit->contentDetlaToAddendOnly ) {
1825 throwf("armv7 has no pc-rel bx thumb instruction. Can't fix up branch to %s in %s",
1826 referenceTargetAtomName(state, fit), atom->name());
1829 else {
1830 if ( !thumbTarget )
1831 throwf("don't know how to convert branch instruction %x referencing %s to bx",
1832 instruction, referenceTargetAtomName(state, fit));
1833 instruction = 0x9000F000; // keep b
1835 uint32_t nextDisp = (j1 << 13) | (j2 << 11) | imm11;
1836 uint32_t firstDisp = (s << 10) | imm10;
1837 newInstruction = instruction | (nextDisp << 16) | firstDisp;
1838 //warning("s=%d, j1=%d, j2=%d, imm10=0x%0X, imm11=0x%0X, instruction=0x%08X, first=0x%04X, next=0x%04X, new=0x%08X, disp=0x%llX for %s to %s\n",
1839 // s, j1, j2, imm10, imm11, instruction, firstDisp, nextDisp, newInstruction, delta, atom->name(), toTarget->name());
1840 set32LE(fixUpLocation, newInstruction);
1842 else {
1843 // The instruction is really two instructions:
1844 // The lower 16 bits are the first instruction, which contains the high
1845 // 11 bits of the displacement.
1846 // The upper 16 bits are the second instruction, which contains the low
1847 // 11 bits of the displacement, as well as differentiating bl and blx.
1848 uint32_t firstDisp = (uint32_t)(delta >> 12) & 0x7FF;
1849 uint32_t nextDisp = (uint32_t)(delta >> 1) & 0x7FF;
1850 if ( is_bl && !thumbTarget ) {
1851 instruction = 0xE800F000;
1853 else if ( is_blx && thumbTarget ) {
1854 instruction = 0xF800F000;
1856 else if ( is_b ) {
1857 instruction = 0x9000F000; // keep b
1858 if ( !thumbTarget && !fit->contentDetlaToAddendOnly ) {
1859 throwf("armv6 has no pc-rel bx thumb instruction. Can't fix up branch to %s in %s",
1860 referenceTargetAtomName(state, fit), atom->name());
1863 else {
1864 instruction = instruction & 0xF800F800;
1866 newInstruction = instruction | (nextDisp << 16) | firstDisp;
1867 set32LE(fixUpLocation, newInstruction);
1869 break;
1870 case ld::Fixup::kindStoreARMLow16:
1872 uint32_t imm4 = (accumulator & 0x0000F000) >> 12;
1873 uint32_t imm12 = accumulator & 0x00000FFF;
1874 instruction = get32LE(fixUpLocation);
1875 newInstruction = (instruction & 0xFFF0F000) | (imm4 << 16) | imm12;
1876 set32LE(fixUpLocation, newInstruction);
1878 break;
1879 case ld::Fixup::kindStoreARMHigh16:
1881 uint32_t imm4 = (accumulator & 0xF0000000) >> 28;
1882 uint32_t imm12 = (accumulator & 0x0FFF0000) >> 16;
1883 instruction = get32LE(fixUpLocation);
1884 newInstruction = (instruction & 0xFFF0F000) | (imm4 << 16) | imm12;
1885 set32LE(fixUpLocation, newInstruction);
1887 break;
1888 case ld::Fixup::kindStoreThumbLow16:
1890 uint32_t imm4 = (accumulator & 0x0000F000) >> 12;
1891 uint32_t i = (accumulator & 0x00000800) >> 11;
1892 uint32_t imm3 = (accumulator & 0x00000700) >> 8;
1893 uint32_t imm8 = accumulator & 0x000000FF;
1894 instruction = get32LE(fixUpLocation);
1895 newInstruction = (instruction & 0x8F00FBF0) | imm4 | (i << 10) | (imm3 << 28) | (imm8 << 16);
1896 set32LE(fixUpLocation, newInstruction);
1898 break;
1899 case ld::Fixup::kindStoreThumbHigh16:
1901 uint32_t imm4 = (accumulator & 0xF0000000) >> 28;
1902 uint32_t i = (accumulator & 0x08000000) >> 27;
1903 uint32_t imm3 = (accumulator & 0x07000000) >> 24;
1904 uint32_t imm8 = (accumulator & 0x00FF0000) >> 16;
1905 instruction = get32LE(fixUpLocation);
1906 newInstruction = (instruction & 0x8F00FBF0) | imm4 | (i << 10) | (imm3 << 28) | (imm8 << 16);
1907 set32LE(fixUpLocation, newInstruction);
1909 break;
1910 #if SUPPORT_ARCH_arm64
1911 case ld::Fixup::kindStoreTargetAddressARM64Branch26:
1912 accumulator = addressOf(state, fit, &toTarget);
1913 // fall into kindStoreARM64Branch26 case
1914 case ld::Fixup::kindStoreARM64Branch26:
1915 if ( fit->contentAddendOnly )
1916 delta = accumulator;
1917 else
1918 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom);
1919 rangeCheckARM64Branch26(delta, state, atom, fit);
1920 instruction = get32LE(fixUpLocation);
1921 newInstruction = (instruction & 0xFC000000) | ((uint32_t)(delta >> 2) & 0x03FFFFFF);
1922 set32LE(fixUpLocation, newInstruction);
1923 break;
1924 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21:
1925 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21:
1926 case ld::Fixup::kindStoreTargetAddressARM64Page21:
1927 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPage21:
1928 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPage21:
1929 accumulator = addressOf(state, fit, &toTarget);
1930 // fall into kindStoreARM64Branch26 case
1931 case ld::Fixup::kindStoreARM64GOTLeaPage21:
1932 case ld::Fixup::kindStoreARM64GOTLoadPage21:
1933 case ld::Fixup::kindStoreARM64TLVPLoadPage21:
1934 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPage21:
1935 case ld::Fixup::kindStoreARM64Page21:
1937 // the ADRP instruction adds the imm << 12 to the page that the pc is on
1938 if ( fit->contentAddendOnly )
1939 delta = 0;
1940 else
1941 delta = (accumulator & (-4096)) - ((atom->finalAddress() + fit->offsetInAtom) & (-4096));
1942 rangeCheckARM64Page21(delta, state, atom, fit);
1943 instruction = get32LE(fixUpLocation);
1944 uint32_t immhi = (delta >> 9) & (0x00FFFFE0);
1945 uint32_t immlo = (delta << 17) & (0x60000000);
1946 newInstruction = (instruction & 0x9F00001F) | immlo | immhi;
1947 set32LE(fixUpLocation, newInstruction);
1949 break;
1950 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12:
1951 case ld::Fixup::kindStoreTargetAddressARM64PageOff12:
1952 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPageOff12:
1953 accumulator = addressOf(state, fit, &toTarget);
1954 // fall into kindAddressARM64PageOff12 case
1955 case ld::Fixup::kindStoreARM64TLVPLoadPageOff12:
1956 case ld::Fixup::kindStoreARM64GOTLoadPageOff12:
1957 case ld::Fixup::kindStoreARM64PageOff12:
1959 uint32_t offset = accumulator & 0x00000FFF;
1960 instruction = get32LE(fixUpLocation);
1961 // LDR/STR instruction have implicit scale factor, need to compensate for that
1962 if ( instruction & 0x08000000 ) {
1963 uint32_t implictShift = ((instruction >> 30) & 0x3);
1964 switch ( implictShift ) {
1965 case 0:
1966 if ( (instruction & 0x04800000) == 0x04800000 ) {
1967 // vector and byte LDR/STR have same "size" bits, need to check other bits to differenciate
1968 implictShift = 4;
1969 if ( (offset & 0xF) != 0 ) {
1970 throwf("128-bit LDR/STR not 16-byte aligned: from %s (0x%08llX) to %s (0x%08llX)",
1971 atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fit),
1972 addressOf(state, fit, &toTarget));
1975 break;
1976 case 1:
1977 if ( (offset & 0x1) != 0 ) {
1978 throwf("16-bit LDR/STR not 2-byte aligned: from %s (0x%08llX) to %s (0x%08llX)",
1979 atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fit),
1980 addressOf(state, fit, &toTarget));
1982 break;
1983 case 2:
1984 if ( (offset & 0x3) != 0 ) {
1985 throwf("32-bit LDR/STR not 4-byte aligned: from %s (0x%08llX) to %s (0x%08llX)",
1986 atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fit),
1987 addressOf(state, fit, &toTarget));
1989 break;
1990 case 3:
1991 if ( (offset & 0x7) != 0 ) {
1992 throwf("64-bit LDR/STR not 8-byte aligned: from %s (0x%08llX) to %s (0x%08llX)",
1993 atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fit),
1994 addressOf(state, fit, &toTarget));
1996 break;
1998 // compensate for implicit scale
1999 offset >>= implictShift;
2001 if ( fit->contentAddendOnly )
2002 offset = 0;
2003 uint32_t imm12 = offset << 10;
2004 newInstruction = (instruction & 0xFFC003FF) | imm12;
2005 set32LE(fixUpLocation, newInstruction);
2007 break;
2008 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12:
2009 accumulator = addressOf(state, fit, &toTarget);
2010 // fall into kindStoreARM64GOTLoadPage21 case
2011 case ld::Fixup::kindStoreARM64GOTLeaPageOff12:
2013 // GOT entry was optimized away, change LDR instruction to a ADD
2014 instruction = get32LE(fixUpLocation);
2015 if ( (instruction & 0xFFC00000) != 0xF9400000 )
2016 throwf("GOT load reloc does not point to a LDR instruction in %s", atom->name());
2017 uint32_t offset = accumulator & 0x00000FFF;
2018 uint32_t imm12 = offset << 10;
2019 newInstruction = 0x91000000 | imm12 | (instruction & 0x000003FF);
2020 set32LE(fixUpLocation, newInstruction);
2022 break;
2023 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPageOff12:
2024 accumulator = addressOf(state, fit, &toTarget);
2025 // fall into kindStoreARM64TLVPLeaPageOff12 case
2026 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPageOff12:
2028 // TLV thunk in same linkage unit, so LEA it directly, changing LDR instruction to a ADD
2029 instruction = get32LE(fixUpLocation);
2030 if ( (instruction & 0xFFC00000) != 0xF9400000 )
2031 throwf("TLV load reloc does not point to a LDR instruction in %s", atom->name());
2032 uint32_t offset = accumulator & 0x00000FFF;
2033 uint32_t imm12 = offset << 10;
2034 newInstruction = 0x91000000 | imm12 | (instruction & 0x000003FF);
2035 set32LE(fixUpLocation, newInstruction);
2037 break;
2038 case ld::Fixup::kindStoreARM64PointerToGOT:
2039 set64LE(fixUpLocation, accumulator);
2040 break;
2041 case ld::Fixup::kindStoreARM64PCRelToGOT:
2042 if ( fit->contentAddendOnly )
2043 delta = accumulator;
2044 else
2045 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom);
2046 set32LE(fixUpLocation, delta);
2047 break;
2048 #endif
2052 #if SUPPORT_ARCH_arm64
2053 // after all fixups are done on atom, if there are potential optimizations, do those
2054 if ( (usedByHints.size() != 0) && (_options.outputKind() != Options::kObjectFile) && !_options.ignoreOptimizationHints() ) {
2055 // fill in second part of usedByHints map, so we can see the target of fixups that might be optimized
2056 for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
2057 switch ( fit->kind ) {
2058 case ld::Fixup::kindLinkerOptimizationHint:
2059 case ld::Fixup::kindNoneFollowOn:
2060 case ld::Fixup::kindNoneGroupSubordinate:
2061 case ld::Fixup::kindNoneGroupSubordinateFDE:
2062 case ld::Fixup::kindNoneGroupSubordinateLSDA:
2063 case ld::Fixup::kindNoneGroupSubordinatePersonality:
2064 break;
2065 default:
2066 if ( fit->firstInCluster() ) {
2067 std::map<uint32_t, const Fixup*>::iterator pos = usedByHints.find(fit->offsetInAtom);
2068 if ( pos != usedByHints.end() ) {
2069 assert(pos->second == NULL && "two fixups in same hint location");
2070 pos->second = fit;
2071 //fprintf(stderr, "setting %s usedByHints[0x%04X], kind = %d\n", atom->name(), fit->offsetInAtom, fit->kind);
2077 // apply hints pass 1
2078 for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
2079 if ( fit->kind != ld::Fixup::kindLinkerOptimizationHint )
2080 continue;
2081 InstructionInfo infoA;
2082 InstructionInfo infoB;
2083 InstructionInfo infoC;
2084 InstructionInfo infoD;
2085 LoadStoreInfo ldrInfoB, ldrInfoC;
2086 AddInfo addInfoB;
2087 AdrpInfo adrpInfoA;
2088 bool usableSegment;
2089 bool targetFourByteAligned;
2090 bool literalableSize, isADRP, isADD, isLDR, isSTR;
2091 //uint8_t loadSize, destReg;
2092 //uint32_t scaledOffset;
2093 //uint32_t imm12;
2094 ld::Fixup::LOH_arm64 alt;
2095 alt.addend = fit->u.addend;
2096 setInfo(state, atom, buffer, usedByHints, fit->offsetInAtom, (alt.info.delta1 << 2), &infoA);
2097 if ( alt.info.count > 0 )
2098 setInfo(state, atom, buffer, usedByHints, fit->offsetInAtom, (alt.info.delta2 << 2), &infoB);
2099 if ( alt.info.count > 1 )
2100 setInfo(state, atom, buffer, usedByHints, fit->offsetInAtom, (alt.info.delta3 << 2), &infoC);
2101 if ( alt.info.count > 2 )
2102 setInfo(state, atom, buffer, usedByHints, fit->offsetInAtom, (alt.info.delta4 << 2), &infoD);
2104 if ( _options.sharedRegionEligible() ) {
2105 if ( _options.sharedRegionEncodingV2() ) {
2106 // In v2 format, all references might be move at dyld shared cache creation time
2107 usableSegment = false;
2109 else {
2110 // In v1 format, only references to something in __TEXT segment could be optimized
2111 usableSegment = (strcmp(atom->section().segmentName(), infoB.target->section().segmentName()) == 0);
2114 else {
2115 // main executables can optimize any reference
2116 usableSegment = true;
2119 switch ( alt.info.kind ) {
2120 case LOH_ARM64_ADRP_ADRP:
2121 // processed in pass 2 because some ADRP may have been removed
2122 break;
2123 case LOH_ARM64_ADRP_LDR:
2124 LOH_ASSERT(alt.info.count == 1);
2125 LOH_ASSERT(isPageKind(infoA.fixup));
2126 LOH_ASSERT(isPageOffsetKind(infoB.fixup));
2127 LOH_ASSERT(infoA.target == infoB.target);
2128 LOH_ASSERT(infoA.targetAddress == infoB.targetAddress);
2129 isADRP = parseADRP(infoA.instruction, adrpInfoA);
2130 LOH_ASSERT(isADRP);
2131 isLDR = parseLoadOrStore(infoB.instruction, ldrInfoB);
2132 // silently ignore LDRs transformed to ADD by TLV pass
2133 if ( !isLDR && infoB.fixup->kind == ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPageOff12 )
2134 break;
2135 LOH_ASSERT(isLDR);
2136 LOH_ASSERT(ldrInfoB.baseReg == adrpInfoA.destReg);
2137 LOH_ASSERT(ldrInfoB.offset == (infoA.targetAddress & 0x00000FFF));
2138 literalableSize = ( (ldrInfoB.size != 1) && (ldrInfoB.size != 2) );
2139 targetFourByteAligned = ( (infoA.targetAddress & 0x3) == 0 );
2140 if ( literalableSize && usableSegment && targetFourByteAligned && withinOneMeg(infoB.instructionAddress, infoA.targetAddress) ) {
2141 set32LE(infoA.instructionContent, makeNOP());
2142 set32LE(infoB.instructionContent, makeLDR_literal(ldrInfoB, infoA.targetAddress, infoB.instructionAddress));
2143 if ( _options.verboseOptimizationHints() )
2144 fprintf(stderr, "adrp-ldr at 0x%08llX transformed to LDR literal, usableSegment=%d usableSegment\n", infoB.instructionAddress, usableSegment);
2146 else {
2147 if ( _options.verboseOptimizationHints() )
2148 fprintf(stderr, "adrp-ldr at 0x%08llX not transformed, isLDR=%d, literalableSize=%d, inRange=%d, usableSegment=%d, scaledOffset=%d\n",
2149 infoB.instructionAddress, isLDR, literalableSize, withinOneMeg(infoB.instructionAddress, infoA.targetAddress), usableSegment, ldrInfoB.offset);
2151 break;
2152 case LOH_ARM64_ADRP_ADD_LDR:
2153 LOH_ASSERT(alt.info.count == 2);
2154 LOH_ASSERT(isPageKind(infoA.fixup));
2155 LOH_ASSERT(isPageOffsetKind(infoB.fixup));
2156 LOH_ASSERT(infoC.fixup == NULL);
2157 LOH_ASSERT(infoA.target == infoB.target);
2158 LOH_ASSERT(infoA.targetAddress == infoB.targetAddress);
2159 isADRP = parseADRP(infoA.instruction, adrpInfoA);
2160 LOH_ASSERT(isADRP);
2161 isADD = parseADD(infoB.instruction, addInfoB);
2162 LOH_ASSERT(isADD);
2163 LOH_ASSERT(adrpInfoA.destReg == addInfoB.srcReg);
2164 isLDR = parseLoadOrStore(infoC.instruction, ldrInfoC);
2165 LOH_ASSERT(isLDR);
2166 LOH_ASSERT(addInfoB.destReg == ldrInfoC.baseReg);
2167 targetFourByteAligned = ( ((infoB.targetAddress+ldrInfoC.offset) & 0x3) == 0 );
2168 literalableSize = ( (ldrInfoC.size != 1) && (ldrInfoC.size != 2) );
2169 if ( literalableSize && usableSegment && targetFourByteAligned && withinOneMeg(infoC.instructionAddress, infoA.targetAddress+ldrInfoC.offset) ) {
2170 // can do T1 transformation to LDR literal
2171 set32LE(infoA.instructionContent, makeNOP());
2172 set32LE(infoB.instructionContent, makeNOP());
2173 set32LE(infoC.instructionContent, makeLDR_literal(ldrInfoC, infoA.targetAddress+ldrInfoC.offset, infoC.instructionAddress));
2174 if ( _options.verboseOptimizationHints() ) {
2175 fprintf(stderr, "adrp-add-ldr at 0x%08llX T1 transformed to LDR literal\n", infoC.instructionAddress);
2178 else if ( usableSegment && withinOneMeg(infoA.instructionAddress, infoA.targetAddress+ldrInfoC.offset) ) {
2179 // can to T4 transformation and turn ADRP/ADD into ADR
2180 set32LE(infoA.instructionContent, makeADR(ldrInfoC.baseReg, infoA.targetAddress+ldrInfoC.offset, infoA.instructionAddress));
2181 set32LE(infoB.instructionContent, makeNOP());
2182 ldrInfoC.offset = 0; // offset is now in ADR instead of ADD or LDR
2183 set32LE(infoC.instructionContent, makeLoadOrStore(ldrInfoC));
2184 set32LE(infoC.instructionContent, infoC.instruction & 0xFFC003FF);
2185 if ( _options.verboseOptimizationHints() )
2186 fprintf(stderr, "adrp-add-ldr at 0x%08llX T4 transformed to ADR/LDR\n", infoB.instructionAddress);
2188 else if ( ((infoB.targetAddress % ldrInfoC.size) == 0) && (ldrInfoC.offset == 0) ) {
2189 // can do T2 transformation by merging ADD into LD
2190 // Leave ADRP as-is
2191 set32LE(infoB.instructionContent, makeNOP());
2192 ldrInfoC.offset += addInfoB.addend;
2193 set32LE(infoC.instructionContent, makeLoadOrStore(ldrInfoC));
2194 if ( _options.verboseOptimizationHints() )
2195 fprintf(stderr, "adrp-add-ldr at 0x%08llX T2 transformed to ADRP/LDR \n", infoC.instructionAddress);
2197 else {
2198 if ( _options.verboseOptimizationHints() )
2199 fprintf(stderr, "adrp-add-ldr at 0x%08llX could not be transformed, loadSize=%d, literalableSize=%d, inRange=%d, usableSegment=%d, targetFourByteAligned=%d, imm12=%d\n",
2200 infoC.instructionAddress, ldrInfoC.size, literalableSize, withinOneMeg(infoC.instructionAddress, infoA.targetAddress+ldrInfoC.offset), usableSegment, targetFourByteAligned, ldrInfoC.offset);
2202 break;
2203 case LOH_ARM64_ADRP_ADD:
2204 LOH_ASSERT(alt.info.count == 1);
2205 LOH_ASSERT(isPageKind(infoA.fixup));
2206 LOH_ASSERT(isPageOffsetKind(infoB.fixup));
2207 LOH_ASSERT(infoA.target == infoB.target);
2208 LOH_ASSERT(infoA.targetAddress == infoB.targetAddress);
2209 isADRP = parseADRP(infoA.instruction, adrpInfoA);
2210 LOH_ASSERT(isADRP);
2211 isADD = parseADD(infoB.instruction, addInfoB);
2212 LOH_ASSERT(isADD);
2213 LOH_ASSERT(adrpInfoA.destReg == addInfoB.srcReg);
2214 if ( usableSegment && withinOneMeg(infoA.targetAddress, infoA.instructionAddress) ) {
2215 // can do T4 transformation and use ADR
2216 set32LE(infoA.instructionContent, makeADR(addInfoB.destReg, infoA.targetAddress, infoA.instructionAddress));
2217 set32LE(infoB.instructionContent, makeNOP());
2218 if ( _options.verboseOptimizationHints() )
2219 fprintf(stderr, "adrp-add at 0x%08llX transformed to ADR\n", infoB.instructionAddress);
2221 else {
2222 if ( _options.verboseOptimizationHints() )
2223 fprintf(stderr, "adrp-add at 0x%08llX not transformed, isAdd=%d, inRange=%d, usableSegment=%d\n",
2224 infoB.instructionAddress, isADD, withinOneMeg(infoA.targetAddress, infoA.instructionAddress), usableSegment);
2226 break;
2227 case LOH_ARM64_ADRP_LDR_GOT_LDR:
2228 LOH_ASSERT(alt.info.count == 2);
2229 LOH_ASSERT(isPageKind(infoA.fixup, true));
2230 LOH_ASSERT(isPageOffsetKind(infoB.fixup, true));
2231 LOH_ASSERT(infoC.fixup == NULL);
2232 LOH_ASSERT(infoA.target == infoB.target);
2233 LOH_ASSERT(infoA.targetAddress == infoB.targetAddress);
2234 isADRP = parseADRP(infoA.instruction, adrpInfoA);
2235 LOH_ASSERT(isADRP);
2236 isLDR = parseLoadOrStore(infoC.instruction, ldrInfoC);
2237 LOH_ASSERT(isLDR);
2238 isADD = parseADD(infoB.instruction, addInfoB);
2239 isLDR = parseLoadOrStore(infoB.instruction, ldrInfoB);
2240 if ( isLDR ) {
2241 // target of GOT is external
2242 LOH_ASSERT(ldrInfoB.size == 8);
2243 LOH_ASSERT(!ldrInfoB.isFloat);
2244 LOH_ASSERT(ldrInfoC.baseReg == ldrInfoB.reg);
2245 //fprintf(stderr, "infoA.target=%p, %s, infoA.targetAddress=0x%08llX\n", infoA.target, infoA.target->name(), infoA.targetAddress);
2246 targetFourByteAligned = ( ((infoA.targetAddress + ldrInfoC.offset) & 0x3) == 0 );
2247 if ( usableSegment && targetFourByteAligned && withinOneMeg(infoB.instructionAddress, infoA.targetAddress + ldrInfoC.offset) ) {
2248 // can do T5 transform
2249 set32LE(infoA.instructionContent, makeNOP());
2250 set32LE(infoB.instructionContent, makeLDR_literal(ldrInfoB, infoA.targetAddress, infoB.instructionAddress));
2251 if ( _options.verboseOptimizationHints() ) {
2252 fprintf(stderr, "adrp-ldr-got-ldr at 0x%08llX T5 transformed to LDR literal of GOT plus LDR\n", infoC.instructionAddress);
2255 else {
2256 if ( _options.verboseOptimizationHints() )
2257 fprintf(stderr, "adrp-ldr-got-ldr at 0x%08llX no optimization done\n", infoC.instructionAddress);
2260 else if ( isADD ) {
2261 // target of GOT is in same linkage unit and B instruction was changed to ADD to compute LEA of target
2262 LOH_ASSERT(addInfoB.srcReg == adrpInfoA.destReg);
2263 LOH_ASSERT(addInfoB.destReg == ldrInfoC.baseReg);
2264 targetFourByteAligned = ( ((infoA.targetAddress) & 0x3) == 0 );
2265 literalableSize = ( (ldrInfoC.size != 1) && (ldrInfoC.size != 2) );
2266 if ( usableSegment && literalableSize && targetFourByteAligned && withinOneMeg(infoC.instructionAddress, infoA.targetAddress + ldrInfoC.offset) ) {
2267 // can do T1 transform
2268 set32LE(infoA.instructionContent, makeNOP());
2269 set32LE(infoB.instructionContent, makeNOP());
2270 set32LE(infoC.instructionContent, makeLDR_literal(ldrInfoC, infoA.targetAddress + ldrInfoC.offset, infoC.instructionAddress));
2271 if ( _options.verboseOptimizationHints() )
2272 fprintf(stderr, "adrp-ldr-got-ldr at 0x%08llX T1 transformed to LDR literal\n", infoC.instructionAddress);
2274 else if ( usableSegment && withinOneMeg(infoA.instructionAddress, infoA.targetAddress) ) {
2275 // can do T4 transform
2276 set32LE(infoA.instructionContent, makeADR(ldrInfoC.baseReg, infoA.targetAddress, infoA.instructionAddress));
2277 set32LE(infoB.instructionContent, makeNOP());
2278 set32LE(infoC.instructionContent, makeLoadOrStore(ldrInfoC));
2279 if ( _options.verboseOptimizationHints() ) {
2280 fprintf(stderr, "adrp-ldr-got-ldr at 0x%08llX T4 transformed to ADR/LDR\n", infoC.instructionAddress);
2283 else if ( ((infoA.targetAddress % ldrInfoC.size) == 0) && ((addInfoB.addend + ldrInfoC.offset) < 4096) ) {
2284 // can do T2 transform
2285 set32LE(infoB.instructionContent, makeNOP());
2286 ldrInfoC.baseReg = adrpInfoA.destReg;
2287 ldrInfoC.offset += addInfoB.addend;
2288 set32LE(infoC.instructionContent, makeLoadOrStore(ldrInfoC));
2289 if ( _options.verboseOptimizationHints() ) {
2290 fprintf(stderr, "adrp-ldr-got-ldr at 0x%08llX T2 transformed to ADRP/NOP/LDR\n", infoC.instructionAddress);
2293 else {
2294 // T3 transform already done by ld::passes:got:doPass()
2295 if ( _options.verboseOptimizationHints() ) {
2296 fprintf(stderr, "adrp-ldr-got-ldr at 0x%08llX T3 transformed to ADRP/ADD/LDR\n", infoC.instructionAddress);
2300 else {
2301 if ( _options.verboseOptimizationHints() )
2302 fprintf(stderr, "adrp-ldr-got-ldr at 0x%08llX not ADD or LDR\n", infoC.instructionAddress);
2304 break;
2305 case LOH_ARM64_ADRP_ADD_STR:
2306 LOH_ASSERT(alt.info.count == 2);
2307 LOH_ASSERT(isPageKind(infoA.fixup));
2308 LOH_ASSERT(isPageOffsetKind(infoB.fixup));
2309 LOH_ASSERT(infoC.fixup == NULL);
2310 LOH_ASSERT(infoA.target == infoB.target);
2311 LOH_ASSERT(infoA.targetAddress == infoB.targetAddress);
2312 isADRP = parseADRP(infoA.instruction, adrpInfoA);
2313 LOH_ASSERT(isADRP);
2314 isADD = parseADD(infoB.instruction, addInfoB);
2315 LOH_ASSERT(isADD);
2316 LOH_ASSERT(adrpInfoA.destReg == addInfoB.srcReg);
2317 isSTR = (parseLoadOrStore(infoC.instruction, ldrInfoC) && ldrInfoC.isStore);
2318 LOH_ASSERT(isSTR);
2319 LOH_ASSERT(addInfoB.destReg == ldrInfoC.baseReg);
2320 if ( usableSegment && withinOneMeg(infoA.instructionAddress, infoA.targetAddress+ldrInfoC.offset) ) {
2321 // can to T4 transformation and turn ADRP/ADD into ADR
2322 set32LE(infoA.instructionContent, makeADR(ldrInfoC.baseReg, infoA.targetAddress+ldrInfoC.offset, infoA.instructionAddress));
2323 set32LE(infoB.instructionContent, makeNOP());
2324 ldrInfoC.offset = 0; // offset is now in ADR instead of ADD or LDR
2325 set32LE(infoC.instructionContent, makeLoadOrStore(ldrInfoC));
2326 set32LE(infoC.instructionContent, infoC.instruction & 0xFFC003FF);
2327 if ( _options.verboseOptimizationHints() )
2328 fprintf(stderr, "adrp-add-str at 0x%08llX T4 transformed to ADR/STR\n", infoB.instructionAddress);
2330 else if ( ((infoB.targetAddress % ldrInfoC.size) == 0) && (ldrInfoC.offset == 0) ) {
2331 // can do T2 transformation by merging ADD into STR
2332 // Leave ADRP as-is
2333 set32LE(infoB.instructionContent, makeNOP());
2334 ldrInfoC.offset += addInfoB.addend;
2335 set32LE(infoC.instructionContent, makeLoadOrStore(ldrInfoC));
2336 if ( _options.verboseOptimizationHints() )
2337 fprintf(stderr, "adrp-add-str at 0x%08llX T2 transformed to ADRP/STR \n", infoC.instructionAddress);
2339 else {
2340 if ( _options.verboseOptimizationHints() )
2341 fprintf(stderr, "adrp-add-str at 0x%08llX could not be transformed, loadSize=%d, inRange=%d, usableSegment=%d, imm12=%d\n",
2342 infoC.instructionAddress, ldrInfoC.size, withinOneMeg(infoC.instructionAddress, infoA.targetAddress+ldrInfoC.offset), usableSegment, ldrInfoC.offset);
2344 break;
2345 case LOH_ARM64_ADRP_LDR_GOT_STR:
2346 LOH_ASSERT(alt.info.count == 2);
2347 LOH_ASSERT(isPageKind(infoA.fixup, true));
2348 LOH_ASSERT(isPageOffsetKind(infoB.fixup, true));
2349 LOH_ASSERT(infoC.fixup == NULL);
2350 LOH_ASSERT(infoA.target == infoB.target);
2351 LOH_ASSERT(infoA.targetAddress == infoB.targetAddress);
2352 isADRP = parseADRP(infoA.instruction, adrpInfoA);
2353 LOH_ASSERT(isADRP);
2354 isSTR = (parseLoadOrStore(infoC.instruction, ldrInfoC) && ldrInfoC.isStore);
2355 LOH_ASSERT(isSTR);
2356 isADD = parseADD(infoB.instruction, addInfoB);
2357 isLDR = parseLoadOrStore(infoB.instruction, ldrInfoB);
2358 if ( isLDR ) {
2359 // target of GOT is external
2360 LOH_ASSERT(ldrInfoB.size == 8);
2361 LOH_ASSERT(!ldrInfoB.isFloat);
2362 LOH_ASSERT(ldrInfoC.baseReg == ldrInfoB.reg);
2363 targetFourByteAligned = ( ((infoA.targetAddress + ldrInfoC.offset) & 0x3) == 0 );
2364 if ( usableSegment && targetFourByteAligned && withinOneMeg(infoB.instructionAddress, infoA.targetAddress + ldrInfoC.offset) ) {
2365 // can do T5 transform
2366 set32LE(infoA.instructionContent, makeNOP());
2367 set32LE(infoB.instructionContent, makeLDR_literal(ldrInfoB, infoA.targetAddress, infoB.instructionAddress));
2368 if ( _options.verboseOptimizationHints() ) {
2369 fprintf(stderr, "adrp-ldr-got-str at 0x%08llX T5 transformed to LDR literal of GOT plus STR\n", infoC.instructionAddress);
2372 else {
2373 if ( _options.verboseOptimizationHints() )
2374 fprintf(stderr, "adrp-ldr-got-str at 0x%08llX no optimization done\n", infoC.instructionAddress);
2377 else if ( isADD ) {
2378 // target of GOT is in same linkage unit and B instruction was changed to ADD to compute LEA of target
2379 LOH_ASSERT(addInfoB.srcReg == adrpInfoA.destReg);
2380 LOH_ASSERT(addInfoB.destReg == ldrInfoC.baseReg);
2381 targetFourByteAligned = ( ((infoA.targetAddress) & 0x3) == 0 );
2382 literalableSize = ( (ldrInfoC.size != 1) && (ldrInfoC.size != 2) );
2383 if ( usableSegment && withinOneMeg(infoA.instructionAddress, infoA.targetAddress) ) {
2384 // can do T4 transform
2385 set32LE(infoA.instructionContent, makeADR(ldrInfoC.baseReg, infoA.targetAddress, infoA.instructionAddress));
2386 set32LE(infoB.instructionContent, makeNOP());
2387 set32LE(infoC.instructionContent, makeLoadOrStore(ldrInfoC));
2388 if ( _options.verboseOptimizationHints() ) {
2389 fprintf(stderr, "adrp-ldr-got-str at 0x%08llX T4 transformed to ADR/STR\n", infoC.instructionAddress);
2392 else if ( ((infoA.targetAddress % ldrInfoC.size) == 0) && (ldrInfoC.offset == 0) ) {
2393 // can do T2 transform
2394 set32LE(infoB.instructionContent, makeNOP());
2395 ldrInfoC.baseReg = adrpInfoA.destReg;
2396 ldrInfoC.offset += addInfoB.addend;
2397 set32LE(infoC.instructionContent, makeLoadOrStore(ldrInfoC));
2398 if ( _options.verboseOptimizationHints() ) {
2399 fprintf(stderr, "adrp-ldr-got-str at 0x%08llX T4 transformed to ADRP/NOP/STR\n", infoC.instructionAddress);
2402 else {
2403 // T3 transform already done by ld::passes:got:doPass()
2404 if ( _options.verboseOptimizationHints() ) {
2405 fprintf(stderr, "adrp-ldr-got-str at 0x%08llX T3 transformed to ADRP/ADD/STR\n", infoC.instructionAddress);
2409 else {
2410 if ( _options.verboseOptimizationHints() )
2411 fprintf(stderr, "adrp-ldr-got-str at 0x%08llX not ADD or LDR\n", infoC.instructionAddress);
2413 break;
2414 case LOH_ARM64_ADRP_LDR_GOT:
2415 LOH_ASSERT(alt.info.count == 1);
2416 LOH_ASSERT(isPageKind(infoA.fixup, true));
2417 LOH_ASSERT(isPageOffsetKind(infoB.fixup, true));
2418 LOH_ASSERT(infoA.target == infoB.target);
2419 LOH_ASSERT(infoA.targetAddress == infoB.targetAddress);
2420 isADRP = parseADRP(infoA.instruction, adrpInfoA);
2421 isADD = parseADD(infoB.instruction, addInfoB);
2422 isLDR = parseLoadOrStore(infoB.instruction, ldrInfoB);
2423 if ( isADRP ) {
2424 if ( isLDR ) {
2425 if ( usableSegment && withinOneMeg(infoB.instructionAddress, infoA.targetAddress) ) {
2426 // can do T5 transform (LDR literal load of GOT)
2427 set32LE(infoA.instructionContent, makeNOP());
2428 set32LE(infoB.instructionContent, makeLDR_literal(ldrInfoB, infoA.targetAddress, infoB.instructionAddress));
2429 if ( _options.verboseOptimizationHints() ) {
2430 fprintf(stderr, "adrp-ldr-got at 0x%08llX T5 transformed to NOP/LDR\n", infoC.instructionAddress);
2434 else if ( isADD ) {
2435 if ( usableSegment && withinOneMeg(infoA.instructionAddress, infoA.targetAddress) ) {
2436 // can do T4 transform (ADR to compute local address)
2437 set32LE(infoA.instructionContent, makeADR(addInfoB.destReg, infoA.targetAddress, infoA.instructionAddress));
2438 set32LE(infoB.instructionContent, makeNOP());
2439 if ( _options.verboseOptimizationHints() ) {
2440 fprintf(stderr, "adrp-ldr-got at 0x%08llX T4 transformed to ADR/STR\n", infoC.instructionAddress);
2444 else {
2445 if ( _options.verboseOptimizationHints() )
2446 fprintf(stderr, "adrp-ldr-got at 0x%08llX not LDR or ADD\n", infoB.instructionAddress);
2449 else {
2450 if ( _options.verboseOptimizationHints() )
2451 fprintf(stderr, "adrp-ldr-got at 0x%08llX not ADRP\n", infoA.instructionAddress);
2453 break;
2454 default:
2455 if ( _options.verboseOptimizationHints() )
2456 fprintf(stderr, "unknown hint kind %d alt.info.kind at 0x%08llX\n", alt.info.kind, infoA.instructionAddress);
2457 break;
2460 // apply hints pass 2
2461 for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
2462 if ( fit->kind != ld::Fixup::kindLinkerOptimizationHint )
2463 continue;
2464 InstructionInfo infoA;
2465 InstructionInfo infoB;
2466 ld::Fixup::LOH_arm64 alt;
2467 alt.addend = fit->u.addend;
2468 setInfo(state, atom, buffer, usedByHints, fit->offsetInAtom, (alt.info.delta1 << 2), &infoA);
2469 if ( alt.info.count > 0 )
2470 setInfo(state, atom, buffer, usedByHints, fit->offsetInAtom, (alt.info.delta2 << 2), &infoB);
2472 switch ( alt.info.kind ) {
2473 case LOH_ARM64_ADRP_ADRP:
2474 LOH_ASSERT(isPageKind(infoA.fixup));
2475 LOH_ASSERT(isPageKind(infoB.fixup));
2476 if ( (infoA.instruction & 0x9F000000) != 0x90000000 ) {
2477 if ( _options.verboseOptimizationHints() )
2478 fprintf(stderr, "may-reused-adrp at 0x%08llX no longer an ADRP, now 0x%08X\n", infoA.instructionAddress, infoA.instruction);
2479 sAdrpNA++;
2480 break;
2482 if ( (infoB.instruction & 0x9F000000) != 0x90000000 ) {
2483 if ( _options.verboseOptimizationHints() )
2484 fprintf(stderr, "may-reused-adrp at 0x%08llX no longer an ADRP, now 0x%08X\n", infoB.instructionAddress, infoA.instruction);
2485 sAdrpNA++;
2486 break;
2488 if ( (infoA.targetAddress & (-4096)) == (infoB.targetAddress & (-4096)) ) {
2489 set32LE(infoB.instructionContent, 0xD503201F);
2490 sAdrpNoped++;
2492 else {
2493 sAdrpNotNoped++;
2495 break;
2499 #endif // SUPPORT_ARCH_arm64
// Fill the padding gap [from, to) with architecture-appropriate no-op
// encodings, so alignment padding between code atoms executes/disassembles
// cleanly instead of being arbitrary bytes.  'thumb' selects the 2-byte
// Thumb NOP encoding when the target is ARM.
2503 void OutputFile::copyNoOps(uint8_t* from, uint8_t* to, bool thumb)
2505 	switch ( _options.architecture() ) {
2506 		case CPU_TYPE_I386:
2507 		case CPU_TYPE_X86_64:
2508 			for (uint8_t* p=from; p < to; ++p)
2509 				*p = 0x90;		// x86 single-byte NOP
2510 			break;
2511 		case CPU_TYPE_ARM:
2512 			if ( thumb ) {
2513 				for (uint8_t* p=from; p < to; p += 2)
2514 					OSWriteLittleInt16((uint16_t*)p, 0, 0x46c0);	// Thumb NOP (mov r8, r8), 2 bytes each
2516 			else {
2517 				for (uint8_t* p=from; p < to; p += 4)
2518 					OSWriteLittleInt32((uint32_t*)p, 0, 0xe1a00000);	// ARM NOP (mov r0, r0), 4 bytes each
2520 			break;
2521 		default:
2522 			// unknown architecture: just zero-fill the gap
2523 			for (uint8_t* p=from; p < to; ++p)
2524 				*p = 0x00;
// Returns true if this section occupies address space in the output image
// but contributes no bytes to the output file on disk.
// Zero-fill sections take no disk space only when the zero-fill
// optimization is enabled in the options.
2528 bool OutputFile::takesNoDiskSpace(const ld::Section* sect)
2530 	switch ( sect->type() ) {
2531 		case ld::Section::typeZeroFill:
2532 		case ld::Section::typeTLVZeroFill:
2533 			return _options.optimizeZeroFill();
2534 		case ld::Section::typePageZero:
2535 		case ld::Section::typeStack:
2536 		case ld::Section::typeAbsoluteSymbols:
2537 		case ld::Section::typeTentativeDefs:
2538 			return true;
2539 		default:
2540 			break;
2542 	return false;
2545 bool OutputFile::hasZeroForFileOffset(const ld::Section* sect)
2547 switch ( sect->type() ) {
2548 case ld::Section::typeZeroFill:
2549 case ld::Section::typeTLVZeroFill:
2550 return _options.optimizeZeroFill();
2551 case ld::Section::typePageZero:
2552 case ld::Section::typeStack:
2553 case ld::Section::typeTentativeDefs:
2554 return true;
2555 default:
2556 break;
2558 return false;
2561 void OutputFile::writeAtoms(ld::Internal& state, uint8_t* wholeBuffer)
2563 // have each atom write itself
2564 uint64_t fileOffsetOfEndOfLastAtom = 0;
2565 uint64_t mhAddress = 0;
2566 bool lastAtomUsesNoOps = false;
2567 for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
2568 ld::Internal::FinalSection* sect = *sit;
2569 if ( sect->type() == ld::Section::typeMachHeader )
2570 mhAddress = sect->address;
2571 if ( takesNoDiskSpace(sect) )
2572 continue;
2573 const bool sectionUsesNops = (sect->type() == ld::Section::typeCode);
2574 //fprintf(stderr, "file offset=0x%08llX, section %s\n", sect->fileOffset, sect->sectionName());
2575 std::vector<const ld::Atom*>& atoms = sect->atoms;
2576 bool lastAtomWasThumb = false;
2577 for (std::vector<const ld::Atom*>::iterator ait = atoms.begin(); ait != atoms.end(); ++ait) {
2578 const ld::Atom* atom = *ait;
2579 if ( atom->definition() == ld::Atom::definitionProxy )
2580 continue;
2581 try {
2582 uint64_t fileOffset = atom->finalAddress() - sect->address + sect->fileOffset;
2583 // check for alignment padding between atoms
2584 if ( (fileOffset != fileOffsetOfEndOfLastAtom) && lastAtomUsesNoOps ) {
2585 this->copyNoOps(&wholeBuffer[fileOffsetOfEndOfLastAtom], &wholeBuffer[fileOffset], lastAtomWasThumb);
2587 // copy atom content
2588 atom->copyRawContent(&wholeBuffer[fileOffset]);
2589 // apply fix ups
2590 this->applyFixUps(state, mhAddress, atom, &wholeBuffer[fileOffset]);
2591 fileOffsetOfEndOfLastAtom = fileOffset+atom->size();
2592 lastAtomUsesNoOps = sectionUsesNops;
2593 lastAtomWasThumb = atom->isThumb();
2595 catch (const char* msg) {
2596 if ( atom->file() != NULL )
2597 throwf("%s in '%s' from %s", msg, atom->name(), atom->file()->path());
2598 else
2599 throwf("%s in '%s'", msg, atom->name());
2604 if ( _options.verboseOptimizationHints() ) {
2605 //fprintf(stderr, "ADRP optimized away: %d\n", sAdrpNA);
2606 //fprintf(stderr, "ADRPs changed to NOPs: %d\n", sAdrpNoped);
2607 //fprintf(stderr, "ADRPs unchanged: %d\n", sAdrpNotNoped);
// Computes an MD5 over the output file content and installs it as the LC_UUID.
// Regions whose bytes can vary between identical links (embedded bitcode XAR
// headers, stabs debug notes) are excluded so the UUID is reproducible.
void OutputFile::computeContentUUID(ld::Internal& state, uint8_t* wholeBuffer)
{
	const bool log = false;
	if ( (_options.outputKind() != Options::kObjectFile) || state.someObjectFileHasDwarf ) {
		uint8_t digest[CC_MD5_DIGEST_LENGTH];
		// (start,end) file-offset pairs to skip while hashing; processed in
		// order and asserted non-overlapping below
		std::vector<std::pair<uint64_t, uint64_t>> excludeRegions;
		uint64_t bitcodeCmdOffset;
		uint64_t bitcodeCmdEnd;
		uint64_t bitcodeSectOffset;
		uint64_t bitcodePaddingEnd;
		if ( _headersAndLoadCommandAtom->bitcodeBundleCommand(bitcodeCmdOffset, bitcodeCmdEnd,
															  bitcodeSectOffset, bitcodePaddingEnd) ) {
			// Exclude embedded bitcode bundle section which contains timestamps in XAR header
			// Note the timestamp is in the compressed XML header which means it might change the size of
			// bitcode section. The load command which include the size of the section and the padding after
			// the bitcode section should also be excluded in the UUID computation.
			// Bitcode section should appears before LINKEDIT
			// Exclude section cmd
			if ( log ) fprintf(stderr, "bundle cmd start=0x%08llX, bundle cmd end=0x%08llX\n",
							   bitcodeCmdOffset, bitcodeCmdEnd);
			excludeRegions.emplace_back(std::pair<uint64_t, uint64_t>(bitcodeCmdOffset, bitcodeCmdEnd));
			// Exclude section content
			if ( log ) fprintf(stderr, "bundle start=0x%08llX, bundle end=0x%08llX\n",
							   bitcodeSectOffset, bitcodePaddingEnd);
			excludeRegions.emplace_back(std::pair<uint64_t, uint64_t>(bitcodeSectOffset, bitcodePaddingEnd));
		}
		uint32_t stabsStringsOffsetStart;
		uint32_t tabsStringsOffsetEnd;		// NOTE(review): presumably meant "stabsStringsOffsetEnd" — name typo only
		uint32_t stabsOffsetStart;
		uint32_t stabsOffsetEnd;
		if ( _symbolTableAtom->hasStabs(stabsStringsOffsetStart, tabsStringsOffsetEnd, stabsOffsetStart, stabsOffsetEnd) ) {
			// find two areas of file that are stabs info and should not contribute to checksum
			uint64_t stringPoolFileOffset = 0;
			uint64_t symbolTableFileOffset = 0;
			for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
				ld::Internal::FinalSection* sect = *sit;
				if ( sect->type() == ld::Section::typeLinkEdit ) {
					if ( strcmp(sect->sectionName(), "__string_pool") == 0 )
						stringPoolFileOffset = sect->fileOffset;
					else if ( strcmp(sect->sectionName(), "__symbol_table") == 0 )
						symbolTableFileOffset = sect->fileOffset;
				}
			}
			// stabs nlist entries live in the symbol table; their strings in the string pool
			uint64_t firstStabNlistFileOffset = symbolTableFileOffset + stabsOffsetStart;
			uint64_t lastStabNlistFileOffset = symbolTableFileOffset + stabsOffsetEnd;
			uint64_t firstStabStringFileOffset = stringPoolFileOffset + stabsStringsOffsetStart;
			uint64_t lastStabStringFileOffset = stringPoolFileOffset + tabsStringsOffsetEnd;
			if ( log ) fprintf(stderr, "firstStabNlistFileOffset=0x%08llX\n", firstStabNlistFileOffset);
			if ( log ) fprintf(stderr, "lastStabNlistFileOffset=0x%08llX\n", lastStabNlistFileOffset);
			if ( log ) fprintf(stderr, "firstStabStringFileOffset=0x%08llX\n", firstStabStringFileOffset);
			if ( log ) fprintf(stderr, "lastStabStringFileOffset=0x%08llX\n", lastStabStringFileOffset);
			// symbol table is expected to precede the string pool in the file,
			// so the regions below stay in ascending order for the hash loop
			assert(firstStabNlistFileOffset <= firstStabStringFileOffset);
			excludeRegions.emplace_back(std::pair<uint64_t, uint64_t>(firstStabNlistFileOffset, lastStabNlistFileOffset));
			excludeRegions.emplace_back(std::pair<uint64_t, uint64_t>(firstStabStringFileOffset, lastStabStringFileOffset));
		}
		if ( !excludeRegions.empty() ) {
			CC_MD5_CTX md5state;
			CC_MD5_Init(&md5state);
			// rdar://problem/19487042 include the output leaf file name in the hash
			const char* lastSlash = strrchr(_options.outputFilePath(), '/');
			if ( lastSlash != NULL ) {
				CC_MD5_Update(&md5state, lastSlash, strlen(lastSlash));
			}
			// hash the file piecewise, skipping each excluded region
			uint64_t checksumStart = 0;
			for ( auto& region : excludeRegions ) {
				uint64_t regionStart = region.first;
				uint64_t regionEnd = region.second;
				assert(checksumStart <= regionStart && regionStart <= regionEnd && "Region overlapped");
				if ( log ) fprintf(stderr, "checksum 0x%08llX -> 0x%08llX\n", checksumStart, regionStart);
				CC_MD5_Update(&md5state, &wholeBuffer[checksumStart], regionStart - checksumStart);
				checksumStart = regionEnd;
			}
			// hash the tail after the last excluded region
			if ( log ) fprintf(stderr, "checksum 0x%08llX -> 0x%08llX\n", checksumStart, _fileSize);
			CC_MD5_Update(&md5state, &wholeBuffer[checksumStart], _fileSize-checksumStart);
			CC_MD5_Final(digest, &md5state);
			if ( log ) fprintf(stderr, "uuid=%02X, %02X, %02X, %02X, %02X, %02X, %02X, %02X\n", digest[0], digest[1], digest[2],
							   digest[3], digest[4], digest[5], digest[6], digest[7]);
		}
		else {
			// nothing to exclude: hash the whole file in one shot
			CC_MD5(wholeBuffer, _fileSize, digest);
		}
		// <rdar://problem/6723729> LC_UUID uuids should conform to RFC 4122 UUID version 4 & UUID version 5 formats
		// (sets the version nibble to 3 and the RFC 4122 variant bits in byte 8)
		digest[6] = ( digest[6] & 0x0F ) | ( 3 << 4 );
		digest[8] = ( digest[8] & 0x3F ) | 0x80;
		// update buffer with new UUID
		_headersAndLoadCommandAtom->setUUID(digest);
		_headersAndLoadCommandAtom->recopyUUIDCommand();
	}
}
2701 static int sDescriptorOfPathToRemove = -1;
2702 static void removePathAndExit(int sig)
2704 if ( sDescriptorOfPathToRemove != -1 ) {
2705 char path[MAXPATHLEN];
2706 if ( ::fcntl(sDescriptorOfPathToRemove, F_GETPATH, path) == 0 )
2707 ::unlink(path);
2709 fprintf(stderr, "ld: interrupted\n");
2710 exit(1);
// Writes the linked image to _options.outputFilePath().
// On HFS volumes a temporary file ({path}.ld_XXXXXX) is mmap()ed, filled in
// place, then rename()d over the destination; on other volumes (or for
// special files) the image is built in a calloc() buffer and write()n out.
void OutputFile::writeOutputFile(ld::Internal& state)
{
	// for UNIX conformance, error if file exists and is not writable
	if ( (access(_options.outputFilePath(), F_OK) == 0) && (access(_options.outputFilePath(), W_OK) == -1) )
		throwf("can't write output file: %s", _options.outputFilePath());

	mode_t permissions = 0777;
	if ( _options.outputKind() == Options::kObjectFile )
		permissions = 0666;
	mode_t umask = ::umask(0);
	::umask(umask); // put back the original umask
	permissions &= ~umask;
	// Calling unlink first assures the file is gone so that open creates it with correct permissions
	// It also handles the case where __options.outputFilePath() file is not writable but its directory is
	// And it means we don't have to truncate the file when done writing (in case new is smaller than old)
	// Lastly, only delete existing file if it is a normal file (e.g. not /dev/null).
	struct stat stat_buf;
	bool outputIsRegularFile = false;
	bool outputIsMappableFile = false;
	if ( stat(_options.outputFilePath(), &stat_buf) != -1 ) {
		if (stat_buf.st_mode & S_IFREG) {
			outputIsRegularFile = true;
			// <rdar://problem/12264302> Don't use mmap on non-hfs volumes
			struct statfs fsInfo;
			if ( statfs(_options.outputFilePath(), &fsInfo) != -1 ) {
				if ( strcmp(fsInfo.f_fstypename, "hfs") == 0) {
					(void)unlink(_options.outputFilePath());
					outputIsMappableFile = true;
				}
			}
			else {
				outputIsMappableFile = false;
			}
		}
		else {
			outputIsRegularFile = false;
		}
	}
	else {
		// special files (pipes, devices, etc) must already exist
		outputIsRegularFile = true;
		// output file does not exist yet
		char dirPath[PATH_MAX];
		strcpy(dirPath, _options.outputFilePath());
		char* end = strrchr(dirPath, '/');
		if ( end != NULL ) {
			// check the volume the new file will be created on
			end[1] = '\0';
			struct statfs fsInfo;
			if ( statfs(dirPath, &fsInfo) != -1 ) {
				if ( strcmp(fsInfo.f_fstypename, "hfs") == 0) {
					outputIsMappableFile = true;
				}
			}
		}
	}

	//fprintf(stderr, "outputIsMappableFile=%d, outputIsRegularFile=%d, path=%s\n", outputIsMappableFile, outputIsRegularFile, _options.outputFilePath());

	int fd;
	// Construct a temporary path of the form {outputFilePath}.ld_XXXXXX
	const char filenameTemplate[] = ".ld_XXXXXX";
	char tmpOutput[PATH_MAX];
	uint8_t *wholeBuffer;
	if ( outputIsRegularFile && outputIsMappableFile ) {
		// <rdar://problem/20959031> ld64 should clean up temporary files on SIGINT
		::signal(SIGINT, removePathAndExit);

		strcpy(tmpOutput, _options.outputFilePath());
		// If the path is too long to add a suffix for a temporary name then
		// just fall back to using the output path.
		if (strlen(tmpOutput)+strlen(filenameTemplate) < PATH_MAX) {
			strcat(tmpOutput, filenameTemplate);
			fd = mkstemp(tmpOutput);
			sDescriptorOfPathToRemove = fd;
		}
		else {
			fd = open(tmpOutput, O_RDWR|O_CREAT, permissions);
		}
		if ( fd == -1 )
			throwf("can't open output file for writing '%s', errno=%d", tmpOutput, errno);
		// pre-size the file so it can be memory-mapped for writing
		if ( ftruncate(fd, _fileSize) == -1 ) {
			int err = errno;
			unlink(tmpOutput);
			if ( err == ENOSPC )
				throwf("not enough disk space for writing '%s'", _options.outputFilePath());
			else
				throwf("can't grow file for writing '%s', errno=%d", _options.outputFilePath(), err);
		}

		wholeBuffer = (uint8_t *)mmap(NULL, _fileSize, PROT_WRITE|PROT_READ, MAP_SHARED, fd, 0);
		if ( wholeBuffer == MAP_FAILED )
			throwf("can't create buffer of %llu bytes for output", _fileSize);
	}
	else {
		if ( outputIsRegularFile )
			fd = open(_options.outputFilePath(), O_RDWR|O_CREAT, permissions);
		else
			fd = open(_options.outputFilePath(), O_WRONLY);
		if ( fd == -1 )
			throwf("can't open output file for writing: %s, errno=%d", _options.outputFilePath(), errno);
		// try to allocate buffer for entire output file content
		wholeBuffer = (uint8_t*)calloc(_fileSize, 1);
		if ( wholeBuffer == NULL )
			throwf("can't create buffer of %llu bytes for output", _fileSize);
	}

	if ( _options.UUIDMode() == Options::kUUIDRandom ) {
		uint8_t bits[16];
		::uuid_generate_random(bits);
		_headersAndLoadCommandAtom->setUUID(bits);
	}

	// write all atoms (and apply their fixups) into the buffer
	writeAtoms(state, wholeBuffer);

	// compute UUID
	if ( _options.UUIDMode() == Options::kUUIDContent )
		computeContentUUID(state, wholeBuffer);

	if ( outputIsRegularFile && outputIsMappableFile ) {
		// mkstemp() created the file with mode 0600; fix up permissions
		if ( ::chmod(tmpOutput, permissions) == -1 ) {
			unlink(tmpOutput);
			throwf("can't set permissions on output file: %s, errno=%d", tmpOutput, errno);
		}
		// atomically move the finished temporary file into place
		if ( ::rename(tmpOutput, _options.outputFilePath()) == -1 && strcmp(tmpOutput, _options.outputFilePath()) != 0) {
			unlink(tmpOutput);
			throwf("can't move output file in place, errno=%d", errno);
		}
	}
	else {
		if ( ::write(fd, wholeBuffer, _fileSize) == -1 ) {
			throwf("can't write to output file: %s, errno=%d", _options.outputFilePath(), errno);
		}
		sDescriptorOfPathToRemove = -1;
		::close(fd);
		// <rdar://problem/13118223> NFS: iOS incremental builds in Xcode 4.6 fail with codesign error
		// NFS seems to pad the end of the file sometimes. Calling trunc seems to correct it...
		::truncate(_options.outputFilePath(), _fileSize);
	}

	// Rename symbol map file if needed
	if ( _options.renameReverseSymbolMap() ) {
		assert(_options.hideSymbols() && _options.reverseSymbolMapPath() != NULL && "Must hide symbol and specify a path");
		uuid_string_t UUIDString;
		const uint8_t* rawUUID = _headersAndLoadCommandAtom->getUUID();
		uuid_unparse_upper(rawUUID, UUIDString);
		char outputMapPath[PATH_MAX];
		sprintf(outputMapPath, "%s/%s.bcsymbolmap", _options.reverseSymbolMapPath(), UUIDString);
		if ( ::rename(_options.reverseMapTempPath().c_str(), outputMapPath) != 0 )
			throwf("could not create bcsymbolmap file: %s", outputMapPath);
	}
}
2865 struct AtomByNameSorter
2867 bool operator()(const ld::Atom* left, const ld::Atom* right)
2869 return (strcmp(left->name(), right->name()) < 0);
2873 class NotInSet
2875 public:
2876 NotInSet(const std::set<const ld::Atom*>& theSet) : _set(theSet) {}
2878 bool operator()(const ld::Atom* atom) const {
2879 return ( _set.count(atom) == 0 );
2881 private:
2882 const std::set<const ld::Atom*>& _set;
// Partitions all atoms into _localAtoms, _exportedAtoms and _importedAtoms
// (the eventual local/external/undefined symbol table groups), assigns each
// atom its mach-o section index, and normalizes scopes/inclusion flags for
// the current output kind.
void OutputFile::buildSymbolTable(ld::Internal& state)
{
	unsigned int machoSectionIndex = 0;
	for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		// hidden sections and tentative-def sections get no mach-o section number
		bool setMachoSectionIndex = !sect->isSectionHidden() && (sect->type() != ld::Section::typeTentativeDefs);
		if ( setMachoSectionIndex )
			++machoSectionIndex;
		for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
			const ld::Atom* atom = *ait;
			if ( setMachoSectionIndex )
				(const_cast<ld::Atom*>(atom))->setMachoSection(machoSectionIndex);
			else if ( sect->type() == ld::Section::typeMachHeader )
				(const_cast<ld::Atom*>(atom))->setMachoSection(1); // __mh_execute_header is not in any section but needs n_sect==1
			else if ( sect->type() == ld::Section::typeLastSection )
				(const_cast<ld::Atom*>(atom))->setMachoSection(machoSectionIndex); // use section index of previous section
			else if ( sect->type() == ld::Section::typeFirstSection )
				(const_cast<ld::Atom*>(atom))->setMachoSection(machoSectionIndex+1); // use section index of next section

			// in -r mode, clarify symbolTableNotInFinalLinkedImages
			if ( _options.outputKind() == Options::kObjectFile ) {
				if ( (_options.architecture() == CPU_TYPE_X86_64) || (_options.architecture() == CPU_TYPE_ARM64) ) {
					// x86_64 .o files need labels on anonymous literal strings
					if ( (sect->type() == ld::Section::typeCString) && (atom->combine() == ld::Atom::combineByNameAndContent) ) {
						(const_cast<ld::Atom*>(atom))->setSymbolTableInclusion(ld::Atom::symbolTableIn);
						_localAtoms.push_back(atom);
						continue;
					}
				}
				if ( sect->type() == ld::Section::typeCFI ) {
					if ( _options.removeEHLabels() )
						(const_cast<ld::Atom*>(atom))->setSymbolTableInclusion(ld::Atom::symbolTableNotIn);
					else
						(const_cast<ld::Atom*>(atom))->setSymbolTableInclusion(ld::Atom::symbolTableIn);
				}
				else if ( sect->type() == ld::Section::typeTempAlias ) {
					// alias atoms in .o output are emitted as undefined symbols
					assert(_options.outputKind() == Options::kObjectFile);
					_importedAtoms.push_back(atom);
					continue;
				}
				if ( atom->symbolTableInclusion() == ld::Atom::symbolTableNotInFinalLinkedImages )
					(const_cast<ld::Atom*>(atom))->setSymbolTableInclusion(ld::Atom::symbolTableIn);
			}

			// TEMP work around until <rdar://problem/7702923> goes in
			if ( (atom->symbolTableInclusion() == ld::Atom::symbolTableInAndNeverStrip)
				&& (atom->scope() == ld::Atom::scopeLinkageUnit)
				&& (_options.outputKind() == Options::kDynamicLibrary) ) {
					(const_cast<ld::Atom*>(atom))->setScope(ld::Atom::scopeGlobal);
			}

			// <rdar://problem/6783167> support auto hidden weak symbols: .weak_def_can_be_hidden
			if ( atom->autoHide() && (_options.outputKind() != Options::kObjectFile) ) {
				// adding auto-hide symbol to .exp file should keep it global
				if ( !_options.hasExportMaskList() || !_options.shouldExport(atom->name()) )
					(const_cast<ld::Atom*>(atom))->setScope(ld::Atom::scopeLinkageUnit);
			}

			// <rdar://problem/8626058> ld should consistently warn when resolvers are not exported
			if ( (atom->contentType() == ld::Atom::typeResolver) && (atom->scope() == ld::Atom::scopeLinkageUnit) )
				warning("resolver functions should be external, but '%s' is hidden", atom->name());

			if ( sect->type() == ld::Section::typeImportProxies ) {
				if ( atom->combine() == ld::Atom::combineByName )
					this->usesWeakExternalSymbols = true;
				// alias proxy is a re-export with a name change, don't import changed name
				if ( ! atom->isAlias() )
					_importedAtoms.push_back(atom);
				// scope of proxies are usually linkage unit, so done
				// if scope is global, we need to re-export it too
				if ( atom->scope() == ld::Atom::scopeGlobal )
					_exportedAtoms.push_back(atom);
				continue;
			}
			if ( atom->symbolTableInclusion() == ld::Atom::symbolTableNotInFinalLinkedImages ) {
				assert(_options.outputKind() != Options::kObjectFile);
				continue; // don't add to symbol table
			}
			if ( atom->symbolTableInclusion() == ld::Atom::symbolTableNotIn ) {
				continue; // don't add to symbol table
			}
			if ( (atom->symbolTableInclusion() == ld::Atom::symbolTableInWithRandomAutoStripLabel)
				&& (_options.outputKind() != Options::kObjectFile) ) {
					continue; // don't add to symbol table
			}

			if ( (atom->definition() == ld::Atom::definitionTentative) && (_options.outputKind() == Options::kObjectFile) ) {
				if ( _options.makeTentativeDefinitionsReal() ) {
					// -r -d turns tentative defintions into real def
					_exportedAtoms.push_back(atom);
				}
				else {
					// in mach-o object files tentative defintions are stored like undefined symbols
					_importedAtoms.push_back(atom);
				}
				continue;
			}

			// remaining atoms are bucketed purely by scope
			switch ( atom->scope() ) {
				case ld::Atom::scopeTranslationUnit:
					if ( _options.keepLocalSymbol(atom->name()) ) {
						_localAtoms.push_back(atom);
					}
					else {
						if ( _options.outputKind() == Options::kObjectFile ) {
							(const_cast<ld::Atom*>(atom))->setSymbolTableInclusion(ld::Atom::symbolTableInWithRandomAutoStripLabel);
							_localAtoms.push_back(atom);
						}
						else
							(const_cast<ld::Atom*>(atom))->setSymbolTableInclusion(ld::Atom::symbolTableNotIn);
					}
					break;
				case ld::Atom::scopeGlobal:
					_exportedAtoms.push_back(atom);
					break;
				case ld::Atom::scopeLinkageUnit:
					if ( _options.outputKind() == Options::kObjectFile ) {
						if ( _options.keepPrivateExterns() ) {
							_exportedAtoms.push_back(atom);
						}
						else if ( _options.keepLocalSymbol(atom->name()) ) {
							_localAtoms.push_back(atom);
						}
						else {
							(const_cast<ld::Atom*>(atom))->setSymbolTableInclusion(ld::Atom::symbolTableInWithRandomAutoStripLabel);
							_localAtoms.push_back(atom);
						}
					}
					else {
						if ( _options.keepLocalSymbol(atom->name()) )
							_localAtoms.push_back(atom);
						// <rdar://problem/5804214> ld should never have a symbol in the non-lazy indirect symbol table with index 0
						// this works by making __mh_execute_header be a local symbol which takes symbol index 0
						else if ( (atom->symbolTableInclusion() == ld::Atom::symbolTableInAndNeverStrip) && !_options.makeCompressedDyldInfo() )
							_localAtoms.push_back(atom);
						else
							(const_cast<ld::Atom*>(atom))->setSymbolTableInclusion(ld::Atom::symbolTableNotIn);
					}
					break;
			}
		}
	}

	// <rdar://problem/6978069> ld adds undefined symbol from .exp file to binary
	if ( (_options.outputKind() == Options::kKextBundle) && _options.hasExportRestrictList() ) {
		// search for referenced undefines
		std::set<const ld::Atom*> referencedProxyAtoms;
		for (std::vector<ld::Internal::FinalSection*>::iterator sit=state.sections.begin(); sit != state.sections.end(); ++sit) {
			ld::Internal::FinalSection* sect = *sit;
			for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
				const ld::Atom* atom = *ait;
				for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
					switch ( fit->binding ) {
						case ld::Fixup::bindingsIndirectlyBound:
							referencedProxyAtoms.insert(state.indirectBindingTable[fit->u.bindingIndex]);
							break;
						case ld::Fixup::bindingDirectlyBound:
							referencedProxyAtoms.insert(fit->u.target);
							break;
						default:
							break;
					}
				}
			}
		}
		// remove any unreferenced _importedAtoms
		_importedAtoms.erase(std::remove_if(_importedAtoms.begin(), _importedAtoms.end(), NotInSet(referencedProxyAtoms)), _importedAtoms.end());
	}

	// sort by name
	std::sort(_exportedAtoms.begin(), _exportedAtoms.end(), AtomByNameSorter());
	std::sort(_importedAtoms.begin(), _importedAtoms.end(), AtomByNameSorter());
}
// Builds LINKEDIT content for -preload output, which for historical reasons
// orders LINKEDIT differently from normal output kinds (see addLinkEdit()).
// One case per architecture because the atoms are templated on the arch
// type; every case creates the same set of atoms and sections.
void OutputFile::addPreloadLinkEdit(ld::Internal& state)
{
	switch ( _options.architecture() ) {
#if SUPPORT_ARCH_i386
		case CPU_TYPE_I386:
			if ( _hasLocalRelocations ) {
				_localRelocsAtom = new LocalRelocationsAtom<x86>(_options, state, *this);
				localRelocationsSection = state.addAtom(*_localRelocsAtom);
			}
			if ( _hasExternalRelocations ) {
				_externalRelocsAtom = new ExternalRelocationsAtom<x86>(_options, state, *this);
				externalRelocationsSection = state.addAtom(*_externalRelocsAtom);
			}
			if ( _hasSymbolTable ) {
				_indirectSymbolTableAtom = new IndirectSymbolTableAtom<x86>(_options, state, *this);
				indirectSymbolTableSection = state.addAtom(*_indirectSymbolTableAtom);
				_symbolTableAtom = new SymbolTableAtom<x86>(_options, state, *this);
				symbolTableSection = state.addAtom(*_symbolTableAtom);
				_stringPoolAtom = new StringPoolAtom(_options, state, *this, 4);
				stringPoolSection = state.addAtom(*_stringPoolAtom);
			}
			break;
#endif
#if SUPPORT_ARCH_x86_64
		case CPU_TYPE_X86_64:
			if ( _hasLocalRelocations ) {
				_localRelocsAtom = new LocalRelocationsAtom<x86_64>(_options, state, *this);
				localRelocationsSection = state.addAtom(*_localRelocsAtom);
			}
			if ( _hasExternalRelocations ) {
				_externalRelocsAtom = new ExternalRelocationsAtom<x86_64>(_options, state, *this);
				externalRelocationsSection = state.addAtom(*_externalRelocsAtom);
			}
			if ( _hasSymbolTable ) {
				_indirectSymbolTableAtom = new IndirectSymbolTableAtom<x86_64>(_options, state, *this);
				indirectSymbolTableSection = state.addAtom(*_indirectSymbolTableAtom);
				_symbolTableAtom = new SymbolTableAtom<x86_64>(_options, state, *this);
				symbolTableSection = state.addAtom(*_symbolTableAtom);
				// NOTE(review): string pool alignment is 4 here even for x86_64,
				// while addLinkEdit() uses 8 — confirm this is intentional
				_stringPoolAtom = new StringPoolAtom(_options, state, *this, 4);
				stringPoolSection = state.addAtom(*_stringPoolAtom);
			}
			break;
#endif
#if SUPPORT_ARCH_arm_any
		case CPU_TYPE_ARM:
			if ( _hasLocalRelocations ) {
				_localRelocsAtom = new LocalRelocationsAtom<arm>(_options, state, *this);
				localRelocationsSection = state.addAtom(*_localRelocsAtom);
			}
			if ( _hasExternalRelocations ) {
				_externalRelocsAtom = new ExternalRelocationsAtom<arm>(_options, state, *this);
				externalRelocationsSection = state.addAtom(*_externalRelocsAtom);
			}
			if ( _hasSymbolTable ) {
				_indirectSymbolTableAtom = new IndirectSymbolTableAtom<arm>(_options, state, *this);
				indirectSymbolTableSection = state.addAtom(*_indirectSymbolTableAtom);
				_symbolTableAtom = new SymbolTableAtom<arm>(_options, state, *this);
				symbolTableSection = state.addAtom(*_symbolTableAtom);
				_stringPoolAtom = new StringPoolAtom(_options, state, *this, 4);
				stringPoolSection = state.addAtom(*_stringPoolAtom);
			}
			break;
#endif
#if SUPPORT_ARCH_arm64
		case CPU_TYPE_ARM64:
			if ( _hasLocalRelocations ) {
				_localRelocsAtom = new LocalRelocationsAtom<arm64>(_options, state, *this);
				localRelocationsSection = state.addAtom(*_localRelocsAtom);
			}
			if ( _hasExternalRelocations ) {
				_externalRelocsAtom = new ExternalRelocationsAtom<arm64>(_options, state, *this);
				externalRelocationsSection = state.addAtom(*_externalRelocsAtom);
			}
			if ( _hasSymbolTable ) {
				_indirectSymbolTableAtom = new IndirectSymbolTableAtom<arm64>(_options, state, *this);
				indirectSymbolTableSection = state.addAtom(*_indirectSymbolTableAtom);
				_symbolTableAtom = new SymbolTableAtom<arm64>(_options, state, *this);
				symbolTableSection = state.addAtom(*_symbolTableAtom);
				_stringPoolAtom = new StringPoolAtom(_options, state, *this, 4);
				stringPoolSection = state.addAtom(*_stringPoolAtom);
			}
			break;
#endif
		default:
			throw "-preload not supported";
	}
}
3150 void OutputFile::addLinkEdit(ld::Internal& state)
3152 // for historical reasons, -preload orders LINKEDIT content differently
3153 if ( _options.outputKind() == Options::kPreload )
3154 return addPreloadLinkEdit(state);
3156 switch ( _options.architecture() ) {
3157 #if SUPPORT_ARCH_i386
3158 case CPU_TYPE_I386:
3159 if ( _hasSectionRelocations ) {
3160 _sectionsRelocationsAtom = new SectionRelocationsAtom<x86>(_options, state, *this);
3161 sectionRelocationsSection = state.addAtom(*_sectionsRelocationsAtom);
3163 if ( _hasDyldInfo ) {
3164 _rebasingInfoAtom = new RebaseInfoAtom<x86>(_options, state, *this);
3165 rebaseSection = state.addAtom(*_rebasingInfoAtom);
3167 _bindingInfoAtom = new BindingInfoAtom<x86>(_options, state, *this);
3168 bindingSection = state.addAtom(*_bindingInfoAtom);
3170 _weakBindingInfoAtom = new WeakBindingInfoAtom<x86>(_options, state, *this);
3171 weakBindingSection = state.addAtom(*_weakBindingInfoAtom);
3173 _lazyBindingInfoAtom = new LazyBindingInfoAtom<x86>(_options, state, *this);
3174 lazyBindingSection = state.addAtom(*_lazyBindingInfoAtom);
3176 _exportInfoAtom = new ExportInfoAtom<x86>(_options, state, *this);
3177 exportSection = state.addAtom(*_exportInfoAtom);
3179 if ( _hasLocalRelocations ) {
3180 _localRelocsAtom = new LocalRelocationsAtom<x86>(_options, state, *this);
3181 localRelocationsSection = state.addAtom(*_localRelocsAtom);
3183 if ( _hasSplitSegInfo ) {
3184 _splitSegInfoAtom = new SplitSegInfoV1Atom<x86>(_options, state, *this);
3185 splitSegInfoSection = state.addAtom(*_splitSegInfoAtom);
3187 if ( _hasFunctionStartsInfo ) {
3188 _functionStartsAtom = new FunctionStartsAtom<x86>(_options, state, *this);
3189 functionStartsSection = state.addAtom(*_functionStartsAtom);
3191 if ( _hasDataInCodeInfo ) {
3192 _dataInCodeAtom = new DataInCodeAtom<x86>(_options, state, *this);
3193 dataInCodeSection = state.addAtom(*_dataInCodeAtom);
3195 if ( _hasOptimizationHints ) {
3196 _optimizationHintsAtom = new OptimizationHintsAtom<x86>(_options, state, *this);
3197 optimizationHintsSection = state.addAtom(*_optimizationHintsAtom);
3199 if ( _hasSymbolTable ) {
3200 _symbolTableAtom = new SymbolTableAtom<x86>(_options, state, *this);
3201 symbolTableSection = state.addAtom(*_symbolTableAtom);
3203 if ( _hasExternalRelocations ) {
3204 _externalRelocsAtom = new ExternalRelocationsAtom<x86>(_options, state, *this);
3205 externalRelocationsSection = state.addAtom(*_externalRelocsAtom);
3207 if ( _hasSymbolTable ) {
3208 _indirectSymbolTableAtom = new IndirectSymbolTableAtom<x86>(_options, state, *this);
3209 indirectSymbolTableSection = state.addAtom(*_indirectSymbolTableAtom);
3210 _stringPoolAtom = new StringPoolAtom(_options, state, *this, 4);
3211 stringPoolSection = state.addAtom(*_stringPoolAtom);
3213 break;
3214 #endif
3215 #if SUPPORT_ARCH_x86_64
3216 case CPU_TYPE_X86_64:
3217 if ( _hasSectionRelocations ) {
3218 _sectionsRelocationsAtom = new SectionRelocationsAtom<x86_64>(_options, state, *this);
3219 sectionRelocationsSection = state.addAtom(*_sectionsRelocationsAtom);
3221 if ( _hasDyldInfo ) {
3222 _rebasingInfoAtom = new RebaseInfoAtom<x86_64>(_options, state, *this);
3223 rebaseSection = state.addAtom(*_rebasingInfoAtom);
3225 _bindingInfoAtom = new BindingInfoAtom<x86_64>(_options, state, *this);
3226 bindingSection = state.addAtom(*_bindingInfoAtom);
3228 _weakBindingInfoAtom = new WeakBindingInfoAtom<x86_64>(_options, state, *this);
3229 weakBindingSection = state.addAtom(*_weakBindingInfoAtom);
3231 _lazyBindingInfoAtom = new LazyBindingInfoAtom<x86_64>(_options, state, *this);
3232 lazyBindingSection = state.addAtom(*_lazyBindingInfoAtom);
3234 _exportInfoAtom = new ExportInfoAtom<x86_64>(_options, state, *this);
3235 exportSection = state.addAtom(*_exportInfoAtom);
3237 if ( _hasLocalRelocations ) {
3238 _localRelocsAtom = new LocalRelocationsAtom<x86_64>(_options, state, *this);
3239 localRelocationsSection = state.addAtom(*_localRelocsAtom);
3241 if ( _hasSplitSegInfo ) {
3242 _splitSegInfoAtom = new SplitSegInfoV1Atom<x86_64>(_options, state, *this);
3243 splitSegInfoSection = state.addAtom(*_splitSegInfoAtom);
3245 if ( _hasFunctionStartsInfo ) {
3246 _functionStartsAtom = new FunctionStartsAtom<x86_64>(_options, state, *this);
3247 functionStartsSection = state.addAtom(*_functionStartsAtom);
3249 if ( _hasDataInCodeInfo ) {
3250 _dataInCodeAtom = new DataInCodeAtom<x86_64>(_options, state, *this);
3251 dataInCodeSection = state.addAtom(*_dataInCodeAtom);
3253 if ( _hasOptimizationHints ) {
3254 _optimizationHintsAtom = new OptimizationHintsAtom<x86_64>(_options, state, *this);
3255 optimizationHintsSection = state.addAtom(*_optimizationHintsAtom);
3257 if ( _hasSymbolTable ) {
3258 _symbolTableAtom = new SymbolTableAtom<x86_64>(_options, state, *this);
3259 symbolTableSection = state.addAtom(*_symbolTableAtom);
3261 if ( _hasExternalRelocations ) {
3262 _externalRelocsAtom = new ExternalRelocationsAtom<x86_64>(_options, state, *this);
3263 externalRelocationsSection = state.addAtom(*_externalRelocsAtom);
3265 if ( _hasSymbolTable ) {
3266 _indirectSymbolTableAtom = new IndirectSymbolTableAtom<x86_64>(_options, state, *this);
3267 indirectSymbolTableSection = state.addAtom(*_indirectSymbolTableAtom);
3268 _stringPoolAtom = new StringPoolAtom(_options, state, *this, 8);
3269 stringPoolSection = state.addAtom(*_stringPoolAtom);
3271 break;
3272 #endif
3273 #if SUPPORT_ARCH_arm_any
3274 case CPU_TYPE_ARM:
3275 if ( _hasSectionRelocations ) {
3276 _sectionsRelocationsAtom = new SectionRelocationsAtom<arm>(_options, state, *this);
3277 sectionRelocationsSection = state.addAtom(*_sectionsRelocationsAtom);
3279 if ( _hasDyldInfo ) {
3280 _rebasingInfoAtom = new RebaseInfoAtom<arm>(_options, state, *this);
3281 rebaseSection = state.addAtom(*_rebasingInfoAtom);
3283 _bindingInfoAtom = new BindingInfoAtom<arm>(_options, state, *this);
3284 bindingSection = state.addAtom(*_bindingInfoAtom);
3286 _weakBindingInfoAtom = new WeakBindingInfoAtom<arm>(_options, state, *this);
3287 weakBindingSection = state.addAtom(*_weakBindingInfoAtom);
3289 _lazyBindingInfoAtom = new LazyBindingInfoAtom<arm>(_options, state, *this);
3290 lazyBindingSection = state.addAtom(*_lazyBindingInfoAtom);
3292 _exportInfoAtom = new ExportInfoAtom<arm>(_options, state, *this);
3293 exportSection = state.addAtom(*_exportInfoAtom);
3295 if ( _hasLocalRelocations ) {
3296 _localRelocsAtom = new LocalRelocationsAtom<arm>(_options, state, *this);
3297 localRelocationsSection = state.addAtom(*_localRelocsAtom);
3299 if ( _hasSplitSegInfo ) {
3300 if ( _options.sharedRegionEncodingV2() )
3301 _splitSegInfoAtom = new SplitSegInfoV2Atom<arm>(_options, state, *this);
3302 else
3303 _splitSegInfoAtom = new SplitSegInfoV1Atom<arm>(_options, state, *this);
3304 splitSegInfoSection = state.addAtom(*_splitSegInfoAtom);
3306 if ( _hasFunctionStartsInfo ) {
3307 _functionStartsAtom = new FunctionStartsAtom<arm>(_options, state, *this);
3308 functionStartsSection = state.addAtom(*_functionStartsAtom);
3310 if ( _hasDataInCodeInfo ) {
3311 _dataInCodeAtom = new DataInCodeAtom<arm>(_options, state, *this);
3312 dataInCodeSection = state.addAtom(*_dataInCodeAtom);
3314 if ( _hasOptimizationHints ) {
3315 _optimizationHintsAtom = new OptimizationHintsAtom<arm>(_options, state, *this);
3316 optimizationHintsSection = state.addAtom(*_optimizationHintsAtom);
3318 if ( _hasSymbolTable ) {
3319 _symbolTableAtom = new SymbolTableAtom<arm>(_options, state, *this);
3320 symbolTableSection = state.addAtom(*_symbolTableAtom);
3322 if ( _hasExternalRelocations ) {
3323 _externalRelocsAtom = new ExternalRelocationsAtom<arm>(_options, state, *this);
3324 externalRelocationsSection = state.addAtom(*_externalRelocsAtom);
3326 if ( _hasSymbolTable ) {
3327 _indirectSymbolTableAtom = new IndirectSymbolTableAtom<arm>(_options, state, *this);
3328 indirectSymbolTableSection = state.addAtom(*_indirectSymbolTableAtom);
3329 _stringPoolAtom = new StringPoolAtom(_options, state, *this, 4);
3330 stringPoolSection = state.addAtom(*_stringPoolAtom);
3332 break;
3333 #endif
3334 #if SUPPORT_ARCH_arm64
3335 case CPU_TYPE_ARM64:
3336 if ( _hasSectionRelocations ) {
3337 _sectionsRelocationsAtom = new SectionRelocationsAtom<arm64>(_options, state, *this);
3338 sectionRelocationsSection = state.addAtom(*_sectionsRelocationsAtom);
3340 if ( _hasDyldInfo ) {
3341 _rebasingInfoAtom = new RebaseInfoAtom<arm64>(_options, state, *this);
3342 rebaseSection = state.addAtom(*_rebasingInfoAtom);
3344 _bindingInfoAtom = new BindingInfoAtom<arm64>(_options, state, *this);
3345 bindingSection = state.addAtom(*_bindingInfoAtom);
3347 _weakBindingInfoAtom = new WeakBindingInfoAtom<arm64>(_options, state, *this);
3348 weakBindingSection = state.addAtom(*_weakBindingInfoAtom);
3350 _lazyBindingInfoAtom = new LazyBindingInfoAtom<arm64>(_options, state, *this);
3351 lazyBindingSection = state.addAtom(*_lazyBindingInfoAtom);
3353 _exportInfoAtom = new ExportInfoAtom<arm64>(_options, state, *this);
3354 exportSection = state.addAtom(*_exportInfoAtom);
3356 if ( _hasLocalRelocations ) {
3357 _localRelocsAtom = new LocalRelocationsAtom<arm64>(_options, state, *this);
3358 localRelocationsSection = state.addAtom(*_localRelocsAtom);
3360 if ( _hasSplitSegInfo ) {
3361 if ( _options.sharedRegionEncodingV2() )
3362 _splitSegInfoAtom = new SplitSegInfoV2Atom<arm64>(_options, state, *this);
3363 else
3364 _splitSegInfoAtom = new SplitSegInfoV1Atom<arm64>(_options, state, *this);
3365 splitSegInfoSection = state.addAtom(*_splitSegInfoAtom);
3367 if ( _hasFunctionStartsInfo ) {
3368 _functionStartsAtom = new FunctionStartsAtom<arm64>(_options, state, *this);
3369 functionStartsSection = state.addAtom(*_functionStartsAtom);
3371 if ( _hasDataInCodeInfo ) {
3372 _dataInCodeAtom = new DataInCodeAtom<arm64>(_options, state, *this);
3373 dataInCodeSection = state.addAtom(*_dataInCodeAtom);
3375 if ( _hasOptimizationHints ) {
3376 _optimizationHintsAtom = new OptimizationHintsAtom<arm64>(_options, state, *this);
3377 optimizationHintsSection = state.addAtom(*_optimizationHintsAtom);
3379 if ( _hasSymbolTable ) {
3380 _symbolTableAtom = new SymbolTableAtom<arm64>(_options, state, *this);
3381 symbolTableSection = state.addAtom(*_symbolTableAtom);
3383 if ( _hasExternalRelocations ) {
3384 _externalRelocsAtom = new ExternalRelocationsAtom<arm64>(_options, state, *this);
3385 externalRelocationsSection = state.addAtom(*_externalRelocsAtom);
3387 if ( _hasSymbolTable ) {
3388 _indirectSymbolTableAtom = new IndirectSymbolTableAtom<arm64>(_options, state, *this);
3389 indirectSymbolTableSection = state.addAtom(*_indirectSymbolTableAtom);
3390 _stringPoolAtom = new StringPoolAtom(_options, state, *this, 4);
3391 stringPoolSection = state.addAtom(*_stringPoolAtom);
3393 break;
3394 #endif
3395 default:
3396 throw "unknown architecture";
// Creates the single atom that holds the mach_header and all load commands,
// and registers it with the state so it is laid out first in the output.
// The atom is templated on the architecture so that pointer-sized fields
// and cpu-specific load commands are emitted correctly.
void OutputFile::addLoadCommands(ld::Internal& state)
{
	switch ( _options.architecture() ) {
#if SUPPORT_ARCH_x86_64
		case CPU_TYPE_X86_64:
			_headersAndLoadCommandAtom = new HeaderAndLoadCommandsAtom<x86_64>(_options, state, *this);
			headerAndLoadCommandsSection = state.addAtom(*_headersAndLoadCommandAtom);
			break;
#endif
#if SUPPORT_ARCH_arm_any
		case CPU_TYPE_ARM:
			_headersAndLoadCommandAtom = new HeaderAndLoadCommandsAtom<arm>(_options, state, *this);
			headerAndLoadCommandsSection = state.addAtom(*_headersAndLoadCommandAtom);
			break;
#endif
#if SUPPORT_ARCH_arm64
		case CPU_TYPE_ARM64:
			_headersAndLoadCommandAtom = new HeaderAndLoadCommandsAtom<arm64>(_options, state, *this);
			headerAndLoadCommandsSection = state.addAtom(*_headersAndLoadCommandAtom);
			break;
#endif
#if SUPPORT_ARCH_i386
		case CPU_TYPE_I386:
			_headersAndLoadCommandAtom = new HeaderAndLoadCommandsAtom<x86>(_options, state, *this);
			headerAndLoadCommandsSection = state.addAtom(*_headersAndLoadCommandAtom);
			break;
#endif
		default:
			// architecture not compiled into this linker build (or bad -arch)
			throw "unknown architecture";
	}
}
3432 uint32_t OutputFile::dylibCount()
3434 return _dylibsToLoad.size();
3437 const ld::dylib::File* OutputFile::dylibByOrdinal(unsigned int ordinal)
3439 assert( ordinal > 0 );
3440 assert( ordinal <= _dylibsToLoad.size() );
3441 return _dylibsToLoad[ordinal-1];
3444 bool OutputFile::hasOrdinalForInstallPath(const char* path, int* ordinal)
3446 for (std::map<const ld::dylib::File*, int>::const_iterator it = _dylibToOrdinal.begin(); it != _dylibToOrdinal.end(); ++it) {
3447 const char* installPath = it->first->installPath();
3448 if ( (installPath != NULL) && (strcmp(path, installPath) == 0) ) {
3449 *ordinal = it->second;
3450 return true;
3453 return false;
// Returns the ordinal previously assigned to 'dylib' by
// buildDylibOrdinalMapping().  NOTE(review): uses std::map::operator[],
// so querying a dylib that was never mapped silently inserts an entry
// with ordinal 0 — callers are expected to only pass mapped dylibs.
uint32_t OutputFile::dylibToOrdinal(const ld::dylib::File* dylib)
{
	return _dylibToOrdinal[dylib];
}
// Assigns a load ordinal to every dylib in the link, populating
// _dylibToOrdinal (dylib -> ordinal) and _dylibsToLoad (ordinal order).
// Special ordinals: the bundle loader gets BIND_SPECIAL_DYLIB_MAIN_EXECUTABLE,
// and non-public re-exported dylibs (when there are two or more) get
// BIND_SPECIAL_DYLIB_SELF.  Lazy-loaded dylibs are deferred to a second
// pass so their ordinals come after all regular dylibs.
// Also records in _noReExportedDylibs whether any explicitly linked dylib
// is re-exported.
void OutputFile::buildDylibOrdinalMapping(ld::Internal& state)
{
	// count non-public re-exported dylibs
	unsigned int nonPublicReExportCount = 0;
	for (std::vector<ld::dylib::File*>::iterator it = state.dylibs.begin(); it != state.dylibs.end(); ++it) {
		ld::dylib::File* aDylib = *it;
		if ( aDylib->willBeReExported() && ! aDylib->hasPublicInstallName() )
			++nonPublicReExportCount;
	}

	// look at each dylib supplied in state
	bool hasReExports = false;
	bool haveLazyDylibs = false;
	for (std::vector<ld::dylib::File*>::iterator it = state.dylibs.begin(); it != state.dylibs.end(); ++it) {
		ld::dylib::File* aDylib = *it;
		int ordinal;
		if ( aDylib == state.bundleLoader ) {
			_dylibToOrdinal[aDylib] = BIND_SPECIAL_DYLIB_MAIN_EXECUTABLE;
		}
		else if ( this->hasOrdinalForInstallPath(aDylib->installPath(), &ordinal) ) {
			// already have a dylib with that install path, map all uses to that ordinal
			_dylibToOrdinal[aDylib] = ordinal;
		}
		else if ( aDylib->willBeLazyLoadedDylib() ) {
			// all lazy dylib need to be at end of ordinals
			haveLazyDylibs = true;
		}
		else if ( aDylib->willBeReExported() && ! aDylib->hasPublicInstallName() && (nonPublicReExportCount >= 2) ) {
			_dylibsToLoad.push_back(aDylib);
			_dylibToOrdinal[aDylib] = BIND_SPECIAL_DYLIB_SELF;
		}
		else {
			// first time this install path seen, create new ordinal
			_dylibsToLoad.push_back(aDylib);
			_dylibToOrdinal[aDylib] = _dylibsToLoad.size();
		}
		if ( aDylib->explicitlyLinked() && aDylib->willBeReExported() )
			hasReExports = true;
	}
	if ( haveLazyDylibs ) {
		// second pass to determine ordinals for lazy loaded dylibs
		for (std::vector<ld::dylib::File*>::iterator it = state.dylibs.begin(); it != state.dylibs.end(); ++it) {
			ld::dylib::File* aDylib = *it;
			if ( aDylib->willBeLazyLoadedDylib() ) {
				int ordinal;
				if ( this->hasOrdinalForInstallPath(aDylib->installPath(), &ordinal) ) {
					// already have a dylib with that install path, map all uses to that ordinal
					_dylibToOrdinal[aDylib] = ordinal;
				}
				else {
					// first time this install path seen, create new ordinal
					_dylibsToLoad.push_back(aDylib);
					_dylibToOrdinal[aDylib] = _dylibsToLoad.size();
				}
			}
		}
	}
	_noReExportedDylibs = !hasReExports;
	//fprintf(stderr, "dylibs:\n");
	//for (std::map<const ld::dylib::File*, int>::const_iterator it = _dylibToOrdinal.begin(); it != _dylibToOrdinal.end(); ++it) {
	//	fprintf(stderr, "  %p ord=%u, install_name=%s\n",it->first, it->second, it->first->installPath());
	//}
}
// Returns the lazy-binding-info offset previously recorded for the lazy
// pointer at 'lpAddress' via setLazyBindingInfoOffset().
// NOTE(review): operator[] default-inserts 0 for an unknown address.
uint32_t OutputFile::lazyBindingInfoOffsetForLazyPointerAddress(uint64_t lpAddress)
{
	return _lazyPointerAddressToInfoOffset[lpAddress];
}
// Records (or overwrites) the lazy-binding-info offset for the lazy
// pointer located at 'lpAddress'.
void OutputFile::setLazyBindingInfoOffset(uint64_t lpAddress, uint32_t lpInfoOffset)
{
	_lazyPointerAddressToInfoOffset[lpAddress] = lpInfoOffset;
}
3536 int OutputFile::compressedOrdinalForAtom(const ld::Atom* target)
3538 // flat namespace images use zero for all ordinals
3539 if ( _options.nameSpace() != Options::kTwoLevelNameSpace )
3540 return BIND_SPECIAL_DYLIB_FLAT_LOOKUP;
3542 // handle -interposable
3543 if ( target->definition() == ld::Atom::definitionRegular )
3544 return BIND_SPECIAL_DYLIB_SELF;
3546 // regular ordinal
3547 const ld::dylib::File* dylib = dynamic_cast<const ld::dylib::File*>(target->file());
3548 if ( dylib != NULL ) {
3549 std::map<const ld::dylib::File*, int>::iterator pos = _dylibToOrdinal.find(dylib);
3550 if ( pos != _dylibToOrdinal.end() )
3551 return pos->second;
3552 assert(0 && "dylib not assigned ordinal");
3555 // handle undefined dynamic_lookup
3556 if ( _options.undefinedTreatment() == Options::kUndefinedDynamicLookup )
3557 return BIND_SPECIAL_DYLIB_FLAT_LOOKUP;
3559 // handle -U _foo
3560 if ( _options.allowedUndefined(target->name()) )
3561 return BIND_SPECIAL_DYLIB_FLAT_LOOKUP;
3563 throw "can't find ordinal for imported symbol";
// Returns true if the fixup kind writes a PC-relative value (branch
// displacement, rip-relative offset, page21/pageoff12 pair, etc.).
// PC-relative stores within one linkage unit need no rebase/bind info.
bool OutputFile::isPcRelStore(ld::Fixup::Kind kind)
{
	switch ( kind ) {
		case ld::Fixup::kindStoreX86BranchPCRel8:
		case ld::Fixup::kindStoreX86BranchPCRel32:
		case ld::Fixup::kindStoreX86PCRel8:
		case ld::Fixup::kindStoreX86PCRel16:
		case ld::Fixup::kindStoreX86PCRel32:
		case ld::Fixup::kindStoreX86PCRel32_1:
		case ld::Fixup::kindStoreX86PCRel32_2:
		case ld::Fixup::kindStoreX86PCRel32_4:
		case ld::Fixup::kindStoreX86PCRel32GOTLoad:
		case ld::Fixup::kindStoreX86PCRel32GOTLoadNowLEA:
		case ld::Fixup::kindStoreX86PCRel32GOT:
		case ld::Fixup::kindStoreX86PCRel32TLVLoad:
		case ld::Fixup::kindStoreX86PCRel32TLVLoadNowLEA:
		case ld::Fixup::kindStoreARMBranch24:
		case ld::Fixup::kindStoreThumbBranch22:
		case ld::Fixup::kindStoreARMLoad12:
		case ld::Fixup::kindStoreTargetAddressX86PCRel32:
		case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad:
		case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA:
		case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad:
		case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA:
		case ld::Fixup::kindStoreTargetAddressARMBranch24:
		case ld::Fixup::kindStoreTargetAddressThumbBranch22:
		case ld::Fixup::kindStoreTargetAddressARMLoad12:
#if SUPPORT_ARCH_arm64
		case ld::Fixup::kindStoreARM64Page21:
		case ld::Fixup::kindStoreARM64PageOff12:
		case ld::Fixup::kindStoreARM64GOTLoadPage21:
		case ld::Fixup::kindStoreARM64GOTLoadPageOff12:
		case ld::Fixup::kindStoreARM64GOTLeaPage21:
		case ld::Fixup::kindStoreARM64GOTLeaPageOff12:
		case ld::Fixup::kindStoreARM64TLVPLoadPage21:
		case ld::Fixup::kindStoreARM64TLVPLoadPageOff12:
		case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPage21:
		case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPageOff12:
		case ld::Fixup::kindStoreARM64PCRelToGOT:
		case ld::Fixup::kindStoreTargetAddressARM64Page21:
		case ld::Fixup::kindStoreTargetAddressARM64PageOff12:
		case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21:
		case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12:
		case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21:
		case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12:
		case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPage21:
		case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPageOff12:
		case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPage21:
		case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPageOff12:
#endif
			return true;
		case ld::Fixup::kindStoreTargetAddressX86BranchPCRel32:
#if SUPPORT_ARCH_arm64
		case ld::Fixup::kindStoreTargetAddressARM64Branch26:
#endif
			// branches in kexts are resolved by kernel loader, so not pc-rel here
			return (_options.outputKind() != Options::kKextBundle);
		default:
			break;
	}
	return false;
}
// Returns true if the fixup kind actually writes bytes into the atom's
// content.  The listed kinds are bookkeeping-only (grouping, address
// computation, addends) and store nothing; everything else is a store.
bool OutputFile::isStore(ld::Fixup::Kind kind)
{
	switch ( kind ) {
		case ld::Fixup::kindNone:
		case ld::Fixup::kindNoneFollowOn:
		case ld::Fixup::kindNoneGroupSubordinate:
		case ld::Fixup::kindNoneGroupSubordinateFDE:
		case ld::Fixup::kindNoneGroupSubordinateLSDA:
		case ld::Fixup::kindNoneGroupSubordinatePersonality:
		case ld::Fixup::kindSetTargetAddress:
		case ld::Fixup::kindSubtractTargetAddress:
		case ld::Fixup::kindAddAddend:
		case ld::Fixup::kindSubtractAddend:
		case ld::Fixup::kindSetTargetImageOffset:
		case ld::Fixup::kindSetTargetSectionOffset:
			return false;
		default:
			break;
	}
	return true;
}
// Returns true if the fixup kind establishes the target atom of its
// fixup cluster (either a pure set-target kind or a combined
// set-target-and-store kind).  Dtrace site fixups only set a target
// when emitting an object file, where they become relocations.
bool OutputFile::setsTarget(ld::Fixup::Kind kind)
{
	switch ( kind ) {
		case ld::Fixup::kindSetTargetAddress:
		case ld::Fixup::kindLazyTarget:
		case ld::Fixup::kindStoreTargetAddressLittleEndian32:
		case ld::Fixup::kindStoreTargetAddressLittleEndian64:
		case ld::Fixup::kindStoreTargetAddressBigEndian32:
		case ld::Fixup::kindStoreTargetAddressBigEndian64:
		case ld::Fixup::kindStoreTargetAddressX86PCRel32:
		case ld::Fixup::kindStoreTargetAddressX86BranchPCRel32:
		case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad:
		case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA:
		case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad:
		case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA:
		case ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoad:
		case ld::Fixup::kindStoreTargetAddressARMBranch24:
		case ld::Fixup::kindStoreTargetAddressThumbBranch22:
		case ld::Fixup::kindStoreTargetAddressARMLoad12:
#if SUPPORT_ARCH_arm64
		case ld::Fixup::kindStoreTargetAddressARM64Branch26:
		case ld::Fixup::kindStoreTargetAddressARM64Page21:
		case ld::Fixup::kindStoreTargetAddressARM64PageOff12:
		case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21:
		case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12:
		case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21:
		case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12:
		case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPage21:
		case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPageOff12:
		case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPage21:
		case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPageOff12:
#endif
			return true;
		case ld::Fixup::kindStoreX86DtraceCallSiteNop:
		case ld::Fixup::kindStoreX86DtraceIsEnableSiteClear:
		case ld::Fixup::kindStoreARMDtraceCallSiteNop:
		case ld::Fixup::kindStoreARMDtraceIsEnableSiteClear:
		case ld::Fixup::kindStoreARM64DtraceCallSiteNop:
		case ld::Fixup::kindStoreARM64DtraceIsEnableSiteClear:
		case ld::Fixup::kindStoreThumbDtraceCallSiteNop:
		case ld::Fixup::kindStoreThumbDtraceIsEnableSiteClear:
			return (_options.outputKind() == Options::kObjectFile);
		default:
			break;
	}
	return false;
}
// Returns true if the fixup kind's value is the address of its target
// (a plain pointer-to-target, as opposed to a pc-relative or computed
// value).  Such fixups are candidates for rebase/bind info.
bool OutputFile::isPointerToTarget(ld::Fixup::Kind kind)
{
	switch ( kind ) {
		case ld::Fixup::kindSetTargetAddress:
		case ld::Fixup::kindStoreTargetAddressLittleEndian32:
		case ld::Fixup::kindStoreTargetAddressLittleEndian64:
		case ld::Fixup::kindStoreTargetAddressBigEndian32:
		case ld::Fixup::kindStoreTargetAddressBigEndian64:
		case ld::Fixup::kindLazyTarget:
			return true;
		default:
			break;
	}
	return false;
}
3715 bool OutputFile::isPointerFromTarget(ld::Fixup::Kind kind)
3717 switch ( kind ) {
3718 case ld::Fixup::kindSubtractTargetAddress:
3719 return true;
3720 default:
3721 break;
3723 return false;
// Given a fixup within a cluster, finds the addend contributed by the
// cluster's kindAddAddend/kindSubtractAddend member, if any.
// The iterator is moved relative to 'fit' based on the cluster shape:
// in a 3-fixup cluster the addend fixup is adjacent to the one passed in
// (before a k2of3, after a k1of3); 1- and 2-fixup clusters carry no
// separate addend fixup, so the addend is 0.
uint64_t OutputFile::lookBackAddend(ld::Fixup::iterator fit)
{
	uint64_t addend = 0;
	switch ( fit->clusterSize ) {
		case ld::Fixup::k1of1:
		case ld::Fixup::k1of2:
		case ld::Fixup::k2of2:
			// no room in the cluster for an addend fixup
			break;
		case ld::Fixup::k2of3:
			// addend fixup precedes this one
			--fit;
			switch ( fit->kind ) {
				case ld::Fixup::kindAddAddend:
					addend += fit->u.addend;
					break;
				case ld::Fixup::kindSubtractAddend:
					addend -= fit->u.addend;
					break;
				default:
					throw "unexpected fixup kind for binding";
			}
			break;
		case ld::Fixup::k1of3:
			// addend fixup follows this one
			++fit;
			switch ( fit->kind ) {
				case ld::Fixup::kindAddAddend:
					addend += fit->u.addend;
					break;
				case ld::Fixup::kindSubtractAddend:
					addend -= fit->u.addend;
					break;
				default:
					throw "unexpected fixup kind for binding";
			}
			break;
		default:
			throw "unexpected fixup cluster size for binding";
	}
	return addend;
}
// Walks every atom's fixup clusters and dispatches each complete cluster
// to the appropriate linkedit builder: section relocations (-r output),
// compressed dyld info, or classic relocations.  Along the way it also
// records the encrypted __TEXT end offset, weak-def overrides of dylib
// symbols, data-in-code presence, and rejects objc class refs into
// lazy-loaded dylibs.
void OutputFile::generateLinkEditInfo(ld::Internal& state)
{
	for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		// record end of last __TEXT section encrypted iPhoneOS apps.
		if ( _options.makeEncryptable() && (strcmp(sect->segmentName(), "__TEXT") == 0) ) {
			_encryptedTEXTendOffset = pageAlign(sect->fileOffset + sect->size);
		}
		// objc1 __OBJC/__cls_refs sections need a lazy-dylib legality check below
		bool objc1ClassRefSection = ( (sect->type() == ld::Section::typeCStringPointer)
									&& (strcmp(sect->sectionName(), "__cls_refs") == 0)
									&& (strcmp(sect->segmentName(), "__OBJC") == 0) );
		for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
			const ld::Atom* atom = *ait;

			// Record regular atoms that override a dylib's weak definitions
			if ( (atom->scope() == ld::Atom::scopeGlobal) && atom->overridesDylibsWeakDef() ) {
				if ( _options.makeCompressedDyldInfo() ) {
					uint8_t wtype = BIND_TYPE_OVERRIDE_OF_WEAKDEF_IN_DYLIB;
					// combineNever means this override is itself a non-weak definition
					bool nonWeakDef = (atom->combine() == ld::Atom::combineNever);
					_weakBindingInfo.push_back(BindingInfo(wtype, atom->name(), nonWeakDef, atom->finalAddress(), 0));
				}
				this->overridesWeakExternalSymbols = true;
				if ( _options.warnWeakExports() )
					warning("overrides weak external symbol: %s", atom->name());
			}

			// accumulate the pieces of one fixup cluster
			ld::Fixup* fixupWithTarget = NULL;
			ld::Fixup* fixupWithMinusTarget = NULL;
			ld::Fixup* fixupWithStore = NULL;
			ld::Fixup* fixupWithAddend = NULL;
			const ld::Atom* target = NULL;
			const ld::Atom* minusTarget = NULL;
			uint64_t targetAddend = 0;
			uint64_t minusTargetAddend = 0;
			for (ld::Fixup::iterator fit = atom->fixupsBegin(); fit != atom->fixupsEnd(); ++fit) {
				if ( fit->firstInCluster() ) {
					// NOTE(review): fixupWithAddend is not reset here, so a
					// stale pointer from a previous cluster can carry over —
					// confirm against later ld64 releases which do reset it.
					fixupWithTarget = NULL;
					fixupWithMinusTarget = NULL;
					fixupWithStore = NULL;
					target = NULL;
					minusTarget = NULL;
					targetAddend = 0;
					minusTargetAddend = 0;
				}
				if ( this->setsTarget(fit->kind) ) {
					switch ( fit->binding ) {
						case ld::Fixup::bindingNone:
						case ld::Fixup::bindingByNameUnbound:
							break;
						case ld::Fixup::bindingByContentBound:
						case ld::Fixup::bindingDirectlyBound:
							fixupWithTarget = fit;
							target = fit->u.target;
							break;
						case ld::Fixup::bindingsIndirectlyBound:
							fixupWithTarget = fit;
							target = state.indirectBindingTable[fit->u.bindingIndex];
							break;
					}
					assert(target != NULL);
				}
				switch ( fit->kind ) {
					case ld::Fixup::kindAddAddend:
						targetAddend = fit->u.addend;
						fixupWithAddend = fit;
						break;
					case ld::Fixup::kindSubtractAddend:
						minusTargetAddend = fit->u.addend;
						fixupWithAddend = fit;
						break;
					case ld::Fixup::kindSubtractTargetAddress:
						switch ( fit->binding ) {
							case ld::Fixup::bindingNone:
							case ld::Fixup::bindingByNameUnbound:
								break;
							case ld::Fixup::bindingByContentBound:
							case ld::Fixup::bindingDirectlyBound:
								fixupWithMinusTarget = fit;
								minusTarget = fit->u.target;
								break;
							case ld::Fixup::bindingsIndirectlyBound:
								fixupWithMinusTarget = fit;
								minusTarget = state.indirectBindingTable[fit->u.bindingIndex];
								break;
						}
						assert(minusTarget != NULL);
						break;
					case ld::Fixup::kindDataInCodeStartData:
					case ld::Fixup::kindDataInCodeStartJT8:
					case ld::Fixup::kindDataInCodeStartJT16:
					case ld::Fixup::kindDataInCodeStartJT32:
					case ld::Fixup::kindDataInCodeStartJTA32:
					case ld::Fixup::kindDataInCodeEnd:
						hasDataInCode = true;
						break;
					default:
						break;
				}
				if ( this->isStore(fit->kind) ) {
					fixupWithStore = fit;
				}
				if ( fit->lastInCluster() ) {
					if ( (fixupWithStore != NULL) && (target != NULL) ) {
						// complete cluster: route to the right linkedit builder
						if ( _options.outputKind() == Options::kObjectFile ) {
							this->addSectionRelocs(state, sect, atom, fixupWithTarget, fixupWithMinusTarget, fixupWithAddend, fixupWithStore,
													target, minusTarget, targetAddend, minusTargetAddend);
						}
						else {
							if ( _options.makeCompressedDyldInfo() ) {
								this->addDyldInfo(state, sect, atom, fixupWithTarget, fixupWithMinusTarget, fixupWithStore,
													target, minusTarget, targetAddend, minusTargetAddend);
							}
							else {
								this->addClassicRelocs(state, sect, atom, fixupWithTarget, fixupWithMinusTarget, fixupWithStore,
													target, minusTarget, targetAddend, minusTargetAddend);
							}
						}
					}
					else if ( objc1ClassRefSection && (target != NULL) && (fixupWithStore == NULL) ) {
						// check for class refs to lazy loaded dylibs
						const ld::dylib::File* dylib = dynamic_cast<const ld::dylib::File*>(target->file());
						if ( (dylib != NULL) && dylib->willBeLazyLoadedDylib() )
							throwf("illegal class reference to %s in lazy loaded dylib %s", target->name(), dylib->path());
					}
				}
			}
		}
	}
}
3899 void OutputFile::noteTextReloc(const ld::Atom* atom, const ld::Atom* target)
3901 if ( (atom->contentType() == ld::Atom::typeStub) || (atom->contentType() == ld::Atom::typeStubHelper) ) {
3902 // silently let stubs (synthesized by linker) use text relocs
3904 else if ( _options.allowTextRelocs() ) {
3905 if ( _options.warnAboutTextRelocs() )
3906 warning("text reloc in %s to %s", atom->name(), target->name());
3908 else if ( _options.positionIndependentExecutable() && (_options.outputKind() == Options::kDynamicExecutable)
3909 && ((_options.iOSVersionMin() >= ld::iOS_4_3) || (_options.macosxVersionMin() >= ld::mac10_7)) ) {
3910 if ( ! this->pieDisabled ) {
3911 #if SUPPORT_ARCH_arm64
3912 if ( _options.architecture() == CPU_TYPE_ARM64 ) {
3913 const char* demangledName = strdup(_options.demangleSymbol(atom->name()));
3914 throwf("Absolute addressing not allowed in arm64 code but used in '%s' referencing '%s'", demangledName, _options.demangleSymbol(target->name()));
3916 else
3917 #endif
3919 warning("PIE disabled. Absolute addressing (perhaps -mdynamic-no-pic) not allowed in code signed PIE, "
3920 "but used in %s from %s. "
3921 "To fix this warning, don't compile with -mdynamic-no-pic or link with -Wl,-no_pie",
3922 atom->name(), atom->file()->path());
3925 this->pieDisabled = true;
3927 else if ( (target->scope() == ld::Atom::scopeGlobal) && (target->combine() == ld::Atom::combineByName) ) {
3928 throwf("illegal text-relocoation (direct reference) to (global,weak) %s in %s from %s in %s", target->name(), target->file()->path(), atom->name(), atom->file()->path());
3930 else {
3931 if ( (target->file() != NULL) && (atom->file() != NULL) )
3932 throwf("illegal text-relocation to '%s' in %s from '%s' in %s", target->name(), target->file()->path(), atom->name(), atom->file()->path());
3933 else
3934 throwf("illegal text reloc in '%s' to '%s'", atom->name(), target->name());
3938 void OutputFile::addDyldInfo(ld::Internal& state, ld::Internal::FinalSection* sect, const ld::Atom* atom,
3939 ld::Fixup* fixupWithTarget, ld::Fixup* fixupWithMinusTarget, ld::Fixup* fixupWithStore,
3940 const ld::Atom* target, const ld::Atom* minusTarget,
3941 uint64_t targetAddend, uint64_t minusTargetAddend)
3943 if ( sect->isSectionHidden() )
3944 return;
3946 // no need to rebase or bind PCRel stores
3947 if ( this->isPcRelStore(fixupWithStore->kind) ) {
3948 // as long as target is in same linkage unit
3949 if ( (target == NULL) || (target->definition() != ld::Atom::definitionProxy) ) {
3950 // make sure target is not global and weak
3951 if ( (target->scope() == ld::Atom::scopeGlobal) && (target->combine() == ld::Atom::combineByName) && (target->definition() == ld::Atom::definitionRegular)) {
3952 if ( (atom->section().type() == ld::Section::typeCFI)
3953 || (atom->section().type() == ld::Section::typeDtraceDOF)
3954 || (atom->section().type() == ld::Section::typeUnwindInfo) ) {
3955 // ok for __eh_frame and __uwind_info to use pointer diffs to global weak symbols
3956 return;
3958 // <rdar://problem/13700961> spurious warning when weak function has reference to itself
3959 if ( fixupWithTarget->binding == ld::Fixup::bindingDirectlyBound ) {
3960 // ok to ignore pc-rel references within a weak function to itself
3961 return;
3963 // Have direct reference to weak-global. This should be an indrect reference
3964 const char* demangledName = strdup(_options.demangleSymbol(atom->name()));
3965 warning("direct access in %s to global weak symbol %s means the weak symbol cannot be overridden at runtime. "
3966 "This was likely caused by different translation units being compiled with different visibility settings.",
3967 demangledName, _options.demangleSymbol(target->name()));
3969 return;
3973 // no need to rebase or bind PIC internal pointer diff
3974 if ( minusTarget != NULL ) {
3975 // with pointer diffs, both need to be in same linkage unit
3976 assert(minusTarget->definition() != ld::Atom::definitionProxy);
3977 assert(target != NULL);
3978 assert(target->definition() != ld::Atom::definitionProxy);
3979 if ( target == minusTarget ) {
3980 // This is a compile time constant and could have been optimized away by compiler
3981 return;
3984 // check if target of pointer-diff is global and weak
3985 if ( (target->scope() == ld::Atom::scopeGlobal) && (target->combine() == ld::Atom::combineByName) && (target->definition() == ld::Atom::definitionRegular) ) {
3986 if ( (atom->section().type() == ld::Section::typeCFI)
3987 || (atom->section().type() == ld::Section::typeDtraceDOF)
3988 || (atom->section().type() == ld::Section::typeUnwindInfo) ) {
3989 // ok for __eh_frame and __uwind_info to use pointer diffs to global weak symbols
3990 return;
3992 // Have direct reference to weak-global. This should be an indrect reference
3993 const char* demangledName = strdup(_options.demangleSymbol(atom->name()));
3994 warning("direct access in %s to global weak symbol %s means the weak symbol cannot be overridden at runtime. "
3995 "This was likely caused by different translation units being compiled with different visibility settings.",
3996 demangledName, _options.demangleSymbol(target->name()));
3998 return;
4001 // no need to rebase or bind an atom's references to itself if the output is not slidable
4002 if ( (atom == target) && !_options.outputSlidable() )
4003 return;
4005 // cluster has no target, so needs no rebasing or binding
4006 if ( target == NULL )
4007 return;
4009 bool inReadOnlySeg = ((_options.initialSegProtection(sect->segmentName()) & VM_PROT_WRITE) == 0);
4010 bool needsRebase = false;
4011 bool needsBinding = false;
4012 bool needsLazyBinding = false;
4013 bool needsWeakBinding = false;
4015 uint8_t rebaseType = REBASE_TYPE_POINTER;
4016 uint8_t type = BIND_TYPE_POINTER;
4017 const ld::dylib::File* dylib = dynamic_cast<const ld::dylib::File*>(target->file());
4018 bool weak_import = (fixupWithTarget->weakImport || ((dylib != NULL) && dylib->forcedWeakLinked()));
4019 uint64_t address = atom->finalAddress() + fixupWithTarget->offsetInAtom;
4020 uint64_t addend = targetAddend - minusTargetAddend;
4022 // special case lazy pointers
4023 if ( fixupWithTarget->kind == ld::Fixup::kindLazyTarget ) {
4024 assert(fixupWithTarget->u.target == target);
4025 assert(addend == 0);
4026 // lazy dylib lazy pointers do not have any dyld info
4027 if ( atom->section().type() == ld::Section::typeLazyDylibPointer )
4028 return;
4029 // lazy binding to weak definitions are done differently
4030 // they are directly bound to target, then have a weak bind in case of a collision
4031 if ( target->combine() == ld::Atom::combineByName ) {
4032 if ( target->definition() == ld::Atom::definitionProxy ) {
4033 // weak def exported from another dylib
4034 // must non-lazy bind to it plus have weak binding info in case of collision
4035 needsBinding = true;
4036 needsWeakBinding = true;
4038 else {
4039 // weak def in this linkage unit.
4040 // just rebase, plus have weak binding info in case of collision
4041 // this will be done by other cluster on lazy pointer atom
4044 else if ( target->contentType() == ld::Atom::typeResolver ) {
4045 // <rdar://problem/8553647> Hidden resolver functions should not have lazy binding info
4046 // <rdar://problem/12629331> Resolver function run before initializers when overriding the dyld shared cache
4047 // The lazy pointers used by stubs used when non-lazy binding to a resolver are not normal lazy pointers
4048 // and should not be in lazy binding info.
4049 needsLazyBinding = false;
4051 else {
4052 // normal case of a pointer to non-weak-def symbol, so can lazily bind
4053 needsLazyBinding = true;
4056 else {
4057 // everything except lazy pointers
4058 switch ( target->definition() ) {
4059 case ld::Atom::definitionProxy:
4060 if ( (dylib != NULL) && dylib->willBeLazyLoadedDylib() )
4061 throwf("illegal data reference to %s in lazy loaded dylib %s", target->name(), dylib->path());
4062 if ( target->contentType() == ld::Atom::typeTLV ) {
4063 if ( sect->type() != ld::Section::typeTLVPointers )
4064 throwf("illegal data reference in %s to thread local variable %s in dylib %s",
4065 atom->name(), target->name(), dylib->path());
4067 if ( inReadOnlySeg )
4068 type = BIND_TYPE_TEXT_ABSOLUTE32;
4069 needsBinding = true;
4070 if ( target->combine() == ld::Atom::combineByName )
4071 needsWeakBinding = true;
4072 break;
4073 case ld::Atom::definitionRegular:
4074 case ld::Atom::definitionTentative:
4075 // only slideable images need rebasing info
4076 if ( _options.outputSlidable() ) {
4077 needsRebase = true;
4079 // references to internal symbol never need binding
4080 if ( target->scope() != ld::Atom::scopeGlobal )
4081 break;
4082 // reference to global weak def needs weak binding
4083 if ( (target->combine() == ld::Atom::combineByName) && (target->definition() == ld::Atom::definitionRegular) )
4084 needsWeakBinding = true;
4085 else if ( _options.outputKind() == Options::kDynamicExecutable ) {
4086 // in main executables, the only way regular symbols are indirected is if -interposable is used
4087 if ( _options.interposable(target->name()) ) {
4088 needsRebase = false;
4089 needsBinding = true;
4092 else {
4093 // for flat-namespace or interposable two-level-namespace
4094 // all references to exported symbols get indirected
4095 if ( (_options.nameSpace() != Options::kTwoLevelNameSpace) || _options.interposable(target->name()) ) {
4096 // <rdar://problem/5254468> no external relocs for flat objc classes
4097 if ( strncmp(target->name(), ".objc_class_", 12) == 0 )
4098 break;
4099 // no rebase info for references to global symbols that will have binding info
4100 needsRebase = false;
4101 needsBinding = true;
4103 else if ( _options.forceCoalesce(target->name()) ) {
4104 needsWeakBinding = true;
4107 break;
4108 case ld::Atom::definitionAbsolute:
4109 break;
4113 // <rdar://problem/13828711> if target is an import alias, use base of alias
4114 if ( target->isAlias() && (target->definition() == ld::Atom::definitionProxy) ) {
4115 for (ld::Fixup::iterator fit = target->fixupsBegin(), end=target->fixupsEnd(); fit != end; ++fit) {
4116 if ( fit->firstInCluster() ) {
4117 if ( fit->kind == ld::Fixup::kindNoneFollowOn ) {
4118 if ( fit->binding == ld::Fixup::bindingDirectlyBound ) {
4119 //fprintf(stderr, "switching import of %s to import of %s\n", target->name(), fit->u.target->name());
4120 target = fit->u.target;
4127 // record dyld info for this cluster
4128 if ( needsRebase ) {
4129 if ( inReadOnlySeg ) {
4130 noteTextReloc(atom, target);
4131 sect->hasLocalRelocs = true; // so dyld knows to change permissions on __TEXT segment
4132 rebaseType = REBASE_TYPE_TEXT_ABSOLUTE32;
4134 if ( _options.sharedRegionEligible() ) {
4135 // <rdar://problem/13287063> when range checking, ignore high byte of arm64 addends
4136 uint64_t checkAddend = addend;
4137 if ( _options.architecture() == CPU_TYPE_ARM64 )
4138 checkAddend &= 0x0FFFFFFFFFFFFFFFULL;
4139 if ( checkAddend != 0 ) {
4140 // make sure the addend does not cause the pointer to point outside the target's segment
4141 // if it does, update_dyld_shared_cache will not be able to put this dylib into the shared cache
4142 uint64_t targetAddress = target->finalAddress();
4143 for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
4144 ld::Internal::FinalSection* sct = *sit;
4145 uint64_t sctEnd = (sct->address+sct->size);
4146 if ( (sct->address <= targetAddress) && (targetAddress < sctEnd) ) {
4147 if ( (targetAddress+checkAddend) > sctEnd ) {
4148 warning("data symbol %s from %s has pointer to %s + 0x%08llX. "
4149 "That large of an addend may disable %s from being put in the dyld shared cache.",
4150 atom->name(), atom->file()->path(), target->name(), addend, _options.installPath() );
4156 _rebaseInfo.push_back(RebaseInfo(rebaseType, address));
4158 if ( needsBinding ) {
4159 if ( inReadOnlySeg ) {
4160 noteTextReloc(atom, target);
4161 sect->hasExternalRelocs = true; // so dyld knows to change permissions on __TEXT segment
4163 _bindingInfo.push_back(BindingInfo(type, this->compressedOrdinalForAtom(target), target->name(), weak_import, address, addend));
4165 if ( needsLazyBinding ) {
4166 if ( _options.bindAtLoad() )
4167 _bindingInfo.push_back(BindingInfo(type, this->compressedOrdinalForAtom(target), target->name(), weak_import, address, addend));
4168 else
4169 _lazyBindingInfo.push_back(BindingInfo(type, this->compressedOrdinalForAtom(target), target->name(), weak_import, address, addend));
4171 if ( needsWeakBinding )
4172 _weakBindingInfo.push_back(BindingInfo(type, 0, target->name(), false, address, addend));
// Emit classic (pre-LC_DYLD_INFO) relocation records for one fixup cluster of
// 'atom' in 'sect'.  Decides, per fixup kind and per target definition, whether
// the cluster needs an external reloc (symbol-based, resolved by dyld/strip) or
// a local reloc (slide-only), and appends it to _externalRelocsAtom /
// _localRelocsAtom.  PC-relative stores and pointer-diffs need no relocs.
// NOTE(review): this listing came from an extraction that embedded the original
// line numbers and dropped blank/brace-only lines, so the text below is not
// brace-balanced as shown; code tokens are preserved verbatim.
4176 void OutputFile::addClassicRelocs(ld::Internal& state, ld::Internal::FinalSection* sect, const ld::Atom* atom,
4177 ld::Fixup* fixupWithTarget, ld::Fixup* fixupWithMinusTarget, ld::Fixup* fixupWithStore,
4178 const ld::Atom* target, const ld::Atom* minusTarget,
4179 uint64_t targetAddend, uint64_t minusTargetAddend)
// hidden sections never appear in the output file, so never need relocs
4181 if ( sect->isSectionHidden() )
4182 return;
4184 // non-lazy-pointer section is encoded in indirect symbol table - not using relocations
4185 if ( sect->type() == ld::Section::typeNonLazyPointer ) {
4186 // except kexts and static pie which *do* use relocations
4187 switch (_options.outputKind()) {
4188 case Options::kKextBundle:
4189 break;
4190 case Options::kStaticExecutable:
4191 if ( _options.positionIndependentExecutable() )
4192 break;
4193 // else fall into default case
4194 default:
4195 assert(target != NULL);
4196 assert(fixupWithTarget != NULL);
4197 return;
4201 // no need to rebase or bind PCRel stores
4202 if ( this->isPcRelStore(fixupWithStore->kind) ) {
4203 // as long as target is in same linkage unit
4204 if ( (target == NULL) || (target->definition() != ld::Atom::definitionProxy) )
4205 return;
4208 // no need to rebase or bind PIC internal pointer diff
4209 if ( minusTarget != NULL ) {
4210 // with pointer diffs, both need to be in same linkage unit
4211 assert(minusTarget->definition() != ld::Atom::definitionProxy);
4212 assert(target != NULL);
4213 assert(target->definition() != ld::Atom::definitionProxy);
4214 // make sure target is not global and weak
// a pointer diff to a coalescable global could resolve to a different copy at
// runtime and silently break; CFI/DOF/unwind sections are exempt (see throwf text)
4215 if ( (target->scope() == ld::Atom::scopeGlobal) && (target->combine() == ld::Atom::combineByName)
4216 && (atom->section().type() != ld::Section::typeCFI)
4217 && (atom->section().type() != ld::Section::typeDtraceDOF)
4218 && (atom->section().type() != ld::Section::typeUnwindInfo)
4219 && (minusTarget != target) ) {
4220 // ok for __eh_frame and __uwind_info to use pointer diffs to global weak symbols
4221 throwf("bad codegen, pointer diff in %s to global weak symbol %s", atom->name(), target->name());
4223 return;
4226 // cluster has no target, so needs no rebasing or binding
4227 if ( target == NULL )
4228 return;
4230 assert(_localRelocsAtom != NULL);
// reloc records hold addresses relative to the reloc base, not absolute addresses
4231 uint64_t relocAddress = atom->finalAddress() + fixupWithTarget->offsetInAtom - _localRelocsAtom->relocBaseAddress(state);
// relocs that land in __TEXT force dyld to temporarily make the segment writable
4233 bool inReadOnlySeg = ( strcmp(sect->segmentName(), "__TEXT") == 0 );
4234 bool needsLocalReloc = false;
4235 bool needsExternReloc = false;
4237 switch ( fixupWithStore->kind ) {
4238 case ld::Fixup::kindLazyTarget:
4239 // lazy pointers don't need relocs
4240 break;
4241 case ld::Fixup::kindStoreLittleEndian32:
4242 case ld::Fixup::kindStoreLittleEndian64:
4243 case ld::Fixup::kindStoreBigEndian32:
4244 case ld::Fixup::kindStoreBigEndian64:
4245 case ld::Fixup::kindStoreTargetAddressLittleEndian32:
4246 case ld::Fixup::kindStoreTargetAddressLittleEndian64:
4247 case ld::Fixup::kindStoreTargetAddressBigEndian32:
4248 case ld::Fixup::kindStoreTargetAddressBigEndian64:
4249 // is pointer
4250 switch ( target->definition() ) {
4251 case ld::Atom::definitionProxy:
// target lives in another linkage unit: must be an external reloc
4252 needsExternReloc = true;
4253 break;
4254 case ld::Atom::definitionRegular:
4255 case ld::Atom::definitionTentative:
4256 // only slideable images need local relocs
4257 if ( _options.outputSlidable() )
4258 needsLocalReloc = true;
4259 // references to internal symbol never need binding
4260 if ( target->scope() != ld::Atom::scopeGlobal )
4261 break;
4262 // reference to global weak def needs weak binding in dynamic images
4263 if ( (target->combine() == ld::Atom::combineByName)
4264 && (target->definition() == ld::Atom::definitionRegular)
4265 && (_options.outputKind() != Options::kStaticExecutable)
4266 && (_options.outputKind() != Options::kPreload)
4267 && (atom != target) ) {
4268 needsExternReloc = true;
4270 else if ( _options.outputKind() == Options::kDynamicExecutable ) {
4271 // in main executables, the only way regular symbols are indirected is if -interposable is used
4272 if ( _options.interposable(target->name()) )
4273 needsExternReloc = true;
4275 else {
4276 // for flat-namespace or interposable two-level-namespace
4277 // all references to exported symbols get indirected
4278 if ( (_options.nameSpace() != Options::kTwoLevelNameSpace) || _options.interposable(target->name()) ) {
4279 // <rdar://problem/5254468> no external relocs for flat objc classes
4280 if ( strncmp(target->name(), ".objc_class_", 12) == 0 )
4281 break;
4282 // no rebase info for references to global symbols that will have binding info
4283 needsExternReloc = true;
// an external reloc supersedes a local one for the same address
4286 if ( needsExternReloc )
4287 needsLocalReloc = false;
4288 break;
4289 case ld::Atom::definitionAbsolute:
4290 break;
4292 if ( needsExternReloc ) {
4293 if ( inReadOnlySeg )
4294 noteTextReloc(atom, target);
4295 const ld::dylib::File* dylib = dynamic_cast<const ld::dylib::File*>(target->file());
4296 if ( (dylib != NULL) && dylib->willBeLazyLoadedDylib() )
4297 throwf("illegal data reference to %s in lazy loaded dylib %s", target->name(), dylib->path());
4298 _externalRelocsAtom->addExternalPointerReloc(relocAddress, target);
4299 sect->hasExternalRelocs = true;
// content will hold only the addend; dyld adds in the symbol's address
4300 fixupWithTarget->contentAddendOnly = true;
4302 else if ( needsLocalReloc ) {
4303 assert(target != NULL);
4304 if ( inReadOnlySeg )
4305 noteTextReloc(atom, target);
4306 _localRelocsAtom->addPointerReloc(relocAddress, target->machoSection());
4307 sect->hasLocalRelocs = true;
4309 break;
4310 case ld::Fixup::kindStoreTargetAddressX86BranchPCRel32:
4311 #if SUPPORT_ARCH_arm64
4312 case ld::Fixup::kindStoreTargetAddressARM64Branch26:
4313 #endif
// kexts are linked but rebased/bound by the kernel's kext loader, so calls to
// proxies are recorded as external call-site relocs
4314 if ( _options.outputKind() == Options::kKextBundle ) {
4315 assert(target != NULL);
4316 if ( target->definition() == ld::Atom::definitionProxy ) {
4317 _externalRelocsAtom->addExternalCallSiteReloc(relocAddress, target);
4318 fixupWithStore->contentAddendOnly = true;
4321 break;
4323 case ld::Fixup::kindStoreARMLow16:
4324 case ld::Fixup::kindStoreThumbLow16:
4325 // no way to encode rebasing of binding for these instructions
4326 if ( _options.outputSlidable() || (target->definition() == ld::Atom::definitionProxy) )
4327 throwf("no supported runtime lo16 relocation in %s from %s to %s", atom->name(), atom->file()->path(), target->name());
4328 break;
4330 case ld::Fixup::kindStoreARMHigh16:
4331 case ld::Fixup::kindStoreThumbHigh16:
4332 // no way to encode rebasing of binding for these instructions
4333 if ( _options.outputSlidable() || (target->definition() == ld::Atom::definitionProxy) )
4334 throwf("no supported runtime hi16 relocation in %s from %s to %s", atom->name(), atom->file()->path(), target->name())
4335 break;
4337 default:
4338 break;
4343 bool OutputFile::useExternalSectionReloc(const ld::Atom* atom, const ld::Atom* target, ld::Fixup* fixupWithTarget)
4345 if ( (_options.architecture() == CPU_TYPE_X86_64) || (_options.architecture() == CPU_TYPE_ARM64) ) {
4346 // x86_64 and ARM64 use external relocations for everthing that has a symbol
4347 return ( target->symbolTableInclusion() != ld::Atom::symbolTableNotIn );
4350 // <rdar://problem/9513487> support arm branch interworking in -r mode
4351 if ( (_options.architecture() == CPU_TYPE_ARM) && (_options.outputKind() == Options::kObjectFile) ) {
4352 if ( atom->isThumb() != target->isThumb() ) {
4353 switch ( fixupWithTarget->kind ) {
4354 // have branch that switches mode, then might be 'b' not 'bl'
4355 // Force external relocation, since no way to do local reloc for 'b'
4356 case ld::Fixup::kindStoreTargetAddressThumbBranch22 :
4357 case ld::Fixup::kindStoreTargetAddressARMBranch24:
4358 return true;
4359 default:
4360 break;
4365 if ( (_options.architecture() == CPU_TYPE_I386) && (_options.outputKind() == Options::kObjectFile) ) {
4366 if ( target->contentType() == ld::Atom::typeTLV )
4367 return true;
4370 // most architectures use external relocations only for references
4371 // to a symbol in another translation unit or for references to "weak symbols" or tentative definitions
4372 assert(target != NULL);
4373 if ( target->definition() == ld::Atom::definitionProxy )
4374 return true;
4375 if ( (target->definition() == ld::Atom::definitionTentative) && ! _options.makeTentativeDefinitionsReal() )
4376 return true;
4377 if ( target->scope() != ld::Atom::scopeGlobal )
4378 return false;
4379 if ( (target->combine() == ld::Atom::combineByName) && (target->definition() == ld::Atom::definitionRegular) )
4380 return true;
4381 return false;
4384 bool OutputFile::useSectionRelocAddend(ld::Fixup* fixupWithTarget)
4386 #if SUPPORT_ARCH_arm64
4387 if ( _options.architecture() == CPU_TYPE_ARM64 ) {
4388 switch ( fixupWithTarget->kind ) {
4389 case ld::Fixup::kindStoreARM64Branch26:
4390 case ld::Fixup::kindStoreARM64Page21:
4391 case ld::Fixup::kindStoreARM64PageOff12:
4392 return true;
4393 default:
4394 return false;
4397 #endif
4398 return false;
// Emit a section relocation (for -r / object-file output) for one fixup
// cluster, first marking which fixups should store only the addend (or the
// delta to the addend) so the written content matches what the reloc expects.
// NOTE(review): this listing came from an extraction that embedded the original
// line numbers and dropped blank/brace-only lines, so the text below is not
// brace-balanced as shown; code tokens are preserved verbatim.
4404 void OutputFile::addSectionRelocs(ld::Internal& state, ld::Internal::FinalSection* sect, const ld::Atom* atom,
4405 ld::Fixup* fixupWithTarget, ld::Fixup* fixupWithMinusTarget,
4406 ld::Fixup* fixupWithAddend, ld::Fixup* fixupWithStore,
4407 const ld::Atom* target, const ld::Atom* minusTarget,
4408 uint64_t targetAddend, uint64_t minusTargetAddend)
4410 if ( sect->isSectionHidden() )
4411 return;
4413 // in -r mode where there will be no labels on __eh_frame section, there is no need for relocations
4414 if ( (sect->type() == ld::Section::typeCFI) && _options.removeEHLabels() )
4415 return;
4417 // non-lazy-pointer section is encoded in indirect symbol table - not using relocations
4418 if ( sect->type() == ld::Section::typeNonLazyPointer )
4419 return;
4421 // tentative defs don't have any relocations
4422 if ( sect->type() == ld::Section::typeTentativeDefs )
4423 return;
4425 assert(target != NULL);
4426 assert(fixupWithTarget != NULL);
// decide external vs local for each side of a (possible) pointer-diff cluster
4427 bool targetUsesExternalReloc = this->useExternalSectionReloc(atom, target, fixupWithTarget);
4428 bool minusTargetUsesExternalReloc = (minusTarget != NULL) && this->useExternalSectionReloc(atom, minusTarget, fixupWithMinusTarget);
4430 // in x86_64 and arm64 .o files an external reloc means the content contains just the addend
4431 if ( (_options.architecture() == CPU_TYPE_X86_64) ||(_options.architecture() == CPU_TYPE_ARM64) ) {
4432 if ( targetUsesExternalReloc ) {
4433 fixupWithTarget->contentAddendOnly = true;
4434 fixupWithStore->contentAddendOnly = true;
// arm64 carries the addend in a separate ARM64_RELOC_ADDEND record instead
4435 if ( this->useSectionRelocAddend(fixupWithStore) && (fixupWithAddend != NULL) )
4436 fixupWithAddend->contentIgnoresAddend = true;
4438 if ( minusTargetUsesExternalReloc )
4439 fixupWithMinusTarget->contentAddendOnly = true;
4441 else {
4442 // for other archs, content is addend only with (non pc-rel) pointers
4443 // pc-rel instructions are funny. If the target is _foo+8 and _foo is
4444 // external, then the pc-rel instruction *evalutates* to the address 8.
4445 if ( targetUsesExternalReloc ) {
4446 // TLV support for i386 acts like RIP relative addressing
4447 // The addend is the offset from the PICBase to the end of the instruction
4448 if ( (_options.architecture() == CPU_TYPE_I386)
4449 && (_options.outputKind() == Options::kObjectFile)
4450 && (fixupWithStore->kind == ld::Fixup::kindStoreX86PCRel32TLVLoad) ) {
4451 fixupWithTarget->contentAddendOnly = true;
4452 fixupWithStore->contentAddendOnly = true;
4454 else if ( isPcRelStore(fixupWithStore->kind) ) {
// NOTE: 'contentDetlaToAddendOnly' is the field's actual (misspelled) name
4455 fixupWithTarget->contentDetlaToAddendOnly = true;
4456 fixupWithStore->contentDetlaToAddendOnly = true;
4458 else if ( minusTarget == NULL ){
4459 fixupWithTarget->contentAddendOnly = true;
4460 fixupWithStore->contentAddendOnly = true;
// finally record the relocation itself (both sides of a diff in one call)
4465 if ( fixupWithStore != NULL ) {
4466 _sectionsRelocationsAtom->addSectionReloc(sect, fixupWithStore->kind, atom, fixupWithStore->offsetInAtom,
4467 targetUsesExternalReloc, minusTargetUsesExternalReloc,
4468 target, targetAddend, minusTarget, minusTargetAddend);
// Build (v1) split-seg info for dylibs eligible for the dyld shared cache:
// scan every fixup of every __TEXT atom and record the addresses of stores
// whose value depends on the distance between segments, so
// update_dyld_shared_cache can re-adjust them when segments are repacked.
// NOTE(review): this listing came from an extraction that embedded the original
// line numbers and dropped blank/brace-only lines, so the text below is not
// brace-balanced as shown; code tokens are preserved verbatim.
4473 void OutputFile::makeSplitSegInfo(ld::Internal& state)
4475 if ( !_options.sharedRegionEligible() )
4476 return;
4478 for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
4479 ld::Internal::FinalSection* sect = *sit;
4480 if ( sect->isSectionHidden() )
4481 continue;
// v1 split-seg only tracks adjustments originating in __TEXT
4482 if ( strcmp(sect->segmentName(), "__TEXT") != 0 )
4483 continue;
4484 for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
4485 const ld::Atom* atom = *ait;
4486 const ld::Atom* target = NULL;
4487 const ld::Atom* fromTarget = NULL;
// accumulator mimics the fixup engine: carries the partially-computed value
// (target address, +/- addends) across the fixups of one cluster
4488 uint64_t accumulator = 0;
4489 bool thumbTarget;
4490 bool hadSubtract = false;
4491 for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
4492 if ( fit->firstInCluster() )
4493 target = NULL;
4494 if ( this->setsTarget(fit->kind) ) {
4495 accumulator = addressOf(state, fit, &target);
4496 thumbTarget = targetIsThumb(state, fit);
// low bit of a thumb target address is the mode bit
4497 if ( thumbTarget )
4498 accumulator |= 1;
4500 switch ( fit->kind ) {
4501 case ld::Fixup::kindSubtractTargetAddress:
4502 accumulator -= addressOf(state, fit, &fromTarget);
4503 hadSubtract = true;
4504 break;
4505 case ld::Fixup::kindAddAddend:
4506 accumulator += fit->u.addend;
4507 break;
4508 case ld::Fixup::kindSubtractAddend:
4509 accumulator -= fit->u.addend;
4510 break;
4511 case ld::Fixup::kindStoreBigEndian32:
4512 case ld::Fixup::kindStoreLittleEndian32:
4513 case ld::Fixup::kindStoreLittleEndian64:
4514 case ld::Fixup::kindStoreTargetAddressLittleEndian32:
4515 case ld::Fixup::kindStoreTargetAddressLittleEndian64:
4516 // if no subtract, then this is an absolute pointer which means
4517 // there is also a text reloc which update_dyld_shared_cache will use.
4518 if ( ! hadSubtract )
4519 break;
4520 // fall through
4521 case ld::Fixup::kindStoreX86PCRel32:
4522 case ld::Fixup::kindStoreX86PCRel32_1:
4523 case ld::Fixup::kindStoreX86PCRel32_2:
4524 case ld::Fixup::kindStoreX86PCRel32_4:
4525 case ld::Fixup::kindStoreX86PCRel32GOTLoad:
4526 case ld::Fixup::kindStoreX86PCRel32GOTLoadNowLEA:
4527 case ld::Fixup::kindStoreX86PCRel32GOT:
4528 case ld::Fixup::kindStoreX86PCRel32TLVLoad:
4529 case ld::Fixup::kindStoreX86PCRel32TLVLoadNowLEA:
4530 case ld::Fixup::kindStoreTargetAddressX86PCRel32:
4531 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad:
4532 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA:
4533 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad:
4534 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA:
4535 case ld::Fixup::kindStoreARMLow16:
4536 case ld::Fixup::kindStoreThumbLow16:
4537 #if SUPPORT_ARCH_arm64
4538 case ld::Fixup::kindStoreARM64Page21:
4539 case ld::Fixup::kindStoreARM64GOTLoadPage21:
4540 case ld::Fixup::kindStoreARM64GOTLeaPage21:
4541 case ld::Fixup::kindStoreARM64TLVPLoadPage21:
4542 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPage21:
4543 case ld::Fixup::kindStoreTargetAddressARM64Page21:
4544 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21:
4545 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21:
4546 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPage21:
4547 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPage21:
4548 case ld::Fixup::kindStoreARM64PCRelToGOT:
4549 #endif
4550 assert(target != NULL);
// only cross-segment references need adjusting when segments move
4551 if ( strcmp(sect->segmentName(), target->section().segmentName()) != 0 ) {
4552 _splitSegInfos.push_back(SplitSegInfoEntry(atom->finalAddress()+fit->offsetInAtom,fit->kind));
4554 break;
4555 case ld::Fixup::kindStoreARMHigh16:
4556 case ld::Fixup::kindStoreThumbHigh16:
4557 assert(target != NULL);
4558 if ( strcmp(sect->segmentName(), target->section().segmentName()) != 0 ) {
4559 // hi16 needs to know upper 4-bits of low16 to compute carry
4560 uint32_t extra = (accumulator >> 12) & 0xF;
4561 _splitSegInfos.push_back(SplitSegInfoEntry(atom->finalAddress()+fit->offsetInAtom,fit->kind, extra));
4563 break;
4564 case ld::Fixup::kindSetTargetImageOffset:
4565 accumulator = addressOf(state, fit, &target);
4566 assert(target != NULL);
4567 hadSubtract = true;
4568 break;
4569 default:
4570 break;
// Build v2 split-seg info: for every fixup cluster that references another
// section, record a (fromSection, fromOffset, toSection, toOffset, kind)
// tuple using the DYLD_CACHE_ADJ_V2_* adjustment kinds.  Unlike v1, this
// covers all sections (not just __TEXT) and encodes section-relative offsets.
// NOTE(review): this listing came from an extraction that embedded the original
// line numbers and dropped blank/brace-only lines, so the text below is not
// brace-balanced as shown; code tokens are preserved verbatim.
4577 void OutputFile::makeSplitSegInfoV2(ld::Internal& state)
4579 static const bool log = false;
4580 if ( !_options.sharedRegionEligible() )
4581 return;
4583 for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
4584 ld::Internal::FinalSection* sect = *sit;
4585 if ( sect->isSectionHidden() )
4586 continue;
4587 bool codeSection = (sect->type() == ld::Section::typeCode);
4588 if (log) fprintf(stderr, "sect: %s, address=0x%llX\n", sect->sectionName(), sect->address);
4589 for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
4590 const ld::Atom* atom = *ait;
4591 const ld::Atom* target = NULL;
4592 const ld::Atom* fromTarget = NULL;
4593 uint32_t picBase = 0;
// accumulator mirrors the fixup engine's partially-evaluated value for the cluster
4594 uint64_t accumulator = 0;
4595 bool thumbTarget;
4596 bool hadSubtract = false;
4597 uint8_t fromSectionIndex = atom->machoSection();
4598 uint8_t toSectionIndex;
4599 uint8_t kind = 0;
4600 uint64_t fromOffset = 0;
4601 uint64_t toOffset = 0;
4602 uint64_t addend = 0;
4603 for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
// reset per-cluster state at each cluster boundary
4604 if ( fit->firstInCluster() ) {
4605 target = NULL;
4606 fromTarget = NULL;
4607 kind = 0;
4608 addend = 0;
// 255 == sentinel for "no target section yet" (asserted below)
4609 toSectionIndex = 255;
4610 fromOffset = atom->finalAddress() + fit->offsetInAtom - sect->address;
4612 if ( this->setsTarget(fit->kind) ) {
4613 accumulator = addressOf(state, fit, &target);
4614 thumbTarget = targetIsThumb(state, fit);
// low bit of a thumb target address is the mode bit
4615 if ( thumbTarget )
4616 accumulator |= 1;
4617 toOffset = accumulator - state.atomToSection[target]->address;
4618 if ( target->definition() != ld::Atom::definitionProxy ) {
4619 if ( target->section().type() == ld::Section::typeMachHeader )
4620 toSectionIndex = 0;
4621 else
4622 toSectionIndex = target->machoSection();
// classify the store kind into a DYLD_CACHE_ADJ_V2_* adjustment kind
4625 switch ( fit->kind ) {
4626 case ld::Fixup::kindSubtractTargetAddress:
4627 accumulator -= addressOf(state, fit, &fromTarget);
4628 hadSubtract = true;
4629 break;
4630 case ld::Fixup::kindAddAddend:
4631 accumulator += fit->u.addend;
4632 addend = fit->u.addend;
4633 break;
4634 case ld::Fixup::kindSubtractAddend:
4635 accumulator -= fit->u.addend;
// presumably records the pic-base displacement for i386-style clusters — not
// read again in the visible code; TODO(review) confirm against full source
4636 picBase = fit->u.addend;
4637 break;
4638 case ld::Fixup::kindSetLazyOffset:
4639 break;
4640 case ld::Fixup::kindStoreBigEndian32:
4641 case ld::Fixup::kindStoreLittleEndian32:
4642 case ld::Fixup::kindStoreTargetAddressLittleEndian32:
// don't overwrite an IMAGE_OFF_32 classification set by kindSetTargetImageOffset
4643 if ( kind != DYLD_CACHE_ADJ_V2_IMAGE_OFF_32 ) {
4644 if ( hadSubtract )
4645 kind = DYLD_CACHE_ADJ_V2_DELTA_32;
4646 else
4647 kind = DYLD_CACHE_ADJ_V2_POINTER_32;
4649 break;
4650 case ld::Fixup::kindStoreLittleEndian64:
4651 case ld::Fixup::kindStoreTargetAddressLittleEndian64:
4652 if ( hadSubtract )
4653 kind = DYLD_CACHE_ADJ_V2_DELTA_64;
4654 else
4655 kind = DYLD_CACHE_ADJ_V2_POINTER_64;
4656 break;
4657 case ld::Fixup::kindStoreX86PCRel32:
4658 case ld::Fixup::kindStoreX86PCRel32_1:
4659 case ld::Fixup::kindStoreX86PCRel32_2:
4660 case ld::Fixup::kindStoreX86PCRel32_4:
4661 case ld::Fixup::kindStoreX86PCRel32GOTLoad:
4662 case ld::Fixup::kindStoreX86PCRel32GOTLoadNowLEA:
4663 case ld::Fixup::kindStoreX86PCRel32GOT:
4664 case ld::Fixup::kindStoreX86PCRel32TLVLoad:
4665 case ld::Fixup::kindStoreX86PCRel32TLVLoadNowLEA:
4666 case ld::Fixup::kindStoreTargetAddressX86PCRel32:
4667 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad:
4668 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA:
4669 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad:
4670 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA:
4671 #if SUPPORT_ARCH_arm64
4672 case ld::Fixup::kindStoreARM64PCRelToGOT:
4673 #endif
4674 if ( (fromSectionIndex != toSectionIndex) || !codeSection )
4675 kind = DYLD_CACHE_ADJ_V2_DELTA_32;
4676 break;
4677 #if SUPPORT_ARCH_arm64
4678 case ld::Fixup::kindStoreARM64Page21:
4679 case ld::Fixup::kindStoreARM64GOTLoadPage21:
4680 case ld::Fixup::kindStoreARM64GOTLeaPage21:
4681 case ld::Fixup::kindStoreARM64TLVPLoadPage21:
4682 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPage21:
4683 case ld::Fixup::kindStoreTargetAddressARM64Page21:
4684 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21:
4685 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21:
4686 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPage21:
4687 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPage21:
4688 if ( fromSectionIndex != toSectionIndex )
4689 kind = DYLD_CACHE_ADJ_V2_ARM64_ADRP;
4690 break;
4691 case ld::Fixup::kindStoreARM64PageOff12:
4692 case ld::Fixup::kindStoreARM64GOTLeaPageOff12:
4693 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPageOff12:
4694 case ld::Fixup::kindStoreTargetAddressARM64PageOff12:
4695 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12:
4696 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12:
4697 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPageOff12:
4698 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPageOff12:
4699 if ( fromSectionIndex != toSectionIndex )
4700 kind = DYLD_CACHE_ADJ_V2_ARM64_OFF12;
4701 break;
4702 case ld::Fixup::kindStoreARM64Branch26:
4703 case ld::Fixup::kindStoreTargetAddressARM64Branch26:
4704 if ( fromSectionIndex != toSectionIndex )
4705 kind = DYLD_CACHE_ADJ_V2_ARM64_BR26;
4706 break;
4707 #endif
4708 case ld::Fixup::kindStoreARMHigh16:
4709 case ld::Fixup::kindStoreARMLow16:
// movw/movt pairs are only adjustable when pc-relative to this very atom
4710 if ( (fromSectionIndex != toSectionIndex) && (fromTarget == atom) ) {
4711 kind = DYLD_CACHE_ADJ_V2_ARM_MOVW_MOVT;
4713 break;
4714 case ld::Fixup::kindStoreARMBranch24:
4715 case ld::Fixup::kindStoreTargetAddressARMBranch24:
4716 if ( fromSectionIndex != toSectionIndex )
4717 kind = DYLD_CACHE_ADJ_V2_ARM_BR24;
4718 break;
4719 case ld::Fixup::kindStoreThumbLow16:
4720 case ld::Fixup::kindStoreThumbHigh16:
4721 if ( (fromSectionIndex != toSectionIndex) && (fromTarget == atom) ) {
4722 kind = DYLD_CACHE_ADJ_V2_THUMB_MOVW_MOVT;
4724 break;
4725 case ld::Fixup::kindStoreThumbBranch22:
4726 case ld::Fixup::kindStoreTargetAddressThumbBranch22:
4727 if ( fromSectionIndex != toSectionIndex )
4728 kind = DYLD_CACHE_ADJ_V2_THUMB_BR22;
4729 break;
4730 case ld::Fixup::kindSetTargetImageOffset:
4731 kind = DYLD_CACHE_ADJ_V2_IMAGE_OFF_32;
4732 accumulator = addressOf(state, fit, &target);
4733 assert(target != NULL);
4734 toSectionIndex = target->machoSection();
4735 toOffset = accumulator - state.atomToSection[target]->address;
4736 hadSubtract = true;
4737 break;
4738 default:
4739 break;
// at the end of each cluster, record the entry if it was classified and
// targets something in this linkage unit (proxies are fixed up by dyld binding)
4741 if ( fit->lastInCluster() ) {
4742 if ( (kind != 0) && (target != NULL) && (target->definition() != ld::Atom::definitionProxy) ) {
4743 if ( !hadSubtract && addend )
4744 toOffset += addend;
4745 assert(toSectionIndex != 255);
4746 if (log) fprintf(stderr, "from (%d.%s + 0x%llX) to (%d.%s + 0x%llX), kind=%d, atomAddr=0x%llX, sectAddr=0x%llx\n",
4747 fromSectionIndex, sect->sectionName(), fromOffset, toSectionIndex, state.atomToSection[target]->sectionName(),
4748 toOffset, kind, atom->finalAddress(), sect->address);
4749 _splitSegV2Infos.push_back(SplitSegInfoV2Entry(fromSectionIndex, fromOffset, toSectionIndex, toOffset, kind));
// Write the -map file (if requested via -map path): output path/arch header,
// a numbered table of input object files, a table of output sections, and one
// line per atom mapping its final address and size back to its source file.
// Failure to open the map file is a warning, not an error.
// NOTE(review): this listing came from an extraction that embedded the original
// line numbers and dropped blank/brace-only lines, so the text below is not
// brace-balanced as shown; code tokens are preserved verbatim.
4758 void OutputFile::writeMapFile(ld::Internal& state)
4760 if ( _options.generatedMapPath() != NULL ) {
4761 FILE* mapFile = fopen(_options.generatedMapPath(), "w");
4762 if ( mapFile != NULL ) {
4763 // write output path
4764 fprintf(mapFile, "# Path: %s\n", _options.outputFilePath());
4765 // write output architecure
4766 fprintf(mapFile, "# Arch: %s\n", _options.architectureName());
4767 // write UUID
4768 //if ( fUUIDAtom != NULL ) {
4769 // const uint8_t* uuid = fUUIDAtom->getUUID();
4770 // fprintf(mapFile, "# UUID: %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X \n",
4771 // uuid[0], uuid[1], uuid[2], uuid[3], uuid[4], uuid[5], uuid[6], uuid[7],
4772 // uuid[8], uuid[9], uuid[10], uuid[11], uuid[12], uuid[13], uuid[14], uuid[15]);
4774 // write table of object files
// ordinalToReader gives a deterministic, command-line ordering of input files;
// readerToFileOrdinal maps each file to its printed index
4775 std::map<const ld::File*, ld::File::Ordinal> readerToOrdinal;
4776 std::map<ld::File::Ordinal, const ld::File*> ordinalToReader;
4777 std::map<const ld::File*, uint32_t> readerToFileOrdinal;
4778 for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
4779 ld::Internal::FinalSection* sect = *sit;
4780 if ( sect->isSectionHidden() )
4781 continue;
4782 for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
4783 const ld::Atom* atom = *ait;
4784 const ld::File* reader = atom->file();
// linker-synthesized atoms have no source file; they print as ordinal 0 below
4785 if ( reader == NULL )
4786 continue;
4787 ld::File::Ordinal readerOrdinal = reader->ordinal();
4788 std::map<const ld::File*, ld::File::Ordinal>::iterator pos = readerToOrdinal.find(reader);
4789 if ( pos == readerToOrdinal.end() ) {
4790 readerToOrdinal[reader] = readerOrdinal;
4791 ordinalToReader[readerOrdinal] = reader;
4795 fprintf(mapFile, "# Object files:\n");
4796 fprintf(mapFile, "[%3u] %s\n", 0, "linker synthesized");
4797 uint32_t fileIndex = 1;
4798 for(std::map<ld::File::Ordinal, const ld::File*>::iterator it = ordinalToReader.begin(); it != ordinalToReader.end(); ++it) {
4799 fprintf(mapFile, "[%3u] %s\n", fileIndex, it->second->path());
4800 readerToFileOrdinal[it->second] = fileIndex++;
4802 // write table of sections
4803 fprintf(mapFile, "# Sections:\n");
4804 fprintf(mapFile, "# Address\tSize \tSegment\tSection\n");
4805 for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
4806 ld::Internal::FinalSection* sect = *sit;
4807 if ( sect->isSectionHidden() )
4808 continue;
4809 fprintf(mapFile, "0x%08llX\t0x%08llX\t%s\t%s\n", sect->address, sect->size,
4810 sect->segmentName(), sect->sectionName());
4812 // write table of symbols
4813 fprintf(mapFile, "# Symbols:\n");
4814 fprintf(mapFile, "# Address\tSize \tFile Name\n");
4815 for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
4816 ld::Internal::FinalSection* sect = *sit;
4817 if ( sect->isSectionHidden() )
4818 continue;
4819 //bool isCstring = (sect->type() == ld::Section::typeCString);
4820 for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
// buffer holds a synthesized display name for anonymous atoms
// (literal strings, FDEs, non-lazy pointers); strlcat bounds all appends
4821 char buffer[4096];
4822 const ld::Atom* atom = *ait;
4823 const char* name = atom->name();
4824 // don't add auto-stripped aliases to .map file
4825 if ( (atom->size() == 0) && (atom->symbolTableInclusion() == ld::Atom::symbolTableNotInFinalLinkedImages) )
4826 continue;
4827 if ( atom->contentType() == ld::Atom::typeCString ) {
4828 strcpy(buffer, "literal string: ");
4829 strlcat(buffer, (char*)atom->rawContentPointer(), 4096);
4830 name = buffer;
4832 else if ( (atom->contentType() == ld::Atom::typeCFI) && (strcmp(name, "FDE") == 0) ) {
// label an FDE with the code atom it describes (first fixup of a 4-part cluster)
4833 for (ld::Fixup::iterator fit = atom->fixupsBegin(); fit != atom->fixupsEnd(); ++fit) {
4834 if ( (fit->kind == ld::Fixup::kindSetTargetAddress) && (fit->clusterSize == ld::Fixup::k1of4) ) {
4835 if ( (fit->binding == ld::Fixup::bindingDirectlyBound)
4836 && (fit->u.target->section().type() == ld::Section::typeCode) ) {
4837 strcpy(buffer, "FDE for: ");
4838 strlcat(buffer, fit->u.target->name(), 4096);
4839 name = buffer;
4844 else if ( atom->contentType() == ld::Atom::typeNonLazyPointer ) {
4845 strcpy(buffer, "non-lazy-pointer");
4846 for (ld::Fixup::iterator fit = atom->fixupsBegin(); fit != atom->fixupsEnd(); ++fit) {
4847 if ( fit->binding == ld::Fixup::bindingsIndirectlyBound ) {
4848 strcpy(buffer, "non-lazy-pointer-to: ");
4849 strlcat(buffer, state.indirectBindingTable[fit->u.bindingIndex]->name(), 4096);
4850 break;
4852 else if ( fit->binding == ld::Fixup::bindingDirectlyBound ) {
4853 strcpy(buffer, "non-lazy-pointer-to-local: ");
4854 strlcat(buffer, fit->u.target->name(), 4096);
4855 break;
4858 name = buffer;
4860 fprintf(mapFile, "0x%08llX\t0x%08llX\t[%3u] %s\n", atom->finalAddress(), atom->size(),
4861 readerToFileOrdinal[atom->file()], name);
4864 fclose(mapFile);
4866 else {
4867 warning("could not write map file: %s\n", _options.generatedMapPath());
4872 // used to sort atoms with debug notes
4873 class DebugNoteSorter
4875 public:
4876 bool operator()(const ld::Atom* left, const ld::Atom* right) const
4878 // first sort by reader
4879 ld::File::Ordinal leftFileOrdinal = left->file()->ordinal();
4880 ld::File::Ordinal rightFileOrdinal = right->file()->ordinal();
4881 if ( leftFileOrdinal!= rightFileOrdinal)
4882 return (leftFileOrdinal < rightFileOrdinal);
4884 // then sort by atom objectAddress
4885 uint64_t leftAddr = left->finalAddress();
4886 uint64_t rightAddr = right->finalAddress();
4887 return leftAddr < rightAddr;
4892 const char* OutputFile::assureFullPath(const char* path)
4894 if ( path[0] == '/' )
4895 return path;
4896 char cwdbuff[MAXPATHLEN];
4897 if ( getcwd(cwdbuff, MAXPATHLEN) != NULL ) {
4898 char* result;
4899 asprintf(&result, "%s/%s", cwdbuff, path);
4900 if ( result != NULL )
4901 return result;
4903 return path;
// Returns the last-modification time of 'path', or 0 if the file
// cannot be stat()ed.
static time_t fileModTime(const char* path) {
	struct stat info;
	if ( stat(path, &info) != 0 )
		return 0;
	return info.st_mtime;
}
// Synthesizes STABS "debug notes" for the output symbol table.
// For atoms from DWARF object files it emits SO/OSO pairs per translation
// unit plus BNSYM/FUN/ENSYM (code) or STSYM/GSYM (data) entries, and SOL
// entries for included source files; it also emits one N_AST entry per
// -add_ast_path argument, and copies pre-existing stabs from objects built
// with stabs debug info.  All entries are appended to state.stabs.
4915 void OutputFile::synthesizeDebugNotes(ld::Internal& state)
4917 // -S means don't synthesize debug map
4918 if ( _options.debugInfoStripping() == Options::kDebugInfoNone )
4919 return;
4920 // make a vector of atoms that come from files compiled with dwarf debug info
4921 std::vector<const ld::Atom*> atomsNeedingDebugNotes;
4922 std::set<const ld::Atom*> atomsWithStabs;
4923 atomsNeedingDebugNotes.reserve(1024);
4924 const ld::relocatable::File* objFile = NULL;
4925 bool objFileHasDwarf = false;
4926 bool objFileHasStabs = false;
4927 for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
4928 ld::Internal::FinalSection* sect = *sit;
4929 for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
4930 const ld::Atom* atom = *ait;
4931 // no stabs for atoms that would not be in the symbol table
4932 if ( atom->symbolTableInclusion() == ld::Atom::symbolTableNotIn )
4933 continue;
4934 if ( atom->symbolTableInclusion() == ld::Atom::symbolTableNotInFinalLinkedImages )
4935 continue;
4936 if ( atom->symbolTableInclusion() == ld::Atom::symbolTableInWithRandomAutoStripLabel )
4937 continue;
4938 // no stabs for absolute symbols
4939 if ( atom->definition() == ld::Atom::definitionAbsolute )
4940 continue;
4941 // no stabs for .eh atoms
4942 if ( atom->contentType() == ld::Atom::typeCFI )
4943 continue;
4944 // no stabs for string literal atoms
4945 if ( atom->contentType() == ld::Atom::typeCString )
4946 continue;
4947 // no stabs for kernel dtrace probes
4948 if ( (_options.outputKind() == Options::kStaticExecutable) && (strncmp(atom->name(), "__dtrace_probe$", 15) == 0) )
4949 continue;
4950 const ld::File* file = atom->file();
4951 if ( file != NULL ) {
// the dwarf/stabs flags are cached per file; recompute only when the
// owning file changes (atoms from the same file are usually contiguous)
4952 if ( file != objFile ) {
4953 objFileHasDwarf = false;
4954 objFileHasStabs = false;
4955 objFile = dynamic_cast<const ld::relocatable::File*>(file);
4956 if ( objFile != NULL ) {
4957 switch ( objFile->debugInfo() ) {
4958 case ld::relocatable::File::kDebugInfoNone:
4959 break;
4960 case ld::relocatable::File::kDebugInfoDwarf:
4961 objFileHasDwarf = true;
4962 break;
4963 case ld::relocatable::File::kDebugInfoStabs:
4964 case ld::relocatable::File::kDebugInfoStabsUUID:
4965 objFileHasStabs = true;
4966 break;
4970 if ( objFileHasDwarf )
4971 atomsNeedingDebugNotes.push_back(atom);
4972 if ( objFileHasStabs )
4973 atomsWithStabs.insert(atom);
// NOTE(review): comparator actually orders by file ordinal then final
// address (see DebugNoteSorter), not atom ordinal
4978 // sort by file ordinal then atom ordinal
4979 std::sort(atomsNeedingDebugNotes.begin(), atomsNeedingDebugNotes.end(), DebugNoteSorter());
4981 // <rdar://problem/17689030> Add -add_ast_path option to linker which add N_AST stab entry to output
4982 const std::vector<const char*>& astPaths = _options.astFilePaths();
4983 for (std::vector<const char*>::const_iterator it=astPaths.begin(); it != astPaths.end(); it++) {
4984 const char* path = *it;
4985 // emit N_AST
4986 ld::relocatable::File::Stab astStab;
4987 astStab.atom = NULL;
4988 astStab.type = N_AST;
4989 astStab.other = 0;
4990 astStab.desc = 0;
// value field carries the AST file's modification time (0 if unstattable)
4991 astStab.value = fileModTime(path);
4992 astStab.string = path;
4993 state.stabs.push_back(astStab);
4996 // synthesize "debug notes" and add them to master stabs vector
4997 const char* dirPath = NULL;
4998 const char* filename = NULL;
4999 bool wroteStartSO = false;
5000 state.stabs.reserve(atomsNeedingDebugNotes.size()*4);
5001 std::unordered_set<const char*, CStringHash, CStringEquals> seenFiles;
5002 for (std::vector<const ld::Atom*>::iterator it=atomsNeedingDebugNotes.begin(); it != atomsNeedingDebugNotes.end(); it++) {
5003 const ld::Atom* atom = *it;
5004 const ld::File* atomFile = atom->file();
5005 const ld::relocatable::File* atomObjFile = dynamic_cast<const ld::relocatable::File*>(atomFile);
5006 //fprintf(stderr, "debug note for %s\n", atom->name());
5007 const char* newPath = atom->translationUnitSource();
5008 if ( newPath != NULL ) {
5009 const char* newDirPath;
5010 const char* newFilename;
5011 const char* lastSlash = strrchr(newPath, '/');
// skip atoms whose source path has no directory component
5012 if ( lastSlash == NULL )
5013 continue;
5014 newFilename = lastSlash+1;
// strdup'd copy is never freed here: the SO stab below keeps a
// pointer into it, which must stay valid until the output is written
5015 char* temp = strdup(newPath);
5016 newDirPath = temp;
5017 // gdb like directory SO's to end in '/', but dwarf DW_AT_comp_dir usually does not have trailing '/'
5018 temp[lastSlash-newPath+1] = '\0';
5019 // need SO's whenever the translation unit source file changes
5020 if ( (filename == NULL) || (strcmp(newFilename,filename) != 0) || (strcmp(newDirPath,dirPath) != 0)) {
5021 if ( filename != NULL ) {
5022 // translation unit change, emit ending SO
5023 ld::relocatable::File::Stab endFileStab;
5024 endFileStab.atom = NULL;
5025 endFileStab.type = N_SO;
5026 endFileStab.other = 1;
5027 endFileStab.desc = 0;
5028 endFileStab.value = 0;
5029 endFileStab.string = "";
5030 state.stabs.push_back(endFileStab);
5032 // new translation unit, emit start SO's
5033 ld::relocatable::File::Stab dirPathStab;
5034 dirPathStab.atom = NULL;
5035 dirPathStab.type = N_SO;
5036 dirPathStab.other = 0;
5037 dirPathStab.desc = 0;
5038 dirPathStab.value = 0;
5039 dirPathStab.string = newDirPath;
5040 state.stabs.push_back(dirPathStab);
5041 ld::relocatable::File::Stab fileStab;
5042 fileStab.atom = NULL;
5043 fileStab.type = N_SO;
5044 fileStab.other = 0;
5045 fileStab.desc = 0;
5046 fileStab.value = 0;
5047 fileStab.string = newFilename;
5048 state.stabs.push_back(fileStab);
5049 // Synthesize OSO for start of file
5050 ld::relocatable::File::Stab objStab;
5051 objStab.atom = NULL;
5052 objStab.type = N_OSO;
5053 // <rdar://problem/6337329> linker should put cpusubtype in n_sect field of nlist entry for N_OSO debug note entries
5054 objStab.other = atomFile->cpuSubType();
5055 objStab.desc = 1;
// prefer the relocatable file's own debug-info path/mod-time when available
5056 if ( atomObjFile != NULL ) {
5057 objStab.string = assureFullPath(atomObjFile->debugInfoPath());
5058 objStab.value = atomObjFile->debugInfoModificationTime();
5060 else {
5061 objStab.string = assureFullPath(atomFile->path());
5062 objStab.value = atomFile->modificationTime();
5064 state.stabs.push_back(objStab);
5065 wroteStartSO = true;
5066 // add the source file path to seenFiles so it does not show up in SOLs
5067 seenFiles.insert(newFilename);
5068 char* fullFilePath;
// fullFilePath is retained by seenFiles for the rest of the link; not freed
5069 asprintf(&fullFilePath, "%s%s", newDirPath, newFilename);
5070 // add both leaf path and full path
5071 seenFiles.insert(fullFilePath);
5073 filename = newFilename;
5074 dirPath = newDirPath;
5075 if ( atom->section().type() == ld::Section::typeCode ) {
5076 // Synthesize BNSYM and start FUN stabs
5077 ld::relocatable::File::Stab beginSym;
5078 beginSym.atom = atom;
5079 beginSym.type = N_BNSYM;
5080 beginSym.other = 1;
5081 beginSym.desc = 0;
5082 beginSym.value = 0;
5083 beginSym.string = "";
5084 state.stabs.push_back(beginSym);
5085 ld::relocatable::File::Stab startFun;
5086 startFun.atom = atom;
5087 startFun.type = N_FUN;
5088 startFun.other = 1;
5089 startFun.desc = 0;
5090 startFun.value = 0;
5091 startFun.string = atom->name();
5092 state.stabs.push_back(startFun);
5093 // Synthesize any SOL stabs needed
// one N_SOL per distinct file appearing in the atom's line info,
// skipping files already recorded in seenFiles
5094 const char* curFile = NULL;
5095 for (ld::Atom::LineInfo::iterator lit = atom->beginLineInfo(); lit != atom->endLineInfo(); ++lit) {
5096 if ( lit->fileName != curFile ) {
5097 if ( seenFiles.count(lit->fileName) == 0 ) {
5098 seenFiles.insert(lit->fileName);
5099 ld::relocatable::File::Stab sol;
5100 sol.atom = 0;
5101 sol.type = N_SOL;
5102 sol.other = 0;
5103 sol.desc = 0;
5104 sol.value = 0;
5105 sol.string = lit->fileName;
5106 state.stabs.push_back(sol);
5108 curFile = lit->fileName;
5111 // Synthesize end FUN and ENSYM stabs
5112 ld::relocatable::File::Stab endFun;
5113 endFun.atom = atom;
5114 endFun.type = N_FUN;
5115 endFun.other = 0;
5116 endFun.desc = 0;
5117 endFun.value = 0;
5118 endFun.string = "";
5119 state.stabs.push_back(endFun);
5120 ld::relocatable::File::Stab endSym;
5121 endSym.atom = atom;
5122 endSym.type = N_ENSYM;
5123 endSym.other = 1;
5124 endSym.desc = 0;
5125 endSym.value = 0;
5126 endSym.string = "";
5127 state.stabs.push_back(endSym);
// non-code atoms get a single data stab instead of the FUN bracket
5129 else {
5130 ld::relocatable::File::Stab globalsStab;
5131 const char* name = atom->name();
5132 if ( atom->scope() == ld::Atom::scopeTranslationUnit ) {
5133 // Synthesize STSYM stab for statics
5134 globalsStab.atom = atom;
5135 globalsStab.type = N_STSYM;
5136 globalsStab.other = 1;
5137 globalsStab.desc = 0;
5138 globalsStab.value = 0;
5139 globalsStab.string = name;
5140 state.stabs.push_back(globalsStab);
5142 else {
5143 // Synthesize GSYM stab for other globals
5144 globalsStab.atom = atom;
5145 globalsStab.type = N_GSYM;
5146 globalsStab.other = 1;
5147 globalsStab.desc = 0;
5148 globalsStab.value = 0;
5149 globalsStab.string = name;
5150 state.stabs.push_back(globalsStab);
// close the last translation unit, if any start SO was ever written
5156 if ( wroteStartSO ) {
5157 // emit ending SO
5158 ld::relocatable::File::Stab endFileStab;
5159 endFileStab.atom = NULL;
5160 endFileStab.type = N_SO;
5161 endFileStab.other = 1;
5162 endFileStab.desc = 0;
5163 endFileStab.value = 0;
5164 endFileStab.string = "";
5165 state.stabs.push_back(endFileStab);
5168 // copy any stabs from .o file
// each stabs-built object file is processed once, keyed by file pointer
5169 std::set<const ld::File*> filesSeenWithStabs;
5170 for (std::set<const ld::Atom*>::iterator it=atomsWithStabs.begin(); it != atomsWithStabs.end(); it++) {
5171 const ld::Atom* atom = *it;
5172 objFile = dynamic_cast<const ld::relocatable::File*>(atom->file());
5173 if ( objFile != NULL ) {
5174 if ( filesSeenWithStabs.count(objFile) == 0 ) {
5175 filesSeenWithStabs.insert(objFile);
5176 const std::vector<ld::relocatable::File::Stab>* stabs = objFile->stabs();
5177 if ( stabs != NULL ) {
5178 for(std::vector<ld::relocatable::File::Stab>::const_iterator sit = stabs->begin(); sit != stabs->end(); ++sit) {
5179 ld::relocatable::File::Stab stab = *sit;
5180 // ignore stabs associated with atoms that were dead stripped or coalesced away
5181 if ( (sit->atom != NULL) && (atomsWithStabs.count(sit->atom) == 0) )
5182 continue;
5183 // <rdar://problem/8284718> Value of N_SO stabs should be address of first atom from translation unit
5184 if ( (stab.type == N_SO) && (stab.string != NULL) && (stab.string[0] != '\0') ) {
5185 stab.atom = atom;
5187 state.stabs.push_back(stab);
5197 } // namespace tool
5198 } // namespace ld