Merge pull request #8 from biergaizi/upstream
[darwin-xtools.git] / ld64 / src / ld / OutputFile.cpp
blobf9e5cf076228cc2b9bc9e9b50e30abbff3819ac8
1 /* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-*
3 * Copyright (c) 2009-2011 Apple Inc. All rights reserved.
5 * @APPLE_LICENSE_HEADER_START@
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. Please obtain a copy of the License at
11 * http://www.opensource.apple.com/apsl/ and read it before using this
12 * file.
14 * The Original Code and all software distributed under the License are
15 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
16 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
17 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
19 * Please see the License for the specific language governing rights and
20 * limitations under the License.
22 * @APPLE_LICENSE_HEADER_END@
26 #include <stdlib.h>
27 #define __STDC_FORMAT_MACROS
28 #include <inttypes.h>
29 #undef __STDC_FORMAT_MACROS
30 #include <sys/types.h>
31 #include <sys/stat.h>
32 #include <sys/mman.h>
33 #include <sys/sysctl.h>
34 #include <sys/param.h>
35 #include <sys/mount.h>
36 #include <fcntl.h>
37 #include <errno.h>
38 #include <limits.h>
39 #include <unistd.h>
40 #include <mach/mach_time.h>
41 #include <mach/vm_statistics.h>
42 #include <mach/mach_init.h>
43 #include <mach/mach_host.h>
44 #include <uuid/uuid.h>
45 #include <dlfcn.h>
46 #include <mach-o/dyld.h>
47 #include <mach-o/fat.h>
49 #include <string>
50 #include <map>
51 #include <set>
52 #include <string>
53 #include <vector>
54 #include <list>
55 #include <algorithm>
56 #include <unordered_set>
57 #include <utility>
58 #include <iostream>
59 #include <fstream>
61 #include <CommonCrypto/CommonDigest.h>
62 #include <AvailabilityMacros.h>
64 #include "MachOTrie.hpp"
66 #include "Options.h"
68 #include "OutputFile.h"
69 #include "Architectures.hpp"
70 #include "HeaderAndLoadCommands.hpp"
71 #include "LinkEdit.hpp"
72 #include "LinkEditClassic.hpp"
namespace ld {
namespace tool {

// Global counters, presumably tallying the ARM64 ADRP optimization pass
// (not-applicable / converted-to-NOP / left-alone) — confirm against the
// LOH processing code later in this file before relying on the meanings.
uint32_t sAdrpNA = 0;
uint32_t sAdrpNoped = 0;
uint32_t sAdrpNotNoped = 0;
// Constructor: decides up front which LINKEDIT pieces this output will carry
// (driven by the output kind and command-line options) and zero-initializes
// every lazily-created section pointer and linkedit atom.
OutputFile::OutputFile(const Options& opts)
	:
		usesWeakExternalSymbols(false), overridesWeakExternalSymbols(false),
		_noReExportedDylibs(false), pieDisabled(false), hasDataInCode(false),
		// final-section pointers, filled in by addLoadCommands()/addLinkEdit()
		headerAndLoadCommandsSection(NULL),
		rebaseSection(NULL), bindingSection(NULL), weakBindingSection(NULL),
		lazyBindingSection(NULL), exportSection(NULL),
		splitSegInfoSection(NULL), functionStartsSection(NULL),
		dataInCodeSection(NULL), optimizationHintsSection(NULL),
		symbolTableSection(NULL), stringPoolSection(NULL),
		localRelocationsSection(NULL), externalRelocationsSection(NULL),
		sectionRelocationsSection(NULL),
		indirectSymbolTableSection(NULL),
		_options(opts),
		// which linkedit flavors the output needs: compressed dyld info is
		// mutually exclusive with classic local/external relocations
		_hasDyldInfo(opts.makeCompressedDyldInfo()),
		_hasSymbolTable(true),
		_hasSectionRelocations(opts.outputKind() == Options::kObjectFile),
		_hasSplitSegInfo(opts.sharedRegionEligible()),
		_hasFunctionStartsInfo(opts.addFunctionStarts()),
		_hasDataInCodeInfo(opts.addDataInCodeInfo()),
		_hasDynamicSymbolTable(true),
		_hasLocalRelocations(!opts.makeCompressedDyldInfo()),
		_hasExternalRelocations(!opts.makeCompressedDyldInfo()),
		// linker optimization hints are only preserved in .o output
		_hasOptimizationHints(opts.outputKind() == Options::kObjectFile),
		_encryptedTEXTstartOffset(0),
		_encryptedTEXTendOffset(0),
		// symbol-table partition bookkeeping (locals / globals / imports)
		_localSymbolsStartIndex(0),
		_localSymbolsCount(0),
		_globalSymbolsStartIndex(0),
		_globalSymbolsCount(0),
		_importSymbolsStartIndex(0),
		_importSymbolsCount(0),
		// linkedit content atoms, created later by addLinkEdit()
		_sectionsRelocationsAtom(NULL),
		_localRelocsAtom(NULL),
		_externalRelocsAtom(NULL),
		_symbolTableAtom(NULL),
		_indirectSymbolTableAtom(NULL),
		_rebasingInfoAtom(NULL),
		_bindingInfoAtom(NULL),
		_lazyBindingInfoAtom(NULL),
		_weakBindingInfoAtom(NULL),
		_exportInfoAtom(NULL),
		_splitSegInfoAtom(NULL),
		_functionStartsAtom(NULL),
		_dataInCodeAtom(NULL),
		_optimizationHintsAtom(NULL)
{
}
// Debug helper: prints every final section (address/size/alignment/fileOffset)
// to stderr, optionally each atom within it, then the install paths of all
// dylibs being linked against.  Called (commented out) from write().
void OutputFile::dumpAtomsBySection(ld::Internal& state, bool printAtoms)
{
	fprintf(stderr, "SORTED:\n");
	for (std::vector<ld::Internal::FinalSection*>::iterator it = state.sections.begin(); it != state.sections.end(); ++it) {
		fprintf(stderr, "final section %p %s/%s %s start addr=0x%08llX, size=0x%08llX, alignment=%02d, fileOffset=0x%08llX\n",
				(*it), (*it)->segmentName(), (*it)->sectionName(), (*it)->isSectionHidden() ? "(hidden)" : "",
				(*it)->address, (*it)->size, (*it)->alignment, (*it)->fileOffset);
		if ( printAtoms ) {
			std::vector<const ld::Atom*>& atoms = (*it)->atoms;
			for (std::vector<const ld::Atom*>::iterator ait = atoms.begin(); ait != atoms.end(); ++ait) {
				fprintf(stderr, " %p (0x%04llX) %s\n", *ait, (*ait)->size(), (*ait)->name());
			}
		}
	}
	fprintf(stderr, "DYLIBS:\n");
	for (std::vector<ld::dylib::File*>::iterator it=state.dylibs.begin(); it != state.dylibs.end(); ++it )
		fprintf(stderr, " %s\n", (*it)->installPath());
}
// Top-level driver that turns the linked state into an on-disk file.
// The ordering below is significant: load commands and linkedit sections
// must be created before sizes/addresses are assigned, the symbol table
// must be built before linkedit info is generated, and all linkedit
// content must exist before updateLINKEDITAddresses() lays it out.
void OutputFile::write(ld::Internal& state)
{
	this->buildDylibOrdinalMapping(state);
	this->addLoadCommands(state);
	this->addLinkEdit(state);
	state.setSectionSizesAndAlignments();
	this->setLoadCommandsPadding(state);
	_fileSize = state.assignFileOffsets();
	this->assignAtomAddresses(state);
	this->synthesizeDebugNotes(state);
	this->buildSymbolTable(state);
	this->generateLinkEditInfo(state);
	// split-seg info has two encodings; v2 is selected by the options
	if ( _options.sharedRegionEncodingV2() )
		this->makeSplitSegInfoV2(state);
	else
		this->makeSplitSegInfo(state);
	this->updateLINKEDITAddresses(state);
	//this->dumpAtomsBySection(state, false);
	this->writeOutputFile(state);
	this->writeMapFile(state);
	this->writeJSONEntry(state);
}
// Finds the segment containing 'addr' by scanning final sections in layout
// order; sections sharing a segmentName() form one segment.  On success sets
// *start/*end to the segment's address range and *index to its ordinal.
// NOTE(review): the range test only runs when the NEXT segment begins, so an
// address inside the last segment in state.sections is never matched and the
// function returns false for it — confirm callers tolerate this.
bool OutputFile::findSegment(ld::Internal& state, uint64_t addr, uint64_t* start, uint64_t* end, uint32_t* index)
{
	uint32_t segIndex = 0;
	ld::Internal::FinalSection* segFirstSection = NULL;
	ld::Internal::FinalSection* lastSection = NULL;
	for (std::vector<ld::Internal::FinalSection*>::iterator it = state.sections.begin(); it != state.sections.end(); ++it) {
		ld::Internal::FinalSection* sect = *it;
		// segment boundary: first section ever, or segment name changed
		if ( (segFirstSection == NULL ) || strcmp(segFirstSection->segmentName(), sect->segmentName()) != 0 ) {
			if ( segFirstSection != NULL ) {
				//fprintf(stderr, "findSegment(0x%llX) seg changed to %s\n", addr, sect->segmentName());
				// check the segment that just ended
				if ( (addr >= segFirstSection->address) && (addr < lastSection->address+lastSection->size) ) {
					*start = segFirstSection->address;
					*end = lastSection->address+lastSection->size;
					*index = segIndex;
					return true;
				}
				++segIndex;
			}
			segFirstSection = sect;
		}
		lastSection = sect;
	}
	return false;
}
// Propagates each final section's base address down to its atoms so that
// atom->finalAddress() (sectionStartAddress + sectionOffset) becomes valid.
// Proxy and absolute-symbol sections get a zero base (their atoms' offsets
// already carry the full value); linkedit sections are laid out later in
// updateLINKEDITAddresses().
void OutputFile::assignAtomAddresses(ld::Internal& state)
{
	const bool log = false;
	if ( log ) fprintf(stderr, "assignAtomAddresses()\n");
	for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		if ( log ) fprintf(stderr, " section=%s/%s\n", sect->segmentName(), sect->sectionName());
		for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
			const ld::Atom* atom = *ait;
			switch ( sect->type() ) {
				case ld::Section::typeImportProxies:
					// want finalAddress() of all proxy atoms to be zero
					(const_cast<ld::Atom*>(atom))->setSectionStartAddress(0);
					break;
				case ld::Section::typeAbsoluteSymbols:
					// want finalAddress() of all absolute atoms to be value of abs symbol
					(const_cast<ld::Atom*>(atom))->setSectionStartAddress(0);
					break;
				case ld::Section::typeLinkEdit:
					// linkedit layout is assigned later
					break;
				default:
					(const_cast<ld::Atom*>(atom))->setSectionStartAddress(sect->address);
					if ( log ) fprintf(stderr, " atom=%p, addr=0x%08llX, name=%s\n", atom, atom->finalAddress(), atom->name());
					break;
			}
		}
	}
}
// Generates (encodes) every linkedit payload this output needs, then performs
// the final layout of the linkedit sections: because linkedit content sizes
// are only known after encode(), addresses and file offsets for these
// sections are assigned here, after all other sections are already placed.
// Finally recomputes _fileSize from the (last) section's end.
void OutputFile::updateLINKEDITAddresses(ld::Internal& state)
{
	if ( _options.makeCompressedDyldInfo() ) {
		// build dylb rebasing info
		assert(_rebasingInfoAtom != NULL);
		_rebasingInfoAtom->encode();

		// build dyld binding info
		assert(_bindingInfoAtom != NULL);
		_bindingInfoAtom->encode();

		// build dyld lazy binding info
		assert(_lazyBindingInfoAtom != NULL);
		_lazyBindingInfoAtom->encode();

		// build dyld weak binding info
		assert(_weakBindingInfoAtom != NULL);
		_weakBindingInfoAtom->encode();

		// build dyld export info
		assert(_exportInfoAtom != NULL);
		_exportInfoAtom->encode();
	}

	if ( _options.sharedRegionEligible() ) {
		// build split seg info
		assert(_splitSegInfoAtom != NULL);
		_splitSegInfoAtom->encode();
	}

	if ( _options.addFunctionStarts() ) {
		// build function starts info
		assert(_functionStartsAtom != NULL);
		_functionStartsAtom->encode();
	}

	if ( _options.addDataInCodeInfo() ) {
		// build data-in-code info
		assert(_dataInCodeAtom != NULL);
		_dataInCodeAtom->encode();
	}

	if ( _hasOptimizationHints ) {
		// build linker-optimization-hint info
		assert(_optimizationHintsAtom != NULL);
		_optimizationHintsAtom->encode();
	}

	// build classic symbol table
	assert(_symbolTableAtom != NULL);
	_symbolTableAtom->encode();
	assert(_indirectSymbolTableAtom != NULL);
	_indirectSymbolTableAtom->encode();

	// add relocations to .o files
	if ( _options.outputKind() == Options::kObjectFile ) {
		assert(_sectionsRelocationsAtom != NULL);
		_sectionsRelocationsAtom->encode();
	}

	if ( ! _options.makeCompressedDyldInfo() ) {
		// build external relocations
		assert(_externalRelocsAtom != NULL);
		_externalRelocsAtom->encode();
		// build local relocations
		assert(_localRelocsAtom != NULL);
		_localRelocsAtom->encode();
	}

	// update address and file offsets now that linkedit content has been generated
	uint64_t curLinkEditAddress = 0;
	uint64_t curLinkEditfileOffset = 0;
	for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		if ( sect->type() != ld::Section::typeLinkEdit )
			continue;
		// seed the running address/offset from the first linkedit section's
		// position chosen by the earlier layout pass
		if ( curLinkEditAddress == 0 ) {
			curLinkEditAddress = sect->address;
			curLinkEditfileOffset = sect->fileOffset;
		}
		uint16_t maxAlignment = 0;
		uint64_t offset = 0;
		for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
			const ld::Atom* atom = *ait;
			//fprintf(stderr, "setting linkedit atom offset for %s\n", atom->name());
			if ( atom->alignment().powerOf2 > maxAlignment )
				maxAlignment = atom->alignment().powerOf2;
			// calculate section offset for this atom:
			// advance 'offset' until (offset % alignment) == required modulus
			uint64_t alignment = 1 << atom->alignment().powerOf2;
			uint64_t currentModulus = (offset % alignment);
			uint64_t requiredModulus = atom->alignment().modulus;
			if ( currentModulus != requiredModulus ) {
				if ( requiredModulus > currentModulus )
					offset += requiredModulus-currentModulus;
				else
					offset += requiredModulus+alignment-currentModulus;
			}
			(const_cast<ld::Atom*>(atom))->setSectionOffset(offset);
			(const_cast<ld::Atom*>(atom))->setSectionStartAddress(curLinkEditAddress);
			offset += atom->size();
		}
		sect->size = offset;
		// section alignment is that of a contained atom with the greatest alignment
		sect->alignment = maxAlignment;
		sect->address = curLinkEditAddress;
		sect->fileOffset = curLinkEditfileOffset;
		curLinkEditAddress += sect->size;
		curLinkEditfileOffset += sect->size;
	}

	_fileSize = state.sections.back()->fileOffset + state.sections.back()->size;
}
343 void OutputFile::setLoadCommandsPadding(ld::Internal& state)
345 // In other sections, any extra space is put and end of segment.
346 // In __TEXT segment, any extra space is put after load commands to allow post-processing of load commands
347 // Do a reverse layout of __TEXT segment to determine padding size and adjust section size
348 uint64_t paddingSize = 0;
349 switch ( _options.outputKind() ) {
350 case Options::kDyld:
351 // dyld itself has special padding requirements. We want the beginning __text section to start at a stable address
352 assert(strcmp(state.sections[1]->sectionName(),"__text") == 0);
353 state.sections[1]->alignment = 12; // page align __text
354 break;
355 case Options::kObjectFile:
356 // mach-o .o files need no padding between load commands and first section
357 // but leave enough room that the object file could be signed
358 paddingSize = 32;
359 break;
360 case Options::kPreload:
361 // mach-o MH_PRELOAD files need no padding between load commands and first section
362 paddingSize = 0;
363 case Options::kKextBundle:
364 if ( _options.useTextExecSegment() ) {
365 paddingSize = 32;
366 break;
368 // else fall into default case
369 default:
370 // work backwards from end of segment and lay out sections so that extra room goes to padding atom
371 uint64_t addr = 0;
372 uint64_t textSegPageSize = _options.segPageSize("__TEXT");
373 if ( _options.sharedRegionEligible() && (_options.iOSVersionMin() >= ld::iOS_8_0) && (textSegPageSize == 0x4000) )
374 textSegPageSize = 0x1000;
375 for (std::vector<ld::Internal::FinalSection*>::reverse_iterator it = state.sections.rbegin(); it != state.sections.rend(); ++it) {
376 ld::Internal::FinalSection* sect = *it;
377 if ( strcmp(sect->segmentName(), "__TEXT") != 0 )
378 continue;
379 if ( sect == headerAndLoadCommandsSection ) {
380 addr -= headerAndLoadCommandsSection->size;
381 paddingSize = addr % textSegPageSize;
382 break;
384 addr -= sect->size;
385 addr = addr & (0 - (1 << sect->alignment));
388 // if command line requires more padding than this
389 uint32_t minPad = _options.minimumHeaderPad();
390 if ( _options.maxMminimumHeaderPad() ) {
391 // -headerpad_max_install_names means there should be room for every path load command to grow to 1204 bytes
392 uint32_t altMin = _dylibsToLoad.size() * MAXPATHLEN;
393 if ( _options.outputKind() == Options::kDynamicLibrary )
394 altMin += MAXPATHLEN;
395 if ( altMin > minPad )
396 minPad = altMin;
398 if ( paddingSize < minPad ) {
399 int extraPages = (minPad - paddingSize + _options.segmentAlignment() - 1)/_options.segmentAlignment();
400 paddingSize += extraPages * _options.segmentAlignment();
403 if ( _options.makeEncryptable() ) {
404 // load commands must be on a separate non-encrypted page
405 int loadCommandsPage = (headerAndLoadCommandsSection->size + minPad)/_options.segmentAlignment();
406 int textPage = (headerAndLoadCommandsSection->size + paddingSize)/_options.segmentAlignment();
407 if ( loadCommandsPage == textPage ) {
408 paddingSize += _options.segmentAlignment();
409 textPage += 1;
411 // remember start for later use by load command
412 _encryptedTEXTstartOffset = textPage*_options.segmentAlignment();
414 break;
416 // add padding to size of section
417 headerAndLoadCommandsSection->size += paddingSize;
421 uint64_t OutputFile::pageAlign(uint64_t addr)
423 const uint64_t alignment = _options.segmentAlignment();
424 return ((addr+alignment-1) & (-alignment));
427 uint64_t OutputFile::pageAlign(uint64_t addr, uint64_t pageSize)
429 return ((addr+pageSize-1) & (-pageSize));
432 static const char* makeName(const ld::Atom& atom)
434 static char buffer[4096];
435 switch ( atom.symbolTableInclusion() ) {
436 case ld::Atom::symbolTableNotIn:
437 case ld::Atom::symbolTableNotInFinalLinkedImages:
438 sprintf(buffer, "%s@0x%08llX", atom.name(), atom.objectAddress());
439 break;
440 case ld::Atom::symbolTableIn:
441 case ld::Atom::symbolTableInAndNeverStrip:
442 case ld::Atom::symbolTableInAsAbsolute:
443 case ld::Atom::symbolTableInWithRandomAutoStripLabel:
444 strlcpy(buffer, atom.name(), 4096);
445 break;
447 return buffer;
// Returns a printable name for the target of a fixup, resolving the fixup's
// binding kind first.  Unbound by-name fixups store the name directly in
// u.target, so it is returned as-is.
static const char* referenceTargetAtomName(ld::Internal& state, const ld::Fixup* ref)
{
	switch ( ref->binding ) {
		case ld::Fixup::bindingNone:
			return "NO BINDING";
		case ld::Fixup::bindingByNameUnbound:
			// u.target holds the symbol name, not an atom, in this state
			return (char*)(ref->u.target);
		case ld::Fixup::bindingByContentBound:
		case ld::Fixup::bindingDirectlyBound:
			return makeName(*((ld::Atom*)(ref->u.target)));
		case ld::Fixup::bindingsIndirectlyBound:
			return makeName(*state.indirectBindingTable[ref->u.bindingIndex]);
	}
	return "BAD BINDING";
}
// Returns whether the fixup's target atom is Thumb code (ARM only).
// Throws for binding kinds that cannot name a concrete target atom.
bool OutputFile::targetIsThumb(ld::Internal& state, const ld::Fixup* fixup)
{
	switch ( fixup->binding ) {
		case ld::Fixup::bindingByContentBound:
		case ld::Fixup::bindingDirectlyBound:
			return fixup->u.target->isThumb();
		case ld::Fixup::bindingsIndirectlyBound:
			return state.indirectBindingTable[fixup->u.bindingIndex]->isThumb();
		default:
			break;
	}
	throw "unexpected binding";
}
// Returns the final address of a fixup's target and stores the target atom
// in *target.  For classic (non-compressed-dyld-info) output, a fixup marked
// contentAddendOnly contributes 0 because the addend lives in the instruction
// content and the target address is supplied by the relocation at load time;
// in that case *target is left NULL.
uint64_t OutputFile::addressOf(const ld::Internal& state, const ld::Fixup* fixup, const ld::Atom** target)
{
	if ( !_options.makeCompressedDyldInfo() ) {
		// For external relocations the classic mach-o format
		// has addend only stored in the content. That means
		// that the address of the target is not used.
		*target = NULL;
		if ( fixup->contentAddendOnly )
			return 0;
	}
	switch ( fixup->binding ) {
		case ld::Fixup::bindingNone:
			throw "unexpected bindingNone";
		case ld::Fixup::bindingByNameUnbound:
			throw "unexpected bindingByNameUnbound";
		case ld::Fixup::bindingByContentBound:
		case ld::Fixup::bindingDirectlyBound:
			*target = fixup->u.target;
			return (*target)->finalAddress();
		case ld::Fixup::bindingsIndirectlyBound:
			*target = state.indirectBindingTable[fixup->u.bindingIndex];
#ifndef NDEBUG
			if ( ! (*target)->finalAddressMode() ) {
				throwf("reference to symbol (which has not been assigned an address) %s", (*target)->name());
			}
#endif
			return (*target)->finalAddress();
	}
	throw "unexpected binding";
}
// Like addressOf(), but always resolves the target atom and returns its
// final address — no special-casing of contentAddendOnly for classic output.
uint64_t OutputFile::addressAndTarget(const ld::Internal& state, const ld::Fixup* fixup, const ld::Atom** target)
{
	switch ( fixup->binding ) {
		case ld::Fixup::bindingNone:
			throw "unexpected bindingNone";
		case ld::Fixup::bindingByNameUnbound:
			throw "unexpected bindingByNameUnbound";
		case ld::Fixup::bindingByContentBound:
		case ld::Fixup::bindingDirectlyBound:
			*target = fixup->u.target;
			return (*target)->finalAddress();
		case ld::Fixup::bindingsIndirectlyBound:
			*target = state.indirectBindingTable[fixup->u.bindingIndex];
#ifndef NDEBUG
			if ( ! (*target)->finalAddressMode() ) {
				throwf("reference to symbol (which has not been assigned an address) %s", (*target)->name());
			}
#endif
			return (*target)->finalAddress();
	}
	throw "unexpected binding";
}
// Returns the target atom's offset within the final section whose address
// range contains it.  Throws if no section covers the target's address.
uint64_t OutputFile::sectionOffsetOf(const ld::Internal& state, const ld::Fixup* fixup)
{
	const ld::Atom* target = NULL;
	switch ( fixup->binding ) {
		case ld::Fixup::bindingNone:
			throw "unexpected bindingNone";
		case ld::Fixup::bindingByNameUnbound:
			throw "unexpected bindingByNameUnbound";
		case ld::Fixup::bindingByContentBound:
		case ld::Fixup::bindingDirectlyBound:
			target = fixup->u.target;
			break;
		case ld::Fixup::bindingsIndirectlyBound:
			target = state.indirectBindingTable[fixup->u.bindingIndex];
			break;
	}
	assert(target != NULL);

	// linear search over final sections for the one containing the address
	uint64_t targetAddress = target->finalAddress();
	for (std::vector<ld::Internal::FinalSection*>::const_iterator it = state.sections.begin(); it != state.sections.end(); ++it) {
		const ld::Internal::FinalSection* sect = *it;
		if ( (sect->address <= targetAddress) && (targetAddress < (sect->address+sect->size)) )
			return targetAddress - sect->address;
	}
	throw "section not found for section offset";
}
// Returns the target's offset within the thread-local-variable template.
// NOTE(review): the offset is computed against the FIRST TLV section found in
// layout order, whether or not the target lies inside it — this assumes the
// TLV initial-value and zero-fill sections are laid out contiguously starting
// at that section; confirm against the section-ordering code.
uint64_t OutputFile::tlvTemplateOffsetOf(const ld::Internal& state, const ld::Fixup* fixup)
{
	const ld::Atom* target = NULL;
	switch ( fixup->binding ) {
		case ld::Fixup::bindingNone:
			throw "unexpected bindingNone";
		case ld::Fixup::bindingByNameUnbound:
			throw "unexpected bindingByNameUnbound";
		case ld::Fixup::bindingByContentBound:
		case ld::Fixup::bindingDirectlyBound:
			target = fixup->u.target;
			break;
		case ld::Fixup::bindingsIndirectlyBound:
			target = state.indirectBindingTable[fixup->u.bindingIndex];
			break;
	}
	assert(target != NULL);

	for (std::vector<ld::Internal::FinalSection*>::const_iterator it = state.sections.begin(); it != state.sections.end(); ++it) {
		const ld::Internal::FinalSection* sect = *it;
		switch ( sect->type() ) {
			case ld::Section::typeTLVInitialValues:
			case ld::Section::typeTLVZeroFill:
				return target->finalAddress() - sect->address;
			default:
				break;
		}
	}
	throw "section not found for tlvTemplateOffsetOf";
}
// Diagnostic dump of the final (non-hidden) section layout to stderr;
// called by the rangeCheck* helpers just before throwing an out-of-range
// error so the user can see where everything landed.
void OutputFile::printSectionLayout(ld::Internal& state)
{
	// show layout of final image
	fprintf(stderr, "final section layout:\n");
	for (std::vector<ld::Internal::FinalSection*>::iterator it = state.sections.begin(); it != state.sections.end(); ++it) {
		if ( (*it)->isSectionHidden() )
			continue;
		fprintf(stderr, " %s/%s addr=0x%08llX, size=0x%08llX, fileOffset=0x%08llX, type=%d\n",
				(*it)->segmentName(), (*it)->sectionName(),
				(*it)->address, (*it)->size, (*it)->fileOffset, (*it)->type());
	}
}
// Throws if a displacement does not fit in a signed 8-bit field.
// 'target' is only written by addressOf() inside the throwf argument list.
void OutputFile::rangeCheck8(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
{
	if ( (displacement > 127) || (displacement < -128) ) {
		// show layout of final image
		printSectionLayout(state);

		const ld::Atom* target;
		throwf("8-bit reference out of range (%lld max is +/-127B): from %s (0x%08llX) to %s (0x%08llX)",
				displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
				addressOf(state, fixup, &target));
	}
}
// Throws if a displacement does not fit in a signed 16-bit field (+/-32KB).
void OutputFile::rangeCheck16(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
{
	const int64_t thirtyTwoKLimit = 0x00007FFF;
	if ( (displacement > thirtyTwoKLimit) || (displacement < (-thirtyTwoKLimit)) ) {
		// show layout of final image
		printSectionLayout(state);

		const ld::Atom* target;
		throwf("16-bit reference out of range (%lld max is +/-32KB): from %s (0x%08llX) to %s (0x%08llX)",
				displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
				addressOf(state, fixup, &target));
	}
}
// Throws if a branch displacement does not fit in a signed 32-bit field (+/-2GB).
void OutputFile::rangeCheckBranch32(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
{
	const int64_t twoGigLimit = 0x7FFFFFFF;
	if ( (displacement > twoGigLimit) || (displacement < (-twoGigLimit)) ) {
		// show layout of final image
		printSectionLayout(state);

		const ld::Atom* target;
		throwf("32-bit branch out of range (%lld max is +/-2GB): from %s (0x%08llX) to %s (0x%08llX)",
				displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
				addressOf(state, fixup, &target));
	}
}
// Throws if an absolute 32-bit address value exceeds 4GB.  On 32-bit archs
// (ARM, i386, PowerPC) the check is demoted to a warning for normal output
// kinds, because the assembler loses sign information (see rdar below) and a
// legitimate negative addend is indistinguishable from an overflow.
void OutputFile::rangeCheckAbsolute32(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
{
	const int64_t fourGigLimit = 0xFFFFFFFF;
	if ( displacement > fourGigLimit ) {
		// <rdar://problem/9610466> cannot enforce 32-bit range checks on 32-bit archs because assembler loses sign information
		//  .long _foo - 0xC0000000
		// is encoded in mach-o the same as:
		//  .long _foo + 0x40000000
		// so if _foo lays out to 0xC0000100, the first is ok, but the second is not.
		if ( _options.architecture() == CPU_TYPE_ARM ||
		     _options.architecture() == CPU_TYPE_I386 ||
		     _options.architecture() == CPU_TYPE_POWERPC) {
			// Unlikely userland code does funky stuff like this, so warn for them,
			// but not warn for -preload or -static
			if ( (_options.outputKind() != Options::kPreload)
			  && (_options.outputKind() != Options::kStaticExecutable)) {
				warning("32-bit absolute address out of range (0x%08llX max is 4GB): from %s + 0x%08X (0x%08llX) to 0x%08llX",
						displacement, atom->name(), fixup->offsetInAtom, atom->finalAddress(), displacement);
			}
			return;
		}
		// show layout of final image
		printSectionLayout(state);

		const ld::Atom* target;
		// bindingNone fixups have no nameable target, hence the two messages
		if ( fixup->binding == ld::Fixup::bindingNone )
			throwf("32-bit absolute address out of range (0x%08llX max is 4GB): from %s + 0x%08X (0x%08llX) to 0x%08llX",
					displacement, atom->name(), fixup->offsetInAtom, atom->finalAddress(), displacement);
		else
			throwf("32-bit absolute address out of range (0x%08llX max is 4GB): from %s + 0x%08X (0x%08llX) to %s (0x%08llX)",
					displacement, atom->name(), fixup->offsetInAtom, atom->finalAddress(), referenceTargetAtomName(state, fixup),
					addressOf(state, fixup, &target));
	}
}
687 void OutputFile::rangeCheckRIP32(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
689 const int64_t twoGigLimit = 0x7FFFFFFF;
690 if ( (displacement > twoGigLimit) || (displacement < (-twoGigLimit)) ) {
691 // show layout of final image
692 printSectionLayout(state);
694 const ld::Atom* target;
695 throwf("32-bit RIP relative reference out of range (%lld max is +/-4GB): from %s (0x%08llX) to %s (0x%08llX)",
696 displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
697 addressOf(state, fixup, &target));
// Throws if an ARM ldr/str 12-bit immediate displacement is out of range.
// The enforced bound is +/-4092 (the message rounds up to 4096).
void OutputFile::rangeCheckARM12(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
{
	if ( (displacement > 4092LL) || (displacement < (-4092LL)) ) {
		// show layout of final image
		printSectionLayout(state);

		const ld::Atom* target;
		throwf("ARM ldr 12-bit displacement out of range (%lld max is +/-4096B): from %s (0x%08llX) to %s (0x%08llX)",
				displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
				addressOf(state, fixup, &target));
	}
}
714 bool OutputFile::checkArmBranch24Displacement(int64_t displacement)
716 return ( (displacement < 33554428LL) && (displacement > (-33554432LL)) );
// Throws (after dumping the layout) if an ARM b/bl/blx branch displacement
// exceeds the 24-bit reach checked by checkArmBranch24Displacement().
void OutputFile::rangeCheckARMBranch24(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
{
	if ( checkArmBranch24Displacement(displacement) )
		return;

	// show layout of final image
	printSectionLayout(state);

	const ld::Atom* target;
	throwf("b/bl/blx ARM branch out of range (%lld max is +/-32MB): from %s (0x%08llX) to %s (0x%08llX)",
			displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
			addressOf(state, fixup, &target));
}
// Range predicate for Thumb b/bl/blx: Thumb2-capable targets get +/-16MB,
// Thumb1-only targets get +/-4MB.
bool OutputFile::checkThumbBranch22Displacement(int64_t displacement)
{
	// thumb2 supports +/- 16MB displacement
	if ( _options.preferSubArchitecture() && _options.archSupportsThumb2() ) {
		if ( (displacement > 16777214LL) || (displacement < (-16777216LL)) ) {
			return false;
		}
	}
	else {
		// thumb1 supports +/- 4MB displacement
		if ( (displacement > 4194302LL) || (displacement < (-4194304LL)) ) {
			return false;
		}
	}
	return true;
}
// Throws (after dumping the layout) if a Thumb branch displacement exceeds
// the reach checked by checkThumbBranch22Displacement(); the message names
// thumb2 vs thumb1 to match whichever limit applied.
void OutputFile::rangeCheckThumbBranch22(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
{
	if ( checkThumbBranch22Displacement(displacement) )
		return;

	// show layout of final image
	printSectionLayout(state);

	const ld::Atom* target;
	if ( _options.preferSubArchitecture() && _options.archSupportsThumb2() ) {
		throwf("b/bl/blx thumb2 branch out of range (%lld max is +/-16MB): from %s (0x%08llX) to %s (0x%08llX)",
				displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
				addressOf(state, fixup, &target));
	}
	else {
		throwf("b/bl/blx thumb1 branch out of range (%lld max is +/-4MB): from %s (0x%08llX) to %s (0x%08llX)",
				displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
				addressOf(state, fixup, &target));
	}
}
771 bool OutputFile::checkPPCBranch24Displacement(int64_t displacement)
773 const int64_t lim = 0x01FFFFFC;
774 return ( (displacement < lim) && (displacement > (-lim)) );
// Throws (after dumping the layout) if a PowerPC bl displacement exceeds the
// 24-bit reach checked by checkPPCBranch24Displacement().
void OutputFile::rangeCheckPPCBranch24(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
{
	if (checkPPCBranch24Displacement (displacement))
		return;

	printSectionLayout(state);

	const ld::Atom* target;
	throwf("bl PPC branch out of range (%" PRId64 " max is +/-32MB): from %s (0x%08" PRIX64 ") to %s (0x%08" PRIX64 ")",
			displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
			addressOf(state, fixup, &target));
}
// Throws if a PowerPC conditional-branch (bcc) displacement exceeds the
// 16-bit signed reach (+/-64KB).
void OutputFile::rangeCheckPPCBranch14(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
{
	const int64_t b_sixtyFourKiloLimit = 0x0000FFFF;
	if ( (displacement > b_sixtyFourKiloLimit) || (displacement < (-b_sixtyFourKiloLimit)) ) {
		// show layout of final image
		printSectionLayout(state);

		const ld::Atom* target;
		throwf("bcc PPC branch out of range (%lld max is +/-64KB): from %s (0x%08llX) to %s (0x%08llX)",
				displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
				addressOf(state, fixup, &target));
	}
}
// Throws if an ARM64 b/bl displacement exceeds the 26-bit reach (+/-128MB).
void OutputFile::rangeCheckARM64Branch26(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
{
	const int64_t bl_128MegLimit = 0x07FFFFFF;
	if ( (displacement > bl_128MegLimit) || (displacement < (-bl_128MegLimit)) ) {
		// show layout of final image
		printSectionLayout(state);

		const ld::Atom* target;
		throwf("b(l) ARM64 branch out of range (%lld max is +/-128MB): from %s (0x%08llX) to %s (0x%08llX)",
				displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
				addressOf(state, fixup, &target));
	}
}
// Throws if an ARM64 ADRP page displacement exceeds +/-4GB.
void OutputFile::rangeCheckARM64Page21(int64_t displacement, ld::Internal& state, const ld::Atom* atom, const ld::Fixup* fixup)
{
	const int64_t adrp_4GigLimit = 0x100000000ULL;
	if ( (displacement > adrp_4GigLimit) || (displacement < (-adrp_4GigLimit)) ) {
		// show layout of final image
		printSectionLayout(state);

		const ld::Atom* target;
		throwf("ARM64 ADRP out of range (%lld max is +/-4GB): from %s (0x%08llX) to %s (0x%08llX)",
				displacement, atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fixup),
				addressOf(state, fixup, &target));
	}
}
// Byte-order-explicit scalar accessors: read/write a 16/32/64-bit value at
// 'loc' as little- or big-endian regardless of host endianness, delegating
// to the LittleEndian/BigEndian helper classes.
uint16_t OutputFile::get16LE(uint8_t* loc) { return LittleEndian::get16(*(uint16_t*)loc); }
void     OutputFile::set16LE(uint8_t* loc, uint16_t value) { LittleEndian::set16(*(uint16_t*)loc, value); }

uint32_t OutputFile::get32LE(uint8_t* loc) { return LittleEndian::get32(*(uint32_t*)loc); }
void     OutputFile::set32LE(uint8_t* loc, uint32_t value) { LittleEndian::set32(*(uint32_t*)loc, value); }

uint64_t OutputFile::get64LE(uint8_t* loc) { return LittleEndian::get64(*(uint64_t*)loc); }
void     OutputFile::set64LE(uint8_t* loc, uint64_t value) { LittleEndian::set64(*(uint64_t*)loc, value); }

uint16_t OutputFile::get16BE(uint8_t* loc) { return BigEndian::get16(*(uint16_t*)loc); }
void     OutputFile::set16BE(uint8_t* loc, uint16_t value) { BigEndian::set16(*(uint16_t*)loc, value); }

uint32_t OutputFile::get32BE(uint8_t* loc) { return BigEndian::get32(*(uint32_t*)loc); }
void     OutputFile::set32BE(uint8_t* loc, uint32_t value) { BigEndian::set32(*(uint32_t*)loc, value); }

uint64_t OutputFile::get64BE(uint8_t* loc) { return BigEndian::get64(*(uint64_t*)loc); }
void     OutputFile::set64BE(uint8_t* loc, uint64_t value) { BigEndian::set64(*(uint64_t*)loc, value); }
851 #if SUPPORT_ARCH_arm64
// Returns the encoding of the AArch64 NOP instruction.
static uint32_t makeNOP() {
	const uint32_t kArm64NopOpcode = 0xD503201F;	// HINT #0
	return kArm64NopOpcode;
}
// Whether a load sign-extends its result, and to what width.
enum SignExtension { signedNot, signed32, signed64 };

// Decoded description of an ARM64 load/store instruction, used by the
// linker-optimization-hint rewriting below.
struct LoadStoreInfo {
	uint32_t		reg;		// destination/source register number (0..31)
	uint32_t		baseReg;	// base address register number
	uint32_t		offset;		// after scaling
	uint32_t		size;		// 1,2,4,8, or 16
	bool			isStore;
	bool			isFloat;	// if destReg is FP/SIMD
	SignExtension	signEx;		// if load is sign extended
};
// Assemble an ARM64 PC-relative LDR (literal) loading from targetAddress
// into info.reg. Caller must guarantee the target is within +/-1MB of the
// instruction and 4-byte aligned; only 4-, 8-, and 16-byte loads have a
// literal form, and stores never do.
static uint32_t makeLDR_literal(const LoadStoreInfo& info, uint64_t targetAddress, uint64_t instructionAddress)
	int64_t delta = targetAddress - instructionAddress;
	assert(delta < 1024*1024);
	assert(delta > -1024*1024);
	assert((info.reg & 0xFFFFFFE0) == 0);
	assert((targetAddress & 0x3) == 0);
	assert((instructionAddress & 0x3) == 0);
	assert(!info.isStore);
	// imm19 field: word (delta/4) offset placed at bits [23:5]
	uint32_t imm19 = (delta << 3) & 0x00FFFFE0;
	uint32_t instruction = 0;
	switch ( info.size ) {
		case 4:
			if ( info.isFloat ) {
				assert(info.signEx == signedNot);
				instruction = 0x1C000000;	// LDR S-reg, literal
			else {
				if ( info.signEx == signed64 )
					instruction = 0x98000000;	// LDRSW X-reg, literal
				else
					instruction = 0x18000000;	// LDR W-reg, literal
			break;
		case 8:
			assert(info.signEx == signedNot);
			instruction = info.isFloat ? 0x5C000000 : 0x58000000;	// LDR D-reg / X-reg, literal
			break;
		case 16:
			assert(info.signEx == signedNot);
			instruction = 0x9C000000;	// LDR Q-reg, literal
			break;
		default:
			assert(0 && "invalid load size for literal");
	return (instruction | imm19 | info.reg);
// Assemble an AArch64 ADR instruction that materializes targetAddress into
// destReg. The target must lie within +/-1MB of the instruction; the
// 21-bit byte delta is split into immlo (bits 30:29) and immhi (bits 23:5).
static uint32_t makeADR(uint32_t destReg, uint64_t targetAddress, uint64_t instructionAddress)
{
	assert((destReg & 0xFFFFFFE0) == 0);
	assert((instructionAddress & 0x3) == 0);
	const int64_t delta = targetAddress - instructionAddress;
	assert(delta < 1024*1024);
	assert(delta > -1024*1024);
	const uint32_t opcode = 0x10000000;	// ADR
	const uint32_t immHi = (uint32_t)(delta & 0x001FFFFC) << 3;
	const uint32_t immLo = (uint32_t)(delta & 0x00000003) << 29;
	return (opcode | immHi | immLo | destReg);
}
// Assemble an ARM64 load/store (unsigned scaled immediate) instruction from
// a decoded LoadStoreInfo. The offset must already be a multiple of the
// access size; it is scaled into the 12-bit imm field. size/opc/V bits are
// chosen per the A64 "load/store register (unsigned immediate)" encoding.
static uint32_t makeLoadOrStore(const LoadStoreInfo& info)
	uint32_t instruction = 0x39000000;	// load/store unsigned-immediate class
	if ( info.isFloat )
		instruction |= 0x04000000;	// V bit: FP/SIMD register file
	instruction |= info.reg;
	instruction |= (info.baseReg << 5);
	uint32_t sizeBits = 0;
	uint32_t opcBits = 0;
	uint32_t imm12Bits = 0;
	switch ( info.size ) {
		case 1:
			sizeBits = 0;
			imm12Bits = info.offset;	// byte accesses are unscaled
			if ( info.isStore ) {
				opcBits = 0;
			else {
				switch ( info.signEx ) {
					case signedNot:
						opcBits = 1;
					break;
					case signed32:
						opcBits = 3;
						break;
					case signed64:
						opcBits = 2;
						break;
			break;
		case 2:
			sizeBits = 1;
			assert((info.offset % 2) == 0);
			imm12Bits = info.offset/2;
			if ( info.isStore ) {
				opcBits = 0;
			else {
				switch ( info.signEx ) {
					case signedNot:
						opcBits = 1;
						break;
					case signed32:
						opcBits = 3;
						break;
					case signed64:
						opcBits = 2;
						break;
			break;
		case 4:
			sizeBits = 2;
			assert((info.offset % 4) == 0);
			imm12Bits = info.offset/4;
			if ( info.isStore ) {
				opcBits = 0;
			else {
				switch ( info.signEx ) {
					case signedNot:
						opcBits = 1;
						break;
					case signed32:
						// a 32-bit load sign-extended to 32 bits is meaningless
						assert(0 && "cannot use signed32 with 32-bit load/store");
						break;
					case signed64:
						opcBits = 2;
						break;
			break;
		case 8:
			sizeBits = 3;
			assert((info.offset % 8) == 0);
			imm12Bits = info.offset/8;
			if ( info.isStore ) {
				opcBits = 0;
			else {
				opcBits = 1;
				assert(info.signEx == signedNot);
			break;
		case 16:
			// 128-bit accesses are always FP/SIMD; size=00 with opc high bit set
			sizeBits = 0;
			assert((info.offset % 16) == 0);
			imm12Bits = info.offset/16;
			assert(info.isFloat);
			if ( info.isStore ) {
				opcBits = 2;
			else {
				opcBits = 3;
			break;
		default:
			assert(0 && "bad load/store size");
			break;
	assert(imm12Bits < 4096);
	return (instruction | (sizeBits << 30) | (opcBits << 22) | (imm12Bits << 10));
// Decode an ARM64 load/store (unsigned scaled immediate) instruction into a
// LoadStoreInfo. Returns false if the instruction is not of that class or
// uses an unsupported size/opc combination. The imm12 field is unscaled
// back into a byte offset using the decoded access size.
static bool parseLoadOrStore(uint32_t instruction, LoadStoreInfo& info)
	if ( (instruction & 0x3B000000) != 0x39000000 )
		return false;
	info.isFloat = ( (instruction & 0x04000000) != 0 );
	info.reg = (instruction & 0x1F);
	info.baseReg = ((instruction>>5) & 0x1F);
	// dispatch on the combined size (bits 31:30) and opc (bits 23:22) fields
	switch (instruction & 0xC0C00000) {
		case 0x00000000:
			info.size = 1;
			info.isStore = true;
			info.signEx = signedNot;
			break;
		case 0x00400000:
			info.size = 1;
			info.isStore = false;
			info.signEx = signedNot;
			break;
		case 0x00800000:
			// size=00,opc=10: STR Q-reg when V set, else LDRSB (64-bit dest)
			if ( info.isFloat ) {
				info.size = 16;
				info.isStore = true;
				info.signEx = signedNot;
			else {
				info.size = 1;
				info.isStore = false;
				info.signEx = signed64;
			break;
		case 0x00C00000:
			// size=00,opc=11: LDR Q-reg when V set, else LDRSB (32-bit dest)
			if ( info.isFloat ) {
				info.size = 16;
				info.isStore = false;
				info.signEx = signedNot;
			else {
				info.size = 1;
				info.isStore = false;
				info.signEx = signed32;
			break;
		case 0x40000000:
			info.size = 2;
			info.isStore = true;
			info.signEx = signedNot;
			break;
		case 0x40400000:
			info.size = 2;
			info.isStore = false;
			info.signEx = signedNot;
			break;
		case 0x40800000:
			info.size = 2;
			info.isStore = false;
			info.signEx = signed64;
			break;
		case 0x40C00000:
			info.size = 2;
			info.isStore = false;
			info.signEx = signed32;
			break;
		case 0x80000000:
			info.size = 4;
			info.isStore = true;
			info.signEx = signedNot;
			break;
		case 0x80400000:
			info.size = 4;
			info.isStore = false;
			info.signEx = signedNot;
			break;
		case 0x80800000:
			info.size = 4;
			info.isStore = false;
			info.signEx = signed64;
			break;
		case 0xC0000000:
			info.size = 8;
			info.isStore = true;
			info.signEx = signedNot;
			break;
		case 0xC0400000:
			info.size = 8;
			info.isStore = false;
			info.signEx = signedNot;
			break;
		default:
			return false;
	info.offset = ((instruction >> 10) & 0x0FFF) * info.size;
	return true;
// Decoded form of an ADRP instruction; only the destination register is
// needed by the LOH optimization passes.
struct AdrpInfo {
	uint32_t destReg;
1122 static bool parseADRP(uint32_t instruction, AdrpInfo& info)
1124 if ( (instruction & 0x9F000000) != 0x90000000 )
1125 return false;
1126 info.destReg = (instruction & 0x1F);
1127 return true;
// Decoded form of a 64-bit ADD-immediate instruction (ADD Xd, Xn, #imm),
// as produced by parseADD() below.
struct AddInfo {
	uint32_t destReg;
	uint32_t srcReg;
	uint32_t addend;
1136 static bool parseADD(uint32_t instruction, AddInfo& info)
1138 if ( (instruction & 0xFFC00000) != 0x91000000 )
1139 return false;
1140 info.destReg = (instruction & 0x1F);
1141 info.srcReg = ((instruction>>5) & 0x1F);
1142 info.addend = ((instruction>>10) & 0xFFF);
1143 return true;
#if 0
// NOTE: the helpers below are compiled out but kept for reference; they
// assemble/decode additional ARM64 load forms not currently needed by the
// linker optimization hint passes.

// Assemble an ARM64 LDR with an unsigned scaled-immediate offset from a
// decoded LoadStoreInfo.
static uint32_t makeLDR_scaledOffset(const LoadStoreInfo& info)
	assert((info.reg & 0xFFFFFFE0) == 0);
	assert((info.baseReg & 0xFFFFFFE0) == 0);
	assert(!info.isFloat || (info.signEx != signedNot));
	uint32_t sizeBits = 0;
	uint32_t opcBits = 1;
	uint32_t vBit = info.isFloat;
	switch ( info.signEx ) {
		case signedNot:
			opcBits = 1;
			break;
		case signed32:
			opcBits = 3;
			break;
		case signed64:
			opcBits = 2;
			break;
		default:
			assert(0 && "bad SignExtension runtime value");
	switch ( info.size ) {
		case 1:
			sizeBits = 0;
			break;
		case 2:
			sizeBits = 1;
			break;
		case 4:
			sizeBits = 2;
			break;
		case 8:
			sizeBits = 3;
			break;
		case 16:
			sizeBits = 0;
			vBit = 1;
			opcBits = 3;
			break;
		default:
			assert(0 && "invalid load size for literal");
	assert((info.offset % info.size) == 0);
	uint32_t scaledOffset = info.offset/info.size;
	assert(scaledOffset < 4096);
	return (0x39000000 | (sizeBits<<30) | (vBit<<26) | (opcBits<<22) | (scaledOffset<<10) | (info.baseReg<<5) | info.reg);

// Assemble an ARM64 PC-relative LDR (literal) from loose parameters rather
// than a LoadStoreInfo.
static uint32_t makeLDR_literal(uint32_t destReg, uint32_t loadSize, bool isFloat, uint64_t targetAddress, uint64_t instructionAddress)
	int64_t delta = targetAddress - instructionAddress;
	assert(delta < 1024*1024);
	assert(delta > -1024*1024);
	assert((destReg & 0xFFFFFFE0) == 0);
	assert((targetAddress & 0x3) == 0);
	assert((instructionAddress & 0x3) == 0);
	uint32_t imm19 = (delta << 3) & 0x00FFFFE0;
	uint32_t instruction = 0;
	switch ( loadSize ) {
		case 4:
			instruction = isFloat ? 0x1C000000 : 0x18000000;
			break;
		case 8:
			instruction = isFloat ? 0x5C000000 : 0x58000000;
			break;
		case 16:
			instruction = 0x9C000000;
			break;
		default:
			assert(0 && "invalid load size for literal");
	return (instruction | imm19 | destReg);

// Decode an ARM64 LDR (unsigned scaled immediate) into its components;
// returns true only if the instruction really is such an LDR.
static bool ldrInfo(uint32_t instruction, uint8_t* size, uint8_t* destReg, bool* v, uint32_t* scaledOffset)
	*v = ( (instruction & 0x04000000) != 0 );
	*destReg = (instruction & 0x1F);
	uint32_t imm12 = ((instruction >> 10) & 0x00000FFF);
	switch ( (instruction & 0xC0000000) >> 30 ) {
		case 0:
			// vector and byte LDR have same "size" bits, need to check other bits to differentiate
			if ( (instruction & 0x00800000) == 0 ) {
				*size = 1;
				*scaledOffset = imm12;
			else {
				*size = 16;
				*scaledOffset = imm12 * 16;
			break;
		case 1:
			*size = 2;
			*scaledOffset = imm12 * 2;
			break;
		case 2:
			*size = 4;
			*scaledOffset = imm12 * 4;
			break;
		case 3:
			*size = 8;
			*scaledOffset = imm12 * 8;
			break;
	return ((instruction & 0x3B400000) == 0x39400000);
#endif
// Returns true when the two addresses are strictly within 1MB of each other
// (the reach of ARM64 ADR / LDR-literal instructions, either direction).
static bool withinOneMeg(uint64_t addr1, uint64_t addr2) {
	const int64_t distance = (int64_t)(addr2 - addr1);
	return ( (distance > -1048576) && (distance < 1048576) );
}
1262 #endif // SUPPORT_ARCH_arm64
// Populate an InstructionInfo for the instruction at offsetInAtom+delta
// within `atom`: look up any fixup recorded at that offset in usedByHints
// (folding in a trailing kindAddAddend when the fixup heads a cluster), and
// cache the instruction's buffer location, final address, and raw bits.
void OutputFile::setInfo(ld::Internal& state, const ld::Atom* atom, uint8_t* buffer, const std::map<uint32_t, const Fixup*>& usedByHints,
						uint32_t offsetInAtom, uint32_t delta, InstructionInfo* info)
	info->offsetInAtom = offsetInAtom + delta;
	std::map<uint32_t, const Fixup*>::const_iterator pos = usedByHints.find(info->offsetInAtom);
	if ( (pos != usedByHints.end()) && (pos->second != NULL) ) {
		info->fixup = pos->second;
		info->targetAddress = addressOf(state, info->fixup, &info->target);
		if ( info->fixup->clusterSize != ld::Fixup::k1of1 ) {
			// multi-fixup cluster: expect <set-target, add-addend>
			assert(info->fixup->firstInCluster());
			const ld::Fixup* nextFixup = info->fixup + 1;
			if ( nextFixup->kind == ld::Fixup::kindAddAddend ) {
				info->targetAddress += nextFixup->u.addend;
			else {
				assert(0 && "expected addend");
	else {
		// no hint-relevant fixup at this offset
		info->fixup = NULL;
		info->targetAddress = 0;
		info->target = NULL;
	info->instructionContent = &buffer[info->offsetInAtom];
	info->instructionAddress = atom->finalAddress() + info->offsetInAtom;
	info->instruction = get32LE(info->instructionContent);
1293 #if SUPPORT_ARCH_arm64
1294 static bool isPageKind(const ld::Fixup* fixup, bool mustBeGOT=false)
1296 if ( fixup == NULL )
1297 return false;
1298 const ld::Fixup* f;
1299 switch ( fixup->kind ) {
1300 case ld::Fixup::kindStoreTargetAddressARM64Page21:
1301 return !mustBeGOT;
1302 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21:
1303 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21:
1304 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPage21:
1305 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPage21:
1306 return true;
1307 case ld::Fixup::kindSetTargetAddress:
1308 f = fixup;
1309 do {
1310 ++f;
1311 } while ( ! f->lastInCluster() );
1312 switch (f->kind ) {
1313 case ld::Fixup::kindStoreARM64Page21:
1314 return !mustBeGOT;
1315 case ld::Fixup::kindStoreARM64GOTLoadPage21:
1316 case ld::Fixup::kindStoreARM64GOTLeaPage21:
1317 case ld::Fixup::kindStoreARM64TLVPLoadPage21:
1318 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPage21:
1319 return true;
1320 default:
1321 break;
1323 break;
1324 default:
1325 break;
1327 return false;
1330 static bool isPageOffsetKind(const ld::Fixup* fixup, bool mustBeGOT=false)
1332 if ( fixup == NULL )
1333 return false;
1334 const ld::Fixup* f;
1335 switch ( fixup->kind ) {
1336 case ld::Fixup::kindStoreTargetAddressARM64PageOff12:
1337 return !mustBeGOT;
1338 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12:
1339 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12:
1340 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPageOff12:
1341 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPageOff12:
1342 return true;
1343 case ld::Fixup::kindSetTargetAddress:
1344 f = fixup;
1345 do {
1346 ++f;
1347 } while ( ! f->lastInCluster() );
1348 switch (f->kind ) {
1349 case ld::Fixup::kindStoreARM64PageOff12:
1350 return !mustBeGOT;
1351 case ld::Fixup::kindStoreARM64GOTLoadPageOff12:
1352 case ld::Fixup::kindStoreARM64GOTLeaPageOff12:
1353 case ld::Fixup::kindStoreARM64TLVPLoadPageOff12:
1354 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPageOff12:
1355 return true;
1356 default:
1357 break;
1359 break;
1360 default:
1361 break;
1363 return false;
1365 #endif // SUPPORT_ARCH_arm64
// LOH_ASSERT: if `cond` is false, warn (quoting the failed condition text)
// that the current linker optimization hint is being ignored, then `break`
// out of processing that hint. Relies on `atom` and `fit` being in scope at
// the expansion site inside the hint-processing switch.
#define LOH_ASSERT(cond) \
	if ( !(cond) ) { \
		warning("ignoring linker optimization hint at %s+0x%X because " #cond, atom->name(), fit->offsetInAtom); \
		break; \
1374 void OutputFile::applyFixUps(ld::Internal& state, uint64_t mhAddress, const ld::Atom* atom, uint8_t* buffer)
1376 //fprintf(stderr, "applyFixUps() on %s\n", atom->name());
1377 int64_t accumulator = 0;
1378 const ld::Atom* toTarget = NULL;
1379 const ld::Atom* fromTarget;
1380 int64_t delta;
1381 uint32_t instruction;
1382 uint32_t newInstruction;
1383 uint16_t instructionLowHalf;
1384 bool is_bl;
1385 bool is_blx;
1386 bool is_b;
1387 bool thumbTarget = false;
1388 std::map<uint32_t, const Fixup*> usedByHints;
1389 bool printStuff = false;
1390 if (atom->contentType() == ld::Atom::typeBranchIsland) {
1391 //fprintf(stderr, "applyFixUps() on %s ", atom->name());
1392 //printStuff = true;
1395 for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
1396 uint8_t* fixUpLocation = &buffer[fit->offsetInAtom];
1397 ld::Fixup::LOH_arm64 lohExtra;
1398 switch ( (ld::Fixup::Kind)(fit->kind) ) {
1399 case ld::Fixup::kindNone:
1400 case ld::Fixup::kindNoneFollowOn:
1401 case ld::Fixup::kindNoneGroupSubordinate:
1402 case ld::Fixup::kindNoneGroupSubordinateFDE:
1403 case ld::Fixup::kindNoneGroupSubordinateLSDA:
1404 case ld::Fixup::kindNoneGroupSubordinatePersonality:
1405 break;
1406 case ld::Fixup::kindSetTargetAddress:
1407 accumulator = addressOf(state, fit, &toTarget);
1408 thumbTarget = targetIsThumb(state, fit);
1409 if ( thumbTarget )
1410 accumulator |= 1;
1411 if ( fit->contentAddendOnly || fit->contentDetlaToAddendOnly )
1412 accumulator = 0;
1413 if (printStuff) fprintf(stderr, ":kindSetTargetAddress accum 0x%lx %s ", accumulator, toTarget?toTarget->name():"anon");
1414 break;
1415 case ld::Fixup::kindSubtractTargetAddress:
1416 delta = addressOf(state, fit, &fromTarget);
1417 if ( ! fit->contentAddendOnly )
1418 accumulator -= delta;
1419 break;
1420 case ld::Fixup::kindAddAddend:
1421 if ( ! fit->contentIgnoresAddend ) {
1422 // <rdar://problem/8342028> ARM main executables main contain .long constants pointing
1423 // into themselves such as jump tables. These .long should not have thumb bit set
1424 // even though the target is a thumb instruction. We can tell it is an interior pointer
1425 // because we are processing an addend.
1426 if ( thumbTarget && (toTarget == atom) && ((int32_t)fit->u.addend > 0) ) {
1427 accumulator &= (-2);
1428 //warning("removing thumb bit from intra-atom pointer in %s %s+0x%0X",
1429 // atom->section().sectionName(), atom->name(), fit->offsetInAtom);
1431 accumulator += fit->u.addend;
1433 if (printStuff) fprintf(stderr, ":kindAddAddend accum 0x%lx addend %u ", accumulator, fit->u.addend);
1434 break;
1435 case ld::Fixup::kindSubtractAddend:
1436 accumulator -= fit->u.addend;
1437 break;
1438 case ld::Fixup::kindSetTargetImageOffset:
1439 accumulator = addressOf(state, fit, &toTarget) - mhAddress;
1440 thumbTarget = targetIsThumb(state, fit);
1441 if ( thumbTarget )
1442 accumulator |= 1;
1443 break;
1444 case ld::Fixup::kindSetTargetSectionOffset:
1445 accumulator = sectionOffsetOf(state, fit);
1446 break;
1447 case ld::Fixup::kindSetTargetTLVTemplateOffset:
1448 accumulator = tlvTemplateOffsetOf(state, fit);
1449 break;
1450 case ld::Fixup::kindStore8:
1451 *fixUpLocation += accumulator;
1452 break;
1453 case ld::Fixup::kindStoreLittleEndian16:
1454 set16LE(fixUpLocation, accumulator);
1455 break;
1456 case ld::Fixup::kindStoreLittleEndianLow24of32:
1457 set32LE(fixUpLocation, (get32LE(fixUpLocation) & 0xFF000000) | (accumulator & 0x00FFFFFF) );
1458 break;
1459 case ld::Fixup::kindStoreLittleEndian32:
1460 rangeCheckAbsolute32(accumulator, state, atom, fit);
1461 set32LE(fixUpLocation, accumulator);
1462 break;
1463 case ld::Fixup::kindStoreLittleEndian64:
1464 set64LE(fixUpLocation, accumulator);
1465 break;
1466 case ld::Fixup::kindStoreBigEndian16:
1467 set16BE(fixUpLocation, accumulator);
1468 break;
1469 case ld::Fixup::kindStoreBigEndianLow24of32:
1470 set32BE(fixUpLocation, (get32BE(fixUpLocation) & 0xFF000000) | (accumulator & 0x00FFFFFF) );
1471 break;
1472 case ld::Fixup::kindStoreBigEndian32:
1473 rangeCheckAbsolute32(accumulator, state, atom, fit);
1474 set32BE(fixUpLocation, accumulator);
1475 break;
1476 case ld::Fixup::kindStoreBigEndian64:
1477 set64BE(fixUpLocation, accumulator);
1478 break;
1479 case ld::Fixup::kindStoreX86PCRel8:
1480 case ld::Fixup::kindStoreX86BranchPCRel8:
1481 if ( fit->contentAddendOnly )
1482 delta = accumulator;
1483 else
1484 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 1);
1485 rangeCheck8(delta, state, atom, fit);
1486 *fixUpLocation = delta;
1487 break;
1488 case ld::Fixup::kindStoreX86PCRel16:
1489 if ( fit->contentAddendOnly )
1490 delta = accumulator;
1491 else
1492 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 2);
1493 rangeCheck16(delta, state, atom, fit);
1494 set16LE(fixUpLocation, delta);
1495 break;
1496 case ld::Fixup::kindStoreX86BranchPCRel32:
1497 if ( fit->contentAddendOnly )
1498 delta = accumulator;
1499 else
1500 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 4);
1501 rangeCheckBranch32(delta, state, atom, fit);
1502 set32LE(fixUpLocation, delta);
1503 break;
1504 case ld::Fixup::kindStoreX86PCRel32GOTLoad:
1505 case ld::Fixup::kindStoreX86PCRel32GOT:
1506 case ld::Fixup::kindStoreX86PCRel32:
1507 case ld::Fixup::kindStoreX86PCRel32TLVLoad:
1508 if ( fit->contentAddendOnly )
1509 delta = accumulator;
1510 else
1511 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 4);
1512 rangeCheckRIP32(delta, state, atom, fit);
1513 set32LE(fixUpLocation, delta);
1514 break;
1515 case ld::Fixup::kindStoreX86PCRel32_1:
1516 if ( fit->contentAddendOnly )
1517 delta = accumulator - 1;
1518 else
1519 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 5);
1520 rangeCheckRIP32(delta, state, atom, fit);
1521 set32LE(fixUpLocation, delta);
1522 break;
1523 case ld::Fixup::kindStoreX86PCRel32_2:
1524 if ( fit->contentAddendOnly )
1525 delta = accumulator - 2;
1526 else
1527 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 6);
1528 rangeCheckRIP32(delta, state, atom, fit);
1529 set32LE(fixUpLocation, delta);
1530 break;
1531 case ld::Fixup::kindStoreX86PCRel32_4:
1532 if ( fit->contentAddendOnly )
1533 delta = accumulator - 4;
1534 else
1535 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 8);
1536 rangeCheckRIP32(delta, state, atom, fit);
1537 set32LE(fixUpLocation, delta);
1538 break;
1539 case ld::Fixup::kindStoreX86Abs32TLVLoad:
1540 set32LE(fixUpLocation, accumulator);
1541 break;
1542 case ld::Fixup::kindStoreX86Abs32TLVLoadNowLEA:
1543 assert(_options.outputKind() != Options::kObjectFile);
1544 // TLV entry was optimized away, change movl instruction to a leal
1545 if ( fixUpLocation[-1] != 0xA1 )
1546 throw "TLV load reloc does not point to a movl instruction";
1547 fixUpLocation[-1] = 0xB8;
1548 set32LE(fixUpLocation, accumulator);
1549 break;
1550 case ld::Fixup::kindStoreX86PCRel32GOTLoadNowLEA:
1551 assert(_options.outputKind() != Options::kObjectFile);
1552 // GOT entry was optimized away, change movq instruction to a leaq
1553 if ( fixUpLocation[-2] != 0x8B )
1554 throw "GOT load reloc does not point to a movq instruction";
1555 fixUpLocation[-2] = 0x8D;
1556 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 4);
1557 rangeCheckRIP32(delta, state, atom, fit);
1558 set32LE(fixUpLocation, delta);
1559 break;
1560 case ld::Fixup::kindStoreX86PCRel32TLVLoadNowLEA:
1561 assert(_options.outputKind() != Options::kObjectFile);
1562 // TLV entry was optimized away, change movq instruction to a leaq
1563 if ( fixUpLocation[-2] != 0x8B )
1564 throw "TLV load reloc does not point to a movq instruction";
1565 fixUpLocation[-2] = 0x8D;
1566 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 4);
1567 rangeCheckRIP32(delta, state, atom, fit);
1568 set32LE(fixUpLocation, delta);
1569 break;
1570 case ld::Fixup::kindStoreTargetAddressARMLoad12:
1571 accumulator = addressOf(state, fit, &toTarget);
1572 // fall into kindStoreARMLoad12 case
1573 case ld::Fixup::kindStoreARMLoad12:
1574 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 8);
1575 rangeCheckARM12(delta, state, atom, fit);
1576 instruction = get32LE(fixUpLocation);
1577 if ( delta >= 0 ) {
1578 newInstruction = instruction & 0xFFFFF000;
1579 newInstruction |= ((uint32_t)delta & 0xFFF);
1581 else {
1582 newInstruction = instruction & 0xFF7FF000;
1583 newInstruction |= ((uint32_t)(-delta) & 0xFFF);
1585 set32LE(fixUpLocation, newInstruction);
1586 break;
1587 case ld::Fixup::kindStorePPCBranch14:
1588 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom);
1589 rangeCheckPPCBranch14(delta, state, atom, fit);
1590 instruction = get32BE(fixUpLocation);
1591 newInstruction = (instruction & 0xFFFF0003) | ((uint32_t)delta & 0x0000FFFC);
1592 set32BE(fixUpLocation, newInstruction);
1593 break;
1594 case ld::Fixup::kindStoreTargetAddressPPCBranch24:
1595 accumulator = addressOf(state, fit, &toTarget);
1596 if (printStuff) fprintf(stderr, ":kindStoreTargetAddressPPCBranch24 ");
1597 #if 0
1598 //FIXME: reinstate this optimisation - and make sure it applies to the addend cases too.
1599 if ( toTarget->contentType() == ld::Atom::typeBranchIsland ) {
1600 // Branching to island. If ultimate target is in range, branch there directly.
1601 for (ld::Fixup::iterator islandfit = toTarget->fixupsBegin(), end=toTarget->fixupsEnd();
1602 islandfit != end; ++islandfit) {
1603 if ( islandfit->kind == ld::Fixup::kindIslandTarget ) {
1604 const ld::Atom* islandTarget = NULL;
1605 uint64_t islandTargetAddress = addressOf(state, islandfit, &islandTarget);
1606 delta = islandTargetAddress - (atom->finalAddress() + fit->offsetInAtom + 4);
1607 if ( checkPPCBranch24Displacement(delta) ) {
1608 toTarget = islandTarget;
1609 accumulator = islandTargetAddress;
1610 thumbTarget = targetIsThumb(state, islandfit);
1612 break;
1616 #endif
1617 if ( fit->contentDetlaToAddendOnly )
1618 accumulator = 0;
1619 // fall into kindStorePPCBranch24 case
1620 case ld::Fixup::kindStorePPCBranch24:
1621 if (printStuff) fprintf(stderr, ":kindStorePPCBranch24 ");
1622 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom);
1623 rangeCheckPPCBranch24(delta, state, atom, fit);
1624 instruction = get32BE(fixUpLocation);
1625 newInstruction = (instruction & 0xFC000003) | ((uint32_t)delta & 0x03FFFFFC);
1626 set32BE(fixUpLocation, newInstruction);
1627 break;
1628 case ld::Fixup::kindStorePPCPicLow14:
1629 case ld::Fixup::kindStorePPCAbsLow14:
1630 instruction = get32BE(fixUpLocation);
1631 if ( (accumulator & 0x3) != 0 )
1632 throwf("bad offset (0x%08X) for lo14 instruction pic-base fix-up", (uint32_t)accumulator);
1633 newInstruction = (instruction & 0xFFFF0003) | (accumulator & 0xFFFC);
1634 set32BE(fixUpLocation, newInstruction);
1635 break;
1636 case ld::Fixup::kindStorePPCAbsLow16:
1637 case ld::Fixup::kindStorePPCPicLow16:
1638 instruction = get32BE(fixUpLocation);
1639 newInstruction = (instruction & 0xFFFF0000) | (accumulator & 0xFFFF);
1640 set32BE(fixUpLocation, newInstruction);
1641 break;
1642 case ld::Fixup::kindStorePPCAbsHigh16AddLow:
1643 case ld::Fixup::kindStorePPCPicHigh16AddLow:
1644 instructionLowHalf = (accumulator >> 16) & 0xFFFF;
1645 if ( accumulator & 0x00008000 )
1646 ++instructionLowHalf;
1647 instruction = get32BE(fixUpLocation);
1648 newInstruction = (instruction & 0xFFFF0000) | instructionLowHalf;
1649 set32BE(fixUpLocation, newInstruction);
1650 break;
1651 case ld::Fixup::kindStorePPCAbsHigh16:
1652 instruction = get32BE(fixUpLocation);
1653 newInstruction = (instruction & 0xFFFF0000) | ((accumulator >> 16) & 0xFFFF);
1654 set32BE(fixUpLocation, newInstruction);
1655 break;
1656 case ld::Fixup::kindDtraceExtra:
1657 break;
1658 case ld::Fixup::kindStoreX86DtraceCallSiteNop:
1659 if ( _options.outputKind() != Options::kObjectFile ) {
1660 // change call site to a NOP
1661 fixUpLocation[-1] = 0x90; // 1-byte nop
1662 fixUpLocation[0] = 0x0F; // 4-byte nop
1663 fixUpLocation[1] = 0x1F;
1664 fixUpLocation[2] = 0x40;
1665 fixUpLocation[3] = 0x00;
1667 break;
1668 case ld::Fixup::kindStoreX86DtraceIsEnableSiteClear:
1669 if ( _options.outputKind() != Options::kObjectFile ) {
1670 // change call site to a clear eax
1671 fixUpLocation[-1] = 0x33; // xorl eax,eax
1672 fixUpLocation[0] = 0xC0;
1673 fixUpLocation[1] = 0x90; // 1-byte nop
1674 fixUpLocation[2] = 0x90; // 1-byte nop
1675 fixUpLocation[3] = 0x90; // 1-byte nop
1677 break;
1678 case ld::Fixup::kindStorePPCDtraceCallSiteNop:
1679 if ( _options.outputKind() != Options::kObjectFile ) {
1680 // change call site to a NOP
1681 set32BE(fixUpLocation, 0x60000000);
1683 break;
1684 case ld::Fixup::kindStorePPCDtraceIsEnableSiteClear:
1685 if ( _options.outputKind() != Options::kObjectFile ) {
1686 // change call site to a li r3,0
1687 set32BE(fixUpLocation, 0x38600000);
1689 break;
1690 case ld::Fixup::kindStoreARMDtraceCallSiteNop:
1691 if ( _options.outputKind() != Options::kObjectFile ) {
1692 // change call site to a NOP
1693 set32LE(fixUpLocation, 0xE1A00000);
1695 break;
1696 case ld::Fixup::kindStoreARMDtraceIsEnableSiteClear:
1697 if ( _options.outputKind() != Options::kObjectFile ) {
1698 // change call site to 'eor r0, r0, r0'
1699 set32LE(fixUpLocation, 0xE0200000);
1701 break;
1702 case ld::Fixup::kindStoreThumbDtraceCallSiteNop:
1703 if ( _options.outputKind() != Options::kObjectFile ) {
1704 // change 32-bit blx call site to two thumb NOPs
1705 set32LE(fixUpLocation, 0x46C046C0);
1707 break;
1708 case ld::Fixup::kindStoreThumbDtraceIsEnableSiteClear:
1709 if ( _options.outputKind() != Options::kObjectFile ) {
1710 // change 32-bit blx call site to 'nop', 'eor r0, r0'
1711 set32LE(fixUpLocation, 0x46C04040);
1713 break;
1714 case ld::Fixup::kindStoreARM64DtraceCallSiteNop:
1715 if ( _options.outputKind() != Options::kObjectFile ) {
1716 // change call site to a NOP
1717 set32LE(fixUpLocation, 0xD503201F);
1719 break;
1720 case ld::Fixup::kindStoreARM64DtraceIsEnableSiteClear:
1721 if ( _options.outputKind() != Options::kObjectFile ) {
1722 // change call site to 'MOVZ X0,0'
1723 set32LE(fixUpLocation, 0xD2800000);
1725 break;
1726 case ld::Fixup::kindLazyTarget:
1727 break;
1728 case ld::Fixup::kindIslandTarget:
1729 if ( fit->clusterSize == ld::Fixup::k1of2 ) { // had an addend saved.
1730 ++fit; // skip it.
1732 break;
1733 case ld::Fixup::kindSetLazyOffset:
1734 assert(fit->binding == ld::Fixup::bindingDirectlyBound);
1735 accumulator = this->lazyBindingInfoOffsetForLazyPointerAddress(fit->u.target->finalAddress());
1736 break;
1737 case ld::Fixup::kindDataInCodeStartData:
1738 case ld::Fixup::kindDataInCodeStartJT8:
1739 case ld::Fixup::kindDataInCodeStartJT16:
1740 case ld::Fixup::kindDataInCodeStartJT32:
1741 case ld::Fixup::kindDataInCodeStartJTA32:
1742 case ld::Fixup::kindDataInCodeEnd:
1743 break;
1744 case ld::Fixup::kindLinkerOptimizationHint:
1745 // expand table of address/offsets used by hints
1746 lohExtra.addend = fit->u.addend;
1747 usedByHints[fit->offsetInAtom + (lohExtra.info.delta1 << 2)] = NULL;
1748 if ( lohExtra.info.count > 0 )
1749 usedByHints[fit->offsetInAtom + (lohExtra.info.delta2 << 2)] = NULL;
1750 if ( lohExtra.info.count > 1 )
1751 usedByHints[fit->offsetInAtom + (lohExtra.info.delta3 << 2)] = NULL;
1752 if ( lohExtra.info.count > 2 )
1753 usedByHints[fit->offsetInAtom + (lohExtra.info.delta4 << 2)] = NULL;
1754 break;
1755 case ld::Fixup::kindStoreTargetAddressLittleEndian32:
1756 accumulator = addressOf(state, fit, &toTarget);
1757 thumbTarget = targetIsThumb(state, fit);
1758 if ( thumbTarget )
1759 accumulator |= 1;
1760 if ( fit->contentAddendOnly )
1761 accumulator = 0;
1762 rangeCheckAbsolute32(accumulator, state, atom, fit);
1763 set32LE(fixUpLocation, accumulator);
1764 break;
1765 case ld::Fixup::kindStoreTargetAddressLittleEndian64:
1766 accumulator = addressOf(state, fit, &toTarget);
1767 if ( fit->contentAddendOnly )
1768 accumulator = 0;
1769 set64LE(fixUpLocation, accumulator);
1770 break;
1771 case ld::Fixup::kindStoreTargetAddressBigEndian32:
1772 accumulator = addressOf(state, fit, &toTarget);
1773 if ( fit->contentAddendOnly )
1774 accumulator = 0;
1775 set32BE(fixUpLocation, accumulator);
1776 break;
1777 case ld::Fixup::kindStoreTargetAddressBigEndian64:
1778 accumulator = addressOf(state, fit, &toTarget);
1779 if ( fit->contentAddendOnly )
1780 accumulator = 0;
1781 set64BE(fixUpLocation, accumulator);
1782 break;
1783 case ld::Fixup::kindSetTargetTLVTemplateOffsetLittleEndian32:
1784 accumulator = tlvTemplateOffsetOf(state, fit);
1785 set32LE(fixUpLocation, accumulator);
1786 break;
1787 case ld::Fixup::kindSetTargetTLVTemplateOffsetLittleEndian64:
1788 accumulator = tlvTemplateOffsetOf(state, fit);
1789 set64LE(fixUpLocation, accumulator);
1790 break;
1791 case ld::Fixup::kindStoreTargetAddressX86PCRel32:
1792 case ld::Fixup::kindStoreTargetAddressX86BranchPCRel32:
1793 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad:
1794 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad:
1795 accumulator = addressOf(state, fit, &toTarget);
1796 if ( fit->contentDetlaToAddendOnly )
1797 accumulator = 0;
1798 if ( fit->contentAddendOnly )
1799 delta = 0;
1800 else
1801 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 4);
1802 rangeCheckRIP32(delta, state, atom, fit);
1803 set32LE(fixUpLocation, delta);
1804 break;
1805 case ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoad:
1806 set32LE(fixUpLocation, accumulator);
1807 break;
1808 case ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoadNowLEA:
1809 // TLV entry was optimized away, change movl instruction to a leal
1810 if ( fixUpLocation[-1] != 0xA1 )
1811 throw "TLV load reloc does not point to a movl <abs-address>,<reg> instruction";
1812 fixUpLocation[-1] = 0xB8;
1813 accumulator = addressOf(state, fit, &toTarget);
1814 set32LE(fixUpLocation, accumulator);
1815 break;
1816 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA:
1817 // GOT entry was optimized away, change movq instruction to a leaq
1818 if ( fixUpLocation[-2] != 0x8B )
1819 throw "GOT load reloc does not point to a movq instruction";
1820 fixUpLocation[-2] = 0x8D;
1821 accumulator = addressOf(state, fit, &toTarget);
1822 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 4);
1823 rangeCheckRIP32(delta, state, atom, fit);
1824 set32LE(fixUpLocation, delta);
1825 break;
1826 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA:
1827 // TLV entry was optimized away, change movq instruction to a leaq
1828 if ( fixUpLocation[-2] != 0x8B )
1829 throw "TLV load reloc does not point to a movq instruction";
1830 fixUpLocation[-2] = 0x8D;
1831 accumulator = addressOf(state, fit, &toTarget);
1832 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 4);
1833 rangeCheckRIP32(delta, state, atom, fit);
1834 set32LE(fixUpLocation, delta);
1835 break;
1836 case ld::Fixup::kindStoreTargetAddressARMBranch24:
1837 accumulator = addressOf(state, fit, &toTarget);
1838 thumbTarget = targetIsThumb(state, fit);
1839 if ( toTarget->contentType() == ld::Atom::typeBranchIsland ) {
1840 // Branching to island. If ultimate target is in range, branch there directly.
1841 for (ld::Fixup::iterator islandfit = toTarget->fixupsBegin(), end=toTarget->fixupsEnd(); islandfit != end; ++islandfit) {
1842 if ( islandfit->kind == ld::Fixup::kindIslandTarget ) {
1843 const ld::Atom* islandTarget = NULL;
1844 uint64_t islandTargetAddress = addressOf(state, islandfit, &islandTarget);
1845 delta = islandTargetAddress - (atom->finalAddress() + fit->offsetInAtom + 4);
1846 if ( checkArmBranch24Displacement(delta) ) {
1847 toTarget = islandTarget;
1848 accumulator = islandTargetAddress;
1849 thumbTarget = targetIsThumb(state, islandfit);
1851 break;
1855 if ( thumbTarget )
1856 accumulator |= 1;
1857 if ( fit->contentDetlaToAddendOnly )
1858 accumulator = 0;
1859 // fall into kindStoreARMBranch24 case
1860 case ld::Fixup::kindStoreARMBranch24:
1861 // The pc added will be +8 from the pc
1862 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 8);
1863 rangeCheckARMBranch24(delta, state, atom, fit);
1864 instruction = get32LE(fixUpLocation);
1865 // Make sure we are calling arm with bl, thumb with blx
1866 is_bl = ((instruction & 0xFF000000) == 0xEB000000);
1867 is_blx = ((instruction & 0xFE000000) == 0xFA000000);
1868 is_b = !is_blx && ((instruction & 0x0F000000) == 0x0A000000);
1869 if ( (is_bl | is_blx) && thumbTarget ) {
1870 uint32_t opcode = 0xFA000000; // force to be blx
1871 uint32_t disp = (uint32_t)(delta >> 2) & 0x00FFFFFF;
1872 uint32_t h_bit = (uint32_t)(delta << 23) & 0x01000000;
1873 newInstruction = opcode | h_bit | disp;
1875 else if ( (is_bl | is_blx) && !thumbTarget ) {
1876 uint32_t opcode = 0xEB000000; // force to be bl
1877 uint32_t disp = (uint32_t)(delta >> 2) & 0x00FFFFFF;
1878 newInstruction = opcode | disp;
1880 else if ( is_b && thumbTarget ) {
1881 if ( fit->contentDetlaToAddendOnly )
1882 newInstruction = (instruction & 0xFF000000) | ((uint32_t)(delta >> 2) & 0x00FFFFFF);
1883 else
1884 throwf("no pc-rel bx arm instruction. Can't fix up branch to %s in %s",
1885 referenceTargetAtomName(state, fit), atom->name());
1887 else if ( !is_bl && !is_blx && thumbTarget ) {
1888 throwf("don't know how to convert instruction %x referencing %s to thumb",
1889 instruction, referenceTargetAtomName(state, fit));
1891 else {
1892 newInstruction = (instruction & 0xFF000000) | ((uint32_t)(delta >> 2) & 0x00FFFFFF);
1894 set32LE(fixUpLocation, newInstruction);
1895 break;
1896 case ld::Fixup::kindStoreTargetAddressThumbBranch22:
1897 accumulator = addressOf(state, fit, &toTarget);
1898 thumbTarget = targetIsThumb(state, fit);
1899 if ( toTarget->contentType() == ld::Atom::typeBranchIsland ) {
1900 // branching to island, so see if ultimate target is in range
1901 // and if so branch to ultimate target instead.
1902 for (ld::Fixup::iterator islandfit = toTarget->fixupsBegin(), end=toTarget->fixupsEnd(); islandfit != end; ++islandfit) {
1903 if ( islandfit->kind == ld::Fixup::kindIslandTarget ) {
1904 const ld::Atom* islandTarget = NULL;
1905 uint64_t islandTargetAddress = addressOf(state, islandfit, &islandTarget);
1906 if ( !fit->contentDetlaToAddendOnly ) {
1907 if ( targetIsThumb(state, islandfit) ) {
1908 // Thumb to thumb branch, we will be generating a bl instruction.
1909 // Delta is always even, so mask out thumb bit in target.
1910 islandTargetAddress &= -2ULL;
1912 else {
1913 // Target is not thumb, we will be generating a blx instruction
1914 // Since blx cannot have the low bit set, set bit[1] of the target to
1915 // bit[1] of the base address, so that the difference is a multiple of
1916 // 4 bytes.
1917 islandTargetAddress &= -3ULL;
1918 islandTargetAddress |= ((atom->finalAddress() + fit->offsetInAtom ) & 2LL);
1921 delta = islandTargetAddress - (atom->finalAddress() + fit->offsetInAtom + 4);
1922 if ( checkThumbBranch22Displacement(delta) ) {
1923 toTarget = islandTarget;
1924 accumulator = islandTargetAddress;
1925 thumbTarget = targetIsThumb(state, islandfit);
1927 break;
1931 if ( thumbTarget )
1932 accumulator |= 1;
1933 if ( fit->contentDetlaToAddendOnly )
1934 accumulator = 0;
1935 // fall into kindStoreThumbBranch22 case
1936 case ld::Fixup::kindStoreThumbBranch22:
1937 instruction = get32LE(fixUpLocation);
1938 is_bl = ((instruction & 0xD000F800) == 0xD000F000);
1939 is_blx = ((instruction & 0xD000F800) == 0xC000F000);
1940 is_b = ((instruction & 0xD000F800) == 0x9000F000);
1941 if ( !fit->contentDetlaToAddendOnly ) {
1942 if ( thumbTarget ) {
1943 // Thumb to thumb branch, we will be generating a bl instruction.
1944 // Delta is always even, so mask out thumb bit in target.
1945 accumulator &= -2ULL;
1947 else {
1948 // Target is not thumb, we will be generating a blx instruction
1949 // Since blx cannot have the low bit set, set bit[1] of the target to
1950 // bit[1] of the base address, so that the difference is a multiple of
1951 // 4 bytes.
1952 accumulator &= -3ULL;
1953 accumulator |= ((atom->finalAddress() + fit->offsetInAtom ) & 2LL);
1956 // The pc added will be +4 from the pc
1957 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom + 4);
1958 // <rdar://problem/16652542> support bl in very large .o files
1959 if ( fit->contentDetlaToAddendOnly ) {
1960 while ( delta < (-16777216LL) )
1961 delta += 0x2000000;
1963 rangeCheckThumbBranch22(delta, state, atom, fit);
1964 if ( _options.preferSubArchitecture() && _options.archSupportsThumb2() ) {
1965 // The instruction is really two instructions:
1966 // The lower 16 bits are the first instruction, which contains the high
1967 // 11 bits of the displacement.
1968 // The upper 16 bits are the second instruction, which contains the low
1969 // 11 bits of the displacement, as well as differentiating bl and blx.
1970 uint32_t s = (uint32_t)(delta >> 24) & 0x1;
1971 uint32_t i1 = (uint32_t)(delta >> 23) & 0x1;
1972 uint32_t i2 = (uint32_t)(delta >> 22) & 0x1;
1973 uint32_t imm10 = (uint32_t)(delta >> 12) & 0x3FF;
1974 uint32_t imm11 = (uint32_t)(delta >> 1) & 0x7FF;
1975 uint32_t j1 = (i1 == s);
1976 uint32_t j2 = (i2 == s);
1977 if ( is_bl ) {
1978 if ( thumbTarget )
1979 instruction = 0xD000F000; // keep bl
1980 else
1981 instruction = 0xC000F000; // change to blx
1983 else if ( is_blx ) {
1984 if ( thumbTarget )
1985 instruction = 0xD000F000; // change to bl
1986 else
1987 instruction = 0xC000F000; // keep blx
1989 else if ( is_b ) {
1990 instruction = 0x9000F000; // keep b
1991 if ( !thumbTarget && !fit->contentDetlaToAddendOnly ) {
1992 throwf("armv7 has no pc-rel bx thumb instruction. Can't fix up branch to %s in %s",
1993 referenceTargetAtomName(state, fit), atom->name());
1996 else {
1997 if ( !thumbTarget )
1998 throwf("don't know how to convert branch instruction %x referencing %s to bx",
1999 instruction, referenceTargetAtomName(state, fit));
2000 instruction = 0x9000F000; // keep b
2002 uint32_t nextDisp = (j1 << 13) | (j2 << 11) | imm11;
2003 uint32_t firstDisp = (s << 10) | imm10;
2004 newInstruction = instruction | (nextDisp << 16) | firstDisp;
2005 //warning("s=%d, j1=%d, j2=%d, imm10=0x%0X, imm11=0x%0X, instruction=0x%08X, first=0x%04X, next=0x%04X, new=0x%08X, disp=0x%llX for %s to %s\n",
2006 // s, j1, j2, imm10, imm11, instruction, firstDisp, nextDisp, newInstruction, delta, atom->name(), toTarget->name());
2007 set32LE(fixUpLocation, newInstruction);
2009 else {
2010 // The instruction is really two instructions:
2011 // The lower 16 bits are the first instruction, which contains the high
2012 // 11 bits of the displacement.
2013 // The upper 16 bits are the second instruction, which contains the low
2014 // 11 bits of the displacement, as well as differentiating bl and blx.
2015 uint32_t firstDisp = (uint32_t)(delta >> 12) & 0x7FF;
2016 uint32_t nextDisp = (uint32_t)(delta >> 1) & 0x7FF;
2017 if ( is_bl && !thumbTarget ) {
2018 instruction = 0xE800F000;
2020 else if ( is_blx && thumbTarget ) {
2021 instruction = 0xF800F000;
2023 else if ( is_b ) {
2024 instruction = 0x9000F000; // keep b
2025 if ( !thumbTarget && !fit->contentDetlaToAddendOnly ) {
2026 throwf("armv6 has no pc-rel bx thumb instruction. Can't fix up branch to %s in %s",
2027 referenceTargetAtomName(state, fit), atom->name());
2030 else {
2031 instruction = instruction & 0xF800F800;
2033 newInstruction = instruction | (nextDisp << 16) | firstDisp;
2034 set32LE(fixUpLocation, newInstruction);
2036 break;
2037 case ld::Fixup::kindStoreARMLow16:
2039 uint32_t imm4 = (accumulator & 0x0000F000) >> 12;
2040 uint32_t imm12 = accumulator & 0x00000FFF;
2041 instruction = get32LE(fixUpLocation);
2042 newInstruction = (instruction & 0xFFF0F000) | (imm4 << 16) | imm12;
2043 set32LE(fixUpLocation, newInstruction);
2045 break;
2046 case ld::Fixup::kindStoreARMHigh16:
2048 uint32_t imm4 = (accumulator & 0xF0000000) >> 28;
2049 uint32_t imm12 = (accumulator & 0x0FFF0000) >> 16;
2050 instruction = get32LE(fixUpLocation);
2051 newInstruction = (instruction & 0xFFF0F000) | (imm4 << 16) | imm12;
2052 set32LE(fixUpLocation, newInstruction);
2054 break;
2055 case ld::Fixup::kindStoreThumbLow16:
2057 uint32_t imm4 = (accumulator & 0x0000F000) >> 12;
2058 uint32_t i = (accumulator & 0x00000800) >> 11;
2059 uint32_t imm3 = (accumulator & 0x00000700) >> 8;
2060 uint32_t imm8 = accumulator & 0x000000FF;
2061 instruction = get32LE(fixUpLocation);
2062 newInstruction = (instruction & 0x8F00FBF0) | imm4 | (i << 10) | (imm3 << 28) | (imm8 << 16);
2063 set32LE(fixUpLocation, newInstruction);
2065 break;
2066 case ld::Fixup::kindStoreThumbHigh16:
2068 uint32_t imm4 = (accumulator & 0xF0000000) >> 28;
2069 uint32_t i = (accumulator & 0x08000000) >> 27;
2070 uint32_t imm3 = (accumulator & 0x07000000) >> 24;
2071 uint32_t imm8 = (accumulator & 0x00FF0000) >> 16;
2072 instruction = get32LE(fixUpLocation);
2073 newInstruction = (instruction & 0x8F00FBF0) | imm4 | (i << 10) | (imm3 << 28) | (imm8 << 16);
2074 set32LE(fixUpLocation, newInstruction);
2076 break;
2077 #if SUPPORT_ARCH_arm64
2078 case ld::Fixup::kindStoreTargetAddressARM64Branch26:
2079 accumulator = addressOf(state, fit, &toTarget);
2080 // fall into kindStoreARM64Branch26 case
2081 case ld::Fixup::kindStoreARM64Branch26:
2082 if ( fit->contentAddendOnly )
2083 delta = accumulator;
2084 else
2085 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom);
2086 rangeCheckARM64Branch26(delta, state, atom, fit);
2087 instruction = get32LE(fixUpLocation);
2088 newInstruction = (instruction & 0xFC000000) | ((uint32_t)(delta >> 2) & 0x03FFFFFF);
2089 set32LE(fixUpLocation, newInstruction);
2090 break;
2091 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21:
2092 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21:
2093 case ld::Fixup::kindStoreTargetAddressARM64Page21:
2094 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPage21:
2095 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPage21:
2096 accumulator = addressOf(state, fit, &toTarget);
2097 // fall into the kindStoreARM64*Page21 cases below
2098 case ld::Fixup::kindStoreARM64GOTLeaPage21:
2099 case ld::Fixup::kindStoreARM64GOTLoadPage21:
2100 case ld::Fixup::kindStoreARM64TLVPLoadPage21:
2101 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPage21:
2102 case ld::Fixup::kindStoreARM64Page21:
2104 // the ADRP instruction adds the imm << 12 to the page that the pc is on
2105 if ( fit->contentAddendOnly )
2106 delta = 0;
2107 else
2108 delta = (accumulator & (-4096)) - ((atom->finalAddress() + fit->offsetInAtom) & (-4096));
2109 rangeCheckARM64Page21(delta, state, atom, fit);
2110 instruction = get32LE(fixUpLocation);
2111 uint32_t immhi = (delta >> 9) & (0x00FFFFE0);
2112 uint32_t immlo = (delta << 17) & (0x60000000);
2113 newInstruction = (instruction & 0x9F00001F) | immlo | immhi;
2114 set32LE(fixUpLocation, newInstruction);
2116 break;
2117 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12:
2118 case ld::Fixup::kindStoreTargetAddressARM64PageOff12:
2119 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPageOff12:
2120 accumulator = addressOf(state, fit, &toTarget);
2121 // fall into kindAddressARM64PageOff12 case
2122 case ld::Fixup::kindStoreARM64TLVPLoadPageOff12:
2123 case ld::Fixup::kindStoreARM64GOTLoadPageOff12:
2124 case ld::Fixup::kindStoreARM64PageOff12:
2126 uint32_t offset = accumulator & 0x00000FFF;
2127 instruction = get32LE(fixUpLocation);
2128 // LDR/STR instruction have implicit scale factor, need to compensate for that
2129 if ( instruction & 0x08000000 ) {
2130 uint32_t implictShift = ((instruction >> 30) & 0x3);
2131 switch ( implictShift ) {
2132 case 0:
2133 if ( (instruction & 0x04800000) == 0x04800000 ) {
2134 // vector and byte LDR/STR have same "size" bits, need to check other bits to differentiate
2135 implictShift = 4;
2136 if ( (offset & 0xF) != 0 ) {
2137 throwf("128-bit LDR/STR not 16-byte aligned: from %s (0x%08llX) to %s (0x%08llX)",
2138 atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fit),
2139 addressOf(state, fit, &toTarget));
2142 break;
2143 case 1:
2144 if ( (offset & 0x1) != 0 ) {
2145 throwf("16-bit LDR/STR not 2-byte aligned: from %s (0x%08llX) to %s (0x%08llX)",
2146 atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fit),
2147 addressOf(state, fit, &toTarget));
2149 break;
2150 case 2:
2151 if ( (offset & 0x3) != 0 ) {
2152 throwf("32-bit LDR/STR not 4-byte aligned: from %s (0x%08llX) to %s (0x%08llX)",
2153 atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fit),
2154 addressOf(state, fit, &toTarget));
2156 break;
2157 case 3:
2158 if ( (offset & 0x7) != 0 ) {
2159 throwf("64-bit LDR/STR not 8-byte aligned: from %s (0x%08llX) to %s (0x%08llX)",
2160 atom->name(), atom->finalAddress(), referenceTargetAtomName(state, fit),
2161 addressOf(state, fit, &toTarget));
2163 break;
2165 // compensate for implicit scale
2166 offset >>= implictShift;
2168 if ( fit->contentAddendOnly )
2169 offset = 0;
2170 uint32_t imm12 = offset << 10;
2171 newInstruction = (instruction & 0xFFC003FF) | imm12;
2172 set32LE(fixUpLocation, newInstruction);
2174 break;
2175 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12:
2176 accumulator = addressOf(state, fit, &toTarget);
2177 // fall into kindStoreARM64GOTLeaPageOff12 case
2178 case ld::Fixup::kindStoreARM64GOTLeaPageOff12:
2180 // GOT entry was optimized away, change LDR instruction to a ADD
2181 instruction = get32LE(fixUpLocation);
2182 if ( (instruction & 0xBFC00000) != 0xB9400000 )
2183 throwf("GOT load reloc does not point to a LDR instruction in %s", atom->name());
2184 uint32_t offset = accumulator & 0x00000FFF;
2185 uint32_t imm12 = offset << 10;
2186 newInstruction = 0x91000000 | imm12 | (instruction & 0x000003FF);
2187 set32LE(fixUpLocation, newInstruction);
2189 break;
2190 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPageOff12:
2191 accumulator = addressOf(state, fit, &toTarget);
2192 // fall into kindStoreARM64TLVPLoadNowLeaPageOff12 case
2193 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPageOff12:
2195 // TLV thunk in same linkage unit, so LEA it directly, changing LDR instruction to a ADD
2196 instruction = get32LE(fixUpLocation);
2197 if ( (instruction & 0xBFC00000) != 0xB9400000 )
2198 throwf("TLV load reloc does not point to a LDR instruction in %s", atom->name());
2199 uint32_t offset = accumulator & 0x00000FFF;
2200 uint32_t imm12 = offset << 10;
2201 newInstruction = 0x91000000 | imm12 | (instruction & 0x000003FF);
2202 set32LE(fixUpLocation, newInstruction);
2204 break;
2205 case ld::Fixup::kindStoreARM64PointerToGOT:
2206 set64LE(fixUpLocation, accumulator);
2207 break;
2208 case ld::Fixup::kindStoreARM64PCRelToGOT:
2209 if ( fit->contentAddendOnly )
2210 delta = accumulator;
2211 else
2212 delta = accumulator - (atom->finalAddress() + fit->offsetInAtom);
2213 set32LE(fixUpLocation, delta);
2214 break;
2215 #endif
2218 if (printStuff) fprintf(stderr, "\n");
2219 #if SUPPORT_ARCH_arm64
2220 // after all fixups are done on atom, if there are potential optimizations, do those
2221 if ( (usedByHints.size() != 0) && (_options.outputKind() != Options::kObjectFile) && !_options.ignoreOptimizationHints() ) {
2222 // fill in second part of usedByHints map, so we can see the target of fixups that might be optimized
2223 for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
2224 switch ( fit->kind ) {
2225 case ld::Fixup::kindLinkerOptimizationHint:
2226 case ld::Fixup::kindNoneFollowOn:
2227 case ld::Fixup::kindNoneGroupSubordinate:
2228 case ld::Fixup::kindNoneGroupSubordinateFDE:
2229 case ld::Fixup::kindNoneGroupSubordinateLSDA:
2230 case ld::Fixup::kindNoneGroupSubordinatePersonality:
2231 break;
2232 default:
2233 if ( fit->firstInCluster() ) {
2234 std::map<uint32_t, const Fixup*>::iterator pos = usedByHints.find(fit->offsetInAtom);
2235 if ( pos != usedByHints.end() ) {
2236 assert(pos->second == NULL && "two fixups in same hint location");
2237 pos->second = fit;
2238 //fprintf(stderr, "setting %s usedByHints[0x%04X], kind = %d\n", atom->name(), fit->offsetInAtom, fit->kind);
2244 // apply hints pass 1
2245 for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
2246 if ( fit->kind != ld::Fixup::kindLinkerOptimizationHint )
2247 continue;
2248 InstructionInfo infoA;
2249 InstructionInfo infoB;
2250 InstructionInfo infoC;
2251 InstructionInfo infoD;
2252 LoadStoreInfo ldrInfoB, ldrInfoC;
2253 AddInfo addInfoB;
2254 AdrpInfo adrpInfoA;
2255 bool usableSegment;
2256 bool targetFourByteAligned;
2257 bool literalableSize, isADRP, isADD, isLDR, isSTR;
2258 //uint8_t loadSize, destReg;
2259 //uint32_t scaledOffset;
2260 //uint32_t imm12;
2261 ld::Fixup::LOH_arm64 alt;
2262 alt.addend = fit->u.addend;
2263 setInfo(state, atom, buffer, usedByHints, fit->offsetInAtom, (alt.info.delta1 << 2), &infoA);
2264 if ( alt.info.count > 0 )
2265 setInfo(state, atom, buffer, usedByHints, fit->offsetInAtom, (alt.info.delta2 << 2), &infoB);
2266 if ( alt.info.count > 1 )
2267 setInfo(state, atom, buffer, usedByHints, fit->offsetInAtom, (alt.info.delta3 << 2), &infoC);
2268 if ( alt.info.count > 2 )
2269 setInfo(state, atom, buffer, usedByHints, fit->offsetInAtom, (alt.info.delta4 << 2), &infoD);
2271 if ( _options.sharedRegionEligible() ) {
2272 if ( _options.sharedRegionEncodingV2() ) {
2273 // In v2 format, all references might be moved at dyld shared cache creation time
2274 usableSegment = false;
2276 else {
2277 // In v1 format, only references to something in __TEXT segment could be optimized
2278 usableSegment = (strcmp(atom->section().segmentName(), infoB.target->section().segmentName()) == 0);
2281 else {
2282 // main executables can optimize any reference
2283 usableSegment = true;
2286 switch ( alt.info.kind ) {
2287 case LOH_ARM64_ADRP_ADRP:
2288 // processed in pass 2 because some ADRP may have been removed
2289 break;
2290 case LOH_ARM64_ADRP_LDR:
2291 LOH_ASSERT(alt.info.count == 1);
2292 LOH_ASSERT(isPageKind(infoA.fixup));
2293 LOH_ASSERT(isPageOffsetKind(infoB.fixup));
2294 LOH_ASSERT(infoA.target == infoB.target);
2295 LOH_ASSERT(infoA.targetAddress == infoB.targetAddress);
2296 isADRP = parseADRP(infoA.instruction, adrpInfoA);
2297 LOH_ASSERT(isADRP);
2298 isLDR = parseLoadOrStore(infoB.instruction, ldrInfoB);
2299 // silently ignore LDRs transformed to ADD by TLV pass
2300 if ( !isLDR && infoB.fixup->kind == ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPageOff12 )
2301 break;
2302 LOH_ASSERT(isLDR);
2303 LOH_ASSERT(ldrInfoB.baseReg == adrpInfoA.destReg);
2304 LOH_ASSERT(ldrInfoB.offset == (infoA.targetAddress & 0x00000FFF));
2305 literalableSize = ( (ldrInfoB.size != 1) && (ldrInfoB.size != 2) );
2306 targetFourByteAligned = ( (infoA.targetAddress & 0x3) == 0 );
2307 if ( literalableSize && usableSegment && targetFourByteAligned && withinOneMeg(infoB.instructionAddress, infoA.targetAddress) ) {
2308 set32LE(infoA.instructionContent, makeNOP());
2309 set32LE(infoB.instructionContent, makeLDR_literal(ldrInfoB, infoA.targetAddress, infoB.instructionAddress));
2310 if ( _options.verboseOptimizationHints() )
2311 fprintf(stderr, "adrp-ldr at 0x%08llX transformed to LDR literal, usableSegment=%d usableSegment\n", infoB.instructionAddress, usableSegment);
2313 else {
2314 if ( _options.verboseOptimizationHints() )
2315 fprintf(stderr, "adrp-ldr at 0x%08llX not transformed, isLDR=%d, literalableSize=%d, inRange=%d, usableSegment=%d, scaledOffset=%d\n",
2316 infoB.instructionAddress, isLDR, literalableSize, withinOneMeg(infoB.instructionAddress, infoA.targetAddress), usableSegment, ldrInfoB.offset);
2318 break;
2319 case LOH_ARM64_ADRP_ADD_LDR:
2320 LOH_ASSERT(alt.info.count == 2);
2321 LOH_ASSERT(isPageKind(infoA.fixup));
2322 LOH_ASSERT(isPageOffsetKind(infoB.fixup));
2323 LOH_ASSERT(infoC.fixup == NULL);
2324 LOH_ASSERT(infoA.target == infoB.target);
2325 LOH_ASSERT(infoA.targetAddress == infoB.targetAddress);
2326 isADRP = parseADRP(infoA.instruction, adrpInfoA);
2327 LOH_ASSERT(isADRP);
2328 isADD = parseADD(infoB.instruction, addInfoB);
2329 LOH_ASSERT(isADD);
2330 LOH_ASSERT(adrpInfoA.destReg == addInfoB.srcReg);
2331 isLDR = parseLoadOrStore(infoC.instruction, ldrInfoC);
2332 LOH_ASSERT(isLDR);
2333 LOH_ASSERT(addInfoB.destReg == ldrInfoC.baseReg);
2334 targetFourByteAligned = ( ((infoB.targetAddress+ldrInfoC.offset) & 0x3) == 0 );
2335 literalableSize = ( (ldrInfoC.size != 1) && (ldrInfoC.size != 2) );
2336 if ( literalableSize && usableSegment && targetFourByteAligned && withinOneMeg(infoC.instructionAddress, infoA.targetAddress+ldrInfoC.offset) ) {
2337 // can do T1 transformation to LDR literal
2338 set32LE(infoA.instructionContent, makeNOP());
2339 set32LE(infoB.instructionContent, makeNOP());
2340 set32LE(infoC.instructionContent, makeLDR_literal(ldrInfoC, infoA.targetAddress+ldrInfoC.offset, infoC.instructionAddress));
2341 if ( _options.verboseOptimizationHints() ) {
2342 fprintf(stderr, "adrp-add-ldr at 0x%08llX T1 transformed to LDR literal\n", infoC.instructionAddress);
2345 else if ( usableSegment && withinOneMeg(infoA.instructionAddress, infoA.targetAddress+ldrInfoC.offset) ) {
2346 // can do T4 transformation and turn ADRP/ADD into ADR
2347 set32LE(infoA.instructionContent, makeADR(ldrInfoC.baseReg, infoA.targetAddress+ldrInfoC.offset, infoA.instructionAddress));
2348 set32LE(infoB.instructionContent, makeNOP());
2349 ldrInfoC.offset = 0; // offset is now in ADR instead of ADD or LDR
2350 set32LE(infoC.instructionContent, makeLoadOrStore(ldrInfoC));
2351 set32LE(infoC.instructionContent, infoC.instruction & 0xFFC003FF);
2352 if ( _options.verboseOptimizationHints() )
2353 fprintf(stderr, "adrp-add-ldr at 0x%08llX T4 transformed to ADR/LDR\n", infoB.instructionAddress);
2355 else if ( ((infoB.targetAddress % ldrInfoC.size) == 0) && (ldrInfoC.offset == 0) ) {
2356 // can do T2 transformation by merging ADD into LDR
2357 // Leave ADRP as-is
2358 set32LE(infoB.instructionContent, makeNOP());
2359 ldrInfoC.offset += addInfoB.addend;
2360 set32LE(infoC.instructionContent, makeLoadOrStore(ldrInfoC));
2361 if ( _options.verboseOptimizationHints() )
2362 fprintf(stderr, "adrp-add-ldr at 0x%08llX T2 transformed to ADRP/LDR \n", infoC.instructionAddress);
2364 else {
2365 if ( _options.verboseOptimizationHints() )
2366 fprintf(stderr, "adrp-add-ldr at 0x%08llX could not be transformed, loadSize=%d, literalableSize=%d, inRange=%d, usableSegment=%d, targetFourByteAligned=%d, imm12=%d\n",
2367 infoC.instructionAddress, ldrInfoC.size, literalableSize, withinOneMeg(infoC.instructionAddress, infoA.targetAddress+ldrInfoC.offset), usableSegment, targetFourByteAligned, ldrInfoC.offset);
2369 break;
2370 case LOH_ARM64_ADRP_ADD:
2371 LOH_ASSERT(alt.info.count == 1);
2372 LOH_ASSERT(isPageKind(infoA.fixup));
2373 LOH_ASSERT(isPageOffsetKind(infoB.fixup));
2374 LOH_ASSERT(infoA.target == infoB.target);
2375 LOH_ASSERT(infoA.targetAddress == infoB.targetAddress);
2376 isADRP = parseADRP(infoA.instruction, adrpInfoA);
2377 LOH_ASSERT(isADRP);
2378 isADD = parseADD(infoB.instruction, addInfoB);
2379 LOH_ASSERT(isADD);
2380 LOH_ASSERT(adrpInfoA.destReg == addInfoB.srcReg);
2381 if ( usableSegment && withinOneMeg(infoA.targetAddress, infoA.instructionAddress) ) {
2382 // can do T4 transformation and use ADR
2383 set32LE(infoA.instructionContent, makeADR(addInfoB.destReg, infoA.targetAddress, infoA.instructionAddress));
2384 set32LE(infoB.instructionContent, makeNOP());
2385 if ( _options.verboseOptimizationHints() )
2386 fprintf(stderr, "adrp-add at 0x%08llX transformed to ADR\n", infoB.instructionAddress);
2388 else {
2389 if ( _options.verboseOptimizationHints() )
2390 fprintf(stderr, "adrp-add at 0x%08llX not transformed, isAdd=%d, inRange=%d, usableSegment=%d\n",
2391 infoB.instructionAddress, isADD, withinOneMeg(infoA.targetAddress, infoA.instructionAddress), usableSegment);
2393 break;
2394 case LOH_ARM64_ADRP_LDR_GOT_LDR:
2395 LOH_ASSERT(alt.info.count == 2);
2396 LOH_ASSERT(isPageKind(infoA.fixup, true));
2397 LOH_ASSERT(isPageOffsetKind(infoB.fixup, true));
2398 LOH_ASSERT(infoC.fixup == NULL);
2399 LOH_ASSERT(infoA.target == infoB.target);
2400 LOH_ASSERT(infoA.targetAddress == infoB.targetAddress);
2401 isADRP = parseADRP(infoA.instruction, adrpInfoA);
2402 LOH_ASSERT(isADRP);
2403 isLDR = parseLoadOrStore(infoC.instruction, ldrInfoC);
2404 LOH_ASSERT(isLDR);
2405 isADD = parseADD(infoB.instruction, addInfoB);
2406 isLDR = parseLoadOrStore(infoB.instruction, ldrInfoB);
2407 if ( isLDR ) {
2408 // target of GOT is external
2409 LOH_ASSERT(ldrInfoB.size == 8);
2410 LOH_ASSERT(!ldrInfoB.isFloat);
2411 LOH_ASSERT(ldrInfoC.baseReg == ldrInfoB.reg);
2412 //fprintf(stderr, "infoA.target=%p, %s, infoA.targetAddress=0x%08llX\n", infoA.target, infoA.target->name(), infoA.targetAddress);
2413 targetFourByteAligned = ( ((infoA.targetAddress + ldrInfoC.offset) & 0x3) == 0 );
2414 if ( usableSegment && targetFourByteAligned && withinOneMeg(infoB.instructionAddress, infoA.targetAddress + ldrInfoC.offset) ) {
2415 // can do T5 transform
2416 set32LE(infoA.instructionContent, makeNOP());
2417 set32LE(infoB.instructionContent, makeLDR_literal(ldrInfoB, infoA.targetAddress, infoB.instructionAddress));
2418 if ( _options.verboseOptimizationHints() ) {
2419 fprintf(stderr, "adrp-ldr-got-ldr at 0x%08llX T5 transformed to LDR literal of GOT plus LDR\n", infoC.instructionAddress);
2422 else {
2423 if ( _options.verboseOptimizationHints() )
2424 fprintf(stderr, "adrp-ldr-got-ldr at 0x%08llX no optimization done\n", infoC.instructionAddress);
2427 else if ( isADD ) {
2428 // target of GOT is in same linkage unit and B instruction was changed to ADD to compute LEA of target
2429 LOH_ASSERT(addInfoB.srcReg == adrpInfoA.destReg);
2430 LOH_ASSERT(addInfoB.destReg == ldrInfoC.baseReg);
2431 targetFourByteAligned = ( ((infoA.targetAddress) & 0x3) == 0 );
2432 literalableSize = ( (ldrInfoC.size != 1) && (ldrInfoC.size != 2) );
2433 if ( usableSegment && literalableSize && targetFourByteAligned && withinOneMeg(infoC.instructionAddress, infoA.targetAddress + ldrInfoC.offset) ) {
2434 // can do T1 transform
2435 set32LE(infoA.instructionContent, makeNOP());
2436 set32LE(infoB.instructionContent, makeNOP());
2437 set32LE(infoC.instructionContent, makeLDR_literal(ldrInfoC, infoA.targetAddress + ldrInfoC.offset, infoC.instructionAddress));
2438 if ( _options.verboseOptimizationHints() )
2439 fprintf(stderr, "adrp-ldr-got-ldr at 0x%08llX T1 transformed to LDR literal\n", infoC.instructionAddress);
2441 else if ( usableSegment && withinOneMeg(infoA.instructionAddress, infoA.targetAddress) ) {
2442 // can do T4 transform
2443 set32LE(infoA.instructionContent, makeADR(ldrInfoC.baseReg, infoA.targetAddress, infoA.instructionAddress));
2444 set32LE(infoB.instructionContent, makeNOP());
2445 set32LE(infoC.instructionContent, makeLoadOrStore(ldrInfoC));
2446 if ( _options.verboseOptimizationHints() ) {
2447 fprintf(stderr, "adrp-ldr-got-ldr at 0x%08llX T4 transformed to ADR/LDR\n", infoC.instructionAddress);
2450 else if ( ((infoA.targetAddress % ldrInfoC.size) == 0) && ((addInfoB.addend + ldrInfoC.offset) < 4096) ) {
2451 // can do T2 transform
2452 set32LE(infoB.instructionContent, makeNOP());
2453 ldrInfoC.baseReg = adrpInfoA.destReg;
2454 ldrInfoC.offset += addInfoB.addend;
2455 set32LE(infoC.instructionContent, makeLoadOrStore(ldrInfoC));
2456 if ( _options.verboseOptimizationHints() ) {
2457 fprintf(stderr, "adrp-ldr-got-ldr at 0x%08llX T2 transformed to ADRP/NOP/LDR\n", infoC.instructionAddress);
2460 else {
2461 // T3 transform already done by ld::passes:got:doPass()
2462 if ( _options.verboseOptimizationHints() ) {
2463 fprintf(stderr, "adrp-ldr-got-ldr at 0x%08llX T3 transformed to ADRP/ADD/LDR\n", infoC.instructionAddress);
2467 else {
2468 if ( _options.verboseOptimizationHints() )
2469 fprintf(stderr, "adrp-ldr-got-ldr at 0x%08llX not ADD or LDR\n", infoC.instructionAddress);
2471 break;
2472 case LOH_ARM64_ADRP_ADD_STR:
2473 LOH_ASSERT(alt.info.count == 2);
2474 LOH_ASSERT(isPageKind(infoA.fixup));
2475 LOH_ASSERT(isPageOffsetKind(infoB.fixup));
2476 LOH_ASSERT(infoC.fixup == NULL);
2477 LOH_ASSERT(infoA.target == infoB.target);
2478 LOH_ASSERT(infoA.targetAddress == infoB.targetAddress);
2479 isADRP = parseADRP(infoA.instruction, adrpInfoA);
2480 LOH_ASSERT(isADRP);
2481 isADD = parseADD(infoB.instruction, addInfoB);
2482 LOH_ASSERT(isADD);
2483 LOH_ASSERT(adrpInfoA.destReg == addInfoB.srcReg);
2484 isSTR = (parseLoadOrStore(infoC.instruction, ldrInfoC) && ldrInfoC.isStore);
2485 LOH_ASSERT(isSTR);
2486 LOH_ASSERT(addInfoB.destReg == ldrInfoC.baseReg);
2487 if ( usableSegment && withinOneMeg(infoA.instructionAddress, infoA.targetAddress+ldrInfoC.offset) ) {
2488 // can do T4 transformation and turn ADRP/ADD into ADR
2489 set32LE(infoA.instructionContent, makeADR(ldrInfoC.baseReg, infoA.targetAddress+ldrInfoC.offset, infoA.instructionAddress));
2490 set32LE(infoB.instructionContent, makeNOP());
2491 ldrInfoC.offset = 0; // offset is now in ADR instead of ADD or LDR
2492 set32LE(infoC.instructionContent, makeLoadOrStore(ldrInfoC));
2493 set32LE(infoC.instructionContent, infoC.instruction & 0xFFC003FF);
2494 if ( _options.verboseOptimizationHints() )
2495 fprintf(stderr, "adrp-add-str at 0x%08llX T4 transformed to ADR/STR\n", infoB.instructionAddress);
2497 else if ( ((infoB.targetAddress % ldrInfoC.size) == 0) && (ldrInfoC.offset == 0) ) {
2498 // can do T2 transformation by merging ADD into STR
2499 // Leave ADRP as-is
2500 set32LE(infoB.instructionContent, makeNOP());
2501 ldrInfoC.offset += addInfoB.addend;
2502 set32LE(infoC.instructionContent, makeLoadOrStore(ldrInfoC));
2503 if ( _options.verboseOptimizationHints() )
2504 fprintf(stderr, "adrp-add-str at 0x%08llX T2 transformed to ADRP/STR \n", infoC.instructionAddress);
2506 else {
2507 if ( _options.verboseOptimizationHints() )
2508 fprintf(stderr, "adrp-add-str at 0x%08llX could not be transformed, loadSize=%d, inRange=%d, usableSegment=%d, imm12=%d\n",
2509 infoC.instructionAddress, ldrInfoC.size, withinOneMeg(infoC.instructionAddress, infoA.targetAddress+ldrInfoC.offset), usableSegment, ldrInfoC.offset);
2511 break;
2512 case LOH_ARM64_ADRP_LDR_GOT_STR:
2513 LOH_ASSERT(alt.info.count == 2);
2514 LOH_ASSERT(isPageKind(infoA.fixup, true));
2515 LOH_ASSERT(isPageOffsetKind(infoB.fixup, true));
2516 LOH_ASSERT(infoC.fixup == NULL);
2517 LOH_ASSERT(infoA.target == infoB.target);
2518 LOH_ASSERT(infoA.targetAddress == infoB.targetAddress);
2519 isADRP = parseADRP(infoA.instruction, adrpInfoA);
2520 LOH_ASSERT(isADRP);
2521 isSTR = (parseLoadOrStore(infoC.instruction, ldrInfoC) && ldrInfoC.isStore);
2522 LOH_ASSERT(isSTR);
2523 isADD = parseADD(infoB.instruction, addInfoB);
2524 isLDR = parseLoadOrStore(infoB.instruction, ldrInfoB);
2525 if ( isLDR ) {
2526 // target of GOT is external
2527 LOH_ASSERT(ldrInfoB.size == 8);
2528 LOH_ASSERT(!ldrInfoB.isFloat);
2529 LOH_ASSERT(ldrInfoC.baseReg == ldrInfoB.reg);
2530 targetFourByteAligned = ( ((infoA.targetAddress + ldrInfoC.offset) & 0x3) == 0 );
2531 if ( usableSegment && targetFourByteAligned && withinOneMeg(infoB.instructionAddress, infoA.targetAddress + ldrInfoC.offset) ) {
2532 // can do T5 transform
2533 set32LE(infoA.instructionContent, makeNOP());
2534 set32LE(infoB.instructionContent, makeLDR_literal(ldrInfoB, infoA.targetAddress, infoB.instructionAddress));
2535 if ( _options.verboseOptimizationHints() ) {
2536 fprintf(stderr, "adrp-ldr-got-str at 0x%08llX T5 transformed to LDR literal of GOT plus STR\n", infoC.instructionAddress);
2539 else {
2540 if ( _options.verboseOptimizationHints() )
2541 fprintf(stderr, "adrp-ldr-got-str at 0x%08llX no optimization done\n", infoC.instructionAddress);
2544 else if ( isADD ) {
2545 // target of GOT is in same linkage unit and B instruction was changed to ADD to compute LEA of target
2546 LOH_ASSERT(addInfoB.srcReg == adrpInfoA.destReg);
2547 LOH_ASSERT(addInfoB.destReg == ldrInfoC.baseReg);
2548 targetFourByteAligned = ( ((infoA.targetAddress) & 0x3) == 0 );
2549 literalableSize = ( (ldrInfoC.size != 1) && (ldrInfoC.size != 2) );
2550 if ( usableSegment && withinOneMeg(infoA.instructionAddress, infoA.targetAddress) ) {
2551 // can do T4 transform
2552 set32LE(infoA.instructionContent, makeADR(ldrInfoC.baseReg, infoA.targetAddress, infoA.instructionAddress));
2553 set32LE(infoB.instructionContent, makeNOP());
2554 set32LE(infoC.instructionContent, makeLoadOrStore(ldrInfoC));
2555 if ( _options.verboseOptimizationHints() ) {
2556 fprintf(stderr, "adrp-ldr-got-str at 0x%08llX T4 transformed to ADR/STR\n", infoC.instructionAddress);
2559 else if ( ((infoA.targetAddress % ldrInfoC.size) == 0) && (ldrInfoC.offset == 0) ) {
2560 // can do T2 transform
2561 set32LE(infoB.instructionContent, makeNOP());
2562 ldrInfoC.baseReg = adrpInfoA.destReg;
2563 ldrInfoC.offset += addInfoB.addend;
2564 set32LE(infoC.instructionContent, makeLoadOrStore(ldrInfoC));
2565 if ( _options.verboseOptimizationHints() ) {
2566 fprintf(stderr, "adrp-ldr-got-str at 0x%08llX T4 transformed to ADRP/NOP/STR\n", infoC.instructionAddress);
2569 else {
2570 // T3 transform already done by ld::passes:got:doPass()
2571 if ( _options.verboseOptimizationHints() ) {
2572 fprintf(stderr, "adrp-ldr-got-str at 0x%08llX T3 transformed to ADRP/ADD/STR\n", infoC.instructionAddress);
2576 else {
2577 if ( _options.verboseOptimizationHints() )
2578 fprintf(stderr, "adrp-ldr-got-str at 0x%08llX not ADD or LDR\n", infoC.instructionAddress);
2580 break;
2581 case LOH_ARM64_ADRP_LDR_GOT:
2582 LOH_ASSERT(alt.info.count == 1);
2583 LOH_ASSERT(isPageKind(infoA.fixup, true));
2584 LOH_ASSERT(isPageOffsetKind(infoB.fixup, true));
2585 LOH_ASSERT(infoA.target == infoB.target);
2586 LOH_ASSERT(infoA.targetAddress == infoB.targetAddress);
2587 isADRP = parseADRP(infoA.instruction, adrpInfoA);
2588 isADD = parseADD(infoB.instruction, addInfoB);
2589 isLDR = parseLoadOrStore(infoB.instruction, ldrInfoB);
2590 if ( isADRP ) {
2591 if ( isLDR ) {
2592 if ( usableSegment && withinOneMeg(infoB.instructionAddress, infoA.targetAddress) ) {
2593 // can do T5 transform (LDR literal load of GOT)
2594 set32LE(infoA.instructionContent, makeNOP());
2595 set32LE(infoB.instructionContent, makeLDR_literal(ldrInfoB, infoA.targetAddress, infoB.instructionAddress));
2596 if ( _options.verboseOptimizationHints() ) {
2597 fprintf(stderr, "adrp-ldr-got at 0x%08llX T5 transformed to NOP/LDR\n", infoC.instructionAddress);
2601 else if ( isADD ) {
2602 if ( usableSegment && withinOneMeg(infoA.instructionAddress, infoA.targetAddress) ) {
2603 // can do T4 transform (ADR to compute local address)
2604 set32LE(infoA.instructionContent, makeADR(addInfoB.destReg, infoA.targetAddress, infoA.instructionAddress));
2605 set32LE(infoB.instructionContent, makeNOP());
2606 if ( _options.verboseOptimizationHints() ) {
2607 fprintf(stderr, "adrp-ldr-got at 0x%08llX T4 transformed to ADR/STR\n", infoC.instructionAddress);
2611 else {
2612 if ( _options.verboseOptimizationHints() )
2613 fprintf(stderr, "adrp-ldr-got at 0x%08llX not LDR or ADD\n", infoB.instructionAddress);
2616 else {
2617 if ( _options.verboseOptimizationHints() )
2618 fprintf(stderr, "adrp-ldr-got at 0x%08llX not ADRP\n", infoA.instructionAddress);
2620 break;
2621 default:
2622 if ( _options.verboseOptimizationHints() )
2623 fprintf(stderr, "unknown hint kind %d alt.info.kind at 0x%08llX\n", alt.info.kind, infoA.instructionAddress);
2624 break;
2627 // apply hints pass 2
2628 for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
2629 if ( fit->kind != ld::Fixup::kindLinkerOptimizationHint )
2630 continue;
2631 InstructionInfo infoA;
2632 InstructionInfo infoB;
2633 ld::Fixup::LOH_arm64 alt;
2634 alt.addend = fit->u.addend;
2635 setInfo(state, atom, buffer, usedByHints, fit->offsetInAtom, (alt.info.delta1 << 2), &infoA);
2636 if ( alt.info.count > 0 )
2637 setInfo(state, atom, buffer, usedByHints, fit->offsetInAtom, (alt.info.delta2 << 2), &infoB);
2639 switch ( alt.info.kind ) {
2640 case LOH_ARM64_ADRP_ADRP:
2641 LOH_ASSERT(isPageKind(infoA.fixup));
2642 LOH_ASSERT(isPageKind(infoB.fixup));
2643 if ( (infoA.instruction & 0x9F000000) != 0x90000000 ) {
2644 if ( _options.verboseOptimizationHints() )
2645 fprintf(stderr, "may-reused-adrp at 0x%08llX no longer an ADRP, now 0x%08X\n", infoA.instructionAddress, infoA.instruction);
2646 sAdrpNA++;
2647 break;
2649 if ( (infoB.instruction & 0x9F000000) != 0x90000000 ) {
2650 if ( _options.verboseOptimizationHints() )
2651 fprintf(stderr, "may-reused-adrp at 0x%08llX no longer an ADRP, now 0x%08X\n", infoB.instructionAddress, infoA.instruction);
2652 sAdrpNA++;
2653 break;
2655 if ( (infoA.targetAddress & (-4096)) == (infoB.targetAddress & (-4096)) ) {
2656 set32LE(infoB.instructionContent, 0xD503201F);
2657 sAdrpNoped++;
2659 else {
2660 sAdrpNotNoped++;
2662 break;
2666 #endif // SUPPORT_ARCH_arm64
2669 void OutputFile::copyNoOps(uint8_t* from, uint8_t* to, bool thumb)
2671 switch ( _options.architecture() ) {
2672 case CPU_TYPE_POWERPC:
2673 case CPU_TYPE_POWERPC64:
2674 for (uint8_t* p=from; p < to; p += 4)
2675 OSWriteBigInt32((uint32_t*)p, 0, 0x60000000);
2676 break;
2677 case CPU_TYPE_I386:
2678 case CPU_TYPE_X86_64:
2679 for (uint8_t* p=from; p < to; ++p)
2680 *p = 0x90;
2681 break;
2682 case CPU_TYPE_ARM:
2683 if ( thumb ) {
2684 for (uint8_t* p=from; p < to; p += 2)
2685 OSWriteLittleInt16((uint16_t*)p, 0, 0x46c0);
2687 else {
2688 for (uint8_t* p=from; p < to; p += 4)
2689 OSWriteLittleInt32((uint32_t*)p, 0, 0xe1a00000);
2691 break;
2692 default:
2693 for (uint8_t* p=from; p < to; ++p)
2694 *p = 0x00;
2695 break;
2699 bool OutputFile::takesNoDiskSpace(const ld::Section* sect)
2701 switch ( sect->type() ) {
2702 case ld::Section::typeZeroFill:
2703 case ld::Section::typeTLVZeroFill:
2704 return _options.optimizeZeroFill();
2705 case ld::Section::typePageZero:
2706 case ld::Section::typeStack:
2707 case ld::Section::typeAbsoluteSymbols:
2708 case ld::Section::typeTentativeDefs:
2709 return true;
2710 default:
2711 break;
2713 return false;
2716 bool OutputFile::hasZeroForFileOffset(const ld::Section* sect)
2718 switch ( sect->type() ) {
2719 case ld::Section::typeZeroFill:
2720 case ld::Section::typeTLVZeroFill:
2721 return _options.optimizeZeroFill();
2722 case ld::Section::typePageZero:
2723 case ld::Section::typeStack:
2724 case ld::Section::typeTentativeDefs:
2725 return true;
2726 default:
2727 break;
2729 return false;
//
// writeAtoms()
//
// Walks every final section and has each atom copy its raw bytes into
// wholeBuffer (the in-memory output image), then applies the atom's fixups
// in place.  Gaps between atoms in code sections are filled with NOPs.
//
void OutputFile::writeAtoms(ld::Internal& state, uint8_t* wholeBuffer)
{
	// have each atom write itself
	uint64_t fileOffsetOfEndOfLastAtom = 0;
	uint64_t mhAddress = 0;		// address of the mach_header section, handed to applyFixUps()
	bool lastAtomUsesNoOps = false;
	for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		if ( sect->type() == ld::Section::typeMachHeader )
			mhAddress = sect->address;
		// zero-fill style sections contribute no bytes to the file
		if ( takesNoDiskSpace(sect) )
			continue;
		// only code sections get NOP padding between atoms
		const bool sectionUsesNops = (sect->type() == ld::Section::typeCode);
		//fprintf(stderr, "file offset=0x%08llX, section %s\n", sect->fileOffset, sect->sectionName());
		std::vector<const ld::Atom*>& atoms = sect->atoms;
		bool lastAtomWasThumb = false;
		for (std::vector<const ld::Atom*>::iterator ait = atoms.begin(); ait != atoms.end(); ++ait) {
			const ld::Atom* atom = *ait;
			// proxy atoms live in another linkage unit and have no content here
			if ( atom->definition() == ld::Atom::definitionProxy )
				continue;
			try {
				uint64_t fileOffset = atom->finalAddress() - sect->address + sect->fileOffset;
				// check for alignment padding between atoms
				if ( (fileOffset != fileOffsetOfEndOfLastAtom) && lastAtomUsesNoOps ) {
					// fill the alignment gap with NOPs (thumb-width if the previous atom was thumb)
					this->copyNoOps(&wholeBuffer[fileOffsetOfEndOfLastAtom], &wholeBuffer[fileOffset], lastAtomWasThumb);
				}
				// copy atom content
				atom->copyRawContent(&wholeBuffer[fileOffset]);
				// apply fix ups
				this->applyFixUps(state, mhAddress, atom, &wholeBuffer[fileOffset]);
				fileOffsetOfEndOfLastAtom = fileOffset+atom->size();
				lastAtomUsesNoOps = sectionUsesNops;
				lastAtomWasThumb = atom->isThumb();
			}
			catch (const char* msg) {
				// decorate low-level errors with the atom (and source file) being written
				if ( atom->file() != NULL )
					throwf("%s in '%s' from %s", msg, atom->name(), atom->file()->path());
				else
					throwf("%s in '%s'", msg, atom->name());
			}
		}
	}

	if ( _options.verboseOptimizationHints() ) {
		//fprintf(stderr, "ADRP optimized away: %d\n", sAdrpNA);
		//fprintf(stderr, "ADRPs changed to NOPs: %d\n", sAdrpNoped);
		//fprintf(stderr, "ADRPs unchanged: %d\n", sAdrpNotNoped);
	}
}
//
// computeContentUUID()
//
// Computes an MD5 digest over the output image and installs it as the LC_UUID.
// Byte ranges that vary between otherwise-identical links (stabs debug notes,
// the bitcode XAR header timestamps, and the load commands whose sizes depend
// on them) are collected in excludeRegions and skipped, so identical inputs
// yield identical UUIDs.
//
void OutputFile::computeContentUUID(ld::Internal& state, uint8_t* wholeBuffer)
{
	const bool log = false;
	if ( (_options.outputKind() != Options::kObjectFile) || state.someObjectFileHasDwarf ) {
		uint8_t digest[CC_MD5_DIGEST_LENGTH];
		// (start, end) file-offset pairs to leave out of the checksum
		std::vector<std::pair<uint64_t, uint64_t>> excludeRegions;
		uint64_t bitcodeCmdOffset;
		uint64_t bitcodeCmdEnd;
		uint64_t bitcodeSectOffset;
		uint64_t bitcodePaddingEnd;
		if ( _headersAndLoadCommandAtom->bitcodeBundleCommand(bitcodeCmdOffset, bitcodeCmdEnd,
															  bitcodeSectOffset, bitcodePaddingEnd) ) {
			// Exclude embedded bitcode bundle section which contains timestamps in XAR header
			// Note the timestamp is in the compressed XML header which means it might change the size of
			// bitcode section. The load command which include the size of the section and the padding after
			// the bitcode section should also be excluded in the UUID computation.
			// Bitcode section should appears before LINKEDIT
			// Exclude section cmd
			if ( log ) fprintf(stderr, "bundle cmd start=0x%08llX, bundle cmd end=0x%08llX\n",
							   bitcodeCmdOffset, bitcodeCmdEnd);
			excludeRegions.emplace_back(std::pair<uint64_t, uint64_t>(bitcodeCmdOffset, bitcodeCmdEnd));
			// Exclude section content
			if ( log ) fprintf(stderr, "bundle start=0x%08llX, bundle end=0x%08llX\n",
							   bitcodeSectOffset, bitcodePaddingEnd);
			excludeRegions.emplace_back(std::pair<uint64_t, uint64_t>(bitcodeSectOffset, bitcodePaddingEnd));
		}
		uint32_t stabsStringsOffsetStart;
		uint32_t tabsStringsOffsetEnd;	// NOTE(review): 'tabs' looks like a historical typo for 'stabs'
		uint32_t stabsOffsetStart;
		uint32_t stabsOffsetEnd;
		if ( _symbolTableAtom->hasStabs(stabsStringsOffsetStart, tabsStringsOffsetEnd, stabsOffsetStart, stabsOffsetEnd) ) {
			// find two areas of file that are stabs info and should not contribute to checksum
			uint64_t stringPoolFileOffset = 0;
			uint64_t symbolTableFileOffset = 0;
			for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
				ld::Internal::FinalSection* sect = *sit;
				if ( sect->type() == ld::Section::typeLinkEdit ) {
					if ( strcmp(sect->sectionName(), "__string_pool") == 0 )
						stringPoolFileOffset = sect->fileOffset;
					else if ( strcmp(sect->sectionName(), "__symbol_table") == 0 )
						symbolTableFileOffset = sect->fileOffset;
				}
			}
			// stabs offsets are relative to the symbol table / string pool starts
			uint64_t firstStabNlistFileOffset = symbolTableFileOffset + stabsOffsetStart;
			uint64_t lastStabNlistFileOffset = symbolTableFileOffset + stabsOffsetEnd;
			uint64_t firstStabStringFileOffset = stringPoolFileOffset + stabsStringsOffsetStart;
			uint64_t lastStabStringFileOffset = stringPoolFileOffset + tabsStringsOffsetEnd;
			if ( log ) fprintf(stderr, "stabNlist offset=0x%08llX, size=0x%08llX\n", firstStabNlistFileOffset, lastStabNlistFileOffset-firstStabNlistFileOffset);
			if ( log ) fprintf(stderr, "stabString offset=0x%08llX, size=0x%08llX\n", firstStabStringFileOffset, lastStabStringFileOffset-firstStabStringFileOffset);
			assert(firstStabNlistFileOffset <= firstStabStringFileOffset);
			excludeRegions.emplace_back(std::pair<uint64_t, uint64_t>(firstStabNlistFileOffset, lastStabNlistFileOffset));
			excludeRegions.emplace_back(std::pair<uint64_t, uint64_t>(firstStabStringFileOffset, lastStabStringFileOffset));
			// exclude LINKEDIT LC_SEGMENT (size field depends on stabs size)
			uint64_t linkeditSegCmdOffset;
			uint64_t linkeditSegCmdSize;
			_headersAndLoadCommandAtom->linkeditCmdInfo(linkeditSegCmdOffset, linkeditSegCmdSize);
			excludeRegions.emplace_back(std::pair<uint64_t, uint64_t>(linkeditSegCmdOffset, linkeditSegCmdOffset+linkeditSegCmdSize));
			if ( log ) fprintf(stderr, "linkedit SegCmdOffset=0x%08llX, size=0x%08llX\n", linkeditSegCmdOffset, linkeditSegCmdSize);
			uint64_t symbolTableCmdOffset;
			uint64_t symbolTableCmdSize;
			_headersAndLoadCommandAtom->symbolTableCmdInfo(symbolTableCmdOffset, symbolTableCmdSize);
			excludeRegions.emplace_back(std::pair<uint64_t, uint64_t>(symbolTableCmdOffset, symbolTableCmdOffset+symbolTableCmdSize));
			if ( log ) fprintf(stderr, "linkedit SegCmdOffset=0x%08llX, size=0x%08llX\n", symbolTableCmdOffset, symbolTableCmdSize);
		}
		if ( !excludeRegions.empty() ) {
			CC_MD5_CTX md5state;
			CC_MD5_Init(&md5state);
			// rdar://problem/19487042 include the output leaf file name in the hash
			const char* lastSlash = strrchr(_options.outputFilePath(), '/');
			if ( lastSlash != NULL ) {
				CC_MD5_Update(&md5state, lastSlash, strlen(lastSlash));
			}
			// hash only the gaps between excluded regions; regions must be
			// sorted and non-overlapping for the walk below to be valid
			std::sort(excludeRegions.begin(), excludeRegions.end());
			uint64_t checksumStart = 0;
			for ( auto& region : excludeRegions ) {
				uint64_t regionStart = region.first;
				uint64_t regionEnd = region.second;
				assert(checksumStart <= regionStart && regionStart <= regionEnd && "Region overlapped");
				if ( log ) fprintf(stderr, "checksum 0x%08llX -> 0x%08llX\n", checksumStart, regionStart);
				CC_MD5_Update(&md5state, &wholeBuffer[checksumStart], regionStart - checksumStart);
				checksumStart = regionEnd;
			}
			// hash the tail after the last excluded region
			if ( log ) fprintf(stderr, "checksum 0x%08llX -> 0x%08llX\n", checksumStart, _fileSize);
			CC_MD5_Update(&md5state, &wholeBuffer[checksumStart], _fileSize-checksumStart);
			CC_MD5_Final(digest, &md5state);
			if ( log ) fprintf(stderr, "uuid=%02X, %02X, %02X, %02X, %02X, %02X, %02X, %02X\n", digest[0], digest[1], digest[2],
							   digest[3], digest[4], digest[5], digest[6], digest[7]);
		}
		else {
			// nothing to exclude: hash the whole image in one call
			CC_MD5(wholeBuffer, _fileSize, digest);
		}
		// <rdar://problem/6723729> LC_UUID uuids should conform to RFC 4122 UUID version 4 & UUID version 5 formats
		// NOTE(review): (3 << 4) stamps version 3 (the MD5-based UUID variant),
		// not 4 or 5 as the comment above suggests — confirm intent upstream.
		digest[6] = ( digest[6] & 0x0F ) | ( 3 << 4 );
		digest[8] = ( digest[8] & 0x3F ) | 0x80;
		// update buffer with new UUID
		_headersAndLoadCommandAtom->setUUID(digest);
		_headersAndLoadCommandAtom->recopyUUIDCommand();
	}
}
2882 static int sDescriptorOfPathToRemove = -1;
2883 static void removePathAndExit(int sig)
2885 if ( sDescriptorOfPathToRemove != -1 ) {
2886 char path[MAXPATHLEN];
2887 if ( ::fcntl(sDescriptorOfPathToRemove, F_GETPATH, path) == 0 )
2888 ::unlink(path);
2890 fprintf(stderr, "ld: interrupted\n");
2891 exit(1);
//
// writeOutputFile()
//
// Creates the output file, writes all atoms into it (via writeAtoms), sets
// the UUID, and moves the result into place.  On HFS volumes the image is
// built in an mmap'ed temporary file ({path}.ld_XXXXXX) that is atomically
// renamed over the destination; otherwise it is built in a calloc'ed buffer
// and written out with write().
//
void OutputFile::writeOutputFile(ld::Internal& state)
{
	// for UNIX conformance, error if file exists and is not writable
	if ( (access(_options.outputFilePath(), F_OK) == 0) && (access(_options.outputFilePath(), W_OK) == -1) )
		throwf("can't write output file: %s", _options.outputFilePath());

	// executables default to 0777, object files to 0666, both masked by umask
	mode_t permissions = 0777;
	if ( _options.outputKind() == Options::kObjectFile )
		permissions = 0666;
	mode_t umask = ::umask(0);
	::umask(umask); // put back the original umask
	permissions &= ~umask;
	// Calling unlink first assures the file is gone so that open creates it with correct permissions
	// It also handles the case where __options.outputFilePath() file is not writable but its directory is
	// And it means we don't have to truncate the file when done writing (in case new is smaller than old)
	// Lastly, only delete existing file if it is a normal file (e.g. not /dev/null).
	struct stat stat_buf;
	bool outputIsRegularFile = false;
	bool outputIsMappableFile = false;
	if ( stat(_options.outputFilePath(), &stat_buf) != -1 ) {
		// NOTE(review): S_IFREG is a bit-pattern within S_IFMT, not a single flag;
		// e.g. S_IFSOCK also has this bit set.  S_ISREG(stat_buf.st_mode) would be exact.
		if (stat_buf.st_mode & S_IFREG) {
			outputIsRegularFile = true;
			// <rdar://problem/12264302> Don't use mmap on non-hfs volumes
			struct statfs fsInfo;
			if ( statfs(_options.outputFilePath(), &fsInfo) != -1 ) {
				if ( strcmp(fsInfo.f_fstypename, "hfs") == 0) {
					(void)unlink(_options.outputFilePath());
					outputIsMappableFile = true;
				}
			}
			else {
				outputIsMappableFile = false;
			}
		}
		else {
			outputIsRegularFile = false;
		}
	}
	else {
		// special files (pipes, devices, etc) must already exist
		outputIsRegularFile = true;
		// output file does not exist yet
		char dirPath[PATH_MAX];
		strcpy(dirPath, _options.outputFilePath());
		char* end = strrchr(dirPath, '/');
		if ( end != NULL ) {
			end[1] = '\0';
			// check the containing directory's filesystem instead
			struct statfs fsInfo;
			if ( statfs(dirPath, &fsInfo) != -1 ) {
				if ( strcmp(fsInfo.f_fstypename, "hfs") == 0) {
					outputIsMappableFile = true;
				}
			}
		}
	}

	//fprintf(stderr, "outputIsMappableFile=%d, outputIsRegularFile=%d, path=%s\n", outputIsMappableFile, outputIsRegularFile, _options.outputFilePath());

	int fd;
	// Construct a temporary path of the form {outputFilePath}.ld_XXXXXX
	const char filenameTemplate[] = ".ld_XXXXXX";
	char tmpOutput[PATH_MAX];
	uint8_t *wholeBuffer;
	if ( outputIsRegularFile && outputIsMappableFile ) {
		// <rdar://problem/20959031> ld64 should clean up temporary files on SIGINT
		::signal(SIGINT, removePathAndExit);

		strcpy(tmpOutput, _options.outputFilePath());
		// If the path is too long to add a suffix for a temporary name then
		// just fall back to using the output path.
		if (strlen(tmpOutput)+strlen(filenameTemplate) < PATH_MAX) {
			strcat(tmpOutput, filenameTemplate);
			fd = mkstemp(tmpOutput);
			sDescriptorOfPathToRemove = fd;		// SIGINT handler will unlink this file
		}
		else {
			fd = open(tmpOutput, O_RDWR|O_CREAT, permissions);
		}
		if ( fd == -1 )
			throwf("can't open output file for writing '%s', errno=%d", tmpOutput, errno);
		// size the file up front so it can be mmap'ed for writing
		if ( ftruncate(fd, _fileSize) == -1 ) {
			int err = errno;
			unlink(tmpOutput);
			if ( err == ENOSPC )
				throwf("not enough disk space for writing '%s'", _options.outputFilePath());
			else
				throwf("can't grow file for writing '%s', errno=%d", _options.outputFilePath(), err);
		}

		wholeBuffer = (uint8_t *)mmap(NULL, _fileSize, PROT_WRITE|PROT_READ, MAP_SHARED, fd, 0);
		if ( wholeBuffer == MAP_FAILED )
			throwf("can't create buffer of %llu bytes for output", _fileSize);
	}
	else {
		if ( outputIsRegularFile )
			fd = open(_options.outputFilePath(), O_RDWR|O_CREAT, permissions);
		else
			fd = open(_options.outputFilePath(), O_WRONLY);
		if ( fd == -1 )
			throwf("can't open output file for writing: %s, errno=%d", _options.outputFilePath(), errno);
		// try to allocate buffer for entire output file content
		wholeBuffer = (uint8_t*)calloc(_fileSize, 1);
		if ( wholeBuffer == NULL )
			throwf("can't create buffer of %llu bytes for output", _fileSize);
	}

	// random UUID must be set before atoms are written so the bytes land in the header
	if ( _options.UUIDMode() == Options::kUUIDRandom ) {
		uint8_t bits[16];
		::uuid_generate_random(bits);
		_headersAndLoadCommandAtom->setUUID(bits);
	}

	writeAtoms(state, wholeBuffer);

	// compute UUID
	// (content UUID must be computed after the image bytes are final)
	if ( _options.UUIDMode() == Options::kUUIDContent )
		computeContentUUID(state, wholeBuffer);

	if ( outputIsRegularFile && outputIsMappableFile ) {
		// mkstemp() creates the file with restrictive mode; set intended permissions
		if ( ::chmod(tmpOutput, permissions) == -1 ) {
			unlink(tmpOutput);
			throwf("can't set permissions on output file: %s, errno=%d", tmpOutput, errno);
		}
		// atomically replace any existing output file; no-op if the fallback
		// path above made tmpOutput the same as the destination
		if ( ::rename(tmpOutput, _options.outputFilePath()) == -1 && strcmp(tmpOutput, _options.outputFilePath()) != 0) {
			unlink(tmpOutput);
			throwf("can't move output file in place, errno=%d", errno);
		}
		// NOTE(review): fd is not closed and wholeBuffer is not munmap'ed on this
		// path — tolerable for a short-lived linker process, but a leak if this
		// function were ever called more than once.
	}
	else {
		if ( ::write(fd, wholeBuffer, _fileSize) == -1 ) {
			throwf("can't write to output file: %s, errno=%d", _options.outputFilePath(), errno);
		}
		sDescriptorOfPathToRemove = -1;
		::close(fd);
		// <rdar://problem/13118223> NFS: iOS incremental builds in Xcode 4.6 fail with codesign error
		// NFS seems to pad the end of the file sometimes. Calling trunc seems to correct it...
		::truncate(_options.outputFilePath(), _fileSize);
	}

	// Rename symbol map file if needed
	if ( _options.renameReverseSymbolMap() ) {
		assert(_options.hideSymbols() && _options.reverseSymbolMapPath() != NULL && "Must hide symbol and specify a path");
		uuid_string_t UUIDString;
		const uint8_t* rawUUID = _headersAndLoadCommandAtom->getUUID();
		uuid_unparse_upper(rawUUID, UUIDString);
		char outputMapPath[PATH_MAX];
		sprintf(outputMapPath, "%s/%s.bcsymbolmap", _options.reverseSymbolMapPath(), UUIDString);
		if ( ::rename(_options.reverseMapTempPath().c_str(), outputMapPath) != 0 )
			throwf("could not create bcsymbolmap file: %s", outputMapPath);
	}
}
3046 struct AtomByNameSorter
3048 bool operator()(const ld::Atom* left, const ld::Atom* right) const
3050 return (strcmp(left->name(), right->name()) < 0);
3053 bool operator()(const ld::Atom* left, const char* right) const
3055 return (strcmp(left->name(), right) < 0);
3058 bool operator()(const char* left, const ld::Atom* right) const
3060 return (strcmp(left, right->name()) < 0);
3065 class NotInSet
3067 public:
3068 NotInSet(const std::set<const ld::Atom*>& theSet) : _set(theSet) {}
3070 bool operator()(const ld::Atom* atom) const {
3071 return ( _set.count(atom) == 0 );
3073 private:
3074 const std::set<const ld::Atom*>& _set;
//
// buildSymbolTable()
//
// Assigns mach-o section numbers (n_sect) to atoms and partitions every atom
// into _localAtoms, _exportedAtoms, or _importedAtoms based on scope, section
// type, and the symbol-table-inclusion attribute.  Then sorts the exported
// and imported lists by name and validates $ld$add$/$ld$hide$ special linker
// symbols against the exported set.
//
void OutputFile::buildSymbolTable(ld::Internal& state)
{
	unsigned int machoSectionIndex = 0;
	for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
		ld::Internal::FinalSection* sect = *sit;
		// tentative-def and hidden sections do not consume a mach-o section number
		bool setMachoSectionIndex = !sect->isSectionHidden() && (sect->type() != ld::Section::typeTentativeDefs);
		if ( setMachoSectionIndex )
			++machoSectionIndex;
		for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
			const ld::Atom* atom = *ait;
			if ( setMachoSectionIndex )
				(const_cast<ld::Atom*>(atom))->setMachoSection(machoSectionIndex);
			else if ( sect->type() == ld::Section::typeMachHeader )
				(const_cast<ld::Atom*>(atom))->setMachoSection(1); // __mh_execute_header is not in any section by needs n_sect==1
			else if ( sect->type() == ld::Section::typeLastSection )
				(const_cast<ld::Atom*>(atom))->setMachoSection(machoSectionIndex); // use section index of previous section
			else if ( sect->type() == ld::Section::typeFirstSection )
				(const_cast<ld::Atom*>(atom))->setMachoSection(machoSectionIndex+1); // use section index of next section

			// in -r mode, clarify symbolTableNotInFinalLinkedImages
			if ( _options.outputKind() == Options::kObjectFile ) {
				if ( (_options.architecture() == CPU_TYPE_X86_64)
					|| (_options.architecture() == CPU_TYPE_ARM64)
				   ) {
					// x86_64 .o files need labels on anonymous literal strings
					if ( (sect->type() == ld::Section::typeCString) && (atom->combine() == ld::Atom::combineByNameAndContent) ) {
						(const_cast<ld::Atom*>(atom))->setSymbolTableInclusion(ld::Atom::symbolTableIn);
						_localAtoms.push_back(atom);
						continue;
					}
				}
				// CFI labels are kept or dropped per the -remove_eh_labels option
				if ( sect->type() == ld::Section::typeCFI ) {
					if ( _options.removeEHLabels() )
						(const_cast<ld::Atom*>(atom))->setSymbolTableInclusion(ld::Atom::symbolTableNotIn);
					else
						(const_cast<ld::Atom*>(atom))->setSymbolTableInclusion(ld::Atom::symbolTableIn);
				}
				else if ( sect->type() == ld::Section::typeTempAlias ) {
					// alias atoms appear as undefined symbols in a -r output
					assert(_options.outputKind() == Options::kObjectFile);
					_importedAtoms.push_back(atom);
					continue;
				}
				if ( atom->symbolTableInclusion() == ld::Atom::symbolTableNotInFinalLinkedImages )
					(const_cast<ld::Atom*>(atom))->setSymbolTableInclusion(ld::Atom::symbolTableIn);
			}

			// TEMP work around until <rdar://problem/7702923> goes in
			if ( (atom->symbolTableInclusion() == ld::Atom::symbolTableInAndNeverStrip)
				&& (atom->scope() == ld::Atom::scopeLinkageUnit)
				&& (_options.outputKind() == Options::kDynamicLibrary) ) {
				(const_cast<ld::Atom*>(atom))->setScope(ld::Atom::scopeGlobal);
			}

			// <rdar://problem/6783167> support auto hidden weak symbols: .weak_def_can_be_hidden
			if ( atom->autoHide() && (_options.outputKind() != Options::kObjectFile) ) {
				// adding auto-hide symbol to .exp file should keep it global
				if ( !_options.hasExportMaskList() || !_options.shouldExport(atom->name()) )
					(const_cast<ld::Atom*>(atom))->setScope(ld::Atom::scopeLinkageUnit);
			}

			// <rdar://problem/8626058> ld should consistently warn when resolvers are not exported
			if ( (atom->contentType() == ld::Atom::typeResolver) && (atom->scope() == ld::Atom::scopeLinkageUnit) )
				warning("resolver functions should be external, but '%s' is hidden", atom->name());

			if ( sect->type() == ld::Section::typeImportProxies ) {
				if ( atom->combine() == ld::Atom::combineByName )
					this->usesWeakExternalSymbols = true;
				// alias proxy is a re-export with a name change, don't import changed name
				if ( ! atom->isAlias() )
					_importedAtoms.push_back(atom);
				// scope of proxies are usually linkage unit, so done
				// if scope is global, we need to re-export it too
				if ( atom->scope() == ld::Atom::scopeGlobal )
					_exportedAtoms.push_back(atom);
				continue;
			}
			if ( atom->symbolTableInclusion() == ld::Atom::symbolTableNotInFinalLinkedImages ) {
				assert(_options.outputKind() != Options::kObjectFile);
				continue; // don't add to symbol table
			}
			if ( atom->symbolTableInclusion() == ld::Atom::symbolTableNotIn ) {
				continue; // don't add to symbol table
			}
			if ( (atom->symbolTableInclusion() == ld::Atom::symbolTableInWithRandomAutoStripLabel)
				&& (_options.outputKind() != Options::kObjectFile) ) {
				continue; // don't add to symbol table
			}

			if ( (atom->definition() == ld::Atom::definitionTentative) && (_options.outputKind() == Options::kObjectFile) ) {
				if ( _options.makeTentativeDefinitionsReal() ) {
					// -r -d turns tentative defintions into real def
					_exportedAtoms.push_back(atom);
				}
				else {
					// in mach-o object files tentative defintions are stored like undefined symbols
					_importedAtoms.push_back(atom);
				}
				continue;
			}

			// classify the remaining atoms by scope
			switch ( atom->scope() ) {
				case ld::Atom::scopeTranslationUnit:
					if ( _options.keepLocalSymbol(atom->name()) ) {
						_localAtoms.push_back(atom);
					}
					else {
						if ( _options.outputKind() == Options::kObjectFile ) {
							(const_cast<ld::Atom*>(atom))->setSymbolTableInclusion(ld::Atom::symbolTableInWithRandomAutoStripLabel);
							_localAtoms.push_back(atom);
						}
						else
							(const_cast<ld::Atom*>(atom))->setSymbolTableInclusion(ld::Atom::symbolTableNotIn);
					}
					break;
				case ld::Atom::scopeGlobal:
					_exportedAtoms.push_back(atom);
					break;
				case ld::Atom::scopeLinkageUnit:
					if ( _options.outputKind() == Options::kObjectFile ) {
						if ( _options.keepPrivateExterns() ) {
							_exportedAtoms.push_back(atom);
						}
						else if ( _options.keepLocalSymbol(atom->name()) ) {
							_localAtoms.push_back(atom);
						}
						else {
							(const_cast<ld::Atom*>(atom))->setSymbolTableInclusion(ld::Atom::symbolTableInWithRandomAutoStripLabel);
							_localAtoms.push_back(atom);
						}
					}
					else {
						if ( _options.keepLocalSymbol(atom->name()) )
							_localAtoms.push_back(atom);
						// <rdar://problem/5804214> ld should never have a symbol in the non-lazy indirect symbol table with index 0
						// this works by making __mh_execute_header be a local symbol which takes symbol index 0
						else if ( (atom->symbolTableInclusion() == ld::Atom::symbolTableInAndNeverStrip) && !_options.makeCompressedDyldInfo() )
							_localAtoms.push_back(atom);
						else
							(const_cast<ld::Atom*>(atom))->setSymbolTableInclusion(ld::Atom::symbolTableNotIn);
					}
					break;
			}
		}
	}

	// <rdar://problem/6978069> ld adds undefined symbol from .exp file to binary
	if ( (_options.outputKind() == Options::kKextBundle) && _options.hasExportRestrictList() ) {
		// search for referenced undefines
		std::set<const ld::Atom*> referencedProxyAtoms;
		for (std::vector<ld::Internal::FinalSection*>::iterator sit=state.sections.begin(); sit != state.sections.end(); ++sit) {
			ld::Internal::FinalSection* sect = *sit;
			for (std::vector<const ld::Atom*>::iterator ait=sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
				const ld::Atom* atom = *ait;
				for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
					switch ( fit->binding ) {
						case ld::Fixup::bindingsIndirectlyBound:
							referencedProxyAtoms.insert(state.indirectBindingTable[fit->u.bindingIndex]);
							break;
						case ld::Fixup::bindingDirectlyBound:
							referencedProxyAtoms.insert(fit->u.target);
							break;
						default:
							break;
					}
				}
			}
		}
		// remove any unreferenced _importedAtoms
		_importedAtoms.erase(std::remove_if(_importedAtoms.begin(), _importedAtoms.end(), NotInSet(referencedProxyAtoms)), _importedAtoms.end());
	}

	// sort by name
	std::sort(_exportedAtoms.begin(), _exportedAtoms.end(), AtomByNameSorter());
	std::sort(_importedAtoms.begin(), _importedAtoms.end(), AtomByNameSorter());

	// collect $ld$add$<cond>$<sym> and $ld$hide$<cond>$<sym> directives,
	// keyed by the affected symbol name
	std::map<std::string, std::vector<std::string>> addedSymbols;
	std::map<std::string, std::vector<std::string>> hiddenSymbols;
	for (const auto *atom : _exportedAtoms) {
		// The exported symbols have already been sorted. Early exit the loop
		// once we see a symbol that is lexicographically past the special
		// linker symbol.
		if (atom->name()[0] > '$')
			break;

		std::string name(atom->name());
		if (name.rfind("$ld$add$", 7) == 0) {
			auto pos = name.find_first_of('$', 10);
			if (pos == std::string::npos) {
				warning("bad special linker symbol '%s'", atom->name());
				continue;
			}
			auto &&symbolName = name.substr(pos+1);
			// insert a new list, or append when the symbol already has one
			auto it = addedSymbols.emplace(symbolName, std::initializer_list<std::string>{name});
			if (!it.second)
				it.first->second.emplace_back(name);
		} else if (name.rfind("$ld$hide$", 8) == 0) {
			auto pos = name.find_first_of('$', 11);
			if (pos == std::string::npos) {
				warning("bad special linker symbol '%s'", atom->name());
				continue;
			}
			auto &&symbolName = name.substr(pos+1);
			auto it = hiddenSymbols.emplace(symbolName, std::initializer_list<std::string>{name});
			if (!it.second)
				it.first->second.emplace_back(name);
		}
	}

	// a $ld$add$ for a symbol that already exists is a mistake — warn
	for (const auto &it : addedSymbols) {
		if (!std::binary_search(_exportedAtoms.begin(), _exportedAtoms.end(), it.first.c_str(), AtomByNameSorter()))
			continue;
		for (const auto &symbol : it.second)
			warning("linker symbol '%s' adds already existing symbol '%s'", symbol.c_str(), it.first.c_str());
	}

	// drop $ld$hide$ entries whose target symbol really is exported...
	auto it = hiddenSymbols.begin();
	while (it != hiddenSymbols.end()) {
		if (std::binary_search(_exportedAtoms.begin(), _exportedAtoms.end(), it->first.c_str(), AtomByNameSorter()))
			it = hiddenSymbols.erase(it);
		else
			++it;
	}

	// ...and warn about the rest, which hide nothing
	for (const auto &it : hiddenSymbols) {
		for (const auto &symbol : it.second)
			warning("linker symbol '%s' hides a non-existent symbol '%s'", symbol.c_str(), it.first.c_str());
	}
}
3307 void OutputFile::addPreloadLinkEdit(ld::Internal& state)
3309 switch ( _options.architecture() ) {
3310 #if SUPPORT_ARCH_i386
3311 case CPU_TYPE_I386:
3312 if ( _hasLocalRelocations ) {
3313 _localRelocsAtom = new LocalRelocationsAtom<x86>(_options, state, *this);
3314 localRelocationsSection = state.addAtom(*_localRelocsAtom);
3316 if ( _hasExternalRelocations ) {
3317 _externalRelocsAtom = new ExternalRelocationsAtom<x86>(_options, state, *this);
3318 externalRelocationsSection = state.addAtom(*_externalRelocsAtom);
3320 if ( _hasSymbolTable ) {
3321 _indirectSymbolTableAtom = new IndirectSymbolTableAtom<x86>(_options, state, *this);
3322 indirectSymbolTableSection = state.addAtom(*_indirectSymbolTableAtom);
3323 _symbolTableAtom = new SymbolTableAtom<x86>(_options, state, *this);
3324 symbolTableSection = state.addAtom(*_symbolTableAtom);
3325 _stringPoolAtom = new StringPoolAtom(_options, state, *this, 4);
3326 stringPoolSection = state.addAtom(*_stringPoolAtom);
3328 break;
3329 #endif
3330 #if SUPPORT_ARCH_x86_64
3331 case CPU_TYPE_X86_64:
3332 if ( _hasLocalRelocations ) {
3333 _localRelocsAtom = new LocalRelocationsAtom<x86_64>(_options, state, *this);
3334 localRelocationsSection = state.addAtom(*_localRelocsAtom);
3336 if ( _hasExternalRelocations ) {
3337 _externalRelocsAtom = new ExternalRelocationsAtom<x86_64>(_options, state, *this);
3338 externalRelocationsSection = state.addAtom(*_externalRelocsAtom);
3340 if ( _hasSymbolTable ) {
3341 _indirectSymbolTableAtom = new IndirectSymbolTableAtom<x86_64>(_options, state, *this);
3342 indirectSymbolTableSection = state.addAtom(*_indirectSymbolTableAtom);
3343 _symbolTableAtom = new SymbolTableAtom<x86_64>(_options, state, *this);
3344 symbolTableSection = state.addAtom(*_symbolTableAtom);
3345 _stringPoolAtom = new StringPoolAtom(_options, state, *this, 4);
3346 stringPoolSection = state.addAtom(*_stringPoolAtom);
3348 break;
3349 #endif
3350 #if SUPPORT_ARCH_arm_any
3351 case CPU_TYPE_ARM:
3352 if ( _hasLocalRelocations ) {
3353 _localRelocsAtom = new LocalRelocationsAtom<arm>(_options, state, *this);
3354 localRelocationsSection = state.addAtom(*_localRelocsAtom);
3356 if ( _hasExternalRelocations ) {
3357 _externalRelocsAtom = new ExternalRelocationsAtom<arm>(_options, state, *this);
3358 externalRelocationsSection = state.addAtom(*_externalRelocsAtom);
3360 if ( _hasSymbolTable ) {
3361 _indirectSymbolTableAtom = new IndirectSymbolTableAtom<arm>(_options, state, *this);
3362 indirectSymbolTableSection = state.addAtom(*_indirectSymbolTableAtom);
3363 _symbolTableAtom = new SymbolTableAtom<arm>(_options, state, *this);
3364 symbolTableSection = state.addAtom(*_symbolTableAtom);
3365 _stringPoolAtom = new StringPoolAtom(_options, state, *this, 4);
3366 stringPoolSection = state.addAtom(*_stringPoolAtom);
3368 break;
3369 #endif
3370 #if SUPPORT_ARCH_arm64
3371 case CPU_TYPE_ARM64:
3372 if ( _hasLocalRelocations ) {
3373 _localRelocsAtom = new LocalRelocationsAtom<arm64>(_options, state, *this);
3374 localRelocationsSection = state.addAtom(*_localRelocsAtom);
3376 if ( _hasExternalRelocations ) {
3377 _externalRelocsAtom = new ExternalRelocationsAtom<arm64>(_options, state, *this);
3378 externalRelocationsSection = state.addAtom(*_externalRelocsAtom);
3380 if ( _hasSymbolTable ) {
3381 _indirectSymbolTableAtom = new IndirectSymbolTableAtom<arm64>(_options, state, *this);
3382 indirectSymbolTableSection = state.addAtom(*_indirectSymbolTableAtom);
3383 _symbolTableAtom = new SymbolTableAtom<arm64>(_options, state, *this);
3384 symbolTableSection = state.addAtom(*_symbolTableAtom);
3385 _stringPoolAtom = new StringPoolAtom(_options, state, *this, 4);
3386 stringPoolSection = state.addAtom(*_stringPoolAtom);
3388 break;
3389 #endif
3390 default:
3391 throw "-preload not supported";
3397 void OutputFile::addLinkEdit(ld::Internal& state)
3399 // for historical reasons, -preload orders LINKEDIT content differently
3400 if ( _options.outputKind() == Options::kPreload )
3401 return addPreloadLinkEdit(state);
3403 switch ( _options.architecture() ) {
3404 #if SUPPORT_ARCH_ppc
3405 case CPU_TYPE_POWERPC:
3406 if ( _hasSectionRelocations ) {
3407 _sectionsRelocationsAtom = new SectionRelocationsAtom<ppc>(_options, state, *this);
3408 sectionRelocationsSection = state.addAtom(*_sectionsRelocationsAtom);
3410 if ( _hasDyldInfo ) {
3411 _rebasingInfoAtom = new RebaseInfoAtom<ppc>(_options, state, *this);
3412 rebaseSection = state.addAtom(*_rebasingInfoAtom);
3414 _bindingInfoAtom = new BindingInfoAtom<ppc>(_options, state, *this);
3415 bindingSection = state.addAtom(*_bindingInfoAtom);
3417 _weakBindingInfoAtom = new WeakBindingInfoAtom<ppc>(_options, state, *this);
3418 weakBindingSection = state.addAtom(*_weakBindingInfoAtom);
3420 _lazyBindingInfoAtom = new LazyBindingInfoAtom<ppc>(_options, state, *this);
3421 lazyBindingSection = state.addAtom(*_lazyBindingInfoAtom);
3423 _exportInfoAtom = new ExportInfoAtom<ppc>(_options, state, *this);
3424 exportSection = state.addAtom(*_exportInfoAtom);
3426 if ( _hasLocalRelocations ) {
3427 _localRelocsAtom = new LocalRelocationsAtom<ppc>(_options, state, *this);
3428 localRelocationsSection = state.addAtom(*_localRelocsAtom);
3430 if ( _hasSplitSegInfo ) {
3431 _splitSegInfoAtom = new SplitSegInfoV1Atom<ppc>(_options, state, *this);
3432 splitSegInfoSection = state.addAtom(*_splitSegInfoAtom);
3434 // FIXME: The next three are entered for compatibility, but unless dyld is
3435 // updated are probably useless.
3436 if ( _hasFunctionStartsInfo ) {
3437 _functionStartsAtom = new FunctionStartsAtom<ppc>(_options, state, *this);
3438 functionStartsSection = state.addAtom(*_functionStartsAtom);
3440 if ( _hasDataInCodeInfo ) {
3441 _dataInCodeAtom = new DataInCodeAtom<ppc>(_options, state, *this);
3442 dataInCodeSection = state.addAtom(*_dataInCodeAtom);
3444 if ( _hasOptimizationHints ) {
3445 _optimizationHintsAtom = new OptimizationHintsAtom<ppc>(_options, state, *this);
3446 optimizationHintsSection = state.addAtom(*_optimizationHintsAtom);
3448 if ( _hasSymbolTable ) {
3449 _symbolTableAtom = new SymbolTableAtom<ppc>(_options, state, *this);
3450 symbolTableSection = state.addAtom(*_symbolTableAtom);
3452 if ( _hasExternalRelocations ) {
3453 _externalRelocsAtom = new ExternalRelocationsAtom<ppc>(_options, state, *this);
3454 externalRelocationsSection = state.addAtom(*_externalRelocsAtom);
3456 if ( _hasSymbolTable ) {
3457 _indirectSymbolTableAtom = new IndirectSymbolTableAtom<ppc>(_options, state, *this);
3458 indirectSymbolTableSection = state.addAtom(*_indirectSymbolTableAtom);
3459 _stringPoolAtom = new StringPoolAtom(_options, state, *this, 4);
3460 stringPoolSection = state.addAtom(*_stringPoolAtom);
3462 break;
3463 #endif
3464 #if SUPPORT_ARCH_i386
3465 case CPU_TYPE_I386:
3466 if ( _hasSectionRelocations ) {
3467 _sectionsRelocationsAtom = new SectionRelocationsAtom<x86>(_options, state, *this);
3468 sectionRelocationsSection = state.addAtom(*_sectionsRelocationsAtom);
3470 if ( _hasDyldInfo ) {
3471 _rebasingInfoAtom = new RebaseInfoAtom<x86>(_options, state, *this);
3472 rebaseSection = state.addAtom(*_rebasingInfoAtom);
3474 _bindingInfoAtom = new BindingInfoAtom<x86>(_options, state, *this);
3475 bindingSection = state.addAtom(*_bindingInfoAtom);
3477 _weakBindingInfoAtom = new WeakBindingInfoAtom<x86>(_options, state, *this);
3478 weakBindingSection = state.addAtom(*_weakBindingInfoAtom);
3480 _lazyBindingInfoAtom = new LazyBindingInfoAtom<x86>(_options, state, *this);
3481 lazyBindingSection = state.addAtom(*_lazyBindingInfoAtom);
3483 _exportInfoAtom = new ExportInfoAtom<x86>(_options, state, *this);
3484 exportSection = state.addAtom(*_exportInfoAtom);
3486 if ( _hasLocalRelocations ) {
3487 _localRelocsAtom = new LocalRelocationsAtom<x86>(_options, state, *this);
3488 localRelocationsSection = state.addAtom(*_localRelocsAtom);
3490 if ( _hasSplitSegInfo ) {
3491 if ( _options.sharedRegionEncodingV2() )
3492 _splitSegInfoAtom = new SplitSegInfoV2Atom<x86>(_options, state, *this);
3493 else
3494 _splitSegInfoAtom = new SplitSegInfoV1Atom<x86>(_options, state, *this);
3495 splitSegInfoSection = state.addAtom(*_splitSegInfoAtom);
3497 if ( _hasFunctionStartsInfo ) {
3498 _functionStartsAtom = new FunctionStartsAtom<x86>(_options, state, *this);
3499 functionStartsSection = state.addAtom(*_functionStartsAtom);
3501 if ( _hasDataInCodeInfo ) {
3502 _dataInCodeAtom = new DataInCodeAtom<x86>(_options, state, *this);
3503 dataInCodeSection = state.addAtom(*_dataInCodeAtom);
3505 if ( _hasOptimizationHints ) {
3506 _optimizationHintsAtom = new OptimizationHintsAtom<x86>(_options, state, *this);
3507 optimizationHintsSection = state.addAtom(*_optimizationHintsAtom);
3509 if ( _hasSymbolTable ) {
3510 _symbolTableAtom = new SymbolTableAtom<x86>(_options, state, *this);
3511 symbolTableSection = state.addAtom(*_symbolTableAtom);
3513 if ( _hasExternalRelocations ) {
3514 _externalRelocsAtom = new ExternalRelocationsAtom<x86>(_options, state, *this);
3515 externalRelocationsSection = state.addAtom(*_externalRelocsAtom);
3517 if ( _hasSymbolTable ) {
3518 _indirectSymbolTableAtom = new IndirectSymbolTableAtom<x86>(_options, state, *this);
3519 indirectSymbolTableSection = state.addAtom(*_indirectSymbolTableAtom);
3520 _stringPoolAtom = new StringPoolAtom(_options, state, *this, 4);
3521 stringPoolSection = state.addAtom(*_stringPoolAtom);
3523 break;
3524 #endif
3525 #if SUPPORT_ARCH_x86_64
3526 case CPU_TYPE_X86_64:
3527 if ( _hasSectionRelocations ) {
3528 _sectionsRelocationsAtom = new SectionRelocationsAtom<x86_64>(_options, state, *this);
3529 sectionRelocationsSection = state.addAtom(*_sectionsRelocationsAtom);
3531 if ( _hasDyldInfo ) {
3532 _rebasingInfoAtom = new RebaseInfoAtom<x86_64>(_options, state, *this);
3533 rebaseSection = state.addAtom(*_rebasingInfoAtom);
3535 _bindingInfoAtom = new BindingInfoAtom<x86_64>(_options, state, *this);
3536 bindingSection = state.addAtom(*_bindingInfoAtom);
3538 _weakBindingInfoAtom = new WeakBindingInfoAtom<x86_64>(_options, state, *this);
3539 weakBindingSection = state.addAtom(*_weakBindingInfoAtom);
3541 _lazyBindingInfoAtom = new LazyBindingInfoAtom<x86_64>(_options, state, *this);
3542 lazyBindingSection = state.addAtom(*_lazyBindingInfoAtom);
3544 _exportInfoAtom = new ExportInfoAtom<x86_64>(_options, state, *this);
3545 exportSection = state.addAtom(*_exportInfoAtom);
3547 if ( _hasLocalRelocations ) {
3548 _localRelocsAtom = new LocalRelocationsAtom<x86_64>(_options, state, *this);
3549 localRelocationsSection = state.addAtom(*_localRelocsAtom);
3551 if ( _hasSplitSegInfo ) {
3552 if ( _options.sharedRegionEncodingV2() )
3553 _splitSegInfoAtom = new SplitSegInfoV2Atom<x86_64>(_options, state, *this);
3554 else
3555 _splitSegInfoAtom = new SplitSegInfoV1Atom<x86_64>(_options, state, *this);
3556 splitSegInfoSection = state.addAtom(*_splitSegInfoAtom);
3558 if ( _hasFunctionStartsInfo ) {
3559 _functionStartsAtom = new FunctionStartsAtom<x86_64>(_options, state, *this);
3560 functionStartsSection = state.addAtom(*_functionStartsAtom);
3562 if ( _hasDataInCodeInfo ) {
3563 _dataInCodeAtom = new DataInCodeAtom<x86_64>(_options, state, *this);
3564 dataInCodeSection = state.addAtom(*_dataInCodeAtom);
3566 if ( _hasOptimizationHints ) {
3567 _optimizationHintsAtom = new OptimizationHintsAtom<x86_64>(_options, state, *this);
3568 optimizationHintsSection = state.addAtom(*_optimizationHintsAtom);
3570 if ( _hasSymbolTable ) {
3571 _symbolTableAtom = new SymbolTableAtom<x86_64>(_options, state, *this);
3572 symbolTableSection = state.addAtom(*_symbolTableAtom);
3574 if ( _hasExternalRelocations ) {
3575 _externalRelocsAtom = new ExternalRelocationsAtom<x86_64>(_options, state, *this);
3576 externalRelocationsSection = state.addAtom(*_externalRelocsAtom);
3578 if ( _hasSymbolTable ) {
3579 _indirectSymbolTableAtom = new IndirectSymbolTableAtom<x86_64>(_options, state, *this);
3580 indirectSymbolTableSection = state.addAtom(*_indirectSymbolTableAtom);
3581 _stringPoolAtom = new StringPoolAtom(_options, state, *this, 8);
3582 stringPoolSection = state.addAtom(*_stringPoolAtom);
3584 break;
3585 #endif
3586 #if SUPPORT_ARCH_arm_any
3587 case CPU_TYPE_ARM:
3588 if ( _hasSectionRelocations ) {
3589 _sectionsRelocationsAtom = new SectionRelocationsAtom<arm>(_options, state, *this);
3590 sectionRelocationsSection = state.addAtom(*_sectionsRelocationsAtom);
3592 if ( _hasDyldInfo ) {
3593 _rebasingInfoAtom = new RebaseInfoAtom<arm>(_options, state, *this);
3594 rebaseSection = state.addAtom(*_rebasingInfoAtom);
3596 _bindingInfoAtom = new BindingInfoAtom<arm>(_options, state, *this);
3597 bindingSection = state.addAtom(*_bindingInfoAtom);
3599 _weakBindingInfoAtom = new WeakBindingInfoAtom<arm>(_options, state, *this);
3600 weakBindingSection = state.addAtom(*_weakBindingInfoAtom);
3602 _lazyBindingInfoAtom = new LazyBindingInfoAtom<arm>(_options, state, *this);
3603 lazyBindingSection = state.addAtom(*_lazyBindingInfoAtom);
3605 _exportInfoAtom = new ExportInfoAtom<arm>(_options, state, *this);
3606 exportSection = state.addAtom(*_exportInfoAtom);
3608 if ( _hasLocalRelocations ) {
3609 _localRelocsAtom = new LocalRelocationsAtom<arm>(_options, state, *this);
3610 localRelocationsSection = state.addAtom(*_localRelocsAtom);
3612 if ( _hasSplitSegInfo ) {
3613 if ( _options.sharedRegionEncodingV2() )
3614 _splitSegInfoAtom = new SplitSegInfoV2Atom<arm>(_options, state, *this);
3615 else
3616 _splitSegInfoAtom = new SplitSegInfoV1Atom<arm>(_options, state, *this);
3617 splitSegInfoSection = state.addAtom(*_splitSegInfoAtom);
3619 if ( _hasFunctionStartsInfo ) {
3620 _functionStartsAtom = new FunctionStartsAtom<arm>(_options, state, *this);
3621 functionStartsSection = state.addAtom(*_functionStartsAtom);
3623 if ( _hasDataInCodeInfo ) {
3624 _dataInCodeAtom = new DataInCodeAtom<arm>(_options, state, *this);
3625 dataInCodeSection = state.addAtom(*_dataInCodeAtom);
3627 if ( _hasOptimizationHints ) {
3628 _optimizationHintsAtom = new OptimizationHintsAtom<arm>(_options, state, *this);
3629 optimizationHintsSection = state.addAtom(*_optimizationHintsAtom);
3631 if ( _hasSymbolTable ) {
3632 _symbolTableAtom = new SymbolTableAtom<arm>(_options, state, *this);
3633 symbolTableSection = state.addAtom(*_symbolTableAtom);
3635 if ( _hasExternalRelocations ) {
3636 _externalRelocsAtom = new ExternalRelocationsAtom<arm>(_options, state, *this);
3637 externalRelocationsSection = state.addAtom(*_externalRelocsAtom);
3639 if ( _hasSymbolTable ) {
3640 _indirectSymbolTableAtom = new IndirectSymbolTableAtom<arm>(_options, state, *this);
3641 indirectSymbolTableSection = state.addAtom(*_indirectSymbolTableAtom);
3642 _stringPoolAtom = new StringPoolAtom(_options, state, *this, 4);
3643 stringPoolSection = state.addAtom(*_stringPoolAtom);
3645 break;
3646 #endif
3647 #if SUPPORT_ARCH_arm64
3648 case CPU_TYPE_ARM64:
3649 if ( _hasSectionRelocations ) {
3650 _sectionsRelocationsAtom = new SectionRelocationsAtom<arm64>(_options, state, *this);
3651 sectionRelocationsSection = state.addAtom(*_sectionsRelocationsAtom);
3653 if ( _hasDyldInfo ) {
3654 _rebasingInfoAtom = new RebaseInfoAtom<arm64>(_options, state, *this);
3655 rebaseSection = state.addAtom(*_rebasingInfoAtom);
3657 _bindingInfoAtom = new BindingInfoAtom<arm64>(_options, state, *this);
3658 bindingSection = state.addAtom(*_bindingInfoAtom);
3660 _weakBindingInfoAtom = new WeakBindingInfoAtom<arm64>(_options, state, *this);
3661 weakBindingSection = state.addAtom(*_weakBindingInfoAtom);
3663 _lazyBindingInfoAtom = new LazyBindingInfoAtom<arm64>(_options, state, *this);
3664 lazyBindingSection = state.addAtom(*_lazyBindingInfoAtom);
3666 _exportInfoAtom = new ExportInfoAtom<arm64>(_options, state, *this);
3667 exportSection = state.addAtom(*_exportInfoAtom);
3669 if ( _hasLocalRelocations ) {
3670 _localRelocsAtom = new LocalRelocationsAtom<arm64>(_options, state, *this);
3671 localRelocationsSection = state.addAtom(*_localRelocsAtom);
3673 if ( _hasSplitSegInfo ) {
3674 if ( _options.sharedRegionEncodingV2() )
3675 _splitSegInfoAtom = new SplitSegInfoV2Atom<arm64>(_options, state, *this);
3676 else
3677 _splitSegInfoAtom = new SplitSegInfoV1Atom<arm64>(_options, state, *this);
3678 splitSegInfoSection = state.addAtom(*_splitSegInfoAtom);
3680 if ( _hasFunctionStartsInfo ) {
3681 _functionStartsAtom = new FunctionStartsAtom<arm64>(_options, state, *this);
3682 functionStartsSection = state.addAtom(*_functionStartsAtom);
3684 if ( _hasDataInCodeInfo ) {
3685 _dataInCodeAtom = new DataInCodeAtom<arm64>(_options, state, *this);
3686 dataInCodeSection = state.addAtom(*_dataInCodeAtom);
3688 if ( _hasOptimizationHints ) {
3689 _optimizationHintsAtom = new OptimizationHintsAtom<arm64>(_options, state, *this);
3690 optimizationHintsSection = state.addAtom(*_optimizationHintsAtom);
3692 if ( _hasSymbolTable ) {
3693 _symbolTableAtom = new SymbolTableAtom<arm64>(_options, state, *this);
3694 symbolTableSection = state.addAtom(*_symbolTableAtom);
3696 if ( _hasExternalRelocations ) {
3697 _externalRelocsAtom = new ExternalRelocationsAtom<arm64>(_options, state, *this);
3698 externalRelocationsSection = state.addAtom(*_externalRelocsAtom);
3700 if ( _hasSymbolTable ) {
3701 _indirectSymbolTableAtom = new IndirectSymbolTableAtom<arm64>(_options, state, *this);
3702 indirectSymbolTableSection = state.addAtom(*_indirectSymbolTableAtom);
3703 _stringPoolAtom = new StringPoolAtom(_options, state, *this, 4);
3704 stringPoolSection = state.addAtom(*_stringPoolAtom);
3706 break;
3707 #endif
3708 #if SUPPORT_ARCH_ppc64
3709 case CPU_TYPE_POWERPC64:
3710 if ( _hasSectionRelocations ) {
3711 _sectionsRelocationsAtom = new SectionRelocationsAtom<ppc64>(_options, state, *this);
3712 sectionRelocationsSection = state.addAtom(*_sectionsRelocationsAtom);
3714 if ( _hasDyldInfo ) {
3715 _rebasingInfoAtom = new RebaseInfoAtom<ppc64>(_options, state, *this);
3716 rebaseSection = state.addAtom(*_rebasingInfoAtom);
3718 _bindingInfoAtom = new BindingInfoAtom<ppc64>(_options, state, *this);
3719 bindingSection = state.addAtom(*_bindingInfoAtom);
3721 _weakBindingInfoAtom = new WeakBindingInfoAtom<ppc64>(_options, state, *this);
3722 weakBindingSection = state.addAtom(*_weakBindingInfoAtom);
3724 _lazyBindingInfoAtom = new LazyBindingInfoAtom<ppc64>(_options, state, *this);
3725 lazyBindingSection = state.addAtom(*_lazyBindingInfoAtom);
3727 _exportInfoAtom = new ExportInfoAtom<ppc64>(_options, state, *this);
3728 exportSection = state.addAtom(*_exportInfoAtom);
3730 if ( _hasLocalRelocations ) {
3731 _localRelocsAtom = new LocalRelocationsAtom<ppc64>(_options, state, *this);
3732 localRelocationsSection = state.addAtom(*_localRelocsAtom);
3734 if ( _hasSplitSegInfo ) {
3735 _splitSegInfoAtom = new SplitSegInfoV1Atom<ppc64>(_options, state, *this);
3736 splitSegInfoSection = state.addAtom(*_splitSegInfoAtom);
3738 // FIXME: The next three are entered for compatibility, but unless dyld is
3739 // updated are probably useless.
3740 if ( _hasFunctionStartsInfo ) {
3741 _functionStartsAtom = new FunctionStartsAtom<ppc64>(_options, state, *this);
3742 functionStartsSection = state.addAtom(*_functionStartsAtom);
3744 if ( _hasDataInCodeInfo ) {
3745 _dataInCodeAtom = new DataInCodeAtom<ppc64>(_options, state, *this);
3746 dataInCodeSection = state.addAtom(*_dataInCodeAtom);
3748 if ( _hasOptimizationHints ) {
3749 _optimizationHintsAtom = new OptimizationHintsAtom<ppc64>(_options, state, *this);
3750 optimizationHintsSection = state.addAtom(*_optimizationHintsAtom);
3752 if ( _hasSymbolTable ) {
3753 _symbolTableAtom = new SymbolTableAtom<ppc64>(_options, state, *this);
3754 symbolTableSection = state.addAtom(*_symbolTableAtom);
3756 if ( _hasExternalRelocations ) {
3757 _externalRelocsAtom = new ExternalRelocationsAtom<ppc64>(_options, state, *this);
3758 externalRelocationsSection = state.addAtom(*_externalRelocsAtom);
3760 if ( _hasSymbolTable ) {
3761 _indirectSymbolTableAtom = new IndirectSymbolTableAtom<ppc64>(_options, state, *this);
3762 indirectSymbolTableSection = state.addAtom(*_indirectSymbolTableAtom);
3763 _stringPoolAtom = new StringPoolAtom(_options, state, *this, 8);
3764 stringPoolSection = state.addAtom(*_stringPoolAtom);
3766 break;
3767 #endif
3768 default:
3769 throw "unknown architecture";
3773 void OutputFile::addLoadCommands(ld::Internal& state)
3775 switch ( _options.architecture() ) {
3776 #if SUPPORT_ARCH_x86_64
3777 case CPU_TYPE_X86_64:
3778 _headersAndLoadCommandAtom = new HeaderAndLoadCommandsAtom<x86_64>(_options, state, *this);
3779 headerAndLoadCommandsSection = state.addAtom(*_headersAndLoadCommandAtom);
3780 break;
3781 #endif
3782 #if SUPPORT_ARCH_arm_any
3783 case CPU_TYPE_ARM:
3784 _headersAndLoadCommandAtom = new HeaderAndLoadCommandsAtom<arm>(_options, state, *this);
3785 headerAndLoadCommandsSection = state.addAtom(*_headersAndLoadCommandAtom);
3786 break;
3787 #endif
3788 #if SUPPORT_ARCH_arm64
3789 case CPU_TYPE_ARM64:
3790 _headersAndLoadCommandAtom = new HeaderAndLoadCommandsAtom<arm64>(_options, state, *this);
3791 headerAndLoadCommandsSection = state.addAtom(*_headersAndLoadCommandAtom);
3792 break;
3793 #endif
3794 #if SUPPORT_ARCH_i386
3795 case CPU_TYPE_I386:
3796 _headersAndLoadCommandAtom = new HeaderAndLoadCommandsAtom<x86>(_options, state, *this);
3797 headerAndLoadCommandsSection = state.addAtom(*_headersAndLoadCommandAtom);
3798 break;
3799 #endif
3800 #if SUPPORT_ARCH_ppc
3801 case CPU_TYPE_POWERPC:
3802 _headersAndLoadCommandAtom = new HeaderAndLoadCommandsAtom<ppc>(_options, state, *this);
3803 headerAndLoadCommandsSection = state.addAtom(*_headersAndLoadCommandAtom);
3804 break;
3805 #endif
3806 #if SUPPORT_ARCH_ppc64
3807 case CPU_TYPE_POWERPC64:
3808 _headersAndLoadCommandAtom = new HeaderAndLoadCommandsAtom<ppc64>(_options, state, *this);
3809 headerAndLoadCommandsSection = state.addAtom(*_headersAndLoadCommandAtom);
3810 break;
3811 #endif
3812 default:
3813 throw "unknown architecture";
3817 uint32_t OutputFile::dylibCount()
3819 return _dylibsToLoad.size();
3822 const ld::dylib::File* OutputFile::dylibByOrdinal(unsigned int ordinal)
3824 assert( ordinal > 0 );
3825 assert( ordinal <= _dylibsToLoad.size() );
3826 return _dylibsToLoad[ordinal-1];
3829 bool OutputFile::hasOrdinalForInstallPath(const char* path, int* ordinal)
3831 for (std::map<const ld::dylib::File*, int>::const_iterator it = _dylibToOrdinal.begin(); it != _dylibToOrdinal.end(); ++it) {
3832 const char* installPath = it->first->installPath();
3833 if ( (installPath != NULL) && (strcmp(path, installPath) == 0) ) {
3834 *ordinal = it->second;
3835 return true;
3838 return false;
3841 uint32_t OutputFile::dylibToOrdinal(const ld::dylib::File* dylib)
3843 return _dylibToOrdinal[dylib];
3847 void OutputFile::buildDylibOrdinalMapping(ld::Internal& state)
3849 // count non-public re-exported dylibs
3850 unsigned int nonPublicReExportCount = 0;
3851 for (std::vector<ld::dylib::File*>::iterator it = state.dylibs.begin(); it != state.dylibs.end(); ++it) {
3852 ld::dylib::File* aDylib = *it;
3853 if ( aDylib->willBeReExported() && ! aDylib->hasPublicInstallName() )
3854 ++nonPublicReExportCount;
3857 // look at each dylib supplied in state
3858 bool hasReExports = false;
3859 bool haveLazyDylibs = false;
3860 for (std::vector<ld::dylib::File*>::iterator it = state.dylibs.begin(); it != state.dylibs.end(); ++it) {
3861 ld::dylib::File* aDylib = *it;
3862 int ordinal;
3863 if ( aDylib == state.bundleLoader ) {
3864 _dylibToOrdinal[aDylib] = BIND_SPECIAL_DYLIB_MAIN_EXECUTABLE;
3866 else if ( this->hasOrdinalForInstallPath(aDylib->installPath(), &ordinal) ) {
3867 // already have a dylib with that install path, map all uses to that ordinal
3868 _dylibToOrdinal[aDylib] = ordinal;
3870 else if ( aDylib->willBeLazyLoadedDylib() ) {
3871 // all lazy dylib need to be at end of ordinals
3872 haveLazyDylibs = true;
3874 else if ( aDylib->willBeReExported() && ! aDylib->hasPublicInstallName() && (nonPublicReExportCount >= 2) ) {
3875 _dylibsToLoad.push_back(aDylib);
3876 _dylibToOrdinal[aDylib] = BIND_SPECIAL_DYLIB_SELF;
3878 else {
3879 // first time this install path seen, create new ordinal
3880 _dylibsToLoad.push_back(aDylib);
3881 _dylibToOrdinal[aDylib] = _dylibsToLoad.size();
3883 if ( aDylib->explicitlyLinked() && aDylib->willBeReExported() )
3884 hasReExports = true;
3886 if ( haveLazyDylibs ) {
3887 // second pass to determine ordinals for lazy loaded dylibs
3888 for (std::vector<ld::dylib::File*>::iterator it = state.dylibs.begin(); it != state.dylibs.end(); ++it) {
3889 ld::dylib::File* aDylib = *it;
3890 if ( aDylib->willBeLazyLoadedDylib() ) {
3891 int ordinal;
3892 if ( this->hasOrdinalForInstallPath(aDylib->installPath(), &ordinal) ) {
3893 // already have a dylib with that install path, map all uses to that ordinal
3894 _dylibToOrdinal[aDylib] = ordinal;
3896 else {
3897 // first time this install path seen, create new ordinal
3898 _dylibsToLoad.push_back(aDylib);
3899 _dylibToOrdinal[aDylib] = _dylibsToLoad.size();
3904 _noReExportedDylibs = !hasReExports;
3905 //fprintf(stderr, "dylibs:\n");
3906 //for (std::map<const ld::dylib::File*, int>::const_iterator it = _dylibToOrdinal.begin(); it != _dylibToOrdinal.end(); ++it) {
3907 // fprintf(stderr, " %p ord=%u, install_name=%s\n",it->first, it->second, it->first->installPath());
3911 uint32_t OutputFile::lazyBindingInfoOffsetForLazyPointerAddress(uint64_t lpAddress)
3913 return _lazyPointerAddressToInfoOffset[lpAddress];
3916 void OutputFile::setLazyBindingInfoOffset(uint64_t lpAddress, uint32_t lpInfoOffset)
3918 _lazyPointerAddressToInfoOffset[lpAddress] = lpInfoOffset;
3921 int OutputFile::compressedOrdinalForAtom(const ld::Atom* target)
3923 // flat namespace images use zero for all ordinals
3924 if ( _options.nameSpace() != Options::kTwoLevelNameSpace )
3925 return BIND_SPECIAL_DYLIB_FLAT_LOOKUP;
3927 // handle -interposable
3928 if ( target->definition() == ld::Atom::definitionRegular )
3929 return BIND_SPECIAL_DYLIB_SELF;
3931 // regular ordinal
3932 const ld::dylib::File* dylib = dynamic_cast<const ld::dylib::File*>(target->file());
3933 if ( dylib != NULL ) {
3934 std::map<const ld::dylib::File*, int>::iterator pos = _dylibToOrdinal.find(dylib);
3935 if ( pos != _dylibToOrdinal.end() )
3936 return pos->second;
3937 assert(0 && "dylib not assigned ordinal");
3940 // handle undefined dynamic_lookup
3941 if ( _options.undefinedTreatment() == Options::kUndefinedDynamicLookup )
3942 return BIND_SPECIAL_DYLIB_FLAT_LOOKUP;
3944 // handle -U _foo
3945 if ( _options.allowedUndefined(target->name()) )
3946 return BIND_SPECIAL_DYLIB_FLAT_LOOKUP;
3948 throw "can't find ordinal for imported symbol";
3952 bool OutputFile::isPcRelStore(ld::Fixup::Kind kind)
3954 switch ( kind ) {
3955 case ld::Fixup::kindStoreX86BranchPCRel8:
3956 case ld::Fixup::kindStoreX86BranchPCRel32:
3957 case ld::Fixup::kindStoreX86PCRel8:
3958 case ld::Fixup::kindStoreX86PCRel16:
3959 case ld::Fixup::kindStoreX86PCRel32:
3960 case ld::Fixup::kindStoreX86PCRel32_1:
3961 case ld::Fixup::kindStoreX86PCRel32_2:
3962 case ld::Fixup::kindStoreX86PCRel32_4:
3963 case ld::Fixup::kindStoreX86PCRel32GOTLoad:
3964 case ld::Fixup::kindStoreX86PCRel32GOTLoadNowLEA:
3965 case ld::Fixup::kindStoreX86PCRel32GOT:
3966 case ld::Fixup::kindStoreX86PCRel32TLVLoad:
3967 case ld::Fixup::kindStoreX86PCRel32TLVLoadNowLEA:
3968 case ld::Fixup::kindStoreARMBranch24:
3969 case ld::Fixup::kindStoreThumbBranch22:
3970 case ld::Fixup::kindStoreARMLoad12:
3971 case ld::Fixup::kindStorePPCBranch24:
3972 case ld::Fixup::kindStorePPCBranch14:
3973 case ld::Fixup::kindStorePPCPicLow14:
3974 case ld::Fixup::kindStorePPCPicLow16:
3975 case ld::Fixup::kindStorePPCPicHigh16AddLow:
3976 case ld::Fixup::kindStoreTargetAddressX86PCRel32:
3977 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad:
3978 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA:
3979 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad:
3980 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA:
3981 case ld::Fixup::kindStoreTargetAddressARMBranch24:
3982 case ld::Fixup::kindStoreTargetAddressThumbBranch22:
3983 case ld::Fixup::kindStoreTargetAddressARMLoad12:
3984 #if SUPPORT_ARCH_arm64
3985 case ld::Fixup::kindStoreARM64Page21:
3986 case ld::Fixup::kindStoreARM64PageOff12:
3987 case ld::Fixup::kindStoreARM64GOTLoadPage21:
3988 case ld::Fixup::kindStoreARM64GOTLoadPageOff12:
3989 case ld::Fixup::kindStoreARM64GOTLeaPage21:
3990 case ld::Fixup::kindStoreARM64GOTLeaPageOff12:
3991 case ld::Fixup::kindStoreARM64TLVPLoadPage21:
3992 case ld::Fixup::kindStoreARM64TLVPLoadPageOff12:
3993 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPage21:
3994 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPageOff12:
3995 case ld::Fixup::kindStoreARM64PCRelToGOT:
3996 case ld::Fixup::kindStoreTargetAddressARM64Page21:
3997 case ld::Fixup::kindStoreTargetAddressARM64PageOff12:
3998 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21:
3999 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12:
4000 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21:
4001 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12:
4002 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPage21:
4003 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPageOff12:
4004 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPage21:
4005 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPageOff12:
4006 #endif
4007 case ld::Fixup::kindStoreTargetAddressPPCBranch24:
4008 return true;
4009 case ld::Fixup::kindStoreTargetAddressX86BranchPCRel32:
4010 #if SUPPORT_ARCH_arm64
4011 case ld::Fixup::kindStoreTargetAddressARM64Branch26:
4012 #endif
4013 return (_options.outputKind() != Options::kKextBundle);
4014 default:
4015 break;
4017 return false;
4020 bool OutputFile::isStore(ld::Fixup::Kind kind)
4022 switch ( kind ) {
4023 case ld::Fixup::kindNone:
4024 case ld::Fixup::kindNoneFollowOn:
4025 case ld::Fixup::kindNoneGroupSubordinate:
4026 case ld::Fixup::kindNoneGroupSubordinateFDE:
4027 case ld::Fixup::kindNoneGroupSubordinateLSDA:
4028 case ld::Fixup::kindNoneGroupSubordinatePersonality:
4029 case ld::Fixup::kindSetTargetAddress:
4030 case ld::Fixup::kindSubtractTargetAddress:
4031 case ld::Fixup::kindAddAddend:
4032 case ld::Fixup::kindSubtractAddend:
4033 case ld::Fixup::kindSetTargetImageOffset:
4034 case ld::Fixup::kindSetTargetSectionOffset:
4035 return false;
4036 default:
4037 break;
4039 return true;
4043 bool OutputFile::setsTarget(ld::Fixup::Kind kind)
4045 switch ( kind ) {
4046 case ld::Fixup::kindSetTargetAddress:
4047 case ld::Fixup::kindLazyTarget:
4048 case ld::Fixup::kindStoreTargetAddressLittleEndian32:
4049 case ld::Fixup::kindStoreTargetAddressLittleEndian64:
4050 case ld::Fixup::kindStoreTargetAddressBigEndian32:
4051 case ld::Fixup::kindStoreTargetAddressBigEndian64:
4052 case ld::Fixup::kindStoreTargetAddressX86PCRel32:
4053 case ld::Fixup::kindStoreTargetAddressX86BranchPCRel32:
4054 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad:
4055 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA:
4056 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad:
4057 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA:
4058 case ld::Fixup::kindStoreTargetAddressX86Abs32TLVLoad:
4059 case ld::Fixup::kindStoreTargetAddressARMBranch24:
4060 case ld::Fixup::kindStoreTargetAddressThumbBranch22:
4061 case ld::Fixup::kindStoreTargetAddressARMLoad12:
4062 #if SUPPORT_ARCH_arm64
4063 case ld::Fixup::kindStoreTargetAddressARM64Branch26:
4064 case ld::Fixup::kindStoreTargetAddressARM64Page21:
4065 case ld::Fixup::kindStoreTargetAddressARM64PageOff12:
4066 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21:
4067 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12:
4068 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21:
4069 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12:
4070 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPage21:
4071 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPageOff12:
4072 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPage21:
4073 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPageOff12:
4074 #endif
4075 case ld::Fixup::kindStoreTargetAddressPPCBranch24:
4076 return true;
4077 case ld::Fixup::kindStoreX86DtraceCallSiteNop:
4078 case ld::Fixup::kindStoreX86DtraceIsEnableSiteClear:
4079 case ld::Fixup::kindStoreARMDtraceCallSiteNop:
4080 case ld::Fixup::kindStoreARMDtraceIsEnableSiteClear:
4081 case ld::Fixup::kindStoreARM64DtraceCallSiteNop:
4082 case ld::Fixup::kindStoreARM64DtraceIsEnableSiteClear:
4083 case ld::Fixup::kindStoreThumbDtraceCallSiteNop:
4084 case ld::Fixup::kindStoreThumbDtraceIsEnableSiteClear:
4085 case ld::Fixup::kindStorePPCDtraceCallSiteNop:
4086 case ld::Fixup::kindStorePPCDtraceIsEnableSiteClear:
4087 return (_options.outputKind() == Options::kObjectFile);
4088 default:
4089 break;
4091 return false;
4094 bool OutputFile::isPointerToTarget(ld::Fixup::Kind kind)
4096 switch ( kind ) {
4097 case ld::Fixup::kindSetTargetAddress:
4098 case ld::Fixup::kindStoreTargetAddressLittleEndian32:
4099 case ld::Fixup::kindStoreTargetAddressLittleEndian64:
4100 case ld::Fixup::kindStoreTargetAddressBigEndian32:
4101 case ld::Fixup::kindStoreTargetAddressBigEndian64:
4102 case ld::Fixup::kindLazyTarget:
4103 return true;
4104 default:
4105 break;
4107 return false;
4109 bool OutputFile::isPointerFromTarget(ld::Fixup::Kind kind)
4111 switch ( kind ) {
4112 case ld::Fixup::kindSubtractTargetAddress:
4113 return true;
4114 default:
4115 break;
4117 return false;
4121 uint64_t OutputFile::lookBackAddend(ld::Fixup::iterator fit)
4123 uint64_t addend = 0;
4124 switch ( fit->clusterSize ) {
4125 case ld::Fixup::k1of1:
4126 case ld::Fixup::k1of2:
4127 case ld::Fixup::k2of2:
4128 break;
4129 case ld::Fixup::k2of3:
4130 --fit;
4131 switch ( fit->kind ) {
4132 case ld::Fixup::kindAddAddend:
4133 addend += fit->u.addend;
4134 break;
4135 case ld::Fixup::kindSubtractAddend:
4136 addend -= fit->u.addend;
4137 break;
4138 default:
4139 throw "unexpected fixup kind for binding";
4141 break;
4142 case ld::Fixup::k1of3:
4143 ++fit;
4144 switch ( fit->kind ) {
4145 case ld::Fixup::kindAddAddend:
4146 addend += fit->u.addend;
4147 break;
4148 case ld::Fixup::kindSubtractAddend:
4149 addend -= fit->u.addend;
4150 break;
4151 default:
4152 throw "unexpected fixup kind for binding";
4154 break;
4155 default:
4156 throw "unexpected fixup cluster size for binding";
4158 return addend;
// Walks every atom of every final section and converts each fixup cluster
// into link-edit records: section relocations for -r output, compressed dyld
// info when enabled, otherwise classic relocations.  Also records weak-def
// overrides, data-in-code markers, the end of the encryptable __TEXT range,
// and validates objc1 class refs against lazy-loaded dylibs.
4162 void OutputFile::generateLinkEditInfo(ld::Internal& state)
4164 for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
4165 ld::Internal::FinalSection* sect = *sit;
4166 // record end of last __TEXT section encrypted iPhoneOS apps.
4167 if ( _options.makeEncryptable() && (strcmp(sect->segmentName(), "__TEXT") == 0) && (strcmp(sect->sectionName(), "__oslogstring") != 0) ) {
4168 _encryptedTEXTendOffset = pageAlign(sect->fileOffset + sect->size);
// objc1 (__OBJC,__cls_refs) sections need the lazy-loaded-dylib check below.
4170 bool objc1ClassRefSection = ( (sect->type() == ld::Section::typeCStringPointer)
4171 && (strcmp(sect->sectionName(), "__cls_refs") == 0)
4172 && (strcmp(sect->segmentName(), "__OBJC") == 0) );
4173 for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
4174 const ld::Atom* atom = *ait;
4176 // Record regular atoms that override a dylib's weak definitions
4177 if ( (atom->scope() == ld::Atom::scopeGlobal) && atom->overridesDylibsWeakDef() ) {
4178 if ( _options.makeCompressedDyldInfo() ) {
4179 uint8_t wtype = BIND_TYPE_OVERRIDE_OF_WEAKDEF_IN_DYLIB;
4180 bool nonWeakDef = (atom->combine() == ld::Atom::combineNever);
4181 _weakBindingInfo.push_back(BindingInfo(wtype, atom->name(), nonWeakDef, atom->finalAddress(), 0));
4183 this->overridesWeakExternalSymbols = true;
4184 if ( _options.warnWeakExports() )
4185 warning("overrides weak external symbol: %s", atom->name());
// Per-cluster accumulator state, rebuilt while scanning this atom's fixups.
4188 ld::Fixup* fixupWithTarget = NULL;
4189 ld::Fixup* fixupWithMinusTarget = NULL;
4190 ld::Fixup* fixupWithStore = NULL;
4191 ld::Fixup* fixupWithAddend = NULL;
4192 const ld::Atom* target = NULL;
4193 const ld::Atom* minusTarget = NULL;
4194 uint64_t targetAddend = 0;
4195 uint64_t minusTargetAddend = 0;
4196 for (ld::Fixup::iterator fit = atom->fixupsBegin(); fit != atom->fixupsEnd(); ++fit) {
// New cluster: clear state gathered for the previous one.
// NOTE(review): fixupWithAddend is not reset here, so a cluster with no
// addend fixup of its own can observe the previous cluster's pointer —
// verify against later upstream ld64, which resets it too.
4197 if ( fit->firstInCluster() ) {
4198 fixupWithTarget = NULL;
4199 fixupWithMinusTarget = NULL;
4200 fixupWithStore = NULL;
4201 target = NULL;
4202 minusTarget = NULL;
4203 targetAddend = 0;
4204 minusTargetAddend = 0;
// Remember which fixup names the cluster's target, resolving through the
// indirect binding table when necessary.
4206 if ( this->setsTarget(fit->kind) ) {
4207 switch ( fit->binding ) {
4208 case ld::Fixup::bindingNone:
4209 case ld::Fixup::bindingByNameUnbound:
4210 break;
4211 case ld::Fixup::bindingByContentBound:
4212 case ld::Fixup::bindingDirectlyBound:
4213 fixupWithTarget = fit;
4214 target = fit->u.target;
4215 break;
4216 case ld::Fixup::bindingsIndirectlyBound:
4217 fixupWithTarget = fit;
4218 target = state.indirectBindingTable[fit->u.bindingIndex];
4219 break;
4221 assert(target != NULL);
// Accumulate addends, minus-targets, and data-in-code markers.
4223 switch ( fit->kind ) {
4224 case ld::Fixup::kindAddAddend:
4225 targetAddend = fit->u.addend;
4226 fixupWithAddend = fit;
4227 break;
4228 case ld::Fixup::kindSubtractAddend:
4229 minusTargetAddend = fit->u.addend;
4230 fixupWithAddend = fit;
4231 break;
4232 case ld::Fixup::kindSubtractTargetAddress:
4233 switch ( fit->binding ) {
4234 case ld::Fixup::bindingNone:
4235 case ld::Fixup::bindingByNameUnbound:
4236 break;
4237 case ld::Fixup::bindingByContentBound:
4238 case ld::Fixup::bindingDirectlyBound:
4239 fixupWithMinusTarget = fit;
4240 minusTarget = fit->u.target;
4241 break;
4242 case ld::Fixup::bindingsIndirectlyBound:
4243 fixupWithMinusTarget = fit;
4244 minusTarget = state.indirectBindingTable[fit->u.bindingIndex];
4245 break;
4247 assert(minusTarget != NULL);
4248 break;
4249 case ld::Fixup::kindDataInCodeStartData:
4250 case ld::Fixup::kindDataInCodeStartJT8:
4251 case ld::Fixup::kindDataInCodeStartJT16:
4252 case ld::Fixup::kindDataInCodeStartJT32:
4253 case ld::Fixup::kindDataInCodeStartJTA32:
4254 case ld::Fixup::kindDataInCodeEnd:
4255 hasDataInCode = true;
4256 break;
4257 default:
4258 break;
4260 if ( this->isStore(fit->kind) ) {
4261 fixupWithStore = fit;
// Cluster complete: emit the appropriate record for what was gathered.
4263 if ( fit->lastInCluster() ) {
4264 if ( (fixupWithStore != NULL) && (target != NULL) ) {
4265 if ( _options.outputKind() == Options::kObjectFile ) {
4266 this->addSectionRelocs(state, sect, atom, fixupWithTarget, fixupWithMinusTarget, fixupWithAddend, fixupWithStore,
4267 target, minusTarget, targetAddend, minusTargetAddend);
4269 else {
4270 if ( _options.makeCompressedDyldInfo() ) {
4271 this->addDyldInfo(state, sect, atom, fixupWithTarget, fixupWithMinusTarget, fixupWithStore,
4272 target, minusTarget, targetAddend, minusTargetAddend);
4274 else {
4275 this->addClassicRelocs(state, sect, atom, fixupWithTarget, fixupWithMinusTarget, fixupWithStore,
4276 target, minusTarget, targetAddend, minusTargetAddend);
4280 else if ( objc1ClassRefSection && (target != NULL) && (fixupWithStore == NULL) ) {
4281 // check for class refs to lazy loaded dylibs
4282 const ld::dylib::File* dylib = dynamic_cast<const ld::dylib::File*>(target->file());
4283 if ( (dylib != NULL) && dylib->willBeLazyLoadedDylib() )
4284 throwf("illegal class reference to %s in lazy loaded dylib %s", target->name(), dylib->path());
4293 void OutputFile::noteTextReloc(const ld::Atom* atom, const ld::Atom* target)
4295 if ( (atom->contentType() == ld::Atom::typeStub) || (atom->contentType() == ld::Atom::typeStubHelper) ) {
4296 // silently let stubs (synthesized by linker) use text relocs
4298 else if ( _options.allowTextRelocs() ) {
4299 if ( _options.warnAboutTextRelocs() )
4300 warning("text reloc in %s to %s", atom->name(), target->name());
4302 else if ( _options.positionIndependentExecutable() && (_options.outputKind() == Options::kDynamicExecutable)
4303 && ((_options.iOSVersionMin() >= ld::iOS_4_3) || (_options.macosxVersionMin() >= ld::mac10_7)) ) {
4304 if ( ! this->pieDisabled ) {
4305 switch ( _options.architecture()) {
4306 #if SUPPORT_ARCH_arm64
4307 case CPU_TYPE_ARM64:
4308 #endif
4309 #if SUPPORT_ARCH_arm64
4311 const char* demangledName = strdup(_options.demangleSymbol(atom->name()));
4312 throwf("Absolute addressing not allowed in arm64 code but used in '%s' referencing '%s'", demangledName, _options.demangleSymbol(target->name()));
4314 #endif
4315 default:
4316 warning("PIE disabled. Absolute addressing (perhaps -mdynamic-no-pic) not allowed in code signed PIE, "
4317 "but used in %s from %s. "
4318 "To fix this warning, don't compile with -mdynamic-no-pic or link with -Wl,-no_pie",
4319 atom->name(), atom->file()->path());
4322 this->pieDisabled = true;
4324 else if ( (target->scope() == ld::Atom::scopeGlobal) && (target->combine() == ld::Atom::combineByName) ) {
4325 throwf("illegal text-relocoation (direct reference) to (global,weak) %s in %s from %s in %s", target->name(), target->file()->path(), atom->name(), atom->file()->path());
4327 else {
4328 if ( (target->file() != NULL) && (atom->file() != NULL) )
4329 throwf("illegal text-relocation to '%s' in %s from '%s' in %s", target->name(), target->file()->path(), atom->name(), atom->file()->path());
4330 else
4331 throwf("illegal text reloc in '%s' to '%s'", atom->name(), target->name());
// Emits compressed dyld info (rebase / bind / lazy-bind / weak-bind records)
// for one fixup cluster.  PC-relative stores and internal pointer diffs need
// nothing; other clusters are classified by the target's definition kind.
4335 void OutputFile::addDyldInfo(ld::Internal& state, ld::Internal::FinalSection* sect, const ld::Atom* atom,
4336 ld::Fixup* fixupWithTarget, ld::Fixup* fixupWithMinusTarget, ld::Fixup* fixupWithStore,
4337 const ld::Atom* target, const ld::Atom* minusTarget,
4338 uint64_t targetAddend, uint64_t minusTargetAddend)
// hidden sections produce no dyld info
4340 if ( sect->isSectionHidden() )
4341 return;
4343 // no need to rebase or bind PCRel stores
4344 if ( this->isPcRelStore(fixupWithStore->kind) ) {
4345 // as long as target is in same linkage unit
4346 if ( (target == NULL) || (target->definition() != ld::Atom::definitionProxy) ) {
4347 // make sure target is not global and weak
4348 if ( (target->scope() == ld::Atom::scopeGlobal) && (target->combine() == ld::Atom::combineByName) && (target->definition() == ld::Atom::definitionRegular)) {
4349 if ( (atom->section().type() == ld::Section::typeCFI)
4350 || (atom->section().type() == ld::Section::typeDtraceDOF)
4351 || (atom->section().type() == ld::Section::typeUnwindInfo) ) {
4352 // ok for __eh_frame and __uwind_info to use pointer diffs to global weak symbols
4353 return;
4355 // <rdar://problem/13700961> spurious warning when weak function has reference to itself
4356 if ( fixupWithTarget->binding == ld::Fixup::bindingDirectlyBound ) {
4357 // ok to ignore pc-rel references within a weak function to itself
4358 return;
4360 // Have direct reference to weak-global. This should be an indirect reference
4361 const char* demangledName = strdup(_options.demangleSymbol(atom->name()));
4362 warning("direct access in function '%s' from file '%s' to global weak symbol '%s' from file '%s' means the weak symbol cannot be overridden at runtime. "
4363 "This was likely caused by different translation units being compiled with different visibility settings.",
4364 demangledName, atom->file()->path(), _options.demangleSymbol(target->name()), target->file()->path());
4366 return;
4370 // no need to rebase or bind PIC internal pointer diff
4371 if ( minusTarget != NULL ) {
4372 // with pointer diffs, both need to be in same linkage unit
4373 assert(minusTarget->definition() != ld::Atom::definitionProxy);
4374 assert(target != NULL);
4375 assert(target->definition() != ld::Atom::definitionProxy);
4376 if ( target == minusTarget ) {
4377 // This is a compile time constant and could have been optimized away by compiler
4378 return;
4381 // check if target of pointer-diff is global and weak
4382 if ( (target->scope() == ld::Atom::scopeGlobal) && (target->combine() == ld::Atom::combineByName)
4383 && (target->definition() == ld::Atom::definitionRegular) ) {
4384 if ( (atom->section().type() == ld::Section::typeCFI)
4385 || (atom->section().type() == ld::Section::typeDtraceDOF)
4386 || (atom->section().type() == ld::Section::typeUnwindInfo) ) {
4387 // ok for __eh_frame and __uwind_info to use pointer diffs to global weak symbols
4388 return;
4390 // Have direct reference to weak-global. This should be an indirect reference
4391 const char* demangledName = strdup(_options.demangleSymbol(atom->name()));
4392 const char* demangledTgt = strdup(_options.demangleSymbol(target->name()));
4393 const char* demangledMin = strdup(_options.demangleSymbol(minusTarget->name()));
// NOTE(review): %lu paired with uint64_t minusTargetAddend — should be
// PRIu64/%llu; verify behavior on hosts where long is 32-bit.
4394 warning("direct access in %s to global weak symbol %s (via %s + %lu) means the weak symbol cannot be overridden at runtime. "
4395 "This was likely caused by different translation units being compiled with different visibility settings.",
4396 demangledName, demangledTgt, demangledMin, minusTargetAddend);
4398 return;
4401 // no need to rebase or bind an atom's references to itself if the output is not slidable
4402 if ( (atom == target) && !_options.outputSlidable() )
4403 return;
4405 // cluster has no target, so needs no rebasing or binding
4406 if ( target == NULL )
4407 return;
// Classification flags for which dyld info records this cluster needs.
4409 bool inReadOnlySeg = ((_options.initialSegProtection(sect->segmentName()) & VM_PROT_WRITE) == 0);
4410 bool needsRebase = false;
4411 bool needsBinding = false;
4412 bool needsLazyBinding = false;
4413 bool needsWeakBinding = false;
4415 uint8_t rebaseType = REBASE_TYPE_POINTER;
4416 uint8_t type = BIND_TYPE_POINTER;
4417 const ld::dylib::File* dylib = dynamic_cast<const ld::dylib::File*>(target->file());
4418 bool weak_import = (fixupWithTarget->weakImport || ((dylib != NULL) && dylib->forcedWeakLinked()));
4419 uint64_t address = atom->finalAddress() + fixupWithTarget->offsetInAtom;
// combined addend: add side minus subtract side (wraps modulo 2^64)
4420 uint64_t addend = targetAddend - minusTargetAddend;
4422 // special case lazy pointers
4423 if ( fixupWithTarget->kind == ld::Fixup::kindLazyTarget ) {
4424 assert(fixupWithTarget->u.target == target);
4425 assert(addend == 0);
4426 // lazy dylib lazy pointers do not have any dyld info
4427 if ( atom->section().type() == ld::Section::typeLazyDylibPointer )
4428 return;
4429 // lazy binding to weak definitions are done differently
4430 // they are directly bound to target, then have a weak bind in case of a collision
4431 if ( target->combine() == ld::Atom::combineByName ) {
4432 if ( target->definition() == ld::Atom::definitionProxy ) {
4433 // weak def exported from another dylib
4434 // must non-lazy bind to it plus have weak binding info in case of collision
4435 needsBinding = true;
4436 needsWeakBinding = true;
4438 else {
4439 // weak def in this linkage unit.
4440 // just rebase, plus have weak binding info in case of collision
4441 // this will be done by other cluster on lazy pointer atom
4444 else if ( target->contentType() == ld::Atom::typeResolver ) {
4445 // <rdar://problem/8553647> Hidden resolver functions should not have lazy binding info
4446 // <rdar://problem/12629331> Resolver function run before initializers when overriding the dyld shared cache
4447 // The lazy pointers used by stubs used when non-lazy binding to a resolver are not normal lazy pointers
4448 // and should not be in lazy binding info.
4449 needsLazyBinding = false;
4451 else {
4452 // normal case of a pointer to non-weak-def symbol, so can lazily bind
4453 needsLazyBinding = true;
4456 else {
4457 // everything except lazy pointers
4458 switch ( target->definition() ) {
4459 case ld::Atom::definitionProxy:
4460 if ( (dylib != NULL) && dylib->willBeLazyLoadedDylib() )
4461 throwf("illegal data reference to %s in lazy loaded dylib %s", target->name(), dylib->path());
4462 if ( target->contentType() == ld::Atom::typeTLV ) {
4463 if ( sect->type() != ld::Section::typeTLVPointers )
4464 throwf("illegal data reference in %s to thread local variable %s in dylib %s",
4465 atom->name(), target->name(), dylib->path());
4467 if ( inReadOnlySeg )
4468 type = BIND_TYPE_TEXT_ABSOLUTE32;
4469 needsBinding = true;
4470 if ( target->combine() == ld::Atom::combineByName )
4471 needsWeakBinding = true;
4472 break;
4473 case ld::Atom::definitionRegular:
4474 case ld::Atom::definitionTentative:
4475 // only slideable images need rebasing info
4476 if ( _options.outputSlidable() ) {
4477 needsRebase = true;
4479 // references to internal symbol never need binding
4480 if ( target->scope() != ld::Atom::scopeGlobal )
4481 break;
4482 // reference to global weak def needs weak binding
4483 if ( (target->combine() == ld::Atom::combineByName) && (target->definition() == ld::Atom::definitionRegular) )
4484 needsWeakBinding = true;
4485 else if ( _options.outputKind() == Options::kDynamicExecutable ) {
4486 // in main executables, the only way regular symbols are indirected is if -interposable is used
4487 if ( _options.interposable(target->name()) ) {
4488 needsRebase = false;
4489 needsBinding = true;
4492 else {
4493 // for flat-namespace or interposable two-level-namespace
4494 // all references to exported symbols get indirected
4495 if ( (_options.nameSpace() != Options::kTwoLevelNameSpace) || _options.interposable(target->name()) ) {
4496 // <rdar://problem/5254468> no external relocs for flat objc classes
4497 if ( strncmp(target->name(), ".objc_class_", 12) == 0 )
4498 break;
4499 // no rebase info for references to global symbols that will have binding info
4500 needsRebase = false;
4501 needsBinding = true;
4503 else if ( _options.forceCoalesce(target->name()) ) {
4504 needsWeakBinding = true;
4507 break;
4508 case ld::Atom::definitionAbsolute:
4509 break;
4513 // <rdar://problem/13828711> if target is an import alias, use base of alias
4514 if ( target->isAlias() && (target->definition() == ld::Atom::definitionProxy) ) {
4515 for (ld::Fixup::iterator fit = target->fixupsBegin(), end=target->fixupsEnd(); fit != end; ++fit) {
4516 if ( fit->firstInCluster() ) {
4517 if ( fit->kind == ld::Fixup::kindNoneFollowOn ) {
4518 if ( fit->binding == ld::Fixup::bindingDirectlyBound ) {
4519 //fprintf(stderr, "switching import of %s to import of %s\n", target->name(), fit->u.target->name());
4520 target = fit->u.target;
4527 // record dyld info for this cluster
4528 if ( needsRebase ) {
4529 if ( inReadOnlySeg ) {
4530 noteTextReloc(atom, target);
4531 sect->hasLocalRelocs = true; // so dyld knows to change permissions on __TEXT segment
4532 rebaseType = REBASE_TYPE_TEXT_ABSOLUTE32;
4534 if ( _options.sharedRegionEligible() ) {
4535 // <rdar://problem/13287063> when range checking, ignore high byte of arm64 addends
4536 uint64_t checkAddend = addend;
4537 if ( (_options.architecture() == CPU_TYPE_ARM64)
4539 checkAddend &= 0x0FFFFFFFFFFFFFFFULL;
4540 if ( checkAddend != 0 ) {
4541 // make sure the addend does not cause the pointer to point outside the target's segment
4542 // if it does, update_dyld_shared_cache will not be able to put this dylib into the shared cache
4543 uint64_t targetAddress = target->finalAddress();
4544 for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
4545 ld::Internal::FinalSection* sct = *sit;
4546 uint64_t sctEnd = (sct->address+sct->size);
4547 if ( (sct->address <= targetAddress) && (targetAddress < sctEnd) ) {
4548 if ( (targetAddress+checkAddend) > sctEnd ) {
4549 warning("data symbol %s from %s has pointer to %s + 0x%08llX. "
4550 "That large of an addend may disable %s from being put in the dyld shared cache.",
4551 atom->name(), atom->file()->path(), target->name(), addend, _options.installPath() );
4557 _rebaseInfo.push_back(RebaseInfo(rebaseType, address));
4559 if ( needsBinding ) {
4560 if ( inReadOnlySeg ) {
4561 noteTextReloc(atom, target);
4562 sect->hasExternalRelocs = true; // so dyld knows to change permissions on __TEXT segment
4564 _bindingInfo.push_back(BindingInfo(type, this->compressedOrdinalForAtom(target), target->name(), weak_import, address, addend));
// -bind_at_load promotes lazy bindings to regular (non-lazy) bindings
4566 if ( needsLazyBinding ) {
4567 if ( _options.bindAtLoad() )
4568 _bindingInfo.push_back(BindingInfo(type, this->compressedOrdinalForAtom(target), target->name(), weak_import, address, addend));
4569 else
4570 _lazyBindingInfo.push_back(BindingInfo(type, this->compressedOrdinalForAtom(target), target->name(), weak_import, address, addend));
4572 if ( needsWeakBinding )
4573 _weakBindingInfo.push_back(BindingInfo(type, 0, target->name(), false, address, addend));
// Emits classic (pre-dyld-info) local/external relocations for one fixup
// cluster.  Mirrors addDyldInfo's classification, but records entries in
// _localRelocsAtom / _externalRelocsAtom instead of compressed dyld info.
4577 void OutputFile::addClassicRelocs(ld::Internal& state, ld::Internal::FinalSection* sect, const ld::Atom* atom,
4578 ld::Fixup* fixupWithTarget, ld::Fixup* fixupWithMinusTarget, ld::Fixup* fixupWithStore,
4579 const ld::Atom* target, const ld::Atom* minusTarget,
4580 uint64_t targetAddend, uint64_t minusTargetAddend)
4582 if ( sect->isSectionHidden() )
4583 return;
4585 // non-lazy-pointer section is encoded in indirect symbol table - not using relocations
4586 if ( sect->type() == ld::Section::typeNonLazyPointer ) {
4587 // except kexts and static pie which *do* use relocations
4588 switch (_options.outputKind()) {
4589 case Options::kKextBundle:
4590 break;
4591 case Options::kStaticExecutable:
4592 if ( _options.positionIndependentExecutable() )
4593 break;
4594 // else fall into default case
4595 default:
4596 assert(target != NULL);
4597 assert(fixupWithTarget != NULL);
4598 return;
4602 // no need to rebase or bind PCRel stores
4603 if ( this->isPcRelStore(fixupWithStore->kind) ) {
4604 // as long as target is in same linkage unit
4605 if ( (target == NULL) || (target->definition() != ld::Atom::definitionProxy) )
4606 return;
4609 // no need to rebase or bind PIC internal pointer diff
4610 if ( minusTarget != NULL ) {
4611 // with pointer diffs, both need to be in same linkage unit
4612 assert(minusTarget->definition() != ld::Atom::definitionProxy);
4613 assert(target != NULL);
4614 assert(target->definition() != ld::Atom::definitionProxy);
4615 // make sure target is not global and weak
4616 if ( (target->scope() == ld::Atom::scopeGlobal) && (target->combine() == ld::Atom::combineByName)
4617 && (atom->section().type() != ld::Section::typeCFI)
4618 && (atom->section().type() != ld::Section::typeDtraceDOF)
4619 && (atom->section().type() != ld::Section::typeUnwindInfo)
4620 && (minusTarget != target) ) {
4621 // ok for __eh_frame and __uwind_info to use pointer diffs to global weak symbols
4622 throwf("bad codegen, pointer diff in %s to global weak symbol %s", atom->name(), target->name());
4624 return;
4627 // cluster has no target, so needs no rebasing or binding
4628 if ( target == NULL )
4629 return;
4631 assert(_localRelocsAtom != NULL);
// relocation addresses are relative to the reloc base (image-format dependent)
4632 uint64_t relocAddress = atom->finalAddress() + fixupWithTarget->offsetInAtom - _localRelocsAtom->relocBaseAddress(state);
4634 bool inReadOnlySeg = ( strcmp(sect->segmentName(), "__TEXT") == 0 );
4635 bool needsLocalReloc = false;
4636 bool needsExternReloc = false;
// Dispatch on the kind of value being stored.
4638 switch ( fixupWithStore->kind ) {
4639 case ld::Fixup::kindLazyTarget:
4640 // lazy pointers don't need relocs
4641 break;
4642 case ld::Fixup::kindStoreLittleEndian32:
4643 case ld::Fixup::kindStoreLittleEndian64:
4644 case ld::Fixup::kindStoreBigEndian32:
4645 case ld::Fixup::kindStoreBigEndian64:
4646 case ld::Fixup::kindStoreTargetAddressLittleEndian32:
4647 case ld::Fixup::kindStoreTargetAddressLittleEndian64:
4648 case ld::Fixup::kindStoreTargetAddressBigEndian32:
4649 case ld::Fixup::kindStoreTargetAddressBigEndian64:
4650 // is pointer
4651 switch ( target->definition() ) {
4652 case ld::Atom::definitionProxy:
4653 needsExternReloc = true;
4654 break;
4655 case ld::Atom::definitionRegular:
4656 case ld::Atom::definitionTentative:
4657 // only slideable images need local relocs
4658 if ( _options.outputSlidable() )
4659 needsLocalReloc = true;
4660 // references to internal symbol never need binding
4661 if ( target->scope() != ld::Atom::scopeGlobal )
4662 break;
4663 // reference to global weak def needs weak binding in dynamic images
4664 if ( (target->combine() == ld::Atom::combineByName)
4665 && (target->definition() == ld::Atom::definitionRegular)
4666 && (_options.outputKind() != Options::kStaticExecutable)
4667 && (_options.outputKind() != Options::kPreload)
4668 && (atom != target) ) {
4669 needsExternReloc = true;
4671 else if ( _options.outputKind() == Options::kDynamicExecutable ) {
4672 // in main executables, the only way regular symbols are indirected is if -interposable is used
4673 if ( _options.interposable(target->name()) )
4674 needsExternReloc = true;
4676 else {
4677 // for flat-namespace or interposable two-level-namespace
4678 // all references to exported symbols get indirected
4679 if ( (_options.nameSpace() != Options::kTwoLevelNameSpace) || _options.interposable(target->name()) ) {
4680 // <rdar://problem/5254468> no external relocs for flat objc classes
4681 if ( strncmp(target->name(), ".objc_class_", 12) == 0 )
4682 break;
4683 // no rebase info for references to global symbols that will have binding info
4684 needsExternReloc = true;
// an external reloc supersedes a local one for the same location
4687 if ( needsExternReloc )
4688 needsLocalReloc = false;
4689 break;
4690 case ld::Atom::definitionAbsolute:
4691 break;
4693 if ( needsExternReloc ) {
4694 if ( inReadOnlySeg )
4695 noteTextReloc(atom, target);
4696 const ld::dylib::File* dylib = dynamic_cast<const ld::dylib::File*>(target->file());
4697 if ( (dylib != NULL) && dylib->willBeLazyLoadedDylib() )
4698 throwf("illegal data reference to %s in lazy loaded dylib %s", target->name(), dylib->path());
4699 _externalRelocsAtom->addExternalPointerReloc(relocAddress, target);
4700 sect->hasExternalRelocs = true;
// the stored content holds only the addend; dyld supplies the symbol address
4701 fixupWithTarget->contentAddendOnly = true;
4703 else if ( needsLocalReloc ) {
4704 assert(target != NULL);
4705 if ( inReadOnlySeg )
4706 noteTextReloc(atom, target);
4707 _localRelocsAtom->addPointerReloc(relocAddress, target->machoSection());
4708 sect->hasLocalRelocs = true;
4710 break;
4711 case ld::Fixup::kindStorePPCAbsLow14:
4712 case ld::Fixup::kindStorePPCAbsLow16:
4713 case ld::Fixup::kindStorePPCAbsHigh16AddLow:
4714 case ld::Fixup::kindStorePPCAbsHigh16:
4716 assert(target != NULL);
4717 if ( target->definition() == ld::Atom::definitionProxy ) {
4718 fprintf(stderr, "bad reloc target: %40s source %40s (offset 0x%" PRIx64 ") final addr 0x%" PRIx64 " atom offset: 0x%x [0x%" PRIx64 "]\n",
4719 target->name(), atom->name(), atom->objectAddress(), atom->finalAddress(),
4720 fixupWithStore->offsetInAtom, (uint64_t)atom->finalAddress()+fixupWithStore->offsetInAtom );
4721 // throwf("half word text relocs not supported in %s", atom->name());
4723 if ( _options.outputSlidable() ) {
4724 if ( inReadOnlySeg )
4725 noteTextReloc(atom, target);
4726 uint32_t machoSectionIndex = (target->definition() == ld::Atom::definitionAbsolute)
4727 ? R_ABS : target->machoSection();
4728 _localRelocsAtom->addTextReloc(relocAddress, fixupWithTarget->kind,
4729 target->finalAddress(), machoSectionIndex);
4730 sect->hasLocalRelocs = true;
4733 break;
4734 case ld::Fixup::kindStoreTargetAddressX86BranchPCRel32:
4735 #if SUPPORT_ARCH_arm64
4736 case ld::Fixup::kindStoreTargetAddressARM64Branch26:
4737 #endif
// kexts record call sites to external symbols so kextd can slide/link them
4738 if ( _options.outputKind() == Options::kKextBundle ) {
4739 assert(target != NULL);
4740 if ( target->definition() == ld::Atom::definitionProxy ) {
4741 _externalRelocsAtom->addExternalCallSiteReloc(relocAddress, target);
4742 fixupWithStore->contentAddendOnly = true;
4745 break;
4747 case ld::Fixup::kindStoreARMLow16:
4748 case ld::Fixup::kindStoreThumbLow16:
4749 // no way to encode rebasing of binding for these instructions
4750 if ( _options.outputSlidable() || (target->definition() == ld::Atom::definitionProxy) )
4751 throwf("no supported runtime lo16 relocation in %s from %s to %s", atom->name(), atom->file()->path(), target->name());
4752 break;
4754 case ld::Fixup::kindStoreARMHigh16:
4755 case ld::Fixup::kindStoreThumbHigh16:
4756 // no way to encode rebasing of binding for these instructions
4757 if ( _options.outputSlidable() || (target->definition() == ld::Atom::definitionProxy) )
4758 throwf("no supported runtime hi16 relocation in %s from %s to %s", atom->name(), atom->file()->path(), target->name());
4759 break;
4761 default:
4762 break;
// For -r (object file) output: decides whether the section relocation for a
// reference from 'atom' to 'target' should be external (symbol-based) rather
// than local (section-based).
4767 bool OutputFile::useExternalSectionReloc(const ld::Atom* atom, const ld::Atom* target, ld::Fixup* fixupWithTarget)
4769 if ( (_options.architecture() == CPU_TYPE_X86_64)
4770 || (_options.architecture() == CPU_TYPE_ARM64)
4772 // x86_64 and ARM64 use external relocations for everthing that has a symbol
4773 return ( target->symbolTableInclusion() != ld::Atom::symbolTableNotIn );
4776 // <rdar://problem/9513487> support arm branch interworking in -r mode
4777 if ( (_options.architecture() == CPU_TYPE_ARM) && (_options.outputKind() == Options::kObjectFile) ) {
4778 if ( atom->isThumb() != target->isThumb() ) {
4779 switch ( fixupWithTarget->kind ) {
4780 // have branch that switches mode, then might be 'b' not 'bl'
4781 // Force external relocation, since no way to do local reloc for 'b'
4782 case ld::Fixup::kindStoreTargetAddressThumbBranch22 :
4783 case ld::Fixup::kindStoreTargetAddressARMBranch24:
4784 return true;
4785 default:
4786 break;
// i386 thread-local variables always relocate through their symbol
4791 if ( (_options.architecture() == CPU_TYPE_I386) && (_options.outputKind() == Options::kObjectFile) ) {
4792 if ( target->contentType() == ld::Atom::typeTLV )
4793 return true;
4796 // most architectures use external relocations only for references
4797 // to a symbol in another translation unit or for references to "weak symbols" or tentative definitions
4798 assert(target != NULL);
4799 if ( target->definition() == ld::Atom::definitionProxy )
4800 return true;
4801 if ( (target->definition() == ld::Atom::definitionTentative) && ! _options.makeTentativeDefinitionsReal() )
4802 return true;
4803 if ( target->scope() != ld::Atom::scopeGlobal )
4804 return false;
// weak (coalescible) regular definitions must stay external so the final
// link can still coalesce them
4805 if ( (target->combine() == ld::Atom::combineByName) && (target->definition() == ld::Atom::definitionRegular) )
4806 return true;
4807 return false;
4810 bool OutputFile::useSectionRelocAddend(ld::Fixup* fixupWithTarget)
4812 #if SUPPORT_ARCH_arm64
4813 if ( _options.architecture() == CPU_TYPE_ARM64 ) {
4814 switch ( fixupWithTarget->kind ) {
4815 case ld::Fixup::kindStoreARM64Branch26:
4816 case ld::Fixup::kindStoreARM64Page21:
4817 case ld::Fixup::kindStoreARM64PageOff12:
4818 return true;
4819 default:
4820 return false;
4823 #endif
4824 return false;
// For -r (object file) output: records a section relocation for one fixup
// cluster, first marking which fixups should emit only the addend (external
// relocs) or the delta-to-addend (pc-rel on classic architectures).
4830 void OutputFile::addSectionRelocs(ld::Internal& state, ld::Internal::FinalSection* sect, const ld::Atom* atom,
4831 ld::Fixup* fixupWithTarget, ld::Fixup* fixupWithMinusTarget,
4832 ld::Fixup* fixupWithAddend, ld::Fixup* fixupWithStore,
4833 const ld::Atom* target, const ld::Atom* minusTarget,
4834 uint64_t targetAddend, uint64_t minusTargetAddend)
4836 if ( sect->isSectionHidden() )
4837 return;
4839 // in -r mode where there will be no labels on __eh_frame section, there is no need for relocations
4840 if ( (sect->type() == ld::Section::typeCFI) && _options.removeEHLabels() )
4841 return;
4843 // non-lazy-pointer section is encoded in indirect symbol table - not using relocations
4844 if ( sect->type() == ld::Section::typeNonLazyPointer )
4845 return;
4847 // tentative defs don't have any relocations
4848 if ( sect->type() == ld::Section::typeTentativeDefs )
4849 return;
4851 assert(target != NULL);
4852 assert(fixupWithTarget != NULL);
4853 bool targetUsesExternalReloc = this->useExternalSectionReloc(atom, target, fixupWithTarget);
4854 bool minusTargetUsesExternalReloc = (minusTarget != NULL) && this->useExternalSectionReloc(atom, minusTarget, fixupWithMinusTarget);
4856 // in x86_64 and arm64 .o files an external reloc means the content contains just the addend
4857 if ( (_options.architecture() == CPU_TYPE_X86_64)
4858 || (_options.architecture() == CPU_TYPE_ARM64)
4860 if ( targetUsesExternalReloc ) {
4861 fixupWithTarget->contentAddendOnly = true;
4862 fixupWithStore->contentAddendOnly = true;
// arm64 carries the addend in a separate ARM64_RELOC_ADDEND entry, so the
// addend fixup must not also be folded into the stored content
4863 if ( this->useSectionRelocAddend(fixupWithStore) && (fixupWithAddend != NULL) )
4864 fixupWithAddend->contentIgnoresAddend = true;
4866 if ( minusTargetUsesExternalReloc )
4867 fixupWithMinusTarget->contentAddendOnly = true;
4869 else {
4870 // for other archs, content is addend only with (non pc-rel) pointers
4871 // pc-rel instructions are funny. If the target is _foo+8 and _foo is
4872 // external, then the pc-rel instruction *evalutates* to the address 8.
4873 if ( targetUsesExternalReloc ) {
4874 // TLV support for i386 acts like RIP relative addressing
4875 // The addend is the offset from the PICBase to the end of the instruction
4876 if ( (_options.architecture() == CPU_TYPE_I386)
4877 && (_options.outputKind() == Options::kObjectFile)
4878 && (fixupWithStore->kind == ld::Fixup::kindStoreX86PCRel32TLVLoad) ) {
4879 fixupWithTarget->contentAddendOnly = true;
4880 fixupWithStore->contentAddendOnly = true;
// (sic) "contentDetlaToAddendOnly" is the field's actual spelling
4882 else if ( isPcRelStore(fixupWithStore->kind) ) {
4883 fixupWithTarget->contentDetlaToAddendOnly = true;
4884 fixupWithStore->contentDetlaToAddendOnly = true;
4886 else if ( minusTarget == NULL ){
4887 fixupWithTarget->contentAddendOnly = true;
4888 fixupWithStore->contentAddendOnly = true;
4893 if ( fixupWithStore != NULL ) {
4894 _sectionsRelocationsAtom->addSectionReloc(sect, fixupWithStore->kind, atom, fixupWithStore->offsetInAtom,
4895 targetUsesExternalReloc, minusTargetUsesExternalReloc,
4896 target, targetAddend, minusTarget, minusTargetAddend);
// Builds the (v1) LC_SEGMENT_SPLIT_INFO entries.  For every fixup in __TEXT whose
// target lives in a different segment, records the fixup address and kind so
// update_dyld_shared_cache can slide segments independently.  Only runs for images
// eligible for the dyld shared region.
4901 void OutputFile::makeSplitSegInfo(ld::Internal& state)
4903 if ( !_options.sharedRegionEligible() )
4904 return;
4906 for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
4907 ld::Internal::FinalSection* sect = *sit;
4908 if ( sect->isSectionHidden() )
4909 continue;
// v1 split-seg only tracks references *from* __TEXT
4910 if ( strcmp(sect->segmentName(), "__TEXT") != 0 )
4911 continue;
4912 for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
4913 const ld::Atom* atom = *ait;
// per-cluster state, rebuilt while walking this atom's fixups
4914 const ld::Atom* target = NULL;
4915 const ld::Atom* fromTarget = NULL;
4916 uint64_t accumulator = 0;
4917 bool thumbTarget;
4918 bool hadSubtract = false;
4919 for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
4920 if ( fit->firstInCluster() )
4921 target = NULL;
4922 if ( this->setsTarget(fit->kind) ) {
4923 accumulator = addressOf(state, fit, &target);
4924 thumbTarget = targetIsThumb(state, fit);
// thumb targets have the low bit set in the stored address
4925 if ( thumbTarget )
4926 accumulator |= 1;
4928 switch ( fit->kind ) {
4929 case ld::Fixup::kindSubtractTargetAddress:
4930 accumulator -= addressOf(state, fit, &fromTarget);
4931 hadSubtract = true;
4932 break;
4933 case ld::Fixup::kindAddAddend:
4934 accumulator += fit->u.addend;
4935 break;
4936 case ld::Fixup::kindSubtractAddend:
4937 accumulator -= fit->u.addend;
4938 break;
4939 case ld::Fixup::kindStoreBigEndian32:
4940 case ld::Fixup::kindStoreLittleEndian32:
4941 case ld::Fixup::kindStoreLittleEndian64:
4942 case ld::Fixup::kindStoreTargetAddressLittleEndian32:
4943 case ld::Fixup::kindStoreTargetAddressLittleEndian64:
4944 // if no subtract, then this is an absolute pointer which means
4945 // there is also a text reloc which update_dyld_shared_cache will use.
4946 if ( ! hadSubtract )
4947 break;
4948 // fall through
4949 case ld::Fixup::kindStoreX86PCRel32:
4950 case ld::Fixup::kindStoreX86PCRel32_1:
4951 case ld::Fixup::kindStoreX86PCRel32_2:
4952 case ld::Fixup::kindStoreX86PCRel32_4:
4953 case ld::Fixup::kindStoreX86PCRel32GOTLoad:
4954 case ld::Fixup::kindStoreX86PCRel32GOTLoadNowLEA:
4955 case ld::Fixup::kindStoreX86PCRel32GOT:
4956 case ld::Fixup::kindStoreX86PCRel32TLVLoad:
4957 case ld::Fixup::kindStoreX86PCRel32TLVLoadNowLEA:
4958 case ld::Fixup::kindStorePPCPicHigh16AddLow:
4959 case ld::Fixup::kindStoreTargetAddressX86PCRel32:
4960 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad:
4961 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA:
4962 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad:
4963 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA:
4964 case ld::Fixup::kindStoreARMLow16:
4965 case ld::Fixup::kindStoreThumbLow16:
4966 #if SUPPORT_ARCH_arm64
4967 case ld::Fixup::kindStoreARM64Page21:
4968 case ld::Fixup::kindStoreARM64GOTLoadPage21:
4969 case ld::Fixup::kindStoreARM64GOTLeaPage21:
4970 case ld::Fixup::kindStoreARM64TLVPLoadPage21:
4971 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPage21:
4972 case ld::Fixup::kindStoreTargetAddressARM64Page21:
4973 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21:
4974 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21:
4975 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPage21:
4976 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPage21:
4977 case ld::Fixup::kindStoreARM64PCRelToGOT:
4978 #endif
4979 assert(target != NULL);
// record only cross-segment references; intra-segment ones slide together
4980 if ( strcmp(sect->segmentName(), target->section().segmentName()) != 0 ) {
4981 _splitSegInfos.push_back(SplitSegInfoEntry(atom->finalAddress()+fit->offsetInAtom,fit->kind));
4983 break;
4984 case ld::Fixup::kindStoreARMHigh16:
4985 case ld::Fixup::kindStoreThumbHigh16:
4986 assert(target != NULL);
4987 if ( strcmp(sect->segmentName(), target->section().segmentName()) != 0 ) {
4988 // hi16 needs to know upper 4-bits of low16 to compute carry
4989 uint32_t extra = (accumulator >> 12) & 0xF;
4990 _splitSegInfos.push_back(SplitSegInfoEntry(atom->finalAddress()+fit->offsetInAtom,fit->kind, extra));
4992 break;
4993 case ld::Fixup::kindSetTargetImageOffset:
4994 accumulator = addressOf(state, fit, &target);
4995 assert(target != NULL);
// image offsets behave like deltas for the pointer cases above
4996 hadSubtract = true;
4997 break;
4998 default:
4999 break;
// Builds the v2 LC_SEGMENT_SPLIT_INFO entries.  Unlike v1, v2 records a typed
// (fromSection+offset -> toSection+offset, kind) pair for every fixup cluster whose
// source and destination sections differ, covering all sections (not just __TEXT),
// so the dyld shared cache builder can adjust each reference precisely.
5006 void OutputFile::makeSplitSegInfoV2(ld::Internal& state)
5008 static const bool log = false;
5009 if ( !_options.sharedRegionEligible() )
5010 return;
5012 for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
5013 ld::Internal::FinalSection* sect = *sit;
5014 if ( sect->isSectionHidden() )
5015 continue;
5016 bool codeSection = (sect->type() == ld::Section::typeCode);
5017 if (log) fprintf(stderr, "sect: %s, address=0x%llX\n", sect->sectionName(), sect->address);
5018 for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
5019 const ld::Atom* atom = *ait;
// state accumulated per fixup cluster; reset at firstInCluster below
5020 const ld::Atom* target = NULL;
5021 const ld::Atom* fromTarget = NULL;
5022 uint32_t picBase = 0;
5023 uint64_t accumulator = 0;
5024 bool thumbTarget;
5025 bool hadSubtract = false;
5026 uint8_t fromSectionIndex = atom->machoSection();
5027 uint8_t toSectionIndex;
// kind == 0 means "no split-seg entry needed for this cluster"
5028 uint8_t kind = 0;
5029 uint64_t fromOffset = 0;
5030 uint64_t toOffset = 0;
5031 uint64_t addend = 0;
5032 for (ld::Fixup::iterator fit = atom->fixupsBegin(), end=atom->fixupsEnd(); fit != end; ++fit) {
5033 if ( fit->firstInCluster() ) {
5034 target = NULL;
5035 hadSubtract = false;
5036 fromTarget = NULL;
5037 kind = 0;
5038 addend = 0;
// 255 is a sentinel meaning "target section not yet known"
5039 toSectionIndex = 255;
5040 fromOffset = atom->finalAddress() + fit->offsetInAtom - sect->address;
5042 if ( this->setsTarget(fit->kind) ) {
5043 accumulator = addressAndTarget(state, fit, &target);
5044 thumbTarget = targetIsThumb(state, fit);
// thumb targets carry the low bit in the stored address
5045 if ( thumbTarget )
5046 accumulator |= 1;
5047 toOffset = accumulator - state.atomToSection[target]->address;
5048 if ( target->definition() != ld::Atom::definitionProxy ) {
// mach_header "atoms" report section 0
5049 if ( target->section().type() == ld::Section::typeMachHeader )
5050 toSectionIndex = 0;
5051 else
5052 toSectionIndex = target->machoSection();
5055 switch ( fit->kind ) {
5056 case ld::Fixup::kindSubtractTargetAddress:
5057 accumulator -= addressAndTarget(state, fit, &fromTarget);
5058 hadSubtract = true;
5059 break;
5060 case ld::Fixup::kindAddAddend:
5061 accumulator += fit->u.addend;
5062 addend = fit->u.addend;
5063 break;
5064 case ld::Fixup::kindSubtractAddend:
5065 accumulator -= fit->u.addend;
5066 picBase = fit->u.addend;
5067 break;
5068 case ld::Fixup::kindSetLazyOffset:
5069 break;
5070 case ld::Fixup::kindStoreBigEndian32:
5071 case ld::Fixup::kindStoreLittleEndian32:
5072 case ld::Fixup::kindStoreTargetAddressLittleEndian32:
// a kindSetTargetImageOffset earlier in the cluster takes precedence
5073 if ( kind != DYLD_CACHE_ADJ_V2_IMAGE_OFF_32 ) {
5074 if ( hadSubtract )
5075 kind = DYLD_CACHE_ADJ_V2_DELTA_32;
5076 else
5077 kind = DYLD_CACHE_ADJ_V2_POINTER_32;
5079 break;
5080 case ld::Fixup::kindStoreLittleEndian64:
5081 case ld::Fixup::kindStoreTargetAddressLittleEndian64:
5082 if ( hadSubtract )
5083 kind = DYLD_CACHE_ADJ_V2_DELTA_64;
5084 else
5085 kind = DYLD_CACHE_ADJ_V2_POINTER_64;
5086 break;
5087 case ld::Fixup::kindStoreX86PCRel32:
5088 case ld::Fixup::kindStoreX86PCRel32_1:
5089 case ld::Fixup::kindStoreX86PCRel32_2:
5090 case ld::Fixup::kindStoreX86PCRel32_4:
5091 case ld::Fixup::kindStoreX86PCRel32GOTLoad:
5092 case ld::Fixup::kindStoreX86PCRel32GOTLoadNowLEA:
5093 case ld::Fixup::kindStoreX86PCRel32GOT:
5094 case ld::Fixup::kindStoreX86PCRel32TLVLoad:
5095 case ld::Fixup::kindStoreX86PCRel32TLVLoadNowLEA:
5096 case ld::Fixup::kindStoreTargetAddressX86PCRel32:
5097 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoad:
5098 case ld::Fixup::kindStoreTargetAddressX86PCRel32GOTLoadNowLEA:
5099 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoad:
5100 case ld::Fixup::kindStoreTargetAddressX86PCRel32TLVLoadNowLEA:
5101 #if SUPPORT_ARCH_arm64
5102 case ld::Fixup::kindStoreARM64PCRelToGOT:
5103 #endif
// pc-rel stores in data sections always need adjusting; in code only if cross-section
5104 if ( (fromSectionIndex != toSectionIndex) || !codeSection )
5105 kind = DYLD_CACHE_ADJ_V2_DELTA_32;
5106 break;
5107 #if SUPPORT_ARCH_arm64
5108 case ld::Fixup::kindStoreARM64Page21:
5109 case ld::Fixup::kindStoreARM64GOTLoadPage21:
5110 case ld::Fixup::kindStoreARM64GOTLeaPage21:
5111 case ld::Fixup::kindStoreARM64TLVPLoadPage21:
5112 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPage21:
5113 case ld::Fixup::kindStoreTargetAddressARM64Page21:
5114 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPage21:
5115 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPage21:
5116 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPage21:
5117 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPage21:
5118 if ( fromSectionIndex != toSectionIndex )
5119 kind = DYLD_CACHE_ADJ_V2_ARM64_ADRP;
5120 break;
5121 case ld::Fixup::kindStoreARM64PageOff12:
5122 case ld::Fixup::kindStoreARM64GOTLeaPageOff12:
5123 case ld::Fixup::kindStoreARM64TLVPLoadNowLeaPageOff12:
5124 case ld::Fixup::kindStoreTargetAddressARM64PageOff12:
5125 case ld::Fixup::kindStoreTargetAddressARM64GOTLeaPageOff12:
5126 case ld::Fixup::kindStoreTargetAddressARM64GOTLoadPageOff12:
5127 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadPageOff12:
5128 case ld::Fixup::kindStoreTargetAddressARM64TLVPLoadNowLeaPageOff12:
5129 if ( fromSectionIndex != toSectionIndex )
5130 kind = DYLD_CACHE_ADJ_V2_ARM64_OFF12;
5131 break;
5132 case ld::Fixup::kindStoreARM64Branch26:
5133 case ld::Fixup::kindStoreTargetAddressARM64Branch26:
5134 if ( fromSectionIndex != toSectionIndex )
5135 kind = DYLD_CACHE_ADJ_V2_ARM64_BR26;
5136 break;
5137 #endif
5138 case ld::Fixup::kindStoreARMHigh16:
5139 case ld::Fixup::kindStoreARMLow16:
// movw/movt pairs are only adjustable when pic-base-relative to this atom
5140 if ( (fromSectionIndex != toSectionIndex) && (fromTarget == atom) ) {
5141 kind = DYLD_CACHE_ADJ_V2_ARM_MOVW_MOVT;
5143 break;
5144 case ld::Fixup::kindStoreARMBranch24:
5145 case ld::Fixup::kindStoreTargetAddressARMBranch24:
5146 if ( fromSectionIndex != toSectionIndex )
5147 kind = DYLD_CACHE_ADJ_V2_ARM_BR24;
5148 break;
5149 case ld::Fixup::kindStoreThumbLow16:
5150 case ld::Fixup::kindStoreThumbHigh16:
5151 if ( (fromSectionIndex != toSectionIndex) && (fromTarget == atom) ) {
5152 kind = DYLD_CACHE_ADJ_V2_THUMB_MOVW_MOVT;
5154 break;
5155 case ld::Fixup::kindStoreThumbBranch22:
5156 case ld::Fixup::kindStoreTargetAddressThumbBranch22:
5157 if ( fromSectionIndex != toSectionIndex )
5158 kind = DYLD_CACHE_ADJ_V2_THUMB_BR22;
5159 break;
5160 case ld::Fixup::kindSetTargetImageOffset:
5161 kind = DYLD_CACHE_ADJ_V2_IMAGE_OFF_32;
5162 accumulator = addressAndTarget(state, fit, &target);
5163 assert(target != NULL);
5164 toSectionIndex = target->machoSection();
5165 toOffset = accumulator - state.atomToSection[target]->address;
5166 hadSubtract = true;
5167 break;
5168 default:
5169 break;
// at cluster end, emit one entry if this cluster resolved to an adjustable kind
5171 if ( fit->lastInCluster() ) {
5172 if ( (kind != 0) && (target != NULL) && (target->definition() != ld::Atom::definitionProxy) ) {
// setsTarget() computed toOffset before the addend was seen, so fold it in now
5173 if ( !hadSubtract && addend )
5174 toOffset += addend;
5175 assert(toSectionIndex != 255);
5176 if (log) fprintf(stderr, "from (%d.%s + 0x%llX) to (%d.%s + 0x%llX), kind=%d, atomAddr=0x%llX, sectAddr=0x%llx\n",
5177 fromSectionIndex, sect->sectionName(), fromOffset, toSectionIndex, state.atomToSection[target]->sectionName(),
5178 toOffset, kind, atom->finalAddress(), sect->address);
5179 _splitSegV2Infos.push_back(SplitSegInfoV2Entry(fromSectionIndex, fromOffset, toSectionIndex, toOffset, kind));
// Writes the -map file: output path/arch header, a numbered table of input object
// files, a table of sections, a table of symbols (with synthesized names for
// cstrings, FDEs and non-lazy pointers), and — when dead stripping — the list of
// dead-stripped symbols.  Failure to open the map file is a warning, not an error.
5188 void OutputFile::writeMapFile(ld::Internal& state)
5190 if ( _options.generatedMapPath() != NULL ) {
5191 FILE* mapFile = fopen(_options.generatedMapPath(), "w");
5192 if ( mapFile != NULL ) {
5193 // write output path
5194 fprintf(mapFile, "# Path: %s\n", _options.outputFilePath());
5195 // write output architecure
5196 fprintf(mapFile, "# Arch: %s\n", _options.architectureName());
5197 // write UUID
5198 //if ( fUUIDAtom != NULL ) {
5199 // const uint8_t* uuid = fUUIDAtom->getUUID();
5200 // fprintf(mapFile, "# UUID: %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X \n",
5201 // uuid[0], uuid[1], uuid[2], uuid[3], uuid[4], uuid[5], uuid[6], uuid[7],
5202 // uuid[8], uuid[9], uuid[10], uuid[11], uuid[12], uuid[13], uuid[14], uuid[15]);
5204 // write table of object files
// readerToOrdinal de-dupes input files; ordinalToReader gives a stable sorted order
5205 std::map<const ld::File*, ld::File::Ordinal> readerToOrdinal;
5206 std::map<ld::File::Ordinal, const ld::File*> ordinalToReader;
5207 std::map<const ld::File*, uint32_t> readerToFileOrdinal;
5208 for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
5209 ld::Internal::FinalSection* sect = *sit;
5210 if ( sect->isSectionHidden() )
5211 continue;
5212 for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
5213 const ld::Atom* atom = *ait;
5214 const ld::File* reader = atom->originalFile();
5215 if ( reader == NULL )
5216 continue;
5217 ld::File::Ordinal readerOrdinal = reader->ordinal();
5218 std::map<const ld::File*, ld::File::Ordinal>::iterator pos = readerToOrdinal.find(reader);
5219 if ( pos == readerToOrdinal.end() ) {
5220 readerToOrdinal[reader] = readerOrdinal;
5221 ordinalToReader[readerOrdinal] = reader;
// also register files that only contributed dead-stripped atoms
5225 for (const ld::Atom* atom : state.deadAtoms) {
5226 const ld::File* reader = atom->originalFile();
5227 if ( reader == NULL )
5228 continue;
5229 ld::File::Ordinal readerOrdinal = reader->ordinal();
5230 std::map<const ld::File*, ld::File::Ordinal>::iterator pos = readerToOrdinal.find(reader);
5231 if ( pos == readerToOrdinal.end() ) {
5232 readerToOrdinal[reader] = readerOrdinal;
5233 ordinalToReader[readerOrdinal] = reader;
// index 0 is reserved for atoms the linker itself synthesized
5236 fprintf(mapFile, "# Object files:\n");
5237 fprintf(mapFile, "[%3u] %s\n", 0, "linker synthesized");
5238 uint32_t fileIndex = 1;
5239 for(std::map<ld::File::Ordinal, const ld::File*>::iterator it = ordinalToReader.begin(); it != ordinalToReader.end(); ++it) {
5240 fprintf(mapFile, "[%3u] %s\n", fileIndex, it->second->path());
5241 readerToFileOrdinal[it->second] = fileIndex++;
5243 // write table of sections
5244 fprintf(mapFile, "# Sections:\n");
5245 fprintf(mapFile, "# Address\tSize \tSegment\tSection\n");
5246 for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
5247 ld::Internal::FinalSection* sect = *sit;
5248 if ( sect->isSectionHidden() )
5249 continue;
5250 fprintf(mapFile, "0x%08llX\t0x%08llX\t%s\t%s\n", sect->address, sect->size,
5251 sect->segmentName(), sect->sectionName());
5253 // write table of symbols
5254 fprintf(mapFile, "# Symbols:\n");
5255 fprintf(mapFile, "# Address\tSize \tFile Name\n");
5256 for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
5257 ld::Internal::FinalSection* sect = *sit;
5258 if ( sect->isSectionHidden() )
5259 continue;
5260 //bool isCstring = (sect->type() == ld::Section::typeCString);
5261 for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
// scratch for synthesized display names (cstrings, FDEs, non-lazy pointers)
5262 char buffer[4096];
5263 const ld::Atom* atom = *ait;
5264 const char* name = atom->name();
5265 // don't add auto-stripped aliases to .map file
5266 if ( (atom->size() == 0) && (atom->symbolTableInclusion() == ld::Atom::symbolTableNotInFinalLinkedImages) )
5267 continue;
5268 if ( atom->contentType() == ld::Atom::typeCString ) {
// show cstring literals as their (escaped) content, truncated to the buffer
5269 strcpy(buffer, "literal string: ");
5270 const char* s = (char*)atom->rawContentPointer();
5271 char* e = &buffer[4094];
5272 for (char* b = &buffer[strlen(buffer)]; b < e;) {
5273 char c = *s++;
5274 if ( c == '\n' ) {
5275 *b++ = '\\';
5276 *b++ = 'n';
5278 else {
5279 *b++ = c;
5281 if ( c == '\0' )
5282 break;
5284 buffer[4095] = '\0';
5285 name = buffer;
5287 else if ( (atom->contentType() == ld::Atom::typeCFI) && (strcmp(name, "FDE") == 0) ) {
// name FDEs after the function they describe
5288 for (ld::Fixup::iterator fit = atom->fixupsBegin(); fit != atom->fixupsEnd(); ++fit) {
5289 if ( (fit->kind == ld::Fixup::kindSetTargetAddress) && (fit->clusterSize == ld::Fixup::k1of4) ) {
5290 if ( (fit->binding == ld::Fixup::bindingDirectlyBound)
5291 && (fit->u.target->section().type() == ld::Section::typeCode) ) {
5292 strcpy(buffer, "FDE for: ");
5293 strlcat(buffer, fit->u.target->name(), 4096);
5294 name = buffer;
5299 else if ( atom->contentType() == ld::Atom::typeNonLazyPointer ) {
// name non-lazy pointers after what they point to
5300 strcpy(buffer, "non-lazy-pointer");
5301 for (ld::Fixup::iterator fit = atom->fixupsBegin(); fit != atom->fixupsEnd(); ++fit) {
5302 if ( fit->binding == ld::Fixup::bindingsIndirectlyBound ) {
5303 strcpy(buffer, "non-lazy-pointer-to: ");
5304 strlcat(buffer, state.indirectBindingTable[fit->u.bindingIndex]->name(), 4096);
5305 break;
5307 else if ( fit->binding == ld::Fixup::bindingDirectlyBound ) {
5308 strcpy(buffer, "non-lazy-pointer-to-local: ");
5309 strlcat(buffer, fit->u.target->name(), 4096);
5310 break;
5313 name = buffer;
5315 fprintf(mapFile, "0x%08llX\t0x%08llX\t[%3u] %s\n", atom->finalAddress(), atom->size(),
5316 readerToFileOrdinal[atom->originalFile()], name);
5319 // preload check is hack until 26613948 is fixed
5320 if ( _options.deadCodeStrip() && (_options.outputKind() != Options::kPreload) ) {
5321 fprintf(mapFile, "\n");
5322 fprintf(mapFile, "# Dead Stripped Symbols:\n");
5323 fprintf(mapFile, "# \tSize \tFile Name\n");
5324 for (const ld::Atom* atom : state.deadAtoms) {
5325 char buffer[4096];
5326 const char* name = atom->name();
5327 // don't add auto-stripped aliases to .map file
5328 if ( (atom->size() == 0) && (atom->symbolTableInclusion() == ld::Atom::symbolTableNotInFinalLinkedImages) )
5329 continue;
5330 if ( atom->contentType() == ld::Atom::typeCString ) {
// same cstring escaping as the live-symbol table above
5331 strcpy(buffer, "literal string: ");
5332 const char* s = (char*)atom->rawContentPointer();
5333 char* e = &buffer[4094];
5334 for (char* b = &buffer[strlen(buffer)]; b < e;) {
5335 char c = *s++;
5336 if ( c == '\n' ) {
5337 *b++ = '\\';
5338 *b++ = 'n';
5340 else {
5341 *b++ = c;
5343 if ( c == '\0' )
5344 break;
5346 buffer[4095] = '\0';
5347 name = buffer;
5349 fprintf(mapFile, "<<dead>> \t0x%08llX\t[%3u] %s\n", atom->size(),
5350 readerToFileOrdinal[atom->originalFile()], name);
5353 fclose(mapFile);
5355 else {
5356 warning("could not write map file: %s\n", _options.generatedMapPath());
// Appends one JSON object describing this link (UUID, leaf name, arch, and the
// dynamic / upward / re-exported dylibs plus archives used) to the -trace_file
// output.  Only runs when JSON tracing is requested, a UUID is being generated,
// and a trace output path is set.
5361 void OutputFile::writeJSONEntry(ld::Internal& state)
5363 if ( _options.traceEmitJSON() && (_options.UUIDMode() != Options::kUUIDNone) && (_options.traceOutputFile() != NULL) ) {
5365 // Convert the UUID to a string.
5366 const uint8_t* uuid = _headersAndLoadCommandAtom->getUUID();
5367 uuid_string_t uuidString;
5369 uuid_unparse(uuid, uuidString);
5371 // Enumerate the dylibs.
5372 std::vector<const ld::dylib::File*> dynamicList;
5373 std::vector<const ld::dylib::File*> upwardList;
5374 std::vector<const ld::dylib::File*> reexportList;
5376 for (const ld::dylib::File* dylib : _dylibsToLoad) {
5378 if (dylib->willBeUpwardDylib()) {
5380 upwardList.push_back(dylib);
5381 } else if (dylib->willBeReExported()) {
5383 reexportList.push_back(dylib);
5384 } else {
5386 dynamicList.push_back(dylib);
5391 * Build the JSON entry.
5394 std::string jsonEntry = "{";
5396 jsonEntry += "\"uuid\":\"" + std::string(uuidString) + "\",";
5398 // installPath() returns -final_output for non-dylibs
5399 const char* lastNameSlash = strrchr(_options.installPath(), '/');
5400 const char* leafName = (lastNameSlash != NULL) ? lastNameSlash+1 : _options.outputFilePath();
5401 jsonEntry += "\"name\":\"" + std::string(leafName) + "\",";
5403 jsonEntry += "\"arch\":\"" + std::string(_options.architectureName()) + "\"";
5405 if (dynamicList.size() > 0) {
// comma between elements is suppressed for the last pointer in the list
5406 jsonEntry += ",\"dynamic\":[";
5407 for (const ld::dylib::File* dylib : dynamicList) {
5408 jsonEntry += "\"" + std::string(dylib->path()) + "\"";
5409 if ((dylib != dynamicList.back())) {
5410 jsonEntry += ",";
5413 jsonEntry += "]";
5416 if (upwardList.size() > 0) {
5417 jsonEntry += ",\"upward-dynamic\":[";
5418 for (const ld::dylib::File* dylib : upwardList) {
5419 jsonEntry += "\"" + std::string(dylib->path()) + "\"";
5420 if ((dylib != upwardList.back())) {
5421 jsonEntry += ",";
5424 jsonEntry += "]";
5427 if (reexportList.size() > 0) {
5428 jsonEntry += ",\"re-exports\":[";
5429 for (const ld::dylib::File* dylib : reexportList) {
5430 jsonEntry += "\"" + std::string(dylib->path()) + "\"";
5431 if ((dylib != reexportList.back())) {
5432 jsonEntry += ",";
5435 jsonEntry += "]";
5438 if (state.archivePaths.size() > 0) {
5439 jsonEntry += ",\"archives\":[";
5440 for (const std::string& archivePath : state.archivePaths) {
// NOTE(review): this compares by string *value*, unlike the pointer compares above;
// a mid-list duplicate of the final archive path would drop its comma and emit
// invalid JSON — confirm archivePaths is de-duplicated upstream.
5441 jsonEntry += "\"" + std::string(archivePath) + "\"";
5442 if ((archivePath != state.archivePaths.back())) {
5443 jsonEntry += ",";
5446 jsonEntry += "]";
5448 jsonEntry += "}\n";
5450 // Write the JSON entry to the trace file.
// append mode: multiple link steps accumulate entries in one trace file
5451 std::ofstream out(_options.traceOutputFile(), ios::app);
5452 out << jsonEntry;
5456 // used to sort atoms with debug notes
5457 class DebugNoteSorter
5459 public:
5460 bool operator()(const ld::Atom* left, const ld::Atom* right) const
5462 // first sort by reader
5463 ld::File::Ordinal leftFileOrdinal = left->file()->ordinal();
5464 ld::File::Ordinal rightFileOrdinal = right->file()->ordinal();
5465 if ( leftFileOrdinal!= rightFileOrdinal)
5466 return (leftFileOrdinal < rightFileOrdinal);
5468 // then sort by atom objectAddress
5469 uint64_t leftAddr = left->finalAddress();
5470 uint64_t rightAddr = right->finalAddress();
5471 return leftAddr < rightAddr;
5476 const char* OutputFile::assureFullPath(const char* path)
5478 if ( path[0] == '/' )
5479 return path;
5480 char cwdbuff[MAXPATHLEN];
5481 if ( getcwd(cwdbuff, MAXPATHLEN) != NULL ) {
5482 char* result;
5483 asprintf(&result, "%s/%s", cwdbuff, path);
5484 if ( result != NULL )
5485 return result;
5487 return path;
// Returns the last-modification time of 'path', or 0 if the file cannot be stat'ed.
static time_t fileModTime(const char* path) {
	struct stat info;
	return ( stat(path, &info) == 0 ) ? info.st_mtime : 0;
}
// Synthesizes the "debug map" stab entries (N_SO/N_OSO/N_FUN/N_GSYM/...) that let
// debuggers find DWARF in the original .o files, and copies through stabs from
// inputs that were themselves compiled with stabs.  Skipped entirely under -S.
5499 void OutputFile::synthesizeDebugNotes(ld::Internal& state)
5501 // -S means don't synthesize debug map
5502 if ( _options.debugInfoStripping() == Options::kDebugInfoNone )
5503 return;
5504 // make a vector of atoms that come from files compiled with dwarf debug info
5505 std::vector<const ld::Atom*> atomsNeedingDebugNotes;
5506 std::set<const ld::Atom*> atomsWithStabs;
5507 atomsNeedingDebugNotes.reserve(1024);
// objFile/objFileHasDwarf/objFileHasStabs cache the debug-info kind of the
// most recently seen input file, since atoms from one file arrive in runs
5508 const ld::relocatable::File* objFile = NULL;
5509 bool objFileHasDwarf = false;
5510 bool objFileHasStabs = false;
5511 for (std::vector<ld::Internal::FinalSection*>::iterator sit = state.sections.begin(); sit != state.sections.end(); ++sit) {
5512 ld::Internal::FinalSection* sect = *sit;
5513 for (std::vector<const ld::Atom*>::iterator ait = sect->atoms.begin(); ait != sect->atoms.end(); ++ait) {
5514 const ld::Atom* atom = *ait;
5515 // no stabs for atoms that would not be in the symbol table
5516 if ( atom->symbolTableInclusion() == ld::Atom::symbolTableNotIn )
5517 continue;
5518 if ( atom->symbolTableInclusion() == ld::Atom::symbolTableNotInFinalLinkedImages )
5519 continue;
5520 if ( atom->symbolTableInclusion() == ld::Atom::symbolTableInWithRandomAutoStripLabel )
5521 continue;
5522 // no stabs for absolute symbols
5523 if ( atom->definition() == ld::Atom::definitionAbsolute )
5524 continue;
5525 // no stabs for .eh atoms
5526 if ( atom->contentType() == ld::Atom::typeCFI )
5527 continue;
5528 // no stabs for string literal atoms
5529 if ( atom->contentType() == ld::Atom::typeCString )
5530 continue;
5531 // no stabs for kernel dtrace probes
5532 if ( (_options.outputKind() == Options::kStaticExecutable) && (strncmp(atom->name(), "__dtrace_probe$", 15) == 0) )
5533 continue;
5534 const ld::File* file = atom->file();
5535 if ( file != NULL ) {
5536 if ( file != objFile ) {
5537 objFileHasDwarf = false;
5538 objFileHasStabs = false;
5539 objFile = dynamic_cast<const ld::relocatable::File*>(file);
5540 if ( objFile != NULL ) {
5541 switch ( objFile->debugInfo() ) {
5542 case ld::relocatable::File::kDebugInfoNone:
5543 break;
5544 case ld::relocatable::File::kDebugInfoDwarf:
5545 objFileHasDwarf = true;
5546 break;
5547 case ld::relocatable::File::kDebugInfoStabs:
5548 case ld::relocatable::File::kDebugInfoStabsUUID:
5549 objFileHasStabs = true;
5550 break;
5554 if ( objFileHasDwarf )
5555 atomsNeedingDebugNotes.push_back(atom);
5556 if ( objFileHasStabs )
5557 atomsWithStabs.insert(atom);
5562 // sort by file ordinal then atom ordinal
5563 std::sort(atomsNeedingDebugNotes.begin(), atomsNeedingDebugNotes.end(), DebugNoteSorter());
5565 // <rdar://problem/17689030> Add -add_ast_path option to linker which add N_AST stab entry to output
5566 const std::vector<const char*>& astPaths = _options.astFilePaths();
5567 for (std::vector<const char*>::const_iterator it=astPaths.begin(); it != astPaths.end(); it++) {
5568 const char* path = *it;
5569 // emit N_AST
5570 ld::relocatable::File::Stab astStab;
5571 astStab.atom = NULL;
5572 astStab.type = N_AST;
5573 astStab.other = 0;
5574 astStab.desc = 0;
// value field of N_AST holds the AST file's modification time
5575 astStab.value = fileModTime(path);
5576 astStab.string = path;
5577 state.stabs.push_back(astStab);
5580 // synthesize "debug notes" and add them to master stabs vector
// current translation unit state; a change in any of these triggers new SO/OSO stabs
5581 const char* dirPath = NULL;
5582 const char* filename = NULL;
5583 const ld::relocatable::File* atomObjFile = NULL;
5584 bool wroteStartSO = false;
5585 state.stabs.reserve(atomsNeedingDebugNotes.size()*4);
5586 std::unordered_set<const char*, CStringHash, CStringEquals> seenFiles;
5587 for (std::vector<const ld::Atom*>::iterator it=atomsNeedingDebugNotes.begin(); it != atomsNeedingDebugNotes.end(); it++) {
5588 const ld::Atom* atom = *it;
5589 const ld::File* atomFile = atom->file();
5590 //fprintf(stderr, "debug note for %s\n", atom->name());
5591 const char* newPath = atom->translationUnitSource();
5592 if ( newPath != NULL ) {
5593 const char* newDirPath;
5594 const char* newFilename;
5595 const char* lastSlash = strrchr(newPath, '/');
5596 if ( lastSlash == NULL )
5597 continue;
5598 newFilename = lastSlash+1;
// strdup'ed dir path is intentionally never freed; stabs reference it for the whole link
5599 char* temp = strdup(newPath);
5600 newDirPath = temp;
5601 // gdb like directory SO's to end in '/', but dwarf DW_AT_comp_dir usually does not have trailing '/'
5602 temp[lastSlash-newPath+1] = '\0';
5603 // We need SO's whenever the translation unit source file changes
5604 // We also need a new OSO every time the object file changes (which can be
5605 // more often than the source file change). In particular, when
5606 // multiple entries in a convenience lib are built from the same
5607 // source file (using some conditional compilation).
5608 const ld::relocatable::File* newAtomObjFile = dynamic_cast<const ld::relocatable::File*>(atomFile);
// NOTE(review): '&&' binds tighter than '||' — the last clause is
// (newAtomObjFile && newAtomObjFile != atomObjFile); parenthesize to silence warnings
5609 if ( (filename == NULL) || (strcmp(newFilename,filename) != 0)
5610 || (strcmp(newDirPath,dirPath) != 0)
5611 || newAtomObjFile && newAtomObjFile != atomObjFile ) {
5612 if ( filename != NULL ) {
5613 // translation unit change, emit ending SO
5614 ld::relocatable::File::Stab endFileStab;
5615 endFileStab.atom = NULL;
5616 endFileStab.type = N_SO;
5617 endFileStab.other = 1;
5618 endFileStab.desc = 0;
5619 endFileStab.value = 0;
5620 endFileStab.string = "";
5621 state.stabs.push_back(endFileStab);
5623 // new translation unit, emit start SO's
5624 ld::relocatable::File::Stab dirPathStab;
5625 dirPathStab.atom = NULL;
5626 dirPathStab.type = N_SO;
5627 dirPathStab.other = 0;
5628 dirPathStab.desc = 0;
5629 dirPathStab.value = 0;
5630 dirPathStab.string = newDirPath;
5631 state.stabs.push_back(dirPathStab);
5632 ld::relocatable::File::Stab fileStab;
5633 fileStab.atom = NULL;
5634 fileStab.type = N_SO;
5635 fileStab.other = 0;
5636 fileStab.desc = 0;
5637 fileStab.value = 0;
5638 fileStab.string = newFilename;
5639 state.stabs.push_back(fileStab);
5640 wroteStartSO = true;
5641 // Synthesize for this new object.
5642 ld::relocatable::File::Stab objStab;
5643 objStab.atom = NULL;
5644 objStab.type = N_OSO;
5645 // <rdar://problem/6337329> linker should put cpusubtype in n_sect field of nlist entry for N_OSO debug note entries
5646 objStab.other = atomFile->cpuSubType();
5647 objStab.desc = 1;
// NOTE(review): guard tests the *previous* atomObjFile but reads newAtomObjFile —
// looks like it should test newAtomObjFile; confirm against upstream ld64 before changing
5648 if ( atomObjFile != NULL ) {
5649 objStab.string = assureFullPath(newAtomObjFile->debugInfoPath());
5650 objStab.value = newAtomObjFile->debugInfoModificationTime();
5652 else {
5653 objStab.string = assureFullPath(atomFile->path());
5654 objStab.value = atomFile->modificationTime();
5656 state.stabs.push_back(objStab);
5657 // add the source file path to seenFiles so it does not show up in SOLs
5658 seenFiles.insert(newFilename);
5659 char* fullFilePath;
5660 asprintf(&fullFilePath, "%s%s", newDirPath, newFilename);
5661 // add both leaf path and full path
5662 seenFiles.insert(fullFilePath);
5664 filename = newFilename;
5665 dirPath = newDirPath;
5666 atomObjFile = newAtomObjFile;
5667 if ( atom->section().type() == ld::Section::typeCode ) {
5668 // Synthesize BNSYM and start FUN stabs
5669 ld::relocatable::File::Stab beginSym;
5670 beginSym.atom = atom;
5671 beginSym.type = N_BNSYM;
5672 beginSym.other = 1;
5673 beginSym.desc = 0;
5674 beginSym.value = 0;
5675 beginSym.string = "";
5676 state.stabs.push_back(beginSym);
5677 ld::relocatable::File::Stab startFun;
5678 startFun.atom = atom;
5679 startFun.type = N_FUN;
5680 startFun.other = 1;
5681 startFun.desc = 0;
5682 startFun.value = 0;
5683 startFun.string = atom->name();
5684 state.stabs.push_back(startFun);
5685 // Synthesize any SOL stabs needed
// SOLs mark line-info coming from a different file (e.g. headers, #include'd code)
5686 const char* curFile = NULL;
5687 for (ld::Atom::LineInfo::iterator lit = atom->beginLineInfo(); lit != atom->endLineInfo(); ++lit) {
5688 if ( lit->fileName != curFile ) {
5689 if ( seenFiles.count(lit->fileName) == 0 ) {
5690 seenFiles.insert(lit->fileName);
5691 ld::relocatable::File::Stab sol;
5692 sol.atom = 0;
5693 sol.type = N_SOL;
5694 sol.other = 0;
5695 sol.desc = 0;
5696 sol.value = 0;
5697 sol.string = lit->fileName;
5698 state.stabs.push_back(sol);
5700 curFile = lit->fileName;
5703 // Synthesize end FUN and ENSYM stabs
5704 ld::relocatable::File::Stab endFun;
5705 endFun.atom = atom;
5706 endFun.type = N_FUN;
5707 endFun.other = 0;
5708 endFun.desc = 0;
5709 endFun.value = 0;
5710 endFun.string = "";
5711 state.stabs.push_back(endFun);
5712 ld::relocatable::File::Stab endSym;
5713 endSym.atom = atom;
5714 endSym.type = N_ENSYM;
5715 endSym.other = 1;
5716 endSym.desc = 0;
5717 endSym.value = 0;
5718 endSym.string = "";
5719 state.stabs.push_back(endSym);
5721 else {
// data atoms: one STSYM (static) or GSYM (global) stab instead of FUN bracketing
5722 ld::relocatable::File::Stab globalsStab;
5723 const char* name = atom->name();
5724 if ( atom->scope() == ld::Atom::scopeTranslationUnit ) {
5725 // Synthesize STSYM stab for statics
5726 globalsStab.atom = atom;
5727 globalsStab.type = N_STSYM;
5728 globalsStab.other = 1;
5729 globalsStab.desc = 0;
5730 globalsStab.value = 0;
5731 globalsStab.string = name;
5732 state.stabs.push_back(globalsStab);
5734 else {
5735 // Synthesize GSYM stab for other globals
5736 globalsStab.atom = atom;
5737 globalsStab.type = N_GSYM;
5738 globalsStab.other = 1;
5739 globalsStab.desc = 0;
5740 globalsStab.value = 0;
5741 globalsStab.string = name;
5742 state.stabs.push_back(globalsStab);
5748 if ( wroteStartSO ) {
5749 // emit ending SO
5750 ld::relocatable::File::Stab endFileStab;
5751 endFileStab.atom = NULL;
5752 endFileStab.type = N_SO;
5753 endFileStab.other = 1;
5754 endFileStab.desc = 0;
5755 endFileStab.value = 0;
5756 endFileStab.string = "";
5757 state.stabs.push_back(endFileStab);
5760 // copy any stabs from .o file
5761 std::set<const ld::File*> filesSeenWithStabs;
5762 for (std::set<const ld::Atom*>::iterator it=atomsWithStabs.begin(); it != atomsWithStabs.end(); it++) {
5763 const ld::Atom* atom = *it;
5764 objFile = dynamic_cast<const ld::relocatable::File*>(atom->file());
5765 if ( objFile != NULL ) {
5766 if ( filesSeenWithStabs.count(objFile) == 0 ) {
5767 filesSeenWithStabs.insert(objFile);
5768 const std::vector<ld::relocatable::File::Stab>* stabs = objFile->stabs();
5769 if ( stabs != NULL ) {
5770 for(std::vector<ld::relocatable::File::Stab>::const_iterator sit = stabs->begin(); sit != stabs->end(); ++sit) {
5771 ld::relocatable::File::Stab stab = *sit;
5772 // ignore stabs associated with atoms that were dead stripped or coalesced away
5773 if ( (sit->atom != NULL) && (atomsWithStabs.count(sit->atom) == 0) )
5774 continue;
5775 // <rdar://problem/8284718> Value of N_SO stabs should be address of first atom from translation unit
5776 if ( (stab.type == N_SO) && (stab.string != NULL) && (stab.string[0] != '\0') ) {
5777 stab.atom = atom;
5779 state.stabs.push_back(stab);
5789 } // namespace tool
5790 } // namespace ld