MC: Switch MCFragment to storing the layout order index, not its index in the file.
[llvm.git] lib/MC/MCAssembler.cpp

//===- lib/MC/MCAssembler.cpp - Assembler Backend Implementation ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "assembler"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCValue.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetRegistry.h"
#include "llvm/Target/TargetAsmBackend.h"

#include <vector>
using namespace llvm;

namespace {
namespace stats {
STATISTIC(EmittedFragments, "Number of emitted assembler fragments");
STATISTIC(EvaluateFixup, "Number of evaluated fixups");
STATISTIC(FragmentLayouts, "Number of fragment layouts");
STATISTIC(ObjectBytes, "Number of emitted object file bytes");
STATISTIC(RelaxationSteps, "Number of assembler layout and relaxation steps");
STATISTIC(RelaxedInstructions, "Number of relaxed instructions");
STATISTIC(SectionLayouts, "Number of section layouts");
}
}

// FIXME FIXME FIXME: There are a number of places in this file where we
// convert what is a 64-bit assembler value used for computation into a value
// in the object file, which may truncate it. We should detect that truncation
// where invalid and report errors back.

/* *** */

MCAsmLayout::MCAsmLayout(MCAssembler &Asm) : Assembler(Asm) {
  // Compute the section layout order. Virtual sections must go last.
  for (MCAssembler::iterator it = Asm.begin(), ie = Asm.end(); it != ie; ++it)
    if (!Asm.getBackend().isVirtualSection(it->getSection()))
      SectionOrder.push_back(&*it);
  for (MCAssembler::iterator it = Asm.begin(), ie = Asm.end(); it != ie; ++it)
    if (Asm.getBackend().isVirtualSection(it->getSection()))
      SectionOrder.push_back(&*it);
}

void MCAsmLayout::UpdateForSlide(MCFragment *F, int SlideAmount) {
  // We shouldn't have to do anything special to support negative slides, and it
  // is a perfectly valid thing to do as long as other parts of the system can
  // guarantee convergence.
  assert(SlideAmount >= 0 && "Negative slides not yet supported");

  // Update the layout by simply recomputing the layout for the entire
  // file. This is trivially correct, but very slow.
  //
  // FIXME-PERF: This is O(N^2), but will be eliminated once we get smarter.

  // Layout the sections in order.
  LayoutFile();
}

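// FragmentReplaced - Copy the layout data (offset and effective size) from
// \arg Src to \arg Dst. Used when a fragment is replaced in place, e.g. when
// FinishLayout lowers an MCInstFragment into an MCDataFragment.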
void MCAsmLayout::FragmentReplaced(MCFragment *Src, MCFragment *Dst) {
  Dst->Offset = Src->Offset;
  Dst->EffectiveSize = Src->EffectiveSize;
}

uint64_t MCAsmLayout::getFragmentAddress(const MCFragment *F) const {
  assert(F->getParent() && "Missing section()!");
  return getSectionAddress(F->getParent()) + getFragmentOffset(F);
}

uint64_t MCAsmLayout::getFragmentEffectiveSize(const MCFragment *F) const {
  assert(F->EffectiveSize != ~UINT64_C(0) && "Effective size not set!");
  return F->EffectiveSize;
}

uint64_t MCAsmLayout::getFragmentOffset(const MCFragment *F) const {
  assert(F->Offset != ~UINT64_C(0) && "Offset not set!");
  return F->Offset;
}

uint64_t MCAsmLayout::getSymbolAddress(const MCSymbolData *SD) const {
  assert(SD->getFragment() && "Invalid getAddress() on undefined symbol!");
  return getFragmentAddress(SD->getFragment()) + SD->getOffset();
}

uint64_t MCAsmLayout::getSectionAddress(const MCSectionData *SD) const {
  assert(SD->Address != ~UINT64_C(0) && "Address not set!");
  return SD->Address;
}

uint64_t MCAsmLayout::getSectionAddressSize(const MCSectionData *SD) const {
  // The size is the last fragment's end offset.
  const MCFragment &F = SD->getFragmentList().back();
  return getFragmentOffset(&F) + getFragmentEffectiveSize(&F);
}

uint64_t MCAsmLayout::getSectionFileSize(const MCSectionData *SD) const {
  // Virtual sections have no file size.
  if (getAssembler().getBackend().isVirtualSection(SD->getSection()))
    return 0;

  // Otherwise, the file size is the same as the address space size.
  return getSectionAddressSize(SD);
}

uint64_t MCAsmLayout::getSectionSize(const MCSectionData *SD) const {
  // The logical size is the address space size minus any tail padding.
  uint64_t Size = getSectionAddressSize(SD);
  const MCAlignFragment *AF =
    dyn_cast<MCAlignFragment>(&(SD->getFragmentList().back()));
  if (AF && AF->hasOnlyAlignAddress())
    Size -= getFragmentEffectiveSize(AF);

  return Size;
}

/* *** */

MCFragment::MCFragment() : Kind(FragmentType(~0)) {
}

MCFragment::MCFragment(FragmentType _Kind, MCSectionData *_Parent)
  : Kind(_Kind), Parent(_Parent), Atom(0), EffectiveSize(~UINT64_C(0))
{
  if (Parent)
    Parent->getFragmentList().push_back(this);
}

MCFragment::~MCFragment() {
}

/* *** */

MCSectionData::MCSectionData() : Section(0) {}

MCSectionData::MCSectionData(const MCSection &_Section, MCAssembler *A)
  : Section(&_Section),
    Alignment(1),
    Address(~UINT64_C(0)),
    HasInstructions(false)
{
  if (A)
    A->getSectionList().push_back(this);
}

/* *** */

MCSymbolData::MCSymbolData() : Symbol(0) {}

MCSymbolData::MCSymbolData(const MCSymbol &_Symbol, MCFragment *_Fragment,
                           uint64_t _Offset, MCAssembler *A)
  : Symbol(&_Symbol), Fragment(_Fragment), Offset(_Offset),
    IsExternal(false), IsPrivateExtern(false),
    CommonSize(0), CommonAlign(0), Flags(0), Index(0)
{
  if (A)
    A->getSymbolList().push_back(this);
}

/* *** */

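// A rough usage sketch (not prescribed by this file): an object streamer, e.g.
// the Mach-O streamer, constructs an MCAssembler from a context, target
// backend, and code emitter, appends fragments to its sections as directives
// and instructions are seen, and finally calls Finish() to lay everything out
// and write the object file.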
MCAssembler::MCAssembler(MCContext &_Context, TargetAsmBackend &_Backend,
                         MCCodeEmitter &_Emitter, raw_ostream &_OS)
  : Context(_Context), Backend(_Backend), Emitter(_Emitter),
    OS(_OS), RelaxAll(false), SubsectionsViaSymbols(false)
{
}

MCAssembler::~MCAssembler() {
}

static bool isScatteredFixupFullyResolvedSimple(const MCAssembler &Asm,
                                                const MCAsmFixup &Fixup,
                                                const MCValue Target,
                                                const MCSection *BaseSection) {
  // The effective fixup address is
  //   addr(atom(A)) + offset(A)
  //     - addr(atom(B)) - offset(B)
  //     - addr(<base symbol>) + <fixup offset from base symbol>
  // and the offsets are not relocatable, so the fixup is fully resolved when
  //   addr(atom(A)) - addr(atom(B)) - addr(<base symbol>) == 0.
  //
  // The simple (Darwin, except on x86_64) way of dealing with this was to
  // assume that any reference to a temporary symbol *must* be a temporary
  // symbol in the same atom, unless the sections differ. Therefore, any PCrel
  // relocation to a temporary symbol (in the same section) is fully
  // resolved. This also works in conjunction with absolutized .set, which
  // requires the compiler to use .set to absolutize the differences between
  // symbols which the compiler knows to be assembly time constants, so we
  // don't need to worry about considering symbol differences fully resolved.

  // Non-relative fixups are only resolved if constant.
  if (!BaseSection)
    return Target.isAbsolute();

  // Otherwise, relative fixups are only resolved if not a difference and the
  // target is a temporary in the same section.
  if (Target.isAbsolute() || Target.getSymB())
    return false;

  const MCSymbol *A = &Target.getSymA()->getSymbol();
  if (!A->isTemporary() || !A->isInSection() ||
      &A->getSection() != BaseSection)
    return false;

  return true;
}

static bool isScatteredFixupFullyResolved(const MCAssembler &Asm,
                                          const MCAsmLayout &Layout,
                                          const MCAsmFixup &Fixup,
                                          const MCValue Target,
                                          const MCSymbolData *BaseSymbol) {
  // The effective fixup address is
  //   addr(atom(A)) + offset(A)
  //     - addr(atom(B)) - offset(B)
  //     - addr(BaseSymbol) + <fixup offset from base symbol>
  // and the offsets are not relocatable, so the fixup is fully resolved when
  //   addr(atom(A)) - addr(atom(B)) - addr(BaseSymbol) == 0.
  //
  // Note that "false" is almost always conservatively correct (it means we emit
  // a relocation which is unnecessary), except when it would force us to emit a
  // relocation which the target cannot encode.

  const MCSymbolData *A_Base = 0, *B_Base = 0;
  if (const MCSymbolRefExpr *A = Target.getSymA()) {
    // Modified symbol references cannot be resolved.
    if (A->getKind() != MCSymbolRefExpr::VK_None)
      return false;

    A_Base = Asm.getAtom(Layout, &Asm.getSymbolData(A->getSymbol()));
    if (!A_Base)
      return false;
  }

  if (const MCSymbolRefExpr *B = Target.getSymB()) {
    // Modified symbol references cannot be resolved.
    if (B->getKind() != MCSymbolRefExpr::VK_None)
      return false;

    B_Base = Asm.getAtom(Layout, &Asm.getSymbolData(B->getSymbol()));
    if (!B_Base)
      return false;
  }

  // If there is no base, A and B have to be the same atom for this fixup to be
  // fully resolved.
  if (!BaseSymbol)
    return A_Base == B_Base;

  // Otherwise, B must be missing and A must be the base.
  return !B_Base && BaseSymbol == A_Base;
}

bool MCAssembler::isSymbolLinkerVisible(const MCSymbolData *SD) const {
  // Non-temporary labels should always be visible to the linker.
  if (!SD->getSymbol().isTemporary())
    return true;

  // Absolute temporary labels are never visible.
  if (!SD->getFragment())
    return false;

  // Otherwise, check if the section requires symbols even for temporary labels.
  return getBackend().doesSectionRequireSymbols(
    SD->getFragment()->getParent()->getSection());
}

const MCSymbolData *MCAssembler::getAtom(const MCAsmLayout &Layout,
                                         const MCSymbolData *SD) const {
  // Linker visible symbols define atoms.
  if (isSymbolLinkerVisible(SD))
    return SD;

  // Absolute and undefined symbols have no defining atom.
  if (!SD->getFragment())
    return 0;

  // Non-linker visible symbols in sections which can't be atomized have no
  // defining atom.
  if (!getBackend().isSectionAtomizable(
        SD->getFragment()->getParent()->getSection()))
    return 0;

  // Otherwise, return the atom for the containing fragment.
  return SD->getFragment()->getAtom();
}

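// EvaluateFixup - Evaluate a fixup against the current layout. The value is
// the target's constant, plus the address of symbol A (if defined), minus the
// address of symbol B (if defined); for PC-relative fixups the fixup's own
// address is subtracted as well. Returns true if the fixup is fully resolved,
// i.e. no relocation is needed.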
bool MCAssembler::EvaluateFixup(const MCAsmLayout &Layout,
                                const MCAsmFixup &Fixup, const MCFragment *DF,
                                MCValue &Target, uint64_t &Value) const {
  ++stats::EvaluateFixup;

  if (!Fixup.Value->EvaluateAsRelocatable(Target, &Layout))
    report_fatal_error("expected relocatable expression");

  // FIXME: How do non-scattered symbols work in ELF? I presume the linker
  // doesn't support small relocations, but then under what criteria does the
  // assembler allow symbol differences?

  Value = Target.getConstant();

  bool IsPCRel =
    Emitter.getFixupKindInfo(Fixup.Kind).Flags & MCFixupKindInfo::FKF_IsPCRel;
  bool IsResolved = true;
  if (const MCSymbolRefExpr *A = Target.getSymA()) {
    if (A->getSymbol().isDefined())
      Value += Layout.getSymbolAddress(&getSymbolData(A->getSymbol()));
    else
      IsResolved = false;
  }
  if (const MCSymbolRefExpr *B = Target.getSymB()) {
    if (B->getSymbol().isDefined())
      Value -= Layout.getSymbolAddress(&getSymbolData(B->getSymbol()));
    else
      IsResolved = false;
  }

  // If we are using scattered symbols, determine whether this value is actually
  // resolved; scattering may cause atoms to move.
  if (IsResolved && getBackend().hasScatteredSymbols()) {
    if (getBackend().hasReliableSymbolDifference()) {
      // If this is a PCrel relocation, find the base atom (identified by its
      // symbol) that the fixup value is relative to.
      const MCSymbolData *BaseSymbol = 0;
      if (IsPCRel) {
        BaseSymbol = DF->getAtom();
        if (!BaseSymbol)
          IsResolved = false;
      }

      if (IsResolved)
        IsResolved = isScatteredFixupFullyResolved(*this, Layout, Fixup, Target,
                                                   BaseSymbol);
    } else {
      const MCSection *BaseSection = 0;
      if (IsPCRel)
        BaseSection = &DF->getParent()->getSection();

      IsResolved = isScatteredFixupFullyResolvedSimple(*this, Fixup, Target,
                                                       BaseSection);
    }
  }

  if (IsPCRel)
    Value -= Layout.getFragmentAddress(DF) + Fixup.Offset;

  return IsResolved;
}

uint64_t MCAssembler::ComputeFragmentSize(MCAsmLayout &Layout,
                                          const MCFragment &F,
                                          uint64_t SectionAddress,
                                          uint64_t FragmentOffset) const {
  switch (F.getKind()) {
  case MCFragment::FT_Data:
    return cast<MCDataFragment>(F).getContents().size();
  case MCFragment::FT_Fill:
    return cast<MCFillFragment>(F).getSize();
  case MCFragment::FT_Inst:
    return cast<MCInstFragment>(F).getInstSize();

  case MCFragment::FT_Align: {
    const MCAlignFragment &AF = cast<MCAlignFragment>(F);

    assert((!AF.hasOnlyAlignAddress() || !AF.getNextNode()) &&
           "Invalid OnlyAlignAddress bit, not the last fragment!");

    uint64_t Size = OffsetToAlignment(SectionAddress + FragmentOffset,
                                      AF.getAlignment());

    // Honor MaxBytesToEmit.
    if (Size > AF.getMaxBytesToEmit())
      return 0;

    return Size;
  }

  case MCFragment::FT_Org: {
    const MCOrgFragment &OF = cast<MCOrgFragment>(F);

    // FIXME: We should compute this sooner, we don't want to recurse here, and
    // we would like to be more functional.
    int64_t TargetLocation;
    if (!OF.getOffset().EvaluateAsAbsolute(TargetLocation, &Layout))
      report_fatal_error("expected assembly-time absolute expression");

    // FIXME: We need a way to communicate this error.
    int64_t Offset = TargetLocation - FragmentOffset;
    if (Offset < 0)
      report_fatal_error("invalid .org offset '" + Twine(TargetLocation) +
                         "' (at offset '" + Twine(FragmentOffset) + "')");

    return Offset;
  }
  }

  assert(0 && "invalid fragment kind");
  return 0;
}

void MCAsmLayout::LayoutFile() {
  for (unsigned i = 0, e = getSectionOrder().size(); i != e; ++i) {
    MCSectionData *SD = getSectionOrder()[i];

    LayoutSection(SD);
    for (MCSectionData::iterator it = SD->begin(),
           ie = SD->end(); it != ie; ++it)
      LayoutFragment(it);
  }
}

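// LayoutFragment - Place \arg F immediately after its predecessor: the
// fragment's offset is the previous fragment's offset plus its effective size
// (or 0 for the first fragment in the section), and its effective size is
// recomputed with ComputeFragmentSize.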
void MCAsmLayout::LayoutFragment(MCFragment *F) {
  uint64_t StartAddress = getSectionAddress(F->getParent());

  // Get the fragment start address.
  uint64_t Address = StartAddress;
  if (MCFragment *Prev = F->getPrevNode())
    Address = (StartAddress + getFragmentOffset(Prev) +
               getFragmentEffectiveSize(Prev));

  ++stats::FragmentLayouts;

  // Compute fragment offset and size.
  F->Offset = Address - StartAddress;
  F->EffectiveSize = getAssembler().ComputeFragmentSize(*this, *F, StartAddress,
                                                        F->Offset);
}

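// LayoutSection - Assign \arg SD its start address: the first section in
// layout order starts at address 0, and each subsequent section starts where
// the previous one ends, rounded up to this section's alignment.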
void MCAsmLayout::LayoutSection(MCSectionData *SD) {
  unsigned SectionOrderIndex = SD->getLayoutOrder();

  ++stats::SectionLayouts;

  // Compute the section start address.
  uint64_t StartAddress = 0;
  if (SectionOrderIndex) {
    MCSectionData *Prev = getSectionOrder()[SectionOrderIndex - 1];
    StartAddress = getSectionAddress(Prev) + getSectionAddressSize(Prev);
  }

  // Honor the section alignment requirements.
  StartAddress = RoundUpToAlignment(StartAddress, SD->getAlignment());

  // Set the section address.
  SD->Address = StartAddress;
}

/// WriteFragmentData - Write the \arg F data to the output file.
static void WriteFragmentData(const MCAssembler &Asm, const MCAsmLayout &Layout,
                              const MCFragment &F, MCObjectWriter *OW) {
  uint64_t Start = OW->getStream().tell();
  (void) Start;

  ++stats::EmittedFragments;

  // FIXME: Embed in fragments instead?
  uint64_t FragmentSize = Layout.getFragmentEffectiveSize(&F);
  switch (F.getKind()) {
  case MCFragment::FT_Align: {
    MCAlignFragment &AF = cast<MCAlignFragment>(F);
    uint64_t Count = FragmentSize / AF.getValueSize();

    assert(AF.getValueSize() && "Invalid virtual align in concrete fragment!");

    // FIXME: This error shouldn't actually occur (the front end should emit
    // multiple .align directives to enforce the semantics it wants), but is
    // severe enough that we want to report it. How to handle this?
    if (Count * AF.getValueSize() != FragmentSize)
      report_fatal_error("undefined .align directive, value size '" +
                         Twine(AF.getValueSize()) +
                         "' is not a divisor of padding size '" +
                         Twine(FragmentSize) + "'");

    // See if we are aligning with nops, and if so do that first to try to fill
    // the Count bytes. Then if that did not fill any bytes or there are any
    // bytes left to fill, use the Value and ValueSize to fill the rest.
    // If we are aligning with nops, ask the target to emit the right data.
    if (AF.hasEmitNops()) {
      if (!Asm.getBackend().WriteNopData(Count, OW))
        report_fatal_error("unable to write nop sequence of " +
                           Twine(Count) + " bytes");
      break;
    }

    // Otherwise, write out in multiples of the value size.
    for (uint64_t i = 0; i != Count; ++i) {
      switch (AF.getValueSize()) {
      default:
        assert(0 && "Invalid size!");
      case 1: OW->Write8 (uint8_t (AF.getValue())); break;
      case 2: OW->Write16(uint16_t(AF.getValue())); break;
      case 4: OW->Write32(uint32_t(AF.getValue())); break;
      case 8: OW->Write64(uint64_t(AF.getValue())); break;
      }
    }
    break;
  }

  case MCFragment::FT_Data: {
    MCDataFragment &DF = cast<MCDataFragment>(F);
    assert(FragmentSize == DF.getContents().size() && "Invalid size!");
    OW->WriteBytes(DF.getContents().str());
    break;
  }

  case MCFragment::FT_Fill: {
    MCFillFragment &FF = cast<MCFillFragment>(F);

    assert(FF.getValueSize() && "Invalid virtual align in concrete fragment!");

    for (uint64_t i = 0, e = FF.getSize() / FF.getValueSize(); i != e; ++i) {
      switch (FF.getValueSize()) {
      default:
        assert(0 && "Invalid size!");
      case 1: OW->Write8 (uint8_t (FF.getValue())); break;
      case 2: OW->Write16(uint16_t(FF.getValue())); break;
      case 4: OW->Write32(uint32_t(FF.getValue())); break;
      case 8: OW->Write64(uint64_t(FF.getValue())); break;
      }
    }
    break;
  }

  case MCFragment::FT_Inst:
    llvm_unreachable("unexpected inst fragment after lowering");
    break;

  case MCFragment::FT_Org: {
    MCOrgFragment &OF = cast<MCOrgFragment>(F);

    for (uint64_t i = 0, e = FragmentSize; i != e; ++i)
      OW->Write8(uint8_t(OF.getValue()));

    break;
  }
  }

  assert(OW->getStream().tell() - Start == FragmentSize);
}

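// WriteSectionData - Write out the fragment data for \arg SD. Virtual sections
// contribute no file contents; they are only checked for fragments that would
// be illegal in a virtual section.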
void MCAssembler::WriteSectionData(const MCSectionData *SD,
                                   const MCAsmLayout &Layout,
                                   MCObjectWriter *OW) const {
  // Ignore virtual sections.
  if (getBackend().isVirtualSection(SD->getSection())) {
    assert(Layout.getSectionFileSize(SD) == 0 && "Invalid size for section!");

    // Check that contents are only things legal inside a virtual section.
    for (MCSectionData::const_iterator it = SD->begin(),
           ie = SD->end(); it != ie; ++it) {
      switch (it->getKind()) {
      default:
        assert(0 && "Invalid fragment in virtual section!");
      case MCFragment::FT_Align:
        assert(!cast<MCAlignFragment>(it)->getValueSize() &&
               "Invalid align in virtual section!");
        break;
      case MCFragment::FT_Fill:
        assert(!cast<MCFillFragment>(it)->getValueSize() &&
               "Invalid fill in virtual section!");
        break;
      }
    }

    return;
  }

  uint64_t Start = OW->getStream().tell();
  (void) Start;

  for (MCSectionData::const_iterator it = SD->begin(),
         ie = SD->end(); it != ie; ++it)
    WriteFragmentData(*this, Layout, *it, OW);

  assert(OW->getStream().tell() - Start == Layout.getSectionFileSize(SD));
}

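// Finish - Drive the assembly process: lay out and relax fragments until the
// layout converges, lower any remaining instruction fragments, evaluate and
// apply fixups (recording relocations for anything unresolved), and hand the
// result to the object writer.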
void MCAssembler::Finish() {
  DEBUG_WITH_TYPE("mc-dump", {
      llvm::errs() << "assembler backend - pre-layout\n--\n";
      dump(); });

  // Create the layout object.
  MCAsmLayout Layout(*this);

  // Insert additional align fragments for concrete sections to explicitly pad
  // the previous section to match their alignment requirements. This is for
  // 'gas' compatibility; it shouldn't strictly be necessary.
  //
  // FIXME: This may be Mach-O specific.
  for (unsigned i = 1, e = Layout.getSectionOrder().size(); i < e; ++i) {
    MCSectionData *SD = Layout.getSectionOrder()[i];

    // Ignore sections without alignment requirements.
    unsigned Align = SD->getAlignment();
    if (Align <= 1)
      continue;

    // Ignore virtual sections; they don't cause file size modifications.
    if (getBackend().isVirtualSection(SD->getSection()))
      continue;

    // Otherwise, create a new align fragment at the end of the previous
    // section.
    MCAlignFragment *AF = new MCAlignFragment(Align, 0, 1, Align,
                                              Layout.getSectionOrder()[i - 1]);
    AF->setOnlyAlignAddress(true);
  }

  // Create dummy fragments and assign section ordinals.
  unsigned SectionIndex = 0;
  for (MCAssembler::iterator it = begin(), ie = end(); it != ie; ++it) {
    // Create dummy fragments to eliminate any empty sections; this simplifies
    // layout.
    if (it->getFragmentList().empty())
      new MCFillFragment(0, 1, 0, it);

    it->setOrdinal(SectionIndex++);
  }

  // Assign layout order indices to sections and fragments.
  unsigned FragmentIndex = 0;
  for (unsigned i = 0, e = Layout.getSectionOrder().size(); i != e; ++i) {
    MCSectionData *SD = Layout.getSectionOrder()[i];
    SD->setLayoutOrder(i);

    for (MCSectionData::iterator it2 = SD->begin(),
           ie2 = SD->end(); it2 != ie2; ++it2)
      it2->setLayoutOrder(FragmentIndex++);
  }

  // Layout until everything fits.
  while (LayoutOnce(Layout))
    continue;

  DEBUG_WITH_TYPE("mc-dump", {
      llvm::errs() << "assembler backend - post-relaxation\n--\n";
      dump(); });

  // Finalize the layout, including fragment lowering.
  FinishLayout(Layout);

  DEBUG_WITH_TYPE("mc-dump", {
      llvm::errs() << "assembler backend - final-layout\n--\n";
      dump(); });

  uint64_t StartOffset = OS.tell();
  llvm::OwningPtr<MCObjectWriter> Writer(getBackend().createObjectWriter(OS));
  if (!Writer)
    report_fatal_error("unable to create object writer!");

  // Allow the object writer a chance to perform post-layout binding (for
  // example, to set the index fields in the symbol data).
  Writer->ExecutePostLayoutBinding(*this);

  // Evaluate and apply the fixups, generating relocation entries as necessary.
  for (MCAssembler::iterator it = begin(), ie = end(); it != ie; ++it) {
    for (MCSectionData::iterator it2 = it->begin(),
           ie2 = it->end(); it2 != ie2; ++it2) {
      MCDataFragment *DF = dyn_cast<MCDataFragment>(it2);
      if (!DF)
        continue;

      for (MCDataFragment::fixup_iterator it3 = DF->fixup_begin(),
             ie3 = DF->fixup_end(); it3 != ie3; ++it3) {
        MCAsmFixup &Fixup = *it3;

        // Evaluate the fixup.
        MCValue Target;
        uint64_t FixedValue;
        if (!EvaluateFixup(Layout, Fixup, DF, Target, FixedValue)) {
          // The fixup was unresolved; we need a relocation. Inform the object
          // writer of the relocation, and give it an opportunity to adjust the
          // fixup value if need be.
          Writer->RecordRelocation(*this, Layout, DF, Fixup, Target,
                                   FixedValue);
        }

        getBackend().ApplyFixup(Fixup, *DF, FixedValue);
      }
    }
  }

  // Write the object file.
  Writer->WriteObject(*this, Layout);
  OS.flush();

  stats::ObjectBytes += OS.tell() - StartOffset;
}

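// FixupNeedsRelaxation - Return true if applying \arg Fixup requires relaxing
// its enclosing instruction: either the fixup value cannot be resolved under
// the current layout, or the resolved value does not fit in a signed 8-bit
// immediate (a hardcoded check; see the FIXME below).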
bool MCAssembler::FixupNeedsRelaxation(const MCAsmFixup &Fixup,
                                       const MCFragment *DF,
                                       const MCAsmLayout &Layout) const {
  if (getRelaxAll())
    return true;

  // If we cannot resolve the fixup value, it requires relaxation.
  MCValue Target;
  uint64_t Value;
  if (!EvaluateFixup(Layout, Fixup, DF, Target, Value))
    return true;

  // Otherwise, relax if the value is too big for a (signed) i8.
  //
  // FIXME: This is target dependent!
  return int64_t(Value) != int64_t(int8_t(Value));
}

bool MCAssembler::FragmentNeedsRelaxation(const MCInstFragment *IF,
                                          const MCAsmLayout &Layout) const {
  // If this inst doesn't ever need relaxation, ignore it. This occurs when we
  // are intentionally pushing out inst fragments, or because we relaxed a
  // previous instruction to one that doesn't need relaxation.
  if (!getBackend().MayNeedRelaxation(IF->getInst(), IF->getFixups()))
    return false;

  for (MCInstFragment::const_fixup_iterator it = IF->fixup_begin(),
         ie = IF->fixup_end(); it != ie; ++it)
    if (FixupNeedsRelaxation(*it, IF, Layout))
      return true;

  return false;
}

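// LayoutOnce - Perform a single pass of layout and relaxation: lay out every
// section in order, relax any instruction fragment whose fixups no longer fit,
// and report whether anything changed so the caller can iterate to a fixed
// point.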
bool MCAssembler::LayoutOnce(MCAsmLayout &Layout) {
  ++stats::RelaxationSteps;

  // Layout the sections in order.
  Layout.LayoutFile();

  // Scan for fragments that need relaxation.
  bool WasRelaxed = false;
  for (iterator it = begin(), ie = end(); it != ie; ++it) {
    MCSectionData &SD = *it;

    for (MCSectionData::iterator it2 = SD.begin(),
           ie2 = SD.end(); it2 != ie2; ++it2) {
      // Check if this is an instruction fragment that needs relaxation.
      MCInstFragment *IF = dyn_cast<MCInstFragment>(it2);
      if (!IF || !FragmentNeedsRelaxation(IF, Layout))
        continue;

      ++stats::RelaxedInstructions;

      // FIXME-PERF: We could immediately lower out instructions if we can tell
      // they are fully resolved, to avoid retesting on later passes.

      // Relax the fragment.
      MCInst Relaxed;
      getBackend().RelaxInstruction(IF, Relaxed);

      // Encode the new instruction.
      //
      // FIXME-PERF: If it matters, we could let the target do this. It can
      // probably do so more efficiently in many cases.
      SmallVector<MCFixup, 4> Fixups;
      SmallString<256> Code;
      raw_svector_ostream VecOS(Code);
      getEmitter().EncodeInstruction(Relaxed, VecOS, Fixups);
      VecOS.flush();

      // Update the instruction fragment.
      int SlideAmount = Code.size() - IF->getInstSize();
      IF->setInst(Relaxed);
      IF->getCode() = Code;
      IF->getFixups().clear();
      for (unsigned i = 0, e = Fixups.size(); i != e; ++i) {
        MCFixup &F = Fixups[i];
        IF->getFixups().push_back(MCAsmFixup(F.getOffset(), *F.getValue(),
                                             F.getKind()));
      }

      // Update the layout, and remember that we relaxed. If we are relaxing
      // everything, we can skip this step since nothing will depend on updating
      // the values.
      if (!getRelaxAll())
        Layout.UpdateForSlide(IF, SlideAmount);
      WasRelaxed = true;
    }
  }

  return WasRelaxed;
}

void MCAssembler::FinishLayout(MCAsmLayout &Layout) {
  // Lower out any instruction fragments, to simplify the fixup application and
  // output.
  //
  // FIXME-PERF: We don't have to do this, but the assumption is that it is
  // cheap (we will mostly end up eliminating fragments and appending on to data
  // fragments), so the extra complexity downstream isn't worth it. Evaluate
  // this assumption.
  for (iterator it = begin(), ie = end(); it != ie; ++it) {
    MCSectionData &SD = *it;

    for (MCSectionData::iterator it2 = SD.begin(),
           ie2 = SD.end(); it2 != ie2; ++it2) {
      MCInstFragment *IF = dyn_cast<MCInstFragment>(it2);
      if (!IF)
        continue;

      // Create a new data fragment for the instruction.
      //
      // FIXME-PERF: Reuse previous data fragment if possible.
      MCDataFragment *DF = new MCDataFragment();
      SD.getFragmentList().insert(it2, DF);

      // Update the data fragment's layout data.
      DF->setParent(IF->getParent());
      DF->setAtom(IF->getAtom());
      DF->setLayoutOrder(IF->getLayoutOrder());
      Layout.FragmentReplaced(IF, DF);

      // Copy in the data and the fixups.
      DF->getContents().append(IF->getCode().begin(), IF->getCode().end());
      for (unsigned i = 0, e = IF->getFixups().size(); i != e; ++i)
        DF->getFixups().push_back(IF->getFixups()[i]);

      // Delete the instruction fragment and update the iterator.
      SD.getFragmentList().erase(IF);
      it2 = DF;
    }
  }
}

// Debugging methods

namespace llvm {

raw_ostream &operator<<(raw_ostream &OS, const MCAsmFixup &AF) {
  OS << "<MCAsmFixup" << " Offset:" << AF.Offset << " Value:" << *AF.Value
     << " Kind:" << AF.Kind << ">";
  return OS;
}

}

void MCFragment::dump() {
  raw_ostream &OS = llvm::errs();

  OS << "<MCFragment " << (void*) this << " LayoutOrder:" << LayoutOrder
     << " Offset:" << Offset << " EffectiveSize:" << EffectiveSize << ">";
}

void MCAlignFragment::dump() {
  raw_ostream &OS = llvm::errs();

  OS << "<MCAlignFragment ";
  this->MCFragment::dump();
  if (hasEmitNops())
    OS << " (emit nops)";
  if (hasOnlyAlignAddress())
    OS << " (only align section)";
  OS << "\n ";
  OS << " Alignment:" << getAlignment()
     << " Value:" << getValue() << " ValueSize:" << getValueSize()
     << " MaxBytesToEmit:" << getMaxBytesToEmit() << ">";
}

void MCDataFragment::dump() {
  raw_ostream &OS = llvm::errs();

  OS << "<MCDataFragment ";
  this->MCFragment::dump();
  OS << "\n ";
  OS << " Contents:[";
  for (unsigned i = 0, e = getContents().size(); i != e; ++i) {
    if (i) OS << ",";
    OS << hexdigit((Contents[i] >> 4) & 0xF) << hexdigit(Contents[i] & 0xF);
  }
  OS << "] (" << getContents().size() << " bytes)";

  if (!getFixups().empty()) {
    OS << ",\n ";
    OS << " Fixups:[";
    for (fixup_iterator it = fixup_begin(), ie = fixup_end(); it != ie; ++it) {
      if (it != fixup_begin()) OS << ",\n ";
      OS << *it;
    }
    OS << "]";
  }

  OS << ">";
}

void MCFillFragment::dump() {
  raw_ostream &OS = llvm::errs();

  OS << "<MCFillFragment ";
  this->MCFragment::dump();
  OS << "\n ";
  OS << " Value:" << getValue() << " ValueSize:" << getValueSize()
     << " Size:" << getSize() << ">";
}

void MCInstFragment::dump() {
  raw_ostream &OS = llvm::errs();

  OS << "<MCInstFragment ";
  this->MCFragment::dump();
  OS << "\n ";
  OS << " Inst:";
  getInst().dump_pretty(OS);
  OS << ">";
}

void MCOrgFragment::dump() {
  raw_ostream &OS = llvm::errs();

  OS << "<MCOrgFragment ";
  this->MCFragment::dump();
  OS << "\n ";
  OS << " Offset:" << getOffset() << " Value:" << getValue() << ">";
}

void MCSectionData::dump() {
  raw_ostream &OS = llvm::errs();

  OS << "<MCSectionData";
  OS << " Alignment:" << getAlignment() << " Address:" << Address
     << " Fragments:[\n ";
  for (iterator it = begin(), ie = end(); it != ie; ++it) {
    if (it != begin()) OS << ",\n ";
    it->dump();
  }
  OS << "]>";
}

void MCSymbolData::dump() {
  raw_ostream &OS = llvm::errs();

  OS << "<MCSymbolData Symbol:" << getSymbol()
     << " Fragment:" << getFragment() << " Offset:" << getOffset()
     << " Flags:" << getFlags() << " Index:" << getIndex();
  if (isCommon())
    OS << " (common, size:" << getCommonSize()
       << " align: " << getCommonAlignment() << ")";
  if (isExternal())
    OS << " (external)";
  if (isPrivateExtern())
    OS << " (private extern)";
  OS << ">";
}

void MCAssembler::dump() {
  raw_ostream &OS = llvm::errs();

  OS << "<MCAssembler\n";
  OS << " Sections:[\n ";
  for (iterator it = begin(), ie = end(); it != ie; ++it) {
    if (it != begin()) OS << ",\n ";
    it->dump();
  }
  OS << "],\n";
  OS << " Symbols:[";

  for (symbol_iterator it = symbol_begin(), ie = symbol_end(); it != ie; ++it) {
    if (it != symbol_begin()) OS << ",\n ";
    it->dump();
  }
  OS << "]>\n";
}