//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ProcessImplicitDefs.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;

// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  r2iMap_.clear();

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.DestroyAll();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  printInstrs(OS);
}

void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";

  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    OS << "BB#" << mbbi->getNumber()
       << ":\t\t# derived from " << mbbi->getName() << "\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      if (mii->isDebugValue())
        OS << "    \t" << *mii;
      else
        OS << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}

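/// conflictsWithPhysReg - Returns true if assigning li to the physical
/// register reg would conflict with an existing use or def of reg (or of a
/// register overlapping reg) inside li's single live range. Copies to and
/// from li.reg are allowed.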
bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries
  if (li.ranges.size() != 1)
    return true;
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();

  // Skip deleted instructions
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  }
  if (!firstMI)
    return false;

  // Find last instruction in range
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  }
  if (!lastMI)
    return false;

  // Range cannot cross basic block boundaries or terminators
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
    return true;

  MachineBasicBlock::const_iterator E = lastMI;
  ++E;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      if (SrcReg == li.reg || DstReg == li.reg)
        continue;

    // Check for operands using reg
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      if (!mop.isReg())
        continue;
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
        continue;
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}

/// conflictsWithSubPhysRegRef - Similar to conflictsWithPhysReg except
/// it checks for sub-register references and it can check uses as well.
bool LiveIntervals::conflictsWithSubPhysRegRef(LiveInterval &li,
                                  unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index != end;
         index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}

#ifndef NDEBUG
static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    dbgs() << tri_->getName(reg);
  else
    dbgs() << "%reg" << reg;
}
#endif

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
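    // (In this version of SlotIndexes each instruction index carries four
    // consecutive slots -- load, use, def and store -- and a plain
    // definition lives in the def slot.)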
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg() ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    // Earlyclobbers move back one.
    ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        ValNo->addKill(killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);

    if (PHIJoin) {
      // A phi join register is killed at the end of the MBB and revived as a
      // new valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
    } else {
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
      // live interval.
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
             E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock),
                     ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();

      // Create an interval with a NEW value number. Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
      if (PHIJoin) {
        ValNo = interval.getNextValue(SlotIndex(Start, true), 0, false,
                                      VNInfoAllocator);
        ValNo->setIsPHIDef(true);
      }
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      ValNo->addKill(killIdx);
      DEBUG(dbgs() << " +" << LR);
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should only be redefined once. This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;
      OldValNo->setCopy(0);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);
      ValNo->addKill(RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          dbgs() << " RESULT: ";
          interval.print(dbgs(), tri_);
        });
    } else {
      assert(lv_->isPHIJoin(interval.reg) && "Multiply defined register");
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.

      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg() ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
    }
  }

  DEBUG(dbgs() << '\n');
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic block boundaries, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (MO.isDead()) {
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    if (mi->isDebugValue())
      continue;
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever read.
          // Then the register is essentially dead at the instruction that
          // defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(dbgs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only case where we should see a dead physreg here without a killing
  // instruction is when it is live-in to the function and never used.
  // Another possible case is that the implicit use of the physical register
  // was deleted by the two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(dbgs() << " +" << LR << '\n');
}

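/// handleRegisterDef - Update live intervals for the given register def:
/// dispatch to the virtual or physical register handler, and for an
/// allocatable physical register also process its sub-registers.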
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->isExtractSubreg() || MI->isInsertSubreg() || MI->isSubregToReg() ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

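/// handleLiveInRegister - Compute the live range for a register that is live
/// in to MBB: it extends from the block start to the first kill or redef in
/// the block; if the register is never read or written there, it lives
/// through the block (or is dead on entry when it is only an alias).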
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      dbgs() << "\t\tlivein register: ";
      printRegName(interval.reg, tri_);
    });

  // Look for kills; if the register reaches a def before it's killed, it
  // shouldn't be considered live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  // Skip over DBG_VALUE at the start of the MBB.
  if (mi != E && mi->isDebugValue()) {
    while (++mi != E && mi->isDebugValue())
      ;
    if (mi == E)
      // MBB is empty except for DBG_VALUE's.
      return;
  }

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != E) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    while (++mi != E && mi->isDebugValue())
      // Skip over DBG_VALUE.
      ;
    if (mi != E)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(dbgs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(dbgs() << " live through");
      end = baseIndex;
    }
  }

  VNInfo *vni =
    interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(dbgs() << " +" << LR << '\n');
}

/// computeIntervals - Computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j), where 1 <= i <= j < N, for
/// which a variable is live.
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    if (MBB->empty())
      continue;

    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << MBB->getName() << ":\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
         MI != miEnd; ++MI) {
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      if (MI->isDebugValue())
        continue;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_defs that define values which are live out of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}

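/// createInterval - Allocate a new live interval for the given register.
/// Physical register intervals get an effectively infinite spill weight so
/// they are never picked for spilling.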
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->getCopy())
    return 0;

  if (VNI->getCopy()->isExtractSubreg()) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      unsigned SrcSubReg = VNI->getCopy()->getOperand(2).getImm();
      unsigned DstSubReg = VNI->getCopy()->getOperand(0).getSubReg();
      if (SrcSubReg == DstSubReg)
        // %reg1034:3<def> = EXTRACT_SUBREG %EDX, 3
        // reg1034 can still be coalesced to EDX.
        return Reg;
      assert(DstSubReg == 0);
      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
    }
    return Reg;
  } else if (VNI->getCopy()->isInsertSubreg() ||
             VNI->getCopy()->isSubregToReg())
    return VNI->getCopy()->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  llvm_unreachable("Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
#ifndef NDEBUG
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  SlotIndex Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_nodbg_iterator
           ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
         ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if the definition of every val# of the
/// specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}

/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

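/// intervalIsInOneMBB - Returns true if all ranges of the given interval are
/// covered by one and the same basic block.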
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();

  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);

  if (mbb == 0)
    return false;

  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);

    if (mbb2 != mbb)
      return false;
  }

  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a use
  // operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, SlotIndex index, SlotIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
                     << *MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        if (!MOj.isUndef()) {
          HasUse |= MOj.isUse();
          HasDef |= MOj.isDef();
        }
      }
    }

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old one.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
      DEBUG(dbgs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        dbgs() << "\t\t\t\tAdded new interval: ";
        nI.print(dbgs(), tri_);
        dbgs() << '\n';
      });
  }
  return CanFold;
}

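/// anyKillInMBBAfterIdx - Returns true if the given value number has a
/// non-PHI kill inside MBB at a slot index later than Idx.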
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         SlotIndex Idx) const {
  SlotIndex End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    if (VNI->kills[j].isPHI())
      continue;

    SlotIndex KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx <= End)
      return true;
  }
  return false;
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    SlotIndex Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

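  // Sort RewriteInfo entries by slot index so that all entries for the same
  // instruction end up adjacent and are visited in program order.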
  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}

void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  SlotIndex start = I->start.getBaseIndex();
  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    if (MI->isDebugValue()) {
      // Remove debug info for now.
      O.setReg(0U);
      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      continue;
    }
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    SlotIndex index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Prevent interval
      // from being spilled.
      getInterval(ImpUse).markNotSpillable();
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.markNotSpillable();
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
        else {
          // If this is a two-address instruction, then this index starts a
          // new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if (index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          index > SII->second.back().index)
        // Use(s) follow the last def; it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}

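/// alsoFoldARestore - Returns true if a restore of vreg vr at the given index
/// has been recorded for block Id and is still marked foldable.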
bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
                      unsigned vr, BitVector &RestoreMBBs,
                      DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}

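/// eraseRestoreInfo - Invalidate a previously recorded restore of vreg vr at
/// the given index in block Id by clearing its slot index.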
void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
                      unsigned vr, BitVector &RestoreMBBs,
                      DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = SlotIndex();
}

/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (MI->isDebugValue()) {
      // Remove debug info for now.
      O.setReg(0U);
      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      continue;
    }
    if (O.isDef()) {
      assert(MI->isImplicitDef() &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
        }
      }
    }
  }
}

float
LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
  // Limit the loop depth ridiculousness.
  if (loopDepth > 200)
    loopDepth = 200;

  // The loop depth is used to roughly estimate the number of times the
  // instruction is executed. Something like 10^d is simple, but will quickly
  // overflow a float. This expression behaves like 10^d for small d, but is
  // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
  // headroom before overflow.
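  //
  // For a rough feel (values computed from the formula below): depth 1
  // gives a factor of about 10, depth 2 about 87, and depth 3 about 657,
  // so shallow nests still behave roughly like 10^d while deep nests grow
  // far more slowly.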
  float lc = powf(1 + (100.0f / (loopDepth+10)), (float)loopDepth);

  return (isDef + isUse) * lc;
}

void
LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
    normalizeSpillWeight(*NewLIs[i]);
}

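/// addIntervalsForSpillsFast - Spill the given interval without splitting or
/// rematerialization: each instruction referencing li.reg either has the
/// memory operand folded in or is rewritten to use a fresh virtual register
/// with a tiny interval around that instruction, with matching restore and
/// spill points recorded in the VirtRegMap.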
1577 std::vector<LiveInterval*> LiveIntervals::
1578 addIntervalsForSpillsFast(const LiveInterval &li,
1579 const MachineLoopInfo *loopInfo,
1580 VirtRegMap &vrm) {
1581 unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1583 std::vector<LiveInterval*> added;
1585 assert(li.isSpillable() && "attempt to spill already spilled interval!");
1587 DEBUG({
1588 dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
1589 li.dump();
1590 dbgs() << '\n';
1593 const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1595 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1596 while (RI != mri_->reg_end()) {
1597 MachineInstr* MI = &*RI;
1599 SmallVector<unsigned, 2> Indices;
1600 bool HasUse = false;
1601 bool HasDef = false;
1603 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1604 MachineOperand& mop = MI->getOperand(i);
1605 if (!mop.isReg() || mop.getReg() != li.reg) continue;
1607 HasUse |= MI->getOperand(i).isUse();
1608 HasDef |= MI->getOperand(i).isDef();
1610 Indices.push_back(i);
1613 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1614 Indices, true, slot, li.reg)) {
1615 unsigned NewVReg = mri_->createVirtualRegister(rc);
1616 vrm.grow();
1617 vrm.assignVirt2StackSlot(NewVReg, slot);
1619 // create a new register for this spill
1620 LiveInterval &nI = getOrCreateInterval(NewVReg);
1621 nI.markNotSpillable();
1623 // Rewrite register operands to use the new vreg.
1624 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1625 E = Indices.end(); I != E; ++I) {
1626 MI->getOperand(*I).setReg(NewVReg);
1628 if (MI->getOperand(*I).isUse())
1629 MI->getOperand(*I).setIsKill(true);
1632 // Fill in the new live interval.
1633 SlotIndex index = getInstructionIndex(MI);
      if (HasUse) {
        LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
                     nI.getNextValue(SlotIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
        vrm.addRestorePoint(NewVReg, MI);
      }
      if (HasDef) {
        LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                     nI.getNextValue(SlotIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
        vrm.addSpillPoint(NewVReg, true, MI);
      }

      added.push_back(&nI);

      DEBUG({
          dbgs() << "\t\t\t\tadded new interval: ";
          nI.dump();
          dbgs() << '\n';
        });
    }
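
    // The fold or the operand rewrite above removed MI from li.reg's use
    // list, so restart from the head of the (now shorter) list rather than
    // advancing an iterator the rewrite may have invalidated.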
    RI = mri_->reg_begin(li.reg);
  }

  return added;
}
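
/// addIntervalsForSpills - Create new live intervals for the spilled uses
/// and defs of li, rematerializing defs and folding loads / stores into the
/// using instructions where possible, and falling back to a single stack
/// slot shared by the whole original interval otherwise.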
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {

  if (EnableFastSpilling)
    return addIntervalsForSpillsFast(li, loopInfo, vrm);

  assert(li.isSpillable() && "attempt to spill already spilled interval!");

  DEBUG({
      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.print(dbgs(), tri_);
      dbgs() << '\n';
    });

  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. Also,
  // it is guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx != SlotIndex()) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                                     Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                     false, vrm, rc, ReMatIds, loopInfo,
                                     SpillMBBs, SpillIdxes, RestoreMBBs,
                                     RestoreIdxes, MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                                     Slot, 0, false, false, false,
                                     false, vrm, rc, ReMatIds, loopInfo,
                                     SpillMBBs, SpillIdxes, RestoreMBBs,
                                     RestoreIdxes, MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

  bool TrySplit = !intervalIsInOneMBB(li);
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
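  // First pass over the value numbers: decide which defs can be
  // rematerialized instead of reloaded, clone those defs in case the
  // originals are modified later, and record whether a stack slot is still
  // needed for values whose uses cannot all be rematerialized.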
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
      ? getInstructionFromIndex(VNI->def) : 0;
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      CloneMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // The value is killed by a PHI node, so not all of its uses can be
        // rematerialized. The def must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);

    // This case only occurs when the prealloc splitter has already assigned
    // a stack slot to this vreg.
    else
      Slot = vrm.getStackSlot(li.reg);
  }

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                                 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                 CanDelete, vrm, rc, ReMatIds, loopInfo,
                                 SpillMBBs, SpillIdxes, RestoreMBBs,
                                 RestoreIdxes, MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores if we are splitting; if not, we are done.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        SlotIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
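
        // A successful fold rewrites MI in place to operate directly on the
        // stack slot, so VReg no longer appears in it; the matching pieces
        // of the new interval are trimmed below, and any restore the fold
        // also covered becomes unnecessary.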
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true,
                                   Slot, VReg)) {
            Folded = true;
            if (FoundUse) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
            }
            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == index.getStoreIndex();
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }
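
  // Now insert the reloads: for each block that needs a restore, try to
  // fold the load into the using instruction (or re-issue a rematerialized
  // def) before asking the spiller for an explicit load.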
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      SlotIndex index = restores[i].index;
      if (index == SlotIndex())
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true,
                                        Slot, VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          if (!Folded) {
            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
            if (ImpUse) {
              // Re-matting an instruction with virtual register use. Add the
              // register as an implicit use on the use MI and mark the
              // register interval as unspillable.
              LiveInterval &ImpLi = getInterval(ImpUse);
              ImpLi.markNotSpillable();
              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
            }
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue
      // a load / rematerialization for us.
      if (Folded)
        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
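      // Divide the accumulated use / def weight by the interval's
      // approximate size in slots, so the weight roughly measures density
      // of use rather than raw length.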
      LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        SlotIndex LastUseIdx = LR->end.getBaseIndex();
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  normalizeSpillWeights(RetNewLIs);
  return RetNewLIs;
}

/// hasAllocatableSuperReg - Return true if the specified physical register
/// has any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}

/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
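  // A super-register that has no allocatable super-register of its own sits
  // at the top of Reg's register tree; take the first such register that is
  // tracked by a live interval.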
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}

/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue())
      continue;
    SlotIndex Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}

/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                                  unsigned PhysReg,
                                                  VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If there is a register which aliases PhysReg but is not a sub-register
    // of the chosen representative super-register, assert, since we can't
    // handle that case yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  SmallVector<unsigned, 4> PRegs;
  if (hasInterval(SpillReg))
    PRegs.push_back(SpillReg);
  else {
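    // SpillReg itself has no interval; collect its maximal sub-registers
    // that do, skipping sub-registers of anything already added so each
    // physical register is considered only once.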
    SmallSet<unsigned, 4> Added;
    for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
      if (Added.insert(*AS) && hasInterval(*AS)) {
        PRegs.push_back(*AS);
        for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
          Added.insert(*ASS);
      }
  }

  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue() || SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    SlotIndex Index = getInstructionIndex(MI);
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
      unsigned PReg = PRegs[i];
      LiveInterval &pli = getInterval(PReg);
      if (!pli.liveAt(Index))
        continue;
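      // The physical register is live across this instruction: have the
      // spiller save it before MI and reload it afterwards, then carve the
      // slots covered by MI out of its interval.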
      vrm.addEmergencySpill(PReg, MI);
      SlotIndex StartIdx = Index.getLoadIndex();
      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->isInlineAsm()) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        report_fatal_error(Msg.str());
      }
      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(Index.getLoadIndex(),
                           Index.getNextIndex().getBaseIndex());
      }
    }
  }
  return Cut;
}
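
/// addLiveRangeToEndOfBlock - Given a register and an instruction defining
/// it, create a new value for the register, mark the value as killed by a
/// PHI (the kill is recorded at the gap before the block's terminator), and
/// extend the register's live interval from the def to the end of the block.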
LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    startInst, true, getVNInfoAllocator());
  VN->setHasPHIKill(true);
  VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
  LiveRange LR(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    getMBBEndIdx(startInst->getParent()), VN);
  Interval.addRange(LR);

  return LR;
}