1 //===-- MachineLICM.cpp - Machine Loop Invariant Code Motion Pass ---------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This pass performs loop invariant code motion on machine instructions. We
11 // attempt to remove as much code from the body of a loop as possible.
12 //
13 // This pass does not attempt to throttle itself to limit register pressure.
14 // The register allocation phases are expected to perform rematerialization
15 // to recover when register pressure is high.
16 //
17 // This pass is not intended to be a replacement or a complete alternative
18 // for the LLVM-IR-level LICM pass. It is only designed to hoist simple
19 // constructs that are not exposed before lowering and instruction selection.
20 //
21 //===----------------------------------------------------------------------===//
23 #define DEBUG_TYPE "machine-licm"
24 #include "llvm/CodeGen/Passes.h"
25 #include "llvm/CodeGen/MachineDominators.h"
26 #include "llvm/CodeGen/MachineFrameInfo.h"
27 #include "llvm/CodeGen/MachineLoopInfo.h"
28 #include "llvm/CodeGen/MachineMemOperand.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/PseudoSourceValue.h"
31 #include "llvm/Target/TargetLowering.h"
32 #include "llvm/Target/TargetRegisterInfo.h"
33 #include "llvm/Target/TargetInstrInfo.h"
34 #include "llvm/Target/TargetInstrItineraries.h"
35 #include "llvm/Target/TargetMachine.h"
36 #include "llvm/Analysis/AliasAnalysis.h"
37 #include "llvm/ADT/DenseMap.h"
38 #include "llvm/ADT/SmallSet.h"
39 #include "llvm/ADT/Statistic.h"
40 #include "llvm/Support/Debug.h"
41 #include "llvm/Support/raw_ostream.h"
43 using namespace llvm;
45 STATISTIC(NumHoisted,
46 "Number of machine instructions hoisted out of loops");
47 STATISTIC(NumLowRP,
48 "Number of instructions hoisted in low reg pressure situation");
49 STATISTIC(NumHighLatency,
50 "Number of high latency instructions hoisted");
51 STATISTIC(NumCSEed,
52 "Number of hoisted machine instructions CSEed");
53 STATISTIC(NumPostRAHoisted,
54 "Number of machine instructions hoisted out of loops post regalloc");
56 namespace {
57 class MachineLICM : public MachineFunctionPass {
58 bool PreRegAlloc;
60 const TargetMachine *TM;
61 const TargetInstrInfo *TII;
62 const TargetLowering *TLI;
63 const TargetRegisterInfo *TRI;
64 const MachineFrameInfo *MFI;
65 MachineRegisterInfo *MRI;
66 const InstrItineraryData *InstrItins;
68 // Various analyses that we use...
69 AliasAnalysis *AA; // Alias analysis info.
70 MachineLoopInfo *MLI; // Current MachineLoopInfo
71 MachineDominatorTree *DT; // Machine dominator tree for the cur loop
73 // State that is updated as we process loops
74 bool Changed; // True if a loop is changed.
75 bool FirstInLoop; // True if it's the first LICM in the loop.
76 MachineLoop *CurLoop; // The current loop we are working on.
77 MachineBasicBlock *CurPreheader; // The preheader for CurLoop.
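// Set of allocatable physical registers in the current function.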
79 BitVector AllocatableSet;
81 // Track 'estimated' register pressure.
82 SmallSet<unsigned, 32> RegSeen;
83 SmallVector<unsigned, 8> RegPressure;
85 // Register pressure "limit" per register class. If the pressure
86 // is higher than the limit, then it's considered high.
87 SmallVector<unsigned, 8> RegLimit;
89 // Register pressure on path leading from loop preheader to current BB.
90 SmallVector<SmallVector<unsigned, 8>, 16> BackTrace;
92 // For each opcode, keep a list of potential CSE instructions.
93 DenseMap<unsigned, std::vector<const MachineInstr*> > CSEMap;
95 public:
96 static char ID; // Pass identification, replacement for typeid
97 MachineLICM() :
98 MachineFunctionPass(ID), PreRegAlloc(true) {
99 initializeMachineLICMPass(*PassRegistry::getPassRegistry());
102 explicit MachineLICM(bool PreRA) :
103 MachineFunctionPass(ID), PreRegAlloc(PreRA) {
104 initializeMachineLICMPass(*PassRegistry::getPassRegistry());
107 virtual bool runOnMachineFunction(MachineFunction &MF);
109 const char *getPassName() const { return "Machine Instruction LICM"; }
111 virtual void getAnalysisUsage(AnalysisUsage &AU) const {
112 AU.addRequired<MachineLoopInfo>();
113 AU.addRequired<MachineDominatorTree>();
114 AU.addRequired<AliasAnalysis>();
115 AU.addPreserved<MachineLoopInfo>();
116 AU.addPreserved<MachineDominatorTree>();
117 MachineFunctionPass::getAnalysisUsage(AU);
120 virtual void releaseMemory() {
121 RegSeen.clear();
122 RegPressure.clear();
123 RegLimit.clear();
124 BackTrace.clear();
125 for (DenseMap<unsigned,std::vector<const MachineInstr*> >::iterator
126 CI = CSEMap.begin(), CE = CSEMap.end(); CI != CE; ++CI)
127 CI->second.clear();
128 CSEMap.clear();
131 private:
132 /// CandidateInfo - Keep track of information about hoisting candidates.
133 struct CandidateInfo {
134 MachineInstr *MI;
135 unsigned Def;
136 int FI;
137 CandidateInfo(MachineInstr *mi, unsigned def, int fi)
138 : MI(mi), Def(def), FI(fi) {}
141 /// HoistRegionPostRA - Walk the specified region of the CFG and hoist loop
142 /// invariants out to the preheader.
143 void HoistRegionPostRA();
145 /// HoistPostRA - When an instruction is found to only use loop invariant
146 /// operands that are safe to hoist, this function is called to do the
147 /// dirty work.
148 void HoistPostRA(MachineInstr *MI, unsigned Def);
150 /// ProcessMI - Examine the instruction as a potential LICM candidate. Also
151 /// gather register def and frame object update information.
152 void ProcessMI(MachineInstr *MI, unsigned *PhysRegDefs,
153 SmallSet<int, 32> &StoredFIs,
154 SmallVector<CandidateInfo, 32> &Candidates);
156 /// AddToLiveIns - Add register 'Reg' to the livein sets of BBs in the
157 /// current loop.
158 void AddToLiveIns(unsigned Reg);
160 /// IsLICMCandidate - Returns true if the instruction may be a suitable
161 /// candidate for LICM. e.g. If the instruction is a call, then it's
162 /// obviously not safe to hoist it.
163 bool IsLICMCandidate(MachineInstr &I);
165 /// IsLoopInvariantInst - Returns true if the instruction is loop
166 /// invariant. I.e., all virtual register operands are defined outside of
167 /// the loop, physical registers aren't accessed (explicitly or implicitly),
168 /// and the instruction is hoistable.
169 ///
170 bool IsLoopInvariantInst(MachineInstr &I);
172 /// HasHighOperandLatency - Compute operand latency between a def of 'Reg'
173 /// and a use in the current loop, return true if the target considers
174 /// it 'high'.
175 bool HasHighOperandLatency(MachineInstr &MI, unsigned DefIdx,
176 unsigned Reg) const;
178 bool IsCheapInstruction(MachineInstr &MI) const;
180 /// CanCauseHighRegPressure - Visit BBs from header to current BB,
181 /// check if hoisting an instruction with the given cost set can cause high
182 /// register pressure.
183 bool CanCauseHighRegPressure(DenseMap<unsigned, int> &Cost);
185 /// UpdateBackTraceRegPressure - Traverse the back trace from header to
186 /// the current block and update their register pressures to reflect the
187 /// effect of hoisting MI from the current block to the preheader.
188 void UpdateBackTraceRegPressure(const MachineInstr *MI);
190 /// IsProfitableToHoist - Return true if it is potentially profitable to
191 /// hoist the given loop invariant.
192 bool IsProfitableToHoist(MachineInstr &MI);
194 /// HoistRegion - Walk the specified region of the CFG (defined by all
195 /// blocks dominated by the specified block, and that are in the current
196 /// loop) in depth first order w.r.t the DominatorTree. This allows us to
197 /// visit definitions before uses, allowing us to hoist a loop body in one
198 /// pass without iteration.
200 void HoistRegion(MachineDomTreeNode *N, bool IsHeader = false);
202 /// InitRegPressure - Find all virtual register references that are liveout
203 /// of the preheader to initialize the starting "register pressure". Note
204 /// this does not count live through (livein but not used) registers.
205 void InitRegPressure(MachineBasicBlock *BB);
207 /// UpdateRegPressure - Update estimate of register pressure after the
208 /// specified instruction.
209 void UpdateRegPressure(const MachineInstr *MI);
211 /// ExtractHoistableLoad - Unfold a load from the given machineinstr if
212 /// the load itself could be hoisted. Return the unfolded and hoistable
213 /// load, or null if the load couldn't be unfolded or if it wouldn't
214 /// be hoistable.
215 MachineInstr *ExtractHoistableLoad(MachineInstr *MI);
217 /// LookForDuplicate - Find an instruction among PrevMIs that is a
218 /// duplicate of MI. Return this instruction if it's found.
219 const MachineInstr *LookForDuplicate(const MachineInstr *MI,
220 std::vector<const MachineInstr*> &PrevMIs);
222 /// EliminateCSE - Given a LICM'ed instruction, look for an instruction in
223 /// the preheader that computes the same value. If one is found, replace all
224 /// uses with the definition of the existing instruction rather than hoisting
225 /// the instruction to the preheader.
226 bool EliminateCSE(MachineInstr *MI,
227 DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator &CI);
229 /// Hoist - When an instruction is found to only use loop invariant operands
230 /// that are safe to hoist, this function is called to do the dirty work.
231 /// It returns true if the instruction is hoisted.
232 bool Hoist(MachineInstr *MI, MachineBasicBlock *Preheader);
234 /// InitCSEMap - Initialize the CSE map with instructions that are in the
235 /// current loop preheader that may become duplicates of instructions that
236 /// are hoisted out of the loop.
237 void InitCSEMap(MachineBasicBlock *BB);
239 /// getCurPreheader - Get the preheader for the current loop, splitting
240 /// a critical edge if needed.
241 MachineBasicBlock *getCurPreheader();
243 } // end anonymous namespace
245 char MachineLICM::ID = 0;
246 INITIALIZE_PASS_BEGIN(MachineLICM, "machinelicm",
247 "Machine Loop Invariant Code Motion", false, false)
248 INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
249 INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
250 INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
251 INITIALIZE_PASS_END(MachineLICM, "machinelicm",
252 "Machine Loop Invariant Code Motion", false, false)
254 FunctionPass *llvm::createMachineLICMPass(bool PreRegAlloc) {
255 return new MachineLICM(PreRegAlloc);
258 /// LoopIsOuterMostWithPredecessor - Test if the given loop is the outer-most
259 /// loop that has a unique predecessor.
260 static bool LoopIsOuterMostWithPredecessor(MachineLoop *CurLoop) {
261 // Check whether this loop even has a unique predecessor.
262 if (!CurLoop->getLoopPredecessor())
263 return false;
264 // Ok, now check to see if any of its outer loops do.
265 for (MachineLoop *L = CurLoop->getParentLoop(); L; L = L->getParentLoop())
266 if (L->getLoopPredecessor())
267 return false;
268 // None of them did, so this is the outermost with a unique predecessor.
269 return true;
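/// runOnMachineFunction - Hoist loop-invariant machine instructions out of
/// every loop in the function, using either the pre-register-allocation or
/// the post-register-allocation algorithm depending on how the pass was
/// constructed.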
272 bool MachineLICM::runOnMachineFunction(MachineFunction &MF) {
273 if (PreRegAlloc)
274 DEBUG(dbgs() << "******** Pre-regalloc Machine LICM: ");
275 else
276 DEBUG(dbgs() << "******** Post-regalloc Machine LICM: ");
277 DEBUG(dbgs() << MF.getFunction()->getName() << " ********\n");
279 Changed = FirstInLoop = false;
280 TM = &MF.getTarget();
281 TII = TM->getInstrInfo();
282 TLI = TM->getTargetLowering();
283 TRI = TM->getRegisterInfo();
284 MFI = MF.getFrameInfo();
285 MRI = &MF.getRegInfo();
286 InstrItins = TM->getInstrItineraryData();
287 AllocatableSet = TRI->getAllocatableSet(MF);
289 if (PreRegAlloc) {
290 // Estimate register pressure during pre-regalloc pass.
291 unsigned NumRC = TRI->getNumRegClasses();
292 RegPressure.resize(NumRC);
293 std::fill(RegPressure.begin(), RegPressure.end(), 0);
294 RegLimit.resize(NumRC);
295 for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
296 E = TRI->regclass_end(); I != E; ++I)
297 RegLimit[(*I)->getID()] = TLI->getRegPressureLimit(*I, MF);
300 // Get our Loop information...
301 MLI = &getAnalysis<MachineLoopInfo>();
302 DT = &getAnalysis<MachineDominatorTree>();
303 AA = &getAnalysis<AliasAnalysis>();
305 SmallVector<MachineLoop *, 8> Worklist(MLI->begin(), MLI->end());
306 while (!Worklist.empty()) {
307 CurLoop = Worklist.pop_back_val();
308 CurPreheader = 0;
310 // If this is done before regalloc, only visit outer-most preheader-sporting
311 // loops.
312 if (PreRegAlloc && !LoopIsOuterMostWithPredecessor(CurLoop)) {
313 Worklist.append(CurLoop->begin(), CurLoop->end());
314 continue;
317 if (!PreRegAlloc)
318 HoistRegionPostRA();
319 else {
320 // CSEMap is initialized for loop header when the first instruction is
321 // being hoisted.
322 MachineDomTreeNode *N = DT->getNode(CurLoop->getHeader());
323 FirstInLoop = true;
324 HoistRegion(N, true);
325 CSEMap.clear();
329 return Changed;
332 /// InstructionStoresToFI - Return true if instruction stores to the
333 /// specified frame.
334 static bool InstructionStoresToFI(const MachineInstr *MI, int FI) {
335 for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
336 oe = MI->memoperands_end(); o != oe; ++o) {
337 if (!(*o)->isStore() || !(*o)->getValue())
338 continue;
339 if (const FixedStackPseudoSourceValue *Value =
340 dyn_cast<const FixedStackPseudoSourceValue>((*o)->getValue())) {
341 if (Value->getFrameIndex() == FI)
342 return true;
345 return false;
348 /// ProcessMI - Examine the instruction as a potential LICM candidate. Also
349 /// gather register def and frame object update information.
350 void MachineLICM::ProcessMI(MachineInstr *MI,
351 unsigned *PhysRegDefs,
352 SmallSet<int, 32> &StoredFIs,
353 SmallVector<CandidateInfo, 32> &Candidates) {
354 bool RuledOut = false;
355 bool HasNonInvariantUse = false;
356 unsigned Def = 0;
357 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
358 const MachineOperand &MO = MI->getOperand(i);
359 if (MO.isFI()) {
360 // Remember if the instruction stores to the frame index.
361 int FI = MO.getIndex();
362 if (!StoredFIs.count(FI) &&
363 MFI->isSpillSlotObjectIndex(FI) &&
364 InstructionStoresToFI(MI, FI))
365 StoredFIs.insert(FI);
366 HasNonInvariantUse = true;
367 continue;
370 if (!MO.isReg())
371 continue;
372 unsigned Reg = MO.getReg();
373 if (!Reg)
374 continue;
375 assert(TargetRegisterInfo::isPhysicalRegister(Reg) &&
376 "Not expecting virtual register!");
378 if (!MO.isDef()) {
379 if (Reg && PhysRegDefs[Reg])
380 // If it's using a non-loop-invariant register, then it's obviously not
381 // safe to hoist.
382 HasNonInvariantUse = true;
383 continue;
386 if (MO.isImplicit()) {
387 ++PhysRegDefs[Reg];
388 for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
389 ++PhysRegDefs[*AS];
390 if (!MO.isDead())
391 // Non-dead implicit def? This cannot be hoisted.
392 RuledOut = true;
393 // No need to check if a dead implicit def is also defined by
394 // another instruction.
395 continue;
398 // FIXME: For now, avoid instructions with multiple defs, unless
399 // it's a dead implicit def.
400 if (Def)
401 RuledOut = true;
402 else
403 Def = Reg;
405 // If we have already seen another instruction that defines the same
406 // register, then this is not safe.
407 if (++PhysRegDefs[Reg] > 1)
408 // The register defined by MI is also defined by another instruction in
409 // the loop, so MI cannot be a LICM candidate.
410 RuledOut = true;
411 for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
412 if (++PhysRegDefs[*AS] > 1)
413 RuledOut = true;
416 // For now, only consider reloads and remats which do not have register
417 // operands. FIXME: Consider unfolding instructions that fold a load.
418 if (Def && !RuledOut) {
419 int FI = INT_MIN;
420 if ((!HasNonInvariantUse && IsLICMCandidate(*MI)) ||
421 (TII->isLoadFromStackSlot(MI, FI) && MFI->isSpillSlotObjectIndex(FI)))
422 Candidates.push_back(CandidateInfo(MI, Def, FI));
426 /// HoistRegionPostRA - Walk the specified region of the CFG and hoist loop
427 /// invariants out to the preheader.
428 void MachineLICM::HoistRegionPostRA() {
429 unsigned NumRegs = TRI->getNumRegs();
430 unsigned *PhysRegDefs = new unsigned[NumRegs];
431 std::fill(PhysRegDefs, PhysRegDefs + NumRegs, 0);
433 SmallVector<CandidateInfo, 32> Candidates;
434 SmallSet<int, 32> StoredFIs;
436 // Walk the entire region, count number of defs for each register, and
437 // collect potential LICM candidates.
438 const std::vector<MachineBasicBlock*> Blocks = CurLoop->getBlocks();
439 for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
440 MachineBasicBlock *BB = Blocks[i];
441 // Conservatively treat live-ins as external defs.
442 // FIXME: That means a reload that is reused in successor block(s) will not
443 // be LICM'ed.
444 for (MachineBasicBlock::livein_iterator I = BB->livein_begin(),
445 E = BB->livein_end(); I != E; ++I) {
446 unsigned Reg = *I;
447 ++PhysRegDefs[Reg];
448 for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
449 ++PhysRegDefs[*AS];
452 for (MachineBasicBlock::iterator
453 MII = BB->begin(), E = BB->end(); MII != E; ++MII) {
454 MachineInstr *MI = &*MII;
455 ProcessMI(MI, PhysRegDefs, StoredFIs, Candidates);
459 // Now evaluate whether the potential candidates qualify.
460 // 1. Check if the candidate defined register is defined by another
461 // instruction in the loop.
462 // 2. If the candidate is a load from stack slot (always true for now),
463 // check if the slot is stored anywhere in the loop.
464 for (unsigned i = 0, e = Candidates.size(); i != e; ++i) {
465 if (Candidates[i].FI != INT_MIN &&
466 StoredFIs.count(Candidates[i].FI))
467 continue;
469 if (PhysRegDefs[Candidates[i].Def] == 1) {
470 bool Safe = true;
471 MachineInstr *MI = Candidates[i].MI;
472 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
473 const MachineOperand &MO = MI->getOperand(j);
474 if (!MO.isReg() || MO.isDef() || !MO.getReg())
475 continue;
476 if (PhysRegDefs[MO.getReg()]) {
477 // If it's using a non-loop-invariant register, then it's obviously
478 // not safe to hoist.
479 Safe = false;
480 break;
483 if (Safe)
484 HoistPostRA(MI, Candidates[i].Def);
488 delete[] PhysRegDefs;
491 /// AddToLiveIns - Add register 'Reg' to the livein sets of BBs in the current
492 /// loop, and make sure it is not killed by any instructions in the loop.
493 void MachineLICM::AddToLiveIns(unsigned Reg) {
494 const std::vector<MachineBasicBlock*> Blocks = CurLoop->getBlocks();
495 for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
496 MachineBasicBlock *BB = Blocks[i];
497 if (!BB->isLiveIn(Reg))
498 BB->addLiveIn(Reg);
499 for (MachineBasicBlock::iterator
500 MII = BB->begin(), E = BB->end(); MII != E; ++MII) {
501 MachineInstr *MI = &*MII;
502 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
503 MachineOperand &MO = MI->getOperand(i);
504 if (!MO.isReg() || !MO.getReg() || MO.isDef()) continue;
505 if (MO.getReg() == Reg || TRI->isSuperRegister(Reg, MO.getReg()))
506 MO.setIsKill(false);
512 /// HoistPostRA - When an instruction is found to only use loop invariant
513 /// operands that are safe to hoist, this function is called to do the
514 /// dirty work.
515 void MachineLICM::HoistPostRA(MachineInstr *MI, unsigned Def) {
516 MachineBasicBlock *Preheader = getCurPreheader();
517 if (!Preheader) return;
519 // Now move the instruction to the preheader, inserting it before any
520 // terminator instructions.
521 DEBUG({
522 dbgs() << "Hoisting " << *MI;
523 if (Preheader->getBasicBlock())
524 dbgs() << " to MachineBasicBlock "
525 << Preheader->getName();
526 if (MI->getParent()->getBasicBlock())
527 dbgs() << " from MachineBasicBlock "
528 << MI->getParent()->getName();
529 dbgs() << "\n";
532 // Splice the instruction to the preheader.
533 MachineBasicBlock *MBB = MI->getParent();
534 Preheader->splice(Preheader->getFirstTerminator(), MBB, MI);
536 // Add register to livein list to all the BBs in the current loop since a
537 // loop invariant must be kept live throughout the whole loop. This is
538 // important to ensure later passes do not scavenge the def register.
539 AddToLiveIns(Def);
541 ++NumPostRAHoisted;
542 Changed = true;
545 /// HoistRegion - Walk the specified region of the CFG (defined by all blocks
546 /// dominated by the specified block, and that are in the current loop) in depth
547 /// first order w.r.t the DominatorTree. This allows us to visit definitions
548 /// before uses, allowing us to hoist a loop body in one pass without iteration.
550 void MachineLICM::HoistRegion(MachineDomTreeNode *N, bool IsHeader) {
551 assert(N != 0 && "Null dominator tree node?");
552 MachineBasicBlock *BB = N->getBlock();
554 // If this subregion is not in the top level loop at all, exit.
555 if (!CurLoop->contains(BB)) return;
557 MachineBasicBlock *Preheader = getCurPreheader();
558 if (!Preheader)
559 return;
561 if (IsHeader) {
562 // Compute registers which are livein into the loop headers.
563 RegSeen.clear();
564 BackTrace.clear();
565 InitRegPressure(Preheader);
568 // Remember livein register pressure.
569 BackTrace.push_back(RegPressure);
571 for (MachineBasicBlock::iterator
572 MII = BB->begin(), E = BB->end(); MII != E; ) {
573 MachineBasicBlock::iterator NextMII = MII; ++NextMII;
574 MachineInstr *MI = &*MII;
575 if (!Hoist(MI, Preheader))
576 UpdateRegPressure(MI);
577 MII = NextMII;
580 // Don't hoist things out of a large switch statement. This often causes
581 // code to be hoisted that wasn't going to be executed, and increases
582 // register pressure in a situation where it's likely to matter.
583 if (BB->succ_size() < 25) {
584 const std::vector<MachineDomTreeNode*> &Children = N->getChildren();
585 for (unsigned I = 0, E = Children.size(); I != E; ++I)
586 HoistRegion(Children[I]);
589 BackTrace.pop_back();
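/// isOperandKill - Return true if this operand is the last use of the
/// register: either its kill flag is set or it is the register's only
/// non-debug use.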
592 static bool isOperandKill(const MachineOperand &MO, MachineRegisterInfo *MRI) {
593 return MO.isKill() || MRI->hasOneNonDBGUse(MO.getReg());
596 /// InitRegPressure - Find all virtual register references that are liveout of
597 /// the preheader to initialize the starting "register pressure". Note this
598 /// does not count live through (livein but not used) registers.
599 void MachineLICM::InitRegPressure(MachineBasicBlock *BB) {
600 std::fill(RegPressure.begin(), RegPressure.end(), 0);
602 // If the preheader has only a single predecessor and it ends with a
603 // fallthrough or an unconditional branch, then scan its predecessor for live
604 // defs as well. This happens whenever the preheader is created by splitting
605 // the critical edge from the loop predecessor to the loop header.
606 if (BB->pred_size() == 1) {
607 MachineBasicBlock *TBB = 0, *FBB = 0;
608 SmallVector<MachineOperand, 4> Cond;
609 if (!TII->AnalyzeBranch(*BB, TBB, FBB, Cond, false) && Cond.empty())
610 InitRegPressure(*BB->pred_begin());
613 for (MachineBasicBlock::iterator MII = BB->begin(), E = BB->end();
614 MII != E; ++MII) {
615 MachineInstr *MI = &*MII;
616 for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
617 const MachineOperand &MO = MI->getOperand(i);
618 if (!MO.isReg() || MO.isImplicit())
619 continue;
620 unsigned Reg = MO.getReg();
621 if (!TargetRegisterInfo::isVirtualRegister(Reg))
622 continue;
624 bool isNew = RegSeen.insert(Reg);
625 const TargetRegisterClass *RC = MRI->getRegClass(Reg);
626 EVT VT = *RC->vt_begin();
627 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
628 if (MO.isDef())
629 RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
630 else {
631 bool isKill = isOperandKill(MO, MRI);
632 if (isNew && !isKill)
633 // Haven't seen this, it must be a livein.
634 RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
635 else if (!isNew && isKill)
636 RegPressure[RCId] -= TLI->getRepRegClassCostFor(VT);
642 /// UpdateRegPressure - Update estimate of register pressure after the
643 /// specified instruction.
644 void MachineLICM::UpdateRegPressure(const MachineInstr *MI) {
645 if (MI->isImplicitDef())
646 return;
648 SmallVector<unsigned, 4> Defs;
649 for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
650 const MachineOperand &MO = MI->getOperand(i);
651 if (!MO.isReg() || MO.isImplicit())
652 continue;
653 unsigned Reg = MO.getReg();
654 if (!TargetRegisterInfo::isVirtualRegister(Reg))
655 continue;
657 bool isNew = RegSeen.insert(Reg);
658 if (MO.isDef())
659 Defs.push_back(Reg);
660 else if (!isNew && isOperandKill(MO, MRI)) {
661 const TargetRegisterClass *RC = MRI->getRegClass(Reg);
662 EVT VT = *RC->vt_begin();
663 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
664 unsigned RCCost = TLI->getRepRegClassCostFor(VT);
666 if (RCCost > RegPressure[RCId])
667 RegPressure[RCId] = 0;
668 else
669 RegPressure[RCId] -= RCCost;
673 while (!Defs.empty()) {
674 unsigned Reg = Defs.pop_back_val();
675 const TargetRegisterClass *RC = MRI->getRegClass(Reg);
676 EVT VT = *RC->vt_begin();
677 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
678 unsigned RCCost = TLI->getRepRegClassCostFor(VT);
679 RegPressure[RCId] += RCCost;
683 /// IsLICMCandidate - Returns true if the instruction may be a suitable
684 /// candidate for LICM. e.g. If the instruction is a call, then it's obviously
685 /// not safe to hoist it.
686 bool MachineLICM::IsLICMCandidate(MachineInstr &I) {
687 // Check if it's safe to move the instruction.
688 bool DontMoveAcrossStore = true;
689 if (!I.isSafeToMove(TII, AA, DontMoveAcrossStore))
690 return false;
692 return true;
695 /// IsLoopInvariantInst - Returns true if the instruction is loop
696 /// invariant. I.e., all virtual register operands are defined outside of the
697 /// loop, physical registers aren't accessed explicitly, and there are no side
698 /// effects that aren't captured by the operands or other flags.
699 ///
700 bool MachineLICM::IsLoopInvariantInst(MachineInstr &I) {
701 if (!IsLICMCandidate(I))
702 return false;
704 // The instruction is loop invariant if all of its operands are.
705 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
706 const MachineOperand &MO = I.getOperand(i);
708 if (!MO.isReg())
709 continue;
711 unsigned Reg = MO.getReg();
712 if (Reg == 0) continue;
714 // Don't hoist an instruction that uses or defines a physical register.
715 if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
716 if (MO.isUse()) {
717 // If the physreg has no defs anywhere, it's just an ambient register
718 // and we can freely move its uses. Alternatively, if it's allocatable,
719 // it could get allocated to something with a def during allocation.
720 if (!MRI->def_empty(Reg))
721 return false;
722 if (AllocatableSet.test(Reg))
723 return false;
724 // Check for a def among the register's aliases too.
725 for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
726 unsigned AliasReg = *Alias;
727 if (!MRI->def_empty(AliasReg))
728 return false;
729 if (AllocatableSet.test(AliasReg))
730 return false;
732 // Otherwise it's safe to move.
733 continue;
734 } else if (!MO.isDead()) {
735 // A def that isn't dead. We can't move it.
736 return false;
737 } else if (CurLoop->getHeader()->isLiveIn(Reg)) {
738 // If the reg is live into the loop, we can't hoist an instruction
739 // which would clobber it.
740 return false;
744 if (!MO.isUse())
745 continue;
747 assert(MRI->getVRegDef(Reg) &&
748 "Machine instr not mapped for this vreg?!");
750 // If the loop contains the definition of an operand, then the instruction
751 // isn't loop invariant.
752 if (CurLoop->contains(MRI->getVRegDef(Reg)))
753 return false;
756 // If we got this far, the instruction is loop invariant!
757 return true;
761 /// HasPHIUses - Return true if the specified register has any PHI use.
762 static bool HasPHIUses(unsigned Reg, MachineRegisterInfo *MRI) {
763 for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(Reg),
764 UE = MRI->use_end(); UI != UE; ++UI) {
765 MachineInstr *UseMI = &*UI;
766 if (UseMI->isPHI())
767 return true;
769 return false;
773 /// HasHighOperandLatency - Compute operand latency between a def of 'Reg'
774 /// and a use in the current loop, return true if the target considers
775 /// it 'high'.
776 bool MachineLICM::HasHighOperandLatency(MachineInstr &MI,
777 unsigned DefIdx, unsigned Reg) const {
778 if (!InstrItins || InstrItins->isEmpty() || MRI->use_nodbg_empty(Reg))
779 return false;
781 for (MachineRegisterInfo::use_nodbg_iterator I = MRI->use_nodbg_begin(Reg),
782 E = MRI->use_nodbg_end(); I != E; ++I) {
783 MachineInstr *UseMI = &*I;
784 if (UseMI->isCopyLike())
785 continue;
786 if (!CurLoop->contains(UseMI->getParent()))
787 continue;
788 for (unsigned i = 0, e = UseMI->getNumOperands(); i != e; ++i) {
789 const MachineOperand &MO = UseMI->getOperand(i);
790 if (!MO.isReg() || !MO.isUse())
791 continue;
792 unsigned MOReg = MO.getReg();
793 if (MOReg != Reg)
794 continue;
796 if (TII->hasHighOperandLatency(InstrItins, MRI, &MI, DefIdx, UseMI, i))
797 return true;
800 // Only look at the first use inside the loop.
801 break;
804 return false;
807 /// IsCheapInstruction - Return true if the instruction is marked "cheap" or
808 /// the operand latency between its def and a use is one or less.
809 bool MachineLICM::IsCheapInstruction(MachineInstr &MI) const {
810 if (MI.getDesc().isAsCheapAsAMove() || MI.isCopyLike())
811 return true;
812 if (!InstrItins || InstrItins->isEmpty())
813 return false;
815 bool isCheap = false;
816 unsigned NumDefs = MI.getDesc().getNumDefs();
817 for (unsigned i = 0, e = MI.getNumOperands(); NumDefs && i != e; ++i) {
818 MachineOperand &DefMO = MI.getOperand(i);
819 if (!DefMO.isReg() || !DefMO.isDef())
820 continue;
821 --NumDefs;
822 unsigned Reg = DefMO.getReg();
823 if (TargetRegisterInfo::isPhysicalRegister(Reg))
824 continue;
826 if (!TII->hasLowDefLatency(InstrItins, &MI, i))
827 return false;
828 isCheap = true;
831 return isCheap;
834 /// CanCauseHighRegPressure - Visit BBs from header to current BB, check
835 /// if hoisting an instruction with the given cost set can cause high
836 /// register pressure.
837 bool MachineLICM::CanCauseHighRegPressure(DenseMap<unsigned, int> &Cost) {
838 for (DenseMap<unsigned, int>::iterator CI = Cost.begin(), CE = Cost.end();
839 CI != CE; ++CI) {
840 if (CI->second <= 0)
841 continue;
843 unsigned RCId = CI->first;
844 for (unsigned i = BackTrace.size(); i != 0; --i) {
845 SmallVector<unsigned, 8> &RP = BackTrace[i-1];
846 if (RP[RCId] + CI->second >= RegLimit[RCId])
847 return true;
851 return false;
854 /// UpdateBackTraceRegPressure - Traverse the back trace from header to the
855 /// current block and update their register pressures to reflect the effect
856 /// of hoisting MI from the current block to the preheader.
857 void MachineLICM::UpdateBackTraceRegPressure(const MachineInstr *MI) {
858 if (MI->isImplicitDef())
859 return;
861 // First compute the 'cost' of the instruction, i.e. its contribution
862 // to register pressure.
863 DenseMap<unsigned, int> Cost;
864 for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
865 const MachineOperand &MO = MI->getOperand(i);
866 if (!MO.isReg() || MO.isImplicit())
867 continue;
868 unsigned Reg = MO.getReg();
869 if (!TargetRegisterInfo::isVirtualRegister(Reg))
870 continue;
872 const TargetRegisterClass *RC = MRI->getRegClass(Reg);
873 EVT VT = *RC->vt_begin();
874 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
875 unsigned RCCost = TLI->getRepRegClassCostFor(VT);
876 if (MO.isDef()) {
877 DenseMap<unsigned, int>::iterator CI = Cost.find(RCId);
878 if (CI != Cost.end())
879 CI->second += RCCost;
880 else
881 Cost.insert(std::make_pair(RCId, RCCost));
882 } else if (isOperandKill(MO, MRI)) {
883 DenseMap<unsigned, int>::iterator CI = Cost.find(RCId);
884 if (CI != Cost.end())
885 CI->second -= RCCost;
886 else
887 Cost.insert(std::make_pair(RCId, -RCCost));
891 // Update register pressure of blocks from loop header to current block.
892 for (unsigned i = 0, e = BackTrace.size(); i != e; ++i) {
893 SmallVector<unsigned, 8> &RP = BackTrace[i];
894 for (DenseMap<unsigned, int>::iterator CI = Cost.begin(), CE = Cost.end();
895 CI != CE; ++CI) {
896 unsigned RCId = CI->first;
897 RP[RCId] += CI->second;
902 /// IsProfitableToHoist - Return true if it is potentially profitable to hoist
903 /// the given loop invariant.
904 bool MachineLICM::IsProfitableToHoist(MachineInstr &MI) {
905 if (MI.isImplicitDef())
906 return true;
908 // If the instruction is cheap, only hoist if it is re-materializable. LICM
909 // will increase register pressure. It's probably not worth it if the
910 // instruction is cheap.
911 // Also hoist loads from constant memory, e.g. loads from stubs or the GOT.
912 // Hoisting these tends to help performance in low register pressure
913 // situations; the trade-off is that they may cause spills in high pressure
914 // situations. A spill adds a store in the loop preheader, but the reload is
915 // no more expensive. A side benefit is that these loads are frequently CSE'ed.
916 if (IsCheapInstruction(MI)) {
917 if (!TII->isTriviallyReMaterializable(&MI, AA))
918 return false;
919 } else {
920 // Estimate register pressure to determine whether to LICM the instruction.
921 // In low register pressure situations, we can be more aggressive about
922 // hoisting. Also, favor hoisting long latency instructions even in
923 // moderately high pressure situations.
924 // FIXME: If there are long latency loop-invariant instructions inside the
925 // loop at this point, why didn't the optimizer's LICM hoist them?
926 DenseMap<unsigned, int> Cost;
927 for (unsigned i = 0, e = MI.getDesc().getNumOperands(); i != e; ++i) {
928 const MachineOperand &MO = MI.getOperand(i);
929 if (!MO.isReg() || MO.isImplicit())
930 continue;
931 unsigned Reg = MO.getReg();
932 if (!TargetRegisterInfo::isVirtualRegister(Reg))
933 continue;
934 if (MO.isDef()) {
935 if (HasHighOperandLatency(MI, i, Reg)) {
936 ++NumHighLatency;
937 return true;
940 const TargetRegisterClass *RC = MRI->getRegClass(Reg);
941 EVT VT = *RC->vt_begin();
942 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
943 unsigned RCCost = TLI->getRepRegClassCostFor(VT);
944 DenseMap<unsigned, int>::iterator CI = Cost.find(RCId);
945 if (CI != Cost.end())
946 CI->second += RCCost;
947 else
948 Cost.insert(std::make_pair(RCId, RCCost));
949 } else if (isOperandKill(MO, MRI)) {
950 // If a virtual register use is a kill, hoisting it out of the loop
951 // may actually reduce register pressure or be register pressure
952 // neutral.
953 const TargetRegisterClass *RC = MRI->getRegClass(Reg);
954 EVT VT = *RC->vt_begin();
955 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
956 unsigned RCCost = TLI->getRepRegClassCostFor(VT);
957 DenseMap<unsigned, int>::iterator CI = Cost.find(RCId);
958 if (CI != Cost.end())
959 CI->second -= RCCost;
960 else
961 Cost.insert(std::make_pair(RCId, -RCCost));
965 // Visit BBs from header to current BB, if hoisting this doesn't cause
966 // high register pressure, then it's safe to proceed.
967 if (!CanCauseHighRegPressure(Cost)) {
968 ++NumLowRP;
969 return true;
972 // High register pressure situation: only hoist if the instruction is going
973 // to be remat'ed.
974 if (!TII->isTriviallyReMaterializable(&MI, AA) &&
975 !MI.isInvariantLoad(AA))
976 return false;
979 // If the result(s) of this instruction are used by PHIs, don't hoist it.
980 // The presence of joins makes it difficult for the current register
981 // allocator implementation to perform remat.
982 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
983 const MachineOperand &MO = MI.getOperand(i);
984 if (!MO.isReg() || !MO.isDef())
985 continue;
986 if (HasPHIUses(MO.getReg(), MRI))
987 return false;
990 return true;
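/// ExtractHoistableLoad - Unfold a load from the given machineinstr if
/// the load itself could be hoisted. Return the unfolded and hoistable
/// load, or null if the load couldn't be unfolded or if it wouldn't
/// be hoistable.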
993 MachineInstr *MachineLICM::ExtractHoistableLoad(MachineInstr *MI) {
994 // Don't unfold simple loads.
995 if (MI->getDesc().canFoldAsLoad())
996 return 0;
998 // If not, we may be able to unfold a load and hoist that.
999 // First test whether the instruction is loading from an amenable
1000 // memory location.
1001 if (!MI->isInvariantLoad(AA))
1002 return 0;
1004 // Next determine the register class for a temporary register.
1005 unsigned LoadRegIndex;
1006 unsigned NewOpc =
1007 TII->getOpcodeAfterMemoryUnfold(MI->getOpcode(),
1008 /*UnfoldLoad=*/true,
1009 /*UnfoldStore=*/false,
1010 &LoadRegIndex);
1011 if (NewOpc == 0) return 0;
1012 const TargetInstrDesc &TID = TII->get(NewOpc);
1013 if (TID.getNumDefs() != 1) return 0;
1014 const TargetRegisterClass *RC = TID.OpInfo[LoadRegIndex].getRegClass(TRI);
1015 // Ok, we're unfolding. Create a temporary register and do the unfold.
1016 unsigned Reg = MRI->createVirtualRegister(RC);
1018 MachineFunction &MF = *MI->getParent()->getParent();
1019 SmallVector<MachineInstr *, 2> NewMIs;
1020 bool Success =
1021 TII->unfoldMemoryOperand(MF, MI, Reg,
1022 /*UnfoldLoad=*/true, /*UnfoldStore=*/false,
1023 NewMIs);
1024 (void)Success;
1025 assert(Success &&
1026 "unfoldMemoryOperand failed when getOpcodeAfterMemoryUnfold "
1027 "succeeded!");
1028 assert(NewMIs.size() == 2 &&
1029 "Unfolded a load into multiple instructions!");
1030 MachineBasicBlock *MBB = MI->getParent();
1031 MBB->insert(MI, NewMIs[0]);
1032 MBB->insert(MI, NewMIs[1]);
1033 // If unfolding produced a load that wasn't loop-invariant or profitable to
1034 // hoist, discard the new instructions and bail.
1035 if (!IsLoopInvariantInst(*NewMIs[0]) || !IsProfitableToHoist(*NewMIs[0])) {
1036 NewMIs[0]->eraseFromParent();
1037 NewMIs[1]->eraseFromParent();
1038 return 0;
1041 // Update register pressure for the unfolded instruction.
1042 UpdateRegPressure(NewMIs[1]);
1044 // Otherwise we successfully unfolded a load that we can hoist.
1045 MI->eraseFromParent();
1046 return NewMIs[0];
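/// InitCSEMap - Initialize the CSE map with instructions that are in the
/// current loop preheader that may become duplicates of instructions that
/// are hoisted out of the loop.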
1049 void MachineLICM::InitCSEMap(MachineBasicBlock *BB) {
1050 for (MachineBasicBlock::iterator I = BB->begin(),E = BB->end(); I != E; ++I) {
1051 const MachineInstr *MI = &*I;
1052 unsigned Opcode = MI->getOpcode();
1053 DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator
1054 CI = CSEMap.find(Opcode);
1055 if (CI != CSEMap.end())
1056 CI->second.push_back(MI);
1057 else {
1058 std::vector<const MachineInstr*> CSEMIs;
1059 CSEMIs.push_back(MI);
1060 CSEMap.insert(std::make_pair(Opcode, CSEMIs));
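/// LookForDuplicate - Find an instruction among PrevMIs that is a duplicate
/// of MI. Return this instruction if it's found.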
1065 const MachineInstr*
1066 MachineLICM::LookForDuplicate(const MachineInstr *MI,
1067 std::vector<const MachineInstr*> &PrevMIs) {
1068 for (unsigned i = 0, e = PrevMIs.size(); i != e; ++i) {
1069 const MachineInstr *PrevMI = PrevMIs[i];
1070 if (TII->produceSameValue(MI, PrevMI, (PreRegAlloc ? MRI : 0)))
1071 return PrevMI;
1073 return 0;
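/// EliminateCSE - Given a LICM'ed instruction, look for an instruction in
/// the preheader that computes the same value. If one is found, replace all
/// uses of MI's definitions with the definitions of the existing instruction
/// rather than hoisting MI to the preheader.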
1076 bool MachineLICM::EliminateCSE(MachineInstr *MI,
1077 DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator &CI) {
1078 // Do not CSE implicit_def so ProcessImplicitDefs can properly propagate
1079 // the undef property onto uses.
1080 if (CI == CSEMap.end() || MI->isImplicitDef())
1081 return false;
1083 if (const MachineInstr *Dup = LookForDuplicate(MI, CI->second)) {
1084 DEBUG(dbgs() << "CSEing " << *MI << " with " << *Dup);
1086 // Replace virtual registers defined by MI by their counterparts defined
1087 // by Dup.
1088 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1089 const MachineOperand &MO = MI->getOperand(i);
1091 // Physical registers may not differ here.
1092 assert((!MO.isReg() || MO.getReg() == 0 ||
1093 !TargetRegisterInfo::isPhysicalRegister(MO.getReg()) ||
1094 MO.getReg() == Dup->getOperand(i).getReg()) &&
1095 "Instructions with different phys regs are not identical!");
1097 if (MO.isReg() && MO.isDef() &&
1098 !TargetRegisterInfo::isPhysicalRegister(MO.getReg())) {
1099 MRI->replaceRegWith(MO.getReg(), Dup->getOperand(i).getReg());
1100 MRI->clearKillFlags(Dup->getOperand(i).getReg());
1103 MI->eraseFromParent();
1104 ++NumCSEed;
1105 return true;
1107 return false;
1110 /// Hoist - When an instruction is found to use only loop invariant operands
1111 /// that are safe to hoist, this function is called to do the dirty work.
1113 bool MachineLICM::Hoist(MachineInstr *MI, MachineBasicBlock *Preheader) {
1114 // First check whether we should hoist this instruction.
1115 if (!IsLoopInvariantInst(*MI) || !IsProfitableToHoist(*MI)) {
1116 // If not, try unfolding a hoistable load.
1117 MI = ExtractHoistableLoad(MI);
1118 if (!MI) return false;
1121 // Now move the instruction to the preheader, inserting it before any
1122 // terminator instructions.
1123 DEBUG({
1124 dbgs() << "Hoisting " << *MI;
1125 if (Preheader->getBasicBlock())
1126 dbgs() << " to MachineBasicBlock "
1127 << Preheader->getName();
1128 if (MI->getParent()->getBasicBlock())
1129 dbgs() << " from MachineBasicBlock "
1130 << MI->getParent()->getName();
1131 dbgs() << "\n";
1134 // If this is the first instruction being hoisted to the preheader,
1135 // initialize the CSE map with potential common expressions.
1136 if (FirstInLoop) {
1137 InitCSEMap(Preheader);
1138 FirstInLoop = false;
1141 // Look for opportunity to CSE the hoisted instruction.
1142 unsigned Opcode = MI->getOpcode();
1143 DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator
1144 CI = CSEMap.find(Opcode);
1145 if (!EliminateCSE(MI, CI)) {
1146 // Otherwise, splice the instruction to the preheader.
1147 Preheader->splice(Preheader->getFirstTerminator(),MI->getParent(),MI);
1149 // Update register pressure for BBs from header to this block.
1150 UpdateBackTraceRegPressure(MI);
1152 // Clear the kill flags of any register this instruction defines,
1153 // since they may need to be live throughout the entire loop
1154 // rather than just live for part of it.
1155 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1156 MachineOperand &MO = MI->getOperand(i);
1157 if (MO.isReg() && MO.isDef() && !MO.isDead())
1158 MRI->clearKillFlags(MO.getReg());
1161 // Add to the CSE map.
1162 if (CI != CSEMap.end())
1163 CI->second.push_back(MI);
1164 else {
1165 std::vector<const MachineInstr*> CSEMIs;
1166 CSEMIs.push_back(MI);
1167 CSEMap.insert(std::make_pair(Opcode, CSEMIs));
1171 ++NumHoisted;
1172 Changed = true;
1174 return true;
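/// getCurPreheader - Get the preheader for the current loop, splitting
/// a critical edge if needed.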
1177 MachineBasicBlock *MachineLICM::getCurPreheader() {
1178 // Determine the block to which to hoist instructions. If we can't find a
1179 // suitable loop predecessor, we can't do any hoisting.
1181 // If we've tried to get a preheader and failed, don't try again.
1182 if (CurPreheader == reinterpret_cast<MachineBasicBlock *>(-1))
1183 return 0;
1185 if (!CurPreheader) {
1186 CurPreheader = CurLoop->getLoopPreheader();
1187 if (!CurPreheader) {
1188 MachineBasicBlock *Pred = CurLoop->getLoopPredecessor();
1189 if (!Pred) {
1190 CurPreheader = reinterpret_cast<MachineBasicBlock *>(-1);
1191 return 0;
1194 CurPreheader = Pred->SplitCriticalEdge(CurLoop->getHeader(), this);
1195 if (!CurPreheader) {
1196 CurPreheader = reinterpret_cast<MachineBasicBlock *>(-1);
1197 return 0;
1201 return CurPreheader;