//===-- llvm/CodeGen/Rewriter.cpp - Rewriter ------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "virtregrewriter"
#include "VirtRegRewriter.h"
#include "VirtRegMap.h"
#include "llvm/Function.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;
STATISTIC(NumDSE     , "Number of dead stores elided");
STATISTIC(NumDSS     , "Number of dead spill slots removed");
STATISTIC(NumCommutes, "Number of instructions commuted");
STATISTIC(NumDRM     , "Number of re-materializable defs elided");
STATISTIC(NumStores  , "Number of stores added");
STATISTIC(NumPSpills , "Number of physical register spills");
STATISTIC(NumOmitted , "Number of reloads omitted");
STATISTIC(NumAvoided , "Number of reloads deemed unnecessary");
STATISTIC(NumCopified, "Number of available reloads turned into copies");
STATISTIC(NumReMats  , "Number of re-materializations");
STATISTIC(NumLoads   , "Number of loads added");
STATISTIC(NumReused  , "Number of values reused");
STATISTIC(NumDCE     , "Number of copies elided");
STATISTIC(NumSUnfold , "Number of stores unfolded");
STATISTIC(NumModRefUnfold, "Number of modref unfolded");
enum RewriterName { local, trivial };

static cl::opt<RewriterName>
RewriterOpt("rewriter",
            cl::desc("Rewriter to use (default=local)"),
            cl::init(local),
            cl::values(clEnumVal(local,   "local rewriter"),
                       clEnumVal(trivial, "trivial rewriter"),
                       clEnumValEnd));

static cl::opt<bool>
ScheduleSpills("schedule-spills",
               cl::desc("Schedule spill code"),
               cl::init(false));

VirtRegRewriter::~VirtRegRewriter() {}
/// substitutePhysReg - Replace virtual register in MachineOperand with a
/// physical register. Do the right thing with the sub-register index.
/// Note that operands may be added, so the MO reference is no longer valid.
static void substitutePhysReg(MachineOperand &MO, unsigned Reg,
                              const TargetRegisterInfo &TRI) {
  if (unsigned SubIdx = MO.getSubReg()) {
    // Insert the physical subreg and reset the subreg field.
    MO.setReg(TRI.getSubReg(Reg, SubIdx));
    MO.setSubReg(0);

    // Any def, dead, and kill flags apply to the full virtual register, so they
    // also apply to the full physical register. Add imp-def/dead and imp-kill
    // as needed.
    MachineInstr &MI = *MO.getParent();
    if (MO.isDef())
      if (MO.isDead())
        MI.addRegisterDead(Reg, &TRI, /*AddIfNotFound=*/ true);
      else
        MI.addRegisterDefined(Reg, &TRI);
    else if (!MO.isUndef() &&
             (MO.isKill() ||
              MI.isRegTiedToDefOperand(&MO-&MI.getOperand(0))))
      MI.addRegisterKilled(Reg, &TRI, /*AddIfNotFound=*/ true);
  } else {
    MO.setReg(Reg);
  }
}
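
// Illustrative sketch (not from the original source; register names are
// hypothetical x86 ones): given an operand %reg1024:sub_16bit<kill> with
// %reg1024 assigned to EAX, substitutePhysReg rewrites the operand to AX,
// clears the sub-register index, and adds an implicit kill of EAX to the
// instruction, because the kill of the full virtual register covers the full
// physical register.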
/// This class is intended for use with the new spilling framework only. It
/// rewrites vreg def/uses to use the assigned preg, but does not insert any
/// spill code.
struct TrivialRewriter : public VirtRegRewriter {

  bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM,
                            LiveIntervals* LIs) {
    DEBUG(dbgs() << "********** REWRITE MACHINE CODE **********\n");
    DEBUG(dbgs() << "********** Function: "
          << MF.getFunction()->getName() << '\n');
    DEBUG(dbgs() << "**** Machine Instrs"
          << "(NOTE! Does not include spills and reloads!) ****\n");
    DEBUG(MF.dump());

    MachineRegisterInfo *mri = &MF.getRegInfo();
    const TargetRegisterInfo *tri = MF.getTarget().getRegisterInfo();

    bool changed = false;

    for (LiveIntervals::iterator liItr = LIs->begin(), liEnd = LIs->end();
         liItr != liEnd; ++liItr) {

      const LiveInterval *li = liItr->second;
      unsigned reg = li->reg;

      if (TargetRegisterInfo::isPhysicalRegister(reg)) {
        mri->setPhysRegUsed(reg);
      }
      else {
        if (!VRM.hasPhys(reg))
          continue;
        unsigned pReg = VRM.getPhys(reg);
        mri->setPhysRegUsed(pReg);
        // Copy the register use-list before traversing it.
        SmallVector<std::pair<MachineInstr*, unsigned>, 32> reglist;
        for (MachineRegisterInfo::reg_iterator I = mri->reg_begin(reg),
               E = mri->reg_end(); I != E; ++I)
          reglist.push_back(std::make_pair(&*I, I.getOperandNo()));
        for (unsigned N = 0; N != reglist.size(); ++N)
          substitutePhysReg(reglist[N].first->getOperand(reglist[N].second),
                            pReg, *tri);
        changed |= !reglist.empty();
      }
    }

    DEBUG(dbgs() << "**** Post Machine Instrs ****\n");
    DEBUG(MF.dump());

    return changed;
  }

};
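
// Illustrative effect of the trivial rewriter (hypothetical vregs/pregs):
//   %reg1024 = ADD32rr %reg1025, %reg1026
// with %reg1024->EAX, %reg1025->EBX, %reg1026->ECX becomes
//   EAX = ADD32rr EBX, ECX
// No spill or reload code is inserted; the new spilling framework handles that
// separately.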
// ************************************************************************ //

/// AvailableSpills - As the local rewriter is scanning and rewriting an MBB
/// from top down, keep track of which spill slots or remat are available in
/// each physreg.
///
/// Note that not all physregs are created equal here. In particular, some
/// physregs are reloads that we are allowed to clobber or ignore at any time.
/// Other physregs are values that the register allocated program is using
/// that we cannot CHANGE, but we can read if we like. We keep track of this
/// on a per-stack-slot / remat id basis as the low bit in the value of the
/// SpillSlotsAvailable entries. The predicate 'canClobberPhysReg()' checks
/// this bit, and addAvailable sets it when the value may be clobbered.
class AvailableSpills {
  const TargetRegisterInfo *TRI;
  const TargetInstrInfo *TII;

  // SpillSlotsOrReMatsAvailable - This map keeps track of all of the spilled
  // or remat'ed virtual register values that are still available, due to
  // being loaded or stored to, but not invalidated yet.
  std::map<int, unsigned> SpillSlotsOrReMatsAvailable;

  // PhysRegsAvailable - This is the inverse of SpillSlotsOrReMatsAvailable,
  // indicating which stack slot values are currently held by a physreg. This
  // is used to invalidate entries in SpillSlotsOrReMatsAvailable when a
  // physreg is modified.
  std::multimap<unsigned, int> PhysRegsAvailable;

  void disallowClobberPhysRegOnly(unsigned PhysReg);

  void ClobberPhysRegOnly(unsigned PhysReg);

public:
  AvailableSpills(const TargetRegisterInfo *tri, const TargetInstrInfo *tii)
    : TRI(tri), TII(tii) {
  }

  /// clear - Reset the state.
  void clear() {
    SpillSlotsOrReMatsAvailable.clear();
    PhysRegsAvailable.clear();
  }

  const TargetRegisterInfo *getRegInfo() const { return TRI; }

  /// getSpillSlotOrReMatPhysReg - If the specified stack slot or remat is
  /// available in a physical register, return that PhysReg, otherwise
  /// return 0.
  unsigned getSpillSlotOrReMatPhysReg(int Slot) const {
    std::map<int, unsigned>::const_iterator I =
      SpillSlotsOrReMatsAvailable.find(Slot);
    if (I != SpillSlotsOrReMatsAvailable.end()) {
      return I->second >> 1;  // Remove the CanClobber bit.
    }
    return 0;
  }

  /// addAvailable - Mark that the specified stack slot / remat is available
  /// in the specified physreg. If CanClobber is true, the physreg can be
  /// modified at any time without changing the semantics of the program.
  void addAvailable(int SlotOrReMat, unsigned Reg, bool CanClobber = true) {
    // If this stack slot is thought to be available in some other physreg,
    // remove its record.
    ModifyStackSlotOrReMat(SlotOrReMat);

    PhysRegsAvailable.insert(std::make_pair(Reg, SlotOrReMat));
    SpillSlotsOrReMatsAvailable[SlotOrReMat] = (Reg << 1) |
                                               (unsigned)CanClobber;

    if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
      DEBUG(dbgs() << "Remembering RM#"
                   << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1);
    else
      DEBUG(dbgs() << "Remembering SS#" << SlotOrReMat);
    DEBUG(dbgs() << " in physreg " << TRI->getName(Reg) << "\n");
  }

  /// canClobberPhysRegForSS - Return true if the spiller is allowed to change
  /// the value of the specified stackslot register if it desires. The
  /// specified stack slot must be available in a physreg for this query to
  /// make sense.
  bool canClobberPhysRegForSS(int SlotOrReMat) const {
    assert(SpillSlotsOrReMatsAvailable.count(SlotOrReMat) &&
           "Value not available!");
    return SpillSlotsOrReMatsAvailable.find(SlotOrReMat)->second & 1;
  }

  /// canClobberPhysReg - Return true if the spiller is allowed to clobber the
  /// physical register where values for some stack slot(s) might be
  /// available.
  bool canClobberPhysReg(unsigned PhysReg) const {
    std::multimap<unsigned, int>::const_iterator I =
      PhysRegsAvailable.lower_bound(PhysReg);
    while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
      int SlotOrReMat = I->second;
      ++I;
      if (!canClobberPhysRegForSS(SlotOrReMat))
        return false;
    }
    return true;
  }

  /// disallowClobberPhysReg - Unset the CanClobber bit of the specified
  /// stackslot register. The register is still available but is no longer
  /// allowed to be modified.
  void disallowClobberPhysReg(unsigned PhysReg);

  /// ClobberPhysReg - This is called when the specified physreg changes
  /// value. We use this to invalidate any info about stuff that lives in
  /// it and any of its aliases.
  void ClobberPhysReg(unsigned PhysReg);

  /// ModifyStackSlotOrReMat - This method is called when the value in a stack
  /// slot changes. This removes information about which register the
  /// previous value for this slot lives in (as the previous value is dead
  /// now).
  void ModifyStackSlotOrReMat(int SlotOrReMat);

  /// AddAvailableRegsToLiveIn - Availability information is being kept coming
  /// into the specified MBB. Add available physical registers as potential
  /// live-ins. If they are reused in the MBB, they will be added to the
  /// live-in set to keep the register scavenger and post-allocation scheduler
  /// happy.
  void AddAvailableRegsToLiveIn(MachineBasicBlock &MBB, BitVector &RegKills,
                                std::vector<MachineOperand*> &KillOps);
};
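
// Illustrative encoding example (hypothetical values): after
// addAvailable(/*SlotOrReMat=*/4, /*Reg=*/EAX, /*CanClobber=*/true),
// SpillSlotsOrReMatsAvailable[4] == (EAX << 1) | 1 and PhysRegsAvailable
// holds the inverse entry (EAX, 4). canClobberPhysRegForSS(4) reads the low
// bit, and getSpillSlotOrReMatPhysReg(4) shifts it back out to return EAX.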
// ************************************************************************ //

// Given a location where a reload of a spilled register or a remat of
// a constant is to be inserted, attempt to find a safe location to
// insert the load at an earlier point in the basic-block, to hide
// latency of the load and to avoid address-generation interlock issues.
static MachineBasicBlock::iterator
ComputeReloadLoc(MachineBasicBlock::iterator const InsertLoc,
                 MachineBasicBlock::iterator const Begin,
                 unsigned PhysReg,
                 const TargetRegisterInfo *TRI,
                 bool DoReMat,
                 int SSorRMId,
                 const TargetInstrInfo *TII,
                 const MachineFunction &MF)
{
  if (!ScheduleSpills)
    return InsertLoc;

  // Spill backscheduling is of primary interest to addresses, so
  // don't do anything if the register isn't in the register class
  // used for pointers.

  const TargetLowering *TL = MF.getTarget().getTargetLowering();

  if (!TL->isTypeLegal(TL->getPointerTy()))
    // Believe it or not, this is true on PIC16.
    return InsertLoc;

  const TargetRegisterClass *ptrRegClass =
    TL->getRegClassFor(TL->getPointerTy());
  if (!ptrRegClass->contains(PhysReg))
    return InsertLoc;

  // Scan upwards through the preceding instructions. If an instruction doesn't
  // reference the stack slot or the register we're loading, we can
  // backschedule the reload up past it.
  MachineBasicBlock::iterator NewInsertLoc = InsertLoc;
  while (NewInsertLoc != Begin) {
    MachineBasicBlock::iterator Prev = prior(NewInsertLoc);
    bool Stop = false;
    for (unsigned i = 0; i < Prev->getNumOperands(); ++i) {
      MachineOperand &Op = Prev->getOperand(i);
      if (!DoReMat && Op.isFI() && Op.getIndex() == SSorRMId)
        Stop = true;
    }
    if (Stop)
      break;
    if (Prev->findRegisterUseOperandIdx(PhysReg) != -1 ||
        Prev->findRegisterDefOperand(PhysReg))
      break;
    for (const unsigned *Alias = TRI->getAliasSet(PhysReg); *Alias; ++Alias)
      if (Prev->findRegisterUseOperandIdx(*Alias) != -1 ||
          Prev->findRegisterDefOperand(*Alias))
        Stop = true;
    if (Stop)
      break;
    NewInsertLoc = Prev;
  }

  // If we made it to the beginning of the block, turn around and move back
  // down just past any existing reloads. They're likely to be reloads/remats
  // for instructions earlier than what our current reload/remat is for, so
  // they should be scheduled earlier.
  if (NewInsertLoc == Begin) {
    int FrameIdx;
    while (InsertLoc != NewInsertLoc &&
           (TII->isLoadFromStackSlot(NewInsertLoc, FrameIdx) ||
            TII->isTriviallyReMaterializable(NewInsertLoc)))
      ++NewInsertLoc;
  }

  return NewInsertLoc;
}
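
// Illustrative back-scheduling example (hypothetical x86 code): a reload
//   EAX = MOV32rm <fi#2>
// requested just before "... = use EAX" can be hoisted above instructions
// that neither touch fi#2 nor mention EAX or its aliases:
//   EAX = MOV32rm <fi#2>     <- moved earlier by ComputeReloadLoc
//   EDX = ADD32rr EDX, ECX   <- unrelated, safe to cross
//   ... = use EAX
// hiding some of the load latency.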
// ReusedOp - For each reused operand, we keep track of a bit of information,
// in case we need to rollback upon processing a new operand. See comments
// below.
struct ReusedOp {
  // Operand - The MachineInstr operand that reused an available value.
  unsigned Operand;

  // StackSlotOrReMat - The spill slot or remat id of the value being reused.
  unsigned StackSlotOrReMat;

  // PhysRegReused - The physical register the value was available in.
  unsigned PhysRegReused;

  // AssignedPhysReg - The physreg that was assigned for use by the reload.
  unsigned AssignedPhysReg;

  // VirtReg - The virtual register itself.
  unsigned VirtReg;

  ReusedOp(unsigned o, unsigned ss, unsigned prr, unsigned apr,
           unsigned vreg)
    : Operand(o), StackSlotOrReMat(ss), PhysRegReused(prr),
      AssignedPhysReg(apr), VirtReg(vreg) {}
};

/// ReuseInfo - This maintains a collection of ReuseOp's for each operand that
/// is reused instead of reloaded.
class ReuseInfo {
  MachineInstr &MI;
  std::vector<ReusedOp> Reuses;
  BitVector PhysRegsClobbered;
public:
  ReuseInfo(MachineInstr &mi, const TargetRegisterInfo *tri) : MI(mi) {
    PhysRegsClobbered.resize(tri->getNumRegs());
  }

  bool hasReuses() const {
    return !Reuses.empty();
  }

  /// addReuse - If we choose to reuse a virtual register that is already
  /// available instead of reloading it, remember that we did so.
  void addReuse(unsigned OpNo, unsigned StackSlotOrReMat,
                unsigned PhysRegReused, unsigned AssignedPhysReg,
                unsigned VirtReg) {
    // If the reload is to the assigned register anyway, no undo will be
    // required.
    if (PhysRegReused == AssignedPhysReg) return;

    // Otherwise, remember this.
    Reuses.push_back(ReusedOp(OpNo, StackSlotOrReMat, PhysRegReused,
                              AssignedPhysReg, VirtReg));
  }

  void markClobbered(unsigned PhysReg) {
    PhysRegsClobbered.set(PhysReg);
  }

  bool isClobbered(unsigned PhysReg) const {
    return PhysRegsClobbered.test(PhysReg);
  }

  /// GetRegForReload - We are about to emit a reload into PhysReg. If there
  /// is some other operand that is using the specified register, either pick
  /// a new register to use, or evict the previous reload and use this reg.
  unsigned GetRegForReload(const TargetRegisterClass *RC, unsigned PhysReg,
                           MachineFunction &MF, MachineInstr *MI,
                           AvailableSpills &Spills,
                           std::vector<MachineInstr*> &MaybeDeadStores,
                           SmallSet<unsigned, 8> &Rejected,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps,
                           VirtRegMap &VRM);

  /// GetRegForReload - Helper for the above GetRegForReload(). Add a
  /// 'Rejected' set to remember which registers have been considered and
  /// rejected for the reload. This avoids infinite looping in cases like
  /// this:
  /// t1 := op t2, t3
  /// t2 <- assigned r0 for use by the reload but ended up reuse r1
  /// t3 <- assigned r1 for use by the reload but ended up reuse r0
  /// t1 <- desires r1
  ///       sees r1 is taken by t2, tries t2's reload register r0
  ///       sees r0 is taken by t3, tries t3's reload register r1
  ///       sees r1 is taken by t2, tries t2's reload register r0 ...
  unsigned GetRegForReload(unsigned VirtReg, unsigned PhysReg, MachineInstr *MI,
                           AvailableSpills &Spills,
                           std::vector<MachineInstr*> &MaybeDeadStores,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps,
                           VirtRegMap &VRM) {
    SmallSet<unsigned, 8> Rejected;
    MachineFunction &MF = *MI->getParent()->getParent();
    const TargetRegisterClass* RC = MF.getRegInfo().getRegClass(VirtReg);
    return GetRegForReload(RC, PhysReg, MF, MI, Spills, MaybeDeadStores,
                           Rejected, RegKills, KillOps, VRM);
  }
};
// ****************** //
// Utility Functions  //
// ****************** //

/// findSinglePredSuccessor - Return via reference a vector of machine basic
/// blocks each of which is a successor of the specified BB and has no other
/// predecessor.
static void findSinglePredSuccessor(MachineBasicBlock *MBB,
                                    SmallVectorImpl<MachineBasicBlock*> &Succs) {
  for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
         SE = MBB->succ_end(); SI != SE; ++SI) {
    MachineBasicBlock *SuccMBB = *SI;
    if (SuccMBB->pred_size() == 1)
      Succs.push_back(SuccMBB);
  }
}
/// InvalidateKill - Invalidate register kill information for a specific
/// register. This also unsets the kills marker on the last kill operand.
static void InvalidateKill(unsigned Reg,
                           const TargetRegisterInfo* TRI,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps) {
  if (RegKills[Reg]) {
    KillOps[Reg]->setIsKill(false);
    // KillOps[Reg] might be a def of a super-register.
    unsigned KReg = KillOps[Reg]->getReg();
    KillOps[KReg] = NULL;
    RegKills.reset(KReg);
    for (const unsigned *SR = TRI->getSubRegisters(KReg); *SR; ++SR) {
      if (RegKills[*SR]) {
        KillOps[*SR]->setIsKill(false);
        KillOps[*SR] = NULL;
        RegKills.reset(*SR);
      }
    }
  }
}
/// InvalidateKills - MI is going to be deleted. If any of its operands are
/// marked kill, then invalidate the information.
static void InvalidateKills(MachineInstr &MI,
                            const TargetRegisterInfo* TRI,
                            BitVector &RegKills,
                            std::vector<MachineOperand*> &KillOps,
                            SmallVector<unsigned, 2> *KillRegs = NULL) {
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.isUse() || !MO.isKill() || MO.isUndef())
      continue;
    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (KillRegs)
      KillRegs->push_back(Reg);
    assert(Reg < KillOps.size());
    if (KillOps[Reg] == &MO) {
      KillOps[Reg] = NULL;
      RegKills.reset(Reg);
      for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
        if (RegKills[*SR]) {
          KillOps[*SR] = NULL;
          RegKills.reset(*SR);
        }
      }
    }
  }
}
/// InvalidateRegDef - If the def operand of the specified def MI is now dead
/// (since its spill instruction is removed), mark it isDead. Also checks if
/// the def MI has other definition operands that are not dead. Returns it by
/// reference.
static bool InvalidateRegDef(MachineBasicBlock::iterator I,
                             MachineInstr &NewDef, unsigned Reg,
                             bool &HasLiveDef,
                             const TargetRegisterInfo *TRI) {
  // Due to remat, it's possible this reg isn't being reused. That is,
  // the def of this reg (by prev MI) is now dead.
  MachineInstr *DefMI = I;
  MachineOperand *DefOp = NULL;
  for (unsigned i = 0, e = DefMI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = DefMI->getOperand(i);
    if (!MO.isReg() || !MO.isDef() || !MO.isKill() || MO.isUndef())
      continue;
    if (MO.getReg() == Reg)
      DefOp = &MO;
    else if (!MO.isDead())
      HasLiveDef = true;
  }
  if (!DefOp)
    return false;

  bool FoundUse = false, Done = false;
  MachineBasicBlock::iterator E = &NewDef;
  ++I; ++E;
  for (; !Done && I != E; ++I) {
    MachineInstr *NMI = I;
    for (unsigned j = 0, ee = NMI->getNumOperands(); j != ee; ++j) {
      MachineOperand &MO = NMI->getOperand(j);
      if (!MO.isReg() || MO.getReg() == 0 ||
          (MO.getReg() != Reg && !TRI->isSubRegister(Reg, MO.getReg())))
        continue;
      if (MO.isUse())
        FoundUse = true;
      Done = true; // Stop after scanning all the operands of this MI.
    }
  }
  if (!FoundUse) {
    // Def is dead!
    DefOp->setIsDead();
    return true;
  }
  return false;
}
/// UpdateKills - Track and update kill info. If a MI reads a register that is
/// marked kill, then it must be due to register reuse. Transfer the kill info
/// over.
static void UpdateKills(MachineInstr &MI, const TargetRegisterInfo* TRI,
                        BitVector &RegKills,
                        std::vector<MachineOperand*> &KillOps) {
  // These do not affect kill info at all.
  if (MI.isDebugValue())
    return;
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.isUse() || MO.isUndef())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    if (RegKills[Reg] && KillOps[Reg]->getParent() != &MI) {
      // That can't be right. Register is killed but not re-defined and it's
      // being reused. Let's fix that.
      KillOps[Reg]->setIsKill(false);
      // KillOps[Reg] might be a def of a super-register.
      unsigned KReg = KillOps[Reg]->getReg();
      KillOps[KReg] = NULL;
      RegKills.reset(KReg);

      // Must be a def of a super-register. Its other sub-registers are no
      // longer killed as well.
      for (const unsigned *SR = TRI->getSubRegisters(KReg); *SR; ++SR) {
        KillOps[*SR] = NULL;
        RegKills.reset(*SR);
      }
    } else {
      // Check for subreg kills as well.
      // d4 =
      // store d4, fi#0
      // ...
      //    = d4  <avoiding reload>
      for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
        unsigned SReg = *SR;
        if (RegKills[SReg] && KillOps[SReg]->getParent() != &MI) {
          KillOps[SReg]->setIsKill(false);
          unsigned KReg = KillOps[SReg]->getReg();
          KillOps[KReg] = NULL;
          RegKills.reset(KReg);

          for (const unsigned *SSR = TRI->getSubRegisters(KReg); *SSR; ++SSR) {
            KillOps[*SSR] = NULL;
            RegKills.reset(*SSR);
          }
        }
      }
    }

    if (MO.isKill()) {
      RegKills.set(Reg);
      KillOps[Reg] = &MO;
      for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
        RegKills.set(*SR);
        KillOps[*SR] = &MO;
      }
    }
  }

  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.getReg() || !MO.isDef())
      continue;
    unsigned Reg = MO.getReg();
    RegKills.reset(Reg);
    KillOps[Reg] = NULL;
    // It also defines (or partially defines) aliases.
    for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
      RegKills.reset(*SR);
      KillOps[*SR] = NULL;
    }
    for (const unsigned *SR = TRI->getSuperRegisters(Reg); *SR; ++SR) {
      RegKills.reset(*SR);
      KillOps[*SR] = NULL;
    }
  }
}
/// ReMaterialize - Re-materialize definition for Reg targeting DestReg.
///
static void ReMaterialize(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MII,
                          unsigned DestReg, unsigned Reg,
                          const TargetInstrInfo *TII,
                          const TargetRegisterInfo *TRI,
                          VirtRegMap &VRM) {
  MachineInstr *ReMatDefMI = VRM.getReMaterializedMI(Reg);
  const TargetInstrDesc &TID = ReMatDefMI->getDesc();
  assert(TID.getNumDefs() == 1 &&
         "Don't know how to remat instructions that define > 1 values!");
  TII->reMaterialize(MBB, MII, DestReg,
                     ReMatDefMI->getOperand(0).getSubReg(), ReMatDefMI, TRI);
  MachineInstr *NewMI = prior(MII);
  for (unsigned i = 0, e = NewMI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = NewMI->getOperand(i);
    if (!MO.isReg() || MO.getReg() == 0)
      continue;
    unsigned VirtReg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(VirtReg))
      continue;
    unsigned Phys = VRM.getPhys(VirtReg);
    assert(Phys && "Virtual register is not assigned a register?");
    substitutePhysReg(MO, Phys, *TRI);
  }
  ++NumReMats;
}
/// findSuperReg - Find the SubReg's super-register of given register class
/// where its SubIdx sub-register is SubReg.
static unsigned findSuperReg(const TargetRegisterClass *RC, unsigned SubReg,
                             unsigned SubIdx, const TargetRegisterInfo *TRI) {
  for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
       I != E; ++I) {
    unsigned Reg = *I;
    if (TRI->getSubReg(Reg, SubIdx) == SubReg)
      return Reg;
  }
  return 0;
}
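
// e.g. (illustrative, hypothetical x86 call): findSuperReg(GR32, AX,
// sub_16bit, TRI) walks the GR32 class and returns EAX, because
// getSubReg(EAX, sub_16bit) == AX.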
// ******************************** //
// Available Spills Implementation  //
// ******************************** //

/// disallowClobberPhysRegOnly - Unset the CanClobber bit of the specified
/// stackslot register. The register is still available but is no longer
/// allowed to be modified.
void AvailableSpills::disallowClobberPhysRegOnly(unsigned PhysReg) {
  std::multimap<unsigned, int>::iterator I =
    PhysRegsAvailable.lower_bound(PhysReg);
  while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
    int SlotOrReMat = I->second;
    ++I;
    assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
           "Bidirectional map mismatch!");
    SpillSlotsOrReMatsAvailable[SlotOrReMat] &= ~1;
    DEBUG(dbgs() << "PhysReg " << TRI->getName(PhysReg)
          << " copied, it is available for use but can no longer be modified\n");
  }
}

/// disallowClobberPhysReg - Unset the CanClobber bit of the specified
/// stackslot register and its aliases. The register and its aliases may
/// still be available but are no longer allowed to be modified.
void AvailableSpills::disallowClobberPhysReg(unsigned PhysReg) {
  for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
    disallowClobberPhysRegOnly(*AS);
  disallowClobberPhysRegOnly(PhysReg);
}
/// ClobberPhysRegOnly - This is called when the specified physreg changes
/// value. We use this to invalidate any info about stuff we think lives in it.
void AvailableSpills::ClobberPhysRegOnly(unsigned PhysReg) {
  std::multimap<unsigned, int>::iterator I =
    PhysRegsAvailable.lower_bound(PhysReg);
  while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
    int SlotOrReMat = I->second;
    PhysRegsAvailable.erase(I++);
    assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
           "Bidirectional map mismatch!");
    SpillSlotsOrReMatsAvailable.erase(SlotOrReMat);
    DEBUG(dbgs() << "PhysReg " << TRI->getName(PhysReg)
          << " clobbered, invalidating ");
    if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
      DEBUG(dbgs() << "RM#" << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1 << "\n");
    else
      DEBUG(dbgs() << "SS#" << SlotOrReMat << "\n");
  }
}

/// ClobberPhysReg - This is called when the specified physreg changes
/// value. We use this to invalidate any info about stuff we think lives in
/// it and any of its aliases.
void AvailableSpills::ClobberPhysReg(unsigned PhysReg) {
  for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
    ClobberPhysRegOnly(*AS);
  ClobberPhysRegOnly(PhysReg);
}
/// AddAvailableRegsToLiveIn - Availability information is being kept coming
/// into the specified MBB. Add available physical registers as potential
/// live-ins. If they are reused in the MBB, they will be added to the
/// live-in set to keep the register scavenger and post-allocation scheduler
/// happy.
void AvailableSpills::AddAvailableRegsToLiveIn(MachineBasicBlock &MBB,
                                               BitVector &RegKills,
                                               std::vector<MachineOperand*> &KillOps) {
  std::set<unsigned> NotAvailable;
  for (std::multimap<unsigned, int>::iterator
         I = PhysRegsAvailable.begin(), E = PhysRegsAvailable.end();
       I != E; ++I) {
    unsigned Reg = I->first;
    const TargetRegisterClass* RC = TRI->getPhysicalRegisterRegClass(Reg);
    // FIXME: A temporary workaround. We can't reuse available value if it's
    // not safe to move the def of the virtual register's class. e.g.
    // X86::RFP* register classes. Do not add it as a live-in.
    if (!TII->isSafeToMoveRegClassDefs(RC))
      // This is no longer available.
      NotAvailable.insert(Reg);
    else {
      MBB.addLiveIn(Reg);
      InvalidateKill(Reg, TRI, RegKills, KillOps);
    }

    // Skip over the same register.
    std::multimap<unsigned, int>::iterator NI = llvm::next(I);
    while (NI != E && NI->first == Reg) {
      ++I;
      ++NI;
    }
  }

  for (std::set<unsigned>::iterator I = NotAvailable.begin(),
         E = NotAvailable.end(); I != E; ++I) {
    ClobberPhysReg(*I);
    for (const unsigned *SubRegs = TRI->getSubRegisters(*I);
         *SubRegs; ++SubRegs)
      ClobberPhysReg(*SubRegs);
  }
}
/// ModifyStackSlotOrReMat - This method is called when the value in a stack
/// slot changes. This removes information about which register the previous
/// value for this slot lives in (as the previous value is dead now).
void AvailableSpills::ModifyStackSlotOrReMat(int SlotOrReMat) {
  std::map<int, unsigned>::iterator It =
    SpillSlotsOrReMatsAvailable.find(SlotOrReMat);
  if (It == SpillSlotsOrReMatsAvailable.end()) return;
  unsigned Reg = It->second >> 1;
  SpillSlotsOrReMatsAvailable.erase(It);

  // This register may hold the value of multiple stack slots, only remove this
  // stack slot from the set of values the register contains.
  std::multimap<unsigned, int>::iterator I = PhysRegsAvailable.lower_bound(Reg);
  while (true) {
    assert(I != PhysRegsAvailable.end() && I->first == Reg &&
           "Map inverse broken!");
    if (I->second == SlotOrReMat) break;
    ++I;
  }
  PhysRegsAvailable.erase(I);
}
// ************************** //
// Reuse Info Implementation  //
// ************************** //

/// GetRegForReload - We are about to emit a reload into PhysReg. If there
/// is some other operand that is using the specified register, either pick
/// a new register to use, or evict the previous reload and use this reg.
unsigned ReuseInfo::GetRegForReload(const TargetRegisterClass *RC,
                                    unsigned PhysReg,
                                    MachineFunction &MF,
                                    MachineInstr *MI, AvailableSpills &Spills,
                                    std::vector<MachineInstr*> &MaybeDeadStores,
                                    SmallSet<unsigned, 8> &Rejected,
                                    BitVector &RegKills,
                                    std::vector<MachineOperand*> &KillOps,
                                    VirtRegMap &VRM) {
  const TargetInstrInfo* TII = MF.getTarget().getInstrInfo();
  const TargetRegisterInfo *TRI = Spills.getRegInfo();

  if (Reuses.empty()) return PhysReg;  // This is most often empty.

  for (unsigned ro = 0, e = Reuses.size(); ro != e; ++ro) {
    ReusedOp &Op = Reuses[ro];
    // If we find some other reuse that was supposed to use this register
    // exactly for its reload, we can change this reload to use ITS reload
    // register. That is, unless its reload register has already been
    // considered and subsequently rejected because it has also been reused
    // by another operand.
    if (Op.PhysRegReused == PhysReg &&
        Rejected.count(Op.AssignedPhysReg) == 0 &&
        RC->contains(Op.AssignedPhysReg)) {
      // Yup, use the reload register that we didn't use before.
      unsigned NewReg = Op.AssignedPhysReg;
      Rejected.insert(PhysReg);
      return GetRegForReload(RC, NewReg, MF, MI, Spills, MaybeDeadStores,
                             Rejected, RegKills, KillOps, VRM);
    } else {
      // Otherwise, we might also have a problem if a previously reused
      // value aliases the new register. If so, codegen the previous reload
      // and use this one.
      unsigned PRRU = Op.PhysRegReused;
      if (TRI->regsOverlap(PRRU, PhysReg)) {
        // Okay, we found out that an alias of a reused register
        // was used. This isn't good because it means we have
        // to undo a previous reuse.
        MachineBasicBlock *MBB = MI->getParent();
        const TargetRegisterClass *AliasRC =
          MBB->getParent()->getRegInfo().getRegClass(Op.VirtReg);

        // Copy Op out of the vector and remove it, we're going to insert an
        // explicit load for it.
        ReusedOp NewOp = Op;
        Reuses.erase(Reuses.begin()+ro);

        // MI may be using only a sub-register of PhysRegUsed.
        unsigned RealPhysRegUsed = MI->getOperand(NewOp.Operand).getReg();
        unsigned SubIdx = 0;
        assert(TargetRegisterInfo::isPhysicalRegister(RealPhysRegUsed) &&
               "A reuse cannot be a virtual register");
        if (PRRU != RealPhysRegUsed) {
          // What was the sub-register index?
          SubIdx = TRI->getSubRegIndex(PRRU, RealPhysRegUsed);
          assert(SubIdx &&
                 "Operand physreg is not a sub-register of PhysRegUsed");
        }

        // Ok, we're going to try to reload the assigned physreg into the
        // slot that we were supposed to in the first place. However, that
        // register could hold a reuse. Check to see if it conflicts or
        // would prefer us to use a different register.
        unsigned NewPhysReg = GetRegForReload(RC, NewOp.AssignedPhysReg,
                                              MF, MI, Spills, MaybeDeadStores,
                                              Rejected, RegKills, KillOps, VRM);

        bool DoReMat = NewOp.StackSlotOrReMat > VirtRegMap::MAX_STACK_SLOT;
        int SSorRMId = DoReMat
          ? VRM.getReMatId(NewOp.VirtReg) : (int) NewOp.StackSlotOrReMat;

        // Back-schedule reloads and remats.
        MachineBasicBlock::iterator InsertLoc =
          ComputeReloadLoc(MI, MBB->begin(), PhysReg, TRI,
                           DoReMat, SSorRMId, TII, MF);

        if (DoReMat) {
          ReMaterialize(*MBB, InsertLoc, NewPhysReg, NewOp.VirtReg, TII,
                        TRI, VRM);
        } else {
          TII->loadRegFromStackSlot(*MBB, InsertLoc, NewPhysReg,
                                    NewOp.StackSlotOrReMat, AliasRC);
          MachineInstr *LoadMI = prior(InsertLoc);
          VRM.addSpillSlotUse(NewOp.StackSlotOrReMat, LoadMI);
          // Any stores to this stack slot are not dead anymore.
          MaybeDeadStores[NewOp.StackSlotOrReMat] = NULL;
          ++NumLoads;
        }
        Spills.ClobberPhysReg(NewPhysReg);
        Spills.ClobberPhysReg(NewOp.PhysRegReused);

        unsigned RReg = SubIdx ? TRI->getSubReg(NewPhysReg, SubIdx) : NewPhysReg;
        MI->getOperand(NewOp.Operand).setReg(RReg);
        MI->getOperand(NewOp.Operand).setSubReg(0);

        Spills.addAvailable(NewOp.StackSlotOrReMat, NewPhysReg);
        UpdateKills(*prior(InsertLoc), TRI, RegKills, KillOps);
        DEBUG(dbgs() << '\t' << *prior(InsertLoc));

        DEBUG(dbgs() << "Reuse undone!\n");
        --NumReused;

        // Finally, PhysReg is now available, go ahead and use it.
        return PhysReg;
      }
    }
  }
  return PhysReg;
}
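
// Illustrative rollback (hypothetical registers): suppose operand 1 reused
// the value of SS#3 sitting in ECX, and ECX is now needed for another reload.
// The ReusedOp for operand 1 is removed, an explicit reload of SS#3 into its
// originally assigned register (say EDX) is emitted at a back-scheduled
// location, operand 1 is rewritten to EDX, and ECX becomes free for the new
// reload -- the "Reuse undone!" path above.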
// ************************************************************************ //
/// FoldsStackSlotModRef - Return true if the specified MI folds the specified
/// stack slot mod/ref. It also checks if it's possible to unfold the
/// instruction by having it define a specified physical register instead.
static bool FoldsStackSlotModRef(MachineInstr &MI, int SS, unsigned PhysReg,
                                 const TargetInstrInfo *TII,
                                 const TargetRegisterInfo *TRI,
                                 VirtRegMap &VRM) {
  if (VRM.hasEmergencySpills(&MI) || VRM.isSpillPt(&MI))
    return false;

  bool Found = false;
  VirtRegMap::MI2VirtMapTy::const_iterator I, End;
  for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ++I) {
    unsigned VirtReg = I->second.first;
    VirtRegMap::ModRef MR = I->second.second;
    if (MR & VirtRegMap::isModRef)
      if (VRM.getStackSlot(VirtReg) == SS) {
        Found = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(), true, true) != 0;
        break;
      }
  }
  if (!Found)
    return false;

  // Does the instruction use a register that overlaps the scratch register?
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || MO.getReg() == 0)
      continue;
    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isVirtualRegister(Reg)) {
      if (!VRM.hasPhys(Reg))
        continue;
      Reg = VRM.getPhys(Reg);
    }
    if (TRI->regsOverlap(PhysReg, Reg))
      return false;
  }
  return true;
}
/// FindFreeRegister - Find a free register of a given register class by looking
/// at (at most) the last two machine instructions.
static unsigned FindFreeRegister(MachineBasicBlock::iterator MII,
                                 MachineBasicBlock &MBB,
                                 const TargetRegisterClass *RC,
                                 const TargetRegisterInfo *TRI,
                                 BitVector &AllocatableRegs) {
  BitVector Defs(TRI->getNumRegs());
  BitVector Uses(TRI->getNumRegs());
  SmallVector<unsigned, 4> LocalUses;
  SmallVector<unsigned, 4> Kills;

  // Take a look at 2 instructions at most.
  unsigned Count = 0;
  while (Count < 2) {
    if (MII == MBB.begin())
      break;
    MachineInstr *PrevMI = prior(MII);
    MII = PrevMI;

    if (PrevMI->isDebugValue())
      continue; // Skip over dbg_value instructions.
    ++Count;

    for (unsigned i = 0, e = PrevMI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = PrevMI->getOperand(i);
      if (!MO.isReg() || MO.getReg() == 0)
        continue;
      unsigned Reg = MO.getReg();
      if (MO.isDef()) {
        Defs.set(Reg);
        for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
          Defs.set(*AS);
      } else {
        LocalUses.push_back(Reg);
        if (MO.isKill() && AllocatableRegs[Reg])
          Kills.push_back(Reg);
      }
    }

    for (unsigned i = 0, e = Kills.size(); i != e; ++i) {
      unsigned Kill = Kills[i];
      if (!Defs[Kill] && !Uses[Kill] &&
          TRI->getPhysicalRegisterRegClass(Kill) == RC)
        return Kill;
    }
    for (unsigned i = 0, e = LocalUses.size(); i != e; ++i) {
      unsigned Reg = LocalUses[i];
      Uses.set(Reg);
      for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
        Uses.set(*AS);
    }
  }

  return 0;
}
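
// Illustrative (hypothetical x86 snippet): scanning at most two instructions
// back from MII,
//   ... = OR32rr ECX<kill>, EDX
//   <MII>
// ECX is killed, allocatable, and neither defined nor used again within the
// window, so it can be returned as a free GR32 scratch register.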
/// AssignPhysToVirtReg - Assign the specified physical register to every
/// operand of MI that references VirtReg.
void AssignPhysToVirtReg(MachineInstr *MI, unsigned VirtReg, unsigned PhysReg,
                         const TargetRegisterInfo &TRI) {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.getReg() == VirtReg)
      substitutePhysReg(MO, PhysReg, TRI);
  }
}

namespace {
  struct RefSorter {
    bool operator()(const std::pair<MachineInstr*, int> &A,
                    const std::pair<MachineInstr*, int> &B) {
      return A.second < B.second;
    }
  };
}
// ***************************** //
// Local Spiller Implementation  //
// ***************************** //

class LocalRewriter : public VirtRegRewriter {
  MachineRegisterInfo *MRI;
  const TargetRegisterInfo *TRI;
  const TargetInstrInfo *TII;
  VirtRegMap *VRM;
  BitVector AllocatableRegs;
  DenseMap<MachineInstr*, unsigned> DistanceMap;

  MachineBasicBlock *MBB;       // Basic block currently being processed.

public:

  bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM,
                            LiveIntervals* LIs);

private:

  bool OptimizeByUnfold2(unsigned VirtReg, int SS,
                         MachineBasicBlock::iterator &MII,
                         std::vector<MachineInstr*> &MaybeDeadStores,
                         AvailableSpills &Spills,
                         BitVector &RegKills,
                         std::vector<MachineOperand*> &KillOps);

  bool OptimizeByUnfold(MachineBasicBlock::iterator &MII,
                        std::vector<MachineInstr*> &MaybeDeadStores,
                        AvailableSpills &Spills,
                        BitVector &RegKills,
                        std::vector<MachineOperand*> &KillOps);

  bool CommuteToFoldReload(MachineBasicBlock::iterator &MII,
                           unsigned VirtReg, unsigned SrcReg, int SS,
                           AvailableSpills &Spills,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps,
                           const TargetRegisterInfo *TRI);

  void SpillRegToStackSlot(MachineBasicBlock::iterator &MII,
                           int Idx, unsigned PhysReg, int StackSlot,
                           const TargetRegisterClass *RC,
                           bool isAvailable, MachineInstr *&LastStore,
                           AvailableSpills &Spills,
                           SmallSet<MachineInstr*, 4> &ReMatDefs,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps);

  void TransferDeadness(unsigned Reg, BitVector &RegKills,
                        std::vector<MachineOperand*> &KillOps);

  bool InsertEmergencySpills(MachineInstr *MI);

  bool InsertRestores(MachineInstr *MI,
                      AvailableSpills &Spills,
                      BitVector &RegKills,
                      std::vector<MachineOperand*> &KillOps);

  bool InsertSpills(MachineInstr *MI);

  void RewriteMBB(LiveIntervals *LIs,
                  AvailableSpills &Spills, BitVector &RegKills,
                  std::vector<MachineOperand*> &KillOps);
};
bool LocalRewriter::runOnMachineFunction(MachineFunction &MF, VirtRegMap &vrm,
                                         LiveIntervals* LIs) {
  MRI = &MF.getRegInfo();
  TRI = MF.getTarget().getRegisterInfo();
  TII = MF.getTarget().getInstrInfo();
  VRM = &vrm;
  AllocatableRegs = TRI->getAllocatableSet(MF);
  DEBUG(dbgs() << "\n**** Local spiller rewriting function '"
        << MF.getFunction()->getName() << "':\n");
  DEBUG(dbgs() << "**** Machine Instrs (NOTE! Does not include spills and"
        " reloads!) ****\n");
  DEBUG(MF.dump());

  // Spills - Keep track of which spilled values are available in physregs
  // so that we can choose to reuse the physregs instead of emitting
  // reloads. This is usually refreshed per basic block.
  AvailableSpills Spills(TRI, TII);

  // Keep track of kill information.
  BitVector RegKills(TRI->getNumRegs());
  std::vector<MachineOperand*> KillOps;
  KillOps.resize(TRI->getNumRegs(), NULL);

  // SingleEntrySuccs - Successor blocks which have a single predecessor.
  SmallVector<MachineBasicBlock*, 4> SinglePredSuccs;
  SmallPtrSet<MachineBasicBlock*,16> EarlyVisited;

  // Traverse the basic blocks depth first.
  MachineBasicBlock *Entry = MF.begin();
  SmallPtrSet<MachineBasicBlock*,16> Visited;
  for (df_ext_iterator<MachineBasicBlock*,
         SmallPtrSet<MachineBasicBlock*,16> >
         DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
       DFI != E; ++DFI) {
    MBB = *DFI;
    if (!EarlyVisited.count(MBB))
      RewriteMBB(LIs, Spills, RegKills, KillOps);

    // If this MBB is the only predecessor of a successor, keep the
    // availability information and visit it next.
    do {
      // Keep visiting single predecessor successors as long as possible.
      SinglePredSuccs.clear();
      findSinglePredSuccessor(MBB, SinglePredSuccs);
      if (SinglePredSuccs.empty())
        MBB = 0;
      else {
        // FIXME: There may be more than one successor, each of which has
        // MBB as its only predecessor.
        MBB = SinglePredSuccs[0];
        if (!Visited.count(MBB) && EarlyVisited.insert(MBB)) {
          Spills.AddAvailableRegsToLiveIn(*MBB, RegKills, KillOps);
          RewriteMBB(LIs, Spills, RegKills, KillOps);
        }
      }
    } while (MBB);

    // Clear the availability info.
    Spills.clear();
  }

  DEBUG(dbgs() << "**** Post Machine Instrs ****\n");
  DEBUG(MF.dump());

  // Mark unused spill slots.
  MachineFrameInfo *MFI = MF.getFrameInfo();
  int SS = VRM->getLowSpillSlot();
  if (SS != VirtRegMap::NO_STACK_SLOT)
    for (int e = VRM->getHighSpillSlot(); SS <= e; ++SS)
      if (!VRM->isSpillSlotUsed(SS)) {
        MFI->RemoveStackObject(SS);
        ++NumDSS;
      }

  return true;
}
/// OptimizeByUnfold2 - Unfold a series of load / store folding instructions if
/// a scratch register is available.
///     xorq  %r12<kill>, %r13
///     addq  %rax, -184(%rbp)
///     addq  %r13, -184(%rbp)
/// ==>
///     xorq  %r12<kill>, %r13
///     movq  -184(%rbp), %r12
///     addq  %rax, %r12
///     addq  %r13, %r12
///     movq  %r12, -184(%rbp)
bool LocalRewriter::
OptimizeByUnfold2(unsigned VirtReg, int SS,
                  MachineBasicBlock::iterator &MII,
                  std::vector<MachineInstr*> &MaybeDeadStores,
                  AvailableSpills &Spills,
                  BitVector &RegKills,
                  std::vector<MachineOperand*> &KillOps) {

  MachineBasicBlock::iterator NextMII = llvm::next(MII);
  // Skip over dbg_value instructions.
  while (NextMII != MBB->end() && NextMII->isDebugValue())
    NextMII = llvm::next(NextMII);
  if (NextMII == MBB->end())
    return false;

  if (TII->getOpcodeAfterMemoryUnfold(MII->getOpcode(), true, true) == 0)
    return false;

  // Now let's see if the last couple of instructions happens to have freed up
  // a register.
  const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
  unsigned PhysReg = FindFreeRegister(MII, *MBB, RC, TRI, AllocatableRegs);
  if (!PhysReg)
    return false;

  MachineFunction &MF = *MBB->getParent();
  TRI = MF.getTarget().getRegisterInfo();
  MachineInstr &MI = *MII;
  if (!FoldsStackSlotModRef(MI, SS, PhysReg, TII, TRI, *VRM))
    return false;

  // If the next instruction also folds the same SS modref and can be unfolded,
  // then it's worthwhile to issue a load from SS into the free register and
  // then unfold these instructions.
  if (!FoldsStackSlotModRef(*NextMII, SS, PhysReg, TII, TRI, *VRM))
    return false;

  // Back-schedule reloads and remats.
  ComputeReloadLoc(MII, MBB->begin(), PhysReg, TRI, false, SS, TII, MF);

  // Load from SS to the spare physical register.
  TII->loadRegFromStackSlot(*MBB, MII, PhysReg, SS, RC);
  // This invalidates Phys.
  Spills.ClobberPhysReg(PhysReg);
  // Remember it's available.
  Spills.addAvailable(SS, PhysReg);
  MaybeDeadStores[SS] = NULL;

  // Unfold current MI.
  SmallVector<MachineInstr*, 4> NewMIs;
  if (!TII->unfoldMemoryOperand(MF, &MI, VirtReg, false, false, NewMIs))
    llvm_unreachable("Unable to unfold the load / store folding instruction!");
  assert(NewMIs.size() == 1);
  AssignPhysToVirtReg(NewMIs[0], VirtReg, PhysReg, *TRI);
  VRM->transferRestorePts(&MI, NewMIs[0]);
  MII = MBB->insert(MII, NewMIs[0]);
  InvalidateKills(MI, TRI, RegKills, KillOps);
  VRM->RemoveMachineInstrFromMaps(&MI);
  MBB->erase(&MI);
  ++NumModRefUnfold;

  // Unfold next instructions that fold the same SS.
  do {
    MachineInstr &NextMI = *NextMII;
    NextMII = llvm::next(NextMII);
    NewMIs.clear();
    if (!TII->unfoldMemoryOperand(MF, &NextMI, VirtReg, false, false, NewMIs))
      llvm_unreachable("Unable to unfold the load / store folding instruction!");
    assert(NewMIs.size() == 1);
    AssignPhysToVirtReg(NewMIs[0], VirtReg, PhysReg, *TRI);
    VRM->transferRestorePts(&NextMI, NewMIs[0]);
    MBB->insert(NextMII, NewMIs[0]);
    InvalidateKills(NextMI, TRI, RegKills, KillOps);
    VRM->RemoveMachineInstrFromMaps(&NextMI);
    MBB->erase(&NextMI);
    ++NumModRefUnfold;
    // Skip over dbg_value instructions.
    while (NextMII != MBB->end() && NextMII->isDebugValue())
      NextMII = llvm::next(NextMII);
    if (NextMII == MBB->end())
      break;
  } while (FoldsStackSlotModRef(*NextMII, SS, PhysReg, TII, TRI, *VRM));

  // Store the value back into SS.
  TII->storeRegToStackSlot(*MBB, NextMII, PhysReg, true, SS, RC);
  MachineInstr *StoreMI = prior(NextMII);
  VRM->addSpillSlotUse(SS, StoreMI);
  VRM->virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);

  return true;
}
/// OptimizeByUnfold - Turn a store folding instruction into a load folding
/// instruction. e.g.
///     movl %eax, -32(%ebp)
///     movl -36(%ebp), %eax
///     orl  %eax, -32(%ebp)
/// ==>
///     orl  -36(%ebp), %eax
///     mov  %eax, -32(%ebp)
/// This enables unfolding optimization for a subsequent instruction which will
/// also eliminate the newly introduced store instruction.
bool LocalRewriter::
OptimizeByUnfold(MachineBasicBlock::iterator &MII,
                 std::vector<MachineInstr*> &MaybeDeadStores,
                 AvailableSpills &Spills,
                 BitVector &RegKills,
                 std::vector<MachineOperand*> &KillOps) {
  MachineFunction &MF = *MBB->getParent();
  MachineInstr &MI = *MII;
  unsigned UnfoldedOpc = 0;
  unsigned UnfoldPR = 0;
  unsigned UnfoldVR = 0;
  int FoldedSS = VirtRegMap::NO_STACK_SLOT;
  VirtRegMap::MI2VirtMapTy::const_iterator I, End;
  for (tie(I, End) = VRM->getFoldedVirts(&MI); I != End; ) {
    // Only transform a MI that folds a single register.
    if (UnfoldedOpc)
      return false;
    UnfoldVR = I->second.first;
    VirtRegMap::ModRef MR = I->second.second;
    // MI2VirtMap can be updated, which invalidates the iterator.
    // Increment the iterator first.
    ++I;
    if (VRM->isAssignedReg(UnfoldVR))
      continue;
    // If this reference is not a use, any previous store is now dead.
    // Otherwise, the store to this stack slot is not dead anymore.
    FoldedSS = VRM->getStackSlot(UnfoldVR);
    MachineInstr* DeadStore = MaybeDeadStores[FoldedSS];
    if (DeadStore && (MR & VirtRegMap::isModRef)) {
      unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(FoldedSS);
      if (!PhysReg || !DeadStore->readsRegister(PhysReg))
        continue;
      UnfoldPR = PhysReg;
      UnfoldedOpc = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(),
                                                    false, true);
    }
  }

  if (!UnfoldedOpc) {
    if (!UnfoldVR)
      return false;

    // Look for other unfolding opportunities.
    return OptimizeByUnfold2(UnfoldVR, FoldedSS, MII, MaybeDeadStores, Spills,
                             RegKills, KillOps);
  }

  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || MO.getReg() == 0 || !MO.isUse())
      continue;
    unsigned VirtReg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(VirtReg) || MO.getSubReg())
      continue;
    if (VRM->isAssignedReg(VirtReg)) {
      unsigned PhysReg = VRM->getPhys(VirtReg);
      if (PhysReg && TRI->regsOverlap(PhysReg, UnfoldPR))
        return false;
    } else if (VRM->isReMaterialized(VirtReg))
      continue;
    int SS = VRM->getStackSlot(VirtReg);
    unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
    if (PhysReg) {
      if (TRI->regsOverlap(PhysReg, UnfoldPR))
        return false;
      continue;
    }
    if (VRM->hasPhys(VirtReg)) {
      PhysReg = VRM->getPhys(VirtReg);
      if (!TRI->regsOverlap(PhysReg, UnfoldPR))
        continue;
    }

    // Ok, we'll need to reload the value into a register which makes
    // it impossible to perform the store unfolding optimization later.
    // Let's see if it is possible to fold the load if the store is
    // unfolded. This allows us to perform the store unfolding
    // optimization.
    SmallVector<MachineInstr*, 4> NewMIs;
    if (TII->unfoldMemoryOperand(MF, &MI, UnfoldVR, false, false, NewMIs)) {
      assert(NewMIs.size() == 1);
      MachineInstr *NewMI = NewMIs.back();
      NewMIs.clear();
      int Idx = NewMI->findRegisterUseOperandIdx(VirtReg, false);
      assert(Idx != -1);
      SmallVector<unsigned, 1> Ops;
      Ops.push_back(Idx);
      MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, NewMI, Ops, SS);
      if (FoldedMI) {
        VRM->addSpillSlotUse(SS, FoldedMI);
        if (!VRM->hasPhys(UnfoldVR))
          VRM->assignVirt2Phys(UnfoldVR, UnfoldPR);
        VRM->virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
        MII = MBB->insert(MII, FoldedMI);
        InvalidateKills(MI, TRI, RegKills, KillOps);
        VRM->RemoveMachineInstrFromMaps(&MI);
        MBB->erase(&MI);
        MF.DeleteMachineInstr(NewMI);
        return true;
      }
      MF.DeleteMachineInstr(NewMI);
    }
  }

  return false;
}
/// CommuteChangesDestination - We are looking for r0 = op r1, r2, where
/// SrcReg is r1 and it is tied to r0. Return true if after commuting this
/// instruction it will be r0 = op r2, r1.
static bool CommuteChangesDestination(MachineInstr *DefMI,
                                      const TargetInstrDesc &TID,
                                      unsigned SrcReg,
                                      const TargetInstrInfo *TII,
                                      unsigned &DstIdx) {
  if (TID.getNumDefs() != 1 || TID.getNumOperands() != 3)
    return false;
  if (!DefMI->getOperand(1).isReg() ||
      DefMI->getOperand(1).getReg() != SrcReg)
    return false;
  unsigned DefIdx;
  if (!DefMI->isRegTiedToDefOperand(1, &DefIdx) || DefIdx != 0)
    return false;
  unsigned SrcIdx1, SrcIdx2;
  if (!TII->findCommutedOpIndices(DefMI, SrcIdx1, SrcIdx2))
    return false;
  if (SrcIdx1 == 1 && SrcIdx2 == 2) {
    DstIdx = 2;
    return true;
  }
  return false;
}
/// CommuteToFoldReload -
/// Look for
/// r1 = load fi#1
/// r1 = op r1, r2<kill>
/// store r1, fi#1
///
/// If op is commutable and r2 is killed, then we can xform these to
/// r2 = op r2, fi#1
/// store r2, fi#1
bool LocalRewriter::
CommuteToFoldReload(MachineBasicBlock::iterator &MII,
                    unsigned VirtReg, unsigned SrcReg, int SS,
                    AvailableSpills &Spills,
                    BitVector &RegKills,
                    std::vector<MachineOperand*> &KillOps,
                    const TargetRegisterInfo *TRI) {
  if (MII == MBB->begin() || !MII->killsRegister(SrcReg))
    return false;

  MachineFunction &MF = *MBB->getParent();
  MachineInstr &MI = *MII;
  MachineBasicBlock::iterator DefMII = prior(MII);
  MachineInstr *DefMI = DefMII;
  const TargetInstrDesc &TID = DefMI->getDesc();
  unsigned NewDstIdx;
  if (DefMII != MBB->begin() &&
      TID.isCommutable() &&
      CommuteChangesDestination(DefMI, TID, SrcReg, TII, NewDstIdx)) {
    MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx);
    unsigned NewReg = NewDstMO.getReg();
    if (!NewDstMO.isKill() || TRI->regsOverlap(NewReg, SrcReg))
      return false;
    MachineInstr *ReloadMI = prior(DefMII);
    int FrameIdx;
    unsigned DestReg = TII->isLoadFromStackSlot(ReloadMI, FrameIdx);
    if (DestReg != SrcReg || FrameIdx != SS)
      return false;
    int UseIdx = DefMI->findRegisterUseOperandIdx(DestReg, false);
    if (UseIdx == -1)
      return false;
    unsigned DefIdx;
    if (!MI.isRegTiedToDefOperand(UseIdx, &DefIdx))
      return false;
    assert(DefMI->getOperand(DefIdx).isReg() &&
           DefMI->getOperand(DefIdx).getReg() == SrcReg);

    // Now commute def instruction.
    MachineInstr *CommutedMI = TII->commuteInstruction(DefMI, true);
    if (!CommutedMI)
      return false;
    SmallVector<unsigned, 1> Ops;
    Ops.push_back(NewDstIdx);
    MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, CommutedMI, Ops, SS);
    // Not needed since foldMemoryOperand returns new MI.
    MF.DeleteMachineInstr(CommutedMI);
    if (!FoldedMI)
      return false;

    VRM->addSpillSlotUse(SS, FoldedMI);
    VRM->virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
    // Insert new def MI and spill MI.
    const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
    TII->storeRegToStackSlot(*MBB, &MI, NewReg, true, SS, RC);
    MII = prior(MII);
    MachineInstr *StoreMI = MII;
    VRM->addSpillSlotUse(SS, StoreMI);
    VRM->virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
    MII = MBB->insert(MII, FoldedMI);  // Update MII to backtrack.

    // Delete all 3 old instructions.
    InvalidateKills(*ReloadMI, TRI, RegKills, KillOps);
    VRM->RemoveMachineInstrFromMaps(ReloadMI);
    MBB->erase(ReloadMI);
    InvalidateKills(*DefMI, TRI, RegKills, KillOps);
    VRM->RemoveMachineInstrFromMaps(DefMI);
    MBB->erase(DefMI);
    InvalidateKills(MI, TRI, RegKills, KillOps);
    VRM->RemoveMachineInstrFromMaps(&MI);
    MBB->erase(&MI);

    // If NewReg was previously holding value of some SS, it's now clobbered.
    // This has to be done now because it's a physical register. When this
    // instruction is re-visited, it's ignored.
    Spills.ClobberPhysReg(NewReg);

    ++NumCommutes;
    return true;
  }

  return false;
}
/// SpillRegToStackSlot - Spill a register to a specified stack slot. Check if
/// the last store to the same slot is now dead. If so, remove the last store.
void LocalRewriter::
SpillRegToStackSlot(MachineBasicBlock::iterator &MII,
                    int Idx, unsigned PhysReg, int StackSlot,
                    const TargetRegisterClass *RC,
                    bool isAvailable, MachineInstr *&LastStore,
                    AvailableSpills &Spills,
                    SmallSet<MachineInstr*, 4> &ReMatDefs,
                    BitVector &RegKills,
                    std::vector<MachineOperand*> &KillOps) {

  MachineBasicBlock::iterator oldNextMII = llvm::next(MII);
  TII->storeRegToStackSlot(*MBB, llvm::next(MII), PhysReg, true, StackSlot, RC);
  MachineInstr *StoreMI = prior(oldNextMII);
  VRM->addSpillSlotUse(StackSlot, StoreMI);
  DEBUG(dbgs() << "Store:\t" << *StoreMI);

  // If there is a dead store to this stack slot, nuke it now.
  if (LastStore) {
    DEBUG(dbgs() << "Removed dead store:\t" << *LastStore);
    ++NumDSE;
    SmallVector<unsigned, 2> KillRegs;
    InvalidateKills(*LastStore, TRI, RegKills, KillOps, &KillRegs);
    MachineBasicBlock::iterator PrevMII = LastStore;
    bool CheckDef = PrevMII != MBB->begin();
    if (CheckDef)
      --PrevMII;
    VRM->RemoveMachineInstrFromMaps(LastStore);
    MBB->erase(LastStore);
    if (CheckDef) {
      // Look at defs of killed registers on the store. Mark the defs
      // as dead since the store has been deleted and they aren't
      // being reused.
      for (unsigned j = 0, ee = KillRegs.size(); j != ee; ++j) {
        bool HasOtherDef = false;
        if (InvalidateRegDef(PrevMII, *MII, KillRegs[j], HasOtherDef, TRI)) {
          MachineInstr *DeadDef = PrevMII;
          if (ReMatDefs.count(DeadDef) && !HasOtherDef) {
            // FIXME: This assumes a remat def does not have side effects.
            VRM->RemoveMachineInstrFromMaps(DeadDef);
            MBB->erase(DeadDef);
            ++NumDRM;
          }
        }
      }
    }
  }

  // Allow for multi-instruction spill sequences, as on PPC Altivec. Presume
  // the last of multiple instructions is the actual store.
  LastStore = prior(oldNextMII);

  // If the stack slot value was previously available in some other
  // register, change it now. Otherwise, make the register available,
  // in PhysReg.
  Spills.ModifyStackSlotOrReMat(StackSlot);
  Spills.ClobberPhysReg(PhysReg);
  Spills.addAvailable(StackSlot, PhysReg, isAvailable);
  ++NumStores;
}
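
// Illustrative dead-store elision (hypothetical x86 block):
//   MOV32mr <fi#3>, EAX    <- LastStore; fi#3 is never reloaded in between
//   ...
//   MOV32mr <fi#3>, EBX    <- store just emitted above
// The first store can never be observed, so it is removed (NumDSE), and defs
// that only fed the dead store may be marked dead as well.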
/// isSafeToDelete - Return true if this instruction doesn't produce any side
/// effect and all of its defs are dead.
static bool isSafeToDelete(MachineInstr &MI) {
  const TargetInstrDesc &TID = MI.getDesc();
  if (TID.mayLoad() || TID.mayStore() || TID.isCall() || TID.isTerminator() ||
      TID.isBarrier() || TID.isReturn() ||
      TID.hasUnmodeledSideEffects())
    return false;
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.getReg())
      continue;
    if (MO.isDef() && !MO.isDead())
      return false;
    if (MO.isUse() && MO.isKill())
      // FIXME: We can't remove kill markers or else the scavenger will assert.
      // An alternative is to add a ADD pseudo instruction to replace kill
      // markers.
      return false;
  }
  return true;
}
/// TransferDeadness - An identity copy definition is dead and it's being
/// removed. Find the last def or use and mark it as dead / kill.
void LocalRewriter::
TransferDeadness(unsigned Reg, BitVector &RegKills,
                 std::vector<MachineOperand*> &KillOps) {
  SmallPtrSet<MachineInstr*, 4> Seens;
  SmallVector<std::pair<MachineInstr*, int>,8> Refs;
  for (MachineRegisterInfo::reg_iterator RI = MRI->reg_begin(Reg),
         RE = MRI->reg_end(); RI != RE; ++RI) {
    MachineInstr *UDMI = &*RI;
    if (UDMI->isDebugValue() || UDMI->getParent() != MBB)
      continue;
    DenseMap<MachineInstr*, unsigned>::iterator DI = DistanceMap.find(UDMI);
    if (DI == DistanceMap.end())
      continue;
    if (Seens.insert(UDMI))
      Refs.push_back(std::make_pair(UDMI, DI->second));
  }

  if (Refs.empty())
    return;
  std::sort(Refs.begin(), Refs.end(), RefSorter());

  while (!Refs.empty()) {
    MachineInstr *LastUDMI = Refs.back().first;
    Refs.pop_back();

    MachineOperand *LastUD = NULL;
    for (unsigned i = 0, e = LastUDMI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = LastUDMI->getOperand(i);
      if (!MO.isReg() || MO.getReg() != Reg)
        continue;
      if (!LastUD || (LastUD->isUse() && MO.isDef()))
        LastUD = &MO;
      if (LastUDMI->isRegTiedToDefOperand(i))
        return;
    }
    if (LastUD->isDef()) {
      // If the instruction has no side effect, delete it and propagate
      // backward further. Otherwise, mark it dead and we are done.
      if (!isSafeToDelete(*LastUDMI)) {
        LastUD->setIsDead();
        break;
      }
      VRM->RemoveMachineInstrFromMaps(LastUDMI);
      MBB->erase(LastUDMI);
    } else {
      LastUD->setIsKill();
      RegKills.set(Reg);
      KillOps[Reg] = LastUD;
      break;
    }
  }
}
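
// Illustrative (hypothetical): when a dead identity copy "EAX = MOV32rr EAX"
// is removed, the most recent remaining reference to EAX in the block
// inherits its fate: a use gets the kill marker; a def is marked dead, or is
// deleted outright when isSafeToDelete allows, and we keep propagating back.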
/// InsertEmergencySpills - Insert emergency spills before MI if requested by
/// VRM. Return true if spills were inserted.
bool LocalRewriter::InsertEmergencySpills(MachineInstr *MI) {
  if (!VRM->hasEmergencySpills(MI))
    return false;
  MachineBasicBlock::iterator MII = MI;
  SmallSet<int, 4> UsedSS;
  std::vector<unsigned> &EmSpills = VRM->getEmergencySpills(MI);
  for (unsigned i = 0, e = EmSpills.size(); i != e; ++i) {
    unsigned PhysReg = EmSpills[i];
    const TargetRegisterClass *RC = TRI->getPhysicalRegisterRegClass(PhysReg);
    assert(RC && "Unable to determine register class!");
    int SS = VRM->getEmergencySpillSlot(RC);
    if (UsedSS.count(SS))
      llvm_unreachable("Need to spill more than one physical register!");
    UsedSS.insert(SS);

    TII->storeRegToStackSlot(*MBB, MII, PhysReg, true, SS, RC);
    MachineInstr *StoreMI = prior(MII);
    VRM->addSpillSlotUse(SS, StoreMI);

    // Back-schedule reloads and remats.
    MachineBasicBlock::iterator InsertLoc =
      ComputeReloadLoc(llvm::next(MII), MBB->begin(), PhysReg, TRI, false, SS,
                       TII, *MBB->getParent());

    TII->loadRegFromStackSlot(*MBB, InsertLoc, PhysReg, SS, RC);

    MachineInstr *LoadMI = prior(InsertLoc);
    VRM->addSpillSlotUse(SS, LoadMI);
    ++NumPSpills;
    DistanceMap.insert(std::make_pair(LoadMI, DistanceMap.size()));
  }
  return true;
}
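
// The emitted sequence brackets MI, roughly (illustrative):
//   store %physreg -> fi#emergency   ; spill before MI
//   MI                               ; instruction that needed the scratch reg
//   %physreg = load fi#emergency     ; reload, possibly back-scheduled earlier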
/// InsertRestores - Restore registers before MI if requested by VRM. Return
/// true if any instructions were inserted.
bool LocalRewriter::InsertRestores(MachineInstr *MI,
                                   AvailableSpills &Spills,
                                   BitVector &RegKills,
                                   std::vector<MachineOperand*> &KillOps) {
  if (!VRM->isRestorePt(MI))
    return false;
  MachineBasicBlock::iterator MII = MI;
  std::vector<unsigned> &RestoreRegs = VRM->getRestorePtRestores(MI);
  for (unsigned i = 0, e = RestoreRegs.size(); i != e; ++i) {
    unsigned VirtReg = RestoreRegs[e-i-1];  // Reverse order.
    if (!VRM->getPreSplitReg(VirtReg))
      continue; // Split interval spilled again.
    unsigned Phys = VRM->getPhys(VirtReg);
    MRI->setPhysRegUsed(Phys);

    // Check if the value being restored is available. If so, it must be
    // from a predecessor BB that falls through into this BB. We do not
    // expect:
    // BB1:
    // r1 = load fi#1
    // ...
    //    = r1<kill>
    // ... # r1 not clobbered
    // ...
    //    = load fi#1
    bool DoReMat = VRM->isReMaterialized(VirtReg);
    int SSorRMId = DoReMat
      ? VRM->getReMatId(VirtReg) : VRM->getStackSlot(VirtReg);
    const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
    unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);
    if (InReg == Phys) {
      // If the value is already available in the expected register, save
      // a reload / remat.
      if (SSorRMId > VirtRegMap::MAX_STACK_SLOT)
        DEBUG(dbgs() << "Reusing RM#"
                     << SSorRMId-VirtRegMap::MAX_STACK_SLOT-1);
      else
        DEBUG(dbgs() << "Reusing SS#" << SSorRMId);
      DEBUG(dbgs() << " from physreg "
                   << TRI->getName(InReg) << " for vreg"
                   << VirtReg << " instead of reloading into physreg "
                   << TRI->getName(Phys) << '\n');
      ++NumOmitted;
      continue;
    } else if (InReg && InReg != Phys) {
      if (SSorRMId > VirtRegMap::MAX_STACK_SLOT)
        DEBUG(dbgs() << "Reusing RM#"
                     << SSorRMId-VirtRegMap::MAX_STACK_SLOT-1);
      else
        DEBUG(dbgs() << "Reusing SS#" << SSorRMId);
      DEBUG(dbgs() << " from physreg "
                   << TRI->getName(InReg) << " for vreg"
                   << VirtReg << " by copying it into physreg "
                   << TRI->getName(Phys) << '\n');

      // If the reloaded / remat value is available in another register,
      // copy it to the desired register.

      // Back-schedule reloads and remats.
      MachineBasicBlock::iterator InsertLoc =
        ComputeReloadLoc(MII, MBB->begin(), Phys, TRI, DoReMat, SSorRMId, TII,
                         *MBB->getParent());

      TII->copyRegToReg(*MBB, InsertLoc, Phys, InReg, RC, RC);

      // This invalidates Phys.
      Spills.ClobberPhysReg(Phys);
      // Remember it's available.
      Spills.addAvailable(SSorRMId, Phys);

      // Mark it killed.
      MachineInstr *CopyMI = prior(InsertLoc);
      CopyMI->setAsmPrinterFlag(MachineInstr::ReloadReuse);
      MachineOperand *KillOpnd = CopyMI->findRegisterUseOperand(InReg);
      KillOpnd->setIsKill();
      UpdateKills(*CopyMI, TRI, RegKills, KillOps);

      DEBUG(dbgs() << '\t' << *CopyMI);
      ++NumCopified;
      continue;
    }

    // Back-schedule reloads and remats.
    MachineBasicBlock::iterator InsertLoc =
      ComputeReloadLoc(MII, MBB->begin(), Phys, TRI, DoReMat, SSorRMId, TII,
                       *MBB->getParent());

    if (VRM->isReMaterialized(VirtReg)) {
      ReMaterialize(*MBB, InsertLoc, Phys, VirtReg, TII, TRI, *VRM);
    } else {
      const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
      TII->loadRegFromStackSlot(*MBB, InsertLoc, Phys, SSorRMId, RC);
      MachineInstr *LoadMI = prior(InsertLoc);
      VRM->addSpillSlotUse(SSorRMId, LoadMI);
      ++NumLoads;
      DistanceMap.insert(std::make_pair(LoadMI, DistanceMap.size()));
    }

    // This invalidates Phys.
    Spills.ClobberPhysReg(Phys);
    // Remember it's available.
    Spills.addAvailable(SSorRMId, Phys);

    UpdateKills(*prior(InsertLoc), TRI, RegKills, KillOps);
    DEBUG(dbgs() << '\t' << *prior(MII));
  }
  return true;
}
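
// Illustrative example (hypothetical registers): if fi#1 is already live in
// %EDX thanks to a fall-through predecessor, a restore into %EDX is dropped
// entirely (NumOmitted), and a restore into %ECX becomes the cheaper
// "%ECX = copy %EDX" (NumCopified) instead of a load from the stack slot.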
/// InsertSpills - Insert spills after MI if requested by VRM. Return
/// true if spills were inserted.
bool LocalRewriter::InsertSpills(MachineInstr *MI) {
  if (!VRM->isSpillPt(MI))
    return false;
  MachineBasicBlock::iterator MII = MI;
  std::vector<std::pair<unsigned,bool> > &SpillRegs =
    VRM->getSpillPtSpills(MI);
  for (unsigned i = 0, e = SpillRegs.size(); i != e; ++i) {
    unsigned VirtReg = SpillRegs[i].first;
    bool isKill = SpillRegs[i].second;
    if (!VRM->getPreSplitReg(VirtReg))
      continue; // Split interval spilled again.
    const TargetRegisterClass *RC = MRI->getRegClass(VirtReg);
    unsigned Phys = VRM->getPhys(VirtReg);
    int StackSlot = VRM->getStackSlot(VirtReg);
    MachineBasicBlock::iterator oldNextMII = llvm::next(MII);
    TII->storeRegToStackSlot(*MBB, llvm::next(MII), Phys, isKill, StackSlot,
                             RC);
    MachineInstr *StoreMI = prior(oldNextMII);
    VRM->addSpillSlotUse(StackSlot, StoreMI);
    DEBUG(dbgs() << "Store:\t" << *StoreMI);
    VRM->virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
  }
  return true;
}
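
// E.g. (illustrative): at a spill point where %reg1024 lives in %EAX with
// stack slot fi#3, this emits "store %EAX<kill> -> fi#3" right after MI and
// records the store with the VirtRegMap.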
/// rewriteMBB - Keep track of which spills are available even after the
/// register allocator is done with them. If possible, avoid reloading vregs.
void
LocalRewriter::RewriteMBB(LiveIntervals *LIs,
                          AvailableSpills &Spills, BitVector &RegKills,
                          std::vector<MachineOperand*> &KillOps) {

  DEBUG(dbgs() << "\n**** Local spiller rewriting MBB '"
               << MBB->getName() << "':\n");

  MachineFunction &MF = *MBB->getParent();

  // MaybeDeadStores - When we need to write a value back into a stack slot,
  // keep track of the inserted store. If the stack slot value is never read
  // (because the value was used from some available register, for example), and
  // subsequently stored to, the original store is dead. This map keeps track
  // of inserted stores that are not used. If we see a subsequent store to the
  // same stack slot, the original store is deleted.
  std::vector<MachineInstr*> MaybeDeadStores;
  MaybeDeadStores.resize(MF.getFrameInfo()->getObjectIndexEnd(), NULL);

  // ReMatDefs - These are rematerializable def MIs which are not deleted.
  SmallSet<MachineInstr*, 4> ReMatDefs;

  // Keep track of the registers we have already marked killed in MI in case
  // there are multiple uses of the same virtual register.
  SmallSet<unsigned, 2> KilledMIRegs;

  // Clear kill info.
  RegKills.reset();
  KillOps.clear();
  KillOps.resize(TRI->getNumRegs(), NULL);

  DistanceMap.clear();
  for (MachineBasicBlock::iterator MII = MBB->begin(), E = MBB->end();
       MII != E; ) {
    MachineBasicBlock::iterator NextMII = llvm::next(MII);

    if (OptimizeByUnfold(MII, MaybeDeadStores, Spills, RegKills, KillOps))
      NextMII = llvm::next(MII);

    if (InsertEmergencySpills(MII))
      NextMII = llvm::next(MII);

    InsertRestores(MII, Spills, RegKills, KillOps);

    if (InsertSpills(MII))
      NextMII = llvm::next(MII);

    VirtRegMap::MI2VirtMapTy::const_iterator I, End;
    bool Erased = false;
    bool BackTracked = false;
    MachineInstr &MI = *MII;
    /// ReusedOperands - Keep track of operand reuse in case we need to undo
    /// reuse.
    ReuseInfo ReusedOperands(MI, TRI);
    SmallVector<unsigned, 4> VirtUseOps;
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI.getOperand(i);
      if (!MO.isReg() || MO.getReg() == 0)
        continue;   // Ignore non-register operands.

      unsigned VirtReg = MO.getReg();
      if (TargetRegisterInfo::isPhysicalRegister(VirtReg)) {
        // Ignore physregs for spilling, but remember that it is used by this
        // function.
        MRI->setPhysRegUsed(VirtReg);
        continue;
      }

      // We want to process implicit virtual register uses first.
      if (MO.isImplicit())
        // If the virtual register is implicitly defined, emit an implicit_def
        // before so the scavenger knows it's "defined".
        // FIXME: This is a horrible hack done by the register allocator to
        // remat a definition with a virtual register operand.
        VirtUseOps.insert(VirtUseOps.begin(), i);
      else
        VirtUseOps.push_back(i);
    }
    // Process all of the spilled uses and all non-spilled reg references.
    SmallVector<int, 2> PotentialDeadStoreSlots;
    KilledMIRegs.clear();
    for (unsigned j = 0, e = VirtUseOps.size(); j != e; ++j) {
      unsigned i = VirtUseOps[j];
      unsigned VirtReg = MI.getOperand(i).getReg();
      assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
             "Not a virtual register?");

      unsigned SubIdx = MI.getOperand(i).getSubReg();
      if (VRM->isAssignedReg(VirtReg)) {
        // This virtual register was assigned a physreg!
        unsigned Phys = VRM->getPhys(VirtReg);
        MRI->setPhysRegUsed(Phys);
        if (MI.getOperand(i).isDef())
          ReusedOperands.markClobbered(Phys);
        substitutePhysReg(MI.getOperand(i), Phys, *TRI);
        if (VRM->isImplicitlyDefined(VirtReg))
          // FIXME: Is this needed?
          BuildMI(*MBB, &MI, MI.getDebugLoc(),
                  TII->get(TargetOpcode::IMPLICIT_DEF), Phys);
        continue;
      }

      // This virtual register is now known to be a spilled value.
      if (!MI.getOperand(i).isUse())
        continue;  // Handle defs in the loop below (handle use&def here though)
      bool AvoidReload = MI.getOperand(i).isUndef();
      // Check if it is defined by an implicit def. It should not be spilled.
      // Note, this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      bool DoReMat = VRM->isReMaterialized(VirtReg);
      int SSorRMId = DoReMat
        ? VRM->getReMatId(VirtReg) : VRM->getStackSlot(VirtReg);
      int ReuseSlot = SSorRMId;

      // Check to see if this stack slot is available.
      unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);

      // If this is a sub-register use, make sure the reuse register is in the
      // right register class. For example, for x86 not all of the 32-bit
      // registers have accessible sub-registers.
      // Similarly so for EXTRACT_SUBREG. Consider this:
      // EDI = op
      // MOV32_mr fi#1, EDI
      // ...
      //       = EXTRACT_SUBREG fi#1
      // fi#1 is available in EDI, but it cannot be reused because it's not in
      // the right register file.
      if (PhysReg && !AvoidReload && (SubIdx || MI.isExtractSubreg())) {
        const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
        if (!RC->contains(PhysReg))
          PhysReg = 0;
      }
      if (PhysReg && !AvoidReload) {
        // This spilled operand might be part of a two-address operand. If this
        // is the case, then changing it will necessarily require changing the
        // def part of the instruction as well. However, in some cases, we
        // aren't allowed to modify the reused register. If none of these cases
        // apply, reuse it.
        bool CanReuse = true;
        bool isTied = MI.isRegTiedToDefOperand(i);
        if (isTied) {
          // Okay, we have a two address operand. We can reuse this physreg as
          // long as we are allowed to clobber the value and there isn't an
          // earlier def that has already clobbered the physreg.
          CanReuse = !ReusedOperands.isClobbered(PhysReg) &&
            Spills.canClobberPhysReg(PhysReg);
        }

        if (CanReuse) {
          // If this stack slot value is already available, reuse it!
          if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
            DEBUG(dbgs() << "Reusing RM#"
                         << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1);
          else
            DEBUG(dbgs() << "Reusing SS#" << ReuseSlot);
          DEBUG(dbgs() << " from physreg "
                       << TRI->getName(PhysReg) << " for vreg"
                       << VirtReg << " instead of reloading into physreg "
                       << TRI->getName(VRM->getPhys(VirtReg)) << '\n');
          unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
          MI.getOperand(i).setReg(RReg);
          MI.getOperand(i).setSubReg(0);

          // The only technical detail we have is that we don't know that
          // PhysReg won't be clobbered by a reloaded stack slot that occurs
          // later in the instruction. In particular, consider 'op V1, V2'.
          // If V1 is available in physreg R0, we would choose to reuse it
          // here, instead of reloading it into the register the allocator
          // indicated (say R1). However, V2 might have to be reloaded
          // later, and it might indicate that it needs to live in R0. When
          // this occurs, we need to have information available that
          // indicates it is safe to use R1 for the reload instead of R0.
          //
          // To further complicate matters, we might conflict with an alias,
          // or R0 and R1 might not be compatible with each other. In this
          // case, we actually insert a reload for V1 in R1, ensuring that
          // we can get at R0 or its alias.
          ReusedOperands.addReuse(i, ReuseSlot, PhysReg,
                                  VRM->getPhys(VirtReg), VirtReg);
          if (isTied)
            // Only mark it clobbered if this is a use&def operand.
            ReusedOperands.markClobbered(PhysReg);
          ++NumReused;

          if (MI.getOperand(i).isKill() &&
              ReuseSlot <= VirtRegMap::MAX_STACK_SLOT) {

            // The store of this spilled value is potentially dead, but we
            // won't know for certain until we've confirmed that the re-use
            // above is valid, which means waiting until the other operands
            // are processed. For now we just track the spill slot, we'll
            // remove it after the other operands are processed if valid.

            PotentialDeadStoreSlots.push_back(ReuseSlot);
          }

          // Mark it isKill if there are no other uses of the same virtual
          // register and it's not a two-address operand. IsKill will be
          // unset if the reg is reused.
          if (!isTied && KilledMIRegs.count(VirtReg) == 0) {
            MI.getOperand(i).setIsKill();
            KilledMIRegs.insert(VirtReg);
          }

          continue;
        }  // CanReuse
        // Otherwise we have a situation where we have a two-address instruction
        // whose mod/ref operand needs to be reloaded. This reload is already
        // available in some register "PhysReg", but if we used PhysReg as the
        // operand to our 2-addr instruction, the instruction would modify
        // PhysReg. This isn't cool if something later uses PhysReg and expects
        // to get its initial value.
        //
        // To avoid this problem, and to avoid doing a load right after a store,
        // we emit a copy from PhysReg into the designated register for this
        // operand.
        unsigned DesignatedReg = VRM->getPhys(VirtReg);
        assert(DesignatedReg && "Must map virtreg to physreg!");

        // Note that, if we reused a register for a previous operand, the
        // register we want to reload into might not actually be
        // available. If this occurs, use the register indicated by the
        // reuser.
        if (ReusedOperands.hasReuses())
          DesignatedReg = ReusedOperands.
            GetRegForReload(VirtReg, DesignatedReg, &MI, Spills,
                            MaybeDeadStores, RegKills, KillOps, *VRM);

        // If the mapped designated register is actually the physreg we have
        // incoming, we don't need to insert a dead copy.
        if (DesignatedReg == PhysReg) {
          // If this stack slot value is already available, reuse it!
          if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
            DEBUG(dbgs() << "Reusing RM#"
                         << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1);
          else
            DEBUG(dbgs() << "Reusing SS#" << ReuseSlot);
          DEBUG(dbgs() << " from physreg " << TRI->getName(PhysReg)
                       << " for vreg" << VirtReg
                       << " instead of reloading into same physreg.\n");
          unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
          MI.getOperand(i).setReg(RReg);
          MI.getOperand(i).setSubReg(0);
          ReusedOperands.markClobbered(RReg);
          ++NumReused;
          continue;
        }

        const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
        MRI->setPhysRegUsed(DesignatedReg);
        ReusedOperands.markClobbered(DesignatedReg);

        // Back-schedule reloads and remats.
        MachineBasicBlock::iterator InsertLoc =
          ComputeReloadLoc(&MI, MBB->begin(), PhysReg, TRI, DoReMat,
                           SSorRMId, TII, MF);

        TII->copyRegToReg(*MBB, InsertLoc, DesignatedReg, PhysReg, RC, RC);

        MachineInstr *CopyMI = prior(InsertLoc);
        CopyMI->setAsmPrinterFlag(MachineInstr::ReloadReuse);
        UpdateKills(*CopyMI, TRI, RegKills, KillOps);

        // This invalidates DesignatedReg.
        Spills.ClobberPhysReg(DesignatedReg);

        Spills.addAvailable(ReuseSlot, DesignatedReg);
        unsigned RReg =
          SubIdx ? TRI->getSubReg(DesignatedReg, SubIdx) : DesignatedReg;
        MI.getOperand(i).setReg(RReg);
        MI.getOperand(i).setSubReg(0);
        DEBUG(dbgs() << '\t' << *prior(MII));
        ++NumReused;
        continue;
      } // if (PhysReg)
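
      // Illustrative example (hypothetical registers) of the case above: for
      // a two-address "%EAX = ADD32rr %EAX<tied>, ..." whose reloaded value
      // is available in %EBX, we emit "%EAX = copy %EBX" before the ADD so
      // the ADD may clobber %EAX while %EBX keeps the still-available value.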
      // Otherwise, reload it and remember that we have it.
      PhysReg = VRM->getPhys(VirtReg);
      assert(PhysReg && "Must map virtreg to physreg!");

      // Note that, if we reused a register for a previous operand, the
      // register we want to reload into might not actually be
      // available. If this occurs, use the register indicated by the
      // reuser.
      if (ReusedOperands.hasReuses())
        PhysReg = ReusedOperands.GetRegForReload(VirtReg, PhysReg, &MI,
                    Spills, MaybeDeadStores, RegKills, KillOps, *VRM);

      MRI->setPhysRegUsed(PhysReg);
      ReusedOperands.markClobbered(PhysReg);
      if (AvoidReload)
        ++NumAvoided;
      else {
        // Back-schedule reloads and remats.
        MachineBasicBlock::iterator InsertLoc =
          ComputeReloadLoc(MII, MBB->begin(), PhysReg, TRI, DoReMat,
                           SSorRMId, TII, MF);

        if (DoReMat) {
          ReMaterialize(*MBB, InsertLoc, PhysReg, VirtReg, TII, TRI, *VRM);
        } else {
          const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
          TII->loadRegFromStackSlot(*MBB, InsertLoc, PhysReg, SSorRMId, RC);
          MachineInstr *LoadMI = prior(InsertLoc);
          VRM->addSpillSlotUse(SSorRMId, LoadMI);
          ++NumLoads;
          DistanceMap.insert(std::make_pair(LoadMI, DistanceMap.size()));
        }
        // This invalidates PhysReg.
        Spills.ClobberPhysReg(PhysReg);

        // Any stores to this stack slot are not dead anymore.
        if (!DoReMat)
          MaybeDeadStores[SSorRMId] = NULL;
        Spills.addAvailable(SSorRMId, PhysReg);
        // Assumes this is the last use. IsKill will be unset if reg is reused
        // unless it's a two-address operand.
        if (!MI.isRegTiedToDefOperand(i) &&
            KilledMIRegs.count(VirtReg) == 0) {
          MI.getOperand(i).setIsKill();
          KilledMIRegs.insert(VirtReg);
        }

        UpdateKills(*prior(InsertLoc), TRI, RegKills, KillOps);
        DEBUG(dbgs() << '\t' << *prior(InsertLoc));
      }
      unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
      MI.getOperand(i).setReg(RReg);
      MI.getOperand(i).setSubReg(0);
    }
    // Ok - now we can remove stores that have been confirmed dead.
    for (unsigned j = 0, e = PotentialDeadStoreSlots.size(); j != e; ++j) {
      // This was the last use and the spilled value is still available
      // for reuse. That means the spill was unnecessary!
      int PDSSlot = PotentialDeadStoreSlots[j];
      MachineInstr* DeadStore = MaybeDeadStores[PDSSlot];
      if (DeadStore) {
        DEBUG(dbgs() << "Removed dead store:\t" << *DeadStore);
        InvalidateKills(*DeadStore, TRI, RegKills, KillOps);
        VRM->RemoveMachineInstrFromMaps(DeadStore);
        MBB->erase(DeadStore);
        MaybeDeadStores[PDSSlot] = NULL;
        ++NumDSE;
      }
    }

    DEBUG(dbgs() << '\t' << MI);
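
    // Illustrative dead-store sequence (hypothetical registers):
    //   store %EAX -> fi#2        ; tracked in MaybeDeadStores
    //   ... = use %EAX<kill>      ; last use reused %EAX; fi#2 never read
    // Once the reuse is confirmed valid, the store is provably dead and is
    // erased here (counted by NumDSE).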
    // If we have folded references to memory operands, make sure we clear all
    // physical registers that may contain the value of the spilled virtual
    // register.
    SmallSet<int, 2> FoldedSS;
    for (tie(I, End) = VRM->getFoldedVirts(&MI); I != End; ) {
      unsigned VirtReg = I->second.first;
      VirtRegMap::ModRef MR = I->second.second;
      DEBUG(dbgs() << "Folded vreg: " << VirtReg << " MR: " << MR);

      // MI2VirtMap can be updated, which invalidates the iterator, so
      // increment the iterator first.
      ++I;
      int SS = VRM->getStackSlot(VirtReg);
      if (SS == VirtRegMap::NO_STACK_SLOT)
        continue;
      FoldedSS.insert(SS);
      DEBUG(dbgs() << " - StackSlot: " << SS << "\n");

      // If this folded instruction is just a use, check to see if it's a
      // straight load from the virt reg slot.
      if ((MR & VirtRegMap::isRef) && !(MR & VirtRegMap::isMod)) {
        int FrameIdx;
        unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx);
        if (DestReg && FrameIdx == SS) {
          // If this spill slot is available, turn it into a copy (or nothing)
          // instead of leaving it as a load!
          if (unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SS)) {
            DEBUG(dbgs() << "Promoted Load To Copy: " << MI);
            if (DestReg != InReg) {
              const TargetRegisterClass *RC = MRI->getRegClass(VirtReg);
              TII->copyRegToReg(*MBB, &MI, DestReg, InReg, RC, RC);
              MachineOperand *DefMO = MI.findRegisterDefOperand(DestReg);
              unsigned SubIdx = DefMO->getSubReg();
              // Revisit the copy so we make sure to notice the effects of the
              // operation on the destreg (either needing to RA it if it's
              // virtual or needing to clobber any values if it's physical).
              NextMII = &MI;
              --NextMII;  // backtrack to the copy.
              NextMII->setAsmPrinterFlag(MachineInstr::ReloadReuse);
              // Propagate the sub-register index over.
              if (SubIdx) {
                DefMO = NextMII->findRegisterDefOperand(DestReg);
                DefMO->setSubReg(SubIdx);
              }

              // Mark it killed.
              MachineOperand *KillOpnd = NextMII->findRegisterUseOperand(InReg);
              KillOpnd->setIsKill();

              BackTracked = true;
            } else {
              DEBUG(dbgs() << "Removing now-noop copy: " << MI);
              // Unset last kill since it's being reused.
              InvalidateKill(InReg, TRI, RegKills, KillOps);
              Spills.disallowClobberPhysReg(InReg);
            }

            InvalidateKills(MI, TRI, RegKills, KillOps);
            VRM->RemoveMachineInstrFromMaps(&MI);
            MBB->erase(&MI);
            Erased = true;
            goto ProcessNextInst;
          }
        } else {
          unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
          SmallVector<MachineInstr*, 4> NewMIs;
          if (PhysReg &&
              TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, false, NewMIs)) {
            MBB->insert(MII, NewMIs[0]);
            InvalidateKills(MI, TRI, RegKills, KillOps);
            VRM->RemoveMachineInstrFromMaps(&MI);
            MBB->erase(&MI);
            Erased = true;
            --NextMII;  // backtrack to the unfolded instruction.
            BackTracked = true;
            goto ProcessNextInst;
          }
        }
      }
      // If this reference is not a use, any previous store is now dead.
      // Otherwise, the store to this stack slot is not dead anymore.
      MachineInstr* DeadStore = MaybeDeadStores[SS];
      if (DeadStore) {
        bool isDead = !(MR & VirtRegMap::isRef);
        MachineInstr *NewStore = NULL;
        if (MR & VirtRegMap::isModRef) {
          unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
          SmallVector<MachineInstr*, 4> NewMIs;
          // We can reuse this physreg as long as we are allowed to clobber
          // the value and there isn't an earlier def that has already clobbered
          // the physreg.
          if (PhysReg &&
              !ReusedOperands.isClobbered(PhysReg) &&
              Spills.canClobberPhysReg(PhysReg) &&
              !TII->isStoreToStackSlot(&MI, SS)) { // Not profitable!
            MachineOperand *KillOpnd =
              DeadStore->findRegisterUseOperand(PhysReg, true);
            // Note, if the store is storing a sub-register, it's possible the
            // super-register is needed below.
            if (KillOpnd && !KillOpnd->getSubReg() &&
                TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, true, NewMIs)) {
              MBB->insert(MII, NewMIs[0]);
              NewStore = NewMIs[1];
              MBB->insert(MII, NewStore);
              VRM->addSpillSlotUse(SS, NewStore);
              InvalidateKills(MI, TRI, RegKills, KillOps);
              VRM->RemoveMachineInstrFromMaps(&MI);
              MBB->erase(&MI);
              Erased = true;
              --NextMII;
              --NextMII;  // backtrack to the unfolded instruction.
              BackTracked = true;
              isDead = true;
              ++NumSUnfold;
            }
          }
        }

        if (isDead) { // Previous store is dead.
          // If we get here, the store is dead; nuke it now.
          DEBUG(dbgs() << "Removed dead store:\t" << *DeadStore);
          InvalidateKills(*DeadStore, TRI, RegKills, KillOps);
          VRM->RemoveMachineInstrFromMaps(DeadStore);
          MBB->erase(DeadStore);
          if (!NewStore)
            ++NumDSE;
        }

        MaybeDeadStores[SS] = NULL;
        if (NewStore) {
          // Treat this store as a spill merged into a copy. That makes the
          // stack slot value available.
          VRM->virtFolded(VirtReg, NewStore, VirtRegMap::isMod);
          goto ProcessNextInst;
        }
      }
      // If the spill slot value is available, and this is a new definition of
      // the value, the value is not available anymore.
      if (MR & VirtRegMap::isMod) {
        // Notice that the value in this stack slot has been modified.
        Spills.ModifyStackSlotOrReMat(SS);

        // If this is *just* a mod of the value, check to see if this is just a
        // store to the spill slot (i.e. the spill got merged into the copy). If
        // so, realize that the vreg is available now, and add the store to the
        // MaybeDeadStore info.
        int StackSlot;
        if (!(MR & VirtRegMap::isRef)) {
          if (unsigned SrcReg = TII->isStoreToStackSlot(&MI, StackSlot)) {
            assert(TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
                   "Src hasn't been allocated yet?");

            if (CommuteToFoldReload(MII, VirtReg, SrcReg, StackSlot,
                                    Spills, RegKills, KillOps, TRI)) {
              NextMII = llvm::next(MII);
              BackTracked = true;
              goto ProcessNextInst;
            }

            // Okay, this is certainly a store of SrcReg to [StackSlot]. Mark
            // this as a potentially dead store in case there is a subsequent
            // store into the stack slot without a read from it.
            MaybeDeadStores[StackSlot] = &MI;

            // If the stack slot value was previously available in some other
            // register, change it now. Otherwise, make the register
            // available in PhysReg.
            Spills.addAvailable(StackSlot, SrcReg, MI.killsRegister(SrcReg));
          }
        }
      }
    }
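
    // E.g. (illustrative): a spill that got merged into a copy, such as
    // "store %EAX -> fi#4" with no read of fi#4 in MI, makes fi#4 available
    // in %EAX again and becomes the next MaybeDeadStores candidate for fi#4.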
    // Process all of the spilled defs.
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI.getOperand(i);
      if (!(MO.isReg() && MO.getReg() && MO.isDef()))
        continue;

      unsigned VirtReg = MO.getReg();
      if (!TargetRegisterInfo::isVirtualRegister(VirtReg)) {
        // Check to see if this is a noop copy. If so, eliminate the
        // instruction before considering the dest reg to be changed.
        // Also check if it's copying from an "undef"; if so, we can't
        // eliminate this or else the undef marker is lost and it will
        // confuse the scavenger. This is extremely rare.
        unsigned Src, Dst, SrcSR, DstSR;
        if (TII->isMoveInstr(MI, Src, Dst, SrcSR, DstSR) && Src == Dst &&
            !MI.findRegisterUseOperand(Src)->isUndef()) {
          ++NumDCE;
          DEBUG(dbgs() << "Removing now-noop copy: " << MI);
          SmallVector<unsigned, 2> KillRegs;
          InvalidateKills(MI, TRI, RegKills, KillOps, &KillRegs);
          if (MO.isDead() && !KillRegs.empty()) {
            // Source register or an implicit super/sub-register use is killed.
            assert(KillRegs[0] == Dst ||
                   TRI->isSubRegister(KillRegs[0], Dst) ||
                   TRI->isSuperRegister(KillRegs[0], Dst));
            // Last def is now dead.
            TransferDeadness(Src, RegKills, KillOps);
          }
          VRM->RemoveMachineInstrFromMaps(&MI);
          MBB->erase(&MI);
          Erased = true;
          Spills.disallowClobberPhysReg(VirtReg);
          goto ProcessNextInst;
        }

        // If it's not a no-op copy, it clobbers the value in the destreg.
        Spills.ClobberPhysReg(VirtReg);
        ReusedOperands.markClobbered(VirtReg);

        // Check to see if this instruction is a load from a stack slot into
        // a register. If so, this provides the stack slot value in the reg.
        int FrameIdx;
        if (unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx)) {
          assert(DestReg == VirtReg && "Unknown load situation!");

          // If it is a folded reference, then it's not safe to clobber.
          bool Folded = FoldedSS.count(FrameIdx);
          // Otherwise, if it wasn't available, remember that it is now!
          Spills.addAvailable(FrameIdx, DestReg, !Folded);
          goto ProcessNextInst;
        }

        continue;
      }
      unsigned SubIdx = MO.getSubReg();
      bool DoReMat = VRM->isReMaterialized(VirtReg);
      if (DoReMat)
        ReMatDefs.insert(&MI);

      // The only vregs left are stack slot definitions.
      int StackSlot = VRM->getStackSlot(VirtReg);
      const TargetRegisterClass *RC = MRI->getRegClass(VirtReg);

      // If this def is part of a two-address operand, make sure to execute
      // the store from the correct physical register.
      unsigned PhysReg;
      unsigned TiedOp;
      if (MI.isRegTiedToUseOperand(i, &TiedOp)) {
        PhysReg = MI.getOperand(TiedOp).getReg();
        if (SubIdx) {
          unsigned SuperReg = findSuperReg(RC, PhysReg, SubIdx, TRI);
          assert(SuperReg && TRI->getSubReg(SuperReg, SubIdx) == PhysReg &&
                 "Can't find corresponding super-register!");
          PhysReg = SuperReg;
        }
      } else {
        PhysReg = VRM->getPhys(VirtReg);
        if (ReusedOperands.isClobbered(PhysReg)) {
          // Another def has taken the assigned physreg. It must have been a
          // use&def which got it due to reuse. Undo the reuse!
          PhysReg = ReusedOperands.GetRegForReload(VirtReg, PhysReg, &MI,
                      Spills, MaybeDeadStores, RegKills, KillOps, *VRM);
        }
      }

      assert(PhysReg && "VR not assigned a physical register?");
      MRI->setPhysRegUsed(PhysReg);
      unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
      ReusedOperands.markClobbered(RReg);
      MI.getOperand(i).setReg(RReg);
      MI.getOperand(i).setSubReg(0);

      if (!MO.isDead()) {
        MachineInstr *&LastStore = MaybeDeadStores[StackSlot];
        SpillRegToStackSlot(MII, -1, PhysReg, StackSlot, RC, true,
                            LastStore, Spills, ReMatDefs, RegKills, KillOps);
        NextMII = llvm::next(MII);

        // Check to see if this is a noop copy. If so, eliminate the
        // instruction before considering the dest reg to be changed.
        unsigned Src, Dst, SrcSR, DstSR;
        if (TII->isMoveInstr(MI, Src, Dst, SrcSR, DstSR) && Src == Dst) {
          ++NumDCE;
          DEBUG(dbgs() << "Removing now-noop copy: " << MI);
          InvalidateKills(MI, TRI, RegKills, KillOps);
          VRM->RemoveMachineInstrFromMaps(&MI);
          MBB->erase(&MI);
          Erased = true;
          UpdateKills(*LastStore, TRI, RegKills, KillOps);
          goto ProcessNextInst;
        }
      }
    }
  ProcessNextInst:
    // Delete dead instructions without side effects.
    if (!Erased && !BackTracked && isSafeToDelete(MI)) {
      InvalidateKills(MI, TRI, RegKills, KillOps);
      VRM->RemoveMachineInstrFromMaps(&MI);
      MBB->erase(&MI);
      Erased = true;
    }
    if (!Erased)
      DistanceMap.insert(std::make_pair(&MI, DistanceMap.size()));
    if (!Erased && !BackTracked) {
      for (MachineBasicBlock::iterator II = &MI; II != NextMII; ++II)
        UpdateKills(*II, TRI, RegKills, KillOps);
    }
    MII = NextMII;
  }
}
llvm::VirtRegRewriter* llvm::createVirtRegRewriter() {
  switch (RewriterOpt) {
  default: llvm_unreachable("Unreachable!");
  case local:
    return new LocalRewriter();
  case trivial:
    return new TrivialRewriter();
  }
}
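
// Minimal usage sketch (illustrative; assumes the caller owns a VirtRegMap
// VRM and a LiveIntervals* LIs, as elsewhere in this file):
//   VirtRegRewriter *RW = createVirtRegRewriter(); // honors -rewriter=
//   RW->runOnMachineFunction(MF, VRM, LIs);
//   delete RW;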