//===-- llvm/CodeGen/Rewriter.cpp - Rewriter -----------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "virtregrewriter"
#include "VirtRegRewriter.h"
#include "VirtRegMap.h"
#include "llvm/Function.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;
STATISTIC(NumDSE     , "Number of dead stores elided");
STATISTIC(NumDSS     , "Number of dead spill slots removed");
STATISTIC(NumCommutes, "Number of instructions commuted");
STATISTIC(NumDRM     , "Number of re-materializable defs elided");
STATISTIC(NumStores  , "Number of stores added");
STATISTIC(NumPSpills , "Number of physical register spills");
STATISTIC(NumOmitted , "Number of reloads omitted");
STATISTIC(NumAvoided , "Number of reloads deemed unnecessary");
STATISTIC(NumCopified, "Number of available reloads turned into copies");
STATISTIC(NumReMats  , "Number of re-materializations");
STATISTIC(NumLoads   , "Number of loads added");
STATISTIC(NumReused  , "Number of values reused");
STATISTIC(NumDCE     , "Number of copies elided");
STATISTIC(NumSUnfold , "Number of stores unfolded");
STATISTIC(NumModRefUnfold, "Number of modref unfolded");
enum RewriterName { local, trivial };

static cl::opt<RewriterName>
RewriterOpt("rewriter",
            cl::desc("Rewriter to use (default=local)"),
            cl::init(local),
            cl::values(clEnumVal(local,   "local rewriter"),
                       clEnumVal(trivial, "trivial rewriter"),
                       clEnumValEnd));

static cl::opt<bool>
ScheduleSpills("schedule-spills",
               cl::desc("Schedule spill code"),
               cl::init(false));

VirtRegRewriter::~VirtRegRewriter() {}
/// substitutePhysReg - Replace virtual register in MachineOperand with a
/// physical register. Do the right thing with the sub-register index.
/// Note that operands may be added, so the MO reference is no longer valid.
static void substitutePhysReg(MachineOperand &MO, unsigned Reg,
                              const TargetRegisterInfo &TRI) {
  MO.substPhysReg(Reg, TRI);

  // Any kill flags apply to the full virtual register, so they also apply to
  // the full physical register.
  // We assume that partial defs have already been decorated with a super-reg
  // <imp-def> operand by LiveIntervals.
  MachineInstr &MI = *MO.getParent();
  if (MO.isUse() && !MO.isUndef() &&
      (MO.isKill() || MI.isRegTiedToDefOperand(&MO - &MI.getOperand(0))))
    MI.addRegisterKilled(Reg, &TRI, /*AddIfNotFound=*/ true);
}
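
// Note: the operand's index within MI is recovered by pointer arithmetic
// (&MO - &MI.getOperand(0)), so MO must still point into MI's operand list
// when isRegTiedToDefOperand is queried.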
/// This class is intended for use with the new spilling framework only. It
/// rewrites vreg def/uses to use the assigned preg, but does not insert any
/// spill code.
struct TrivialRewriter : public VirtRegRewriter {

  bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM,
                            LiveIntervals* LIs) {
    DEBUG(dbgs() << "********** REWRITE MACHINE CODE **********\n");
    DEBUG(dbgs() << "********** Function: "
          << MF.getFunction()->getName() << '\n');
    DEBUG(dbgs() << "**** Machine Instrs"
          << "(NOTE! Does not include spills and reloads!) ****\n");

    MachineRegisterInfo *mri = &MF.getRegInfo();
    const TargetRegisterInfo *tri = MF.getTarget().getRegisterInfo();

    bool changed = false;

    for (LiveIntervals::iterator liItr = LIs->begin(), liEnd = LIs->end();
         liItr != liEnd; ++liItr) {

      const LiveInterval *li = liItr->second;
      unsigned reg = li->reg;

      if (TargetRegisterInfo::isPhysicalRegister(reg)) {
        mri->setPhysRegUsed(reg);
      }
      else {
        if (!VRM.hasPhys(reg))
          continue;
        unsigned pReg = VRM.getPhys(reg);
        mri->setPhysRegUsed(pReg);
        // Copy the register use-list before traversing it.
        SmallVector<std::pair<MachineInstr*, unsigned>, 32> reglist;
        for (MachineRegisterInfo::reg_iterator I = mri->reg_begin(reg),
             E = mri->reg_end(); I != E; ++I)
          reglist.push_back(std::make_pair(&*I, I.getOperandNo()));
        for (unsigned N=0; N != reglist.size(); ++N)
          substitutePhysReg(reglist[N].first->getOperand(reglist[N].second),
                            pReg, *tri);
        changed |= !reglist.empty();
      }
    }

    DEBUG(dbgs() << "**** Post Machine Instrs ****\n");

    return changed;
  }

};
// ************************************************************************ //

/// AvailableSpills - As the local rewriter is scanning and rewriting an MBB
/// from top down, keep track of which spill slots or remat are available in
/// each physreg.
///
/// Note that not all physregs are created equal here. In particular, some
/// physregs are reloads that we are allowed to clobber or ignore at any time.
/// Other physregs are values that the register allocated program is using
/// that we cannot CHANGE, but we can read if we like. We keep track of this
/// on a per-stack-slot / remat id basis as the low bit in the value of the
/// SpillSlotsAvailable entries. The predicate 'canClobberPhysReg()' checks
/// this bit, and addAvailable sets it when appropriate.
class AvailableSpills {
  const TargetRegisterInfo *TRI;
  const TargetInstrInfo *TII;

  // SpillSlotsOrReMatsAvailable - This map keeps track of all of the spilled
  // or remat'ed virtual register values that are still available, due to
  // being loaded or stored to, but not invalidated yet.
  std::map<int, unsigned> SpillSlotsOrReMatsAvailable;

  // PhysRegsAvailable - This is the inverse of SpillSlotsOrReMatsAvailable,
  // indicating which stack slot values are currently held by a physreg. This
  // is used to invalidate entries in SpillSlotsOrReMatsAvailable when a
  // physreg is modified.
  std::multimap<unsigned, int> PhysRegsAvailable;

  void disallowClobberPhysRegOnly(unsigned PhysReg);

  void ClobberPhysRegOnly(unsigned PhysReg);
public:
  AvailableSpills(const TargetRegisterInfo *tri, const TargetInstrInfo *tii)
    : TRI(tri), TII(tii) {
  }

  /// clear - Reset the state.
  void clear() {
    SpillSlotsOrReMatsAvailable.clear();
    PhysRegsAvailable.clear();
  }

  const TargetRegisterInfo *getRegInfo() const { return TRI; }

  /// getSpillSlotOrReMatPhysReg - If the specified stack slot or remat is
  /// available in a physical register, return that PhysReg, otherwise
  /// return 0.
  unsigned getSpillSlotOrReMatPhysReg(int Slot) const {
    std::map<int, unsigned>::const_iterator I =
      SpillSlotsOrReMatsAvailable.find(Slot);
    if (I != SpillSlotsOrReMatsAvailable.end()) {
      return I->second >> 1;  // Remove the CanClobber bit.
    }
    return 0;
  }

  /// addAvailable - Mark that the specified stack slot / remat is available
  /// in the specified physreg. If CanClobber is true, the physreg can be
  /// modified at any time without changing the semantics of the program.
  void addAvailable(int SlotOrReMat, unsigned Reg, bool CanClobber = true) {
    // If this stack slot is thought to be available in some other physreg,
    // remove its record.
    ModifyStackSlotOrReMat(SlotOrReMat);

    PhysRegsAvailable.insert(std::make_pair(Reg, SlotOrReMat));
    SpillSlotsOrReMatsAvailable[SlotOrReMat] = (Reg << 1) |
                                               (unsigned)CanClobber;

    if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
      DEBUG(dbgs() << "Remembering RM#"
                   << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1);
    else
      DEBUG(dbgs() << "Remembering SS#" << SlotOrReMat);
    DEBUG(dbgs() << " in physreg " << TRI->getName(Reg)
                 << (CanClobber ? " canclobber" : "") << "\n");
  }
  /// canClobberPhysRegForSS - Return true if the spiller is allowed to change
  /// the value of the specified stackslot register if it desires. The
  /// specified stack slot must be available in a physreg for this query to
  /// make sense.
  bool canClobberPhysRegForSS(int SlotOrReMat) const {
    assert(SpillSlotsOrReMatsAvailable.count(SlotOrReMat) &&
           "Value not available!");
    return SpillSlotsOrReMatsAvailable.find(SlotOrReMat)->second & 1;
  }

  /// canClobberPhysReg - Return true if the spiller is allowed to clobber the
  /// physical register where values for some stack slot(s) might be
  /// available.
  bool canClobberPhysReg(unsigned PhysReg) const {
    std::multimap<unsigned, int>::const_iterator I =
      PhysRegsAvailable.lower_bound(PhysReg);
    while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
      int SlotOrReMat = I->second;
      ++I;
      if (!canClobberPhysRegForSS(SlotOrReMat))
        return false;
    }
    return true;
  }

  /// disallowClobberPhysReg - Unset the CanClobber bit of the specified
  /// stackslot register. The register is still available but is no longer
  /// allowed to be modified.
  void disallowClobberPhysReg(unsigned PhysReg);

  /// ClobberPhysReg - This is called when the specified physreg changes
  /// value. We use this to invalidate any info about stuff that lives in
  /// it and any of its aliases.
  void ClobberPhysReg(unsigned PhysReg);

  /// ModifyStackSlotOrReMat - This method is called when the value in a stack
  /// slot changes. This removes information about which register the
  /// previous value for this slot lives in (as the previous value is dead
  /// now).
  void ModifyStackSlotOrReMat(int SlotOrReMat);

  /// AddAvailableRegsToLiveIn - Availability information is being kept coming
  /// into the specified MBB. Add available physical registers as potential
  /// live-in's. If they are reused in the MBB, they will be added to the
  /// live-in set to keep the register scavenger and post-allocation scheduler
  /// consistent.
  void AddAvailableRegsToLiveIn(MachineBasicBlock &MBB, BitVector &RegKills,
                                std::vector<MachineOperand*> &KillOps);
};
// ************************************************************************ //

// Given a location where a reload of a spilled register or a remat of
// a constant is to be inserted, attempt to find a safe location to
// insert the load at an earlier point in the basic-block, to hide
// latency of the load and to avoid address-generation interlock
// issues.
static MachineBasicBlock::iterator
ComputeReloadLoc(MachineBasicBlock::iterator const InsertLoc,
                 MachineBasicBlock::iterator const Begin,
                 unsigned PhysReg,
                 const TargetRegisterInfo *TRI,
                 bool DoReMat,
                 int SSorRMId,
                 const TargetInstrInfo *TII,
                 const MachineFunction &MF)
{
  if (!ScheduleSpills)
    return InsertLoc;

  // Spill backscheduling is of primary interest to addresses, so
  // don't do anything if the register isn't in the register class
  // used for pointers.

  const TargetLowering *TL = MF.getTarget().getTargetLowering();

  if (!TL->isTypeLegal(TL->getPointerTy()))
    // Believe it or not, this is true on 16-bit targets like PIC16.
    return InsertLoc;

  const TargetRegisterClass *ptrRegClass =
    TL->getRegClassFor(TL->getPointerTy());
  if (!ptrRegClass->contains(PhysReg))
    return InsertLoc;

  // Scan upwards through the preceding instructions. If an instruction doesn't
  // reference the stack slot or the register we're loading, we can
  // backschedule the reload up past it.
  MachineBasicBlock::iterator NewInsertLoc = InsertLoc;
  while (NewInsertLoc != Begin) {
    MachineBasicBlock::iterator Prev = prior(NewInsertLoc);
    for (unsigned i = 0; i < Prev->getNumOperands(); ++i) {
      MachineOperand &Op = Prev->getOperand(i);
      if (!DoReMat && Op.isFI() && Op.getIndex() == SSorRMId)
        goto stop;
    }
    if (Prev->findRegisterUseOperandIdx(PhysReg) != -1 ||
        Prev->findRegisterDefOperand(PhysReg))
      goto stop;
    for (const unsigned *Alias = TRI->getAliasSet(PhysReg); *Alias; ++Alias)
      if (Prev->findRegisterUseOperandIdx(*Alias) != -1 ||
          Prev->findRegisterDefOperand(*Alias))
        goto stop;
    NewInsertLoc = Prev;
  }
stop:;

  // If we made it to the beginning of the block, turn around and move back
  // down just past any existing reloads. They're likely to be reloads/remats
  // for instructions earlier than what our current reload/remat is for, so
  // they should be scheduled earlier.
  if (NewInsertLoc == Begin) {
    int FrameIdx;
    while (InsertLoc != NewInsertLoc &&
           (TII->isLoadFromStackSlot(NewInsertLoc, FrameIdx) ||
            TII->isTriviallyReMaterializable(NewInsertLoc)))
      ++NewInsertLoc;
  }

  return NewInsertLoc;
}
// ReusedOp - For each reused operand, we keep track of a bit of information,
// in case we need to rollback upon processing a new operand. See comments
// below.
struct ReusedOp {
  // The MachineInstr operand that reused an available value.
  unsigned Operand;

  // StackSlotOrReMat - The spill slot or remat id of the value being reused.
  unsigned StackSlotOrReMat;

  // PhysRegReused - The physical register the value was available in.
  unsigned PhysRegReused;

  // AssignedPhysReg - The physreg that was assigned for use by the reload.
  unsigned AssignedPhysReg;

  // VirtReg - The virtual register itself.
  unsigned VirtReg;

  ReusedOp(unsigned o, unsigned ss, unsigned prr, unsigned apr,
           unsigned vreg)
    : Operand(o), StackSlotOrReMat(ss), PhysRegReused(prr),
      AssignedPhysReg(apr), VirtReg(vreg) {}
};

/// ReuseInfo - This maintains a collection of ReuseOp's for each operand that
/// is reused instead of reloaded.
class ReuseInfo {
  MachineInstr &MI;
  std::vector<ReusedOp> Reuses;
  BitVector PhysRegsClobbered;
public:
  ReuseInfo(MachineInstr &mi, const TargetRegisterInfo *tri) : MI(mi) {
    PhysRegsClobbered.resize(tri->getNumRegs());
  }

  bool hasReuses() const {
    return !Reuses.empty();
  }

  /// addReuse - If we choose to reuse a virtual register that is already
  /// available instead of reloading it, remember that we did so.
  void addReuse(unsigned OpNo, unsigned StackSlotOrReMat,
                unsigned PhysRegReused, unsigned AssignedPhysReg,
                unsigned VirtReg) {
    // If the reload is to the assigned register anyway, no undo will be
    // required.
    if (PhysRegReused == AssignedPhysReg) return;

    // Otherwise, remember this.
    Reuses.push_back(ReusedOp(OpNo, StackSlotOrReMat, PhysRegReused,
                              AssignedPhysReg, VirtReg));
  }

  void markClobbered(unsigned PhysReg) {
    PhysRegsClobbered.set(PhysReg);
  }

  bool isClobbered(unsigned PhysReg) const {
    return PhysRegsClobbered.test(PhysReg);
  }
  /// GetRegForReload - We are about to emit a reload into PhysReg. If there
  /// is some other operand that is using the specified register, either pick
  /// a new register to use, or evict the previous reload and use this reg.
  unsigned GetRegForReload(const TargetRegisterClass *RC, unsigned PhysReg,
                           MachineFunction &MF, MachineInstr *MI,
                           AvailableSpills &Spills,
                           std::vector<MachineInstr*> &MaybeDeadStores,
                           SmallSet<unsigned, 8> &Rejected,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps,
                           VirtRegMap &VRM);

  /// GetRegForReload - Helper for the above GetRegForReload(). Add a
  /// 'Rejected' set to remember which registers have been considered and
  /// rejected for the reload. This avoids infinite looping in cases like
  /// this:
  /// t1 := op t2, t3
  /// t2 <- assigned r0 for use by the reload but ended up reuse r1
  /// t3 <- assigned r1 for use by the reload but ended up reuse r0
  /// t1 <- desires r1
  ///       sees r1 is taken by t2, tries t2's reload register r0
  ///       sees r0 is taken by t3, tries t3's reload register r1
  ///       sees r1 is taken by t2, tries t2's reload register r0 ...
  unsigned GetRegForReload(unsigned VirtReg, unsigned PhysReg, MachineInstr *MI,
                           AvailableSpills &Spills,
                           std::vector<MachineInstr*> &MaybeDeadStores,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps,
                           VirtRegMap &VRM) {
    SmallSet<unsigned, 8> Rejected;
    MachineFunction &MF = *MI->getParent()->getParent();
    const TargetRegisterClass* RC = MF.getRegInfo().getRegClass(VirtReg);
    return GetRegForReload(RC, PhysReg, MF, MI, Spills, MaybeDeadStores,
                           Rejected, RegKills, KillOps, VRM);
  }
};
// ****************** //
// Utility Functions  //
// ****************** //

/// findSinglePredSuccessor - Return via reference a vector of machine basic
/// blocks each of which is a successor of the specified BB and has no other
/// predecessor.
static void findSinglePredSuccessor(MachineBasicBlock *MBB,
                                   SmallVectorImpl<MachineBasicBlock *> &Succs){
  for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
         SE = MBB->succ_end(); SI != SE; ++SI) {
    MachineBasicBlock *SuccMBB = *SI;
    if (SuccMBB->pred_size() == 1)
      Succs.push_back(SuccMBB);
  }
}
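
// The depth-first traversal in LocalRewriter::runOnMachineFunction uses this
// to find blocks that can only be entered from the block just rewritten, so
// physreg availability information can safely be carried into them.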
/// ResurrectConfirmedKill - Helper for ResurrectKill. This register is killed
/// but not re-defined and it's being reused. Remove the kill flag for the
/// register and unset the kill's marker and last kill operand.
static void ResurrectConfirmedKill(unsigned Reg, const TargetRegisterInfo* TRI,
                                   BitVector &RegKills,
                                   std::vector<MachineOperand*> &KillOps) {
  DEBUG(dbgs() << "Resurrect " << TRI->getName(Reg) << "\n");

  MachineOperand *KillOp = KillOps[Reg];
  KillOp->setIsKill(false);
  // KillOps[Reg] might be a def of a super-register.
  unsigned KReg = KillOp->getReg();

  assert(KillOps[KReg] == KillOp && "invalid superreg kill flags");
  KillOps[KReg] = NULL;
  RegKills.reset(KReg);

  // If it's a def of a super-register, its other sub-registers are no
  // longer killed as well.
  for (const unsigned *SR = TRI->getSubRegisters(KReg); *SR; ++SR) {
    DEBUG(dbgs() << "  Resurrect subreg " << TRI->getName(*SR) << "\n");
    assert(KillOps[*SR] == KillOp && "invalid subreg kill flags");
    KillOps[*SR] = NULL;
    RegKills.reset(*SR);
  }
}

/// ResurrectKill - Invalidate kill info associated with a previous MI. An
/// optimization may have decided that it's safe to reuse a previously killed
/// register. If we fail to erase the invalid kill flags, then the register
/// scavenger may later clobber the register used by this MI. Note that this
/// must be done even if this MI is being deleted! Consider:
///
///   USE $r1 (vreg1) <kill>
///   ...
///   $r1(vreg3) = COPY $r1 (vreg2)
///
/// RegAlloc has smartly assigned all three vregs to the same physreg. Initially
/// vreg1's only use is a kill. The rewriter doesn't know it should be live
/// until it rewrites vreg2. At that point it sees that the copy is dead and
/// deletes it. However, deleting the copy implicitly forwards liveness of $r1
/// (it's copy coalescing). We must resurrect $r1 by removing the kill flag at
/// vreg1 before deleting the copy.
static void ResurrectKill(MachineInstr &MI, unsigned Reg,
                          const TargetRegisterInfo* TRI, BitVector &RegKills,
                          std::vector<MachineOperand*> &KillOps) {
  if (RegKills[Reg] && KillOps[Reg]->getParent() != &MI) {
    ResurrectConfirmedKill(Reg, TRI, RegKills, KillOps);
    return;
  }
  // No previous kill for this reg. Check for subreg kills as well.
  //   = d4  <avoiding reload>
  for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
    unsigned SReg = *SR;
    if (RegKills[SReg] && KillOps[SReg]->getParent() != &MI)
      ResurrectConfirmedKill(SReg, TRI, RegKills, KillOps);
  }
}
/// InvalidateKills - MI is going to be deleted. If any of its operands are
/// marked kill, then invalidate the information.
static void InvalidateKills(MachineInstr &MI,
                            const TargetRegisterInfo* TRI,
                            BitVector &RegKills,
                            std::vector<MachineOperand*> &KillOps,
                            SmallVector<unsigned, 2> *KillRegs = NULL) {
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.isUse() || !MO.isKill() || MO.isUndef())
      continue;
    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (KillRegs)
      KillRegs->push_back(Reg);
    assert(Reg < KillOps.size());
    if (KillOps[Reg] == &MO) {
      // This operand was the kill, now no longer.
      KillOps[Reg] = NULL;
      RegKills.reset(Reg);
      for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
        if (RegKills[*SR]) {
          assert(KillOps[*SR] == &MO && "bad subreg kill flags");
          KillOps[*SR] = NULL;
          RegKills.reset(*SR);
        }
      }
    } else {
      // This operand may have reused a previously killed reg. Keep it live in
      // case it continues to be used after erasing this instruction.
      ResurrectKill(MI, Reg, TRI, RegKills, KillOps);
    }
  }
}
/// InvalidateRegDef - If the def operand of the specified def MI is now dead
/// (since its spill instruction is removed), mark it isDead. Also checks if
/// the def MI has other definition operands that are not dead. Returns it by
/// reference.
static bool InvalidateRegDef(MachineBasicBlock::iterator I,
                             MachineInstr &NewDef, unsigned Reg,
                             bool &HasLiveDef,
                             const TargetRegisterInfo *TRI) {
  // Due to remat, it's possible this reg isn't being reused. That is,
  // the def of this reg (by prev MI) is now dead.
  MachineInstr *DefMI = I;
  MachineOperand *DefOp = NULL;
  for (unsigned i = 0, e = DefMI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = DefMI->getOperand(i);
    if (!MO.isReg() || !MO.isDef() || !MO.isKill() || MO.isUndef())
      continue;
    if (MO.getReg() == Reg)
      DefOp = &MO;
    else if (!MO.isDead())
      HasLiveDef = true;
  }
  if (!DefOp)
    return false;

  bool FoundUse = false, Done = false;
  MachineBasicBlock::iterator E = &NewDef;
  ++I; ++E;
  for (; !Done && I != E; ++I) {
    MachineInstr *NMI = I;
    for (unsigned j = 0, ee = NMI->getNumOperands(); j != ee; ++j) {
      MachineOperand &MO = NMI->getOperand(j);
      if (!MO.isReg() || MO.getReg() == 0 ||
          (MO.getReg() != Reg && !TRI->isSubRegister(Reg, MO.getReg())))
        continue;
      if (MO.isUse())
        FoundUse = true;
      Done = true; // Stop after scanning all the operands of this MI.
    }
  }
  if (!FoundUse) {
    // Def is dead!
    DefOp->setIsDead();
    return true;
  }
  return false;
}
/// UpdateKills - Track and update kill info. If a MI reads a register that is
/// marked kill, then it must be due to register reuse. Transfer the kill info
/// over.
static void UpdateKills(MachineInstr &MI, const TargetRegisterInfo* TRI,
                        BitVector &RegKills,
                        std::vector<MachineOperand*> &KillOps) {
  // These do not affect kill info at all.
  if (MI.isDebugValue())
    return;
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.isUse() || MO.isUndef())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // This operand may have reused a previously killed reg. Keep it live.
    ResurrectKill(MI, Reg, TRI, RegKills, KillOps);

    if (MO.isKill()) {
      RegKills.set(Reg);
      KillOps[Reg] = &MO;
      for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
        RegKills.set(*SR);
        KillOps[*SR] = &MO;
      }
    }
  }

  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.getReg() || !MO.isDef())
      continue;
    unsigned Reg = MO.getReg();
    RegKills.reset(Reg);
    KillOps[Reg] = NULL;
    // It also defines (or partially defines) aliases.
    for (const unsigned *SR = TRI->getSubRegisters(Reg); *SR; ++SR) {
      RegKills.reset(*SR);
      KillOps[*SR] = NULL;
    }
    for (const unsigned *SR = TRI->getSuperRegisters(Reg); *SR; ++SR) {
      RegKills.reset(*SR);
      KillOps[*SR] = NULL;
    }
  }
}
/// ReMaterialize - Re-materialize definition for Reg targeting DestReg.
static void ReMaterialize(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MII,
                          unsigned DestReg, unsigned Reg,
                          const TargetInstrInfo *TII,
                          const TargetRegisterInfo *TRI,
                          VirtRegMap &VRM) {
  MachineInstr *ReMatDefMI = VRM.getReMaterializedMI(Reg);
#ifndef NDEBUG
  const TargetInstrDesc &TID = ReMatDefMI->getDesc();
  assert(TID.getNumDefs() == 1 &&
         "Don't know how to remat instructions that define > 1 values!");
#endif
  TII->reMaterialize(MBB, MII, DestReg, 0, ReMatDefMI, *TRI);
  MachineInstr *NewMI = prior(MII);
  for (unsigned i = 0, e = NewMI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = NewMI->getOperand(i);
    if (!MO.isReg() || MO.getReg() == 0)
      continue;
    unsigned VirtReg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(VirtReg))
      continue;
    unsigned Phys = VRM.getPhys(VirtReg);
    assert(Phys && "Virtual register is not assigned a register?");
    substitutePhysReg(MO, Phys, *TRI);
  }
  ++NumReMats;
}
/// findSuperReg - Find the super-register in the given register class whose
/// SubIdx sub-register is SubReg.
static unsigned findSuperReg(const TargetRegisterClass *RC, unsigned SubReg,
                             unsigned SubIdx, const TargetRegisterInfo *TRI) {
  for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
       I != E; ++I) {
    unsigned Reg = *I;
    if (TRI->getSubReg(Reg, SubIdx) == SubReg)
      return Reg;
  }
  return 0;
}
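
// This is a plain linear scan over the register class: it returns the first
// register whose SubIdx sub-register equals SubReg, and 0 if the class
// contains no such register.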
// ******************************** //
// Available Spills Implementation  //
// ******************************** //

/// disallowClobberPhysRegOnly - Unset the CanClobber bit of the specified
/// stackslot register. The register is still available but is no longer
/// allowed to be modified.
void AvailableSpills::disallowClobberPhysRegOnly(unsigned PhysReg) {
  std::multimap<unsigned, int>::iterator I =
    PhysRegsAvailable.lower_bound(PhysReg);
  while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
    int SlotOrReMat = I->second;
    ++I;
    assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
           "Bidirectional map mismatch!");
    SpillSlotsOrReMatsAvailable[SlotOrReMat] &= ~1;
    DEBUG(dbgs() << "PhysReg " << TRI->getName(PhysReg)
          << " copied, it is available for use but can no longer be modified\n");
  }
}

/// disallowClobberPhysReg - Unset the CanClobber bit of the specified
/// stackslot register and its aliases. The register and its aliases may
/// still be available but are no longer allowed to be modified.
void AvailableSpills::disallowClobberPhysReg(unsigned PhysReg) {
  for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
    disallowClobberPhysRegOnly(*AS);
  disallowClobberPhysRegOnly(PhysReg);
}
/// ClobberPhysRegOnly - This is called when the specified physreg changes
/// value. We use this to invalidate any info about stuff we think lives in it.
void AvailableSpills::ClobberPhysRegOnly(unsigned PhysReg) {
  std::multimap<unsigned, int>::iterator I =
    PhysRegsAvailable.lower_bound(PhysReg);
  while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
    int SlotOrReMat = I->second;
    PhysRegsAvailable.erase(I++);
    assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
           "Bidirectional map mismatch!");
    SpillSlotsOrReMatsAvailable.erase(SlotOrReMat);
    DEBUG(dbgs() << "PhysReg " << TRI->getName(PhysReg)
          << " clobbered, invalidating ");
    if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
      DEBUG(dbgs() << "RM#" << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1 <<"\n");
    else
      DEBUG(dbgs() << "SS#" << SlotOrReMat << "\n");
  }
}

/// ClobberPhysReg - This is called when the specified physreg changes
/// value. We use this to invalidate any info about stuff we think lives in
/// it and any of its aliases.
void AvailableSpills::ClobberPhysReg(unsigned PhysReg) {
  for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
    ClobberPhysRegOnly(*AS);
  ClobberPhysRegOnly(PhysReg);
}
/// AddAvailableRegsToLiveIn - Availability information is being kept coming
/// into the specified MBB. Add available physical registers as potential
/// live-in's. If they are reused in the MBB, they will be added to the
/// live-in set to keep the register scavenger and post-allocation scheduler
/// consistent.
void AvailableSpills::AddAvailableRegsToLiveIn(MachineBasicBlock &MBB,
                                               BitVector &RegKills,
                                        std::vector<MachineOperand*> &KillOps) {
  std::set<unsigned> NotAvailable;
  for (std::multimap<unsigned, int>::iterator
         I = PhysRegsAvailable.begin(), E = PhysRegsAvailable.end();
       I != E; ++I) {
    unsigned Reg = I->first;
    const TargetRegisterClass* RC = TRI->getMinimalPhysRegClass(Reg);
    // FIXME: A temporary workaround. We can't reuse available value if it's
    // not safe to move the def of the virtual register's class. e.g.
    // X86::RFP* register classes. Do not add it as a live-in.
    if (!TII->isSafeToMoveRegClassDefs(RC))
      // This is no longer available.
      NotAvailable.insert(Reg);
    else {
      MBB.addLiveIn(Reg);
      if (RegKills[Reg])
        ResurrectConfirmedKill(Reg, TRI, RegKills, KillOps);
    }

    // Skip over the same register.
    std::multimap<unsigned, int>::iterator NI = llvm::next(I);
    while (NI != E && NI->first == Reg) {
      ++I;
      ++NI;
    }
  }

  for (std::set<unsigned>::iterator I = NotAvailable.begin(),
         E = NotAvailable.end(); I != E; ++I) {
    ClobberPhysReg(*I);
    for (const unsigned *SubRegs = TRI->getSubRegisters(*I);
         *SubRegs; ++SubRegs)
      ClobberPhysReg(*SubRegs);
  }
}
/// ModifyStackSlotOrReMat - This method is called when the value in a stack
/// slot changes. This removes information about which register the previous
/// value for this slot lives in (as the previous value is dead now).
void AvailableSpills::ModifyStackSlotOrReMat(int SlotOrReMat) {
  std::map<int, unsigned>::iterator It =
    SpillSlotsOrReMatsAvailable.find(SlotOrReMat);
  if (It == SpillSlotsOrReMatsAvailable.end()) return;
  unsigned Reg = It->second >> 1;
  SpillSlotsOrReMatsAvailable.erase(It);

  // This register may hold the value of multiple stack slots, only remove this
  // stack slot from the set of values the register contains.
  std::multimap<unsigned, int>::iterator I = PhysRegsAvailable.lower_bound(Reg);
  for (; ; ++I) {
    assert(I != PhysRegsAvailable.end() && I->first == Reg &&
           "Map inverse broken!");
    if (I->second == SlotOrReMat) break;
  }
  PhysRegsAvailable.erase(I);
}
// ************************** //
// Reuse Info Implementation  //
// ************************** //

/// GetRegForReload - We are about to emit a reload into PhysReg. If there
/// is some other operand that is using the specified register, either pick
/// a new register to use, or evict the previous reload and use this reg.
unsigned ReuseInfo::GetRegForReload(const TargetRegisterClass *RC,
                                    unsigned PhysReg,
                                    MachineFunction &MF,
                                    MachineInstr *MI, AvailableSpills &Spills,
                                    std::vector<MachineInstr*> &MaybeDeadStores,
                                    SmallSet<unsigned, 8> &Rejected,
                                    BitVector &RegKills,
                                    std::vector<MachineOperand*> &KillOps,
                                    VirtRegMap &VRM) {
  const TargetInstrInfo* TII = MF.getTarget().getInstrInfo();
  const TargetRegisterInfo *TRI = Spills.getRegInfo();

  if (Reuses.empty()) return PhysReg;  // This is most often empty.

  for (unsigned ro = 0, e = Reuses.size(); ro != e; ++ro) {
    ReusedOp &Op = Reuses[ro];
    // If we find some other reuse that was supposed to use this register
    // exactly for its reload, we can change this reload to use ITS reload
    // register. That is, unless its reload register has already been
    // considered and subsequently rejected because it has also been reused
    // by another operand.
    if (Op.PhysRegReused == PhysReg &&
        Rejected.count(Op.AssignedPhysReg) == 0 &&
        RC->contains(Op.AssignedPhysReg)) {
      // Yup, use the reload register that we didn't use before.
      unsigned NewReg = Op.AssignedPhysReg;
      Rejected.insert(PhysReg);
      return GetRegForReload(RC, NewReg, MF, MI, Spills, MaybeDeadStores,
                             Rejected, RegKills, KillOps, VRM);
    } else {
      // Otherwise, we might also have a problem if a previously reused
      // value aliases the new register. If so, codegen the previous reload
      // and use this one.
      unsigned PRRU = Op.PhysRegReused;
      if (TRI->regsOverlap(PRRU, PhysReg)) {
        // Okay, we found out that an alias of a reused register
        // was used. This isn't good because it means we have
        // to undo a previous reuse.
        MachineBasicBlock *MBB = MI->getParent();
        const TargetRegisterClass *AliasRC =
          MBB->getParent()->getRegInfo().getRegClass(Op.VirtReg);

        // Copy Op out of the vector and remove it, we're going to insert an
        // explicit load for it.
        ReusedOp NewOp = Op;
        Reuses.erase(Reuses.begin()+ro);

        // MI may be using only a sub-register of PhysRegUsed.
        unsigned RealPhysRegUsed = MI->getOperand(NewOp.Operand).getReg();
        unsigned SubIdx = 0;
        assert(TargetRegisterInfo::isPhysicalRegister(RealPhysRegUsed) &&
               "A reuse cannot be a virtual register");
        if (PRRU != RealPhysRegUsed) {
          // What was the sub-register index?
          SubIdx = TRI->getSubRegIndex(PRRU, RealPhysRegUsed);
          assert(SubIdx &&
                 "Operand physreg is not a sub-register of PhysRegUsed");
        }

        // Ok, we're going to try to reload the assigned physreg into the
        // slot that we were supposed to in the first place. However, that
        // register could hold a reuse. Check to see if it conflicts or
        // would prefer us to use a different register.
        unsigned NewPhysReg = GetRegForReload(RC, NewOp.AssignedPhysReg,
                                              MF, MI, Spills, MaybeDeadStores,
                                              Rejected, RegKills, KillOps, VRM);

        bool DoReMat = NewOp.StackSlotOrReMat > VirtRegMap::MAX_STACK_SLOT;
        int SSorRMId = DoReMat
          ? VRM.getReMatId(NewOp.VirtReg) : (int) NewOp.StackSlotOrReMat;

        // Back-schedule reloads and remats.
        MachineBasicBlock::iterator InsertLoc =
          ComputeReloadLoc(MI, MBB->begin(), PhysReg, TRI,
                           DoReMat, SSorRMId, TII, MF);

        if (DoReMat) {
          ReMaterialize(*MBB, InsertLoc, NewPhysReg, NewOp.VirtReg, TII,
                        TRI, VRM);
        } else {
          TII->loadRegFromStackSlot(*MBB, InsertLoc, NewPhysReg,
                                    NewOp.StackSlotOrReMat, AliasRC, TRI);
          MachineInstr *LoadMI = prior(InsertLoc);
          VRM.addSpillSlotUse(NewOp.StackSlotOrReMat, LoadMI);
          // Any stores to this stack slot are not dead anymore.
          MaybeDeadStores[NewOp.StackSlotOrReMat] = NULL;
          ++NumLoads;
        }
        Spills.ClobberPhysReg(NewPhysReg);
        Spills.ClobberPhysReg(NewOp.PhysRegReused);

        unsigned RReg = SubIdx ? TRI->getSubReg(NewPhysReg, SubIdx) : NewPhysReg;
        MI->getOperand(NewOp.Operand).setReg(RReg);
        MI->getOperand(NewOp.Operand).setSubReg(0);

        Spills.addAvailable(NewOp.StackSlotOrReMat, NewPhysReg);
        UpdateKills(*prior(InsertLoc), TRI, RegKills, KillOps);
        DEBUG(dbgs() << '\t' << *prior(InsertLoc));

        DEBUG(dbgs() << "Reuse undone!\n");

        // Finally, PhysReg is now available, go ahead and use it.
        return PhysReg;
      }
    }
  }
  return PhysReg;
}
// ************************************************************************ //

/// FoldsStackSlotModRef - Return true if the specified MI folds the specified
/// stack slot mod/ref. It also checks if it's possible to unfold the
/// instruction by having it define a specified physical register instead.
static bool FoldsStackSlotModRef(MachineInstr &MI, int SS, unsigned PhysReg,
                                 const TargetInstrInfo *TII,
                                 const TargetRegisterInfo *TRI,
                                 VirtRegMap &VRM) {
  if (VRM.hasEmergencySpills(&MI) || VRM.isSpillPt(&MI))
    return false;

  bool Found = false;
  VirtRegMap::MI2VirtMapTy::const_iterator I, End;
  for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ++I) {
    unsigned VirtReg = I->second.first;
    VirtRegMap::ModRef MR = I->second.second;
    if (MR & VirtRegMap::isModRef)
      if (VRM.getStackSlot(VirtReg) == SS) {
        Found = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(), true, true) != 0;
        break;
      }
  }
  if (!Found)
    return false;

  // Does the instruction use a register that overlaps the scratch register?
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || MO.getReg() == 0)
      continue;
    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isVirtualRegister(Reg)) {
      if (!VRM.hasPhys(Reg))
        continue;
      Reg = VRM.getPhys(Reg);
    }
    if (TRI->regsOverlap(PhysReg, Reg))
      return false;
  }
  return true;
}
/// FindFreeRegister - Find a free register of a given register class by
/// looking at (at most) the last two machine instructions.
static unsigned FindFreeRegister(MachineBasicBlock::iterator MII,
                                 MachineBasicBlock &MBB,
                                 const TargetRegisterClass *RC,
                                 const TargetRegisterInfo *TRI,
                                 BitVector &AllocatableRegs) {
  BitVector Defs(TRI->getNumRegs());
  BitVector Uses(TRI->getNumRegs());
  SmallVector<unsigned, 4> LocalUses;
  SmallVector<unsigned, 4> Kills;

  // Take a look at 2 instructions at most.
  unsigned Count = 0;
  while (Count < 2) {
    if (MII == MBB.begin())
      break;
    MachineInstr *PrevMI = prior(MII);
    MII = PrevMI;

    if (PrevMI->isDebugValue())
      continue; // Skip over dbg_value instructions.
    ++Count;

    for (unsigned i = 0, e = PrevMI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = PrevMI->getOperand(i);
      if (!MO.isReg() || MO.getReg() == 0)
        continue;
      unsigned Reg = MO.getReg();
      if (MO.isDef()) {
        Defs.set(Reg);
        for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
          Defs.set(*AS);
      } else {
        LocalUses.push_back(Reg);
        if (MO.isKill() && AllocatableRegs[Reg])
          Kills.push_back(Reg);
      }
    }

    for (unsigned i = 0, e = Kills.size(); i != e; ++i) {
      unsigned Kill = Kills[i];
      if (!Defs[Kill] && !Uses[Kill] &&
          RC->contains(Kill))
        return Kill;
    }
    for (unsigned i = 0, e = LocalUses.size(); i != e; ++i) {
      unsigned Reg = LocalUses[i];
      Uses.set(Reg);
      for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
        Uses.set(*AS);
    }
  }

  return 0;
}
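
// Note that FindFreeRegister only returns a register that was killed in the
// scanned window and is neither defined nor used again within it, so handing
// it out as a scratch register cannot change the observable behavior of the
// surrounding code.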
void AssignPhysToVirtReg(MachineInstr *MI, unsigned VirtReg, unsigned PhysReg,
                         const TargetRegisterInfo &TRI) {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.getReg() == VirtReg)
      substitutePhysReg(MO, PhysReg, TRI);
  }
}
struct RefSorter {
  bool operator()(const std::pair<MachineInstr*, int> &A,
                  const std::pair<MachineInstr*, int> &B) {
    return A.second < B.second;
  }
};
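
// RefSorter orders (instruction, distance) pairs by the distance recorded in
// DistanceMap; TransferDeadness sorts its list of references with it and then
// walks that list from the farthest reference backwards.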
// ***************************** //
// Local Spiller Implementation  //
// ***************************** //

class LocalRewriter : public VirtRegRewriter {
  MachineRegisterInfo *MRI;
  const TargetRegisterInfo *TRI;
  const TargetInstrInfo *TII;
  VirtRegMap *VRM;
  LiveIntervals *LIs;
  BitVector AllocatableRegs;
  DenseMap<MachineInstr*, unsigned> DistanceMap;
  DenseMap<int, SmallVector<MachineInstr*,4> > Slot2DbgValues;

  MachineBasicBlock *MBB;       // Basic block currently being processed.

public:
  bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM,
                            LiveIntervals* LIs);

private:
  void EraseInstr(MachineInstr *MI) {
    VRM->RemoveMachineInstrFromMaps(MI);
    LIs->RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
  }

  bool OptimizeByUnfold2(unsigned VirtReg, int SS,
                         MachineBasicBlock::iterator &MII,
                         std::vector<MachineInstr*> &MaybeDeadStores,
                         AvailableSpills &Spills,
                         BitVector &RegKills,
                         std::vector<MachineOperand*> &KillOps);

  bool OptimizeByUnfold(MachineBasicBlock::iterator &MII,
                        std::vector<MachineInstr*> &MaybeDeadStores,
                        AvailableSpills &Spills,
                        BitVector &RegKills,
                        std::vector<MachineOperand*> &KillOps);

  bool CommuteToFoldReload(MachineBasicBlock::iterator &MII,
                           unsigned VirtReg, unsigned SrcReg, int SS,
                           AvailableSpills &Spills,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps,
                           const TargetRegisterInfo *TRI);

  void SpillRegToStackSlot(MachineBasicBlock::iterator &MII,
                           int Idx, unsigned PhysReg, int StackSlot,
                           const TargetRegisterClass *RC,
                           bool isAvailable, MachineInstr *&LastStore,
                           AvailableSpills &Spills,
                           SmallSet<MachineInstr*, 4> &ReMatDefs,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps);

  void TransferDeadness(unsigned Reg, BitVector &RegKills,
                        std::vector<MachineOperand*> &KillOps);

  bool InsertEmergencySpills(MachineInstr *MI);

  bool InsertRestores(MachineInstr *MI,
                      AvailableSpills &Spills,
                      BitVector &RegKills,
                      std::vector<MachineOperand*> &KillOps);

  bool InsertSpills(MachineInstr *MI);

  void ProcessUses(MachineInstr &MI, AvailableSpills &Spills,
                   std::vector<MachineInstr*> &MaybeDeadStores,
                   BitVector &RegKills,
                   ReuseInfo &ReusedOperands,
                   std::vector<MachineOperand*> &KillOps);

  void RewriteMBB(LiveIntervals *LIs,
                  AvailableSpills &Spills, BitVector &RegKills,
                  std::vector<MachineOperand*> &KillOps);
};
bool LocalRewriter::runOnMachineFunction(MachineFunction &MF, VirtRegMap &vrm,
                                         LiveIntervals* lis) {
  MRI = &MF.getRegInfo();
  TRI = MF.getTarget().getRegisterInfo();
  TII = MF.getTarget().getInstrInfo();
  VRM = &vrm;
  LIs = lis;
  AllocatableRegs = TRI->getAllocatableSet(MF);
  DEBUG(dbgs() << "\n**** Local spiller rewriting function '"
        << MF.getFunction()->getName() << "':\n");
  DEBUG(dbgs() << "**** Machine Instrs (NOTE! Does not include spills and"
        " reloads!) ****\n");
  DEBUG(MF.print(dbgs(), LIs->getSlotIndexes()));

  // Spills - Keep track of which spilled values are available in physregs
  // so that we can choose to reuse the physregs instead of emitting
  // reloads. This is usually refreshed per basic block.
  AvailableSpills Spills(TRI, TII);

  // Keep track of kill information.
  BitVector RegKills(TRI->getNumRegs());
  std::vector<MachineOperand*> KillOps;
  KillOps.resize(TRI->getNumRegs(), NULL);

  // SingleEntrySuccs - Successor blocks which have a single predecessor.
  SmallVector<MachineBasicBlock*, 4> SinglePredSuccs;
  SmallPtrSet<MachineBasicBlock*,16> EarlyVisited;

  // Traverse the basic blocks depth first.
  MachineBasicBlock *Entry = MF.begin();
  SmallPtrSet<MachineBasicBlock*,16> Visited;
  for (df_ext_iterator<MachineBasicBlock*,
         SmallPtrSet<MachineBasicBlock*,16> >
         DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
       DFI != E; ++DFI) {
    MBB = *DFI;
    if (!EarlyVisited.count(MBB))
      RewriteMBB(LIs, Spills, RegKills, KillOps);

    // If this MBB is the only predecessor of a successor, keep the
    // availability information and visit it next.
    do {
      // Keep visiting single-predecessor successors as long as possible.
      SinglePredSuccs.clear();
      findSinglePredSuccessor(MBB, SinglePredSuccs);
      if (SinglePredSuccs.empty())
        MBB = 0;
      else {
        // FIXME: More than one successor, each of which has MBB as its only
        // predecessor.
        MBB = SinglePredSuccs[0];
        if (!Visited.count(MBB) && EarlyVisited.insert(MBB)) {
          Spills.AddAvailableRegsToLiveIn(*MBB, RegKills, KillOps);
          RewriteMBB(LIs, Spills, RegKills, KillOps);
        }
      }
    } while (MBB);

    // Clear the availability info.
    Spills.clear();
  }

  DEBUG(dbgs() << "**** Post Machine Instrs ****\n");
  DEBUG(MF.print(dbgs(), LIs->getSlotIndexes()));

  // Mark unused spill slots.
  MachineFrameInfo *MFI = MF.getFrameInfo();
  int SS = VRM->getLowSpillSlot();
  if (SS != VirtRegMap::NO_STACK_SLOT) {
    for (int e = VRM->getHighSpillSlot(); SS <= e; ++SS) {
      SmallVector<MachineInstr*, 4> &DbgValues = Slot2DbgValues[SS];
      if (!VRM->isSpillSlotUsed(SS)) {
        MFI->RemoveStackObject(SS);
        for (unsigned j = 0, ee = DbgValues.size(); j != ee; ++j) {
          MachineInstr *DVMI = DbgValues[j];
          DEBUG(dbgs() << "Removing debug info referencing FI#" << SS << '\n');
          EraseInstr(DVMI);
        }
        ++NumDSS;
      }
      DbgValues.clear();
    }
  }
  Slot2DbgValues.clear();

  return true;
}
/// OptimizeByUnfold2 - Unfold a series of load / store folding instructions if
/// a scratch register is available.
///     xorq  %r12<kill>, %r13
///     addq  %rax, -184(%rbp)
///     addq  %r13, -184(%rbp)
/// ==>
///     xorq  %r12<kill>, %r13
///     movq  -184(%rbp), %r12
///     addq  %rax, %r12
///     addq  %r13, %r12
///     movq  %r12, -184(%rbp)
bool LocalRewriter::
OptimizeByUnfold2(unsigned VirtReg, int SS,
                  MachineBasicBlock::iterator &MII,
                  std::vector<MachineInstr*> &MaybeDeadStores,
                  AvailableSpills &Spills,
                  BitVector &RegKills,
                  std::vector<MachineOperand*> &KillOps) {

  MachineBasicBlock::iterator NextMII = llvm::next(MII);
  // Skip over dbg_value instructions.
  while (NextMII != MBB->end() && NextMII->isDebugValue())
    NextMII = llvm::next(NextMII);
  if (NextMII == MBB->end())
    return false;

  if (TII->getOpcodeAfterMemoryUnfold(MII->getOpcode(), true, true) == 0)
    return false;

  // Now let's see if the last couple of instructions happen to have freed up
  // a register.
  const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
  unsigned PhysReg = FindFreeRegister(MII, *MBB, RC, TRI, AllocatableRegs);
  if (!PhysReg)
    return false;

  MachineFunction &MF = *MBB->getParent();
  TRI = MF.getTarget().getRegisterInfo();
  MachineInstr &MI = *MII;
  if (!FoldsStackSlotModRef(MI, SS, PhysReg, TII, TRI, *VRM))
    return false;

  // If the next instruction also folds the same SS modref and can be unfolded,
  // then it's worthwhile to issue a load from SS into the free register and
  // then unfold these instructions.
  if (!FoldsStackSlotModRef(*NextMII, SS, PhysReg, TII, TRI, *VRM))
    return false;

  // Back-schedule reloads and remats.
  ComputeReloadLoc(MII, MBB->begin(), PhysReg, TRI, false, SS, TII, MF);

  // Load from SS to the spare physical register.
  TII->loadRegFromStackSlot(*MBB, MII, PhysReg, SS, RC, TRI);
  // This invalidates Phys.
  Spills.ClobberPhysReg(PhysReg);
  // Remember it's available.
  Spills.addAvailable(SS, PhysReg);
  MaybeDeadStores[SS] = NULL;

  // Unfold current MI.
  SmallVector<MachineInstr*, 4> NewMIs;
  if (!TII->unfoldMemoryOperand(MF, &MI, VirtReg, false, false, NewMIs))
    llvm_unreachable("Unable to unfold the load / store folding instruction!");
  assert(NewMIs.size() == 1);
  AssignPhysToVirtReg(NewMIs[0], VirtReg, PhysReg, *TRI);
  VRM->transferRestorePts(&MI, NewMIs[0]);
  MII = MBB->insert(MII, NewMIs[0]);
  InvalidateKills(MI, TRI, RegKills, KillOps);
  EraseInstr(&MI);
  ++NumModRefUnfold;

  // Unfold next instructions that fold the same SS.
  do {
    MachineInstr &NextMI = *NextMII;
    NextMII = llvm::next(NextMII);
    NewMIs.clear();
    if (!TII->unfoldMemoryOperand(MF, &NextMI, VirtReg, false, false, NewMIs))
      llvm_unreachable("Unable to unfold the load / store folding instruction!");
    assert(NewMIs.size() == 1);
    AssignPhysToVirtReg(NewMIs[0], VirtReg, PhysReg, *TRI);
    VRM->transferRestorePts(&NextMI, NewMIs[0]);
    MBB->insert(NextMII, NewMIs[0]);
    InvalidateKills(NextMI, TRI, RegKills, KillOps);
    EraseInstr(&NextMI);
    ++NumModRefUnfold;

    // Skip over dbg_value instructions.
    while (NextMII != MBB->end() && NextMII->isDebugValue())
      NextMII = llvm::next(NextMII);
    if (NextMII == MBB->end())
      break;
  } while (FoldsStackSlotModRef(*NextMII, SS, PhysReg, TII, TRI, *VRM));

  // Store the value back into SS.
  TII->storeRegToStackSlot(*MBB, NextMII, PhysReg, true, SS, RC, TRI);
  MachineInstr *StoreMI = prior(NextMII);
  VRM->addSpillSlotUse(SS, StoreMI);
  VRM->virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);

  return true;
}
/// OptimizeByUnfold - Turn a store folding instruction into a load folding
/// instruction. e.g.
///     movl  %eax, -32(%ebp)
///     movl  -36(%ebp), %eax
///     orl   %eax, -32(%ebp)
/// ==>
///     movl  %eax, -32(%ebp)
///     orl   -36(%ebp), %eax
///     mov   %eax, -32(%ebp)
/// This enables unfolding optimization for a subsequent instruction which will
/// also eliminate the newly introduced store instruction.
bool LocalRewriter::
OptimizeByUnfold(MachineBasicBlock::iterator &MII,
                 std::vector<MachineInstr*> &MaybeDeadStores,
                 AvailableSpills &Spills,
                 BitVector &RegKills,
                 std::vector<MachineOperand*> &KillOps) {
  MachineFunction &MF = *MBB->getParent();
  MachineInstr &MI = *MII;
  unsigned UnfoldedOpc = 0;
  unsigned UnfoldPR = 0;
  unsigned UnfoldVR = 0;
  int FoldedSS = VirtRegMap::NO_STACK_SLOT;
  VirtRegMap::MI2VirtMapTy::const_iterator I, End;
  for (tie(I, End) = VRM->getFoldedVirts(&MI); I != End; ) {
    // Only transform a MI that folds a single register.
    if (UnfoldedOpc)
      return false;
    UnfoldVR = I->second.first;
    VirtRegMap::ModRef MR = I->second.second;
    // MI2VirtMap can be updated, which would invalidate the iterator.
    // Increment the iterator first.
    ++I;
    if (VRM->isAssignedReg(UnfoldVR))
      continue;
    // If this reference is not a use, any previous store is now dead.
    // Otherwise, the store to this stack slot is not dead anymore.
    FoldedSS = VRM->getStackSlot(UnfoldVR);
    MachineInstr* DeadStore = MaybeDeadStores[FoldedSS];
    if (DeadStore && (MR & VirtRegMap::isModRef)) {
      unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(FoldedSS);
      if (!PhysReg || !DeadStore->readsRegister(PhysReg))
        continue;
      UnfoldPR = PhysReg;
      UnfoldedOpc = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(),
                                                    false, true);
    }
  }

  if (!UnfoldedOpc) {
    // Look for other unfolding opportunities.
    return OptimizeByUnfold2(UnfoldVR, FoldedSS, MII, MaybeDeadStores, Spills,
                             RegKills, KillOps);
  }

  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || MO.getReg() == 0 || !MO.isUse())
      continue;
    unsigned VirtReg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(VirtReg) || MO.getSubReg())
      continue;
    if (VRM->isAssignedReg(VirtReg)) {
      unsigned PhysReg = VRM->getPhys(VirtReg);
      if (PhysReg && TRI->regsOverlap(PhysReg, UnfoldPR))
        return false;
    } else if (VRM->isReMaterialized(VirtReg))
      continue;
    int SS = VRM->getStackSlot(VirtReg);
    unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
    if (PhysReg) {
      if (TRI->regsOverlap(PhysReg, UnfoldPR))
        return false;
      continue;
    }
    if (VRM->hasPhys(VirtReg)) {
      PhysReg = VRM->getPhys(VirtReg);
      if (!TRI->regsOverlap(PhysReg, UnfoldPR))
        continue;
    }

    // Ok, we'll need to reload the value into a register which makes
    // it impossible to perform the store unfolding optimization later.
    // Let's see if it is possible to fold the load if the store is
    // unfolded. This allows us to perform the store unfolding
    // optimization.
    SmallVector<MachineInstr*, 4> NewMIs;
    if (TII->unfoldMemoryOperand(MF, &MI, UnfoldVR, false, false, NewMIs)) {
      assert(NewMIs.size() == 1);
      MachineInstr *NewMI = NewMIs.back();
      MBB->insert(MII, NewMI);
      NewMIs.clear();
      int Idx = NewMI->findRegisterUseOperandIdx(VirtReg, false);
      assert(Idx != -1);
      SmallVector<unsigned, 1> Ops;
      Ops.push_back(Idx);
      MachineInstr *FoldedMI = TII->foldMemoryOperand(NewMI, Ops, SS);
      NewMI->eraseFromParent();
      if (FoldedMI) {
        VRM->addSpillSlotUse(SS, FoldedMI);
        if (!VRM->hasPhys(UnfoldVR))
          VRM->assignVirt2Phys(UnfoldVR, UnfoldPR);
        VRM->virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
        MII = FoldedMI;
        InvalidateKills(MI, TRI, RegKills, KillOps);
        EraseInstr(&MI);
        return true;
      }
    }
  }

  return false;
}
/// CommuteChangesDestination - We are looking for r0 = op r1, r2, where SrcReg
/// is r1 and it is tied to r0. Return true if after commuting this instruction
/// it will be r0 = op r2, r1.
static bool CommuteChangesDestination(MachineInstr *DefMI,
                                      const TargetInstrDesc &TID,
                                      unsigned SrcReg,
                                      const TargetInstrInfo *TII,
                                      unsigned &DstIdx) {
  if (TID.getNumDefs() != 1 && TID.getNumOperands() != 3)
    return false;
  if (!DefMI->getOperand(1).isReg() ||
      DefMI->getOperand(1).getReg() != SrcReg)
    return false;
  unsigned DefIdx;
  if (!DefMI->isRegTiedToDefOperand(1, &DefIdx) || DefIdx != 0)
    return false;
  unsigned SrcIdx1, SrcIdx2;
  if (!TII->findCommutedOpIndices(DefMI, SrcIdx1, SrcIdx2))
    return false;
  if (SrcIdx1 == 1 && SrcIdx2 == 2) {
    DstIdx = 2;
    return true;
  }
  return false;
}
/// CommuteToFoldReload -
/// Look for
///   r1 = load fi#1
///   r1 = op r1, r2<kill>
///   store r1, fi#1
///
/// If op is commutable and r2 is killed, then we can xform these to
///   r2 = op r2, fi#1
///   store r2, fi#1
bool LocalRewriter::
CommuteToFoldReload(MachineBasicBlock::iterator &MII,
                    unsigned VirtReg, unsigned SrcReg, int SS,
                    AvailableSpills &Spills,
                    BitVector &RegKills,
                    std::vector<MachineOperand*> &KillOps,
                    const TargetRegisterInfo *TRI) {
  if (MII == MBB->begin() || !MII->killsRegister(SrcReg))
    return false;

  MachineInstr &MI = *MII;
  MachineBasicBlock::iterator DefMII = prior(MII);
  MachineInstr *DefMI = DefMII;
  const TargetInstrDesc &TID = DefMI->getDesc();
  unsigned NewDstIdx;
  if (DefMII != MBB->begin() &&
      TID.isCommutable() &&
      CommuteChangesDestination(DefMI, TID, SrcReg, TII, NewDstIdx)) {
    MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx);
    unsigned NewReg = NewDstMO.getReg();
    if (!NewDstMO.isKill() || TRI->regsOverlap(NewReg, SrcReg))
      return false;

    MachineInstr *ReloadMI = prior(DefMII);
    int FrameIdx;
    unsigned DestReg = TII->isLoadFromStackSlot(ReloadMI, FrameIdx);
    if (DestReg != SrcReg || FrameIdx != SS)
      return false;

    int UseIdx = DefMI->findRegisterUseOperandIdx(DestReg, false);
    if (UseIdx == -1)
      return false;

    unsigned DefIdx;
    if (!MI.isRegTiedToDefOperand(UseIdx, &DefIdx))
      return false;
    assert(DefMI->getOperand(DefIdx).isReg() &&
           DefMI->getOperand(DefIdx).getReg() == SrcReg);

    // Now commute def instruction.
    MachineInstr *CommutedMI = TII->commuteInstruction(DefMI, true);
    if (!CommutedMI)
      return false;
    MBB->insert(MII, CommutedMI);
    SmallVector<unsigned, 1> Ops;
    Ops.push_back(NewDstIdx);
    MachineInstr *FoldedMI = TII->foldMemoryOperand(CommutedMI, Ops, SS);
    // Not needed since foldMemoryOperand returns new MI.
    CommutedMI->eraseFromParent();
    if (!FoldedMI)
      return false;

    VRM->addSpillSlotUse(SS, FoldedMI);
    VRM->virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
    // Insert new def MI and spill MI.
    const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
    TII->storeRegToStackSlot(*MBB, &MI, NewReg, true, SS, RC, TRI);
    MII = prior(MII);
    MachineInstr *StoreMI = MII;
    VRM->addSpillSlotUse(SS, StoreMI);
    VRM->virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
    MII = FoldedMI;  // Update MII to backtrack.

    // Delete all 3 old instructions.
    InvalidateKills(*ReloadMI, TRI, RegKills, KillOps);
    EraseInstr(ReloadMI);
    InvalidateKills(*DefMI, TRI, RegKills, KillOps);
    EraseInstr(DefMI);
    InvalidateKills(MI, TRI, RegKills, KillOps);
    EraseInstr(&MI);

    // If NewReg was previously holding value of some SS, it's now clobbered.
    // This has to be done now because it's a physical register. When this
    // instruction is re-visited, it's ignored.
    Spills.ClobberPhysReg(NewReg);

    ++NumCommutes;
    return true;
  }

  return false;
}
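
// Net effect of the transformation above: the original reload / def / store
// triple is replaced by a single folded instruction that reads the stack slot
// directly, plus a store of NewReg back to the same slot.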
/// SpillRegToStackSlot - Spill a register to a specified stack slot. Check if
/// the last store to the same slot is now dead. If so, remove the last store.
void LocalRewriter::
SpillRegToStackSlot(MachineBasicBlock::iterator &MII,
                    int Idx, unsigned PhysReg, int StackSlot,
                    const TargetRegisterClass *RC,
                    bool isAvailable, MachineInstr *&LastStore,
                    AvailableSpills &Spills,
                    SmallSet<MachineInstr*, 4> &ReMatDefs,
                    BitVector &RegKills,
                    std::vector<MachineOperand*> &KillOps) {

  MachineBasicBlock::iterator oldNextMII = llvm::next(MII);
  TII->storeRegToStackSlot(*MBB, llvm::next(MII), PhysReg, true, StackSlot, RC,
                           TRI);
  MachineInstr *StoreMI = prior(oldNextMII);
  VRM->addSpillSlotUse(StackSlot, StoreMI);
  DEBUG(dbgs() << "Store:\t" << *StoreMI);

  // If there is a dead store to this stack slot, nuke it now.
  if (LastStore) {
    DEBUG(dbgs() << "Removed dead store:\t" << *LastStore);
    ++NumDSE;
    SmallVector<unsigned, 2> KillRegs;
    InvalidateKills(*LastStore, TRI, RegKills, KillOps, &KillRegs);
    MachineBasicBlock::iterator PrevMII = LastStore;
    bool CheckDef = PrevMII != MBB->begin();
    if (CheckDef)
      --PrevMII;
    EraseInstr(LastStore);
    if (CheckDef) {
      // Look at defs of killed registers on the store. Mark the defs
      // as dead since the store has been deleted and they aren't
      // being reused.
      for (unsigned j = 0, ee = KillRegs.size(); j != ee; ++j) {
        bool HasOtherDef = false;
        if (InvalidateRegDef(PrevMII, *MII, KillRegs[j], HasOtherDef, TRI)) {
          MachineInstr *DeadDef = PrevMII;
          if (ReMatDefs.count(DeadDef) && !HasOtherDef) {
            // FIXME: This assumes a remat def does not have side effects.
            EraseInstr(DeadDef);
            ++NumDRM;
          }
        }
      }
    }
  }

  // Allow for multi-instruction spill sequences, as on PPC Altivec. Presume
  // the last of multiple instructions is the actual store.
  LastStore = prior(oldNextMII);

  // If the stack slot value was previously available in some other
  // register, change it now. Otherwise, make the value available
  // in PhysReg.
  Spills.ModifyStackSlotOrReMat(StackSlot);
  Spills.ClobberPhysReg(PhysReg);
  Spills.addAvailable(StackSlot, PhysReg, isAvailable);
  ++NumStores;
}
/// isSafeToDelete - Return true if this instruction doesn't produce any side
/// effect and all of its defs are dead.
static bool isSafeToDelete(MachineInstr &MI) {
  const TargetInstrDesc &TID = MI.getDesc();
  if (TID.mayLoad() || TID.mayStore() || TID.isCall() || TID.isTerminator() ||
      TID.isBarrier() || TID.isReturn() ||
      MI.isLabel() || MI.isDebugValue() ||
      MI.hasUnmodeledSideEffects())
    return false;

  // Technically speaking inline asm without side effects and no defs can still
  // be deleted. But there is so much bad inline asm code out there, we should
  // let them be.
  if (MI.isInlineAsm())
    return false;

  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.getReg())
      continue;
    if (MO.isDef() && !MO.isDead())
      return false;
    if (MO.isUse() && MO.isKill())
      // FIXME: We can't remove kill markers or else the scavenger will assert.
      // An alternative is to add an ADD pseudo instruction to replace kill
      // markers.
      return false;
  }
  return true;
}
/// TransferDeadness - An identity copy definition is dead and it's being
/// removed. Find the last def or use and mark it as dead / kill.
void LocalRewriter::
TransferDeadness(unsigned Reg, BitVector &RegKills,
                 std::vector<MachineOperand*> &KillOps) {
  SmallPtrSet<MachineInstr*, 4> Seens;
  SmallVector<std::pair<MachineInstr*, int>,8> Refs;
  for (MachineRegisterInfo::reg_iterator RI = MRI->reg_begin(Reg),
         RE = MRI->reg_end(); RI != RE; ++RI) {
    MachineInstr *UDMI = &*RI;
    if (UDMI->isDebugValue() || UDMI->getParent() != MBB)
      continue;
    DenseMap<MachineInstr*, unsigned>::iterator DI = DistanceMap.find(UDMI);
    if (DI == DistanceMap.end())
      continue;
    if (Seens.insert(UDMI))
      Refs.push_back(std::make_pair(UDMI, DI->second));
  }

  if (Refs.empty())
    return;
  std::sort(Refs.begin(), Refs.end(), RefSorter());

  while (!Refs.empty()) {
    MachineInstr *LastUDMI = Refs.back().first;
    Refs.pop_back();

    MachineOperand *LastUD = NULL;
    for (unsigned i = 0, e = LastUDMI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = LastUDMI->getOperand(i);
      if (!MO.isReg() || MO.getReg() != Reg)
        continue;
      if (!LastUD || (LastUD->isUse() && MO.isDef()))
        LastUD = &MO;
      if (LastUDMI->isRegTiedToDefOperand(i))
        return;
    }
    if (LastUD->isDef()) {
      // If the instruction has no side effect, delete it and propagate
      // backward further. Otherwise, mark it dead and we are done.
      if (!isSafeToDelete(*LastUDMI)) {
        LastUD->setIsDead();
        break;
      }
      EraseInstr(LastUDMI);
    } else {
      LastUD->setIsKill();
      RegKills.set(Reg);
      KillOps[Reg] = LastUD;
      break;
    }
  }
}
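
// Rough illustration (register name is hypothetical): when a now-noop copy
//   %EAX = COPY %EAX<kill>
// is deleted, the kill of %EAX disappears with it. TransferDeadness walks
// the defs and uses of %EAX in this block in DistanceMap order, from the
// closest one backwards: a use is re-marked <kill>, a def is marked <dead>
// (or the whole defining instruction is erased and the walk continues when
// it has no side effects).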
/// InsertEmergencySpills - Insert emergency spills before MI if requested by
/// VRM. Return true if spills were inserted.
bool LocalRewriter::InsertEmergencySpills(MachineInstr *MI) {
  if (!VRM->hasEmergencySpills(MI))
    return false;
  MachineBasicBlock::iterator MII = MI;
  SmallSet<int, 4> UsedSS;
  std::vector<unsigned> &EmSpills = VRM->getEmergencySpills(MI);
  for (unsigned i = 0, e = EmSpills.size(); i != e; ++i) {
    unsigned PhysReg = EmSpills[i];
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(PhysReg);
    assert(RC && "Unable to determine register class!");
    int SS = VRM->getEmergencySpillSlot(RC);
    if (UsedSS.count(SS))
      llvm_unreachable("Need to spill more than one physical register!");
    UsedSS.insert(SS);

    TII->storeRegToStackSlot(*MBB, MII, PhysReg, true, SS, RC, TRI);
    MachineInstr *StoreMI = prior(MII);
    VRM->addSpillSlotUse(SS, StoreMI);

    // Back-schedule reloads and remats.
    MachineBasicBlock::iterator InsertLoc =
      ComputeReloadLoc(llvm::next(MII), MBB->begin(), PhysReg, TRI, false, SS,
                       TII, *MBB->getParent());

    TII->loadRegFromStackSlot(*MBB, InsertLoc, PhysReg, SS, RC, TRI);

    MachineInstr *LoadMI = prior(InsertLoc);
    VRM->addSpillSlotUse(SS, LoadMI);
    ++NumPSpills;
    DistanceMap.insert(std::make_pair(LoadMI, DistanceMap.size()));
  }
  return true;
}
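
// Sketch of the emergency-spill pattern produced above (names illustrative):
//   store %R3 -> <emergency slot>   ; free up R3 around MI
//   <MI that needed a scratch register>
//   %R3 = load <emergency slot>     ; restore R3 afterwards
// The restoring load's exact placement is chosen by ComputeReloadLoc, just
// like ordinary reloads.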
/// InsertRestores - Restore registers before MI is requested by VRM. Return
/// true if any instructions were inserted.
bool LocalRewriter::InsertRestores(MachineInstr *MI,
                                   AvailableSpills &Spills,
                                   BitVector &RegKills,
                                   std::vector<MachineOperand*> &KillOps) {
  if (!VRM->isRestorePt(MI))
    return false;
  MachineBasicBlock::iterator MII = MI;
  std::vector<unsigned> &RestoreRegs = VRM->getRestorePtRestores(MI);
  for (unsigned i = 0, e = RestoreRegs.size(); i != e; ++i) {
    unsigned VirtReg = RestoreRegs[e-i-1];  // Reverse order.
    if (!VRM->getPreSplitReg(VirtReg))
      continue; // Split interval spilled again.
    unsigned Phys = VRM->getPhys(VirtReg);
    MRI->setPhysRegUsed(Phys);

    // Check if the value being restored is available. If so, it must be
    // from a predecessor BB that falls through into this BB. We do not
    // expect:
    // BB1:
    // r1 = load fi#1
    // ...
    //    = r1<kill>
    // ... # r1 not clobbered
    // ...
    //    = load fi#1
    bool DoReMat = VRM->isReMaterialized(VirtReg);
    int SSorRMId = DoReMat
      ? VRM->getReMatId(VirtReg) : VRM->getStackSlot(VirtReg);
    unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);
    if (InReg == Phys) {
      // If the value is already available in the expected register, save
      // a reload / remat.
      if (DoReMat)
        DEBUG(dbgs() << "Reusing RM#"
                     << SSorRMId-VirtRegMap::MAX_STACK_SLOT-1);
      else
        DEBUG(dbgs() << "Reusing SS#" << SSorRMId);
      DEBUG(dbgs() << " from physreg "
                   << TRI->getName(InReg) << " for vreg"
                   << VirtReg <<" instead of reloading into physreg "
                   << TRI->getName(Phys) << '\n');

      // Reusing a physreg may resurrect it. But we expect ProcessUses to
      // update the kill flags for the current instruction after processing it.

      ++NumOmitted;
      continue;
    } else if (InReg && InReg != Phys) {
      if (DoReMat)
        DEBUG(dbgs() << "Reusing RM#"
                     << SSorRMId-VirtRegMap::MAX_STACK_SLOT-1);
      else
        DEBUG(dbgs() << "Reusing SS#" << SSorRMId);
      DEBUG(dbgs() << " from physreg "
                   << TRI->getName(InReg) << " for vreg"
                   << VirtReg <<" by copying it into physreg "
                   << TRI->getName(Phys) << '\n');

      // If the reloaded / remat value is available in another register,
      // copy it to the desired register.

      // Back-schedule reloads and remats.
      MachineBasicBlock::iterator InsertLoc =
        ComputeReloadLoc(MII, MBB->begin(), Phys, TRI, DoReMat, SSorRMId, TII,
                         *MBB->getParent());
      MachineInstr *CopyMI = BuildMI(*MBB, InsertLoc, MI->getDebugLoc(),
                                     TII->get(TargetOpcode::COPY), Phys)
        .addReg(InReg, RegState::Kill);

      // This invalidates Phys.
      Spills.ClobberPhysReg(Phys);
      // Remember it's available.
      Spills.addAvailable(SSorRMId, Phys);

      CopyMI->setAsmPrinterFlag(MachineInstr::ReloadReuse);
      UpdateKills(*CopyMI, TRI, RegKills, KillOps);

      DEBUG(dbgs() << '\t' << *CopyMI);
      ++NumCopified;
      continue;
    }

    // Back-schedule reloads and remats.
    MachineBasicBlock::iterator InsertLoc =
      ComputeReloadLoc(MII, MBB->begin(), Phys, TRI, DoReMat, SSorRMId, TII,
                       *MBB->getParent());

    if (VRM->isReMaterialized(VirtReg)) {
      ReMaterialize(*MBB, InsertLoc, Phys, VirtReg, TII, TRI, *VRM);
    } else {
      const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
      TII->loadRegFromStackSlot(*MBB, InsertLoc, Phys, SSorRMId, RC, TRI);
      MachineInstr *LoadMI = prior(InsertLoc);
      VRM->addSpillSlotUse(SSorRMId, LoadMI);
      ++NumLoads;
      DistanceMap.insert(std::make_pair(LoadMI, DistanceMap.size()));
    }

    // This invalidates Phys.
    Spills.ClobberPhysReg(Phys);
    // Remember it's available.
    Spills.addAvailable(SSorRMId, Phys);

    UpdateKills(*prior(InsertLoc), TRI, RegKills, KillOps);
    DEBUG(dbgs() << '\t' << *prior(MII));
  }
  return true;
}
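
// The three cases above, on a hypothetical restore of vreg1024 into %EAX
// (register names, the vreg number, and the slot are made up):
//   1) the slot's value is already live in %EAX  -> nothing is emitted;
//   2) it is live in some other register %ECX    -> %EAX = COPY %ECX;
//   3) it is not available anywhere              -> reload from the stack
//                                                   slot or rematerialize.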
/// InsertSpills - Insert spills after MI if requested by VRM. Return
/// true if spills were inserted.
bool LocalRewriter::InsertSpills(MachineInstr *MI) {
  if (!VRM->isSpillPt(MI))
    return false;
  MachineBasicBlock::iterator MII = MI;
  std::vector<std::pair<unsigned,bool> > &SpillRegs =
    VRM->getSpillPtSpills(MI);
  for (unsigned i = 0, e = SpillRegs.size(); i != e; ++i) {
    unsigned VirtReg = SpillRegs[i].first;
    bool isKill = SpillRegs[i].second;
    if (!VRM->getPreSplitReg(VirtReg))
      continue; // Split interval spilled again.
    const TargetRegisterClass *RC = MRI->getRegClass(VirtReg);
    unsigned Phys = VRM->getPhys(VirtReg);
    int StackSlot = VRM->getStackSlot(VirtReg);
    MachineBasicBlock::iterator oldNextMII = llvm::next(MII);
    TII->storeRegToStackSlot(*MBB, llvm::next(MII), Phys, isKill, StackSlot,
                             RC, TRI);
    MachineInstr *StoreMI = prior(oldNextMII);
    VRM->addSpillSlotUse(StackSlot, StoreMI);
    DEBUG(dbgs() << "Store:\t" << *StoreMI);
    VRM->virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
  }
  return true;
}
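
// Example of a spill point inserted after MI (names hypothetical): for a
// split vreg1027 assigned to %EDX with stack slot fi#5, the loop above emits
//   store %EDX -> fi#5
// immediately after MI, killing %EDX if the split point requested it, and
// records the new store with VRM->virtFolded(..., VirtRegMap::isMod).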
/// ProcessUses - Process all of MI's spilled operands and all available
/// operands.
void LocalRewriter::ProcessUses(MachineInstr &MI, AvailableSpills &Spills,
                                std::vector<MachineInstr*> &MaybeDeadStores,
                                BitVector &RegKills,
                                ReuseInfo &ReusedOperands,
                                std::vector<MachineOperand*> &KillOps) {

  SmallSet<unsigned, 2> KilledMIRegs;
  SmallVector<unsigned, 4> VirtUseOps;
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || MO.getReg() == 0)
      continue;   // Ignore non-register operands.

    unsigned VirtReg = MO.getReg();

    if (TargetRegisterInfo::isPhysicalRegister(VirtReg)) {
      // Ignore physregs for spilling, but remember that it is used by this
      // function.
      MRI->setPhysRegUsed(VirtReg);
      continue;
    }

    // We want to process implicit virtual register uses first.
    if (MO.isImplicit())
      // If the virtual register is implicitly defined, emit an implicit_def
      // before so scavenger knows it's "defined".
      // FIXME: This is a horrible hack done by the register allocator to
      // remat a definition with virtual register operand.
      VirtUseOps.insert(VirtUseOps.begin(), i);
    else
      VirtUseOps.push_back(i);

    // A partial def causes problems because the same operand both reads and
    // writes the register. This rewriter is designed to rewrite uses and defs
    // separately, so a partial def would already have been rewritten to a
    // physreg by the time we get to processing defs.
    // Add an implicit use operand to model the partial def.
    if (MO.isDef() && MO.getSubReg() && MI.readsVirtualRegister(VirtReg) &&
        MI.findRegisterUseOperandIdx(VirtReg) == -1) {
      VirtUseOps.insert(VirtUseOps.begin(), MI.getNumOperands());
      MI.addOperand(MachineOperand::CreateReg(VirtReg,
                                              false,  // isDef
                                              true)); // isImplicit
      DEBUG(dbgs() << "Partial redef: " << MI);
    }
  }

  // Process all of the spilled uses and all non spilled reg references.
  SmallVector<int, 2> PotentialDeadStoreSlots;
  KilledMIRegs.clear();
  for (unsigned j = 0, e = VirtUseOps.size(); j != e; ++j) {
    unsigned i = VirtUseOps[j];
    unsigned VirtReg = MI.getOperand(i).getReg();
    assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
           "Not a virtual register?");

    unsigned SubIdx = MI.getOperand(i).getSubReg();
    if (VRM->isAssignedReg(VirtReg)) {
      // This virtual register was assigned a physreg!
      unsigned Phys = VRM->getPhys(VirtReg);
      MRI->setPhysRegUsed(Phys);
      if (MI.getOperand(i).isDef())
        ReusedOperands.markClobbered(Phys);
      substitutePhysReg(MI.getOperand(i), Phys, *TRI);
      if (VRM->isImplicitlyDefined(VirtReg))
        // FIXME: Is this needed?
        BuildMI(*MBB, &MI, MI.getDebugLoc(),
                TII->get(TargetOpcode::IMPLICIT_DEF), Phys);
      continue;
    }

    // This virtual register is now known to be a spilled value.
    if (!MI.getOperand(i).isUse())
      continue;  // Handle defs in the loop below (handle use&def here though)

    bool AvoidReload = MI.getOperand(i).isUndef();
    // Check if it is defined by an implicit def. It should not be spilled.
    // Note, this is for correctness reason. e.g.
    // 8   %reg1024<def> = IMPLICIT_DEF
    // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
    // The live range [12, 14) is not part of the r1024 live interval since
    // it's defined by an implicit def. It will not conflict with the live
    // interval of r1025. Now suppose both registers are spilled, you can
    // easily see a situation where both registers are reloaded before
    // the INSERT_SUBREG and both target registers that would overlap.
    bool DoReMat = VRM->isReMaterialized(VirtReg);
    int SSorRMId = DoReMat
      ? VRM->getReMatId(VirtReg) : VRM->getStackSlot(VirtReg);
    int ReuseSlot = SSorRMId;

    // Check to see if this stack slot is available.
    unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);

    // If this is a sub-register use, make sure the reuse register is in the
    // right register class. For example, for x86 not all of the 32-bit
    // registers have accessible sub-registers.
    // Similarly so for EXTRACT_SUBREG. Consider this:
    // EDI = op
    // MOV32_mr fi#1, EDI
    // ...
    //       = EXTRACT_SUBREG fi#1
    // fi#1 is available in EDI, but it cannot be reused because it's not in
    // the right register file.
    if (PhysReg && !AvoidReload && SubIdx) {
      const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
      if (!RC->contains(PhysReg))
        PhysReg = 0;
    }

    if (PhysReg && !AvoidReload) {
      // This spilled operand might be part of a two-address operand. If this
      // is the case, then changing it will necessarily require changing the
      // def part of the instruction as well. However, in some cases, we
      // aren't allowed to modify the reused register. If none of these cases
      // apply, reuse it.
      bool CanReuse = true;
      bool isTied = MI.isRegTiedToDefOperand(i);
      if (isTied) {
        // Okay, we have a two address operand. We can reuse this physreg as
        // long as we are allowed to clobber the value and there isn't an
        // earlier def that has already clobbered the physreg.
        CanReuse = !ReusedOperands.isClobbered(PhysReg) &&
          Spills.canClobberPhysReg(PhysReg);
      }
      // If this is an asm, and a PhysReg alias is used elsewhere as an
      // earlyclobber operand, we can't also use it as an input.
      if (MI.isInlineAsm()) {
        for (unsigned k = 0, e = MI.getNumOperands(); k != e; ++k) {
          MachineOperand &MOk = MI.getOperand(k);
          if (MOk.isReg() && MOk.isEarlyClobber() &&
              TRI->regsOverlap(MOk.getReg(), PhysReg)) {
            CanReuse = false;
            DEBUG(dbgs() << "Not reusing physreg " << TRI->getName(PhysReg)
                         << " for vreg" << VirtReg << ": " << MOk << '\n');
            break;
          }
        }
      }

      if (CanReuse) {
        // If this stack slot value is already available, reuse it!
        if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
          DEBUG(dbgs() << "Reusing RM#"
                       << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1);
        else
          DEBUG(dbgs() << "Reusing SS#" << ReuseSlot);
        DEBUG(dbgs() << " from physreg "
                     << TRI->getName(PhysReg) << " for vreg"
                     << VirtReg <<" instead of reloading into physreg "
                     << TRI->getName(VRM->getPhys(VirtReg)) << '\n');
        unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
        MI.getOperand(i).setReg(RReg);
        MI.getOperand(i).setSubReg(0);

        // Reusing a physreg may resurrect it. But we expect ProcessUses to
        // update the kill flags for the current instr after processing it.

        // The only technical detail we have is that we don't know that
        // PhysReg won't be clobbered by a reloaded stack slot that occurs
        // later in the instruction. In particular, consider 'op V1, V2'.
        // If V1 is available in physreg R0, we would choose to reuse it
        // here, instead of reloading it into the register the allocator
        // indicated (say R1). However, V2 might have to be reloaded
        // later, and it might indicate that it needs to live in R0. When
        // this occurs, we need to have information available that
        // indicates it is safe to use R1 for the reload instead of R0.
        //
        // To further complicate matters, we might conflict with an alias,
        // or R0 and R1 might not be compatible with each other. In this
        // case, we actually insert a reload for V1 in R1, ensuring that
        // we can get at R0 or its alias.
        ReusedOperands.addReuse(i, ReuseSlot, PhysReg,
                                VRM->getPhys(VirtReg), VirtReg);
        if (isTied)
          // Only mark it clobbered if this is a use&def operand.
          ReusedOperands.markClobbered(PhysReg);
        ++NumReused;

        if (MI.getOperand(i).isKill() &&
            ReuseSlot <= VirtRegMap::MAX_STACK_SLOT) {

          // The store of this spilled value is potentially dead, but we
          // won't know for certain until we've confirmed that the re-use
          // above is valid, which means waiting until the other operands
          // are processed. For now we just track the spill slot, we'll
          // remove it after the other operands are processed if valid.

          PotentialDeadStoreSlots.push_back(ReuseSlot);
        }

        // Mark it isKill if there are no other uses of the same virtual
        // register and it's not a two-address operand. IsKill will be
        // unset if reg is reused.
        if (!isTied && KilledMIRegs.count(VirtReg) == 0) {
          MI.getOperand(i).setIsKill();
          KilledMIRegs.insert(VirtReg);
        }
        continue;
      }  // CanReuse

      // Otherwise we have a situation where we have a two-address instruction
      // whose mod/ref operand needs to be reloaded. This reload is already
      // available in some register "PhysReg", but if we used PhysReg as the
      // operand to our 2-addr instruction, the instruction would modify
      // PhysReg. This isn't cool if something later uses PhysReg and expects
      // to get its initial value.
      //
      // To avoid this problem, and to avoid doing a load right after a store,
      // we emit a copy from PhysReg into the designated register for this
      // operand.
      //
      // This case also applies to an earlyclobber'd PhysReg.
      unsigned DesignatedReg = VRM->getPhys(VirtReg);
      assert(DesignatedReg && "Must map virtreg to physreg!");

      // Note that, if we reused a register for a previous operand, the
      // register we want to reload into might not actually be
      // available. If this occurs, use the register indicated by the
      // reuser.
      if (ReusedOperands.hasReuses())
        DesignatedReg = ReusedOperands.
          GetRegForReload(VirtReg, DesignatedReg, &MI, Spills,
                          MaybeDeadStores, RegKills, KillOps, *VRM);

      // If the mapped designated register is actually the physreg we have
      // incoming, we don't need to insert a dead copy.
      if (DesignatedReg == PhysReg) {
        // If this stack slot value is already available, reuse it!
        if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
          DEBUG(dbgs() << "Reusing RM#"
                       << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1);
        else
          DEBUG(dbgs() << "Reusing SS#" << ReuseSlot);
        DEBUG(dbgs() << " from physreg " << TRI->getName(PhysReg)
                     << " for vreg" << VirtReg
                     << " instead of reloading into same physreg.\n");
        unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
        MI.getOperand(i).setReg(RReg);
        MI.getOperand(i).setSubReg(0);
        ReusedOperands.markClobbered(RReg);
        ++NumReused;
        continue;
      }

      MRI->setPhysRegUsed(DesignatedReg);
      ReusedOperands.markClobbered(DesignatedReg);

      // Back-schedule reloads and remats.
      MachineBasicBlock::iterator InsertLoc =
        ComputeReloadLoc(&MI, MBB->begin(), PhysReg, TRI, DoReMat,
                         SSorRMId, TII, *MBB->getParent());
      MachineInstr *CopyMI = BuildMI(*MBB, InsertLoc, MI.getDebugLoc(),
                                     TII->get(TargetOpcode::COPY),
                                     DesignatedReg).addReg(PhysReg);
      CopyMI->setAsmPrinterFlag(MachineInstr::ReloadReuse);
      UpdateKills(*CopyMI, TRI, RegKills, KillOps);

      // This invalidates DesignatedReg.
      Spills.ClobberPhysReg(DesignatedReg);

      Spills.addAvailable(ReuseSlot, DesignatedReg);
      unsigned RReg =
        SubIdx ? TRI->getSubReg(DesignatedReg, SubIdx) : DesignatedReg;
      MI.getOperand(i).setReg(RReg);
      MI.getOperand(i).setSubReg(0);
      DEBUG(dbgs() << '\t' << *prior(InsertLoc));
      ++NumReused;
      continue;
    }  // if (PhysReg)

    // Otherwise, reload it and remember that we have it.
    PhysReg = VRM->getPhys(VirtReg);
    assert(PhysReg && "Must map virtreg to physreg!");

    // Note that, if we reused a register for a previous operand, the
    // register we want to reload into might not actually be
    // available. If this occurs, use the register indicated by the
    // reuser.
    if (ReusedOperands.hasReuses())
      PhysReg = ReusedOperands.GetRegForReload(VirtReg, PhysReg, &MI,
                  Spills, MaybeDeadStores, RegKills, KillOps, *VRM);

    MRI->setPhysRegUsed(PhysReg);
    ReusedOperands.markClobbered(PhysReg);
    if (AvoidReload)
      ++NumAvoided;
    else {
      // Back-schedule reloads and remats.
      MachineBasicBlock::iterator InsertLoc =
        ComputeReloadLoc(MI, MBB->begin(), PhysReg, TRI, DoReMat,
                         SSorRMId, TII, *MBB->getParent());

      if (DoReMat) {
        ReMaterialize(*MBB, InsertLoc, PhysReg, VirtReg, TII, TRI, *VRM);
      } else {
        const TargetRegisterClass* RC = MRI->getRegClass(VirtReg);
        TII->loadRegFromStackSlot(*MBB, InsertLoc, PhysReg, SSorRMId, RC,TRI);
        MachineInstr *LoadMI = prior(InsertLoc);
        VRM->addSpillSlotUse(SSorRMId, LoadMI);
        ++NumLoads;
        DistanceMap.insert(std::make_pair(LoadMI, DistanceMap.size()));
      }
      // This invalidates PhysReg.
      Spills.ClobberPhysReg(PhysReg);

      // Any stores to this stack slot are not dead anymore.
      if (!DoReMat)
        MaybeDeadStores[SSorRMId] = NULL;
      Spills.addAvailable(SSorRMId, PhysReg);
      // Assumes this is the last use. IsKill will be unset if reg is reused
      // unless it's a two-address operand.
      if (!MI.isRegTiedToDefOperand(i) &&
          KilledMIRegs.count(VirtReg) == 0) {
        MI.getOperand(i).setIsKill();
        KilledMIRegs.insert(VirtReg);
      }

      UpdateKills(*prior(InsertLoc), TRI, RegKills, KillOps);
      DEBUG(dbgs() << '\t' << *prior(InsertLoc));
    }
    unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
    MI.getOperand(i).setReg(RReg);
    MI.getOperand(i).setSubReg(0);
  }

  // Ok - now we can remove stores that have been confirmed dead.
  for (unsigned j = 0, e = PotentialDeadStoreSlots.size(); j != e; ++j) {
    // This was the last use and the spilled value is still available
    // for reuse. That means the spill was unnecessary!
    int PDSSlot = PotentialDeadStoreSlots[j];
    MachineInstr* DeadStore = MaybeDeadStores[PDSSlot];
    if (DeadStore) {
      DEBUG(dbgs() << "Removed dead store:\t" << *DeadStore);
      InvalidateKills(*DeadStore, TRI, RegKills, KillOps);
      EraseInstr(DeadStore);
      MaybeDeadStores[PDSSlot] = NULL;
      ++NumDSE;
    }
  }
}
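
// Worked example of the reuse logic above (all names hypothetical).
// Suppose 'ADD32rr %reg1024<tied>, %reg1025' where both vregs were spilled
// to fi#1 and fi#2, and fi#2 happens to still be live in %EAX:
//   - the plain use of %reg1025 is rewritten to %EAX directly (a reuse, no
//     reload is emitted);
//   - the tied use of %reg1024 may not reuse a register it is not allowed to
//     clobber, so either a reload into its assigned physreg or a COPY from
//     the reusable register is emitted instead;
//   - if the reuse of fi#2 covered its last read, the store that created
//     fi#2 is tracked in PotentialDeadStoreSlots and deleted once all
//     operands have been processed.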
/// rewriteMBB - Keep track of which spills are available even after the
/// register allocator is done with them. If possible, avoid reloading vregs.
void
LocalRewriter::RewriteMBB(LiveIntervals *LIs,
                          AvailableSpills &Spills, BitVector &RegKills,
                          std::vector<MachineOperand*> &KillOps) {

  DEBUG(dbgs() << "\n**** Local spiller rewriting MBB '"
               << MBB->getName() << "':\n");

  MachineFunction &MF = *MBB->getParent();

  // MaybeDeadStores - When we need to write a value back into a stack slot,
  // keep track of the inserted store. If the stack slot value is never read
  // (because the value was used from some available register, for example),
  // and subsequently stored to, the original store is dead. This map keeps
  // track of inserted stores that are not used. If we see a subsequent store
  // to the same stack slot, the original store is deleted.
  std::vector<MachineInstr*> MaybeDeadStores;
  MaybeDeadStores.resize(MF.getFrameInfo()->getObjectIndexEnd(), NULL);

  // ReMatDefs - These are rematerializable def MIs which are not deleted.
  SmallSet<MachineInstr*, 4> ReMatDefs;

  // Keep track of the registers we have already spilled in case there are
  // multiple defs of the same register in MI.
  SmallSet<unsigned, 8> SpilledMIRegs;

  RegKills.reset();
  KillOps.clear();
  KillOps.resize(TRI->getNumRegs(), NULL);

  DistanceMap.clear();
  for (MachineBasicBlock::iterator MII = MBB->begin(), E = MBB->end();
       MII != E; ) {
    MachineBasicBlock::iterator NextMII = llvm::next(MII);

    if (OptimizeByUnfold(MII, MaybeDeadStores, Spills, RegKills, KillOps))
      NextMII = llvm::next(MII);

    if (InsertEmergencySpills(MII))
      NextMII = llvm::next(MII);

    InsertRestores(MII, Spills, RegKills, KillOps);

    if (InsertSpills(MII))
      NextMII = llvm::next(MII);

    bool Erased = false;
    bool BackTracked = false;
    MachineInstr &MI = *MII;

    // Remember DbgValue's which reference stack slots.
    if (MI.isDebugValue() && MI.getOperand(0).isFI())
      Slot2DbgValues[MI.getOperand(0).getIndex()].push_back(&MI);

    /// ReusedOperands - Keep track of operand reuse in case we need to undo
    /// reuse.
    ReuseInfo ReusedOperands(MI, TRI);

    ProcessUses(MI, Spills, MaybeDeadStores, RegKills, ReusedOperands, KillOps);

    DEBUG(dbgs() << '\t' << MI);

    // If we have folded references to memory operands, make sure we clear all
    // physical registers that may contain the value of the spilled virtual
    // register.

    // Copy the folded virts to a small vector, we may change MI2VirtMap.
    SmallVector<std::pair<unsigned, VirtRegMap::ModRef>, 4> FoldedVirts;
    for (std::pair<VirtRegMap::MI2VirtMapTy::const_iterator,
                   VirtRegMap::MI2VirtMapTy::const_iterator> FVRange =
           VRM->getFoldedVirts(&MI);
         FVRange.first != FVRange.second; ++FVRange.first)
      FoldedVirts.push_back(FVRange.first->second);

    SmallSet<int, 2> FoldedSS;
    for (unsigned FVI = 0, FVE = FoldedVirts.size(); FVI != FVE; ++FVI) {
      unsigned VirtReg = FoldedVirts[FVI].first;
      VirtRegMap::ModRef MR = FoldedVirts[FVI].second;
      DEBUG(dbgs() << "Folded vreg: " << VirtReg << "  MR: " << MR);

      int SS = VRM->getStackSlot(VirtReg);
      if (SS == VirtRegMap::NO_STACK_SLOT)
        continue;
      FoldedSS.insert(SS);
      DEBUG(dbgs() << " - StackSlot: " << SS << "\n");

      // If this folded instruction is just a use, check to see if it's a
      // straight load from the virt reg slot.
      if ((MR & VirtRegMap::isRef) && !(MR & VirtRegMap::isMod)) {
        int FrameIdx;
        unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx);
        if (DestReg && FrameIdx == SS) {
          // If this spill slot is available, turn it into a copy (or nothing)
          // instead of leaving it as a load!
          if (unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SS)) {
            DEBUG(dbgs() << "Promoted Load To Copy: " << MI);
            if (DestReg != InReg) {
              MachineOperand *DefMO = MI.findRegisterDefOperand(DestReg);
              MachineInstr *CopyMI = BuildMI(*MBB, &MI, MI.getDebugLoc(),
                                             TII->get(TargetOpcode::COPY))
                .addReg(DestReg, RegState::Define, DefMO->getSubReg())
                .addReg(InReg, RegState::Kill);
              // Revisit the copy so we make sure to notice the effects of the
              // operation on the destreg (either needing to RA it if it's
              // virtual or needing to clobber any values if it's physical).
              NextMII = CopyMI;
              NextMII->setAsmPrinterFlag(MachineInstr::ReloadReuse);
              BackTracked = true;
            } else {
              DEBUG(dbgs() << "Removing now-noop copy: " << MI);
              // InvalidateKills resurrects any prior kill of the copy's source
              // allowing the source reg to be reused in place of the copy.
              Spills.disallowClobberPhysReg(InReg);
            }

            InvalidateKills(MI, TRI, RegKills, KillOps);
            EraseInstr(&MI);
            Erased = true;
            goto ProcessNextInst;
          }
        } else {
          unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
          SmallVector<MachineInstr*, 4> NewMIs;
          if (PhysReg &&
              TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, false, NewMIs)){
            MBB->insert(MII, NewMIs[0]);
            InvalidateKills(MI, TRI, RegKills, KillOps);
            EraseInstr(&MI);
            Erased = true;
            --NextMII;  // backtrack to the unfolded instruction.
            BackTracked = true;
            goto ProcessNextInst;
          }
        }
      }

      // If this reference is not a use, any previous store is now dead.
      // Otherwise, the store to this stack slot is not dead anymore.
      MachineInstr* DeadStore = MaybeDeadStores[SS];
      if (DeadStore) {
        bool isDead = !(MR & VirtRegMap::isRef);
        MachineInstr *NewStore = NULL;
        if (MR & VirtRegMap::isModRef) {
          unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
          SmallVector<MachineInstr*, 4> NewMIs;
          // We can reuse this physreg as long as we are allowed to clobber
          // the value and there isn't an earlier def that has already clobbered
          // the physreg.
          if (PhysReg &&
              !ReusedOperands.isClobbered(PhysReg) &&
              Spills.canClobberPhysReg(PhysReg) &&
              !TII->isStoreToStackSlot(&MI, SS)) { // Not profitable!
            MachineOperand *KillOpnd =
              DeadStore->findRegisterUseOperand(PhysReg, true);
            // Note, if the store is storing a sub-register, it's possible the
            // super-register is needed below.
            if (KillOpnd && !KillOpnd->getSubReg() &&
                TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, true,NewMIs)){
              MBB->insert(MII, NewMIs[0]);
              NewStore = NewMIs[1];
              MBB->insert(MII, NewStore);
              VRM->addSpillSlotUse(SS, NewStore);
              InvalidateKills(MI, TRI, RegKills, KillOps);
              EraseInstr(&MI);
              Erased = true;
              --NextMII;
              --NextMII;  // backtrack to the unfolded instruction.
              BackTracked = true;
              isDead = true;
              ++NumSUnfold;
            }
          }
        }

        if (isDead) { // Previous store is dead.
          // If we get here, the store is dead, nuke it now.
          DEBUG(dbgs() << "Removed dead store:\t" << *DeadStore);
          InvalidateKills(*DeadStore, TRI, RegKills, KillOps);
          EraseInstr(DeadStore);
          if (!NewStore)
            ++NumDSE;
        }

        MaybeDeadStores[SS] = NULL;
        if (NewStore) {
          // Treat this store as a spill merged into a copy. That makes the
          // stack slot value available.
          VRM->virtFolded(VirtReg, NewStore, VirtRegMap::isMod);
          goto ProcessNextInst;
        }
      }

      // If the spill slot value is available, and this is a new definition of
      // the value, the value is not available anymore.
      if (MR & VirtRegMap::isMod) {
        // Notice that the value in this stack slot has been modified.
        Spills.ModifyStackSlotOrReMat(SS);

        // If this is *just* a mod of the value, check to see if this is just a
        // store to the spill slot (i.e. the spill got merged into the copy). If
        // so, realize that the vreg is available now, and add the store to the
        // MaybeDeadStore info.
        int StackSlot;
        if (!(MR & VirtRegMap::isRef)) {
          if (unsigned SrcReg = TII->isStoreToStackSlot(&MI, StackSlot)) {
            assert(TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
                   "Src hasn't been allocated yet?");

            if (CommuteToFoldReload(MII, VirtReg, SrcReg, StackSlot,
                                    Spills, RegKills, KillOps, TRI)) {
              NextMII = llvm::next(MII);
              BackTracked = true;
              goto ProcessNextInst;
            }

            // Okay, this is certainly a store of SrcReg to [StackSlot]. Mark
            // this as a potentially dead store in case there is a subsequent
            // store into the stack slot without a read from it.
            MaybeDeadStores[StackSlot] = &MI;

            // If the stack slot value was previously available in some other
            // register, change it now. Otherwise, make the register
            // available in PhysReg.
            Spills.addAvailable(StackSlot, SrcReg, MI.killsRegister(SrcReg));
          }
        }
      }
    }

    // Process all of the spilled defs.
    SpilledMIRegs.clear();
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI.getOperand(i);
      if (!(MO.isReg() && MO.getReg() && MO.isDef()))
        continue;

      unsigned VirtReg = MO.getReg();
      if (!TargetRegisterInfo::isVirtualRegister(VirtReg)) {
        // Check to see if this is a noop copy. If so, eliminate the
        // instruction before considering the dest reg to be changed.
        // Also check if it's copying from an "undef", if so, we can't
        // eliminate this or else the undef marker is lost and it will
        // confuse the scavenger. This is extremely rare.
        if (MI.isIdentityCopy() && !MI.getOperand(1).isUndef() &&
            MI.getNumOperands() == 2) {
          ++NumDCE;
          DEBUG(dbgs() << "Removing now-noop copy: " << MI);
          SmallVector<unsigned, 2> KillRegs;
          InvalidateKills(MI, TRI, RegKills, KillOps, &KillRegs);
          if (MO.isDead() && !KillRegs.empty()) {
            // Source register or an implicit super/sub-register use is killed.
            assert(TRI->regsOverlap(KillRegs[0], MI.getOperand(0).getReg()));
            // Last def is now dead.
            TransferDeadness(MI.getOperand(1).getReg(), RegKills, KillOps);
          }
          EraseInstr(&MI);
          Erased = true;
          Spills.disallowClobberPhysReg(VirtReg);
          goto ProcessNextInst;
        }

        // If it's not a no-op copy, it clobbers the value in the destreg.
        Spills.ClobberPhysReg(VirtReg);
        ReusedOperands.markClobbered(VirtReg);

        // Check to see if this instruction is a load from a stack slot into
        // a register. If so, this provides the stack slot value in the reg.
        int FrameIdx;
        if (unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx)) {
          assert(DestReg == VirtReg && "Unknown load situation!");

          // If it is a folded reference, then it's not safe to clobber.
          bool Folded = FoldedSS.count(FrameIdx);
          // Otherwise, if it wasn't available, remember that it is now!
          Spills.addAvailable(FrameIdx, DestReg, !Folded);
          goto ProcessNextInst;
        }

        continue;
      }

      unsigned SubIdx = MO.getSubReg();
      bool DoReMat = VRM->isReMaterialized(VirtReg);
      if (DoReMat)
        ReMatDefs.insert(&MI);

      // The only vregs left are stack slot definitions.
      int StackSlot = VRM->getStackSlot(VirtReg);
      const TargetRegisterClass *RC = MRI->getRegClass(VirtReg);

      // If this def is part of a two-address operand, make sure to execute
      // the store from the correct physical register.
      unsigned PhysReg;
      unsigned TiedOp;
      if (MI.isRegTiedToUseOperand(i, &TiedOp)) {
        PhysReg = MI.getOperand(TiedOp).getReg();
        if (SubIdx) {
          unsigned SuperReg = findSuperReg(RC, PhysReg, SubIdx, TRI);
          assert(SuperReg && TRI->getSubReg(SuperReg, SubIdx) == PhysReg &&
                 "Can't find corresponding super-register!");
          PhysReg = SuperReg;
        }
      } else {
        PhysReg = VRM->getPhys(VirtReg);
        if (ReusedOperands.isClobbered(PhysReg)) {
          // Another def has taken the assigned physreg. It must have been a
          // use&def which got it due to reuse. Undo the reuse!
          PhysReg = ReusedOperands.GetRegForReload(VirtReg, PhysReg, &MI,
                      Spills, MaybeDeadStores, RegKills, KillOps, *VRM);
        }
      }

      assert(PhysReg && "VR not assigned a physical register?");
      MRI->setPhysRegUsed(PhysReg);
      unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
      ReusedOperands.markClobbered(RReg);
      MI.getOperand(i).setReg(RReg);
      MI.getOperand(i).setSubReg(0);

      if (!MO.isDead() && SpilledMIRegs.insert(VirtReg)) {
        MachineInstr *&LastStore = MaybeDeadStores[StackSlot];
        SpillRegToStackSlot(MII, -1, PhysReg, StackSlot, RC, true,
                            LastStore, Spills, ReMatDefs, RegKills, KillOps);
        NextMII = llvm::next(MII);

        // Check to see if this is a noop copy. If so, eliminate the
        // instruction before considering the dest reg to be changed.
        if (MI.isIdentityCopy()) {
          ++NumDCE;
          DEBUG(dbgs() << "Removing now-noop copy: " << MI);
          InvalidateKills(MI, TRI, RegKills, KillOps);
          EraseInstr(&MI);
          Erased = true;
          UpdateKills(*LastStore, TRI, RegKills, KillOps);
          goto ProcessNextInst;
        }
      }
    }
  ProcessNextInst:
    // Delete dead instructions without side effects.
    if (!Erased && !BackTracked && isSafeToDelete(MI)) {
      InvalidateKills(MI, TRI, RegKills, KillOps);
      EraseInstr(&MI);
      Erased = true;
    }
    if (!Erased)
      DistanceMap.insert(std::make_pair(&MI, DistanceMap.size()));
    if (!Erased && !BackTracked) {
      for (MachineBasicBlock::iterator II = &MI; II != NextMII; ++II)
        UpdateKills(*II, TRI, RegKills, KillOps);
    }
    MII = NextMII;
  }

}
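
// Overall per-instruction flow of RewriteMBB above, in rough order:
//   1) OptimizeByUnfold, emergency spills, restore points, and spill points
//      may insert code around the instruction;
//   2) ProcessUses rewrites spilled uses, reusing available physregs when
//      legal and reloading otherwise;
//   3) folded memory operands are examined: a load from an available slot is
//      promoted to a copy (or removed), and a store that makes an earlier
//      store dead triggers dead-store elision or unfolding;
//   4) spilled defs are written back via SpillRegToStackSlot;
//   5) trivially dead leftovers are deleted and kill flags are updated.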
llvm::VirtRegRewriter* llvm::createVirtRegRewriter() {
  switch (RewriterOpt) {
  default: llvm_unreachable("Unreachable!");
  case local:
    return new LocalRewriter();
  case trivial:
    return new TrivialRewriter();
  }
}