//===-- TargetInstrInfo.cpp - Target Instruction Information -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <cctype>

using namespace llvm;
static cl::opt<bool> DisableHazardRecognizer(
  "disable-sched-hazard", cl::Hidden, cl::init(false),
  cl::desc("Disable hazard detection during preRA scheduling"));
TargetInstrInfo::~TargetInstrInfo() {
}
const TargetRegisterClass *
TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
                             const TargetRegisterInfo *TRI,
                             const MachineFunction &MF) const {
  if (OpNum >= MCID.getNumOperands())
    return nullptr;

  short RegClass = MCID.OpInfo[OpNum].RegClass;
  if (MCID.OpInfo[OpNum].isLookupPtrRegClass())
    return TRI->getPointerRegClass(MF, RegClass);

  // Instructions like INSERT_SUBREG do not have fixed register classes.
  if (RegClass < 0)
    return nullptr;

  // Otherwise just look it up normally.
  return TRI->getRegClass(RegClass);
}
/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  llvm_unreachable("Target didn't implement insertNoop!");
}
/// Measure the specified inline asm to determine an approximation of its
/// length.
/// Comments (which run till the next SeparatorString or newline) do not
/// count as an instruction.
/// Any other non-whitespace text is considered an instruction, with
/// multiple instructions separated by SeparatorString or newlines.
/// Variable-length instructions are not handled here; this function
/// may be overloaded in the target code to do that.
unsigned TargetInstrInfo::getInlineAsmLength(const char *Str,
                                             const MCAsmInfo &MAI) const {
  // Count the number of instructions in the asm.
  bool atInsnStart = true;
  unsigned Length = 0;
  for (; *Str; ++Str) {
    if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
                                strlen(MAI.getSeparatorString())) == 0)
      atInsnStart = true;
    if (atInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
      Length += MAI.getMaxInstLength();
      atInsnStart = false;
    }
    if (atInsnStart && strncmp(Str, MAI.getCommentString(),
                               strlen(MAI.getCommentString())) == 0)
      atInsnStart = false;
  }

  return Length;
}
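// As a worked illustration of the counting rule above (assuming a
// hypothetical target whose MCAsmInfo reports getMaxInstLength() == 4 and
// whose separator string is ";"), the string
//
//   "movl %eax, %ebx\n addl $1, %ebx; incl %ecx"
//
// contains three instruction starts (one per newline- or ";"-separated chunk
// of non-whitespace text), so the returned estimate would be 3 * 4 = 12
// bytes. Each instruction is charged the maximum encoding size, so the
// result is an upper-bound approximation rather than an exact byte count.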
/// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
/// after it, replacing it with an unconditional branch to NewDest.
void
TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Remove all the dead instructions from the end of MBB.
  MBB->erase(Tail, MBB->end());

  // If MBB isn't immediately before NewDest, insert a branch to it.
  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
    InsertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(),
                 Tail->getDebugLoc());
  MBB->addSuccessor(NewDest);
}
MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr *MI,
                                                      bool NewMI,
                                                      unsigned Idx1,
                                                      unsigned Idx2) const {
  const MCInstrDesc &MCID = MI->getDesc();
  bool HasDef = MCID.getNumDefs();
  if (HasDef && !MI->getOperand(0).isReg())
    // No idea how to commute this instruction. Target should implement its own.
    return nullptr;

  unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
  unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
  assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
         CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
         "TargetInstrInfo::CommuteInstructionImpl(): not commutable operands.");
  assert(MI->getOperand(Idx1).isReg() && MI->getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");

  unsigned Reg0 = HasDef ? MI->getOperand(0).getReg() : 0;
  unsigned Reg1 = MI->getOperand(Idx1).getReg();
  unsigned Reg2 = MI->getOperand(Idx2).getReg();
  unsigned SubReg0 = HasDef ? MI->getOperand(0).getSubReg() : 0;
  unsigned SubReg1 = MI->getOperand(Idx1).getSubReg();
  unsigned SubReg2 = MI->getOperand(Idx2).getSubReg();
  bool Reg1IsKill = MI->getOperand(Idx1).isKill();
  bool Reg2IsKill = MI->getOperand(Idx2).isKill();
  bool Reg1IsUndef = MI->getOperand(Idx1).isUndef();
  bool Reg2IsUndef = MI->getOperand(Idx2).isUndef();
  bool Reg1IsInternal = MI->getOperand(Idx1).isInternalRead();
  bool Reg2IsInternal = MI->getOperand(Idx2).isInternalRead();
  // If the destination is tied to either of the commuted source registers,
  // then it must be updated.
  if (HasDef && Reg0 == Reg1 &&
      MI->getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
    Reg2IsKill = false;
    Reg0 = Reg2;
    SubReg0 = SubReg2;
  } else if (HasDef && Reg0 == Reg2 &&
             MI->getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
    Reg1IsKill = false;
    Reg0 = Reg1;
    SubReg0 = SubReg1;
  }

  if (NewMI) {
    // Create a new instruction.
    MachineFunction &MF = *MI->getParent()->getParent();
    MI = MF.CloneMachineInstr(MI);
  }

  if (HasDef) {
    MI->getOperand(0).setReg(Reg0);
    MI->getOperand(0).setSubReg(SubReg0);
  }
  MI->getOperand(Idx2).setReg(Reg1);
  MI->getOperand(Idx1).setReg(Reg2);
  MI->getOperand(Idx2).setSubReg(SubReg1);
  MI->getOperand(Idx1).setSubReg(SubReg2);
  MI->getOperand(Idx2).setIsKill(Reg1IsKill);
  MI->getOperand(Idx1).setIsKill(Reg2IsKill);
  MI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
  MI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
  MI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
  MI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
  return MI;
}
MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr *MI,
                                                  bool NewMI,
                                                  unsigned OpIdx1,
                                                  unsigned OpIdx2) const {
  // If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
  // any commutable operand, which is done in the findCommutedOpIndices()
  // method called below.
  if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
      !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
    assert(MI->isCommutable() &&
           "Precondition violation: MI must be commutable.");
    return nullptr;
  }
  return commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}
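// A typical use from client code is to let the hook pick the operand pair
// itself (an illustrative call; the MI variable and the NewMI choice are
// assumptions of the example, not part of this file):
//
//   MachineInstr *Commuted =
//       TII->commuteInstruction(MI, /*NewMI=*/false,
//                               TargetInstrInfo::CommuteAnyOperandIndex,
//                               TargetInstrInfo::CommuteAnyOperandIndex);
//
// which either commutes MI in place (returning MI) or returns nullptr when
// no suitable pair of commutable register operands can be found.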
bool TargetInstrInfo::fixCommutedOpIndices(unsigned &ResultIdx1,
                                           unsigned &ResultIdx2,
                                           unsigned CommutableOpIdx1,
                                           unsigned CommutableOpIdx2) {
  if (ResultIdx1 == CommuteAnyOperandIndex &&
      ResultIdx2 == CommuteAnyOperandIndex) {
    ResultIdx1 = CommutableOpIdx1;
    ResultIdx2 = CommutableOpIdx2;
  } else if (ResultIdx1 == CommuteAnyOperandIndex) {
    if (ResultIdx2 == CommutableOpIdx1)
      ResultIdx1 = CommutableOpIdx2;
    else if (ResultIdx2 == CommutableOpIdx2)
      ResultIdx1 = CommutableOpIdx1;
    else
      return false;
  } else if (ResultIdx2 == CommuteAnyOperandIndex) {
    if (ResultIdx1 == CommutableOpIdx1)
      ResultIdx2 = CommutableOpIdx2;
    else if (ResultIdx1 == CommutableOpIdx2)
      ResultIdx2 = CommutableOpIdx1;
    else
      return false;
  } else
    // Check that the result operand indices match the given commutable
    // operand indices.
    return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
           (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);

  return true;
}
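// For example, if a caller passes ResultIdx1 == CommuteAnyOperandIndex and
// ResultIdx2 == 2 while the commutable operands are 1 and 2, the wildcard is
// resolved to the remaining commutable index:
//
//   before: ResultIdx1 = <any>, ResultIdx2 = 2, commutable pair = {1, 2}
//   after:  ResultIdx1 = 1,     ResultIdx2 = 2   -> returns true
//
// If both indices are fixed but do not name the commutable pair in either
// order (say ResultIdx1 = 0, ResultIdx2 = 2), the indices are left unchanged
// and the function returns false.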
bool TargetInstrInfo::findCommutedOpIndices(MachineInstr *MI,
                                            unsigned &SrcOpIdx1,
                                            unsigned &SrcOpIdx2) const {
  assert(!MI->isBundle() &&
         "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");

  const MCInstrDesc &MCID = MI->getDesc();
  if (!MCID.isCommutable())
    return false;

  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
  // is not true, then the target must implement this.
  unsigned CommutableOpIdx1 = MCID.getNumDefs();
  unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
                            CommutableOpIdx1, CommutableOpIdx2))
    return false;

  if (!MI->getOperand(SrcOpIdx1).isReg() ||
      !MI->getOperand(SrcOpIdx2).isReg())
    // No idea.
    return false;
  return true;
}
bool
TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
  if (!MI->isTerminator()) return false;

  // Conditional branch is a special case.
  if (MI->isBranch() && !MI->isBarrier())
    return true;
  if (!MI->isPredicable())
    return true;
  return !isPredicated(MI);
}
bool TargetInstrInfo::PredicateInstruction(
    MachineInstr *MI, ArrayRef<MachineOperand> Pred) const {
  bool MadeChange = false;

  assert(!MI->isBundle() &&
         "TargetInstrInfo::PredicateInstruction() can't handle bundles");

  const MCInstrDesc &MCID = MI->getDesc();
  if (!MI->isPredicable())
    return false;

  for (unsigned j = 0, i = 0, e = MI->getNumOperands(); i != e; ++i) {
    if (MCID.OpInfo[i].isPredicate()) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }
      ++j;
    }
  }
  return MadeChange;
}
bool TargetInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
                                           const MachineMemOperand *&MMO,
                                           int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
         oe = MI->memoperands_end();
       o != oe; ++o) {
    if ((*o)->isLoad()) {
      if (const FixedStackPseudoSourceValue *Value =
          dyn_cast_or_null<FixedStackPseudoSourceValue>(
              (*o)->getPseudoValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
    }
  }
  return false;
}
bool TargetInstrInfo::hasStoreToStackSlot(const MachineInstr *MI,
                                          const MachineMemOperand *&MMO,
                                          int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
         oe = MI->memoperands_end();
       o != oe; ++o) {
    if ((*o)->isStore()) {
      if (const FixedStackPseudoSourceValue *Value =
          dyn_cast_or_null<FixedStackPseudoSourceValue>(
              (*o)->getPseudoValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
    }
  }
  return false;
}
bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
                                        unsigned SubIdx, unsigned &Size,
                                        unsigned &Offset,
                                        const MachineFunction &MF) const {
  if (!SubIdx) {
    Size = RC->getSize();
    Offset = 0;
    return true;
  }
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
  // Convert bit size to byte size to be consistent with
  // MCRegisterClass::getSize().
  if (BitSize % 8)
    return false;

  int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
  if (BitOffset < 0 || BitOffset % 8)
    return false;

  Size = BitSize / 8;
  Offset = (unsigned)BitOffset / 8;

  assert(RC->getSize() >= (Offset + Size) && "bad subregister range");

  if (!MF.getDataLayout().isLittleEndian()) {
    Offset = RC->getSize() - (Offset + Size);
  }
  return true;
}
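// As a concrete illustration (assuming a hypothetical 8-byte register class
// and a sub-register index that covers its low 32 bits, i.e. BitSize == 32
// and BitOffset == 0): Size becomes 4 and Offset becomes 0 on a
// little-endian target, while on a big-endian target the final adjustment
// gives Offset = 8 - (0 + 4) = 4, so the returned range addresses the half
// of the spill slot that actually holds those low 32 bits.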
void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    unsigned DestReg,
                                    unsigned SubIdx,
                                    const MachineInstr *Orig,
                                    const TargetRegisterInfo &TRI) const {
  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
  MBB.insert(I, MI);
}

bool
TargetInstrInfo::produceSameValue(const MachineInstr *MI0,
                                  const MachineInstr *MI1,
                                  const MachineRegisterInfo *MRI) const {
  return MI0->isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

MachineInstr *TargetInstrInfo::duplicate(MachineInstr *Orig,
                                         MachineFunction &MF) const {
  assert(!Orig->isNotDuplicable() &&
         "Instruction cannot be duplicated");
  return MF.CloneMachineInstr(Orig);
}
// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
static const TargetRegisterClass *canFoldCopy(const MachineInstr *MI,
                                              unsigned FoldIdx) {
  assert(MI->isCopy() && "MI must be a COPY instruction");
  if (MI->getNumOperands() != 2)
    return nullptr;
  assert(FoldIdx < 2 && "FoldIdx refers no nonexistent operand");

  const MachineOperand &FoldOp = MI->getOperand(FoldIdx);
  const MachineOperand &LiveOp = MI->getOperand(1 - FoldIdx);

  if (FoldOp.getSubReg() || LiveOp.getSubReg())
    return nullptr;

  unsigned FoldReg = FoldOp.getReg();
  unsigned LiveReg = LiveOp.getReg();

  assert(TargetRegisterInfo::isVirtualRegister(FoldReg) &&
         "Cannot fold physregs");

  const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);

  if (TargetRegisterInfo::isPhysicalRegister(LiveOp.getReg()))
    return RC->contains(LiveOp.getReg()) ? RC : nullptr;

  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
    return RC;

  // FIXME: Allow folding when register classes are memory compatible.
  return nullptr;
}

void TargetInstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
  llvm_unreachable("Not a MachO target");
}
static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr *MI,
                                    ArrayRef<unsigned> Ops, int FrameIndex,
                                    const TargetInstrInfo &TII) {
  unsigned StartIdx = 0;
  switch (MI->getOpcode()) {
  case TargetOpcode::STACKMAP:
    StartIdx = 2; // Skip ID, nShadowBytes.
    break;
  case TargetOpcode::PATCHPOINT: {
    // For PatchPoint, the call args are not foldable.
    PatchPointOpers opers(MI);
    StartIdx = opers.getVarIdx();
    break;
  }
  default:
    llvm_unreachable("unexpected stackmap opcode");
  }

  // Return nullptr if any operands requested for folding are not foldable
  // (not part of the stackmap's live values).
  for (unsigned Op : Ops) {
    if (Op < StartIdx)
      return nullptr;
  }

  MachineInstr *NewMI =
    MF.CreateMachineInstr(TII.get(MI->getOpcode()), MI->getDebugLoc(), true);
  MachineInstrBuilder MIB(MF, NewMI);

  // No need to fold return, the meta data, and function arguments.
  for (unsigned i = 0; i < StartIdx; ++i)
    MIB.addOperand(MI->getOperand(i));

  for (unsigned i = StartIdx; i < MI->getNumOperands(); ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (std::find(Ops.begin(), Ops.end(), i) != Ops.end()) {
      unsigned SpillSize;
      unsigned SpillOffset;
      // Compute the spill slot size and offset.
      const TargetRegisterClass *RC =
        MF.getRegInfo().getRegClass(MO.getReg());
      bool Valid =
        TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
      if (!Valid)
        report_fatal_error("cannot spill patchpoint subregister operand");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(SpillSize);
      MIB.addFrameIndex(FrameIndex);
      MIB.addImm(SpillOffset);
    } else
      MIB.addOperand(MO);
  }
  return NewMI;
}
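// For each folded operand this emits the four-operand indirect memory
// reference that StackMaps expects in place of the original register. For
// instance, folding a live value into frame index 3 with an 8-byte spill
// slot at offset 0 would append (illustrative values only):
//
//   ..., IndirectMemRefOp, 8, <fi#3>, 0, ...
//
// while operands before StartIdx and operands not listed in Ops are copied
// through unchanged.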
/// foldMemoryOperand - Attempt to fold a load or store of the specified stack
/// slot into the specified machine instruction for the specified operand(s).
/// If this is possible, a new instruction is returned with the specified
/// operand folded, otherwise NULL is returned. The client is responsible for
/// removing the old instruction and adding the new one in the instruction
/// stream.
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                                 ArrayRef<unsigned> Ops,
                                                 int FI) const {
  unsigned Flags = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    if (MI->getOperand(Ops[i]).isDef())
      Flags |= MachineMemOperand::MOStore;
    else
      Flags |= MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI->getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  MachineInstr *NewMI = nullptr;

  if (MI->getOpcode() == TargetOpcode::STACKMAP ||
      MI->getOpcode() == TargetOpcode::PATCHPOINT) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
    if (NewMI)
      MBB->insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI);
  }

  if (NewMI) {
    NewMI->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->mayLoad()) &&
           "Folded a use to a non-load!");
    const MachineFrameInfo &MFI = *MF.getFrameInfo();
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MachinePointerInfo::getFixedStack(MF, FI), Flags, MFI.getObjectSize(FI),
        MFI.getObjectAlignment(FI));
    NewMI->addMemOperand(MF, MMO);

    return NewMI;
  }

  // Straight COPY may fold as load/store.
  if (!MI->isCopy() || Ops.size() != 1)
    return nullptr;

  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
  if (!RC)
    return nullptr;

  const MachineOperand &MO = MI->getOperand(1 - Ops[0]);
  MachineBasicBlock::iterator Pos = MI;
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  if (Flags == MachineMemOperand::MOStore)
    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
  else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
  return --Pos;
}
bool TargetInstrInfo::hasReassociableOperands(
    const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
  const MachineOperand &Op1 = Inst.getOperand(1);
  const MachineOperand &Op2 = Inst.getOperand(2);
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  // We need virtual register definitions for the operands that we will
  // reassociate.
  MachineInstr *MI1 = nullptr;
  MachineInstr *MI2 = nullptr;
  if (Op1.isReg() && TargetRegisterInfo::isVirtualRegister(Op1.getReg()))
    MI1 = MRI.getUniqueVRegDef(Op1.getReg());
  if (Op2.isReg() && TargetRegisterInfo::isVirtualRegister(Op2.getReg()))
    MI2 = MRI.getUniqueVRegDef(Op2.getReg());

  // And they need to be in the trace (otherwise, they won't have a depth).
  return MI1 && MI2 && MI1->getParent() == MBB && MI2->getParent() == MBB;
}
bool TargetInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
                                             bool &Commuted) const {
  const MachineBasicBlock *MBB = Inst.getParent();
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(1).getReg());
  MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
  unsigned AssocOpcode = Inst.getOpcode();

  // If only one operand has the same opcode and it's the second source
  // operand, the operands must be commuted.
  Commuted = MI1->getOpcode() != AssocOpcode && MI2->getOpcode() == AssocOpcode;
  if (Commuted)
    std::swap(MI1, MI2);

  // 1. The previous instruction must be the same type as Inst.
  // 2. The previous instruction must have virtual register definitions for its
  //    operands in the same basic block as Inst.
  // 3. The previous instruction's result must only be used by Inst.
  return MI1->getOpcode() == AssocOpcode &&
         hasReassociableOperands(*MI1, MBB) &&
         MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
}
// 1. The operation must be associative and commutative.
// 2. The instruction must have virtual register definitions for its
//    operands in the same basic block.
// 3. The instruction must have a reassociable sibling.
bool TargetInstrInfo::isReassociationCandidate(const MachineInstr &Inst,
                                               bool &Commuted) const {
  return isAssociativeAndCommutative(Inst) &&
         hasReassociableOperands(Inst, Inst.getParent()) &&
         hasReassociableSibling(Inst, Commuted);
}
// The concept of the reassociation pass is that these operations can benefit
// from this kind of transformation:
//
// A = ? op ?
// B = A op X (Prev)
// C = B op Y (Root)
// -->
// A = ? op ?
// B = X op Y
// C = A op B
//
// breaking the dependency between A and B, allowing them to be executed in
// parallel (or back-to-back in a pipeline) instead of depending on each other.
//
// FIXME: This has the potential to be expensive (compile time) while not
// improving the code at all. Some ways to limit the overhead:
// 1. Track successful transforms; bail out if hit rate gets too low.
// 2. Only enable at -O3 or some other non-default optimization level.
// 3. Pre-screen pattern candidates here: if an operand of the previous
//    instruction is known to not increase the critical path, then don't match
//    that pattern.
bool TargetInstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root,
    SmallVectorImpl<MachineCombinerPattern> &Patterns) const {
  bool Commute;
  if (isReassociationCandidate(Root, Commute)) {
    // We found a sequence of instructions that may be suitable for a
    // reassociation of operands to increase ILP. Specify each commutation
    // possibility for the Prev instruction in the sequence and let the
    // machine combiner decide if changing the operands is worthwhile.
    if (Commute) {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_YB);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_YB);
    } else {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_BY);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_BY);
    }
    return true;
  }

  return false;
}
/// Attempt the reassociation transformation to reduce critical path length.
/// See the above comments before getMachineCombinerPatterns().
void TargetInstrInfo::reassociateOps(
    MachineInstr &Root, MachineInstr &Prev,
    MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
  MachineFunction *MF = Root.getParent()->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, TRI);

  // This array encodes the operand index for each parameter because the
  // operands may be commuted. Each row corresponds to a pattern value,
  // and each column specifies the index of A, B, X, Y.
  unsigned OpIdx[4][4] = {
    { 1, 1, 2, 2 },
    { 1, 2, 2, 1 },
    { 2, 1, 1, 2 },
    { 2, 2, 1, 1 }
  };

  int Row;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY: Row = 0; break;
  case MachineCombinerPattern::REASSOC_AX_YB: Row = 1; break;
  case MachineCombinerPattern::REASSOC_XA_BY: Row = 2; break;
  case MachineCombinerPattern::REASSOC_XA_YB: Row = 3; break;
  default: llvm_unreachable("unexpected MachineCombinerPattern");
  }

  MachineOperand &OpA = Prev.getOperand(OpIdx[Row][0]);
  MachineOperand &OpB = Root.getOperand(OpIdx[Row][1]);
  MachineOperand &OpX = Prev.getOperand(OpIdx[Row][2]);
  MachineOperand &OpY = Root.getOperand(OpIdx[Row][3]);
  MachineOperand &OpC = Root.getOperand(0);

  unsigned RegA = OpA.getReg();
  unsigned RegB = OpB.getReg();
  unsigned RegX = OpX.getReg();
  unsigned RegY = OpY.getReg();
  unsigned RegC = OpC.getReg();

  if (TargetRegisterInfo::isVirtualRegister(RegA))
    MRI.constrainRegClass(RegA, RC);
  if (TargetRegisterInfo::isVirtualRegister(RegB))
    MRI.constrainRegClass(RegB, RC);
  if (TargetRegisterInfo::isVirtualRegister(RegX))
    MRI.constrainRegClass(RegX, RC);
  if (TargetRegisterInfo::isVirtualRegister(RegY))
    MRI.constrainRegClass(RegY, RC);
  if (TargetRegisterInfo::isVirtualRegister(RegC))
    MRI.constrainRegClass(RegC, RC);

  // Create a new virtual register for the result of (X op Y) instead of
  // recycling RegB because the MachineCombiner's computation of the critical
  // path requires a new register definition rather than an existing one.
  unsigned NewVR = MRI.createVirtualRegister(RC);
  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));

  unsigned Opcode = Root.getOpcode();
  bool KillA = OpA.isKill();
  bool KillX = OpX.isKill();
  bool KillY = OpY.isKill();

  // Create new instructions for insertion.
  MachineInstrBuilder MIB1 =
      BuildMI(*MF, Prev.getDebugLoc(), TII->get(Opcode), NewVR)
          .addReg(RegX, getKillRegState(KillX))
          .addReg(RegY, getKillRegState(KillY));
  MachineInstrBuilder MIB2 =
      BuildMI(*MF, Root.getDebugLoc(), TII->get(Opcode), RegC)
          .addReg(RegA, getKillRegState(KillA))
          .addReg(NewVR, getKillRegState(true));

  setSpecialOperandAttr(Root, Prev, *MIB1, *MIB2);

  // Record new instructions for insertion and old instructions for deletion.
  InsInstrs.push_back(MIB1);
  InsInstrs.push_back(MIB2);
  DelInstrs.push_back(&Prev);
  DelInstrs.push_back(&Root);
}
void TargetInstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const {
  MachineRegisterInfo &MRI = Root.getParent()->getParent()->getRegInfo();

  // Select the previous instruction in the sequence based on the input
  // pattern.
  MachineInstr *Prev = nullptr;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY:
  case MachineCombinerPattern::REASSOC_XA_BY:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(1).getReg());
    break;
  case MachineCombinerPattern::REASSOC_AX_YB:
  case MachineCombinerPattern::REASSOC_XA_YB:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(2).getReg());
    break;
  default:
    break;
  }

  assert(Prev && "Unknown pattern for machine combiner");

  reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, InstIdxForVirtReg);
}
/// foldMemoryOperand - Same as the previous version except it allows folding
/// of any load and store from / to any address, not just from a specific
/// stack slot.
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                                 ArrayRef<unsigned> Ops,
                                                 MachineInstr *LoadMI) const {
  assert(LoadMI->canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    assert(MI->getOperand(Ops[i]).isUse() && "Folding load into def!");
#endif
  MachineBasicBlock &MBB = *MI->getParent();
  MachineFunction &MF = *MBB.getParent();

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = nullptr;
  int FrameIndex = 0;

  if ((MI->getOpcode() == TargetOpcode::STACKMAP ||
       MI->getOpcode() == TargetOpcode::PATCHPOINT) &&
      isLoadFromStackSlot(LoadMI, FrameIndex)) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
    if (NewMI)
      NewMI = MBB.insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI);
  }

  if (!NewMI) return nullptr;

  // Copy the memoperands from the load to the folded instruction.
  if (MI->memoperands_empty()) {
    NewMI->setMemRefs(LoadMI->memoperands_begin(),
                      LoadMI->memoperands_end());
  } else {
    // Handle the rare case of folding multiple loads.
    NewMI->setMemRefs(MI->memoperands_begin(),
                      MI->memoperands_end());
    for (MachineInstr::mmo_iterator I = LoadMI->memoperands_begin(),
           E = LoadMI->memoperands_end(); I != E; ++I) {
      NewMI->addMemOperand(MF, *I);
    }
  }
  return NewMI;
}
bool TargetInstrInfo::
isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
                                         AliasAnalysis *AA) const {
  const MachineFunction &MF = *MI->getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  // Remat clients assume operand 0 is the defined register.
  if (!MI->getNumOperands() || !MI->getOperand(0).isReg())
    return false;
  unsigned DefReg = MI->getOperand(0).getReg();

  // A sub-register definition can only be rematerialized if the instruction
  // doesn't read the other parts of the register. Otherwise it is really a
  // read-modify-write operation on the full virtual register which cannot be
  // rematerialized.
  if (TargetRegisterInfo::isVirtualRegister(DefReg) &&
      MI->getOperand(0).getSubReg() && MI->readsVirtualRegister(DefReg))
    return false;

  // A load from a fixed stack slot can be rematerialized. This may be
  // redundant with subsequent checks, but it's target-independent,
  // simple, and a common case.
  int FrameIdx = 0;
  if (isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    return true;

  // Avoid instructions obviously unsafe for remat.
  if (MI->isNotDuplicable() || MI->mayStore() ||
      MI->hasUnmodeledSideEffects())
    return false;

  // Don't remat inline asm. We have no idea how expensive it is
  // even if it's side effect free.
  if (MI->isInlineAsm())
    return false;

  // Avoid instructions which load from potentially varying memory.
  if (MI->mayLoad() && !MI->isInvariantLoad(AA))
    return false;

  // If any of the registers accessed are non-constant, conservatively assume
  // the instruction is not rematerializable.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // Check for a well-behaved physical register.
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI.isConstantPhysReg(Reg, MF))
          return false;
      } else {
        // A physreg def. We can't remat it.
        return false;
      }
      continue;
    }

    // Only allow one virtual-register def. There may be multiple defs of the
    // same virtual register, though.
    if (MO.isDef() && Reg != DefReg)
      return false;

    // Don't allow any virtual-register uses. Rematting an instruction with
    // virtual register uses would lengthen the live ranges of the uses, which
    // is not necessarily a good idea, certainly not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}
int TargetInstrInfo::getSPAdjust(const MachineInstr *MI) const {
  const MachineFunction *MF = MI->getParent()->getParent();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  bool StackGrowsDown =
    TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();

  if (MI->getOpcode() != FrameSetupOpcode &&
      MI->getOpcode() != FrameDestroyOpcode)
    return 0;

  int SPAdj = MI->getOperand(0).getImm();
  SPAdj = TFI->alignSPAdjust(SPAdj);

  if ((!StackGrowsDown && MI->getOpcode() == FrameSetupOpcode) ||
      (StackGrowsDown && MI->getOpcode() == FrameDestroyOpcode))
    SPAdj = -SPAdj;

  return SPAdj;
}
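// For example, on a target whose stack grows down, a call-frame setup
// pseudo such as "ADJCALLSTACKDOWN 16" (the exact opcode name is
// target-specific and used here only for illustration) yields +16 after any
// alignment rounding, while the matching call-frame destroy pseudo yields
// -16; on a target whose stack grows up the signs are flipped.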
/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
                                           const MachineBasicBlock *MBB,
                                           const MachineFunction &MF) const {
  // Terminators and labels can't be scheduled around.
  if (MI->isTerminator() || MI->isPosition())
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  return MI->modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI);
}
// Provide a global flag for disabling the PreRA hazard recognizer that targets
// may choose to honor.
bool TargetInstrInfo::usePreRAHazardRecognizer() const {
  return !DisableHazardRecognizer;
}

// Default implementation of CreateTargetRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                             const ScheduleDAG *DAG) const {
  // Dummy hazard recognizer allows all instructions to issue.
  return new ScheduleHazardRecognizer();
}

// Default implementation of CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetMIHazardRecognizer(const InstrItineraryData *II,
                               const ScheduleDAG *DAG) const {
  return (ScheduleHazardRecognizer *)
    new ScoreboardHazardRecognizer(II, DAG, "misched");
}

// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  return (ScheduleHazardRecognizer *)
    new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}
//===----------------------------------------------------------------------===//
//  SelectionDAG latency interface.
//===----------------------------------------------------------------------===//

int
TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                   SDNode *DefNode, unsigned DefIdx,
                                   SDNode *UseNode, unsigned UseIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return -1;

  if (!DefNode->isMachineOpcode())
    return -1;

  unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
  if (!UseNode->isMachineOpcode())
    return ItinData->getOperandCycle(DefClass, DefIdx);
  unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                     SDNode *N) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  if (!N->isMachineOpcode())
    return 1;

  return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
}
//===----------------------------------------------------------------------===//
//  MachineInstr latency interface.
//===----------------------------------------------------------------------===//

unsigned
TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
                                const MachineInstr *MI) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  unsigned Class = MI->getDesc().getSchedClass();
  int UOps = ItinData->Itineraries[Class].NumMicroOps;
  if (UOps >= 0)
    return UOps;

  // The # of u-ops is dynamically determined. The specific target should
  // override this function to return the right number.
  return 1;
}
/// Return the default expected latency for a def based on its opcode.
unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
                                            const MachineInstr *DefMI) const {
  if (DefMI->isTransient())
    return 0;
  if (DefMI->mayLoad())
    return SchedModel.LoadLatency;
  if (isHighLatencyDef(DefMI->getOpcode()))
    return SchedModel.HighLatency;
  return 1;
}

unsigned TargetInstrInfo::getPredicationCost(const MachineInstr *) const {
  return 0;
}
unsigned TargetInstrInfo::
getInstrLatency(const InstrItineraryData *ItinData,
                const MachineInstr *MI,
                unsigned *PredCost) const {
  // Default to one cycle for no itinerary. However, an "empty" itinerary may
  // still have a MinLatency property, which getStageLatency checks.
  if (!ItinData)
    return MI->mayLoad() ? 2 : 1;

  return ItinData->getStageLatency(MI->getDesc().getSchedClass());
}
bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
                                       const MachineInstr *DefMI,
                                       unsigned DefIdx) const {
  const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
  if (!ItinData || ItinData->isEmpty())
    return false;

  unsigned DefClass = DefMI->getDesc().getSchedClass();
  int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
  return (DefCycle != -1 && DefCycle <= 1);
}
/// Both DefMI and UseMI must be valid. By default, call directly to the
/// itinerary. This may be overridden by the target.
int TargetInstrInfo::
getOperandLatency(const InstrItineraryData *ItinData,
                  const MachineInstr *DefMI, unsigned DefIdx,
                  const MachineInstr *UseMI, unsigned UseIdx) const {
  unsigned DefClass = DefMI->getDesc().getSchedClass();
  unsigned UseClass = UseMI->getDesc().getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}
/// If we can determine the operand latency from the def only, without
/// itinerary lookup, do so. Otherwise return -1.
int TargetInstrInfo::computeDefOperandLatency(
    const InstrItineraryData *ItinData,
    const MachineInstr *DefMI) const {

  // Let the target hook getInstrLatency handle missing itineraries.
  if (!ItinData)
    return getInstrLatency(ItinData, DefMI);

  if (ItinData->isEmpty())
    return defaultDefLatency(ItinData->SchedModel, DefMI);

  // ...operand lookup required
  return -1;
}
/// computeOperandLatency - Compute and return the latency of the given data
/// dependent def and use when the operand indices are already known. UseMI may
/// be NULL for an unknown use.
///
/// FindMin may be set to get the minimum vs. expected latency. Minimum
/// latency is used for scheduling groups, while expected latency is for
/// instruction cost and critical path.
///
/// Depending on the subtarget's itinerary properties, this may or may not need
/// to call getOperandLatency(). For most subtargets, we don't need DefIdx or
/// UseIdx to compute min latency.
unsigned TargetInstrInfo::
computeOperandLatency(const InstrItineraryData *ItinData,
                      const MachineInstr *DefMI, unsigned DefIdx,
                      const MachineInstr *UseMI, unsigned UseIdx) const {

  int DefLatency = computeDefOperandLatency(ItinData, DefMI);
  if (DefLatency >= 0)
    return DefLatency;

  assert(ItinData && !ItinData->isEmpty() && "computeDefOperandLatency fail");

  int OperLatency = 0;
  if (UseMI)
    OperLatency = getOperandLatency(ItinData, DefMI, DefIdx, UseMI, UseIdx);
  else {
    unsigned DefClass = DefMI->getDesc().getSchedClass();
    OperLatency = ItinData->getOperandCycle(DefClass, DefIdx);
  }
  if (OperLatency >= 0)
    return OperLatency;

  // No operand latency was found.
  unsigned InstrLatency = getInstrLatency(ItinData, DefMI);

  // Expected latency is the max of the stage latency and itinerary props.
  InstrLatency = std::max(InstrLatency,
                          defaultDefLatency(ItinData->SchedModel, DefMI));
  return InstrLatency;
}
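// Putting the pieces together: with no itinerary data at all, a dependence on
// a load defaults to 2 cycles and anything else to 1 (via getInstrLatency
// above); with an "empty" itinerary, defaultDefLatency supplies the schedule
// model's LoadLatency/HighLatency defaults; and only when a real itinerary is
// present does the per-operand cycle lookup in this function run.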
bool TargetInstrInfo::getRegSequenceInputs(
    const MachineInstr &MI, unsigned DefIdx,
    SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
  assert((MI.isRegSequence() ||
          MI.isRegSequenceLike()) && "Instruction do not have the proper type");

  if (!MI.isRegSequence())
    return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);

  // We are looking at:
  // Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
  assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
  for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
       OpIdx += 2) {
    const MachineOperand &MOReg = MI.getOperand(OpIdx);
    const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
    assert(MOSubIdx.isImm() &&
           "One of the subindex of the reg_sequence is not an immediate");
    // Record Reg:SubReg, SubIdx.
    InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
                                            (unsigned)MOSubIdx.getImm()));
  }
  return true;
}
bool TargetInstrInfo::getExtractSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPairAndIdx &InputReg) const {
  assert((MI.isExtractSubreg() ||
          MI.isExtractSubregLike()) && "Instruction do not have the proper type");

  if (!MI.isExtractSubreg())
    return getExtractSubregLikeInputs(MI, DefIdx, InputReg);

  // We are looking at:
  // Def = EXTRACT_SUBREG v0.sub1, sub0.
  assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
  const MachineOperand &MOReg = MI.getOperand(1);
  const MachineOperand &MOSubIdx = MI.getOperand(2);
  assert(MOSubIdx.isImm() &&
         "The subindex of the extract_subreg is not an immediate");

  InputReg.Reg = MOReg.getReg();
  InputReg.SubReg = MOReg.getSubReg();
  InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}
bool TargetInstrInfo::getInsertSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
  assert((MI.isInsertSubreg() ||
          MI.isInsertSubregLike()) && "Instruction do not have the proper type");

  if (!MI.isInsertSubreg())
    return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);

  // We are looking at:
  // Def = INSERT_SEQUENCE v0, v1, sub0.
  assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
  const MachineOperand &MOBaseReg = MI.getOperand(1);
  const MachineOperand &MOInsertedReg = MI.getOperand(2);
  const MachineOperand &MOSubIdx = MI.getOperand(3);
  assert(MOSubIdx.isImm() &&
         "One of the subindex of the reg_sequence is not an immediate");
  BaseReg.Reg = MOBaseReg.getReg();
  BaseReg.SubReg = MOBaseReg.getSubReg();

  InsertedReg.Reg = MOInsertedReg.getReg();
  InsertedReg.SubReg = MOInsertedReg.getSubReg();
  InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}