1 //===-- LegalizeDAG.cpp - Implement SelectionDAG::Legalize ----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the SelectionDAG::Legalize method.
12 //===----------------------------------------------------------------------===//
14 #include "llvm/CodeGen/SelectionDAG.h"
15 #include "llvm/CodeGen/MachineFunction.h"
16 #include "llvm/CodeGen/MachineFrameInfo.h"
17 #include "llvm/CodeGen/MachineJumpTableInfo.h"
18 #include "llvm/CodeGen/MachineModuleInfo.h"
19 #include "llvm/Analysis/DebugInfo.h"
20 #include "llvm/CodeGen/PseudoSourceValue.h"
21 #include "llvm/Target/TargetFrameInfo.h"
22 #include "llvm/Target/TargetLowering.h"
23 #include "llvm/Target/TargetData.h"
24 #include "llvm/Target/TargetMachine.h"
25 #include "llvm/Target/TargetOptions.h"
26 #include "llvm/CallingConv.h"
27 #include "llvm/Constants.h"
28 #include "llvm/DerivedTypes.h"
29 #include "llvm/Function.h"
30 #include "llvm/GlobalVariable.h"
31 #include "llvm/LLVMContext.h"
32 #include "llvm/Support/CommandLine.h"
33 #include "llvm/Support/Debug.h"
34 #include "llvm/Support/ErrorHandling.h"
35 #include "llvm/Support/MathExtras.h"
36 #include "llvm/Support/raw_ostream.h"
37 #include "llvm/ADT/DenseMap.h"
38 #include "llvm/ADT/SmallVector.h"
39 #include "llvm/ADT/SmallPtrSet.h"
42 //===----------------------------------------------------------------------===//
43 /// SelectionDAGLegalize - This takes an arbitrary SelectionDAG as input and
44 /// hacks on it until the target machine can handle it. This involves
45 /// eliminating value sizes the machine cannot handle (promoting small sizes to
46 /// large sizes or splitting up large values into small values) as well as
47 /// eliminating operations the machine cannot handle.
49 /// This code also does a small amount of optimization and recognition of idioms
50 /// as part of its processing. For example, if a target does not support a
51 /// 'setcc' instruction efficiently, but does support 'brcc' instruction, this
52 /// will attempt to merge the setcc and brcc instructions into a single brcc.
55 class SelectionDAGLegalize
{
56 const TargetMachine
&TM
;
57 const TargetLowering
&TLI
;
59 CodeGenOpt::Level OptLevel
;
61 // Libcall insertion helpers.
63 /// LastCALLSEQ_END - This keeps track of the CALLSEQ_END node that has been
64 /// legalized. We use this to ensure that calls are properly serialized
65 /// against each other, including inserted libcalls.
66 SDValue LastCALLSEQ_END
;
68 /// IsLegalizingCall - This member is used *only* for purposes of providing
69 /// helpful assertions that a libcall isn't created while another call is
70 /// being legalized (which could lead to non-serialized call sequences).
71 bool IsLegalizingCall
;
74 Legal
, // The target natively supports this operation.
75 Promote
, // This operation should be executed in a larger type.
76 Expand
// Try to expand this to other ops, otherwise use a libcall.
79 /// ValueTypeActions - This is a bitvector that contains two bits for each
80 /// value type, where the two bits correspond to the LegalizeAction enum.
81 /// This can be queried with "getTypeAction(VT)".
82 TargetLowering::ValueTypeActionImpl ValueTypeActions
;
84 /// LegalizedNodes - For nodes that are of legal width, and that have more
85 /// than one use, this map indicates what regularized operand to use. This
86 /// allows us to avoid legalizing the same thing more than once.
87 DenseMap
<SDValue
, SDValue
> LegalizedNodes
;
89 void AddLegalizedOperand(SDValue From
, SDValue To
) {
90 LegalizedNodes
.insert(std::make_pair(From
, To
));
91 // If someone requests legalization of the new node, return itself.
93 LegalizedNodes
.insert(std::make_pair(To
, To
));
97 SelectionDAGLegalize(SelectionDAG
&DAG
, CodeGenOpt::Level ol
);
99 /// getTypeAction - Return how we should legalize values of this type, either
100 /// it is already legal or we need to expand it into multiple registers of
101 /// smaller integer type, or we need to promote it to a larger type.
102 LegalizeAction
getTypeAction(EVT VT
) const {
104 (LegalizeAction
)ValueTypeActions
.getTypeAction(*DAG
.getContext(), VT
);
107 /// isTypeLegal - Return true if this type is legal on this target.
109 bool isTypeLegal(EVT VT
) const {
110 return getTypeAction(VT
) == Legal
;
116 /// LegalizeOp - We know that the specified value has a legal type.
117 /// Recursively ensure that the operands have legal types, then return the
119 SDValue
LegalizeOp(SDValue O
);
121 SDValue
OptimizeFloatStore(StoreSDNode
*ST
);
123 /// PerformInsertVectorEltInMemory - Some target cannot handle a variable
124 /// insertion index for the INSERT_VECTOR_ELT instruction. In this case, it
125 /// is necessary to spill the vector being inserted into to memory, perform
126 /// the insert there, and then read the result back.
127 SDValue
PerformInsertVectorEltInMemory(SDValue Vec
, SDValue Val
,
128 SDValue Idx
, DebugLoc dl
);
129 SDValue
ExpandINSERT_VECTOR_ELT(SDValue Vec
, SDValue Val
,
130 SDValue Idx
, DebugLoc dl
);
132 /// ShuffleWithNarrowerEltType - Return a vector shuffle operation which
133 /// performs the same shuffe in terms of order or result bytes, but on a type
134 /// whose vector element type is narrower than the original shuffle type.
135 /// e.g. <v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3>
136 SDValue
ShuffleWithNarrowerEltType(EVT NVT
, EVT VT
, DebugLoc dl
,
137 SDValue N1
, SDValue N2
,
138 SmallVectorImpl
<int> &Mask
) const;
140 bool LegalizeAllNodesNotLeadingTo(SDNode
*N
, SDNode
*Dest
,
141 SmallPtrSet
<SDNode
*, 32> &NodesLeadingTo
);
143 void LegalizeSetCCCondCode(EVT VT
, SDValue
&LHS
, SDValue
&RHS
, SDValue
&CC
,
146 SDValue
ExpandLibCall(RTLIB::Libcall LC
, SDNode
*Node
, bool isSigned
);
147 std::pair
<SDValue
, SDValue
> ExpandChainLibCall(RTLIB::Libcall LC
,
148 SDNode
*Node
, bool isSigned
);
149 SDValue
ExpandFPLibCall(SDNode
*Node
, RTLIB::Libcall Call_F32
,
150 RTLIB::Libcall Call_F64
, RTLIB::Libcall Call_F80
,
151 RTLIB::Libcall Call_PPCF128
);
152 SDValue
ExpandIntLibCall(SDNode
*Node
, bool isSigned
,
153 RTLIB::Libcall Call_I8
,
154 RTLIB::Libcall Call_I16
,
155 RTLIB::Libcall Call_I32
,
156 RTLIB::Libcall Call_I64
,
157 RTLIB::Libcall Call_I128
);
159 SDValue
EmitStackConvert(SDValue SrcOp
, EVT SlotVT
, EVT DestVT
, DebugLoc dl
);
160 SDValue
ExpandBUILD_VECTOR(SDNode
*Node
);
161 SDValue
ExpandSCALAR_TO_VECTOR(SDNode
*Node
);
162 void ExpandDYNAMIC_STACKALLOC(SDNode
*Node
,
163 SmallVectorImpl
<SDValue
> &Results
);
164 SDValue
ExpandFCOPYSIGN(SDNode
*Node
);
165 SDValue
ExpandLegalINT_TO_FP(bool isSigned
, SDValue LegalOp
, EVT DestVT
,
167 SDValue
PromoteLegalINT_TO_FP(SDValue LegalOp
, EVT DestVT
, bool isSigned
,
169 SDValue
PromoteLegalFP_TO_INT(SDValue LegalOp
, EVT DestVT
, bool isSigned
,
172 SDValue
ExpandBSWAP(SDValue Op
, DebugLoc dl
);
173 SDValue
ExpandBitCount(unsigned Opc
, SDValue Op
, DebugLoc dl
);
175 SDValue
ExpandExtractFromVectorThroughStack(SDValue Op
);
176 SDValue
ExpandVectorBuildThroughStack(SDNode
* Node
);
178 std::pair
<SDValue
, SDValue
> ExpandAtomic(SDNode
*Node
);
180 void ExpandNode(SDNode
*Node
, SmallVectorImpl
<SDValue
> &Results
);
181 void PromoteNode(SDNode
*Node
, SmallVectorImpl
<SDValue
> &Results
);
185 /// ShuffleWithNarrowerEltType - Return a vector shuffle operation which
186 /// performs the same shuffe in terms of order or result bytes, but on a type
187 /// whose vector element type is narrower than the original shuffle type.
188 /// e.g. <v4i32> <0, 1, 0, 1> -> v8i16 <0, 1, 2, 3, 0, 1, 2, 3>
190 SelectionDAGLegalize::ShuffleWithNarrowerEltType(EVT NVT
, EVT VT
, DebugLoc dl
,
191 SDValue N1
, SDValue N2
,
192 SmallVectorImpl
<int> &Mask
) const {
193 unsigned NumMaskElts
= VT
.getVectorNumElements();
194 unsigned NumDestElts
= NVT
.getVectorNumElements();
195 unsigned NumEltsGrowth
= NumDestElts
/ NumMaskElts
;
197 assert(NumEltsGrowth
&& "Cannot promote to vector type with fewer elts!");
199 if (NumEltsGrowth
== 1)
200 return DAG
.getVectorShuffle(NVT
, dl
, N1
, N2
, &Mask
[0]);
202 SmallVector
<int, 8> NewMask
;
203 for (unsigned i
= 0; i
!= NumMaskElts
; ++i
) {
205 for (unsigned j
= 0; j
!= NumEltsGrowth
; ++j
) {
207 NewMask
.push_back(-1);
209 NewMask
.push_back(Idx
* NumEltsGrowth
+ j
);
212 assert(NewMask
.size() == NumDestElts
&& "Non-integer NumEltsGrowth?");
213 assert(TLI
.isShuffleMaskLegal(NewMask
, NVT
) && "Shuffle not legal?");
214 return DAG
.getVectorShuffle(NVT
, dl
, N1
, N2
, &NewMask
[0]);
217 SelectionDAGLegalize::SelectionDAGLegalize(SelectionDAG
&dag
,
218 CodeGenOpt::Level ol
)
219 : TM(dag
.getTarget()), TLI(dag
.getTargetLoweringInfo()),
220 DAG(dag
), OptLevel(ol
),
221 ValueTypeActions(TLI
.getValueTypeActions()) {
222 assert(MVT::LAST_VALUETYPE
<= MVT::MAX_ALLOWED_VALUETYPE
&&
223 "Too many value types for ValueTypeActions to hold!");
226 void SelectionDAGLegalize::LegalizeDAG() {
227 LastCALLSEQ_END
= DAG
.getEntryNode();
228 IsLegalizingCall
= false;
230 // The legalize process is inherently a bottom-up recursive process (users
231 // legalize their uses before themselves). Given infinite stack space, we
232 // could just start legalizing on the root and traverse the whole graph. In
233 // practice however, this causes us to run out of stack space on large basic
234 // blocks. To avoid this problem, compute an ordering of the nodes where each
235 // node is only legalized after all of its operands are legalized.
236 DAG
.AssignTopologicalOrder();
237 for (SelectionDAG::allnodes_iterator I
= DAG
.allnodes_begin(),
238 E
= prior(DAG
.allnodes_end()); I
!= llvm::next(E
); ++I
)
239 LegalizeOp(SDValue(I
, 0));
241 // Finally, it's possible the root changed. Get the new root.
242 SDValue OldRoot
= DAG
.getRoot();
243 assert(LegalizedNodes
.count(OldRoot
) && "Root didn't get legalized?");
244 DAG
.setRoot(LegalizedNodes
[OldRoot
]);
246 LegalizedNodes
.clear();
248 // Remove dead nodes now.
249 DAG
.RemoveDeadNodes();
253 /// FindCallEndFromCallStart - Given a chained node that is part of a call
254 /// sequence, find the CALLSEQ_END node that terminates the call sequence.
255 static SDNode
*FindCallEndFromCallStart(SDNode
*Node
) {
256 if (Node
->getOpcode() == ISD::CALLSEQ_END
)
258 if (Node
->use_empty())
259 return 0; // No CallSeqEnd
261 // The chain is usually at the end.
262 SDValue
TheChain(Node
, Node
->getNumValues()-1);
263 if (TheChain
.getValueType() != MVT::Other
) {
264 // Sometimes it's at the beginning.
265 TheChain
= SDValue(Node
, 0);
266 if (TheChain
.getValueType() != MVT::Other
) {
267 // Otherwise, hunt for it.
268 for (unsigned i
= 1, e
= Node
->getNumValues(); i
!= e
; ++i
)
269 if (Node
->getValueType(i
) == MVT::Other
) {
270 TheChain
= SDValue(Node
, i
);
274 // Otherwise, we walked into a node without a chain.
275 if (TheChain
.getValueType() != MVT::Other
)
280 for (SDNode::use_iterator UI
= Node
->use_begin(),
281 E
= Node
->use_end(); UI
!= E
; ++UI
) {
283 // Make sure to only follow users of our token chain.
285 for (unsigned i
= 0, e
= User
->getNumOperands(); i
!= e
; ++i
)
286 if (User
->getOperand(i
) == TheChain
)
287 if (SDNode
*Result
= FindCallEndFromCallStart(User
))
293 /// FindCallStartFromCallEnd - Given a chained node that is part of a call
294 /// sequence, find the CALLSEQ_START node that initiates the call sequence.
295 static SDNode
*FindCallStartFromCallEnd(SDNode
*Node
) {
296 assert(Node
&& "Didn't find callseq_start for a call??");
297 if (Node
->getOpcode() == ISD::CALLSEQ_START
) return Node
;
299 assert(Node
->getOperand(0).getValueType() == MVT::Other
&&
300 "Node doesn't have a token chain argument!");
301 return FindCallStartFromCallEnd(Node
->getOperand(0).getNode());
304 /// LegalizeAllNodesNotLeadingTo - Recursively walk the uses of N, looking to
305 /// see if any uses can reach Dest. If no dest operands can get to dest,
306 /// legalize them, legalize ourself, and return false, otherwise, return true.
308 /// Keep track of the nodes we fine that actually do lead to Dest in
309 /// NodesLeadingTo. This avoids retraversing them exponential number of times.
311 bool SelectionDAGLegalize::LegalizeAllNodesNotLeadingTo(SDNode
*N
, SDNode
*Dest
,
312 SmallPtrSet
<SDNode
*, 32> &NodesLeadingTo
) {
313 if (N
== Dest
) return true; // N certainly leads to Dest :)
315 // If we've already processed this node and it does lead to Dest, there is no
316 // need to reprocess it.
317 if (NodesLeadingTo
.count(N
)) return true;
319 // If the first result of this node has been already legalized, then it cannot
321 if (LegalizedNodes
.count(SDValue(N
, 0))) return false;
323 // Okay, this node has not already been legalized. Check and legalize all
324 // operands. If none lead to Dest, then we can legalize this node.
325 bool OperandsLeadToDest
= false;
326 for (unsigned i
= 0, e
= N
->getNumOperands(); i
!= e
; ++i
)
327 OperandsLeadToDest
|= // If an operand leads to Dest, so do we.
328 LegalizeAllNodesNotLeadingTo(N
->getOperand(i
).getNode(), Dest
,
331 if (OperandsLeadToDest
) {
332 NodesLeadingTo
.insert(N
);
336 // Okay, this node looks safe, legalize it and return false.
337 LegalizeOp(SDValue(N
, 0));
341 /// ExpandConstantFP - Expands the ConstantFP node to an integer constant or
342 /// a load from the constant pool.
343 static SDValue
ExpandConstantFP(ConstantFPSDNode
*CFP
, bool UseCP
,
344 SelectionDAG
&DAG
, const TargetLowering
&TLI
) {
346 DebugLoc dl
= CFP
->getDebugLoc();
348 // If a FP immediate is precise when represented as a float and if the
349 // target can do an extending load from float to double, we put it into
350 // the constant pool as a float, even if it's is statically typed as a
351 // double. This shrinks FP constants and canonicalizes them for targets where
352 // an FP extending load is the same cost as a normal load (such as on the x87
353 // fp stack or PPC FP unit).
354 EVT VT
= CFP
->getValueType(0);
355 ConstantFP
*LLVMC
= const_cast<ConstantFP
*>(CFP
->getConstantFPValue());
357 assert((VT
== MVT::f64
|| VT
== MVT::f32
) && "Invalid type expansion");
358 return DAG
.getConstant(LLVMC
->getValueAPF().bitcastToAPInt(),
359 (VT
== MVT::f64
) ? MVT::i64
: MVT::i32
);
364 while (SVT
!= MVT::f32
) {
365 SVT
= (MVT::SimpleValueType
)(SVT
.getSimpleVT().SimpleTy
- 1);
366 if (ConstantFPSDNode::isValueValidForType(SVT
, CFP
->getValueAPF()) &&
367 // Only do this if the target has a native EXTLOAD instruction from
369 TLI
.isLoadExtLegal(ISD::EXTLOAD
, SVT
) &&
370 TLI
.ShouldShrinkFPConstant(OrigVT
)) {
371 const Type
*SType
= SVT
.getTypeForEVT(*DAG
.getContext());
372 LLVMC
= cast
<ConstantFP
>(ConstantExpr::getFPTrunc(LLVMC
, SType
));
378 SDValue CPIdx
= DAG
.getConstantPool(LLVMC
, TLI
.getPointerTy());
379 unsigned Alignment
= cast
<ConstantPoolSDNode
>(CPIdx
)->getAlignment();
381 return DAG
.getExtLoad(ISD::EXTLOAD
, OrigVT
, dl
,
383 CPIdx
, PseudoSourceValue::getConstantPool(),
384 0, VT
, false, false, Alignment
);
385 return DAG
.getLoad(OrigVT
, dl
, DAG
.getEntryNode(), CPIdx
,
386 PseudoSourceValue::getConstantPool(), 0, false, false,
390 /// ExpandUnalignedStore - Expands an unaligned store to 2 half-size stores.
392 SDValue
ExpandUnalignedStore(StoreSDNode
*ST
, SelectionDAG
&DAG
,
393 const TargetLowering
&TLI
) {
394 SDValue Chain
= ST
->getChain();
395 SDValue Ptr
= ST
->getBasePtr();
396 SDValue Val
= ST
->getValue();
397 EVT VT
= Val
.getValueType();
398 int Alignment
= ST
->getAlignment();
399 int SVOffset
= ST
->getSrcValueOffset();
400 DebugLoc dl
= ST
->getDebugLoc();
401 if (ST
->getMemoryVT().isFloatingPoint() ||
402 ST
->getMemoryVT().isVector()) {
403 EVT intVT
= EVT::getIntegerVT(*DAG
.getContext(), VT
.getSizeInBits());
404 if (TLI
.isTypeLegal(intVT
)) {
405 // Expand to a bitconvert of the value to the integer type of the
406 // same size, then a (misaligned) int store.
407 // FIXME: Does not handle truncating floating point stores!
408 SDValue Result
= DAG
.getNode(ISD::BIT_CONVERT
, dl
, intVT
, Val
);
409 return DAG
.getStore(Chain
, dl
, Result
, Ptr
, ST
->getSrcValue(),
410 SVOffset
, ST
->isVolatile(), ST
->isNonTemporal(),
413 // Do a (aligned) store to a stack slot, then copy from the stack slot
414 // to the final destination using (unaligned) integer loads and stores.
415 EVT StoredVT
= ST
->getMemoryVT();
417 TLI
.getRegisterType(*DAG
.getContext(),
418 EVT::getIntegerVT(*DAG
.getContext(),
419 StoredVT
.getSizeInBits()));
420 unsigned StoredBytes
= StoredVT
.getSizeInBits() / 8;
421 unsigned RegBytes
= RegVT
.getSizeInBits() / 8;
422 unsigned NumRegs
= (StoredBytes
+ RegBytes
- 1) / RegBytes
;
424 // Make sure the stack slot is also aligned for the register type.
425 SDValue StackPtr
= DAG
.CreateStackTemporary(StoredVT
, RegVT
);
427 // Perform the original store, only redirected to the stack slot.
428 SDValue Store
= DAG
.getTruncStore(Chain
, dl
,
429 Val
, StackPtr
, NULL
, 0, StoredVT
,
431 SDValue Increment
= DAG
.getConstant(RegBytes
, TLI
.getPointerTy());
432 SmallVector
<SDValue
, 8> Stores
;
435 // Do all but one copies using the full register width.
436 for (unsigned i
= 1; i
< NumRegs
; i
++) {
437 // Load one integer register's worth from the stack slot.
438 SDValue Load
= DAG
.getLoad(RegVT
, dl
, Store
, StackPtr
, NULL
, 0,
440 // Store it to the final location. Remember the store.
441 Stores
.push_back(DAG
.getStore(Load
.getValue(1), dl
, Load
, Ptr
,
442 ST
->getSrcValue(), SVOffset
+ Offset
,
443 ST
->isVolatile(), ST
->isNonTemporal(),
444 MinAlign(ST
->getAlignment(), Offset
)));
445 // Increment the pointers.
447 StackPtr
= DAG
.getNode(ISD::ADD
, dl
, StackPtr
.getValueType(), StackPtr
,
449 Ptr
= DAG
.getNode(ISD::ADD
, dl
, Ptr
.getValueType(), Ptr
, Increment
);
452 // The last store may be partial. Do a truncating store. On big-endian
453 // machines this requires an extending load from the stack slot to ensure
454 // that the bits are in the right place.
455 EVT MemVT
= EVT::getIntegerVT(*DAG
.getContext(),
456 8 * (StoredBytes
- Offset
));
458 // Load from the stack slot.
459 SDValue Load
= DAG
.getExtLoad(ISD::EXTLOAD
, RegVT
, dl
, Store
, StackPtr
,
460 NULL
, 0, MemVT
, false, false, 0);
462 Stores
.push_back(DAG
.getTruncStore(Load
.getValue(1), dl
, Load
, Ptr
,
463 ST
->getSrcValue(), SVOffset
+ Offset
,
464 MemVT
, ST
->isVolatile(),
466 MinAlign(ST
->getAlignment(), Offset
)));
467 // The order of the stores doesn't matter - say it with a TokenFactor.
468 return DAG
.getNode(ISD::TokenFactor
, dl
, MVT::Other
, &Stores
[0],
472 assert(ST
->getMemoryVT().isInteger() &&
473 !ST
->getMemoryVT().isVector() &&
474 "Unaligned store of unknown type.");
475 // Get the half-size VT
476 EVT NewStoredVT
= ST
->getMemoryVT().getHalfSizedIntegerVT(*DAG
.getContext());
477 int NumBits
= NewStoredVT
.getSizeInBits();
478 int IncrementSize
= NumBits
/ 8;
480 // Divide the stored value in two parts.
481 SDValue ShiftAmount
= DAG
.getConstant(NumBits
, TLI
.getShiftAmountTy());
483 SDValue Hi
= DAG
.getNode(ISD::SRL
, dl
, VT
, Val
, ShiftAmount
);
485 // Store the two parts
486 SDValue Store1
, Store2
;
487 Store1
= DAG
.getTruncStore(Chain
, dl
, TLI
.isLittleEndian()?Lo
:Hi
, Ptr
,
488 ST
->getSrcValue(), SVOffset
, NewStoredVT
,
489 ST
->isVolatile(), ST
->isNonTemporal(), Alignment
);
490 Ptr
= DAG
.getNode(ISD::ADD
, dl
, Ptr
.getValueType(), Ptr
,
491 DAG
.getConstant(IncrementSize
, TLI
.getPointerTy()));
492 Alignment
= MinAlign(Alignment
, IncrementSize
);
493 Store2
= DAG
.getTruncStore(Chain
, dl
, TLI
.isLittleEndian()?Hi
:Lo
, Ptr
,
494 ST
->getSrcValue(), SVOffset
+ IncrementSize
,
495 NewStoredVT
, ST
->isVolatile(), ST
->isNonTemporal(),
498 return DAG
.getNode(ISD::TokenFactor
, dl
, MVT::Other
, Store1
, Store2
);
501 /// ExpandUnalignedLoad - Expands an unaligned load to 2 half-size loads.
503 SDValue
ExpandUnalignedLoad(LoadSDNode
*LD
, SelectionDAG
&DAG
,
504 const TargetLowering
&TLI
) {
505 int SVOffset
= LD
->getSrcValueOffset();
506 SDValue Chain
= LD
->getChain();
507 SDValue Ptr
= LD
->getBasePtr();
508 EVT VT
= LD
->getValueType(0);
509 EVT LoadedVT
= LD
->getMemoryVT();
510 DebugLoc dl
= LD
->getDebugLoc();
511 if (VT
.isFloatingPoint() || VT
.isVector()) {
512 EVT intVT
= EVT::getIntegerVT(*DAG
.getContext(), LoadedVT
.getSizeInBits());
513 if (TLI
.isTypeLegal(intVT
)) {
514 // Expand to a (misaligned) integer load of the same size,
515 // then bitconvert to floating point or vector.
516 SDValue newLoad
= DAG
.getLoad(intVT
, dl
, Chain
, Ptr
, LD
->getSrcValue(),
517 SVOffset
, LD
->isVolatile(),
518 LD
->isNonTemporal(), LD
->getAlignment());
519 SDValue Result
= DAG
.getNode(ISD::BIT_CONVERT
, dl
, LoadedVT
, newLoad
);
520 if (VT
.isFloatingPoint() && LoadedVT
!= VT
)
521 Result
= DAG
.getNode(ISD::FP_EXTEND
, dl
, VT
, Result
);
523 SDValue Ops
[] = { Result
, Chain
};
524 return DAG
.getMergeValues(Ops
, 2, dl
);
526 // Copy the value to a (aligned) stack slot using (unaligned) integer
527 // loads and stores, then do a (aligned) load from the stack slot.
528 EVT RegVT
= TLI
.getRegisterType(*DAG
.getContext(), intVT
);
529 unsigned LoadedBytes
= LoadedVT
.getSizeInBits() / 8;
530 unsigned RegBytes
= RegVT
.getSizeInBits() / 8;
531 unsigned NumRegs
= (LoadedBytes
+ RegBytes
- 1) / RegBytes
;
533 // Make sure the stack slot is also aligned for the register type.
534 SDValue StackBase
= DAG
.CreateStackTemporary(LoadedVT
, RegVT
);
536 SDValue Increment
= DAG
.getConstant(RegBytes
, TLI
.getPointerTy());
537 SmallVector
<SDValue
, 8> Stores
;
538 SDValue StackPtr
= StackBase
;
541 // Do all but one copies using the full register width.
542 for (unsigned i
= 1; i
< NumRegs
; i
++) {
543 // Load one integer register's worth from the original location.
544 SDValue Load
= DAG
.getLoad(RegVT
, dl
, Chain
, Ptr
, LD
->getSrcValue(),
545 SVOffset
+ Offset
, LD
->isVolatile(),
547 MinAlign(LD
->getAlignment(), Offset
));
548 // Follow the load with a store to the stack slot. Remember the store.
549 Stores
.push_back(DAG
.getStore(Load
.getValue(1), dl
, Load
, StackPtr
,
550 NULL
, 0, false, false, 0));
551 // Increment the pointers.
553 Ptr
= DAG
.getNode(ISD::ADD
, dl
, Ptr
.getValueType(), Ptr
, Increment
);
554 StackPtr
= DAG
.getNode(ISD::ADD
, dl
, StackPtr
.getValueType(), StackPtr
,
558 // The last copy may be partial. Do an extending load.
559 EVT MemVT
= EVT::getIntegerVT(*DAG
.getContext(),
560 8 * (LoadedBytes
- Offset
));
561 SDValue Load
= DAG
.getExtLoad(ISD::EXTLOAD
, RegVT
, dl
, Chain
, Ptr
,
562 LD
->getSrcValue(), SVOffset
+ Offset
,
563 MemVT
, LD
->isVolatile(),
565 MinAlign(LD
->getAlignment(), Offset
));
566 // Follow the load with a store to the stack slot. Remember the store.
567 // On big-endian machines this requires a truncating store to ensure
568 // that the bits end up in the right place.
569 Stores
.push_back(DAG
.getTruncStore(Load
.getValue(1), dl
, Load
, StackPtr
,
570 NULL
, 0, MemVT
, false, false, 0));
572 // The order of the stores doesn't matter - say it with a TokenFactor.
573 SDValue TF
= DAG
.getNode(ISD::TokenFactor
, dl
, MVT::Other
, &Stores
[0],
576 // Finally, perform the original load only redirected to the stack slot.
577 Load
= DAG
.getExtLoad(LD
->getExtensionType(), VT
, dl
, TF
, StackBase
,
578 NULL
, 0, LoadedVT
, false, false, 0);
580 // Callers expect a MERGE_VALUES node.
581 SDValue Ops
[] = { Load
, TF
};
582 return DAG
.getMergeValues(Ops
, 2, dl
);
585 assert(LoadedVT
.isInteger() && !LoadedVT
.isVector() &&
586 "Unaligned load of unsupported type.");
588 // Compute the new VT that is half the size of the old one. This is an
590 unsigned NumBits
= LoadedVT
.getSizeInBits();
592 NewLoadedVT
= EVT::getIntegerVT(*DAG
.getContext(), NumBits
/2);
595 unsigned Alignment
= LD
->getAlignment();
596 unsigned IncrementSize
= NumBits
/ 8;
597 ISD::LoadExtType HiExtType
= LD
->getExtensionType();
599 // If the original load is NON_EXTLOAD, the hi part load must be ZEXTLOAD.
600 if (HiExtType
== ISD::NON_EXTLOAD
)
601 HiExtType
= ISD::ZEXTLOAD
;
603 // Load the value in two parts
605 if (TLI
.isLittleEndian()) {
606 Lo
= DAG
.getExtLoad(ISD::ZEXTLOAD
, VT
, dl
, Chain
, Ptr
, LD
->getSrcValue(),
607 SVOffset
, NewLoadedVT
, LD
->isVolatile(),
608 LD
->isNonTemporal(), Alignment
);
609 Ptr
= DAG
.getNode(ISD::ADD
, dl
, Ptr
.getValueType(), Ptr
,
610 DAG
.getConstant(IncrementSize
, TLI
.getPointerTy()));
611 Hi
= DAG
.getExtLoad(HiExtType
, VT
, dl
, Chain
, Ptr
, LD
->getSrcValue(),
612 SVOffset
+ IncrementSize
, NewLoadedVT
, LD
->isVolatile(),
613 LD
->isNonTemporal(), MinAlign(Alignment
,IncrementSize
));
615 Hi
= DAG
.getExtLoad(HiExtType
, VT
, dl
, Chain
, Ptr
, LD
->getSrcValue(),
616 SVOffset
, NewLoadedVT
, LD
->isVolatile(),
617 LD
->isNonTemporal(), Alignment
);
618 Ptr
= DAG
.getNode(ISD::ADD
, dl
, Ptr
.getValueType(), Ptr
,
619 DAG
.getConstant(IncrementSize
, TLI
.getPointerTy()));
620 Lo
= DAG
.getExtLoad(ISD::ZEXTLOAD
, VT
, dl
, Chain
, Ptr
, LD
->getSrcValue(),
621 SVOffset
+ IncrementSize
, NewLoadedVT
, LD
->isVolatile(),
622 LD
->isNonTemporal(), MinAlign(Alignment
,IncrementSize
));
625 // aggregate the two parts
626 SDValue ShiftAmount
= DAG
.getConstant(NumBits
, TLI
.getShiftAmountTy());
627 SDValue Result
= DAG
.getNode(ISD::SHL
, dl
, VT
, Hi
, ShiftAmount
);
628 Result
= DAG
.getNode(ISD::OR
, dl
, VT
, Result
, Lo
);
630 SDValue TF
= DAG
.getNode(ISD::TokenFactor
, dl
, MVT::Other
, Lo
.getValue(1),
633 SDValue Ops
[] = { Result
, TF
};
634 return DAG
.getMergeValues(Ops
, 2, dl
);
637 /// PerformInsertVectorEltInMemory - Some target cannot handle a variable
638 /// insertion index for the INSERT_VECTOR_ELT instruction. In this case, it
639 /// is necessary to spill the vector being inserted into to memory, perform
640 /// the insert there, and then read the result back.
641 SDValue
SelectionDAGLegalize::
642 PerformInsertVectorEltInMemory(SDValue Vec
, SDValue Val
, SDValue Idx
,
648 // If the target doesn't support this, we have to spill the input vector
649 // to a temporary stack slot, update the element, then reload it. This is
650 // badness. We could also load the value into a vector register (either
651 // with a "move to register" or "extload into register" instruction, then
652 // permute it into place, if the idx is a constant and if the idx is
653 // supported by the target.
654 EVT VT
= Tmp1
.getValueType();
655 EVT EltVT
= VT
.getVectorElementType();
656 EVT IdxVT
= Tmp3
.getValueType();
657 EVT PtrVT
= TLI
.getPointerTy();
658 SDValue StackPtr
= DAG
.CreateStackTemporary(VT
);
660 int SPFI
= cast
<FrameIndexSDNode
>(StackPtr
.getNode())->getIndex();
663 SDValue Ch
= DAG
.getStore(DAG
.getEntryNode(), dl
, Tmp1
, StackPtr
,
664 PseudoSourceValue::getFixedStack(SPFI
), 0,
667 // Truncate or zero extend offset to target pointer type.
668 unsigned CastOpc
= IdxVT
.bitsGT(PtrVT
) ? ISD::TRUNCATE
: ISD::ZERO_EXTEND
;
669 Tmp3
= DAG
.getNode(CastOpc
, dl
, PtrVT
, Tmp3
);
670 // Add the offset to the index.
671 unsigned EltSize
= EltVT
.getSizeInBits()/8;
672 Tmp3
= DAG
.getNode(ISD::MUL
, dl
, IdxVT
, Tmp3
,DAG
.getConstant(EltSize
, IdxVT
));
673 SDValue StackPtr2
= DAG
.getNode(ISD::ADD
, dl
, IdxVT
, Tmp3
, StackPtr
);
674 // Store the scalar value.
675 Ch
= DAG
.getTruncStore(Ch
, dl
, Tmp2
, StackPtr2
,
676 PseudoSourceValue::getFixedStack(SPFI
), 0, EltVT
,
678 // Load the updated vector.
679 return DAG
.getLoad(VT
, dl
, Ch
, StackPtr
,
680 PseudoSourceValue::getFixedStack(SPFI
), 0,
685 SDValue
SelectionDAGLegalize::
686 ExpandINSERT_VECTOR_ELT(SDValue Vec
, SDValue Val
, SDValue Idx
, DebugLoc dl
) {
687 if (ConstantSDNode
*InsertPos
= dyn_cast
<ConstantSDNode
>(Idx
)) {
688 // SCALAR_TO_VECTOR requires that the type of the value being inserted
689 // match the element type of the vector being created, except for
690 // integers in which case the inserted value can be over width.
691 EVT EltVT
= Vec
.getValueType().getVectorElementType();
692 if (Val
.getValueType() == EltVT
||
693 (EltVT
.isInteger() && Val
.getValueType().bitsGE(EltVT
))) {
694 SDValue ScVec
= DAG
.getNode(ISD::SCALAR_TO_VECTOR
, dl
,
695 Vec
.getValueType(), Val
);
697 unsigned NumElts
= Vec
.getValueType().getVectorNumElements();
698 // We generate a shuffle of InVec and ScVec, so the shuffle mask
699 // should be 0,1,2,3,4,5... with the appropriate element replaced with
701 SmallVector
<int, 8> ShufOps
;
702 for (unsigned i
= 0; i
!= NumElts
; ++i
)
703 ShufOps
.push_back(i
!= InsertPos
->getZExtValue() ? i
: NumElts
);
705 return DAG
.getVectorShuffle(Vec
.getValueType(), dl
, Vec
, ScVec
,
709 return PerformInsertVectorEltInMemory(Vec
, Val
, Idx
, dl
);
712 SDValue
SelectionDAGLegalize::OptimizeFloatStore(StoreSDNode
* ST
) {
713 // Turn 'store float 1.0, Ptr' -> 'store int 0x12345678, Ptr'
714 // FIXME: We shouldn't do this for TargetConstantFP's.
715 // FIXME: move this to the DAG Combiner! Note that we can't regress due
716 // to phase ordering between legalized code and the dag combiner. This
717 // probably means that we need to integrate dag combiner and legalizer
719 // We generally can't do this one for long doubles.
720 SDValue Tmp1
= ST
->getChain();
721 SDValue Tmp2
= ST
->getBasePtr();
723 int SVOffset
= ST
->getSrcValueOffset();
724 unsigned Alignment
= ST
->getAlignment();
725 bool isVolatile
= ST
->isVolatile();
726 bool isNonTemporal
= ST
->isNonTemporal();
727 DebugLoc dl
= ST
->getDebugLoc();
728 if (ConstantFPSDNode
*CFP
= dyn_cast
<ConstantFPSDNode
>(ST
->getValue())) {
729 if (CFP
->getValueType(0) == MVT::f32
&&
730 getTypeAction(MVT::i32
) == Legal
) {
731 Tmp3
= DAG
.getConstant(CFP
->getValueAPF().
732 bitcastToAPInt().zextOrTrunc(32),
734 return DAG
.getStore(Tmp1
, dl
, Tmp3
, Tmp2
, ST
->getSrcValue(),
735 SVOffset
, isVolatile
, isNonTemporal
, Alignment
);
736 } else if (CFP
->getValueType(0) == MVT::f64
) {
737 // If this target supports 64-bit registers, do a single 64-bit store.
738 if (getTypeAction(MVT::i64
) == Legal
) {
739 Tmp3
= DAG
.getConstant(CFP
->getValueAPF().bitcastToAPInt().
740 zextOrTrunc(64), MVT::i64
);
741 return DAG
.getStore(Tmp1
, dl
, Tmp3
, Tmp2
, ST
->getSrcValue(),
742 SVOffset
, isVolatile
, isNonTemporal
, Alignment
);
743 } else if (getTypeAction(MVT::i32
) == Legal
&& !ST
->isVolatile()) {
744 // Otherwise, if the target supports 32-bit registers, use 2 32-bit
745 // stores. If the target supports neither 32- nor 64-bits, this
746 // xform is certainly not worth it.
747 const APInt
&IntVal
=CFP
->getValueAPF().bitcastToAPInt();
748 SDValue Lo
= DAG
.getConstant(APInt(IntVal
).trunc(32), MVT::i32
);
749 SDValue Hi
= DAG
.getConstant(IntVal
.lshr(32).trunc(32), MVT::i32
);
750 if (TLI
.isBigEndian()) std::swap(Lo
, Hi
);
752 Lo
= DAG
.getStore(Tmp1
, dl
, Lo
, Tmp2
, ST
->getSrcValue(),
753 SVOffset
, isVolatile
, isNonTemporal
, Alignment
);
754 Tmp2
= DAG
.getNode(ISD::ADD
, dl
, Tmp2
.getValueType(), Tmp2
,
755 DAG
.getIntPtrConstant(4));
756 Hi
= DAG
.getStore(Tmp1
, dl
, Hi
, Tmp2
, ST
->getSrcValue(), SVOffset
+4,
757 isVolatile
, isNonTemporal
, MinAlign(Alignment
, 4U));
759 return DAG
.getNode(ISD::TokenFactor
, dl
, MVT::Other
, Lo
, Hi
);
766 /// LegalizeOp - We know that the specified value has a legal type, and
767 /// that its operands are legal. Now ensure that the operation itself
768 /// is legal, recursively ensuring that the operands' operations remain
770 SDValue
SelectionDAGLegalize::LegalizeOp(SDValue Op
) {
771 if (Op
.getOpcode() == ISD::TargetConstant
) // Allow illegal target nodes.
774 SDNode
*Node
= Op
.getNode();
775 DebugLoc dl
= Node
->getDebugLoc();
777 for (unsigned i
= 0, e
= Node
->getNumValues(); i
!= e
; ++i
)
778 assert(getTypeAction(Node
->getValueType(i
)) == Legal
&&
779 "Unexpected illegal type!");
781 for (unsigned i
= 0, e
= Node
->getNumOperands(); i
!= e
; ++i
)
782 assert((isTypeLegal(Node
->getOperand(i
).getValueType()) ||
783 Node
->getOperand(i
).getOpcode() == ISD::TargetConstant
) &&
784 "Unexpected illegal type!");
786 // Note that LegalizeOp may be reentered even from single-use nodes, which
787 // means that we always must cache transformed nodes.
788 DenseMap
<SDValue
, SDValue
>::iterator I
= LegalizedNodes
.find(Op
);
789 if (I
!= LegalizedNodes
.end()) return I
->second
;
791 SDValue Tmp1
, Tmp2
, Tmp3
, Tmp4
;
793 bool isCustom
= false;
795 // Figure out the correct action; the way to query this varies by opcode
796 TargetLowering::LegalizeAction Action
;
797 bool SimpleFinishLegalizing
= true;
798 switch (Node
->getOpcode()) {
799 case ISD::INTRINSIC_W_CHAIN
:
800 case ISD::INTRINSIC_WO_CHAIN
:
801 case ISD::INTRINSIC_VOID
:
804 Action
= TLI
.getOperationAction(Node
->getOpcode(), MVT::Other
);
806 case ISD::SINT_TO_FP
:
807 case ISD::UINT_TO_FP
:
808 case ISD::EXTRACT_VECTOR_ELT
:
809 Action
= TLI
.getOperationAction(Node
->getOpcode(),
810 Node
->getOperand(0).getValueType());
812 case ISD::FP_ROUND_INREG
:
813 case ISD::SIGN_EXTEND_INREG
: {
814 EVT InnerType
= cast
<VTSDNode
>(Node
->getOperand(1))->getVT();
815 Action
= TLI
.getOperationAction(Node
->getOpcode(), InnerType
);
821 unsigned CCOperand
= Node
->getOpcode() == ISD::SELECT_CC
? 4 :
822 Node
->getOpcode() == ISD::SETCC
? 2 : 1;
823 unsigned CompareOperand
= Node
->getOpcode() == ISD::BR_CC
? 2 : 0;
824 EVT OpVT
= Node
->getOperand(CompareOperand
).getValueType();
825 ISD::CondCode CCCode
=
826 cast
<CondCodeSDNode
>(Node
->getOperand(CCOperand
))->get();
827 Action
= TLI
.getCondCodeAction(CCCode
, OpVT
);
828 if (Action
== TargetLowering::Legal
) {
829 if (Node
->getOpcode() == ISD::SELECT_CC
)
830 Action
= TLI
.getOperationAction(Node
->getOpcode(),
831 Node
->getValueType(0));
833 Action
= TLI
.getOperationAction(Node
->getOpcode(), OpVT
);
839 // FIXME: Model these properly. LOAD and STORE are complicated, and
840 // STORE expects the unlegalized operand in some cases.
841 SimpleFinishLegalizing
= false;
843 case ISD::CALLSEQ_START
:
844 case ISD::CALLSEQ_END
:
845 // FIXME: This shouldn't be necessary. These nodes have special properties
846 // dealing with the recursive nature of legalization. Removing this
847 // special case should be done as part of making LegalizeDAG non-recursive.
848 SimpleFinishLegalizing
= false;
850 case ISD::EXTRACT_ELEMENT
:
851 case ISD::FLT_ROUNDS_
:
859 case ISD::MERGE_VALUES
:
861 case ISD::FRAME_TO_ARGS_OFFSET
:
862 case ISD::EH_SJLJ_SETJMP
:
863 case ISD::EH_SJLJ_LONGJMP
:
864 // These operations lie about being legal: when they claim to be legal,
865 // they should actually be expanded.
866 Action
= TLI
.getOperationAction(Node
->getOpcode(), Node
->getValueType(0));
867 if (Action
== TargetLowering::Legal
)
868 Action
= TargetLowering::Expand
;
870 case ISD::TRAMPOLINE
:
872 case ISD::RETURNADDR
:
873 // These operations lie about being legal: when they claim to be legal,
874 // they should actually be custom-lowered.
875 Action
= TLI
.getOperationAction(Node
->getOpcode(), Node
->getValueType(0));
876 if (Action
== TargetLowering::Legal
)
877 Action
= TargetLowering::Custom
;
879 case ISD::BUILD_VECTOR
:
880 // A weird case: legalization for BUILD_VECTOR never legalizes the
882 // FIXME: This really sucks... changing it isn't semantically incorrect,
883 // but it massively pessimizes the code for floating-point BUILD_VECTORs
884 // because ConstantFP operands get legalized into constant pool loads
885 // before the BUILD_VECTOR code can see them. It doesn't usually bite,
886 // though, because BUILD_VECTORS usually get lowered into other nodes
887 // which get legalized properly.
888 SimpleFinishLegalizing
= false;
891 if (Node
->getOpcode() >= ISD::BUILTIN_OP_END
) {
892 Action
= TargetLowering::Legal
;
894 Action
= TLI
.getOperationAction(Node
->getOpcode(), Node
->getValueType(0));
899 if (SimpleFinishLegalizing
) {
900 SmallVector
<SDValue
, 8> Ops
, ResultVals
;
901 for (unsigned i
= 0, e
= Node
->getNumOperands(); i
!= e
; ++i
)
902 Ops
.push_back(LegalizeOp(Node
->getOperand(i
)));
903 switch (Node
->getOpcode()) {
910 // Branches tweak the chain to include LastCALLSEQ_END
911 Ops
[0] = DAG
.getNode(ISD::TokenFactor
, dl
, MVT::Other
, Ops
[0],
913 Ops
[0] = LegalizeOp(Ops
[0]);
914 LastCALLSEQ_END
= DAG
.getEntryNode();
921 // Legalizing shifts/rotates requires adjusting the shift amount
922 // to the appropriate width.
923 if (!Ops
[1].getValueType().isVector())
924 Ops
[1] = LegalizeOp(DAG
.getShiftAmountOperand(Ops
[1]));
929 // Legalizing shifts/rotates requires adjusting the shift amount
930 // to the appropriate width.
931 if (!Ops
[2].getValueType().isVector())
932 Ops
[2] = LegalizeOp(DAG
.getShiftAmountOperand(Ops
[2]));
936 Result
= SDValue(DAG
.UpdateNodeOperands(Result
.getNode(), Ops
.data(),
939 case TargetLowering::Legal
:
940 for (unsigned i
= 0, e
= Node
->getNumValues(); i
!= e
; ++i
)
941 ResultVals
.push_back(Result
.getValue(i
));
943 case TargetLowering::Custom
:
944 // FIXME: The handling for custom lowering with multiple results is
946 Tmp1
= TLI
.LowerOperation(Result
, DAG
);
947 if (Tmp1
.getNode()) {
948 for (unsigned i
= 0, e
= Node
->getNumValues(); i
!= e
; ++i
) {
950 ResultVals
.push_back(Tmp1
);
952 ResultVals
.push_back(Tmp1
.getValue(i
));
958 case TargetLowering::Expand
:
959 ExpandNode(Result
.getNode(), ResultVals
);
961 case TargetLowering::Promote
:
962 PromoteNode(Result
.getNode(), ResultVals
);
965 if (!ResultVals
.empty()) {
966 for (unsigned i
= 0, e
= ResultVals
.size(); i
!= e
; ++i
) {
967 if (ResultVals
[i
] != SDValue(Node
, i
))
968 ResultVals
[i
] = LegalizeOp(ResultVals
[i
]);
969 AddLegalizedOperand(SDValue(Node
, i
), ResultVals
[i
]);
971 return ResultVals
[Op
.getResNo()];
975 switch (Node
->getOpcode()) {
982 assert(0 && "Do not know how to legalize this operator!");
984 case ISD::BUILD_VECTOR
:
985 switch (TLI
.getOperationAction(ISD::BUILD_VECTOR
, Node
->getValueType(0))) {
986 default: assert(0 && "This action is not supported yet!");
987 case TargetLowering::Custom
:
988 Tmp3
= TLI
.LowerOperation(Result
, DAG
);
989 if (Tmp3
.getNode()) {
994 case TargetLowering::Expand
:
995 Result
= ExpandBUILD_VECTOR(Result
.getNode());
999 case ISD::CALLSEQ_START
: {
1000 SDNode
*CallEnd
= FindCallEndFromCallStart(Node
);
1002 // Recursively Legalize all of the inputs of the call end that do not lead
1003 // to this call start. This ensures that any libcalls that need be inserted
1004 // are inserted *before* the CALLSEQ_START.
1005 {SmallPtrSet
<SDNode
*, 32> NodesLeadingTo
;
1006 for (unsigned i
= 0, e
= CallEnd
->getNumOperands(); i
!= e
; ++i
)
1007 LegalizeAllNodesNotLeadingTo(CallEnd
->getOperand(i
).getNode(), Node
,
1011 // Now that we have legalized all of the inputs (which may have inserted
1012 // libcalls), create the new CALLSEQ_START node.
1013 Tmp1
= LegalizeOp(Node
->getOperand(0)); // Legalize the chain.
1015 // Merge in the last call to ensure that this call starts after the last
1017 if (LastCALLSEQ_END
.getOpcode() != ISD::EntryToken
) {
1018 Tmp1
= DAG
.getNode(ISD::TokenFactor
, dl
, MVT::Other
,
1019 Tmp1
, LastCALLSEQ_END
);
1020 Tmp1
= LegalizeOp(Tmp1
);
1023 // Do not try to legalize the target-specific arguments (#1+).
1024 if (Tmp1
!= Node
->getOperand(0)) {
1025 SmallVector
<SDValue
, 8> Ops(Node
->op_begin(), Node
->op_end());
1027 Result
= SDValue(DAG
.UpdateNodeOperands(Result
.getNode(), &Ops
[0],
1028 Ops
.size()), Result
.getResNo());
1031 // Remember that the CALLSEQ_START is legalized.
1032 AddLegalizedOperand(Op
.getValue(0), Result
);
1033 if (Node
->getNumValues() == 2) // If this has a flag result, remember it.
1034 AddLegalizedOperand(Op
.getValue(1), Result
.getValue(1));
1036 // Now that the callseq_start and all of the non-call nodes above this call
1037 // sequence have been legalized, legalize the call itself. During this
1038 // process, no libcalls can/will be inserted, guaranteeing that no calls
1040 assert(!IsLegalizingCall
&& "Inconsistent sequentialization of calls!");
1041 // Note that we are selecting this call!
1042 LastCALLSEQ_END
= SDValue(CallEnd
, 0);
1043 IsLegalizingCall
= true;
1045 // Legalize the call, starting from the CALLSEQ_END.
1046 LegalizeOp(LastCALLSEQ_END
);
1047 assert(!IsLegalizingCall
&& "CALLSEQ_END should have cleared this!");
1050 case ISD::CALLSEQ_END
:
1051 // If the CALLSEQ_START node hasn't been legalized first, legalize it. This
1052 // will cause this node to be legalized as well as handling libcalls right.
1053 if (LastCALLSEQ_END
.getNode() != Node
) {
1054 LegalizeOp(SDValue(FindCallStartFromCallEnd(Node
), 0));
1055 DenseMap
<SDValue
, SDValue
>::iterator I
= LegalizedNodes
.find(Op
);
1056 assert(I
!= LegalizedNodes
.end() &&
1057 "Legalizing the call start should have legalized this node!");
1061 // Otherwise, the call start has been legalized and everything is going
1062 // according to plan. Just legalize ourselves normally here.
1063 Tmp1
= LegalizeOp(Node
->getOperand(0)); // Legalize the chain.
1064 // Do not try to legalize the target-specific arguments (#1+), except for
1065 // an optional flag input.
1066 if (Node
->getOperand(Node
->getNumOperands()-1).getValueType() != MVT::Flag
){
1067 if (Tmp1
!= Node
->getOperand(0)) {
1068 SmallVector
<SDValue
, 8> Ops(Node
->op_begin(), Node
->op_end());
1070 Result
= SDValue(DAG
.UpdateNodeOperands(Result
.getNode(),
1071 &Ops
[0], Ops
.size()),
1075 Tmp2
= LegalizeOp(Node
->getOperand(Node
->getNumOperands()-1));
1076 if (Tmp1
!= Node
->getOperand(0) ||
1077 Tmp2
!= Node
->getOperand(Node
->getNumOperands()-1)) {
1078 SmallVector
<SDValue
, 8> Ops(Node
->op_begin(), Node
->op_end());
1081 Result
= SDValue(DAG
.UpdateNodeOperands(Result
.getNode(),
1082 &Ops
[0], Ops
.size()),
1086 assert(IsLegalizingCall
&& "Call sequence imbalance between start/end?");
1087 // This finishes up call legalization.
1088 IsLegalizingCall
= false;
1090 // If the CALLSEQ_END node has a flag, remember that we legalized it.
1091 AddLegalizedOperand(SDValue(Node
, 0), Result
.getValue(0));
1092 if (Node
->getNumValues() == 2)
1093 AddLegalizedOperand(SDValue(Node
, 1), Result
.getValue(1));
1094 return Result
.getValue(Op
.getResNo());
1096 LoadSDNode
*LD
= cast
<LoadSDNode
>(Node
);
1097 Tmp1
= LegalizeOp(LD
->getChain()); // Legalize the chain.
1098 Tmp2
= LegalizeOp(LD
->getBasePtr()); // Legalize the base pointer.
1100 ISD::LoadExtType ExtType
= LD
->getExtensionType();
1101 if (ExtType
== ISD::NON_EXTLOAD
) {
1102 EVT VT
= Node
->getValueType(0);
1103 Result
= SDValue(DAG
.UpdateNodeOperands(Result
.getNode(),
1104 Tmp1
, Tmp2
, LD
->getOffset()),
1106 Tmp3
= Result
.getValue(0);
1107 Tmp4
= Result
.getValue(1);
1109 switch (TLI
.getOperationAction(Node
->getOpcode(), VT
)) {
1110 default: assert(0 && "This action is not supported yet!");
1111 case TargetLowering::Legal
:
1112 // If this is an unaligned load and the target doesn't support it,
1114 if (!TLI
.allowsUnalignedMemoryAccesses(LD
->getMemoryVT())) {
1115 const Type
*Ty
= LD
->getMemoryVT().getTypeForEVT(*DAG
.getContext());
1116 unsigned ABIAlignment
= TLI
.getTargetData()->getABITypeAlignment(Ty
);
1117 if (LD
->getAlignment() < ABIAlignment
){
1118 Result
= ExpandUnalignedLoad(cast
<LoadSDNode
>(Result
.getNode()),
1120 Tmp3
= Result
.getOperand(0);
1121 Tmp4
= Result
.getOperand(1);
1122 Tmp3
= LegalizeOp(Tmp3
);
1123 Tmp4
= LegalizeOp(Tmp4
);
1127 case TargetLowering::Custom
:
1128 Tmp1
= TLI
.LowerOperation(Tmp3
, DAG
);
1129 if (Tmp1
.getNode()) {
1130 Tmp3
= LegalizeOp(Tmp1
);
1131 Tmp4
= LegalizeOp(Tmp1
.getValue(1));
1134 case TargetLowering::Promote
: {
1135 // Only promote a load of vector type to another.
1136 assert(VT
.isVector() && "Cannot promote this load!");
1137 // Change base type to a different vector type.
1138 EVT NVT
= TLI
.getTypeToPromoteTo(Node
->getOpcode(), VT
);
1140 Tmp1
= DAG
.getLoad(NVT
, dl
, Tmp1
, Tmp2
, LD
->getSrcValue(),
1141 LD
->getSrcValueOffset(),
1142 LD
->isVolatile(), LD
->isNonTemporal(),
1143 LD
->getAlignment());
1144 Tmp3
= LegalizeOp(DAG
.getNode(ISD::BIT_CONVERT
, dl
, VT
, Tmp1
));
1145 Tmp4
= LegalizeOp(Tmp1
.getValue(1));
1149 // Since loads produce two values, make sure to remember that we
1150 // legalized both of them.
1151 AddLegalizedOperand(SDValue(Node
, 0), Tmp3
);
1152 AddLegalizedOperand(SDValue(Node
, 1), Tmp4
);
1153 return Op
.getResNo() ? Tmp4
: Tmp3
;
1155 EVT SrcVT
= LD
->getMemoryVT();
1156 unsigned SrcWidth
= SrcVT
.getSizeInBits();
1157 int SVOffset
= LD
->getSrcValueOffset();
1158 unsigned Alignment
= LD
->getAlignment();
1159 bool isVolatile
= LD
->isVolatile();
1160 bool isNonTemporal
= LD
->isNonTemporal();
1162 if (SrcWidth
!= SrcVT
.getStoreSizeInBits() &&
1163 // Some targets pretend to have an i1 loading operation, and actually
1164 // load an i8. This trick is correct for ZEXTLOAD because the top 7
1165 // bits are guaranteed to be zero; it helps the optimizers understand
1166 // that these bits are zero. It is also useful for EXTLOAD, since it
1167 // tells the optimizers that those bits are undefined. It would be
1168 // nice to have an effective generic way of getting these benefits...
1169 // Until such a way is found, don't insist on promoting i1 here.
1170 (SrcVT
!= MVT::i1
||
1171 TLI
.getLoadExtAction(ExtType
, MVT::i1
) == TargetLowering::Promote
)) {
1172 // Promote to a byte-sized load if not loading an integral number of
1173 // bytes. For example, promote EXTLOAD:i20 -> EXTLOAD:i24.
1174 unsigned NewWidth
= SrcVT
.getStoreSizeInBits();
1175 EVT NVT
= EVT::getIntegerVT(*DAG
.getContext(), NewWidth
);
1178 // The extra bits are guaranteed to be zero, since we stored them that
1179 // way. A zext load from NVT thus automatically gives zext from SrcVT.
1181 ISD::LoadExtType NewExtType
=
1182 ExtType
== ISD::ZEXTLOAD
? ISD::ZEXTLOAD
: ISD::EXTLOAD
;
1184 Result
= DAG
.getExtLoad(NewExtType
, Node
->getValueType(0), dl
,
1185 Tmp1
, Tmp2
, LD
->getSrcValue(), SVOffset
,
1186 NVT
, isVolatile
, isNonTemporal
, Alignment
);
1188 Ch
= Result
.getValue(1); // The chain.
1190 if (ExtType
== ISD::SEXTLOAD
)
1191 // Having the top bits zero doesn't help when sign extending.
1192 Result
= DAG
.getNode(ISD::SIGN_EXTEND_INREG
, dl
,
1193 Result
.getValueType(),
1194 Result
, DAG
.getValueType(SrcVT
));
1195 else if (ExtType
== ISD::ZEXTLOAD
|| NVT
== Result
.getValueType())
1196 // All the top bits are guaranteed to be zero - inform the optimizers.
1197 Result
= DAG
.getNode(ISD::AssertZext
, dl
,
1198 Result
.getValueType(), Result
,
1199 DAG
.getValueType(SrcVT
));
1201 Tmp1
= LegalizeOp(Result
);
1202 Tmp2
= LegalizeOp(Ch
);
1203 } else if (SrcWidth
& (SrcWidth
- 1)) {
1204 // If not loading a power-of-2 number of bits, expand as two loads.
1205 assert(!SrcVT
.isVector() && "Unsupported extload!");
1206 unsigned RoundWidth
= 1 << Log2_32(SrcWidth
);
1207 assert(RoundWidth
< SrcWidth
);
1208 unsigned ExtraWidth
= SrcWidth
- RoundWidth
;
1209 assert(ExtraWidth
< RoundWidth
);
1210 assert(!(RoundWidth
% 8) && !(ExtraWidth
% 8) &&
1211 "Load size not an integral number of bytes!");
1212 EVT RoundVT
= EVT::getIntegerVT(*DAG
.getContext(), RoundWidth
);
1213 EVT ExtraVT
= EVT::getIntegerVT(*DAG
.getContext(), ExtraWidth
);
1215 unsigned IncrementSize
;
1217 if (TLI
.isLittleEndian()) {
1218 // EXTLOAD:i24 -> ZEXTLOAD:i16 | (shl EXTLOAD@+2:i8, 16)
1219 // Load the bottom RoundWidth bits.
1220 Lo
= DAG
.getExtLoad(ISD::ZEXTLOAD
, Node
->getValueType(0), dl
,
1222 LD
->getSrcValue(), SVOffset
, RoundVT
, isVolatile
,
1223 isNonTemporal
, Alignment
);
1225 // Load the remaining ExtraWidth bits.
1226 IncrementSize
= RoundWidth
/ 8;
1227 Tmp2
= DAG
.getNode(ISD::ADD
, dl
, Tmp2
.getValueType(), Tmp2
,
1228 DAG
.getIntPtrConstant(IncrementSize
));
1229 Hi
= DAG
.getExtLoad(ExtType
, Node
->getValueType(0), dl
, Tmp1
, Tmp2
,
1230 LD
->getSrcValue(), SVOffset
+ IncrementSize
,
1231 ExtraVT
, isVolatile
, isNonTemporal
,
1232 MinAlign(Alignment
, IncrementSize
));
1234 // Build a factor node to remember that this load is independent of
1236 Ch
= DAG
.getNode(ISD::TokenFactor
, dl
, MVT::Other
, Lo
.getValue(1),
1239 // Move the top bits to the right place.
1240 Hi
= DAG
.getNode(ISD::SHL
, dl
, Hi
.getValueType(), Hi
,
1241 DAG
.getConstant(RoundWidth
, TLI
.getShiftAmountTy()));
1243 // Join the hi and lo parts.
1244 Result
= DAG
.getNode(ISD::OR
, dl
, Node
->getValueType(0), Lo
, Hi
);
1246 // Big endian - avoid unaligned loads.
1247 // EXTLOAD:i24 -> (shl EXTLOAD:i16, 8) | ZEXTLOAD@+2:i8
1248 // Load the top RoundWidth bits.
1249 Hi
= DAG
.getExtLoad(ExtType
, Node
->getValueType(0), dl
, Tmp1
, Tmp2
,
1250 LD
->getSrcValue(), SVOffset
, RoundVT
, isVolatile
,
1251 isNonTemporal
, Alignment
);
1253 // Load the remaining ExtraWidth bits.
1254 IncrementSize
= RoundWidth
/ 8;
1255 Tmp2
= DAG
.getNode(ISD::ADD
, dl
, Tmp2
.getValueType(), Tmp2
,
1256 DAG
.getIntPtrConstant(IncrementSize
));
1257 Lo
= DAG
.getExtLoad(ISD::ZEXTLOAD
,
1258 Node
->getValueType(0), dl
, Tmp1
, Tmp2
,
1259 LD
->getSrcValue(), SVOffset
+ IncrementSize
,
1260 ExtraVT
, isVolatile
, isNonTemporal
,
1261 MinAlign(Alignment
, IncrementSize
));
1263 // Build a factor node to remember that this load is independent of
1265 Ch
= DAG
.getNode(ISD::TokenFactor
, dl
, MVT::Other
, Lo
.getValue(1),
1268 // Move the top bits to the right place.
1269 Hi
= DAG
.getNode(ISD::SHL
, dl
, Hi
.getValueType(), Hi
,
1270 DAG
.getConstant(ExtraWidth
, TLI
.getShiftAmountTy()));
1272 // Join the hi and lo parts.
1273 Result
= DAG
.getNode(ISD::OR
, dl
, Node
->getValueType(0), Lo
, Hi
);
1276 Tmp1
= LegalizeOp(Result
);
1277 Tmp2
= LegalizeOp(Ch
);
1279 switch (TLI
.getLoadExtAction(ExtType
, SrcVT
)) {
1280 default: assert(0 && "This action is not supported yet!");
1281 case TargetLowering::Custom
:
1284 case TargetLowering::Legal
:
1285 Result
= SDValue(DAG
.UpdateNodeOperands(Result
.getNode(),
1286 Tmp1
, Tmp2
, LD
->getOffset()),
1288 Tmp1
= Result
.getValue(0);
1289 Tmp2
= Result
.getValue(1);
1292 Tmp3
= TLI
.LowerOperation(Result
, DAG
);
1293 if (Tmp3
.getNode()) {
1294 Tmp1
= LegalizeOp(Tmp3
);
1295 Tmp2
= LegalizeOp(Tmp3
.getValue(1));
1298 // If this is an unaligned load and the target doesn't support it,
1300 if (!TLI
.allowsUnalignedMemoryAccesses(LD
->getMemoryVT())) {
1302 LD
->getMemoryVT().getTypeForEVT(*DAG
.getContext());
1303 unsigned ABIAlignment
=
1304 TLI
.getTargetData()->getABITypeAlignment(Ty
);
1305 if (LD
->getAlignment() < ABIAlignment
){
1306 Result
= ExpandUnalignedLoad(cast
<LoadSDNode
>(Result
.getNode()),
1308 Tmp1
= Result
.getOperand(0);
1309 Tmp2
= Result
.getOperand(1);
1310 Tmp1
= LegalizeOp(Tmp1
);
1311 Tmp2
= LegalizeOp(Tmp2
);
1316 case TargetLowering::Expand
:
1317 // f64 = EXTLOAD f32 should expand to LOAD, FP_EXTEND
1318 // f128 = EXTLOAD {f32,f64} too
1319 if ((SrcVT
== MVT::f32
&& (Node
->getValueType(0) == MVT::f64
||
1320 Node
->getValueType(0) == MVT::f128
)) ||
1321 (SrcVT
== MVT::f64
&& Node
->getValueType(0) == MVT::f128
)) {
1322 SDValue Load
= DAG
.getLoad(SrcVT
, dl
, Tmp1
, Tmp2
, LD
->getSrcValue(),
1323 LD
->getSrcValueOffset(),
1324 LD
->isVolatile(), LD
->isNonTemporal(),
1325 LD
->getAlignment());
1326 Result
= DAG
.getNode(ISD::FP_EXTEND
, dl
,
1327 Node
->getValueType(0), Load
);
1328 Tmp1
= LegalizeOp(Result
); // Relegalize new nodes.
1329 Tmp2
= LegalizeOp(Load
.getValue(1));
1332 assert(ExtType
!= ISD::EXTLOAD
&&
1333 "EXTLOAD should always be supported!");
1334 // Turn the unsupported load into an EXTLOAD followed by an explicit
1335 // zero/sign extend inreg.
1336 Result
= DAG
.getExtLoad(ISD::EXTLOAD
, Node
->getValueType(0), dl
,
1337 Tmp1
, Tmp2
, LD
->getSrcValue(),
1338 LD
->getSrcValueOffset(), SrcVT
,
1339 LD
->isVolatile(), LD
->isNonTemporal(),
1340 LD
->getAlignment());
1342 if (ExtType
== ISD::SEXTLOAD
)
1343 ValRes
= DAG
.getNode(ISD::SIGN_EXTEND_INREG
, dl
,
1344 Result
.getValueType(),
1345 Result
, DAG
.getValueType(SrcVT
));
1347 ValRes
= DAG
.getZeroExtendInReg(Result
, dl
, SrcVT
);
1348 Tmp1
= LegalizeOp(ValRes
); // Relegalize new nodes.
1349 Tmp2
= LegalizeOp(Result
.getValue(1)); // Relegalize new nodes.
1354 // Since loads produce two values, make sure to remember that we legalized
1356 AddLegalizedOperand(SDValue(Node
, 0), Tmp1
);
1357 AddLegalizedOperand(SDValue(Node
, 1), Tmp2
);
1358 return Op
.getResNo() ? Tmp2
: Tmp1
;
1362 StoreSDNode
*ST
= cast
<StoreSDNode
>(Node
);
1363 Tmp1
= LegalizeOp(ST
->getChain()); // Legalize the chain.
1364 Tmp2
= LegalizeOp(ST
->getBasePtr()); // Legalize the pointer.
1365 int SVOffset
= ST
->getSrcValueOffset();
1366 unsigned Alignment
= ST
->getAlignment();
1367 bool isVolatile
= ST
->isVolatile();
1368 bool isNonTemporal
= ST
->isNonTemporal();
1370 if (!ST
->isTruncatingStore()) {
1371 if (SDNode
*OptStore
= OptimizeFloatStore(ST
).getNode()) {
1372 Result
= SDValue(OptStore
, 0);
1377 Tmp3
= LegalizeOp(ST
->getValue());
1378 Result
= SDValue(DAG
.UpdateNodeOperands(Result
.getNode(),
1383 EVT VT
= Tmp3
.getValueType();
1384 switch (TLI
.getOperationAction(ISD::STORE
, VT
)) {
1385 default: assert(0 && "This action is not supported yet!");
1386 case TargetLowering::Legal
:
1387 // If this is an unaligned store and the target doesn't support it,
1389 if (!TLI
.allowsUnalignedMemoryAccesses(ST
->getMemoryVT())) {
1390 const Type
*Ty
= ST
->getMemoryVT().getTypeForEVT(*DAG
.getContext());
1391 unsigned ABIAlignment
= TLI
.getTargetData()->getABITypeAlignment(Ty
);
1392 if (ST
->getAlignment() < ABIAlignment
)
1393 Result
= ExpandUnalignedStore(cast
<StoreSDNode
>(Result
.getNode()),
1397 case TargetLowering::Custom
:
1398 Tmp1
= TLI
.LowerOperation(Result
, DAG
);
1399 if (Tmp1
.getNode()) Result
= Tmp1
;
1401 case TargetLowering::Promote
:
1402 assert(VT
.isVector() && "Unknown legal promote case!");
1403 Tmp3
= DAG
.getNode(ISD::BIT_CONVERT
, dl
,
1404 TLI
.getTypeToPromoteTo(ISD::STORE
, VT
), Tmp3
);
1405 Result
= DAG
.getStore(Tmp1
, dl
, Tmp3
, Tmp2
,
1406 ST
->getSrcValue(), SVOffset
, isVolatile
,
1407 isNonTemporal
, Alignment
);
1413 Tmp3
= LegalizeOp(ST
->getValue());
1415 EVT StVT
= ST
->getMemoryVT();
1416 unsigned StWidth
= StVT
.getSizeInBits();
1418 if (StWidth
!= StVT
.getStoreSizeInBits()) {
1419 // Promote to a byte-sized store with upper bits zero if not
1420 // storing an integral number of bytes. For example, promote
1421 // TRUNCSTORE:i1 X -> TRUNCSTORE:i8 (and X, 1)
1422 EVT NVT
= EVT::getIntegerVT(*DAG
.getContext(),
1423 StVT
.getStoreSizeInBits());
1424 Tmp3
= DAG
.getZeroExtendInReg(Tmp3
, dl
, StVT
);
1425 Result
= DAG
.getTruncStore(Tmp1
, dl
, Tmp3
, Tmp2
, ST
->getSrcValue(),
1426 SVOffset
, NVT
, isVolatile
, isNonTemporal
,
1428 } else if (StWidth
& (StWidth
- 1)) {
1429 // If not storing a power-of-2 number of bits, expand as two stores.
1430 assert(!StVT
.isVector() && "Unsupported truncstore!");
1431 unsigned RoundWidth
= 1 << Log2_32(StWidth
);
1432 assert(RoundWidth
< StWidth
);
1433 unsigned ExtraWidth
= StWidth
- RoundWidth
;
1434 assert(ExtraWidth
< RoundWidth
);
1435 assert(!(RoundWidth
% 8) && !(ExtraWidth
% 8) &&
1436 "Store size not an integral number of bytes!");
1437 EVT RoundVT
= EVT::getIntegerVT(*DAG
.getContext(), RoundWidth
);
1438 EVT ExtraVT
= EVT::getIntegerVT(*DAG
.getContext(), ExtraWidth
);
1440 unsigned IncrementSize
;
1442 if (TLI
.isLittleEndian()) {
1443 // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 X, TRUNCSTORE@+2:i8 (srl X, 16)
1444 // Store the bottom RoundWidth bits.
1445 Lo
= DAG
.getTruncStore(Tmp1
, dl
, Tmp3
, Tmp2
, ST
->getSrcValue(),
1447 isVolatile
, isNonTemporal
, Alignment
);
1449 // Store the remaining ExtraWidth bits.
1450 IncrementSize
= RoundWidth
/ 8;
1451 Tmp2
= DAG
.getNode(ISD::ADD
, dl
, Tmp2
.getValueType(), Tmp2
,
1452 DAG
.getIntPtrConstant(IncrementSize
));
1453 Hi
= DAG
.getNode(ISD::SRL
, dl
, Tmp3
.getValueType(), Tmp3
,
1454 DAG
.getConstant(RoundWidth
, TLI
.getShiftAmountTy()));
1455 Hi
= DAG
.getTruncStore(Tmp1
, dl
, Hi
, Tmp2
, ST
->getSrcValue(),
1456 SVOffset
+ IncrementSize
, ExtraVT
, isVolatile
,
1458 MinAlign(Alignment
, IncrementSize
));
1460 // Big endian - avoid unaligned stores.
1461 // TRUNCSTORE:i24 X -> TRUNCSTORE:i16 (srl X, 8), TRUNCSTORE@+2:i8 X
1462 // Store the top RoundWidth bits.
1463 Hi
= DAG
.getNode(ISD::SRL
, dl
, Tmp3
.getValueType(), Tmp3
,
1464 DAG
.getConstant(ExtraWidth
, TLI
.getShiftAmountTy()));
1465 Hi
= DAG
.getTruncStore(Tmp1
, dl
, Hi
, Tmp2
, ST
->getSrcValue(),
1466 SVOffset
, RoundVT
, isVolatile
, isNonTemporal
,
1469 // Store the remaining ExtraWidth bits.
1470 IncrementSize
= RoundWidth
/ 8;
1471 Tmp2
= DAG
.getNode(ISD::ADD
, dl
, Tmp2
.getValueType(), Tmp2
,
1472 DAG
.getIntPtrConstant(IncrementSize
));
1473 Lo
= DAG
.getTruncStore(Tmp1
, dl
, Tmp3
, Tmp2
, ST
->getSrcValue(),
1474 SVOffset
+ IncrementSize
, ExtraVT
, isVolatile
,
1476 MinAlign(Alignment
, IncrementSize
));
1479 // The order of the stores doesn't matter.
1480 Result
= DAG
.getNode(ISD::TokenFactor
, dl
, MVT::Other
, Lo
, Hi
);
1482 if (Tmp1
!= ST
->getChain() || Tmp3
!= ST
->getValue() ||
1483 Tmp2
!= ST
->getBasePtr())
1484 Result
= SDValue(DAG
.UpdateNodeOperands(Result
.getNode(),
1489 switch (TLI
.getTruncStoreAction(ST
->getValue().getValueType(), StVT
)) {
1490 default: assert(0 && "This action is not supported yet!");
1491 case TargetLowering::Legal
:
1492 // If this is an unaligned store and the target doesn't support it,
1494 if (!TLI
.allowsUnalignedMemoryAccesses(ST
->getMemoryVT())) {
1495 const Type
*Ty
= ST
->getMemoryVT().getTypeForEVT(*DAG
.getContext());
1496 unsigned ABIAlignment
= TLI
.getTargetData()->getABITypeAlignment(Ty
);
1497 if (ST
->getAlignment() < ABIAlignment
)
1498 Result
= ExpandUnalignedStore(cast
<StoreSDNode
>(Result
.getNode()),
1502 case TargetLowering::Custom
:
1503 Result
= TLI
.LowerOperation(Result
, DAG
);
1506 // TRUNCSTORE:i16 i32 -> STORE i16
1507 assert(isTypeLegal(StVT
) && "Do not know how to expand this store!");
1508 Tmp3
= DAG
.getNode(ISD::TRUNCATE
, dl
, StVT
, Tmp3
);
1509 Result
= DAG
.getStore(Tmp1
, dl
, Tmp3
, Tmp2
, ST
->getSrcValue(),
1510 SVOffset
, isVolatile
, isNonTemporal
,
1519 assert(Result
.getValueType() == Op
.getValueType() &&
1520 "Bad legalization!");
1522 // Make sure that the generated code is itself legal.
1524 Result
= LegalizeOp(Result
);
1526 // Note that LegalizeOp may be reentered even from single-use nodes, which
1527 // means that we always must cache transformed nodes.
1528 AddLegalizedOperand(Op
, Result
);
// ExpandExtractFromVectorThroughStack - Lower an EXTRACT_VECTOR_ELT (or
// extract-subvector) by spilling the whole vector to a stack temporary and
// loading back just the requested element.
// NOTE(review): this region appears to be a corrupted extraction — some
// original lines (argument tails, else keyword, closing brace) are missing.
1532 SDValue
SelectionDAGLegalize::ExpandExtractFromVectorThroughStack(SDValue Op
) {
1533 SDValue Vec
= Op
.getOperand(0);
1534 SDValue Idx
= Op
.getOperand(1);
1535 DebugLoc dl
= Op
.getDebugLoc();
1536 // Store the value to a temporary stack slot, then LOAD the returned part.
1537 SDValue StackPtr
= DAG
.CreateStackTemporary(Vec
.getValueType());
1538 SDValue Ch
= DAG
.getStore(DAG
.getEntryNode(), dl
, Vec
, StackPtr
, NULL
, 0,
1541 // Add the offset to the index.
// Element offset in bytes = index * (element size in bits / 8).
1543 Vec
.getValueType().getVectorElementType().getSizeInBits()/8;
1544 Idx
= DAG
.getNode(ISD::MUL
, dl
, Idx
.getValueType(), Idx
,
1545 DAG
.getConstant(EltSize
, Idx
.getValueType()));
// Normalize the index to pointer width before the pointer arithmetic below.
1547 if (Idx
.getValueType().bitsGT(TLI
.getPointerTy()))
1548 Idx
= DAG
.getNode(ISD::TRUNCATE
, dl
, TLI
.getPointerTy(), Idx
);
1550 Idx
= DAG
.getNode(ISD::ZERO_EXTEND
, dl
, TLI
.getPointerTy(), Idx
);
1552 StackPtr
= DAG
.getNode(ISD::ADD
, dl
, Idx
.getValueType(), Idx
, StackPtr
);
// Vector result: plain load; scalar result: any-extending load of the
// element type.
1554 if (Op
.getValueType().isVector())
1555 return DAG
.getLoad(Op
.getValueType(), dl
, Ch
, StackPtr
, NULL
, 0,
1558 return DAG
.getExtLoad(ISD::EXTLOAD
, Op
.getValueType(), dl
, Ch
, StackPtr
,
1559 NULL
, 0, Vec
.getValueType().getVectorElementType(),
// ExpandVectorBuildThroughStack - Lower a BUILD_VECTOR by storing each
// (non-undef) operand to a stack temporary and loading the result back as a
// single vector.
// NOTE(review): this region appears to be a corrupted extraction — some
// original lines (store argument tails, else keyword, closing brace) are
// missing.
1563 SDValue
SelectionDAGLegalize::ExpandVectorBuildThroughStack(SDNode
* Node
) {
1564 // We can't handle this case efficiently. Allocate a sufficiently
1565 // aligned object on the stack, store each element into it, then load
1566 // the result as a vector.
1567 // Create the stack frame object.
1568 EVT VT
= Node
->getValueType(0);
1569 EVT EltVT
= VT
.getVectorElementType();
1570 DebugLoc dl
= Node
->getDebugLoc();
1571 SDValue FIPtr
= DAG
.CreateStackTemporary(VT
);
1572 int FI
= cast
<FrameIndexSDNode
>(FIPtr
.getNode())->getIndex();
1573 const Value
*SV
= PseudoSourceValue::getFixedStack(FI
);
1575 // Emit a store of each element to the stack slot.
1576 SmallVector
<SDValue
, 8> Stores
;
1577 unsigned TypeByteSize
= EltVT
.getSizeInBits() / 8;
1578 // Store (in the right endianness) the elements to memory.
1579 for (unsigned i
= 0, e
= Node
->getNumOperands(); i
!= e
; ++i
) {
1580 // Ignore undef elements.
1581 if (Node
->getOperand(i
).getOpcode() == ISD::UNDEF
) continue;
// Byte offset of element i within the stack slot.
1583 unsigned Offset
= TypeByteSize
*i
;
1585 SDValue Idx
= DAG
.getConstant(Offset
, FIPtr
.getValueType());
1586 Idx
= DAG
.getNode(ISD::ADD
, dl
, FIPtr
.getValueType(), FIPtr
, Idx
);
1588 // If the destination vector element type is narrower than the source
1589 // element type, only store the bits necessary.
1590 if (EltVT
.bitsLT(Node
->getOperand(i
).getValueType().getScalarType())) {
1591 Stores
.push_back(DAG
.getTruncStore(DAG
.getEntryNode(), dl
,
1592 Node
->getOperand(i
), Idx
, SV
, Offset
,
1593 EltVT
, false, false, 0));
1595 Stores
.push_back(DAG
.getStore(DAG
.getEntryNode(), dl
,
1596 Node
->getOperand(i
), Idx
, SV
, Offset
,
// Tie all the element stores together with a TokenFactor; if every element
// was undef, fall back to the entry chain.
1601 if (!Stores
.empty()) // Not all undef elements?
1602 StoreChain
= DAG
.getNode(ISD::TokenFactor
, dl
, MVT::Other
,
1603 &Stores
[0], Stores
.size());
1605 StoreChain
= DAG
.getEntryNode();
1607 // Result is a load from the stack slot.
1608 return DAG
.getLoad(VT
, dl
, StoreChain
, FIPtr
, SV
, 0, false, false, 0);
// ExpandFCOPYSIGN - Expand FCOPYSIGN(Tmp1, Tmp2) into: extract Tmp2's sign
// bit (via integer bitcast when legal, otherwise through a stack
// store/load), then select between FABS(Tmp1) and FNEG(FABS(Tmp1)).
// NOTE(review): this region appears to be a corrupted extraction — some
// original lines (declarations of SignBit/Ch, else keyword, final select
// arguments, closing brace) are missing.
1611 SDValue
SelectionDAGLegalize::ExpandFCOPYSIGN(SDNode
* Node
) {
1612 DebugLoc dl
= Node
->getDebugLoc();
1613 SDValue Tmp1
= Node
->getOperand(0);
1614 SDValue Tmp2
= Node
->getOperand(1);
1616 // Get the sign bit of the RHS. First obtain a value that has the same
1617 // sign as the sign bit, i.e. negative if and only if the sign bit is 1.
1619 EVT FloatVT
= Tmp2
.getValueType();
1620 EVT IVT
= EVT::getIntegerVT(*DAG
.getContext(), FloatVT
.getSizeInBits());
1621 if (isTypeLegal(IVT
)) {
1622 // Convert to an integer with the same sign bit.
1623 SignBit
= DAG
.getNode(ISD::BIT_CONVERT
, dl
, IVT
, Tmp2
);
1625 // Store the float to memory, then load the sign part out as an integer.
1626 MVT LoadTy
= TLI
.getPointerTy();
1627 // First create a temporary that is aligned for both the load and store.
1628 SDValue StackPtr
= DAG
.CreateStackTemporary(FloatVT
, LoadTy
);
1629 // Then store the float to it.
1631 DAG
.getStore(DAG
.getEntryNode(), dl
, Tmp2
, StackPtr
, NULL
, 0,
1633 if (TLI
.isBigEndian()) {
1634 assert(FloatVT
.isByteSized() && "Unsupported floating point type!");
1635 // Load out a legal integer with the same sign bit as the float.
1636 SignBit
= DAG
.getLoad(LoadTy
, dl
, Ch
, StackPtr
, NULL
, 0, false, false, 0);
1637 } else { // Little endian
1638 SDValue LoadPtr
= StackPtr
;
1639 // The float may be wider than the integer we are going to load. Advance
1640 // the pointer so that the loaded integer will contain the sign bit.
1641 unsigned Strides
= (FloatVT
.getSizeInBits()-1)/LoadTy
.getSizeInBits();
1642 unsigned ByteOffset
= (Strides
* LoadTy
.getSizeInBits()) / 8;
1643 LoadPtr
= DAG
.getNode(ISD::ADD
, dl
, LoadPtr
.getValueType(),
1644 LoadPtr
, DAG
.getIntPtrConstant(ByteOffset
));
1645 // Load a legal integer containing the sign bit.
1646 SignBit
= DAG
.getLoad(LoadTy
, dl
, Ch
, LoadPtr
, NULL
, 0, false, false, 0);
1647 // Move the sign bit to the top bit of the loaded integer.
1648 unsigned BitShift
= LoadTy
.getSizeInBits() -
1649 (FloatVT
.getSizeInBits() - 8 * ByteOffset
);
1650 assert(BitShift
< LoadTy
.getSizeInBits() && "Pointer advanced wrong?");
1652 SignBit
= DAG
.getNode(ISD::SHL
, dl
, LoadTy
, SignBit
,
1653 DAG
.getConstant(BitShift
,TLI
.getShiftAmountTy()));
1656 // Now get the sign bit proper, by seeing whether the value is negative.
1657 SignBit
= DAG
.getSetCC(dl
, TLI
.getSetCCResultType(SignBit
.getValueType()),
1658 SignBit
, DAG
.getConstant(0, SignBit
.getValueType()),
1660 // Get the absolute value of the result.
1661 SDValue AbsVal
= DAG
.getNode(ISD::FABS
, dl
, Tmp1
.getValueType(), Tmp1
);
1662 // Select between the nabs and abs value based on the sign bit of
1664 return DAG
.getNode(ISD::SELECT
, dl
, AbsVal
.getValueType(), SignBit
,
1665 DAG
.getNode(ISD::FNEG
, dl
, AbsVal
.getValueType(), AbsVal
),
1669 void SelectionDAGLegalize::ExpandDYNAMIC_STACKALLOC(SDNode
* Node
,
1670 SmallVectorImpl
<SDValue
> &Results
) {
1671 unsigned SPReg
= TLI
.getStackPointerRegisterToSaveRestore();
1672 assert(SPReg
&& "Target cannot require DYNAMIC_STACKALLOC expansion and"
1673 " not tell us which reg is the stack pointer!");
1674 DebugLoc dl
= Node
->getDebugLoc();
1675 EVT VT
= Node
->getValueType(0);
1676 SDValue Tmp1
= SDValue(Node
, 0);
1677 SDValue Tmp2
= SDValue(Node
, 1);
1678 SDValue Tmp3
= Node
->getOperand(2);
1679 SDValue Chain
= Tmp1
.getOperand(0);
1681 // Chain the dynamic stack allocation so that it doesn't modify the stack
1682 // pointer when other instructions are using the stack.
1683 Chain
= DAG
.getCALLSEQ_START(Chain
, DAG
.getIntPtrConstant(0, true));
1685 SDValue Size
= Tmp2
.getOperand(1);
1686 SDValue SP
= DAG
.getCopyFromReg(Chain
, dl
, SPReg
, VT
);
1687 Chain
= SP
.getValue(1);
1688 unsigned Align
= cast
<ConstantSDNode
>(Tmp3
)->getZExtValue();
1689 unsigned StackAlign
= TM
.getFrameInfo()->getStackAlignment();
1690 if (Align
> StackAlign
)
1691 SP
= DAG
.getNode(ISD::AND
, dl
, VT
, SP
,
1692 DAG
.getConstant(-(uint64_t)Align
, VT
));
1693 Tmp1
= DAG
.getNode(ISD::SUB
, dl
, VT
, SP
, Size
); // Value
1694 Chain
= DAG
.getCopyToReg(Chain
, dl
, SPReg
, Tmp1
); // Output chain
1696 Tmp2
= DAG
.getCALLSEQ_END(Chain
, DAG
.getIntPtrConstant(0, true),
1697 DAG
.getIntPtrConstant(0, true), SDValue());
1699 Results
.push_back(Tmp1
);
1700 Results
.push_back(Tmp2
);
1703 /// LegalizeSetCCCondCode - Legalize a SETCC with given LHS and RHS and
1704 /// condition code CC on the current target. This routine expands SETCC with
1705 /// illegal condition code into AND / OR of multiple SETCC values.
1706 void SelectionDAGLegalize::LegalizeSetCCCondCode(EVT VT
,
1707 SDValue
&LHS
, SDValue
&RHS
,
1710 EVT OpVT
= LHS
.getValueType();
1711 ISD::CondCode CCCode
= cast
<CondCodeSDNode
>(CC
)->get();
1712 switch (TLI
.getCondCodeAction(CCCode
, OpVT
)) {
1713 default: assert(0 && "Unknown condition code action!");
1714 case TargetLowering::Legal
:
1717 case TargetLowering::Expand
: {
1718 ISD::CondCode CC1
= ISD::SETCC_INVALID
, CC2
= ISD::SETCC_INVALID
;
1721 default: assert(0 && "Don't know how to expand this condition!");
1722 case ISD::SETOEQ
: CC1
= ISD::SETEQ
; CC2
= ISD::SETO
; Opc
= ISD::AND
; break;
1723 case ISD::SETOGT
: CC1
= ISD::SETGT
; CC2
= ISD::SETO
; Opc
= ISD::AND
; break;
1724 case ISD::SETOGE
: CC1
= ISD::SETGE
; CC2
= ISD::SETO
; Opc
= ISD::AND
; break;
1725 case ISD::SETOLT
: CC1
= ISD::SETLT
; CC2
= ISD::SETO
; Opc
= ISD::AND
; break;
1726 case ISD::SETOLE
: CC1
= ISD::SETLE
; CC2
= ISD::SETO
; Opc
= ISD::AND
; break;
1727 case ISD::SETONE
: CC1
= ISD::SETNE
; CC2
= ISD::SETO
; Opc
= ISD::AND
; break;
1728 case ISD::SETUEQ
: CC1
= ISD::SETEQ
; CC2
= ISD::SETUO
; Opc
= ISD::OR
; break;
1729 case ISD::SETUGT
: CC1
= ISD::SETGT
; CC2
= ISD::SETUO
; Opc
= ISD::OR
; break;
1730 case ISD::SETUGE
: CC1
= ISD::SETGE
; CC2
= ISD::SETUO
; Opc
= ISD::OR
; break;
1731 case ISD::SETULT
: CC1
= ISD::SETLT
; CC2
= ISD::SETUO
; Opc
= ISD::OR
; break;
1732 case ISD::SETULE
: CC1
= ISD::SETLE
; CC2
= ISD::SETUO
; Opc
= ISD::OR
; break;
1733 case ISD::SETUNE
: CC1
= ISD::SETNE
; CC2
= ISD::SETUO
; Opc
= ISD::OR
; break;
1734 // FIXME: Implement more expansions.
1737 SDValue SetCC1
= DAG
.getSetCC(dl
, VT
, LHS
, RHS
, CC1
);
1738 SDValue SetCC2
= DAG
.getSetCC(dl
, VT
, LHS
, RHS
, CC2
);
1739 LHS
= DAG
.getNode(Opc
, dl
, VT
, SetCC1
, SetCC2
);
1747 /// EmitStackConvert - Emit a store/load combination to the stack. This stores
1748 /// SrcOp to a stack slot of type SlotVT, truncating it if needed. It then does
1749 /// a load from the stack slot to DestVT, extending it if needed.
1750 /// The resultant code need not be legal.
1751 SDValue
SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp
,
1755 // Create the stack frame object.
1757 TLI
.getTargetData()->getPrefTypeAlignment(SrcOp
.getValueType().
1758 getTypeForEVT(*DAG
.getContext()));
1759 SDValue FIPtr
= DAG
.CreateStackTemporary(SlotVT
, SrcAlign
);
1761 FrameIndexSDNode
*StackPtrFI
= cast
<FrameIndexSDNode
>(FIPtr
);
1762 int SPFI
= StackPtrFI
->getIndex();
1763 const Value
*SV
= PseudoSourceValue::getFixedStack(SPFI
);
1765 unsigned SrcSize
= SrcOp
.getValueType().getSizeInBits();
1766 unsigned SlotSize
= SlotVT
.getSizeInBits();
1767 unsigned DestSize
= DestVT
.getSizeInBits();
1768 const Type
*DestType
= DestVT
.getTypeForEVT(*DAG
.getContext());
1769 unsigned DestAlign
= TLI
.getTargetData()->getPrefTypeAlignment(DestType
);
1771 // Emit a store to the stack slot. Use a truncstore if the input value is
1772 // later than DestVT.
1775 if (SrcSize
> SlotSize
)
1776 Store
= DAG
.getTruncStore(DAG
.getEntryNode(), dl
, SrcOp
, FIPtr
,
1777 SV
, 0, SlotVT
, false, false, SrcAlign
);
1779 assert(SrcSize
== SlotSize
&& "Invalid store");
1780 Store
= DAG
.getStore(DAG
.getEntryNode(), dl
, SrcOp
, FIPtr
,
1781 SV
, 0, false, false, SrcAlign
);
1784 // Result is a load from the stack slot.
1785 if (SlotSize
== DestSize
)
1786 return DAG
.getLoad(DestVT
, dl
, Store
, FIPtr
, SV
, 0, false, false,
1789 assert(SlotSize
< DestSize
&& "Unknown extension!");
1790 return DAG
.getExtLoad(ISD::EXTLOAD
, DestVT
, dl
, Store
, FIPtr
, SV
, 0, SlotVT
,
1791 false, false, DestAlign
);
1794 SDValue
SelectionDAGLegalize::ExpandSCALAR_TO_VECTOR(SDNode
*Node
) {
1795 DebugLoc dl
= Node
->getDebugLoc();
1796 // Create a vector sized/aligned stack slot, store the value to element #0,
1797 // then load the whole vector back out.
1798 SDValue StackPtr
= DAG
.CreateStackTemporary(Node
->getValueType(0));
1800 FrameIndexSDNode
*StackPtrFI
= cast
<FrameIndexSDNode
>(StackPtr
);
1801 int SPFI
= StackPtrFI
->getIndex();
1803 SDValue Ch
= DAG
.getTruncStore(DAG
.getEntryNode(), dl
, Node
->getOperand(0),
1805 PseudoSourceValue::getFixedStack(SPFI
), 0,
1806 Node
->getValueType(0).getVectorElementType(),
1808 return DAG
.getLoad(Node
->getValueType(0), dl
, Ch
, StackPtr
,
1809 PseudoSourceValue::getFixedStack(SPFI
), 0,
1814 /// ExpandBUILD_VECTOR - Expand a BUILD_VECTOR node on targets that don't
1815 /// support the operation, but do support the resultant vector type.
1816 SDValue
SelectionDAGLegalize::ExpandBUILD_VECTOR(SDNode
*Node
) {
1817 unsigned NumElems
= Node
->getNumOperands();
1818 SDValue Value1
, Value2
;
1819 DebugLoc dl
= Node
->getDebugLoc();
1820 EVT VT
= Node
->getValueType(0);
1821 EVT OpVT
= Node
->getOperand(0).getValueType();
1822 EVT EltVT
= VT
.getVectorElementType();
1824 // If the only non-undef value is the low element, turn this into a
1825 // SCALAR_TO_VECTOR node. If this is { X, X, X, X }, determine X.
1826 bool isOnlyLowElement
= true;
1827 bool MoreThanTwoValues
= false;
1828 bool isConstant
= true;
1829 for (unsigned i
= 0; i
< NumElems
; ++i
) {
1830 SDValue V
= Node
->getOperand(i
);
1831 if (V
.getOpcode() == ISD::UNDEF
)
1834 isOnlyLowElement
= false;
1835 if (!isa
<ConstantFPSDNode
>(V
) && !isa
<ConstantSDNode
>(V
))
1838 if (!Value1
.getNode()) {
1840 } else if (!Value2
.getNode()) {
1843 } else if (V
!= Value1
&& V
!= Value2
) {
1844 MoreThanTwoValues
= true;
1848 if (!Value1
.getNode())
1849 return DAG
.getUNDEF(VT
);
1851 if (isOnlyLowElement
)
1852 return DAG
.getNode(ISD::SCALAR_TO_VECTOR
, dl
, VT
, Node
->getOperand(0));
1854 // If all elements are constants, create a load from the constant pool.
1856 std::vector
<Constant
*> CV
;
1857 for (unsigned i
= 0, e
= NumElems
; i
!= e
; ++i
) {
1858 if (ConstantFPSDNode
*V
=
1859 dyn_cast
<ConstantFPSDNode
>(Node
->getOperand(i
))) {
1860 CV
.push_back(const_cast<ConstantFP
*>(V
->getConstantFPValue()));
1861 } else if (ConstantSDNode
*V
=
1862 dyn_cast
<ConstantSDNode
>(Node
->getOperand(i
))) {
1864 CV
.push_back(const_cast<ConstantInt
*>(V
->getConstantIntValue()));
1866 // If OpVT and EltVT don't match, EltVT is not legal and the
1867 // element values have been promoted/truncated earlier. Undo this;
1868 // we don't want a v16i8 to become a v16i32 for example.
1869 const ConstantInt
*CI
= V
->getConstantIntValue();
1870 CV
.push_back(ConstantInt::get(EltVT
.getTypeForEVT(*DAG
.getContext()),
1871 CI
->getZExtValue()));
1874 assert(Node
->getOperand(i
).getOpcode() == ISD::UNDEF
);
1875 const Type
*OpNTy
= EltVT
.getTypeForEVT(*DAG
.getContext());
1876 CV
.push_back(UndefValue::get(OpNTy
));
1879 Constant
*CP
= ConstantVector::get(CV
);
1880 SDValue CPIdx
= DAG
.getConstantPool(CP
, TLI
.getPointerTy());
1881 unsigned Alignment
= cast
<ConstantPoolSDNode
>(CPIdx
)->getAlignment();
1882 return DAG
.getLoad(VT
, dl
, DAG
.getEntryNode(), CPIdx
,
1883 PseudoSourceValue::getConstantPool(), 0,
1884 false, false, Alignment
);
1887 if (!MoreThanTwoValues
) {
1888 SmallVector
<int, 8> ShuffleVec(NumElems
, -1);
1889 for (unsigned i
= 0; i
< NumElems
; ++i
) {
1890 SDValue V
= Node
->getOperand(i
);
1891 if (V
.getOpcode() == ISD::UNDEF
)
1893 ShuffleVec
[i
] = V
== Value1
? 0 : NumElems
;
1895 if (TLI
.isShuffleMaskLegal(ShuffleVec
, Node
->getValueType(0))) {
1896 // Get the splatted value into the low element of a vector register.
1897 SDValue Vec1
= DAG
.getNode(ISD::SCALAR_TO_VECTOR
, dl
, VT
, Value1
);
1899 if (Value2
.getNode())
1900 Vec2
= DAG
.getNode(ISD::SCALAR_TO_VECTOR
, dl
, VT
, Value2
);
1902 Vec2
= DAG
.getUNDEF(VT
);
1904 // Return shuffle(LowValVec, undef, <0,0,0,0>)
1905 return DAG
.getVectorShuffle(VT
, dl
, Vec1
, Vec2
, ShuffleVec
.data());
1909 // Otherwise, we can't handle this case efficiently.
1910 return ExpandVectorBuildThroughStack(Node
);
1913 // ExpandLibCall - Expand a node into a call to a libcall. If the result value
1914 // does not fit into a register, return the lo part and set the hi part to the
1915 // by-reg argument. If it does fit into a single register, return the result
1916 // and leave the Hi part unset.
1917 SDValue
SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC
, SDNode
*Node
,
1919 assert(!IsLegalizingCall
&& "Cannot overlap legalization of calls!");
1920 // The input chain to this libcall is the entry node of the function.
1921 // Legalizing the call will automatically add the previous call to the
1923 SDValue InChain
= DAG
.getEntryNode();
1925 TargetLowering::ArgListTy Args
;
1926 TargetLowering::ArgListEntry Entry
;
1927 for (unsigned i
= 0, e
= Node
->getNumOperands(); i
!= e
; ++i
) {
1928 EVT ArgVT
= Node
->getOperand(i
).getValueType();
1929 const Type
*ArgTy
= ArgVT
.getTypeForEVT(*DAG
.getContext());
1930 Entry
.Node
= Node
->getOperand(i
); Entry
.Ty
= ArgTy
;
1931 Entry
.isSExt
= isSigned
;
1932 Entry
.isZExt
= !isSigned
;
1933 Args
.push_back(Entry
);
1935 SDValue Callee
= DAG
.getExternalSymbol(TLI
.getLibcallName(LC
),
1936 TLI
.getPointerTy());
1938 // Splice the libcall in wherever FindInputOutputChains tells us to.
1939 const Type
*RetTy
= Node
->getValueType(0).getTypeForEVT(*DAG
.getContext());
1940 std::pair
<SDValue
, SDValue
> CallInfo
=
1941 TLI
.LowerCallTo(InChain
, RetTy
, isSigned
, !isSigned
, false, false,
1942 0, TLI
.getLibcallCallingConv(LC
), false,
1943 /*isReturnValueUsed=*/true,
1944 Callee
, Args
, DAG
, Node
->getDebugLoc());
1946 // Legalize the call sequence, starting with the chain. This will advance
1947 // the LastCALLSEQ_END to the legalized version of the CALLSEQ_END node that
1948 // was added by LowerCallTo (guaranteeing proper serialization of calls).
1949 LegalizeOp(CallInfo
.second
);
1950 return CallInfo
.first
;
1953 // ExpandChainLibCall - Expand a node into a call to a libcall. Similar to
1954 // ExpandLibCall except that the first operand is the in-chain.
1955 std::pair
<SDValue
, SDValue
>
1956 SelectionDAGLegalize::ExpandChainLibCall(RTLIB::Libcall LC
,
1959 assert(!IsLegalizingCall
&& "Cannot overlap legalization of calls!");
1960 SDValue InChain
= Node
->getOperand(0);
1962 TargetLowering::ArgListTy Args
;
1963 TargetLowering::ArgListEntry Entry
;
1964 for (unsigned i
= 1, e
= Node
->getNumOperands(); i
!= e
; ++i
) {
1965 EVT ArgVT
= Node
->getOperand(i
).getValueType();
1966 const Type
*ArgTy
= ArgVT
.getTypeForEVT(*DAG
.getContext());
1967 Entry
.Node
= Node
->getOperand(i
);
1969 Entry
.isSExt
= isSigned
;
1970 Entry
.isZExt
= !isSigned
;
1971 Args
.push_back(Entry
);
1973 SDValue Callee
= DAG
.getExternalSymbol(TLI
.getLibcallName(LC
),
1974 TLI
.getPointerTy());
1976 // Splice the libcall in wherever FindInputOutputChains tells us to.
1977 const Type
*RetTy
= Node
->getValueType(0).getTypeForEVT(*DAG
.getContext());
1978 std::pair
<SDValue
, SDValue
> CallInfo
=
1979 TLI
.LowerCallTo(InChain
, RetTy
, isSigned
, !isSigned
, false, false,
1980 0, TLI
.getLibcallCallingConv(LC
), false,
1981 /*isReturnValueUsed=*/true,
1982 Callee
, Args
, DAG
, Node
->getDebugLoc());
1984 // Legalize the call sequence, starting with the chain. This will advance
1985 // the LastCALLSEQ_END to the legalized version of the CALLSEQ_END node that
1986 // was added by LowerCallTo (guaranteeing proper serialization of calls).
1987 LegalizeOp(CallInfo
.second
);
1991 SDValue
SelectionDAGLegalize::ExpandFPLibCall(SDNode
* Node
,
1992 RTLIB::Libcall Call_F32
,
1993 RTLIB::Libcall Call_F64
,
1994 RTLIB::Libcall Call_F80
,
1995 RTLIB::Libcall Call_PPCF128
) {
1997 switch (Node
->getValueType(0).getSimpleVT().SimpleTy
) {
1998 default: assert(0 && "Unexpected request for libcall!");
1999 case MVT::f32
: LC
= Call_F32
; break;
2000 case MVT::f64
: LC
= Call_F64
; break;
2001 case MVT::f80
: LC
= Call_F80
; break;
2002 case MVT::ppcf128
: LC
= Call_PPCF128
; break;
2004 return ExpandLibCall(LC
, Node
, false);
2007 SDValue
SelectionDAGLegalize::ExpandIntLibCall(SDNode
* Node
, bool isSigned
,
2008 RTLIB::Libcall Call_I8
,
2009 RTLIB::Libcall Call_I16
,
2010 RTLIB::Libcall Call_I32
,
2011 RTLIB::Libcall Call_I64
,
2012 RTLIB::Libcall Call_I128
) {
2014 switch (Node
->getValueType(0).getSimpleVT().SimpleTy
) {
2015 default: assert(0 && "Unexpected request for libcall!");
2016 case MVT::i8
: LC
= Call_I8
; break;
2017 case MVT::i16
: LC
= Call_I16
; break;
2018 case MVT::i32
: LC
= Call_I32
; break;
2019 case MVT::i64
: LC
= Call_I64
; break;
2020 case MVT::i128
: LC
= Call_I128
; break;
2022 return ExpandLibCall(LC
, Node
, isSigned
);
2025 /// ExpandLegalINT_TO_FP - This function is responsible for legalizing a
2026 /// INT_TO_FP operation of the specified operand when the target requests that
2027 /// we expand it. At this point, we know that the result and operand types are
2028 /// legal for the target.
2029 SDValue
SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned
,
2033 if (Op0
.getValueType() == MVT::i32
) {
2034 // simple 32-bit [signed|unsigned] integer to float/double expansion
2036 // Get the stack frame index of a 8 byte buffer.
2037 SDValue StackSlot
= DAG
.CreateStackTemporary(MVT::f64
);
2039 // word offset constant for Hi/Lo address computation
2040 SDValue WordOff
= DAG
.getConstant(sizeof(int), TLI
.getPointerTy());
2041 // set up Hi and Lo (into buffer) address based on endian
2042 SDValue Hi
= StackSlot
;
2043 SDValue Lo
= DAG
.getNode(ISD::ADD
, dl
,
2044 TLI
.getPointerTy(), StackSlot
, WordOff
);
2045 if (TLI
.isLittleEndian())
2048 // if signed map to unsigned space
2051 // constant used to invert sign bit (signed to unsigned mapping)
2052 SDValue SignBit
= DAG
.getConstant(0x80000000u
, MVT::i32
);
2053 Op0Mapped
= DAG
.getNode(ISD::XOR
, dl
, MVT::i32
, Op0
, SignBit
);
2057 // store the lo of the constructed double - based on integer input
2058 SDValue Store1
= DAG
.getStore(DAG
.getEntryNode(), dl
,
2059 Op0Mapped
, Lo
, NULL
, 0,
2061 // initial hi portion of constructed double
2062 SDValue InitialHi
= DAG
.getConstant(0x43300000u
, MVT::i32
);
2063 // store the hi of the constructed double - biased exponent
2064 SDValue Store2
=DAG
.getStore(Store1
, dl
, InitialHi
, Hi
, NULL
, 0,
2066 // load the constructed double
2067 SDValue Load
= DAG
.getLoad(MVT::f64
, dl
, Store2
, StackSlot
, NULL
, 0,
2069 // FP constant to bias correct the final result
2070 SDValue Bias
= DAG
.getConstantFP(isSigned
?
2071 BitsToDouble(0x4330000080000000ULL
) :
2072 BitsToDouble(0x4330000000000000ULL
),
2074 // subtract the bias
2075 SDValue Sub
= DAG
.getNode(ISD::FSUB
, dl
, MVT::f64
, Load
, Bias
);
2078 // handle final rounding
2079 if (DestVT
== MVT::f64
) {
2082 } else if (DestVT
.bitsLT(MVT::f64
)) {
2083 Result
= DAG
.getNode(ISD::FP_ROUND
, dl
, DestVT
, Sub
,
2084 DAG
.getIntPtrConstant(0));
2085 } else if (DestVT
.bitsGT(MVT::f64
)) {
2086 Result
= DAG
.getNode(ISD::FP_EXTEND
, dl
, DestVT
, Sub
);
2090 assert(!isSigned
&& "Legalize cannot Expand SINT_TO_FP for i64 yet");
2091 // Code below here assumes !isSigned without checking again.
2093 // Implementation of unsigned i64 to f64 following the algorithm in
2094 // __floatundidf in compiler_rt. This implementation has the advantage
2095 // of performing rounding correctly, both in the default rounding mode
2096 // and in all alternate rounding modes.
2097 // TODO: Generalize this for use with other types.
2098 if (Op0
.getValueType() == MVT::i64
&& DestVT
== MVT::f64
) {
2100 DAG
.getConstant(UINT64_C(0x4330000000000000), MVT::i64
);
2101 SDValue TwoP84PlusTwoP52
=
2102 DAG
.getConstantFP(BitsToDouble(UINT64_C(0x4530000000100000)), MVT::f64
);
2104 DAG
.getConstant(UINT64_C(0x4530000000000000), MVT::i64
);
2106 SDValue Lo
= DAG
.getZeroExtendInReg(Op0
, dl
, MVT::i32
);
2107 SDValue Hi
= DAG
.getNode(ISD::SRL
, dl
, MVT::i64
, Op0
,
2108 DAG
.getConstant(32, MVT::i64
));
2109 SDValue LoOr
= DAG
.getNode(ISD::OR
, dl
, MVT::i64
, Lo
, TwoP52
);
2110 SDValue HiOr
= DAG
.getNode(ISD::OR
, dl
, MVT::i64
, Hi
, TwoP84
);
2111 SDValue LoFlt
= DAG
.getNode(ISD::BIT_CONVERT
, dl
, MVT::f64
, LoOr
);
2112 SDValue HiFlt
= DAG
.getNode(ISD::BIT_CONVERT
, dl
, MVT::f64
, HiOr
);
2113 SDValue HiSub
= DAG
.getNode(ISD::FSUB
, dl
, MVT::f64
, HiFlt
,
2115 return DAG
.getNode(ISD::FADD
, dl
, MVT::f64
, LoFlt
, HiSub
);
2118 // Implementation of unsigned i64 to f32. This implementation has the
2119 // advantage of performing rounding correctly.
2120 // TODO: Generalize this for use with other types.
2121 if (Op0
.getValueType() == MVT::i64
&& DestVT
== MVT::f32
) {
2122 EVT SHVT
= TLI
.getShiftAmountTy();
2124 SDValue And
= DAG
.getNode(ISD::AND
, dl
, MVT::i64
, Op0
,
2125 DAG
.getConstant(UINT64_C(0xfffffffffffff800), MVT::i64
));
2126 SDValue Or
= DAG
.getNode(ISD::OR
, dl
, MVT::i64
, And
,
2127 DAG
.getConstant(UINT64_C(0x800), MVT::i64
));
2128 SDValue And2
= DAG
.getNode(ISD::AND
, dl
, MVT::i64
, Op0
,
2129 DAG
.getConstant(UINT64_C(0x7ff), MVT::i64
));
2130 SDValue Ne
= DAG
.getSetCC(dl
, TLI
.getSetCCResultType(MVT::i64
),
2131 And2
, DAG
.getConstant(UINT64_C(0), MVT::i64
), ISD::SETNE
);
2132 SDValue Sel
= DAG
.getNode(ISD::SELECT
, dl
, MVT::i64
, Ne
, Or
, Op0
);
2133 SDValue Ge
= DAG
.getSetCC(dl
, TLI
.getSetCCResultType(MVT::i64
),
2134 Op0
, DAG
.getConstant(UINT64_C(0x0020000000000000), MVT::i64
),
2136 SDValue Sel2
= DAG
.getNode(ISD::SELECT
, dl
, MVT::i64
, Ge
, Sel
, Op0
);
2138 SDValue Sh
= DAG
.getNode(ISD::SRL
, dl
, MVT::i64
, Sel2
,
2139 DAG
.getConstant(32, SHVT
));
2140 SDValue Trunc
= DAG
.getNode(ISD::TRUNCATE
, dl
, MVT::i32
, Sh
);
2141 SDValue Fcvt
= DAG
.getNode(ISD::UINT_TO_FP
, dl
, MVT::f64
, Trunc
);
2143 DAG
.getConstantFP(BitsToDouble(UINT64_C(0x41f0000000000000)), MVT::f64
);
2144 SDValue Fmul
= DAG
.getNode(ISD::FMUL
, dl
, MVT::f64
, TwoP32
, Fcvt
);
2145 SDValue Lo
= DAG
.getNode(ISD::TRUNCATE
, dl
, MVT::i32
, Sel2
);
2146 SDValue Fcvt2
= DAG
.getNode(ISD::UINT_TO_FP
, dl
, MVT::f64
, Lo
);
2147 SDValue Fadd
= DAG
.getNode(ISD::FADD
, dl
, MVT::f64
, Fmul
, Fcvt2
);
2148 return DAG
.getNode(ISD::FP_ROUND
, dl
, MVT::f32
, Fadd
,
2149 DAG
.getIntPtrConstant(0));
2153 SDValue Tmp1
= DAG
.getNode(ISD::SINT_TO_FP
, dl
, DestVT
, Op0
);
2155 SDValue SignSet
= DAG
.getSetCC(dl
, TLI
.getSetCCResultType(Op0
.getValueType()),
2156 Op0
, DAG
.getConstant(0, Op0
.getValueType()),
2158 SDValue Zero
= DAG
.getIntPtrConstant(0), Four
= DAG
.getIntPtrConstant(4);
2159 SDValue CstOffset
= DAG
.getNode(ISD::SELECT
, dl
, Zero
.getValueType(),
2160 SignSet
, Four
, Zero
);
2162 // If the sign bit of the integer is set, the large number will be treated
2163 // as a negative number. To counteract this, the dynamic code adds an
2164 // offset depending on the data type.
2166 switch (Op0
.getValueType().getSimpleVT().SimpleTy
) {
2167 default: assert(0 && "Unsupported integer type!");
2168 case MVT::i8
: FF
= 0x43800000ULL
; break; // 2^8 (as a float)
2169 case MVT::i16
: FF
= 0x47800000ULL
; break; // 2^16 (as a float)
2170 case MVT::i32
: FF
= 0x4F800000ULL
; break; // 2^32 (as a float)
2171 case MVT::i64
: FF
= 0x5F800000ULL
; break; // 2^64 (as a float)
2173 if (TLI
.isLittleEndian()) FF
<<= 32;
2174 Constant
*FudgeFactor
= ConstantInt::get(
2175 Type::getInt64Ty(*DAG
.getContext()), FF
);
2177 SDValue CPIdx
= DAG
.getConstantPool(FudgeFactor
, TLI
.getPointerTy());
2178 unsigned Alignment
= cast
<ConstantPoolSDNode
>(CPIdx
)->getAlignment();
2179 CPIdx
= DAG
.getNode(ISD::ADD
, dl
, TLI
.getPointerTy(), CPIdx
, CstOffset
);
2180 Alignment
= std::min(Alignment
, 4u);
2182 if (DestVT
== MVT::f32
)
2183 FudgeInReg
= DAG
.getLoad(MVT::f32
, dl
, DAG
.getEntryNode(), CPIdx
,
2184 PseudoSourceValue::getConstantPool(), 0,
2185 false, false, Alignment
);
2188 LegalizeOp(DAG
.getExtLoad(ISD::EXTLOAD
, DestVT
, dl
,
2189 DAG
.getEntryNode(), CPIdx
,
2190 PseudoSourceValue::getConstantPool(), 0,
2191 MVT::f32
, false, false, Alignment
));
2194 return DAG
.getNode(ISD::FADD
, dl
, DestVT
, Tmp1
, FudgeInReg
);
2197 /// PromoteLegalINT_TO_FP - This function is responsible for legalizing a
2198 /// *INT_TO_FP operation of the specified operand when the target requests that
2199 /// we promote it. At this point, we know that the result and operand types are
2200 /// legal for the target, and that there is a legal UINT_TO_FP or SINT_TO_FP
2201 /// operation that takes a larger input.
2202 SDValue
SelectionDAGLegalize::PromoteLegalINT_TO_FP(SDValue LegalOp
,
2206 // First step, figure out the appropriate *INT_TO_FP operation to use.
2207 EVT NewInTy
= LegalOp
.getValueType();
2209 unsigned OpToUse
= 0;
2211 // Scan for the appropriate larger type to use.
2213 NewInTy
= (MVT::SimpleValueType
)(NewInTy
.getSimpleVT().SimpleTy
+1);
2214 assert(NewInTy
.isInteger() && "Ran out of possibilities!");
2216 // If the target supports SINT_TO_FP of this type, use it.
2217 if (TLI
.isOperationLegalOrCustom(ISD::SINT_TO_FP
, NewInTy
)) {
2218 OpToUse
= ISD::SINT_TO_FP
;
2221 if (isSigned
) continue;
2223 // If the target supports UINT_TO_FP of this type, use it.
2224 if (TLI
.isOperationLegalOrCustom(ISD::UINT_TO_FP
, NewInTy
)) {
2225 OpToUse
= ISD::UINT_TO_FP
;
2229 // Otherwise, try a larger type.
2232 // Okay, we found the operation and type to use. Zero extend our input to the
2233 // desired type then run the operation on it.
2234 return DAG
.getNode(OpToUse
, dl
, DestVT
,
2235 DAG
.getNode(isSigned
? ISD::SIGN_EXTEND
: ISD::ZERO_EXTEND
,
2236 dl
, NewInTy
, LegalOp
));
2239 /// PromoteLegalFP_TO_INT - This function is responsible for legalizing a
2240 /// FP_TO_*INT operation of the specified operand when the target requests that
2241 /// we promote it. At this point, we know that the result and operand types are
2242 /// legal for the target, and that there is a legal FP_TO_UINT or FP_TO_SINT
2243 /// operation that returns a larger result.
2244 SDValue
SelectionDAGLegalize::PromoteLegalFP_TO_INT(SDValue LegalOp
,
2248 // First step, figure out the appropriate FP_TO*INT operation to use.
2249 EVT NewOutTy
= DestVT
;
2251 unsigned OpToUse
= 0;
2253 // Scan for the appropriate larger type to use.
2255 NewOutTy
= (MVT::SimpleValueType
)(NewOutTy
.getSimpleVT().SimpleTy
+1);
2256 assert(NewOutTy
.isInteger() && "Ran out of possibilities!");
2258 if (TLI
.isOperationLegalOrCustom(ISD::FP_TO_SINT
, NewOutTy
)) {
2259 OpToUse
= ISD::FP_TO_SINT
;
2263 if (TLI
.isOperationLegalOrCustom(ISD::FP_TO_UINT
, NewOutTy
)) {
2264 OpToUse
= ISD::FP_TO_UINT
;
2268 // Otherwise, try a larger type.
2272 // Okay, we found the operation and type to use.
2273 SDValue Operation
= DAG
.getNode(OpToUse
, dl
, NewOutTy
, LegalOp
);
2275 // Truncate the result of the extended FP_TO_*INT operation to the desired
2277 return DAG
.getNode(ISD::TRUNCATE
, dl
, DestVT
, Operation
);
2280 /// ExpandBSWAP - Open code the operations for BSWAP of the specified operation.
2282 SDValue
SelectionDAGLegalize::ExpandBSWAP(SDValue Op
, DebugLoc dl
) {
2283 EVT VT
= Op
.getValueType();
2284 EVT SHVT
= TLI
.getShiftAmountTy();
2285 SDValue Tmp1
, Tmp2
, Tmp3
, Tmp4
, Tmp5
, Tmp6
, Tmp7
, Tmp8
;
2286 switch (VT
.getSimpleVT().SimpleTy
) {
2287 default: assert(0 && "Unhandled Expand type in BSWAP!");
2289 Tmp2
= DAG
.getNode(ISD::SHL
, dl
, VT
, Op
, DAG
.getConstant(8, SHVT
));
2290 Tmp1
= DAG
.getNode(ISD::SRL
, dl
, VT
, Op
, DAG
.getConstant(8, SHVT
));
2291 return DAG
.getNode(ISD::OR
, dl
, VT
, Tmp1
, Tmp2
);
2293 Tmp4
= DAG
.getNode(ISD::SHL
, dl
, VT
, Op
, DAG
.getConstant(24, SHVT
));
2294 Tmp3
= DAG
.getNode(ISD::SHL
, dl
, VT
, Op
, DAG
.getConstant(8, SHVT
));
2295 Tmp2
= DAG
.getNode(ISD::SRL
, dl
, VT
, Op
, DAG
.getConstant(8, SHVT
));
2296 Tmp1
= DAG
.getNode(ISD::SRL
, dl
, VT
, Op
, DAG
.getConstant(24, SHVT
));
2297 Tmp3
= DAG
.getNode(ISD::AND
, dl
, VT
, Tmp3
, DAG
.getConstant(0xFF0000, VT
));
2298 Tmp2
= DAG
.getNode(ISD::AND
, dl
, VT
, Tmp2
, DAG
.getConstant(0xFF00, VT
));
2299 Tmp4
= DAG
.getNode(ISD::OR
, dl
, VT
, Tmp4
, Tmp3
);
2300 Tmp2
= DAG
.getNode(ISD::OR
, dl
, VT
, Tmp2
, Tmp1
);
2301 return DAG
.getNode(ISD::OR
, dl
, VT
, Tmp4
, Tmp2
);
2303 Tmp8
= DAG
.getNode(ISD::SHL
, dl
, VT
, Op
, DAG
.getConstant(56, SHVT
));
2304 Tmp7
= DAG
.getNode(ISD::SHL
, dl
, VT
, Op
, DAG
.getConstant(40, SHVT
));
2305 Tmp6
= DAG
.getNode(ISD::SHL
, dl
, VT
, Op
, DAG
.getConstant(24, SHVT
));
2306 Tmp5
= DAG
.getNode(ISD::SHL
, dl
, VT
, Op
, DAG
.getConstant(8, SHVT
));
2307 Tmp4
= DAG
.getNode(ISD::SRL
, dl
, VT
, Op
, DAG
.getConstant(8, SHVT
));
2308 Tmp3
= DAG
.getNode(ISD::SRL
, dl
, VT
, Op
, DAG
.getConstant(24, SHVT
));
2309 Tmp2
= DAG
.getNode(ISD::SRL
, dl
, VT
, Op
, DAG
.getConstant(40, SHVT
));
2310 Tmp1
= DAG
.getNode(ISD::SRL
, dl
, VT
, Op
, DAG
.getConstant(56, SHVT
));
2311 Tmp7
= DAG
.getNode(ISD::AND
, dl
, VT
, Tmp7
, DAG
.getConstant(255ULL<<48, VT
));
2312 Tmp6
= DAG
.getNode(ISD::AND
, dl
, VT
, Tmp6
, DAG
.getConstant(255ULL<<40, VT
));
2313 Tmp5
= DAG
.getNode(ISD::AND
, dl
, VT
, Tmp5
, DAG
.getConstant(255ULL<<32, VT
));
2314 Tmp4
= DAG
.getNode(ISD::AND
, dl
, VT
, Tmp4
, DAG
.getConstant(255ULL<<24, VT
));
2315 Tmp3
= DAG
.getNode(ISD::AND
, dl
, VT
, Tmp3
, DAG
.getConstant(255ULL<<16, VT
));
2316 Tmp2
= DAG
.getNode(ISD::AND
, dl
, VT
, Tmp2
, DAG
.getConstant(255ULL<<8 , VT
));
2317 Tmp8
= DAG
.getNode(ISD::OR
, dl
, VT
, Tmp8
, Tmp7
);
2318 Tmp6
= DAG
.getNode(ISD::OR
, dl
, VT
, Tmp6
, Tmp5
);
2319 Tmp4
= DAG
.getNode(ISD::OR
, dl
, VT
, Tmp4
, Tmp3
);
2320 Tmp2
= DAG
.getNode(ISD::OR
, dl
, VT
, Tmp2
, Tmp1
);
2321 Tmp8
= DAG
.getNode(ISD::OR
, dl
, VT
, Tmp8
, Tmp6
);
2322 Tmp4
= DAG
.getNode(ISD::OR
, dl
, VT
, Tmp4
, Tmp2
);
2323 return DAG
.getNode(ISD::OR
, dl
, VT
, Tmp8
, Tmp4
);
2327 /// ExpandBitCount - Expand the specified bitcount instruction into operations.
2329 SDValue
SelectionDAGLegalize::ExpandBitCount(unsigned Opc
, SDValue Op
,
2332 default: assert(0 && "Cannot expand this yet!");
2334 static const uint64_t mask
[6] = {
2335 0x5555555555555555ULL
, 0x3333333333333333ULL
,
2336 0x0F0F0F0F0F0F0F0FULL
, 0x00FF00FF00FF00FFULL
,
2337 0x0000FFFF0000FFFFULL
, 0x00000000FFFFFFFFULL
2339 EVT VT
= Op
.getValueType();
2340 EVT ShVT
= TLI
.getShiftAmountTy();
2341 unsigned len
= VT
.getSizeInBits();
2342 for (unsigned i
= 0; (1U << i
) <= (len
/ 2); ++i
) {
2343 //x = (x & mask[i][len/8]) + (x >> (1 << i) & mask[i][len/8])
2344 unsigned EltSize
= VT
.isVector() ?
2345 VT
.getVectorElementType().getSizeInBits() : len
;
2346 SDValue Tmp2
= DAG
.getConstant(APInt(EltSize
, mask
[i
]), VT
);
2347 SDValue Tmp3
= DAG
.getConstant(1ULL << i
, ShVT
);
2348 Op
= DAG
.getNode(ISD::ADD
, dl
, VT
,
2349 DAG
.getNode(ISD::AND
, dl
, VT
, Op
, Tmp2
),
2350 DAG
.getNode(ISD::AND
, dl
, VT
,
2351 DAG
.getNode(ISD::SRL
, dl
, VT
, Op
, Tmp3
),
2357 // for now, we do this:
2358 // x = x | (x >> 1);
2359 // x = x | (x >> 2);
2361 // x = x | (x >>16);
2362 // x = x | (x >>32); // for 64-bit input
2363 // return popcount(~x);
2365 // but see also: http://www.hackersdelight.org/HDcode/nlz.cc
2366 EVT VT
= Op
.getValueType();
2367 EVT ShVT
= TLI
.getShiftAmountTy();
2368 unsigned len
= VT
.getSizeInBits();
2369 for (unsigned i
= 0; (1U << i
) <= (len
/ 2); ++i
) {
2370 SDValue Tmp3
= DAG
.getConstant(1ULL << i
, ShVT
);
2371 Op
= DAG
.getNode(ISD::OR
, dl
, VT
, Op
,
2372 DAG
.getNode(ISD::SRL
, dl
, VT
, Op
, Tmp3
));
2374 Op
= DAG
.getNOT(dl
, Op
, VT
);
2375 return DAG
.getNode(ISD::CTPOP
, dl
, VT
, Op
);
2378 // for now, we use: { return popcount(~x & (x - 1)); }
2379 // unless the target has ctlz but not ctpop, in which case we use:
2380 // { return 32 - nlz(~x & (x-1)); }
2381 // see also http://www.hackersdelight.org/HDcode/ntz.cc
2382 EVT VT
= Op
.getValueType();
2383 SDValue Tmp3
= DAG
.getNode(ISD::AND
, dl
, VT
,
2384 DAG
.getNOT(dl
, Op
, VT
),
2385 DAG
.getNode(ISD::SUB
, dl
, VT
, Op
,
2386 DAG
.getConstant(1, VT
)));
2387 // If ISD::CTLZ is legal and CTPOP isn't, then do that instead.
2388 if (!TLI
.isOperationLegalOrCustom(ISD::CTPOP
, VT
) &&
2389 TLI
.isOperationLegalOrCustom(ISD::CTLZ
, VT
))
2390 return DAG
.getNode(ISD::SUB
, dl
, VT
,
2391 DAG
.getConstant(VT
.getSizeInBits(), VT
),
2392 DAG
.getNode(ISD::CTLZ
, dl
, VT
, Tmp3
));
2393 return DAG
.getNode(ISD::CTPOP
, dl
, VT
, Tmp3
);
2398 std::pair
<SDValue
, SDValue
> SelectionDAGLegalize::ExpandAtomic(SDNode
*Node
) {
2399 unsigned Opc
= Node
->getOpcode();
2400 MVT VT
= cast
<AtomicSDNode
>(Node
)->getMemoryVT().getSimpleVT();
2405 llvm_unreachable("Unhandled atomic intrinsic Expand!");
2407 case ISD::ATOMIC_SWAP
:
2408 switch (VT
.SimpleTy
) {
2409 default: llvm_unreachable("Unexpected value type for atomic!");
2410 case MVT::i8
: LC
= RTLIB::SYNC_LOCK_TEST_AND_SET_1
; break;
2411 case MVT::i16
: LC
= RTLIB::SYNC_LOCK_TEST_AND_SET_2
; break;
2412 case MVT::i32
: LC
= RTLIB::SYNC_LOCK_TEST_AND_SET_4
; break;
2413 case MVT::i64
: LC
= RTLIB::SYNC_LOCK_TEST_AND_SET_8
; break;
2416 case ISD::ATOMIC_CMP_SWAP
:
2417 switch (VT
.SimpleTy
) {
2418 default: llvm_unreachable("Unexpected value type for atomic!");
2419 case MVT::i8
: LC
= RTLIB::SYNC_VAL_COMPARE_AND_SWAP_1
; break;
2420 case MVT::i16
: LC
= RTLIB::SYNC_VAL_COMPARE_AND_SWAP_2
; break;
2421 case MVT::i32
: LC
= RTLIB::SYNC_VAL_COMPARE_AND_SWAP_4
; break;
2422 case MVT::i64
: LC
= RTLIB::SYNC_VAL_COMPARE_AND_SWAP_8
; break;
2425 case ISD::ATOMIC_LOAD_ADD
:
2426 switch (VT
.SimpleTy
) {
2427 default: llvm_unreachable("Unexpected value type for atomic!");
2428 case MVT::i8
: LC
= RTLIB::SYNC_FETCH_AND_ADD_1
; break;
2429 case MVT::i16
: LC
= RTLIB::SYNC_FETCH_AND_ADD_2
; break;
2430 case MVT::i32
: LC
= RTLIB::SYNC_FETCH_AND_ADD_4
; break;
2431 case MVT::i64
: LC
= RTLIB::SYNC_FETCH_AND_ADD_8
; break;
2434 case ISD::ATOMIC_LOAD_SUB
:
2435 switch (VT
.SimpleTy
) {
2436 default: llvm_unreachable("Unexpected value type for atomic!");
2437 case MVT::i8
: LC
= RTLIB::SYNC_FETCH_AND_SUB_1
; break;
2438 case MVT::i16
: LC
= RTLIB::SYNC_FETCH_AND_SUB_2
; break;
2439 case MVT::i32
: LC
= RTLIB::SYNC_FETCH_AND_SUB_4
; break;
2440 case MVT::i64
: LC
= RTLIB::SYNC_FETCH_AND_SUB_8
; break;
2443 case ISD::ATOMIC_LOAD_AND
:
2444 switch (VT
.SimpleTy
) {
2445 default: llvm_unreachable("Unexpected value type for atomic!");
2446 case MVT::i8
: LC
= RTLIB::SYNC_FETCH_AND_AND_1
; break;
2447 case MVT::i16
: LC
= RTLIB::SYNC_FETCH_AND_AND_2
; break;
2448 case MVT::i32
: LC
= RTLIB::SYNC_FETCH_AND_AND_4
; break;
2449 case MVT::i64
: LC
= RTLIB::SYNC_FETCH_AND_AND_8
; break;
2452 case ISD::ATOMIC_LOAD_OR
:
2453 switch (VT
.SimpleTy
) {
2454 default: llvm_unreachable("Unexpected value type for atomic!");
2455 case MVT::i8
: LC
= RTLIB::SYNC_FETCH_AND_OR_1
; break;
2456 case MVT::i16
: LC
= RTLIB::SYNC_FETCH_AND_OR_2
; break;
2457 case MVT::i32
: LC
= RTLIB::SYNC_FETCH_AND_OR_4
; break;
2458 case MVT::i64
: LC
= RTLIB::SYNC_FETCH_AND_OR_8
; break;
2461 case ISD::ATOMIC_LOAD_XOR
:
2462 switch (VT
.SimpleTy
) {
2463 default: llvm_unreachable("Unexpected value type for atomic!");
2464 case MVT::i8
: LC
= RTLIB::SYNC_FETCH_AND_XOR_1
; break;
2465 case MVT::i16
: LC
= RTLIB::SYNC_FETCH_AND_XOR_2
; break;
2466 case MVT::i32
: LC
= RTLIB::SYNC_FETCH_AND_XOR_4
; break;
2467 case MVT::i64
: LC
= RTLIB::SYNC_FETCH_AND_XOR_8
; break;
2470 case ISD::ATOMIC_LOAD_NAND
:
2471 switch (VT
.SimpleTy
) {
2472 default: llvm_unreachable("Unexpected value type for atomic!");
2473 case MVT::i8
: LC
= RTLIB::SYNC_FETCH_AND_NAND_1
; break;
2474 case MVT::i16
: LC
= RTLIB::SYNC_FETCH_AND_NAND_2
; break;
2475 case MVT::i32
: LC
= RTLIB::SYNC_FETCH_AND_NAND_4
; break;
2476 case MVT::i64
: LC
= RTLIB::SYNC_FETCH_AND_NAND_8
; break;
2481 return ExpandChainLibCall(LC
, Node
, false);
2484 void SelectionDAGLegalize::ExpandNode(SDNode
*Node
,
2485 SmallVectorImpl
<SDValue
> &Results
) {
2486 DebugLoc dl
= Node
->getDebugLoc();
2487 SDValue Tmp1
, Tmp2
, Tmp3
, Tmp4
;
2488 switch (Node
->getOpcode()) {
2492 Tmp1
= ExpandBitCount(Node
->getOpcode(), Node
->getOperand(0), dl
);
2493 Results
.push_back(Tmp1
);
2496 Results
.push_back(ExpandBSWAP(Node
->getOperand(0), dl
));
2498 case ISD::FRAMEADDR
:
2499 case ISD::RETURNADDR
:
2500 case ISD::FRAME_TO_ARGS_OFFSET
:
2501 Results
.push_back(DAG
.getConstant(0, Node
->getValueType(0)));
2503 case ISD::FLT_ROUNDS_
:
2504 Results
.push_back(DAG
.getConstant(1, Node
->getValueType(0)));
2506 case ISD::EH_RETURN
:
2510 case ISD::EH_SJLJ_LONGJMP
:
2511 Results
.push_back(Node
->getOperand(0));
2513 case ISD::EH_SJLJ_SETJMP
:
2514 Results
.push_back(DAG
.getConstant(0, MVT::i32
));
2515 Results
.push_back(Node
->getOperand(0));
2517 case ISD::MEMBARRIER
: {
2518 // If the target didn't lower this, lower it to '__sync_synchronize()' call
2519 TargetLowering::ArgListTy Args
;
2520 std::pair
<SDValue
, SDValue
> CallResult
=
2521 TLI
.LowerCallTo(Node
->getOperand(0), Type::getVoidTy(*DAG
.getContext()),
2522 false, false, false, false, 0, CallingConv::C
, false,
2523 /*isReturnValueUsed=*/true,
2524 DAG
.getExternalSymbol("__sync_synchronize",
2525 TLI
.getPointerTy()),
2527 Results
.push_back(CallResult
.second
);
2530 // By default, atomic intrinsics are marked Legal and lowered. Targets
2531 // which don't support them directly, however, may want libcalls, in which
2532 // case they mark them Expand, and we get here.
2533 // FIXME: Unimplemented for now. Add libcalls.
2534 case ISD::ATOMIC_SWAP
:
2535 case ISD::ATOMIC_LOAD_ADD
:
2536 case ISD::ATOMIC_LOAD_SUB
:
2537 case ISD::ATOMIC_LOAD_AND
:
2538 case ISD::ATOMIC_LOAD_OR
:
2539 case ISD::ATOMIC_LOAD_XOR
:
2540 case ISD::ATOMIC_LOAD_NAND
:
2541 case ISD::ATOMIC_LOAD_MIN
:
2542 case ISD::ATOMIC_LOAD_MAX
:
2543 case ISD::ATOMIC_LOAD_UMIN
:
2544 case ISD::ATOMIC_LOAD_UMAX
:
2545 case ISD::ATOMIC_CMP_SWAP
: {
2546 std::pair
<SDValue
, SDValue
> Tmp
= ExpandAtomic(Node
);
2547 Results
.push_back(Tmp
.first
);
2548 Results
.push_back(Tmp
.second
);
2551 case ISD::DYNAMIC_STACKALLOC
:
2552 ExpandDYNAMIC_STACKALLOC(Node
, Results
);
2554 case ISD::MERGE_VALUES
:
2555 for (unsigned i
= 0; i
< Node
->getNumValues(); i
++)
2556 Results
.push_back(Node
->getOperand(i
));
2559 EVT VT
= Node
->getValueType(0);
2561 Results
.push_back(DAG
.getConstant(0, VT
));
2563 assert(VT
.isFloatingPoint() && "Unknown value type!");
2564 Results
.push_back(DAG
.getConstantFP(0, VT
));
2569 // If this operation is not supported, lower it to 'abort()' call
2570 TargetLowering::ArgListTy Args
;
2571 std::pair
<SDValue
, SDValue
> CallResult
=
2572 TLI
.LowerCallTo(Node
->getOperand(0), Type::getVoidTy(*DAG
.getContext()),
2573 false, false, false, false, 0, CallingConv::C
, false,
2574 /*isReturnValueUsed=*/true,
2575 DAG
.getExternalSymbol("abort", TLI
.getPointerTy()),
2577 Results
.push_back(CallResult
.second
);
2581 case ISD::BIT_CONVERT
:
2582 Tmp1
= EmitStackConvert(Node
->getOperand(0), Node
->getValueType(0),
2583 Node
->getValueType(0), dl
);
2584 Results
.push_back(Tmp1
);
2586 case ISD::FP_EXTEND
:
2587 Tmp1
= EmitStackConvert(Node
->getOperand(0),
2588 Node
->getOperand(0).getValueType(),
2589 Node
->getValueType(0), dl
);
2590 Results
.push_back(Tmp1
);
2592 case ISD::SIGN_EXTEND_INREG
: {
2593 // NOTE: we could fall back on load/store here too for targets without
2594 // SAR. However, it is doubtful that any exist.
2595 EVT ExtraVT
= cast
<VTSDNode
>(Node
->getOperand(1))->getVT();
2596 EVT VT
= Node
->getValueType(0);
2597 EVT ShiftAmountTy
= TLI
.getShiftAmountTy();
2600 unsigned BitsDiff
= VT
.getScalarType().getSizeInBits() -
2601 ExtraVT
.getScalarType().getSizeInBits();
2602 SDValue ShiftCst
= DAG
.getConstant(BitsDiff
, ShiftAmountTy
);
2603 Tmp1
= DAG
.getNode(ISD::SHL
, dl
, Node
->getValueType(0),
2604 Node
->getOperand(0), ShiftCst
);
2605 Tmp1
= DAG
.getNode(ISD::SRA
, dl
, Node
->getValueType(0), Tmp1
, ShiftCst
);
2606 Results
.push_back(Tmp1
);
2609 case ISD::FP_ROUND_INREG
: {
2610 // The only way we can lower this is to turn it into a TRUNCSTORE,
2611 // EXTLOAD pair, targetting a temporary location (a stack slot).
2613 // NOTE: there is a choice here between constantly creating new stack
2614 // slots and always reusing the same one. We currently always create
2615 // new ones, as reuse may inhibit scheduling.
2616 EVT ExtraVT
= cast
<VTSDNode
>(Node
->getOperand(1))->getVT();
2617 Tmp1
= EmitStackConvert(Node
->getOperand(0), ExtraVT
,
2618 Node
->getValueType(0), dl
);
2619 Results
.push_back(Tmp1
);
2622 case ISD::SINT_TO_FP
:
2623 case ISD::UINT_TO_FP
:
2624 Tmp1
= ExpandLegalINT_TO_FP(Node
->getOpcode() == ISD::SINT_TO_FP
,
2625 Node
->getOperand(0), Node
->getValueType(0), dl
);
2626 Results
.push_back(Tmp1
);
2628 case ISD::FP_TO_UINT
: {
2629 SDValue True
, False
;
2630 EVT VT
= Node
->getOperand(0).getValueType();
2631 EVT NVT
= Node
->getValueType(0);
2632 const uint64_t zero
[] = {0, 0};
2633 APFloat apf
= APFloat(APInt(VT
.getSizeInBits(), 2, zero
));
2634 APInt x
= APInt::getSignBit(NVT
.getSizeInBits());
2635 (void)apf
.convertFromAPInt(x
, false, APFloat::rmNearestTiesToEven
);
2636 Tmp1
= DAG
.getConstantFP(apf
, VT
);
2637 Tmp2
= DAG
.getSetCC(dl
, TLI
.getSetCCResultType(VT
),
2638 Node
->getOperand(0),
2640 True
= DAG
.getNode(ISD::FP_TO_SINT
, dl
, NVT
, Node
->getOperand(0));
2641 False
= DAG
.getNode(ISD::FP_TO_SINT
, dl
, NVT
,
2642 DAG
.getNode(ISD::FSUB
, dl
, VT
,
2643 Node
->getOperand(0), Tmp1
));
2644 False
= DAG
.getNode(ISD::XOR
, dl
, NVT
, False
,
2645 DAG
.getConstant(x
, NVT
));
2646 Tmp1
= DAG
.getNode(ISD::SELECT
, dl
, NVT
, Tmp2
, True
, False
);
2647 Results
.push_back(Tmp1
);
2651 const Value
*V
= cast
<SrcValueSDNode
>(Node
->getOperand(2))->getValue();
2652 EVT VT
= Node
->getValueType(0);
2653 Tmp1
= Node
->getOperand(0);
2654 Tmp2
= Node
->getOperand(1);
2655 unsigned Align
= Node
->getConstantOperandVal(3);
2657 SDValue VAListLoad
= DAG
.getLoad(TLI
.getPointerTy(), dl
, Tmp1
, Tmp2
, V
, 0,
2659 SDValue VAList
= VAListLoad
;
2661 if (Align
> TLI
.getMinStackArgumentAlignment()) {
2662 assert(((Align
& (Align
-1)) == 0) && "Expected Align to be a power of 2");
2664 VAList
= DAG
.getNode(ISD::ADD
, dl
, TLI
.getPointerTy(), VAList
,
2665 DAG
.getConstant(Align
- 1,
2666 TLI
.getPointerTy()));
2668 VAList
= DAG
.getNode(ISD::AND
, dl
, TLI
.getPointerTy(), VAList
,
2669 DAG
.getConstant(-Align
,
2670 TLI
.getPointerTy()));
2673 // Increment the pointer, VAList, to the next vaarg
2674 Tmp3
= DAG
.getNode(ISD::ADD
, dl
, TLI
.getPointerTy(), VAList
,
2675 DAG
.getConstant(TLI
.getTargetData()->
2676 getTypeAllocSize(VT
.getTypeForEVT(*DAG
.getContext())),
2677 TLI
.getPointerTy()));
2678 // Store the incremented VAList to the legalized pointer
2679 Tmp3
= DAG
.getStore(VAListLoad
.getValue(1), dl
, Tmp3
, Tmp2
, V
, 0,
2681 // Load the actual argument out of the pointer VAList
2682 Results
.push_back(DAG
.getLoad(VT
, dl
, Tmp3
, VAList
, NULL
, 0,
2684 Results
.push_back(Results
[0].getValue(1));
2688 // This defaults to loading a pointer from the input and storing it to the
2689 // output, returning the chain.
2690 const Value
*VD
= cast
<SrcValueSDNode
>(Node
->getOperand(3))->getValue();
2691 const Value
*VS
= cast
<SrcValueSDNode
>(Node
->getOperand(4))->getValue();
2692 Tmp1
= DAG
.getLoad(TLI
.getPointerTy(), dl
, Node
->getOperand(0),
2693 Node
->getOperand(2), VS
, 0, false, false, 0);
2694 Tmp1
= DAG
.getStore(Tmp1
.getValue(1), dl
, Tmp1
, Node
->getOperand(1), VD
, 0,
2696 Results
.push_back(Tmp1
);
2699 case ISD::EXTRACT_VECTOR_ELT
:
2700 if (Node
->getOperand(0).getValueType().getVectorNumElements() == 1)
2701 // This must be an access of the only element. Return it.
2702 Tmp1
= DAG
.getNode(ISD::BIT_CONVERT
, dl
, Node
->getValueType(0),
2703 Node
->getOperand(0));
2705 Tmp1
= ExpandExtractFromVectorThroughStack(SDValue(Node
, 0));
2706 Results
.push_back(Tmp1
);
2708 case ISD::EXTRACT_SUBVECTOR
:
2709 Results
.push_back(ExpandExtractFromVectorThroughStack(SDValue(Node
, 0)));
2711 case ISD::CONCAT_VECTORS
: {
2712 Results
.push_back(ExpandVectorBuildThroughStack(Node
));
2715 case ISD::SCALAR_TO_VECTOR
:
2716 Results
.push_back(ExpandSCALAR_TO_VECTOR(Node
));
2718 case ISD::INSERT_VECTOR_ELT
:
2719 Results
.push_back(ExpandINSERT_VECTOR_ELT(Node
->getOperand(0),
2720 Node
->getOperand(1),
2721 Node
->getOperand(2), dl
));
2723 case ISD::VECTOR_SHUFFLE
: {
2724 SmallVector
<int, 8> Mask
;
2725 cast
<ShuffleVectorSDNode
>(Node
)->getMask(Mask
);
2727 EVT VT
= Node
->getValueType(0);
2728 EVT EltVT
= VT
.getVectorElementType();
2729 if (getTypeAction(EltVT
) == Promote
)
2730 EltVT
= TLI
.getTypeToTransformTo(*DAG
.getContext(), EltVT
);
2731 unsigned NumElems
= VT
.getVectorNumElements();
2732 SmallVector
<SDValue
, 8> Ops
;
2733 for (unsigned i
= 0; i
!= NumElems
; ++i
) {
2735 Ops
.push_back(DAG
.getUNDEF(EltVT
));
2738 unsigned Idx
= Mask
[i
];
2740 Ops
.push_back(DAG
.getNode(ISD::EXTRACT_VECTOR_ELT
, dl
, EltVT
,
2741 Node
->getOperand(0),
2742 DAG
.getIntPtrConstant(Idx
)));
2744 Ops
.push_back(DAG
.getNode(ISD::EXTRACT_VECTOR_ELT
, dl
, EltVT
,
2745 Node
->getOperand(1),
2746 DAG
.getIntPtrConstant(Idx
- NumElems
)));
2748 Tmp1
= DAG
.getNode(ISD::BUILD_VECTOR
, dl
, VT
, &Ops
[0], Ops
.size());
2749 Results
.push_back(Tmp1
);
2752 case ISD::EXTRACT_ELEMENT
: {
2753 EVT OpTy
= Node
->getOperand(0).getValueType();
2754 if (cast
<ConstantSDNode
>(Node
->getOperand(1))->getZExtValue()) {
2756 Tmp1
= DAG
.getNode(ISD::SRL
, dl
, OpTy
, Node
->getOperand(0),
2757 DAG
.getConstant(OpTy
.getSizeInBits()/2,
2758 TLI
.getShiftAmountTy()));
2759 Tmp1
= DAG
.getNode(ISD::TRUNCATE
, dl
, Node
->getValueType(0), Tmp1
);
2762 Tmp1
= DAG
.getNode(ISD::TRUNCATE
, dl
, Node
->getValueType(0),
2763 Node
->getOperand(0));
2765 Results
.push_back(Tmp1
);
2768 case ISD::STACKSAVE
:
2769 // Expand to CopyFromReg if the target set
2770 // StackPointerRegisterToSaveRestore.
2771 if (unsigned SP
= TLI
.getStackPointerRegisterToSaveRestore()) {
2772 Results
.push_back(DAG
.getCopyFromReg(Node
->getOperand(0), dl
, SP
,
2773 Node
->getValueType(0)));
2774 Results
.push_back(Results
[0].getValue(1));
2776 Results
.push_back(DAG
.getUNDEF(Node
->getValueType(0)));
2777 Results
.push_back(Node
->getOperand(0));
2780 case ISD::STACKRESTORE
:
2781 // Expand to CopyToReg if the target set
2782 // StackPointerRegisterToSaveRestore.
2783 if (unsigned SP
= TLI
.getStackPointerRegisterToSaveRestore()) {
2784 Results
.push_back(DAG
.getCopyToReg(Node
->getOperand(0), dl
, SP
,
2785 Node
->getOperand(1)));
2787 Results
.push_back(Node
->getOperand(0));
2790 case ISD::FCOPYSIGN
:
2791 Results
.push_back(ExpandFCOPYSIGN(Node
));
2794 // Expand Y = FNEG(X) -> Y = SUB -0.0, X
2795 Tmp1
= DAG
.getConstantFP(-0.0, Node
->getValueType(0));
2796 Tmp1
= DAG
.getNode(ISD::FSUB
, dl
, Node
->getValueType(0), Tmp1
,
2797 Node
->getOperand(0));
2798 Results
.push_back(Tmp1
);
2801 // Expand Y = FABS(X) -> Y = (X >u 0.0) ? X : fneg(X).
2802 EVT VT
= Node
->getValueType(0);
2803 Tmp1
= Node
->getOperand(0);
2804 Tmp2
= DAG
.getConstantFP(0.0, VT
);
2805 Tmp2
= DAG
.getSetCC(dl
, TLI
.getSetCCResultType(Tmp1
.getValueType()),
2806 Tmp1
, Tmp2
, ISD::SETUGT
);
2807 Tmp3
= DAG
.getNode(ISD::FNEG
, dl
, VT
, Tmp1
);
2808 Tmp1
= DAG
.getNode(ISD::SELECT
, dl
, VT
, Tmp2
, Tmp1
, Tmp3
);
2809 Results
.push_back(Tmp1
);
2813 Results
.push_back(ExpandFPLibCall(Node
, RTLIB::SQRT_F32
, RTLIB::SQRT_F64
,
2814 RTLIB::SQRT_F80
, RTLIB::SQRT_PPCF128
));
2817 Results
.push_back(ExpandFPLibCall(Node
, RTLIB::SIN_F32
, RTLIB::SIN_F64
,
2818 RTLIB::SIN_F80
, RTLIB::SIN_PPCF128
));
2821 Results
.push_back(ExpandFPLibCall(Node
, RTLIB::COS_F32
, RTLIB::COS_F64
,
2822 RTLIB::COS_F80
, RTLIB::COS_PPCF128
));
2825 Results
.push_back(ExpandFPLibCall(Node
, RTLIB::LOG_F32
, RTLIB::LOG_F64
,
2826 RTLIB::LOG_F80
, RTLIB::LOG_PPCF128
));
2829 Results
.push_back(ExpandFPLibCall(Node
, RTLIB::LOG2_F32
, RTLIB::LOG2_F64
,
2830 RTLIB::LOG2_F80
, RTLIB::LOG2_PPCF128
));
2833 Results
.push_back(ExpandFPLibCall(Node
, RTLIB::LOG10_F32
, RTLIB::LOG10_F64
,
2834 RTLIB::LOG10_F80
, RTLIB::LOG10_PPCF128
));
2837 Results
.push_back(ExpandFPLibCall(Node
, RTLIB::EXP_F32
, RTLIB::EXP_F64
,
2838 RTLIB::EXP_F80
, RTLIB::EXP_PPCF128
));
2841 Results
.push_back(ExpandFPLibCall(Node
, RTLIB::EXP2_F32
, RTLIB::EXP2_F64
,
2842 RTLIB::EXP2_F80
, RTLIB::EXP2_PPCF128
));
2845 Results
.push_back(ExpandFPLibCall(Node
, RTLIB::TRUNC_F32
, RTLIB::TRUNC_F64
,
2846 RTLIB::TRUNC_F80
, RTLIB::TRUNC_PPCF128
));
2849 Results
.push_back(ExpandFPLibCall(Node
, RTLIB::FLOOR_F32
, RTLIB::FLOOR_F64
,
2850 RTLIB::FLOOR_F80
, RTLIB::FLOOR_PPCF128
));
2853 Results
.push_back(ExpandFPLibCall(Node
, RTLIB::CEIL_F32
, RTLIB::CEIL_F64
,
2854 RTLIB::CEIL_F80
, RTLIB::CEIL_PPCF128
));
2857 Results
.push_back(ExpandFPLibCall(Node
, RTLIB::RINT_F32
, RTLIB::RINT_F64
,
2858 RTLIB::RINT_F80
, RTLIB::RINT_PPCF128
));
2860 case ISD::FNEARBYINT
:
2861 Results
.push_back(ExpandFPLibCall(Node
, RTLIB::NEARBYINT_F32
,
2862 RTLIB::NEARBYINT_F64
,
2863 RTLIB::NEARBYINT_F80
,
2864 RTLIB::NEARBYINT_PPCF128
));
2867 Results
.push_back(ExpandFPLibCall(Node
, RTLIB::POWI_F32
, RTLIB::POWI_F64
,
2868 RTLIB::POWI_F80
, RTLIB::POWI_PPCF128
));
2871 Results
.push_back(ExpandFPLibCall(Node
, RTLIB::POW_F32
, RTLIB::POW_F64
,
2872 RTLIB::POW_F80
, RTLIB::POW_PPCF128
));
2875 Results
.push_back(ExpandFPLibCall(Node
, RTLIB::DIV_F32
, RTLIB::DIV_F64
,
2876 RTLIB::DIV_F80
, RTLIB::DIV_PPCF128
));
2879 Results
.push_back(ExpandFPLibCall(Node
, RTLIB::REM_F32
, RTLIB::REM_F64
,
2880 RTLIB::REM_F80
, RTLIB::REM_PPCF128
));
2882 case ISD::FP16_TO_FP32
:
2883 Results
.push_back(ExpandLibCall(RTLIB::FPEXT_F16_F32
, Node
, false));
2885 case ISD::FP32_TO_FP16
:
2886 Results
.push_back(ExpandLibCall(RTLIB::FPROUND_F32_F16
, Node
, false));
2888 case ISD::ConstantFP
: {
2889 ConstantFPSDNode
*CFP
= cast
<ConstantFPSDNode
>(Node
);
2890 // Check to see if this FP immediate is already legal.
2891 // If this is a legal constant, turn it into a TargetConstantFP node.
2892 if (TLI
.isFPImmLegal(CFP
->getValueAPF(), Node
->getValueType(0)))
2893 Results
.push_back(SDValue(Node
, 0));
2895 Results
.push_back(ExpandConstantFP(CFP
, true, DAG
, TLI
));
2898 case ISD::EHSELECTION
: {
2899 unsigned Reg
= TLI
.getExceptionSelectorRegister();
2900 assert(Reg
&& "Can't expand to unknown register!");
2901 Results
.push_back(DAG
.getCopyFromReg(Node
->getOperand(1), dl
, Reg
,
2902 Node
->getValueType(0)));
2903 Results
.push_back(Results
[0].getValue(1));
2906 case ISD::EXCEPTIONADDR
: {
2907 unsigned Reg
= TLI
.getExceptionAddressRegister();
2908 assert(Reg
&& "Can't expand to unknown register!");
2909 Results
.push_back(DAG
.getCopyFromReg(Node
->getOperand(0), dl
, Reg
,
2910 Node
->getValueType(0)));
2911 Results
.push_back(Results
[0].getValue(1));
2915 EVT VT
= Node
->getValueType(0);
2916 assert(TLI
.isOperationLegalOrCustom(ISD::ADD
, VT
) &&
2917 TLI
.isOperationLegalOrCustom(ISD::XOR
, VT
) &&
2918 "Don't know how to expand this subtraction!");
2919 Tmp1
= DAG
.getNode(ISD::XOR
, dl
, VT
, Node
->getOperand(1),
2920 DAG
.getConstant(APInt::getAllOnesValue(VT
.getSizeInBits()), VT
));
2921 Tmp1
= DAG
.getNode(ISD::ADD
, dl
, VT
, Tmp2
, DAG
.getConstant(1, VT
));
2922 Results
.push_back(DAG
.getNode(ISD::ADD
, dl
, VT
, Node
->getOperand(0), Tmp1
));
2927 EVT VT
= Node
->getValueType(0);
2928 SDVTList VTs
= DAG
.getVTList(VT
, VT
);
2929 bool isSigned
= Node
->getOpcode() == ISD::SREM
;
2930 unsigned DivOpc
= isSigned
? ISD::SDIV
: ISD::UDIV
;
2931 unsigned DivRemOpc
= isSigned
? ISD::SDIVREM
: ISD::UDIVREM
;
2932 Tmp2
= Node
->getOperand(0);
2933 Tmp3
= Node
->getOperand(1);
2934 if (TLI
.isOperationLegalOrCustom(DivRemOpc
, VT
)) {
2935 Tmp1
= DAG
.getNode(DivRemOpc
, dl
, VTs
, Tmp2
, Tmp3
).getValue(1);
2936 } else if (TLI
.isOperationLegalOrCustom(DivOpc
, VT
)) {
2938 Tmp1
= DAG
.getNode(DivOpc
, dl
, VT
, Tmp2
, Tmp3
);
2939 Tmp1
= DAG
.getNode(ISD::MUL
, dl
, VT
, Tmp1
, Tmp3
);
2940 Tmp1
= DAG
.getNode(ISD::SUB
, dl
, VT
, Tmp2
, Tmp1
);
2941 } else if (isSigned
) {
2942 Tmp1
= ExpandIntLibCall(Node
, true,
2944 RTLIB::SREM_I16
, RTLIB::SREM_I32
,
2945 RTLIB::SREM_I64
, RTLIB::SREM_I128
);
2947 Tmp1
= ExpandIntLibCall(Node
, false,
2949 RTLIB::UREM_I16
, RTLIB::UREM_I32
,
2950 RTLIB::UREM_I64
, RTLIB::UREM_I128
);
2952 Results
.push_back(Tmp1
);
2957 bool isSigned
= Node
->getOpcode() == ISD::SDIV
;
2958 unsigned DivRemOpc
= isSigned
? ISD::SDIVREM
: ISD::UDIVREM
;
2959 EVT VT
= Node
->getValueType(0);
2960 SDVTList VTs
= DAG
.getVTList(VT
, VT
);
2961 if (TLI
.isOperationLegalOrCustom(DivRemOpc
, VT
))
2962 Tmp1
= DAG
.getNode(DivRemOpc
, dl
, VTs
, Node
->getOperand(0),
2963 Node
->getOperand(1));
2965 Tmp1
= ExpandIntLibCall(Node
, true,
2967 RTLIB::SDIV_I16
, RTLIB::SDIV_I32
,
2968 RTLIB::SDIV_I64
, RTLIB::SDIV_I128
);
2970 Tmp1
= ExpandIntLibCall(Node
, false,
2972 RTLIB::UDIV_I16
, RTLIB::UDIV_I32
,
2973 RTLIB::UDIV_I64
, RTLIB::UDIV_I128
);
2974 Results
.push_back(Tmp1
);
2979 unsigned ExpandOpcode
= Node
->getOpcode() == ISD::MULHU
? ISD::UMUL_LOHI
:
2981 EVT VT
= Node
->getValueType(0);
2982 SDVTList VTs
= DAG
.getVTList(VT
, VT
);
2983 assert(TLI
.isOperationLegalOrCustom(ExpandOpcode
, VT
) &&
2984 "If this wasn't legal, it shouldn't have been created!");
2985 Tmp1
= DAG
.getNode(ExpandOpcode
, dl
, VTs
, Node
->getOperand(0),
2986 Node
->getOperand(1));
2987 Results
.push_back(Tmp1
.getValue(1));
2991 EVT VT
= Node
->getValueType(0);
2992 SDVTList VTs
= DAG
.getVTList(VT
, VT
);
2993 // See if multiply or divide can be lowered using two-result operations.
2994 // We just need the low half of the multiply; try both the signed
2995 // and unsigned forms. If the target supports both SMUL_LOHI and
2996 // UMUL_LOHI, form a preference by checking which forms of plain
2997 // MULH it supports.
2998 bool HasSMUL_LOHI
= TLI
.isOperationLegalOrCustom(ISD::SMUL_LOHI
, VT
);
2999 bool HasUMUL_LOHI
= TLI
.isOperationLegalOrCustom(ISD::UMUL_LOHI
, VT
);
3000 bool HasMULHS
= TLI
.isOperationLegalOrCustom(ISD::MULHS
, VT
);
3001 bool HasMULHU
= TLI
.isOperationLegalOrCustom(ISD::MULHU
, VT
);
3002 unsigned OpToUse
= 0;
3003 if (HasSMUL_LOHI
&& !HasMULHS
) {
3004 OpToUse
= ISD::SMUL_LOHI
;
3005 } else if (HasUMUL_LOHI
&& !HasMULHU
) {
3006 OpToUse
= ISD::UMUL_LOHI
;
3007 } else if (HasSMUL_LOHI
) {
3008 OpToUse
= ISD::SMUL_LOHI
;
3009 } else if (HasUMUL_LOHI
) {
3010 OpToUse
= ISD::UMUL_LOHI
;
3013 Results
.push_back(DAG
.getNode(OpToUse
, dl
, VTs
, Node
->getOperand(0),
3014 Node
->getOperand(1)));
3017 Tmp1
= ExpandIntLibCall(Node
, false,
3019 RTLIB::MUL_I16
, RTLIB::MUL_I32
,
3020 RTLIB::MUL_I64
, RTLIB::MUL_I128
);
3021 Results
.push_back(Tmp1
);
3026 SDValue LHS
= Node
->getOperand(0);
3027 SDValue RHS
= Node
->getOperand(1);
3028 SDValue Sum
= DAG
.getNode(Node
->getOpcode() == ISD::SADDO
?
3029 ISD::ADD
: ISD::SUB
, dl
, LHS
.getValueType(),
3031 Results
.push_back(Sum
);
3032 EVT OType
= Node
->getValueType(1);
3034 SDValue Zero
= DAG
.getConstant(0, LHS
.getValueType());
3036 // LHSSign -> LHS >= 0
3037 // RHSSign -> RHS >= 0
3038 // SumSign -> Sum >= 0
3041 // Overflow -> (LHSSign == RHSSign) && (LHSSign != SumSign)
3043 // Overflow -> (LHSSign != RHSSign) && (LHSSign != SumSign)
3045 SDValue LHSSign
= DAG
.getSetCC(dl
, OType
, LHS
, Zero
, ISD::SETGE
);
3046 SDValue RHSSign
= DAG
.getSetCC(dl
, OType
, RHS
, Zero
, ISD::SETGE
);
3047 SDValue SignsMatch
= DAG
.getSetCC(dl
, OType
, LHSSign
, RHSSign
,
3048 Node
->getOpcode() == ISD::SADDO
?
3049 ISD::SETEQ
: ISD::SETNE
);
3051 SDValue SumSign
= DAG
.getSetCC(dl
, OType
, Sum
, Zero
, ISD::SETGE
);
3052 SDValue SumSignNE
= DAG
.getSetCC(dl
, OType
, LHSSign
, SumSign
, ISD::SETNE
);
3054 SDValue Cmp
= DAG
.getNode(ISD::AND
, dl
, OType
, SignsMatch
, SumSignNE
);
3055 Results
.push_back(Cmp
);
3060 SDValue LHS
= Node
->getOperand(0);
3061 SDValue RHS
= Node
->getOperand(1);
3062 SDValue Sum
= DAG
.getNode(Node
->getOpcode() == ISD::UADDO
?
3063 ISD::ADD
: ISD::SUB
, dl
, LHS
.getValueType(),
3065 Results
.push_back(Sum
);
3066 Results
.push_back(DAG
.getSetCC(dl
, Node
->getValueType(1), Sum
, LHS
,
3067 Node
->getOpcode () == ISD::UADDO
?
3068 ISD::SETULT
: ISD::SETUGT
));
3073 EVT VT
= Node
->getValueType(0);
3074 SDValue LHS
= Node
->getOperand(0);
3075 SDValue RHS
= Node
->getOperand(1);
3078 static const unsigned Ops
[2][3] =
3079 { { ISD::MULHU
, ISD::UMUL_LOHI
, ISD::ZERO_EXTEND
},
3080 { ISD::MULHS
, ISD::SMUL_LOHI
, ISD::SIGN_EXTEND
}};
3081 bool isSigned
= Node
->getOpcode() == ISD::SMULO
;
3082 if (TLI
.isOperationLegalOrCustom(Ops
[isSigned
][0], VT
)) {
3083 BottomHalf
= DAG
.getNode(ISD::MUL
, dl
, VT
, LHS
, RHS
);
3084 TopHalf
= DAG
.getNode(Ops
[isSigned
][0], dl
, VT
, LHS
, RHS
);
3085 } else if (TLI
.isOperationLegalOrCustom(Ops
[isSigned
][1], VT
)) {
3086 BottomHalf
= DAG
.getNode(Ops
[isSigned
][1], dl
, DAG
.getVTList(VT
, VT
), LHS
,
3088 TopHalf
= BottomHalf
.getValue(1);
3090 // FIXME: We should be able to fall back to a libcall with an illegal
3091 // type in some cases.
3092 // Also, we can fall back to a division in some cases, but that's a big
3093 // performance hit in the general case.
3094 assert(TLI
.isTypeLegal(EVT::getIntegerVT(*DAG
.getContext(),
3095 VT
.getSizeInBits() * 2)) &&
3096 "Don't know how to expand this operation yet!");
3097 EVT WideVT
= EVT::getIntegerVT(*DAG
.getContext(), VT
.getSizeInBits() * 2);
3098 LHS
= DAG
.getNode(Ops
[isSigned
][2], dl
, WideVT
, LHS
);
3099 RHS
= DAG
.getNode(Ops
[isSigned
][2], dl
, WideVT
, RHS
);
3100 Tmp1
= DAG
.getNode(ISD::MUL
, dl
, WideVT
, LHS
, RHS
);
3101 BottomHalf
= DAG
.getNode(ISD::EXTRACT_ELEMENT
, dl
, VT
, Tmp1
,
3102 DAG
.getIntPtrConstant(0));
3103 TopHalf
= DAG
.getNode(ISD::EXTRACT_ELEMENT
, dl
, VT
, Tmp1
,
3104 DAG
.getIntPtrConstant(1));
3107 Tmp1
= DAG
.getConstant(VT
.getSizeInBits() - 1, TLI
.getShiftAmountTy());
3108 Tmp1
= DAG
.getNode(ISD::SRA
, dl
, VT
, BottomHalf
, Tmp1
);
3109 TopHalf
= DAG
.getSetCC(dl
, TLI
.getSetCCResultType(VT
), TopHalf
, Tmp1
,
3112 TopHalf
= DAG
.getSetCC(dl
, TLI
.getSetCCResultType(VT
), TopHalf
,
3113 DAG
.getConstant(0, VT
), ISD::SETNE
);
3115 Results
.push_back(BottomHalf
);
3116 Results
.push_back(TopHalf
);
3119 case ISD::BUILD_PAIR
: {
3120 EVT PairTy
= Node
->getValueType(0);
3121 Tmp1
= DAG
.getNode(ISD::ZERO_EXTEND
, dl
, PairTy
, Node
->getOperand(0));
3122 Tmp2
= DAG
.getNode(ISD::ANY_EXTEND
, dl
, PairTy
, Node
->getOperand(1));
3123 Tmp2
= DAG
.getNode(ISD::SHL
, dl
, PairTy
, Tmp2
,
3124 DAG
.getConstant(PairTy
.getSizeInBits()/2,
3125 TLI
.getShiftAmountTy()));
3126 Results
.push_back(DAG
.getNode(ISD::OR
, dl
, PairTy
, Tmp1
, Tmp2
));
3130 Tmp1
= Node
->getOperand(0);
3131 Tmp2
= Node
->getOperand(1);
3132 Tmp3
= Node
->getOperand(2);
3133 if (Tmp1
.getOpcode() == ISD::SETCC
) {
3134 Tmp1
= DAG
.getSelectCC(dl
, Tmp1
.getOperand(0), Tmp1
.getOperand(1),
3136 cast
<CondCodeSDNode
>(Tmp1
.getOperand(2))->get());
3138 Tmp1
= DAG
.getSelectCC(dl
, Tmp1
,
3139 DAG
.getConstant(0, Tmp1
.getValueType()),
3140 Tmp2
, Tmp3
, ISD::SETNE
);
3142 Results
.push_back(Tmp1
);
3145 SDValue Chain
= Node
->getOperand(0);
3146 SDValue Table
= Node
->getOperand(1);
3147 SDValue Index
= Node
->getOperand(2);
3149 EVT PTy
= TLI
.getPointerTy();
3151 const TargetData
&TD
= *TLI
.getTargetData();
3152 unsigned EntrySize
=
3153 DAG
.getMachineFunction().getJumpTableInfo()->getEntrySize(TD
);
3155 Index
= DAG
.getNode(ISD::MUL
, dl
, PTy
,
3156 Index
, DAG
.getConstant(EntrySize
, PTy
));
3157 SDValue Addr
= DAG
.getNode(ISD::ADD
, dl
, PTy
, Index
, Table
);
3159 EVT MemVT
= EVT::getIntegerVT(*DAG
.getContext(), EntrySize
* 8);
3160 SDValue LD
= DAG
.getExtLoad(ISD::SEXTLOAD
, PTy
, dl
, Chain
, Addr
,
3161 PseudoSourceValue::getJumpTable(), 0, MemVT
,
3164 if (TM
.getRelocationModel() == Reloc::PIC_
) {
3165 // For PIC, the sequence is:
3166 // BRIND(load(Jumptable + index) + RelocBase)
3167 // RelocBase can be JumpTable, GOT or some sort of global base.
3168 Addr
= DAG
.getNode(ISD::ADD
, dl
, PTy
, Addr
,
3169 TLI
.getPICJumpTableRelocBase(Table
, DAG
));
3171 Tmp1
= DAG
.getNode(ISD::BRIND
, dl
, MVT::Other
, LD
.getValue(1), Addr
);
3172 Results
.push_back(Tmp1
);
3176 // Expand brcond's setcc into its constituent parts and create a BR_CC
3178 Tmp1
= Node
->getOperand(0);
3179 Tmp2
= Node
->getOperand(1);
3180 if (Tmp2
.getOpcode() == ISD::SETCC
) {
3181 Tmp1
= DAG
.getNode(ISD::BR_CC
, dl
, MVT::Other
,
3182 Tmp1
, Tmp2
.getOperand(2),
3183 Tmp2
.getOperand(0), Tmp2
.getOperand(1),
3184 Node
->getOperand(2));
3186 Tmp1
= DAG
.getNode(ISD::BR_CC
, dl
, MVT::Other
, Tmp1
,
3187 DAG
.getCondCode(ISD::SETNE
), Tmp2
,
3188 DAG
.getConstant(0, Tmp2
.getValueType()),
3189 Node
->getOperand(2));
3191 Results
.push_back(Tmp1
);
3194 Tmp1
= Node
->getOperand(0);
3195 Tmp2
= Node
->getOperand(1);
3196 Tmp3
= Node
->getOperand(2);
3197 LegalizeSetCCCondCode(Node
->getValueType(0), Tmp1
, Tmp2
, Tmp3
, dl
);
3199 // If we expanded the SETCC into an AND/OR, return the new node
3200 if (Tmp2
.getNode() == 0) {
3201 Results
.push_back(Tmp1
);
3205 // Otherwise, SETCC for the given comparison type must be completely
3206 // illegal; expand it into a SELECT_CC.
3207 EVT VT
= Node
->getValueType(0);
3208 Tmp1
= DAG
.getNode(ISD::SELECT_CC
, dl
, VT
, Tmp1
, Tmp2
,
3209 DAG
.getConstant(1, VT
), DAG
.getConstant(0, VT
), Tmp3
);
3210 Results
.push_back(Tmp1
);
3213 case ISD::SELECT_CC
: {
3214 Tmp1
= Node
->getOperand(0); // LHS
3215 Tmp2
= Node
->getOperand(1); // RHS
3216 Tmp3
= Node
->getOperand(2); // True
3217 Tmp4
= Node
->getOperand(3); // False
3218 SDValue CC
= Node
->getOperand(4);
3220 LegalizeSetCCCondCode(TLI
.getSetCCResultType(Tmp1
.getValueType()),
3221 Tmp1
, Tmp2
, CC
, dl
);
3223 assert(!Tmp2
.getNode() && "Can't legalize SELECT_CC with legal condition!");
3224 Tmp2
= DAG
.getConstant(0, Tmp1
.getValueType());
3225 CC
= DAG
.getCondCode(ISD::SETNE
);
3226 Tmp1
= DAG
.getNode(ISD::SELECT_CC
, dl
, Node
->getValueType(0), Tmp1
, Tmp2
,
3228 Results
.push_back(Tmp1
);
3232 Tmp1
= Node
->getOperand(0); // Chain
3233 Tmp2
= Node
->getOperand(2); // LHS
3234 Tmp3
= Node
->getOperand(3); // RHS
3235 Tmp4
= Node
->getOperand(1); // CC
3237 LegalizeSetCCCondCode(TLI
.getSetCCResultType(Tmp2
.getValueType()),
3238 Tmp2
, Tmp3
, Tmp4
, dl
);
3239 LastCALLSEQ_END
= DAG
.getEntryNode();
3241 assert(!Tmp3
.getNode() && "Can't legalize BR_CC with legal condition!");
3242 Tmp3
= DAG
.getConstant(0, Tmp2
.getValueType());
3243 Tmp4
= DAG
.getCondCode(ISD::SETNE
);
3244 Tmp1
= DAG
.getNode(ISD::BR_CC
, dl
, Node
->getValueType(0), Tmp1
, Tmp4
, Tmp2
,
3245 Tmp3
, Node
->getOperand(4));
3246 Results
.push_back(Tmp1
);
3249 case ISD::GLOBAL_OFFSET_TABLE
:
3250 case ISD::GlobalAddress
:
3251 case ISD::GlobalTLSAddress
:
3252 case ISD::ExternalSymbol
:
3253 case ISD::ConstantPool
:
3254 case ISD::JumpTable
:
3255 case ISD::INTRINSIC_W_CHAIN
:
3256 case ISD::INTRINSIC_WO_CHAIN
:
3257 case ISD::INTRINSIC_VOID
:
3258 // FIXME: Custom lowering for these operations shouldn't return null!
3259 for (unsigned i
= 0, e
= Node
->getNumValues(); i
!= e
; ++i
)
3260 Results
.push_back(SDValue(Node
, i
));
3264 void SelectionDAGLegalize::PromoteNode(SDNode
*Node
,
3265 SmallVectorImpl
<SDValue
> &Results
) {
3266 EVT OVT
= Node
->getValueType(0);
3267 if (Node
->getOpcode() == ISD::UINT_TO_FP
||
3268 Node
->getOpcode() == ISD::SINT_TO_FP
||
3269 Node
->getOpcode() == ISD::SETCC
) {
3270 OVT
= Node
->getOperand(0).getValueType();
3272 EVT NVT
= TLI
.getTypeToPromoteTo(Node
->getOpcode(), OVT
);
3273 DebugLoc dl
= Node
->getDebugLoc();
3274 SDValue Tmp1
, Tmp2
, Tmp3
;
3275 switch (Node
->getOpcode()) {
3279 // Zero extend the argument.
3280 Tmp1
= DAG
.getNode(ISD::ZERO_EXTEND
, dl
, NVT
, Node
->getOperand(0));
3281 // Perform the larger operation.
3282 Tmp1
= DAG
.getNode(Node
->getOpcode(), dl
, NVT
, Tmp1
);
3283 if (Node
->getOpcode() == ISD::CTTZ
) {
3284 //if Tmp1 == sizeinbits(NVT) then Tmp1 = sizeinbits(Old VT)
3285 Tmp2
= DAG
.getSetCC(dl
, TLI
.getSetCCResultType(NVT
),
3286 Tmp1
, DAG
.getConstant(NVT
.getSizeInBits(), NVT
),
3288 Tmp1
= DAG
.getNode(ISD::SELECT
, dl
, NVT
, Tmp2
,
3289 DAG
.getConstant(OVT
.getSizeInBits(), NVT
), Tmp1
);
3290 } else if (Node
->getOpcode() == ISD::CTLZ
) {
3291 // Tmp1 = Tmp1 - (sizeinbits(NVT) - sizeinbits(Old VT))
3292 Tmp1
= DAG
.getNode(ISD::SUB
, dl
, NVT
, Tmp1
,
3293 DAG
.getConstant(NVT
.getSizeInBits() -
3294 OVT
.getSizeInBits(), NVT
));
3296 Results
.push_back(DAG
.getNode(ISD::TRUNCATE
, dl
, OVT
, Tmp1
));
3299 unsigned DiffBits
= NVT
.getSizeInBits() - OVT
.getSizeInBits();
3300 Tmp1
= DAG
.getNode(ISD::ZERO_EXTEND
, dl
, NVT
, Node
->getOperand(0));
3301 Tmp1
= DAG
.getNode(ISD::BSWAP
, dl
, NVT
, Tmp1
);
3302 Tmp1
= DAG
.getNode(ISD::SRL
, dl
, NVT
, Tmp1
,
3303 DAG
.getConstant(DiffBits
, TLI
.getShiftAmountTy()));
3304 Results
.push_back(Tmp1
);
3307 case ISD::FP_TO_UINT
:
3308 case ISD::FP_TO_SINT
:
3309 Tmp1
= PromoteLegalFP_TO_INT(Node
->getOperand(0), Node
->getValueType(0),
3310 Node
->getOpcode() == ISD::FP_TO_SINT
, dl
);
3311 Results
.push_back(Tmp1
);
3313 case ISD::UINT_TO_FP
:
3314 case ISD::SINT_TO_FP
:
3315 Tmp1
= PromoteLegalINT_TO_FP(Node
->getOperand(0), Node
->getValueType(0),
3316 Node
->getOpcode() == ISD::SINT_TO_FP
, dl
);
3317 Results
.push_back(Tmp1
);
3322 unsigned ExtOp
, TruncOp
;
3323 if (OVT
.isVector()) {
3324 ExtOp
= ISD::BIT_CONVERT
;
3325 TruncOp
= ISD::BIT_CONVERT
;
3327 assert(OVT
.isInteger() && "Cannot promote logic operation");
3328 ExtOp
= ISD::ANY_EXTEND
;
3329 TruncOp
= ISD::TRUNCATE
;
3331 // Promote each of the values to the new type.
3332 Tmp1
= DAG
.getNode(ExtOp
, dl
, NVT
, Node
->getOperand(0));
3333 Tmp2
= DAG
.getNode(ExtOp
, dl
, NVT
, Node
->getOperand(1));
3334 // Perform the larger operation, then convert back
3335 Tmp1
= DAG
.getNode(Node
->getOpcode(), dl
, NVT
, Tmp1
, Tmp2
);
3336 Results
.push_back(DAG
.getNode(TruncOp
, dl
, OVT
, Tmp1
));
3340 unsigned ExtOp
, TruncOp
;
3341 if (Node
->getValueType(0).isVector()) {
3342 ExtOp
= ISD::BIT_CONVERT
;
3343 TruncOp
= ISD::BIT_CONVERT
;
3344 } else if (Node
->getValueType(0).isInteger()) {
3345 ExtOp
= ISD::ANY_EXTEND
;
3346 TruncOp
= ISD::TRUNCATE
;
3348 ExtOp
= ISD::FP_EXTEND
;
3349 TruncOp
= ISD::FP_ROUND
;
3351 Tmp1
= Node
->getOperand(0);
3352 // Promote each of the values to the new type.
3353 Tmp2
= DAG
.getNode(ExtOp
, dl
, NVT
, Node
->getOperand(1));
3354 Tmp3
= DAG
.getNode(ExtOp
, dl
, NVT
, Node
->getOperand(2));
3355 // Perform the larger operation, then round down.
3356 Tmp1
= DAG
.getNode(ISD::SELECT
, dl
, NVT
, Tmp1
, Tmp2
, Tmp3
);
3357 if (TruncOp
!= ISD::FP_ROUND
)
3358 Tmp1
= DAG
.getNode(TruncOp
, dl
, Node
->getValueType(0), Tmp1
);
3360 Tmp1
= DAG
.getNode(TruncOp
, dl
, Node
->getValueType(0), Tmp1
,
3361 DAG
.getIntPtrConstant(0));
3362 Results
.push_back(Tmp1
);
3365 case ISD::VECTOR_SHUFFLE
: {
3366 SmallVector
<int, 8> Mask
;
3367 cast
<ShuffleVectorSDNode
>(Node
)->getMask(Mask
);
3369 // Cast the two input vectors.
3370 Tmp1
= DAG
.getNode(ISD::BIT_CONVERT
, dl
, NVT
, Node
->getOperand(0));
3371 Tmp2
= DAG
.getNode(ISD::BIT_CONVERT
, dl
, NVT
, Node
->getOperand(1));
3373 // Convert the shuffle mask to the right # elements.
3374 Tmp1
= ShuffleWithNarrowerEltType(NVT
, OVT
, dl
, Tmp1
, Tmp2
, Mask
);
3375 Tmp1
= DAG
.getNode(ISD::BIT_CONVERT
, dl
, OVT
, Tmp1
);
3376 Results
.push_back(Tmp1
);
3380 unsigned ExtOp
= ISD::FP_EXTEND
;
3381 if (NVT
.isInteger()) {
3382 ISD::CondCode CCCode
=
3383 cast
<CondCodeSDNode
>(Node
->getOperand(2))->get();
3384 ExtOp
= isSignedIntSetCC(CCCode
) ? ISD::SIGN_EXTEND
: ISD::ZERO_EXTEND
;
3386 Tmp1
= DAG
.getNode(ExtOp
, dl
, NVT
, Node
->getOperand(0));
3387 Tmp2
= DAG
.getNode(ExtOp
, dl
, NVT
, Node
->getOperand(1));
3388 Results
.push_back(DAG
.getNode(ISD::SETCC
, dl
, Node
->getValueType(0),
3389 Tmp1
, Tmp2
, Node
->getOperand(2)));
// SelectionDAG::Legalize - This is the entry point for the file.
//
// Legalizes this DAG in place: eliminates value sizes and operations the
// target machine cannot handle (see the SelectionDAGLegalize class comment
// at the top of this file).  OptLevel is forwarded to the legalizer —
// presumably gating how much optimization it performs while transforming
// the DAG; confirm against the SelectionDAGLegalize constructor.
void SelectionDAG::Legalize(CodeGenOpt::Level OptLevel) {
  // Build a legalizer bound to this DAG and run it to completion.  The
  // temporary's lifetime covers the whole pass; all results are recorded
  // in *this.
  SelectionDAGLegalize(*this, OptLevel).LegalizeDAG();