//===- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

14 #include "ARMISelLowering.h"
15 #include "ARMBaseInstrInfo.h"
16 #include "ARMBaseRegisterInfo.h"
17 #include "ARMCallingConv.h"
18 #include "ARMConstantPoolValue.h"
19 #include "ARMMachineFunctionInfo.h"
20 #include "ARMPerfectShuffle.h"
21 #include "ARMRegisterInfo.h"
22 #include "ARMSelectionDAGInfo.h"
23 #include "ARMSubtarget.h"
24 #include "MCTargetDesc/ARMAddressingModes.h"
25 #include "MCTargetDesc/ARMBaseInfo.h"
26 #include "Utils/ARMBaseInfo.h"
27 #include "llvm/ADT/APFloat.h"
28 #include "llvm/ADT/APInt.h"
29 #include "llvm/ADT/ArrayRef.h"
30 #include "llvm/ADT/BitVector.h"
31 #include "llvm/ADT/DenseMap.h"
32 #include "llvm/ADT/STLExtras.h"
33 #include "llvm/ADT/SmallPtrSet.h"
34 #include "llvm/ADT/SmallVector.h"
35 #include "llvm/ADT/Statistic.h"
36 #include "llvm/ADT/StringExtras.h"
37 #include "llvm/ADT/StringRef.h"
38 #include "llvm/ADT/StringSwitch.h"
39 #include "llvm/ADT/Triple.h"
40 #include "llvm/ADT/Twine.h"
41 #include "llvm/Analysis/VectorUtils.h"
42 #include "llvm/CodeGen/CallingConvLower.h"
43 #include "llvm/CodeGen/ISDOpcodes.h"
44 #include "llvm/CodeGen/IntrinsicLowering.h"
45 #include "llvm/CodeGen/MachineBasicBlock.h"
46 #include "llvm/CodeGen/MachineConstantPool.h"
47 #include "llvm/CodeGen/MachineFrameInfo.h"
48 #include "llvm/CodeGen/MachineFunction.h"
49 #include "llvm/CodeGen/MachineInstr.h"
50 #include "llvm/CodeGen/MachineInstrBuilder.h"
51 #include "llvm/CodeGen/MachineJumpTableInfo.h"
52 #include "llvm/CodeGen/MachineMemOperand.h"
53 #include "llvm/CodeGen/MachineOperand.h"
54 #include "llvm/CodeGen/MachineRegisterInfo.h"
55 #include "llvm/CodeGen/RuntimeLibcalls.h"
56 #include "llvm/CodeGen/SelectionDAG.h"
57 #include "llvm/CodeGen/SelectionDAGNodes.h"
58 #include "llvm/CodeGen/TargetInstrInfo.h"
59 #include "llvm/CodeGen/TargetLowering.h"
60 #include "llvm/CodeGen/TargetOpcodes.h"
61 #include "llvm/CodeGen/TargetRegisterInfo.h"
62 #include "llvm/CodeGen/TargetSubtargetInfo.h"
63 #include "llvm/CodeGen/ValueTypes.h"
64 #include "llvm/IR/Attributes.h"
65 #include "llvm/IR/CallingConv.h"
66 #include "llvm/IR/Constant.h"
67 #include "llvm/IR/Constants.h"
68 #include "llvm/IR/DataLayout.h"
69 #include "llvm/IR/DebugLoc.h"
70 #include "llvm/IR/DerivedTypes.h"
71 #include "llvm/IR/Function.h"
72 #include "llvm/IR/GlobalAlias.h"
73 #include "llvm/IR/GlobalValue.h"
74 #include "llvm/IR/GlobalVariable.h"
75 #include "llvm/IR/IRBuilder.h"
76 #include "llvm/IR/InlineAsm.h"
77 #include "llvm/IR/Instruction.h"
78 #include "llvm/IR/Instructions.h"
79 #include "llvm/IR/IntrinsicInst.h"
80 #include "llvm/IR/Intrinsics.h"
81 #include "llvm/IR/Module.h"
82 #include "llvm/IR/PatternMatch.h"
83 #include "llvm/IR/Type.h"
84 #include "llvm/IR/User.h"
85 #include "llvm/IR/Value.h"
86 #include "llvm/MC/MCInstrDesc.h"
87 #include "llvm/MC/MCInstrItineraries.h"
88 #include "llvm/MC/MCRegisterInfo.h"
89 #include "llvm/MC/MCSchedule.h"
90 #include "llvm/Support/AtomicOrdering.h"
91 #include "llvm/Support/BranchProbability.h"
92 #include "llvm/Support/Casting.h"
93 #include "llvm/Support/CodeGen.h"
94 #include "llvm/Support/CommandLine.h"
95 #include "llvm/Support/Compiler.h"
96 #include "llvm/Support/Debug.h"
97 #include "llvm/Support/ErrorHandling.h"
98 #include "llvm/Support/KnownBits.h"
99 #include "llvm/Support/MachineValueType.h"
100 #include "llvm/Support/MathExtras.h"
101 #include "llvm/Support/raw_ostream.h"
102 #include "llvm/Target/TargetMachine.h"
103 #include "llvm/Target/TargetOptions.h"
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "arm-isel"

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments");
STATISTIC(NumConstpoolPromoted,
          "Number of constants with their storage promoted into constant pools");

127 ARMInterworking("arm-interworking", cl::Hidden
,
128 cl::desc("Enable / disable ARM interworking (for debugging only)"),
131 static cl::opt
<bool> EnableConstpoolPromotion(
132 "arm-promote-constant", cl::Hidden
,
133 cl::desc("Enable / disable promotion of unnamed_addr constants into "
135 cl::init(false)); // FIXME: set to true by default once PR32780 is fixed
136 static cl::opt
<unsigned> ConstpoolPromotionMaxSize(
137 "arm-promote-constant-max-size", cl::Hidden
,
138 cl::desc("Maximum size of constant to promote into a constant pool"),
140 static cl::opt
<unsigned> ConstpoolPromotionMaxTotal(
141 "arm-promote-constant-max-total", cl::Hidden
,
142 cl::desc("Maximum size of ALL constants to promote into a constant pool"),
// The APCS parameter registers.
static const MCPhysReg GPRArgRegs[] = {
  ARM::R0, ARM::R1, ARM::R2, ARM::R3
};

void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT,
                                       MVT PromotedBitwiseVT) {
  if (VT != PromotedLdStVT) {
    setOperationAction(ISD::LOAD, VT, Promote);
    AddPromotedToType (ISD::LOAD, VT, PromotedLdStVT);

    setOperationAction(ISD::STORE, VT, Promote);
    AddPromotedToType (ISD::STORE, VT, PromotedLdStVT);
  }

  MVT ElemTy = VT.getVectorElementType();
  if (ElemTy != MVT::f64)
    setOperationAction(ISD::SETCC, VT, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
  if (ElemTy == MVT::i32) {
    setOperationAction(ISD::SINT_TO_FP, VT, Custom);
    setOperationAction(ISD::UINT_TO_FP, VT, Custom);
    setOperationAction(ISD::FP_TO_SINT, VT, Custom);
    setOperationAction(ISD::FP_TO_UINT, VT, Custom);
  } else {
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
  }

  setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, VT, Legal);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
  setOperationAction(ISD::SELECT, VT, Expand);
  setOperationAction(ISD::SELECT_CC, VT, Expand);
  setOperationAction(ISD::VSELECT, VT, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
  if (VT.isInteger()) {
    setOperationAction(ISD::SHL, VT, Custom);
    setOperationAction(ISD::SRA, VT, Custom);
    setOperationAction(ISD::SRL, VT, Custom);
  }

  // Promote all bit-wise operations.
  if (VT.isInteger() && VT != PromotedBitwiseVT) {
    setOperationAction(ISD::AND, VT, Promote);
    AddPromotedToType (ISD::AND, VT, PromotedBitwiseVT);
    setOperationAction(ISD::OR, VT, Promote);
    AddPromotedToType (ISD::OR, VT, PromotedBitwiseVT);
    setOperationAction(ISD::XOR, VT, Promote);
    AddPromotedToType (ISD::XOR, VT, PromotedBitwiseVT);
  }

  // Neon does not support vector divide/remainder operations.
  setOperationAction(ISD::SDIV, VT, Expand);
  setOperationAction(ISD::UDIV, VT, Expand);
  setOperationAction(ISD::FDIV, VT, Expand);
  setOperationAction(ISD::SREM, VT, Expand);
  setOperationAction(ISD::UREM, VT, Expand);
  setOperationAction(ISD::FREM, VT, Expand);

  if (!VT.isFloatingPoint() &&
      VT != MVT::v2i64 && VT != MVT::v1i64)
    for (auto Opcode : {ISD::ABS, ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
      setOperationAction(Opcode, VT, Legal);
}

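// How the Promote entries above behave: marking e.g. ISD::AND on a narrow
// integer vector as Promote together with AddPromotedToType(..., PromotedBitwiseVT)
// makes the legalizer bitcast the operands to the promoted type, perform the
// operation there, and bitcast the result back. For bitwise operations this is
// bit-pattern preserving, so all the small NEON integer vectors can share one
// canonical type for AND/OR/XOR.
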
void ARMTargetLowering::addDRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &ARM::DPRRegClass);
  addTypeForNEON(VT, MVT::f64, MVT::v2i32);
}

void ARMTargetLowering::addQRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &ARM::DPairRegClass);
  addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
}

void ARMTargetLowering::setAllExpand(MVT VT) {
  for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
    setOperationAction(Opc, VT, Expand);

  // We support these really simple operations even on types where all
  // the actual arithmetic has to be broken down into simpler
  // operations or turned into library calls.
  setOperationAction(ISD::BITCAST, VT, Legal);
  setOperationAction(ISD::LOAD, VT, Legal);
  setOperationAction(ISD::STORE, VT, Legal);
  setOperationAction(ISD::UNDEF, VT, Legal);
}

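// setAllExpand is used for types that can be carried in registers but have no
// usable instructions (for example f32 without VFP2, or f64 without FP64):
// every opcode is first forced to Expand, and only the operations that are
// always possible, namely bitcast, load, store and undef, are re-marked Legal.
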
void ARMTargetLowering::addAllExtLoads(const MVT From, const MVT To,
                                       LegalizeAction Action) {
  setLoadExtAction(ISD::EXTLOAD, From, To, Action);
  setLoadExtAction(ISD::ZEXTLOAD, From, To, Action);
  setLoadExtAction(ISD::SEXTLOAD, From, To, Action);
}

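// Helper used when a load-extension legality applies to all three extension
// kinds at once; e.g. addAllExtLoads(MVT::v4i32, MVT::v4i8, Legal) further
// down marks any-, zero- and sign-extending loads from v4i8 to v4i32 as
// directly selectable.
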
void ARMTargetLowering::addMVEVectorTypes(bool HasMVEFP) {
  const MVT IntTypes[] = { MVT::v16i8, MVT::v8i16, MVT::v4i32 };

  for (auto VT : IntTypes) {
    addRegisterClass(VT, &ARM::MQPRRegClass);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
    setOperationAction(ISD::SHL, VT, Custom);
    setOperationAction(ISD::SRA, VT, Custom);
    setOperationAction(ISD::SRL, VT, Custom);
    setOperationAction(ISD::SMIN, VT, Legal);
    setOperationAction(ISD::SMAX, VT, Legal);
    setOperationAction(ISD::UMIN, VT, Legal);
    setOperationAction(ISD::UMAX, VT, Legal);
    setOperationAction(ISD::ABS, VT, Legal);
    setOperationAction(ISD::SETCC, VT, Custom);
    setOperationAction(ISD::MLOAD, VT, Custom);
    setOperationAction(ISD::MSTORE, VT, Legal);
    setOperationAction(ISD::CTLZ, VT, Legal);
    setOperationAction(ISD::CTTZ, VT, Custom);
    setOperationAction(ISD::BITREVERSE, VT, Legal);
    setOperationAction(ISD::BSWAP, VT, Legal);
    setOperationAction(ISD::SADDSAT, VT, Legal);
    setOperationAction(ISD::UADDSAT, VT, Legal);
    setOperationAction(ISD::SSUBSAT, VT, Legal);
    setOperationAction(ISD::USUBSAT, VT, Legal);

    // No native support for these.
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);

    setOperationAction(ISD::VECREDUCE_ADD, VT, Legal);
    setOperationAction(ISD::VECREDUCE_SMAX, VT, Legal);
    setOperationAction(ISD::VECREDUCE_UMAX, VT, Legal);
    setOperationAction(ISD::VECREDUCE_SMIN, VT, Legal);
    setOperationAction(ISD::VECREDUCE_UMIN, VT, Legal);

    if (!HasMVEFP) {
      setOperationAction(ISD::SINT_TO_FP, VT, Expand);
      setOperationAction(ISD::UINT_TO_FP, VT, Expand);
      setOperationAction(ISD::FP_TO_SINT, VT, Expand);
      setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    }

    // Pre and Post inc are supported on loads and stores
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, VT, Legal);
      setIndexedStoreAction(im, VT, Legal);
    }
  }

  const MVT FloatTypes[] = { MVT::v8f16, MVT::v4f32 };
  for (auto VT : FloatTypes) {
    addRegisterClass(VT, &ARM::MQPRRegClass);
    if (!HasMVEFP)
      setAllExpand(VT);

    // These are legal or custom whether we have MVE.fp or not
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT.getVectorElementType(), Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
    setOperationAction(ISD::BUILD_VECTOR, VT.getVectorElementType(), Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Legal);
    setOperationAction(ISD::SETCC, VT, Custom);
    setOperationAction(ISD::MLOAD, VT, Custom);
    setOperationAction(ISD::MSTORE, VT, Legal);

    // Pre and Post inc are supported on loads and stores
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, VT, Legal);
      setIndexedStoreAction(im, VT, Legal);
    }

    if (HasMVEFP) {
      setOperationAction(ISD::FMINNUM, VT, Legal);
      setOperationAction(ISD::FMAXNUM, VT, Legal);
      setOperationAction(ISD::FROUND, VT, Legal);

      // No native support for these.
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
    }
  }

  // We 'support' these types up to bitcast/load/store level, regardless of
  // MVE integer-only / float support. Only doing FP data processing on the FP
  // vector types is inhibited at integer-only level.
  const MVT LongTypes[] = { MVT::v2i64, MVT::v2f64 };
  for (auto VT : LongTypes) {
    addRegisterClass(VT, &ARM::MQPRRegClass);
    setAllExpand(VT);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
  }

  // We can do bitwise operations on v2i64 vectors
  setOperationAction(ISD::AND, MVT::v2i64, Legal);
  setOperationAction(ISD::OR, MVT::v2i64, Legal);
  setOperationAction(ISD::XOR, MVT::v2i64, Legal);

  // It is legal to extload from v4i8 to v4i16 or v4i32.
  addAllExtLoads(MVT::v8i16, MVT::v8i8, Legal);
  addAllExtLoads(MVT::v4i32, MVT::v4i16, Legal);
  addAllExtLoads(MVT::v4i32, MVT::v4i8, Legal);

  // Some truncating stores are legal too.
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Legal);
  setTruncStoreAction(MVT::v8i16, MVT::v8i8, Legal);

  // Pre and Post inc on these are legal, given the correct extends
  for (unsigned im = (unsigned)ISD::PRE_INC;
       im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
    setIndexedLoadAction(im, MVT::v8i8, Legal);
    setIndexedStoreAction(im, MVT::v8i8, Legal);
    setIndexedLoadAction(im, MVT::v4i8, Legal);
    setIndexedStoreAction(im, MVT::v4i8, Legal);
    setIndexedLoadAction(im, MVT::v4i16, Legal);
    setIndexedStoreAction(im, MVT::v4i16, Legal);
  }

  const MVT pTypes[] = {MVT::v16i1, MVT::v8i1, MVT::v4i1};
  for (auto VT : pTypes) {
    addRegisterClass(VT, &ARM::VCCRRegClass);
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::SETCC, VT, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
    setOperationAction(ISD::LOAD, VT, Custom);
    setOperationAction(ISD::STORE, VT, Custom);
  }
}

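// The v16i1/v8i1/v4i1 types registered above model MVE predicate masks and are
// kept in the VCCR register class rather than in general-purpose or Q
// registers, which is why nearly every operation on them is custom lowered:
// the masks have to be packed and unpacked explicitly whenever they interact
// with ordinary vector values.
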
ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
                                     const ARMSubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  RegInfo = Subtarget->getRegisterInfo();
  Itins = Subtarget->getInstrItineraryData();

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  if (!Subtarget->isTargetDarwin() && !Subtarget->isTargetIOS() &&
      !Subtarget->isTargetWatchOS()) {
    bool IsHFTarget = TM.Options.FloatABIType == FloatABI::Hard;
    for (int LCID = 0; LCID < RTLIB::UNKNOWN_LIBCALL; ++LCID)
      setLibcallCallingConv(static_cast<RTLIB::Libcall>(LCID),
                            IsHFTarget ? CallingConv::ARM_AAPCS_VFP
                                       : CallingConv::ARM_AAPCS);
  }

  if (Subtarget->isTargetMachO()) {
    // Uses VFP for Thumb libfuncs if available.
    if (Subtarget->isThumb() && Subtarget->hasVFP2Base() &&
        Subtarget->hasARMOps() && !Subtarget->useSoftFloat()) {
      static const struct {
        const RTLIB::Libcall Op;
        const char * const Name;
        const ISD::CondCode Cond;
      } LibraryCalls[] = {
        // Single-precision floating-point arithmetic.
        { RTLIB::ADD_F32, "__addsf3vfp", ISD::SETCC_INVALID },
        { RTLIB::SUB_F32, "__subsf3vfp", ISD::SETCC_INVALID },
        { RTLIB::MUL_F32, "__mulsf3vfp", ISD::SETCC_INVALID },
        { RTLIB::DIV_F32, "__divsf3vfp", ISD::SETCC_INVALID },

        // Double-precision floating-point arithmetic.
        { RTLIB::ADD_F64, "__adddf3vfp", ISD::SETCC_INVALID },
        { RTLIB::SUB_F64, "__subdf3vfp", ISD::SETCC_INVALID },
        { RTLIB::MUL_F64, "__muldf3vfp", ISD::SETCC_INVALID },
        { RTLIB::DIV_F64, "__divdf3vfp", ISD::SETCC_INVALID },

        // Single-precision comparisons.
        { RTLIB::OEQ_F32, "__eqsf2vfp", ISD::SETNE },
        { RTLIB::UNE_F32, "__nesf2vfp", ISD::SETNE },
        { RTLIB::OLT_F32, "__ltsf2vfp", ISD::SETNE },
        { RTLIB::OLE_F32, "__lesf2vfp", ISD::SETNE },
        { RTLIB::OGE_F32, "__gesf2vfp", ISD::SETNE },
        { RTLIB::OGT_F32, "__gtsf2vfp", ISD::SETNE },
        { RTLIB::UO_F32, "__unordsf2vfp", ISD::SETNE },
        { RTLIB::O_F32, "__unordsf2vfp", ISD::SETEQ },

        // Double-precision comparisons.
        { RTLIB::OEQ_F64, "__eqdf2vfp", ISD::SETNE },
        { RTLIB::UNE_F64, "__nedf2vfp", ISD::SETNE },
        { RTLIB::OLT_F64, "__ltdf2vfp", ISD::SETNE },
        { RTLIB::OLE_F64, "__ledf2vfp", ISD::SETNE },
        { RTLIB::OGE_F64, "__gedf2vfp", ISD::SETNE },
        { RTLIB::OGT_F64, "__gtdf2vfp", ISD::SETNE },
        { RTLIB::UO_F64, "__unorddf2vfp", ISD::SETNE },
        { RTLIB::O_F64, "__unorddf2vfp", ISD::SETEQ },

        // Floating-point to integer conversions.
        // i64 conversions are done via library routines even when generating VFP
        // instructions, so use the same ones.
        { RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp", ISD::SETCC_INVALID },
        { RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp", ISD::SETCC_INVALID },
        { RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp", ISD::SETCC_INVALID },
        { RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp", ISD::SETCC_INVALID },

        // Conversions between floating types.
        { RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp", ISD::SETCC_INVALID },
        { RTLIB::FPEXT_F32_F64, "__extendsfdf2vfp", ISD::SETCC_INVALID },

        // Integer to floating-point conversions.
        // i64 conversions are done via library routines even when generating VFP
        // instructions, so use the same ones.
        // FIXME: There appears to be some naming inconsistency in ARM libgcc:
        // e.g., __floatunsidf vs. __floatunssidfvfp.
        { RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp", ISD::SETCC_INVALID },
        { RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp", ISD::SETCC_INVALID },
        { RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp", ISD::SETCC_INVALID },
        { RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp", ISD::SETCC_INVALID },
      };

      for (const auto &LC : LibraryCalls) {
        setLibcallName(LC.Op, LC.Name);
        if (LC.Cond != ISD::SETCC_INVALID)
          setCmpLibcallCC(LC.Op, LC.Cond);
      }
    }
  }

  // These libcalls are not available in 32-bit.
  setLibcallName(RTLIB::SHL_I128, nullptr);
  setLibcallName(RTLIB::SRL_I128, nullptr);
  setLibcallName(RTLIB::SRA_I128, nullptr);

  if (Subtarget->isAAPCS_ABI() &&
      (Subtarget->isTargetAEABI() || Subtarget->isTargetGNUAEABI() ||
       Subtarget->isTargetMuslAEABI() || Subtarget->isTargetAndroid())) {
    static const struct {
      const RTLIB::Libcall Op;
      const char * const Name;
      const CallingConv::ID CC;
      const ISD::CondCode Cond;
    } LibraryCalls[] = {
      // Double-precision floating-point arithmetic helper functions
      // RTABI chapter 4.1.2, Table 2
      { RTLIB::ADD_F64, "__aeabi_dadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::DIV_F64, "__aeabi_ddiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::MUL_F64, "__aeabi_dmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SUB_F64, "__aeabi_dsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Double-precision floating-point comparison helper functions
      // RTABI chapter 4.1.2, Table 3
      { RTLIB::OEQ_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UNE_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
      { RTLIB::OLT_F64, "__aeabi_dcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OLE_F64, "__aeabi_dcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGE_F64, "__aeabi_dcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGT_F64, "__aeabi_dcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UO_F64, "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::O_F64, "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETEQ },

      // Single-precision floating-point arithmetic helper functions
      // RTABI chapter 4.1.2, Table 4
      { RTLIB::ADD_F32, "__aeabi_fadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::DIV_F32, "__aeabi_fdiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::MUL_F32, "__aeabi_fmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SUB_F32, "__aeabi_fsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Single-precision floating-point comparison helper functions
      // RTABI chapter 4.1.2, Table 5
      { RTLIB::OEQ_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UNE_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
      { RTLIB::OLT_F32, "__aeabi_fcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OLE_F32, "__aeabi_fcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGE_F32, "__aeabi_fcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGT_F32, "__aeabi_fcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UO_F32, "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::O_F32, "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETEQ },

      // Floating-point to integer conversions.
      // RTABI chapter 4.1.2, Table 6
      { RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Conversions between floating types.
      // RTABI chapter 4.1.2, Table 7
      { RTLIB::FPROUND_F64_F32, "__aeabi_d2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPEXT_F32_F64, "__aeabi_f2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Integer to floating-point conversions.
      // RTABI chapter 4.1.2, Table 8
      { RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Long long helper functions
      // RTABI chapter 4.2, Table 9
      { RTLIB::MUL_I64, "__aeabi_lmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SHL_I64, "__aeabi_llsl", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SRL_I64, "__aeabi_llsr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SRA_I64, "__aeabi_lasr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Integer division functions
      // RTABI chapter 4.3.1
      { RTLIB::SDIV_I8, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SDIV_I16, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SDIV_I32, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SDIV_I64, "__aeabi_ldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I8, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I16, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I32, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
    };

    for (const auto &LC : LibraryCalls) {
      setLibcallName(LC.Op, LC.Name);
      setLibcallCallingConv(LC.Op, LC.CC);
      if (LC.Cond != ISD::SETCC_INVALID)
        setCmpLibcallCC(LC.Op, LC.Cond);
    }

    // EABI dependent RTLIB
    if (TM.Options.EABIVersion == EABI::EABI4 ||
        TM.Options.EABIVersion == EABI::EABI5) {
      static const struct {
        const RTLIB::Libcall Op;
        const char *const Name;
        const CallingConv::ID CC;
        const ISD::CondCode Cond;
      } MemOpsLibraryCalls[] = {
        // RTABI chapter 4.3.4
        { RTLIB::MEMCPY, "__aeabi_memcpy", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
        { RTLIB::MEMMOVE, "__aeabi_memmove", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
        { RTLIB::MEMSET, "__aeabi_memset", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      };

      for (const auto &LC : MemOpsLibraryCalls) {
        setLibcallName(LC.Op, LC.Name);
        setLibcallCallingConv(LC.Op, LC.CC);
        if (LC.Cond != ISD::SETCC_INVALID)
          setCmpLibcallCC(LC.Op, LC.Cond);
      }
    }
  }

  if (Subtarget->isTargetWindows()) {
    static const struct {
      const RTLIB::Libcall Op;
      const char * const Name;
      const CallingConv::ID CC;
    } LibraryCalls[] = {
      { RTLIB::FPTOSINT_F32_I64, "__stoi64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::FPTOSINT_F64_I64, "__dtoi64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::FPTOUINT_F32_I64, "__stou64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::FPTOUINT_F64_I64, "__dtou64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::SINTTOFP_I64_F32, "__i64tos", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::SINTTOFP_I64_F64, "__i64tod", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::UINTTOFP_I64_F32, "__u64tos", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::UINTTOFP_I64_F64, "__u64tod", CallingConv::ARM_AAPCS_VFP },
    };

    for (const auto &LC : LibraryCalls) {
      setLibcallName(LC.Op, LC.Name);
      setLibcallCallingConv(LC.Op, LC.CC);
    }
  }

  // Use divmod compiler-rt calls for iOS 5.0 and later.
  if (Subtarget->isTargetMachO() &&
      !(Subtarget->isTargetIOS() &&
        Subtarget->getTargetTriple().isOSVersionLT(5, 0))) {
    setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4");
    setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4");
  }

  // The half <-> float conversion functions are always soft-float on
  // non-watchos platforms, but are needed for some targets which use a
  // hard-float calling convention by default.
  if (!Subtarget->isTargetWatchABI()) {
    if (Subtarget->isAAPCS_ABI()) {
      setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_AAPCS);
      setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_AAPCS);
      setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_AAPCS);
    } else {
      setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_APCS);
      setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_APCS);
      setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_APCS);
    }
  }

  // In EABI, these functions have an __aeabi_ prefix, but in GNUEABI they have
  // a __gnu_ prefix (which is the default).
  if (Subtarget->isTargetAEABI()) {
    static const struct {
      const RTLIB::Libcall Op;
      const char * const Name;
      const CallingConv::ID CC;
    } LibraryCalls[] = {
      { RTLIB::FPROUND_F32_F16, "__aeabi_f2h", CallingConv::ARM_AAPCS },
      { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS },
      { RTLIB::FPEXT_F16_F32, "__aeabi_h2f", CallingConv::ARM_AAPCS },
    };

    for (const auto &LC : LibraryCalls) {
      setLibcallName(LC.Op, LC.Name);
      setLibcallCallingConv(LC.Op, LC.CC);
    }
  }

686 if (Subtarget
->isThumb1Only())
687 addRegisterClass(MVT::i32
, &ARM::tGPRRegClass
);
689 addRegisterClass(MVT::i32
, &ARM::GPRRegClass
);
  if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only() &&
      Subtarget->hasFPRegs()) {
    addRegisterClass(MVT::f32, &ARM::SPRRegClass);
    addRegisterClass(MVT::f64, &ARM::DPRRegClass);
    if (!Subtarget->hasVFP2Base())
      setAllExpand(MVT::f32);
    if (!Subtarget->hasFP64())
      setAllExpand(MVT::f64);
  }

  if (Subtarget->hasFullFP16()) {
    addRegisterClass(MVT::f16, &ARM::HPRRegClass);
    setOperationAction(ISD::BITCAST, MVT::i16, Custom);
    setOperationAction(ISD::BITCAST, MVT::i32, Custom);
    setOperationAction(ISD::BITCAST, MVT::f16, Custom);

    setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
  }

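  // Rough intuition for the Custom BITCAST entries above: with +fullfp16 an
  // f16 value lives in the bottom half of an S register, so moving its raw
  // bits to or from an i16/i32 core register needs a VMOV plus an explicit
  // extension or mask rather than a plain register-to-register copy; the
  // custom lowering later in this file builds that sequence.
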
  for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
    for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
      setTruncStoreAction(VT, InnerVT, Expand);
      addAllExtLoads(VT, InnerVT, Expand);
    }

    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);

    setOperationAction(ISD::BSWAP, VT, Expand);
  }

  setOperationAction(ISD::ConstantFP, MVT::f32, Custom);
  setOperationAction(ISD::ConstantFP, MVT::f64, Custom);

  setOperationAction(ISD::READ_REGISTER, MVT::i64, Custom);
  setOperationAction(ISD::WRITE_REGISTER, MVT::i64, Custom);

  if (Subtarget->hasMVEIntegerOps())
    addMVEVectorTypes(Subtarget->hasMVEFloatOps());

  // Combine low-overhead loop intrinsics so that we can lower i1 types.
  if (Subtarget->hasLOB()) {
    setTargetDAGCombine(ISD::BRCOND);
    setTargetDAGCombine(ISD::BR_CC);
  }

  if (Subtarget->hasNEON()) {
    addDRTypeForNEON(MVT::v2f32);
    addDRTypeForNEON(MVT::v8i8);
    addDRTypeForNEON(MVT::v4i16);
    addDRTypeForNEON(MVT::v2i32);
    addDRTypeForNEON(MVT::v1i64);

    addQRTypeForNEON(MVT::v4f32);
    addQRTypeForNEON(MVT::v2f64);
    addQRTypeForNEON(MVT::v16i8);
    addQRTypeForNEON(MVT::v8i16);
    addQRTypeForNEON(MVT::v4i32);
    addQRTypeForNEON(MVT::v2i64);

    if (Subtarget->hasFullFP16()) {
      addQRTypeForNEON(MVT::v8f16);
      addDRTypeForNEON(MVT::v4f16);
    }
  }

  if (Subtarget->hasMVEIntegerOps() || Subtarget->hasNEON()) {
    // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
    // none of Neon, MVE or VFP supports any arithmetic operations on it.
    setOperationAction(ISD::FADD, MVT::v2f64, Expand);
    setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
    setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
    // FIXME: Code duplication: FDIV and FREM are expanded always, see
    // ARMTargetLowering::addTypeForNEON method for details.
    setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
    setOperationAction(ISD::FREM, MVT::v2f64, Expand);
    // FIXME: Create unittest.
    // In other words, find a way when "copysign" appears in DAG with vector
    // operands.
    setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
    // FIXME: Code duplication: SETCC has custom operation action, see
    // ARMTargetLowering::addTypeForNEON method for details.
    setOperationAction(ISD::SETCC, MVT::v2f64, Expand);
    // FIXME: Create unittest for FNEG and for FABS.
    setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
    setOperationAction(ISD::FABS, MVT::v2f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
    // FIXME: Create unittest for FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR.
    setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);
    setOperationAction(ISD::FMA, MVT::v2f64, Expand);
  }

  if (Subtarget->hasNEON()) {
    // The same with v4f32. But keep in mind that vadd, vsub, vmul are natively
    // supported for v4f32.
    setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f32, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Expand);

    // Mark v2f32 intrinsics.
    setOperationAction(ISD::FSQRT, MVT::v2f32, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f32, Expand);
    setOperationAction(ISD::FCEIL, MVT::v2f32, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f32, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f32, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f32, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f32, Expand);

    // Neon does not support some operations on v1i64 and v2i64 types.
    setOperationAction(ISD::MUL, MVT::v1i64, Expand);
    // Custom handling for some quad-vector types to detect VMULL.
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    // Custom handling for some vector types to avoid expensive expansions
    setOperationAction(ISD::SDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::SDIV, MVT::v8i8, Custom);
    setOperationAction(ISD::UDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::UDIV, MVT::v8i8, Custom);
    // Neon does not have single instruction SINT_TO_FP and UINT_TO_FP with
    // a destination type that is wider than the source, and nor does
    // it have a FP_TO_[SU]INT instruction with a narrower destination than
    // source.
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Custom);

    setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v2f64, Expand);

    // NEON does not have single instruction CTPOP for vectors with element
    // types wider than 8-bits. However, custom lowering can leverage the
    // v8i8/v16i8 vcnt instruction.
    setOperationAction(ISD::CTPOP, MVT::v2i32, Custom);
    setOperationAction(ISD::CTPOP, MVT::v4i32, Custom);
    setOperationAction(ISD::CTPOP, MVT::v4i16, Custom);
    setOperationAction(ISD::CTPOP, MVT::v8i16, Custom);
    setOperationAction(ISD::CTPOP, MVT::v1i64, Custom);
    setOperationAction(ISD::CTPOP, MVT::v2i64, Custom);

    setOperationAction(ISD::CTLZ, MVT::v1i64, Expand);
    setOperationAction(ISD::CTLZ, MVT::v2i64, Expand);

    // NEON does not have single instruction CTTZ for vectors.
    setOperationAction(ISD::CTTZ, MVT::v8i8, Custom);
    setOperationAction(ISD::CTTZ, MVT::v4i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::v2i32, Custom);
    setOperationAction(ISD::CTTZ, MVT::v1i64, Custom);

    setOperationAction(ISD::CTTZ, MVT::v16i8, Custom);
    setOperationAction(ISD::CTTZ, MVT::v8i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::v4i32, Custom);
    setOperationAction(ISD::CTTZ, MVT::v2i64, Custom);

    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i8, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i16, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i32, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v1i64, Custom);

    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v16i8, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i16, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i32, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i64, Custom);

    // NEON only has FMA instructions as of VFP4.
    if (!Subtarget->hasVFP4Base()) {
      setOperationAction(ISD::FMA, MVT::v2f32, Expand);
      setOperationAction(ISD::FMA, MVT::v4f32, Expand);
    }

    setTargetDAGCombine(ISD::INTRINSIC_VOID);
    setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
    setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
    setTargetDAGCombine(ISD::SHL);
    setTargetDAGCombine(ISD::SRL);
    setTargetDAGCombine(ISD::SRA);
    setTargetDAGCombine(ISD::FP_TO_SINT);
    setTargetDAGCombine(ISD::FP_TO_UINT);
    setTargetDAGCombine(ISD::FDIV);
    setTargetDAGCombine(ISD::LOAD);

    // It is legal to extload from v4i8 to v4i16 or v4i32.
    for (MVT Ty : {MVT::v8i8, MVT::v4i8, MVT::v2i8, MVT::v4i16, MVT::v2i16,
                   MVT::v2i32}) {
      for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
        setLoadExtAction(ISD::EXTLOAD, VT, Ty, Legal);
        setLoadExtAction(ISD::ZEXTLOAD, VT, Ty, Legal);
        setLoadExtAction(ISD::SEXTLOAD, VT, Ty, Legal);
      }
    }
  }

  if (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) {
    setTargetDAGCombine(ISD::BUILD_VECTOR);
    setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
    setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
    setTargetDAGCombine(ISD::STORE);
    setTargetDAGCombine(ISD::SIGN_EXTEND);
    setTargetDAGCombine(ISD::ZERO_EXTEND);
    setTargetDAGCombine(ISD::ANY_EXTEND);
  }

  if (!Subtarget->hasFP64()) {
    // When targeting a floating-point unit with only single-precision
    // operations, f64 is legal for the few double-precision instructions which
    // are present. However, no double-precision operations other than moves,
    // loads and stores are provided by the hardware.
    setOperationAction(ISD::FADD, MVT::f64, Expand);
    setOperationAction(ISD::FSUB, MVT::f64, Expand);
    setOperationAction(ISD::FMUL, MVT::f64, Expand);
    setOperationAction(ISD::FMA, MVT::f64, Expand);
    setOperationAction(ISD::FDIV, MVT::f64, Expand);
    setOperationAction(ISD::FREM, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FGETSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FNEG, MVT::f64, Expand);
    setOperationAction(ISD::FABS, MVT::f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);
    setOperationAction(ISD::FSIN, MVT::f64, Expand);
    setOperationAction(ISD::FCOS, MVT::f64, Expand);
    setOperationAction(ISD::FPOW, MVT::f64, Expand);
    setOperationAction(ISD::FLOG, MVT::f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::f64, Expand);
    setOperationAction(ISD::FEXP, MVT::f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::f64, Expand);
    setOperationAction(ISD::FCEIL, MVT::f64, Expand);
    setOperationAction(ISD::FTRUNC, MVT::f64, Expand);
    setOperationAction(ISD::FRINT, MVT::f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::f64, Expand);
    setOperationAction(ISD::FFLOOR, MVT::f64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::f64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::f64, Custom);
    setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
  }

  if (!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) {
    setOperationAction(ISD::FP_EXTEND, MVT::f64, Custom);
    if (Subtarget->hasFullFP16())
      setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
  }

  if (!Subtarget->hasFP16())
    setOperationAction(ISD::FP_EXTEND, MVT::f32, Custom);

  if (!Subtarget->hasFP64())
    setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);

  computeRegisterProperties(Subtarget->getRegisterInfo());

  // ARM does not have floating-point extending loads.
  for (MVT VT : MVT::fp_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
  }

  // ... or truncating stores
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);

  // ARM does not have i1 sign extending load.
  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

  // ARM supports all 4 flavors of integer indexed load / store.
  if (!Subtarget->isThumb1Only()) {
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, MVT::i1, Legal);
      setIndexedLoadAction(im, MVT::i8, Legal);
      setIndexedLoadAction(im, MVT::i16, Legal);
      setIndexedLoadAction(im, MVT::i32, Legal);
      setIndexedStoreAction(im, MVT::i1, Legal);
      setIndexedStoreAction(im, MVT::i8, Legal);
      setIndexedStoreAction(im, MVT::i16, Legal);
      setIndexedStoreAction(im, MVT::i32, Legal);
    }
  } else {
    // Thumb-1 has limited post-inc load/store support - LDM r0!, {r1}.
    setIndexedLoadAction(ISD::POST_INC, MVT::i32, Legal);
    setIndexedStoreAction(ISD::POST_INC, MVT::i32, Legal);
  }

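  // For reference, the indexed forms marked Legal above correspond to ARM
  // addressing modes like (illustrative assembly only):
  //   pre-indexed:  ldr r0, [r1, #4]!   ; r1 updated before the access
  //   post-indexed: ldr r0, [r1], #4    ; r1 updated after the access
  // so a load or store and the following pointer update can be selected as a
  // single instruction.
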
  setOperationAction(ISD::SADDO, MVT::i32, Custom);
  setOperationAction(ISD::UADDO, MVT::i32, Custom);
  setOperationAction(ISD::SSUBO, MVT::i32, Custom);
  setOperationAction(ISD::USUBO, MVT::i32, Custom);

  setOperationAction(ISD::ADDCARRY, MVT::i32, Custom);
  setOperationAction(ISD::SUBCARRY, MVT::i32, Custom);

  // i64 operation support.
  setOperationAction(ISD::MUL, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  if (Subtarget->isThumb1Only()) {
    setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  }
  if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops()
      || (Subtarget->isThumb2() && !Subtarget->hasDSP()))
    setOperationAction(ISD::MULHS, MVT::i32, Expand);

  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL, MVT::i64, Custom);
  setOperationAction(ISD::SRA, MVT::i64, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);

  // MVE lowers 64 bit shifts to lsll and lsrl
  // assuming that ISD::SRL and SRA of i64 are already marked custom
  if (Subtarget->hasMVEIntegerOps())
    setOperationAction(ISD::SHL, MVT::i64, Custom);

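  // (Background assumption, not stated above: LSLL/LSRL are the Armv8.1-M
  // long-shift instructions that shift a 64-bit value held in a pair of core
  // registers, so an i64 shift can be selected directly instead of being split
  // into two 32-bit shifts joined by carries.)
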
  // Expand to __aeabi_l{lsl,lsr,asr} calls for Thumb1.
  if (Subtarget->isThumb1Only()) {
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
  }

  if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops())
    setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);

  // ARM does not have ROTL.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
  }
  setOperationAction(ISD::CTTZ, MVT::i32, Custom);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only()) {
    setOperationAction(ISD::CTLZ, MVT::i32, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, LibCall);
  }

  // @llvm.readcyclecounter requires the Performance Monitors extension.
  // Default to the 0 expansion on unsupported platforms.
  // FIXME: Technically there are older ARM CPUs that have
  // implementation-specific ways of obtaining this information.
  if (Subtarget->hasPerfMon())
    setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);

  // Only ARMv6 has BSWAP.
  if (!Subtarget->hasV6Ops())
    setOperationAction(ISD::BSWAP, MVT::i32, Expand);

  bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode()
                                        : Subtarget->hasDivideInARMMode();
  if (!hasDivide) {
    // These are expanded into libcalls if the cpu doesn't have HW divider.
    setOperationAction(ISD::SDIV, MVT::i32, LibCall);
    setOperationAction(ISD::UDIV, MVT::i32, LibCall);
  }

  if (Subtarget->isTargetWindows() && !Subtarget->hasDivideInThumbMode()) {
    setOperationAction(ISD::SDIV, MVT::i32, Custom);
    setOperationAction(ISD::UDIV, MVT::i32, Custom);

    setOperationAction(ISD::SDIV, MVT::i64, Custom);
    setOperationAction(ISD::UDIV, MVT::i64, Custom);
  }

  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);

  // Register based DivRem for AEABI (RTABI 4.2)
  if (Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() ||
      Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI() ||
      Subtarget->isTargetWindows()) {
    setOperationAction(ISD::SREM, MVT::i64, Custom);
    setOperationAction(ISD::UREM, MVT::i64, Custom);
    HasStandaloneRem = false;

    if (Subtarget->isTargetWindows()) {
      const struct {
        const RTLIB::Libcall Op;
        const char * const Name;
        const CallingConv::ID CC;
      } LibraryCalls[] = {
        { RTLIB::SDIVREM_I8, "__rt_sdiv", CallingConv::ARM_AAPCS },
        { RTLIB::SDIVREM_I16, "__rt_sdiv", CallingConv::ARM_AAPCS },
        { RTLIB::SDIVREM_I32, "__rt_sdiv", CallingConv::ARM_AAPCS },
        { RTLIB::SDIVREM_I64, "__rt_sdiv64", CallingConv::ARM_AAPCS },

        { RTLIB::UDIVREM_I8, "__rt_udiv", CallingConv::ARM_AAPCS },
        { RTLIB::UDIVREM_I16, "__rt_udiv", CallingConv::ARM_AAPCS },
        { RTLIB::UDIVREM_I32, "__rt_udiv", CallingConv::ARM_AAPCS },
        { RTLIB::UDIVREM_I64, "__rt_udiv64", CallingConv::ARM_AAPCS },
      };

      for (const auto &LC : LibraryCalls) {
        setLibcallName(LC.Op, LC.Name);
        setLibcallCallingConv(LC.Op, LC.CC);
      }
    } else {
      const struct {
        const RTLIB::Libcall Op;
        const char * const Name;
        const CallingConv::ID CC;
      } LibraryCalls[] = {
        { RTLIB::SDIVREM_I8, "__aeabi_idivmod", CallingConv::ARM_AAPCS },
        { RTLIB::SDIVREM_I16, "__aeabi_idivmod", CallingConv::ARM_AAPCS },
        { RTLIB::SDIVREM_I32, "__aeabi_idivmod", CallingConv::ARM_AAPCS },
        { RTLIB::SDIVREM_I64, "__aeabi_ldivmod", CallingConv::ARM_AAPCS },

        { RTLIB::UDIVREM_I8, "__aeabi_uidivmod", CallingConv::ARM_AAPCS },
        { RTLIB::UDIVREM_I16, "__aeabi_uidivmod", CallingConv::ARM_AAPCS },
        { RTLIB::UDIVREM_I32, "__aeabi_uidivmod", CallingConv::ARM_AAPCS },
        { RTLIB::UDIVREM_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS },
      };

      for (const auto &LC : LibraryCalls) {
        setLibcallName(LC.Op, LC.Name);
        setLibcallCallingConv(LC.Op, LC.CC);
      }
    }

    setOperationAction(ISD::SDIVREM, MVT::i32, Custom);
    setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
    setOperationAction(ISD::SDIVREM, MVT::i64, Custom);
    setOperationAction(ISD::UDIVREM, MVT::i64, Custom);
  } else {
    setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
    setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  }

  if (Subtarget->isTargetWindows() && Subtarget->getTargetTriple().isOSMSVCRT())
    for (auto &VT : {MVT::f32, MVT::f64})
      setOperationAction(ISD::FPOWI, VT, Custom);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);

  // Use the default implementation.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  if (Subtarget->isTargetWindows())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  else
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use
  // the default expansion.
  InsertFencesForAtomic = false;
  if (Subtarget->hasAnyDataBarrier() &&
      (!Subtarget->isThumb() || Subtarget->hasV8MBaselineOps())) {
    // ATOMIC_FENCE needs custom lowering; the others should have been expanded
    // to ldrex/strex loops already.
    setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
    if (!Subtarget->isThumb() || !Subtarget->isMClass())
      setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);

    // On v8, we have particularly efficient implementations of atomic fences
    // if they can be combined with nearby atomic loads and stores.
    if (!Subtarget->hasAcquireRelease() ||
        getTargetMachine().getOptLevel() == 0) {
      // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc.
      InsertFencesForAtomic = true;
    }
  } else {
    // If there's anything we can use as a barrier, go through custom lowering
    // for ATOMIC_FENCE.
    // If target has DMB in thumb, Fences can be inserted.
    if (Subtarget->hasDataBarrier())
      InsertFencesForAtomic = true;

    setOperationAction(ISD::ATOMIC_FENCE, MVT::Other,
                       Subtarget->hasAnyDataBarrier() ? Custom : Expand);

    // Set them all for expansion, which will force libcalls.
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand);
    // Mark ATOMIC_LOAD and ATOMIC_STORE custom so we can handle the
    // Unordered/Monotonic case.
    if (!InsertFencesForAtomic) {
      setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
      setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
    }
  }

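  // For reference, the "ldrex/strex loop" expansion mentioned above looks
  // roughly like this for an atomic add (illustrative assembly only):
  //   retry:
  //     ldrex   r2, [r0]        ; load-exclusive the current value
  //     add     r2, r2, r1
  //     strex   r3, r2, [r0]    ; store succeeds only if still exclusive
  //     cmp     r3, #0
  //     bne     retry
  // Targets with no usable barrier instead force the atomic operations to
  // Expand above, which turns them into runtime library calls.
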
  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes.
  if (!Subtarget->hasV6Ops()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
  }
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  if (!Subtarget->useSoftFloat() && Subtarget->hasFPRegs() &&
      !Subtarget->isThumb1Only()) {
    // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR
    // iff target supports vfp2.
    setOperationAction(ISD::BITCAST, MVT::i64, Custom);
    setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
  setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom);
  if (Subtarget->useSjLjEH())
    setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");

  setOperationAction(ISD::SETCC, MVT::i32, Expand);
  setOperationAction(ISD::SETCC, MVT::f32, Expand);
  setOperationAction(ISD::SETCC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
  if (Subtarget->hasFullFP16()) {
    setOperationAction(ISD::SETCC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT, MVT::f16, Custom);
    setOperationAction(ISD::SELECT_CC, MVT::f16, Custom);
  }

  setOperationAction(ISD::SETCCCARRY, MVT::i32, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  if (Subtarget->hasFullFP16())
    setOperationAction(ISD::BR_CC, MVT::f16, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f64, Custom);
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  // We don't support sin/cos/fmod/copysign/pow
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2Base() &&
      !Subtarget->isThumb1Only()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
  }
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);

  if (!Subtarget->hasVFP4Base()) {
    setOperationAction(ISD::FMA, MVT::f64, Expand);
    setOperationAction(ISD::FMA, MVT::f32, Expand);
  }

  // Various VFP goodness
  if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only()) {
    // FP-ARMv8 adds f64 <-> f16 conversion. Before that it should be expanded.
    if (!Subtarget->hasFPARMv8Base() || !Subtarget->hasFP64()) {
      setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
      setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
    }

    // fp16 is a special v7 extension that adds f16 <-> f32 conversions.
    if (!Subtarget->hasFP16()) {
      setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
      setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
    }
  }
  // Use __sincos_stret if available.
  if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr &&
      getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) {
    setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
    setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
  }
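  // Editor note (added): on targets that provide __sincos_stret (for example
  // Darwin), the Custom lowering above lets a matching sin+cos pair on the
  // same operand become a single combined library call instead of two calls.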
  // FP-ARMv8 implements a lot of rounding-like FP operations.
  if (Subtarget->hasFPARMv8Base()) {
    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FROUND, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
    setOperationAction(ISD::FRINT, MVT::f32, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
    if (Subtarget->hasNEON()) {
      setOperationAction(ISD::FMINNUM, MVT::v2f32, Legal);
      setOperationAction(ISD::FMAXNUM, MVT::v2f32, Legal);
      setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
      setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);
    }

    if (Subtarget->hasFP64()) {
      setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
      setOperationAction(ISD::FCEIL, MVT::f64, Legal);
      setOperationAction(ISD::FROUND, MVT::f64, Legal);
      setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
      setOperationAction(ISD::FRINT, MVT::f64, Legal);
      setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
      setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
    }
  }
  // FP16 values often need to be promoted to call lib functions
  if (Subtarget->hasFullFP16()) {
    setOperationAction(ISD::FREM, MVT::f16, Promote);
    setOperationAction(ISD::FCOPYSIGN, MVT::f16, Expand);
    setOperationAction(ISD::FSIN, MVT::f16, Promote);
    setOperationAction(ISD::FCOS, MVT::f16, Promote);
    setOperationAction(ISD::FSINCOS, MVT::f16, Promote);
    setOperationAction(ISD::FPOWI, MVT::f16, Promote);
    setOperationAction(ISD::FPOW, MVT::f16, Promote);
    setOperationAction(ISD::FEXP, MVT::f16, Promote);
    setOperationAction(ISD::FEXP2, MVT::f16, Promote);
    setOperationAction(ISD::FLOG, MVT::f16, Promote);
    setOperationAction(ISD::FLOG10, MVT::f16, Promote);
    setOperationAction(ISD::FLOG2, MVT::f16, Promote);

    setOperationAction(ISD::FROUND, MVT::f16, Legal);
  }
  if (Subtarget->hasNEON()) {
    // vmin and vmax aren't available in a scalar form, so we use
    // a NEON instruction with an undef lane instead.
    setOperationAction(ISD::FMINIMUM, MVT::f16, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::f16, Legal);
    setOperationAction(ISD::FMINIMUM, MVT::f32, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::f32, Legal);
    setOperationAction(ISD::FMINIMUM, MVT::v2f32, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::v2f32, Legal);
    setOperationAction(ISD::FMINIMUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMAXIMUM, MVT::v4f32, Legal);

    if (Subtarget->hasFullFP16()) {
      setOperationAction(ISD::FMINNUM, MVT::v4f16, Legal);
      setOperationAction(ISD::FMAXNUM, MVT::v4f16, Legal);
      setOperationAction(ISD::FMINNUM, MVT::v8f16, Legal);
      setOperationAction(ISD::FMAXNUM, MVT::v8f16, Legal);

      setOperationAction(ISD::FMINIMUM, MVT::v4f16, Legal);
      setOperationAction(ISD::FMAXIMUM, MVT::v4f16, Legal);
      setOperationAction(ISD::FMINIMUM, MVT::v8f16, Legal);
      setOperationAction(ISD::FMAXIMUM, MVT::v8f16, Legal);
    }
  }
  // We have target-specific dag combine patterns for the following nodes:
  // ARMISD::VMOVRRD  - No need to call setTargetDAGCombine
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::XOR);

  if (Subtarget->hasV6Ops())
    setTargetDAGCombine(ISD::SRL);
  if (Subtarget->isThumb1Only())
    setTargetDAGCombine(ISD::SHL);

  setStackPointerRegisterToSaveRestore(ARM::SP);
  if (Subtarget->useSoftFloat() || Subtarget->isThumb1Only() ||
      !Subtarget->hasVFP2Base() || Subtarget->hasMinSize())
    setSchedulingPreference(Sched::RegPressure);
  else
    setSchedulingPreference(Sched::Hybrid);
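  // Editor note (added): Sched::RegPressure keeps register usage minimal,
  // which suits Thumb1 and size-optimised code, while Sched::Hybrid also
  // weighs instruction latency on cores where VFP is usable.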
  //// temporary - rewrite interface to use type
  MaxStoresPerMemset = 8;
  MaxStoresPerMemsetOptSize = 4;
  MaxStoresPerMemcpy = 4; // For @llvm.memcpy -> sequence of stores
  MaxStoresPerMemcpyOptSize = 2;
  MaxStoresPerMemmove = 4; // For @llvm.memmove -> sequence of stores
  MaxStoresPerMemmoveOptSize = 2;
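  // Editor note (added): these thresholds bound inline expansion. For example,
  // with MaxStoresPerMemcpy = 4, a 16-byte @llvm.memcpy with known alignment
  // may be lowered to four word-sized load/store pairs; larger copies fall
  // back to a call to the memcpy library routine.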
  // On ARM arguments smaller than 4 bytes are extended, so all arguments
  // are at least 4 bytes aligned.
  setMinStackArgumentAlignment(Align(4));

  // Prefer likely predicted branches to selects on out-of-order cores.
  PredictableSelectIsExpensive = Subtarget->getSchedModel().isOutOfOrder();

  setPrefLoopAlignment(Align(1ULL << Subtarget->getPrefLoopLogAlignment()));

  setMinFunctionAlignment(Subtarget->isThumb() ? Align(2) : Align(4));

  if (Subtarget->isThumb() || Subtarget->isThumb2())
    setTargetDAGCombine(ISD::ABS);
}
bool ARMTargetLowering::useSoftFloat() const {
  return Subtarget->useSoftFloat();
}
// FIXME: It might make sense to define the representative register class as the
// nearest super-register that has a non-null superset. For example, DPR_VFP2 is
// a super-register of SPR, and DPR is a superset of DPR_VFP2. Consequently,
// SPR's representative would be DPR_VFP2. This should work well if register
// pressure tracking were modified such that a register use would increment the
// pressure of the register class's representative and all of its super
// classes' representatives transitively. We have not implemented this because
// of the difficulty prior to coalescing of modeling operand register classes
// due to the common occurrence of cross class copies and subregister insertions
// and extractions.
std::pair<const TargetRegisterClass *, uint8_t>
ARMTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
                                           MVT VT) const {
  const TargetRegisterClass *RRC = nullptr;
  uint8_t Cost = 1;
  switch (VT.SimpleTy) {
  default:
    return TargetLowering::findRepresentativeClass(TRI, VT);
  // Use DPR as representative register class for all floating point
  // and vector types. Since there are 32 SPR registers and 32 DPR registers,
  // the cost is 1 for both f32 and f64.
  case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16:
  case MVT::v2i32: case MVT::v1i64: case MVT::v2f32:
    RRC = &ARM::DPRRegClass;
    // When NEON is used for SP, only half of the register file is available
    // because operations that define both SP and DP results will be constrained
    // to the VFP2 class (D0-D15). We currently model this constraint prior to
    // coalescing by double-counting the SP regs. See the FIXME above.
    if (Subtarget->useNEONForSinglePrecisionFP())
      Cost = 2;
    break;
  case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
  case MVT::v4f32: case MVT::v2f64:
    RRC = &ARM::DPRRegClass;
    Cost = 2;
    break;
  case MVT::v4i64:
    RRC = &ARM::DPRRegClass;
    Cost = 4;
    break;
  case MVT::v8i64:
    RRC = &ARM::DPRRegClass;
    Cost = 8;
    break;
  }
  return std::make_pair(RRC, Cost);
}
const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((ARMISD::NodeType)Opcode) {
  case ARMISD::FIRST_NUMBER: break;
  case ARMISD::Wrapper: return "ARMISD::Wrapper";
  case ARMISD::WrapperPIC: return "ARMISD::WrapperPIC";
  case ARMISD::WrapperJT: return "ARMISD::WrapperJT";
  case ARMISD::COPY_STRUCT_BYVAL: return "ARMISD::COPY_STRUCT_BYVAL";
  case ARMISD::CALL: return "ARMISD::CALL";
  case ARMISD::CALL_PRED: return "ARMISD::CALL_PRED";
  case ARMISD::CALL_NOLINK: return "ARMISD::CALL_NOLINK";
  case ARMISD::BRCOND: return "ARMISD::BRCOND";
  case ARMISD::BR_JT: return "ARMISD::BR_JT";
  case ARMISD::BR2_JT: return "ARMISD::BR2_JT";
  case ARMISD::RET_FLAG: return "ARMISD::RET_FLAG";
  case ARMISD::INTRET_FLAG: return "ARMISD::INTRET_FLAG";
  case ARMISD::PIC_ADD: return "ARMISD::PIC_ADD";
  case ARMISD::CMP: return "ARMISD::CMP";
  case ARMISD::CMN: return "ARMISD::CMN";
  case ARMISD::CMPZ: return "ARMISD::CMPZ";
  case ARMISD::CMPFP: return "ARMISD::CMPFP";
  case ARMISD::CMPFPw0: return "ARMISD::CMPFPw0";
  case ARMISD::BCC_i64: return "ARMISD::BCC_i64";
  case ARMISD::FMSTAT: return "ARMISD::FMSTAT";
  case ARMISD::CMOV: return "ARMISD::CMOV";
  case ARMISD::SUBS: return "ARMISD::SUBS";
  case ARMISD::SSAT: return "ARMISD::SSAT";
  case ARMISD::USAT: return "ARMISD::USAT";
  case ARMISD::ASRL: return "ARMISD::ASRL";
  case ARMISD::LSRL: return "ARMISD::LSRL";
  case ARMISD::LSLL: return "ARMISD::LSLL";
  case ARMISD::SRL_FLAG: return "ARMISD::SRL_FLAG";
  case ARMISD::SRA_FLAG: return "ARMISD::SRA_FLAG";
  case ARMISD::RRX: return "ARMISD::RRX";
  case ARMISD::ADDC: return "ARMISD::ADDC";
  case ARMISD::ADDE: return "ARMISD::ADDE";
  case ARMISD::SUBC: return "ARMISD::SUBC";
  case ARMISD::SUBE: return "ARMISD::SUBE";
  case ARMISD::LSLS: return "ARMISD::LSLS";
  case ARMISD::VMOVRRD: return "ARMISD::VMOVRRD";
  case ARMISD::VMOVDRR: return "ARMISD::VMOVDRR";
  case ARMISD::VMOVhr: return "ARMISD::VMOVhr";
  case ARMISD::VMOVrh: return "ARMISD::VMOVrh";
  case ARMISD::VMOVSR: return "ARMISD::VMOVSR";
  case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
  case ARMISD::EH_SJLJ_LONGJMP: return "ARMISD::EH_SJLJ_LONGJMP";
  case ARMISD::EH_SJLJ_SETUP_DISPATCH: return "ARMISD::EH_SJLJ_SETUP_DISPATCH";
  case ARMISD::TC_RETURN: return "ARMISD::TC_RETURN";
  case ARMISD::THREAD_POINTER: return "ARMISD::THREAD_POINTER";
  case ARMISD::DYN_ALLOC: return "ARMISD::DYN_ALLOC";
  case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR";
  case ARMISD::PRELOAD: return "ARMISD::PRELOAD";
  case ARMISD::WIN__CHKSTK: return "ARMISD::WIN__CHKSTK";
  case ARMISD::WIN__DBZCHK: return "ARMISD::WIN__DBZCHK";
  case ARMISD::PREDICATE_CAST: return "ARMISD::PREDICATE_CAST";
  case ARMISD::VCMP: return "ARMISD::VCMP";
  case ARMISD::VCMPZ: return "ARMISD::VCMPZ";
  case ARMISD::VTST: return "ARMISD::VTST";
  case ARMISD::VSHLs: return "ARMISD::VSHLs";
  case ARMISD::VSHLu: return "ARMISD::VSHLu";
  case ARMISD::VSHLIMM: return "ARMISD::VSHLIMM";
  case ARMISD::VSHRsIMM: return "ARMISD::VSHRsIMM";
  case ARMISD::VSHRuIMM: return "ARMISD::VSHRuIMM";
  case ARMISD::VRSHRsIMM: return "ARMISD::VRSHRsIMM";
  case ARMISD::VRSHRuIMM: return "ARMISD::VRSHRuIMM";
  case ARMISD::VRSHRNIMM: return "ARMISD::VRSHRNIMM";
  case ARMISD::VQSHLsIMM: return "ARMISD::VQSHLsIMM";
  case ARMISD::VQSHLuIMM: return "ARMISD::VQSHLuIMM";
  case ARMISD::VQSHLsuIMM: return "ARMISD::VQSHLsuIMM";
  case ARMISD::VQSHRNsIMM: return "ARMISD::VQSHRNsIMM";
  case ARMISD::VQSHRNuIMM: return "ARMISD::VQSHRNuIMM";
  case ARMISD::VQSHRNsuIMM: return "ARMISD::VQSHRNsuIMM";
  case ARMISD::VQRSHRNsIMM: return "ARMISD::VQRSHRNsIMM";
  case ARMISD::VQRSHRNuIMM: return "ARMISD::VQRSHRNuIMM";
  case ARMISD::VQRSHRNsuIMM: return "ARMISD::VQRSHRNsuIMM";
  case ARMISD::VSLIIMM: return "ARMISD::VSLIIMM";
  case ARMISD::VSRIIMM: return "ARMISD::VSRIIMM";
  case ARMISD::VGETLANEu: return "ARMISD::VGETLANEu";
  case ARMISD::VGETLANEs: return "ARMISD::VGETLANEs";
  case ARMISD::VMOVIMM: return "ARMISD::VMOVIMM";
  case ARMISD::VMVNIMM: return "ARMISD::VMVNIMM";
  case ARMISD::VMOVFPIMM: return "ARMISD::VMOVFPIMM";
  case ARMISD::VDUP: return "ARMISD::VDUP";
  case ARMISD::VDUPLANE: return "ARMISD::VDUPLANE";
  case ARMISD::VEXT: return "ARMISD::VEXT";
  case ARMISD::VREV64: return "ARMISD::VREV64";
  case ARMISD::VREV32: return "ARMISD::VREV32";
  case ARMISD::VREV16: return "ARMISD::VREV16";
  case ARMISD::VZIP: return "ARMISD::VZIP";
  case ARMISD::VUZP: return "ARMISD::VUZP";
  case ARMISD::VTRN: return "ARMISD::VTRN";
  case ARMISD::VTBL1: return "ARMISD::VTBL1";
  case ARMISD::VTBL2: return "ARMISD::VTBL2";
  case ARMISD::VMULLs: return "ARMISD::VMULLs";
  case ARMISD::VMULLu: return "ARMISD::VMULLu";
  case ARMISD::UMAAL: return "ARMISD::UMAAL";
  case ARMISD::UMLAL: return "ARMISD::UMLAL";
  case ARMISD::SMLAL: return "ARMISD::SMLAL";
  case ARMISD::SMLALBB: return "ARMISD::SMLALBB";
  case ARMISD::SMLALBT: return "ARMISD::SMLALBT";
  case ARMISD::SMLALTB: return "ARMISD::SMLALTB";
  case ARMISD::SMLALTT: return "ARMISD::SMLALTT";
  case ARMISD::SMULWB: return "ARMISD::SMULWB";
  case ARMISD::SMULWT: return "ARMISD::SMULWT";
  case ARMISD::SMLALD: return "ARMISD::SMLALD";
  case ARMISD::SMLALDX: return "ARMISD::SMLALDX";
  case ARMISD::SMLSLD: return "ARMISD::SMLSLD";
  case ARMISD::SMLSLDX: return "ARMISD::SMLSLDX";
  case ARMISD::SMMLAR: return "ARMISD::SMMLAR";
  case ARMISD::SMMLSR: return "ARMISD::SMMLSR";
  case ARMISD::BUILD_VECTOR: return "ARMISD::BUILD_VECTOR";
  case ARMISD::BFI: return "ARMISD::BFI";
  case ARMISD::VORRIMM: return "ARMISD::VORRIMM";
  case ARMISD::VBICIMM: return "ARMISD::VBICIMM";
  case ARMISD::VBSL: return "ARMISD::VBSL";
  case ARMISD::MEMCPY: return "ARMISD::MEMCPY";
  case ARMISD::VLD1DUP: return "ARMISD::VLD1DUP";
  case ARMISD::VLD2DUP: return "ARMISD::VLD2DUP";
  case ARMISD::VLD3DUP: return "ARMISD::VLD3DUP";
  case ARMISD::VLD4DUP: return "ARMISD::VLD4DUP";
  case ARMISD::VLD1_UPD: return "ARMISD::VLD1_UPD";
  case ARMISD::VLD2_UPD: return "ARMISD::VLD2_UPD";
  case ARMISD::VLD3_UPD: return "ARMISD::VLD3_UPD";
  case ARMISD::VLD4_UPD: return "ARMISD::VLD4_UPD";
  case ARMISD::VLD2LN_UPD: return "ARMISD::VLD2LN_UPD";
  case ARMISD::VLD3LN_UPD: return "ARMISD::VLD3LN_UPD";
  case ARMISD::VLD4LN_UPD: return "ARMISD::VLD4LN_UPD";
  case ARMISD::VLD1DUP_UPD: return "ARMISD::VLD1DUP_UPD";
  case ARMISD::VLD2DUP_UPD: return "ARMISD::VLD2DUP_UPD";
  case ARMISD::VLD3DUP_UPD: return "ARMISD::VLD3DUP_UPD";
  case ARMISD::VLD4DUP_UPD: return "ARMISD::VLD4DUP_UPD";
  case ARMISD::VST1_UPD: return "ARMISD::VST1_UPD";
  case ARMISD::VST2_UPD: return "ARMISD::VST2_UPD";
  case ARMISD::VST3_UPD: return "ARMISD::VST3_UPD";
  case ARMISD::VST4_UPD: return "ARMISD::VST4_UPD";
  case ARMISD::VST2LN_UPD: return "ARMISD::VST2LN_UPD";
  case ARMISD::VST3LN_UPD: return "ARMISD::VST3LN_UPD";
  case ARMISD::VST4LN_UPD: return "ARMISD::VST4LN_UPD";
  case ARMISD::WLS: return "ARMISD::WLS";
  case ARMISD::LE: return "ARMISD::LE";
  case ARMISD::LOOP_DEC: return "ARMISD::LOOP_DEC";
  case ARMISD::CSINV: return "ARMISD::CSINV";
  case ARMISD::CSNEG: return "ARMISD::CSNEG";
  case ARMISD::CSINC: return "ARMISD::CSINC";
  }
  return nullptr;
}
EVT ARMTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
                                          EVT VT) const {
  if (!VT.isVector())
    return getPointerTy(DL);

  // MVE has a predicate register.
  if (Subtarget->hasMVEIntegerOps() &&
      (VT == MVT::v4i32 || VT == MVT::v8i16 || VT == MVT::v16i8))
    return MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
  return VT.changeVectorElementTypeToInteger();
}
/// getRegClassFor - Return the register class that should be used for the
/// specified value type.
const TargetRegisterClass *
ARMTargetLowering::getRegClassFor(MVT VT, bool isDivergent) const {
  // Map v4i64 to QQ registers but do not make the type legal. Similarly map
  // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to
  // load / store 4 to 8 consecutive NEON D registers, or 2 to 4 consecutive
  // MVE Q registers.
  if (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) {
    if (VT == MVT::v4i64)
      return &ARM::QQPRRegClass;
    if (VT == MVT::v8i64)
      return &ARM::QQQQPRRegClass;
  }
  return TargetLowering::getRegClassFor(VT);
}
// memcpy, and other memory intrinsics, typically try to use LDM/STM if the
// source/dest is aligned and the copy size is large enough. We therefore want
// to align such objects passed to memory intrinsics.
bool ARMTargetLowering::shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize,
                                               unsigned &PrefAlign) const {
  if (!isa<MemIntrinsic>(CI))
    return false;
  MinSize = 8;
  // On ARM11 onwards (excluding M class) 8-byte aligned LDM is typically 1
  // cycle faster than 4-byte aligned LDM.
  PrefAlign = (Subtarget->hasV6Ops() && !Subtarget->isMClass() ? 8 : 4);
  return true;
}
// Create a fast isel object.
FastISel *
ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
                                  const TargetLibraryInfo *libInfo) const {
  return ARM::createFastISel(funcInfo, libInfo);
}
Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
  unsigned NumVals = N->getNumValues();
  if (!NumVals)
    return Sched::RegPressure;

  for (unsigned i = 0; i != NumVals; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Glue || VT == MVT::Other)
      continue;
    if (VT.isFloatingPoint() || VT.isVector())
      return Sched::ILP;
  }

  if (!N->isMachineOpcode())
    return Sched::RegPressure;

  // Loads are scheduled for latency even if the instruction itinerary
  // is not available.
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());

  if (MCID.getNumDefs() == 0)
    return Sched::RegPressure;
  if (!Itins->isEmpty() &&
      Itins->getOperandCycle(MCID.getSchedClass(), 0) > 2)
    return Sched::ILP;

  return Sched::RegPressure;
}
//===----------------------------------------------------------------------===//
// Lowering Code
//===----------------------------------------------------------------------===//

static bool isSRL16(const SDValue &Op) {
  if (Op.getOpcode() != ISD::SRL)
    return false;
  if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
    return Const->getZExtValue() == 16;
  return false;
}

static bool isSRA16(const SDValue &Op) {
  if (Op.getOpcode() != ISD::SRA)
    return false;
  if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
    return Const->getZExtValue() == 16;
  return false;
}

static bool isSHL16(const SDValue &Op) {
  if (Op.getOpcode() != ISD::SHL)
    return false;
  if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
    return Const->getZExtValue() == 16;
  return false;
}

// Check for a signed 16-bit value. We special case SRA because it makes it
// simpler when also looking for SRAs that aren't sign extending a smaller
// value. Without the check, we'd need to take extra care with checking order
// for some operations.
static bool isS16(const SDValue &Op, SelectionDAG &DAG) {
  if (isSRA16(Op))
    return isSHL16(Op.getOperand(0));
  return DAG.ComputeNumSignBits(Op) == 17;
}
/// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC
static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
  switch (CC) {
  default: llvm_unreachable("Unknown condition code!");
  case ISD::SETNE:  return ARMCC::NE;
  case ISD::SETEQ:  return ARMCC::EQ;
  case ISD::SETGT:  return ARMCC::GT;
  case ISD::SETGE:  return ARMCC::GE;
  case ISD::SETLT:  return ARMCC::LT;
  case ISD::SETLE:  return ARMCC::LE;
  case ISD::SETUGT: return ARMCC::HI;
  case ISD::SETUGE: return ARMCC::HS;
  case ISD::SETULT: return ARMCC::LO;
  case ISD::SETULE: return ARMCC::LS;
  }
}
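// Editor note (added): e.g. an i32 (setcc ult a, b) maps to ARMCC::LO, the
// unsigned lower-than condition that a later predicated instruction or
// conditional branch will test.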
/// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
                        ARMCC::CondCodes &CondCode2) {
  CondCode2 = ARMCC::AL;
  switch (CC) {
  default: llvm_unreachable("Unknown FP condition!");
  case ISD::SETEQ:
  case ISD::SETOEQ: CondCode = ARMCC::EQ; break;
  case ISD::SETGT:
  case ISD::SETOGT: CondCode = ARMCC::GT; break;
  case ISD::SETGE:
  case ISD::SETOGE: CondCode = ARMCC::GE; break;
  case ISD::SETOLT: CondCode = ARMCC::MI; break;
  case ISD::SETOLE: CondCode = ARMCC::LS; break;
  case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break;
  case ISD::SETO:   CondCode = ARMCC::VC; break;
  case ISD::SETUO:  CondCode = ARMCC::VS; break;
  case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break;
  case ISD::SETUGT: CondCode = ARMCC::HI; break;
  case ISD::SETUGE: CondCode = ARMCC::PL; break;
  case ISD::SETLT:
  case ISD::SETULT: CondCode = ARMCC::LT; break;
  case ISD::SETLE:
  case ISD::SETULE: CondCode = ARMCC::LE; break;
  case ISD::SETNE:
  case ISD::SETUNE: CondCode = ARMCC::NE; break;
  }
}
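// Editor note (added): some FP predicates need two ARM conditions. SETONE,
// for instance, comes back as CondCode = MI with CondCode2 = GT, and callers
// that see CondCode2 != ARMCC::AL must emit a second conditional check.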
//===----------------------------------------------------------------------===//
//                      Calling Convention Implementation
//===----------------------------------------------------------------------===//

/// getEffectiveCallingConv - Get the effective calling convention, taking into
/// account presence of floating point hardware and calling convention
/// limitations, such as support for variadic functions.
CallingConv::ID
ARMTargetLowering::getEffectiveCallingConv(CallingConv::ID CC,
                                           bool isVarArg) const {
  switch (CC) {
  default:
    report_fatal_error("Unsupported calling convention");
  case CallingConv::ARM_AAPCS:
  case CallingConv::ARM_APCS:
  case CallingConv::GHC:
    return CC;
  case CallingConv::PreserveMost:
    return CallingConv::PreserveMost;
  case CallingConv::ARM_AAPCS_VFP:
  case CallingConv::Swift:
    return isVarArg ? CallingConv::ARM_AAPCS : CallingConv::ARM_AAPCS_VFP;
  case CallingConv::C:
    if (!Subtarget->isAAPCS_ABI())
      return CallingConv::ARM_APCS;
    else if (Subtarget->hasVFP2Base() && !Subtarget->isThumb1Only() &&
             getTargetMachine().Options.FloatABIType == FloatABI::Hard &&
             !isVarArg)
      return CallingConv::ARM_AAPCS_VFP;
    else
      return CallingConv::ARM_AAPCS;
  case CallingConv::Fast:
  case CallingConv::CXX_FAST_TLS:
    if (!Subtarget->isAAPCS_ABI()) {
      if (Subtarget->hasVFP2Base() && !Subtarget->isThumb1Only() && !isVarArg)
        return CallingConv::Fast;
      return CallingConv::ARM_APCS;
    } else if (Subtarget->hasVFP2Base() &&
               !Subtarget->isThumb1Only() && !isVarArg)
      return CallingConv::ARM_AAPCS_VFP;
    else
      return CallingConv::ARM_AAPCS;
  }
}
CCAssignFn *ARMTargetLowering::CCAssignFnForCall(CallingConv::ID CC,
                                                 bool isVarArg) const {
  return CCAssignFnForNode(CC, false, isVarArg);
}

CCAssignFn *ARMTargetLowering::CCAssignFnForReturn(CallingConv::ID CC,
                                                   bool isVarArg) const {
  return CCAssignFnForNode(CC, true, isVarArg);
}

/// CCAssignFnForNode - Selects the correct CCAssignFn for the given
/// CallingConvention.
CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
                                                 bool Return,
                                                 bool isVarArg) const {
  switch (getEffectiveCallingConv(CC, isVarArg)) {
  default:
    report_fatal_error("Unsupported calling convention");
  case CallingConv::ARM_APCS:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
  case CallingConv::ARM_AAPCS:
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  case CallingConv::ARM_AAPCS_VFP:
    return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
  case CallingConv::Fast:
    return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
  case CallingConv::GHC:
    return (Return ? RetCC_ARM_APCS : CC_ARM_APCS_GHC);
  case CallingConv::PreserveMost:
    return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
  }
}
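// Editor note (added): as an example, a non-variadic C call on a hard-float
// AAPCS target resolves to ARM_AAPCS_VFP above, so CC_ARM_AAPCS_VFP is used
// and FP arguments are assigned to s0-s15/d0-d7 rather than core registers.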
/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
SDValue ARMTargetLowering::LowerCallResult(
    SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
    SDValue ThisVal) const {
  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallResult(Ins, CCAssignFnForReturn(CallConv, isVarArg));

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign VA = RVLocs[i];

    // Pass 'this' value directly from the argument to return value, to avoid
    // reg unit interference
    if (i == 0 && isThisReturn) {
      assert(!VA.needsCustom() && VA.getLocVT() == MVT::i32 &&
             "unexpected return calling convention register assignment");
      InVals.push_back(ThisVal);
      continue;
    }

    SDValue Val;
    if (VA.needsCustom()) {
      // Handle f64 or half of a v2f64.
      SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Lo.getValue(1);
      InFlag = Lo.getValue(2);
      VA = RVLocs[++i]; // skip ahead to next loc
      SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Hi.getValue(1);
      InFlag = Hi.getValue(2);
      if (!Subtarget->isLittle())
        std::swap(Lo, Hi);
      Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);

      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
        Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(0, dl, MVT::i32));

        VA = RVLocs[++i]; // skip ahead to next loc
        Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Lo.getValue(1);
        InFlag = Lo.getValue(2);
        VA = RVLocs[++i]; // skip ahead to next loc
        Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
        Chain = Hi.getValue(1);
        InFlag = Hi.getValue(2);
        if (!Subtarget->isLittle())
          std::swap(Lo, Hi);
        Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
        Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
                          DAG.getConstant(1, dl, MVT::i32));
      }
    } else {
      Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
                               InFlag);
      Chain = Val.getValue(1);
      InFlag = Val.getValue(2);
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}
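// Editor note (added): with a soft-float calling convention an f64 result
// arrives as two i32 halves (typically r0/r1), which the code above glues
// back together with ARMISD::VMOVDRR; a v2f64 result repeats the pattern for
// a second GPR pair.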
/// LowerMemOpCallTo - Store the argument to the stack.
SDValue ARMTargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
                                            SDValue Arg, const SDLoc &dl,
                                            SelectionDAG &DAG,
                                            const CCValAssign &VA,
                                            ISD::ArgFlagsTy Flags) const {
  unsigned LocMemOffset = VA.getLocMemOffset();
  SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
  PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
                       StackPtr, PtrOff);
  return DAG.getStore(
      Chain, dl, Arg, PtrOff,
      MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset));
}
void ARMTargetLowering::PassF64ArgInRegs(const SDLoc &dl, SelectionDAG &DAG,
                                         SDValue Chain, SDValue &Arg,
                                         RegsToPassVector &RegsToPass,
                                         CCValAssign &VA, CCValAssign &NextVA,
                                         SDValue &StackPtr,
                                         SmallVectorImpl<SDValue> &MemOpChains,
                                         ISD::ArgFlagsTy Flags) const {
  SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
                              DAG.getVTList(MVT::i32, MVT::i32), Arg);
  unsigned id = Subtarget->isLittle() ? 0 : 1;
  RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd.getValue(id)));

  if (NextVA.isRegLoc())
    RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1-id)));
  else {
    assert(NextVA.isMemLoc());
    if (!StackPtr.getNode())
      StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP,
                                    getPointerTy(DAG.getDataLayout()));

    MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1-id),
                                           dl, DAG, NextVA, Flags));
  }
}
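// Editor note (added): this is the outgoing-argument mirror of the code in
// LowerCallResult above: ARMISD::VMOVRRD (e.g. "vmov r2, r3, d0") splits the
// f64 into two i32 halves so it can occupy a GPR pair, and the second half
// goes to the stack when NextVA turns out to be a memory location.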
/// LowerCall - Lowering a call into a callseq_start <-
/// ARMISD:CALL <- callseq_end chain. Also add input and output parameter
/// nodes.
SDValue
ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                             SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool doesNotRet = CLI.DoesNotReturn;
  bool isVarArg = CLI.IsVarArg;

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFunction::CallSiteInfo CSInfo;
  bool isStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
  bool isThisReturn = false;
  auto Attr = MF.getFunction().getFnAttribute("disable-tail-calls");
  bool PreferIndirect = false;

  // Disable tail calls if they're not supported.
  if (!Subtarget->supportsTailCall() || Attr.getValueAsString() == "true")
    isTailCall = false;

  if (isa<GlobalAddressSDNode>(Callee)) {
    // If we're optimizing for minimum size and the function is called three or
    // more times in this block, we can improve codesize by calling indirectly
    // as BLXr has a 16-bit encoding.
    auto *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
    if (CLI.CS) {
      auto *BB = CLI.CS.getParent();
      PreferIndirect = Subtarget->isThumb() && Subtarget->hasMinSize() &&
                       count_if(GV->users(), [&BB](const User *U) {
                         return isa<Instruction>(U) &&
                                cast<Instruction>(U)->getParent() == BB;
                       }) > 2;
    }
  }
  if (isTailCall) {
    // Check if it's really possible to do a tail call.
    isTailCall = IsEligibleForTailCallOptimization(
        Callee, CallConv, isVarArg, isStructRet,
        MF.getFunction().hasStructRetAttr(), Outs, OutVals, Ins, DAG,
        PreferIndirect);
    if (!isTailCall && CLI.CS && CLI.CS.isMustTailCall())
      report_fatal_error("failed to perform tail call elimination on a call "
                         "site marked musttail");
    // We don't support GuaranteedTailCallOpt for ARM, only automatically
    // detected sibcalls.
    if (isTailCall)
      ++NumTailCalls;
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CallConv, isVarArg));

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  if (isTailCall) {
    // For tail calls, memory operands are available in our caller's stack.
    NumBytes = 0;
  } else {
    // Adjust the stack pointer for the new arguments...
    // These operations are automatically eliminated by the prolog/epilog pass
    Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
  }

  SDValue StackPtr =
      DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy(DAG.getDataLayout()));

  RegsToPassVector RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads. In the case
  // of tail call optimization, arguments are handled later.
  for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
       i != e;
       ++i, ++realArgIdx) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[realArgIdx];
    ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
    bool isByVal = Flags.isByVal();

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    // f64 and v2f64 might be passed in i32 pairs and must be split into pieces
    if (VA.needsCustom()) {
      if (VA.getLocVT() == MVT::v2f64) {
        SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                  DAG.getConstant(0, dl, MVT::i32));
        SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                  DAG.getConstant(1, dl, MVT::i32));

        PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass,
                         VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);

        VA = ArgLocs[++i]; // skip ahead to next loc
        if (VA.isRegLoc()) {
          PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass,
                           VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
        } else {
          assert(VA.isMemLoc());

          MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1,
                                                 dl, DAG, VA, Flags));
        }
      } else {
        PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
                         StackPtr, MemOpChains, Flags);
      }
    } else if (VA.isRegLoc()) {
      if (realArgIdx == 0 && Flags.isReturned() && !Flags.isSwiftSelf() &&
          Outs[0].VT == MVT::i32) {
        assert(VA.getLocVT() == MVT::i32 &&
               "unexpected calling convention register assignment");
        assert(!Ins.empty() && Ins[0].VT == MVT::i32 &&
               "unexpected use of 'returned'");
        isThisReturn = true;
      }
      const TargetOptions &Options = DAG.getTarget().Options;
      if (Options.EnableDebugEntryValues)
        CSInfo.emplace_back(VA.getLocReg(), i);
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else if (isByVal) {
      assert(VA.isMemLoc());
      unsigned offset = 0;

      // True if this byval aggregate will be split between registers
      // and memory.
      unsigned ByValArgsCount = CCInfo.getInRegsParamsCount();
      unsigned CurByValIdx = CCInfo.getInRegsParamsProcessed();

      if (CurByValIdx < ByValArgsCount) {

        unsigned RegBegin, RegEnd;
        CCInfo.getInRegsParamInfo(CurByValIdx, RegBegin, RegEnd);

        EVT PtrVT =
            DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
        unsigned int i, j;
        for (i = 0, j = RegBegin; j < RegEnd; i++, j++) {
          SDValue Const = DAG.getConstant(4*i, dl, MVT::i32);
          SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
          SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
                                     MachinePointerInfo(),
                                     DAG.InferPtrAlignment(AddArg));
          MemOpChains.push_back(Load.getValue(1));
          RegsToPass.push_back(std::make_pair(j, Load));
        }

        // If the parameter size lies outside the register area, the "offset"
        // value helps us to calculate the stack slot for the remaining part.
        offset = RegEnd - RegBegin;

        CCInfo.nextInRegsParam();
      }

      if (Flags.getByValSize() > 4*offset) {
        auto PtrVT = getPointerTy(DAG.getDataLayout());
        unsigned LocMemOffset = VA.getLocMemOffset();
        SDValue StkPtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
        SDValue Dst = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, StkPtrOff);
        SDValue SrcOffset = DAG.getIntPtrConstant(4*offset, dl);
        SDValue Src = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, SrcOffset);
        SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset, dl,
                                           MVT::i32);
        SDValue AlignNode = DAG.getConstant(Flags.getByValAlign(), dl,
                                            MVT::i32);

        SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
        SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode };
        MemOpChains.push_back(DAG.getNode(ARMISD::COPY_STRUCT_BYVAL, dl, VTs,
                                          Ops));
      }
    } else if (!isTailCall) {
      assert(VA.isMemLoc());

      MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
                                             dl, DAG, VA, Flags));
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
  bool isDirect = false;

  const TargetMachine &TM = getTargetMachine();
  const Module *Mod = MF.getFunction().getParent();
  const GlobalValue *GV = nullptr;
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    GV = G->getGlobal();
  bool isStub =
      !TM.shouldAssumeDSOLocal(*Mod, GV) && Subtarget->isTargetMachO();

  bool isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass());
  bool isLocalARMFunc = false;
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  auto PtrVt = getPointerTy(DAG.getDataLayout());

  if (Subtarget->genLongCalls()) {
    assert((!isPositionIndependent() || Subtarget->isTargetWindows()) &&
           "long-calls codegen is not position independent!");
    // Handle a global address or an external symbol. If it's not one of
    // those, the target's already in a register, so we don't need to do
    // anything extra.
    if (isa<GlobalAddressSDNode>(Callee)) {
      // Create a constant pool entry for the callee address
      unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
      ARMConstantPoolValue *CPV =
          ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 0);

      // Get the address of the callee into a register
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(
          PtrVt, dl, DAG.getEntryNode(), CPAddr,
          MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
      const char *Sym = S->getSymbol();

      // Create a constant pool entry for the callee address
      unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
      ARMConstantPoolValue *CPV =
          ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym,
                                        ARMPCLabelIndex, 0);
      // Get the address of the callee into a register
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(
          PtrVt, dl, DAG.getEntryNode(), CPAddr,
          MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
    }
  } else if (isa<GlobalAddressSDNode>(Callee)) {
    if (!PreferIndirect) {
      isDirect = true;
      bool isDef = GV->isStrongDefinitionForLinker();

      // ARM call to a local ARM function is predicable.
      isLocalARMFunc = !Subtarget->isThumb() && (isDef || !ARMInterworking);
      // tBX takes a register source operand.
      if (isStub && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
        assert(Subtarget->isTargetMachO() && "WrapperPIC use on non-MachO?");
        Callee = DAG.getNode(
            ARMISD::WrapperPIC, dl, PtrVt,
            DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, ARMII::MO_NONLAZY));
        Callee = DAG.getLoad(
            PtrVt, dl, DAG.getEntryNode(), Callee,
            MachinePointerInfo::getGOT(DAG.getMachineFunction()),
            /* Alignment = */ 0, MachineMemOperand::MODereferenceable |
                                     MachineMemOperand::MOInvariant);
      } else if (Subtarget->isTargetCOFF()) {
        assert(Subtarget->isTargetWindows() &&
               "Windows is the only supported COFF target");
        unsigned TargetFlags = GV->hasDLLImportStorageClass()
                                   ? ARMII::MO_DLLIMPORT
                                   : ARMII::MO_NO_FLAG;
        Callee = DAG.getTargetGlobalAddress(GV, dl, PtrVt, /*offset=*/0,
                                            TargetFlags);
        if (GV->hasDLLImportStorageClass())
          Callee =
              DAG.getLoad(PtrVt, dl, DAG.getEntryNode(),
                          DAG.getNode(ARMISD::Wrapper, dl, PtrVt, Callee),
                          MachinePointerInfo::getGOT(DAG.getMachineFunction()));
      } else {
        Callee = DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, 0);
      }
    }
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    isDirect = true;
    // tBX takes a register source operand.
    const char *Sym = S->getSymbol();
    if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
      unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
      ARMConstantPoolValue *CPV =
          ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym,
                                        ARMPCLabelIndex, 4);
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      Callee = DAG.getLoad(
          PtrVt, dl, DAG.getEntryNode(), CPAddr,
          MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
      SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
      Callee = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVt, Callee, PICLabel);
    } else {
      Callee = DAG.getTargetExternalSymbol(Sym, PtrVt, 0);
    }
  }
  // FIXME: handle tail calls differently.
  unsigned CallOpc;
  if (Subtarget->isThumb()) {
    if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())
      CallOpc = ARMISD::CALL_NOLINK;
    else
      CallOpc = ARMISD::CALL;
  } else {
    if (!isDirect && !Subtarget->hasV5TOps())
      CallOpc = ARMISD::CALL_NOLINK;
    else if (doesNotRet && isDirect && Subtarget->hasRetAddrStack() &&
             // Emit regular call when code size is the priority
             !Subtarget->hasMinSize())
      // "mov lr, pc; b _foo" to avoid confusing the RSP
      CallOpc = ARMISD::CALL_NOLINK;
    else
      CallOpc = isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL;
  }

  std::vector<SDValue> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  if (!isTailCall) {
    const uint32_t *Mask;
    const ARMBaseRegisterInfo *ARI = Subtarget->getRegisterInfo();
    if (isThisReturn) {
      // For 'this' returns, use the R0-preserving mask if applicable
      Mask = ARI->getThisReturnPreservedMask(MF, CallConv);
      if (!Mask) {
        // Set isThisReturn to false if the calling convention is not one that
        // allows 'returned' to be modeled in this way, so LowerCallResult does
        // not try to pass 'this' straight through
        isThisReturn = false;
        Mask = ARI->getCallPreservedMask(MF, CallConv);
      }
    } else
      Mask = ARI->getCallPreservedMask(MF, CallConv);

    assert(Mask && "Missing call preserved mask for calling convention");
    Ops.push_back(DAG.getRegisterMask(Mask));
  }

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  if (isTailCall) {
    MF.getFrameInfo().setHasTailCall();
    SDValue Ret = DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, Ops);
    DAG.addCallSiteInfo(Ret.getNode(), std::move(CSInfo));
    return Ret;
  }

  // Returns a chain and a flag for retval copy to use.
  Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);
  InFlag = Chain.getValue(1);
  DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));

  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
                             DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
  if (!Ins.empty())
    InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
                         InVals, isThisReturn,
                         isThisReturn ? OutVals[0] : SDValue());
}
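// Editor note (added): under -mlong-calls (Subtarget->genLongCalls()) the code
// above materialises the callee address from the constant pool and calls
// through a register, trading one extra load per call site for an unlimited
// call range.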
/// HandleByVal - Every parameter *after* a byval parameter is passed
/// on the stack.  Remember the next parameter register to allocate,
/// and then confiscate the rest of the parameter registers to ensure
/// this.
void ARMTargetLowering::HandleByVal(CCState *State, unsigned &Size,
                                    unsigned Align) const {
  // Byval (as with any stack) slots are always at least 4 byte aligned.
  Align = std::max(Align, 4U);

  unsigned Reg = State->AllocateReg(GPRArgRegs);
  if (!Reg)
    return;

  unsigned AlignInRegs = Align / 4;
  unsigned Waste = (ARM::R4 - Reg) % AlignInRegs;
  for (unsigned i = 0; i < Waste; ++i)
    Reg = State->AllocateReg(GPRArgRegs);

  if (!Reg)
    return;

  unsigned Excess = 4 * (ARM::R4 - Reg);

  // Special case when NSAA != SP and the parameter size is greater than the
  // size of all remaining GPR regs. In that case we can't split the parameter,
  // we must send it to the stack. We also must set NCRN to R4, so all
  // remaining registers are wasted.
  const unsigned NSAAOffset = State->getNextStackOffset();
  if (NSAAOffset != 0 && Size > Excess) {
    while (State->AllocateReg(GPRArgRegs))
      ;
    return;
  }

  // The first register for the byval parameter is the first register that
  // wasn't allocated before this method call, so it would be "reg".
  // If the parameter is small enough to be saved in the range [reg, r4), then
  // the end (first after last) register would be reg + param-size-in-regs;
  // otherwise the parameter would be split between registers and stack,
  // and the end register would be r4 in this case.
  unsigned ByValRegBegin = Reg;
  unsigned ByValRegEnd = std::min<unsigned>(Reg + Size / 4, ARM::R4);
  State->addInRegsParamInfo(ByValRegBegin, ByValRegEnd);
  // Note, the first register is already allocated at the beginning of the
  // function; allocate the remaining registers we need.
  for (unsigned i = Reg + 1; i != ByValRegEnd; ++i)
    State->AllocateReg(GPRArgRegs);
  // A byval parameter that is split between registers and memory needs its
  // size truncated here.
  // In the case where the entire structure fits in registers, we set the
  // size in memory to zero.
  Size = std::max<int>(Size - Excess, 0);
}
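// Editor note (added): for example, a 24-byte byval argument arriving when r1
// is the next free register (and no stack arguments have been allocated yet)
// is recorded as the split [r1, r4): r1-r3 carry the first 12 bytes and Size
// is reduced to the 12 bytes that the caller still stores to the stack.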
/// MatchingStackOffset - Return true if the given stack call argument is
/// already available in the same position (relatively) of the caller's
/// incoming argument stack.
static
bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
                         MachineFrameInfo &MFI, const MachineRegisterInfo *MRI,
                         const TargetInstrInfo *TII) {
  unsigned Bytes = Arg.getValueSizeInBits() / 8;
  int FI = std::numeric_limits<int>::max();
  if (Arg.getOpcode() == ISD::CopyFromReg) {
    unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
    if (!Register::isVirtualRegister(VR))
      return false;
    MachineInstr *Def = MRI->getVRegDef(VR);
    if (!Def)
      return false;
    if (!Flags.isByVal()) {
      if (!TII->isLoadFromStackSlot(*Def, FI))
        return false;
    } else {
      return false;
    }
  } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
    if (Flags.isByVal())
      // ByVal argument is passed in as a pointer but it's now being
      // dereferenced. e.g.
      // define @foo(%struct.X* %A) {
      //   tail call @bar(%struct.X* byval %A)
      // }
      return false;
    SDValue Ptr = Ld->getBasePtr();
    FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
    if (!FINode)
      return false;
    FI = FINode->getIndex();
  } else
    return false;

  assert(FI != std::numeric_limits<int>::max());
  if (!MFI.isFixedObjectIndex(FI))
    return false;
  return Offset == MFI.getObjectOffset(FI) && Bytes == MFI.getObjectSize(FI);
}
/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function.
bool ARMTargetLowering::IsEligibleForTailCallOptimization(
    SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
    bool isCalleeStructRet, bool isCallerStructRet,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG,
    const bool isIndirect) const {
  MachineFunction &MF = DAG.getMachineFunction();
  const Function &CallerF = MF.getFunction();
  CallingConv::ID CallerCC = CallerF.getCallingConv();

  assert(Subtarget->supportsTailCall());

  // Indirect tail calls cannot be optimized for Thumb1 if the args
  // to the call take up r0-r3. The reason is that there are no legal registers
  // left to hold the pointer to the function to be called.
  if (Subtarget->isThumb1Only() && Outs.size() >= 4 &&
      (!isa<GlobalAddressSDNode>(Callee.getNode()) || isIndirect))
    return false;

  // Look for obvious safe cases to perform tail call optimization that do not
  // require ABI changes. This is what gcc calls sibcall.

  // Exception-handling functions need a special set of instructions to indicate
  // a return to the hardware. Tail-calling another function would probably
  // break this.
  if (CallerF.hasFnAttribute("interrupt"))
    return false;

  // Also avoid sibcall optimization if either caller or callee uses struct
  // return semantics.
  if (isCalleeStructRet || isCallerStructRet)
    return false;

  // Externally-defined functions with weak linkage should not be
  // tail-called on ARM when the OS does not support dynamic
  // pre-emption of symbols, as the AAELF spec requires normal calls
  // to undefined weak functions to be replaced with a NOP or jump to the
  // next instruction. The behaviour of branch instructions in this
  // situation (as used for tail calls) is implementation-defined, so we
  // cannot rely on the linker replacing the tail call with a return.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = G->getGlobal();
    const Triple &TT = getTargetMachine().getTargetTriple();
    if (GV->hasExternalWeakLinkage() &&
        (!TT.isOSWindows() || TT.isOSBinFormatELF() || TT.isOSBinFormatMachO()))
      return false;
  }

  // Check that the call results are passed in the same way.
  LLVMContext &C = *DAG.getContext();
  if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins,
                                  CCAssignFnForReturn(CalleeCC, isVarArg),
                                  CCAssignFnForReturn(CallerCC, isVarArg)))
    return false;
  // The callee has to preserve all registers the caller needs to preserve.
  const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
  if (CalleeCC != CallerCC) {
    const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
    if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
      return false;
  }

  // If the caller's vararg or byval argument has been split between registers
  // and the stack, do not perform the tail call, since part of the argument is
  // in the caller's local frame.
  const ARMFunctionInfo *AFI_Caller = MF.getInfo<ARMFunctionInfo>();
  if (AFI_Caller->getArgRegsSaveSize())
    return false;

  // If the callee takes no arguments then go on to check the results of the
  // call.
  if (!Outs.empty()) {
    // Check if stack adjustment is needed. For now, do not do this if any
    // argument is passed on the stack.
    SmallVector<CCValAssign, 16> ArgLocs;
    CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
    CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, isVarArg));
    if (CCInfo.getNextStackOffset()) {
      // Check if the arguments are already laid out in the right way as
      // the caller's fixed stack objects.
      MachineFrameInfo &MFI = MF.getFrameInfo();
      const MachineRegisterInfo *MRI = &MF.getRegInfo();
      const TargetInstrInfo *TII = Subtarget->getInstrInfo();
      for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
           i != e;
           ++i, ++realArgIdx) {
        CCValAssign &VA = ArgLocs[i];
        EVT RegVT = VA.getLocVT();
        SDValue Arg = OutVals[realArgIdx];
        ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
        if (VA.getLocInfo() == CCValAssign::Indirect)
          return false;
        if (VA.needsCustom()) {
          // f64 and vector types are split into multiple registers or
          // register/stack-slot combinations. The types will not match
          // the registers; give up on memory f64 refs until we figure
          // out what to do about this.
          if (!VA.isRegLoc())
            return false;
          if (!ArgLocs[++i].isRegLoc())
            return false;
          if (RegVT == MVT::v2f64) {
            if (!ArgLocs[++i].isRegLoc())
              return false;
            if (!ArgLocs[++i].isRegLoc())
              return false;
          }
        } else if (!VA.isRegLoc()) {
          if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
                                   MFI, MRI, TII))
            return false;
        }
      }
    }

    const MachineRegisterInfo &MRI = MF.getRegInfo();
    if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
      return false;
  }

  return true;
}
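// Editor note (added): a simple sibcall such as
//   define i32 @f(i32 %x) { %r = tail call i32 @g(i32 %x)  ret i32 %r }
// passes every check above (arguments fit in r0-r3, no sret, matching
// conventions), so LowerCall emits ARMISD::TC_RETURN and the call becomes a
// branch that reuses the caller's stack frame.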
bool
ARMTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
                                  MachineFunction &MF, bool isVarArg,
                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg));
}
static SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps,
                                    const SDLoc &DL, SelectionDAG &DAG) {
  const MachineFunction &MF = DAG.getMachineFunction();
  const Function &F = MF.getFunction();

  StringRef IntKind = F.getFnAttribute("interrupt").getValueAsString();

  // See ARM ARM v7 B1.8.3. On exception entry LR is set to a possibly offset
  // version of the "preferred return address". These offsets affect the return
  // instruction if this is a return from PL1 without hypervisor extensions.
  //    IRQ/FIQ: +4     "subs pc, lr, #4"
  //    SWI:     0      "subs pc, lr, #0"
  //    ABORT:   +4     "subs pc, lr, #4"
  //    UNDEF:   +4/+2  "subs pc, lr, #0"
  // UNDEF varies depending on whether the exception came from ARM or Thumb
  // mode. Alongside GCC, we throw our hands up in disgust and pretend it's 0.

  unsigned LROffset;
  if (IntKind == "" || IntKind == "IRQ" || IntKind == "FIQ" ||
      IntKind == "ABORT")
    LROffset = 4;
  else if (IntKind == "SWI" || IntKind == "UNDEF")
    LROffset = 0;
  else
    report_fatal_error("Unsupported interrupt attribute. If present, value "
                       "must be one of: IRQ, FIQ, SWI, ABORT or UNDEF");

  RetOps.insert(RetOps.begin() + 1,
                DAG.getConstant(LROffset, DL, MVT::i32, false));

  return DAG.getNode(ARMISD::INTRET_FLAG, DL, MVT::Other, RetOps);
}
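// Editor note (added): for a handler marked with the "interrupt"="IRQ"
// attribute the code above picks LROffset = 4, so the function eventually
// returns with "subs pc, lr, #4", restoring CPSR from SPSR as it branches back.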
SDValue
ARMTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               const SDLoc &dl, SelectionDAG &DAG) const {
  // CCValAssign - represent the assignment of the return value to a location.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slots.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze outgoing return values.
  CCInfo.AnalyzeReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg));

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps;
  RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
  bool isLittleEndian = Subtarget->isLittle();

  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  AFI->setReturnRegsCount(RVLocs.size());

  // Copy the result values into the output registers.
  for (unsigned i = 0, realRVLocIdx = 0;
       i != RVLocs.size();
       ++i, ++realRVLocIdx) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Arg = OutVals[realRVLocIdx];
    bool ReturnF16 = false;

    if (Subtarget->hasFullFP16() && Subtarget->isTargetHardFloat()) {
      // Half-precision return values can be returned like this:
      //
      //   t11 f16 = fadd ...
      //   t12: i16 = bitcast t11
      //   t13: i32 = zero_extend t12
      //   t14: f32 = bitcast t13  <~~~~~~~ Arg
      //
      // to avoid code generation for bitcasts, we simply set Arg to the node
      // that produces the f16 value, t11 in this case.
      if (Arg.getValueType() == MVT::f32 && Arg.getOpcode() == ISD::BITCAST) {
        SDValue ZE = Arg.getOperand(0);
        if (ZE.getOpcode() == ISD::ZERO_EXTEND && ZE.getValueType() == MVT::i32) {
          SDValue BC = ZE.getOperand(0);
          if (BC.getOpcode() == ISD::BITCAST && BC.getValueType() == MVT::i16) {
            Arg = BC.getOperand(0);
            ReturnF16 = true;
          }
        }
      }
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      if (!ReturnF16)
        Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    if (VA.needsCustom()) {
      if (VA.getLocVT() == MVT::v2f64) {
        // Extract the first half and return it in two registers.
        SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                                   DAG.getConstant(0, dl, MVT::i32));
        SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl,
                                       DAG.getVTList(MVT::i32, MVT::i32), Half);

        Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                                 HalfGPRs.getValue(isLittleEndian ? 0 : 1),
                                 Flag);
        Flag = Chain.getValue(1);
        RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
        VA = RVLocs[++i]; // skip ahead to next loc
        Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                                 HalfGPRs.getValue(isLittleEndian ? 1 : 0),
                                 Flag);
        Flag = Chain.getValue(1);
        RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
        VA = RVLocs[++i]; // skip ahead to next loc

        // Extract the 2nd half and fall through to handle it as an f64 value.
        Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
                          DAG.getConstant(1, dl, MVT::i32));
      }
      // Legalize ret f64 -> ret 2 x i32.  We always have fmrrd if f64 is
      // available.
      SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
                                  DAG.getVTList(MVT::i32, MVT::i32), Arg);
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                               fmrrd.getValue(isLittleEndian ? 0 : 1),
                               Flag);
      Flag = Chain.getValue(1);
      RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
      VA = RVLocs[++i]; // skip ahead to next loc
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                               fmrrd.getValue(isLittleEndian ? 1 : 0),
                               Flag);
    } else
      Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);

    // Guarantee that all emitted copies are
    // stuck together, avoiding something bad.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(),
                                     ReturnF16 ? MVT::f16 : VA.getLocVT()));
  }
  const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const MCPhysReg *I =
      TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
  if (I) {
    for (; *I; ++I) {
      if (ARM::GPRRegClass.contains(*I))
        RetOps.push_back(DAG.getRegister(*I, MVT::i32));
      else if (ARM::DPRRegClass.contains(*I))
        RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64)));
      else
        llvm_unreachable("Unexpected register class in CSRsViaCopy!");
    }
  }

  // Update chain and glue.
  RetOps[0] = Chain;
  if (Flag.getNode())
    RetOps.push_back(Flag);

  // CPUs which aren't M-class use a special sequence to return from
  // exceptions (roughly, any instruction setting pc and cpsr simultaneously,
  // though we use "subs pc, lr, #N").
  //
  // M-class CPUs actually use a normal return sequence with a special
  // (hardware-provided) value in LR, so the normal code path works.
  if (DAG.getMachineFunction().getFunction().hasFnAttribute("interrupt") &&
      !Subtarget->isMClass()) {
    if (Subtarget->isThumb1Only())
      report_fatal_error("interrupt attribute is not supported in Thumb1");
    return LowerInterruptReturn(RetOps, dl, DAG);
  }

  return DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, RetOps);
}
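// For example, a function returning 'double' under a soft-float calling
// convention hits the VA.needsCustom() path above: the f64 is transferred to
// a GPR pair with ARMISD::VMOVRRD and returned in r0/r1, with the lo/hi
// halves swapped for big-endian targets (the isLittleEndian selection).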
bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
  if (N->getNumValues() != 1)
    return false;
  if (!N->hasNUsesOfValue(1, 0))
    return false;

  SDValue TCChain = Chain;
  SDNode *Copy = *N->use_begin();
  if (Copy->getOpcode() == ISD::CopyToReg) {
    // If the copy has a glue operand, we conservatively assume it isn't safe to
    // perform a tail call.
    if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
      return false;
    TCChain = Copy->getOperand(0);
  } else if (Copy->getOpcode() == ARMISD::VMOVRRD) {
    SDNode *VMov = Copy;
    // f64 returned in a pair of GPRs.
    SmallPtrSet<SDNode*, 2> Copies;
    for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end();
         UI != UE; ++UI) {
      if (UI->getOpcode() != ISD::CopyToReg)
        return false;
      Copies.insert(*UI);
    }
    if (Copies.size() > 2)
      return false;

    for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end();
         UI != UE; ++UI) {
      SDValue UseChain = UI->getOperand(0);
      if (Copies.count(UseChain.getNode()))
        // Second CopyToReg
        Copy = *UI;
      else {
        // We are at the top of this chain.
        // If the copy has a glue operand, we conservatively assume it
        // isn't safe to perform a tail call.
        if (UI->getOperand(UI->getNumOperands()-1).getValueType() == MVT::Glue)
          return false;
        // First CopyToReg
        TCChain = UseChain;
      }
    }
  } else if (Copy->getOpcode() == ISD::BITCAST) {
    // f32 returned in a single GPR.
    if (!Copy->hasOneUse())
      return false;
    Copy = *Copy->use_begin();
    if (Copy->getOpcode() != ISD::CopyToReg || !Copy->hasNUsesOfValue(1, 0))
      return false;
    // If the copy has a glue operand, we conservatively assume it isn't safe to
    // perform a tail call.
    if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
      return false;
    TCChain = Copy->getOperand(0);
  } else {
    return false;
  }

  bool HasRet = false;
  for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
       UI != UE; ++UI) {
    if (UI->getOpcode() != ARMISD::RET_FLAG &&
        UI->getOpcode() != ARMISD::INTRET_FLAG)
      return false;
    HasRet = true;
  }

  if (!HasRet)
    return false;

  Chain = TCChain;
  return true;
}

bool ARMTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  if (!Subtarget->supportsTailCall())
    return false;

  auto Attr =
      CI->getParent()->getParent()->getFnAttribute("disable-tail-calls");
  if (!CI->isTailCall() || Attr.getValueAsString() == "true")
    return false;

  return true;
}
// Trying to write a 64 bit value so need to split into two 32 bit values first,
// and pass the lower and high parts through.
static SDValue LowerWRITE_REGISTER(SDValue Op, SelectionDAG &DAG) {
  SDLoc DL(Op);
  SDValue WriteValue = Op->getOperand(2);

  // This function is only supposed to be called for i64 type argument.
  assert(WriteValue.getValueType() == MVT::i64
          && "LowerWRITE_REGISTER called for non-i64 type argument.");

  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, WriteValue,
                           DAG.getConstant(0, DL, MVT::i32));
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, WriteValue,
                           DAG.getConstant(1, DL, MVT::i32));
  SDValue Ops[] = { Op->getOperand(0), Op->getOperand(1), Lo, Hi };
  return DAG.getNode(ISD::WRITE_REGISTER, DL, MVT::Other, Ops);
}
// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
// their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is
// one of the above mentioned nodes. It has to be wrapped because otherwise
// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
// be used to form addressing mode. These wrapped nodes will be selected
// into MOVi.
SDValue ARMTargetLowering::LowerConstantPool(SDValue Op,
                                             SelectionDAG &DAG) const {
  EVT PtrVT = Op.getValueType();
  // FIXME there is no actual debug info here
  SDLoc dl(Op);
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  SDValue Res;

  // When generating execute-only code Constant Pools must be promoted to the
  // global data section. It's a bit ugly that we can't share them across basic
  // blocks, but this way we guarantee that execute-only behaves correct with
  // position-independent addressing modes.
  if (Subtarget->genExecuteOnly()) {
    auto AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>();
    auto T = const_cast<Type*>(CP->getType());
    auto C = const_cast<Constant*>(CP->getConstVal());
    auto M = const_cast<Module*>(DAG.getMachineFunction().
                                 getFunction().getParent());
    auto GV = new GlobalVariable(
        *M, T, /*isConstant=*/true, GlobalVariable::InternalLinkage, C,
        Twine(DAG.getDataLayout().getPrivateGlobalPrefix()) + "CP" +
            Twine(DAG.getMachineFunction().getFunctionNumber()) + "_" +
            Twine(AFI->createPICLabelUId()));
    SDValue GA = DAG.getTargetGlobalAddress(dyn_cast<GlobalValue>(GV),
                                            dl, PtrVT);
    return LowerGlobalAddress(GA, DAG);
  }

  if (CP->isMachineConstantPoolEntry())
    Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
                                    CP->getAlignment());
  else
    Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
                                    CP->getAlignment());
  return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res);
}

unsigned ARMTargetLowering::getJumpTableEncoding() const {
  return MachineJumpTableInfo::EK_Inline;
}
SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op,
                                             SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  unsigned ARMPCLabelIndex = 0;
  SDLoc DL(Op);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  SDValue CPAddr;
  bool IsPositionIndependent = isPositionIndependent() || Subtarget->isROPI();
  if (!IsPositionIndependent) {
    CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4);
  } else {
    unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
    ARMPCLabelIndex = AFI->createPICLabelUId();
    ARMConstantPoolValue *CPV =
      ARMConstantPoolConstant::Create(BA, ARMPCLabelIndex,
                                      ARMCP::CPBlockAddress, PCAdj);
    CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
  }
  CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr);
  SDValue Result = DAG.getLoad(
      PtrVT, DL, DAG.getEntryNode(), CPAddr,
      MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
  if (!IsPositionIndependent)
    return Result;
  SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, DL, MVT::i32);
  return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel);
}
/// Convert a TLS address reference into the correct sequence of loads
/// and calls to compute the variable's address for Darwin, and return an
/// SDValue containing the final node.
///
/// Darwin only has one TLS scheme which must be capable of dealing with the
/// fully general situation, in the worst case. This means:
///     + "extern __thread" declaration.
///     + Defined in a possibly unknown dynamic library.
///
/// The general system is that each __thread variable has a [3 x i32] descriptor
/// which contains information used by the runtime to calculate the address. The
/// only part of this the compiler needs to know about is the first word, which
/// contains a function pointer that must be called with the address of the
/// entire descriptor in "r0".
///
/// Since this descriptor may be in a different unit, in general access must
/// proceed along the usual ARM rules. A common sequence to produce is:
///
///     movw rT1, :lower16:_var$non_lazy_ptr
///     movt rT1, :upper16:_var$non_lazy_ptr
///     ldr r0, [rT1]
///     ldr rT2, [r0]
///     blx rT2
///     [...address now in r0...]
SDValue
ARMTargetLowering::LowerGlobalTLSAddressDarwin(SDValue Op,
                                               SelectionDAG &DAG) const {
  assert(Subtarget->isTargetDarwin() &&
         "This function expects a Darwin target");
  SDLoc DL(Op);

  // First step is to get the address of the actual global symbol. This is where
  // the TLS descriptor lives.
  SDValue DescAddr = LowerGlobalAddressDarwin(Op, DAG);

  // The first entry in the descriptor is a function pointer that we must call
  // to obtain the address of the variable.
  SDValue Chain = DAG.getEntryNode();
  SDValue FuncTLVGet = DAG.getLoad(
      MVT::i32, DL, Chain, DescAddr,
      MachinePointerInfo::getGOT(DAG.getMachineFunction()),
      /* Alignment = */ 4,
      MachineMemOperand::MONonTemporal | MachineMemOperand::MODereferenceable |
          MachineMemOperand::MOInvariant);
  Chain = FuncTLVGet.getValue(1);

  MachineFunction &F = DAG.getMachineFunction();
  MachineFrameInfo &MFI = F.getFrameInfo();
  MFI.setAdjustsStack(true);

  // TLS calls preserve all registers except those that absolutely must be
  // trashed: R0 (it takes an argument), LR (it's a call) and CPSR (let's not be
  // silly).
  auto TRI =
      getTargetMachine().getSubtargetImpl(F.getFunction())->getRegisterInfo();
  auto ARI = static_cast<const ARMRegisterInfo *>(TRI);
  const uint32_t *Mask = ARI->getTLSCallPreservedMask(DAG.getMachineFunction());

  // Finally, we can make the call. This is just a degenerate version of a
  // normal ARM call node: r0 takes the address of the descriptor, and
  // returns the address of the variable in this thread.
  Chain = DAG.getCopyToReg(Chain, DL, ARM::R0, DescAddr, SDValue());
  Chain =
      DAG.getNode(ARMISD::CALL, DL, DAG.getVTList(MVT::Other, MVT::Glue),
                  Chain, FuncTLVGet, DAG.getRegister(ARM::R0, MVT::i32),
                  DAG.getRegisterMask(Mask), Chain.getValue(1));
  return DAG.getCopyFromReg(Chain, DL, ARM::R0, MVT::i32, Chain.getValue(1));
}
SDValue
ARMTargetLowering::LowerGlobalTLSAddressWindows(SDValue Op,
                                                SelectionDAG &DAG) const {
  assert(Subtarget->isTargetWindows() && "Windows specific TLS lowering");

  SDValue Chain = DAG.getEntryNode();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDLoc DL(Op);

  // Load the current TEB (thread environment block)
  SDValue Ops[] = {Chain,
                   DAG.getTargetConstant(Intrinsic::arm_mrc, DL, MVT::i32),
                   DAG.getTargetConstant(15, DL, MVT::i32),
                   DAG.getTargetConstant(0, DL, MVT::i32),
                   DAG.getTargetConstant(13, DL, MVT::i32),
                   DAG.getTargetConstant(0, DL, MVT::i32),
                   DAG.getTargetConstant(2, DL, MVT::i32)};
  SDValue CurrentTEB = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL,
                                   DAG.getVTList(MVT::i32, MVT::Other), Ops);

  SDValue TEB = CurrentTEB.getValue(0);
  Chain = CurrentTEB.getValue(1);

  // Load the ThreadLocalStoragePointer from the TEB
  // A pointer to the TLS array is located at offset 0x2c from the TEB.
  SDValue TLSArray =
      DAG.getNode(ISD::ADD, DL, PtrVT, TEB, DAG.getIntPtrConstant(0x2c, DL));
  TLSArray = DAG.getLoad(PtrVT, DL, Chain, TLSArray, MachinePointerInfo());

  // The pointer to the thread's TLS data area is at the TLS Index scaled by 4
  // offset into the TLSArray.

  // Load the TLS index from the C runtime
  SDValue TLSIndex =
      DAG.getTargetExternalSymbol("_tls_index", PtrVT, ARMII::MO_NO_FLAG);
  TLSIndex = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, TLSIndex);
  TLSIndex = DAG.getLoad(PtrVT, DL, Chain, TLSIndex, MachinePointerInfo());

  SDValue Slot = DAG.getNode(ISD::SHL, DL, PtrVT, TLSIndex,
                             DAG.getConstant(2, DL, MVT::i32));
  SDValue TLS = DAG.getLoad(PtrVT, DL, Chain,
                            DAG.getNode(ISD::ADD, DL, PtrVT, TLSArray, Slot),
                            MachinePointerInfo());

  // Get the offset of the start of the .tls section (section base)
  const auto *GA = cast<GlobalAddressSDNode>(Op);
  auto *CPV = ARMConstantPoolConstant::Create(GA->getGlobal(), ARMCP::SECREL);
  SDValue Offset = DAG.getLoad(
      PtrVT, DL, Chain, DAG.getNode(ARMISD::Wrapper, DL, MVT::i32,
                                    DAG.getTargetConstantPool(CPV, PtrVT, 4)),
      MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));

  return DAG.getNode(ISD::ADD, DL, PtrVT, TLS, Offset);
}
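// In effect, the sequence built above computes (pseudo-C):
//   TEB      = mrc p15, 0, <Rt>, c13, c0, 2   ; thread environment block
//   TLSArray = *(TEB + 0x2c)
//   TLS      = *(TLSArray + _tls_index * 4)
//   result   = TLS + <SECREL offset of the global>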
// Lower ISD::GlobalTLSAddress using the "general dynamic" model
SDValue
ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                                 SelectionDAG &DAG) const {
  SDLoc dl(GA);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
  ARMConstantPoolValue *CPV =
    ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex,
                                    ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true);
  SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4);
  Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument);
  Argument = DAG.getLoad(
      PtrVT, dl, DAG.getEntryNode(), Argument,
      MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
  SDValue Chain = Argument.getValue(1);

  SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
  Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel);

  // call __tls_get_addr.
  ArgListTy Args;
  ArgListEntry Entry;
  Entry.Node = Argument;
  Entry.Ty = (Type *) Type::getInt32Ty(*DAG.getContext());
  Args.push_back(Entry);

  // FIXME: is there useful debug info available here?
  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl).setChain(Chain).setLibCallee(
      CallingConv::C, Type::getInt32Ty(*DAG.getContext()),
      DAG.getExternalSymbol("__tls_get_addr", PtrVT), std::move(Args));

  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  return CallResult.first;
}
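// The call built above typically expands to something like (ARM mode, PIC):
//   ldr  r0, .LCPI_tlsgd      ; TLSGD constant-pool entry for the variable
//   add  r0, pc, r0           ; the ARMISD::PIC_ADD
//   bl   __tls_get_addr       ; address of the variable comes back in r0
// The exact code depends on ARM vs Thumb mode and the relocation model.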
// Lower ISD::GlobalTLSAddress using the "initial exec" or
// "local exec" model.
SDValue
ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
                                        SelectionDAG &DAG,
                                        TLSModel::Model model) const {
  const GlobalValue *GV = GA->getGlobal();
  SDLoc dl(GA);
  SDValue Offset;
  SDValue Chain = DAG.getEntryNode();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  // Get the Thread Pointer
  SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);

  if (model == TLSModel::InitialExec) {
    MachineFunction &MF = DAG.getMachineFunction();
    ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
    unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
    // Initial exec model.
    unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
    ARMConstantPoolValue *CPV =
      ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex,
                                      ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF,
                                      true);
    Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
    Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
    Offset = DAG.getLoad(
        PtrVT, dl, Chain, Offset,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
    Chain = Offset.getValue(1);

    SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
    Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel);

    Offset = DAG.getLoad(
        PtrVT, dl, Chain, Offset,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
  } else {
    // local exec model
    assert(model == TLSModel::LocalExec);
    ARMConstantPoolValue *CPV =
      ARMConstantPoolConstant::Create(GV, ARMCP::TPOFF);
    Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
    Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
    Offset = DAG.getLoad(
        PtrVT, dl, Chain, Offset,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
  }

  // The address of the thread local variable is the add of the thread
  // pointer with the offset of the variable.
  return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
}
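// Summary of the two models handled above: initial-exec loads a GOTTPOFF
// entry through the constant pool (and then through the GOT), while
// local-exec uses a direct TPOFF constant-pool entry. Both finish by adding
// the offset to the thread pointer obtained via ARMISD::THREAD_POINTER.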
SDValue
ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  if (DAG.getTarget().useEmulatedTLS())
    return LowerToTLSEmulatedModel(GA, DAG);

  if (Subtarget->isTargetDarwin())
    return LowerGlobalTLSAddressDarwin(Op, DAG);

  if (Subtarget->isTargetWindows())
    return LowerGlobalTLSAddressWindows(Op, DAG);

  // TODO: implement the "local dynamic" model
  assert(Subtarget->isTargetELF() && "Only ELF implemented here");
  TLSModel::Model model = getTargetMachine().getTLSModel(GA->getGlobal());
  switch (model) {
    case TLSModel::GeneralDynamic:
    case TLSModel::LocalDynamic:
      return LowerToTLSGeneralDynamicModel(GA, DAG);
    case TLSModel::InitialExec:
    case TLSModel::LocalExec:
      return LowerToTLSExecModels(GA, DAG, model);
  }
  llvm_unreachable("bogus TLS model");
}

/// Return true if all users of V are within function F, looking through
/// ConstantExprs.
static bool allUsersAreInFunction(const Value *V, const Function *F) {
  SmallVector<const User*,4> Worklist;
  for (auto *U : V->users())
    Worklist.push_back(U);

  while (!Worklist.empty()) {
    auto *U = Worklist.pop_back_val();
    if (isa<ConstantExpr>(U)) {
      for (auto *UU : U->users())
        Worklist.push_back(UU);
      continue;
    }

    auto *I = dyn_cast<Instruction>(U);
    if (!I || I->getParent()->getParent() != F)
      return false;
  }

  return true;
}
static SDValue promoteToConstantPool(const ARMTargetLowering *TLI,
                                     const GlobalValue *GV, SelectionDAG &DAG,
                                     EVT PtrVT, const SDLoc &dl) {
  // If we're creating a pool entry for a constant global with unnamed address,
  // and the global is small enough, we can emit it inline into the constant pool
  // to save ourselves an indirection.
  //
  // This is a win if the constant is only used in one function (so it doesn't
  // need to be duplicated) or duplicating the constant wouldn't increase code
  // size (implying the constant is no larger than 4 bytes).
  const Function &F = DAG.getMachineFunction().getFunction();

  // We rely on this decision to inline being idempotent and unrelated to the
  // use-site. We know that if we inline a variable at one use site, we'll
  // inline it elsewhere too (and reuse the constant pool entry). Fast-isel
  // doesn't know about this optimization, so bail out if it's enabled else
  // we could decide to inline here (and thus never emit the GV) but require
  // the GV from fast-isel generated code.
  if (!EnableConstpoolPromotion ||
      DAG.getMachineFunction().getTarget().Options.EnableFastISel)
    return SDValue();

  auto *GVar = dyn_cast<GlobalVariable>(GV);
  if (!GVar || !GVar->hasInitializer() ||
      !GVar->isConstant() || !GVar->hasGlobalUnnamedAddr() ||
      !GVar->hasLocalLinkage())
    return SDValue();

  // If we inline a value that contains relocations, we move the relocations
  // from .data to .text. This is not allowed in position-independent code.
  auto *Init = GVar->getInitializer();
  if ((TLI->isPositionIndependent() || TLI->getSubtarget()->isROPI()) &&
      Init->needsRelocation())
    return SDValue();

  // The constant islands pass can only really deal with alignment requests
  // <= 4 bytes and cannot pad constants itself. Therefore we cannot promote
  // any type wanting greater alignment requirements than 4 bytes. We also
  // can only promote constants that are multiples of 4 bytes in size or
  // are paddable to a multiple of 4. Currently we only try and pad constants
  // that are strings for simplicity.
  auto *CDAInit = dyn_cast<ConstantDataArray>(Init);
  unsigned Size = DAG.getDataLayout().getTypeAllocSize(Init->getType());
  unsigned Align = DAG.getDataLayout().getPreferredAlignment(GVar);
  unsigned RequiredPadding = 4 - (Size % 4);
  bool PaddingPossible =
    RequiredPadding == 4 || (CDAInit && CDAInit->isString());
  if (!PaddingPossible || Align > 4 || Size > ConstpoolPromotionMaxSize ||
      Size == 0)
    return SDValue();

  unsigned PaddedSize = Size + ((RequiredPadding == 4) ? 0 : RequiredPadding);
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // We can't bloat the constant pool too much, else the ConstantIslands pass
  // may fail to converge. If we haven't promoted this global yet (it may have
  // multiple uses), and promoting it would increase the constant pool size (Sz
  // > 4), ensure we have space to do so up to MaxTotal.
  if (!AFI->getGlobalsPromotedToConstantPool().count(GVar) && Size > 4)
    if (AFI->getPromotedConstpoolIncrease() + PaddedSize - 4 >=
        ConstpoolPromotionMaxTotal)
      return SDValue();

  // This is only valid if all users are in a single function; we can't clone
  // the constant in general. The LLVM IR unnamed_addr allows merging
  // constants, but not cloning them.
  //
  // We could potentially allow cloning if we could prove all uses of the
  // constant in the current function don't care about the address, like
  // printf format strings. But that isn't implemented for now.
  if (!allUsersAreInFunction(GVar, &F))
    return SDValue();

  // We're going to inline this global. Pad it out if needed.
  if (RequiredPadding != 4) {
    StringRef S = CDAInit->getAsString();

    SmallVector<uint8_t,16> V(S.size());
    std::copy(S.bytes_begin(), S.bytes_end(), V.begin());
    while (RequiredPadding--)
      V.push_back(0);
    Init = ConstantDataArray::get(*DAG.getContext(), V);
  }

  auto CPVal = ARMConstantPoolConstant::Create(GVar, Init);
  SDValue CPAddr =
    DAG.getTargetConstantPool(CPVal, PtrVT, /*Align=*/4);
  if (!AFI->getGlobalsPromotedToConstantPool().count(GVar)) {
    AFI->markGlobalAsPromotedToConstantPool(GVar);
    AFI->setPromotedConstpoolIncrease(AFI->getPromotedConstpoolIncrease() +
                                      PaddedSize - 4);
  }
  ++NumConstpoolPromoted;
  return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
}
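// Example of a global that passes the checks above (names are illustrative):
//   static const char Msg[4] = "ok\n";   // internal, constant, unnamed_addr
// Being 4 bytes and used only inside one function, it can be emitted directly
// in that function's constant pool, saving the usual load of its address.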
bool ARMTargetLowering::isReadOnly(const GlobalValue *GV) const {
  if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
    if (!(GV = GA->getBaseObject()))
      return false;
  if (const auto *V = dyn_cast<GlobalVariable>(GV))
    return V->isConstant();
  return isa<Function>(GV);
}

SDValue ARMTargetLowering::LowerGlobalAddress(SDValue Op,
                                              SelectionDAG &DAG) const {
  switch (Subtarget->getTargetTriple().getObjectFormat()) {
  default: llvm_unreachable("unknown object format");
  case Triple::COFF:
    return LowerGlobalAddressWindows(Op, DAG);
  case Triple::ELF:
    return LowerGlobalAddressELF(Op, DAG);
  case Triple::MachO:
    return LowerGlobalAddressDarwin(Op, DAG);
  }
}
SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
                                                 SelectionDAG &DAG) const {
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDLoc dl(Op);
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  const TargetMachine &TM = getTargetMachine();
  bool IsRO = isReadOnly(GV);

  // promoteToConstantPool only if not generating XO text section
  if (TM.shouldAssumeDSOLocal(*GV->getParent(), GV) && !Subtarget->genExecuteOnly())
    if (SDValue V = promoteToConstantPool(this, GV, DAG, PtrVT, dl))
      return V;

  if (isPositionIndependent()) {
    bool UseGOT_PREL = !TM.shouldAssumeDSOLocal(*GV->getParent(), GV);
    SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                           UseGOT_PREL ? ARMII::MO_GOT : 0);
    SDValue Result = DAG.getNode(ARMISD::WrapperPIC, dl, PtrVT, G);
    if (UseGOT_PREL)
      Result =
          DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
                      MachinePointerInfo::getGOT(DAG.getMachineFunction()));
    return Result;
  } else if (Subtarget->isROPI() && IsRO) {
    // PC-relative.
    SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT);
    SDValue Result = DAG.getNode(ARMISD::WrapperPIC, dl, PtrVT, G);
    return Result;
  } else if (Subtarget->isRWPI() && !IsRO) {
    // SB-relative.
    SDValue RelAddr;
    if (Subtarget->useMovt()) {
      ++NumMovwMovt;
      SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_SBREL);
      RelAddr = DAG.getNode(ARMISD::Wrapper, dl, PtrVT, G);
    } else { // use literal pool for address constant
      ARMConstantPoolValue *CPV =
        ARMConstantPoolConstant::Create(GV, ARMCP::SBREL);
      SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
      CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
      RelAddr = DAG.getLoad(
          PtrVT, dl, DAG.getEntryNode(), CPAddr,
          MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
    }
    SDValue SB = DAG.getCopyFromReg(DAG.getEntryNode(), dl, ARM::R9, PtrVT);
    SDValue Result = DAG.getNode(ISD::ADD, dl, PtrVT, SB, RelAddr);
    return Result;
  }

  // If we have T2 ops, we can materialize the address directly via movt/movw
  // pair. This is always cheaper.
  if (Subtarget->useMovt()) {
    ++NumMovwMovt;
    // FIXME: Once remat is capable of dealing with instructions with register
    // operands, expand this into two nodes.
    return DAG.getNode(ARMISD::Wrapper, dl, PtrVT,
                       DAG.getTargetGlobalAddress(GV, dl, PtrVT));
  } else {
    SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4);
    CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
    return DAG.getLoad(
        PtrVT, dl, DAG.getEntryNode(), CPAddr,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
  }
}
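// For a DSO-local global in non-PIC code, the tail of the function above
// yields either
//   movw rN, :lower16:sym
//   movt rN, :upper16:sym
// when movw/movt are available, or a literal-pool load of the address
// otherwise (the Subtarget->useMovt() branch).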
SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
                                                    SelectionDAG &DAG) const {
  assert(!Subtarget->isROPI() && !Subtarget->isRWPI() &&
         "ROPI/RWPI not currently supported for Darwin");
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDLoc dl(Op);
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();

  if (Subtarget->useMovt())
    ++NumMovwMovt;

  // FIXME: Once remat is capable of dealing with instructions with register
  // operands, expand this into multiple nodes
  unsigned Wrapper =
      isPositionIndependent() ? ARMISD::WrapperPIC : ARMISD::Wrapper;

  SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_NONLAZY);
  SDValue Result = DAG.getNode(Wrapper, dl, PtrVT, G);

  if (Subtarget->isGVIndirectSymbol(GV))
    Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
                         MachinePointerInfo::getGOT(DAG.getMachineFunction()));
  return Result;
}

SDValue ARMTargetLowering::LowerGlobalAddressWindows(SDValue Op,
                                                     SelectionDAG &DAG) const {
  assert(Subtarget->isTargetWindows() && "non-Windows COFF is not supported");
  assert(Subtarget->useMovt() &&
         "Windows on ARM expects to use movw/movt");
  assert(!Subtarget->isROPI() && !Subtarget->isRWPI() &&
         "ROPI/RWPI not currently supported for Windows");

  const TargetMachine &TM = getTargetMachine();
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  ARMII::TOF TargetFlags = ARMII::MO_NO_FLAG;
  if (GV->hasDLLImportStorageClass())
    TargetFlags = ARMII::MO_DLLIMPORT;
  else if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
    TargetFlags = ARMII::MO_COFFSTUB;
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Result;
  SDLoc DL(Op);

  ++NumMovwMovt;

  // FIXME: Once remat is capable of dealing with instructions with register
  // operands, expand this into two nodes.
  Result = DAG.getNode(ARMISD::Wrapper, DL, PtrVT,
                       DAG.getTargetGlobalAddress(GV, DL, PtrVT, /*offset=*/0,
                                                  TargetFlags));
  if (TargetFlags & (ARMII::MO_DLLIMPORT | ARMII::MO_COFFSTUB))
    Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
                         MachinePointerInfo::getGOT(DAG.getMachineFunction()));
  return Result;
}
SDValue
ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  SDValue Val = DAG.getConstant(0, dl, MVT::i32);
  return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl,
                     DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0),
                     Op.getOperand(1), Val);
}

SDValue
ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0),
                     Op.getOperand(1), DAG.getConstant(0, dl, MVT::i32));
}

SDValue ARMTargetLowering::LowerEH_SJLJ_SETUP_DISPATCH(SDValue Op,
                                                       SelectionDAG &DAG) const {
  SDLoc dl(Op);
  return DAG.getNode(ARMISD::EH_SJLJ_SETUP_DISPATCH, dl, MVT::Other,
                     Op.getOperand(0));
}
SDValue ARMTargetLowering::LowerINTRINSIC_VOID(
    SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget) const {
  unsigned IntNo =
      cast<ConstantSDNode>(
          Op.getOperand(Op.getOperand(0).getValueType() == MVT::Other))
          ->getZExtValue();
  switch (IntNo) {
  default:
    return SDValue(); // Don't custom lower most intrinsics.
  case Intrinsic::arm_gnu_eabi_mcount: {
    MachineFunction &MF = DAG.getMachineFunction();
    EVT PtrVT = getPointerTy(DAG.getDataLayout());
    SDLoc dl(Op);
    SDValue Chain = Op.getOperand(0);
    // call "\01__gnu_mcount_nc"
    const ARMBaseRegisterInfo *ARI = Subtarget->getRegisterInfo();
    const uint32_t *Mask =
        ARI->getCallPreservedMask(DAG.getMachineFunction(), CallingConv::C);
    assert(Mask && "Missing call preserved mask for calling convention");
    // Mark LR an implicit live-in.
    unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32));
    SDValue ReturnAddress =
        DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, PtrVT);
    std::vector<EVT> ResultTys = {MVT::Other, MVT::Glue};
    SDValue Callee =
        DAG.getTargetExternalSymbol("\01__gnu_mcount_nc", PtrVT, 0);
    SDValue RegisterMask = DAG.getRegisterMask(Mask);
    if (Subtarget->isThumb())
      return SDValue(
          DAG.getMachineNode(
              ARM::tBL_PUSHLR, dl, ResultTys,
              {ReturnAddress, DAG.getTargetConstant(ARMCC::AL, dl, PtrVT),
               DAG.getRegister(0, PtrVT), Callee, RegisterMask, Chain}),
          0);
    return SDValue(
        DAG.getMachineNode(ARM::BL_PUSHLR, dl, ResultTys,
                           {ReturnAddress, Callee, RegisterMask, Chain}),
        0);
  }
  }
}
SDValue
ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
                                           const ARMSubtarget *Subtarget) const {
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc dl(Op);
  switch (IntNo) {
  default: return SDValue();    // Don't custom lower most intrinsics.
  case Intrinsic::thread_pointer: {
    EVT PtrVT = getPointerTy(DAG.getDataLayout());
    return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
  }
  case Intrinsic::eh_sjlj_lsda: {
    MachineFunction &MF = DAG.getMachineFunction();
    ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
    unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
    EVT PtrVT = getPointerTy(DAG.getDataLayout());
    SDValue CPAddr;
    bool IsPositionIndependent = isPositionIndependent();
    unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0;
    ARMConstantPoolValue *CPV =
      ARMConstantPoolConstant::Create(&MF.getFunction(), ARMPCLabelIndex,
                                      ARMCP::CPLSDA, PCAdj);
    CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
    CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
    SDValue Result = DAG.getLoad(
        PtrVT, dl, DAG.getEntryNode(), CPAddr,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));

    if (IsPositionIndependent) {
      SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
      Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
    }
    return Result;
  }
  case Intrinsic::arm_neon_vabs:
    return DAG.getNode(ISD::ABS, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1));
  case Intrinsic::arm_neon_vmulls:
  case Intrinsic::arm_neon_vmullu: {
    unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls)
      ? ARMISD::VMULLs : ARMISD::VMULLu;
    return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  }
  case Intrinsic::arm_neon_vminnm:
  case Intrinsic::arm_neon_vmaxnm: {
    unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminnm)
      ? ISD::FMINNUM : ISD::FMAXNUM;
    return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  }
  case Intrinsic::arm_neon_vminu:
  case Intrinsic::arm_neon_vmaxu: {
    if (Op.getValueType().isFloatingPoint())
      return SDValue();
    unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminu)
      ? ISD::UMIN : ISD::UMAX;
    return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  }
  case Intrinsic::arm_neon_vmins:
  case Intrinsic::arm_neon_vmaxs: {
    // v{min,max}s is overloaded between signed integers and floats.
    if (!Op.getValueType().isFloatingPoint()) {
      unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins)
        ? ISD::SMIN : ISD::SMAX;
      return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
                         Op.getOperand(1), Op.getOperand(2));
    }
    unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins)
      ? ISD::FMINIMUM : ISD::FMAXIMUM;
    return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  }
  case Intrinsic::arm_neon_vtbl1:
    return DAG.getNode(ARMISD::VTBL1, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::arm_neon_vtbl2:
    return DAG.getNode(ARMISD::VTBL2, SDLoc(Op), Op.getValueType(),
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  }
}
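// Note: most of the NEON min/max intrinsics above are rewritten to generic
// ISD opcodes (UMIN/UMAX, SMIN/SMAX, FMINNUM/FMAXNUM, FMINIMUM/FMAXIMUM)
// rather than ARM-specific nodes, so later target-independent DAG combines
// can still apply to them.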
static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
                                 const ARMSubtarget *Subtarget) {
  SDLoc dl(Op);
  ConstantSDNode *SSIDNode = cast<ConstantSDNode>(Op.getOperand(2));
  auto SSID = static_cast<SyncScope::ID>(SSIDNode->getZExtValue());
  if (SSID == SyncScope::SingleThread)
    // Just preserve the chain.
    return Op.getOperand(0);

  if (!Subtarget->hasDataBarrier()) {
    // Some ARMv6 cpus can support data barriers with an mcr instruction.
    // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
    // here.
    assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() &&
           "Unexpected ISD::ATOMIC_FENCE encountered. Should be libcall!");
    return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0),
                       DAG.getConstant(0, dl, MVT::i32));
  }

  ConstantSDNode *OrdN = cast<ConstantSDNode>(Op.getOperand(1));
  AtomicOrdering Ord = static_cast<AtomicOrdering>(OrdN->getZExtValue());
  ARM_MB::MemBOpt Domain = ARM_MB::ISH;
  if (Subtarget->isMClass()) {
    // Only a full system barrier exists in the M-class architectures.
    Domain = ARM_MB::SY;
  } else if (Subtarget->preferISHSTBarriers() &&
             Ord == AtomicOrdering::Release) {
    // Swift happens to implement ISHST barriers in a way that's compatible with
    // Release semantics but weaker than ISH so we'd be fools not to use
    // it. Beware: other processors probably don't!
    Domain = ARM_MB::ISHST;
  }

  return DAG.getNode(ISD::INTRINSIC_VOID, dl, MVT::Other, Op.getOperand(0),
                     DAG.getConstant(Intrinsic::arm_dmb, dl, MVT::i32),
                     DAG.getConstant(Domain, dl, MVT::i32));
}
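// For instance, an IR "fence seq_cst" becomes "dmb ish" on A/R-class cores
// with data barriers, "dmb sy" on M-class (only the full-system domain
// exists there), and a release-only fence may be weakened to "dmb ishst" on
// Swift, per the Domain selection above.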
static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG,
                             const ARMSubtarget *Subtarget) {
  // ARM pre v5TE and Thumb1 does not have preload instructions.
  if (!(Subtarget->isThumb2() ||
        (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps())))
    // Just preserve the chain.
    return Op.getOperand(0);

  SDLoc dl(Op);
  unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1;
  if (!isRead &&
      (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension()))
    // ARMv7 with MP extension has PLDW.
    return Op.getOperand(0);

  unsigned isData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
  if (Subtarget->isThumb()) {
    // Invert the bits.
    isRead = ~isRead & 1;
    isData = ~isData & 1;
  }

  return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0),
                     Op.getOperand(1), DAG.getConstant(isRead, dl, MVT::i32),
                     DAG.getConstant(isData, dl, MVT::i32));
}
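// For example, __builtin_prefetch(p, /*rw=*/1, /*locality=*/3) reaches this
// code as ISD::PREFETCH: on ARMv7 with the MP extension it becomes a PLDW,
// while on cores without that extension the write-prefetch is dropped and
// only the chain is preserved.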
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>();

  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  SDLoc dl(Op);
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
  SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
                      MachinePointerInfo(SV));
}
SDValue ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA,
                                                CCValAssign &NextVA,
                                                SDValue &Root,
                                                SelectionDAG &DAG,
                                                const SDLoc &dl) const {
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  const TargetRegisterClass *RC;
  if (AFI->isThumb1OnlyFunction())
    RC = &ARM::tGPRRegClass;
  else
    RC = &ARM::GPRRegClass;

  // Transform the arguments stored in physical registers into virtual ones.
  unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
  SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);

  SDValue ArgValue2;
  if (NextVA.isMemLoc()) {
    MachineFrameInfo &MFI = MF.getFrameInfo();
    int FI = MFI.CreateFixedObject(4, NextVA.getLocMemOffset(), true);

    // Create load node to retrieve arguments from the stack.
    SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
    ArgValue2 = DAG.getLoad(
        MVT::i32, dl, Root, FIN,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
  } else {
    Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
    ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
  }
  if (!Subtarget->isLittle())
    std::swap (ArgValue, ArgValue2);
  return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2);
}
// The remaining GPRs hold either the beginning of variable-argument
// data, or the beginning of an aggregate passed by value (usually
// byval).  Either way, we allocate stack slots adjacent to the data
// provided by our caller, and store the unallocated registers there.
// If this is a variadic function, the va_list pointer will begin with
// these values; otherwise, this reassembles a (byval) structure that
// was split between registers and memory.
// Return: The frame index registers were stored into.
int ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
                                      const SDLoc &dl, SDValue &Chain,
                                      const Value *OrigArg,
                                      unsigned InRegsParamRecordIdx,
                                      int ArgOffset, unsigned ArgSize) const {
  // Currently, two use-cases possible:
  // Case #1. Non-var-args function, and we meet first byval parameter.
  //          Setup first unallocated register as first byval register;
  //          eat all remained registers
  //          (these two actions are performed by HandleByVal method).
  //          Then, here, we initialize stack frame with
  //          "store-reg" instructions.
  // Case #2. Var-args function, that doesn't contain byval parameters.
  //          The same: eat all remained unallocated registers,
  //          initialize stack frame.

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  unsigned RBegin, REnd;
  if (InRegsParamRecordIdx < CCInfo.getInRegsParamsCount()) {
    CCInfo.getInRegsParamInfo(InRegsParamRecordIdx, RBegin, REnd);
  } else {
    unsigned RBeginIdx = CCInfo.getFirstUnallocated(GPRArgRegs);
    RBegin = RBeginIdx == 4 ? (unsigned)ARM::R4 : GPRArgRegs[RBeginIdx];
    REnd = ARM::R4;
  }

  if (REnd != RBegin)
    ArgOffset = -4 * (ARM::R4 - RBegin);

  auto PtrVT = getPointerTy(DAG.getDataLayout());
  int FrameIndex = MFI.CreateFixedObject(ArgSize, ArgOffset, false);
  SDValue FIN = DAG.getFrameIndex(FrameIndex, PtrVT);

  SmallVector<SDValue, 4> MemOps;
  const TargetRegisterClass *RC =
      AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass : &ARM::GPRRegClass;

  for (unsigned Reg = RBegin, i = 0; Reg < REnd; ++Reg, ++i) {
    unsigned VReg = MF.addLiveIn(Reg, RC);
    SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
    SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                 MachinePointerInfo(OrigArg, 4 * i));
    MemOps.push_back(Store);
    FIN = DAG.getNode(ISD::ADD, dl, PtrVT, FIN, DAG.getConstant(4, dl, PtrVT));
  }

  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
  return FrameIndex;
}
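// Example: for a 16-byte byval struct whose first 8 bytes were assigned to
// r2/r3, this spills r2 and r3 into fixed stack slots placed immediately
// below the caller-provided portion (the negative ArgOffset above), so the
// callee sees the aggregate as one contiguous object.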
// Setup stack frame, the va_list pointer will start from.
void ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
                                             const SDLoc &dl, SDValue &Chain,
                                             unsigned ArgOffset,
                                             unsigned TotalArgRegsSaveSize,
                                             bool ForceMutable) const {
  MachineFunction &MF = DAG.getMachineFunction();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Try to store any remaining integer argument regs
  // to their spots on the stack so that they may be loaded by dereferencing
  // the result of va_next.
  // If there is no regs to be stored, just point address after last
  // argument passed via stack.
  int FrameIndex = StoreByValRegs(CCInfo, DAG, dl, Chain, nullptr,
                                  CCInfo.getInRegsParamsCount(),
                                  CCInfo.getNextStackOffset(),
                                  std::max(4U, TotalArgRegsSaveSize));
  AFI->setVarArgsFrameIndex(FrameIndex);
}
SDValue ARMTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForCall(CallConv, isVarArg));

  SmallVector<SDValue, 16> ArgValues;
  SDValue ArgValue;
  Function::const_arg_iterator CurOrigArg = MF.getFunction().arg_begin();
  unsigned CurArgIdx = 0;

  // Initially ArgRegsSaveSize is zero.
  // Then we increase this value each time we meet byval parameter.
  // We also increase this value in case of varargs function.
  AFI->setArgRegsSaveSize(0);

  // Calculate the amount of stack space that we need to allocate to store
  // byval and variadic arguments that are passed in registers.
  // We need to know this before we allocate the first byval or variadic
  // argument, as they will be allocated a stack slot below the CFA (Canonical
  // Frame Address, the stack pointer at entry to the function).
  unsigned ArgRegBegin = ARM::R4;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    if (CCInfo.getInRegsParamsProcessed() >= CCInfo.getInRegsParamsCount())
      break;

    CCValAssign &VA = ArgLocs[i];
    unsigned Index = VA.getValNo();
    ISD::ArgFlagsTy Flags = Ins[Index].Flags;
    if (!Flags.isByVal())
      continue;

    assert(VA.isMemLoc() && "unexpected byval pointer in reg");
    unsigned RBegin, REnd;
    CCInfo.getInRegsParamInfo(CCInfo.getInRegsParamsProcessed(), RBegin, REnd);
    ArgRegBegin = std::min(ArgRegBegin, RBegin);

    CCInfo.nextInRegsParam();
  }
  CCInfo.rewindByValRegsInfo();

  int lastInsIndex = -1;
  if (isVarArg && MFI.hasVAStart()) {
    unsigned RegIdx = CCInfo.getFirstUnallocated(GPRArgRegs);
    if (RegIdx != array_lengthof(GPRArgRegs))
      ArgRegBegin = std::min(ArgRegBegin, (unsigned)GPRArgRegs[RegIdx]);
  }

  unsigned TotalArgRegsSaveSize = 4 * (ARM::R4 - ArgRegBegin);
  AFI->setArgRegsSaveSize(TotalArgRegsSaveSize);
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    if (Ins[VA.getValNo()].isOrigArg()) {
      std::advance(CurOrigArg,
                   Ins[VA.getValNo()].getOrigArgIndex() - CurArgIdx);
      CurArgIdx = Ins[VA.getValNo()].getOrigArgIndex();
    }
    // Arguments stored in registers.
    if (VA.isRegLoc()) {
      EVT RegVT = VA.getLocVT();

      if (VA.needsCustom()) {
        // f64 and vector types are split up into multiple registers or
        // combinations of registers and stack slots.
        if (VA.getLocVT() == MVT::v2f64) {
          SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i],
                                                   Chain, DAG, dl);
          VA = ArgLocs[++i]; // skip ahead to next loc
          SDValue ArgValue2;
          if (VA.isMemLoc()) {
            int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), true);
            SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
            ArgValue2 = DAG.getLoad(MVT::f64, dl, Chain, FIN,
                                    MachinePointerInfo::getFixedStack(
                                        DAG.getMachineFunction(), FI));
          } else {
            ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i],
                                             Chain, DAG, dl);
          }
          ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
          ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
                                 ArgValue, ArgValue1,
                                 DAG.getIntPtrConstant(0, dl));
          ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
                                 ArgValue, ArgValue2,
                                 DAG.getIntPtrConstant(1, dl));
        } else
          ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
      } else {
        const TargetRegisterClass *RC;

        if (RegVT == MVT::f16)
          RC = &ARM::HPRRegClass;
        else if (RegVT == MVT::f32)
          RC = &ARM::SPRRegClass;
        else if (RegVT == MVT::f64 || RegVT == MVT::v4f16)
          RC = &ARM::DPRRegClass;
        else if (RegVT == MVT::v2f64 || RegVT == MVT::v8f16)
          RC = &ARM::QPRRegClass;
        else if (RegVT == MVT::i32)
          RC = AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass
                                           : &ARM::GPRRegClass;
        else
          llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");

        // Transform the arguments in physical registers into virtual ones.
        unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
        ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);

        // If this value is passed in r0 and has the returned attribute (e.g.
        // C++ 'structors), record this fact for later use.
        if (VA.getLocReg() == ARM::R0 && Ins[VA.getValNo()].Flags.isReturned()) {
          AFI->setPreservesR0();
        }
      }

      // If this is an 8 or 16-bit value, it is really passed promoted
      // to 32 bits.  Insert an assert[sz]ext to capture this, then
      // truncate to the right size.
      switch (VA.getLocInfo()) {
      default: llvm_unreachable("Unknown loc info!");
      case CCValAssign::Full: break;
      case CCValAssign::BCvt:
        ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
        break;
      case CCValAssign::SExt:
        ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
        break;
      case CCValAssign::ZExt:
        ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
                               DAG.getValueType(VA.getValVT()));
        ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
        break;
      }

      InVals.push_back(ArgValue);
    } else { // VA.isRegLoc()
      // sanity check
      assert(VA.isMemLoc());
      assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered");

      int index = VA.getValNo();

      // Some Ins[] entries become multiple ArgLoc[] entries.
      // Process them only once.
      if (index != lastInsIndex) {
        ISD::ArgFlagsTy Flags = Ins[index].Flags;
        // FIXME: For now, all byval parameter objects are marked mutable.
        // This can be changed with more analysis.
        // In case of tail call optimization mark all arguments mutable.
        // Since they could be overwritten by lowering of arguments in case of
        // a tail call.
        if (Flags.isByVal()) {
          assert(Ins[index].isOrigArg() &&
                 "Byval arguments cannot be implicit");
          unsigned CurByValIndex = CCInfo.getInRegsParamsProcessed();

          int FrameIndex = StoreByValRegs(
              CCInfo, DAG, dl, Chain, &*CurOrigArg, CurByValIndex,
              VA.getLocMemOffset(), Flags.getByValSize());
          InVals.push_back(DAG.getFrameIndex(FrameIndex, PtrVT));
          CCInfo.nextInRegsParam();
        } else {
          unsigned FIOffset = VA.getLocMemOffset();
          int FI = MFI.CreateFixedObject(VA.getLocVT().getSizeInBits()/8,
                                         FIOffset, true);

          // Create load nodes to retrieve arguments from the stack.
          SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
          InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
                                       MachinePointerInfo::getFixedStack(
                                           DAG.getMachineFunction(), FI)));
        }
        lastInsIndex = index;
      }
    }
  }

  // varargs
  if (isVarArg && MFI.hasVAStart())
    VarArgStyleRegisters(CCInfo, DAG, dl, Chain,
                         CCInfo.getNextStackOffset(),
                         TotalArgRegsSaveSize);

  AFI->setArgumentStackSize(CCInfo.getNextStackOffset());

  return Chain;
}
/// isFloatingPointZero - Return true if this is +0.0.
static bool isFloatingPointZero(SDValue Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->getValueAPF().isPosZero();
  else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
    // Maybe this has already been legalized into the constant pool?
    if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) {
      SDValue WrapperOp = Op.getOperand(1).getOperand(0);
      if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp))
        if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
          return CFP->getValueAPF().isPosZero();
    }
  } else if (Op->getOpcode() == ISD::BITCAST &&
             Op->getValueType(0) == MVT::f64) {
    // Handle (ISD::BITCAST (ARMISD::VMOVIMM (ISD::TargetConstant 0)) MVT::f64)
    // created by LowerConstantFP().
    SDValue BitcastOp = Op->getOperand(0);
    if (BitcastOp->getOpcode() == ARMISD::VMOVIMM &&
        isNullConstant(BitcastOp->getOperand(0)))
      return true;
  }
  return false;
}
4114 /// Returns appropriate ARM CMP (cmp) and corresponding condition code for
4115 /// the given operands.
4116 SDValue
ARMTargetLowering::getARMCmp(SDValue LHS
, SDValue RHS
, ISD::CondCode CC
,
4117 SDValue
&ARMcc
, SelectionDAG
&DAG
,
4118 const SDLoc
&dl
) const {
4119 if (ConstantSDNode
*RHSC
= dyn_cast
<ConstantSDNode
>(RHS
.getNode())) {
4120 unsigned C
= RHSC
->getZExtValue();
4121 if (!isLegalICmpImmediate((int32_t)C
)) {
4122 // Constant does not fit, try adjusting it by one.
4127 if (C
!= 0x80000000 && isLegalICmpImmediate(C
-1)) {
4128 CC
= (CC
== ISD::SETLT
) ? ISD::SETLE
: ISD::SETGT
;
4129 RHS
= DAG
.getConstant(C
- 1, dl
, MVT::i32
);
4134 if (C
!= 0 && isLegalICmpImmediate(C
-1)) {
4135 CC
= (CC
== ISD::SETULT
) ? ISD::SETULE
: ISD::SETUGT
;
4136 RHS
= DAG
.getConstant(C
- 1, dl
, MVT::i32
);
4141 if (C
!= 0x7fffffff && isLegalICmpImmediate(C
+1)) {
4142 CC
= (CC
== ISD::SETLE
) ? ISD::SETLT
: ISD::SETGE
;
4143 RHS
= DAG
.getConstant(C
+ 1, dl
, MVT::i32
);
4148 if (C
!= 0xffffffff && isLegalICmpImmediate(C
+1)) {
4149 CC
= (CC
== ISD::SETULE
) ? ISD::SETULT
: ISD::SETUGE
;
4150 RHS
= DAG
.getConstant(C
+ 1, dl
, MVT::i32
);
4155 } else if ((ARM_AM::getShiftOpcForNode(LHS
.getOpcode()) != ARM_AM::no_shift
) &&
4156 (ARM_AM::getShiftOpcForNode(RHS
.getOpcode()) == ARM_AM::no_shift
)) {
4157 // In ARM and Thumb-2, the compare instructions can shift their second
4159 CC
= ISD::getSetCCSwappedOperands(CC
);
4160 std::swap(LHS
, RHS
);
4163 // Thumb1 has very limited immediate modes, so turning an "and" into a
4164 // shift can save multiple instructions.
4166 // If we have (x & C1), and C1 is an appropriate mask, we can transform it
4167 // into "((x << n) >> n)". But that isn't necessarily profitable on its
4168 // own. If it's the operand to an unsigned comparison with an immediate,
4169 // we can eliminate one of the shifts: we transform
4170 // "((x << n) >> n) == C2" to "(x << n) == (C2 << n)".
4172 // We avoid transforming cases which aren't profitable due to encoding
4175 // 1. C2 fits into the immediate field of a cmp, and the transformed version
4176 // would not; in that case, we're essentially trading one immediate load for
4178 // 2. C1 is 255 or 65535, so we can use uxtb or uxth.
4179 // 3. C2 is zero; we have other code for this special case.
4181 // FIXME: Figure out profitability for Thumb2; we usually can't save an
4182 // instruction, since the AND is always one instruction anyway, but we could
4183 // use narrow instructions in some cases.
4184 if (Subtarget
->isThumb1Only() && LHS
->getOpcode() == ISD::AND
&&
4185 LHS
->hasOneUse() && isa
<ConstantSDNode
>(LHS
.getOperand(1)) &&
4186 LHS
.getValueType() == MVT::i32
&& isa
<ConstantSDNode
>(RHS
) &&
4187 !isSignedIntSetCC(CC
)) {
4188 unsigned Mask
= cast
<ConstantSDNode
>(LHS
.getOperand(1))->getZExtValue();
4189 auto *RHSC
= cast
<ConstantSDNode
>(RHS
.getNode());
4190 uint64_t RHSV
= RHSC
->getZExtValue();
4191 if (isMask_32(Mask
) && (RHSV
& ~Mask
) == 0 && Mask
!= 255 && Mask
!= 65535) {
4192 unsigned ShiftBits
= countLeadingZeros(Mask
);
4193 if (RHSV
&& (RHSV
> 255 || (RHSV
<< ShiftBits
) <= 255)) {
4194 SDValue ShiftAmt
= DAG
.getConstant(ShiftBits
, dl
, MVT::i32
);
4195 LHS
= DAG
.getNode(ISD::SHL
, dl
, MVT::i32
, LHS
.getOperand(0), ShiftAmt
);
4196 RHS
= DAG
.getConstant(RHSV
<< ShiftBits
, dl
, MVT::i32
);
4201 // The specific comparison "(x<<c) > 0x80000000U" can be optimized to a
4202 // single "lsls x, c+1". The shift sets the "C" and "Z" flags the same
4204 // FIXME: Add support for ARM/Thumb2; this would need isel patterns, and
4205 // some tweaks to the heuristics for the previous and->shift transform.
4206 // FIXME: Optimize cases where the LHS isn't a shift.
  if (Subtarget->isThumb1Only() && LHS->getOpcode() == ISD::SHL &&
      isa<ConstantSDNode>(RHS) &&
      cast<ConstantSDNode>(RHS)->getZExtValue() == 0x80000000U &&
      CC == ISD::SETUGT && isa<ConstantSDNode>(LHS.getOperand(1)) &&
      cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() < 31) {
    unsigned ShiftAmt =
        cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() + 1;
    SDValue Shift = DAG.getNode(ARMISD::LSLS, dl,
                                DAG.getVTList(MVT::i32, MVT::i32),
                                LHS.getOperand(0),
                                DAG.getConstant(ShiftAmt, dl, MVT::i32));
    SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, ARM::CPSR,
                                     Shift.getValue(1), SDValue());
    ARMcc = DAG.getConstant(ARMCC::HI, dl, MVT::i32);
    return Chain.getValue(1);
  }
  ARMCC::CondCodes CondCode = IntCCToARMCC(CC);

  // If the RHS is a constant zero then the V (overflow) flag will never be
  // set. This can allow us to simplify GE to PL or LT to MI, which can be
  // simpler for other passes (like the peephole optimiser) to deal with.
  if (isNullConstant(RHS)) {
    switch (CondCode) {
    default: break;
    case ARMCC::GE:
      CondCode = ARMCC::PL;
      break;
    case ARMCC::LT:
      CondCode = ARMCC::MI;
      break;
    }
  }

  ARMISD::NodeType CompareType;
  switch (CondCode) {
  default:
    CompareType = ARMISD::CMP;
    break;
  case ARMCC::EQ:
  case ARMCC::NE:
    // Uses only Z Flag
    CompareType = ARMISD::CMPZ;
    break;
  }
  ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
  return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS);
}
/// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
SDValue ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS,
                                     SelectionDAG &DAG, const SDLoc &dl) const {
  assert(Subtarget->hasFP64() || RHS.getValueType() != MVT::f64);
  SDValue Cmp;
  if (!isFloatingPointZero(RHS))
    Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Glue, LHS, RHS);
  else
    Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Glue, LHS);
  return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp);
}
/// duplicateCmp - Glue values can have only one use, so this function
/// duplicates a comparison node.
SDValue
ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const {
  unsigned Opc = Cmp.getOpcode();
  SDLoc DL(Cmp);
  if (Opc == ARMISD::CMP || Opc == ARMISD::CMPZ)
    return DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0), Cmp.getOperand(1));

  assert(Opc == ARMISD::FMSTAT && "unexpected comparison operation");
  Cmp = Cmp.getOperand(0);
  Opc = Cmp.getOpcode();
  if (Opc == ARMISD::CMPFP)
    Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0), Cmp.getOperand(1));
  else {
    assert(Opc == ARMISD::CMPFPw0 && "unexpected operand of FMSTAT");
    Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0));
  }
  return DAG.getNode(ARMISD::FMSTAT, DL, MVT::Glue, Cmp);
}
// This function returns three things: the arithmetic computation itself
// (Value), a comparison (OverflowCmp), and a condition code (ARMcc). The
// comparison and the condition code define the case in which the arithmetic
// computation *does not* overflow.
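// For example, for ISD::SADDO the pair built below is
// (ADD(LHS, RHS), CMP(Value, LHS)) together with ARMcc == ARMCC::VC,
// i.e. the "no signed overflow" case of the addition.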
std::pair<SDValue, SDValue>
ARMTargetLowering::getARMXALUOOp(SDValue Op, SelectionDAG &DAG,
                                 SDValue &ARMcc) const {
  assert(Op.getValueType() == MVT::i32 && "Unsupported value type");

  SDValue Value, OverflowCmp;
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDLoc dl(Op);

  // FIXME: We are currently always generating CMPs because we don't support
  // generating CMN through the backend. This is not as good as the natural
  // CMP case because it causes a register dependency and cannot be folded
  // later.

  switch (Op.getOpcode()) {
  default:
    llvm_unreachable("Unknown overflow instruction!");
  case ISD::SADDO:
    ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32);
    Value = DAG.getNode(ISD::ADD, dl, Op.getValueType(), LHS, RHS);
    OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS);
    break;
  case ISD::UADDO:
    ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32);
    // We use ADDC here to correspond to its use in LowerUnsignedALUO.
    // We do not use it in the USUBO case as Value may not be used.
    Value = DAG.getNode(ARMISD::ADDC, dl,
                        DAG.getVTList(Op.getValueType(), MVT::i32), LHS, RHS)
                .getValue(0);
    OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS);
    break;
  case ISD::SSUBO:
    ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32);
    Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS);
    OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS);
    break;
  case ISD::USUBO:
    ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32);
    Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS);
    OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS);
    break;
  case ISD::UMULO:
    // We generate a UMUL_LOHI and then check if the high word is 0.
    ARMcc = DAG.getConstant(ARMCC::EQ, dl, MVT::i32);
    Value = DAG.getNode(ISD::UMUL_LOHI, dl,
                        DAG.getVTList(Op.getValueType(), Op.getValueType()),
                        LHS, RHS);
    OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value.getValue(1),
                              DAG.getConstant(0, dl, MVT::i32));
    Value = Value.getValue(0); // We only want the low 32 bits for the result.
    break;
  case ISD::SMULO:
    // We generate a SMUL_LOHI and then check if all the bits of the high word
    // are the same as the sign bit of the low word.
    ARMcc = DAG.getConstant(ARMCC::EQ, dl, MVT::i32);
    Value = DAG.getNode(ISD::SMUL_LOHI, dl,
                        DAG.getVTList(Op.getValueType(), Op.getValueType()),
                        LHS, RHS);
    OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value.getValue(1),
                              DAG.getNode(ISD::SRA, dl, Op.getValueType(),
                                          Value.getValue(0),
                                          DAG.getConstant(31, dl, MVT::i32)));
    Value = Value.getValue(0); // We only want the low 32 bits for the result.
    break;
  } // switch (...)

  return std::make_pair(Value, OverflowCmp);
}
4364 ARMTargetLowering::LowerSignedALUO(SDValue Op
, SelectionDAG
&DAG
) const {
4365 // Let legalize expand this if it isn't a legal type yet.
4366 if (!DAG
.getTargetLoweringInfo().isTypeLegal(Op
.getValueType()))
4369 SDValue Value
, OverflowCmp
;
4371 std::tie(Value
, OverflowCmp
) = getARMXALUOOp(Op
, DAG
, ARMcc
);
4372 SDValue CCR
= DAG
.getRegister(ARM::CPSR
, MVT::i32
);
4374 // We use 0 and 1 as false and true values.
4375 SDValue TVal
= DAG
.getConstant(1, dl
, MVT::i32
);
4376 SDValue FVal
= DAG
.getConstant(0, dl
, MVT::i32
);
4377 EVT VT
= Op
.getValueType();
4379 SDValue Overflow
= DAG
.getNode(ARMISD::CMOV
, dl
, VT
, TVal
, FVal
,
4380 ARMcc
, CCR
, OverflowCmp
);
4382 SDVTList VTs
= DAG
.getVTList(Op
.getValueType(), MVT::i32
);
4383 return DAG
.getNode(ISD::MERGE_VALUES
, dl
, VTs
, Value
, Overflow
);
static SDValue ConvertBooleanCarryToCarryFlag(SDValue BoolCarry,
                                              SelectionDAG &DAG) {
  SDLoc DL(BoolCarry);
  EVT CarryVT = BoolCarry.getValueType();

  // This converts the boolean value carry into the carry flag by doing
  // ARMISD::SUBC Carry, 1
  SDValue Carry = DAG.getNode(ARMISD::SUBC, DL,
                              DAG.getVTList(CarryVT, MVT::i32),
                              BoolCarry, DAG.getConstant(1, DL, CarryVT));
  return Carry.getValue(1);
}

static SDValue ConvertCarryFlagToBooleanCarry(SDValue Flags, EVT VT,
                                              SelectionDAG &DAG) {
  SDLoc DL(Flags);

  // Now convert the carry flag into a boolean carry. We do this
  // using ARMISD:ADDE 0, 0, Carry
  return DAG.getNode(ARMISD::ADDE, DL, DAG.getVTList(VT, MVT::i32),
                     DAG.getConstant(0, DL, MVT::i32),
                     DAG.getConstant(0, DL, MVT::i32), Flags);
}
4410 SDValue
ARMTargetLowering::LowerUnsignedALUO(SDValue Op
,
4411 SelectionDAG
&DAG
) const {
4412 // Let legalize expand this if it isn't a legal type yet.
4413 if (!DAG
.getTargetLoweringInfo().isTypeLegal(Op
.getValueType()))
4416 SDValue LHS
= Op
.getOperand(0);
4417 SDValue RHS
= Op
.getOperand(1);
4420 EVT VT
= Op
.getValueType();
4421 SDVTList VTs
= DAG
.getVTList(VT
, MVT::i32
);
4424 switch (Op
.getOpcode()) {
4426 llvm_unreachable("Unknown overflow instruction!");
4428 Value
= DAG
.getNode(ARMISD::ADDC
, dl
, VTs
, LHS
, RHS
);
4429 // Convert the carry flag into a boolean value.
4430 Overflow
= ConvertCarryFlagToBooleanCarry(Value
.getValue(1), VT
, DAG
);
4433 Value
= DAG
.getNode(ARMISD::SUBC
, dl
, VTs
, LHS
, RHS
);
4434 // Convert the carry flag into a boolean value.
4435 Overflow
= ConvertCarryFlagToBooleanCarry(Value
.getValue(1), VT
, DAG
);
4436 // ARMISD::SUBC returns 0 when we have to borrow, so make it an overflow
4437 // value. So compute 1 - C.
4438 Overflow
= DAG
.getNode(ISD::SUB
, dl
, MVT::i32
,
4439 DAG
.getConstant(1, dl
, MVT::i32
), Overflow
);
4444 return DAG
.getNode(ISD::MERGE_VALUES
, dl
, VTs
, Value
, Overflow
);
4447 SDValue
ARMTargetLowering::LowerSELECT(SDValue Op
, SelectionDAG
&DAG
) const {
4448 SDValue Cond
= Op
.getOperand(0);
4449 SDValue SelectTrue
= Op
.getOperand(1);
4450 SDValue SelectFalse
= Op
.getOperand(2);
4452 unsigned Opc
= Cond
.getOpcode();
4454 if (Cond
.getResNo() == 1 &&
4455 (Opc
== ISD::SADDO
|| Opc
== ISD::UADDO
|| Opc
== ISD::SSUBO
||
4456 Opc
== ISD::USUBO
)) {
4457 if (!DAG
.getTargetLoweringInfo().isTypeLegal(Cond
->getValueType(0)))
4460 SDValue Value
, OverflowCmp
;
4462 std::tie(Value
, OverflowCmp
) = getARMXALUOOp(Cond
, DAG
, ARMcc
);
4463 SDValue CCR
= DAG
.getRegister(ARM::CPSR
, MVT::i32
);
4464 EVT VT
= Op
.getValueType();
4466 return getCMOV(dl
, VT
, SelectTrue
, SelectFalse
, ARMcc
, CCR
,
4472 // (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond)
4473 // (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond)
4475 if (Cond
.getOpcode() == ARMISD::CMOV
&& Cond
.hasOneUse()) {
4476 const ConstantSDNode
*CMOVTrue
=
4477 dyn_cast
<ConstantSDNode
>(Cond
.getOperand(0));
4478 const ConstantSDNode
*CMOVFalse
=
4479 dyn_cast
<ConstantSDNode
>(Cond
.getOperand(1));
4481 if (CMOVTrue
&& CMOVFalse
) {
4482 unsigned CMOVTrueVal
= CMOVTrue
->getZExtValue();
4483 unsigned CMOVFalseVal
= CMOVFalse
->getZExtValue();
4487 if (CMOVTrueVal
== 1 && CMOVFalseVal
== 0) {
4489 False
= SelectFalse
;
4490 } else if (CMOVTrueVal
== 0 && CMOVFalseVal
== 1) {
4495 if (True
.getNode() && False
.getNode()) {
4496 EVT VT
= Op
.getValueType();
4497 SDValue ARMcc
= Cond
.getOperand(2);
4498 SDValue CCR
= Cond
.getOperand(3);
4499 SDValue Cmp
= duplicateCmp(Cond
.getOperand(4), DAG
);
4500 assert(True
.getValueType() == VT
);
4501 return getCMOV(dl
, VT
, True
, False
, ARMcc
, CCR
, Cmp
, DAG
);
4506 // ARM's BooleanContents value is UndefinedBooleanContent. Mask out the
4507 // undefined bits before doing a full-word comparison with zero.
4508 Cond
= DAG
.getNode(ISD::AND
, dl
, Cond
.getValueType(), Cond
,
4509 DAG
.getConstant(1, dl
, Cond
.getValueType()));
4511 return DAG
.getSelectCC(dl
, Cond
,
4512 DAG
.getConstant(0, dl
, Cond
.getValueType()),
4513 SelectTrue
, SelectFalse
, ISD::SETNE
);
4516 static void checkVSELConstraints(ISD::CondCode CC
, ARMCC::CondCodes
&CondCode
,
4517 bool &swpCmpOps
, bool &swpVselOps
) {
4518 // Start by selecting the GE condition code for opcodes that return true for
4520 if (CC
== ISD::SETUGE
|| CC
== ISD::SETOGE
|| CC
== ISD::SETOLE
||
4521 CC
== ISD::SETULE
|| CC
== ISD::SETGE
|| CC
== ISD::SETLE
)
4522 CondCode
= ARMCC::GE
;
4524 // and GT for opcodes that return false for 'equality'.
4525 else if (CC
== ISD::SETUGT
|| CC
== ISD::SETOGT
|| CC
== ISD::SETOLT
||
4526 CC
== ISD::SETULT
|| CC
== ISD::SETGT
|| CC
== ISD::SETLT
)
4527 CondCode
= ARMCC::GT
;
4529 // Since we are constrained to GE/GT, if the opcode contains 'less', we need
4530 // to swap the compare operands.
4531 if (CC
== ISD::SETOLE
|| CC
== ISD::SETULE
|| CC
== ISD::SETOLT
||
4532 CC
== ISD::SETULT
|| CC
== ISD::SETLE
|| CC
== ISD::SETLT
)
4535 // Both GT and GE are ordered comparisons, and return false for 'unordered'.
4536 // If we have an unordered opcode, we need to swap the operands to the VSEL
4537 // instruction (effectively negating the condition).
4539 // This also has the effect of swapping which one of 'less' or 'greater'
4540 // returns true, so we also swap the compare operands. It also switches
4541 // whether we return true for 'equality', so we compensate by picking the
4542 // opposite condition code to our original choice.
4543 if (CC
== ISD::SETULE
|| CC
== ISD::SETULT
|| CC
== ISD::SETUGE
||
4544 CC
== ISD::SETUGT
) {
4545 swpCmpOps
= !swpCmpOps
;
4546 swpVselOps
= !swpVselOps
;
4547 CondCode
= CondCode
== ARMCC::GT
? ARMCC::GE
: ARMCC::GT
;
4550 // 'ordered' is 'anything but unordered', so use the VS condition code and
4551 // swap the VSEL operands.
4552 if (CC
== ISD::SETO
) {
4553 CondCode
= ARMCC::VS
;
4557 // 'unordered or not equal' is 'anything but equal', so use the EQ condition
4558 // code and swap the VSEL operands. Also do this if we don't care about the
4560 if (CC
== ISD::SETUNE
|| CC
== ISD::SETNE
) {
4561 CondCode
= ARMCC::EQ
;
4566 SDValue
ARMTargetLowering::getCMOV(const SDLoc
&dl
, EVT VT
, SDValue FalseVal
,
4567 SDValue TrueVal
, SDValue ARMcc
, SDValue CCR
,
4568 SDValue Cmp
, SelectionDAG
&DAG
) const {
4569 if (!Subtarget
->hasFP64() && VT
== MVT::f64
) {
4570 FalseVal
= DAG
.getNode(ARMISD::VMOVRRD
, dl
,
4571 DAG
.getVTList(MVT::i32
, MVT::i32
), FalseVal
);
4572 TrueVal
= DAG
.getNode(ARMISD::VMOVRRD
, dl
,
4573 DAG
.getVTList(MVT::i32
, MVT::i32
), TrueVal
);
4575 SDValue TrueLow
= TrueVal
.getValue(0);
4576 SDValue TrueHigh
= TrueVal
.getValue(1);
4577 SDValue FalseLow
= FalseVal
.getValue(0);
4578 SDValue FalseHigh
= FalseVal
.getValue(1);
4580 SDValue Low
= DAG
.getNode(ARMISD::CMOV
, dl
, MVT::i32
, FalseLow
, TrueLow
,
4582 SDValue High
= DAG
.getNode(ARMISD::CMOV
, dl
, MVT::i32
, FalseHigh
, TrueHigh
,
4583 ARMcc
, CCR
, duplicateCmp(Cmp
, DAG
));
4585 return DAG
.getNode(ARMISD::VMOVDRR
, dl
, MVT::f64
, Low
, High
);
4587 return DAG
.getNode(ARMISD::CMOV
, dl
, VT
, FalseVal
, TrueVal
, ARMcc
, CCR
,
static bool isGTorGE(ISD::CondCode CC) {
  return CC == ISD::SETGT || CC == ISD::SETGE;
}

static bool isLTorLE(ISD::CondCode CC) {
  return CC == ISD::SETLT || CC == ISD::SETLE;
}

// See if a conditional (LHS CC RHS ? TrueVal : FalseVal) is lower-saturating.
// All of these conditions (and their <= and >= counterparts) will do:
//          x < k ? k : x
//          x > k ? x : k
//          k < x ? x : k
//          k > x ? k : x
static bool isLowerSaturate(const SDValue LHS, const SDValue RHS,
                            const SDValue TrueVal, const SDValue FalseVal,
                            const ISD::CondCode CC, const SDValue K) {
  return (isGTorGE(CC) &&
          ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal))) ||
         (isLTorLE(CC) &&
          ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal)));
}

// Similar to isLowerSaturate(), but checks for upper-saturating conditions.
static bool isUpperSaturate(const SDValue LHS, const SDValue RHS,
                            const SDValue TrueVal, const SDValue FalseVal,
                            const ISD::CondCode CC, const SDValue K) {
  return (isGTorGE(CC) &&
          ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal))) ||
         (isLTorLE(CC) &&
          ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal)));
}
// Check if two chained conditionals could be converted into SSAT or USAT.
//
// SSAT can replace a set of two conditional selectors that bound a number to
// an interval of type [k, ~k] when k + 1 is a power of 2. Here are some
// examples:
//
//     x < -k ? -k : (x > k ? k : x)
//     x < -k ? -k : (x < k ? x : k)
//     x > -k ? (x > k ? k : x) : -k
//     x < k ? (x < -k ? -k : x) : k
//     etc.
//
// USAT works similarly to SSAT but bounds on the interval [0, k] where k + 1
// is a power of 2.
//
// It returns true if the conversion can be done, false otherwise.
// Additionally, the variable is returned in parameter V, the constant in K
// and usat is set to true if the conditional represents an unsigned
// saturation.
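//
// For example, the pair of selects implementing
//     x < -128 ? -128 : (x > 127 ? 127 : x)
// bounds x to [-128, 127] (k = 127, ~k = -128, k + 1 = 128 = 2^7) and can be
// replaced by a single SSAT; clamping to [0, 255] maps to USAT the same way.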
4642 static bool isSaturatingConditional(const SDValue
&Op
, SDValue
&V
,
4643 uint64_t &K
, bool &usat
) {
4644 SDValue LHS1
= Op
.getOperand(0);
4645 SDValue RHS1
= Op
.getOperand(1);
4646 SDValue TrueVal1
= Op
.getOperand(2);
4647 SDValue FalseVal1
= Op
.getOperand(3);
4648 ISD::CondCode CC1
= cast
<CondCodeSDNode
>(Op
.getOperand(4))->get();
4650 const SDValue Op2
= isa
<ConstantSDNode
>(TrueVal1
) ? FalseVal1
: TrueVal1
;
4651 if (Op2
.getOpcode() != ISD::SELECT_CC
)
4654 SDValue LHS2
= Op2
.getOperand(0);
4655 SDValue RHS2
= Op2
.getOperand(1);
4656 SDValue TrueVal2
= Op2
.getOperand(2);
4657 SDValue FalseVal2
= Op2
.getOperand(3);
4658 ISD::CondCode CC2
= cast
<CondCodeSDNode
>(Op2
.getOperand(4))->get();
4660 // Find out which are the constants and which are the variables
4661 // in each conditional
4662 SDValue
*K1
= isa
<ConstantSDNode
>(LHS1
) ? &LHS1
: isa
<ConstantSDNode
>(RHS1
)
4665 SDValue
*K2
= isa
<ConstantSDNode
>(LHS2
) ? &LHS2
: isa
<ConstantSDNode
>(RHS2
)
4668 SDValue K2Tmp
= isa
<ConstantSDNode
>(TrueVal2
) ? TrueVal2
: FalseVal2
;
4669 SDValue V1Tmp
= (K1
&& *K1
== LHS1
) ? RHS1
: LHS1
;
4670 SDValue V2Tmp
= (K2
&& *K2
== LHS2
) ? RHS2
: LHS2
;
4671 SDValue V2
= (K2Tmp
== TrueVal2
) ? FalseVal2
: TrueVal2
;
4673 // We must detect cases where the original operations worked with 16- or
4674 // 8-bit values. In such case, V2Tmp != V2 because the comparison operations
4675 // must work with sign-extended values but the select operations return
4676 // the original non-extended value.
4677 SDValue V2TmpReg
= V2Tmp
;
4678 if (V2Tmp
->getOpcode() == ISD::SIGN_EXTEND_INREG
)
4679 V2TmpReg
= V2Tmp
->getOperand(0);
4681 // Check that the registers and the constants have the correct values
4682 // in both conditionals
4683 if (!K1
|| !K2
|| *K1
== Op2
|| *K2
!= K2Tmp
|| V1Tmp
!= V2Tmp
||
4687 // Figure out which conditional is saturating the lower/upper bound.
4688 const SDValue
*LowerCheckOp
=
4689 isLowerSaturate(LHS1
, RHS1
, TrueVal1
, FalseVal1
, CC1
, *K1
)
4691 : isLowerSaturate(LHS2
, RHS2
, TrueVal2
, FalseVal2
, CC2
, *K2
)
4694 const SDValue
*UpperCheckOp
=
4695 isUpperSaturate(LHS1
, RHS1
, TrueVal1
, FalseVal1
, CC1
, *K1
)
4697 : isUpperSaturate(LHS2
, RHS2
, TrueVal2
, FalseVal2
, CC2
, *K2
)
4701 if (!UpperCheckOp
|| !LowerCheckOp
|| LowerCheckOp
== UpperCheckOp
)
4704 // Check that the constant in the lower-bound check is
4705 // the opposite of the constant in the upper-bound check
4706 // in 1's complement.
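  // For instance, 127 (0x0000007f) and -128 (0xffffff80) are bitwise
  // complements of each other, so PosVal is 127 and PosVal + 1 == 128 is a
  // power of two.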
4707 int64_t Val1
= cast
<ConstantSDNode
>(*K1
)->getSExtValue();
4708 int64_t Val2
= cast
<ConstantSDNode
>(*K2
)->getSExtValue();
4709 int64_t PosVal
= std::max(Val1
, Val2
);
4710 int64_t NegVal
= std::min(Val1
, Val2
);
4712 if (((Val1
> Val2
&& UpperCheckOp
== &Op
) ||
4713 (Val1
< Val2
&& UpperCheckOp
== &Op2
)) &&
4714 isPowerOf2_64(PosVal
+ 1)) {
4716 // Handle the difference between USAT (unsigned) and SSAT (signed) saturation
4719 else if (NegVal
== 0)
4725 K
= (uint64_t)PosVal
; // At this point, PosVal is guaranteed to be positive
4733 // Check if a condition of the type x < k ? k : x can be converted into a
4734 // bit operation instead of conditional moves.
4735 // Currently this is allowed given:
4736 // - The conditions and values match up
4737 // - k is 0 or -1 (all ones)
4738 // This function will not check the last condition, thats up to the caller
4739 // It returns true if the transformation can be made, and in such case
4740 // returns x in V, and k in SatK.
4741 static bool isLowerSaturatingConditional(const SDValue
&Op
, SDValue
&V
,
4744 SDValue LHS
= Op
.getOperand(0);
4745 SDValue RHS
= Op
.getOperand(1);
4746 ISD::CondCode CC
= cast
<CondCodeSDNode
>(Op
.getOperand(4))->get();
4747 SDValue TrueVal
= Op
.getOperand(2);
4748 SDValue FalseVal
= Op
.getOperand(3);
4750 SDValue
*K
= isa
<ConstantSDNode
>(LHS
) ? &LHS
: isa
<ConstantSDNode
>(RHS
)
4754 // No constant operation in comparison, early out
4758 SDValue KTmp
= isa
<ConstantSDNode
>(TrueVal
) ? TrueVal
: FalseVal
;
4759 V
= (KTmp
== TrueVal
) ? FalseVal
: TrueVal
;
4760 SDValue VTmp
= (K
&& *K
== LHS
) ? RHS
: LHS
;
4762 // If the constant on left and right side, or variable on left and right,
4763 // does not match, early out
4764 if (*K
!= KTmp
|| V
!= VTmp
)
4767 if (isLowerSaturate(LHS
, RHS
, TrueVal
, FalseVal
, CC
, *K
)) {
bool ARMTargetLowering::isUnsupportedFloatingType(EVT VT) const {
  if (VT == MVT::f32)
    return !Subtarget->hasVFP2Base();
  if (VT == MVT::f64)
    return !Subtarget->hasFP64();
  if (VT == MVT::f16)
    return !Subtarget->hasFullFP16();
  return false;
}
4785 SDValue
ARMTargetLowering::LowerSELECT_CC(SDValue Op
, SelectionDAG
&DAG
) const {
4786 EVT VT
= Op
.getValueType();
4789 // Try to convert two saturating conditional selects into a single SSAT
4791 uint64_t SatConstant
;
4793 if (((!Subtarget
->isThumb() && Subtarget
->hasV6Ops()) || Subtarget
->isThumb2()) &&
4794 isSaturatingConditional(Op
, SatValue
, SatConstant
, SatUSat
)) {
4796 return DAG
.getNode(ARMISD::USAT
, dl
, VT
, SatValue
,
4797 DAG
.getConstant(countTrailingOnes(SatConstant
), dl
, VT
));
4799 return DAG
.getNode(ARMISD::SSAT
, dl
, VT
, SatValue
,
4800 DAG
.getConstant(countTrailingOnes(SatConstant
), dl
, VT
));
4803 // Try to convert expressions of the form x < k ? k : x (and similar forms)
4804 // into more efficient bit operations, which is possible when k is 0 or -1
4805 // On ARM and Thumb-2 which have flexible operand 2 this will result in
4806 // single instructions. On Thumb the shift and the bit operation will be two
4808 // Only allow this transformation on full-width (32-bit) operations
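  // For example, "x < 0 ? 0 : x" becomes "x & ~(x >> 31)" and
  // "x < -1 ? -1 : x" becomes "x | (x >> 31)", using an arithmetic shift of
  // the sign bit as the mask.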
4809 SDValue LowerSatConstant
;
4810 if (VT
== MVT::i32
&&
4811 isLowerSaturatingConditional(Op
, SatValue
, LowerSatConstant
)) {
4812 SDValue ShiftV
= DAG
.getNode(ISD::SRA
, dl
, VT
, SatValue
,
4813 DAG
.getConstant(31, dl
, VT
));
4814 if (isNullConstant(LowerSatConstant
)) {
4815 SDValue NotShiftV
= DAG
.getNode(ISD::XOR
, dl
, VT
, ShiftV
,
4816 DAG
.getAllOnesConstant(dl
, VT
));
4817 return DAG
.getNode(ISD::AND
, dl
, VT
, SatValue
, NotShiftV
);
4818 } else if (isAllOnesConstant(LowerSatConstant
))
4819 return DAG
.getNode(ISD::OR
, dl
, VT
, SatValue
, ShiftV
);
4822 SDValue LHS
= Op
.getOperand(0);
4823 SDValue RHS
= Op
.getOperand(1);
4824 ISD::CondCode CC
= cast
<CondCodeSDNode
>(Op
.getOperand(4))->get();
4825 SDValue TrueVal
= Op
.getOperand(2);
4826 SDValue FalseVal
= Op
.getOperand(3);
4827 ConstantSDNode
*CFVal
= dyn_cast
<ConstantSDNode
>(FalseVal
);
4828 ConstantSDNode
*CTVal
= dyn_cast
<ConstantSDNode
>(TrueVal
);
4830 if (Subtarget
->hasV8_1MMainlineOps() && CFVal
&& CTVal
&&
4831 LHS
.getValueType() == MVT::i32
&& RHS
.getValueType() == MVT::i32
) {
4832 unsigned TVal
= CTVal
->getZExtValue();
4833 unsigned FVal
= CFVal
->getZExtValue();
4834 unsigned Opcode
= 0;
4836 if (TVal
== ~FVal
) {
4837 Opcode
= ARMISD::CSINV
;
4838 } else if (TVal
== ~FVal
+ 1) {
4839 Opcode
= ARMISD::CSNEG
;
4840 } else if (TVal
+ 1 == FVal
) {
4841 Opcode
= ARMISD::CSINC
;
4842 } else if (TVal
== FVal
+ 1) {
4843 Opcode
= ARMISD::CSINC
;
4844 std::swap(TrueVal
, FalseVal
);
4845 std::swap(TVal
, FVal
);
4846 CC
= ISD::getSetCCInverse(CC
, true);
4850 // If one of the constants is cheaper than another, materialise the
4851 // cheaper one and let the csel generate the other.
4852 if (Opcode
!= ARMISD::CSINC
&&
4853 HasLowerConstantMaterializationCost(FVal
, TVal
, Subtarget
)) {
4854 std::swap(TrueVal
, FalseVal
);
4855 std::swap(TVal
, FVal
);
4856 CC
= ISD::getSetCCInverse(CC
, true);
4859 // Attempt to use ZR checking TVal is 0, possibly inverting the condition
4860 // to get there. CSINC not is invertable like the other two (~(~a) == a,
4861 // -(-a) == a, but (a+1)+1 != a).
4862 if (FVal
== 0 && Opcode
!= ARMISD::CSINC
) {
4863 std::swap(TrueVal
, FalseVal
);
4864 std::swap(TVal
, FVal
);
4865 CC
= ISD::getSetCCInverse(CC
, true);
4868 TrueVal
= DAG
.getRegister(ARM::ZR
, MVT::i32
);
4870 // Drops F's value because we can get it by inverting/negating TVal.
4874 SDValue Cmp
= getARMCmp(LHS
, RHS
, CC
, ARMcc
, DAG
, dl
);
4875 EVT VT
= TrueVal
.getValueType();
4876 return DAG
.getNode(Opcode
, dl
, VT
, TrueVal
, FalseVal
, ARMcc
, Cmp
);
4880 if (isUnsupportedFloatingType(LHS
.getValueType())) {
4881 DAG
.getTargetLoweringInfo().softenSetCCOperands(
4882 DAG
, LHS
.getValueType(), LHS
, RHS
, CC
, dl
, LHS
, RHS
);
4884 // If softenSetCCOperands only returned one value, we should compare it to
4886 if (!RHS
.getNode()) {
4887 RHS
= DAG
.getConstant(0, dl
, LHS
.getValueType());
4892 if (LHS
.getValueType() == MVT::i32
) {
4893 // Try to generate VSEL on ARMv8.
4894 // The VSEL instruction can't use all the usual ARM condition
4895 // codes: it only has two bits to select the condition code, so it's
4896 // constrained to use only GE, GT, VS and EQ.
4898 // To implement all the various ISD::SETXXX opcodes, we sometimes need to
4899 // swap the operands of the previous compare instruction (effectively
4900 // inverting the compare condition, swapping 'less' and 'greater') and
4901 // sometimes need to swap the operands to the VSEL (which inverts the
4902 // condition in the sense of firing whenever the previous condition didn't)
4903 if (Subtarget
->hasFPARMv8Base() && (TrueVal
.getValueType() == MVT::f16
||
4904 TrueVal
.getValueType() == MVT::f32
||
4905 TrueVal
.getValueType() == MVT::f64
)) {
4906 ARMCC::CondCodes CondCode
= IntCCToARMCC(CC
);
4907 if (CondCode
== ARMCC::LT
|| CondCode
== ARMCC::LE
||
4908 CondCode
== ARMCC::VC
|| CondCode
== ARMCC::NE
) {
4909 CC
= ISD::getSetCCInverse(CC
, true);
4910 std::swap(TrueVal
, FalseVal
);
4915 SDValue CCR
= DAG
.getRegister(ARM::CPSR
, MVT::i32
);
4916 SDValue Cmp
= getARMCmp(LHS
, RHS
, CC
, ARMcc
, DAG
, dl
);
4917 // Choose GE over PL, which vsel does now support
4918 if (cast
<ConstantSDNode
>(ARMcc
)->getZExtValue() == ARMCC::PL
)
4919 ARMcc
= DAG
.getConstant(ARMCC::GE
, dl
, MVT::i32
);
4920 return getCMOV(dl
, VT
, FalseVal
, TrueVal
, ARMcc
, CCR
, Cmp
, DAG
);
4923 ARMCC::CondCodes CondCode
, CondCode2
;
4924 FPCCToARMCC(CC
, CondCode
, CondCode2
);
4926 // Normalize the fp compare. If RHS is zero we prefer to keep it there so we
4927 // match CMPFPw0 instead of CMPFP, though we don't do this for f16 because we
4928 // must use VSEL (limited condition codes), due to not having conditional f16
4930 if (Subtarget
->hasFPARMv8Base() &&
4931 !(isFloatingPointZero(RHS
) && TrueVal
.getValueType() != MVT::f16
) &&
4932 (TrueVal
.getValueType() == MVT::f16
||
4933 TrueVal
.getValueType() == MVT::f32
||
4934 TrueVal
.getValueType() == MVT::f64
)) {
4935 bool swpCmpOps
= false;
4936 bool swpVselOps
= false;
4937 checkVSELConstraints(CC
, CondCode
, swpCmpOps
, swpVselOps
);
4939 if (CondCode
== ARMCC::GT
|| CondCode
== ARMCC::GE
||
4940 CondCode
== ARMCC::VS
|| CondCode
== ARMCC::EQ
) {
4942 std::swap(LHS
, RHS
);
4944 std::swap(TrueVal
, FalseVal
);
4948 SDValue ARMcc
= DAG
.getConstant(CondCode
, dl
, MVT::i32
);
4949 SDValue Cmp
= getVFPCmp(LHS
, RHS
, DAG
, dl
);
4950 SDValue CCR
= DAG
.getRegister(ARM::CPSR
, MVT::i32
);
4951 SDValue Result
= getCMOV(dl
, VT
, FalseVal
, TrueVal
, ARMcc
, CCR
, Cmp
, DAG
);
4952 if (CondCode2
!= ARMCC::AL
) {
4953 SDValue ARMcc2
= DAG
.getConstant(CondCode2
, dl
, MVT::i32
);
4954 // FIXME: Needs another CMP because flag can have but one use.
4955 SDValue Cmp2
= getVFPCmp(LHS
, RHS
, DAG
, dl
);
4956 Result
= getCMOV(dl
, VT
, Result
, TrueVal
, ARMcc2
, CCR
, Cmp2
, DAG
);
4961 /// canChangeToInt - Given the fp compare operand, return true if it is suitable
4962 /// to morph to an integer compare sequence.
4963 static bool canChangeToInt(SDValue Op
, bool &SeenZero
,
4964 const ARMSubtarget
*Subtarget
) {
4965 SDNode
*N
= Op
.getNode();
4966 if (!N
->hasOneUse())
4967 // Otherwise it requires moving the value from fp to integer registers.
4969 if (!N
->getNumValues())
4971 EVT VT
= Op
.getValueType();
4972 if (VT
!= MVT::f32
&& !Subtarget
->isFPBrccSlow())
4973 // f32 case is generally profitable. f64 case only makes sense when vcmpe +
4974 // vmrs are very slow, e.g. cortex-a8.
4977 if (isFloatingPointZero(Op
)) {
4981 return ISD::isNormalLoad(N
);
4984 static SDValue
bitcastf32Toi32(SDValue Op
, SelectionDAG
&DAG
) {
4985 if (isFloatingPointZero(Op
))
4986 return DAG
.getConstant(0, SDLoc(Op
), MVT::i32
);
4988 if (LoadSDNode
*Ld
= dyn_cast
<LoadSDNode
>(Op
))
4989 return DAG
.getLoad(MVT::i32
, SDLoc(Op
), Ld
->getChain(), Ld
->getBasePtr(),
4990 Ld
->getPointerInfo(), Ld
->getAlignment(),
4991 Ld
->getMemOperand()->getFlags());
4993 llvm_unreachable("Unknown VFP cmp argument!");
4996 static void expandf64Toi32(SDValue Op
, SelectionDAG
&DAG
,
4997 SDValue
&RetVal1
, SDValue
&RetVal2
) {
5000 if (isFloatingPointZero(Op
)) {
5001 RetVal1
= DAG
.getConstant(0, dl
, MVT::i32
);
5002 RetVal2
= DAG
.getConstant(0, dl
, MVT::i32
);
5006 if (LoadSDNode
*Ld
= dyn_cast
<LoadSDNode
>(Op
)) {
5007 SDValue Ptr
= Ld
->getBasePtr();
5009 DAG
.getLoad(MVT::i32
, dl
, Ld
->getChain(), Ptr
, Ld
->getPointerInfo(),
5010 Ld
->getAlignment(), Ld
->getMemOperand()->getFlags());
5012 EVT PtrType
= Ptr
.getValueType();
5013 unsigned NewAlign
= MinAlign(Ld
->getAlignment(), 4);
5014 SDValue NewPtr
= DAG
.getNode(ISD::ADD
, dl
,
5015 PtrType
, Ptr
, DAG
.getConstant(4, dl
, PtrType
));
5016 RetVal2
= DAG
.getLoad(MVT::i32
, dl
, Ld
->getChain(), NewPtr
,
5017 Ld
->getPointerInfo().getWithOffset(4), NewAlign
,
5018 Ld
->getMemOperand()->getFlags());
5022 llvm_unreachable("Unknown VFP cmp argument!");
5025 /// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some
5026 /// f32 and even f64 comparisons to integer ones.
5028 ARMTargetLowering::OptimizeVFPBrcond(SDValue Op
, SelectionDAG
&DAG
) const {
5029 SDValue Chain
= Op
.getOperand(0);
5030 ISD::CondCode CC
= cast
<CondCodeSDNode
>(Op
.getOperand(1))->get();
5031 SDValue LHS
= Op
.getOperand(2);
5032 SDValue RHS
= Op
.getOperand(3);
5033 SDValue Dest
= Op
.getOperand(4);
5036 bool LHSSeenZero
= false;
5037 bool LHSOk
= canChangeToInt(LHS
, LHSSeenZero
, Subtarget
);
5038 bool RHSSeenZero
= false;
5039 bool RHSOk
= canChangeToInt(RHS
, RHSSeenZero
, Subtarget
);
5040 if (LHSOk
&& RHSOk
&& (LHSSeenZero
|| RHSSeenZero
)) {
5041 // If unsafe fp math optimization is enabled and there are no other uses of
5042 // the CMP operands, and the condition code is EQ or NE, we can optimize it
5043 // to an integer comparison.
5044 if (CC
== ISD::SETOEQ
)
5046 else if (CC
== ISD::SETUNE
)
5049 SDValue Mask
= DAG
.getConstant(0x7fffffff, dl
, MVT::i32
);
5051 if (LHS
.getValueType() == MVT::f32
) {
5052 LHS
= DAG
.getNode(ISD::AND
, dl
, MVT::i32
,
5053 bitcastf32Toi32(LHS
, DAG
), Mask
);
5054 RHS
= DAG
.getNode(ISD::AND
, dl
, MVT::i32
,
5055 bitcastf32Toi32(RHS
, DAG
), Mask
);
5056 SDValue Cmp
= getARMCmp(LHS
, RHS
, CC
, ARMcc
, DAG
, dl
);
5057 SDValue CCR
= DAG
.getRegister(ARM::CPSR
, MVT::i32
);
5058 return DAG
.getNode(ARMISD::BRCOND
, dl
, MVT::Other
,
5059 Chain
, Dest
, ARMcc
, CCR
, Cmp
);
5064 expandf64Toi32(LHS
, DAG
, LHS1
, LHS2
);
5065 expandf64Toi32(RHS
, DAG
, RHS1
, RHS2
);
5066 LHS2
= DAG
.getNode(ISD::AND
, dl
, MVT::i32
, LHS2
, Mask
);
5067 RHS2
= DAG
.getNode(ISD::AND
, dl
, MVT::i32
, RHS2
, Mask
);
5068 ARMCC::CondCodes CondCode
= IntCCToARMCC(CC
);
5069 ARMcc
= DAG
.getConstant(CondCode
, dl
, MVT::i32
);
5070 SDVTList VTList
= DAG
.getVTList(MVT::Other
, MVT::Glue
);
5071 SDValue Ops
[] = { Chain
, ARMcc
, LHS1
, LHS2
, RHS1
, RHS2
, Dest
};
5072 return DAG
.getNode(ARMISD::BCC_i64
, dl
, VTList
, Ops
);
5078 SDValue
ARMTargetLowering::LowerBRCOND(SDValue Op
, SelectionDAG
&DAG
) const {
5079 SDValue Chain
= Op
.getOperand(0);
5080 SDValue Cond
= Op
.getOperand(1);
5081 SDValue Dest
= Op
.getOperand(2);
5084 // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch
5086 unsigned Opc
= Cond
.getOpcode();
5087 bool OptimizeMul
= (Opc
== ISD::SMULO
|| Opc
== ISD::UMULO
) &&
5088 !Subtarget
->isThumb1Only();
5089 if (Cond
.getResNo() == 1 &&
5090 (Opc
== ISD::SADDO
|| Opc
== ISD::UADDO
|| Opc
== ISD::SSUBO
||
5091 Opc
== ISD::USUBO
|| OptimizeMul
)) {
5092 // Only lower legal XALUO ops.
5093 if (!DAG
.getTargetLoweringInfo().isTypeLegal(Cond
->getValueType(0)))
5096 // The actual operation with overflow check.
5097 SDValue Value
, OverflowCmp
;
5099 std::tie(Value
, OverflowCmp
) = getARMXALUOOp(Cond
, DAG
, ARMcc
);
5101 // Reverse the condition code.
5102 ARMCC::CondCodes CondCode
=
5103 (ARMCC::CondCodes
)cast
<const ConstantSDNode
>(ARMcc
)->getZExtValue();
5104 CondCode
= ARMCC::getOppositeCondition(CondCode
);
5105 ARMcc
= DAG
.getConstant(CondCode
, SDLoc(ARMcc
), MVT::i32
);
5106 SDValue CCR
= DAG
.getRegister(ARM::CPSR
, MVT::i32
);
5108 return DAG
.getNode(ARMISD::BRCOND
, dl
, MVT::Other
, Chain
, Dest
, ARMcc
, CCR
,
5115 SDValue
ARMTargetLowering::LowerBR_CC(SDValue Op
, SelectionDAG
&DAG
) const {
5116 SDValue Chain
= Op
.getOperand(0);
5117 ISD::CondCode CC
= cast
<CondCodeSDNode
>(Op
.getOperand(1))->get();
5118 SDValue LHS
= Op
.getOperand(2);
5119 SDValue RHS
= Op
.getOperand(3);
5120 SDValue Dest
= Op
.getOperand(4);
5123 if (isUnsupportedFloatingType(LHS
.getValueType())) {
5124 DAG
.getTargetLoweringInfo().softenSetCCOperands(
5125 DAG
, LHS
.getValueType(), LHS
, RHS
, CC
, dl
, LHS
, RHS
);
5127 // If softenSetCCOperands only returned one value, we should compare it to
5129 if (!RHS
.getNode()) {
5130 RHS
= DAG
.getConstant(0, dl
, LHS
.getValueType());
5135 // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch
5137 unsigned Opc
= LHS
.getOpcode();
5138 bool OptimizeMul
= (Opc
== ISD::SMULO
|| Opc
== ISD::UMULO
) &&
5139 !Subtarget
->isThumb1Only();
5140 if (LHS
.getResNo() == 1 && (isOneConstant(RHS
) || isNullConstant(RHS
)) &&
5141 (Opc
== ISD::SADDO
|| Opc
== ISD::UADDO
|| Opc
== ISD::SSUBO
||
5142 Opc
== ISD::USUBO
|| OptimizeMul
) &&
5143 (CC
== ISD::SETEQ
|| CC
== ISD::SETNE
)) {
5144 // Only lower legal XALUO ops.
5145 if (!DAG
.getTargetLoweringInfo().isTypeLegal(LHS
->getValueType(0)))
5148 // The actual operation with overflow check.
5149 SDValue Value
, OverflowCmp
;
5151 std::tie(Value
, OverflowCmp
) = getARMXALUOOp(LHS
.getValue(0), DAG
, ARMcc
);
5153 if ((CC
== ISD::SETNE
) != isOneConstant(RHS
)) {
5154 // Reverse the condition code.
5155 ARMCC::CondCodes CondCode
=
5156 (ARMCC::CondCodes
)cast
<const ConstantSDNode
>(ARMcc
)->getZExtValue();
5157 CondCode
= ARMCC::getOppositeCondition(CondCode
);
5158 ARMcc
= DAG
.getConstant(CondCode
, SDLoc(ARMcc
), MVT::i32
);
5160 SDValue CCR
= DAG
.getRegister(ARM::CPSR
, MVT::i32
);
5162 return DAG
.getNode(ARMISD::BRCOND
, dl
, MVT::Other
, Chain
, Dest
, ARMcc
, CCR
,
5166 if (LHS
.getValueType() == MVT::i32
) {
5168 SDValue Cmp
= getARMCmp(LHS
, RHS
, CC
, ARMcc
, DAG
, dl
);
5169 SDValue CCR
= DAG
.getRegister(ARM::CPSR
, MVT::i32
);
5170 return DAG
.getNode(ARMISD::BRCOND
, dl
, MVT::Other
,
5171 Chain
, Dest
, ARMcc
, CCR
, Cmp
);
5174 if (getTargetMachine().Options
.UnsafeFPMath
&&
5175 (CC
== ISD::SETEQ
|| CC
== ISD::SETOEQ
||
5176 CC
== ISD::SETNE
|| CC
== ISD::SETUNE
)) {
5177 if (SDValue Result
= OptimizeVFPBrcond(Op
, DAG
))
5181 ARMCC::CondCodes CondCode
, CondCode2
;
5182 FPCCToARMCC(CC
, CondCode
, CondCode2
);
5184 SDValue ARMcc
= DAG
.getConstant(CondCode
, dl
, MVT::i32
);
5185 SDValue Cmp
= getVFPCmp(LHS
, RHS
, DAG
, dl
);
5186 SDValue CCR
= DAG
.getRegister(ARM::CPSR
, MVT::i32
);
5187 SDVTList VTList
= DAG
.getVTList(MVT::Other
, MVT::Glue
);
5188 SDValue Ops
[] = { Chain
, Dest
, ARMcc
, CCR
, Cmp
};
5189 SDValue Res
= DAG
.getNode(ARMISD::BRCOND
, dl
, VTList
, Ops
);
5190 if (CondCode2
!= ARMCC::AL
) {
5191 ARMcc
= DAG
.getConstant(CondCode2
, dl
, MVT::i32
);
5192 SDValue Ops
[] = { Res
, Dest
, ARMcc
, CCR
, Res
.getValue(1) };
5193 Res
= DAG
.getNode(ARMISD::BRCOND
, dl
, VTList
, Ops
);
5198 SDValue
ARMTargetLowering::LowerBR_JT(SDValue Op
, SelectionDAG
&DAG
) const {
5199 SDValue Chain
= Op
.getOperand(0);
5200 SDValue Table
= Op
.getOperand(1);
5201 SDValue Index
= Op
.getOperand(2);
5204 EVT PTy
= getPointerTy(DAG
.getDataLayout());
5205 JumpTableSDNode
*JT
= cast
<JumpTableSDNode
>(Table
);
5206 SDValue JTI
= DAG
.getTargetJumpTable(JT
->getIndex(), PTy
);
5207 Table
= DAG
.getNode(ARMISD::WrapperJT
, dl
, MVT::i32
, JTI
);
5208 Index
= DAG
.getNode(ISD::MUL
, dl
, PTy
, Index
, DAG
.getConstant(4, dl
, PTy
));
5209 SDValue Addr
= DAG
.getNode(ISD::ADD
, dl
, PTy
, Table
, Index
);
5210 if (Subtarget
->isThumb2() || (Subtarget
->hasV8MBaselineOps() && Subtarget
->isThumb())) {
5211 // Thumb2 and ARMv8-M use a two-level jump. That is, it jumps into the jump table
5212 // which does another jump to the destination. This also makes it easier
5213 // to translate it to TBB / TBH later (Thumb2 only).
5214 // FIXME: This might not work if the function is extremely large.
5215 return DAG
.getNode(ARMISD::BR2_JT
, dl
, MVT::Other
, Chain
,
5216 Addr
, Op
.getOperand(2), JTI
);
5218 if (isPositionIndependent() || Subtarget
->isROPI()) {
5220 DAG
.getLoad((EVT
)MVT::i32
, dl
, Chain
, Addr
,
5221 MachinePointerInfo::getJumpTable(DAG
.getMachineFunction()));
5222 Chain
= Addr
.getValue(1);
5223 Addr
= DAG
.getNode(ISD::ADD
, dl
, PTy
, Table
, Addr
);
5224 return DAG
.getNode(ARMISD::BR_JT
, dl
, MVT::Other
, Chain
, Addr
, JTI
);
5227 DAG
.getLoad(PTy
, dl
, Chain
, Addr
,
5228 MachinePointerInfo::getJumpTable(DAG
.getMachineFunction()));
5229 Chain
= Addr
.getValue(1);
5230 return DAG
.getNode(ARMISD::BR_JT
, dl
, MVT::Other
, Chain
, Addr
, JTI
);
5234 static SDValue
LowerVectorFP_TO_INT(SDValue Op
, SelectionDAG
&DAG
) {
5235 EVT VT
= Op
.getValueType();
5238 if (Op
.getValueType().getVectorElementType() == MVT::i32
) {
5239 if (Op
.getOperand(0).getValueType().getVectorElementType() == MVT::f32
)
5241 return DAG
.UnrollVectorOp(Op
.getNode());
5244 const bool HasFullFP16
=
5245 static_cast<const ARMSubtarget
&>(DAG
.getSubtarget()).hasFullFP16();
5248 const EVT OpTy
= Op
.getOperand(0).getValueType();
5249 if (OpTy
== MVT::v4f32
)
5251 else if (OpTy
== MVT::v4f16
&& HasFullFP16
)
5253 else if (OpTy
== MVT::v8f16
&& HasFullFP16
)
5256 llvm_unreachable("Invalid type for custom lowering!");
5258 if (VT
!= MVT::v4i16
&& VT
!= MVT::v8i16
)
5259 return DAG
.UnrollVectorOp(Op
.getNode());
5261 Op
= DAG
.getNode(Op
.getOpcode(), dl
, NewTy
, Op
.getOperand(0));
5262 return DAG
.getNode(ISD::TRUNCATE
, dl
, VT
, Op
);
5265 SDValue
ARMTargetLowering::LowerFP_TO_INT(SDValue Op
, SelectionDAG
&DAG
) const {
5266 EVT VT
= Op
.getValueType();
5268 return LowerVectorFP_TO_INT(Op
, DAG
);
5269 if (isUnsupportedFloatingType(Op
.getOperand(0).getValueType())) {
5271 if (Op
.getOpcode() == ISD::FP_TO_SINT
)
5272 LC
= RTLIB::getFPTOSINT(Op
.getOperand(0).getValueType(),
5275 LC
= RTLIB::getFPTOUINT(Op
.getOperand(0).getValueType(),
5277 MakeLibCallOptions CallOptions
;
5278 return makeLibCall(DAG
, LC
, Op
.getValueType(), Op
.getOperand(0),
5279 CallOptions
, SDLoc(Op
)).first
;
5285 static SDValue
LowerVectorINT_TO_FP(SDValue Op
, SelectionDAG
&DAG
) {
5286 EVT VT
= Op
.getValueType();
5289 if (Op
.getOperand(0).getValueType().getVectorElementType() == MVT::i32
) {
5290 if (VT
.getVectorElementType() == MVT::f32
)
5292 return DAG
.UnrollVectorOp(Op
.getNode());
5295 assert((Op
.getOperand(0).getValueType() == MVT::v4i16
||
5296 Op
.getOperand(0).getValueType() == MVT::v8i16
) &&
5297 "Invalid type for custom lowering!");
5299 const bool HasFullFP16
=
5300 static_cast<const ARMSubtarget
&>(DAG
.getSubtarget()).hasFullFP16();
5303 if (VT
== MVT::v4f32
)
5304 DestVecType
= MVT::v4i32
;
5305 else if (VT
== MVT::v4f16
&& HasFullFP16
)
5306 DestVecType
= MVT::v4i16
;
5307 else if (VT
== MVT::v8f16
&& HasFullFP16
)
5308 DestVecType
= MVT::v8i16
;
5310 return DAG
.UnrollVectorOp(Op
.getNode());
5314 switch (Op
.getOpcode()) {
5315 default: llvm_unreachable("Invalid opcode!");
5316 case ISD::SINT_TO_FP
:
5317 CastOpc
= ISD::SIGN_EXTEND
;
5318 Opc
= ISD::SINT_TO_FP
;
5320 case ISD::UINT_TO_FP
:
5321 CastOpc
= ISD::ZERO_EXTEND
;
5322 Opc
= ISD::UINT_TO_FP
;
5326 Op
= DAG
.getNode(CastOpc
, dl
, DestVecType
, Op
.getOperand(0));
5327 return DAG
.getNode(Opc
, dl
, VT
, Op
);
5330 SDValue
ARMTargetLowering::LowerINT_TO_FP(SDValue Op
, SelectionDAG
&DAG
) const {
5331 EVT VT
= Op
.getValueType();
5333 return LowerVectorINT_TO_FP(Op
, DAG
);
5334 if (isUnsupportedFloatingType(VT
)) {
5336 if (Op
.getOpcode() == ISD::SINT_TO_FP
)
5337 LC
= RTLIB::getSINTTOFP(Op
.getOperand(0).getValueType(),
5340 LC
= RTLIB::getUINTTOFP(Op
.getOperand(0).getValueType(),
5342 MakeLibCallOptions CallOptions
;
5343 return makeLibCall(DAG
, LC
, Op
.getValueType(), Op
.getOperand(0),
5344 CallOptions
, SDLoc(Op
)).first
;
5350 SDValue
ARMTargetLowering::LowerFCOPYSIGN(SDValue Op
, SelectionDAG
&DAG
) const {
5351 // Implement fcopysign with a fabs and a conditional fneg.
5352 SDValue Tmp0
= Op
.getOperand(0);
5353 SDValue Tmp1
= Op
.getOperand(1);
5355 EVT VT
= Op
.getValueType();
5356 EVT SrcVT
= Tmp1
.getValueType();
5357 bool InGPR
= Tmp0
.getOpcode() == ISD::BITCAST
||
5358 Tmp0
.getOpcode() == ARMISD::VMOVDRR
;
5359 bool UseNEON
= !InGPR
&& Subtarget
->hasNEON();
5362 // Use VBSL to copy the sign bit.
5363 unsigned EncodedVal
= ARM_AM::createVMOVModImm(0x6, 0x80);
5364 SDValue Mask
= DAG
.getNode(ARMISD::VMOVIMM
, dl
, MVT::v2i32
,
5365 DAG
.getTargetConstant(EncodedVal
, dl
, MVT::i32
));
5366 EVT OpVT
= (VT
== MVT::f32
) ? MVT::v2i32
: MVT::v1i64
;
5368 Mask
= DAG
.getNode(ARMISD::VSHLIMM
, dl
, OpVT
,
5369 DAG
.getNode(ISD::BITCAST
, dl
, OpVT
, Mask
),
5370 DAG
.getConstant(32, dl
, MVT::i32
));
5371 else /*if (VT == MVT::f32)*/
5372 Tmp0
= DAG
.getNode(ISD::SCALAR_TO_VECTOR
, dl
, MVT::v2f32
, Tmp0
);
5373 if (SrcVT
== MVT::f32
) {
5374 Tmp1
= DAG
.getNode(ISD::SCALAR_TO_VECTOR
, dl
, MVT::v2f32
, Tmp1
);
5376 Tmp1
= DAG
.getNode(ARMISD::VSHLIMM
, dl
, OpVT
,
5377 DAG
.getNode(ISD::BITCAST
, dl
, OpVT
, Tmp1
),
5378 DAG
.getConstant(32, dl
, MVT::i32
));
5379 } else if (VT
== MVT::f32
)
5380 Tmp1
= DAG
.getNode(ARMISD::VSHRuIMM
, dl
, MVT::v1i64
,
5381 DAG
.getNode(ISD::BITCAST
, dl
, MVT::v1i64
, Tmp1
),
5382 DAG
.getConstant(32, dl
, MVT::i32
));
5383 Tmp0
= DAG
.getNode(ISD::BITCAST
, dl
, OpVT
, Tmp0
);
5384 Tmp1
= DAG
.getNode(ISD::BITCAST
, dl
, OpVT
, Tmp1
);
5386 SDValue AllOnes
= DAG
.getTargetConstant(ARM_AM::createVMOVModImm(0xe, 0xff),
5388 AllOnes
= DAG
.getNode(ARMISD::VMOVIMM
, dl
, MVT::v8i8
, AllOnes
);
5389 SDValue MaskNot
= DAG
.getNode(ISD::XOR
, dl
, OpVT
, Mask
,
5390 DAG
.getNode(ISD::BITCAST
, dl
, OpVT
, AllOnes
));
5392 SDValue Res
= DAG
.getNode(ISD::OR
, dl
, OpVT
,
5393 DAG
.getNode(ISD::AND
, dl
, OpVT
, Tmp1
, Mask
),
5394 DAG
.getNode(ISD::AND
, dl
, OpVT
, Tmp0
, MaskNot
));
5395 if (VT
== MVT::f32
) {
5396 Res
= DAG
.getNode(ISD::BITCAST
, dl
, MVT::v2f32
, Res
);
5397 Res
= DAG
.getNode(ISD::EXTRACT_VECTOR_ELT
, dl
, MVT::f32
, Res
,
5398 DAG
.getConstant(0, dl
, MVT::i32
));
5400 Res
= DAG
.getNode(ISD::BITCAST
, dl
, MVT::f64
, Res
);
5406 // Bitcast operand 1 to i32.
5407 if (SrcVT
== MVT::f64
)
5408 Tmp1
= DAG
.getNode(ARMISD::VMOVRRD
, dl
, DAG
.getVTList(MVT::i32
, MVT::i32
),
5410 Tmp1
= DAG
.getNode(ISD::BITCAST
, dl
, MVT::i32
, Tmp1
);
5412 // Or in the signbit with integer operations.
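  // i.e. result = (Tmp0 & 0x7fffffff) | (Tmp1 & 0x80000000): the magnitude
  // comes from operand 0 and the sign bit from operand 1.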
5413 SDValue Mask1
= DAG
.getConstant(0x80000000, dl
, MVT::i32
);
5414 SDValue Mask2
= DAG
.getConstant(0x7fffffff, dl
, MVT::i32
);
5415 Tmp1
= DAG
.getNode(ISD::AND
, dl
, MVT::i32
, Tmp1
, Mask1
);
5416 if (VT
== MVT::f32
) {
5417 Tmp0
= DAG
.getNode(ISD::AND
, dl
, MVT::i32
,
5418 DAG
.getNode(ISD::BITCAST
, dl
, MVT::i32
, Tmp0
), Mask2
);
5419 return DAG
.getNode(ISD::BITCAST
, dl
, MVT::f32
,
5420 DAG
.getNode(ISD::OR
, dl
, MVT::i32
, Tmp0
, Tmp1
));
5423 // f64: Or the high part with signbit and then combine two parts.
5424 Tmp0
= DAG
.getNode(ARMISD::VMOVRRD
, dl
, DAG
.getVTList(MVT::i32
, MVT::i32
),
5426 SDValue Lo
= Tmp0
.getValue(0);
5427 SDValue Hi
= DAG
.getNode(ISD::AND
, dl
, MVT::i32
, Tmp0
.getValue(1), Mask2
);
5428 Hi
= DAG
.getNode(ISD::OR
, dl
, MVT::i32
, Hi
, Tmp1
);
5429 return DAG
.getNode(ARMISD::VMOVDRR
, dl
, MVT::f64
, Lo
, Hi
);
SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op,
                                           SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  if (Depth) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset = DAG.getConstant(4, dl, MVT::i32);
    return DAG.getLoad(VT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
                       MachinePointerInfo());
  }

  // Return LR, which contains the return address. Mark it an implicit live-in.
  unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32));
  return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
}
SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
  const ARMBaseRegisterInfo &ARI =
      *static_cast<const ARMBaseRegisterInfo*>(RegInfo);
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc dl(Op);  // FIXME probably not meaningful
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  Register FrameReg = ARI.getFrameRegister(MF);
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
  while (Depth--)
    FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
                            MachinePointerInfo());
  return FrameAddr;
}
5474 // FIXME? Maybe this could be a TableGen attribute on some registers and
5475 // this table could be generated automatically from RegInfo.
5476 Register
ARMTargetLowering::getRegisterByName(const char* RegName
, EVT VT
,
5477 const MachineFunction
&MF
) const {
5478 Register Reg
= StringSwitch
<unsigned>(RegName
)
5479 .Case("sp", ARM::SP
)
5483 report_fatal_error(Twine("Invalid register name \""
5484 + StringRef(RegName
) + "\"."));
5487 // Result is 64 bit value so split into two 32 bit values and return as a
5489 static void ExpandREAD_REGISTER(SDNode
*N
, SmallVectorImpl
<SDValue
> &Results
,
5490 SelectionDAG
&DAG
) {
5493 // This function is only supposed to be called for i64 type destination.
5494 assert(N
->getValueType(0) == MVT::i64
5495 && "ExpandREAD_REGISTER called for non-i64 type result.");
5497 SDValue Read
= DAG
.getNode(ISD::READ_REGISTER
, DL
,
5498 DAG
.getVTList(MVT::i32
, MVT::i32
, MVT::Other
),
5502 Results
.push_back(DAG
.getNode(ISD::BUILD_PAIR
, DL
, MVT::i64
, Read
.getValue(0),
5504 Results
.push_back(Read
.getOperand(0));
5507 /// \p BC is a bitcast that is about to be turned into a VMOVDRR.
5508 /// When \p DstVT, the destination type of \p BC, is on the vector
5509 /// register bank and the source of bitcast, \p Op, operates on the same bank,
5510 /// it might be possible to combine them, such that everything stays on the
5511 /// vector register bank.
5512 /// \p return The node that would replace \p BT, if the combine
5514 static SDValue
CombineVMOVDRRCandidateWithVecOp(const SDNode
*BC
,
5515 SelectionDAG
&DAG
) {
5516 SDValue Op
= BC
->getOperand(0);
5517 EVT DstVT
= BC
->getValueType(0);
5519 // The only vector instruction that can produce a scalar (remember,
5520 // since the bitcast was about to be turned into VMOVDRR, the source
5521 // type is i64) from a vector is EXTRACT_VECTOR_ELT.
5522 // Moreover, we can do this combine only if there is one use.
5523 // Finally, if the destination type is not a vector, there is not
5524 // much point on forcing everything on the vector bank.
5525 if (!DstVT
.isVector() || Op
.getOpcode() != ISD::EXTRACT_VECTOR_ELT
||
5529 // If the index is not constant, we will introduce an additional
5530 // multiply that will stick.
5531 // Give up in that case.
5532 ConstantSDNode
*Index
= dyn_cast
<ConstantSDNode
>(Op
.getOperand(1));
5535 unsigned DstNumElt
= DstVT
.getVectorNumElements();
5537 // Compute the new index.
5538 const APInt
&APIntIndex
= Index
->getAPIntValue();
5539 APInt
NewIndex(APIntIndex
.getBitWidth(), DstNumElt
);
5540 NewIndex
*= APIntIndex
;
5541 // Check if the new constant index fits into i32.
5542 if (NewIndex
.getBitWidth() > 32)
5545 // vMTy bitcast(i64 extractelt vNi64 src, i32 index) ->
5546 // vMTy extractsubvector vNxMTy (bitcast vNi64 src), i32 index*M)
5548 SDValue ExtractSrc
= Op
.getOperand(0);
5549 EVT VecVT
= EVT::getVectorVT(
5550 *DAG
.getContext(), DstVT
.getScalarType(),
5551 ExtractSrc
.getValueType().getVectorNumElements() * DstNumElt
);
5552 SDValue BitCast
= DAG
.getNode(ISD::BITCAST
, dl
, VecVT
, ExtractSrc
);
5553 return DAG
.getNode(ISD::EXTRACT_SUBVECTOR
, dl
, DstVT
, BitCast
,
5554 DAG
.getConstant(NewIndex
.getZExtValue(), dl
, MVT::i32
));
5557 /// ExpandBITCAST - If the target supports VFP, this function is called to
5558 /// expand a bit convert where either the source or destination type is i64 to
5559 /// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64
5560 /// operand type is illegal (e.g., v2f32 for a target that doesn't support
5561 /// vectors), since the legalizer won't know what to do with that.
5562 static SDValue
ExpandBITCAST(SDNode
*N
, SelectionDAG
&DAG
,
5563 const ARMSubtarget
*Subtarget
) {
5564 const TargetLowering
&TLI
= DAG
.getTargetLoweringInfo();
5566 SDValue Op
= N
->getOperand(0);
5568 // This function is only supposed to be called for i64 types, either as the
5569 // source or destination of the bit convert.
5570 EVT SrcVT
= Op
.getValueType();
5571 EVT DstVT
= N
->getValueType(0);
5572 const bool HasFullFP16
= Subtarget
->hasFullFP16();
5574 if (SrcVT
== MVT::f32
&& DstVT
== MVT::i32
) {
5575 // FullFP16: half values are passed in S-registers, and we don't
5576 // need any of the bitcast and moves:
5578 // t2: f32,ch = CopyFromReg t0, Register:f32 %0
5579 // t5: i32 = bitcast t2
5580 // t18: f16 = ARMISD::VMOVhr t5
5581 if (Op
.getOpcode() != ISD::CopyFromReg
||
5582 Op
.getValueType() != MVT::f32
)
5585 auto Move
= N
->use_begin();
5586 if (Move
->getOpcode() != ARMISD::VMOVhr
)
5589 SDValue Ops
[] = { Op
.getOperand(0), Op
.getOperand(1) };
5590 SDValue Copy
= DAG
.getNode(ISD::CopyFromReg
, SDLoc(Op
), MVT::f16
, Ops
);
5591 DAG
.ReplaceAllUsesWith(*Move
, &Copy
);
5595 if (SrcVT
== MVT::i16
&& DstVT
== MVT::f16
) {
5598 // SoftFP: read half-precision arguments:
5601 // t7: i16 = truncate t2 <~~~~ Op
5602 // t8: f16 = bitcast t7 <~~~~ N
5604 if (Op
.getOperand(0).getValueType() == MVT::i32
)
5605 return DAG
.getNode(ARMISD::VMOVhr
, SDLoc(Op
),
5606 MVT::f16
, Op
.getOperand(0));
5611 // Half-precision return values
5612 if (SrcVT
== MVT::f16
&& DstVT
== MVT::i16
) {
5616 // t11: f16 = fadd t8, t10
5617 // t12: i16 = bitcast t11 <~~~ SDNode N
5618 // t13: i32 = zero_extend t12
5619 // t16: ch,glue = CopyToReg t0, Register:i32 %r0, t13
5620 // t17: ch = ARMISD::RET_FLAG t16, Register:i32 %r0, t16:1
5622 // transform this into:
5624 // t20: i32 = ARMISD::VMOVrh t11
5625 // t16: ch,glue = CopyToReg t0, Register:i32 %r0, t20
5627 auto ZeroExtend
= N
->use_begin();
5628 if (N
->use_size() != 1 || ZeroExtend
->getOpcode() != ISD::ZERO_EXTEND
||
5629 ZeroExtend
->getValueType(0) != MVT::i32
)
5632 auto Copy
= ZeroExtend
->use_begin();
5633 if (Copy
->getOpcode() == ISD::CopyToReg
&&
5634 Copy
->use_begin()->getOpcode() == ARMISD::RET_FLAG
) {
5635 SDValue Cvt
= DAG
.getNode(ARMISD::VMOVrh
, SDLoc(Op
), MVT::i32
, Op
);
5636 DAG
.ReplaceAllUsesWith(*ZeroExtend
, &Cvt
);
5642 if (!(SrcVT
== MVT::i64
|| DstVT
== MVT::i64
))
5645 // Turn i64->f64 into VMOVDRR.
5646 if (SrcVT
== MVT::i64
&& TLI
.isTypeLegal(DstVT
)) {
5647 // Do not force values to GPRs (this is what VMOVDRR does for the inputs)
5648 // if we can combine the bitcast with its source.
5649 if (SDValue Val
= CombineVMOVDRRCandidateWithVecOp(N
, DAG
))
5652 SDValue Lo
= DAG
.getNode(ISD::EXTRACT_ELEMENT
, dl
, MVT::i32
, Op
,
5653 DAG
.getConstant(0, dl
, MVT::i32
));
5654 SDValue Hi
= DAG
.getNode(ISD::EXTRACT_ELEMENT
, dl
, MVT::i32
, Op
,
5655 DAG
.getConstant(1, dl
, MVT::i32
));
5656 return DAG
.getNode(ISD::BITCAST
, dl
, DstVT
,
5657 DAG
.getNode(ARMISD::VMOVDRR
, dl
, MVT::f64
, Lo
, Hi
));
5660 // Turn f64->i64 into VMOVRRD.
5661 if (DstVT
== MVT::i64
&& TLI
.isTypeLegal(SrcVT
)) {
5663 if (DAG
.getDataLayout().isBigEndian() && SrcVT
.isVector() &&
5664 SrcVT
.getVectorNumElements() > 1)
5665 Cvt
= DAG
.getNode(ARMISD::VMOVRRD
, dl
,
5666 DAG
.getVTList(MVT::i32
, MVT::i32
),
5667 DAG
.getNode(ARMISD::VREV64
, dl
, SrcVT
, Op
));
5669 Cvt
= DAG
.getNode(ARMISD::VMOVRRD
, dl
,
5670 DAG
.getVTList(MVT::i32
, MVT::i32
), Op
);
5671 // Merge the pieces into a single i64 value.
5672 return DAG
.getNode(ISD::BUILD_PAIR
, dl
, MVT::i64
, Cvt
, Cvt
.getValue(1));
/// getZeroVector - Returns a vector of specified type with all zero elements.
/// Zero vectors are used to represent vector negation and in those cases
/// will be implemented with the NEON VNEG instruction. However, VNEG does
/// not support i64 elements, so sometimes the zero vectors will need to be
/// explicitly constructed. Regardless, use a canonical VMOV to create the
/// zero vector.
static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) {
  assert(VT.isVector() && "Expected a vector type");
  // The canonical modified immediate encoding of a zero vector is....0!
  SDValue EncodedVal = DAG.getTargetConstant(0, dl, MVT::i32);
  EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
  SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal);
  return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
}
5693 /// LowerShiftRightParts - Lower SRA_PARTS, which returns two
5694 /// i32 values and take a 2 x i32 value to shift plus a shift amount.
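/// For a shift amount n < 32 the result is
///   Lo = (Lo >> n) | (Hi << (32 - n)),  Hi = Hi >> n,
/// and for n >= 32 it is
///   Lo = Hi >> (n - 32),  Hi = (SRA ? Hi >> 31 : 0);
/// the CMOV nodes built below pick between the two forms at run time.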
5695 SDValue
ARMTargetLowering::LowerShiftRightParts(SDValue Op
,
5696 SelectionDAG
&DAG
) const {
5697 assert(Op
.getNumOperands() == 3 && "Not a double-shift!");
5698 EVT VT
= Op
.getValueType();
5699 unsigned VTBits
= VT
.getSizeInBits();
5701 SDValue ShOpLo
= Op
.getOperand(0);
5702 SDValue ShOpHi
= Op
.getOperand(1);
5703 SDValue ShAmt
= Op
.getOperand(2);
5705 SDValue CCR
= DAG
.getRegister(ARM::CPSR
, MVT::i32
);
5706 unsigned Opc
= (Op
.getOpcode() == ISD::SRA_PARTS
) ? ISD::SRA
: ISD::SRL
;
5708 assert(Op
.getOpcode() == ISD::SRA_PARTS
|| Op
.getOpcode() == ISD::SRL_PARTS
);
5710 SDValue RevShAmt
= DAG
.getNode(ISD::SUB
, dl
, MVT::i32
,
5711 DAG
.getConstant(VTBits
, dl
, MVT::i32
), ShAmt
);
5712 SDValue Tmp1
= DAG
.getNode(ISD::SRL
, dl
, VT
, ShOpLo
, ShAmt
);
5713 SDValue ExtraShAmt
= DAG
.getNode(ISD::SUB
, dl
, MVT::i32
, ShAmt
,
5714 DAG
.getConstant(VTBits
, dl
, MVT::i32
));
5715 SDValue Tmp2
= DAG
.getNode(ISD::SHL
, dl
, VT
, ShOpHi
, RevShAmt
);
5716 SDValue LoSmallShift
= DAG
.getNode(ISD::OR
, dl
, VT
, Tmp1
, Tmp2
);
5717 SDValue LoBigShift
= DAG
.getNode(Opc
, dl
, VT
, ShOpHi
, ExtraShAmt
);
5718 SDValue CmpLo
= getARMCmp(ExtraShAmt
, DAG
.getConstant(0, dl
, MVT::i32
),
5719 ISD::SETGE
, ARMcc
, DAG
, dl
);
5720 SDValue Lo
= DAG
.getNode(ARMISD::CMOV
, dl
, VT
, LoSmallShift
, LoBigShift
,
5723 SDValue HiSmallShift
= DAG
.getNode(Opc
, dl
, VT
, ShOpHi
, ShAmt
);
5724 SDValue HiBigShift
= Opc
== ISD::SRA
5725 ? DAG
.getNode(Opc
, dl
, VT
, ShOpHi
,
5726 DAG
.getConstant(VTBits
- 1, dl
, VT
))
5727 : DAG
.getConstant(0, dl
, VT
);
5728 SDValue CmpHi
= getARMCmp(ExtraShAmt
, DAG
.getConstant(0, dl
, MVT::i32
),
5729 ISD::SETGE
, ARMcc
, DAG
, dl
);
5730 SDValue Hi
= DAG
.getNode(ARMISD::CMOV
, dl
, VT
, HiSmallShift
, HiBigShift
,
5733 SDValue Ops
[2] = { Lo
, Hi
};
5734 return DAG
.getMergeValues(Ops
, dl
);
/// LowerShiftLeftParts - Lower SHL_PARTS, which returns two
/// i32 values and takes a 2 x i32 value to shift plus a shift amount.
SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op,
                                               SelectionDAG &DAG) const {
  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
  EVT VT = Op.getValueType();
  unsigned VTBits = VT.getSizeInBits();
  SDLoc dl(Op);
  SDValue ShOpLo = Op.getOperand(0);
  SDValue ShOpHi = Op.getOperand(1);
  SDValue ShAmt  = Op.getOperand(2);
  SDValue ARMcc;
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);

  assert(Op.getOpcode() == ISD::SHL_PARTS);
  SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
                                 DAG.getConstant(VTBits, dl, MVT::i32), ShAmt);
  SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
  SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
  SDValue HiSmallShift = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);

  SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
                                   DAG.getConstant(VTBits, dl, MVT::i32));
  SDValue HiBigShift = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
  SDValue CmpHi = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
                            ISD::SETGE, ARMcc, DAG, dl);
  SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, HiSmallShift, HiBigShift,
                           ARMcc, CCR, CmpHi);

  SDValue CmpLo = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
                            ISD::SETGE, ARMcc, DAG, dl);
  SDValue LoSmallShift = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
  SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, LoSmallShift,
                           DAG.getConstant(0, dl, VT), ARMcc, CCR, CmpLo);

  SDValue Ops[2] = { Lo, Hi };
  return DAG.getMergeValues(Ops, dl);
}
SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
                                            SelectionDAG &DAG) const {
  // The rounding mode is in bits 23:22 of the FPSCR.
  // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0
  // The formula we use to implement this is ((FPSCR + (1 << 22)) >> 22) & 3,
  // so that the shift and the AND get folded into a bitfield extract.
  SDLoc dl(Op);
  SDValue Ops[] = { DAG.getEntryNode(),
                    DAG.getConstant(Intrinsic::arm_get_fpscr, dl, MVT::i32) };

  SDValue FPSCR = DAG.getNode(ISD::INTRINSIC_W_CHAIN, dl, MVT::i32, Ops);
  SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR,
                                  DAG.getConstant(1U << 22, dl, MVT::i32));
  SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds,
                              DAG.getConstant(22, dl, MVT::i32));
  return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE,
                     DAG.getConstant(3, dl, MVT::i32));
}
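
// Worked example (illustrative, not from the original source): if FPSCR bits
// 23:22 hold 0b10 (ARM rounding mode 2, round toward minus infinity), adding
// 1 << 22 turns them into 0b11, so the extracted value is 3, matching the
// required 2 -> 3 mapping. Mode 0b11 carries into bit 24 instead; the final
// '& 3' masks that away and yields 0, which is the 3 -> 0 wrap-around.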
static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG,
                         const ARMSubtarget *ST) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  if (VT.isVector() && ST->hasNEON()) {
    // Compute the least significant set bit: LSB = X & -X
    SDValue X = N->getOperand(0);
    SDValue NX = DAG.getNode(ISD::SUB, dl, VT, getZeroVector(VT, DAG, dl), X);
    SDValue LSB = DAG.getNode(ISD::AND, dl, VT, X, NX);

    EVT ElemTy = VT.getVectorElementType();

    if (ElemTy == MVT::i8) {
      // Compute with: cttz(x) = ctpop(lsb - 1)
      SDValue One = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
                                DAG.getTargetConstant(1, dl, ElemTy));
      SDValue Bits = DAG.getNode(ISD::SUB, dl, VT, LSB, One);
      return DAG.getNode(ISD::CTPOP, dl, VT, Bits);
    }

    if ((ElemTy == MVT::i16 || ElemTy == MVT::i32) &&
        (N->getOpcode() == ISD::CTTZ_ZERO_UNDEF)) {
      // Compute with: cttz(x) = (width - 1) - ctlz(lsb), if x != 0
      unsigned NumBits = ElemTy.getSizeInBits();
      SDValue WidthMinus1 =
          DAG.getNode(ARMISD::VMOVIMM, dl, VT,
                      DAG.getTargetConstant(NumBits - 1, dl, ElemTy));
      SDValue CTLZ = DAG.getNode(ISD::CTLZ, dl, VT, LSB);
      return DAG.getNode(ISD::SUB, dl, VT, WidthMinus1, CTLZ);
    }

    // Compute with: cttz(x) = ctpop(lsb - 1)
    SDValue Bits;
    if (ElemTy == MVT::i64) {
      // Load constant 0xffff'ffff'ffff'ffff to register.
      SDValue FF = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
                               DAG.getTargetConstant(0x1eff, dl, MVT::i32));
      Bits = DAG.getNode(ISD::ADD, dl, VT, LSB, FF);
    } else {
      SDValue One = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
                                DAG.getTargetConstant(1, dl, ElemTy));
      Bits = DAG.getNode(ISD::SUB, dl, VT, LSB, One);
    }
    return DAG.getNode(ISD::CTPOP, dl, VT, Bits);
  }

  if (!ST->hasV6T2Ops())
    return SDValue();

  SDValue rbit = DAG.getNode(ISD::BITREVERSE, dl, VT, N->getOperand(0));
  return DAG.getNode(ISD::CTLZ, dl, VT, rbit);
}
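
// Worked example (illustrative, not from the original source): for a lane
// holding x = 0b0110100, LSB = x & -x = 0b0000100, LSB - 1 = 0b0000011, and
// ctpop(0b0000011) = 2 = cttz(x). The i16/i32 CTTZ_ZERO_UNDEF path instead
// computes (width - 1) - ctlz(LSB); with an 8-bit width that would be
// 7 - ctlz(0b00000100) = 7 - 5 = 2 as well.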
static SDValue LowerCTPOP(SDNode *N, SelectionDAG &DAG,
                          const ARMSubtarget *ST) {
  EVT VT = N->getValueType(0);
  SDLoc DL(N);

  assert(ST->hasNEON() && "Custom ctpop lowering requires NEON.");
  assert((VT == MVT::v1i64 || VT == MVT::v2i64 || VT == MVT::v2i32 ||
          VT == MVT::v4i32 || VT == MVT::v4i16 || VT == MVT::v8i16) &&
         "Unexpected type for custom ctpop lowering");

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8;
  SDValue Res = DAG.getBitcast(VT8Bit, N->getOperand(0));
  Res = DAG.getNode(ISD::CTPOP, DL, VT8Bit, Res);

  // Widen v8i8/v16i8 CTPOP result to VT by repeatedly widening pairwise adds.
  unsigned EltSize = 8;
  unsigned NumElts = VT.is64BitVector() ? 8 : 16;
  while (EltSize != VT.getScalarSizeInBits()) {
    SmallVector<SDValue, 8> Ops;
    Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddlu, DL,
                                  TLI.getPointerTy(DAG.getDataLayout())));
    Ops.push_back(Res);

    // Double the element size and halve the element count for the next step.
    EltSize *= 2;
    NumElts /= 2;
    MVT WidenVT = MVT::getVectorVT(MVT::getIntegerVT(EltSize), NumElts);
    Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, WidenVT, Ops);
  }

  return Res;
}
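
// Illustrative example (not from the original source): for a v4i32 ctpop the
// loop above runs twice, taking the per-byte counts from v16i8 through
// vpaddl.u8 to v8i16 and then vpaddl.u16 to v4i32, so each 32-bit lane ends
// up holding the sum of the four byte counts it covers.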
/// getVShiftImm - Check if this is a valid build_vector for the immediate
/// operand of a vector shift operation, where all the elements of the
/// build_vector must have the same constant integer value.
static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
  // Ignore bit_converts.
  while (Op.getOpcode() == ISD::BITCAST)
    Op = Op.getOperand(0);
  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (!BVN ||
      !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs,
                            ElementBits) ||
      SplatBitSize > ElementBits)
    return false;
  Cnt = SplatBits.getSExtValue();
  return true;
}
/// isVShiftLImm - Check if this is a valid build_vector for the immediate
/// operand of a vector shift left operation. That value must be in the range:
///   0 <= Value < ElementBits for a left shift; or
///   0 <= Value <= ElementBits for a long left shift.
static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
  assert(VT.isVector() && "vector shift count is not a vector type");
  int64_t ElementBits = VT.getScalarSizeInBits();
  if (!getVShiftImm(Op, ElementBits, Cnt))
    return false;
  return (Cnt >= 0 && (isLong ? Cnt - 1 : Cnt) < ElementBits);
}
/// isVShiftRImm - Check if this is a valid build_vector for the immediate
/// operand of a vector shift right operation. For a shift opcode, the value
/// is positive, but for an intrinsic the value count must be negative. The
/// absolute value must be in the range:
///   1 <= |Value| <= ElementBits for a right shift; or
///   1 <= |Value| <= ElementBits/2 for a narrow right shift.
static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic,
                         int64_t &Cnt) {
  assert(VT.isVector() && "vector shift count is not a vector type");
  int64_t ElementBits = VT.getScalarSizeInBits();
  if (!getVShiftImm(Op, ElementBits, Cnt))
    return false;
  if (!isIntrinsic)
    return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits / 2 : ElementBits));
  if (Cnt >= -(isNarrow ? ElementBits / 2 : ElementBits) && Cnt <= -1) {
    Cnt = -Cnt;
    return true;
  }
  return false;
}
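
// Illustrative instantiation of the ranges above (not from the original
// source): for v8i16 lanes (ElementBits == 16) a left-shift immediate must be
// 0..15 (0..16 for the "long" form), while a right-shift immediate coming
// from a shift node must be 1..16, or 1..8 for the narrowing forms.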
static SDValue LowerShift(SDNode *N, SelectionDAG &DAG,
                          const ARMSubtarget *ST) {
  EVT VT = N->getValueType(0);
  SDLoc dl(N);
  int64_t Cnt;

  // We essentially have two forms here. Shift by an immediate and shift by a
  // vector register (there are also shift by a gpr, but that is just handled
  // with a tablegen pattern). We cannot easily match shift by an immediate in
  // tablegen so we do that here and generate a VSHLIMM/VSHRsIMM/VSHRuIMM.
  // For shifting by a vector, we don't have VSHR, only VSHL (which can be
  // signed or unsigned, and a negative shift indicates a shift right).
  if (N->getOpcode() == ISD::SHL) {
    if (isVShiftLImm(N->getOperand(1), VT, false, Cnt))
      return DAG.getNode(ARMISD::VSHLIMM, dl, VT, N->getOperand(0),
                         DAG.getConstant(Cnt, dl, MVT::i32));
    return DAG.getNode(ARMISD::VSHLu, dl, VT, N->getOperand(0),
                       N->getOperand(1));
  }

  assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) &&
         "unexpected vector shift opcode");

  if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) {
    unsigned VShiftOpc =
        (N->getOpcode() == ISD::SRA ? ARMISD::VSHRsIMM : ARMISD::VSHRuIMM);
    return DAG.getNode(VShiftOpc, dl, VT, N->getOperand(0),
                       DAG.getConstant(Cnt, dl, MVT::i32));
  }

  // Other right shifts we don't have operations for (we use a shift left by a
  // negative number).
  EVT ShiftVT = N->getOperand(1).getValueType();
  SDValue NegatedCount = DAG.getNode(
      ISD::SUB, dl, ShiftVT, getZeroVector(ShiftVT, DAG, dl), N->getOperand(1));
  unsigned VShiftOpc =
      (N->getOpcode() == ISD::SRA ? ARMISD::VSHLs : ARMISD::VSHLu);
  return DAG.getNode(VShiftOpc, dl, VT, N->getOperand(0), NegatedCount);
}
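
// Illustrative note (not from the original source): a v4i32 'x >> y' whose
// count is not a splatted constant therefore becomes VSHLs/VSHLu(x, 0 - y);
// the NEON/MVE register-shift instructions treat a negative per-lane count as
// a shift right, so no separate right-shift-by-register node is needed.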
static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG,
                                const ARMSubtarget *ST) {
  EVT VT = N->getValueType(0);
  SDLoc dl(N);

  // We can get here for a node like i32 = ISD::SHL i32, i64
  if (VT != MVT::i64)
    return SDValue();

  assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA ||
          N->getOpcode() == ISD::SHL) &&
         "Unknown shift to lower!");

  unsigned ShOpc = N->getOpcode();
  if (ST->hasMVEIntegerOps()) {
    SDValue ShAmt = N->getOperand(1);
    unsigned ShPartsOpc = ARMISD::LSLL;
    ConstantSDNode *Con = dyn_cast<ConstantSDNode>(ShAmt);

    // If the shift amount is greater than 32 or has a greater bitwidth than 64
    // then do the default optimisation
    if (ShAmt->getValueType(0).getSizeInBits() > 64 ||
        (Con && (Con->getZExtValue() == 0 || Con->getZExtValue() >= 32)))
      return SDValue();

    // Extract the lower 32 bits of the shift amount if it's not an i32
    if (ShAmt->getValueType(0) != MVT::i32)
      ShAmt = DAG.getZExtOrTrunc(ShAmt, dl, MVT::i32);

    if (ShOpc == ISD::SRL) {
      if (!Con)
        // There is no t2LSRLr instruction so negate and perform an lsll if the
        // shift amount is in a register, emulating a right shift.
        ShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
                            DAG.getConstant(0, dl, MVT::i32), ShAmt);
      else
        // Else generate an lsrl on the immediate shift amount
        ShPartsOpc = ARMISD::LSRL;
    } else if (ShOpc == ISD::SRA)
      ShPartsOpc = ARMISD::ASRL;

    // Lower 32 bits of the destination/source
    SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
                             DAG.getConstant(0, dl, MVT::i32));
    // Upper 32 bits of the destination/source
    SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
                             DAG.getConstant(1, dl, MVT::i32));

    // Generate the shift operation as computed above
    Lo = DAG.getNode(ShPartsOpc, dl, DAG.getVTList(MVT::i32, MVT::i32), Lo, Hi,
                     ShAmt);
    // The upper 32 bits come from the second return value of lsll
    Hi = SDValue(Lo.getNode(), 1);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
  }

  // We only lower SRA, SRL of 1 here, all others use generic lowering.
  if (!isOneConstant(N->getOperand(1)) || N->getOpcode() == ISD::SHL)
    return SDValue();

  // If we are in thumb mode, we don't have RRX.
  if (ST->isThumb1Only())
    return SDValue();

  // Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr.
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
                           DAG.getConstant(0, dl, MVT::i32));
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
                           DAG.getConstant(1, dl, MVT::i32));

  // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and
  // captures the result into a carry flag.
  unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG : ARMISD::SRA_FLAG;
  Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), Hi);

  // The low part is an ARMISD::RRX operand, which shifts the carry in.
  Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1));

  // Merge the pieces into a single i64 value.
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}
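
// Illustrative example (not from the original source): for 'i64 x >> 1' on a
// non-MVE, non-Thumb1 target the code above emits SRL_FLAG on the high word,
// which shifts it right by one and leaves the bit shifted out in the carry
// flag, then RRX on the low word, which rotates that carry back in at bit 31,
// giving the 64-bit result in two instructions.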
6062 static SDValue
LowerVSETCC(SDValue Op
, SelectionDAG
&DAG
,
6063 const ARMSubtarget
*ST
) {
6064 bool Invert
= false;
6066 unsigned Opc
= ARMCC::AL
;
6068 SDValue Op0
= Op
.getOperand(0);
6069 SDValue Op1
= Op
.getOperand(1);
6070 SDValue CC
= Op
.getOperand(2);
6071 EVT VT
= Op
.getValueType();
6072 ISD::CondCode SetCCOpcode
= cast
<CondCodeSDNode
>(CC
)->get();
6077 CmpVT
= Op0
.getValueType().changeVectorElementTypeToInteger();
6079 assert(ST
->hasMVEIntegerOps() &&
6080 "No hardware support for integer vector comparison!");
6082 if (Op
.getValueType().getVectorElementType() != MVT::i1
)
6085 // Make sure we expand floating point setcc to scalar if we do not have
6086 // mve.fp, so that we can handle them from there.
6087 if (Op0
.getValueType().isFloatingPoint() && !ST
->hasMVEFloatOps())
6093 if (Op0
.getValueType().getVectorElementType() == MVT::i64
&&
6094 (SetCCOpcode
== ISD::SETEQ
|| SetCCOpcode
== ISD::SETNE
)) {
6095 // Special-case integer 64-bit equality comparisons. They aren't legal,
6096 // but they can be lowered with a few vector instructions.
6097 unsigned CmpElements
= CmpVT
.getVectorNumElements() * 2;
6098 EVT SplitVT
= EVT::getVectorVT(*DAG
.getContext(), MVT::i32
, CmpElements
);
6099 SDValue CastOp0
= DAG
.getNode(ISD::BITCAST
, dl
, SplitVT
, Op0
);
6100 SDValue CastOp1
= DAG
.getNode(ISD::BITCAST
, dl
, SplitVT
, Op1
);
6101 SDValue Cmp
= DAG
.getNode(ISD::SETCC
, dl
, SplitVT
, CastOp0
, CastOp1
,
6102 DAG
.getCondCode(ISD::SETEQ
));
6103 SDValue Reversed
= DAG
.getNode(ARMISD::VREV64
, dl
, SplitVT
, Cmp
);
6104 SDValue Merged
= DAG
.getNode(ISD::AND
, dl
, SplitVT
, Cmp
, Reversed
);
6105 Merged
= DAG
.getNode(ISD::BITCAST
, dl
, CmpVT
, Merged
);
6106 if (SetCCOpcode
== ISD::SETNE
)
6107 Merged
= DAG
.getNOT(dl
, Merged
, CmpVT
);
6108 Merged
= DAG
.getSExtOrTrunc(Merged
, dl
, VT
);
6112 if (CmpVT
.getVectorElementType() == MVT::i64
)
6113 // 64-bit comparisons are not legal in general.
6116 if (Op1
.getValueType().isFloatingPoint()) {
6117 switch (SetCCOpcode
) {
6118 default: llvm_unreachable("Illegal FP comparison");
6121 if (ST
->hasMVEFloatOps()) {
6122 Opc
= ARMCC::NE
; break;
6124 Invert
= true; LLVM_FALLTHROUGH
;
6127 case ISD::SETEQ
: Opc
= ARMCC::EQ
; break;
6129 case ISD::SETLT
: Swap
= true; LLVM_FALLTHROUGH
;
6131 case ISD::SETGT
: Opc
= ARMCC::GT
; break;
6133 case ISD::SETLE
: Swap
= true; LLVM_FALLTHROUGH
;
6135 case ISD::SETGE
: Opc
= ARMCC::GE
; break;
6136 case ISD::SETUGE
: Swap
= true; LLVM_FALLTHROUGH
;
6137 case ISD::SETULE
: Invert
= true; Opc
= ARMCC::GT
; break;
6138 case ISD::SETUGT
: Swap
= true; LLVM_FALLTHROUGH
;
6139 case ISD::SETULT
: Invert
= true; Opc
= ARMCC::GE
; break;
6140 case ISD::SETUEQ
: Invert
= true; LLVM_FALLTHROUGH
;
6142 // Expand this to (OLT | OGT).
6143 SDValue TmpOp0
= DAG
.getNode(ARMISD::VCMP
, dl
, CmpVT
, Op1
, Op0
,
6144 DAG
.getConstant(ARMCC::GT
, dl
, MVT::i32
));
6145 SDValue TmpOp1
= DAG
.getNode(ARMISD::VCMP
, dl
, CmpVT
, Op0
, Op1
,
6146 DAG
.getConstant(ARMCC::GT
, dl
, MVT::i32
));
6147 SDValue Result
= DAG
.getNode(ISD::OR
, dl
, CmpVT
, TmpOp0
, TmpOp1
);
6149 Result
= DAG
.getNOT(dl
, Result
, VT
);
6152 case ISD::SETUO
: Invert
= true; LLVM_FALLTHROUGH
;
6154 // Expand this to (OLT | OGE).
6155 SDValue TmpOp0
= DAG
.getNode(ARMISD::VCMP
, dl
, CmpVT
, Op1
, Op0
,
6156 DAG
.getConstant(ARMCC::GT
, dl
, MVT::i32
));
6157 SDValue TmpOp1
= DAG
.getNode(ARMISD::VCMP
, dl
, CmpVT
, Op0
, Op1
,
6158 DAG
.getConstant(ARMCC::GE
, dl
, MVT::i32
));
6159 SDValue Result
= DAG
.getNode(ISD::OR
, dl
, CmpVT
, TmpOp0
, TmpOp1
);
6161 Result
= DAG
.getNOT(dl
, Result
, VT
);
6166 // Integer comparisons.
6167 switch (SetCCOpcode
) {
6168 default: llvm_unreachable("Illegal integer comparison");
6170 if (ST
->hasMVEIntegerOps()) {
6171 Opc
= ARMCC::NE
; break;
6173 Invert
= true; LLVM_FALLTHROUGH
;
6175 case ISD::SETEQ
: Opc
= ARMCC::EQ
; break;
6176 case ISD::SETLT
: Swap
= true; LLVM_FALLTHROUGH
;
6177 case ISD::SETGT
: Opc
= ARMCC::GT
; break;
6178 case ISD::SETLE
: Swap
= true; LLVM_FALLTHROUGH
;
6179 case ISD::SETGE
: Opc
= ARMCC::GE
; break;
6180 case ISD::SETULT
: Swap
= true; LLVM_FALLTHROUGH
;
6181 case ISD::SETUGT
: Opc
= ARMCC::HI
; break;
6182 case ISD::SETULE
: Swap
= true; LLVM_FALLTHROUGH
;
6183 case ISD::SETUGE
: Opc
= ARMCC::HS
; break;
6186 // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero).
6187 if (ST
->hasNEON() && Opc
== ARMCC::EQ
) {
6189 if (ISD::isBuildVectorAllZeros(Op1
.getNode()))
6191 else if (ISD::isBuildVectorAllZeros(Op0
.getNode()))
6194 // Ignore bitconvert.
6195 if (AndOp
.getNode() && AndOp
.getOpcode() == ISD::BITCAST
)
6196 AndOp
= AndOp
.getOperand(0);
6198 if (AndOp
.getNode() && AndOp
.getOpcode() == ISD::AND
) {
6199 Op0
= DAG
.getNode(ISD::BITCAST
, dl
, CmpVT
, AndOp
.getOperand(0));
6200 Op1
= DAG
.getNode(ISD::BITCAST
, dl
, CmpVT
, AndOp
.getOperand(1));
6201 SDValue Result
= DAG
.getNode(ARMISD::VTST
, dl
, CmpVT
, Op0
, Op1
);
6203 Result
= DAG
.getNOT(dl
, Result
, VT
);
6210 std::swap(Op0
, Op1
);
6212 // If one of the operands is a constant vector zero, attempt to fold the
6213 // comparison to a specialized compare-against-zero form.
6215 if (ISD::isBuildVectorAllZeros(Op1
.getNode()))
6217 else if (ISD::isBuildVectorAllZeros(Op0
.getNode())) {
6218 if (Opc
== ARMCC::GE
)
6220 else if (Opc
== ARMCC::GT
)
6226 if (SingleOp
.getNode()) {
6227 Result
= DAG
.getNode(ARMISD::VCMPZ
, dl
, CmpVT
, SingleOp
,
6228 DAG
.getConstant(Opc
, dl
, MVT::i32
));
6230 Result
= DAG
.getNode(ARMISD::VCMP
, dl
, CmpVT
, Op0
, Op1
,
6231 DAG
.getConstant(Opc
, dl
, MVT::i32
));
6234 Result
= DAG
.getSExtOrTrunc(Result
, dl
, VT
);
6237 Result
= DAG
.getNOT(dl
, Result
, VT
);
static SDValue LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Carry = Op.getOperand(2);
  SDValue Cond = Op.getOperand(3);
  SDLoc DL(Op);

  assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only.");

  // ARMISD::SUBE expects a carry, not a borrow like ISD::SUBCARRY, so we
  // have to invert the carry first.
  Carry = DAG.getNode(ISD::SUB, DL, MVT::i32,
                      DAG.getConstant(1, DL, MVT::i32), Carry);
  // This converts the boolean value carry into the carry flag.
  Carry = ConvertBooleanCarryToCarryFlag(Carry, DAG);

  SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
  SDValue Cmp = DAG.getNode(ARMISD::SUBE, DL, VTs, LHS, RHS, Carry);

  SDValue FVal = DAG.getConstant(0, DL, MVT::i32);
  SDValue TVal = DAG.getConstant(1, DL, MVT::i32);
  SDValue ARMcc = DAG.getConstant(
      IntCCToARMCC(cast<CondCodeSDNode>(Cond)->get()), DL, MVT::i32);
  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
  SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), DL, ARM::CPSR,
                                   Cmp.getValue(1), SDValue());
  return DAG.getNode(ARMISD::CMOV, DL, Op.getValueType(), FVal, TVal, ARMcc,
                     CCR, Chain.getValue(1));
}
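
// Note (illustrative, not from the original source): ISD::SETCCCARRY supplies
// a boolean borrow (1 means "borrow in"), while ARMISD::SUBE consumes an
// ARM-style carry (1 means "no borrow"), hence the 'Carry = 1 - Carry'
// computed above before the value is converted into the CPSR carry flag.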
6272 /// isVMOVModifiedImm - Check if the specified splat value corresponds to a
6273 /// valid vector constant for a NEON or MVE instruction with a "modified
6274 /// immediate" operand (e.g., VMOV). If so, return the encoded value.
6275 static SDValue
isVMOVModifiedImm(uint64_t SplatBits
, uint64_t SplatUndef
,
6276 unsigned SplatBitSize
, SelectionDAG
&DAG
,
6277 const SDLoc
&dl
, EVT
&VT
, bool is128Bits
,
6278 VMOVModImmType type
) {
6279 unsigned OpCmode
, Imm
;
6281 // SplatBitSize is set to the smallest size that splats the vector, so a
6282 // zero vector will always have SplatBitSize == 8. However, NEON modified
6283 // immediate instructions others than VMOV do not support the 8-bit encoding
6284 // of a zero vector, and the default encoding of zero is supposed to be the
6289 switch (SplatBitSize
) {
6291 if (type
!= VMOVModImm
)
6293 // Any 1-byte value is OK. Op=0, Cmode=1110.
6294 assert((SplatBits
& ~0xff) == 0 && "one byte splat value is too big");
6297 VT
= is128Bits
? MVT::v16i8
: MVT::v8i8
;
6301 // NEON's 16-bit VMOV supports splat values where only one byte is nonzero.
6302 VT
= is128Bits
? MVT::v8i16
: MVT::v4i16
;
6303 if ((SplatBits
& ~0xff) == 0) {
6304 // Value = 0x00nn: Op=x, Cmode=100x.
6309 if ((SplatBits
& ~0xff00) == 0) {
6310 // Value = 0xnn00: Op=x, Cmode=101x.
6312 Imm
= SplatBits
>> 8;
6318 // NEON's 32-bit VMOV supports splat values where:
6319 // * only one byte is nonzero, or
6320 // * the least significant byte is 0xff and the second byte is nonzero, or
6321 // * the least significant 2 bytes are 0xff and the third is nonzero.
6322 VT
= is128Bits
? MVT::v4i32
: MVT::v2i32
;
6323 if ((SplatBits
& ~0xff) == 0) {
6324 // Value = 0x000000nn: Op=x, Cmode=000x.
6329 if ((SplatBits
& ~0xff00) == 0) {
6330 // Value = 0x0000nn00: Op=x, Cmode=001x.
6332 Imm
= SplatBits
>> 8;
6335 if ((SplatBits
& ~0xff0000) == 0) {
6336 // Value = 0x00nn0000: Op=x, Cmode=010x.
6338 Imm
= SplatBits
>> 16;
6341 if ((SplatBits
& ~0xff000000) == 0) {
6342 // Value = 0xnn000000: Op=x, Cmode=011x.
6344 Imm
= SplatBits
>> 24;
6348 // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC
6349 if (type
== OtherModImm
) return SDValue();
6351 if ((SplatBits
& ~0xffff) == 0 &&
6352 ((SplatBits
| SplatUndef
) & 0xff) == 0xff) {
6353 // Value = 0x0000nnff: Op=x, Cmode=1100.
6355 Imm
= SplatBits
>> 8;
6359 // cmode == 0b1101 is not supported for MVE VMVN
6360 if (type
== MVEVMVNModImm
)
6363 if ((SplatBits
& ~0xffffff) == 0 &&
6364 ((SplatBits
| SplatUndef
) & 0xffff) == 0xffff) {
6365 // Value = 0x00nnffff: Op=x, Cmode=1101.
6367 Imm
= SplatBits
>> 16;
6371 // Note: there are a few 32-bit splat values (specifically: 00ffff00,
6372 // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not
6373 // VMOV.I32. A (very) minor optimization would be to replicate the value
6374 // and fall through here to test for a valid 64-bit splat. But, then the
6375 // caller would also need to check and handle the change in size.
6379 if (type
!= VMOVModImm
)
6381 // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff.
6382 uint64_t BitMask
= 0xff;
6384 unsigned ImmMask
= 1;
6386 for (int ByteNum
= 0; ByteNum
< 8; ++ByteNum
) {
6387 if (((SplatBits
| SplatUndef
) & BitMask
) == BitMask
) {
6390 } else if ((SplatBits
& BitMask
) != 0) {
6397 if (DAG
.getDataLayout().isBigEndian())
6398 // swap higher and lower 32 bit word
6399 Imm
= ((Imm
& 0xf) << 4) | ((Imm
& 0xf0) >> 4);
6401 // Op=1, Cmode=1110.
6403 VT
= is128Bits
? MVT::v2i64
: MVT::v1i64
;
6408 llvm_unreachable("unexpected size for isVMOVModifiedImm");
6411 unsigned EncodedVal
= ARM_AM::createVMOVModImm(OpCmode
, Imm
);
6412 return DAG
.getTargetConstant(EncodedVal
, dl
, MVT::i32
);
6415 SDValue
ARMTargetLowering::LowerConstantFP(SDValue Op
, SelectionDAG
&DAG
,
6416 const ARMSubtarget
*ST
) const {
6417 EVT VT
= Op
.getValueType();
6418 bool IsDouble
= (VT
== MVT::f64
);
6419 ConstantFPSDNode
*CFP
= cast
<ConstantFPSDNode
>(Op
);
6420 const APFloat
&FPVal
= CFP
->getValueAPF();
6422 // Prevent floating-point constants from using literal loads
6423 // when execute-only is enabled.
6424 if (ST
->genExecuteOnly()) {
6425 // If we can represent the constant as an immediate, don't lower it
6426 if (isFPImmLegal(FPVal
, VT
))
6428 // Otherwise, construct as integer, and move to float register
6429 APInt INTVal
= FPVal
.bitcastToAPInt();
6431 switch (VT
.getSimpleVT().SimpleTy
) {
6433 llvm_unreachable("Unknown floating point type!");
6436 SDValue Lo
= DAG
.getConstant(INTVal
.trunc(32), DL
, MVT::i32
);
6437 SDValue Hi
= DAG
.getConstant(INTVal
.lshr(32).trunc(32), DL
, MVT::i32
);
6438 if (!ST
->isLittle())
6440 return DAG
.getNode(ARMISD::VMOVDRR
, DL
, MVT::f64
, Lo
, Hi
);
6443 return DAG
.getNode(ARMISD::VMOVSR
, DL
, VT
,
6444 DAG
.getConstant(INTVal
, DL
, MVT::i32
));
6448 if (!ST
->hasVFP3Base())
6451 // Use the default (constant pool) lowering for double constants when we have
6453 if (IsDouble
&& !Subtarget
->hasFP64())
6456 // Try splatting with a VMOV.f32...
6457 int ImmVal
= IsDouble
? ARM_AM::getFP64Imm(FPVal
) : ARM_AM::getFP32Imm(FPVal
);
6460 if (IsDouble
|| !ST
->useNEONForSinglePrecisionFP()) {
6461 // We have code in place to select a valid ConstantFP already, no need to
6466 // It's a float and we are trying to use NEON operations where
6467 // possible. Lower it to a splat followed by an extract.
6469 SDValue NewVal
= DAG
.getTargetConstant(ImmVal
, DL
, MVT::i32
);
6470 SDValue VecConstant
= DAG
.getNode(ARMISD::VMOVFPIMM
, DL
, MVT::v2f32
,
6472 return DAG
.getNode(ISD::EXTRACT_VECTOR_ELT
, DL
, MVT::f32
, VecConstant
,
6473 DAG
.getConstant(0, DL
, MVT::i32
));
6476 // The rest of our options are NEON only, make sure that's allowed before
6478 if (!ST
->hasNEON() || (!IsDouble
&& !ST
->useNEONForSinglePrecisionFP()))
6482 uint64_t iVal
= FPVal
.bitcastToAPInt().getZExtValue();
6484 // It wouldn't really be worth bothering for doubles except for one very
6485 // important value, which does happen to match: 0.0. So make sure we don't do
6487 if (IsDouble
&& (iVal
& 0xffffffff) != (iVal
>> 32))
6490 // Try a VMOV.i32 (FIXME: i8, i16, or i64 could work too).
6491 SDValue NewVal
= isVMOVModifiedImm(iVal
& 0xffffffffU
, 0, 32, DAG
, SDLoc(Op
),
6492 VMovVT
, false, VMOVModImm
);
6493 if (NewVal
!= SDValue()) {
6495 SDValue VecConstant
= DAG
.getNode(ARMISD::VMOVIMM
, DL
, VMovVT
,
6498 return DAG
.getNode(ISD::BITCAST
, DL
, MVT::f64
, VecConstant
);
6500 // It's a float: cast and extract a vector element.
6501 SDValue VecFConstant
= DAG
.getNode(ISD::BITCAST
, DL
, MVT::v2f32
,
6503 return DAG
.getNode(ISD::EXTRACT_VECTOR_ELT
, DL
, MVT::f32
, VecFConstant
,
6504 DAG
.getConstant(0, DL
, MVT::i32
));
6507 // Finally, try a VMVN.i32
6508 NewVal
= isVMOVModifiedImm(~iVal
& 0xffffffffU
, 0, 32, DAG
, SDLoc(Op
), VMovVT
,
6510 if (NewVal
!= SDValue()) {
6512 SDValue VecConstant
= DAG
.getNode(ARMISD::VMVNIMM
, DL
, VMovVT
, NewVal
);
6515 return DAG
.getNode(ISD::BITCAST
, DL
, MVT::f64
, VecConstant
);
6517 // It's a float: cast and extract a vector element.
6518 SDValue VecFConstant
= DAG
.getNode(ISD::BITCAST
, DL
, MVT::v2f32
,
6520 return DAG
.getNode(ISD::EXTRACT_VECTOR_ELT
, DL
, MVT::f32
, VecFConstant
,
6521 DAG
.getConstant(0, DL
, MVT::i32
));
6527 // check if an VEXT instruction can handle the shuffle mask when the
6528 // vector sources of the shuffle are the same.
6529 static bool isSingletonVEXTMask(ArrayRef
<int> M
, EVT VT
, unsigned &Imm
) {
6530 unsigned NumElts
= VT
.getVectorNumElements();
6532 // Assume that the first shuffle index is not UNDEF. Fail if it is.
6538 // If this is a VEXT shuffle, the immediate value is the index of the first
6539 // element. The other shuffle indices must be the successive elements after
6541 unsigned ExpectedElt
= Imm
;
6542 for (unsigned i
= 1; i
< NumElts
; ++i
) {
6543 // Increment the expected index. If it wraps around, just follow it
6544 // back to index zero and keep going.
6546 if (ExpectedElt
== NumElts
)
6549 if (M
[i
] < 0) continue; // ignore UNDEF indices
6550 if (ExpectedElt
!= static_cast<unsigned>(M
[i
]))
6557 static bool isVEXTMask(ArrayRef
<int> M
, EVT VT
,
6558 bool &ReverseVEXT
, unsigned &Imm
) {
6559 unsigned NumElts
= VT
.getVectorNumElements();
6560 ReverseVEXT
= false;
6562 // Assume that the first shuffle index is not UNDEF. Fail if it is.
6568 // If this is a VEXT shuffle, the immediate value is the index of the first
6569 // element. The other shuffle indices must be the successive elements after
6571 unsigned ExpectedElt
= Imm
;
6572 for (unsigned i
= 1; i
< NumElts
; ++i
) {
6573 // Increment the expected index. If it wraps around, it may still be
6574 // a VEXT but the source vectors must be swapped.
6576 if (ExpectedElt
== NumElts
* 2) {
6581 if (M
[i
] < 0) continue; // ignore UNDEF indices
6582 if (ExpectedElt
!= static_cast<unsigned>(M
[i
]))
6586 // Adjust the index value if the source operands will be swapped.
6593 /// isVREVMask - Check if a vector shuffle corresponds to a VREV
6594 /// instruction with the specified blocksize. (The order of the elements
6595 /// within each block of the vector is reversed.)
6596 static bool isVREVMask(ArrayRef
<int> M
, EVT VT
, unsigned BlockSize
) {
6597 assert((BlockSize
==16 || BlockSize
==32 || BlockSize
==64) &&
6598 "Only possible block sizes for VREV are: 16, 32, 64");
6600 unsigned EltSz
= VT
.getScalarSizeInBits();
6604 unsigned NumElts
= VT
.getVectorNumElements();
6605 unsigned BlockElts
= M
[0] + 1;
6606 // If the first shuffle index is UNDEF, be optimistic.
6608 BlockElts
= BlockSize
/ EltSz
;
6610 if (BlockSize
<= EltSz
|| BlockSize
!= BlockElts
* EltSz
)
6613 for (unsigned i
= 0; i
< NumElts
; ++i
) {
6614 if (M
[i
] < 0) continue; // ignore UNDEF indices
6615 if ((unsigned) M
[i
] != (i
- i
%BlockElts
) + (BlockElts
- 1 - i
%BlockElts
))
static bool isVTBLMask(ArrayRef<int> M, EVT VT) {
  // We can handle <8 x i8> vector shuffles. If the index in the mask is out of
  // range, then 0 is placed into the resulting vector. So pretty much any mask
  // of 8 elements can work here.
  return VT == MVT::v8i8 && M.size() == 8;
}
static unsigned SelectPairHalf(unsigned Elements, ArrayRef<int> Mask,
                               unsigned Index) {
  if (Mask.size() == Elements * 2)
    return Index / Elements;
  return Mask[Index] == 0 ? 0 : 1;
}
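
// Illustrative example (not from the original source): with Elements == 4, an
// 8-element "combined" mask maps query indices 0..3 to result 0 and 4..7 to
// result 1; for a mask of the plain size, the queried mask entry itself
// decides -- 0 selects the first result, any other value the second.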
6636 // Checks whether the shuffle mask represents a vector transpose (VTRN) by
6637 // checking that pairs of elements in the shuffle mask represent the same index
6638 // in each vector, incrementing the expected index by 2 at each step.
6639 // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 2, 6]
6640 // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,c,g}
6642 // WhichResult gives the offset for each element in the mask based on which
6643 // of the two results it belongs to.
6645 // The transpose can be represented either as:
6646 // result1 = shufflevector v1, v2, result1_shuffle_mask
6647 // result2 = shufflevector v1, v2, result2_shuffle_mask
6648 // where v1/v2 and the shuffle masks have the same number of elements
6649 // (here WhichResult (see below) indicates which result is being checked)
6652 // results = shufflevector v1, v2, shuffle_mask
6653 // where both results are returned in one vector and the shuffle mask has twice
6654 // as many elements as v1/v2 (here WhichResult will always be 0 if true) here we
6655 // want to check the low half and high half of the shuffle mask as if it were
6657 static bool isVTRNMask(ArrayRef
<int> M
, EVT VT
, unsigned &WhichResult
) {
6658 unsigned EltSz
= VT
.getScalarSizeInBits();
6662 unsigned NumElts
= VT
.getVectorNumElements();
6663 if (M
.size() != NumElts
&& M
.size() != NumElts
*2)
6666 // If the mask is twice as long as the input vector then we need to check the
6667 // upper and lower parts of the mask with a matching value for WhichResult
6668 // FIXME: A mask with only even values will be rejected in case the first
6669 // element is undefined, e.g. [-1, 4, 2, 6] will be rejected, because only
6670 // M[0] is used to determine WhichResult
6671 for (unsigned i
= 0; i
< M
.size(); i
+= NumElts
) {
6672 WhichResult
= SelectPairHalf(NumElts
, M
, i
);
6673 for (unsigned j
= 0; j
< NumElts
; j
+= 2) {
6674 if ((M
[i
+j
] >= 0 && (unsigned) M
[i
+j
] != j
+ WhichResult
) ||
6675 (M
[i
+j
+1] >= 0 && (unsigned) M
[i
+j
+1] != j
+ NumElts
+ WhichResult
))
6680 if (M
.size() == NumElts
*2)
6686 /// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of
6687 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
6688 /// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>.
6689 static bool isVTRN_v_undef_Mask(ArrayRef
<int> M
, EVT VT
, unsigned &WhichResult
){
6690 unsigned EltSz
= VT
.getScalarSizeInBits();
6694 unsigned NumElts
= VT
.getVectorNumElements();
6695 if (M
.size() != NumElts
&& M
.size() != NumElts
*2)
6698 for (unsigned i
= 0; i
< M
.size(); i
+= NumElts
) {
6699 WhichResult
= SelectPairHalf(NumElts
, M
, i
);
6700 for (unsigned j
= 0; j
< NumElts
; j
+= 2) {
6701 if ((M
[i
+j
] >= 0 && (unsigned) M
[i
+j
] != j
+ WhichResult
) ||
6702 (M
[i
+j
+1] >= 0 && (unsigned) M
[i
+j
+1] != j
+ WhichResult
))
6707 if (M
.size() == NumElts
*2)
6713 // Checks whether the shuffle mask represents a vector unzip (VUZP) by checking
6714 // that the mask elements are either all even and in steps of size 2 or all odd
6715 // and in steps of size 2.
6716 // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 2, 4, 6]
6717 // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,c,e,g}
6719 // Requires similar checks to that of isVTRNMask with
6720 // respect the how results are returned.
6721 static bool isVUZPMask(ArrayRef
<int> M
, EVT VT
, unsigned &WhichResult
) {
6722 unsigned EltSz
= VT
.getScalarSizeInBits();
6726 unsigned NumElts
= VT
.getVectorNumElements();
6727 if (M
.size() != NumElts
&& M
.size() != NumElts
*2)
6730 for (unsigned i
= 0; i
< M
.size(); i
+= NumElts
) {
6731 WhichResult
= SelectPairHalf(NumElts
, M
, i
);
6732 for (unsigned j
= 0; j
< NumElts
; ++j
) {
6733 if (M
[i
+j
] >= 0 && (unsigned) M
[i
+j
] != 2 * j
+ WhichResult
)
6738 if (M
.size() == NumElts
*2)
6741 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
6742 if (VT
.is64BitVector() && EltSz
== 32)
6748 /// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of
6749 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
6750 /// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>,
6751 static bool isVUZP_v_undef_Mask(ArrayRef
<int> M
, EVT VT
, unsigned &WhichResult
){
6752 unsigned EltSz
= VT
.getScalarSizeInBits();
6756 unsigned NumElts
= VT
.getVectorNumElements();
6757 if (M
.size() != NumElts
&& M
.size() != NumElts
*2)
6760 unsigned Half
= NumElts
/ 2;
6761 for (unsigned i
= 0; i
< M
.size(); i
+= NumElts
) {
6762 WhichResult
= SelectPairHalf(NumElts
, M
, i
);
6763 for (unsigned j
= 0; j
< NumElts
; j
+= Half
) {
6764 unsigned Idx
= WhichResult
;
6765 for (unsigned k
= 0; k
< Half
; ++k
) {
6766 int MIdx
= M
[i
+ j
+ k
];
6767 if (MIdx
>= 0 && (unsigned) MIdx
!= Idx
)
6774 if (M
.size() == NumElts
*2)
6777 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
6778 if (VT
.is64BitVector() && EltSz
== 32)
6784 // Checks whether the shuffle mask represents a vector zip (VZIP) by checking
6785 // that pairs of elements of the shufflemask represent the same index in each
6786 // vector incrementing sequentially through the vectors.
6787 // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 1, 5]
6788 // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,b,f}
6790 // Requires similar checks to that of isVTRNMask with respect the how results
6792 static bool isVZIPMask(ArrayRef
<int> M
, EVT VT
, unsigned &WhichResult
) {
6793 unsigned EltSz
= VT
.getScalarSizeInBits();
6797 unsigned NumElts
= VT
.getVectorNumElements();
6798 if (M
.size() != NumElts
&& M
.size() != NumElts
*2)
6801 for (unsigned i
= 0; i
< M
.size(); i
+= NumElts
) {
6802 WhichResult
= SelectPairHalf(NumElts
, M
, i
);
6803 unsigned Idx
= WhichResult
* NumElts
/ 2;
6804 for (unsigned j
= 0; j
< NumElts
; j
+= 2) {
6805 if ((M
[i
+j
] >= 0 && (unsigned) M
[i
+j
] != Idx
) ||
6806 (M
[i
+j
+1] >= 0 && (unsigned) M
[i
+j
+1] != Idx
+ NumElts
))
6812 if (M
.size() == NumElts
*2)
6815 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
6816 if (VT
.is64BitVector() && EltSz
== 32)
6822 /// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of
6823 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
6824 /// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>.
6825 static bool isVZIP_v_undef_Mask(ArrayRef
<int> M
, EVT VT
, unsigned &WhichResult
){
6826 unsigned EltSz
= VT
.getScalarSizeInBits();
6830 unsigned NumElts
= VT
.getVectorNumElements();
6831 if (M
.size() != NumElts
&& M
.size() != NumElts
*2)
6834 for (unsigned i
= 0; i
< M
.size(); i
+= NumElts
) {
6835 WhichResult
= SelectPairHalf(NumElts
, M
, i
);
6836 unsigned Idx
= WhichResult
* NumElts
/ 2;
6837 for (unsigned j
= 0; j
< NumElts
; j
+= 2) {
6838 if ((M
[i
+j
] >= 0 && (unsigned) M
[i
+j
] != Idx
) ||
6839 (M
[i
+j
+1] >= 0 && (unsigned) M
[i
+j
+1] != Idx
))
6845 if (M
.size() == NumElts
*2)
6848 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
6849 if (VT
.is64BitVector() && EltSz
== 32)
6855 /// Check if \p ShuffleMask is a NEON two-result shuffle (VZIP, VUZP, VTRN),
6856 /// and return the corresponding ARMISD opcode if it is, or 0 if it isn't.
6857 static unsigned isNEONTwoResultShuffleMask(ArrayRef
<int> ShuffleMask
, EVT VT
,
6858 unsigned &WhichResult
,
6861 if (isVTRNMask(ShuffleMask
, VT
, WhichResult
))
6862 return ARMISD::VTRN
;
6863 if (isVUZPMask(ShuffleMask
, VT
, WhichResult
))
6864 return ARMISD::VUZP
;
6865 if (isVZIPMask(ShuffleMask
, VT
, WhichResult
))
6866 return ARMISD::VZIP
;
6869 if (isVTRN_v_undef_Mask(ShuffleMask
, VT
, WhichResult
))
6870 return ARMISD::VTRN
;
6871 if (isVUZP_v_undef_Mask(ShuffleMask
, VT
, WhichResult
))
6872 return ARMISD::VUZP
;
6873 if (isVZIP_v_undef_Mask(ShuffleMask
, VT
, WhichResult
))
6874 return ARMISD::VZIP
;
/// \return true if this is a reverse operation on a vector.
static bool isReverseMask(ArrayRef<int> M, EVT VT) {
  unsigned NumElts = VT.getVectorNumElements();
  // Make sure the mask has the right size.
  if (NumElts != M.size())
    return false;

  // Look for <15, ..., 3, -1, 1, 0>.
  for (unsigned i = 0; i != NumElts; ++i)
    if (M[i] >= 0 && M[i] != (int) (NumElts - 1 - i))
      return false;

  return true;
}
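
// Illustrative example (not from the original source): for v8i8 a matching
// mask is <7, 6, 5, 4, 3, 2, 1, 0>; UNDEF entries (written as -1) are
// accepted at any position, since the loop above skips negative indices.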
// If N is an integer constant that can be moved into a register in one
// instruction, return an SDValue of such a constant (will become a MOV
// instruction). Otherwise return null.
static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG,
                                     const ARMSubtarget *ST, const SDLoc &dl) {
  uint64_t Val;
  if (!isa<ConstantSDNode>(N))
    return SDValue();
  Val = cast<ConstantSDNode>(N)->getZExtValue();

  if (ST->isThumb1Only()) {
    if (Val <= 255 || ~Val <= 255)
      return DAG.getConstant(Val, dl, MVT::i32);
  } else {
    if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1)
      return DAG.getConstant(Val, dl, MVT::i32);
  }
  return SDValue();
}
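
// Illustrative example (not from the original source): Val == 200 passes the
// Thumb1 check above and can be materialized with a single MOV; in ARM mode
// getSOImmVal accepts any 8-bit value rotated right by an even amount, so a
// constant such as 0x00AB0000 is also returned as a single-instruction MOV.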
6914 static SDValue
LowerBUILD_VECTOR_i1(SDValue Op
, SelectionDAG
&DAG
,
6915 const ARMSubtarget
*ST
) {
6917 EVT VT
= Op
.getValueType();
6919 assert(ST
->hasMVEIntegerOps() && "LowerBUILD_VECTOR_i1 called without MVE!");
6921 unsigned NumElts
= VT
.getVectorNumElements();
6923 unsigned BitsPerBool
;
6927 } else if (NumElts
== 8) {
6930 } else if (NumElts
== 16) {
6936 // If this is a single value copied into all lanes (a splat), we can just sign
6937 // extend that single value
6938 SDValue FirstOp
= Op
.getOperand(0);
6939 if (!isa
<ConstantSDNode
>(FirstOp
) &&
6940 std::all_of(std::next(Op
->op_begin()), Op
->op_end(),
6941 [&FirstOp
](SDUse
&U
) {
6942 return U
.get().isUndef() || U
.get() == FirstOp
;
6944 SDValue Ext
= DAG
.getNode(ISD::SIGN_EXTEND_INREG
, dl
, MVT::i32
, FirstOp
,
6945 DAG
.getValueType(MVT::i1
));
6946 return DAG
.getNode(ARMISD::PREDICATE_CAST
, dl
, Op
.getValueType(), Ext
);
6949 // First create base with bits set where known
6950 unsigned Bits32
= 0;
6951 for (unsigned i
= 0; i
< NumElts
; ++i
) {
6952 SDValue V
= Op
.getOperand(i
);
6953 if (!isa
<ConstantSDNode
>(V
) && !V
.isUndef())
6955 bool BitSet
= V
.isUndef() ? false : cast
<ConstantSDNode
>(V
)->getZExtValue();
6957 Bits32
|= BoolMask
<< (i
* BitsPerBool
);
6960 // Add in unknown nodes
6961 SDValue Base
= DAG
.getNode(ARMISD::PREDICATE_CAST
, dl
, VT
,
6962 DAG
.getConstant(Bits32
, dl
, MVT::i32
));
6963 for (unsigned i
= 0; i
< NumElts
; ++i
) {
6964 SDValue V
= Op
.getOperand(i
);
6965 if (isa
<ConstantSDNode
>(V
) || V
.isUndef())
6967 Base
= DAG
.getNode(ISD::INSERT_VECTOR_ELT
, dl
, VT
, Base
, V
,
6968 DAG
.getConstant(i
, dl
, MVT::i32
));
6974 // If this is a case we can't handle, return null and let the default
6975 // expansion code take care of it.
6976 SDValue
ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op
, SelectionDAG
&DAG
,
6977 const ARMSubtarget
*ST
) const {
6978 BuildVectorSDNode
*BVN
= cast
<BuildVectorSDNode
>(Op
.getNode());
6980 EVT VT
= Op
.getValueType();
6982 if (ST
->hasMVEIntegerOps() && VT
.getScalarSizeInBits() == 1)
6983 return LowerBUILD_VECTOR_i1(Op
, DAG
, ST
);
6985 APInt SplatBits
, SplatUndef
;
6986 unsigned SplatBitSize
;
6988 if (BVN
->isConstantSplat(SplatBits
, SplatUndef
, SplatBitSize
, HasAnyUndefs
)) {
6989 if (SplatUndef
.isAllOnesValue())
6990 return DAG
.getUNDEF(VT
);
6992 if ((ST
->hasNEON() && SplatBitSize
<= 64) ||
6993 (ST
->hasMVEIntegerOps() && SplatBitSize
<= 32)) {
6994 // Check if an immediate VMOV works.
6996 SDValue Val
= isVMOVModifiedImm(SplatBits
.getZExtValue(),
6997 SplatUndef
.getZExtValue(), SplatBitSize
,
6998 DAG
, dl
, VmovVT
, VT
.is128BitVector(),
7001 if (Val
.getNode()) {
7002 SDValue Vmov
= DAG
.getNode(ARMISD::VMOVIMM
, dl
, VmovVT
, Val
);
7003 return DAG
.getNode(ISD::BITCAST
, dl
, VT
, Vmov
);
7006 // Try an immediate VMVN.
7007 uint64_t NegatedImm
= (~SplatBits
).getZExtValue();
7008 Val
= isVMOVModifiedImm(
7009 NegatedImm
, SplatUndef
.getZExtValue(), SplatBitSize
,
7010 DAG
, dl
, VmovVT
, VT
.is128BitVector(),
7011 ST
->hasMVEIntegerOps() ? MVEVMVNModImm
: VMVNModImm
);
7012 if (Val
.getNode()) {
7013 SDValue Vmov
= DAG
.getNode(ARMISD::VMVNIMM
, dl
, VmovVT
, Val
);
7014 return DAG
.getNode(ISD::BITCAST
, dl
, VT
, Vmov
);
7017 // Use vmov.f32 to materialize other v2f32 and v4f32 splats.
7018 if ((VT
== MVT::v2f32
|| VT
== MVT::v4f32
) && SplatBitSize
== 32) {
7019 int ImmVal
= ARM_AM::getFP32Imm(SplatBits
);
7021 SDValue Val
= DAG
.getTargetConstant(ImmVal
, dl
, MVT::i32
);
7022 return DAG
.getNode(ARMISD::VMOVFPIMM
, dl
, VT
, Val
);
7028 // Scan through the operands to see if only one value is used.
7030 // As an optimisation, even if more than one value is used it may be more
7031 // profitable to splat with one value then change some lanes.
7033 // Heuristically we decide to do this if the vector has a "dominant" value,
7034 // defined as splatted to more than half of the lanes.
7035 unsigned NumElts
= VT
.getVectorNumElements();
7036 bool isOnlyLowElement
= true;
7037 bool usesOnlyOneValue
= true;
7038 bool hasDominantValue
= false;
7039 bool isConstant
= true;
7041 // Map of the number of times a particular SDValue appears in the
7043 DenseMap
<SDValue
, unsigned> ValueCounts
;
7045 for (unsigned i
= 0; i
< NumElts
; ++i
) {
7046 SDValue V
= Op
.getOperand(i
);
7050 isOnlyLowElement
= false;
7051 if (!isa
<ConstantFPSDNode
>(V
) && !isa
<ConstantSDNode
>(V
))
7054 ValueCounts
.insert(std::make_pair(V
, 0));
7055 unsigned &Count
= ValueCounts
[V
];
7057 // Is this value dominant? (takes up more than half of the lanes)
7058 if (++Count
> (NumElts
/ 2)) {
7059 hasDominantValue
= true;
7063 if (ValueCounts
.size() != 1)
7064 usesOnlyOneValue
= false;
7065 if (!Value
.getNode() && !ValueCounts
.empty())
7066 Value
= ValueCounts
.begin()->first
;
7068 if (ValueCounts
.empty())
7069 return DAG
.getUNDEF(VT
);
7071 // Loads are better lowered with insert_vector_elt/ARMISD::BUILD_VECTOR.
7072 // Keep going if we are hitting this case.
7073 if (isOnlyLowElement
&& !ISD::isNormalLoad(Value
.getNode()))
7074 return DAG
.getNode(ISD::SCALAR_TO_VECTOR
, dl
, VT
, Value
);
7076 unsigned EltSize
= VT
.getScalarSizeInBits();
7078 // Use VDUP for non-constant splats. For f32 constant splats, reduce to
7079 // i32 and try again.
7080 if (hasDominantValue
&& EltSize
<= 32) {
7084 // If we are VDUPing a value that comes directly from a vector, that will
7085 // cause an unnecessary move to and from a GPR, where instead we could
7086 // just use VDUPLANE. We can only do this if the lane being extracted
7087 // is at a constant index, as the VDUP from lane instructions only have
7088 // constant-index forms.
7089 ConstantSDNode
*constIndex
;
7090 if (Value
->getOpcode() == ISD::EXTRACT_VECTOR_ELT
&&
7091 (constIndex
= dyn_cast
<ConstantSDNode
>(Value
->getOperand(1)))) {
7092 // We need to create a new undef vector to use for the VDUPLANE if the
7093 // size of the vector from which we get the value is different than the
7094 // size of the vector that we need to create. We will insert the element
7095 // such that the register coalescer will remove unnecessary copies.
7096 if (VT
!= Value
->getOperand(0).getValueType()) {
7097 unsigned index
= constIndex
->getAPIntValue().getLimitedValue() %
7098 VT
.getVectorNumElements();
7099 N
= DAG
.getNode(ARMISD::VDUPLANE
, dl
, VT
,
7100 DAG
.getNode(ISD::INSERT_VECTOR_ELT
, dl
, VT
, DAG
.getUNDEF(VT
),
7101 Value
, DAG
.getConstant(index
, dl
, MVT::i32
)),
7102 DAG
.getConstant(index
, dl
, MVT::i32
));
7104 N
= DAG
.getNode(ARMISD::VDUPLANE
, dl
, VT
,
7105 Value
->getOperand(0), Value
->getOperand(1));
7107 N
= DAG
.getNode(ARMISD::VDUP
, dl
, VT
, Value
);
7109 if (!usesOnlyOneValue
) {
7110 // The dominant value was splatted as 'N', but we now have to insert
7111 // all differing elements.
7112 for (unsigned I
= 0; I
< NumElts
; ++I
) {
7113 if (Op
.getOperand(I
) == Value
)
7115 SmallVector
<SDValue
, 3> Ops
;
7117 Ops
.push_back(Op
.getOperand(I
));
7118 Ops
.push_back(DAG
.getConstant(I
, dl
, MVT::i32
));
7119 N
= DAG
.getNode(ISD::INSERT_VECTOR_ELT
, dl
, VT
, Ops
);
7124 if (VT
.getVectorElementType().isFloatingPoint()) {
7125 SmallVector
<SDValue
, 8> Ops
;
7126 MVT FVT
= VT
.getVectorElementType().getSimpleVT();
7127 assert(FVT
== MVT::f32
|| FVT
== MVT::f16
);
7128 MVT IVT
= (FVT
== MVT::f32
) ? MVT::i32
: MVT::i16
;
7129 for (unsigned i
= 0; i
< NumElts
; ++i
)
7130 Ops
.push_back(DAG
.getNode(ISD::BITCAST
, dl
, IVT
,
7132 EVT VecVT
= EVT::getVectorVT(*DAG
.getContext(), IVT
, NumElts
);
7133 SDValue Val
= DAG
.getBuildVector(VecVT
, dl
, Ops
);
7134 Val
= LowerBUILD_VECTOR(Val
, DAG
, ST
);
7136 return DAG
.getNode(ISD::BITCAST
, dl
, VT
, Val
);
7138 if (usesOnlyOneValue
) {
7139 SDValue Val
= IsSingleInstrConstant(Value
, DAG
, ST
, dl
);
7140 if (isConstant
&& Val
.getNode())
7141 return DAG
.getNode(ARMISD::VDUP
, dl
, VT
, Val
);
7145 // If all elements are constants and the case above didn't get hit, fall back
7146 // to the default expansion, which will generate a load from the constant
7151 // Empirical tests suggest this is rarely worth it for vectors of length <= 2.
7153 SDValue shuffle
= ReconstructShuffle(Op
, DAG
);
7154 if (shuffle
!= SDValue())
7158 if (ST
->hasNEON() && VT
.is128BitVector() && VT
!= MVT::v2f64
&& VT
!= MVT::v4f32
) {
7159 // If we haven't found an efficient lowering, try splitting a 128-bit vector
7160 // into two 64-bit vectors; we might discover a better way to lower it.
7161 SmallVector
<SDValue
, 64> Ops(Op
->op_begin(), Op
->op_begin() + NumElts
);
7162 EVT ExtVT
= VT
.getVectorElementType();
7163 EVT HVT
= EVT::getVectorVT(*DAG
.getContext(), ExtVT
, NumElts
/ 2);
7165 DAG
.getBuildVector(HVT
, dl
, makeArrayRef(&Ops
[0], NumElts
/ 2));
7166 if (Lower
.getOpcode() == ISD::BUILD_VECTOR
)
7167 Lower
= LowerBUILD_VECTOR(Lower
, DAG
, ST
);
7168 SDValue Upper
= DAG
.getBuildVector(
7169 HVT
, dl
, makeArrayRef(&Ops
[NumElts
/ 2], NumElts
/ 2));
7170 if (Upper
.getOpcode() == ISD::BUILD_VECTOR
)
7171 Upper
= LowerBUILD_VECTOR(Upper
, DAG
, ST
);
7173 return DAG
.getNode(ISD::CONCAT_VECTORS
, dl
, VT
, Lower
, Upper
);
7176 // Vectors with 32- or 64-bit elements can be built by directly assigning
7177 // the subregisters. Lower it to an ARMISD::BUILD_VECTOR so the operands
7178 // will be legalized.
7179 if (EltSize
>= 32) {
7180 // Do the expansion with floating-point types, since that is what the VFP
7181 // registers are defined to use, and since i64 is not legal.
7182 EVT EltVT
= EVT::getFloatingPointVT(EltSize
);
7183 EVT VecVT
= EVT::getVectorVT(*DAG
.getContext(), EltVT
, NumElts
);
7184 SmallVector
<SDValue
, 8> Ops
;
7185 for (unsigned i
= 0; i
< NumElts
; ++i
)
7186 Ops
.push_back(DAG
.getNode(ISD::BITCAST
, dl
, EltVT
, Op
.getOperand(i
)));
7187 SDValue Val
= DAG
.getNode(ARMISD::BUILD_VECTOR
, dl
, VecVT
, Ops
);
7188 return DAG
.getNode(ISD::BITCAST
, dl
, VT
, Val
);
7191 // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we
7192 // know the default expansion would otherwise fall back on something even
7193 // worse. For a vector with one or two non-undef values, that's
7194 // scalar_to_vector for the elements followed by a shuffle (provided the
7195 // shuffle is valid for the target) and materialization element by element
7196 // on the stack followed by a load for everything else.
7197 if (!isConstant
&& !usesOnlyOneValue
) {
7198 SDValue Vec
= DAG
.getUNDEF(VT
);
7199 for (unsigned i
= 0 ; i
< NumElts
; ++i
) {
7200 SDValue V
= Op
.getOperand(i
);
7203 SDValue LaneIdx
= DAG
.getConstant(i
, dl
, MVT::i32
);
7204 Vec
= DAG
.getNode(ISD::INSERT_VECTOR_ELT
, dl
, VT
, Vec
, V
, LaneIdx
);
7212 // Gather data to see if the operation can be modelled as a
7213 // shuffle in combination with VEXTs.
7214 SDValue
ARMTargetLowering::ReconstructShuffle(SDValue Op
,
7215 SelectionDAG
&DAG
) const {
7216 assert(Op
.getOpcode() == ISD::BUILD_VECTOR
&& "Unknown opcode!");
7218 EVT VT
= Op
.getValueType();
7219 unsigned NumElts
= VT
.getVectorNumElements();
7221 struct ShuffleSourceInfo
{
7223 unsigned MinElt
= std::numeric_limits
<unsigned>::max();
7224 unsigned MaxElt
= 0;
7226 // We may insert some combination of BITCASTs and VEXT nodes to force Vec to
7227 // be compatible with the shuffle we intend to construct. As a result
7228 // ShuffleVec will be some sliding window into the original Vec.
7231 // Code should guarantee that element i in Vec starts at element "WindowBase
7232 // + i * WindowScale in ShuffleVec".
7234 int WindowScale
= 1;
7236 ShuffleSourceInfo(SDValue Vec
) : Vec(Vec
), ShuffleVec(Vec
) {}
7238 bool operator ==(SDValue OtherVec
) { return Vec
== OtherVec
; }
7241 // First gather all vectors used as an immediate source for this BUILD_VECTOR
7243 SmallVector
<ShuffleSourceInfo
, 2> Sources
;
7244 for (unsigned i
= 0; i
< NumElts
; ++i
) {
7245 SDValue V
= Op
.getOperand(i
);
7248 else if (V
.getOpcode() != ISD::EXTRACT_VECTOR_ELT
) {
7249 // A shuffle can only come from building a vector from various
7250 // elements of other vectors.
7252 } else if (!isa
<ConstantSDNode
>(V
.getOperand(1))) {
7253 // Furthermore, shuffles require a constant mask, whereas extractelts
7254 // accept variable indices.
7258 // Add this element source to the list if it's not already there.
7259 SDValue SourceVec
= V
.getOperand(0);
7260 auto Source
= llvm::find(Sources
, SourceVec
);
7261 if (Source
== Sources
.end())
7262 Source
= Sources
.insert(Sources
.end(), ShuffleSourceInfo(SourceVec
));
7264 // Update the minimum and maximum lane number seen.
7265 unsigned EltNo
= cast
<ConstantSDNode
>(V
.getOperand(1))->getZExtValue();
7266 Source
->MinElt
= std::min(Source
->MinElt
, EltNo
);
7267 Source
->MaxElt
= std::max(Source
->MaxElt
, EltNo
);
7270 // Currently only do something sane when at most two source vectors
7272 if (Sources
.size() > 2)
7275 // Find out the smallest element size among result and two sources, and use
7276 // it as element size to build the shuffle_vector.
7277 EVT SmallestEltTy
= VT
.getVectorElementType();
7278 for (auto &Source
: Sources
) {
7279 EVT SrcEltTy
= Source
.Vec
.getValueType().getVectorElementType();
7280 if (SrcEltTy
.bitsLT(SmallestEltTy
))
7281 SmallestEltTy
= SrcEltTy
;
7283 unsigned ResMultiplier
=
7284 VT
.getScalarSizeInBits() / SmallestEltTy
.getSizeInBits();
7285 NumElts
= VT
.getSizeInBits() / SmallestEltTy
.getSizeInBits();
7286 EVT ShuffleVT
= EVT::getVectorVT(*DAG
.getContext(), SmallestEltTy
, NumElts
);
7288 // If the source vector is too wide or too narrow, we may nevertheless be able
7289 // to construct a compatible shuffle either by concatenating it with UNDEF or
7290 // extracting a suitable range of elements.
7291 for (auto &Src
: Sources
) {
7292 EVT SrcVT
= Src
.ShuffleVec
.getValueType();
7294 if (SrcVT
.getSizeInBits() == VT
.getSizeInBits())
7297 // This stage of the search produces a source with the same element type as
7298 // the original, but with a total width matching the BUILD_VECTOR output.
7299 EVT EltVT
= SrcVT
.getVectorElementType();
7300 unsigned NumSrcElts
= VT
.getSizeInBits() / EltVT
.getSizeInBits();
7301 EVT DestVT
= EVT::getVectorVT(*DAG
.getContext(), EltVT
, NumSrcElts
);
7303 if (SrcVT
.getSizeInBits() < VT
.getSizeInBits()) {
7304 if (2 * SrcVT
.getSizeInBits() != VT
.getSizeInBits())
7306 // We can pad out the smaller vector for free, so if it's part of a
7309 DAG
.getNode(ISD::CONCAT_VECTORS
, dl
, DestVT
, Src
.ShuffleVec
,
7310 DAG
.getUNDEF(Src
.ShuffleVec
.getValueType()));
7314 if (SrcVT
.getSizeInBits() != 2 * VT
.getSizeInBits())
7317 if (Src
.MaxElt
- Src
.MinElt
>= NumSrcElts
) {
7318 // Span too large for a VEXT to cope
7322 if (Src
.MinElt
>= NumSrcElts
) {
7323 // The extraction can just take the second half
7325 DAG
.getNode(ISD::EXTRACT_SUBVECTOR
, dl
, DestVT
, Src
.ShuffleVec
,
7326 DAG
.getConstant(NumSrcElts
, dl
, MVT::i32
));
7327 Src
.WindowBase
= -NumSrcElts
;
7328 } else if (Src
.MaxElt
< NumSrcElts
) {
7329 // The extraction can just take the first half
7331 DAG
.getNode(ISD::EXTRACT_SUBVECTOR
, dl
, DestVT
, Src
.ShuffleVec
,
7332 DAG
.getConstant(0, dl
, MVT::i32
));
7334 // An actual VEXT is needed
7336 DAG
.getNode(ISD::EXTRACT_SUBVECTOR
, dl
, DestVT
, Src
.ShuffleVec
,
7337 DAG
.getConstant(0, dl
, MVT::i32
));
7339 DAG
.getNode(ISD::EXTRACT_SUBVECTOR
, dl
, DestVT
, Src
.ShuffleVec
,
7340 DAG
.getConstant(NumSrcElts
, dl
, MVT::i32
));
7342 Src
.ShuffleVec
= DAG
.getNode(ARMISD::VEXT
, dl
, DestVT
, VEXTSrc1
,
7344 DAG
.getConstant(Src
.MinElt
, dl
, MVT::i32
));
7345 Src
.WindowBase
= -Src
.MinElt
;
7349 // Another possible incompatibility occurs from the vector element types. We
7350 // can fix this by bitcasting the source vectors to the same type we intend
7352 for (auto &Src
: Sources
) {
7353 EVT SrcEltTy
= Src
.ShuffleVec
.getValueType().getVectorElementType();
7354 if (SrcEltTy
== SmallestEltTy
)
7356 assert(ShuffleVT
.getVectorElementType() == SmallestEltTy
);
7357 Src
.ShuffleVec
= DAG
.getNode(ISD::BITCAST
, dl
, ShuffleVT
, Src
.ShuffleVec
);
7358 Src
.WindowScale
= SrcEltTy
.getSizeInBits() / SmallestEltTy
.getSizeInBits();
7359 Src
.WindowBase
*= Src
.WindowScale
;
7362 // Final sanity check before we try to actually produce a shuffle.
7363 LLVM_DEBUG(for (auto Src
7365 assert(Src
.ShuffleVec
.getValueType() == ShuffleVT
););
7367 // The stars all align, our next step is to produce the mask for the shuffle.
7368 SmallVector
<int, 8> Mask(ShuffleVT
.getVectorNumElements(), -1);
7369 int BitsPerShuffleLane
= ShuffleVT
.getScalarSizeInBits();
7370 for (unsigned i
= 0; i
< VT
.getVectorNumElements(); ++i
) {
7371 SDValue Entry
= Op
.getOperand(i
);
7372 if (Entry
.isUndef())
7375 auto Src
= llvm::find(Sources
, Entry
.getOperand(0));
7376 int EltNo
= cast
<ConstantSDNode
>(Entry
.getOperand(1))->getSExtValue();
7378 // EXTRACT_VECTOR_ELT performs an implicit any_ext; BUILD_VECTOR an implicit
7379 // trunc. So only std::min(SrcBits, DestBits) actually get defined in this
7381 EVT OrigEltTy
= Entry
.getOperand(0).getValueType().getVectorElementType();
7382 int BitsDefined
= std::min(OrigEltTy
.getSizeInBits(),
7383 VT
.getScalarSizeInBits());
7384 int LanesDefined
= BitsDefined
/ BitsPerShuffleLane
;
7386 // This source is expected to fill ResMultiplier lanes of the final shuffle,
7387 // starting at the appropriate offset.
7388 int *LaneMask
= &Mask
[i
* ResMultiplier
];
7390 int ExtractBase
= EltNo
* Src
->WindowScale
+ Src
->WindowBase
;
7391 ExtractBase
+= NumElts
* (Src
- Sources
.begin());
7392 for (int j
= 0; j
< LanesDefined
; ++j
)
7393 LaneMask
[j
] = ExtractBase
+ j
;
7397 // We can't handle more than two sources. This should have already
7398 // been checked before this point.
7399 assert(Sources
.size() <= 2 && "Too many sources!");
7401 SDValue ShuffleOps
[] = { DAG
.getUNDEF(ShuffleVT
), DAG
.getUNDEF(ShuffleVT
) };
7402 for (unsigned i
= 0; i
< Sources
.size(); ++i
)
7403 ShuffleOps
[i
] = Sources
[i
].ShuffleVec
;
7405 SDValue Shuffle
= buildLegalVectorShuffle(ShuffleVT
, dl
, ShuffleOps
[0],
7406 ShuffleOps
[1], Mask
, DAG
);
7409 return DAG
.getNode(ISD::BITCAST
, dl
, VT
, Shuffle
);
7412 enum ShuffleOpCodes
{
7413 OP_COPY
= 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
7422 OP_VUZPL
, // VUZP, left result
7423 OP_VUZPR
, // VUZP, right result
7424 OP_VZIPL
, // VZIP, left result
7425 OP_VZIPR
, // VZIP, right result
7426 OP_VTRNL
, // VTRN, left result
7427 OP_VTRNR
// VTRN, right result
7430 static bool isLegalMVEShuffleOp(unsigned PFEntry
) {
7431 unsigned OpNum
= (PFEntry
>> 26) & 0x0F;
/// isShuffleMaskLegal - Targets can use this to indicate that they only
/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
/// are assumed to be legal.
bool ARMTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
  if (VT.getVectorNumElements() == 4 &&
      (VT.is128BitVector() || VT.is64BitVector())) {
    unsigned PFIndexes[4];
    for (unsigned i = 0; i != 4; ++i) {
      if (M[i] < 0)
        PFIndexes[i] = 8;
      else
        PFIndexes[i] = M[i];
    }

    // Compute the index in the perfect shuffle table.
    unsigned PFTableIndex =
        PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
    unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
    unsigned Cost = (PFEntry >> 30);

    if (Cost <= 4 && (Subtarget->hasNEON() || isLegalMVEShuffleOp(PFEntry)))
      return true;
  }

  bool ReverseVEXT, isV_UNDEF;
  unsigned Imm, WhichResult;

  unsigned EltSize = VT.getScalarSizeInBits();
  if (EltSize >= 32 ||
      ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
      ShuffleVectorInst::isIdentityMask(M) ||
      isVREVMask(M, VT, 64) ||
      isVREVMask(M, VT, 32) ||
      isVREVMask(M, VT, 16))
    return true;
  else if (Subtarget->hasNEON() &&
           (isVEXTMask(M, VT, ReverseVEXT, Imm) ||
            isVTBLMask(M, VT) ||
            isNEONTwoResultShuffleMask(M, VT, WhichResult, isV_UNDEF)))
    return true;
  else if (Subtarget->hasNEON() && (VT == MVT::v8i16 || VT == MVT::v16i8) &&
           isReverseMask(M, VT))
    return true;
  else
    return false;
}
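// Worked example (illustrative, not from the original source): for the 4-lane
// mask <0,2,4,6>, the perfect-shuffle index computed above is
//   0*9*9*9 + 2*9*9 + 4*9 + 6 = 204,
// and PerfectShuffleTable[204] encodes the cheapest op sequence (plus its
// cost) for producing that permutation.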
/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
/// the specified operations to build the shuffle.
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
                                      SDValue RHS, SelectionDAG &DAG,
                                      const SDLoc &dl) {
  unsigned OpNum = (PFEntry >> 26) & 0x0F;
  unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
  unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);

  if (OpNum == OP_COPY) {
    if (LHSID == (1*9+2)*9+3) return LHS;
    assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
    return RHS;
  }

  SDValue OpLHS, OpRHS;
  OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
  OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
  EVT VT = OpLHS.getValueType();

  switch (OpNum) {
  default: llvm_unreachable("Unknown shuffle opcode!");
  case OP_VREV:
    // VREV divides the vector in half and swaps within the half.
    if (VT.getVectorElementType() == MVT::i32 ||
        VT.getVectorElementType() == MVT::f32)
      return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS);
    // vrev <4 x i16> -> VREV32
    if (VT.getVectorElementType() == MVT::i16)
      return DAG.getNode(ARMISD::VREV32, dl, VT, OpLHS);
    // vrev <4 x i8> -> VREV16
    assert(VT.getVectorElementType() == MVT::i8);
    return DAG.getNode(ARMISD::VREV16, dl, VT, OpLHS);
  case OP_VDUP0:
  case OP_VDUP1:
  case OP_VDUP2:
  case OP_VDUP3:
    return DAG.getNode(ARMISD::VDUPLANE, dl, VT,
                       OpLHS, DAG.getConstant(OpNum-OP_VDUP0, dl, MVT::i32));
  case OP_VEXT1:
  case OP_VEXT2:
  case OP_VEXT3:
    return DAG.getNode(ARMISD::VEXT, dl, VT,
                       OpLHS, OpRHS,
                       DAG.getConstant(OpNum - OP_VEXT1 + 1, dl, MVT::i32));
  case OP_VUZPL:
  case OP_VUZPR:
    return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
                       OpLHS, OpRHS).getValue(OpNum-OP_VUZPL);
  case OP_VZIPL:
  case OP_VZIPR:
    return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
                       OpLHS, OpRHS).getValue(OpNum-OP_VZIPL);
  case OP_VTRNL:
  case OP_VTRNR:
    return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
                       OpLHS, OpRHS).getValue(OpNum-OP_VTRNL);
  }
}
static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op,
                                       ArrayRef<int> ShuffleMask,
                                       SelectionDAG &DAG) {
  // Check to see if we can use the VTBL instruction.
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  SDLoc DL(Op);

  SmallVector<SDValue, 8> VTBLMask;
  for (ArrayRef<int>::iterator
         I = ShuffleMask.begin(), E = ShuffleMask.end(); I != E; ++I)
    VTBLMask.push_back(DAG.getConstant(*I, DL, MVT::i32));

  if (V2.getNode()->isUndef())
    return DAG.getNode(ARMISD::VTBL1, DL, MVT::v8i8, V1,
                       DAG.getBuildVector(MVT::v8i8, DL, VTBLMask));

  return DAG.getNode(ARMISD::VTBL2, DL, MVT::v8i8, V1, V2,
                     DAG.getBuildVector(MVT::v8i8, DL, VTBLMask));
}
static SDValue LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(SDValue Op,
                                                      SelectionDAG &DAG) {
  SDLoc DL(Op);
  SDValue OpLHS = Op.getOperand(0);
  EVT VT = OpLHS.getValueType();

  assert((VT == MVT::v8i16 || VT == MVT::v16i8) &&
         "Expect an v8i16/v16i8 type");
  OpLHS = DAG.getNode(ARMISD::VREV64, DL, VT, OpLHS);
  // For a v16i8 type: After the VREV, we have got <8, ...15, 8, ..., 0>. Now,
  // extract the first 8 bytes into the top double word and the last 8 bytes
  // into the bottom double word. The v8i16 case is similar.
  unsigned ExtractNum = (VT == MVT::v16i8) ? 8 : 4;
  return DAG.getNode(ARMISD::VEXT, DL, VT, OpLHS, OpLHS,
                     DAG.getConstant(ExtractNum, DL, MVT::i32));
}
static EVT getVectorTyFromPredicateVector(EVT VT) {
  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::v4i1:
    return MVT::v4i32;
  case MVT::v8i1:
    return MVT::v8i16;
  case MVT::v16i1:
    return MVT::v16i8;
  default:
    llvm_unreachable("Unexpected vector predicate type");
  }
}
static SDValue PromoteMVEPredVector(SDLoc dl, SDValue Pred, EVT VT,
                                    SelectionDAG &DAG) {
  // Converting from boolean predicates to integers involves creating a vector
  // of all ones or all zeroes and selecting the lanes based upon the real
  // predicate.
  SDValue AllOnes =
      DAG.getTargetConstant(ARM_AM::createVMOVModImm(0xe, 0xff), dl, MVT::i32);
  AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v16i8, AllOnes);

  SDValue AllZeroes =
      DAG.getTargetConstant(ARM_AM::createVMOVModImm(0xe, 0x0), dl, MVT::i32);
  AllZeroes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v16i8, AllZeroes);

  // Get full vector type from predicate type
  EVT NewVT = getVectorTyFromPredicateVector(VT);

  SDValue RecastV1;
  // If the real predicate is an v8i1 or v4i1 (not v16i1) then we need to recast
  // this to a v16i1. This cannot be done with an ordinary bitcast because the
  // sizes are not the same. We have to use a MVE specific PREDICATE_CAST node,
  // since we know in hardware the sizes are really the same.
  if (VT != MVT::v16i1)
    RecastV1 = DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::v16i1, Pred);
  else
    RecastV1 = Pred;

  // Select either all ones or zeroes depending upon the real predicate bits.
  SDValue PredAsVector =
      DAG.getNode(ISD::VSELECT, dl, MVT::v16i8, RecastV1, AllOnes, AllZeroes);

  // Recast our new predicate-as-integer v16i8 vector into something
  // appropriate for the shuffle, i.e. v4i32 for a real v4i1 predicate.
  return DAG.getNode(ISD::BITCAST, dl, NewVT, PredAsVector);
}
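// Illustrative note (not from the original source): for a v4i1 predicate with
// lanes <1,0,1,1>, the VSELECT above produces a v16i8 of 0xff/0x00 bytes and
// the final bitcast reinterprets it as a v4i32 in which each original i1 lane
// has become an all-ones or all-zeroes i32 lane.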
static SDValue LowerVECTOR_SHUFFLE_i1(SDValue Op, SelectionDAG &DAG,
                                      const ARMSubtarget *ST) {
  EVT VT = Op.getValueType();
  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
  ArrayRef<int> ShuffleMask = SVN->getMask();

  assert(ST->hasMVEIntegerOps() &&
         "No support for vector shuffle of boolean predicates");

  SDValue V1 = Op.getOperand(0);
  SDLoc dl(Op);
  if (isReverseMask(ShuffleMask, VT)) {
    SDValue cast = DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::i32, V1);
    SDValue rbit = DAG.getNode(ISD::BITREVERSE, dl, MVT::i32, cast);
    SDValue srl = DAG.getNode(ISD::SRL, dl, MVT::i32, rbit,
                              DAG.getConstant(16, dl, MVT::i32));
    return DAG.getNode(ARMISD::PREDICATE_CAST, dl, VT, srl);
  }

  // Until we can come up with optimised cases for every single vector
  // shuffle in existence we have chosen the least painful strategy. This is
  // to essentially promote the boolean predicate to a 8-bit integer, where
  // each predicate represents a byte. Then we fall back on a normal integer
  // vector shuffle and convert the result back into a predicate vector. In
  // many cases the generated code might be even better than scalar code
  // operating on bits. Just imagine trying to shuffle 8 arbitrary 2-bit
  // fields in a register into 8 other arbitrary 2-bit fields!
  SDValue PredAsVector = PromoteMVEPredVector(dl, V1, VT, DAG);
  EVT NewVT = PredAsVector.getValueType();

  SDValue Shuffled = DAG.getVectorShuffle(NewVT, dl, PredAsVector,
                                          DAG.getUNDEF(NewVT), ShuffleMask);

  // Now return the result of comparing the shuffled vector with zero,
  // which will generate a real predicate, i.e. v4i1, v8i1 or v16i1.
  return DAG.getNode(ARMISD::VCMPZ, dl, VT, Shuffled,
                     DAG.getConstant(ARMCC::NE, dl, MVT::i32));
}
7678 static SDValue
LowerVECTOR_SHUFFLE(SDValue Op
, SelectionDAG
&DAG
,
7679 const ARMSubtarget
*ST
) {
7680 SDValue V1
= Op
.getOperand(0);
7681 SDValue V2
= Op
.getOperand(1);
7683 EVT VT
= Op
.getValueType();
7684 ShuffleVectorSDNode
*SVN
= cast
<ShuffleVectorSDNode
>(Op
.getNode());
7685 unsigned EltSize
= VT
.getScalarSizeInBits();
7687 if (ST
->hasMVEIntegerOps() && EltSize
== 1)
7688 return LowerVECTOR_SHUFFLE_i1(Op
, DAG
, ST
);
7690 // Convert shuffles that are directly supported on NEON to target-specific
7691 // DAG nodes, instead of keeping them as shuffles and matching them again
7692 // during code selection. This is more efficient and avoids the possibility
7693 // of inconsistencies between legalization and selection.
7694 // FIXME: floating-point vectors should be canonicalized to integer vectors
7695 // of the same time so that they get CSEd properly.
7696 ArrayRef
<int> ShuffleMask
= SVN
->getMask();
7698 if (EltSize
<= 32) {
7699 if (SVN
->isSplat()) {
7700 int Lane
= SVN
->getSplatIndex();
7701 // If this is undef splat, generate it via "just" vdup, if possible.
7702 if (Lane
== -1) Lane
= 0;
7704 // Test if V1 is a SCALAR_TO_VECTOR.
7705 if (Lane
== 0 && V1
.getOpcode() == ISD::SCALAR_TO_VECTOR
) {
7706 return DAG
.getNode(ARMISD::VDUP
, dl
, VT
, V1
.getOperand(0));
7708 // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR
7709 // (and probably will turn into a SCALAR_TO_VECTOR once legalization
7711 if (Lane
== 0 && V1
.getOpcode() == ISD::BUILD_VECTOR
&&
7712 !isa
<ConstantSDNode
>(V1
.getOperand(0))) {
7713 bool IsScalarToVector
= true;
7714 for (unsigned i
= 1, e
= V1
.getNumOperands(); i
!= e
; ++i
)
7715 if (!V1
.getOperand(i
).isUndef()) {
7716 IsScalarToVector
= false;
7719 if (IsScalarToVector
)
7720 return DAG
.getNode(ARMISD::VDUP
, dl
, VT
, V1
.getOperand(0));
7722 return DAG
.getNode(ARMISD::VDUPLANE
, dl
, VT
, V1
,
7723 DAG
.getConstant(Lane
, dl
, MVT::i32
));
7726 bool ReverseVEXT
= false;
7728 if (ST
->hasNEON() && isVEXTMask(ShuffleMask
, VT
, ReverseVEXT
, Imm
)) {
7731 return DAG
.getNode(ARMISD::VEXT
, dl
, VT
, V1
, V2
,
7732 DAG
.getConstant(Imm
, dl
, MVT::i32
));
7735 if (isVREVMask(ShuffleMask
, VT
, 64))
7736 return DAG
.getNode(ARMISD::VREV64
, dl
, VT
, V1
);
7737 if (isVREVMask(ShuffleMask
, VT
, 32))
7738 return DAG
.getNode(ARMISD::VREV32
, dl
, VT
, V1
);
7739 if (isVREVMask(ShuffleMask
, VT
, 16))
7740 return DAG
.getNode(ARMISD::VREV16
, dl
, VT
, V1
);
7742 if (ST
->hasNEON() && V2
->isUndef() && isSingletonVEXTMask(ShuffleMask
, VT
, Imm
)) {
7743 return DAG
.getNode(ARMISD::VEXT
, dl
, VT
, V1
, V1
,
7744 DAG
.getConstant(Imm
, dl
, MVT::i32
));
7747 // Check for Neon shuffles that modify both input vectors in place.
7748 // If both results are used, i.e., if there are two shuffles with the same
7749 // source operands and with masks corresponding to both results of one of
7750 // these operations, DAG memoization will ensure that a single node is
7751 // used for both shuffles.
7752 unsigned WhichResult
= 0;
7753 bool isV_UNDEF
= false;
7754 if (ST
->hasNEON()) {
7755 if (unsigned ShuffleOpc
= isNEONTwoResultShuffleMask(
7756 ShuffleMask
, VT
, WhichResult
, isV_UNDEF
)) {
7759 return DAG
.getNode(ShuffleOpc
, dl
, DAG
.getVTList(VT
, VT
), V1
, V2
)
7760 .getValue(WhichResult
);
7764 // Also check for these shuffles through CONCAT_VECTORS: we canonicalize
7765 // shuffles that produce a result larger than their operands with:
7766 // shuffle(concat(v1, undef), concat(v2, undef))
7768 // shuffle(concat(v1, v2), undef)
7769 // because we can access quad vectors (see PerformVECTOR_SHUFFLECombine).
7771 // This is useful in the general case, but there are special cases where
7772 // native shuffles produce larger results: the two-result ops.
7774 // Look through the concat when lowering them:
7775 // shuffle(concat(v1, v2), undef)
7777 // concat(VZIP(v1, v2):0, :1)
7779 if (ST
->hasNEON() && V1
->getOpcode() == ISD::CONCAT_VECTORS
&& V2
->isUndef()) {
7780 SDValue SubV1
= V1
->getOperand(0);
7781 SDValue SubV2
= V1
->getOperand(1);
7782 EVT SubVT
= SubV1
.getValueType();
7784 // We expect these to have been canonicalized to -1.
7785 assert(llvm::all_of(ShuffleMask
, [&](int i
) {
7786 return i
< (int)VT
.getVectorNumElements();
7787 }) && "Unexpected shuffle index into UNDEF operand!");
7789 if (unsigned ShuffleOpc
= isNEONTwoResultShuffleMask(
7790 ShuffleMask
, SubVT
, WhichResult
, isV_UNDEF
)) {
7793 assert((WhichResult
== 0) &&
7794 "In-place shuffle of concat can only have one result!");
7795 SDValue Res
= DAG
.getNode(ShuffleOpc
, dl
, DAG
.getVTList(SubVT
, SubVT
),
7797 return DAG
.getNode(ISD::CONCAT_VECTORS
, dl
, VT
, Res
.getValue(0),
7803 // If the shuffle is not directly supported and it has 4 elements, use
7804 // the PerfectShuffle-generated table to synthesize it from other shuffles.
7805 unsigned NumElts
= VT
.getVectorNumElements();
7807 unsigned PFIndexes
[4];
7808 for (unsigned i
= 0; i
!= 4; ++i
) {
7809 if (ShuffleMask
[i
] < 0)
7812 PFIndexes
[i
] = ShuffleMask
[i
];
7815 // Compute the index in the perfect shuffle table.
7816 unsigned PFTableIndex
=
7817 PFIndexes
[0]*9*9*9+PFIndexes
[1]*9*9+PFIndexes
[2]*9+PFIndexes
[3];
7818 unsigned PFEntry
= PerfectShuffleTable
[PFTableIndex
];
7819 unsigned Cost
= (PFEntry
>> 30);
7823 return GeneratePerfectShuffle(PFEntry
, V1
, V2
, DAG
, dl
);
7824 else if (isLegalMVEShuffleOp(PFEntry
)) {
7825 unsigned LHSID
= (PFEntry
>> 13) & ((1 << 13)-1);
7826 unsigned RHSID
= (PFEntry
>> 0) & ((1 << 13)-1);
7827 unsigned PFEntryLHS
= PerfectShuffleTable
[LHSID
];
7828 unsigned PFEntryRHS
= PerfectShuffleTable
[RHSID
];
7829 if (isLegalMVEShuffleOp(PFEntryLHS
) && isLegalMVEShuffleOp(PFEntryRHS
))
7830 return GeneratePerfectShuffle(PFEntry
, V1
, V2
, DAG
, dl
);
7835 // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs.
7836 if (EltSize
>= 32) {
7837 // Do the expansion with floating-point types, since that is what the VFP
7838 // registers are defined to use, and since i64 is not legal.
7839 EVT EltVT
= EVT::getFloatingPointVT(EltSize
);
7840 EVT VecVT
= EVT::getVectorVT(*DAG
.getContext(), EltVT
, NumElts
);
7841 V1
= DAG
.getNode(ISD::BITCAST
, dl
, VecVT
, V1
);
7842 V2
= DAG
.getNode(ISD::BITCAST
, dl
, VecVT
, V2
);
7843 SmallVector
<SDValue
, 8> Ops
;
7844 for (unsigned i
= 0; i
< NumElts
; ++i
) {
7845 if (ShuffleMask
[i
] < 0)
7846 Ops
.push_back(DAG
.getUNDEF(EltVT
));
7848 Ops
.push_back(DAG
.getNode(ISD::EXTRACT_VECTOR_ELT
, dl
, EltVT
,
7849 ShuffleMask
[i
] < (int)NumElts
? V1
: V2
,
7850 DAG
.getConstant(ShuffleMask
[i
] & (NumElts
-1),
7853 SDValue Val
= DAG
.getNode(ARMISD::BUILD_VECTOR
, dl
, VecVT
, Ops
);
7854 return DAG
.getNode(ISD::BITCAST
, dl
, VT
, Val
);
7857 if (ST
->hasNEON() && (VT
== MVT::v8i16
|| VT
== MVT::v16i8
) && isReverseMask(ShuffleMask
, VT
))
7858 return LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(Op
, DAG
);
7860 if (ST
->hasNEON() && VT
== MVT::v8i8
)
7861 if (SDValue NewOp
= LowerVECTOR_SHUFFLEv8i8(Op
, ShuffleMask
, DAG
))
static SDValue LowerINSERT_VECTOR_ELT_i1(SDValue Op, SelectionDAG &DAG,
                                         const ARMSubtarget *ST) {
  EVT VecVT = Op.getOperand(0).getValueType();
  SDLoc dl(Op);

  assert(ST->hasMVEIntegerOps() &&
         "LowerINSERT_VECTOR_ELT_i1 called without MVE!");

  SDValue Conv =
      DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::i32, Op->getOperand(0));
  unsigned Lane = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
  unsigned LaneWidth =
      getVectorTyFromPredicateVector(VecVT).getScalarSizeInBits() / 8;
  unsigned Mask = ((1 << LaneWidth) - 1) << Lane * LaneWidth;
  SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::i32,
                            Op.getOperand(1), DAG.getValueType(MVT::i1));
  SDValue BFI = DAG.getNode(ARMISD::BFI, dl, MVT::i32, Conv, Ext,
                            DAG.getConstant(~Mask, dl, MVT::i32));
  return DAG.getNode(ARMISD::PREDICATE_CAST, dl, Op.getValueType(), BFI);
}
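// Worked example (illustrative, not from the original source): inserting into
// lane 3 of a v8i1 gives LaneWidth = 16/8 = 2, so Mask = 0b11 << 6 = 0xC0 and
// the BFI overwrites exactly those two predicate bits with the sign-extended
// i1 value.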
SDValue ARMTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
                                                  SelectionDAG &DAG) const {
  // INSERT_VECTOR_ELT is legal only for immediate indexes.
  SDValue Lane = Op.getOperand(2);
  if (!isa<ConstantSDNode>(Lane))
    return SDValue();

  SDValue Elt = Op.getOperand(1);
  EVT EltVT = Elt.getValueType();

  if (Subtarget->hasMVEIntegerOps() &&
      Op.getValueType().getScalarSizeInBits() == 1)
    return LowerINSERT_VECTOR_ELT_i1(Op, DAG, Subtarget);

  if (getTypeAction(*DAG.getContext(), EltVT) ==
      TargetLowering::TypePromoteFloat) {
    // INSERT_VECTOR_ELT doesn't want f16 operands promoting to f32,
    // but the type system will try to do that if we don't intervene.
    // Reinterpret any such vector-element insertion as one with the
    // corresponding integer types.

    SDLoc dl(Op);

    EVT IEltVT = MVT::getIntegerVT(EltVT.getScalarSizeInBits());
    assert(getTypeAction(*DAG.getContext(), IEltVT) !=
           TargetLowering::TypePromoteFloat);

    SDValue VecIn = Op.getOperand(0);
    EVT VecVT = VecIn.getValueType();
    EVT IVecVT = EVT::getVectorVT(*DAG.getContext(), IEltVT,
                                  VecVT.getVectorNumElements());

    SDValue IElt = DAG.getNode(ISD::BITCAST, dl, IEltVT, Elt);
    SDValue IVecIn = DAG.getNode(ISD::BITCAST, dl, IVecVT, VecIn);
    SDValue IVecOut = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, IVecVT,
                                  IVecIn, IElt, Lane);
    return DAG.getNode(ISD::BITCAST, dl, VecVT, IVecOut);
  }

  return Op;
}
static SDValue LowerEXTRACT_VECTOR_ELT_i1(SDValue Op, SelectionDAG &DAG,
                                          const ARMSubtarget *ST) {
  EVT VecVT = Op.getOperand(0).getValueType();
  SDLoc dl(Op);

  assert(ST->hasMVEIntegerOps() &&
         "LowerEXTRACT_VECTOR_ELT_i1 called without MVE!");

  SDValue Conv =
      DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::i32, Op->getOperand(0));
  unsigned Lane = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  unsigned LaneWidth =
      getVectorTyFromPredicateVector(VecVT).getScalarSizeInBits() / 8;
  SDValue Shift = DAG.getNode(ISD::SRL, dl, MVT::i32, Conv,
                              DAG.getConstant(Lane * LaneWidth, dl, MVT::i32));
  return Shift;
}

static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG,
                                       const ARMSubtarget *ST) {
  // EXTRACT_VECTOR_ELT is legal only for immediate indexes.
  SDValue Lane = Op.getOperand(1);
  if (!isa<ConstantSDNode>(Lane))
    return SDValue();

  SDValue Vec = Op.getOperand(0);
  EVT VT = Vec.getValueType();

  if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == 1)
    return LowerEXTRACT_VECTOR_ELT_i1(Op, DAG, ST);

  if (Op.getValueType() == MVT::i32 && Vec.getScalarValueSizeInBits() < 32) {
    SDLoc dl(Op);
    return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane);
  }

  return Op;
}
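// Illustrative note (not from the original source): on the i1 path above,
// extracting lane 3 of a v8i1 (LaneWidth = 2) casts the predicate to i32 and
// shifts it right by 3 * 2 = 6, leaving the requested predicate bit in the
// low bits of the scalar result.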
7969 static SDValue
LowerCONCAT_VECTORS_i1(SDValue Op
, SelectionDAG
&DAG
,
7970 const ARMSubtarget
*ST
) {
7971 SDValue V1
= Op
.getOperand(0);
7972 SDValue V2
= Op
.getOperand(1);
7974 EVT VT
= Op
.getValueType();
7975 EVT Op1VT
= V1
.getValueType();
7976 EVT Op2VT
= V2
.getValueType();
7977 unsigned NumElts
= VT
.getVectorNumElements();
7979 assert(Op1VT
== Op2VT
&& "Operand types don't match!");
7980 assert(VT
.getScalarSizeInBits() == 1 &&
7981 "Unexpected custom CONCAT_VECTORS lowering");
7982 assert(ST
->hasMVEIntegerOps() &&
7983 "CONCAT_VECTORS lowering only supported for MVE");
7985 SDValue NewV1
= PromoteMVEPredVector(dl
, V1
, Op1VT
, DAG
);
7986 SDValue NewV2
= PromoteMVEPredVector(dl
, V2
, Op2VT
, DAG
);
7988 // We now have Op1 + Op2 promoted to vectors of integers, where v8i1 gets
7989 // promoted to v8i16, etc.
7991 MVT ElType
= getVectorTyFromPredicateVector(VT
).getScalarType().getSimpleVT();
7993 // Extract the vector elements from Op1 and Op2 one by one and truncate them
7994 // to be the right size for the destination. For example, if Op1 is v4i1 then
7995 // the promoted vector is v4i32. The result of concatentation gives a v8i1,
7996 // which when promoted is v8i16. That means each i32 element from Op1 needs
7997 // truncating to i16 and inserting in the result.
7998 EVT ConcatVT
= MVT::getVectorVT(ElType
, NumElts
);
7999 SDValue ConVec
= DAG
.getNode(ISD::UNDEF
, dl
, ConcatVT
);
8000 auto ExractInto
= [&DAG
, &dl
](SDValue NewV
, SDValue ConVec
, unsigned &j
) {
8001 EVT NewVT
= NewV
.getValueType();
8002 EVT ConcatVT
= ConVec
.getValueType();
8003 for (unsigned i
= 0, e
= NewVT
.getVectorNumElements(); i
< e
; i
++, j
++) {
8004 SDValue Elt
= DAG
.getNode(ISD::EXTRACT_VECTOR_ELT
, dl
, MVT::i32
, NewV
,
8005 DAG
.getIntPtrConstant(i
, dl
));
8006 ConVec
= DAG
.getNode(ISD::INSERT_VECTOR_ELT
, dl
, ConcatVT
, ConVec
, Elt
,
8007 DAG
.getConstant(j
, dl
, MVT::i32
));
8012 ConVec
= ExractInto(NewV1
, ConVec
, j
);
8013 ConVec
= ExractInto(NewV2
, ConVec
, j
);
8015 // Now return the result of comparing the subvector with zero,
8016 // which will generate a real predicate, i.e. v4i1, v8i1 or v16i1.
8017 return DAG
.getNode(ARMISD::VCMPZ
, dl
, VT
, ConVec
,
8018 DAG
.getConstant(ARMCC::NE
, dl
, MVT::i32
));
static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG,
                                   const ARMSubtarget *ST) {
  EVT VT = Op->getValueType(0);
  if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == 1)
    return LowerCONCAT_VECTORS_i1(Op, DAG, ST);

  // The only time a CONCAT_VECTORS operation can have legal types is when
  // two 64-bit vectors are concatenated to a 128-bit vector.
  assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 &&
         "unexpected CONCAT_VECTORS");
  SDLoc dl(Op);
  SDValue Val = DAG.getUNDEF(MVT::v2f64);
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  if (!Op0.isUndef())
    Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
                      DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0),
                      DAG.getIntPtrConstant(0, dl));
  if (!Op1.isUndef())
    Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
                      DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1),
                      DAG.getIntPtrConstant(1, dl));
  return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val);
}
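// Illustrative note (not from the original source): concatenating two legal
// 64-bit vectors, e.g. two v8i8 values, bitcasts each half to f64, inserts
// them into lanes 0 and 1 of a v2f64, and bitcasts the pair back to the
// 128-bit result type (v16i8 in that example).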
8046 static SDValue
LowerEXTRACT_SUBVECTOR(SDValue Op
, SelectionDAG
&DAG
,
8047 const ARMSubtarget
*ST
) {
8048 SDValue V1
= Op
.getOperand(0);
8049 SDValue V2
= Op
.getOperand(1);
8051 EVT VT
= Op
.getValueType();
8052 EVT Op1VT
= V1
.getValueType();
8053 unsigned NumElts
= VT
.getVectorNumElements();
8054 unsigned Index
= cast
<ConstantSDNode
>(V2
)->getZExtValue();
8056 assert(VT
.getScalarSizeInBits() == 1 &&
8057 "Unexpected custom EXTRACT_SUBVECTOR lowering");
8058 assert(ST
->hasMVEIntegerOps() &&
8059 "EXTRACT_SUBVECTOR lowering only supported for MVE");
8061 SDValue NewV1
= PromoteMVEPredVector(dl
, V1
, Op1VT
, DAG
);
8063 // We now have Op1 promoted to a vector of integers, where v8i1 gets
8064 // promoted to v8i16, etc.
8066 MVT ElType
= getVectorTyFromPredicateVector(VT
).getScalarType().getSimpleVT();
8068 EVT SubVT
= MVT::getVectorVT(ElType
, NumElts
);
8069 SDValue SubVec
= DAG
.getNode(ISD::UNDEF
, dl
, SubVT
);
8070 for (unsigned i
= Index
, j
= 0; i
< (Index
+ NumElts
); i
++, j
++) {
8071 SDValue Elt
= DAG
.getNode(ISD::EXTRACT_VECTOR_ELT
, dl
, MVT::i32
, NewV1
,
8072 DAG
.getIntPtrConstant(i
, dl
));
8073 SubVec
= DAG
.getNode(ISD::INSERT_VECTOR_ELT
, dl
, SubVT
, SubVec
, Elt
,
8074 DAG
.getConstant(j
, dl
, MVT::i32
));
8077 // Now return the result of comparing the subvector with zero,
8078 // which will generate a real predicate, i.e. v4i1, v8i1 or v16i1.
8079 return DAG
.getNode(ARMISD::VCMPZ
, dl
, VT
, SubVec
,
8080 DAG
.getConstant(ARMCC::NE
, dl
, MVT::i32
));
8083 /// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each
8084 /// element has been zero/sign-extended, depending on the isSigned parameter,
8085 /// from an integer type half its size.
8086 static bool isExtendedBUILD_VECTOR(SDNode
*N
, SelectionDAG
&DAG
,
8088 // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32.
8089 EVT VT
= N
->getValueType(0);
8090 if (VT
== MVT::v2i64
&& N
->getOpcode() == ISD::BITCAST
) {
8091 SDNode
*BVN
= N
->getOperand(0).getNode();
8092 if (BVN
->getValueType(0) != MVT::v4i32
||
8093 BVN
->getOpcode() != ISD::BUILD_VECTOR
)
8095 unsigned LoElt
= DAG
.getDataLayout().isBigEndian() ? 1 : 0;
8096 unsigned HiElt
= 1 - LoElt
;
8097 ConstantSDNode
*Lo0
= dyn_cast
<ConstantSDNode
>(BVN
->getOperand(LoElt
));
8098 ConstantSDNode
*Hi0
= dyn_cast
<ConstantSDNode
>(BVN
->getOperand(HiElt
));
8099 ConstantSDNode
*Lo1
= dyn_cast
<ConstantSDNode
>(BVN
->getOperand(LoElt
+2));
8100 ConstantSDNode
*Hi1
= dyn_cast
<ConstantSDNode
>(BVN
->getOperand(HiElt
+2));
8101 if (!Lo0
|| !Hi0
|| !Lo1
|| !Hi1
)
8104 if (Hi0
->getSExtValue() == Lo0
->getSExtValue() >> 32 &&
8105 Hi1
->getSExtValue() == Lo1
->getSExtValue() >> 32)
8108 if (Hi0
->isNullValue() && Hi1
->isNullValue())
8114 if (N
->getOpcode() != ISD::BUILD_VECTOR
)
8117 for (unsigned i
= 0, e
= N
->getNumOperands(); i
!= e
; ++i
) {
8118 SDNode
*Elt
= N
->getOperand(i
).getNode();
8119 if (ConstantSDNode
*C
= dyn_cast
<ConstantSDNode
>(Elt
)) {
8120 unsigned EltSize
= VT
.getScalarSizeInBits();
8121 unsigned HalfSize
= EltSize
/ 2;
8123 if (!isIntN(HalfSize
, C
->getSExtValue()))
8126 if (!isUIntN(HalfSize
, C
->getZExtValue()))
8137 /// isSignExtended - Check if a node is a vector value that is sign-extended
8138 /// or a constant BUILD_VECTOR with sign-extended elements.
8139 static bool isSignExtended(SDNode
*N
, SelectionDAG
&DAG
) {
8140 if (N
->getOpcode() == ISD::SIGN_EXTEND
|| ISD::isSEXTLoad(N
))
8142 if (isExtendedBUILD_VECTOR(N
, DAG
, true))
8147 /// isZeroExtended - Check if a node is a vector value that is zero-extended
8148 /// or a constant BUILD_VECTOR with zero-extended elements.
8149 static bool isZeroExtended(SDNode
*N
, SelectionDAG
&DAG
) {
8150 if (N
->getOpcode() == ISD::ZERO_EXTEND
|| ISD::isZEXTLoad(N
))
8152 if (isExtendedBUILD_VECTOR(N
, DAG
, false))
static EVT getExtensionTo64Bits(const EVT &OrigVT) {
  if (OrigVT.getSizeInBits() >= 64)
    return OrigVT;

  assert(OrigVT.isSimple() && "Expecting a simple value type");

  MVT::SimpleValueType OrigSimpleTy = OrigVT.getSimpleVT().SimpleTy;
  switch (OrigSimpleTy) {
  default: llvm_unreachable("Unexpected Vector Type");
  case MVT::v2i8:
  case MVT::v2i16:
    return MVT::v2i32;
  case MVT::v4i8:
    return MVT::v4i16;
  }
}
8174 /// AddRequiredExtensionForVMULL - Add a sign/zero extension to extend the total
8175 /// value size to 64 bits. We need a 64-bit D register as an operand to VMULL.
8176 /// We insert the required extension here to get the vector to fill a D register.
8177 static SDValue
AddRequiredExtensionForVMULL(SDValue N
, SelectionDAG
&DAG
,
8180 unsigned ExtOpcode
) {
8181 // The vector originally had a size of OrigTy. It was then extended to ExtTy.
8182 // We expect the ExtTy to be 128-bits total. If the OrigTy is less than
8183 // 64-bits we need to insert a new extension so that it will be 64-bits.
8184 assert(ExtTy
.is128BitVector() && "Unexpected extension size");
8185 if (OrigTy
.getSizeInBits() >= 64)
8188 // Must extend size to at least 64 bits to be used as an operand for VMULL.
8189 EVT NewVT
= getExtensionTo64Bits(OrigTy
);
8191 return DAG
.getNode(ExtOpcode
, SDLoc(N
), NewVT
, N
);
8194 /// SkipLoadExtensionForVMULL - return a load of the original vector size that
8195 /// does not do any sign/zero extension. If the original vector is less
8196 /// than 64 bits, an appropriate extension will be added after the load to
8197 /// reach a total size of 64 bits. We have to add the extension separately
8198 /// because ARM does not have a sign/zero extending load for vectors.
8199 static SDValue
SkipLoadExtensionForVMULL(LoadSDNode
*LD
, SelectionDAG
& DAG
) {
8200 EVT ExtendedTy
= getExtensionTo64Bits(LD
->getMemoryVT());
8202 // The load already has the right type.
8203 if (ExtendedTy
== LD
->getMemoryVT())
8204 return DAG
.getLoad(LD
->getMemoryVT(), SDLoc(LD
), LD
->getChain(),
8205 LD
->getBasePtr(), LD
->getPointerInfo(),
8206 LD
->getAlignment(), LD
->getMemOperand()->getFlags());
8208 // We need to create a zextload/sextload. We cannot just create a load
8209 // followed by a zext/zext node because LowerMUL is also run during normal
8210 // operation legalization where we can't create illegal types.
8211 return DAG
.getExtLoad(LD
->getExtensionType(), SDLoc(LD
), ExtendedTy
,
8212 LD
->getChain(), LD
->getBasePtr(), LD
->getPointerInfo(),
8213 LD
->getMemoryVT(), LD
->getAlignment(),
8214 LD
->getMemOperand()->getFlags());
8217 /// SkipExtensionForVMULL - For a node that is a SIGN_EXTEND, ZERO_EXTEND,
8218 /// extending load, or BUILD_VECTOR with extended elements, return the
8219 /// unextended value. The unextended vector should be 64 bits so that it can
8220 /// be used as an operand to a VMULL instruction. If the original vector size
8221 /// before extension is less than 64 bits we add a an extension to resize
8222 /// the vector to 64 bits.
8223 static SDValue
SkipExtensionForVMULL(SDNode
*N
, SelectionDAG
&DAG
) {
8224 if (N
->getOpcode() == ISD::SIGN_EXTEND
|| N
->getOpcode() == ISD::ZERO_EXTEND
)
8225 return AddRequiredExtensionForVMULL(N
->getOperand(0), DAG
,
8226 N
->getOperand(0)->getValueType(0),
8230 if (LoadSDNode
*LD
= dyn_cast
<LoadSDNode
>(N
)) {
8231 assert((ISD::isSEXTLoad(LD
) || ISD::isZEXTLoad(LD
)) &&
8232 "Expected extending load");
8234 SDValue newLoad
= SkipLoadExtensionForVMULL(LD
, DAG
);
8235 DAG
.ReplaceAllUsesOfValueWith(SDValue(LD
, 1), newLoad
.getValue(1));
8236 unsigned Opcode
= ISD::isSEXTLoad(LD
) ? ISD::SIGN_EXTEND
: ISD::ZERO_EXTEND
;
8238 DAG
.getNode(Opcode
, SDLoc(newLoad
), LD
->getValueType(0), newLoad
);
8239 DAG
.ReplaceAllUsesOfValueWith(SDValue(LD
, 0), extLoad
);
8244 // Otherwise, the value must be a BUILD_VECTOR. For v2i64, it will
8245 // have been legalized as a BITCAST from v4i32.
8246 if (N
->getOpcode() == ISD::BITCAST
) {
8247 SDNode
*BVN
= N
->getOperand(0).getNode();
8248 assert(BVN
->getOpcode() == ISD::BUILD_VECTOR
&&
8249 BVN
->getValueType(0) == MVT::v4i32
&& "expected v4i32 BUILD_VECTOR");
8250 unsigned LowElt
= DAG
.getDataLayout().isBigEndian() ? 1 : 0;
8251 return DAG
.getBuildVector(
8252 MVT::v2i32
, SDLoc(N
),
8253 {BVN
->getOperand(LowElt
), BVN
->getOperand(LowElt
+ 2)});
8255 // Construct a new BUILD_VECTOR with elements truncated to half the size.
8256 assert(N
->getOpcode() == ISD::BUILD_VECTOR
&& "expected BUILD_VECTOR");
8257 EVT VT
= N
->getValueType(0);
8258 unsigned EltSize
= VT
.getScalarSizeInBits() / 2;
8259 unsigned NumElts
= VT
.getVectorNumElements();
8260 MVT TruncVT
= MVT::getIntegerVT(EltSize
);
8261 SmallVector
<SDValue
, 8> Ops
;
8263 for (unsigned i
= 0; i
!= NumElts
; ++i
) {
8264 ConstantSDNode
*C
= cast
<ConstantSDNode
>(N
->getOperand(i
));
8265 const APInt
&CInt
= C
->getAPIntValue();
8266 // Element types smaller than 32 bits are not legal, so use i32 elements.
8267 // The values are implicitly truncated so sext vs. zext doesn't matter.
8268 Ops
.push_back(DAG
.getConstant(CInt
.zextOrTrunc(32), dl
, MVT::i32
));
8270 return DAG
.getBuildVector(MVT::getVectorVT(TruncVT
, NumElts
), dl
, Ops
);
static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) {
  unsigned Opcode = N->getOpcode();
  if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
    SDNode *N0 = N->getOperand(0).getNode();
    SDNode *N1 = N->getOperand(1).getNode();
    return N0->hasOneUse() && N1->hasOneUse() &&
           isSignExtended(N0, DAG) && isSignExtended(N1, DAG);
  }
  return false;
}

static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) {
  unsigned Opcode = N->getOpcode();
  if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
    SDNode *N0 = N->getOperand(0).getNode();
    SDNode *N1 = N->getOperand(1).getNode();
    return N0->hasOneUse() && N1->hasOneUse() &&
           isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG);
  }
  return false;
}
8295 static SDValue
LowerMUL(SDValue Op
, SelectionDAG
&DAG
) {
8296 // Multiplications are only custom-lowered for 128-bit vectors so that
8297 // VMULL can be detected. Otherwise v2i64 multiplications are not legal.
8298 EVT VT
= Op
.getValueType();
8299 assert(VT
.is128BitVector() && VT
.isInteger() &&
8300 "unexpected type for custom-lowering ISD::MUL");
8301 SDNode
*N0
= Op
.getOperand(0).getNode();
8302 SDNode
*N1
= Op
.getOperand(1).getNode();
8303 unsigned NewOpc
= 0;
8305 bool isN0SExt
= isSignExtended(N0
, DAG
);
8306 bool isN1SExt
= isSignExtended(N1
, DAG
);
8307 if (isN0SExt
&& isN1SExt
)
8308 NewOpc
= ARMISD::VMULLs
;
8310 bool isN0ZExt
= isZeroExtended(N0
, DAG
);
8311 bool isN1ZExt
= isZeroExtended(N1
, DAG
);
8312 if (isN0ZExt
&& isN1ZExt
)
8313 NewOpc
= ARMISD::VMULLu
;
8314 else if (isN1SExt
|| isN1ZExt
) {
8315 // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these
8316 // into (s/zext A * s/zext C) + (s/zext B * s/zext C)
8317 if (isN1SExt
&& isAddSubSExt(N0
, DAG
)) {
8318 NewOpc
= ARMISD::VMULLs
;
8320 } else if (isN1ZExt
&& isAddSubZExt(N0
, DAG
)) {
8321 NewOpc
= ARMISD::VMULLu
;
8323 } else if (isN0ZExt
&& isAddSubZExt(N1
, DAG
)) {
8325 NewOpc
= ARMISD::VMULLu
;
8331 if (VT
== MVT::v2i64
)
8332 // Fall through to expand this. It is not legal.
8335 // Other vector multiplications are legal.
8340 // Legalize to a VMULL instruction.
8343 SDValue Op1
= SkipExtensionForVMULL(N1
, DAG
);
8345 Op0
= SkipExtensionForVMULL(N0
, DAG
);
8346 assert(Op0
.getValueType().is64BitVector() &&
8347 Op1
.getValueType().is64BitVector() &&
8348 "unexpected types for extended operands to VMULL");
8349 return DAG
.getNode(NewOpc
, DL
, VT
, Op0
, Op1
);
8352 // Optimizing (zext A + zext B) * C, to (VMULL A, C) + (VMULL B, C) during
8353 // isel lowering to take advantage of no-stall back to back vmul + vmla.
8360 SDValue N00
= SkipExtensionForVMULL(N0
->getOperand(0).getNode(), DAG
);
8361 SDValue N01
= SkipExtensionForVMULL(N0
->getOperand(1).getNode(), DAG
);
8362 EVT Op1VT
= Op1
.getValueType();
8363 return DAG
.getNode(N0
->getOpcode(), DL
, VT
,
8364 DAG
.getNode(NewOpc
, DL
, VT
,
8365 DAG
.getNode(ISD::BITCAST
, DL
, Op1VT
, N00
), Op1
),
8366 DAG
.getNode(NewOpc
, DL
, VT
,
8367 DAG
.getNode(ISD::BITCAST
, DL
, Op1VT
, N01
), Op1
));
8370 static SDValue
LowerSDIV_v4i8(SDValue X
, SDValue Y
, const SDLoc
&dl
,
8371 SelectionDAG
&DAG
) {
8372 // TODO: Should this propagate fast-math-flags?
8375 // float4 xf = vcvt_f32_s32(vmovl_s16(a.lo));
8376 // float4 yf = vcvt_f32_s32(vmovl_s16(b.lo));
8377 X
= DAG
.getNode(ISD::SIGN_EXTEND
, dl
, MVT::v4i32
, X
);
8378 Y
= DAG
.getNode(ISD::SIGN_EXTEND
, dl
, MVT::v4i32
, Y
);
8379 X
= DAG
.getNode(ISD::SINT_TO_FP
, dl
, MVT::v4f32
, X
);
8380 Y
= DAG
.getNode(ISD::SINT_TO_FP
, dl
, MVT::v4f32
, Y
);
8381 // Get reciprocal estimate.
8382 // float4 recip = vrecpeq_f32(yf);
8383 Y
= DAG
.getNode(ISD::INTRINSIC_WO_CHAIN
, dl
, MVT::v4f32
,
8384 DAG
.getConstant(Intrinsic::arm_neon_vrecpe
, dl
, MVT::i32
),
8386 // Because char has a smaller range than uchar, we can actually get away
8387 // without any newton steps. This requires that we use a weird bias
8388 // of 0xb000, however (again, this has been exhaustively tested).
8389 // float4 result = as_float4(as_int4(xf*recip) + 0xb000);
8390 X
= DAG
.getNode(ISD::FMUL
, dl
, MVT::v4f32
, X
, Y
);
8391 X
= DAG
.getNode(ISD::BITCAST
, dl
, MVT::v4i32
, X
);
8392 Y
= DAG
.getConstant(0xb000, dl
, MVT::v4i32
);
8393 X
= DAG
.getNode(ISD::ADD
, dl
, MVT::v4i32
, X
, Y
);
8394 X
= DAG
.getNode(ISD::BITCAST
, dl
, MVT::v4f32
, X
);
8395 // Convert back to short.
8396 X
= DAG
.getNode(ISD::FP_TO_SINT
, dl
, MVT::v4i32
, X
);
8397 X
= DAG
.getNode(ISD::TRUNCATE
, dl
, MVT::v4i16
, X
);
8401 static SDValue
LowerSDIV_v4i16(SDValue N0
, SDValue N1
, const SDLoc
&dl
,
8402 SelectionDAG
&DAG
) {
8403 // TODO: Should this propagate fast-math-flags?
8406 // Convert to float.
8407 // float4 yf = vcvt_f32_s32(vmovl_s16(y));
8408 // float4 xf = vcvt_f32_s32(vmovl_s16(x));
8409 N0
= DAG
.getNode(ISD::SIGN_EXTEND
, dl
, MVT::v4i32
, N0
);
8410 N1
= DAG
.getNode(ISD::SIGN_EXTEND
, dl
, MVT::v4i32
, N1
);
8411 N0
= DAG
.getNode(ISD::SINT_TO_FP
, dl
, MVT::v4f32
, N0
);
8412 N1
= DAG
.getNode(ISD::SINT_TO_FP
, dl
, MVT::v4f32
, N1
);
8414 // Use reciprocal estimate and one refinement step.
8415 // float4 recip = vrecpeq_f32(yf);
8416 // recip *= vrecpsq_f32(yf, recip);
8417 N2
= DAG
.getNode(ISD::INTRINSIC_WO_CHAIN
, dl
, MVT::v4f32
,
8418 DAG
.getConstant(Intrinsic::arm_neon_vrecpe
, dl
, MVT::i32
),
8420 N1
= DAG
.getNode(ISD::INTRINSIC_WO_CHAIN
, dl
, MVT::v4f32
,
8421 DAG
.getConstant(Intrinsic::arm_neon_vrecps
, dl
, MVT::i32
),
8423 N2
= DAG
.getNode(ISD::FMUL
, dl
, MVT::v4f32
, N1
, N2
);
8424 // Because short has a smaller range than ushort, we can actually get away
8425 // with only a single newton step. This requires that we use a weird bias
8426 // of 89, however (again, this has been exhaustively tested).
8427 // float4 result = as_float4(as_int4(xf*recip) + 0x89);
8428 N0
= DAG
.getNode(ISD::FMUL
, dl
, MVT::v4f32
, N0
, N2
);
8429 N0
= DAG
.getNode(ISD::BITCAST
, dl
, MVT::v4i32
, N0
);
8430 N1
= DAG
.getConstant(0x89, dl
, MVT::v4i32
);
8431 N0
= DAG
.getNode(ISD::ADD
, dl
, MVT::v4i32
, N0
, N1
);
8432 N0
= DAG
.getNode(ISD::BITCAST
, dl
, MVT::v4f32
, N0
);
8433 // Convert back to integer and return.
8434 // return vmovn_s32(vcvt_s32_f32(result));
8435 N0
= DAG
.getNode(ISD::FP_TO_SINT
, dl
, MVT::v4i32
, N0
);
8436 N0
= DAG
.getNode(ISD::TRUNCATE
, dl
, MVT::v4i16
, N0
);
8440 static SDValue
LowerSDIV(SDValue Op
, SelectionDAG
&DAG
,
8441 const ARMSubtarget
*ST
) {
8442 EVT VT
= Op
.getValueType();
8443 assert((VT
== MVT::v4i16
|| VT
== MVT::v8i8
) &&
8444 "unexpected type for custom-lowering ISD::SDIV");
8447 SDValue N0
= Op
.getOperand(0);
8448 SDValue N1
= Op
.getOperand(1);
8451 if (VT
== MVT::v8i8
) {
8452 N0
= DAG
.getNode(ISD::SIGN_EXTEND
, dl
, MVT::v8i16
, N0
);
8453 N1
= DAG
.getNode(ISD::SIGN_EXTEND
, dl
, MVT::v8i16
, N1
);
8455 N2
= DAG
.getNode(ISD::EXTRACT_SUBVECTOR
, dl
, MVT::v4i16
, N0
,
8456 DAG
.getIntPtrConstant(4, dl
));
8457 N3
= DAG
.getNode(ISD::EXTRACT_SUBVECTOR
, dl
, MVT::v4i16
, N1
,
8458 DAG
.getIntPtrConstant(4, dl
));
8459 N0
= DAG
.getNode(ISD::EXTRACT_SUBVECTOR
, dl
, MVT::v4i16
, N0
,
8460 DAG
.getIntPtrConstant(0, dl
));
8461 N1
= DAG
.getNode(ISD::EXTRACT_SUBVECTOR
, dl
, MVT::v4i16
, N1
,
8462 DAG
.getIntPtrConstant(0, dl
));
8464 N0
= LowerSDIV_v4i8(N0
, N1
, dl
, DAG
); // v4i16
8465 N2
= LowerSDIV_v4i8(N2
, N3
, dl
, DAG
); // v4i16
8467 N0
= DAG
.getNode(ISD::CONCAT_VECTORS
, dl
, MVT::v8i16
, N0
, N2
);
8468 N0
= LowerCONCAT_VECTORS(N0
, DAG
, ST
);
8470 N0
= DAG
.getNode(ISD::TRUNCATE
, dl
, MVT::v8i8
, N0
);
8473 return LowerSDIV_v4i16(N0
, N1
, dl
, DAG
);
8476 static SDValue
LowerUDIV(SDValue Op
, SelectionDAG
&DAG
,
8477 const ARMSubtarget
*ST
) {
8478 // TODO: Should this propagate fast-math-flags?
8479 EVT VT
= Op
.getValueType();
8480 assert((VT
== MVT::v4i16
|| VT
== MVT::v8i8
) &&
8481 "unexpected type for custom-lowering ISD::UDIV");
8484 SDValue N0
= Op
.getOperand(0);
8485 SDValue N1
= Op
.getOperand(1);
8488 if (VT
== MVT::v8i8
) {
8489 N0
= DAG
.getNode(ISD::ZERO_EXTEND
, dl
, MVT::v8i16
, N0
);
8490 N1
= DAG
.getNode(ISD::ZERO_EXTEND
, dl
, MVT::v8i16
, N1
);
8492 N2
= DAG
.getNode(ISD::EXTRACT_SUBVECTOR
, dl
, MVT::v4i16
, N0
,
8493 DAG
.getIntPtrConstant(4, dl
));
8494 N3
= DAG
.getNode(ISD::EXTRACT_SUBVECTOR
, dl
, MVT::v4i16
, N1
,
8495 DAG
.getIntPtrConstant(4, dl
));
8496 N0
= DAG
.getNode(ISD::EXTRACT_SUBVECTOR
, dl
, MVT::v4i16
, N0
,
8497 DAG
.getIntPtrConstant(0, dl
));
8498 N1
= DAG
.getNode(ISD::EXTRACT_SUBVECTOR
, dl
, MVT::v4i16
, N1
,
8499 DAG
.getIntPtrConstant(0, dl
));
8501 N0
= LowerSDIV_v4i16(N0
, N1
, dl
, DAG
); // v4i16
8502 N2
= LowerSDIV_v4i16(N2
, N3
, dl
, DAG
); // v4i16
8504 N0
= DAG
.getNode(ISD::CONCAT_VECTORS
, dl
, MVT::v8i16
, N0
, N2
);
8505 N0
= LowerCONCAT_VECTORS(N0
, DAG
, ST
);
8507 N0
= DAG
.getNode(ISD::INTRINSIC_WO_CHAIN
, dl
, MVT::v8i8
,
8508 DAG
.getConstant(Intrinsic::arm_neon_vqmovnsu
, dl
,
8514 // v4i16 sdiv ... Convert to float.
8515 // float4 yf = vcvt_f32_s32(vmovl_u16(y));
8516 // float4 xf = vcvt_f32_s32(vmovl_u16(x));
8517 N0
= DAG
.getNode(ISD::ZERO_EXTEND
, dl
, MVT::v4i32
, N0
);
8518 N1
= DAG
.getNode(ISD::ZERO_EXTEND
, dl
, MVT::v4i32
, N1
);
8519 N0
= DAG
.getNode(ISD::SINT_TO_FP
, dl
, MVT::v4f32
, N0
);
8520 SDValue BN1
= DAG
.getNode(ISD::SINT_TO_FP
, dl
, MVT::v4f32
, N1
);
8522 // Use reciprocal estimate and two refinement steps.
8523 // float4 recip = vrecpeq_f32(yf);
8524 // recip *= vrecpsq_f32(yf, recip);
8525 // recip *= vrecpsq_f32(yf, recip);
8526 N2
= DAG
.getNode(ISD::INTRINSIC_WO_CHAIN
, dl
, MVT::v4f32
,
8527 DAG
.getConstant(Intrinsic::arm_neon_vrecpe
, dl
, MVT::i32
),
8529 N1
= DAG
.getNode(ISD::INTRINSIC_WO_CHAIN
, dl
, MVT::v4f32
,
8530 DAG
.getConstant(Intrinsic::arm_neon_vrecps
, dl
, MVT::i32
),
8532 N2
= DAG
.getNode(ISD::FMUL
, dl
, MVT::v4f32
, N1
, N2
);
8533 N1
= DAG
.getNode(ISD::INTRINSIC_WO_CHAIN
, dl
, MVT::v4f32
,
8534 DAG
.getConstant(Intrinsic::arm_neon_vrecps
, dl
, MVT::i32
),
8536 N2
= DAG
.getNode(ISD::FMUL
, dl
, MVT::v4f32
, N1
, N2
);
8537 // Simply multiplying by the reciprocal estimate can leave us a few ulps
8538 // too low, so we add 2 ulps (exhaustive testing shows that this is enough,
8539 // and that it will never cause us to return an answer too large).
8540 // float4 result = as_float4(as_int4(xf*recip) + 2);
8541 N0
= DAG
.getNode(ISD::FMUL
, dl
, MVT::v4f32
, N0
, N2
);
8542 N0
= DAG
.getNode(ISD::BITCAST
, dl
, MVT::v4i32
, N0
);
8543 N1
= DAG
.getConstant(2, dl
, MVT::v4i32
);
8544 N0
= DAG
.getNode(ISD::ADD
, dl
, MVT::v4i32
, N0
, N1
);
8545 N0
= DAG
.getNode(ISD::BITCAST
, dl
, MVT::v4f32
, N0
);
8546 // Convert back to integer and return.
8547 // return vmovn_u32(vcvt_s32_f32(result));
8548 N0
= DAG
.getNode(ISD::FP_TO_SINT
, dl
, MVT::v4i32
, N0
);
8549 N0
= DAG
.getNode(ISD::TRUNCATE
, dl
, MVT::v4i16
, N0
);
8553 static SDValue
LowerADDSUBCARRY(SDValue Op
, SelectionDAG
&DAG
) {
8554 SDNode
*N
= Op
.getNode();
8555 EVT VT
= N
->getValueType(0);
8556 SDVTList VTs
= DAG
.getVTList(VT
, MVT::i32
);
8558 SDValue Carry
= Op
.getOperand(2);
8563 if (Op
.getOpcode() == ISD::ADDCARRY
) {
8564 // This converts the boolean value carry into the carry flag.
8565 Carry
= ConvertBooleanCarryToCarryFlag(Carry
, DAG
);
8567 // Do the addition proper using the carry flag we wanted.
8568 Result
= DAG
.getNode(ARMISD::ADDE
, DL
, VTs
, Op
.getOperand(0),
8569 Op
.getOperand(1), Carry
);
8571 // Now convert the carry flag into a boolean value.
8572 Carry
= ConvertCarryFlagToBooleanCarry(Result
.getValue(1), VT
, DAG
);
8574 // ARMISD::SUBE expects a carry not a borrow like ISD::SUBCARRY so we
8575 // have to invert the carry first.
8576 Carry
= DAG
.getNode(ISD::SUB
, DL
, MVT::i32
,
8577 DAG
.getConstant(1, DL
, MVT::i32
), Carry
);
8578 // This converts the boolean value carry into the carry flag.
8579 Carry
= ConvertBooleanCarryToCarryFlag(Carry
, DAG
);
8581 // Do the subtraction proper using the carry flag we wanted.
8582 Result
= DAG
.getNode(ARMISD::SUBE
, DL
, VTs
, Op
.getOperand(0),
8583 Op
.getOperand(1), Carry
);
8585 // Now convert the carry flag into a boolean value.
8586 Carry
= ConvertCarryFlagToBooleanCarry(Result
.getValue(1), VT
, DAG
);
8587 // But the carry returned by ARMISD::SUBE is not a borrow as expected
8588 // by ISD::SUBCARRY, so compute 1 - C.
8589 Carry
= DAG
.getNode(ISD::SUB
, DL
, MVT::i32
,
8590 DAG
.getConstant(1, DL
, MVT::i32
), Carry
);
8593 // Return both values.
8594 return DAG
.getNode(ISD::MERGE_VALUES
, DL
, N
->getVTList(), Result
, Carry
);
8597 SDValue
ARMTargetLowering::LowerFSINCOS(SDValue Op
, SelectionDAG
&DAG
) const {
8598 assert(Subtarget
->isTargetDarwin());
8600 // For iOS, we want to call an alternative entry point: __sincos_stret,
8601 // return values are passed via sret.
8603 SDValue Arg
= Op
.getOperand(0);
8604 EVT ArgVT
= Arg
.getValueType();
8605 Type
*ArgTy
= ArgVT
.getTypeForEVT(*DAG
.getContext());
8606 auto PtrVT
= getPointerTy(DAG
.getDataLayout());
8608 MachineFrameInfo
&MFI
= DAG
.getMachineFunction().getFrameInfo();
8609 const TargetLowering
&TLI
= DAG
.getTargetLoweringInfo();
8611 // Pair of floats / doubles used to pass the result.
8612 Type
*RetTy
= StructType::get(ArgTy
, ArgTy
);
8613 auto &DL
= DAG
.getDataLayout();
8616 bool ShouldUseSRet
= Subtarget
->isAPCS_ABI();
8618 if (ShouldUseSRet
) {
8619 // Create stack object for sret.
8620 const uint64_t ByteSize
= DL
.getTypeAllocSize(RetTy
);
8621 const unsigned StackAlign
= DL
.getPrefTypeAlignment(RetTy
);
8622 int FrameIdx
= MFI
.CreateStackObject(ByteSize
, StackAlign
, false);
8623 SRet
= DAG
.getFrameIndex(FrameIdx
, TLI
.getPointerTy(DL
));
8627 Entry
.Ty
= RetTy
->getPointerTo();
8628 Entry
.IsSExt
= false;
8629 Entry
.IsZExt
= false;
8630 Entry
.IsSRet
= true;
8631 Args
.push_back(Entry
);
8632 RetTy
= Type::getVoidTy(*DAG
.getContext());
8638 Entry
.IsSExt
= false;
8639 Entry
.IsZExt
= false;
8640 Args
.push_back(Entry
);
8643 (ArgVT
== MVT::f64
) ? RTLIB::SINCOS_STRET_F64
: RTLIB::SINCOS_STRET_F32
;
8644 const char *LibcallName
= getLibcallName(LC
);
8645 CallingConv::ID CC
= getLibcallCallingConv(LC
);
8646 SDValue Callee
= DAG
.getExternalSymbol(LibcallName
, getPointerTy(DL
));
8648 TargetLowering::CallLoweringInfo
CLI(DAG
);
8650 .setChain(DAG
.getEntryNode())
8651 .setCallee(CC
, RetTy
, Callee
, std::move(Args
))
8652 .setDiscardResult(ShouldUseSRet
);
8653 std::pair
<SDValue
, SDValue
> CallResult
= LowerCallTo(CLI
);
8656 return CallResult
.first
;
8659 DAG
.getLoad(ArgVT
, dl
, CallResult
.second
, SRet
, MachinePointerInfo());
8661 // Address of cos field.
8662 SDValue Add
= DAG
.getNode(ISD::ADD
, dl
, PtrVT
, SRet
,
8663 DAG
.getIntPtrConstant(ArgVT
.getStoreSize(), dl
));
8665 DAG
.getLoad(ArgVT
, dl
, LoadSin
.getValue(1), Add
, MachinePointerInfo());
8667 SDVTList Tys
= DAG
.getVTList(ArgVT
, ArgVT
);
8668 return DAG
.getNode(ISD::MERGE_VALUES
, dl
, Tys
,
8669 LoadSin
.getValue(0), LoadCos
.getValue(0));
8672 SDValue
ARMTargetLowering::LowerWindowsDIVLibCall(SDValue Op
, SelectionDAG
&DAG
,
8674 SDValue
&Chain
) const {
8675 EVT VT
= Op
.getValueType();
8676 assert((VT
== MVT::i32
|| VT
== MVT::i64
) &&
8677 "unexpected type for custom lowering DIV");
8680 const auto &DL
= DAG
.getDataLayout();
8681 const auto &TLI
= DAG
.getTargetLoweringInfo();
8683 const char *Name
= nullptr;
8685 Name
= (VT
== MVT::i32
) ? "__rt_sdiv" : "__rt_sdiv64";
8687 Name
= (VT
== MVT::i32
) ? "__rt_udiv" : "__rt_udiv64";
8689 SDValue ES
= DAG
.getExternalSymbol(Name
, TLI
.getPointerTy(DL
));
8691 ARMTargetLowering::ArgListTy Args
;
8693 for (auto AI
: {1, 0}) {
8695 Arg
.Node
= Op
.getOperand(AI
);
8696 Arg
.Ty
= Arg
.Node
.getValueType().getTypeForEVT(*DAG
.getContext());
8697 Args
.push_back(Arg
);
8700 CallLoweringInfo
CLI(DAG
);
8703 .setCallee(CallingConv::ARM_AAPCS_VFP
, VT
.getTypeForEVT(*DAG
.getContext()),
8704 ES
, std::move(Args
));
8706 return LowerCallTo(CLI
).first
;
// This is a code size optimisation: return the original SDIV node to
// DAGCombiner when we don't want to expand SDIV into a sequence of
// instructions, and an empty node otherwise which will cause the
// SDIV to be expanded in DAGCombine.
SDValue
ARMTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
                                 SelectionDAG &DAG,
                                 SmallVectorImpl<SDNode *> &Created) const {
  // TODO: Support SREM
  if (N->getOpcode() != ISD::SDIV)
    return SDValue();

  const auto &ST = static_cast<const ARMSubtarget&>(DAG.getSubtarget());
  const bool MinSize = ST.hasMinSize();
  const bool HasDivide = ST.isThumb() ? ST.hasDivideInThumbMode()
                                      : ST.hasDivideInARMMode();

  // Don't touch vector types; rewriting this may lead to scalarizing
  // the int divs.
  if (N->getOperand(0).getValueType().isVector())
    return SDValue();

  // Bail if MinSize is not set, and also for both ARM and Thumb mode we need
  // hwdiv support for this to be really profitable.
  if (!(MinSize && HasDivide))
    return SDValue();

  // ARM mode is a bit simpler than Thumb: we can handle large power
  // of 2 immediates with 1 mov instruction; no further checks required,
  // just return the sdiv node.
  if (!ST.isThumb())
    return SDValue(N, 0);

  // In Thumb mode, immediates larger than 128 need a wide 4-byte MOV,
  // and thus lose the code size benefits of a MOVS that requires only 2.
  // TargetTransformInfo and 'getIntImmCodeSizeCost' could be helpful here,
  // but as it's doing exactly this, it's not worth the trouble to get TTI.
  if (Divisor.sgt(128))
    return SDValue();

  return SDValue(N, 0);
}
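// Illustrative note (not from the original source): with minsize set on a
// Thumb target that has hardware divide, an sdiv by 8 is kept as an SDIV node
// here, while an sdiv by 256 falls through to the generic shift-based
// expansion because the 256 immediate would need a wide MOV.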
8752 SDValue
ARMTargetLowering::LowerDIV_Windows(SDValue Op
, SelectionDAG
&DAG
,
8753 bool Signed
) const {
8754 assert(Op
.getValueType() == MVT::i32
&&
8755 "unexpected type for custom lowering DIV");
8758 SDValue DBZCHK
= DAG
.getNode(ARMISD::WIN__DBZCHK
, dl
, MVT::Other
,
8759 DAG
.getEntryNode(), Op
.getOperand(1));
8761 return LowerWindowsDIVLibCall(Op
, DAG
, Signed
, DBZCHK
);
8764 static SDValue
WinDBZCheckDenominator(SelectionDAG
&DAG
, SDNode
*N
, SDValue InChain
) {
8766 SDValue Op
= N
->getOperand(1);
8767 if (N
->getValueType(0) == MVT::i32
)
8768 return DAG
.getNode(ARMISD::WIN__DBZCHK
, DL
, MVT::Other
, InChain
, Op
);
8769 SDValue Lo
= DAG
.getNode(ISD::EXTRACT_ELEMENT
, DL
, MVT::i32
, Op
,
8770 DAG
.getConstant(0, DL
, MVT::i32
));
8771 SDValue Hi
= DAG
.getNode(ISD::EXTRACT_ELEMENT
, DL
, MVT::i32
, Op
,
8772 DAG
.getConstant(1, DL
, MVT::i32
));
8773 return DAG
.getNode(ARMISD::WIN__DBZCHK
, DL
, MVT::Other
, InChain
,
8774 DAG
.getNode(ISD::OR
, DL
, MVT::i32
, Lo
, Hi
));
8777 void ARMTargetLowering::ExpandDIV_Windows(
8778 SDValue Op
, SelectionDAG
&DAG
, bool Signed
,
8779 SmallVectorImpl
<SDValue
> &Results
) const {
8780 const auto &DL
= DAG
.getDataLayout();
8781 const auto &TLI
= DAG
.getTargetLoweringInfo();
8783 assert(Op
.getValueType() == MVT::i64
&&
8784 "unexpected type for custom lowering DIV");
8787 SDValue DBZCHK
= WinDBZCheckDenominator(DAG
, Op
.getNode(), DAG
.getEntryNode());
8789 SDValue Result
= LowerWindowsDIVLibCall(Op
, DAG
, Signed
, DBZCHK
);
8791 SDValue Lower
= DAG
.getNode(ISD::TRUNCATE
, dl
, MVT::i32
, Result
);
8792 SDValue Upper
= DAG
.getNode(ISD::SRL
, dl
, MVT::i64
, Result
,
8793 DAG
.getConstant(32, dl
, TLI
.getPointerTy(DL
)));
8794 Upper
= DAG
.getNode(ISD::TRUNCATE
, dl
, MVT::i32
, Upper
);
8796 Results
.push_back(Lower
);
8797 Results
.push_back(Upper
);
static SDValue LowerPredicateLoad(SDValue Op, SelectionDAG &DAG) {
  LoadSDNode *LD = cast<LoadSDNode>(Op.getNode());
  EVT MemVT = LD->getMemoryVT();
  assert((MemVT == MVT::v4i1 || MemVT == MVT::v8i1 || MemVT == MVT::v16i1) &&
         "Expected a predicate type!");
  assert(MemVT == Op.getValueType());
  assert(LD->getExtensionType() == ISD::NON_EXTLOAD &&
         "Expected a non-extending load");
  assert(LD->isUnindexed() && "Expected a unindexed load");

  // The basic MVE VLDR on a v4i1/v8i1 actually loads the entire 16bit
  // predicate, with the "v4i1" bits spread out over the 16 bits loaded. We
  // need to make sure that 8/4 bits are actually loaded into the correct
  // place, which means loading the value and then shuffling the values into
  // the bottom bits of the predicate.
  // Equally, VLDR for an v16i1 will actually load 32bits (so will be incorrect
  // for big-endian).
  SDLoc dl(Op);
  SDValue Load = DAG.getExtLoad(
      ISD::EXTLOAD, dl, MVT::i32, LD->getChain(), LD->getBasePtr(),
      EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits()),
      LD->getMemOperand());
  SDValue Pred = DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::v16i1, Load);
  if (MemVT != MVT::v16i1)
    Pred = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MemVT, Pred,
                       DAG.getConstant(0, dl, MVT::i32));
  return DAG.getMergeValues({Pred, Load.getValue(1)}, dl);
}
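// Illustrative note (not from the original source): a v4i1 load therefore
// becomes a narrow extending scalar load into i32, a PREDICATE_CAST of that
// value to v16i1, and an EXTRACT_SUBVECTOR at index 0 to recover the v4i1
// result, with the load's chain returned alongside it.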
static SDValue LowerPredicateStore(SDValue Op, SelectionDAG &DAG) {
  StoreSDNode *ST = cast<StoreSDNode>(Op.getNode());
  EVT MemVT = ST->getMemoryVT();
  assert((MemVT == MVT::v4i1 || MemVT == MVT::v8i1 || MemVT == MVT::v16i1) &&
         "Expected a predicate type!");
  assert(MemVT == ST->getValue().getValueType());
  assert(!ST->isTruncatingStore() && "Expected a non-extending store");
  assert(ST->isUnindexed() && "Expected a unindexed store");

  // Only store the v4i1 or v8i1 worth of bits, via a buildvector with top bits
  // unset and a scalar store.
  SDLoc dl(Op);
  SDValue Build = ST->getValue();
  if (MemVT != MVT::v16i1) {
    SmallVector<SDValue, 16> Ops;
    for (unsigned I = 0; I < MemVT.getVectorNumElements(); I++)
      Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, Build,
                                DAG.getConstant(I, dl, MVT::i32)));
    for (unsigned I = MemVT.getVectorNumElements(); I < 16; I++)
      Ops.push_back(DAG.getUNDEF(MVT::i32));
    Build = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i1, Ops);
  }
  SDValue GRP = DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::i32, Build);
  return DAG.getTruncStore(
      ST->getChain(), dl, GRP, ST->getBasePtr(),
      EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits()),
      ST->getMemOperand());
}
static SDValue LowerMLOAD(SDValue Op, SelectionDAG &DAG) {
  MaskedLoadSDNode *N = cast<MaskedLoadSDNode>(Op.getNode());
  MVT VT = Op.getSimpleValueType();
  SDValue Mask = N->getMask();
  SDValue PassThru = N->getPassThru();
  SDLoc dl(Op);

  if (ISD::isBuildVectorAllZeros(PassThru.getNode()) ||
      (PassThru->getOpcode() == ARMISD::VMOVIMM &&
       isNullConstant(PassThru->getOperand(0))))
    return Op;

  // MVE Masked loads use zero as the passthru value. Here we convert undef to
  // zero too, and other values are lowered to a select.
  SDValue ZeroVec = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
                                DAG.getTargetConstant(0, dl, MVT::i32));
  SDValue NewLoad = DAG.getMaskedLoad(
      VT, dl, N->getChain(), N->getBasePtr(), Mask, ZeroVec, N->getMemoryVT(),
      N->getMemOperand(), N->getExtensionType(), N->isExpandingLoad());
  SDValue Combo = NewLoad;
  if (!PassThru.isUndef())
    Combo = DAG.getNode(ISD::VSELECT, dl, VT, Mask, NewLoad, PassThru);
  return DAG.getMergeValues({Combo, NewLoad.getValue(1)}, dl);
}

static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) {
  if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getOrdering()))
    // Acquire/Release load/store is not legal for targets without a dmb or
    // equivalent available.
    return SDValue();

  // Monotonic load/store is legal for all targets.
  return Op;
}
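// Illustrative note (not from the original source): in LowerMLOAD above, a
// masked load whose passthru is neither undef nor zero is rewritten as a
// zero-passthru masked load followed by VSELECT(Mask, Load, PassThru), since
// MVE masked loads only ever use zero for the inactive lanes.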
8894 static void ReplaceREADCYCLECOUNTER(SDNode
*N
,
8895 SmallVectorImpl
<SDValue
> &Results
,
8897 const ARMSubtarget
*Subtarget
) {
8899 // Under Power Management extensions, the cycle-count is:
8900 // mrc p15, #0, <Rt>, c9, c13, #0
8901 SDValue Ops
[] = { N
->getOperand(0), // Chain
8902 DAG
.getTargetConstant(Intrinsic::arm_mrc
, DL
, MVT::i32
),
8903 DAG
.getTargetConstant(15, DL
, MVT::i32
),
8904 DAG
.getTargetConstant(0, DL
, MVT::i32
),
8905 DAG
.getTargetConstant(9, DL
, MVT::i32
),
8906 DAG
.getTargetConstant(13, DL
, MVT::i32
),
8907 DAG
.getTargetConstant(0, DL
, MVT::i32
)
8910 SDValue Cycles32
= DAG
.getNode(ISD::INTRINSIC_W_CHAIN
, DL
,
8911 DAG
.getVTList(MVT::i32
, MVT::Other
), Ops
);
8912 Results
.push_back(DAG
.getNode(ISD::BUILD_PAIR
, DL
, MVT::i64
, Cycles32
,
8913 DAG
.getConstant(0, DL
, MVT::i32
)));
8914 Results
.push_back(Cycles32
.getValue(1));
static SDValue createGPRPairNode(SelectionDAG &DAG, SDValue V) {
  SDLoc dl(V.getNode());
  SDValue VLo = DAG.getAnyExtOrTrunc(V, dl, MVT::i32);
  SDValue VHi = DAG.getAnyExtOrTrunc(
      DAG.getNode(ISD::SRL, dl, MVT::i64, V, DAG.getConstant(32, dl, MVT::i32)),
      dl, MVT::i32);
  bool isBigEndian = DAG.getDataLayout().isBigEndian();
  if (isBigEndian)
    std::swap (VLo, VHi);
  SDValue RegClass =
      DAG.getTargetConstant(ARM::GPRPairRegClassID, dl, MVT::i32);
  SDValue SubReg0 = DAG.getTargetConstant(ARM::gsub_0, dl, MVT::i32);
  SDValue SubReg1 = DAG.getTargetConstant(ARM::gsub_1, dl, MVT::i32);
  const SDValue Ops[] = { RegClass, VLo, SubReg0, VHi, SubReg1 };
  return SDValue(
      DAG.getMachineNode(TargetOpcode::REG_SEQUENCE, dl, MVT::Untyped, Ops), 0);
}
static void ReplaceCMP_SWAP_64Results(SDNode *N,
                                      SmallVectorImpl<SDValue> &Results,
                                      SelectionDAG &DAG) {
  assert(N->getValueType(0) == MVT::i64 &&
         "AtomicCmpSwap on types less than 64 should be legal");
  SDValue Ops[] = {N->getOperand(1),
                   createGPRPairNode(DAG, N->getOperand(2)),
                   createGPRPairNode(DAG, N->getOperand(3)),
                   N->getOperand(0)};
  SDNode *CmpSwap = DAG.getMachineNode(
      ARM::CMP_SWAP_64, SDLoc(N),
      DAG.getVTList(MVT::Untyped, MVT::i32, MVT::Other), Ops);

  MachineMemOperand *MemOp = cast<MemSDNode>(N)->getMemOperand();
  DAG.setNodeMemRefs(cast<MachineSDNode>(CmpSwap), {MemOp});

  bool isBigEndian = DAG.getDataLayout().isBigEndian();

  Results.push_back(
      DAG.getTargetExtractSubreg(isBigEndian ? ARM::gsub_1 : ARM::gsub_0,
                                 SDLoc(N), MVT::i32, SDValue(CmpSwap, 0)));
  Results.push_back(
      DAG.getTargetExtractSubreg(isBigEndian ? ARM::gsub_0 : ARM::gsub_1,
                                 SDLoc(N), MVT::i32, SDValue(CmpSwap, 0)));
  Results.push_back(SDValue(CmpSwap, 2));
}
static SDValue LowerFPOWI(SDValue Op, const ARMSubtarget &Subtarget,
                          SelectionDAG &DAG) {
  const auto &TLI = DAG.getTargetLoweringInfo();

  assert(Subtarget.getTargetTriple().isOSMSVCRT() &&
         "Custom lowering is MSVCRT specific!");

  SDLoc dl(Op);
  SDValue Val = Op.getOperand(0);
  MVT Ty = Val->getSimpleValueType(0);
  SDValue Exponent = DAG.getNode(ISD::SINT_TO_FP, dl, Ty, Op.getOperand(1));
  SDValue Callee = DAG.getExternalSymbol(Ty == MVT::f32 ? "powf" : "pow",
                                         TLI.getPointerTy(DAG.getDataLayout()));

  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Node = Val;
  Entry.Ty = Val.getValueType().getTypeForEVT(*DAG.getContext());
  Entry.IsZExt = true;
  Args.push_back(Entry);

  Entry.Node = Exponent;
  Entry.Ty = Exponent.getValueType().getTypeForEVT(*DAG.getContext());
  Entry.IsZExt = true;
  Args.push_back(Entry);

  Type *LCRTy = Val.getValueType().getTypeForEVT(*DAG.getContext());

  // The in-chain to the call is the entry node. If we are emitting a
  // tailcall, the chain will be mutated if the node has a non-entry input
  // chain.
  SDValue InChain = DAG.getEntryNode();
  SDValue TCChain = InChain;

  const Function &F = DAG.getMachineFunction().getFunction();
  bool IsTC = TLI.isInTailCallPosition(DAG, Op.getNode(), TCChain) &&
              F.getReturnType() == LCRTy;
  if (IsTC)
    InChain = TCChain;

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl)
      .setChain(InChain)
      .setCallee(CallingConv::ARM_AAPCS_VFP, LCRTy, Callee, std::move(Args))
      .setTailCall(IsTC);
  std::pair<SDValue, SDValue> CI = TLI.LowerCallTo(CLI);

  // Return the chain (the DAG root) if it is a tail call
  return !CI.second.getNode() ? DAG.getRoot() : CI.first;
}
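/// Central dispatch for every operation this target marks as Custom: each
/// case forwards to the matching lowering helper.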
SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  LLVM_DEBUG(dbgs() << "Lowering node: "; Op.dump());
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Don't know how to custom lower this!");
  case ISD::WRITE_REGISTER: return LowerWRITE_REGISTER(Op, DAG);
  case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
  case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
  case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
  case ISD::SELECT: return LowerSELECT(Op, DAG);
  case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
  case ISD::BRCOND: return LowerBRCOND(Op, DAG);
  case ISD::BR_CC: return LowerBR_CC(Op, DAG);
  case ISD::BR_JT: return LowerBR_JT(Op, DAG);
  case ISD::VASTART: return LowerVASTART(Op, DAG);
  case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG, Subtarget);
  case ISD::PREFETCH: return LowerPREFETCH(Op, DAG, Subtarget);
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG);
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG);
  case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
  case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
  case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG);
  case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG);
  case ISD::EH_SJLJ_SETUP_DISPATCH: return LowerEH_SJLJ_SETUP_DISPATCH(Op, DAG);
  case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG, Subtarget);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG,
                                                               Subtarget);
  case ISD::BITCAST: return ExpandBITCAST(Op.getNode(), DAG, Subtarget);
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SRA: return LowerShift(Op.getNode(), DAG, Subtarget);
  case ISD::SREM: return LowerREM(Op.getNode(), DAG);
  case ISD::UREM: return LowerREM(Op.getNode(), DAG);
  case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG);
  case ISD::SRL_PARTS:
  case ISD::SRA_PARTS: return LowerShiftRightParts(Op, DAG);
  case ISD::CTTZ:
  case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(Op.getNode(), DAG, Subtarget);
  case ISD::CTPOP: return LowerCTPOP(Op.getNode(), DAG, Subtarget);
  case ISD::SETCC: return LowerVSETCC(Op, DAG, Subtarget);
  case ISD::SETCCCARRY: return LowerSETCCCARRY(Op, DAG);
  case ISD::ConstantFP: return LowerConstantFP(Op, DAG, Subtarget);
  case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG, Subtarget);
  case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
  case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG, Subtarget);
  case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG, Subtarget);
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG, Subtarget);
  case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
  case ISD::MUL: return LowerMUL(Op, DAG);
  case ISD::SDIV:
    if (Subtarget->isTargetWindows() && !Op.getValueType().isVector())
      return LowerDIV_Windows(Op, DAG, /* Signed */ true);
    return LowerSDIV(Op, DAG, Subtarget);
  case ISD::UDIV:
    if (Subtarget->isTargetWindows() && !Op.getValueType().isVector())
      return LowerDIV_Windows(Op, DAG, /* Signed */ false);
    return LowerUDIV(Op, DAG, Subtarget);
  case ISD::ADDCARRY:
  case ISD::SUBCARRY: return LowerADDSUBCARRY(Op, DAG);
  case ISD::SADDO:
  case ISD::SSUBO:
    return LowerSignedALUO(Op, DAG);
  case ISD::UADDO:
  case ISD::USUBO:
    return LowerUnsignedALUO(Op, DAG);
  case ISD::LOAD:
    return LowerPredicateLoad(Op, DAG);
  case ISD::STORE:
    return LowerPredicateStore(Op, DAG);
  case ISD::MLOAD:
    return LowerMLOAD(Op, DAG);
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE: return LowerAtomicLoadStore(Op, DAG);
  case ISD::FSINCOS: return LowerFSINCOS(Op, DAG);
  case ISD::SDIVREM:
  case ISD::UDIVREM: return LowerDivRem(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC:
    if (Subtarget->isTargetWindows())
      return LowerDYNAMIC_STACKALLOC(Op, DAG);
    llvm_unreachable("Don't know how to custom lower this!");
  case ISD::FP_ROUND: return LowerFP_ROUND(Op, DAG);
  case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
  case ISD::FPOWI: return LowerFPOWI(Op, *Subtarget, DAG);
  case ARMISD::WIN__DBZCHK: return SDValue();
  }
}
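/// Expand the 64-bit result of an arm.smlald/smlaldx/smlsld/smlsldx intrinsic
/// into the corresponding ARMISD node, which yields the result as two i32
/// halves.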
static void ReplaceLongIntrinsic(SDNode *N, SmallVectorImpl<SDValue> &Results,
                                 SelectionDAG &DAG) {
  unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
  unsigned Opc = 0;
  if (IntNo == Intrinsic::arm_smlald)
    Opc = ARMISD::SMLALD;
  else if (IntNo == Intrinsic::arm_smlaldx)
    Opc = ARMISD::SMLALDX;
  else if (IntNo == Intrinsic::arm_smlsld)
    Opc = ARMISD::SMLSLD;
  else if (IntNo == Intrinsic::arm_smlsldx)
    Opc = ARMISD::SMLSLDX;
  else
    return;

  SDLoc dl(N);
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                           N->getOperand(3),
                           DAG.getConstant(0, dl, MVT::i32));
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                           N->getOperand(3),
                           DAG.getConstant(1, dl, MVT::i32));

  SDValue LongMul = DAG.getNode(Opc, dl,
                                DAG.getVTList(MVT::i32, MVT::i32),
                                N->getOperand(1), N->getOperand(2),
                                Lo, Hi);
  Results.push_back(LongMul.getValue(0));
  Results.push_back(LongMul.getValue(1));
}
/// ReplaceNodeResults - Replace the results of node with an illegal result
/// type with new values built out of custom code.
void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
                                           SmallVectorImpl<SDValue> &Results,
                                           SelectionDAG &DAG) const {
  SDValue Res;
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom expand this!");
  case ISD::READ_REGISTER:
    ExpandREAD_REGISTER(N, Results, DAG);
    break;
  case ISD::BITCAST:
    Res = ExpandBITCAST(N, DAG, Subtarget);
    break;
  case ISD::SRL:
  case ISD::SRA:
  case ISD::SHL:
    Res = Expand64BitShift(N, DAG, Subtarget);
    break;
  case ISD::SREM:
  case ISD::UREM:
    Res = LowerREM(N, DAG);
    break;
  case ISD::SDIVREM:
  case ISD::UDIVREM:
    Res = LowerDivRem(SDValue(N, 0), DAG);
    assert(Res.getNumOperands() == 2 && "DivRem needs two values");
    Results.push_back(Res.getValue(0));
    Results.push_back(Res.getValue(1));
    return;
  case ISD::READCYCLECOUNTER:
    ReplaceREADCYCLECOUNTER(N, Results, DAG, Subtarget);
    return;
  case ISD::UDIV:
  case ISD::SDIV:
    assert(Subtarget->isTargetWindows() && "can only expand DIV on Windows");
    return ExpandDIV_Windows(SDValue(N, 0), DAG, N->getOpcode() == ISD::SDIV,
                             Results);
  case ISD::ATOMIC_CMP_SWAP:
    ReplaceCMP_SWAP_64Results(N, Results, DAG);
    return;
  case ISD::INTRINSIC_WO_CHAIN:
    return ReplaceLongIntrinsic(N, Results, DAG);
  case ISD::ABS:
    lowerABS(N, Results, DAG);
    return;
  }
  if (Res.getNode())
    Results.push_back(Res);
}
//===----------------------------------------------------------------------===//
//                           ARM Scheduler Hooks
//===----------------------------------------------------------------------===//
9193 /// SetupEntryBlockForSjLj - Insert code into the entry block that creates and
9194 /// registers the function context.
9195 void ARMTargetLowering::SetupEntryBlockForSjLj(MachineInstr
&MI
,
9196 MachineBasicBlock
*MBB
,
9197 MachineBasicBlock
*DispatchBB
,
9199 assert(!Subtarget
->isROPI() && !Subtarget
->isRWPI() &&
9200 "ROPI/RWPI not currently supported with SjLj");
9201 const TargetInstrInfo
*TII
= Subtarget
->getInstrInfo();
9202 DebugLoc dl
= MI
.getDebugLoc();
9203 MachineFunction
*MF
= MBB
->getParent();
9204 MachineRegisterInfo
*MRI
= &MF
->getRegInfo();
9205 MachineConstantPool
*MCP
= MF
->getConstantPool();
9206 ARMFunctionInfo
*AFI
= MF
->getInfo
<ARMFunctionInfo
>();
9207 const Function
&F
= MF
->getFunction();
9209 bool isThumb
= Subtarget
->isThumb();
9210 bool isThumb2
= Subtarget
->isThumb2();
9212 unsigned PCLabelId
= AFI
->createPICLabelUId();
9213 unsigned PCAdj
= (isThumb
|| isThumb2
) ? 4 : 8;
9214 ARMConstantPoolValue
*CPV
=
9215 ARMConstantPoolMBB::Create(F
.getContext(), DispatchBB
, PCLabelId
, PCAdj
);
9216 unsigned CPI
= MCP
->getConstantPoolIndex(CPV
, 4);
9218 const TargetRegisterClass
*TRC
= isThumb
? &ARM::tGPRRegClass
9219 : &ARM::GPRRegClass
;
9221 // Grab constant pool and fixed stack memory operands.
9222 MachineMemOperand
*CPMMO
=
9223 MF
->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF
),
9224 MachineMemOperand::MOLoad
, 4, 4);
9226 MachineMemOperand
*FIMMOSt
=
9227 MF
->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF
, FI
),
9228 MachineMemOperand::MOStore
, 4, 4);
9230 // Load the address of the dispatch MBB into the jump buffer.
9232 // Incoming value: jbuf
9233 // ldr.n r5, LCPI1_1
9236 // str r5, [$jbuf, #+4] ; &jbuf[1]
9237 Register NewVReg1
= MRI
->createVirtualRegister(TRC
);
9238 BuildMI(*MBB
, MI
, dl
, TII
->get(ARM::t2LDRpci
), NewVReg1
)
9239 .addConstantPoolIndex(CPI
)
9240 .addMemOperand(CPMMO
)
9241 .add(predOps(ARMCC::AL
));
9242 // Set the low bit because of thumb mode.
9243 Register NewVReg2
= MRI
->createVirtualRegister(TRC
);
9244 BuildMI(*MBB
, MI
, dl
, TII
->get(ARM::t2ORRri
), NewVReg2
)
9245 .addReg(NewVReg1
, RegState::Kill
)
9247 .add(predOps(ARMCC::AL
))
9249 Register NewVReg3
= MRI
->createVirtualRegister(TRC
);
9250 BuildMI(*MBB
, MI
, dl
, TII
->get(ARM::tPICADD
), NewVReg3
)
9251 .addReg(NewVReg2
, RegState::Kill
)
9253 BuildMI(*MBB
, MI
, dl
, TII
->get(ARM::t2STRi12
))
9254 .addReg(NewVReg3
, RegState::Kill
)
9256 .addImm(36) // &jbuf[1] :: pc
9257 .addMemOperand(FIMMOSt
)
9258 .add(predOps(ARMCC::AL
));
9259 } else if (isThumb
) {
9260 // Incoming value: jbuf
9261 // ldr.n r1, LCPI1_4
9265 // add r2, $jbuf, #+4 ; &jbuf[1]
9267 Register NewVReg1
= MRI
->createVirtualRegister(TRC
);
9268 BuildMI(*MBB
, MI
, dl
, TII
->get(ARM::tLDRpci
), NewVReg1
)
9269 .addConstantPoolIndex(CPI
)
9270 .addMemOperand(CPMMO
)
9271 .add(predOps(ARMCC::AL
));
9272 Register NewVReg2
= MRI
->createVirtualRegister(TRC
);
9273 BuildMI(*MBB
, MI
, dl
, TII
->get(ARM::tPICADD
), NewVReg2
)
9274 .addReg(NewVReg1
, RegState::Kill
)
9276 // Set the low bit because of thumb mode.
9277 Register NewVReg3
= MRI
->createVirtualRegister(TRC
);
9278 BuildMI(*MBB
, MI
, dl
, TII
->get(ARM::tMOVi8
), NewVReg3
)
9279 .addReg(ARM::CPSR
, RegState::Define
)
9281 .add(predOps(ARMCC::AL
));
9282 Register NewVReg4
= MRI
->createVirtualRegister(TRC
);
9283 BuildMI(*MBB
, MI
, dl
, TII
->get(ARM::tORR
), NewVReg4
)
9284 .addReg(ARM::CPSR
, RegState::Define
)
9285 .addReg(NewVReg2
, RegState::Kill
)
9286 .addReg(NewVReg3
, RegState::Kill
)
9287 .add(predOps(ARMCC::AL
));
9288 Register NewVReg5
= MRI
->createVirtualRegister(TRC
);
9289 BuildMI(*MBB
, MI
, dl
, TII
->get(ARM::tADDframe
), NewVReg5
)
9291 .addImm(36); // &jbuf[1] :: pc
9292 BuildMI(*MBB
, MI
, dl
, TII
->get(ARM::tSTRi
))
9293 .addReg(NewVReg4
, RegState::Kill
)
9294 .addReg(NewVReg5
, RegState::Kill
)
9296 .addMemOperand(FIMMOSt
)
9297 .add(predOps(ARMCC::AL
));
9299 // Incoming value: jbuf
9302 // str r1, [$jbuf, #+4] ; &jbuf[1]
9303 Register NewVReg1
= MRI
->createVirtualRegister(TRC
);
9304 BuildMI(*MBB
, MI
, dl
, TII
->get(ARM::LDRi12
), NewVReg1
)
9305 .addConstantPoolIndex(CPI
)
9307 .addMemOperand(CPMMO
)
9308 .add(predOps(ARMCC::AL
));
9309 Register NewVReg2
= MRI
->createVirtualRegister(TRC
);
9310 BuildMI(*MBB
, MI
, dl
, TII
->get(ARM::PICADD
), NewVReg2
)
9311 .addReg(NewVReg1
, RegState::Kill
)
9313 .add(predOps(ARMCC::AL
));
9314 BuildMI(*MBB
, MI
, dl
, TII
->get(ARM::STRi12
))
9315 .addReg(NewVReg2
, RegState::Kill
)
9317 .addImm(36) // &jbuf[1] :: pc
9318 .addMemOperand(FIMMOSt
)
9319 .add(predOps(ARMCC::AL
));
9323 void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr
&MI
,
9324 MachineBasicBlock
*MBB
) const {
9325 const TargetInstrInfo
*TII
= Subtarget
->getInstrInfo();
9326 DebugLoc dl
= MI
.getDebugLoc();
9327 MachineFunction
*MF
= MBB
->getParent();
9328 MachineRegisterInfo
*MRI
= &MF
->getRegInfo();
9329 MachineFrameInfo
&MFI
= MF
->getFrameInfo();
9330 int FI
= MFI
.getFunctionContextIndex();
9332 const TargetRegisterClass
*TRC
= Subtarget
->isThumb() ? &ARM::tGPRRegClass
9333 : &ARM::GPRnopcRegClass
;
9335 // Get a mapping of the call site numbers to all of the landing pads they're
9337 DenseMap
<unsigned, SmallVector
<MachineBasicBlock
*, 2>> CallSiteNumToLPad
;
9338 unsigned MaxCSNum
= 0;
9339 for (MachineFunction::iterator BB
= MF
->begin(), E
= MF
->end(); BB
!= E
;
9341 if (!BB
->isEHPad()) continue;
9343 // FIXME: We should assert that the EH_LABEL is the first MI in the landing
9345 for (MachineBasicBlock::iterator
9346 II
= BB
->begin(), IE
= BB
->end(); II
!= IE
; ++II
) {
9347 if (!II
->isEHLabel()) continue;
9349 MCSymbol
*Sym
= II
->getOperand(0).getMCSymbol();
9350 if (!MF
->hasCallSiteLandingPad(Sym
)) continue;
9352 SmallVectorImpl
<unsigned> &CallSiteIdxs
= MF
->getCallSiteLandingPad(Sym
);
9353 for (SmallVectorImpl
<unsigned>::iterator
9354 CSI
= CallSiteIdxs
.begin(), CSE
= CallSiteIdxs
.end();
9355 CSI
!= CSE
; ++CSI
) {
9356 CallSiteNumToLPad
[*CSI
].push_back(&*BB
);
9357 MaxCSNum
= std::max(MaxCSNum
, *CSI
);
9363 // Get an ordered list of the machine basic blocks for the jump table.
9364 std::vector
<MachineBasicBlock
*> LPadList
;
9365 SmallPtrSet
<MachineBasicBlock
*, 32> InvokeBBs
;
9366 LPadList
.reserve(CallSiteNumToLPad
.size());
9367 for (unsigned I
= 1; I
<= MaxCSNum
; ++I
) {
9368 SmallVectorImpl
<MachineBasicBlock
*> &MBBList
= CallSiteNumToLPad
[I
];
9369 for (SmallVectorImpl
<MachineBasicBlock
*>::iterator
9370 II
= MBBList
.begin(), IE
= MBBList
.end(); II
!= IE
; ++II
) {
9371 LPadList
.push_back(*II
);
9372 InvokeBBs
.insert((*II
)->pred_begin(), (*II
)->pred_end());
9376 assert(!LPadList
.empty() &&
9377 "No landing pad destinations for the dispatch jump table!");
9379 // Create the jump table and associated information.
9380 MachineJumpTableInfo
*JTI
=
9381 MF
->getOrCreateJumpTableInfo(MachineJumpTableInfo::EK_Inline
);
9382 unsigned MJTI
= JTI
->createJumpTableIndex(LPadList
);
9384 // Create the MBBs for the dispatch code.
9386 // Shove the dispatch's address into the return slot in the function context.
9387 MachineBasicBlock
*DispatchBB
= MF
->CreateMachineBasicBlock();
9388 DispatchBB
->setIsEHPad();
9390 MachineBasicBlock
*TrapBB
= MF
->CreateMachineBasicBlock();
9391 unsigned trap_opcode
;
9392 if (Subtarget
->isThumb())
9393 trap_opcode
= ARM::tTRAP
;
9395 trap_opcode
= Subtarget
->useNaClTrap() ? ARM::TRAPNaCl
: ARM::TRAP
;
9397 BuildMI(TrapBB
, dl
, TII
->get(trap_opcode
));
9398 DispatchBB
->addSuccessor(TrapBB
);
9400 MachineBasicBlock
*DispContBB
= MF
->CreateMachineBasicBlock();
9401 DispatchBB
->addSuccessor(DispContBB
);
9404 MF
->insert(MF
->end(), DispatchBB
);
9405 MF
->insert(MF
->end(), DispContBB
);
9406 MF
->insert(MF
->end(), TrapBB
);
9408 // Insert code into the entry block that creates and registers the function
9410 SetupEntryBlockForSjLj(MI
, MBB
, DispatchBB
, FI
);
9412 MachineMemOperand
*FIMMOLd
= MF
->getMachineMemOperand(
9413 MachinePointerInfo::getFixedStack(*MF
, FI
),
9414 MachineMemOperand::MOLoad
| MachineMemOperand::MOVolatile
, 4, 4);
9416 MachineInstrBuilder MIB
;
9417 MIB
= BuildMI(DispatchBB
, dl
, TII
->get(ARM::Int_eh_sjlj_dispatchsetup
));
9419 const ARMBaseInstrInfo
*AII
= static_cast<const ARMBaseInstrInfo
*>(TII
);
9420 const ARMBaseRegisterInfo
&RI
= AII
->getRegisterInfo();
9422 // Add a register mask with no preserved registers. This results in all
9423 // registers being marked as clobbered. This can't work if the dispatch block
9424 // is in a Thumb1 function and is linked with ARM code which uses the FP
9425 // registers, as there is no way to preserve the FP registers in Thumb1 mode.
9426 MIB
.addRegMask(RI
.getSjLjDispatchPreservedMask(*MF
));
9428 bool IsPositionIndependent
= isPositionIndependent();
9429 unsigned NumLPads
= LPadList
.size();
9430 if (Subtarget
->isThumb2()) {
9431 Register NewVReg1
= MRI
->createVirtualRegister(TRC
);
9432 BuildMI(DispatchBB
, dl
, TII
->get(ARM::t2LDRi12
), NewVReg1
)
9435 .addMemOperand(FIMMOLd
)
9436 .add(predOps(ARMCC::AL
));
9438 if (NumLPads
< 256) {
9439 BuildMI(DispatchBB
, dl
, TII
->get(ARM::t2CMPri
))
9441 .addImm(LPadList
.size())
9442 .add(predOps(ARMCC::AL
));
9444 Register VReg1
= MRI
->createVirtualRegister(TRC
);
9445 BuildMI(DispatchBB
, dl
, TII
->get(ARM::t2MOVi16
), VReg1
)
9446 .addImm(NumLPads
& 0xFFFF)
9447 .add(predOps(ARMCC::AL
));
9449 unsigned VReg2
= VReg1
;
9450 if ((NumLPads
& 0xFFFF0000) != 0) {
9451 VReg2
= MRI
->createVirtualRegister(TRC
);
9452 BuildMI(DispatchBB
, dl
, TII
->get(ARM::t2MOVTi16
), VReg2
)
9454 .addImm(NumLPads
>> 16)
9455 .add(predOps(ARMCC::AL
));
9458 BuildMI(DispatchBB
, dl
, TII
->get(ARM::t2CMPrr
))
9461 .add(predOps(ARMCC::AL
));
9464 BuildMI(DispatchBB
, dl
, TII
->get(ARM::t2Bcc
))
9469 Register NewVReg3
= MRI
->createVirtualRegister(TRC
);
9470 BuildMI(DispContBB
, dl
, TII
->get(ARM::t2LEApcrelJT
), NewVReg3
)
9471 .addJumpTableIndex(MJTI
)
9472 .add(predOps(ARMCC::AL
));
9474 Register NewVReg4
= MRI
->createVirtualRegister(TRC
);
9475 BuildMI(DispContBB
, dl
, TII
->get(ARM::t2ADDrs
), NewVReg4
)
9476 .addReg(NewVReg3
, RegState::Kill
)
9478 .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl
, 2))
9479 .add(predOps(ARMCC::AL
))
9482 BuildMI(DispContBB
, dl
, TII
->get(ARM::t2BR_JT
))
9483 .addReg(NewVReg4
, RegState::Kill
)
9485 .addJumpTableIndex(MJTI
);
9486 } else if (Subtarget
->isThumb()) {
9487 Register NewVReg1
= MRI
->createVirtualRegister(TRC
);
9488 BuildMI(DispatchBB
, dl
, TII
->get(ARM::tLDRspi
), NewVReg1
)
9491 .addMemOperand(FIMMOLd
)
9492 .add(predOps(ARMCC::AL
));
9494 if (NumLPads
< 256) {
9495 BuildMI(DispatchBB
, dl
, TII
->get(ARM::tCMPi8
))
9498 .add(predOps(ARMCC::AL
));
9500 MachineConstantPool
*ConstantPool
= MF
->getConstantPool();
9501 Type
*Int32Ty
= Type::getInt32Ty(MF
->getFunction().getContext());
9502 const Constant
*C
= ConstantInt::get(Int32Ty
, NumLPads
);
9504 // MachineConstantPool wants an explicit alignment.
9505 unsigned Align
= MF
->getDataLayout().getPrefTypeAlignment(Int32Ty
);
9507 Align
= MF
->getDataLayout().getTypeAllocSize(C
->getType());
9508 unsigned Idx
= ConstantPool
->getConstantPoolIndex(C
, Align
);
9510 Register VReg1
= MRI
->createVirtualRegister(TRC
);
9511 BuildMI(DispatchBB
, dl
, TII
->get(ARM::tLDRpci
))
9512 .addReg(VReg1
, RegState::Define
)
9513 .addConstantPoolIndex(Idx
)
9514 .add(predOps(ARMCC::AL
));
9515 BuildMI(DispatchBB
, dl
, TII
->get(ARM::tCMPr
))
9518 .add(predOps(ARMCC::AL
));
9521 BuildMI(DispatchBB
, dl
, TII
->get(ARM::tBcc
))
9526 Register NewVReg2
= MRI
->createVirtualRegister(TRC
);
9527 BuildMI(DispContBB
, dl
, TII
->get(ARM::tLSLri
), NewVReg2
)
9528 .addReg(ARM::CPSR
, RegState::Define
)
9531 .add(predOps(ARMCC::AL
));
9533 Register NewVReg3
= MRI
->createVirtualRegister(TRC
);
9534 BuildMI(DispContBB
, dl
, TII
->get(ARM::tLEApcrelJT
), NewVReg3
)
9535 .addJumpTableIndex(MJTI
)
9536 .add(predOps(ARMCC::AL
));
9538 Register NewVReg4
= MRI
->createVirtualRegister(TRC
);
9539 BuildMI(DispContBB
, dl
, TII
->get(ARM::tADDrr
), NewVReg4
)
9540 .addReg(ARM::CPSR
, RegState::Define
)
9541 .addReg(NewVReg2
, RegState::Kill
)
9543 .add(predOps(ARMCC::AL
));
9545 MachineMemOperand
*JTMMOLd
= MF
->getMachineMemOperand(
9546 MachinePointerInfo::getJumpTable(*MF
), MachineMemOperand::MOLoad
, 4, 4);
9548 Register NewVReg5
= MRI
->createVirtualRegister(TRC
);
9549 BuildMI(DispContBB
, dl
, TII
->get(ARM::tLDRi
), NewVReg5
)
9550 .addReg(NewVReg4
, RegState::Kill
)
9552 .addMemOperand(JTMMOLd
)
9553 .add(predOps(ARMCC::AL
));
9555 unsigned NewVReg6
= NewVReg5
;
9556 if (IsPositionIndependent
) {
9557 NewVReg6
= MRI
->createVirtualRegister(TRC
);
9558 BuildMI(DispContBB
, dl
, TII
->get(ARM::tADDrr
), NewVReg6
)
9559 .addReg(ARM::CPSR
, RegState::Define
)
9560 .addReg(NewVReg5
, RegState::Kill
)
9562 .add(predOps(ARMCC::AL
));
9565 BuildMI(DispContBB
, dl
, TII
->get(ARM::tBR_JTr
))
9566 .addReg(NewVReg6
, RegState::Kill
)
9567 .addJumpTableIndex(MJTI
);
9569 Register NewVReg1
= MRI
->createVirtualRegister(TRC
);
9570 BuildMI(DispatchBB
, dl
, TII
->get(ARM::LDRi12
), NewVReg1
)
9573 .addMemOperand(FIMMOLd
)
9574 .add(predOps(ARMCC::AL
));
9576 if (NumLPads
< 256) {
9577 BuildMI(DispatchBB
, dl
, TII
->get(ARM::CMPri
))
9580 .add(predOps(ARMCC::AL
));
9581 } else if (Subtarget
->hasV6T2Ops() && isUInt
<16>(NumLPads
)) {
9582 Register VReg1
= MRI
->createVirtualRegister(TRC
);
9583 BuildMI(DispatchBB
, dl
, TII
->get(ARM::MOVi16
), VReg1
)
9584 .addImm(NumLPads
& 0xFFFF)
9585 .add(predOps(ARMCC::AL
));
9587 unsigned VReg2
= VReg1
;
9588 if ((NumLPads
& 0xFFFF0000) != 0) {
9589 VReg2
= MRI
->createVirtualRegister(TRC
);
9590 BuildMI(DispatchBB
, dl
, TII
->get(ARM::MOVTi16
), VReg2
)
9592 .addImm(NumLPads
>> 16)
9593 .add(predOps(ARMCC::AL
));
9596 BuildMI(DispatchBB
, dl
, TII
->get(ARM::CMPrr
))
9599 .add(predOps(ARMCC::AL
));
9601 MachineConstantPool
*ConstantPool
= MF
->getConstantPool();
9602 Type
*Int32Ty
= Type::getInt32Ty(MF
->getFunction().getContext());
9603 const Constant
*C
= ConstantInt::get(Int32Ty
, NumLPads
);
9605 // MachineConstantPool wants an explicit alignment.
9606 unsigned Align
= MF
->getDataLayout().getPrefTypeAlignment(Int32Ty
);
9608 Align
= MF
->getDataLayout().getTypeAllocSize(C
->getType());
9609 unsigned Idx
= ConstantPool
->getConstantPoolIndex(C
, Align
);
9611 Register VReg1
= MRI
->createVirtualRegister(TRC
);
9612 BuildMI(DispatchBB
, dl
, TII
->get(ARM::LDRcp
))
9613 .addReg(VReg1
, RegState::Define
)
9614 .addConstantPoolIndex(Idx
)
9616 .add(predOps(ARMCC::AL
));
9617 BuildMI(DispatchBB
, dl
, TII
->get(ARM::CMPrr
))
9619 .addReg(VReg1
, RegState::Kill
)
9620 .add(predOps(ARMCC::AL
));
9623 BuildMI(DispatchBB
, dl
, TII
->get(ARM::Bcc
))
9628 Register NewVReg3
= MRI
->createVirtualRegister(TRC
);
9629 BuildMI(DispContBB
, dl
, TII
->get(ARM::MOVsi
), NewVReg3
)
9631 .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl
, 2))
9632 .add(predOps(ARMCC::AL
))
9634 Register NewVReg4
= MRI
->createVirtualRegister(TRC
);
9635 BuildMI(DispContBB
, dl
, TII
->get(ARM::LEApcrelJT
), NewVReg4
)
9636 .addJumpTableIndex(MJTI
)
9637 .add(predOps(ARMCC::AL
));
9639 MachineMemOperand
*JTMMOLd
= MF
->getMachineMemOperand(
9640 MachinePointerInfo::getJumpTable(*MF
), MachineMemOperand::MOLoad
, 4, 4);
9641 Register NewVReg5
= MRI
->createVirtualRegister(TRC
);
9642 BuildMI(DispContBB
, dl
, TII
->get(ARM::LDRrs
), NewVReg5
)
9643 .addReg(NewVReg3
, RegState::Kill
)
9646 .addMemOperand(JTMMOLd
)
9647 .add(predOps(ARMCC::AL
));
9649 if (IsPositionIndependent
) {
9650 BuildMI(DispContBB
, dl
, TII
->get(ARM::BR_JTadd
))
9651 .addReg(NewVReg5
, RegState::Kill
)
9653 .addJumpTableIndex(MJTI
);
9655 BuildMI(DispContBB
, dl
, TII
->get(ARM::BR_JTr
))
9656 .addReg(NewVReg5
, RegState::Kill
)
9657 .addJumpTableIndex(MJTI
);
9661 // Add the jump table entries as successors to the MBB.
9662 SmallPtrSet
<MachineBasicBlock
*, 8> SeenMBBs
;
9663 for (std::vector
<MachineBasicBlock
*>::iterator
9664 I
= LPadList
.begin(), E
= LPadList
.end(); I
!= E
; ++I
) {
9665 MachineBasicBlock
*CurMBB
= *I
;
9666 if (SeenMBBs
.insert(CurMBB
).second
)
9667 DispContBB
->addSuccessor(CurMBB
);
9670 // N.B. the order the invoke BBs are processed in doesn't matter here.
9671 const MCPhysReg
*SavedRegs
= RI
.getCalleeSavedRegs(MF
);
9672 SmallVector
<MachineBasicBlock
*, 64> MBBLPads
;
9673 for (MachineBasicBlock
*BB
: InvokeBBs
) {
9675 // Remove the landing pad successor from the invoke block and replace it
9676 // with the new dispatch block.
9677 SmallVector
<MachineBasicBlock
*, 4> Successors(BB
->succ_begin(),
9679 while (!Successors
.empty()) {
9680 MachineBasicBlock
*SMBB
= Successors
.pop_back_val();
9681 if (SMBB
->isEHPad()) {
9682 BB
->removeSuccessor(SMBB
);
9683 MBBLPads
.push_back(SMBB
);
9687 BB
->addSuccessor(DispatchBB
, BranchProbability::getZero());
9688 BB
->normalizeSuccProbs();
9690 // Find the invoke call and mark all of the callee-saved registers as
9691 // 'implicit defined' so that they're spilled. This prevents code from
9692 // moving instructions to before the EH block, where they will never be
9694 for (MachineBasicBlock::reverse_iterator
9695 II
= BB
->rbegin(), IE
= BB
->rend(); II
!= IE
; ++II
) {
9696 if (!II
->isCall()) continue;
9698 DenseMap
<unsigned, bool> DefRegs
;
9699 for (MachineInstr::mop_iterator
9700 OI
= II
->operands_begin(), OE
= II
->operands_end();
9702 if (!OI
->isReg()) continue;
9703 DefRegs
[OI
->getReg()] = true;
9706 MachineInstrBuilder
MIB(*MF
, &*II
);
9708 for (unsigned i
= 0; SavedRegs
[i
] != 0; ++i
) {
9709 unsigned Reg
= SavedRegs
[i
];
9710 if (Subtarget
->isThumb2() &&
9711 !ARM::tGPRRegClass
.contains(Reg
) &&
9712 !ARM::hGPRRegClass
.contains(Reg
))
9714 if (Subtarget
->isThumb1Only() && !ARM::tGPRRegClass
.contains(Reg
))
9716 if (!Subtarget
->isThumb() && !ARM::GPRRegClass
.contains(Reg
))
9719 MIB
.addReg(Reg
, RegState::ImplicitDefine
| RegState::Dead
);
9726 // Mark all former landing pads as non-landing pads. The dispatch is the only
9728 for (SmallVectorImpl
<MachineBasicBlock
*>::iterator
9729 I
= MBBLPads
.begin(), E
= MBBLPads
.end(); I
!= E
; ++I
)
9730 (*I
)->setIsEHPad(false);
9732 // The instruction is gone now.
9733 MI
.eraseFromParent();
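/// Return the successor of MBB that is not Succ. MBB is expected to have
/// exactly two successors.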
static MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB,
                                    MachineBasicBlock *Succ) {
  for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(),
       E = MBB->succ_end(); I != E; ++I)
    if (*I != Succ)
      return *I;
  llvm_unreachable("Expecting a BB with two successors!");
}
/// Return the load opcode for a given load size. If load size >= 8,
/// a NEON opcode will be returned.
static unsigned getLdOpcode(unsigned LdSize, bool IsThumb1, bool IsThumb2) {
  if (LdSize >= 8)
    return LdSize == 16 ? ARM::VLD1q32wb_fixed
                        : LdSize == 8 ? ARM::VLD1d32wb_fixed : 0;
  if (IsThumb1)
    return LdSize == 4 ? ARM::tLDRi
                       : LdSize == 2 ? ARM::tLDRHi
                                     : LdSize == 1 ? ARM::tLDRBi : 0;
  if (IsThumb2)
    return LdSize == 4 ? ARM::t2LDR_POST
                       : LdSize == 2 ? ARM::t2LDRH_POST
                                     : LdSize == 1 ? ARM::t2LDRB_POST : 0;
  return LdSize == 4 ? ARM::LDR_POST_IMM
                     : LdSize == 2 ? ARM::LDRH_POST
                                   : LdSize == 1 ? ARM::LDRB_POST_IMM : 0;
}

/// Return the store opcode for a given store size. If store size >= 8,
/// a NEON opcode will be returned.
static unsigned getStOpcode(unsigned StSize, bool IsThumb1, bool IsThumb2) {
  if (StSize >= 8)
    return StSize == 16 ? ARM::VST1q32wb_fixed
                        : StSize == 8 ? ARM::VST1d32wb_fixed : 0;
  if (IsThumb1)
    return StSize == 4 ? ARM::tSTRi
                       : StSize == 2 ? ARM::tSTRHi
                                     : StSize == 1 ? ARM::tSTRBi : 0;
  if (IsThumb2)
    return StSize == 4 ? ARM::t2STR_POST
                       : StSize == 2 ? ARM::t2STRH_POST
                                     : StSize == 1 ? ARM::t2STRB_POST : 0;
  return StSize == 4 ? ARM::STR_POST_IMM
                     : StSize == 2 ? ARM::STRH_POST
                                   : StSize == 1 ? ARM::STRB_POST_IMM : 0;
}
9783 /// Emit a post-increment load operation with given size. The instructions
9784 /// will be added to BB at Pos.
9785 static void emitPostLd(MachineBasicBlock
*BB
, MachineBasicBlock::iterator Pos
,
9786 const TargetInstrInfo
*TII
, const DebugLoc
&dl
,
9787 unsigned LdSize
, unsigned Data
, unsigned AddrIn
,
9788 unsigned AddrOut
, bool IsThumb1
, bool IsThumb2
) {
9789 unsigned LdOpc
= getLdOpcode(LdSize
, IsThumb1
, IsThumb2
);
9790 assert(LdOpc
!= 0 && "Should have a load opcode");
9792 BuildMI(*BB
, Pos
, dl
, TII
->get(LdOpc
), Data
)
9793 .addReg(AddrOut
, RegState::Define
)
9796 .add(predOps(ARMCC::AL
));
9797 } else if (IsThumb1
) {
9798 // load + update AddrIn
9799 BuildMI(*BB
, Pos
, dl
, TII
->get(LdOpc
), Data
)
9802 .add(predOps(ARMCC::AL
));
9803 BuildMI(*BB
, Pos
, dl
, TII
->get(ARM::tADDi8
), AddrOut
)
9804 .add(t1CondCodeOp())
9807 .add(predOps(ARMCC::AL
));
9808 } else if (IsThumb2
) {
9809 BuildMI(*BB
, Pos
, dl
, TII
->get(LdOpc
), Data
)
9810 .addReg(AddrOut
, RegState::Define
)
9813 .add(predOps(ARMCC::AL
));
9815 BuildMI(*BB
, Pos
, dl
, TII
->get(LdOpc
), Data
)
9816 .addReg(AddrOut
, RegState::Define
)
9820 .add(predOps(ARMCC::AL
));
9824 /// Emit a post-increment store operation with given size. The instructions
9825 /// will be added to BB at Pos.
9826 static void emitPostSt(MachineBasicBlock
*BB
, MachineBasicBlock::iterator Pos
,
9827 const TargetInstrInfo
*TII
, const DebugLoc
&dl
,
9828 unsigned StSize
, unsigned Data
, unsigned AddrIn
,
9829 unsigned AddrOut
, bool IsThumb1
, bool IsThumb2
) {
9830 unsigned StOpc
= getStOpcode(StSize
, IsThumb1
, IsThumb2
);
9831 assert(StOpc
!= 0 && "Should have a store opcode");
9833 BuildMI(*BB
, Pos
, dl
, TII
->get(StOpc
), AddrOut
)
9837 .add(predOps(ARMCC::AL
));
9838 } else if (IsThumb1
) {
9839 // store + update AddrIn
9840 BuildMI(*BB
, Pos
, dl
, TII
->get(StOpc
))
9844 .add(predOps(ARMCC::AL
));
9845 BuildMI(*BB
, Pos
, dl
, TII
->get(ARM::tADDi8
), AddrOut
)
9846 .add(t1CondCodeOp())
9849 .add(predOps(ARMCC::AL
));
9850 } else if (IsThumb2
) {
9851 BuildMI(*BB
, Pos
, dl
, TII
->get(StOpc
), AddrOut
)
9855 .add(predOps(ARMCC::AL
));
9857 BuildMI(*BB
, Pos
, dl
, TII
->get(StOpc
), AddrOut
)
9862 .add(predOps(ARMCC::AL
));
9867 ARMTargetLowering::EmitStructByval(MachineInstr
&MI
,
9868 MachineBasicBlock
*BB
) const {
9869 // This pseudo instruction has 3 operands: dst, src, size
9870 // We expand it to a loop if size > Subtarget->getMaxInlineSizeThreshold().
9871 // Otherwise, we will generate unrolled scalar copies.
9872 const TargetInstrInfo
*TII
= Subtarget
->getInstrInfo();
9873 const BasicBlock
*LLVM_BB
= BB
->getBasicBlock();
9874 MachineFunction::iterator It
= ++BB
->getIterator();
9876 Register dest
= MI
.getOperand(0).getReg();
9877 Register src
= MI
.getOperand(1).getReg();
9878 unsigned SizeVal
= MI
.getOperand(2).getImm();
9879 unsigned Align
= MI
.getOperand(3).getImm();
9880 DebugLoc dl
= MI
.getDebugLoc();
9882 MachineFunction
*MF
= BB
->getParent();
9883 MachineRegisterInfo
&MRI
= MF
->getRegInfo();
9884 unsigned UnitSize
= 0;
9885 const TargetRegisterClass
*TRC
= nullptr;
9886 const TargetRegisterClass
*VecTRC
= nullptr;
9888 bool IsThumb1
= Subtarget
->isThumb1Only();
9889 bool IsThumb2
= Subtarget
->isThumb2();
9890 bool IsThumb
= Subtarget
->isThumb();
9894 } else if (Align
& 2) {
9897 // Check whether we can use NEON instructions.
9898 if (!MF
->getFunction().hasFnAttribute(Attribute::NoImplicitFloat
) &&
9899 Subtarget
->hasNEON()) {
9900 if ((Align
% 16 == 0) && SizeVal
>= 16)
9902 else if ((Align
% 8 == 0) && SizeVal
>= 8)
9905 // Can't use NEON instructions.
9910 // Select the correct opcode and register class for unit size load/store
9911 bool IsNeon
= UnitSize
>= 8;
9912 TRC
= IsThumb
? &ARM::tGPRRegClass
: &ARM::GPRRegClass
;
9914 VecTRC
= UnitSize
== 16 ? &ARM::DPairRegClass
9915 : UnitSize
== 8 ? &ARM::DPRRegClass
9918 unsigned BytesLeft
= SizeVal
% UnitSize
;
9919 unsigned LoopSize
= SizeVal
- BytesLeft
;
9921 if (SizeVal
<= Subtarget
->getMaxInlineSizeThreshold()) {
9922 // Use LDR and STR to copy.
9923 // [scratch, srcOut] = LDR_POST(srcIn, UnitSize)
9924 // [destOut] = STR_POST(scratch, destIn, UnitSize)
9925 unsigned srcIn
= src
;
9926 unsigned destIn
= dest
;
9927 for (unsigned i
= 0; i
< LoopSize
; i
+=UnitSize
) {
9928 Register srcOut
= MRI
.createVirtualRegister(TRC
);
9929 Register destOut
= MRI
.createVirtualRegister(TRC
);
9930 Register scratch
= MRI
.createVirtualRegister(IsNeon
? VecTRC
: TRC
);
9931 emitPostLd(BB
, MI
, TII
, dl
, UnitSize
, scratch
, srcIn
, srcOut
,
9932 IsThumb1
, IsThumb2
);
9933 emitPostSt(BB
, MI
, TII
, dl
, UnitSize
, scratch
, destIn
, destOut
,
9934 IsThumb1
, IsThumb2
);
9939 // Handle the leftover bytes with LDRB and STRB.
9940 // [scratch, srcOut] = LDRB_POST(srcIn, 1)
9941 // [destOut] = STRB_POST(scratch, destIn, 1)
9942 for (unsigned i
= 0; i
< BytesLeft
; i
++) {
9943 Register srcOut
= MRI
.createVirtualRegister(TRC
);
9944 Register destOut
= MRI
.createVirtualRegister(TRC
);
9945 Register scratch
= MRI
.createVirtualRegister(TRC
);
9946 emitPostLd(BB
, MI
, TII
, dl
, 1, scratch
, srcIn
, srcOut
,
9947 IsThumb1
, IsThumb2
);
9948 emitPostSt(BB
, MI
, TII
, dl
, 1, scratch
, destIn
, destOut
,
9949 IsThumb1
, IsThumb2
);
9953 MI
.eraseFromParent(); // The instruction is gone now.
9957 // Expand the pseudo op to a loop.
9960 // movw varEnd, # --> with thumb2
9962 // ldrcp varEnd, idx --> without thumb2
9963 // fallthrough --> loopMBB
9965 // PHI varPhi, varEnd, varLoop
9966 // PHI srcPhi, src, srcLoop
9967 // PHI destPhi, dst, destLoop
9968 // [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize)
9969 // [destLoop] = STR_POST(scratch, destPhi, UnitSize)
9970 // subs varLoop, varPhi, #UnitSize
9972 // fallthrough --> exitMBB
9974 // epilogue to handle left-over bytes
9975 // [scratch, srcOut] = LDRB_POST(srcLoop, 1)
9976 // [destOut] = STRB_POST(scratch, destLoop, 1)
9977 MachineBasicBlock
*loopMBB
= MF
->CreateMachineBasicBlock(LLVM_BB
);
9978 MachineBasicBlock
*exitMBB
= MF
->CreateMachineBasicBlock(LLVM_BB
);
9979 MF
->insert(It
, loopMBB
);
9980 MF
->insert(It
, exitMBB
);
9982 // Transfer the remainder of BB and its successor edges to exitMBB.
9983 exitMBB
->splice(exitMBB
->begin(), BB
,
9984 std::next(MachineBasicBlock::iterator(MI
)), BB
->end());
9985 exitMBB
->transferSuccessorsAndUpdatePHIs(BB
);
9987 // Load an immediate to varEnd.
9988 Register varEnd
= MRI
.createVirtualRegister(TRC
);
9989 if (Subtarget
->useMovt()) {
9990 unsigned Vtmp
= varEnd
;
9991 if ((LoopSize
& 0xFFFF0000) != 0)
9992 Vtmp
= MRI
.createVirtualRegister(TRC
);
9993 BuildMI(BB
, dl
, TII
->get(IsThumb
? ARM::t2MOVi16
: ARM::MOVi16
), Vtmp
)
9994 .addImm(LoopSize
& 0xFFFF)
9995 .add(predOps(ARMCC::AL
));
9997 if ((LoopSize
& 0xFFFF0000) != 0)
9998 BuildMI(BB
, dl
, TII
->get(IsThumb
? ARM::t2MOVTi16
: ARM::MOVTi16
), varEnd
)
10000 .addImm(LoopSize
>> 16)
10001 .add(predOps(ARMCC::AL
));
10003 MachineConstantPool
*ConstantPool
= MF
->getConstantPool();
10004 Type
*Int32Ty
= Type::getInt32Ty(MF
->getFunction().getContext());
10005 const Constant
*C
= ConstantInt::get(Int32Ty
, LoopSize
);
10007 // MachineConstantPool wants an explicit alignment.
10008 unsigned Align
= MF
->getDataLayout().getPrefTypeAlignment(Int32Ty
);
10010 Align
= MF
->getDataLayout().getTypeAllocSize(C
->getType());
10011 unsigned Idx
= ConstantPool
->getConstantPoolIndex(C
, Align
);
10012 MachineMemOperand
*CPMMO
=
10013 MF
->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF
),
10014 MachineMemOperand::MOLoad
, 4, 4);
10017 BuildMI(*BB
, MI
, dl
, TII
->get(ARM::tLDRpci
))
10018 .addReg(varEnd
, RegState::Define
)
10019 .addConstantPoolIndex(Idx
)
10020 .add(predOps(ARMCC::AL
))
10021 .addMemOperand(CPMMO
);
10023 BuildMI(*BB
, MI
, dl
, TII
->get(ARM::LDRcp
))
10024 .addReg(varEnd
, RegState::Define
)
10025 .addConstantPoolIndex(Idx
)
10027 .add(predOps(ARMCC::AL
))
10028 .addMemOperand(CPMMO
);
10030 BB
->addSuccessor(loopMBB
);
10032 // Generate the loop body:
10033 // varPhi = PHI(varLoop, varEnd)
10034 // srcPhi = PHI(srcLoop, src)
10035 // destPhi = PHI(destLoop, dst)
10036 MachineBasicBlock
*entryBB
= BB
;
10038 Register varLoop
= MRI
.createVirtualRegister(TRC
);
10039 Register varPhi
= MRI
.createVirtualRegister(TRC
);
10040 Register srcLoop
= MRI
.createVirtualRegister(TRC
);
10041 Register srcPhi
= MRI
.createVirtualRegister(TRC
);
10042 Register destLoop
= MRI
.createVirtualRegister(TRC
);
10043 Register destPhi
= MRI
.createVirtualRegister(TRC
);
10045 BuildMI(*BB
, BB
->begin(), dl
, TII
->get(ARM::PHI
), varPhi
)
10046 .addReg(varLoop
).addMBB(loopMBB
)
10047 .addReg(varEnd
).addMBB(entryBB
);
10048 BuildMI(BB
, dl
, TII
->get(ARM::PHI
), srcPhi
)
10049 .addReg(srcLoop
).addMBB(loopMBB
)
10050 .addReg(src
).addMBB(entryBB
);
10051 BuildMI(BB
, dl
, TII
->get(ARM::PHI
), destPhi
)
10052 .addReg(destLoop
).addMBB(loopMBB
)
10053 .addReg(dest
).addMBB(entryBB
);
10055 // [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize)
10056 // [destLoop] = STR_POST(scratch, destPhi, UnitSiz)
10057 Register scratch
= MRI
.createVirtualRegister(IsNeon
? VecTRC
: TRC
);
10058 emitPostLd(BB
, BB
->end(), TII
, dl
, UnitSize
, scratch
, srcPhi
, srcLoop
,
10059 IsThumb1
, IsThumb2
);
10060 emitPostSt(BB
, BB
->end(), TII
, dl
, UnitSize
, scratch
, destPhi
, destLoop
,
10061 IsThumb1
, IsThumb2
);
10063 // Decrement loop variable by UnitSize.
10065 BuildMI(*BB
, BB
->end(), dl
, TII
->get(ARM::tSUBi8
), varLoop
)
10066 .add(t1CondCodeOp())
10069 .add(predOps(ARMCC::AL
));
10071 MachineInstrBuilder MIB
=
10072 BuildMI(*BB
, BB
->end(), dl
,
10073 TII
->get(IsThumb2
? ARM::t2SUBri
: ARM::SUBri
), varLoop
);
10076 .add(predOps(ARMCC::AL
))
10077 .add(condCodeOp());
10078 MIB
->getOperand(5).setReg(ARM::CPSR
);
10079 MIB
->getOperand(5).setIsDef(true);
10081 BuildMI(*BB
, BB
->end(), dl
,
10082 TII
->get(IsThumb1
? ARM::tBcc
: IsThumb2
? ARM::t2Bcc
: ARM::Bcc
))
10083 .addMBB(loopMBB
).addImm(ARMCC::NE
).addReg(ARM::CPSR
);
10085 // loopMBB can loop back to loopMBB or fall through to exitMBB.
10086 BB
->addSuccessor(loopMBB
);
10087 BB
->addSuccessor(exitMBB
);
10089 // Add epilogue to handle BytesLeft.
10091 auto StartOfExit
= exitMBB
->begin();
10093 // [scratch, srcOut] = LDRB_POST(srcLoop, 1)
10094 // [destOut] = STRB_POST(scratch, destLoop, 1)
10095 unsigned srcIn
= srcLoop
;
10096 unsigned destIn
= destLoop
;
10097 for (unsigned i
= 0; i
< BytesLeft
; i
++) {
10098 Register srcOut
= MRI
.createVirtualRegister(TRC
);
10099 Register destOut
= MRI
.createVirtualRegister(TRC
);
10100 Register scratch
= MRI
.createVirtualRegister(TRC
);
10101 emitPostLd(BB
, StartOfExit
, TII
, dl
, 1, scratch
, srcIn
, srcOut
,
10102 IsThumb1
, IsThumb2
);
10103 emitPostSt(BB
, StartOfExit
, TII
, dl
, 1, scratch
, destIn
, destOut
,
10104 IsThumb1
, IsThumb2
);
10109 MI
.eraseFromParent(); // The instruction is gone now.
10113 MachineBasicBlock
*
10114 ARMTargetLowering::EmitLowered__chkstk(MachineInstr
&MI
,
10115 MachineBasicBlock
*MBB
) const {
10116 const TargetMachine
&TM
= getTargetMachine();
10117 const TargetInstrInfo
&TII
= *Subtarget
->getInstrInfo();
10118 DebugLoc DL
= MI
.getDebugLoc();
10120 assert(Subtarget
->isTargetWindows() &&
10121 "__chkstk is only supported on Windows");
10122 assert(Subtarget
->isThumb2() && "Windows on ARM requires Thumb-2 mode");
10124 // __chkstk takes the number of words to allocate on the stack in R4, and
10125 // returns the stack adjustment in number of bytes in R4. This will not
10126 // clober any other registers (other than the obvious lr).
10128 // Although, technically, IP should be considered a register which may be
10129 // clobbered, the call itself will not touch it. Windows on ARM is a pure
10130 // thumb-2 environment, so there is no interworking required. As a result, we
10131 // do not expect a veneer to be emitted by the linker, clobbering IP.
10133 // Each module receives its own copy of __chkstk, so no import thunk is
10134 // required, again, ensuring that IP is not clobbered.
10136 // Finally, although some linkers may theoretically provide a trampoline for
10137 // out of range calls (which is quite common due to a 32M range limitation of
10138 // branches for Thumb), we can generate the long-call version via
10139 // -mcmodel=large, alleviating the need for the trampoline which may clobber
10142 switch (TM
.getCodeModel()) {
10143 case CodeModel::Tiny
:
10144 llvm_unreachable("Tiny code model not available on ARM.");
10145 case CodeModel::Small
:
10146 case CodeModel::Medium
:
10147 case CodeModel::Kernel
:
10148 BuildMI(*MBB
, MI
, DL
, TII
.get(ARM::tBL
))
10149 .add(predOps(ARMCC::AL
))
10150 .addExternalSymbol("__chkstk")
10151 .addReg(ARM::R4
, RegState::Implicit
| RegState::Kill
)
10152 .addReg(ARM::R4
, RegState::Implicit
| RegState::Define
)
10154 RegState::Implicit
| RegState::Define
| RegState::Dead
)
10156 RegState::Implicit
| RegState::Define
| RegState::Dead
);
10158 case CodeModel::Large
: {
10159 MachineRegisterInfo
&MRI
= MBB
->getParent()->getRegInfo();
10160 Register Reg
= MRI
.createVirtualRegister(&ARM::rGPRRegClass
);
10162 BuildMI(*MBB
, MI
, DL
, TII
.get(ARM::t2MOVi32imm
), Reg
)
10163 .addExternalSymbol("__chkstk");
10164 BuildMI(*MBB
, MI
, DL
, TII
.get(ARM::tBLXr
))
10165 .add(predOps(ARMCC::AL
))
10166 .addReg(Reg
, RegState::Kill
)
10167 .addReg(ARM::R4
, RegState::Implicit
| RegState::Kill
)
10168 .addReg(ARM::R4
, RegState::Implicit
| RegState::Define
)
10170 RegState::Implicit
| RegState::Define
| RegState::Dead
)
10172 RegState::Implicit
| RegState::Define
| RegState::Dead
);
10177 BuildMI(*MBB
, MI
, DL
, TII
.get(ARM::t2SUBrr
), ARM::SP
)
10178 .addReg(ARM::SP
, RegState::Kill
)
10179 .addReg(ARM::R4
, RegState::Kill
)
10180 .setMIFlags(MachineInstr::FrameSetup
)
10181 .add(predOps(ARMCC::AL
))
10182 .add(condCodeOp());
10184 MI
.eraseFromParent();
10188 MachineBasicBlock
*
10189 ARMTargetLowering::EmitLowered__dbzchk(MachineInstr
&MI
,
10190 MachineBasicBlock
*MBB
) const {
10191 DebugLoc DL
= MI
.getDebugLoc();
10192 MachineFunction
*MF
= MBB
->getParent();
10193 const TargetInstrInfo
*TII
= Subtarget
->getInstrInfo();
10195 MachineBasicBlock
*ContBB
= MF
->CreateMachineBasicBlock();
10196 MF
->insert(++MBB
->getIterator(), ContBB
);
10197 ContBB
->splice(ContBB
->begin(), MBB
,
10198 std::next(MachineBasicBlock::iterator(MI
)), MBB
->end());
10199 ContBB
->transferSuccessorsAndUpdatePHIs(MBB
);
10200 MBB
->addSuccessor(ContBB
);
10202 MachineBasicBlock
*TrapBB
= MF
->CreateMachineBasicBlock();
10203 BuildMI(TrapBB
, DL
, TII
->get(ARM::t__brkdiv0
));
10204 MF
->push_back(TrapBB
);
10205 MBB
->addSuccessor(TrapBB
);
10207 BuildMI(*MBB
, MI
, DL
, TII
->get(ARM::tCMPi8
))
10208 .addReg(MI
.getOperand(0).getReg())
10210 .add(predOps(ARMCC::AL
));
10211 BuildMI(*MBB
, MI
, DL
, TII
->get(ARM::t2Bcc
))
10214 .addReg(ARM::CPSR
);
10216 MI
.eraseFromParent();
// The CPSR operand of SelectItr might be missing a kill marker
// because there were multiple uses of CPSR, and ISel didn't know
// which to mark. Figure out whether SelectItr should have had a
// kill marker, and set it if it should. Returns the correct kill
// marker value.
static bool checkAndUpdateCPSRKill(MachineBasicBlock::iterator SelectItr,
                                   MachineBasicBlock* BB,
                                   const TargetRegisterInfo* TRI) {
  // Scan forward through BB for a use/def of CPSR.
  MachineBasicBlock::iterator miI(std::next(SelectItr));
  for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
    const MachineInstr& mi = *miI;
    if (mi.readsRegister(ARM::CPSR))
      return false;
    if (mi.definesRegister(ARM::CPSR))
      break; // Should have kill-flag - update below.
  }

  // If we hit the end of the block, check whether CPSR is live into a
  // successor.
  if (miI == BB->end()) {
    for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(),
         sEnd = BB->succ_end();
         sItr != sEnd; ++sItr) {
      MachineBasicBlock* succ = *sItr;
      if (succ->isLiveIn(ARM::CPSR))
        return false;
    }
  }

  // We found a def, or hit the end of the basic block and CPSR wasn't live
  // out. SelectMI should have a kill flag on CPSR.
  SelectItr->addRegisterKilled(ARM::CPSR, TRI);
  return true;
}
10256 MachineBasicBlock
*
10257 ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr
&MI
,
10258 MachineBasicBlock
*BB
) const {
10259 const TargetInstrInfo
*TII
= Subtarget
->getInstrInfo();
10260 DebugLoc dl
= MI
.getDebugLoc();
10261 bool isThumb2
= Subtarget
->isThumb2();
10262 switch (MI
.getOpcode()) {
10265 llvm_unreachable("Unexpected instr type to insert");
10268 // Thumb1 post-indexed loads are really just single-register LDMs.
10269 case ARM::tLDR_postidx
: {
10270 MachineOperand
Def(MI
.getOperand(1));
10271 BuildMI(*BB
, MI
, dl
, TII
->get(ARM::tLDMIA_UPD
))
10273 .add(MI
.getOperand(2)) // Rn
10274 .add(MI
.getOperand(3)) // PredImm
10275 .add(MI
.getOperand(4)) // PredReg
10276 .add(MI
.getOperand(0)) // Rt
10278 MI
.eraseFromParent();
10282 // The Thumb2 pre-indexed stores have the same MI operands, they just
10283 // define them differently in the .td files from the isel patterns, so
10284 // they need pseudos.
10285 case ARM::t2STR_preidx
:
10286 MI
.setDesc(TII
->get(ARM::t2STR_PRE
));
10288 case ARM::t2STRB_preidx
:
10289 MI
.setDesc(TII
->get(ARM::t2STRB_PRE
));
10291 case ARM::t2STRH_preidx
:
10292 MI
.setDesc(TII
->get(ARM::t2STRH_PRE
));
10295 case ARM::STRi_preidx
:
10296 case ARM::STRBi_preidx
: {
10297 unsigned NewOpc
= MI
.getOpcode() == ARM::STRi_preidx
? ARM::STR_PRE_IMM
10298 : ARM::STRB_PRE_IMM
;
10299 // Decode the offset.
10300 unsigned Offset
= MI
.getOperand(4).getImm();
10301 bool isSub
= ARM_AM::getAM2Op(Offset
) == ARM_AM::sub
;
10302 Offset
= ARM_AM::getAM2Offset(Offset
);
10306 MachineMemOperand
*MMO
= *MI
.memoperands_begin();
10307 BuildMI(*BB
, MI
, dl
, TII
->get(NewOpc
))
10308 .add(MI
.getOperand(0)) // Rn_wb
10309 .add(MI
.getOperand(1)) // Rt
10310 .add(MI
.getOperand(2)) // Rn
10311 .addImm(Offset
) // offset (skip GPR==zero_reg)
10312 .add(MI
.getOperand(5)) // pred
10313 .add(MI
.getOperand(6))
10314 .addMemOperand(MMO
);
10315 MI
.eraseFromParent();
10318 case ARM::STRr_preidx
:
10319 case ARM::STRBr_preidx
:
10320 case ARM::STRH_preidx
: {
10322 switch (MI
.getOpcode()) {
10323 default: llvm_unreachable("unexpected opcode!");
10324 case ARM::STRr_preidx
: NewOpc
= ARM::STR_PRE_REG
; break;
10325 case ARM::STRBr_preidx
: NewOpc
= ARM::STRB_PRE_REG
; break;
10326 case ARM::STRH_preidx
: NewOpc
= ARM::STRH_PRE
; break;
10328 MachineInstrBuilder MIB
= BuildMI(*BB
, MI
, dl
, TII
->get(NewOpc
));
10329 for (unsigned i
= 0; i
< MI
.getNumOperands(); ++i
)
10330 MIB
.add(MI
.getOperand(i
));
10331 MI
.eraseFromParent();
10335 case ARM::tMOVCCr_pseudo
: {
10336 // To "insert" a SELECT_CC instruction, we actually have to insert the
10337 // diamond control-flow pattern. The incoming instruction knows the
10338 // destination vreg to set, the condition code register to branch on, the
10339 // true/false values to select between, and a branch opcode to use.
10340 const BasicBlock
*LLVM_BB
= BB
->getBasicBlock();
10341 MachineFunction::iterator It
= ++BB
->getIterator();
10346 // cmpTY ccX, r1, r2
10348 // fallthrough --> copy0MBB
10349 MachineBasicBlock
*thisMBB
= BB
;
10350 MachineFunction
*F
= BB
->getParent();
10351 MachineBasicBlock
*copy0MBB
= F
->CreateMachineBasicBlock(LLVM_BB
);
10352 MachineBasicBlock
*sinkMBB
= F
->CreateMachineBasicBlock(LLVM_BB
);
10353 F
->insert(It
, copy0MBB
);
10354 F
->insert(It
, sinkMBB
);
10356 // Check whether CPSR is live past the tMOVCCr_pseudo.
10357 const TargetRegisterInfo
*TRI
= Subtarget
->getRegisterInfo();
10358 if (!MI
.killsRegister(ARM::CPSR
) &&
10359 !checkAndUpdateCPSRKill(MI
, thisMBB
, TRI
)) {
10360 copy0MBB
->addLiveIn(ARM::CPSR
);
10361 sinkMBB
->addLiveIn(ARM::CPSR
);
10364 // Transfer the remainder of BB and its successor edges to sinkMBB.
10365 sinkMBB
->splice(sinkMBB
->begin(), BB
,
10366 std::next(MachineBasicBlock::iterator(MI
)), BB
->end());
10367 sinkMBB
->transferSuccessorsAndUpdatePHIs(BB
);
10369 BB
->addSuccessor(copy0MBB
);
10370 BB
->addSuccessor(sinkMBB
);
10372 BuildMI(BB
, dl
, TII
->get(ARM::tBcc
))
10374 .addImm(MI
.getOperand(3).getImm())
10375 .addReg(MI
.getOperand(4).getReg());
10378 // %FalseValue = ...
10379 // # fallthrough to sinkMBB
10382 // Update machine-CFG edges
10383 BB
->addSuccessor(sinkMBB
);
10386 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
10389 BuildMI(*BB
, BB
->begin(), dl
, TII
->get(ARM::PHI
), MI
.getOperand(0).getReg())
10390 .addReg(MI
.getOperand(1).getReg())
10392 .addReg(MI
.getOperand(2).getReg())
10395 MI
.eraseFromParent(); // The pseudo instruction is gone now.
10400 case ARM::BCCZi64
: {
10401 // If there is an unconditional branch to the other successor, remove it.
10402 BB
->erase(std::next(MachineBasicBlock::iterator(MI
)), BB
->end());
10404 // Compare both parts that make up the double comparison separately for
10406 bool RHSisZero
= MI
.getOpcode() == ARM::BCCZi64
;
10408 Register LHS1
= MI
.getOperand(1).getReg();
10409 Register LHS2
= MI
.getOperand(2).getReg();
10411 BuildMI(BB
, dl
, TII
->get(isThumb2
? ARM::t2CMPri
: ARM::CMPri
))
10414 .add(predOps(ARMCC::AL
));
10415 BuildMI(BB
, dl
, TII
->get(isThumb2
? ARM::t2CMPri
: ARM::CMPri
))
10416 .addReg(LHS2
).addImm(0)
10417 .addImm(ARMCC::EQ
).addReg(ARM::CPSR
);
10419 Register RHS1
= MI
.getOperand(3).getReg();
10420 Register RHS2
= MI
.getOperand(4).getReg();
10421 BuildMI(BB
, dl
, TII
->get(isThumb2
? ARM::t2CMPrr
: ARM::CMPrr
))
10424 .add(predOps(ARMCC::AL
));
10425 BuildMI(BB
, dl
, TII
->get(isThumb2
? ARM::t2CMPrr
: ARM::CMPrr
))
10426 .addReg(LHS2
).addReg(RHS2
)
10427 .addImm(ARMCC::EQ
).addReg(ARM::CPSR
);
10430 MachineBasicBlock
*destMBB
= MI
.getOperand(RHSisZero
? 3 : 5).getMBB();
10431 MachineBasicBlock
*exitMBB
= OtherSucc(BB
, destMBB
);
10432 if (MI
.getOperand(0).getImm() == ARMCC::NE
)
10433 std::swap(destMBB
, exitMBB
);
10435 BuildMI(BB
, dl
, TII
->get(isThumb2
? ARM::t2Bcc
: ARM::Bcc
))
10436 .addMBB(destMBB
).addImm(ARMCC::EQ
).addReg(ARM::CPSR
);
10438 BuildMI(BB
, dl
, TII
->get(ARM::t2B
))
10440 .add(predOps(ARMCC::AL
));
10442 BuildMI(BB
, dl
, TII
->get(ARM::B
)) .addMBB(exitMBB
);
10444 MI
.eraseFromParent(); // The pseudo instruction is gone now.
10448 case ARM::Int_eh_sjlj_setjmp
:
10449 case ARM::Int_eh_sjlj_setjmp_nofp
:
10450 case ARM::tInt_eh_sjlj_setjmp
:
10451 case ARM::t2Int_eh_sjlj_setjmp
:
10452 case ARM::t2Int_eh_sjlj_setjmp_nofp
:
10455 case ARM::Int_eh_sjlj_setup_dispatch
:
10456 EmitSjLjDispatchBlock(MI
, BB
);
10461 // To insert an ABS instruction, we have to insert the
10462 // diamond control-flow pattern. The incoming instruction knows the
10463 // source vreg to test against 0, the destination vreg to set,
10464 // the condition code register to branch on, the
10465 // true/false values to select between, and a branch opcode to use.
10470 // BCC (branch to SinkBB if V0 >= 0)
10471 // RSBBB: V3 = RSBri V2, 0 (compute ABS if V2 < 0)
10472 // SinkBB: V1 = PHI(V2, V3)
10473 const BasicBlock
*LLVM_BB
= BB
->getBasicBlock();
10474 MachineFunction::iterator BBI
= ++BB
->getIterator();
10475 MachineFunction
*Fn
= BB
->getParent();
10476 MachineBasicBlock
*RSBBB
= Fn
->CreateMachineBasicBlock(LLVM_BB
);
10477 MachineBasicBlock
*SinkBB
= Fn
->CreateMachineBasicBlock(LLVM_BB
);
10478 Fn
->insert(BBI
, RSBBB
);
10479 Fn
->insert(BBI
, SinkBB
);
10481 Register ABSSrcReg
= MI
.getOperand(1).getReg();
10482 Register ABSDstReg
= MI
.getOperand(0).getReg();
10483 bool ABSSrcKIll
= MI
.getOperand(1).isKill();
10484 bool isThumb2
= Subtarget
->isThumb2();
10485 MachineRegisterInfo
&MRI
= Fn
->getRegInfo();
10486 // In Thumb mode S must not be specified if source register is the SP or
10487 // PC and if destination register is the SP, so restrict register class
10488 Register NewRsbDstReg
= MRI
.createVirtualRegister(
10489 isThumb2
? &ARM::rGPRRegClass
: &ARM::GPRRegClass
);
10491 // Transfer the remainder of BB and its successor edges to sinkMBB.
10492 SinkBB
->splice(SinkBB
->begin(), BB
,
10493 std::next(MachineBasicBlock::iterator(MI
)), BB
->end());
10494 SinkBB
->transferSuccessorsAndUpdatePHIs(BB
);
10496 BB
->addSuccessor(RSBBB
);
10497 BB
->addSuccessor(SinkBB
);
10499 // fall through to SinkMBB
10500 RSBBB
->addSuccessor(SinkBB
);
10502 // insert a cmp at the end of BB
10503 BuildMI(BB
, dl
, TII
->get(isThumb2
? ARM::t2CMPri
: ARM::CMPri
))
10506 .add(predOps(ARMCC::AL
));
10508 // insert a bcc with opposite CC to ARMCC::MI at the end of BB
10510 TII
->get(isThumb2
? ARM::t2Bcc
: ARM::Bcc
)).addMBB(SinkBB
)
10511 .addImm(ARMCC::getOppositeCondition(ARMCC::MI
)).addReg(ARM::CPSR
);
10513 // insert rsbri in RSBBB
10514 // Note: BCC and rsbri will be converted into predicated rsbmi
10515 // by if-conversion pass
10516 BuildMI(*RSBBB
, RSBBB
->begin(), dl
,
10517 TII
->get(isThumb2
? ARM::t2RSBri
: ARM::RSBri
), NewRsbDstReg
)
10518 .addReg(ABSSrcReg
, ABSSrcKIll
? RegState::Kill
: 0)
10520 .add(predOps(ARMCC::AL
))
10521 .add(condCodeOp());
10523 // insert PHI in SinkBB,
10524 // reuse ABSDstReg to not change uses of ABS instruction
10525 BuildMI(*SinkBB
, SinkBB
->begin(), dl
,
10526 TII
->get(ARM::PHI
), ABSDstReg
)
10527 .addReg(NewRsbDstReg
).addMBB(RSBBB
)
10528 .addReg(ABSSrcReg
).addMBB(BB
);
10530 // remove ABS instruction
10531 MI
.eraseFromParent();
10533 // return last added BB
10536 case ARM::COPY_STRUCT_BYVAL_I32
:
10538 return EmitStructByval(MI
, BB
);
10539 case ARM::WIN__CHKSTK
:
10540 return EmitLowered__chkstk(MI
, BB
);
10541 case ARM::WIN__DBZCHK
:
10542 return EmitLowered__dbzchk(MI
, BB
);
/// Attaches vregs to MEMCPY that it will use as scratch registers
/// when it is expanded into LDM/STM. This is done as a post-isel lowering
/// instead of as a custom inserter because we need the use list from the SDNode.
static void attachMEMCPYScratchRegs(const ARMSubtarget *Subtarget,
                                    MachineInstr &MI, const SDNode *Node) {
  bool isThumb1 = Subtarget->isThumb1Only();

  DebugLoc DL = MI.getDebugLoc();
  MachineFunction *MF = MI.getParent()->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineInstrBuilder MIB(*MF, MI);

  // If the new dst/src is unused mark it as dead.
  if (!Node->hasAnyUseOfValue(0)) {
    MI.getOperand(0).setIsDead(true);
  }
  if (!Node->hasAnyUseOfValue(1)) {
    MI.getOperand(1).setIsDead(true);
  }

  // The MEMCPY both defines and kills the scratch registers.
  for (unsigned I = 0; I != MI.getOperand(4).getImm(); ++I) {
    Register TmpReg = MRI.createVirtualRegister(isThumb1 ? &ARM::tGPRRegClass
                                                         : &ARM::GPRRegClass);
    MIB.addReg(TmpReg, RegState::Define | RegState::Dead);
  }
}
void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
                                                      SDNode *Node) const {
  if (MI.getOpcode() == ARM::MEMCPY) {
    attachMEMCPYScratchRegs(Subtarget, MI, Node);
    return;
  }

  const MCInstrDesc *MCID = &MI.getDesc();
  // Adjust potentially 's' setting instructions after isel, i.e. ADC, SBC, RSB,
  // RSC. Coming out of isel, they have an implicit CPSR def, but the optional
  // operand is still set to noreg. If needed, set the optional operand's
  // register to CPSR, and remove the redundant implicit def.
  //
  // e.g. ADCS (..., implicit-def CPSR) -> ADC (... opt:def CPSR).

  // Rename pseudo opcodes.
  unsigned NewOpc = convertAddSubFlagsOpcode(MI.getOpcode());
  unsigned ccOutIdx;
  if (NewOpc) {
    const ARMBaseInstrInfo *TII = Subtarget->getInstrInfo();
    MCID = &TII->get(NewOpc);

    assert(MCID->getNumOperands() ==
           MI.getDesc().getNumOperands() + 5 - MI.getDesc().getSize()
           && "converted opcode should be the same except for cc_out"
           " (and, on Thumb1, pred)");

    MI.setDesc(*MCID);

    // Add the optional cc_out operand
    MI.addOperand(MachineOperand::CreateReg(0, /*isDef=*/true));

    // On Thumb1, move all input operands to the end, then add the predicate
    if (Subtarget->isThumb1Only()) {
      for (unsigned c = MCID->getNumOperands() - 4; c--;) {
        MI.addOperand(MI.getOperand(1));
        MI.RemoveOperand(1);
      }

      // Restore the ties
      for (unsigned i = MI.getNumOperands(); i--;) {
        const MachineOperand& op = MI.getOperand(i);
        if (op.isReg() && op.isUse()) {
          int DefIdx = MCID->getOperandConstraint(i, MCOI::TIED_TO);
          if (DefIdx != -1)
            MI.tieOperands(DefIdx, i);
        }
      }

      MI.addOperand(MachineOperand::CreateImm(ARMCC::AL));
      MI.addOperand(MachineOperand::CreateReg(0, /*isDef=*/false));
      ccOutIdx = 1;
    } else
      ccOutIdx = MCID->getNumOperands() - 1;
  } else
    ccOutIdx = MCID->getNumOperands() - 1;

  // Any ARM instruction that sets the 's' bit should specify an optional
  // "cc_out" operand in the last operand position.
  if (!MI.hasOptionalDef() || !MCID->OpInfo[ccOutIdx].isOptionalDef()) {
    assert(!NewOpc && "Optional cc_out operand required");
    return;
  }
  // Look for an implicit def of CPSR added by MachineInstr ctor. Remove it
  // since we already have an optional CPSR def.
  bool definesCPSR = false;
  bool deadCPSR = false;
  for (unsigned i = MCID->getNumOperands(), e = MI.getNumOperands(); i != e;
       ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR) {
      definesCPSR = true;
      if (MO.isDead())
        deadCPSR = true;
      MI.RemoveOperand(i);
      break;
    }
  }
  if (!definesCPSR) {
    assert(!NewOpc && "Optional cc_out operand required");
    return;
  }
  assert(deadCPSR == !Node->hasAnyUseOfValue(1) && "inconsistent dead flag");
  if (deadCPSR) {
    assert(!MI.getOperand(ccOutIdx).getReg() &&
           "expect uninitialized optional cc_out operand");
    // Thumb1 instructions must have the S bit even if the CPSR is dead.
    if (!Subtarget->isThumb1Only())
      return;
  }

  // If this instruction was defined with an optional CPSR def and its dag node
  // had a live implicit CPSR def, then activate the optional CPSR def.
  MachineOperand &MO = MI.getOperand(ccOutIdx);
  MO.setReg(ARM::CPSR);
  MO.setIsDef(true);
}
//===----------------------------------------------------------------------===//
//                           ARM Optimization Hooks
//===----------------------------------------------------------------------===//

// Helper function that checks if N is a null or all ones constant.
static inline bool isZeroOrAllOnes(SDValue N, bool AllOnes) {
  return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
}
// Return true if N is conditionally 0 or all ones.
// Detects these expressions where cc is an i1 value:
//
//   (select cc 0, y)   [AllOnes=0]
//   (select cc y, 0)   [AllOnes=0]
//   (zext cc)          [AllOnes=0]
//   (sext cc)          [AllOnes=0/1]
//   (select cc -1, y)  [AllOnes=1]
//   (select cc y, -1)  [AllOnes=1]
//
// Invert is set when N is the null/all ones constant when CC is false.
// OtherOp is set to the alternative value of N.
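//
// For example, (select cc, 0, y) with AllOnes=0 sets CC=cc, Invert=false and
// OtherOp=y, while (select cc, y, 0) sets Invert=true because the zero is the
// value selected when cc is false.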
static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes,
                                       SDValue &CC, bool &Invert,
                                       SDValue &OtherOp,
                                       SelectionDAG &DAG) {
  switch (N->getOpcode()) {
  default: return false;
  case ISD::SELECT: {
    CC = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    if (isZeroOrAllOnes(N1, AllOnes)) {
      Invert = false;
      OtherOp = N2;
      return true;
    }
    if (isZeroOrAllOnes(N2, AllOnes)) {
      Invert = true;
      OtherOp = N1;
      return true;
    }
    return false;
  }
  case ISD::ZERO_EXTEND:
    // (zext cc) can never be the all ones value.
    if (AllOnes)
      return false;
    LLVM_FALLTHROUGH;
  case ISD::SIGN_EXTEND: {
    SDLoc dl(N);
    EVT VT = N->getValueType(0);
    CC = N->getOperand(0);
    if (CC.getValueType() != MVT::i1 || CC.getOpcode() != ISD::SETCC)
      return false;
    Invert = !AllOnes;
    if (AllOnes)
      // When looking for an AllOnes constant, N is an sext, and the 'other'
      // value is 0.
      OtherOp = DAG.getConstant(0, dl, VT);
    else if (N->getOpcode() == ISD::ZERO_EXTEND)
      // When looking for a 0 constant, N can be zext or sext.
      OtherOp = DAG.getConstant(1, dl, VT);
    else
      OtherOp = DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), dl,
                                VT);
    return true;
  }
  }
}
// Combine a constant select operand into its use:
//
//   (add (select cc, 0, c), x)  -> (select cc, x, (add, x, c))
//   (sub x, (select cc, 0, c))  -> (select cc, x, (sub, x, c))
//   (and (select cc, -1, c), x) -> (select cc, x, (and, x, c))  [AllOnes=1]
//   (or  (select cc, 0, c), x)  -> (select cc, x, (or, x, c))
//   (xor (select cc, 0, c), x)  -> (select cc, x, (xor, x, c))
//
// The transform is rejected if the select doesn't have a constant operand that
// is null, or all ones when AllOnes is set.
//
// Also recognize sext/zext from i1:
//
//   (add (zext cc), x) -> (select cc (add x, 1), x)
//   (add (sext cc), x) -> (select cc (add x, -1), x)
//
// These transformations eventually create predicated instructions.
//
// @param N       The node to transform.
// @param Slct    The N operand that is a select.
// @param OtherOp The other N operand (x above).
// @param DCI     Context.
// @param AllOnes Require the select constant to be all ones instead of null.
// @returns The new node, or SDValue() on failure.
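//
// Illustrative example: (add (select %cc, i32 0, i32 7), %x) becomes
// (select %cc, %x, (add %x, 7)), which later lowers to a predicated ADD.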
static
SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
                            TargetLowering::DAGCombinerInfo &DCI,
                            bool AllOnes = false) {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);
  SDValue NonConstantVal;
  SDValue CCOp;
  bool SwapSelectOps;
  if (!isConditionalZeroOrAllOnes(Slct.getNode(), AllOnes, CCOp, SwapSelectOps,
                                  NonConstantVal, DAG))
    return SDValue();

  // Slct is now known to be the desired identity constant when CC is true.
  SDValue TrueVal = OtherOp;
  SDValue FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT,
                                 OtherOp, NonConstantVal);
  // Unless SwapSelectOps says CC should be false.
  if (SwapSelectOps)
    std::swap(TrueVal, FalseVal);

  return DAG.getNode(ISD::SELECT, SDLoc(N), VT,
                     CCOp, TrueVal, FalseVal);
}
// Attempt combineSelectAndUse on each operand of a commutative operator N.
static
SDValue combineSelectAndUseCommutative(SDNode *N, bool AllOnes,
                                       TargetLowering::DAGCombinerInfo &DCI) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  if (N0.getNode()->hasOneUse())
    if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI, AllOnes))
      return Result;
  if (N1.getNode()->hasOneUse())
    if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI, AllOnes))
      return Result;
  return SDValue();
}
static bool IsVUZPShuffleNode(SDNode *N) {
  // VUZP shuffle node.
  if (N->getOpcode() == ARMISD::VUZP)
    return true;

  // "VUZP" on i32 is an alias for VTRN.
  if (N->getOpcode() == ARMISD::VTRN && N->getValueType(0) == MVT::v2i32)
    return true;

  return false;
}
static SDValue AddCombineToVPADD(SDNode *N, SDValue N0, SDValue N1,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) {
  // Look for ADD(VUZP.0, VUZP.1).
  if (!IsVUZPShuffleNode(N0.getNode()) || N0.getNode() != N1.getNode() ||
      N0 == N1)
    return SDValue();

  // Make sure the ADD is a 64-bit add; there is no 128-bit VPADD.
  if (!N->getValueType(0).is64BitVector())
    return SDValue();

  // Generate vpadd.
  SelectionDAG &DAG = DCI.DAG;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDLoc dl(N);
  SDNode *Unzip = N0.getNode();
  EVT VT = N->getValueType(0);

  SmallVector<SDValue, 8> Ops;
  Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpadd, dl,
                                TLI.getPointerTy(DAG.getDataLayout())));
  Ops.push_back(Unzip->getOperand(0));
  Ops.push_back(Unzip->getOperand(1));

  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, Ops);
}
static SDValue AddCombineVUZPToVPADDL(SDNode *N, SDValue N0, SDValue N1,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      const ARMSubtarget *Subtarget) {
  // Check for two extended operands.
  if (!(N0.getOpcode() == ISD::SIGN_EXTEND &&
        N1.getOpcode() == ISD::SIGN_EXTEND) &&
      !(N0.getOpcode() == ISD::ZERO_EXTEND &&
        N1.getOpcode() == ISD::ZERO_EXTEND))
    return SDValue();

  SDValue N00 = N0.getOperand(0);
  SDValue N10 = N1.getOperand(0);

  // Look for ADD(SEXT(VUZP.0), SEXT(VUZP.1))
  if (!IsVUZPShuffleNode(N00.getNode()) || N00.getNode() != N10.getNode() ||
      N00 == N10)
    return SDValue();

  // We only recognize Q register paddl here; this can't be reached until
  // after type legalization.
  if (!N00.getValueType().is64BitVector() ||
      !N0.getValueType().is128BitVector())
    return SDValue();

  // Generate vpaddl.
  SelectionDAG &DAG = DCI.DAG;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDLoc dl(N);
  EVT VT = N->getValueType(0);

  SmallVector<SDValue, 8> Ops;
  // Form vpaddl.sN or vpaddl.uN depending on the kind of extension.
  unsigned Opcode;
  if (N0.getOpcode() == ISD::SIGN_EXTEND)
    Opcode = Intrinsic::arm_neon_vpaddls;
  else
    Opcode = Intrinsic::arm_neon_vpaddlu;
  Ops.push_back(DAG.getConstant(Opcode, dl,
                                TLI.getPointerTy(DAG.getDataLayout())));
  EVT ElemTy = N00.getValueType().getVectorElementType();
  unsigned NumElts = VT.getVectorNumElements();
  EVT ConcatVT = EVT::getVectorVT(*DAG.getContext(), ElemTy, NumElts * 2);
  SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), ConcatVT,
                               N00.getOperand(0), N00.getOperand(1));
  Ops.push_back(Concat);

  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, Ops);
}
// FIXME: This function shouldn't be necessary; if we lower BUILD_VECTOR in
// an appropriate manner, we end up with ADD(VUZP(ZEXT(N))), which is
// much easier to match.
static SDValue
AddCombineBUILD_VECTORToVPADDL(SDNode *N, SDValue N0, SDValue N1,
                               TargetLowering::DAGCombinerInfo &DCI,
                               const ARMSubtarget *Subtarget) {
  // Only perform optimization if after legalize, and if NEON is available. We
  // also expected both operands to be BUILD_VECTORs.
  if (DCI.isBeforeLegalize() || !Subtarget->hasNEON()
      || N0.getOpcode() != ISD::BUILD_VECTOR
      || N1.getOpcode() != ISD::BUILD_VECTOR)
    return SDValue();

  // Check output type since VPADDL operand elements can only be 8, 16, or 32.
  EVT VT = N->getValueType(0);
  if (!VT.isInteger() || VT.getVectorElementType() == MVT::i64)
    return SDValue();

  // Check that the vector operands are of the right form.
  // N0 and N1 are BUILD_VECTOR nodes with N number of EXTRACT_VECTOR
  // operands, where N is the size of the formed vector.
  // Each EXTRACT_VECTOR should have the same input vector and odd or even
  // index such that we have a pair wise add pattern.

  // Grab the vector that all EXTRACT_VECTOR nodes should be referencing.
  if (N0->getOperand(0)->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
    return SDValue();
  SDValue Vec = N0->getOperand(0)->getOperand(0);
  SDNode *V = Vec.getNode();
  unsigned nextIndex = 0;

  // For each operands to the ADD which are BUILD_VECTORs,
  // check to see if each of their operands are an EXTRACT_VECTOR with
  // the same vector and appropriate index.
  for (unsigned i = 0, e = N0->getNumOperands(); i != e; ++i) {
    if (N0->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT
        && N1->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {

      SDValue ExtVec0 = N0->getOperand(i);
      SDValue ExtVec1 = N1->getOperand(i);

      // First operand is the vector, verify its the same.
      if (V != ExtVec0->getOperand(0).getNode() ||
          V != ExtVec1->getOperand(0).getNode())
        return SDValue();

      // Second is the constant, verify its correct.
      ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(ExtVec0->getOperand(1));
      ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(ExtVec1->getOperand(1));

      // For the constant, we want to see all the even or all the odd.
      if (!C0 || !C1 || C0->getZExtValue() != nextIndex
          || C1->getZExtValue() != nextIndex + 1)
        return SDValue();

      // Increment index.
      nextIndex += 2;
    } else
      return SDValue();
  }

  // Don't generate vpaddl+vmovn; we'll match it to vpadd later. Also make sure
  // we're using the entire input vector, otherwise there's a size/legality
  // mismatch somewhere.
  if (nextIndex != Vec.getValueType().getVectorNumElements() ||
      Vec.getValueType().getVectorElementType() == VT.getVectorElementType())
    return SDValue();

  // Create VPADDL node.
  SelectionDAG &DAG = DCI.DAG;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  SDLoc dl(N);

  // Build operand list.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddls, dl,
                                TLI.getPointerTy(DAG.getDataLayout())));

  // Input is the vector.
  Ops.push_back(Vec);

  // Get widened type and narrowed type.
  MVT widenType;
  unsigned numElem = VT.getVectorNumElements();

  EVT inputLaneType = Vec.getValueType().getVectorElementType();
  switch (inputLaneType.getSimpleVT().SimpleTy) {
    case MVT::i8: widenType = MVT::getVectorVT(MVT::i16, numElem); break;
    case MVT::i16: widenType = MVT::getVectorVT(MVT::i32, numElem); break;
    case MVT::i32: widenType = MVT::getVectorVT(MVT::i64, numElem); break;
    default:
      llvm_unreachable("Invalid vector element type for padd optimization.");
  }

  SDValue tmp = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, widenType, Ops);
  unsigned ExtOp = VT.bitsGT(tmp.getValueType()) ? ISD::ANY_EXTEND : ISD::TRUNCATE;
  return DAG.getNode(ExtOp, dl, VT, tmp);
}
static SDValue findMUL_LOHI(SDValue V) {
  if (V->getOpcode() == ISD::UMUL_LOHI ||
      V->getOpcode() == ISD::SMUL_LOHI)
    return V;
  return SDValue();
}
static SDValue AddCombineTo64BitSMLAL16(SDNode *AddcNode, SDNode *AddeNode,
                                        TargetLowering::DAGCombinerInfo &DCI,
                                        const ARMSubtarget *Subtarget) {
  if (Subtarget->isThumb()) {
    if (!Subtarget->hasDSP())
      return SDValue();
  } else if (!Subtarget->hasV5TEOps())
    return SDValue();

  // SMLALBB, SMLALBT, SMLALTB, SMLALTT multiply two 16-bit values and
  // accumulates the product into a 64-bit value. The 16-bit values will
  // be sign extended somehow or SRA'd into 32-bit values
  // (addc (adde (mul 16bit, 16bit), lo), hi)
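  // (In the mnemonics above, B selects the bottom and T the top halfword of
  //  the corresponding 32-bit multiply operand.)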
  SDValue Mul = AddcNode->getOperand(0);
  SDValue Lo = AddcNode->getOperand(1);
  if (Mul.getOpcode() != ISD::MUL) {
    Lo = AddcNode->getOperand(0);
    Mul = AddcNode->getOperand(1);
    if (Mul.getOpcode() != ISD::MUL)
      return SDValue();
  }

  SDValue SRA = AddeNode->getOperand(0);
  SDValue Hi = AddeNode->getOperand(1);
  if (SRA.getOpcode() != ISD::SRA) {
    SRA = AddeNode->getOperand(1);
    Hi = AddeNode->getOperand(0);
    if (SRA.getOpcode() != ISD::SRA)
      return SDValue();
  }
  if (auto Const = dyn_cast<ConstantSDNode>(SRA.getOperand(1))) {
    if (Const->getZExtValue() != 31)
      return SDValue();
  } else
    return SDValue();

  if (SRA.getOperand(0) != Mul)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(AddcNode);
  unsigned Opcode = 0;
  SDValue Op0;
  SDValue Op1;

  if (isS16(Mul.getOperand(0), DAG) && isS16(Mul.getOperand(1), DAG)) {
    Opcode = ARMISD::SMLALBB;
    Op0 = Mul.getOperand(0);
    Op1 = Mul.getOperand(1);
  } else if (isS16(Mul.getOperand(0), DAG) && isSRA16(Mul.getOperand(1))) {
    Opcode = ARMISD::SMLALBT;
    Op0 = Mul.getOperand(0);
    Op1 = Mul.getOperand(1).getOperand(0);
  } else if (isSRA16(Mul.getOperand(0)) && isS16(Mul.getOperand(1), DAG)) {
    Opcode = ARMISD::SMLALTB;
    Op0 = Mul.getOperand(0).getOperand(0);
    Op1 = Mul.getOperand(1);
  } else if (isSRA16(Mul.getOperand(0)) && isSRA16(Mul.getOperand(1))) {
    Opcode = ARMISD::SMLALTT;
    Op0 = Mul->getOperand(0).getOperand(0);
    Op1 = Mul->getOperand(1).getOperand(0);
  }

  if (!Op0 || !Op1)
    return SDValue();

  SDValue SMLAL = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                              Op0, Op1, Lo, Hi);
  // Replace the ADDs' nodes uses by the MLA node's values.
  SDValue HiMLALResult(SMLAL.getNode(), 1);
  SDValue LoMLALResult(SMLAL.getNode(), 0);

  DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), LoMLALResult);
  DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), HiMLALResult);

  // Return original node to notify the driver to stop replacing.
  SDValue resNode(AddcNode, 0);
  return resNode;
}
static SDValue AddCombineTo64bitMLAL(SDNode *AddeSubeNode,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const ARMSubtarget *Subtarget) {
  // Look for multiply add opportunities.
  // The pattern is a ISD::UMUL_LOHI followed by two add nodes, where
  // each add nodes consumes a value from ISD::UMUL_LOHI and there is
  // a glue link from the first add to the second add.
  // If we find this pattern, we can replace the U/SMUL_LOHI, ADDC, and ADDE by
  // a S/UMLAL instruction.
  //
  //                  UMUL_LOHI
  //                 / :lo    \ :hi
  //                V          \          [no multiline comment]
  //               ADDC         |
  //                 \ :carry  /
  //                  V        V
  //                    ADDE
  //
  // In the special case where only the higher part of a signed result is used
  // and the add to the low part of the result of ISD::UMUL_LOHI adds or subtracts
  // a constant with the exact value of 0x80000000, we recognize we are dealing
  // with a "rounded multiply and add" (or subtract) and transform it into
  // either a ARMISD::SMMLAR or ARMISD::SMMLSR respectively.
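  //
  // (Adding 0x80000000 to the low half before taking the high half is the
  //  usual round-to-nearest adjustment for a high-word-only result.)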
  assert((AddeSubeNode->getOpcode() == ARMISD::ADDE ||
          AddeSubeNode->getOpcode() == ARMISD::SUBE) &&
         "Expect an ADDE or SUBE");

  assert(AddeSubeNode->getNumOperands() == 3 &&
         AddeSubeNode->getOperand(2).getValueType() == MVT::i32 &&
         "ADDE node has the wrong inputs");

  // Check that we are chained to the right ADDC or SUBC node.
  SDNode *AddcSubcNode = AddeSubeNode->getOperand(2).getNode();
  if ((AddeSubeNode->getOpcode() == ARMISD::ADDE &&
       AddcSubcNode->getOpcode() != ARMISD::ADDC) ||
      (AddeSubeNode->getOpcode() == ARMISD::SUBE &&
       AddcSubcNode->getOpcode() != ARMISD::SUBC))
    return SDValue();

  SDValue AddcSubcOp0 = AddcSubcNode->getOperand(0);
  SDValue AddcSubcOp1 = AddcSubcNode->getOperand(1);

  // Check if the two operands are from the same mul_lohi node.
  if (AddcSubcOp0.getNode() == AddcSubcOp1.getNode())
    return SDValue();

  assert(AddcSubcNode->getNumValues() == 2 &&
         AddcSubcNode->getValueType(0) == MVT::i32 &&
         "Expect ADDC with two result values. First: i32");

  // Check that the ADDC adds the low result of the S/UMUL_LOHI. If not, it
  // maybe a SMLAL which multiplies two 16-bit values.
  if (AddeSubeNode->getOpcode() == ARMISD::ADDE &&
      AddcSubcOp0->getOpcode() != ISD::UMUL_LOHI &&
      AddcSubcOp0->getOpcode() != ISD::SMUL_LOHI &&
      AddcSubcOp1->getOpcode() != ISD::UMUL_LOHI &&
      AddcSubcOp1->getOpcode() != ISD::SMUL_LOHI)
    return AddCombineTo64BitSMLAL16(AddcSubcNode, AddeSubeNode, DCI, Subtarget);

  // Check for the triangle shape.
  SDValue AddeSubeOp0 = AddeSubeNode->getOperand(0);
  SDValue AddeSubeOp1 = AddeSubeNode->getOperand(1);

  // Make sure that the ADDE/SUBE operands are not coming from the same node.
  if (AddeSubeOp0.getNode() == AddeSubeOp1.getNode())
    return SDValue();

  // Find the MUL_LOHI node walking up ADDE/SUBE's operands.
  bool IsLeftOperandMUL = false;
  SDValue MULOp = findMUL_LOHI(AddeSubeOp0);
  if (MULOp == SDValue())
    MULOp = findMUL_LOHI(AddeSubeOp1);
  else
    IsLeftOperandMUL = true;
  if (MULOp == SDValue())
    return SDValue();

  // Figure out the right opcode.
  unsigned Opc = MULOp->getOpcode();
  unsigned FinalOpc = (Opc == ISD::SMUL_LOHI) ? ARMISD::SMLAL : ARMISD::UMLAL;

  // Figure out the high and low input values to the MLAL node.
  SDValue *HiAddSub = nullptr;
  SDValue *LoMul = nullptr;
  SDValue *LowAddSub = nullptr;

  // Ensure that ADDE/SUBE is from high result of ISD::xMUL_LOHI.
  if ((AddeSubeOp0 != MULOp.getValue(1)) && (AddeSubeOp1 != MULOp.getValue(1)))
    return SDValue();

  if (IsLeftOperandMUL)
    HiAddSub = &AddeSubeOp1;
  else
    HiAddSub = &AddeSubeOp0;

  // Ensure that LoMul and LowAddSub are taken from correct ISD::SMUL_LOHI node
  // whose low result is fed to the ADDC/SUBC we are checking.

  if (AddcSubcOp0 == MULOp.getValue(0)) {
    LoMul = &AddcSubcOp0;
    LowAddSub = &AddcSubcOp1;
  }
  if (AddcSubcOp1 == MULOp.getValue(0)) {
    LoMul = &AddcSubcOp1;
    LowAddSub = &AddcSubcOp0;
  }

  if (!LoMul)
    return SDValue();

  // If HiAddSub is the same node as ADDC/SUBC or is a predecessor of ADDC/SUBC
  // the replacement below will create a cycle.
  if (AddcSubcNode == HiAddSub->getNode() ||
      AddcSubcNode->isPredecessorOf(HiAddSub->getNode()))
    return SDValue();

  // Create the merged node.
  SelectionDAG &DAG = DCI.DAG;

  // Start building operand list.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(LoMul->getOperand(0));
  Ops.push_back(LoMul->getOperand(1));

  // Check whether we can use SMMLAR, SMMLSR or SMMULR instead. For this to be
  // the case, we must be doing signed multiplication and only use the higher
  // part of the result of the MLAL, furthermore the LowAddSub must be a constant
  // addition or subtraction with the value of 0x80000000.
  if (Subtarget->hasV6Ops() && Subtarget->hasDSP() && Subtarget->useMulOps() &&
      FinalOpc == ARMISD::SMLAL && !AddeSubeNode->hasAnyUseOfValue(1) &&
      LowAddSub->getNode()->getOpcode() == ISD::Constant &&
      static_cast<ConstantSDNode *>(LowAddSub->getNode())->getZExtValue() ==
          0x80000000) {
    Ops.push_back(*HiAddSub);
    if (AddcSubcNode->getOpcode() == ARMISD::SUBC) {
      FinalOpc = ARMISD::SMMLSR;
    } else {
      FinalOpc = ARMISD::SMMLAR;
    }
    SDValue NewNode = DAG.getNode(FinalOpc, SDLoc(AddcSubcNode), MVT::i32, Ops);
    DAG.ReplaceAllUsesOfValueWith(SDValue(AddeSubeNode, 0), NewNode);

    return SDValue(AddeSubeNode, 0);
  } else if (AddcSubcNode->getOpcode() == ARMISD::SUBC)
    // SMMLS is generated during instruction selection and the rest of this
    // function can not handle the case where AddcSubcNode is a SUBC.
    return SDValue();

  // Finish building the operand list for {U/S}MLAL
  Ops.push_back(*LowAddSub);
  Ops.push_back(*HiAddSub);

  SDValue MLALNode = DAG.getNode(FinalOpc, SDLoc(AddcSubcNode),
                                 DAG.getVTList(MVT::i32, MVT::i32), Ops);

  // Replace the ADDs' nodes uses by the MLA node's values.
  SDValue HiMLALResult(MLALNode.getNode(), 1);
  DAG.ReplaceAllUsesOfValueWith(SDValue(AddeSubeNode, 0), HiMLALResult);

  SDValue LoMLALResult(MLALNode.getNode(), 0);
  DAG.ReplaceAllUsesOfValueWith(SDValue(AddcSubcNode, 0), LoMLALResult);

  // Return original node to notify the driver to stop replacing.
  return SDValue(AddeSubeNode, 0);
}
static SDValue AddCombineTo64bitUMAAL(SDNode *AddeNode,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      const ARMSubtarget *Subtarget) {
  // UMAAL is similar to UMLAL except that it adds two unsigned values.
  // While trying to combine for the other MLAL nodes, first search for the
  // chance to use UMAAL. Check if Addc uses a node which has already
  // been combined into a UMLAL. The other pattern is UMLAL using Addc/Adde
  // as the addend, and it's handled in PerformUMLALCombine.

  if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP())
    return AddCombineTo64bitMLAL(AddeNode, DCI, Subtarget);

  // Check that we have a glued ADDC node.
  SDNode *AddcNode = AddeNode->getOperand(2).getNode();
  if (AddcNode->getOpcode() != ARMISD::ADDC)
    return SDValue();

  // Find the converted UMAAL or quit if it doesn't exist.
  SDNode *UmlalNode = nullptr;
  SDValue AddHi;
  if (AddcNode->getOperand(0).getOpcode() == ARMISD::UMLAL) {
    UmlalNode = AddcNode->getOperand(0).getNode();
    AddHi = AddcNode->getOperand(1);
  } else if (AddcNode->getOperand(1).getOpcode() == ARMISD::UMLAL) {
    UmlalNode = AddcNode->getOperand(1).getNode();
    AddHi = AddcNode->getOperand(0);
  } else {
    return AddCombineTo64bitMLAL(AddeNode, DCI, Subtarget);
  }

  // The ADDC should be glued to an ADDE node, which uses the same UMLAL as
  // the ADDC as well as Zero.
  if (!isNullConstant(UmlalNode->getOperand(3)))
    return SDValue();

  if ((isNullConstant(AddeNode->getOperand(0)) &&
       AddeNode->getOperand(1).getNode() == UmlalNode) ||
      (AddeNode->getOperand(0).getNode() == UmlalNode &&
       isNullConstant(AddeNode->getOperand(1)))) {
    SelectionDAG &DAG = DCI.DAG;
    SDValue Ops[] = { UmlalNode->getOperand(0), UmlalNode->getOperand(1),
                      UmlalNode->getOperand(2), AddHi };
    SDValue UMAAL = DAG.getNode(ARMISD::UMAAL, SDLoc(AddcNode),
                                DAG.getVTList(MVT::i32, MVT::i32), Ops);

    // Replace the ADDs' nodes uses by the UMAAL node's values.
    DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), SDValue(UMAAL.getNode(), 1));
    DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), SDValue(UMAAL.getNode(), 0));

    // Return original node to notify the driver to stop replacing.
    return SDValue(AddeNode, 0);
  }
  return SDValue();
}
static SDValue PerformUMLALCombine(SDNode *N, SelectionDAG &DAG,
                                   const ARMSubtarget *Subtarget) {
  if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP())
    return SDValue();

  // Check that we have a pair of ADDC and ADDE as operands.
  // Both addends of the ADDE must be zero.
  SDNode *AddcNode = N->getOperand(2).getNode();
  SDNode *AddeNode = N->getOperand(3).getNode();
  if ((AddcNode->getOpcode() == ARMISD::ADDC) &&
      (AddeNode->getOpcode() == ARMISD::ADDE) &&
      isNullConstant(AddeNode->getOperand(0)) &&
      isNullConstant(AddeNode->getOperand(1)) &&
      (AddeNode->getOperand(2).getNode() == AddcNode))
    return DAG.getNode(ARMISD::UMAAL, SDLoc(N),
                       DAG.getVTList(MVT::i32, MVT::i32),
                       {N->getOperand(0), N->getOperand(1),
                        AddcNode->getOperand(0), AddcNode->getOperand(1)});
  return SDValue();
}
static SDValue PerformAddcSubcCombine(SDNode *N,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      const ARMSubtarget *Subtarget) {
  SelectionDAG &DAG(DCI.DAG);

  if (N->getOpcode() == ARMISD::SUBC) {
    // (SUBC (ADDE 0, 0, C), 1) -> C
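    // (ADDE 0, 0, C) materialises the incoming carry C as a 0/1 value;
    // subtracting 1 from it borrows exactly when C is 0, so the SUBC's flag
    // result is just C again.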
    SDValue LHS = N->getOperand(0);
    SDValue RHS = N->getOperand(1);
    if (LHS->getOpcode() == ARMISD::ADDE &&
        isNullConstant(LHS->getOperand(0)) &&
        isNullConstant(LHS->getOperand(1)) && isOneConstant(RHS)) {
      return DCI.CombineTo(N, SDValue(N, 0), LHS->getOperand(2));
    }
  }

  if (Subtarget->isThumb1Only()) {
    SDValue RHS = N->getOperand(1);
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
      int32_t imm = C->getSExtValue();
      if (imm < 0 && imm > std::numeric_limits<int>::min()) {
        SDLoc DL(N);
        RHS = DAG.getConstant(-imm, DL, MVT::i32);
        unsigned Opcode = (N->getOpcode() == ARMISD::ADDC) ? ARMISD::SUBC
                                                           : ARMISD::ADDC;
        return DAG.getNode(Opcode, DL, N->getVTList(), N->getOperand(0), RHS);
      }
    }
  }

  return SDValue();
}
static SDValue PerformAddeSubeCombine(SDNode *N,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      const ARMSubtarget *Subtarget) {
  if (Subtarget->isThumb1Only()) {
    SelectionDAG &DAG = DCI.DAG;
    SDValue RHS = N->getOperand(1);
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
      int64_t imm = C->getSExtValue();
      if (imm < 0) {
        SDLoc DL(N);

        // The with-carry-in form matches bitwise not instead of the negation.
        // Effectively, the inverse interpretation of the carry flag already
        // accounts for part of the negation.
        RHS = DAG.getConstant(~imm, DL, MVT::i32);

        unsigned Opcode = (N->getOpcode() == ARMISD::ADDE) ? ARMISD::SUBE
                                                           : ARMISD::ADDE;
        return DAG.getNode(Opcode, DL, N->getVTList(),
                           N->getOperand(0), RHS, N->getOperand(2));
      }
    }
  } else if (N->getOperand(1)->getOpcode() == ISD::SMUL_LOHI) {
    return AddCombineTo64bitMLAL(N, DCI, Subtarget);
  }
  return SDValue();
}
static SDValue PerformABSCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) {
  SDValue res;
  SelectionDAG &DAG = DCI.DAG;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  if (TLI.isOperationLegal(N->getOpcode(), N->getValueType(0)))
    return SDValue();

  if (!TLI.expandABS(N, res, DAG))
    return SDValue();

  return res;
}
/// PerformADDECombine - Target-specific dag combine transform from
/// ARMISD::ADDC, ARMISD::ADDE, and ISD::MUL_LOHI to MLAL or
/// ARMISD::ADDC, ARMISD::ADDE and ARMISD::UMLAL to ARMISD::UMAAL
static SDValue PerformADDECombine(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const ARMSubtarget *Subtarget) {
  // Only ARM and Thumb2 support UMLAL/SMLAL.
  if (Subtarget->isThumb1Only())
    return PerformAddeSubeCombine(N, DCI, Subtarget);

  // Only perform the checks after legalize when the pattern is available.
  if (DCI.isBeforeLegalize()) return SDValue();

  return AddCombineTo64bitUMAAL(N, DCI, Subtarget);
}
/// PerformADDCombineWithOperands - Try DAG combinations for an ADD with
/// operands N0 and N1. This is a helper for PerformADDCombine that is
/// called with the default operands, and if that fails, with commuted
/// operands.
static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
                                             TargetLowering::DAGCombinerInfo &DCI,
                                             const ARMSubtarget *Subtarget){
  // Attempt to create vpadd for this add.
  if (SDValue Result = AddCombineToVPADD(N, N0, N1, DCI, Subtarget))
    return Result;

  // Attempt to create vpaddl for this add.
  if (SDValue Result = AddCombineVUZPToVPADDL(N, N0, N1, DCI, Subtarget))
    return Result;
  if (SDValue Result = AddCombineBUILD_VECTORToVPADDL(N, N0, N1, DCI,
                                                      Subtarget))
    return Result;

  // fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c))
  if (N0.getNode()->hasOneUse())
    if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI))
      return Result;
  return SDValue();
}
bool
ARMTargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
                                                 CombineLevel Level) const {
  if (Level == BeforeLegalizeTypes)
    return true;

  if (N->getOpcode() != ISD::SHL)
    return true;

  if (Subtarget->isThumb1Only()) {
    // Avoid making expensive immediates by commuting shifts. (This logic
    // only applies to Thumb1 because ARM and Thumb2 immediates can be shifted
    // for free.)
    if (N->getOpcode() != ISD::SHL)
      return true;
    SDValue N1 = N->getOperand(0);
    if (N1->getOpcode() != ISD::ADD && N1->getOpcode() != ISD::AND &&
        N1->getOpcode() != ISD::OR && N1->getOpcode() != ISD::XOR)
      return true;
    if (auto *Const = dyn_cast<ConstantSDNode>(N1->getOperand(1))) {
      if (Const->getAPIntValue().ult(256))
        return false;
      if (N1->getOpcode() == ISD::ADD && Const->getAPIntValue().slt(0) &&
          Const->getAPIntValue().sgt(-256))
        return false;
    }
    return true;
  }

  // Turn off commute-with-shift transform after legalization, so it doesn't
  // conflict with PerformSHLSimplify. (We could try to detect when
  // PerformSHLSimplify would trigger more precisely, but it isn't
  // really necessary.)
  return false;
}
bool ARMTargetLowering::shouldFoldConstantShiftPairToMask(
    const SDNode *N, CombineLevel Level) const {
  if (!Subtarget->isThumb1Only())
    return true;

  if (Level == BeforeLegalizeTypes)
    return true;

  return false;
}
bool ARMTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const {
  if (!Subtarget->hasNEON()) {
    if (Subtarget->isThumb1Only())
      return VT.getScalarSizeInBits() <= 32;
    return true;
  }
  return VT.isScalarInteger();
}
static SDValue PerformSHLSimplify(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const ARMSubtarget *ST) {
  // Allow the generic combiner to identify potential bswaps.
  if (DCI.isBeforeLegalize())
    return SDValue();

  // DAG combiner will fold:
  // (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
  // (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
  // Other code patterns that can also be modified have the following form:
  // b + ((a << 1) | 510)
  // b + ((a << 1) & 510)
  // b + ((a << 1) ^ 510)
  // b + ((a << 1) + 510)

  // Many instructions can perform the shift for free, but it requires both
  // the operands to be registers. If c1 << c2 is too large, a mov immediate
  // instruction will be needed. So, unfold back to the original pattern if:
  // - if c1 and c2 are small enough that they don't require mov imms.
  // - the user(s) of the node can perform an shl
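  //
  // (510 in the examples above is 255 << 1: after undoing the shift by one,
  //  the remaining constant 255 still fits the 8-bit rotated immediate
  //  encoding, so the unfolded form needs no extra mov.)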
  // No shifted operands for 16-bit instructions.
  if (ST->isThumb() && ST->isThumb1Only())
    return SDValue();

  // Check that all the users could perform the shl themselves.
  for (auto U : N->uses()) {
    switch(U->getOpcode()) {
    default:
      return SDValue();
    case ISD::SUB:
    case ISD::ADD:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR:
    case ISD::SETCC:
    case ARMISD::CMP:
      // Check that the user isn't already using a constant because there
      // aren't any instructions that support an immediate operand and a
      // shifted operand.
      if (isa<ConstantSDNode>(U->getOperand(0)) ||
          isa<ConstantSDNode>(U->getOperand(1)))
        return SDValue();

      // Check that it's not already using a shift.
      if (U->getOperand(0).getOpcode() == ISD::SHL ||
          U->getOperand(1).getOpcode() == ISD::SHL)
        return SDValue();
      break;
    }
  }

  if (N->getOpcode() != ISD::ADD && N->getOpcode() != ISD::OR &&
      N->getOpcode() != ISD::XOR && N->getOpcode() != ISD::AND)
    return SDValue();

  if (N->getOperand(0).getOpcode() != ISD::SHL)
    return SDValue();

  SDValue SHL = N->getOperand(0);

  auto *C1ShlC2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
  auto *C2 = dyn_cast<ConstantSDNode>(SHL.getOperand(1));
  if (!C1ShlC2 || !C2)
    return SDValue();

  APInt C2Int = C2->getAPIntValue();
  APInt C1Int = C1ShlC2->getAPIntValue();

  // Check that performing a lshr will not lose any information.
  APInt Mask = APInt::getHighBitsSet(C2Int.getBitWidth(),
                                     C2Int.getBitWidth() - C2->getZExtValue());
  if ((C1Int & Mask) != C1Int)
    return SDValue();

  // Shift the first constant.
  C1Int.lshrInPlace(C2Int);

  // The immediates are encoded as an 8-bit value that can be rotated.
  auto LargeImm = [](const APInt &Imm) {
    unsigned Zeros = Imm.countLeadingZeros() + Imm.countTrailingZeros();
    return Imm.getBitWidth() - Zeros > 8;
  };

  if (LargeImm(C1Int) || LargeImm(C2Int))
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  SDValue X = SHL.getOperand(0);
  SDValue BinOp = DAG.getNode(N->getOpcode(), dl, MVT::i32, X,
                              DAG.getConstant(C1Int, dl, MVT::i32));
  // Shift left to compensate for the lshr of C1Int.
  SDValue Res = DAG.getNode(ISD::SHL, dl, MVT::i32, BinOp, SHL.getOperand(1));

  LLVM_DEBUG(dbgs() << "Simplify shl use:\n"; SHL.getOperand(0).dump();
             SHL.dump(); N->dump());
  LLVM_DEBUG(dbgs() << "Into:\n"; X.dump(); BinOp.dump(); Res.dump());
  return Res;
}
/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
///
static SDValue PerformADDCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // Only works one way, because it needs an immediate operand.
  if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget))
    return Result;

  // First try with the default operand order.
  if (SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget))
    return Result;

  // If that didn't work, try again with the operands commuted.
  return PerformADDCombineWithOperands(N, N1, N0, DCI, Subtarget);
}
/// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB.
///
static SDValue PerformSUBCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c))
  if (N1.getNode()->hasOneUse())
    if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI))
      return Result;

  return SDValue();
}
/// PerformVMULCombine
/// Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the
/// special multiplier accumulator forwarding.
///   vmul d3, d0, d2
///   vmla d3, d1, d2
/// is faster than
///   vadd d3, d0, d1
///   vmul d3, d3, d2
//  However, for (A + B) * (A + B), the transform is not profitable, so bail
//  out when both multiplicands are the same node (the N0 == N1 check below).
static SDValue PerformVMULCombine(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const ARMSubtarget *Subtarget) {
  if (!Subtarget->hasVMLxForwarding())
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  unsigned Opcode = N0.getOpcode();
  if (Opcode != ISD::ADD && Opcode != ISD::SUB &&
      Opcode != ISD::FADD && Opcode != ISD::FSUB) {
    Opcode = N1.getOpcode();
    if (Opcode != ISD::ADD && Opcode != ISD::SUB &&
        Opcode != ISD::FADD && Opcode != ISD::FSUB)
      return SDValue();
    std::swap(N0, N1);
  }

  if (N0 == N1)
    return SDValue();

  EVT VT = N->getValueType(0);
  SDLoc DL(N);
  SDValue N00 = N0->getOperand(0);
  SDValue N01 = N0->getOperand(1);
  return DAG.getNode(Opcode, DL, VT,
                     DAG.getNode(ISD::MUL, DL, VT, N00, N1),
                     DAG.getNode(ISD::MUL, DL, VT, N01, N1));
}
static SDValue PerformMULCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) {
  SelectionDAG &DAG = DCI.DAG;

  if (Subtarget->isThumb1Only())
    return SDValue();

  if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
    return SDValue();

  EVT VT = N->getValueType(0);
  if (VT.is64BitVector() || VT.is128BitVector())
    return PerformVMULCombine(N, DCI, Subtarget);
  if (VT != MVT::i32)
    return SDValue();

  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!C)
    return SDValue();

  int64_t MulAmt = C->getSExtValue();
  unsigned ShiftAmt = countTrailingZeros<uint64_t>(MulAmt);

  ShiftAmt = ShiftAmt & (32 - 1);
  SDValue V = N->getOperand(0);
  SDLoc DL(N);

  SDValue Res;
  MulAmt >>= ShiftAmt;

  if (MulAmt >= 0) {
    if (isPowerOf2_32(MulAmt - 1)) {
      // (mul x, 2^N + 1) => (add (shl x, N), x)
      Res = DAG.getNode(ISD::ADD, DL, VT,
                        V,
                        DAG.getNode(ISD::SHL, DL, VT,
                                    V,
                                    DAG.getConstant(Log2_32(MulAmt - 1), DL,
                                                    MVT::i32)));
    } else if (isPowerOf2_32(MulAmt + 1)) {
      // (mul x, 2^N - 1) => (sub (shl x, N), x)
      Res = DAG.getNode(ISD::SUB, DL, VT,
                        DAG.getNode(ISD::SHL, DL, VT,
                                    V,
                                    DAG.getConstant(Log2_32(MulAmt + 1), DL,
                                                    MVT::i32)),
                        V);
    } else
      return SDValue();
  } else {
    uint64_t MulAmtAbs = -MulAmt;
    if (isPowerOf2_32(MulAmtAbs + 1)) {
      // (mul x, -(2^N - 1)) => (sub x, (shl x, N))
      Res = DAG.getNode(ISD::SUB, DL, VT,
                        V,
                        DAG.getNode(ISD::SHL, DL, VT,
                                    V,
                                    DAG.getConstant(Log2_32(MulAmtAbs + 1), DL,
                                                    MVT::i32)));
    } else if (isPowerOf2_32(MulAmtAbs - 1)) {
      // (mul x, -(2^N + 1)) => - (add (shl x, N), x)
      Res = DAG.getNode(ISD::ADD, DL, VT,
                        V,
                        DAG.getNode(ISD::SHL, DL, VT,
                                    V,
                                    DAG.getConstant(Log2_32(MulAmtAbs - 1), DL,
                                                    MVT::i32)));
      Res = DAG.getNode(ISD::SUB, DL, VT,
                        DAG.getConstant(0, DL, MVT::i32), Res);
    } else
      return SDValue();
  }

  if (ShiftAmt != 0)
    Res = DAG.getNode(ISD::SHL, DL, VT,
                      Res, DAG.getConstant(ShiftAmt, DL, MVT::i32));

  // Do not add new nodes to DAG combiner worklist.
  DCI.CombineTo(N, Res, false);
  return SDValue();
}
static SDValue CombineANDShift(SDNode *N,
                               TargetLowering::DAGCombinerInfo &DCI,
                               const ARMSubtarget *Subtarget) {
  // Allow DAGCombine to pattern-match before we touch the canonical form.
  if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
    return SDValue();

  if (N->getValueType(0) != MVT::i32)
    return SDValue();

  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!N1C)
    return SDValue();

  uint32_t C1 = (uint32_t)N1C->getZExtValue();
  // Don't transform uxtb/uxth.
  if (C1 == 255 || C1 == 65535)
    return SDValue();

  SDNode *N0 = N->getOperand(0).getNode();
  if (!N0->hasOneUse())
    return SDValue();

  if (N0->getOpcode() != ISD::SHL && N0->getOpcode() != ISD::SRL)
    return SDValue();

  bool LeftShift = N0->getOpcode() == ISD::SHL;

  ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
  if (!N01C)
    return SDValue();

  uint32_t C2 = (uint32_t)N01C->getZExtValue();
  if (!C2 || C2 >= 32)
    return SDValue();

  // Clear irrelevant bits in the mask.
  if (LeftShift)
    C1 &= (-1U << C2);
  else
    C1 &= (-1U >> C2);

  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  // We have a pattern of the form "(and (shl x, c2) c1)" or
  // "(and (srl x, c2) c1)", where c1 is a shifted mask. Try to
  // transform to a pair of shifts, to save materializing c1.
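  //
  // For example, with c2 = 4 and c1 = 0x00ffffff the node
  // (and (srl x, 4), 0xffffff) becomes (srl (shl x, 4), 8): the same 24 bits
  // are extracted, but no 0x00ffffff constant has to be materialized.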
  // First pattern: right shift, then mask off leading bits.
  // FIXME: Use demanded bits?
  if (!LeftShift && isMask_32(C1)) {
    uint32_t C3 = countLeadingZeros(C1);
    if (C2 < C3) {
      SDValue SHL = DAG.getNode(ISD::SHL, DL, MVT::i32, N0->getOperand(0),
                                DAG.getConstant(C3 - C2, DL, MVT::i32));
      return DAG.getNode(ISD::SRL, DL, MVT::i32, SHL,
                         DAG.getConstant(C3, DL, MVT::i32));
    }
  }

  // First pattern, reversed: left shift, then mask off trailing bits.
  if (LeftShift && isMask_32(~C1)) {
    uint32_t C3 = countTrailingZeros(C1);
    if (C2 < C3) {
      SDValue SHL = DAG.getNode(ISD::SRL, DL, MVT::i32, N0->getOperand(0),
                                DAG.getConstant(C3 - C2, DL, MVT::i32));
      return DAG.getNode(ISD::SHL, DL, MVT::i32, SHL,
                         DAG.getConstant(C3, DL, MVT::i32));
    }
  }

  // Second pattern: left shift, then mask off leading bits.
  // FIXME: Use demanded bits?
  if (LeftShift && isShiftedMask_32(C1)) {
    uint32_t Trailing = countTrailingZeros(C1);
    uint32_t C3 = countLeadingZeros(C1);
    if (Trailing == C2 && C2 + C3 < 32) {
      SDValue SHL = DAG.getNode(ISD::SHL, DL, MVT::i32, N0->getOperand(0),
                                DAG.getConstant(C2 + C3, DL, MVT::i32));
      return DAG.getNode(ISD::SRL, DL, MVT::i32, SHL,
                         DAG.getConstant(C3, DL, MVT::i32));
    }
  }

  // Second pattern, reversed: right shift, then mask off trailing bits.
  // FIXME: Handle other patterns of known/demanded bits.
  if (!LeftShift && isShiftedMask_32(C1)) {
    uint32_t Leading = countLeadingZeros(C1);
    uint32_t C3 = countTrailingZeros(C1);
    if (Leading == C2 && C2 + C3 < 32) {
      SDValue SHL = DAG.getNode(ISD::SRL, DL, MVT::i32, N0->getOperand(0),
                                DAG.getConstant(C2 + C3, DL, MVT::i32));
      return DAG.getNode(ISD::SHL, DL, MVT::i32, SHL,
                         DAG.getConstant(C3, DL, MVT::i32));
    }
  }

  // FIXME: Transform "(and (shl x, c2) c1)" ->
  // "(shl (and x, c1>>c2), c2)" if "c1 >> c2" is a cheaper immediate than
  // c1.
  return SDValue();
}
static SDValue PerformANDCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) {
  // Attempt to use immediate-form VBIC
  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  SelectionDAG &DAG = DCI.DAG;

  if(!DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();

  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (BVN && Subtarget->hasNEON() &&
      BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
    if (SplatBitSize <= 64) {
      EVT VbicVT;
      SDValue Val = isVMOVModifiedImm((~SplatBits).getZExtValue(),
                                      SplatUndef.getZExtValue(), SplatBitSize,
                                      DAG, dl, VbicVT, VT.is128BitVector(),
                                      OtherModImm);
      if (Val.getNode()) {
        SDValue Input =
          DAG.getNode(ISD::BITCAST, dl, VbicVT, N->getOperand(0));
        SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val);
        return DAG.getNode(ISD::BITCAST, dl, VT, Vbic);
      }
    }
  }

  if (!Subtarget->isThumb1Only()) {
    // fold (and (select cc, -1, c), x) -> (select cc, x, (and, x, c))
    if (SDValue Result = combineSelectAndUseCommutative(N, true, DCI))
      return Result;

    if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget))
      return Result;
  }

  if (Subtarget->isThumb1Only())
    if (SDValue Result = CombineANDShift(N, DCI, Subtarget))
      return Result;

  return SDValue();
}
// Try combining OR nodes to SMULWB, SMULWT.
static SDValue PerformORCombineToSMULWBT(SDNode *OR,
                                         TargetLowering::DAGCombinerInfo &DCI,
                                         const ARMSubtarget *Subtarget) {
  if (!Subtarget->hasV6Ops() ||
      (Subtarget->isThumb() &&
       (!Subtarget->hasThumb2() || !Subtarget->hasDSP())))
    return SDValue();

  SDValue SRL = OR->getOperand(0);
  SDValue SHL = OR->getOperand(1);

  if (SRL.getOpcode() != ISD::SRL || SHL.getOpcode() != ISD::SHL) {
    SRL = OR->getOperand(1);
    SHL = OR->getOperand(0);
  }
  if (!isSRL16(SRL) || !isSHL16(SHL))
    return SDValue();

  // The first operands to the shifts need to be the two results from the
  // same smul_lohi node.
  if ((SRL.getOperand(0).getNode() != SHL.getOperand(0).getNode()) ||
      SRL.getOperand(0).getOpcode() != ISD::SMUL_LOHI)
    return SDValue();

  SDNode *SMULLOHI = SRL.getOperand(0).getNode();
  if (SRL.getOperand(0) != SDValue(SMULLOHI, 0) ||
      SHL.getOperand(0) != SDValue(SMULLOHI, 1))
    return SDValue();

  // (or (srl (smul_lohi ?, ?), 16), (shl (smul_lohi ?, ?), 16)))
  // For SMUL[B|T] smul_lohi will take a 32-bit and a 16-bit arguments.
  // For SMULWB the 16-bit value will be sign extended somehow.
  // For SMULWT only the SRA is required.
  // Check both sides of SMUL_LOHI
  SDValue OpS16 = SMULLOHI->getOperand(0);
  SDValue OpS32 = SMULLOHI->getOperand(1);

  SelectionDAG &DAG = DCI.DAG;
  if (!isS16(OpS16, DAG) && !isSRA16(OpS16)) {
    OpS16 = OpS32;
    OpS32 = SMULLOHI->getOperand(0);
  }

  SDLoc dl(OR);
  unsigned Opcode = 0;
  if (isS16(OpS16, DAG))
    Opcode = ARMISD::SMULWB;
  else if (isSRA16(OpS16)) {
    Opcode = ARMISD::SMULWT;
    OpS16 = OpS16->getOperand(0);
  } else
    return SDValue();

  SDValue Res = DAG.getNode(Opcode, dl, MVT::i32, OpS32, OpS16);
  DAG.ReplaceAllUsesOfValueWith(SDValue(OR, 0), Res);
  return SDValue(OR, 0);
}
static SDValue PerformORCombineToBFI(SDNode *N,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const ARMSubtarget *Subtarget) {
  // BFI is only available on V6T2+
  if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops())
    return SDValue();

  EVT VT = N->getValueType(0);
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);
  // 1) or (and A, mask), val => ARMbfi A, val, mask
  //      iff (val & mask) == val
  //
  // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask
  //  2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2)
  //          && mask == ~mask2
  //  2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2)
  //          && ~mask == mask2
  //  (i.e., copy a bitfield value into another bitfield of the same width)
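  //
  // For case (1), e.g. (or (and A, 0xffff00ff), 0x2a00) becomes
  // (ARMbfi A, 0x2a, 0xffff00ff), inserting 0x2a into bits 8..15 of A.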
  if (VT != MVT::i32)
    return SDValue();

  SDValue N00 = N0.getOperand(0);

  // The value and the mask need to be constants so we can verify this is
  // actually a bitfield set. If the mask is 0xffff, we can do better
  // via a movt instruction, so don't use BFI in that case.
  SDValue MaskOp = N0.getOperand(1);
  ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(MaskOp);
  if (!MaskC)
    return SDValue();
  unsigned Mask = MaskC->getZExtValue();
  if (Mask == 0xffff)
    return SDValue();
  SDValue Res;
  // Case (1): or (and A, mask), val => ARMbfi A, val, mask
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  if (N1C) {
    unsigned Val = N1C->getZExtValue();
    if ((Val & ~Mask) != Val)
      return SDValue();

    if (ARM::isBitFieldInvertedMask(Mask)) {
      Val >>= countTrailingZeros(~Mask);

      Res = DAG.getNode(ARMISD::BFI, DL, VT, N00,
                        DAG.getConstant(Val, DL, MVT::i32),
                        DAG.getConstant(Mask, DL, MVT::i32));

      DCI.CombineTo(N, Res, false);
      // Return value from the original node to inform the combiner that N is
      // now dead.
      return SDValue(N, 0);
    }
  } else if (N1.getOpcode() == ISD::AND) {
    // case (2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask
    ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
    if (!N11C)
      return SDValue();
    unsigned Mask2 = N11C->getZExtValue();

    // Mask and ~Mask2 (or reverse) must be equivalent for the BFI pattern
    // as is to match.
    if (ARM::isBitFieldInvertedMask(Mask) &&
        (Mask == ~Mask2)) {
      // The pack halfword instruction works better for masks that fit it,
      // so use that when it's available.
      if (Subtarget->hasDSP() &&
          (Mask == 0xffff || Mask == 0xffff0000))
        return SDValue();
      // 2a
      unsigned amt = countTrailingZeros(Mask2);
      Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0),
                        DAG.getConstant(amt, DL, MVT::i32));
      Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, Res,
                        DAG.getConstant(Mask, DL, MVT::i32));
      DCI.CombineTo(N, Res, false);
      // Return value from the original node to inform the combiner that N is
      // now dead.
      return SDValue(N, 0);
    } else if (ARM::isBitFieldInvertedMask(~Mask) &&
               (~Mask == Mask2)) {
      // The pack halfword instruction works better for masks that fit it,
      // so use that when it's available.
      if (Subtarget->hasDSP() &&
          (Mask2 == 0xffff || Mask2 == 0xffff0000))
        return SDValue();
      // 2b
      unsigned lsb = countTrailingZeros(Mask);
      Res = DAG.getNode(ISD::SRL, DL, VT, N00,
                        DAG.getConstant(lsb, DL, MVT::i32));
      Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res,
                        DAG.getConstant(Mask2, DL, MVT::i32));
      DCI.CombineTo(N, Res, false);
      // Return value from the original node to inform the combiner that N is
      // now dead.
      return SDValue(N, 0);
    }
  }

  if (DAG.MaskedValueIsZero(N1, MaskC->getAPIntValue()) &&
      N00.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N00.getOperand(1)) &&
      ARM::isBitFieldInvertedMask(~Mask)) {
    // Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask
    // where lsb(mask) == #shamt and masked bits of B are known zero.
    SDValue ShAmt = N00.getOperand(1);
    unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue();
    unsigned LSB = countTrailingZeros(Mask);
    if (ShAmtC != LSB)
      return SDValue();

    Res = DAG.getNode(ARMISD::BFI, DL, VT, N1, N00.getOperand(0),
                      DAG.getConstant(~Mask, DL, MVT::i32));

    DCI.CombineTo(N, Res, false);
    // Return value from the original node to inform the combiner that N is
    // now dead.
    return SDValue(N, 0);
  }

  return SDValue();
}
static bool isValidMVECond(unsigned CC, bool IsFloat) {
  switch (CC) {
  case ARMCC::EQ:
  case ARMCC::NE:
  case ARMCC::LE:
  case ARMCC::GT:
  case ARMCC::GE:
  case ARMCC::LT:
    return true;
  case ARMCC::HS:
  case ARMCC::HI:
    return !IsFloat;
  default:
    return false;
  }
}
static SDValue PerformORCombine_i1(SDNode *N,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const ARMSubtarget *Subtarget) {
  // Try to invert "or A, B" -> "and ~A, ~B", as the "and" is easier to chain
  // together with predicates
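  // (This is just De Morgan: A | B == ~(~A & ~B), so the inverted compares
  //  are ANDed and the result is XORed with all-ones to undo the inversion.)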
  EVT VT = N->getValueType(0);
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  ARMCC::CondCodes CondCode0 = ARMCC::AL;
  ARMCC::CondCodes CondCode1 = ARMCC::AL;
  if (N0->getOpcode() == ARMISD::VCMP)
    CondCode0 = (ARMCC::CondCodes)cast<const ConstantSDNode>(N0->getOperand(2))
                    ->getZExtValue();
  else if (N0->getOpcode() == ARMISD::VCMPZ)
    CondCode0 = (ARMCC::CondCodes)cast<const ConstantSDNode>(N0->getOperand(1))
                    ->getZExtValue();
  if (N1->getOpcode() == ARMISD::VCMP)
    CondCode1 = (ARMCC::CondCodes)cast<const ConstantSDNode>(N1->getOperand(2))
                    ->getZExtValue();
  else if (N1->getOpcode() == ARMISD::VCMPZ)
    CondCode1 = (ARMCC::CondCodes)cast<const ConstantSDNode>(N1->getOperand(1))
                    ->getZExtValue();

  if (CondCode0 == ARMCC::AL || CondCode1 == ARMCC::AL)
    return SDValue();

  unsigned Opposite0 = ARMCC::getOppositeCondition(CondCode0);
  unsigned Opposite1 = ARMCC::getOppositeCondition(CondCode1);

  if (!isValidMVECond(Opposite0,
                      N0->getOperand(0)->getValueType(0).isFloatingPoint()) ||
      !isValidMVECond(Opposite1,
                      N1->getOperand(0)->getValueType(0).isFloatingPoint()))
    return SDValue();

  SmallVector<SDValue, 4> Ops0;
  Ops0.push_back(N0->getOperand(0));
  if (N0->getOpcode() == ARMISD::VCMP)
    Ops0.push_back(N0->getOperand(1));
  Ops0.push_back(DCI.DAG.getConstant(Opposite0, SDLoc(N0), MVT::i32));
  SmallVector<SDValue, 4> Ops1;
  Ops1.push_back(N1->getOperand(0));
  if (N1->getOpcode() == ARMISD::VCMP)
    Ops1.push_back(N1->getOperand(1));
  Ops1.push_back(DCI.DAG.getConstant(Opposite1, SDLoc(N1), MVT::i32));

  SDValue NewN0 = DCI.DAG.getNode(N0->getOpcode(), SDLoc(N0), VT, Ops0);
  SDValue NewN1 = DCI.DAG.getNode(N1->getOpcode(), SDLoc(N1), VT, Ops1);
  SDValue And = DCI.DAG.getNode(ISD::AND, SDLoc(N), VT, NewN0, NewN1);
  return DCI.DAG.getNode(ISD::XOR, SDLoc(N), VT, And,
                         DCI.DAG.getAllOnesConstant(SDLoc(N), VT));
}
/// PerformORCombine - Target-specific dag combine xforms for ISD::OR
static SDValue PerformORCombine(SDNode *N,
                                TargetLowering::DAGCombinerInfo &DCI,
                                const ARMSubtarget *Subtarget) {
  // Attempt to use immediate-form VORR
  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  SelectionDAG &DAG = DCI.DAG;

  if(!DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();

  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (BVN && Subtarget->hasNEON() &&
      BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
    if (SplatBitSize <= 64) {
      EVT VorrVT;
      SDValue Val = isVMOVModifiedImm(SplatBits.getZExtValue(),
                                      SplatUndef.getZExtValue(), SplatBitSize,
                                      DAG, dl, VorrVT, VT.is128BitVector(),
                                      OtherModImm);
      if (Val.getNode()) {
        SDValue Input =
          DAG.getNode(ISD::BITCAST, dl, VorrVT, N->getOperand(0));
        SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val);
        return DAG.getNode(ISD::BITCAST, dl, VT, Vorr);
      }
    }
  }

  if (!Subtarget->isThumb1Only()) {
    // fold (or (select cc, 0, c), x) -> (select cc, x, (or, x, c))
    if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI))
      return Result;
    if (SDValue Result = PerformORCombineToSMULWBT(N, DCI, Subtarget))
      return Result;
  }

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant.
  if (Subtarget->hasNEON() && N1.getOpcode() == ISD::AND && VT.isVector() &&
      DAG.getTargetLoweringInfo().isTypeLegal(VT)) {

    // The code below optimizes (or (and X, Y), Z).
    // The AND operand needs to have a single user to make these optimizations
    // profitable.
    if (N0.getOpcode() != ISD::AND || !N0.hasOneUse())
      return SDValue();

    APInt SplatUndef;
    unsigned SplatBitSize;
    bool HasAnyUndefs;

    APInt SplatBits0, SplatBits1;
    BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1));
    BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1));
    // Ensure that the second operand of both ands are constants
    if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize,
                                      HasAnyUndefs) && !HasAnyUndefs) {
      if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize,
                                        HasAnyUndefs) && !HasAnyUndefs) {
        // Ensure that the bit width of the constants are the same and that
        // the splat arguments are logical inverses as per the pattern we
        // are trying to simplify.
        if (SplatBits0.getBitWidth() == SplatBits1.getBitWidth() &&
            SplatBits0 == ~SplatBits1) {
          // Canonicalize the vector type to make instruction selection
          // simpler.
          EVT CanonicalVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
          SDValue Result = DAG.getNode(ARMISD::VBSL, dl, CanonicalVT,
                                       N0->getOperand(1),
                                       N0->getOperand(0),
                                       N1->getOperand(0));
          return DAG.getNode(ISD::BITCAST, dl, VT, Result);
        }
      }
    }
  }

  if (Subtarget->hasMVEIntegerOps() &&
      (VT == MVT::v4i1 || VT == MVT::v8i1 || VT == MVT::v16i1))
    return PerformORCombine_i1(N, DCI, Subtarget);

  // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when
  // reasonable.
  if (N0.getOpcode() == ISD::AND && N0.hasOneUse()) {
    if (SDValue Res = PerformORCombineToBFI(N, DCI, Subtarget))
      return Res;
  }

  if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget))
    return Result;

  return SDValue();
}
PerformXORCombine(SDNode
*N
,
12279 TargetLowering::DAGCombinerInfo
&DCI
,
12280 const ARMSubtarget
*Subtarget
) {
12281 EVT VT
= N
->getValueType(0);
12282 SelectionDAG
&DAG
= DCI
.DAG
;
12284 if(!DAG
.getTargetLoweringInfo().isTypeLegal(VT
))
12287 if (!Subtarget
->isThumb1Only()) {
12288 // fold (xor (select cc, 0, c), x) -> (select cc, x, (xor, x, c))
12289 if (SDValue Result
= combineSelectAndUseCommutative(N
, false, DCI
))
12292 if (SDValue Result
= PerformSHLSimplify(N
, DCI
, Subtarget
))
// ParseBFI - given a BFI instruction in N, extract the "from" value (Rn) and return it,
// and fill in FromMask and ToMask with (consecutive) bits in "from" to be extracted and
// their position in "to" (Rd).
static SDValue ParseBFI(SDNode *N, APInt &ToMask, APInt &FromMask) {
  assert(N->getOpcode() == ARMISD::BFI);

  SDValue From = N->getOperand(1);
  ToMask = ~cast<ConstantSDNode>(N->getOperand(2))->getAPIntValue();
  FromMask = APInt::getLowBitsSet(ToMask.getBitWidth(), ToMask.countPopulation());

  // If the Base came from a SHR #C, we can deduce that it is really testing bit
  // #C in the base of the SHR.
  if (From->getOpcode() == ISD::SRL &&
      isa<ConstantSDNode>(From->getOperand(1))) {
    APInt Shift = cast<ConstantSDNode>(From->getOperand(1))->getAPIntValue();
    assert(Shift.getLimitedValue() < 32 && "Shift too large!");
    FromMask <<= Shift.getLimitedValue(31);
    From = From->getOperand(0);
  }

  return From;
}
// If A and B contain one contiguous set of bits, does A | B == A . B?
//
// Neither A nor B must be zero.
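//
// For example, A = 0b1100 and B = 0b0011 concatenate properly (A's lowest set
// bit sits directly above B's highest set bit), while A = 0b1100, B = 0b0001
// do not.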
static bool BitsProperlyConcatenate(const APInt &A, const APInt &B) {
  unsigned LastActiveBitInA = A.countTrailingZeros();
  unsigned FirstActiveBitInB = B.getBitWidth() - B.countLeadingZeros() - 1;
  return LastActiveBitInA - 1 == FirstActiveBitInB;
}
static SDValue FindBFIToCombineWith(SDNode *N) {
  // We have a BFI in N. Follow a possible chain of BFIs and find a BFI it can
  // combine with, if one exists.
  APInt ToMask, FromMask;
  SDValue From = ParseBFI(N, ToMask, FromMask);
  SDValue To = N->getOperand(0);

  // Now check for a compatible BFI to merge with. We can pass through BFIs that
  // aren't compatible, but not if they set the same bit in their destination as
  // we do (or that of any BFI we're going to combine with).
  SDValue V = To;
  APInt CombinedToMask = ToMask;
  while (V.getOpcode() == ARMISD::BFI) {
    APInt NewToMask, NewFromMask;
    SDValue NewFrom = ParseBFI(V.getNode(), NewToMask, NewFromMask);
    if (NewFrom != From) {
      // This BFI has a different base. Keep going.
      CombinedToMask |= NewToMask;
      V = V.getOperand(0);
      continue;
    }

    // Do the written bits conflict with any we've seen so far?
    if ((NewToMask & CombinedToMask).getBoolValue())
      // Conflicting bits - bail out because going further is unsafe.
      return SDValue();

    // Are the new bits contiguous when combined with the old bits?
    if (BitsProperlyConcatenate(ToMask, NewToMask) &&
        BitsProperlyConcatenate(FromMask, NewFromMask))
      return V;
    if (BitsProperlyConcatenate(NewToMask, ToMask) &&
        BitsProperlyConcatenate(NewFromMask, FromMask))
      return V;

    // We've seen a write to some bits, so track it.
    CombinedToMask |= NewToMask;
    V = V.getOperand(0);
  }

  return SDValue();
}
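// Illustrative example (not part of the original source): starting from a BFI
// that inserts the low bits of some base value X, the walk above passes over
// intermediate BFIs with a different base value (as long as they write
// disjoint destination bits) and returns an earlier BFI that inserts the
// next-higher bits of X, so the two inserts can later be merged into a single
// BFI by PerformBFICombine.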
static SDValue PerformBFICombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI) {
  SDValue N1 = N->getOperand(1);
  if (N1.getOpcode() == ISD::AND) {
    // (bfi A, (and B, Mask1), Mask2) -> (bfi A, B, Mask2) iff
    // the bits being cleared by the AND are not demanded by the BFI.
    ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
    if (!N11C)
      return SDValue();
    unsigned InvMask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
    unsigned LSB = countTrailingZeros(~InvMask);
    unsigned Width = (32 - countLeadingZeros(~InvMask)) - LSB;
    assert(Width <
               static_cast<unsigned>(std::numeric_limits<unsigned>::digits) &&
           "undefined behavior");
    unsigned Mask = (1u << Width) - 1;
    unsigned Mask2 = N11C->getZExtValue();
    if ((Mask & (~Mask2)) == 0)
      return DCI.DAG.getNode(ARMISD::BFI, SDLoc(N), N->getValueType(0),
                             N->getOperand(0), N1.getOperand(0),
                             N->getOperand(2));
  } else if (N->getOperand(0).getOpcode() == ARMISD::BFI) {
    // We have a BFI of a BFI. Walk up the BFI chain to see how long it goes.
    // Keep track of any consecutive bits set that all come from the same base
    // value. We can combine these together into a single BFI.
    SDValue CombineBFI = FindBFIToCombineWith(N);
    if (CombineBFI == SDValue())
      return SDValue();

    // We've found a BFI.
    APInt ToMask1, FromMask1;
    SDValue From1 = ParseBFI(N, ToMask1, FromMask1);

    APInt ToMask2, FromMask2;
    SDValue From2 = ParseBFI(CombineBFI.getNode(), ToMask2, FromMask2);
    assert(From1 == From2);
    (void)From2;

    // First, unlink CombineBFI.
    DCI.DAG.ReplaceAllUsesWith(CombineBFI, CombineBFI.getOperand(0));
    // Then create a new BFI, combining the two together.
    APInt NewFromMask = FromMask1 | FromMask2;
    APInt NewToMask = ToMask1 | ToMask2;

    EVT VT = N->getValueType(0);
    SDLoc dl(N);

    if (NewFromMask[0] == 0)
      From1 = DCI.DAG.getNode(
          ISD::SRL, dl, VT, From1,
          DCI.DAG.getConstant(NewFromMask.countTrailingZeros(), dl, VT));
    return DCI.DAG.getNode(ARMISD::BFI, dl, VT, N->getOperand(0), From1,
                           DCI.DAG.getConstant(~NewToMask, dl, VT));
  }
  return SDValue();
}
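// Illustrative example (not part of the original source) of the AND fold
// above: with an inverted mask operand of 0xFFFFF00F the BFI writes bits
// [4,11] of Rd from the low 8 bits of its second operand. If that operand is
// (and B, 0xFF), the AND clears no bit the BFI actually reads, so the AND can
// be dropped and B used directly.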
/// PerformVMOVRRDCombine - Target-specific dag combine xforms for
/// ARMISD::VMOVRRD.
static SDValue PerformVMOVRRDCombine(SDNode *N,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const ARMSubtarget *Subtarget) {
  // vmovrrd(vmovdrr x, y) -> x,y
  SDValue InDouble = N->getOperand(0);
  if (InDouble.getOpcode() == ARMISD::VMOVDRR && Subtarget->hasFP64())
    return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1));

  // vmovrrd(load f64) -> (load i32), (load i32)
  SDNode *InNode = InDouble.getNode();
  if (ISD::isNormalLoad(InNode) && InNode->hasOneUse() &&
      InNode->getValueType(0) == MVT::f64 &&
      InNode->getOperand(1).getOpcode() == ISD::FrameIndex &&
      !cast<LoadSDNode>(InNode)->isVolatile()) {
    // TODO: Should this be done for non-FrameIndex operands?
    LoadSDNode *LD = cast<LoadSDNode>(InNode);

    SelectionDAG &DAG = DCI.DAG;
    SDLoc DL(LD);
    SDValue BasePtr = LD->getBasePtr();
    SDValue NewLD1 =
        DAG.getLoad(MVT::i32, DL, LD->getChain(), BasePtr, LD->getPointerInfo(),
                    LD->getAlignment(), LD->getMemOperand()->getFlags());

    SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
                                    DAG.getConstant(4, DL, MVT::i32));

    SDValue NewLD2 = DAG.getLoad(MVT::i32, DL, LD->getChain(), OffsetPtr,
                                 LD->getPointerInfo().getWithOffset(4),
                                 std::min(4U, LD->getAlignment()),
                                 LD->getMemOperand()->getFlags());

    DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLD2.getValue(1));
    if (DCI.DAG.getDataLayout().isBigEndian())
      std::swap (NewLD1, NewLD2);
    SDValue Result = DCI.CombineTo(N, NewLD1, NewLD2);
    return Result;
  }

  return SDValue();
}
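// Illustrative example (not part of the original source): for
//   t0 = load f64, FrameIndex
//   t1, t2 = ARMISD::VMOVRRD t0
// the combine above replaces t1/t2 with two i32 loads at offsets 0 and 4 from
// the same frame slot (swapped on big-endian targets), avoiding a round trip
// through a VFP D register just to split the value into two GPRs.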
/// PerformVMOVDRRCombine - Target-specific dag combine xforms for
/// ARMISD::VMOVDRR.  This is also used for BUILD_VECTORs with 2 operands.
static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) {
  // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X)
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  if (Op0.getOpcode() == ISD::BITCAST)
    Op0 = Op0.getOperand(0);
  if (Op1.getOpcode() == ISD::BITCAST)
    Op1 = Op1.getOperand(0);
  if (Op0.getOpcode() == ARMISD::VMOVRRD &&
      Op0.getNode() == Op1.getNode() &&
      Op0.getResNo() == 0 && Op1.getResNo() == 1)
    return DAG.getNode(ISD::BITCAST, SDLoc(N),
                       N->getValueType(0), Op0.getOperand(0));
  return SDValue();
}
/// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node
/// are normal, non-volatile loads. If so, it is profitable to bitcast an
/// i64 vector to have f64 elements, since the value can then be loaded
/// directly into a VFP register.
static bool hasNormalLoadOperand(SDNode *N) {
  unsigned NumElts = N->getValueType(0).getVectorNumElements();
  for (unsigned i = 0; i < NumElts; ++i) {
    SDNode *Elt = N->getOperand(i).getNode();
    if (ISD::isNormalLoad(Elt) && !cast<LoadSDNode>(Elt)->isVolatile())
      return true;
  }
  return false;
}
/// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for
/// ISD::BUILD_VECTOR.
static SDValue PerformBUILD_VECTORCombine(SDNode *N,
                                          TargetLowering::DAGCombinerInfo &DCI,
                                          const ARMSubtarget *Subtarget) {
  // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X):
  // VMOVRRD is introduced when legalizing i64 types. It forces the i64 value
  // into a pair of GPRs, which is fine when the value is used as a scalar,
  // but if the i64 value is converted to a vector, we need to undo the VMOVRRD.
  SelectionDAG &DAG = DCI.DAG;
  if (N->getNumOperands() == 2)
    if (SDValue RV = PerformVMOVDRRCombine(N, DAG))
      return RV;

  // Load i64 elements as f64 values so that type legalization does not split
  // them up into i32 values.
  EVT VT = N->getValueType(0);
  if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N))
    return SDValue();
  SDLoc dl(N);
  SmallVector<SDValue, 8> Ops;
  unsigned NumElts = VT.getVectorNumElements();
  for (unsigned i = 0; i < NumElts; ++i) {
    SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(i));
    Ops.push_back(V);
    // Make the DAGCombiner fold the bitcast.
    DCI.AddToWorklist(V.getNode());
  }
  EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, NumElts);
  SDValue BV = DAG.getBuildVector(FloatVT, dl, Ops);
  return DAG.getNode(ISD::BITCAST, dl, VT, BV);
}
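// Illustrative example (not part of the original source): a v2i64 build_vector
// where at least one element comes from a normal load is rebuilt as a v2f64
// build_vector of (bitcast f64) elements and then bitcast back to v2i64, so
// the 64-bit elements can be loaded directly into D registers instead of
// being split into i32 pairs by type legalization.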
/// Target-specific dag combine xforms for ARMISD::BUILD_VECTOR.
static SDValue
PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
  // ARMISD::BUILD_VECTOR is introduced when legalizing ISD::BUILD_VECTOR.
  // At that time, we may have inserted bitcasts from integer to float.
  // If these bitcasts have survived DAGCombine, change the lowering of this
  // BUILD_VECTOR in something more vector friendly, i.e., that does not
  // force to use floating point types.
  //
  // Make sure we can change the type of the vector.
  // This is possible iff:
  // 1. The vector is only used in a bitcast to a integer type. I.e.,
  //    1.1. Vector is used only once.
  //    1.2. Use is a bit convert to an integer type.
  // 2. The size of its operands are 32-bits (64-bits are not legal).
  EVT VT = N->getValueType(0);
  EVT EltVT = VT.getVectorElementType();

  // Check 1.1. and 2.
  if (EltVT.getSizeInBits() != 32 || !N->hasOneUse())
    return SDValue();

  // By construction, the input type must be float.
  assert(EltVT == MVT::f32 && "Unexpected type!");

  // Check 1.2.
  SDNode *Use = *N->use_begin();
  if (Use->getOpcode() != ISD::BITCAST ||
      Use->getValueType(0).isFloatingPoint())
    return SDValue();

  // Check profitability.
  // Model is, if more than half of the relevant operands are bitcast from
  // i32, turn the build_vector into a sequence of insert_vector_elt.
  // Relevant operands are everything that is not statically
  // (i.e., at compile time) bitcasted.
  unsigned NumOfBitCastedElts = 0;
  unsigned NumElts = VT.getVectorNumElements();
  unsigned NumOfRelevantElts = NumElts;
  for (unsigned Idx = 0; Idx < NumElts; ++Idx) {
    SDValue Elt = N->getOperand(Idx);
    if (Elt->getOpcode() == ISD::BITCAST) {
      // Assume only bit cast to i32 will go away.
      if (Elt->getOperand(0).getValueType() == MVT::i32)
        ++NumOfBitCastedElts;
    } else if (Elt.isUndef() || isa<ConstantSDNode>(Elt))
      // Constants are statically casted, thus do not count them as
      // relevant operands.
      --NumOfRelevantElts;
  }

  // Check if more than half of the elements require a non-free bitcast.
  if (NumOfBitCastedElts <= NumOfRelevantElts / 2)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  // Create the new vector type.
  EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
  // Check if the type is legal.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!TLI.isTypeLegal(VecVT))
    return SDValue();

  // Combine:
  // ARMISD::BUILD_VECTOR E1, E2, ..., EN.
  // => BITCAST INSERT_VECTOR_ELT
  //                      (INSERT_VECTOR_ELT (...), (BITCAST EN-1), N-1),
  //                      (BITCAST EN), N.
  SDValue Vec = DAG.getUNDEF(VecVT);
  SDLoc dl(N);
  for (unsigned Idx = 0 ; Idx < NumElts; ++Idx) {
    SDValue V = N->getOperand(Idx);
    if (V.isUndef())
      continue;
    if (V.getOpcode() == ISD::BITCAST &&
        V->getOperand(0).getValueType() == MVT::i32)
      // Fold obvious case.
      V = V.getOperand(0);
    else {
      V = DAG.getNode(ISD::BITCAST, SDLoc(V), MVT::i32, V);
      // Make the DAGCombiner fold the bitcasts.
      DCI.AddToWorklist(V.getNode());
    }
    SDValue LaneIdx = DAG.getConstant(Idx, dl, MVT::i32);
    Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecVT, Vec, V, LaneIdx);
  }
  Vec = DAG.getNode(ISD::BITCAST, dl, VT, Vec);
  // Make the DAGCombiner fold the bitcasts.
  DCI.AddToWorklist(Vec.getNode());
  return Vec;
}
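// Illustrative example (not part of the original source): for
//   (ARMISD::BUILD_VECTOR (bitcast f32 a:i32), (bitcast f32 b:i32),
//                         (bitcast f32 c:i32), d:f32)
// whose only use is a bitcast to an integer type, three of the four relevant
// operands are bitcast from i32, so the node is rewritten as a chain of
// insert_vector_elt over v4i32 (with d bitcast to i32) followed by a bitcast
// back to v4f32, keeping the values out of the FP register file.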
static SDValue
PerformPREDICATE_CASTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
  EVT VT = N->getValueType(0);
  SDValue Op = N->getOperand(0);
  SDLoc dl(N);

  // PREDICATE_CAST(PREDICATE_CAST(x)) == PREDICATE_CAST(x)
  if (Op->getOpcode() == ARMISD::PREDICATE_CAST) {
    // If the valuetypes are the same, we can remove the cast entirely.
    if (Op->getOperand(0).getValueType() == VT)
      return Op->getOperand(0);
    return DCI.DAG.getNode(ARMISD::PREDICATE_CAST, dl,
                           Op->getOperand(0).getValueType(), Op->getOperand(0));
  }

  return SDValue();
}
/// PerformInsertEltCombine - Target-specific dag combine xforms for
/// ISD::INSERT_VECTOR_ELT.
static SDValue PerformInsertEltCombine(SDNode *N,
                                       TargetLowering::DAGCombinerInfo &DCI) {
  // Bitcast an i64 load inserted into a vector to f64.
  // Otherwise, the i64 value will be legalized to a pair of i32 values.
  EVT VT = N->getValueType(0);
  SDNode *Elt = N->getOperand(1).getNode();
  if (VT.getVectorElementType() != MVT::i64 ||
      !ISD::isNormalLoad(Elt) || cast<LoadSDNode>(Elt)->isVolatile())
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64,
                                 VT.getVectorNumElements());
  SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, N->getOperand(0));
  SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(1));
  // Make the DAGCombiner fold the bitcasts.
  DCI.AddToWorklist(Vec.getNode());
  DCI.AddToWorklist(V.getNode());
  SDValue InsElt = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, FloatVT,
                               Vec, V, N->getOperand(2));
  return DAG.getNode(ISD::BITCAST, dl, VT, InsElt);
}
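// Illustrative example (not part of the original source):
//   (insert_vector_elt v2i64:vec, (load i64), idx)
// becomes
//   (bitcast v2i64
//     (insert_vector_elt v2f64 (bitcast vec), (bitcast f64 (load)), idx))
// so the loaded 64-bit value can go straight into a D register.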
/// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for
/// ISD::VECTOR_SHUFFLE.
static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) {
  // The LLVM shufflevector instruction does not require the shuffle mask
  // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does
  // have that requirement.  When translating to ISD::VECTOR_SHUFFLE, if the
  // operands do not match the mask length, they are extended by concatenating
  // them with undef vectors.  That is probably the right thing for other
  // targets, but for NEON it is better to concatenate two double-register
  // size vector operands into a single quad-register size vector.  Do that
  // transformation here:
  //   shuffle(concat(v1, undef), concat(v2, undef)) ->
  //   shuffle(concat(v1, v2), undef)
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  if (Op0.getOpcode() != ISD::CONCAT_VECTORS ||
      Op1.getOpcode() != ISD::CONCAT_VECTORS ||
      Op0.getNumOperands() != 2 ||
      Op1.getNumOperands() != 2)
    return SDValue();
  SDValue Concat0Op1 = Op0.getOperand(1);
  SDValue Concat1Op1 = Op1.getOperand(1);
  if (!Concat0Op1.isUndef() || !Concat1Op1.isUndef())
    return SDValue();
  // Skip the transformation if any of the types are illegal.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT VT = N->getValueType(0);
  if (!TLI.isTypeLegal(VT) ||
      !TLI.isTypeLegal(Concat0Op1.getValueType()) ||
      !TLI.isTypeLegal(Concat1Op1.getValueType()))
    return SDValue();

  SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT,
                                  Op0.getOperand(0), Op1.getOperand(0));
  // Translate the shuffle mask.
  SmallVector<int, 16> NewMask;
  unsigned NumElts = VT.getVectorNumElements();
  unsigned HalfElts = NumElts/2;
  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
  for (unsigned n = 0; n < NumElts; ++n) {
    int MaskElt = SVN->getMaskElt(n);
    int NewElt = -1;
    if (MaskElt < (int)HalfElts)
      NewElt = MaskElt;
    else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts))
      NewElt = HalfElts + MaskElt - NumElts;
    NewMask.push_back(NewElt);
  }
  return DAG.getVectorShuffle(VT, SDLoc(N), NewConcat,
                              DAG.getUNDEF(VT), NewMask);
}
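// Illustrative example (not part of the original source): a v8i16 shuffle of
//   concat(v4i16 v1, undef) and concat(v4i16 v2, undef)
// with mask <0,1,8,9,2,3,10,11> is rewritten as a shuffle of
//   concat(v1, v2) and undef
// with mask <0,1,4,5,2,3,6,7>, so both inputs live in a single Q register.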
12729 /// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP,
12730 /// NEON load/store intrinsics, and generic vector load/stores, to merge
12731 /// base address updates.
12732 /// For generic load/stores, the memory type is assumed to be a vector.
12733 /// The caller is assumed to have checked legality.
12734 static SDValue
CombineBaseUpdate(SDNode
*N
,
12735 TargetLowering::DAGCombinerInfo
&DCI
) {
12736 SelectionDAG
&DAG
= DCI
.DAG
;
12737 const bool isIntrinsic
= (N
->getOpcode() == ISD::INTRINSIC_VOID
||
12738 N
->getOpcode() == ISD::INTRINSIC_W_CHAIN
);
12739 const bool isStore
= N
->getOpcode() == ISD::STORE
;
12740 const unsigned AddrOpIdx
= ((isIntrinsic
|| isStore
) ? 2 : 1);
12741 SDValue Addr
= N
->getOperand(AddrOpIdx
);
12742 MemSDNode
*MemN
= cast
<MemSDNode
>(N
);
12745 // Search for a use of the address operand that is an increment.
12746 for (SDNode::use_iterator UI
= Addr
.getNode()->use_begin(),
12747 UE
= Addr
.getNode()->use_end(); UI
!= UE
; ++UI
) {
12748 SDNode
*User
= *UI
;
12749 if (User
->getOpcode() != ISD::ADD
||
12750 UI
.getUse().getResNo() != Addr
.getResNo())
12753 // Check that the add is independent of the load/store. Otherwise, folding
12754 // it would create a cycle. We can avoid searching through Addr as it's a
12755 // predecessor to both.
12756 SmallPtrSet
<const SDNode
*, 32> Visited
;
12757 SmallVector
<const SDNode
*, 16> Worklist
;
12758 Visited
.insert(Addr
.getNode());
12759 Worklist
.push_back(N
);
12760 Worklist
.push_back(User
);
12761 if (SDNode::hasPredecessorHelper(N
, Visited
, Worklist
) ||
12762 SDNode::hasPredecessorHelper(User
, Visited
, Worklist
))
12765 // Find the new opcode for the updating load/store.
12766 bool isLoadOp
= true;
12767 bool isLaneOp
= false;
12768 unsigned NewOpc
= 0;
12769 unsigned NumVecs
= 0;
12771 unsigned IntNo
= cast
<ConstantSDNode
>(N
->getOperand(1))->getZExtValue();
12773 default: llvm_unreachable("unexpected intrinsic for Neon base update");
12774 case Intrinsic::arm_neon_vld1
: NewOpc
= ARMISD::VLD1_UPD
;
12775 NumVecs
= 1; break;
12776 case Intrinsic::arm_neon_vld2
: NewOpc
= ARMISD::VLD2_UPD
;
12777 NumVecs
= 2; break;
12778 case Intrinsic::arm_neon_vld3
: NewOpc
= ARMISD::VLD3_UPD
;
12779 NumVecs
= 3; break;
12780 case Intrinsic::arm_neon_vld4
: NewOpc
= ARMISD::VLD4_UPD
;
12781 NumVecs
= 4; break;
12782 case Intrinsic::arm_neon_vld2dup
:
12783 case Intrinsic::arm_neon_vld3dup
:
12784 case Intrinsic::arm_neon_vld4dup
:
12785 // TODO: Support updating VLDxDUP nodes. For now, we just skip
12786 // combining base updates for such intrinsics.
12788 case Intrinsic::arm_neon_vld2lane
: NewOpc
= ARMISD::VLD2LN_UPD
;
12789 NumVecs
= 2; isLaneOp
= true; break;
12790 case Intrinsic::arm_neon_vld3lane
: NewOpc
= ARMISD::VLD3LN_UPD
;
12791 NumVecs
= 3; isLaneOp
= true; break;
12792 case Intrinsic::arm_neon_vld4lane
: NewOpc
= ARMISD::VLD4LN_UPD
;
12793 NumVecs
= 4; isLaneOp
= true; break;
12794 case Intrinsic::arm_neon_vst1
: NewOpc
= ARMISD::VST1_UPD
;
12795 NumVecs
= 1; isLoadOp
= false; break;
12796 case Intrinsic::arm_neon_vst2
: NewOpc
= ARMISD::VST2_UPD
;
12797 NumVecs
= 2; isLoadOp
= false; break;
12798 case Intrinsic::arm_neon_vst3
: NewOpc
= ARMISD::VST3_UPD
;
12799 NumVecs
= 3; isLoadOp
= false; break;
12800 case Intrinsic::arm_neon_vst4
: NewOpc
= ARMISD::VST4_UPD
;
12801 NumVecs
= 4; isLoadOp
= false; break;
12802 case Intrinsic::arm_neon_vst2lane
: NewOpc
= ARMISD::VST2LN_UPD
;
12803 NumVecs
= 2; isLoadOp
= false; isLaneOp
= true; break;
12804 case Intrinsic::arm_neon_vst3lane
: NewOpc
= ARMISD::VST3LN_UPD
;
12805 NumVecs
= 3; isLoadOp
= false; isLaneOp
= true; break;
12806 case Intrinsic::arm_neon_vst4lane
: NewOpc
= ARMISD::VST4LN_UPD
;
12807 NumVecs
= 4; isLoadOp
= false; isLaneOp
= true; break;
12811 switch (N
->getOpcode()) {
12812 default: llvm_unreachable("unexpected opcode for Neon base update");
12813 case ARMISD::VLD1DUP
: NewOpc
= ARMISD::VLD1DUP_UPD
; NumVecs
= 1; break;
12814 case ARMISD::VLD2DUP
: NewOpc
= ARMISD::VLD2DUP_UPD
; NumVecs
= 2; break;
12815 case ARMISD::VLD3DUP
: NewOpc
= ARMISD::VLD3DUP_UPD
; NumVecs
= 3; break;
12816 case ARMISD::VLD4DUP
: NewOpc
= ARMISD::VLD4DUP_UPD
; NumVecs
= 4; break;
12817 case ISD::LOAD
: NewOpc
= ARMISD::VLD1_UPD
;
12818 NumVecs
= 1; isLaneOp
= false; break;
12819 case ISD::STORE
: NewOpc
= ARMISD::VST1_UPD
;
12820 NumVecs
= 1; isLaneOp
= false; isLoadOp
= false; break;
12824 // Find the size of memory referenced by the load/store.
12827 VecTy
= N
->getValueType(0);
12828 } else if (isIntrinsic
) {
12829 VecTy
= N
->getOperand(AddrOpIdx
+1).getValueType();
12831 assert(isStore
&& "Node has to be a load, a store, or an intrinsic!");
12832 VecTy
= N
->getOperand(1).getValueType();
12835 unsigned NumBytes
= NumVecs
* VecTy
.getSizeInBits() / 8;
12837 NumBytes
/= VecTy
.getVectorNumElements();
12839 // If the increment is a constant, it must match the memory ref size.
12840 SDValue Inc
= User
->getOperand(User
->getOperand(0) == Addr
? 1 : 0);
12841 ConstantSDNode
*CInc
= dyn_cast
<ConstantSDNode
>(Inc
.getNode());
12842 if (NumBytes
>= 3 * 16 && (!CInc
|| CInc
->getZExtValue() != NumBytes
)) {
12843 // VLD3/4 and VST3/4 for 128-bit vectors are implemented with two
12844 // separate instructions that make it harder to use a non-constant update.
12848 // OK, we found an ADD we can fold into the base update.
12849 // Now, create a _UPD node, taking care of not breaking alignment.
12851 EVT AlignedVecTy
= VecTy
;
12852 unsigned Alignment
= MemN
->getAlignment();
12854 // If this is a less-than-standard-aligned load/store, change the type to
12855 // match the standard alignment.
12856 // The alignment is overlooked when selecting _UPD variants; and it's
12857 // easier to introduce bitcasts here than fix that.
12858 // There are 3 ways to get to this base-update combine:
12859 // - intrinsics: they are assumed to be properly aligned (to the standard
12860 // alignment of the memory type), so we don't need to do anything.
12861 // - ARMISD::VLDx nodes: they are only generated from the aforementioned
12862 // intrinsics, so, likewise, there's nothing to do.
12863 // - generic load/store instructions: the alignment is specified as an
12864 // explicit operand, rather than implicitly as the standard alignment
12865 // of the memory type (like the intrisics). We need to change the
12866 // memory type to match the explicit alignment. That way, we don't
12867 // generate non-standard-aligned ARMISD::VLDx nodes.
12868 if (isa
<LSBaseSDNode
>(N
)) {
12869 if (Alignment
== 0)
12871 if (Alignment
< VecTy
.getScalarSizeInBits() / 8) {
12872 MVT EltTy
= MVT::getIntegerVT(Alignment
* 8);
12873 assert(NumVecs
== 1 && "Unexpected multi-element generic load/store.");
12874 assert(!isLaneOp
&& "Unexpected generic load/store lane.");
12875 unsigned NumElts
= NumBytes
/ (EltTy
.getSizeInBits() / 8);
12876 AlignedVecTy
= MVT::getVectorVT(EltTy
, NumElts
);
12878 // Don't set an explicit alignment on regular load/stores that we want
12879 // to transform to VLD/VST 1_UPD nodes.
12880 // This matches the behavior of regular load/stores, which only get an
12881 // explicit alignment if the MMO alignment is larger than the standard
12882 // alignment of the memory type.
12883 // Intrinsics, however, always get an explicit alignment, set to the
12884 // alignment of the MMO.
12888 // Create the new updating load/store node.
12889 // First, create an SDVTList for the new updating node's results.
12891 unsigned NumResultVecs
= (isLoadOp
? NumVecs
: 0);
12893 for (n
= 0; n
< NumResultVecs
; ++n
)
12894 Tys
[n
] = AlignedVecTy
;
12895 Tys
[n
++] = MVT::i32
;
12896 Tys
[n
] = MVT::Other
;
12897 SDVTList SDTys
= DAG
.getVTList(makeArrayRef(Tys
, NumResultVecs
+2));
12899 // Then, gather the new node's operands.
12900 SmallVector
<SDValue
, 8> Ops
;
12901 Ops
.push_back(N
->getOperand(0)); // incoming chain
12902 Ops
.push_back(N
->getOperand(AddrOpIdx
));
12903 Ops
.push_back(Inc
);
12905 if (StoreSDNode
*StN
= dyn_cast
<StoreSDNode
>(N
)) {
12906 // Try to match the intrinsic's signature
12907 Ops
.push_back(StN
->getValue());
12909 // Loads (and of course intrinsics) match the intrinsics' signature,
12910 // so just add all but the alignment operand.
12911 for (unsigned i
= AddrOpIdx
+ 1; i
< N
->getNumOperands() - 1; ++i
)
12912 Ops
.push_back(N
->getOperand(i
));
12915 // For all node types, the alignment operand is always the last one.
12916 Ops
.push_back(DAG
.getConstant(Alignment
, dl
, MVT::i32
));
12918 // If this is a non-standard-aligned STORE, the penultimate operand is the
12919 // stored value. Bitcast it to the aligned type.
12920 if (AlignedVecTy
!= VecTy
&& N
->getOpcode() == ISD::STORE
) {
12921 SDValue
&StVal
= Ops
[Ops
.size()-2];
12922 StVal
= DAG
.getNode(ISD::BITCAST
, dl
, AlignedVecTy
, StVal
);
12925 EVT LoadVT
= isLaneOp
? VecTy
.getVectorElementType() : AlignedVecTy
;
12926 SDValue UpdN
= DAG
.getMemIntrinsicNode(NewOpc
, dl
, SDTys
, Ops
, LoadVT
,
12927 MemN
->getMemOperand());
12929 // Update the uses.
12930 SmallVector
<SDValue
, 5> NewResults
;
12931 for (unsigned i
= 0; i
< NumResultVecs
; ++i
)
12932 NewResults
.push_back(SDValue(UpdN
.getNode(), i
));
12934 // If this is an non-standard-aligned LOAD, the first result is the loaded
12935 // value. Bitcast it to the expected result type.
12936 if (AlignedVecTy
!= VecTy
&& N
->getOpcode() == ISD::LOAD
) {
12937 SDValue
&LdVal
= NewResults
[0];
12938 LdVal
= DAG
.getNode(ISD::BITCAST
, dl
, VecTy
, LdVal
);
12941 NewResults
.push_back(SDValue(UpdN
.getNode(), NumResultVecs
+1)); // chain
12942 DCI
.CombineTo(N
, NewResults
);
12943 DCI
.CombineTo(User
, SDValue(UpdN
.getNode(), NumResultVecs
));
static SDValue PerformVLDCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI) {
  if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
    return SDValue();

  return CombineBaseUpdate(N, DCI);
}
12958 /// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a
12959 /// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic
12960 /// are also VDUPLANEs. If so, combine them to a vldN-dup operation and
12962 static bool CombineVLDDUP(SDNode
*N
, TargetLowering::DAGCombinerInfo
&DCI
) {
12963 SelectionDAG
&DAG
= DCI
.DAG
;
12964 EVT VT
= N
->getValueType(0);
12965 // vldN-dup instructions only support 64-bit vectors for N > 1.
12966 if (!VT
.is64BitVector())
12969 // Check if the VDUPLANE operand is a vldN-dup intrinsic.
12970 SDNode
*VLD
= N
->getOperand(0).getNode();
12971 if (VLD
->getOpcode() != ISD::INTRINSIC_W_CHAIN
)
12973 unsigned NumVecs
= 0;
12974 unsigned NewOpc
= 0;
12975 unsigned IntNo
= cast
<ConstantSDNode
>(VLD
->getOperand(1))->getZExtValue();
12976 if (IntNo
== Intrinsic::arm_neon_vld2lane
) {
12978 NewOpc
= ARMISD::VLD2DUP
;
12979 } else if (IntNo
== Intrinsic::arm_neon_vld3lane
) {
12981 NewOpc
= ARMISD::VLD3DUP
;
12982 } else if (IntNo
== Intrinsic::arm_neon_vld4lane
) {
12984 NewOpc
= ARMISD::VLD4DUP
;
12989 // First check that all the vldN-lane uses are VDUPLANEs and that the lane
12990 // numbers match the load.
12991 unsigned VLDLaneNo
=
12992 cast
<ConstantSDNode
>(VLD
->getOperand(NumVecs
+3))->getZExtValue();
12993 for (SDNode::use_iterator UI
= VLD
->use_begin(), UE
= VLD
->use_end();
12995 // Ignore uses of the chain result.
12996 if (UI
.getUse().getResNo() == NumVecs
)
12998 SDNode
*User
= *UI
;
12999 if (User
->getOpcode() != ARMISD::VDUPLANE
||
13000 VLDLaneNo
!= cast
<ConstantSDNode
>(User
->getOperand(1))->getZExtValue())
13004 // Create the vldN-dup node.
13007 for (n
= 0; n
< NumVecs
; ++n
)
13009 Tys
[n
] = MVT::Other
;
13010 SDVTList SDTys
= DAG
.getVTList(makeArrayRef(Tys
, NumVecs
+1));
13011 SDValue Ops
[] = { VLD
->getOperand(0), VLD
->getOperand(2) };
13012 MemIntrinsicSDNode
*VLDMemInt
= cast
<MemIntrinsicSDNode
>(VLD
);
13013 SDValue VLDDup
= DAG
.getMemIntrinsicNode(NewOpc
, SDLoc(VLD
), SDTys
,
13014 Ops
, VLDMemInt
->getMemoryVT(),
13015 VLDMemInt
->getMemOperand());
13017 // Update the uses.
13018 for (SDNode::use_iterator UI
= VLD
->use_begin(), UE
= VLD
->use_end();
13020 unsigned ResNo
= UI
.getUse().getResNo();
13021 // Ignore uses of the chain result.
13022 if (ResNo
== NumVecs
)
13024 SDNode
*User
= *UI
;
13025 DCI
.CombineTo(User
, SDValue(VLDDup
.getNode(), ResNo
));
13028 // Now the vldN-lane intrinsic is dead except for its chain result.
13029 // Update uses of the chain.
13030 std::vector
<SDValue
> VLDDupResults
;
13031 for (unsigned n
= 0; n
< NumVecs
; ++n
)
13032 VLDDupResults
.push_back(SDValue(VLDDup
.getNode(), n
));
13033 VLDDupResults
.push_back(SDValue(VLDDup
.getNode(), NumVecs
));
13034 DCI
.CombineTo(VLD
, VLDDupResults
);
/// PerformVDUPLANECombine - Target-specific dag combine xforms for
/// ARMISD::VDUPLANE.
static SDValue PerformVDUPLANECombine(SDNode *N,
                                      TargetLowering::DAGCombinerInfo &DCI) {
  SDValue Op = N->getOperand(0);

  // If the source is a vldN-lane (N > 1) intrinsic, and all the other uses
  // of that intrinsic are also VDUPLANEs, combine them to a vldN-dup operation.
  if (CombineVLDDUP(N, DCI))
    return SDValue(N, 0);

  // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is
  // redundant.  Ignore bit_converts for now; element sizes are checked below.
  while (Op.getOpcode() == ISD::BITCAST)
    Op = Op.getOperand(0);
  if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM)
    return SDValue();

  // Make sure the VMOV element size is not bigger than the VDUPLANE elements.
  unsigned EltSize = Op.getScalarValueSizeInBits();
  // The canonical VMOV for a zero vector uses a 32-bit element size.
  unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  unsigned EltBits;
  if (ARM_AM::decodeVMOVModImm(Imm, EltBits) == 0)
    EltSize = 8;
  EVT VT = N->getValueType(0);
  if (EltSize > VT.getScalarSizeInBits())
    return SDValue();

  return DCI.DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
}
/// PerformVDUPCombine - Target-specific dag combine xforms for ARMISD::VDUP.
static SDValue PerformVDUPCombine(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const ARMSubtarget *Subtarget) {
  SelectionDAG &DAG = DCI.DAG;
  SDValue Op = N->getOperand(0);

  if (!Subtarget->hasNEON())
    return SDValue();

  // Match VDUP(LOAD) -> VLD1DUP.
  // We match this pattern here rather than waiting for isel because the
  // transform is only legal for unindexed loads.
  LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode());
  if (LD && Op.hasOneUse() && LD->isUnindexed() &&
      LD->getMemoryVT() == N->getValueType(0).getVectorElementType()) {
    SDValue Ops[] = { LD->getOperand(0), LD->getOperand(1),
                      DAG.getConstant(LD->getAlignment(), SDLoc(N), MVT::i32) };
    SDVTList SDTys = DAG.getVTList(N->getValueType(0), MVT::Other);
    SDValue VLDDup = DAG.getMemIntrinsicNode(ARMISD::VLD1DUP, SDLoc(N), SDTys,
                                             Ops, LD->getMemoryVT(),
                                             LD->getMemOperand());
    DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), VLDDup.getValue(1));
    return VLDDup;
  }

  return SDValue();
}
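// Illustrative example (not part of the original source): for
//   t0 = load i32, [r0]
//   t1 = ARMISD::VDUP t0
// where the load's value is only used by the VDUP, the combine above emits a
// single all-lanes ARMISD::VLD1DUP (roughly "vld1.32 {d0[]}, [r0]" after
// selection) and rewires the load's chain users to the new node's chain.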
static SDValue PerformLOADCombine(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI) {
  EVT VT = N->getValueType(0);

  // If this is a legal vector load, try to combine it into a VLD1_UPD.
  if (ISD::isNormalLoad(N) && VT.isVector() &&
      DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return CombineBaseUpdate(N, DCI);

  return SDValue();
}
13112 // Optimize trunc store (of multiple scalars) to shuffle and store. First,
13113 // pack all of the elements in one place. Next, store to memory in fewer
13115 static SDValue
PerformTruncatingStoreCombine(StoreSDNode
*St
,
13116 SelectionDAG
&DAG
) {
13117 SDValue StVal
= St
->getValue();
13118 EVT VT
= StVal
.getValueType();
13119 if (!St
->isTruncatingStore() || !VT
.isVector())
13121 const TargetLowering
&TLI
= DAG
.getTargetLoweringInfo();
13122 EVT StVT
= St
->getMemoryVT();
13123 unsigned NumElems
= VT
.getVectorNumElements();
13124 assert(StVT
!= VT
&& "Cannot truncate to the same type");
13125 unsigned FromEltSz
= VT
.getScalarSizeInBits();
13126 unsigned ToEltSz
= StVT
.getScalarSizeInBits();
13128 // From, To sizes and ElemCount must be pow of two
13129 if (!isPowerOf2_32(NumElems
* FromEltSz
* ToEltSz
))
13132 // We are going to use the original vector elt for storing.
13133 // Accumulated smaller vector elements must be a multiple of the store size.
13134 if (0 != (NumElems
* FromEltSz
) % ToEltSz
)
13137 unsigned SizeRatio
= FromEltSz
/ ToEltSz
;
13138 assert(SizeRatio
* NumElems
* ToEltSz
== VT
.getSizeInBits());
13140 // Create a type on which we perform the shuffle.
13141 EVT WideVecVT
= EVT::getVectorVT(*DAG
.getContext(), StVT
.getScalarType(),
13142 NumElems
* SizeRatio
);
13143 assert(WideVecVT
.getSizeInBits() == VT
.getSizeInBits());
13146 SDValue WideVec
= DAG
.getNode(ISD::BITCAST
, DL
, WideVecVT
, StVal
);
13147 SmallVector
<int, 8> ShuffleVec(NumElems
* SizeRatio
, -1);
13148 for (unsigned i
= 0; i
< NumElems
; ++i
)
13149 ShuffleVec
[i
] = DAG
.getDataLayout().isBigEndian() ? (i
+ 1) * SizeRatio
- 1
13152 // Can't shuffle using an illegal type.
13153 if (!TLI
.isTypeLegal(WideVecVT
))
13156 SDValue Shuff
= DAG
.getVectorShuffle(
13157 WideVecVT
, DL
, WideVec
, DAG
.getUNDEF(WideVec
.getValueType()), ShuffleVec
);
13158 // At this point all of the data is stored at the bottom of the
13159 // register. We now need to save it to mem.
13161 // Find the largest store unit
13162 MVT StoreType
= MVT::i8
;
13163 for (MVT Tp
: MVT::integer_valuetypes()) {
13164 if (TLI
.isTypeLegal(Tp
) && Tp
.getSizeInBits() <= NumElems
* ToEltSz
)
13167 // Didn't find a legal store type.
13168 if (!TLI
.isTypeLegal(StoreType
))
13171 // Bitcast the original vector into a vector of store-size units
13173 EVT::getVectorVT(*DAG
.getContext(), StoreType
,
13174 VT
.getSizeInBits() / EVT(StoreType
).getSizeInBits());
13175 assert(StoreVecVT
.getSizeInBits() == VT
.getSizeInBits());
13176 SDValue ShuffWide
= DAG
.getNode(ISD::BITCAST
, DL
, StoreVecVT
, Shuff
);
13177 SmallVector
<SDValue
, 8> Chains
;
13178 SDValue Increment
= DAG
.getConstant(StoreType
.getSizeInBits() / 8, DL
,
13179 TLI
.getPointerTy(DAG
.getDataLayout()));
13180 SDValue BasePtr
= St
->getBasePtr();
13182 // Perform one or more big stores into memory.
13183 unsigned E
= (ToEltSz
* NumElems
) / StoreType
.getSizeInBits();
13184 for (unsigned I
= 0; I
< E
; I
++) {
13185 SDValue SubVec
= DAG
.getNode(ISD::EXTRACT_VECTOR_ELT
, DL
, StoreType
,
13186 ShuffWide
, DAG
.getIntPtrConstant(I
, DL
));
13188 DAG
.getStore(St
->getChain(), DL
, SubVec
, BasePtr
, St
->getPointerInfo(),
13189 St
->getAlignment(), St
->getMemOperand()->getFlags());
13191 DAG
.getNode(ISD::ADD
, DL
, BasePtr
.getValueType(), BasePtr
, Increment
);
13192 Chains
.push_back(Ch
);
13194 return DAG
.getNode(ISD::TokenFactor
, DL
, MVT::Other
, Chains
);
13197 // Try taking a single vector store from an truncate (which would otherwise turn
13198 // into an expensive buildvector) and splitting it into a series of narrowing
13200 static SDValue
PerformSplittingToNarrowingStores(StoreSDNode
*St
,
13201 SelectionDAG
&DAG
) {
13202 if (!St
->isSimple() || St
->isTruncatingStore() || !St
->isUnindexed())
13204 SDValue Trunc
= St
->getValue();
13205 if (Trunc
->getOpcode() != ISD::TRUNCATE
)
13207 EVT FromVT
= Trunc
->getOperand(0).getValueType();
13208 EVT ToVT
= Trunc
.getValueType();
13209 if (!ToVT
.isVector())
13211 assert(FromVT
.getVectorNumElements() == ToVT
.getVectorNumElements());
13212 EVT ToEltVT
= ToVT
.getVectorElementType();
13213 EVT FromEltVT
= FromVT
.getVectorElementType();
13215 unsigned NumElements
= 0;
13216 if (FromEltVT
== MVT::i32
&& (ToEltVT
== MVT::i16
|| ToEltVT
== MVT::i8
))
13218 if (FromEltVT
== MVT::i16
&& ToEltVT
== MVT::i8
)
13220 if (NumElements
== 0 || FromVT
.getVectorNumElements() == NumElements
||
13221 FromVT
.getVectorNumElements() % NumElements
!= 0)
13225 // Details about the old store
13226 SDValue Ch
= St
->getChain();
13227 SDValue BasePtr
= St
->getBasePtr();
13228 unsigned Alignment
= St
->getOriginalAlignment();
13229 MachineMemOperand::Flags MMOFlags
= St
->getMemOperand()->getFlags();
13230 AAMDNodes AAInfo
= St
->getAAInfo();
13232 EVT NewFromVT
= EVT::getVectorVT(*DAG
.getContext(), FromEltVT
, NumElements
);
13233 EVT NewToVT
= EVT::getVectorVT(*DAG
.getContext(), ToEltVT
, NumElements
);
13235 SmallVector
<SDValue
, 4> Stores
;
13236 for (unsigned i
= 0; i
< FromVT
.getVectorNumElements() / NumElements
; i
++) {
13237 unsigned NewOffset
= i
* NumElements
* ToEltVT
.getSizeInBits() / 8;
13238 SDValue NewPtr
= DAG
.getObjectPtrOffset(DL
, BasePtr
, NewOffset
);
13241 DAG
.getNode(ISD::EXTRACT_SUBVECTOR
, DL
, NewFromVT
, Trunc
.getOperand(0),
13242 DAG
.getConstant(i
* NumElements
, DL
, MVT::i32
));
13243 SDValue Store
= DAG
.getTruncStore(
13244 Ch
, DL
, Extract
, NewPtr
, St
->getPointerInfo().getWithOffset(NewOffset
),
13245 NewToVT
, Alignment
, MMOFlags
, AAInfo
);
13246 Stores
.push_back(Store
);
13248 return DAG
.getNode(ISD::TokenFactor
, DL
, MVT::Other
, Stores
);
13251 /// PerformSTORECombine - Target-specific dag combine xforms for
13253 static SDValue
PerformSTORECombine(SDNode
*N
,
13254 TargetLowering::DAGCombinerInfo
&DCI
,
13255 const ARMSubtarget
*Subtarget
) {
13256 StoreSDNode
*St
= cast
<StoreSDNode
>(N
);
13257 if (St
->isVolatile())
13259 SDValue StVal
= St
->getValue();
13260 EVT VT
= StVal
.getValueType();
13262 if (Subtarget
->hasNEON())
13263 if (SDValue Store
= PerformTruncatingStoreCombine(St
, DCI
.DAG
))
13266 if (Subtarget
->hasMVEIntegerOps())
13267 if (SDValue NewToken
= PerformSplittingToNarrowingStores(St
, DCI
.DAG
))
13270 if (!ISD::isNormalStore(St
))
13273 // Split a store of a VMOVDRR into two integer stores to avoid mixing NEON and
13274 // ARM stores of arguments in the same cache line.
13275 if (StVal
.getNode()->getOpcode() == ARMISD::VMOVDRR
&&
13276 StVal
.getNode()->hasOneUse()) {
13277 SelectionDAG
&DAG
= DCI
.DAG
;
13278 bool isBigEndian
= DAG
.getDataLayout().isBigEndian();
13280 SDValue BasePtr
= St
->getBasePtr();
13281 SDValue NewST1
= DAG
.getStore(
13282 St
->getChain(), DL
, StVal
.getNode()->getOperand(isBigEndian
? 1 : 0),
13283 BasePtr
, St
->getPointerInfo(), St
->getAlignment(),
13284 St
->getMemOperand()->getFlags());
13286 SDValue OffsetPtr
= DAG
.getNode(ISD::ADD
, DL
, MVT::i32
, BasePtr
,
13287 DAG
.getConstant(4, DL
, MVT::i32
));
13288 return DAG
.getStore(NewST1
.getValue(0), DL
,
13289 StVal
.getNode()->getOperand(isBigEndian
? 0 : 1),
13290 OffsetPtr
, St
->getPointerInfo(),
13291 std::min(4U, St
->getAlignment() / 2),
13292 St
->getMemOperand()->getFlags());
13295 if (StVal
.getValueType() == MVT::i64
&&
13296 StVal
.getNode()->getOpcode() == ISD::EXTRACT_VECTOR_ELT
) {
13298 // Bitcast an i64 store extracted from a vector to f64.
13299 // Otherwise, the i64 value will be legalized to a pair of i32 values.
13300 SelectionDAG
&DAG
= DCI
.DAG
;
13302 SDValue IntVec
= StVal
.getOperand(0);
13303 EVT FloatVT
= EVT::getVectorVT(*DAG
.getContext(), MVT::f64
,
13304 IntVec
.getValueType().getVectorNumElements());
13305 SDValue Vec
= DAG
.getNode(ISD::BITCAST
, dl
, FloatVT
, IntVec
);
13306 SDValue ExtElt
= DAG
.getNode(ISD::EXTRACT_VECTOR_ELT
, dl
, MVT::f64
,
13307 Vec
, StVal
.getOperand(1));
13309 SDValue V
= DAG
.getNode(ISD::BITCAST
, dl
, MVT::i64
, ExtElt
);
13310 // Make the DAGCombiner fold the bitcasts.
13311 DCI
.AddToWorklist(Vec
.getNode());
13312 DCI
.AddToWorklist(ExtElt
.getNode());
13313 DCI
.AddToWorklist(V
.getNode());
13314 return DAG
.getStore(St
->getChain(), dl
, V
, St
->getBasePtr(),
13315 St
->getPointerInfo(), St
->getAlignment(),
13316 St
->getMemOperand()->getFlags(), St
->getAAInfo());
13319 // If this is a legal vector store, try to combine it into a VST1_UPD.
13320 if (Subtarget
->hasNEON() && ISD::isNormalStore(N
) && VT
.isVector() &&
13321 DCI
.DAG
.getTargetLoweringInfo().isTypeLegal(VT
))
13322 return CombineBaseUpdate(N
, DCI
);
13327 /// PerformVCVTCombine - VCVT (floating-point to fixed-point, Advanced SIMD)
13328 /// can replace combinations of VMUL and VCVT (floating-point to integer)
13329 /// when the VMUL has a constant operand that is a power of 2.
13331 /// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>):
13332 /// vmul.f32 d16, d17, d16
13333 /// vcvt.s32.f32 d16, d16
13335 /// vcvt.s32.f32 d16, d16, #3
13336 static SDValue
PerformVCVTCombine(SDNode
*N
, SelectionDAG
&DAG
,
13337 const ARMSubtarget
*Subtarget
) {
13338 if (!Subtarget
->hasNEON())
13341 SDValue Op
= N
->getOperand(0);
13342 if (!Op
.getValueType().isVector() || !Op
.getValueType().isSimple() ||
13343 Op
.getOpcode() != ISD::FMUL
)
13346 SDValue ConstVec
= Op
->getOperand(1);
13347 if (!isa
<BuildVectorSDNode
>(ConstVec
))
13350 MVT FloatTy
= Op
.getSimpleValueType().getVectorElementType();
13351 uint32_t FloatBits
= FloatTy
.getSizeInBits();
13352 MVT IntTy
= N
->getSimpleValueType(0).getVectorElementType();
13353 uint32_t IntBits
= IntTy
.getSizeInBits();
13354 unsigned NumLanes
= Op
.getValueType().getVectorNumElements();
13355 if (FloatBits
!= 32 || IntBits
> 32 || (NumLanes
!= 4 && NumLanes
!= 2)) {
13356 // These instructions only exist converting from f32 to i32. We can handle
13357 // smaller integers by generating an extra truncate, but larger ones would
13358 // be lossy. We also can't handle anything other than 2 or 4 lanes, since
13359 // these intructions only support v2i32/v4i32 types.
13363 BitVector UndefElements
;
13364 BuildVectorSDNode
*BV
= cast
<BuildVectorSDNode
>(ConstVec
);
13365 int32_t C
= BV
->getConstantFPSplatPow2ToLog2Int(&UndefElements
, 33);
13366 if (C
== -1 || C
== 0 || C
> 32)
13370 bool isSigned
= N
->getOpcode() == ISD::FP_TO_SINT
;
13371 unsigned IntrinsicOpcode
= isSigned
? Intrinsic::arm_neon_vcvtfp2fxs
:
13372 Intrinsic::arm_neon_vcvtfp2fxu
;
13373 SDValue FixConv
= DAG
.getNode(
13374 ISD::INTRINSIC_WO_CHAIN
, dl
, NumLanes
== 2 ? MVT::v2i32
: MVT::v4i32
,
13375 DAG
.getConstant(IntrinsicOpcode
, dl
, MVT::i32
), Op
->getOperand(0),
13376 DAG
.getConstant(C
, dl
, MVT::i32
));
13378 if (IntBits
< FloatBits
)
13379 FixConv
= DAG
.getNode(ISD::TRUNCATE
, dl
, N
->getValueType(0), FixConv
);
13384 /// PerformVDIVCombine - VCVT (fixed-point to floating-point, Advanced SIMD)
13385 /// can replace combinations of VCVT (integer to floating-point) and VDIV
13386 /// when the VDIV has a constant operand that is a power of 2.
13388 /// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>):
13389 /// vcvt.f32.s32 d16, d16
13390 /// vdiv.f32 d16, d17, d16
13392 /// vcvt.f32.s32 d16, d16, #3
13393 static SDValue
PerformVDIVCombine(SDNode
*N
, SelectionDAG
&DAG
,
13394 const ARMSubtarget
*Subtarget
) {
13395 if (!Subtarget
->hasNEON())
13398 SDValue Op
= N
->getOperand(0);
13399 unsigned OpOpcode
= Op
.getNode()->getOpcode();
13400 if (!N
->getValueType(0).isVector() || !N
->getValueType(0).isSimple() ||
13401 (OpOpcode
!= ISD::SINT_TO_FP
&& OpOpcode
!= ISD::UINT_TO_FP
))
13404 SDValue ConstVec
= N
->getOperand(1);
13405 if (!isa
<BuildVectorSDNode
>(ConstVec
))
13408 MVT FloatTy
= N
->getSimpleValueType(0).getVectorElementType();
13409 uint32_t FloatBits
= FloatTy
.getSizeInBits();
13410 MVT IntTy
= Op
.getOperand(0).getSimpleValueType().getVectorElementType();
13411 uint32_t IntBits
= IntTy
.getSizeInBits();
13412 unsigned NumLanes
= Op
.getValueType().getVectorNumElements();
13413 if (FloatBits
!= 32 || IntBits
> 32 || (NumLanes
!= 4 && NumLanes
!= 2)) {
13414 // These instructions only exist converting from i32 to f32. We can handle
13415 // smaller integers by generating an extra extend, but larger ones would
13416 // be lossy. We also can't handle anything other than 2 or 4 lanes, since
13417 // these intructions only support v2i32/v4i32 types.
13421 BitVector UndefElements
;
13422 BuildVectorSDNode
*BV
= cast
<BuildVectorSDNode
>(ConstVec
);
13423 int32_t C
= BV
->getConstantFPSplatPow2ToLog2Int(&UndefElements
, 33);
13424 if (C
== -1 || C
== 0 || C
> 32)
13428 bool isSigned
= OpOpcode
== ISD::SINT_TO_FP
;
13429 SDValue ConvInput
= Op
.getOperand(0);
13430 if (IntBits
< FloatBits
)
13431 ConvInput
= DAG
.getNode(isSigned
? ISD::SIGN_EXTEND
: ISD::ZERO_EXTEND
,
13432 dl
, NumLanes
== 2 ? MVT::v2i32
: MVT::v4i32
,
13435 unsigned IntrinsicOpcode
= isSigned
? Intrinsic::arm_neon_vcvtfxs2fp
:
13436 Intrinsic::arm_neon_vcvtfxu2fp
;
13437 return DAG
.getNode(ISD::INTRINSIC_WO_CHAIN
, dl
,
13439 DAG
.getConstant(IntrinsicOpcode
, dl
, MVT::i32
),
13440 ConvInput
, DAG
.getConstant(C
, dl
, MVT::i32
));
13443 /// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics.
13444 static SDValue
PerformIntrinsicCombine(SDNode
*N
, SelectionDAG
&DAG
) {
13445 unsigned IntNo
= cast
<ConstantSDNode
>(N
->getOperand(0))->getZExtValue();
13448 // Don't do anything for most intrinsics.
13451 // Vector shifts: check for immediate versions and lower them.
13452 // Note: This is done during DAG combining instead of DAG legalizing because
13453 // the build_vectors for 64-bit vector element shift counts are generally
13454 // not legal, and it is hard to see their values after they get legalized to
13455 // loads from a constant pool.
13456 case Intrinsic::arm_neon_vshifts
:
13457 case Intrinsic::arm_neon_vshiftu
:
13458 case Intrinsic::arm_neon_vrshifts
:
13459 case Intrinsic::arm_neon_vrshiftu
:
13460 case Intrinsic::arm_neon_vrshiftn
:
13461 case Intrinsic::arm_neon_vqshifts
:
13462 case Intrinsic::arm_neon_vqshiftu
:
13463 case Intrinsic::arm_neon_vqshiftsu
:
13464 case Intrinsic::arm_neon_vqshiftns
:
13465 case Intrinsic::arm_neon_vqshiftnu
:
13466 case Intrinsic::arm_neon_vqshiftnsu
:
13467 case Intrinsic::arm_neon_vqrshiftns
:
13468 case Intrinsic::arm_neon_vqrshiftnu
:
13469 case Intrinsic::arm_neon_vqrshiftnsu
: {
13470 EVT VT
= N
->getOperand(1).getValueType();
13472 unsigned VShiftOpc
= 0;
13475 case Intrinsic::arm_neon_vshifts
:
13476 case Intrinsic::arm_neon_vshiftu
:
13477 if (isVShiftLImm(N
->getOperand(2), VT
, false, Cnt
)) {
13478 VShiftOpc
= ARMISD::VSHLIMM
;
13481 if (isVShiftRImm(N
->getOperand(2), VT
, false, true, Cnt
)) {
13482 VShiftOpc
= (IntNo
== Intrinsic::arm_neon_vshifts
? ARMISD::VSHRsIMM
13483 : ARMISD::VSHRuIMM
);
13488 case Intrinsic::arm_neon_vrshifts
:
13489 case Intrinsic::arm_neon_vrshiftu
:
13490 if (isVShiftRImm(N
->getOperand(2), VT
, false, true, Cnt
))
13494 case Intrinsic::arm_neon_vqshifts
:
13495 case Intrinsic::arm_neon_vqshiftu
:
13496 if (isVShiftLImm(N
->getOperand(2), VT
, false, Cnt
))
13500 case Intrinsic::arm_neon_vqshiftsu
:
13501 if (isVShiftLImm(N
->getOperand(2), VT
, false, Cnt
))
13503 llvm_unreachable("invalid shift count for vqshlu intrinsic");
13505 case Intrinsic::arm_neon_vrshiftn
:
13506 case Intrinsic::arm_neon_vqshiftns
:
13507 case Intrinsic::arm_neon_vqshiftnu
:
13508 case Intrinsic::arm_neon_vqshiftnsu
:
13509 case Intrinsic::arm_neon_vqrshiftns
:
13510 case Intrinsic::arm_neon_vqrshiftnu
:
13511 case Intrinsic::arm_neon_vqrshiftnsu
:
13512 // Narrowing shifts require an immediate right shift.
13513 if (isVShiftRImm(N
->getOperand(2), VT
, true, true, Cnt
))
13515 llvm_unreachable("invalid shift count for narrowing vector shift "
13519 llvm_unreachable("unhandled vector shift");
13523 case Intrinsic::arm_neon_vshifts
:
13524 case Intrinsic::arm_neon_vshiftu
:
13525 // Opcode already set above.
13527 case Intrinsic::arm_neon_vrshifts
:
13528 VShiftOpc
= ARMISD::VRSHRsIMM
;
13530 case Intrinsic::arm_neon_vrshiftu
:
13531 VShiftOpc
= ARMISD::VRSHRuIMM
;
13533 case Intrinsic::arm_neon_vrshiftn
:
13534 VShiftOpc
= ARMISD::VRSHRNIMM
;
13536 case Intrinsic::arm_neon_vqshifts
:
13537 VShiftOpc
= ARMISD::VQSHLsIMM
;
13539 case Intrinsic::arm_neon_vqshiftu
:
13540 VShiftOpc
= ARMISD::VQSHLuIMM
;
13542 case Intrinsic::arm_neon_vqshiftsu
:
13543 VShiftOpc
= ARMISD::VQSHLsuIMM
;
13545 case Intrinsic::arm_neon_vqshiftns
:
13546 VShiftOpc
= ARMISD::VQSHRNsIMM
;
13548 case Intrinsic::arm_neon_vqshiftnu
:
13549 VShiftOpc
= ARMISD::VQSHRNuIMM
;
13551 case Intrinsic::arm_neon_vqshiftnsu
:
13552 VShiftOpc
= ARMISD::VQSHRNsuIMM
;
13554 case Intrinsic::arm_neon_vqrshiftns
:
13555 VShiftOpc
= ARMISD::VQRSHRNsIMM
;
13557 case Intrinsic::arm_neon_vqrshiftnu
:
13558 VShiftOpc
= ARMISD::VQRSHRNuIMM
;
13560 case Intrinsic::arm_neon_vqrshiftnsu
:
13561 VShiftOpc
= ARMISD::VQRSHRNsuIMM
;
13566 return DAG
.getNode(VShiftOpc
, dl
, N
->getValueType(0),
13567 N
->getOperand(1), DAG
.getConstant(Cnt
, dl
, MVT::i32
));
13570 case Intrinsic::arm_neon_vshiftins
: {
13571 EVT VT
= N
->getOperand(1).getValueType();
13573 unsigned VShiftOpc
= 0;
13575 if (isVShiftLImm(N
->getOperand(3), VT
, false, Cnt
))
13576 VShiftOpc
= ARMISD::VSLIIMM
;
13577 else if (isVShiftRImm(N
->getOperand(3), VT
, false, true, Cnt
))
13578 VShiftOpc
= ARMISD::VSRIIMM
;
13580 llvm_unreachable("invalid shift count for vsli/vsri intrinsic");
13584 return DAG
.getNode(VShiftOpc
, dl
, N
->getValueType(0),
13585 N
->getOperand(1), N
->getOperand(2),
13586 DAG
.getConstant(Cnt
, dl
, MVT::i32
));
13589 case Intrinsic::arm_neon_vqrshifts
:
13590 case Intrinsic::arm_neon_vqrshiftu
:
13591 // No immediate versions of these to check for.
13598 /// PerformShiftCombine - Checks for immediate versions of vector shifts and
13599 /// lowers them. As with the vector shift intrinsics, this is done during DAG
13600 /// combining instead of DAG legalizing because the build_vectors for 64-bit
13601 /// vector element shift counts are generally not legal, and it is hard to see
13602 /// their values after they get legalized to loads from a constant pool.
13603 static SDValue
PerformShiftCombine(SDNode
*N
,
13604 TargetLowering::DAGCombinerInfo
&DCI
,
13605 const ARMSubtarget
*ST
) {
13606 SelectionDAG
&DAG
= DCI
.DAG
;
13607 EVT VT
= N
->getValueType(0);
13608 if (N
->getOpcode() == ISD::SRL
&& VT
== MVT::i32
&& ST
->hasV6Ops()) {
13609 // Canonicalize (srl (bswap x), 16) to (rotr (bswap x), 16) if the high
13610 // 16-bits of x is zero. This optimizes rev + lsr 16 to rev16.
13611 SDValue N1
= N
->getOperand(1);
13612 if (ConstantSDNode
*C
= dyn_cast
<ConstantSDNode
>(N1
)) {
13613 SDValue N0
= N
->getOperand(0);
13614 if (C
->getZExtValue() == 16 && N0
.getOpcode() == ISD::BSWAP
&&
13615 DAG
.MaskedValueIsZero(N0
.getOperand(0),
13616 APInt::getHighBitsSet(32, 16)))
13617 return DAG
.getNode(ISD::ROTR
, SDLoc(N
), VT
, N0
, N1
);
13621 if (ST
->isThumb1Only() && N
->getOpcode() == ISD::SHL
&& VT
== MVT::i32
&&
13622 N
->getOperand(0)->getOpcode() == ISD::AND
&&
13623 N
->getOperand(0)->hasOneUse()) {
13624 if (DCI
.isBeforeLegalize() || DCI
.isCalledByLegalizer())
13626 // Look for the pattern (shl (and x, AndMask), ShiftAmt). This doesn't
13627 // usually show up because instcombine prefers to canonicalize it to
13628 // (and (shl x, ShiftAmt) (shl AndMask, ShiftAmt)), but the shift can come
13629 // out of GEP lowering in some cases.
13630 SDValue N0
= N
->getOperand(0);
13631 ConstantSDNode
*ShiftAmtNode
= dyn_cast
<ConstantSDNode
>(N
->getOperand(1));
13634 uint32_t ShiftAmt
= static_cast<uint32_t>(ShiftAmtNode
->getZExtValue());
13635 ConstantSDNode
*AndMaskNode
= dyn_cast
<ConstantSDNode
>(N0
->getOperand(1));
13638 uint32_t AndMask
= static_cast<uint32_t>(AndMaskNode
->getZExtValue());
13639 // Don't transform uxtb/uxth.
13640 if (AndMask
== 255 || AndMask
== 65535)
13642 if (isMask_32(AndMask
)) {
13643 uint32_t MaskedBits
= countLeadingZeros(AndMask
);
13644 if (MaskedBits
> ShiftAmt
) {
13646 SDValue SHL
= DAG
.getNode(ISD::SHL
, DL
, MVT::i32
, N0
->getOperand(0),
13647 DAG
.getConstant(MaskedBits
, DL
, MVT::i32
));
13648 return DAG
.getNode(
13649 ISD::SRL
, DL
, MVT::i32
, SHL
,
13650 DAG
.getConstant(MaskedBits
- ShiftAmt
, DL
, MVT::i32
));
13655 // Nothing to be done for scalar shifts.
13656 const TargetLowering
&TLI
= DAG
.getTargetLoweringInfo();
13657 if (!VT
.isVector() || !TLI
.isTypeLegal(VT
))
13659 if (ST
->hasMVEIntegerOps() && VT
== MVT::v2i64
)
13664 switch (N
->getOpcode()) {
13665 default: llvm_unreachable("unexpected shift opcode");
13668 if (isVShiftLImm(N
->getOperand(1), VT
, false, Cnt
)) {
13670 return DAG
.getNode(ARMISD::VSHLIMM
, dl
, VT
, N
->getOperand(0),
13671 DAG
.getConstant(Cnt
, dl
, MVT::i32
));
13677 if (isVShiftRImm(N
->getOperand(1), VT
, false, false, Cnt
)) {
13678 unsigned VShiftOpc
=
13679 (N
->getOpcode() == ISD::SRA
? ARMISD::VSHRsIMM
: ARMISD::VSHRuIMM
);
13681 return DAG
.getNode(VShiftOpc
, dl
, VT
, N
->getOperand(0),
13682 DAG
.getConstant(Cnt
, dl
, MVT::i32
));
13688 // Look for a sign/zero extend of a larger than legal load. This can be split
13689 // into two extending loads, which are simpler to deal with than an arbitrary
13691 static SDValue
PerformSplittingToWideningLoad(SDNode
*N
, SelectionDAG
&DAG
) {
13692 SDValue N0
= N
->getOperand(0);
13693 if (N0
.getOpcode() != ISD::LOAD
)
13695 LoadSDNode
*LD
= cast
<LoadSDNode
>(N0
.getNode());
13696 if (!LD
->isSimple() || !N0
.hasOneUse() || LD
->isIndexed() ||
13697 LD
->getExtensionType() != ISD::NON_EXTLOAD
)
13699 EVT FromVT
= LD
->getValueType(0);
13700 EVT ToVT
= N
->getValueType(0);
13701 if (!ToVT
.isVector())
13703 assert(FromVT
.getVectorNumElements() == ToVT
.getVectorNumElements());
13704 EVT ToEltVT
= ToVT
.getVectorElementType();
13705 EVT FromEltVT
= FromVT
.getVectorElementType();
13707 unsigned NumElements
= 0;
13708 if (ToEltVT
== MVT::i32
&& (FromEltVT
== MVT::i16
|| FromEltVT
== MVT::i8
))
13710 if (ToEltVT
== MVT::i16
&& FromEltVT
== MVT::i8
)
13712 if (NumElements
== 0 ||
13713 FromVT
.getVectorNumElements() == NumElements
||
13714 FromVT
.getVectorNumElements() % NumElements
!= 0 ||
13715 !isPowerOf2_32(NumElements
))
13719 // Details about the old load
13720 SDValue Ch
= LD
->getChain();
13721 SDValue BasePtr
= LD
->getBasePtr();
13722 unsigned Alignment
= LD
->getOriginalAlignment();
13723 MachineMemOperand::Flags MMOFlags
= LD
->getMemOperand()->getFlags();
13724 AAMDNodes AAInfo
= LD
->getAAInfo();
13726 ISD::LoadExtType NewExtType
=
13727 N
->getOpcode() == ISD::SIGN_EXTEND
? ISD::SEXTLOAD
: ISD::ZEXTLOAD
;
13728 SDValue Offset
= DAG
.getUNDEF(BasePtr
.getValueType());
13729 EVT NewFromVT
= FromVT
.getHalfNumVectorElementsVT(*DAG
.getContext());
13730 EVT NewToVT
= ToVT
.getHalfNumVectorElementsVT(*DAG
.getContext());
13731 unsigned NewOffset
= NewFromVT
.getSizeInBits() / 8;
13732 SDValue NewPtr
= DAG
.getObjectPtrOffset(DL
, BasePtr
, NewOffset
);
13734 // Split the load in half, each side of which is extended separately. This
13735 // is good enough, as legalisation will take it from there. They are either
13736 // already legal or they will be split further into something that is
13739 DAG
.getLoad(ISD::UNINDEXED
, NewExtType
, NewToVT
, DL
, Ch
, BasePtr
, Offset
,
13740 LD
->getPointerInfo(), NewFromVT
, Alignment
, MMOFlags
, AAInfo
);
13742 DAG
.getLoad(ISD::UNINDEXED
, NewExtType
, NewToVT
, DL
, Ch
, NewPtr
, Offset
,
13743 LD
->getPointerInfo().getWithOffset(NewOffset
), NewFromVT
,
13744 Alignment
, MMOFlags
, AAInfo
);
13746 SDValue NewChain
= DAG
.getNode(ISD::TokenFactor
, DL
, MVT::Other
,
13747 SDValue(NewLoad1
.getNode(), 1),
13748 SDValue(NewLoad2
.getNode(), 1));
13749 DAG
.ReplaceAllUsesOfValueWith(SDValue(LD
, 1), NewChain
);
13750 return DAG
.getNode(ISD::CONCAT_VECTORS
, DL
, ToVT
, NewLoad1
, NewLoad2
);
13753 /// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND,
13754 /// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND.
13755 static SDValue
PerformExtendCombine(SDNode
*N
, SelectionDAG
&DAG
,
13756 const ARMSubtarget
*ST
) {
13757 SDValue N0
= N
->getOperand(0);
13759 // Check for sign- and zero-extensions of vector extract operations of 8-
13760 // and 16-bit vector elements. NEON supports these directly. They are
13761 // handled during DAG combining because type legalization will promote them
13762 // to 32-bit types and it is messy to recognize the operations after that.
13763 if (ST
->hasNEON() && N0
.getOpcode() == ISD::EXTRACT_VECTOR_ELT
) {
13764 SDValue Vec
= N0
.getOperand(0);
13765 SDValue Lane
= N0
.getOperand(1);
13766 EVT VT
= N
->getValueType(0);
13767 EVT EltVT
= N0
.getValueType();
13768 const TargetLowering
&TLI
= DAG
.getTargetLoweringInfo();
13770 if (VT
== MVT::i32
&&
13771 (EltVT
== MVT::i8
|| EltVT
== MVT::i16
) &&
13772 TLI
.isTypeLegal(Vec
.getValueType()) &&
13773 isa
<ConstantSDNode
>(Lane
)) {
13776 switch (N
->getOpcode()) {
13777 default: llvm_unreachable("unexpected opcode");
13778 case ISD::SIGN_EXTEND
:
13779 Opc
= ARMISD::VGETLANEs
;
13781 case ISD::ZERO_EXTEND
:
13782 case ISD::ANY_EXTEND
:
13783 Opc
= ARMISD::VGETLANEu
;
13786 return DAG
.getNode(Opc
, SDLoc(N
), VT
, Vec
, Lane
);
13790 if (ST
->hasMVEIntegerOps())
13791 if (SDValue NewLoad
= PerformSplittingToWideningLoad(N
, DAG
))
static const APInt *isPowerOf2Constant(SDValue V) {
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
  if (!C)
    return nullptr;
  const APInt *CV = &C->getAPIntValue();
  return CV->isPowerOf2() ? CV : nullptr;
}
SDValue ARMTargetLowering::PerformCMOVToBFICombine(SDNode *CMOV,
                                                   SelectionDAG &DAG) const {
  // If we have a CMOV, OR and AND combination such as:
  //   if (x & CN)
  //     y |= CM;
  // where:
  // * CN is a single bit;
  // * All bits covered by CM are known zero in y
  //
  // Then we can convert this into a sequence of BFI instructions. This will
  // always be a win if CM is a single bit, will always be no worse than the
  // TST&OR sequence if CM is two bits, and for thumb will be no worse if CM is
  // three bits (due to the extra IT instruction).

  SDValue Op0 = CMOV->getOperand(0);
  SDValue Op1 = CMOV->getOperand(1);
  auto CCNode = cast<ConstantSDNode>(CMOV->getOperand(2));
  auto CC = CCNode->getAPIntValue().getLimitedValue();
  SDValue CmpZ = CMOV->getOperand(4);

  // The compare must be against zero.
  if (!isNullConstant(CmpZ->getOperand(1)))
    return SDValue();

  assert(CmpZ->getOpcode() == ARMISD::CMPZ);
  SDValue And = CmpZ->getOperand(0);
  if (And->getOpcode() != ISD::AND)
    return SDValue();
  const APInt *AndC = isPowerOf2Constant(And->getOperand(1));
  if (!AndC)
    return SDValue();
  SDValue X = And->getOperand(0);

  if (CC == ARMCC::EQ) {
    // We're performing an "equal to zero" compare. Swap the operands so we
    // canonicalize on a "not equal to zero" compare.
    std::swap(Op0, Op1);
  } else {
    assert(CC == ARMCC::NE && "How can a CMPZ node not be EQ or NE?");
  }

  if (Op1->getOpcode() != ISD::OR)
    return SDValue();

  ConstantSDNode *OrC = dyn_cast<ConstantSDNode>(Op1->getOperand(1));
  if (!OrC)
    return SDValue();
  SDValue Y = Op1->getOperand(0);

  if (Op0 != Y)
    return SDValue();

  // Now, is it profitable to continue?
  APInt OrCI = OrC->getAPIntValue();
  unsigned Heuristic = Subtarget->isThumb() ? 3 : 2;
  if (OrCI.countPopulation() > Heuristic)
    return SDValue();

  // Lastly, can we determine that the bits defined by OrCI
  // are known to be zero in Y?
  KnownBits Known = DAG.computeKnownBits(Y);
  if ((OrCI & Known.Zero) != OrCI)
    return SDValue();

  // OK, we can do the combine.
  SDValue V = Y;
  SDLoc dl(X);
  EVT VT = X.getValueType();
  unsigned BitInX = AndC->logBase2();

  if (BitInX != 0) {
    // We must shift X first.
    X = DAG.getNode(ISD::SRL, dl, VT, X,
                    DAG.getConstant(BitInX, dl, VT));
  }

  for (unsigned BitInY = 0, NumActiveBits = OrCI.getActiveBits();
       BitInY < NumActiveBits; ++BitInY) {
    if (OrCI[BitInY] == 0)
      continue;
    APInt Mask(VT.getSizeInBits(), 0);
    Mask.setBit(BitInY);
    V = DAG.getNode(ARMISD::BFI, dl, VT, V, X,
                    // Confusingly, the operand is an *inverted* mask.
                    DAG.getConstant(~Mask, dl, VT));
  }

  return V;
}
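// As a rough example of the sequence built above (illustrative register
// assignment, not taken from a specific test): for "if (x & 0x10) y |= 0x4"
// with bit 2 of y known zero, the combine produces roughly
//   lsr  xs, x, #4        ; move the tested bit of x down to bit 0
//   bfi  y,  xs, #2, #1   ; insert that bit into y at position 2
// i.e. one BFI per set bit of the OR constant, instead of a TST/ORR/IT
// sequence.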
// Given N, the value controlling the conditional branch, search for the loop
// intrinsic, returning it, along with how the value is used. We need to handle
// patterns such as the following:
// (brcond (xor (setcc (loop.decrement), 0, ne), 1), exit)
// (brcond (setcc (loop.decrement), 0, eq), exit)
// (brcond (setcc (loop.decrement), 0, ne), header)
static SDValue SearchLoopIntrinsic(SDValue N, ISD::CondCode &CC, int &Imm,
                                   bool &Negate) {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::XOR: {
    if (!isa<ConstantSDNode>(N.getOperand(1)))
      return SDValue();
    if (!cast<ConstantSDNode>(N.getOperand(1))->isOne())
      return SDValue();
    Negate = !Negate;
    return SearchLoopIntrinsic(N.getOperand(0), CC, Imm, Negate);
  }
  case ISD::SETCC: {
    auto *Const = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (!Const)
      return SDValue();
    if (Const->isNullValue())
      Imm = 0;
    else if (Const->isOne())
      Imm = 1;
    else
      return SDValue();
    CC = cast<CondCodeSDNode>(N.getOperand(2))->get();
    return SearchLoopIntrinsic(N->getOperand(0), CC, Imm, Negate);
  }
  case ISD::INTRINSIC_W_CHAIN: {
    unsigned IntOp = cast<ConstantSDNode>(N.getOperand(1))->getZExtValue();
    if (IntOp != Intrinsic::test_set_loop_iterations &&
        IntOp != Intrinsic::loop_decrement_reg)
      return SDValue();
    return N;
  }
  }
  return SDValue();
}
static SDValue PerformHWLoopCombine(SDNode *N,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const ARMSubtarget *ST) {

  // The hwloop intrinsics that we're interested in are used for control-flow,
  // either for entering or exiting the loop:
  // - test.set.loop.iterations will test whether its operand is zero. If it
  //   is zero, the proceeding branch should not enter the loop.
  // - loop.decrement.reg also tests whether its operand is zero. If it is
  //   zero, the proceeding branch should not branch back to the beginning of
  //   the loop.
  // So here, we need to check how the brcond is using the result of each of
  // the intrinsics to ensure that we're branching to the right place at the
  // right time.

  ISD::CondCode CC;
  SDValue Cond;
  int Imm = 1;
  bool Negate = false;
  SDValue Chain = N->getOperand(0);
  SDValue Dest;

  if (N->getOpcode() == ISD::BRCOND) {
    CC = ISD::SETEQ;
    Cond = N->getOperand(1);
    Dest = N->getOperand(2);
  } else {
    assert(N->getOpcode() == ISD::BR_CC && "Expected BRCOND or BR_CC!");
    CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
    Cond = N->getOperand(2);
    Dest = N->getOperand(4);
    if (auto *Const = dyn_cast<ConstantSDNode>(N->getOperand(3))) {
      if (!Const->isOne() && !Const->isNullValue())
        return SDValue();
      Imm = Const->getZExtValue();
    } else
      return SDValue();
  }

  SDValue Int = SearchLoopIntrinsic(Cond, CC, Imm, Negate);
  if (!Int)
    return SDValue();

  if (Negate)
    CC = ISD::getSetCCInverse(CC, true);

  auto IsTrueIfZero = [](ISD::CondCode CC, int Imm) {
    return (CC == ISD::SETEQ && Imm == 0) ||
           (CC == ISD::SETNE && Imm == 1) ||
           (CC == ISD::SETLT && Imm == 1) ||
           (CC == ISD::SETULT && Imm == 1);
  };

  auto IsFalseIfZero = [](ISD::CondCode CC, int Imm) {
    return (CC == ISD::SETEQ && Imm == 1) ||
           (CC == ISD::SETNE && Imm == 0) ||
           (CC == ISD::SETGT && Imm == 0) ||
           (CC == ISD::SETUGT && Imm == 0) ||
           (CC == ISD::SETGE && Imm == 1) ||
           (CC == ISD::SETUGE && Imm == 1);
  };

  assert((IsTrueIfZero(CC, Imm) || IsFalseIfZero(CC, Imm)) &&
         "unsupported condition");

  SDLoc dl(Int);
  SelectionDAG &DAG = DCI.DAG;
  SDValue Elements = Int.getOperand(2);
  unsigned IntOp = cast<ConstantSDNode>(Int->getOperand(1))->getZExtValue();
  assert((N->hasOneUse() && N->use_begin()->getOpcode() == ISD::BR)
          && "expected single br user");
  SDNode *Br = *N->use_begin();
  SDValue OtherTarget = Br->getOperand(1);

  // Update the unconditional branch to branch to the given Dest.
  auto UpdateUncondBr = [](SDNode *Br, SDValue Dest, SelectionDAG &DAG) {
    SDValue NewBrOps[] = { Br->getOperand(0), Dest };
    SDValue NewBr = DAG.getNode(ISD::BR, SDLoc(Br), MVT::Other, NewBrOps);
    DAG.ReplaceAllUsesOfValueWith(SDValue(Br, 0), NewBr);
  };

  if (IntOp == Intrinsic::test_set_loop_iterations) {
    SDValue Res;
    // We expect this 'instruction' to branch when the counter is zero.
    if (IsTrueIfZero(CC, Imm)) {
      SDValue Ops[] = { Chain, Elements, Dest };
      Res = DAG.getNode(ARMISD::WLS, dl, MVT::Other, Ops);
    } else {
      // The logic is the reverse of what we need for WLS, so find the other
      // basic block target: the target of the proceeding br.
      UpdateUncondBr(Br, Dest, DAG);

      SDValue Ops[] = { Chain, Elements, OtherTarget };
      Res = DAG.getNode(ARMISD::WLS, dl, MVT::Other, Ops);
    }
    DAG.ReplaceAllUsesOfValueWith(Int.getValue(1), Int.getOperand(0));
    return Res;
  } else {
    SDValue Size = DAG.getTargetConstant(
      cast<ConstantSDNode>(Int.getOperand(3))->getZExtValue(), dl, MVT::i32);
    SDValue Args[] = { Int.getOperand(0), Elements, Size, };
    SDValue LoopDec = DAG.getNode(ARMISD::LOOP_DEC, dl,
                                  DAG.getVTList(MVT::i32, MVT::Other), Args);
    DAG.ReplaceAllUsesWith(Int.getNode(), LoopDec.getNode());

    // We expect this instruction to branch when the count is not zero.
    SDValue Target = IsFalseIfZero(CC, Imm) ? Dest : OtherTarget;

    // Update the unconditional branch to target the loop preheader if we've
    // found the condition has been reversed.
    if (Target == OtherTarget)
      UpdateUncondBr(Br, Dest, DAG);

    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        SDValue(LoopDec.getNode(), 1), Chain);

    SDValue EndArgs[] = { Chain, SDValue(LoopDec.getNode(), 0), Target };
    return DAG.getNode(ARMISD::LE, dl, MVT::Other, EndArgs);
  }
  return SDValue();
}
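// The net effect is the usual low-overhead-loop shape (sketch only; the final
// machine instructions are produced much later, after pseudo expansion):
//   entry:
//     WLS lr, rN, .Lexit   ; ARMISD::WLS - skip the loop if the count is zero
//   loop:
//     ...
//     LE  lr, .Lloop       ; ARMISD::LE  - decrement and branch while nonzero
//   exit: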
/// PerformBRCONDCombine - Target-specific DAG combining for ARMISD::BRCOND.
SDValue
ARMTargetLowering::PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const {
  SDValue Cmp = N->getOperand(4);
  if (Cmp.getOpcode() != ARMISD::CMPZ)
    // Only looking at NE cases.
    return SDValue();

  EVT VT = N->getValueType(0);
  SDLoc dl(N);
  SDValue LHS = Cmp.getOperand(0);
  SDValue RHS = Cmp.getOperand(1);
  SDValue Chain = N->getOperand(0);
  SDValue BB = N->getOperand(1);
  SDValue ARMcc = N->getOperand(2);
  ARMCC::CondCodes CC =
    (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue();

  // (brcond Chain BB ne CPSR (cmpz (and (cmov 0 1 CC CPSR Cmp) 1) 0))
  // -> (brcond Chain BB CC CPSR Cmp)
  if (CC == ARMCC::NE && LHS.getOpcode() == ISD::AND && LHS->hasOneUse() &&
      LHS->getOperand(0)->getOpcode() == ARMISD::CMOV &&
      LHS->getOperand(0)->hasOneUse()) {
    auto *LHS00C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)->getOperand(0));
    auto *LHS01C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)->getOperand(1));
    auto *LHS1C = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
    auto *RHSC = dyn_cast<ConstantSDNode>(RHS);
    if ((LHS00C && LHS00C->getZExtValue() == 0) &&
        (LHS01C && LHS01C->getZExtValue() == 1) &&
        (LHS1C && LHS1C->getZExtValue() == 1) &&
        (RHSC && RHSC->getZExtValue() == 0)) {
      return DAG.getNode(
          ARMISD::BRCOND, dl, VT, Chain, BB, LHS->getOperand(0)->getOperand(2),
          LHS->getOperand(0)->getOperand(3), LHS->getOperand(0)->getOperand(4));
    }
  }

  return SDValue();
}
/// PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV.
SDValue
ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const {
  SDValue Cmp = N->getOperand(4);
  if (Cmp.getOpcode() != ARMISD::CMPZ)
    // Only looking at EQ and NE cases.
    return SDValue();

  EVT VT = N->getValueType(0);
  SDLoc dl(N);
  SDValue LHS = Cmp.getOperand(0);
  SDValue RHS = Cmp.getOperand(1);
  SDValue FalseVal = N->getOperand(0);
  SDValue TrueVal = N->getOperand(1);
  SDValue ARMcc = N->getOperand(2);
  ARMCC::CondCodes CC =
    (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue();

  // BFI is only available on V6T2+.
  if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops()) {
    SDValue R = PerformCMOVToBFICombine(N, DAG);
    if (R)
      return R;
  }

  // Simplify a CMOV whose selected value already matches one side of the
  // compare, so the conditional move can be folded away.
  /// FIXME: Turn this into a target neutral optimization?
  SDValue Res;
  if (CC == ARMCC::NE && FalseVal == RHS && FalseVal != LHS) {
    Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, TrueVal, ARMcc,
                      N->getOperand(3), Cmp);
  } else if (CC == ARMCC::EQ && TrueVal == RHS) {
    SDValue ARMcc;
    SDValue NewCmp = getARMCmp(LHS, RHS, ISD::SETNE, ARMcc, DAG, dl);
    Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, FalseVal, ARMcc,
                      N->getOperand(3), NewCmp);
  }

  // (cmov F T ne CPSR (cmpz (cmov 0 1 CC CPSR Cmp) 0))
  // -> (cmov F T CC CPSR Cmp)
  if (CC == ARMCC::NE && LHS.getOpcode() == ARMISD::CMOV && LHS->hasOneUse()) {
    auto *LHS0C = dyn_cast<ConstantSDNode>(LHS->getOperand(0));
    auto *LHS1C = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
    auto *RHSC = dyn_cast<ConstantSDNode>(RHS);
    if ((LHS0C && LHS0C->getZExtValue() == 0) &&
        (LHS1C && LHS1C->getZExtValue() == 1) &&
        (RHSC && RHSC->getZExtValue() == 0)) {
      return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal,
                         LHS->getOperand(2), LHS->getOperand(3),
                         LHS->getOperand(4));
    }
  }

  if (!VT.isInteger())
    return SDValue();

  // Materialize a boolean comparison for integers so we can avoid branching.
  if (isNullConstant(FalseVal)) {
    if (CC == ARMCC::EQ && isOneConstant(TrueVal)) {
      if (!Subtarget->isThumb1Only() && Subtarget->hasV5TOps()) {
        // If x == y then x - y == 0 and ARM's CLZ will return 32, shifting it
        // right 5 bits will make that 32 be 1, otherwise it will be 0.
        // CMOV 0, 1, ==, (CMPZ x, y) -> SRL (CTLZ (SUB x, y)), 5
        SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, LHS, RHS);
        Res = DAG.getNode(ISD::SRL, dl, VT, DAG.getNode(ISD::CTLZ, dl, VT, Sub),
                          DAG.getConstant(5, dl, MVT::i32));
      } else {
        // CMOV 0, 1, ==, (CMPZ x, y) ->
        //     (ADDCARRY (SUB x, y), t:0, t:1)
        // where t = (SUBCARRY 0, (SUB x, y), 0)
        //
        // The SUBCARRY computes 0 - (x - y) and this will give a borrow when
        // x != y. In other words, a carry C == 1 when x == y, C == 0
        // otherwise.
        // The final ADDCARRY computes
        //     x - y + (0 - (x - y)) + C == C
        SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, LHS, RHS);
        SDVTList VTs = DAG.getVTList(VT, MVT::i32);
        SDValue Neg = DAG.getNode(ISD::USUBO, dl, VTs, FalseVal, Sub);
        // ISD::SUBCARRY returns a borrow but we want the carry here.
        SDValue Carry =
            DAG.getNode(ISD::SUB, dl, MVT::i32,
                        DAG.getConstant(1, dl, MVT::i32), Neg.getValue(1));
        Res = DAG.getNode(ISD::ADDCARRY, dl, VTs, Sub, Neg, Carry);
      }
    } else if (CC == ARMCC::NE && !isNullConstant(RHS) &&
               (!Subtarget->isThumb1Only() || isPowerOf2Constant(TrueVal))) {
      // This seems pointless but will allow us to combine it further below.
      // CMOV 0, z, !=, (CMPZ x, y) -> CMOV (SUBS x, y), z, !=, (SUBS x, y):1
      SDValue Sub =
          DAG.getNode(ARMISD::SUBS, dl, DAG.getVTList(VT, MVT::i32), LHS, RHS);
      SDValue CPSRGlue = DAG.getCopyToReg(DAG.getEntryNode(), dl, ARM::CPSR,
                                          Sub.getValue(1), SDValue());
      Res = DAG.getNode(ARMISD::CMOV, dl, VT, Sub, TrueVal, ARMcc,
                        N->getOperand(3), CPSRGlue.getValue(1));
      FalseVal = Sub;
    }
  } else if (isNullConstant(TrueVal)) {
    if (CC == ARMCC::EQ && !isNullConstant(RHS) &&
        (!Subtarget->isThumb1Only() || isPowerOf2Constant(FalseVal))) {
      // This seems pointless but will allow us to combine it further below.
      // Note that we change == for != as this is the dual for the case above.
      // CMOV z, 0, ==, (CMPZ x, y) -> CMOV (SUBS x, y), z, !=, (SUBS x, y):1
      SDValue Sub =
          DAG.getNode(ARMISD::SUBS, dl, DAG.getVTList(VT, MVT::i32), LHS, RHS);
      SDValue CPSRGlue = DAG.getCopyToReg(DAG.getEntryNode(), dl, ARM::CPSR,
                                          Sub.getValue(1), SDValue());
      Res = DAG.getNode(ARMISD::CMOV, dl, VT, Sub, FalseVal,
                        DAG.getConstant(ARMCC::NE, dl, MVT::i32),
                        N->getOperand(3), CPSRGlue.getValue(1));
      FalseVal = Sub;
    }
  }

  // On Thumb1, the DAG above may be further combined if z is a power of 2
  // (z == 2 ^ K):
  // CMOV (SUBS x, y), z, !=, (SUBS x, y):1 ->
  // t1 = (USUBO (SUB x, y), 1)
  // t2 = (SUBCARRY (SUB x, y), t1:0, t1:1)
  // Result = if K != 0 then (SHL t2:0, K) else t2:0
  //
  // This also handles the special case of comparing against zero; it's
  // essentially, the same pattern, except there's no SUBS:
  // CMOV x, z, !=, (CMPZ x, 0) ->
  // t1 = (USUBO x, 1)
  // t2 = (SUBCARRY x, t1:0, t1:1)
  // Result = if K != 0 then (SHL t2:0, K) else t2:0
  const APInt *TrueConst;
  if (Subtarget->isThumb1Only() && CC == ARMCC::NE &&
      ((FalseVal.getOpcode() == ARMISD::SUBS &&
        FalseVal.getOperand(0) == LHS && FalseVal.getOperand(1) == RHS) ||
       (FalseVal == LHS && isNullConstant(RHS))) &&
      (TrueConst = isPowerOf2Constant(TrueVal))) {
    SDVTList VTs = DAG.getVTList(VT, MVT::i32);
    unsigned ShiftAmount = TrueConst->logBase2();
    if (ShiftAmount)
      TrueVal = DAG.getConstant(1, dl, VT);
    SDValue Subc = DAG.getNode(ISD::USUBO, dl, VTs, FalseVal, TrueVal);
    Res = DAG.getNode(ISD::SUBCARRY, dl, VTs, FalseVal, Subc, Subc.getValue(1));

    if (ShiftAmount)
      Res = DAG.getNode(ISD::SHL, dl, VT, Res,
                        DAG.getConstant(ShiftAmount, dl, MVT::i32));
  }

  if (Res.getNode()) {
    KnownBits Known = DAG.computeKnownBits(SDValue(N,0));
    // Capture demanded bits information that would be otherwise lost.
    if (Known.Zero == 0xfffffffe)
      Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
                        DAG.getValueType(MVT::i1));
    else if (Known.Zero == 0xffffff00)
      Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
                        DAG.getValueType(MVT::i8));
    else if (Known.Zero == 0xffff0000)
      Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
                        DAG.getValueType(MVT::i16));
  }

  return Res;
}
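// Worked example for the ADDCARRY materialisation above: with x = y = 5,
// Sub = 0, USUBO(0, 0) gives 0 with borrow 0, so Carry = 1 and
// ADDCARRY(0, 0, carry=1) = 1. With x = 5, y = 3, Sub = 2, USUBO(0, 2)
// borrows, so Carry = 0 and ADDCARRY(2, -2, 0) = 0 - exactly the 0/1 boolean
// the CMOV computed, but without a branch or IT block.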
SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  switch (N->getOpcode()) {
  default: break;
  case ISD::ABS:        return PerformABSCombine(N, DCI, Subtarget);
  case ARMISD::ADDE:    return PerformADDECombine(N, DCI, Subtarget);
  case ARMISD::UMLAL:   return PerformUMLALCombine(N, DCI.DAG, Subtarget);
  case ISD::ADD:        return PerformADDCombine(N, DCI, Subtarget);
  case ISD::SUB:        return PerformSUBCombine(N, DCI);
  case ISD::MUL:        return PerformMULCombine(N, DCI, Subtarget);
  case ISD::OR:         return PerformORCombine(N, DCI, Subtarget);
  case ISD::XOR:        return PerformXORCombine(N, DCI, Subtarget);
  case ISD::AND:        return PerformANDCombine(N, DCI, Subtarget);
  case ISD::BRCOND:
  case ISD::BR_CC:      return PerformHWLoopCombine(N, DCI, Subtarget);
  case ARMISD::ADDC:
  case ARMISD::SUBC:    return PerformAddcSubcCombine(N, DCI, Subtarget);
  case ARMISD::SUBE:    return PerformAddeSubeCombine(N, DCI, Subtarget);
  case ARMISD::BFI:     return PerformBFICombine(N, DCI);
  case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI, Subtarget);
  case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG);
  case ISD::STORE:      return PerformSTORECombine(N, DCI, Subtarget);
  case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI, Subtarget);
  case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI);
  case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG);
  case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI);
  case ARMISD::VDUP: return PerformVDUPCombine(N, DCI, Subtarget);
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
    return PerformVCVTCombine(N, DCI.DAG, Subtarget);
  case ISD::FDIV:
    return PerformVDIVCombine(N, DCI.DAG, Subtarget);
  case ISD::INTRINSIC_WO_CHAIN: return PerformIntrinsicCombine(N, DCI.DAG);
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
    return PerformShiftCombine(N, DCI, Subtarget);
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND: return PerformExtendCombine(N, DCI.DAG, Subtarget);
  case ARMISD::CMOV: return PerformCMOVCombine(N, DCI.DAG);
  case ARMISD::BRCOND: return PerformBRCONDCombine(N, DCI.DAG);
  case ISD::LOAD: return PerformLOADCombine(N, DCI);
  case ARMISD::VLD1DUP:
  case ARMISD::VLD2DUP:
  case ARMISD::VLD3DUP:
  case ARMISD::VLD4DUP:
    return PerformVLDCombine(N, DCI);
  case ARMISD::BUILD_VECTOR:
    return PerformARMBUILD_VECTORCombine(N, DCI);
  case ARMISD::PREDICATE_CAST:
    return PerformPREDICATE_CASTCombine(N, DCI);
  case ARMISD::SMULWB: {
    unsigned BitWidth = N->getValueType(0).getSizeInBits();
    APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16);
    if (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI))
      return SDValue();
    break;
  }
  case ARMISD::SMULWT: {
    unsigned BitWidth = N->getValueType(0).getSizeInBits();
    APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 16);
    if (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI))
      return SDValue();
    break;
  }
  case ARMISD::SMLALBB: {
    unsigned BitWidth = N->getValueType(0).getSizeInBits();
    APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16);
    if ((SimplifyDemandedBits(N->getOperand(0), DemandedMask, DCI)) ||
        (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI)))
      return SDValue();
    break;
  }
  case ARMISD::SMLALBT: {
    unsigned LowWidth = N->getOperand(0).getValueType().getSizeInBits();
    APInt LowMask = APInt::getLowBitsSet(LowWidth, 16);
    unsigned HighWidth = N->getOperand(1).getValueType().getSizeInBits();
    APInt HighMask = APInt::getHighBitsSet(HighWidth, 16);
    if ((SimplifyDemandedBits(N->getOperand(0), LowMask, DCI)) ||
        (SimplifyDemandedBits(N->getOperand(1), HighMask, DCI)))
      return SDValue();
    break;
  }
  case ARMISD::SMLALTB: {
    unsigned HighWidth = N->getOperand(0).getValueType().getSizeInBits();
    APInt HighMask = APInt::getHighBitsSet(HighWidth, 16);
    unsigned LowWidth = N->getOperand(1).getValueType().getSizeInBits();
    APInt LowMask = APInt::getLowBitsSet(LowWidth, 16);
    if ((SimplifyDemandedBits(N->getOperand(0), HighMask, DCI)) ||
        (SimplifyDemandedBits(N->getOperand(1), LowMask, DCI)))
      return SDValue();
    break;
  }
  case ARMISD::SMLALTT: {
    unsigned BitWidth = N->getValueType(0).getSizeInBits();
    APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 16);
    if ((SimplifyDemandedBits(N->getOperand(0), DemandedMask, DCI)) ||
        (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI)))
      return SDValue();
    break;
  }
  case ISD::INTRINSIC_VOID:
  case ISD::INTRINSIC_W_CHAIN:
    switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
    case Intrinsic::arm_neon_vld1:
    case Intrinsic::arm_neon_vld1x2:
    case Intrinsic::arm_neon_vld1x3:
    case Intrinsic::arm_neon_vld1x4:
    case Intrinsic::arm_neon_vld2:
    case Intrinsic::arm_neon_vld3:
    case Intrinsic::arm_neon_vld4:
    case Intrinsic::arm_neon_vld2lane:
    case Intrinsic::arm_neon_vld3lane:
    case Intrinsic::arm_neon_vld4lane:
    case Intrinsic::arm_neon_vld2dup:
    case Intrinsic::arm_neon_vld3dup:
    case Intrinsic::arm_neon_vld4dup:
    case Intrinsic::arm_neon_vst1:
    case Intrinsic::arm_neon_vst1x2:
    case Intrinsic::arm_neon_vst1x3:
    case Intrinsic::arm_neon_vst1x4:
    case Intrinsic::arm_neon_vst2:
    case Intrinsic::arm_neon_vst3:
    case Intrinsic::arm_neon_vst4:
    case Intrinsic::arm_neon_vst2lane:
    case Intrinsic::arm_neon_vst3lane:
    case Intrinsic::arm_neon_vst4lane:
      return PerformVLDCombine(N, DCI);
    default: break;
    }
    break;
  }
  return SDValue();
}
bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc,
                                                          EVT VT) const {
  return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE);
}
bool ARMTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, unsigned,
                                                       unsigned Alignment,
                                                       MachineMemOperand::Flags,
                                                       bool *Fast) const {
  // Depends what it gets converted into if the type is weird.
  if (!VT.isSimple())
    return false;

  // The AllowsUnaligned flag models the SCTLR.A setting in ARM cpus.
  bool AllowsUnaligned = Subtarget->allowsUnalignedMem();
  auto Ty = VT.getSimpleVT().SimpleTy;

  if (Ty == MVT::i8 || Ty == MVT::i16 || Ty == MVT::i32) {
    // Unaligned access can use (for example) LRDB, LRDH, LDR
    if (AllowsUnaligned) {
      if (Fast)
        *Fast = Subtarget->hasV7Ops();
      return true;
    }
  }

  if (Ty == MVT::f64 || Ty == MVT::v2f64) {
    // For any little-endian targets with neon, we can support unaligned ld/st
    // of D and Q (e.g. {D0,D1}) registers by using vld1.i8/vst1.i8.
    // A big-endian target may also explicitly support unaligned accesses
    if (Subtarget->hasNEON() && (AllowsUnaligned || Subtarget->isLittle())) {
      if (Fast)
        *Fast = true;
      return true;
    }
  }

  if (!Subtarget->hasMVEIntegerOps())
    return false;

  // These are for predicates
  if ((Ty == MVT::v16i1 || Ty == MVT::v8i1 || Ty == MVT::v4i1)) {
    if (Fast)
      *Fast = true;
    return true;
  }

  // These are for truncated stores/narrowing loads. They are fine so long as
  // the alignment is at least the size of the item being loaded
  if ((Ty == MVT::v4i8 || Ty == MVT::v8i8 || Ty == MVT::v4i16) &&
      Alignment >= VT.getScalarSizeInBits() / 8) {
    if (Fast)
      *Fast = true;
    return true;
  }

  // In little-endian MVE, the store instructions VSTRB.U8, VSTRH.U16 and
  // VSTRW.U32 all store the vector register in exactly the same format, and
  // differ only in the range of their immediate offset field and the required
  // alignment. So there is always a store that can be used, regardless of
  // actual type.
  //
  // For big endian, that is not the case. But can still emit a (VSTRB.U8;
  // VREV64.8) pair and get the same effect. This will likely be better than
  // aligning the vector through the stack.
  if (Ty == MVT::v16i8 || Ty == MVT::v8i16 || Ty == MVT::v8f16 ||
      Ty == MVT::v4i32 || Ty == MVT::v4f32 || Ty == MVT::v2i64 ||
      Ty == MVT::v2f64) {
    if (Fast)
      *Fast = true;
    return true;
  }

  return false;
}
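// For example, on a little-endian MVE target a query about a 2-byte-aligned
// MVT::v4i32 access is answered 'true' here (with *Fast set), because the
// access can always be emitted as VSTRB.8/VSTRH.16 rather than being
// scalarised. (Illustrative; the actual instruction chosen depends on the
// surrounding DAG.)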
static bool memOpAlign(unsigned DstAlign, unsigned SrcAlign,
                       unsigned AlignCheck) {
  return ((SrcAlign == 0 || SrcAlign % AlignCheck == 0) &&
          (DstAlign == 0 || DstAlign % AlignCheck == 0));
}

EVT ARMTargetLowering::getOptimalMemOpType(
    uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset,
    bool ZeroMemset, bool MemcpyStrSrc,
    const AttributeList &FuncAttributes) const {
  // See if we can use NEON instructions for this...
  if ((!IsMemset || ZeroMemset) && Subtarget->hasNEON() &&
      !FuncAttributes.hasFnAttribute(Attribute::NoImplicitFloat)) {
    bool Fast;
    if (Size >= 16 &&
        (memOpAlign(SrcAlign, DstAlign, 16) ||
         (allowsMisalignedMemoryAccesses(MVT::v2f64, 0, 1,
                                         MachineMemOperand::MONone, &Fast) &&
          Fast))) {
      return MVT::v2f64;
    } else if (Size >= 8 &&
               (memOpAlign(SrcAlign, DstAlign, 8) ||
                (allowsMisalignedMemoryAccesses(
                     MVT::f64, 0, 1, MachineMemOperand::MONone, &Fast) &&
                 Fast))) {
      return MVT::f64;
    }
  }

  // Let the target-independent logic figure it out.
  return MVT::Other;
}
// 64-bit integers are split into their high and low parts and held in two
// different registers, so the trunc is free since the low register can just
// be used.
bool ARMTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
  if (!SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
    return false;
  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
  unsigned DestBits = DstTy->getPrimitiveSizeInBits();
  return (SrcBits == 64 && DestBits == 32);
}

bool ARMTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
  if (SrcVT.isVector() || DstVT.isVector() || !SrcVT.isInteger() ||
      !DstVT.isInteger())
    return false;
  unsigned SrcBits = SrcVT.getSizeInBits();
  unsigned DestBits = DstVT.getSizeInBits();
  return (SrcBits == 64 && DestBits == 32);
}
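// Illustrative IR: 'trunc i64 %x to i32' is free here because %x already
// lives in a GPR pair and the truncated value is simply the low register of
// that pair; no instruction needs to be emitted.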
bool ARMTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  if (Val.getOpcode() != ISD::LOAD)
    return false;

  EVT VT1 = Val.getValueType();
  if (!VT1.isSimple() || !VT1.isInteger() ||
      !VT2.isSimple() || !VT2.isInteger())
    return false;

  switch (VT1.getSimpleVT().SimpleTy) {
  default: break;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
    // 8-bit and 16-bit loads implicitly zero-extend to 32-bits.
    return true;
  }

  return false;
}
bool ARMTargetLowering::isFNegFree(EVT VT) const {
  if (!VT.isSimple())
    return false;

  // There are quite a few FP16 instructions (e.g. VNMLA, VNMLS, etc.) that
  // negate values directly (fneg is free). So, we don't want to let the DAG
  // combiner rewrite fneg into xors and some other instructions. For f16 and
  // FullFP16 argument passing, some bitcast nodes may be introduced,
  // triggering this DAG combine rewrite, so we are avoiding that with this.
  switch (VT.getSimpleVT().SimpleTy) {
  default: break;
  case MVT::f16:
    return Subtarget->hasFullFP16();
  }

  return false;
}
/// Check if Ext1 and Ext2 are extends of the same type, doubling the bitwidth
/// of the vector elements.
static bool areExtractExts(Value *Ext1, Value *Ext2) {
  auto areExtDoubled = [](Instruction *Ext) {
    return Ext->getType()->getScalarSizeInBits() ==
           2 * Ext->getOperand(0)->getType()->getScalarSizeInBits();
  };

  if (!match(Ext1, m_ZExtOrSExt(m_Value())) ||
      !match(Ext2, m_ZExtOrSExt(m_Value())) ||
      !areExtDoubled(cast<Instruction>(Ext1)) ||
      !areExtDoubled(cast<Instruction>(Ext2)))
    return false;

  return true;
}
/// Check if sinking \p I's operands to I's basic block is profitable, because
/// the operands can be folded into a target instruction, e.g.
/// sext/zext can be folded into vsubl.
bool ARMTargetLowering::shouldSinkOperands(Instruction *I,
                                           SmallVectorImpl<Use *> &Ops) const {
  if (!I->getType()->isVectorTy())
    return false;

  if (Subtarget->hasNEON()) {
    switch (I->getOpcode()) {
    case Instruction::Sub:
    case Instruction::Add: {
      if (!areExtractExts(I->getOperand(0), I->getOperand(1)))
        return false;
      Ops.push_back(&I->getOperandUse(0));
      Ops.push_back(&I->getOperandUse(1));
      return true;
    }
    default:
      return false;
    }
  }

  if (!Subtarget->hasMVEIntegerOps())
    return false;

  auto IsSinker = [](Instruction *I, int Operand) {
    switch (I->getOpcode()) {
    case Instruction::Add:
    case Instruction::Mul:
      return true;
    case Instruction::Sub:
      return Operand == 1;
    default:
      return false;
    }
  };

  int Op = 0;
  if (!isa<ShuffleVectorInst>(I->getOperand(Op)))
    Op = 1;
  if (!IsSinker(I, Op))
    return false;
  if (!match(I->getOperand(Op),
             m_ShuffleVector(m_InsertElement(m_Undef(), m_Value(), m_ZeroInt()),
                             m_Undef(), m_Zero()))) {
    return false;
  }

  Instruction *Shuffle = cast<Instruction>(I->getOperand(Op));
  // All uses of the shuffle should be sunk to avoid duplicating it across gpr
  // and vector registers
  for (Use &U : Shuffle->uses()) {
    Instruction *Insn = cast<Instruction>(U.getUser());
    if (!IsSinker(Insn, U.getOperandNo()))
      return false;
  }

  Ops.push_back(&Shuffle->getOperandUse(0));
  Ops.push_back(&I->getOperandUse(Op));
  return true;
}
bool ARMTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
  EVT VT = ExtVal.getValueType();

  if (!isTypeLegal(VT))
    return false;

  // Don't create a loadext if we can fold the extension into a wide/long
  // instruction.
  // If there's more than one user instruction, the loadext is desirable no
  // matter what. There can be two uses by the same instruction.
  if (ExtVal->use_empty() ||
      !ExtVal->use_begin()->isOnlyUserOf(ExtVal.getNode()))
    return true;

  SDNode *U = *ExtVal->use_begin();
  if ((U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB ||
       U->getOpcode() == ISD::SHL || U->getOpcode() == ARMISD::VSHLIMM))
    return false;

  return true;
}
bool ARMTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;

  if (!isTypeLegal(EVT::getEVT(Ty1)))
    return false;

  assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");

  // Assuming the caller doesn't have a zeroext or signext return parameter,
  // truncation all the way down to i1 is valid.
  return true;
}
int ARMTargetLowering::getScalingFactorCost(const DataLayout &DL,
                                            const AddrMode &AM, Type *Ty,
                                            unsigned AS) const {
  if (isLegalAddressingMode(DL, AM, Ty, AS)) {
    if (Subtarget->hasFPAO())
      return AM.Scale < 0 ? 1 : 0; // positive offsets execute faster
    return 0;
  }
  return -1;
}
static bool isLegalT1AddressImmediate(int64_t V, EVT VT) {
  if (V < 0)
    return false;

  unsigned Scale = 1;
  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::i1:
  case MVT::i8:
    // Scale == 1;
    break;
  case MVT::i16:
    // Scale == 2;
    Scale = 2;
    break;
  default:
    // On thumb1 we load most things (i32, i64, floats, etc) with a LDR
    // which has a scale of 4.
    Scale = 4;
    break;
  }

  if ((V & (Scale - 1)) != 0)
    return false;
  return isUInt<5>(V / Scale);
}
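// For example, for an i32 access the scale is 4, so the legal Thumb1 offsets
// are the multiples of 4 in [0, 124] - the 5-bit scaled immediate of the
// Thumb1 LDR/STR encoding.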
static bool isLegalT2AddressImmediate(int64_t V, EVT VT,
                                      const ARMSubtarget *Subtarget) {
  if (!VT.isInteger() && !VT.isFloatingPoint())
    return false;
  if (VT.isVector() && Subtarget->hasNEON())
    return false;
  if (VT.isVector() && VT.isFloatingPoint() && Subtarget->hasMVEIntegerOps() &&
      !Subtarget->hasMVEFloatOps())
    return false;

  bool IsNeg = false;
  if (V < 0) {
    IsNeg = true;
    V = -V;
  }

  unsigned NumBytes = std::max(VT.getSizeInBits() / 8, 1U);

  // MVE: size * imm7
  if (VT.isVector() && Subtarget->hasMVEIntegerOps()) {
    switch (VT.getSimpleVT().getVectorElementType().SimpleTy) {
    case MVT::i32:
    case MVT::f32:
      return isShiftedUInt<7,2>(V);
    case MVT::i16:
    case MVT::f16:
      return isShiftedUInt<7,1>(V);
    case MVT::i8:
      return isUInt<7>(V);
    default:
      return false;
    }
  }

  // half VLDR: 2 * imm8
  if (VT.isFloatingPoint() && NumBytes == 2 && Subtarget->hasFPRegs16())
    return isShiftedUInt<8, 1>(V);
  // VLDR and LDRD: 4 * imm8
  if ((VT.isFloatingPoint() && Subtarget->hasVFP2Base()) || NumBytes == 8)
    return isShiftedUInt<8, 2>(V);

  if (NumBytes == 1 || NumBytes == 2 || NumBytes == 4) {
    // + imm12 or - imm8
    if (IsNeg)
      return isUInt<8>(V);
    return isUInt<12>(V);
  }

  return false;
}
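// For example, an MVE v4i32 access takes a 7-bit immediate scaled by 4, i.e.
// offsets that are multiples of 4 with magnitude below 512 (the sign having
// been folded into IsNeg above), which is exactly what isShiftedUInt<7,2>
// checks.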
/// isLegalAddressImmediate - Return true if the integer value can be used
/// as the offset of the target addressing mode for load / store of the
/// given type.
static bool isLegalAddressImmediate(int64_t V, EVT VT,
                                    const ARMSubtarget *Subtarget) {
  if (V == 0)
    return true;

  if (!VT.isSimple())
    return false;

  if (Subtarget->isThumb1Only())
    return isLegalT1AddressImmediate(V, VT);
  else if (Subtarget->isThumb2())
    return isLegalT2AddressImmediate(V, VT, Subtarget);

  // ARM mode.
  if (V < 0)
    V = -V;
  switch (VT.getSimpleVT().SimpleTy) {
  default: return false;
  case MVT::i1:
  case MVT::i8:
  case MVT::i32:
    // +- imm12
    return isUInt<12>(V);
  case MVT::i16:
    // +- imm8
    return isUInt<8>(V);
  case MVT::f32:
  case MVT::f64:
    if (!Subtarget->hasVFP2Base()) // FIXME: NEON?
      return false;
    return isShiftedUInt<8, 2>(V);
  }
}
bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM,
                                                      EVT VT) const {
  int Scale = AM.Scale;
  if (Scale < 0)
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  default: return false;
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    if (Scale == 1)
      return true;
    // r + r << imm
    Scale = Scale & ~1;
    return Scale == 2 || Scale == 4 || Scale == 8;
  case MVT::i64:
    // FIXME: What are we trying to model here? ldrd doesn't have an r + r
    // version in Thumb mode.
    // r + r
    if (Scale == 1)
      return true;
    // r * 2 (this can be lowered to r + r).
    if (!AM.HasBaseReg && Scale == 2)
      return true;
    return false;
  case MVT::isVoid:
    // Note, we allow "void" uses (basically, uses that aren't loads or
    // stores), because arm allows folding a scale into many arithmetic
    // operations. This should be made more precise and revisited later.

    // Allow r << imm, but the imm has to be a multiple of two.
    if (Scale & 1) return false;
    return isPowerOf2_32(Scale);
  }
}
bool ARMTargetLowering::isLegalT1ScaledAddressingMode(const AddrMode &AM,
                                                      EVT VT) const {
  const int Scale = AM.Scale;

  // Negative scales are not supported in Thumb1.
  if (Scale < 0)
    return false;

  // Thumb1 addressing modes do not support register scaling excepting the
  // following cases:
  // 1. Scale == 1 means no scaling.
  // 2. Scale == 2 this can be lowered to r + r if there is no base register.
  return (Scale == 1) || (!AM.HasBaseReg && Scale == 2);
}
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool ARMTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
                                              unsigned AS, Instruction *I) const {
  EVT VT = getValueType(DL, Ty, true);
  if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget))
    return false;

  // Can never fold addr of global into load/store.
  if (AM.BaseGV)
    return false;

  switch (AM.Scale) {
  case 0:  // no scale reg, must be "r+i" or "r", or "i".
    break;
  default:
    // ARM doesn't support any R+R*scale+imm addr modes.
    if (AM.BaseOffs)
      return false;

    if (!VT.isSimple())
      return false;

    if (Subtarget->isThumb1Only())
      return isLegalT1ScaledAddressingMode(AM, VT);

    if (Subtarget->isThumb2())
      return isLegalT2ScaledAddressingMode(AM, VT);

    int Scale = AM.Scale;
    switch (VT.getSimpleVT().SimpleTy) {
    default: return false;
    case MVT::i1:
    case MVT::i8:
    case MVT::i32:
      if (Scale < 0) Scale = -Scale;
      if (Scale == 1)
        return true;
      // r + r << imm
      return isPowerOf2_32(Scale & ~1);
    case MVT::i16:
    case MVT::i64:
      // r +/- r
      if (Scale == 1 || (AM.HasBaseReg && Scale == -1))
        return true;
      // r * 2 (this can be lowered to r + r).
      if (!AM.HasBaseReg && Scale == 2)
        return true;
      return false;

    case MVT::isVoid:
      // Note, we allow "void" uses (basically, uses that aren't loads or
      // stores), because arm allows folding a scale into many arithmetic
      // operations. This should be made more precise and revisited later.

      // Allow r << imm, but the imm has to be a multiple of two.
      if (Scale & 1) return false;
      return isPowerOf2_32(Scale);
    }
  }
  return true;
}
/// isLegalICmpImmediate - Return true if the specified immediate is legal
/// icmp immediate, that is the target has icmp instructions which can compare
/// a register against the immediate without having to materialize the
/// immediate into a register.
bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  // Thumb2 and ARM modes can use cmn for negative immediates.
  if (!Subtarget->isThumb())
    return ARM_AM::getSOImmVal((uint32_t)Imm) != -1 ||
           ARM_AM::getSOImmVal(-(uint32_t)Imm) != -1;
  if (Subtarget->isThumb2())
    return ARM_AM::getT2SOImmVal((uint32_t)Imm) != -1 ||
           ARM_AM::getT2SOImmVal(-(uint32_t)Imm) != -1;
  // Thumb1 doesn't have cmn, and only 8-bit immediates.
  return Imm >= 0 && Imm <= 255;
}
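// For example, 'icmp eq i32 %x, -42' reports a legal immediate in ARM/Thumb2
// mode even though CMP cannot encode -42, because the comparison can be
// emitted as 'cmn rX, #42' instead.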
/// isLegalAddImmediate - Return true if the specified immediate is a legal add
/// *or sub* immediate, that is the target has add or sub instructions which can
/// add a register with the immediate without having to materialize the
/// immediate into a register.
bool ARMTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  // Same encoding for add/sub, just flip the sign.
  int64_t AbsImm = std::abs(Imm);
  if (!Subtarget->isThumb())
    return ARM_AM::getSOImmVal(AbsImm) != -1;
  if (Subtarget->isThumb2())
    return ARM_AM::getT2SOImmVal(AbsImm) != -1;
  // Thumb1 only has 8-bit unsigned immediate.
  return AbsImm >= 0 && AbsImm <= 255;
}
static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT,
                                      bool isSEXTLoad, SDValue &Base,
                                      SDValue &Offset, bool &isInc,
                                      SelectionDAG &DAG) {
  if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
    return false;

  if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) {
    // AddressingMode 3
    Base = Ptr->getOperand(0);
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
      int RHSC = (int)RHS->getZExtValue();
      if (RHSC < 0 && RHSC > -256) {
        assert(Ptr->getOpcode() == ISD::ADD);
        isInc = false;
        Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
        return true;
      }
    }
    isInc = (Ptr->getOpcode() == ISD::ADD);
    Offset = Ptr->getOperand(1);
    return true;
  } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) {
    // AddressingMode 2
    if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
      int RHSC = (int)RHS->getZExtValue();
      if (RHSC < 0 && RHSC > -0x1000) {
        assert(Ptr->getOpcode() == ISD::ADD);
        isInc = false;
        Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
        Base = Ptr->getOperand(0);
        return true;
      }
    }

    if (Ptr->getOpcode() == ISD::ADD) {
      isInc = true;
      ARM_AM::ShiftOpc ShOpcVal =
        ARM_AM::getShiftOpcForNode(Ptr->getOperand(0).getOpcode());
      if (ShOpcVal != ARM_AM::no_shift) {
        Base = Ptr->getOperand(1);
        Offset = Ptr->getOperand(0);
      } else {
        Base = Ptr->getOperand(0);
        Offset = Ptr->getOperand(1);
      }
      return true;
    }

    isInc = (Ptr->getOpcode() == ISD::ADD);
    Base = Ptr->getOperand(0);
    Offset = Ptr->getOperand(1);
    return true;
  }

  // FIXME: Use VLDM / VSTM to emulate indexed FP load / store.
  return false;
}
static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT,
                                     bool isSEXTLoad, SDValue &Base,
                                     SDValue &Offset, bool &isInc,
                                     SelectionDAG &DAG) {
  if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
    return false;

  Base = Ptr->getOperand(0);
  if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
    int RHSC = (int)RHS->getZExtValue();
    if (RHSC < 0 && RHSC > -0x100) { // 8 bits.
      assert(Ptr->getOpcode() == ISD::ADD);
      isInc = false;
      Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
      return true;
    } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero.
      isInc = Ptr->getOpcode() == ISD::ADD;
      Offset = DAG.getConstant(RHSC, SDLoc(Ptr), RHS->getValueType(0));
      return true;
    }
  }

  return false;
}
static bool getMVEIndexedAddressParts(SDNode *Ptr, EVT VT, unsigned Align,
                                      bool isSEXTLoad, bool isLE, SDValue &Base,
                                      SDValue &Offset, bool &isInc,
                                      SelectionDAG &DAG) {
  if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
    return false;
  if (!isa<ConstantSDNode>(Ptr->getOperand(1)))
    return false;

  ConstantSDNode *RHS = cast<ConstantSDNode>(Ptr->getOperand(1));
  int RHSC = (int)RHS->getZExtValue();

  auto IsInRange = [&](int RHSC, int Limit, int Scale) {
    if (RHSC < 0 && RHSC > -Limit * Scale && RHSC % Scale == 0) {
      assert(Ptr->getOpcode() == ISD::ADD);
      isInc = false;
      Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
      return true;
    } else if (RHSC > 0 && RHSC < Limit * Scale && RHSC % Scale == 0) {
      isInc = Ptr->getOpcode() == ISD::ADD;
      Offset = DAG.getConstant(RHSC, SDLoc(Ptr), RHS->getValueType(0));
      return true;
    }
    return false;
  };

  // Try to find a matching instruction based on s/zext, Alignment, Offset and
  // (in BE) size.
  Base = Ptr->getOperand(0);
  if (VT == MVT::v4i16) {
    if (Align >= 2 && IsInRange(RHSC, 0x80, 2))
      return true;
  } else if (VT == MVT::v4i8 || VT == MVT::v8i8) {
    if (IsInRange(RHSC, 0x80, 1))
      return true;
  } else if (Align >= 4 && (isLE || VT == MVT::v4i32 || VT == MVT::v4f32) &&
             IsInRange(RHSC, 0x80, 4))
    return true;
  else if (Align >= 2 && (isLE || VT == MVT::v8i16 || VT == MVT::v8f16) &&
           IsInRange(RHSC, 0x80, 2))
    return true;
  else if ((isLE || VT == MVT::v16i8) && IsInRange(RHSC, 0x80, 1))
    return true;
  return false;
}
/// getPreIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if the node's address
/// can be legally represented as pre-indexed load / store address.
bool
ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                             SDValue &Offset,
                                             ISD::MemIndexedMode &AM,
                                             SelectionDAG &DAG) const {
  if (Subtarget->isThumb1Only())
    return false;

  EVT VT;
  SDValue Ptr;
  unsigned Align;
  bool isSEXTLoad = false;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT = LD->getMemoryVT();
    Align = LD->getAlignment();
    isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT = ST->getMemoryVT();
    Align = ST->getAlignment();
  } else
    return false;

  bool isInc;
  bool isLegal = false;
  if (VT.isVector())
    isLegal = Subtarget->hasMVEIntegerOps() &&
              getMVEIndexedAddressParts(Ptr.getNode(), VT, Align, isSEXTLoad,
                                        Subtarget->isLittle(), Base, Offset,
                                        isInc, DAG);
  else {
    if (Subtarget->isThumb2())
      isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
                                         Offset, isInc, DAG);
    else
      isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
                                          Offset, isInc, DAG);
  }
  if (!isLegal)
    return false;

  AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC;
  return true;
}
/// getPostIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if this node can be
/// combined with a load / store to form a post-indexed load / store.
bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                                   SDValue &Base,
                                                   SDValue &Offset,
                                                   ISD::MemIndexedMode &AM,
                                                   SelectionDAG &DAG) const {
  EVT VT;
  SDValue Ptr;
  unsigned Align;
  bool isSEXTLoad = false, isNonExt;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    VT = LD->getMemoryVT();
    Ptr = LD->getBasePtr();
    Align = LD->getAlignment();
    isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
    isNonExt = LD->getExtensionType() == ISD::NON_EXTLOAD;
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    VT = ST->getMemoryVT();
    Ptr = ST->getBasePtr();
    Align = ST->getAlignment();
    isNonExt = !ST->isTruncatingStore();
  } else
    return false;

  if (Subtarget->isThumb1Only()) {
    // Thumb-1 can do a limited post-inc load or store as an updating LDM. It
    // must be non-extending/truncating, i32, with an offset of 4.
    assert(Op->getValueType(0) == MVT::i32 && "Non-i32 post-inc op?!");
    if (Op->getOpcode() != ISD::ADD || !isNonExt)
      return false;
    auto *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1));
    if (!RHS || RHS->getZExtValue() != 4)
      return false;

    Offset = Op->getOperand(1);
    Base = Op->getOperand(0);
    AM = ISD::POST_INC;
    return true;
  }

  bool isInc;
  bool isLegal = false;
  if (VT.isVector())
    isLegal = Subtarget->hasMVEIntegerOps() &&
              getMVEIndexedAddressParts(Op, VT, Align, isSEXTLoad,
                                        Subtarget->isLittle(), Base, Offset,
                                        isInc, DAG);
  else {
    if (Subtarget->isThumb2())
      isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
                                         isInc, DAG);
    else
      isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
                                          isInc, DAG);
  }
  if (!isLegal)
    return false;

  if (Ptr != Base) {
    // Swap base ptr and offset to catch more post-index load / store when
    // it's legal. In Thumb2 mode, offset must be an immediate.
    if (Ptr == Offset && Op->getOpcode() == ISD::ADD &&
        !Subtarget->isThumb2())
      std::swap(Base, Offset);

    // Post-indexed load / store update the base pointer.
    if (Ptr != Base)
      return false;
  }

  AM = isInc ? ISD::POST_INC : ISD::POST_DEC;
  return true;
}
void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                      KnownBits &Known,
                                                      const APInt &DemandedElts,
                                                      const SelectionDAG &DAG,
                                                      unsigned Depth) const {
  unsigned BitWidth = Known.getBitWidth();
  Known.resetAll();
  switch (Op.getOpcode()) {
  default: break;
  case ARMISD::ADDC:
  case ARMISD::ADDE:
  case ARMISD::SUBC:
  case ARMISD::SUBE:
    // Special cases when we convert a carry to a boolean.
    if (Op.getResNo() == 0) {
      SDValue LHS = Op.getOperand(0);
      SDValue RHS = Op.getOperand(1);
      // (ADDE 0, 0, C) will give us a single bit.
      if (Op->getOpcode() == ARMISD::ADDE && isNullConstant(LHS) &&
          isNullConstant(RHS)) {
        Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
        return;
      }
    }
    break;
  case ARMISD::CMOV: {
    // Bits are known zero/one if known on the LHS and RHS.
    Known = DAG.computeKnownBits(Op.getOperand(0), Depth+1);
    if (Known.isUnknown())
      return;

    KnownBits KnownRHS = DAG.computeKnownBits(Op.getOperand(1), Depth+1);
    Known.Zero &= KnownRHS.Zero;
    Known.One  &= KnownRHS.One;
    return;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    ConstantSDNode *CN = cast<ConstantSDNode>(Op->getOperand(1));
    Intrinsic::ID IntID = static_cast<Intrinsic::ID>(CN->getZExtValue());
    switch (IntID) {
    default: return;
    case Intrinsic::arm_ldaex:
    case Intrinsic::arm_ldrex: {
      EVT VT = cast<MemIntrinsicSDNode>(Op)->getMemoryVT();
      unsigned MemBits = VT.getScalarSizeInBits();
      Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
      return;
    }
    }
  }
  case ARMISD::BFI: {
    // Conservatively, we can recurse down the first operand
    // and just mask out all affected bits.
    Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);

    // The operand to BFI is already a mask suitable for removing the bits it
    // sets.
    ConstantSDNode *CI = cast<ConstantSDNode>(Op.getOperand(2));
    const APInt &Mask = CI->getAPIntValue();
    Known.Zero &= Mask;
    Known.One &= Mask;
    return;
  }
  case ARMISD::VGETLANEs:
  case ARMISD::VGETLANEu: {
    const SDValue &SrcSV = Op.getOperand(0);
    EVT VecVT = SrcSV.getValueType();
    assert(VecVT.isVector() && "VGETLANE expected a vector type");
    const unsigned NumSrcElts = VecVT.getVectorNumElements();
    ConstantSDNode *Pos = cast<ConstantSDNode>(Op.getOperand(1).getNode());
    assert(Pos->getAPIntValue().ult(NumSrcElts) &&
           "VGETLANE index out of bounds");
    unsigned Idx = Pos->getZExtValue();
    APInt DemandedElt = APInt::getOneBitSet(NumSrcElts, Idx);
    Known = DAG.computeKnownBits(SrcSV, DemandedElt, Depth + 1);

    EVT VT = Op.getValueType();
    const unsigned DstSz = VT.getScalarSizeInBits();
    const unsigned SrcSz = VecVT.getVectorElementType().getSizeInBits();
    (void)SrcSz;
    assert(SrcSz == Known.getBitWidth());
    assert(DstSz > SrcSz);
    if (Op.getOpcode() == ARMISD::VGETLANEs)
      Known = Known.sext(DstSz);
    else
      Known = Known.zext(DstSz, true /* extended bits are known zero */);
    assert(DstSz == Known.getBitWidth());
    break;
  }
  }
}
bool
ARMTargetLowering::targetShrinkDemandedConstant(SDValue Op,
                                                const APInt &DemandedAPInt,
                                                TargetLoweringOpt &TLO) const {
  // Delay optimization, so we don't have to deal with illegal types, or block
  // optimizations.
  if (!TLO.LegalOps)
    return false;

  // Only optimize AND for now.
  if (Op.getOpcode() != ISD::AND)
    return false;

  EVT VT = Op.getValueType();

  // Ignore vectors.
  if (VT.isVector())
    return false;

  assert(VT == MVT::i32 && "Unexpected integer type");

  // Make sure the RHS really is a constant.
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
  if (!C)
    return false;

  unsigned Mask = C->getZExtValue();

  unsigned Demanded = DemandedAPInt.getZExtValue();
  unsigned ShrunkMask = Mask & Demanded;
  unsigned ExpandedMask = Mask | ~Demanded;

  // If the mask is all zeros, let the target-independent code replace the
  // result with zero.
  if (ShrunkMask == 0)
    return false;

  // If the mask is all ones, erase the AND. (Currently, the target-independent
  // code won't do this, so we have to do it explicitly to avoid an infinite
  // loop in obscure cases.)
  if (ExpandedMask == ~0U)
    return TLO.CombineTo(Op, Op.getOperand(0));

  auto IsLegalMask = [ShrunkMask, ExpandedMask](unsigned Mask) -> bool {
    return (ShrunkMask & Mask) == ShrunkMask && (~ExpandedMask & Mask) == 0;
  };
  auto UseMask = [Mask, Op, VT, &TLO](unsigned NewMask) -> bool {
    if (NewMask == Mask)
      return true;
    SDLoc DL(Op);
    SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT);
    SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
    return TLO.CombineTo(Op, NewOp);
  };

  // Prefer uxtb mask.
  if (IsLegalMask(0xFF))
    return UseMask(0xFF);

  // Prefer uxth mask.
  if (IsLegalMask(0xFFFF))
    return UseMask(0xFFFF);

  // [1, 255] is Thumb1 movs+ands, legal immediate for ARM/Thumb2.
  // FIXME: Prefer a contiguous sequence of bits for other optimizations.
  if (ShrunkMask < 256)
    return UseMask(ShrunkMask);

  // [-256, -2] is Thumb1 movs+bics, legal immediate for ARM/Thumb2.
  // FIXME: Prefer a contiguous sequence of bits for other optimizations.
  if ((int)ExpandedMask <= -2 && (int)ExpandedMask >= -256)
    return UseMask(ExpandedMask);

  // Potential improvements:
  //
  // We could try to recognize lsls+lsrs or lsrs+lsls pairs here.
  // We could try to prefer Thumb1 immediates which can be lowered to a
  // two-instruction sequence.
  // We could try to recognize more legal ARM/Thumb2 immediates here.

  return false;
}
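// Worked example: for (and X, 0x1FF) where only the low 8 bits are demanded,
// ShrunkMask = 0xFF and ExpandedMask = 0xFFFFFFFF, so IsLegalMask(0xFF) holds
// and the node is rewritten to (and X, 0xFF), which later selects to a single
// UXTB instead of needing the wider immediate.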
//===----------------------------------------------------------------------===//
//                           ARM Inline Assembly Support
//===----------------------------------------------------------------------===//

bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const {
  // Looking for "rev" which is V6+.
  if (!Subtarget->hasV6Ops())
    return false;

  InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
  std::string AsmStr = IA->getAsmString();
  SmallVector<StringRef, 4> AsmPieces;
  SplitString(AsmStr, AsmPieces, ";\n");

  switch (AsmPieces.size()) {
  default: return false;
  case 1:
    AsmStr = AsmPieces[0];
    AsmPieces.clear();
    SplitString(AsmStr, AsmPieces, " \t,");

    // rev $0, $1
    if (AsmPieces.size() == 3 &&
        AsmPieces[0] == "rev" && AsmPieces[1] == "$0" && AsmPieces[2] == "$1" &&
        IA->getConstraintString().compare(0, 4, "=l,l") == 0) {
      IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
      if (Ty && Ty->getBitWidth() == 32)
        return IntrinsicLowering::LowerToByteSwap(CI);
    }
    break;
  }

  return false;
}
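// For example (illustrative C, assuming the "=l,l" constraint string checked
// above):
//   int out;
//   asm("rev %0, %1" : "=l"(out) : "l"(in));
// is recognised here and lowered to the llvm.bswap.i32 intrinsic, so the
// optimizer can reason about it and the final REV is emitted without an
// inline-asm barrier.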
const char *ARMTargetLowering::LowerXConstraint(EVT ConstraintVT) const {
  // At this point, we have to lower this constraint to something else, so we
  // lower it to an "r" or "w". However, by doing this we will force the result
  // to be in register, while the X constraint is much more permissive.
  //
  // Although we are correct (we are free to emit anything, without
  // constraints), we might break use cases that would expect us to be more
  // efficient and emit something else.
  if (!Subtarget->hasVFP2Base())
    return "r";
  if (ConstraintVT.isFloatingPoint())
    return "w";
  if (ConstraintVT.isVector() && Subtarget->hasNEON() &&
     (ConstraintVT.getSizeInBits() == 64 ||
      ConstraintVT.getSizeInBits() == 128))
    return "w";

  return "r";
}
/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
ARMTargetLowering::ConstraintType
ARMTargetLowering::getConstraintType(StringRef Constraint) const {
  unsigned S = Constraint.size();
  if (S == 1) {
    switch (Constraint[0]) {
    default:  break;
    case 'l': return C_RegisterClass;
    case 'w': return C_RegisterClass;
    case 'h': return C_RegisterClass;
    case 'x': return C_RegisterClass;
    case 't': return C_RegisterClass;
    case 'j': return C_Immediate; // Constant for movw.
    // An address with a single base register. Due to the way we
    // currently handle addresses it is the same as an 'r' memory constraint.
    case 'Q': return C_Memory;
    }
  } else if (S == 2) {
    switch (Constraint[0]) {
    default: break;
    case 'T': return C_RegisterClass;
    // All 'U+' constraints are addresses.
    case 'U': return C_Memory;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}
/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
ARMTargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'l':
    if (type->isIntegerTy()) {
      if (Subtarget->isThumb())
        weight = CW_SpecificReg;
      else
        weight = CW_Register;
    }
    break;
  case 'w':
    if (type->isFloatingPointTy())
      weight = CW_Register;
    break;
  }
  return weight;
}
using RCPair = std::pair<unsigned, const TargetRegisterClass *>;

RCPair ARMTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
  switch (Constraint.size()) {
  case 1:
    // GCC ARM Constraint Letters
    switch (Constraint[0]) {
    case 'l': // Low regs or general regs.
      if (Subtarget->isThumb())
        return RCPair(0U, &ARM::tGPRRegClass);
      return RCPair(0U, &ARM::GPRRegClass);
    case 'h': // High regs or no regs.
      if (Subtarget->isThumb())
        return RCPair(0U, &ARM::hGPRRegClass);
      break;
    case 'r':
      if (Subtarget->isThumb1Only())
        return RCPair(0U, &ARM::tGPRRegClass);
      return RCPair(0U, &ARM::GPRRegClass);
    case 'w':
      if (VT == MVT::Other)
        break;
      if (VT == MVT::f32)
        return RCPair(0U, &ARM::SPRRegClass);
      if (VT.getSizeInBits() == 64)
        return RCPair(0U, &ARM::DPRRegClass);
      if (VT.getSizeInBits() == 128)
        return RCPair(0U, &ARM::QPRRegClass);
      break;
    case 'x':
      if (VT == MVT::Other)
        break;
      if (VT == MVT::f32)
        return RCPair(0U, &ARM::SPR_8RegClass);
      if (VT.getSizeInBits() == 64)
        return RCPair(0U, &ARM::DPR_8RegClass);
      if (VT.getSizeInBits() == 128)
        return RCPair(0U, &ARM::QPR_8RegClass);
      break;
    case 't':
      if (VT == MVT::Other)
        break;
      if (VT == MVT::f32 || VT == MVT::i32)
        return RCPair(0U, &ARM::SPRRegClass);
      if (VT.getSizeInBits() == 64)
        return RCPair(0U, &ARM::DPR_VFP2RegClass);
      if (VT.getSizeInBits() == 128)
        return RCPair(0U, &ARM::QPR_VFP2RegClass);
      break;
    }
    break;

  case 2:
    if (Constraint[0] == 'T') {
      switch (Constraint[1]) {
      default:
        break;
      case 'e':
        return RCPair(0U, &ARM::tGPREvenRegClass);
      case 'o':
        return RCPair(0U, &ARM::tGPROddRegClass);
      }
    }
    break;

  default:
    break;
  }

  if (StringRef("{cc}").equals_lower(Constraint))
    return std::make_pair(unsigned(ARM::CPSR), &ARM::CCRRegClass);

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.
void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue>&Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result;

  // Currently only support length 1 constraints.
  if (Constraint.length() != 1) return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default: break;
  case 'j':
  case 'I': case 'J': case 'K': case 'L':
  case 'M': case 'N': case 'O':
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
    if (!C)
      return;

    int64_t CVal64 = C->getSExtValue();
    int CVal = (int) CVal64;
    // None of these constraints allow values larger than 32 bits.  Check
    // that the value fits in an int.
    if (CVal != CVal64)
      return;

    switch (ConstraintLetter) {
      case 'j':
        // Constant suitable for movw, must be between 0 and
        // 65535.
        if (Subtarget->hasV6T2Ops() || (Subtarget->hasV8MBaselineOps()))
          if (CVal >= 0 && CVal <= 65535)
            break;
        return;
      case 'I':
        if (Subtarget->isThumb1Only()) {
          // This must be a constant between 0 and 255, for ADD
          // immediates.
          if (CVal >= 0 && CVal <= 255)
            break;
        } else if (Subtarget->isThumb2()) {
          // A constant that can be used as an immediate value in a
          // data-processing instruction.
          if (ARM_AM::getT2SOImmVal(CVal) != -1)
            break;
        } else {
          // A constant that can be used as an immediate value in a
          // data-processing instruction.
          if (ARM_AM::getSOImmVal(CVal) != -1)
            break;
        }
        return;

      case 'J':
        if (Subtarget->isThumb1Only()) {
          // This must be a constant between -255 and -1, for negated ADD
          // immediates. This can be used in GCC with an "n" modifier that
          // prints the negated value, for use with SUB instructions. It is
          // not useful otherwise but is implemented for compatibility.
          if (CVal >= -255 && CVal <= -1)
            break;
        } else {
          // This must be a constant between -4095 and 4095. It is not clear
          // what this constraint is intended for. Implemented for
          // compatibility with GCC.
          if (CVal >= -4095 && CVal <= 4095)
            break;
        }
        return;

      case 'K':
        if (Subtarget->isThumb1Only()) {
          // A 32-bit value where only one byte has a nonzero value. Exclude
          // zero to match GCC. This constraint is used by GCC internally for
          // constants that can be loaded with a move/shift combination.
          // It is not useful otherwise but is implemented for compatibility.
          if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal))
            break;
        } else if (Subtarget->isThumb2()) {
          // A constant whose bitwise inverse can be used as an immediate
          // value in a data-processing instruction. This can be used in GCC
          // with a "B" modifier that prints the inverted value, for use with
          // BIC and MVN instructions. It is not useful otherwise but is
          // implemented for compatibility.
          if (ARM_AM::getT2SOImmVal(~CVal) != -1)
            break;
        } else {
          // A constant whose bitwise inverse can be used as an immediate
          // value in a data-processing instruction. This can be used in GCC
          // with a "B" modifier that prints the inverted value, for use with
          // BIC and MVN instructions. It is not useful otherwise but is
          // implemented for compatibility.
          if (ARM_AM::getSOImmVal(~CVal) != -1)
            break;
        }
        return;

      case 'L':
        if (Subtarget->isThumb1Only()) {
          // This must be a constant between -7 and 7,
          // for 3-operand ADD/SUB immediate instructions.
          if (CVal >= -7 && CVal < 7)
            break;
        } else if (Subtarget->isThumb2()) {
          // A constant whose negation can be used as an immediate value in a
          // data-processing instruction. This can be used in GCC with an "n"
          // modifier that prints the negated value, for use with SUB
          // instructions. It is not useful otherwise but is implemented for
          // compatibility.
          if (ARM_AM::getT2SOImmVal(-CVal) != -1)
            break;
        } else {
          // A constant whose negation can be used as an immediate value in a
          // data-processing instruction. This can be used in GCC with an "n"
          // modifier that prints the negated value, for use with SUB
          // instructions. It is not useful otherwise but is implemented for
          // compatibility.
          if (ARM_AM::getSOImmVal(-CVal) != -1)
            break;
        }
        return;

      case 'M':
        if (Subtarget->isThumb1Only()) {
          // This must be a multiple of 4 between 0 and 1020, for
          // ADD sp + immediate.
          if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0))
            break;
        } else {
          // A power of two or a constant between 0 and 32.  This is used in
          // GCC for the shift amount on shifted register operands, but it is
          // useful in general for any shift amounts.
          if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0))
            break;
        }
        return;

      case 'N':
        if (Subtarget->isThumb1Only()) {
          // This must be a constant between 0 and 31, for shift amounts.
          if (CVal >= 0 && CVal <= 31)
            break;
        }
        return;

      case 'O':
        if (Subtarget->isThumb1Only()) {
          // This must be a multiple of 4 between -508 and 508, for
          // ADD/SUB sp = sp + immediate.
          if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0))
            break;
        }
        return;
    }
    Result = DAG.getTargetConstant(CVal, SDLoc(Op), Op.getValueType());
    break;
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
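// For illustration, with a hypothetical operand such as "I"(255) on a Thumb1
// target the constant passes the 0..255 range check above and becomes a
// target constant appended to Ops; a value like 300 fails every branch for
// 'I', nothing is added to Ops, and the operand is rejected further up.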
static RTLIB::Libcall getDivRemLibcall(
    const SDNode *N, MVT::SimpleValueType SVT) {
  assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM ||
          N->getOpcode() == ISD::SREM    || N->getOpcode() == ISD::UREM) &&
         "Unhandled Opcode in getDivRemLibcall");
  bool isSigned = N->getOpcode() == ISD::SDIVREM ||
                  N->getOpcode() == ISD::SREM;
  RTLIB::Libcall LC;
  switch (SVT) {
  default: llvm_unreachable("Unexpected request for libcall!");
  case MVT::i8:  LC = isSigned ? RTLIB::SDIVREM_I8  : RTLIB::UDIVREM_I8;  break;
  case MVT::i16: LC = isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break;
  case MVT::i32: LC = isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break;
  case MVT::i64: LC = isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break;
  }
  return LC;
}
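// For reference: on AEABI targets the RTLIB::SDIVREM_I32 / UDIVREM_I32
// libcalls selected here are registered elsewhere in this file as
// __aeabi_idivmod / __aeabi_uidivmod, which return the quotient and the
// remainder in r0 and r1 respectively.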
static TargetLowering::ArgListTy getDivRemArgList(
    const SDNode *N, LLVMContext *Context, const ARMSubtarget *Subtarget) {
  assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM ||
          N->getOpcode() == ISD::SREM    || N->getOpcode() == ISD::UREM) &&
         "Unhandled Opcode in getDivRemArgList");
  bool isSigned = N->getOpcode() == ISD::SDIVREM ||
                  N->getOpcode() == ISD::SREM;
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    EVT ArgVT = N->getOperand(i).getValueType();
    Type *ArgTy = ArgVT.getTypeForEVT(*Context);
    Entry.Node = N->getOperand(i);
    Entry.Ty = ArgTy;
    Entry.IsSExt = isSigned;
    Entry.IsZExt = !isSigned;
    Args.push_back(Entry);
  }
  if (Subtarget->isTargetWindows() && Args.size() >= 2)
    std::swap(Args[0], Args[1]);
  return Args;
}
SDValue ARMTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const {
  assert((Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() ||
          Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI() ||
          Subtarget->isTargetWindows()) &&
         "Register-based DivRem lowering only");
  unsigned Opcode = Op->getOpcode();
  assert((Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) &&
         "Invalid opcode for Div/Rem lowering");
  bool isSigned = (Opcode == ISD::SDIVREM);
  EVT VT = Op->getValueType(0);
  Type *Ty = VT.getTypeForEVT(*DAG.getContext());
  SDLoc dl(Op);

  // If the target has hardware divide, use divide + multiply + subtract:
  //     div = a / b
  //     rem = a - b * div
  //     return {div, rem}
  // This should be lowered into UDIV/SDIV + MLS later on.
  bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode()
                                        : Subtarget->hasDivideInARMMode();
  if (hasDivide && Op->getValueType(0).isSimple() &&
      Op->getSimpleValueType(0) == MVT::i32) {
    unsigned DivOpcode = isSigned ? ISD::SDIV : ISD::UDIV;
    const SDValue Dividend = Op->getOperand(0);
    const SDValue Divisor = Op->getOperand(1);
    SDValue Div = DAG.getNode(DivOpcode, dl, VT, Dividend, Divisor);
    SDValue Mul = DAG.getNode(ISD::MUL, dl, VT, Div, Divisor);
    SDValue Rem = DAG.getNode(ISD::SUB, dl, VT, Dividend, Mul);

    SDValue Values[2] = {Div, Rem};
    return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VT, VT), Values);
  }

  RTLIB::Libcall LC = getDivRemLibcall(Op.getNode(),
                                       VT.getSimpleVT().SimpleTy);
  SDValue InChain = DAG.getEntryNode();

  TargetLowering::ArgListTy Args = getDivRemArgList(Op.getNode(),
                                                    DAG.getContext(),
                                                    Subtarget);

  SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
                                         getPointerTy(DAG.getDataLayout()));

  Type *RetTy = StructType::get(Ty, Ty);

  if (Subtarget->isTargetWindows())
    InChain = WinDBZCheckDenominator(DAG, Op.getNode(), InChain);

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl).setChain(InChain)
     .setCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args))
     .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned);

  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
  return CallInfo.first;
}
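// Worked example (hypothetical input): for an i32 ISD::SDIVREM on a core with
// hardware divide, the expansion above produces
//     div = SDIV a, b
//     mul = MUL  div, b
//     rem = SUB  a, mul
// which later folds into SDIV + MLS; otherwise the node becomes a single
// divmod libcall whose {div, rem} struct return carries both results.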
// Lowers REM using divmod helpers
// see RTABI section 4.2/4.3
SDValue ARMTargetLowering::LowerREM(SDNode *N, SelectionDAG &DAG) const {
  // Build return types (div and rem)
  std::vector<Type*> RetTyParams;
  Type *RetTyElement;

  switch (N->getValueType(0).getSimpleVT().SimpleTy) {
  default: llvm_unreachable("Unexpected request for libcall!");
  case MVT::i8:  RetTyElement = Type::getInt8Ty(*DAG.getContext());  break;
  case MVT::i16: RetTyElement = Type::getInt16Ty(*DAG.getContext()); break;
  case MVT::i32: RetTyElement = Type::getInt32Ty(*DAG.getContext()); break;
  case MVT::i64: RetTyElement = Type::getInt64Ty(*DAG.getContext()); break;
  }

  RetTyParams.push_back(RetTyElement);
  RetTyParams.push_back(RetTyElement);
  ArrayRef<Type*> ret = ArrayRef<Type*>(RetTyParams);
  Type *RetTy = StructType::get(*DAG.getContext(), ret);

  RTLIB::Libcall LC = getDivRemLibcall(N, N->getValueType(0).getSimpleVT().
                                              SimpleTy);
  SDValue InChain = DAG.getEntryNode();
  TargetLowering::ArgListTy Args = getDivRemArgList(N, DAG.getContext(),
                                                    Subtarget);
  bool isSigned = N->getOpcode() == ISD::SREM;
  SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
                                         getPointerTy(DAG.getDataLayout()));

  if (Subtarget->isTargetWindows())
    InChain = WinDBZCheckDenominator(DAG, N, InChain);

  // Lower call
  CallLoweringInfo CLI(DAG);
  CLI.setChain(InChain)
     .setCallee(CallingConv::ARM_AAPCS, RetTy, Callee, std::move(Args))
     .setSExtResult(isSigned).setZExtResult(!isSigned).setDebugLoc(SDLoc(N));
  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);

  // Return second (rem) result operand (first contains div)
  SDNode *ResNode = CallResult.first.getNode();
  assert(ResNode->getNumOperands() == 2 && "divmod should return two operands");
  return ResNode->getOperand(1);
}
SDValue
ARMTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
  assert(Subtarget->isTargetWindows() && "unsupported target platform");
  SDLoc DL(Op);

  // Get the inputs.
  SDValue Chain = Op.getOperand(0);
  SDValue Size  = Op.getOperand(1);

  if (DAG.getMachineFunction().getFunction().hasFnAttribute(
          "no-stack-arg-probe")) {
    unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
    SDValue SP = DAG.getCopyFromReg(Chain, DL, ARM::SP, MVT::i32);
    Chain = SP.getValue(1);
    SP = DAG.getNode(ISD::SUB, DL, MVT::i32, SP, Size);
    if (Align)
      SP = DAG.getNode(ISD::AND, DL, MVT::i32, SP.getValue(0),
                       DAG.getConstant(-(uint64_t)Align, DL, MVT::i32));
    Chain = DAG.getCopyToReg(Chain, DL, ARM::SP, SP);
    SDValue Ops[2] = { SP, Chain };
    return DAG.getMergeValues(Ops, DL);
  }

  SDValue Words = DAG.getNode(ISD::SRL, DL, MVT::i32, Size,
                              DAG.getConstant(2, DL, MVT::i32));

  SDValue Flag;
  Chain = DAG.getCopyToReg(Chain, DL, ARM::R4, Words, Flag);
  Flag = Chain.getValue(1);

  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(ARMISD::WIN__CHKSTK, DL, NodeTys, Chain, Flag);

  SDValue NewSP = DAG.getCopyFromReg(Chain, DL, ARM::SP, MVT::i32);
  Chain = NewSP.getValue(1);

  SDValue Ops[2] = { NewSP, Chain };
  return DAG.getMergeValues(Ops, DL);
}
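// For reference: on Windows on ARM the allocation size is handed to the
// __chkstk helper in r4 as a count of 4-byte words (hence the SRL by 2
// above), and the probed, adjusted stack pointer is then read back from SP
// after the ARMISD::WIN__CHKSTK node.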
SDValue ARMTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
  SDValue SrcVal = Op.getOperand(0);
  const unsigned DstSz = Op.getValueType().getSizeInBits();
  const unsigned SrcSz = SrcVal.getValueType().getSizeInBits();
  assert(DstSz > SrcSz && DstSz <= 64 && SrcSz >= 16 &&
         "Unexpected type for custom-lowering FP_EXTEND");

  assert((!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) &&
         "With both FP DP and 16, any FP conversion is legal!");

  assert(!(DstSz == 32 && Subtarget->hasFP16()) &&
         "With FP16, 16 to 32 conversion is legal!");

  // Either we are converting from 16 -> 64, without FP16 and/or
  // FP.double-precision or without Armv8-fp. So we must do it in two
  // steps.
  // Or we are converting from 32 -> 64 without fp.double-precision or 16 -> 32
  // without FP16. So we must do a function call.
  SDLoc Loc(Op);
  RTLIB::Libcall LC;
  MakeLibCallOptions CallOptions;
  if (SrcSz == 16) {
    // Instruction from 16 -> 32
    if (Subtarget->hasFP16())
      SrcVal = DAG.getNode(ISD::FP_EXTEND, Loc, MVT::f32, SrcVal);
    // Lib call from 16 -> 32
    else {
      LC = RTLIB::getFPEXT(MVT::f16, MVT::f32);
      assert(LC != RTLIB::UNKNOWN_LIBCALL &&
             "Unexpected type for custom-lowering FP_EXTEND");
      SrcVal =
          makeLibCall(DAG, LC, MVT::f32, SrcVal, CallOptions, Loc).first;
    }
  }

  if (DstSz != 64)
    return SrcVal;

  // For sure now SrcVal is 32 bits
  if (Subtarget->hasFP64()) // Instruction from 32 -> 64
    return DAG.getNode(ISD::FP_EXTEND, Loc, MVT::f64, SrcVal);

  LC = RTLIB::getFPEXT(MVT::f32, MVT::f64);
  assert(LC != RTLIB::UNKNOWN_LIBCALL &&
         "Unexpected type for custom-lowering FP_EXTEND");
  return makeLibCall(DAG, LC, MVT::f64, SrcVal, CallOptions, Loc).first;
}
SDValue ARMTargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
  SDValue SrcVal = Op.getOperand(0);
  EVT SrcVT = SrcVal.getValueType();
  EVT DstVT = Op.getValueType();
  const unsigned DstSz = Op.getValueType().getSizeInBits();
  const unsigned SrcSz = SrcVT.getSizeInBits();

  assert(DstSz < SrcSz && SrcSz <= 64 && DstSz >= 16 &&
         "Unexpected type for custom-lowering FP_ROUND");

  assert((!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) &&
         "With both FP DP and 16, any FP conversion is legal!");

  SDLoc Loc(Op);

  // Instruction from 32 -> 16 if hasFP16 is valid
  if (SrcSz == 32 && Subtarget->hasFP16())
    return Op;

  // Lib call from 32 -> 16 / 64 -> [32, 16]
  RTLIB::Libcall LC = RTLIB::getFPROUND(SrcVT, DstVT);
  assert(LC != RTLIB::UNKNOWN_LIBCALL &&
         "Unexpected type for custom-lowering FP_ROUND");
  MakeLibCallOptions CallOptions;
  return makeLibCall(DAG, LC, DstVT, SrcVal, CallOptions, Loc).first;
}
void ARMTargetLowering::lowerABS(SDNode *N, SmallVectorImpl<SDValue> &Results,
                                 SelectionDAG &DAG) const {
  assert(N->getValueType(0) == MVT::i64 && "Unexpected type (!= i64) on ABS.");
  MVT HalfT = MVT::i32;
  SDLoc dl(N);
  SDValue Hi, Lo, Tmp;

  if (!isOperationLegalOrCustom(ISD::ADDCARRY, HalfT) ||
      !isOperationLegalOrCustom(ISD::UADDO, HalfT))
    return;

  unsigned OpTypeBits = HalfT.getScalarSizeInBits();
  SDVTList VTList = DAG.getVTList(HalfT, MVT::i1);

  Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(0),
                   DAG.getConstant(0, dl, HalfT));
  Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(0),
                   DAG.getConstant(1, dl, HalfT));

  Tmp = DAG.getNode(ISD::SRA, dl, HalfT, Hi,
                    DAG.getConstant(OpTypeBits - 1, dl,
                                    getShiftAmountTy(HalfT, DAG.getDataLayout())));
  Lo = DAG.getNode(ISD::UADDO, dl, VTList, Tmp, Lo);
  Hi = DAG.getNode(ISD::ADDCARRY, dl, VTList, Tmp, Hi,
                   SDValue(Lo.getNode(), 1));
  Hi = DAG.getNode(ISD::XOR, dl, HalfT, Tmp, Hi);
  Lo = DAG.getNode(ISD::XOR, dl, HalfT, Tmp, Lo);

  Results.push_back(Lo);
  Results.push_back(Hi);
}
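// Worked example: the expansion above computes abs(x) for i64 as
//   sign = hi >> 31            (all ones if x is negative, else zero)
//   {lo, hi} += {sign, sign}   (with the carry propagated between halves)
//   {lo, hi} ^= {sign, sign}
// e.g. for x = -5: sign = -1, x + sign = -6, and (-6) ^ -1 = 5.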
bool
ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The ARM target isn't yet aware of offsets.
  return false;
}

bool ARM::isBitFieldInvertedMask(unsigned v) {
  if (v == 0xffffffff)
    return false;

  // there can be 1's on either or both "outsides", all the "inside"
  // bits must be 0's
  return isShiftedMask_32(~v);
}
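// For example, 0xffff00ff is an inverted bit-field mask (its complement
// 0x0000ff00 is a single contiguous run of ones, the BFC/BFI pattern), while
// 0xff00ff00 is not, because its complement 0x00ff00ff has two separate runs.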
/// isFPImmLegal - Returns true if the target can instruction select the
/// specified FP immediate natively. If false, the legalizer will
/// materialize the FP immediate as a load from a constant pool.
bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                     bool ForCodeSize) const {
  if (!Subtarget->hasVFP3Base())
    return false;
  if (VT == MVT::f16 && Subtarget->hasFullFP16())
    return ARM_AM::getFP16Imm(Imm) != -1;
  if (VT == MVT::f32)
    return ARM_AM::getFP32Imm(Imm) != -1;
  if (VT == MVT::f64 && Subtarget->hasFP64())
    return ARM_AM::getFP64Imm(Imm) != -1;

  return false;
}
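// For illustration: immediates such as 1.0, 0.5 or -2.0 fit the 8-bit VMOV
// floating-point immediate encoding and are selected directly, whereas values
// like 0.1 (or 0.0, which has no VMOV.F32 immediate form) are rejected here
// and end up materialized through a constant pool load instead.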
/// getTgtMemIntrinsic - Represent NEON load and store intrinsics as
/// MemIntrinsicNodes.  The associated MachineMemOperands record the alignment
/// specified in the intrinsic calls.
bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                           const CallInst &I,
                                           MachineFunction &MF,
                                           unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::arm_neon_vld1:
  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane:
  case Intrinsic::arm_neon_vld2dup:
  case Intrinsic::arm_neon_vld3dup:
  case Intrinsic::arm_neon_vld4dup: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    // Conservatively set memVT to the entire set of vectors loaded.
    auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
    uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64;
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
    Info.align = MaybeAlign(cast<ConstantInt>(AlignArg)->getZExtValue());
    // volatile loads with NEON intrinsics not supported
    Info.flags = MachineMemOperand::MOLoad;
    return true;
  }
  case Intrinsic::arm_neon_vld1x2:
  case Intrinsic::arm_neon_vld1x3:
  case Intrinsic::arm_neon_vld1x4: {
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    // Conservatively set memVT to the entire set of vectors loaded.
    auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
    uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64;
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
    Info.ptrVal = I.getArgOperand(I.getNumArgOperands() - 1);
    Info.offset = 0;
    Info.align.reset();
    // volatile loads with NEON intrinsics not supported
    Info.flags = MachineMemOperand::MOLoad;
    return true;
  }
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane: {
    Info.opc = ISD::INTRINSIC_VOID;
    // Conservatively set memVT to the entire set of vectors stored.
    auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
    unsigned NumElts = 0;
    for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
      Type *ArgTy = I.getArgOperand(ArgI)->getType();
      if (!ArgTy->isVectorTy())
        break;
      NumElts += DL.getTypeSizeInBits(ArgTy) / 64;
    }
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
    Info.align = MaybeAlign(cast<ConstantInt>(AlignArg)->getZExtValue());
    // volatile stores with NEON intrinsics not supported
    Info.flags = MachineMemOperand::MOStore;
    return true;
  }
  case Intrinsic::arm_neon_vst1x2:
  case Intrinsic::arm_neon_vst1x3:
  case Intrinsic::arm_neon_vst1x4: {
    Info.opc = ISD::INTRINSIC_VOID;
    // Conservatively set memVT to the entire set of vectors stored.
    auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
    unsigned NumElts = 0;
    for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
      Type *ArgTy = I.getArgOperand(ArgI)->getType();
      if (!ArgTy->isVectorTy())
        break;
      NumElts += DL.getTypeSizeInBits(ArgTy) / 64;
    }
    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align.reset();
    // volatile stores with NEON intrinsics not supported
    Info.flags = MachineMemOperand::MOStore;
    return true;
  }
  case Intrinsic::arm_ldaex:
  case Intrinsic::arm_ldrex: {
    auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
    PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(PtrTy->getElementType());
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = MaybeAlign(DL.getABITypeAlignment(PtrTy->getElementType()));
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile;
    return true;
  }
  case Intrinsic::arm_stlex:
  case Intrinsic::arm_strex: {
    auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
    PointerType *PtrTy = cast<PointerType>(I.getArgOperand(1)->getType());
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(PtrTy->getElementType());
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = 0;
    Info.align = MaybeAlign(DL.getABITypeAlignment(PtrTy->getElementType()));
    Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
    return true;
  }
  case Intrinsic::arm_stlexd:
  case Intrinsic::arm_strexd:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i64;
    Info.ptrVal = I.getArgOperand(2);
    Info.offset = 0;
    Info.align = Align(8);
    Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
    return true;

  case Intrinsic::arm_ldaexd:
  case Intrinsic::arm_ldrexd:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i64;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(8);
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile;
    return true;

  default:
    break;
  }

  return false;
}
/// Returns true if it is beneficial to convert a load of a constant
/// to just the constant itself.
bool ARMTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                          Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned Bits = Ty->getPrimitiveSizeInBits();
  if (Bits == 0 || Bits > 32)
    return false;
  return true;
}

bool ARMTargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                                                unsigned Index) const {
  if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
    return false;

  return (Index == 0 || Index == ResVT.getVectorNumElements());
}
Instruction* ARMTargetLowering::makeDMB(IRBuilder<> &Builder,
                                        ARM_MB::MemBOpt Domain) const {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();

  // First, if the target has no DMB, see what fallback we can use.
  if (!Subtarget->hasDataBarrier()) {
    // Some ARMv6 cpus can support data barriers with an mcr instruction.
    // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
    // here.
    if (Subtarget->hasV6Ops() && !Subtarget->isThumb()) {
      Function *MCR = Intrinsic::getDeclaration(M, Intrinsic::arm_mcr);
      Value* args[6] = {Builder.getInt32(15), Builder.getInt32(0),
                        Builder.getInt32(0), Builder.getInt32(7),
                        Builder.getInt32(10), Builder.getInt32(5)};
      return Builder.CreateCall(MCR, args);
    } else {
      // Instead of using barriers, atomic accesses on these subtargets use
      // libcalls.
      llvm_unreachable("makeDMB on a target so old that it has no barriers");
    }
  } else {
    Function *DMB = Intrinsic::getDeclaration(M, Intrinsic::arm_dmb);
    // Only a full system barrier exists in the M-class architectures.
    Domain = Subtarget->isMClass() ? ARM_MB::SY : Domain;
    Constant *CDomain = Builder.getInt32(Domain);
    return Builder.CreateCall(DMB, CDomain);
  }
}
// Based on http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
Instruction *ARMTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
                                                 Instruction *Inst,
                                                 AtomicOrdering Ord) const {
  switch (Ord) {
  case AtomicOrdering::NotAtomic:
  case AtomicOrdering::Unordered:
    llvm_unreachable("Invalid fence: unordered/non-atomic");
  case AtomicOrdering::Monotonic:
  case AtomicOrdering::Acquire:
    return nullptr; // Nothing to do
  case AtomicOrdering::SequentiallyConsistent:
    if (!Inst->hasAtomicStore())
      return nullptr; // Nothing to do
    LLVM_FALLTHROUGH;
  case AtomicOrdering::Release:
  case AtomicOrdering::AcquireRelease:
    if (Subtarget->preferISHSTBarriers())
      return makeDMB(Builder, ARM_MB::ISHST);
    // FIXME: add a comment with a link to documentation justifying this.
    else
      return makeDMB(Builder, ARM_MB::ISH);
  }
  llvm_unreachable("Unknown fence ordering in emitLeadingFence");
}
Instruction *ARMTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
                                                  Instruction *Inst,
                                                  AtomicOrdering Ord) const {
  switch (Ord) {
  case AtomicOrdering::NotAtomic:
  case AtomicOrdering::Unordered:
    llvm_unreachable("Invalid fence: unordered/not-atomic");
  case AtomicOrdering::Monotonic:
  case AtomicOrdering::Release:
    return nullptr; // Nothing to do
  case AtomicOrdering::Acquire:
  case AtomicOrdering::AcquireRelease:
  case AtomicOrdering::SequentiallyConsistent:
    return makeDMB(Builder, ARM_MB::ISH);
  }
  llvm_unreachable("Unknown fence ordering in emitTrailingFence");
}
// Loads and stores less than 64-bits are already atomic; ones above that
// are doomed anyway, so defer to the default libcall and blame the OS when
// things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit
// anything for those.
bool ARMTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
  unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits();
  return (Size == 64) && !Subtarget->isMClass();
}

// Loads and stores less than 64-bits are already atomic; ones above that
// are doomed anyway, so defer to the default libcall and blame the OS when
// things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit
// anything for those.
// FIXME: ldrd and strd are atomic if the CPU has LPAE (e.g. A15 has that
// guarantee, see DDI0406C ARM architecture reference manual,
// sections A8.8.72-74 LDRD)
TargetLowering::AtomicExpansionKind
ARMTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
  unsigned Size = LI->getType()->getPrimitiveSizeInBits();
  return ((Size == 64) && !Subtarget->isMClass()) ? AtomicExpansionKind::LLOnly
                                                  : AtomicExpansionKind::None;
}
// For the real atomic operations, we have ldrex/strex up to 32 bits,
// and up to 64 bits on the non-M profiles
TargetLowering::AtomicExpansionKind
ARMTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  if (AI->isFloatingPointOperation())
    return AtomicExpansionKind::CmpXChg;

  unsigned Size = AI->getType()->getPrimitiveSizeInBits();
  bool hasAtomicRMW = !Subtarget->isThumb() || Subtarget->hasV8MBaselineOps();
  return (Size <= (Subtarget->isMClass() ? 32U : 64U) && hasAtomicRMW)
             ? AtomicExpansionKind::LLSC
             : AtomicExpansionKind::None;
}
TargetLowering::AtomicExpansionKind
ARMTargetLowering::shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
  // At -O0, fast-regalloc cannot cope with the live vregs necessary to
  // implement cmpxchg without spilling. If the address being exchanged is also
  // on the stack and close enough to the spill slot, this can lead to a
  // situation where the monitor always gets cleared and the atomic operation
  // can never succeed. So at -O0 we need a late-expanded pseudo-inst instead.
  bool HasAtomicCmpXchg =
      !Subtarget->isThumb() || Subtarget->hasV8MBaselineOps();
  if (getTargetMachine().getOptLevel() != 0 && HasAtomicCmpXchg)
    return AtomicExpansionKind::LLSC;
  return AtomicExpansionKind::None;
}

bool ARMTargetLowering::shouldInsertFencesForAtomic(
    const Instruction *I) const {
  return InsertFencesForAtomic;
}
// This has so far only been implemented for MachO.
bool ARMTargetLowering::useLoadStackGuardNode() const {
  return Subtarget->isTargetMachO();
}

void ARMTargetLowering::insertSSPDeclarations(Module &M) const {
  if (!Subtarget->getTargetTriple().isWindowsMSVCEnvironment())
    return TargetLowering::insertSSPDeclarations(M);

  // MSVC CRT has a global variable holding security cookie.
  M.getOrInsertGlobal("__security_cookie",
                      Type::getInt8PtrTy(M.getContext()));

  // MSVC CRT has a function to validate security cookie.
  FunctionCallee SecurityCheckCookie = M.getOrInsertFunction(
      "__security_check_cookie", Type::getVoidTy(M.getContext()),
      Type::getInt8PtrTy(M.getContext()));
  if (Function *F = dyn_cast<Function>(SecurityCheckCookie.getCallee()))
    F->addAttribute(1, Attribute::AttrKind::InReg);
}

Value *ARMTargetLowering::getSDagStackGuard(const Module &M) const {
  // MSVC CRT has a global variable holding security cookie.
  if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment())
    return M.getGlobalVariable("__security_cookie");
  return TargetLowering::getSDagStackGuard(M);
}

Function *ARMTargetLowering::getSSPStackGuardCheck(const Module &M) const {
  // MSVC CRT has a function to validate security cookie.
  if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment())
    return M.getFunction("__security_check_cookie");
  return TargetLowering::getSSPStackGuardCheck(M);
}
bool ARMTargetLowering::canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
                                                  unsigned &Cost) const {
  // If we do not have NEON, vector types are not natively supported.
  if (!Subtarget->hasNEON())
    return false;

  // Floating point values and vector values map to the same register file.
  // Therefore, although we could do a store extract of a vector type, this is
  // better to leave at float as we have more freedom in the addressing mode for
  // those.
  if (VectorTy->isFPOrFPVectorTy())
    return false;

  // If the index is unknown at compile time, this is very expensive to lower
  // and it is not possible to combine the store with the extract.
  if (!isa<ConstantInt>(Idx))
    return false;

  assert(VectorTy->isVectorTy() && "VectorTy is not a vector type");
  unsigned BitWidth = cast<VectorType>(VectorTy)->getBitWidth();
  // We can do a store + vector extract on any vector that fits perfectly in a D
  // or Q register.
  if (BitWidth == 64 || BitWidth == 128) {
    Cost = 0;
    return true;
  }
  return false;
}
bool ARMTargetLowering::isCheapToSpeculateCttz() const {
  return Subtarget->hasV6T2Ops();
}

bool ARMTargetLowering::isCheapToSpeculateCtlz() const {
  return Subtarget->hasV6T2Ops();
}

bool ARMTargetLowering::shouldExpandShift(SelectionDAG &DAG, SDNode *N) const {
  return !Subtarget->hasMinSize();
}
Value *ARMTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                                         AtomicOrdering Ord) const {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Type *ValTy = cast<PointerType>(Addr->getType())->getElementType();
  bool IsAcquire = isAcquireOrStronger(Ord);

  // Since i64 isn't legal and intrinsics don't get type-lowered, the ldrexd
  // intrinsic must return {i32, i32} and we have to recombine them into a
  // single i64 here.
  if (ValTy->getPrimitiveSizeInBits() == 64) {
    Intrinsic::ID Int =
        IsAcquire ? Intrinsic::arm_ldaexd : Intrinsic::arm_ldrexd;
    Function *Ldrex = Intrinsic::getDeclaration(M, Int);

    Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
    Value *LoHi = Builder.CreateCall(Ldrex, Addr, "lohi");

    Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
    Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi");
    if (!Subtarget->isLittle())
      std::swap (Lo, Hi);
    Lo = Builder.CreateZExt(Lo, ValTy, "lo64");
    Hi = Builder.CreateZExt(Hi, ValTy, "hi64");
    return Builder.CreateOr(
        Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 32)), "val64");
  }

  Type *Tys[] = { Addr->getType() };
  Intrinsic::ID Int = IsAcquire ? Intrinsic::arm_ldaex : Intrinsic::arm_ldrex;
  Function *Ldrex = Intrinsic::getDeclaration(M, Int, Tys);

  return Builder.CreateTruncOrBitCast(
      Builder.CreateCall(Ldrex, Addr),
      cast<PointerType>(Addr->getType())->getElementType());
}
void ARMTargetLowering::emitAtomicCmpXchgNoStoreLLBalance(
    IRBuilder<> &Builder) const {
  if (!Subtarget->hasV7Ops())
    return;
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Builder.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::arm_clrex));
}
Value *ARMTargetLowering::emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                                               Value *Addr,
                                               AtomicOrdering Ord) const {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  bool IsRelease = isReleaseOrStronger(Ord);

  // Since the intrinsics must have legal type, the i64 intrinsics take two
  // parameters: "i32, i32". We must marshal Val into the appropriate form
  // before the call.
  if (Val->getType()->getPrimitiveSizeInBits() == 64) {
    Intrinsic::ID Int =
        IsRelease ? Intrinsic::arm_stlexd : Intrinsic::arm_strexd;
    Function *Strex = Intrinsic::getDeclaration(M, Int);
    Type *Int32Ty = Type::getInt32Ty(M->getContext());

    Value *Lo = Builder.CreateTrunc(Val, Int32Ty, "lo");
    Value *Hi = Builder.CreateTrunc(Builder.CreateLShr(Val, 32), Int32Ty, "hi");
    if (!Subtarget->isLittle())
      std::swap(Lo, Hi);
    Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
    return Builder.CreateCall(Strex, {Lo, Hi, Addr});
  }

  Intrinsic::ID Int = IsRelease ? Intrinsic::arm_stlex : Intrinsic::arm_strex;
  Type *Tys[] = { Addr->getType() };
  Function *Strex = Intrinsic::getDeclaration(M, Int, Tys);

  return Builder.CreateCall(
      Strex, {Builder.CreateZExtOrBitCast(
                  Val, Strex->getFunctionType()->getParamType(0)),
              Addr});
}
bool ARMTargetLowering::alignLoopsWithOptSize() const {
  return Subtarget->isMClass();
}

/// A helper function for determining the number of interleaved accesses we
/// will generate when lowering accesses of the given type.
unsigned
ARMTargetLowering::getNumInterleavedAccesses(VectorType *VecTy,
                                             const DataLayout &DL) const {
  return (DL.getTypeSizeInBits(VecTy) + 127) / 128;
}
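// For example, a <4 x i32> (128 bits) needs (128 + 127) / 128 = 1 access,
// while a <16 x i16> (256 bits) needs 2 interleaved accesses.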
bool ARMTargetLowering::isLegalInterleavedAccessType(
    VectorType *VecTy, const DataLayout &DL) const {

  unsigned VecSize = DL.getTypeSizeInBits(VecTy);
  unsigned ElSize = DL.getTypeSizeInBits(VecTy->getElementType());

  // Ensure the vector doesn't have f16 elements. Even though we could do an
  // i16 vldN, we can't hold the f16 vectors and will end up converting via
  // f32.
  if (VecTy->getElementType()->isHalfTy())
    return false;

  // Ensure the number of vector elements is greater than 1.
  if (VecTy->getNumElements() < 2)
    return false;

  // Ensure the element type is legal.
  if (ElSize != 8 && ElSize != 16 && ElSize != 32)
    return false;

  // Ensure the total vector size is 64 or a multiple of 128. Types larger than
  // 128 will be split into multiple interleaved accesses.
  return VecSize == 64 || VecSize % 128 == 0;
}

unsigned ARMTargetLowering::getMaxSupportedInterleaveFactor() const {
  if (Subtarget->hasNEON())
    return 4;
  return TargetLoweringBase::getMaxSupportedInterleaveFactor();
}
/// Lower an interleaved load into a vldN intrinsic.
///
/// E.g. Lower an interleaved load (Factor = 2):
///        %wide.vec = load <8 x i32>, <8 x i32>* %ptr, align 4
///        %v0 = shuffle %wide.vec, undef, <0, 2, 4, 6>  ; Extract even elements
///        %v1 = shuffle %wide.vec, undef, <1, 3, 5, 7>  ; Extract odd elements
///
///      Into:
///        %vld2 = { <4 x i32>, <4 x i32> } call llvm.arm.neon.vld2(%ptr, 4)
///        %vec0 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 0
///        %vec1 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 1
bool ARMTargetLowering::lowerInterleavedLoad(
    LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles,
    ArrayRef<unsigned> Indices, unsigned Factor) const {
  assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
         "Invalid interleave factor");
  assert(!Shuffles.empty() && "Empty shufflevector input");
  assert(Shuffles.size() == Indices.size() &&
         "Unmatched number of shufflevectors and indices");

  VectorType *VecTy = Shuffles[0]->getType();
  Type *EltTy = VecTy->getVectorElementType();

  const DataLayout &DL = LI->getModule()->getDataLayout();

  // Skip if we do not have NEON and skip illegal vector types. We can
  // "legalize" wide vector types into multiple interleaved accesses as long as
  // the vector types are divisible by 128.
  if (!Subtarget->hasNEON() || !isLegalInterleavedAccessType(VecTy, DL))
    return false;

  unsigned NumLoads = getNumInterleavedAccesses(VecTy, DL);

  // A pointer vector can not be the return type of the ldN intrinsics. Need to
  // load integer vectors first and then convert to pointer vectors.
  if (EltTy->isPointerTy())
    VecTy =
        VectorType::get(DL.getIntPtrType(EltTy), VecTy->getVectorNumElements());

  IRBuilder<> Builder(LI);

  // The base address of the load.
  Value *BaseAddr = LI->getPointerOperand();

  if (NumLoads > 1) {
    // If we're going to generate more than one load, reset the sub-vector type
    // to something legal.
    VecTy = VectorType::get(VecTy->getVectorElementType(),
                            VecTy->getVectorNumElements() / NumLoads);

    // We will compute the pointer operand of each load from the original base
    // address using GEPs. Cast the base address to a pointer to the scalar
    // element type.
    BaseAddr = Builder.CreateBitCast(
        BaseAddr, VecTy->getVectorElementType()->getPointerTo(
                      LI->getPointerAddressSpace()));
  }

  assert(isTypeLegal(EVT::getEVT(VecTy)) && "Illegal vldN vector type!");

  Type *Int8Ptr = Builder.getInt8PtrTy(LI->getPointerAddressSpace());
  Type *Tys[] = {VecTy, Int8Ptr};
  static const Intrinsic::ID LoadInts[3] = {Intrinsic::arm_neon_vld2,
                                            Intrinsic::arm_neon_vld3,
                                            Intrinsic::arm_neon_vld4};
  Function *VldnFunc =
      Intrinsic::getDeclaration(LI->getModule(), LoadInts[Factor - 2], Tys);

  // Holds sub-vectors extracted from the load intrinsic return values. The
  // sub-vectors are associated with the shufflevector instructions they will
  // replace.
  DenseMap<ShuffleVectorInst *, SmallVector<Value *, 4>> SubVecs;

  for (unsigned LoadCount = 0; LoadCount < NumLoads; ++LoadCount) {
    // If we're generating more than one load, compute the base address of
    // subsequent loads as an offset from the previous.
    if (LoadCount > 0)
      BaseAddr =
          Builder.CreateConstGEP1_32(VecTy->getVectorElementType(), BaseAddr,
                                     VecTy->getVectorNumElements() * Factor);

    SmallVector<Value *, 2> Ops;
    Ops.push_back(Builder.CreateBitCast(BaseAddr, Int8Ptr));
    Ops.push_back(Builder.getInt32(LI->getAlignment()));

    CallInst *VldN = Builder.CreateCall(VldnFunc, Ops, "vldN");

    // Replace uses of each shufflevector with the corresponding vector loaded
    // by ldN.
    for (unsigned i = 0; i < Shuffles.size(); i++) {
      ShuffleVectorInst *SV = Shuffles[i];
      unsigned Index = Indices[i];

      Value *SubVec = Builder.CreateExtractValue(VldN, Index);

      // Convert the integer vector to pointer vector if the element is pointer.
      if (EltTy->isPointerTy())
        SubVec = Builder.CreateIntToPtr(
            SubVec, VectorType::get(SV->getType()->getVectorElementType(),
                                    VecTy->getVectorNumElements()));

      SubVecs[SV].push_back(SubVec);
    }
  }

  // Replace uses of the shufflevector instructions with the sub-vectors
  // returned by the load intrinsic. If a shufflevector instruction is
  // associated with more than one sub-vector, those sub-vectors will be
  // concatenated into a single wide vector.
  for (ShuffleVectorInst *SVI : Shuffles) {
    auto &SubVec = SubVecs[SVI];
    auto *WideVec =
        SubVec.size() > 1 ? concatenateVectors(Builder, SubVec) : SubVec[0];
    SVI->replaceAllUsesWith(WideVec);
  }

  return true;
}
/// Lower an interleaved store into a vstN intrinsic.
///
/// E.g. Lower an interleaved store (Factor = 3):
///        %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1,
///                 <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>
///        store <12 x i32> %i.vec, <12 x i32>* %ptr, align 4
///
///      Into:
///        %sub.v0 = shuffle <8 x i32> %v0, <8 x i32> v1, <0, 1, 2, 3>
///        %sub.v1 = shuffle <8 x i32> %v0, <8 x i32> v1, <4, 5, 6, 7>
///        %sub.v2 = shuffle <8 x i32> %v0, <8 x i32> v1, <8, 9, 10, 11>
///        call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4)
///
/// Note that the new shufflevectors will be removed and we'll only generate one
/// vst3 instruction in CodeGen.
///
/// Example for a more general valid mask (Factor 3). Lower:
///        %i.vec = shuffle <32 x i32> %v0, <32 x i32> %v1,
///                 <4, 32, 16, 5, 33, 17, 6, 34, 18, 7, 35, 19>
///        store <12 x i32> %i.vec, <12 x i32>* %ptr
///
///      Into:
///        %sub.v0 = shuffle <32 x i32> %v0, <32 x i32> v1, <4, 5, 6, 7>
///        %sub.v1 = shuffle <32 x i32> %v0, <32 x i32> v1, <32, 33, 34, 35>
///        %sub.v2 = shuffle <32 x i32> %v0, <32 x i32> v1, <16, 17, 18, 19>
///        call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4)
bool ARMTargetLowering::lowerInterleavedStore(StoreInst *SI,
                                              ShuffleVectorInst *SVI,
                                              unsigned Factor) const {
  assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
         "Invalid interleave factor");

  VectorType *VecTy = SVI->getType();
  assert(VecTy->getVectorNumElements() % Factor == 0 &&
         "Invalid interleaved store");

  unsigned LaneLen = VecTy->getVectorNumElements() / Factor;
  Type *EltTy = VecTy->getVectorElementType();
  VectorType *SubVecTy = VectorType::get(EltTy, LaneLen);

  const DataLayout &DL = SI->getModule()->getDataLayout();

  // Skip if we do not have NEON and skip illegal vector types. We can
  // "legalize" wide vector types into multiple interleaved accesses as long as
  // the vector types are divisible by 128.
  if (!Subtarget->hasNEON() || !isLegalInterleavedAccessType(SubVecTy, DL))
    return false;

  unsigned NumStores = getNumInterleavedAccesses(SubVecTy, DL);

  Value *Op0 = SVI->getOperand(0);
  Value *Op1 = SVI->getOperand(1);
  IRBuilder<> Builder(SI);

  // StN intrinsics don't support pointer vectors as arguments. Convert pointer
  // vectors to integer vectors.
  if (EltTy->isPointerTy()) {
    Type *IntTy = DL.getIntPtrType(EltTy);

    // Convert to the corresponding integer vector.
    Type *IntVecTy =
        VectorType::get(IntTy, Op0->getType()->getVectorNumElements());
    Op0 = Builder.CreatePtrToInt(Op0, IntVecTy);
    Op1 = Builder.CreatePtrToInt(Op1, IntVecTy);

    SubVecTy = VectorType::get(IntTy, LaneLen);
  }

  // The base address of the store.
  Value *BaseAddr = SI->getPointerOperand();

  if (NumStores > 1) {
    // If we're going to generate more than one store, reset the lane length
    // and sub-vector type to something legal.
    LaneLen /= NumStores;
    SubVecTy = VectorType::get(SubVecTy->getVectorElementType(), LaneLen);

    // We will compute the pointer operand of each store from the original base
    // address using GEPs. Cast the base address to a pointer to the scalar
    // element type.
    BaseAddr = Builder.CreateBitCast(
        BaseAddr, SubVecTy->getVectorElementType()->getPointerTo(
                      SI->getPointerAddressSpace()));
  }

  assert(isTypeLegal(EVT::getEVT(SubVecTy)) && "Illegal vstN vector type!");

  auto Mask = SVI->getShuffleMask();

  Type *Int8Ptr = Builder.getInt8PtrTy(SI->getPointerAddressSpace());
  Type *Tys[] = {Int8Ptr, SubVecTy};
  static const Intrinsic::ID StoreInts[3] = {Intrinsic::arm_neon_vst2,
                                             Intrinsic::arm_neon_vst3,
                                             Intrinsic::arm_neon_vst4};

  for (unsigned StoreCount = 0; StoreCount < NumStores; ++StoreCount) {
    // If we generating more than one store, we compute the base address of
    // subsequent stores as an offset from the previous.
    if (StoreCount > 0)
      BaseAddr = Builder.CreateConstGEP1_32(SubVecTy->getVectorElementType(),
                                            BaseAddr, LaneLen * Factor);

    SmallVector<Value *, 6> Ops;
    Ops.push_back(Builder.CreateBitCast(BaseAddr, Int8Ptr));

    Function *VstNFunc =
        Intrinsic::getDeclaration(SI->getModule(), StoreInts[Factor - 2], Tys);

    // Split the shufflevector operands into sub vectors for the new vstN call.
    for (unsigned i = 0; i < Factor; i++) {
      unsigned IdxI = StoreCount * LaneLen * Factor + i;
      if (Mask[IdxI] >= 0) {
        Ops.push_back(Builder.CreateShuffleVector(
            Op0, Op1, createSequentialMask(Builder, Mask[IdxI], LaneLen, 0)));
      } else {
        unsigned StartMask = 0;
        for (unsigned j = 1; j < LaneLen; j++) {
          unsigned IdxJ = StoreCount * LaneLen * Factor + j;
          if (Mask[IdxJ * Factor + IdxI] >= 0) {
            StartMask = Mask[IdxJ * Factor + IdxI] - IdxJ;
            break;
          }
        }
        // Note: If all elements in a chunk are undefs, StartMask=0!
        // Note: Filling undef gaps with random elements is ok, since
        // those elements were being written anyway (with undefs).
        // In the case of all undefs we're defaulting to using elems from 0
        // Note: StartMask cannot be negative, it's checked in
        // isReInterleaveMask
        Ops.push_back(Builder.CreateShuffleVector(
            Op0, Op1, createSequentialMask(Builder, StartMask, LaneLen, 0)));
      }
    }

    Ops.push_back(Builder.getInt32(SI->getAlignment()));
    Builder.CreateCall(VstNFunc, Ops);
  }
  return true;
}
static bool isHomogeneousAggregate(Type *Ty, HABaseType &Base,
                                   uint64_t &Members) {
  if (auto *ST = dyn_cast<StructType>(Ty)) {
    for (unsigned i = 0; i < ST->getNumElements(); ++i) {
      uint64_t SubMembers = 0;
      if (!isHomogeneousAggregate(ST->getElementType(i), Base, SubMembers))
        return false;
      Members += SubMembers;
    }
  } else if (auto *AT = dyn_cast<ArrayType>(Ty)) {
    uint64_t SubMembers = 0;
    if (!isHomogeneousAggregate(AT->getElementType(), Base, SubMembers))
      return false;
    Members += SubMembers * AT->getNumElements();
  } else if (Ty->isFloatTy()) {
    if (Base != HA_UNKNOWN && Base != HA_FLOAT)
      return false;
    Members = 1;
    Base = HA_FLOAT;
  } else if (Ty->isDoubleTy()) {
    if (Base != HA_UNKNOWN && Base != HA_DOUBLE)
      return false;
    Members = 1;
    Base = HA_DOUBLE;
  } else if (auto *VT = dyn_cast<VectorType>(Ty)) {
    Members = 1;
    switch (Base) {
    case HA_FLOAT:
    case HA_DOUBLE:
      return false;
    case HA_VECT64:
      return VT->getBitWidth() == 64;
    case HA_VECT128:
      return VT->getBitWidth() == 128;
    case HA_UNKNOWN:
      switch (VT->getBitWidth()) {
      case 64:
        Base = HA_VECT64;
        return true;
      case 128:
        Base = HA_VECT128;
        return true;
      default:
        return false;
      }
    }
  }

  return (Members > 0 && Members <= 4);
}
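// For illustration, a struct holding three 'float' members is recognised here
// with Base == HA_FLOAT and Members == 3, i.e. an AAPCS-VFP homogeneous
// aggregate passed in consecutive S registers; adding a 'double' member would
// make the base types disagree and the check would fail.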
/// Return the correct alignment for the current calling convention.
unsigned
ARMTargetLowering::getABIAlignmentForCallingConv(Type *ArgTy,
                                                 DataLayout DL) const {
  if (!ArgTy->isVectorTy())
    return DL.getABITypeAlignment(ArgTy);

  // Avoid over-aligning vector parameters. It would require realigning the
  // stack and waste space for no real benefit.
  return std::min(DL.getABITypeAlignment(ArgTy),
                  (unsigned)DL.getStackAlignment().value());
}
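// For example, a wide vector argument whose natural ABI alignment exceeds the
// stack alignment (typically 8 bytes under AAPCS) is capped by the std::min
// above, so the parameter save area is never over-aligned just for it.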
/// Return true if a type is an AAPCS-VFP homogeneous aggregate or one of
/// [N x i32] or [N x i64]. This allows front-ends to skip emitting padding when
/// passing according to AAPCS rules.
bool ARMTargetLowering::functionArgumentNeedsConsecutiveRegisters(
    Type *Ty, CallingConv::ID CallConv, bool isVarArg) const {
  if (getEffectiveCallingConv(CallConv, isVarArg) !=
      CallingConv::ARM_AAPCS_VFP)
    return false;

  HABaseType Base = HA_UNKNOWN;
  uint64_t Members = 0;
  bool IsHA = isHomogeneousAggregate(Ty, Base, Members);
  LLVM_DEBUG(dbgs() << "isHA: " << IsHA << " "; Ty->dump());

  bool IsIntArray = Ty->isArrayTy() && Ty->getArrayElementType()->isIntegerTy();
  return IsHA || IsIntArray;
}
unsigned ARMTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  // Platforms which do not use SjLj EH may return values in these registers
  // via the personality function.
  return Subtarget->useSjLjEH() ? ARM::NoRegister : ARM::R0;
}

unsigned ARMTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  // Platforms which do not use SjLj EH may return values in these registers
  // via the personality function.
  return Subtarget->useSjLjEH() ? ARM::NoRegister : ARM::R1;
}

void ARMTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
  // Update IsSplitCSR in ARMFunctionInfo.
  ARMFunctionInfo *AFI = Entry->getParent()->getInfo<ARMFunctionInfo>();
  AFI->setIsSplitCSR(true);
}
void ARMTargetLowering::insertCopiesSplitCSR(
    MachineBasicBlock *Entry,
    const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
  const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
  if (!IStart)
    return;

  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
  MachineBasicBlock::iterator MBBI = Entry->begin();
  for (const MCPhysReg *I = IStart; *I; ++I) {
    const TargetRegisterClass *RC = nullptr;
    if (ARM::GPRRegClass.contains(*I))
      RC = &ARM::GPRRegClass;
    else if (ARM::DPRRegClass.contains(*I))
      RC = &ARM::DPRRegClass;
    else
      llvm_unreachable("Unexpected register class in CSRsViaCopy!");

    Register NewVR = MRI->createVirtualRegister(RC);
    // Create copy from CSR to a virtual register.
    // FIXME: this currently does not emit CFI pseudo-instructions, it works
    // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
    // nounwind. If we want to generalize this later, we may need to emit
    // CFI pseudo-instructions.
    assert(Entry->getParent()->getFunction().hasFnAttribute(
               Attribute::NoUnwind) &&
           "Function should be nounwind in insertCopiesSplitCSR!");
    Entry->addLiveIn(*I);
    BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
        .addReg(*I);

    // Insert the copy-back instructions right before the terminator.
    for (auto *Exit : Exits)
      BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
              TII->get(TargetOpcode::COPY), *I)
          .addReg(NewVR);
  }
}
void ARMTargetLowering::finalizeLowering(MachineFunction &MF) const {
  MF.getFrameInfo().computeMaxCallFrameSize(MF);
  TargetLoweringBase::finalizeLowering(MF);