//===--- AArch64CallLowering.cpp - Call lowering --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
//
//===----------------------------------------------------------------------===//
#include "AArch64CallLowering.h"
#include "AArch64ISelLowering.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64RegisterInfo.h"
#include "AArch64Subtarget.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/LowLevelTypeUtils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>

#define DEBUG_TYPE "aarch64-call-lowering"

using namespace llvm;
AArch64CallLowering::AArch64CallLowering(const AArch64TargetLowering &TLI)
    : CallLowering(&TLI) {}
static void applyStackPassedSmallTypeDAGHack(EVT OrigVT, MVT &ValVT,
                                             MVT &LocVT) {
  // If ValVT is i1/i8/i16, we should set LocVT to i8/i8/i16. This is a legacy
  // hack because the DAG calls the assignment function with pre-legalized
  // register typed values, not the raw type.
  //
  // This hack is not applied to return values which are not passed on the
  // stack.
  if (OrigVT == MVT::i1 || OrigVT == MVT::i8)
    ValVT = LocVT = MVT::i8;
  else if (OrigVT == MVT::i16)
    ValVT = LocVT = MVT::i16;
}
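
// For example, an i1 argument bound for the stack reaches the assigner with
// OrigVT == MVT::i1, and both ValVT and LocVT are rewritten to MVT::i8 above,
// matching the pre-legalized value SelectionDAG would have produced.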
// Account for i1/i8/i16 stack passed value hack
static LLT getStackValueStoreTypeHack(const CCValAssign &VA) {
  const MVT ValVT = VA.getValVT();
  return (ValVT == MVT::i8 || ValVT == MVT::i16) ? LLT(ValVT)
                                                 : LLT(VA.getLocVT());
}
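
// E.g. a stack-passed i8 or i16 is stored and reloaded with its value type
// (s8/s16) rather than VA.getLocVT(), so only the low bytes of the slot are
// touched; pointers bypass this hack entirely (see the handlers below).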
namespace {

struct AArch64IncomingValueAssigner
    : public CallLowering::IncomingValueAssigner {
  AArch64IncomingValueAssigner(CCAssignFn *AssignFn_,
                               CCAssignFn *AssignFnVarArg_)
      : IncomingValueAssigner(AssignFn_, AssignFnVarArg_) {}

  bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
                 CCValAssign::LocInfo LocInfo,
                 const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
                 CCState &State) override {
    applyStackPassedSmallTypeDAGHack(OrigVT, ValVT, LocVT);
    return IncomingValueAssigner::assignArg(ValNo, OrigVT, ValVT, LocVT,
                                            LocInfo, Info, Flags, State);
  }
};
struct AArch64OutgoingValueAssigner
    : public CallLowering::OutgoingValueAssigner {
  const AArch64Subtarget &Subtarget;

  /// Track if this is used for a return instead of function argument
  /// passing. We apply a hack to i1/i8/i16 stack passed values, but do not use
  /// stack passed returns for them and cannot apply the type adjustment.
  bool IsReturn;

  AArch64OutgoingValueAssigner(CCAssignFn *AssignFn_,
                               CCAssignFn *AssignFnVarArg_,
                               const AArch64Subtarget &Subtarget_,
                               bool IsReturn)
      : OutgoingValueAssigner(AssignFn_, AssignFnVarArg_),
        Subtarget(Subtarget_), IsReturn(IsReturn) {}

  bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
                 CCValAssign::LocInfo LocInfo,
                 const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
                 CCState &State) override {
    bool IsCalleeWin = Subtarget.isCallingConvWin64(State.getCallingConv());
    bool UseVarArgsCCForFixed = IsCalleeWin && State.isVarArg();

    bool Res;
    if (Info.IsFixed && !UseVarArgsCCForFixed) {
      if (!IsReturn)
        applyStackPassedSmallTypeDAGHack(OrigVT, ValVT, LocVT);
      Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State);
    } else
      Res = AssignFnVarArg(ValNo, ValVT, LocVT, LocInfo, Flags, State);

    StackSize = State.getStackSize();
    return Res;
  }
};
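
// Note: for a Win64 vararg callee, even fixed arguments take the
// AssignFnVarArg path above, and the small-type DAG hack is only applied on
// the fixed, non-return path, as the IsReturn comment explains.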
struct IncomingArgHandler : public CallLowering::IncomingValueHandler {
  IncomingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
      : IncomingValueHandler(MIRBuilder, MRI) {}

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override {
    auto &MFI = MIRBuilder.getMF().getFrameInfo();

    // Byval is assumed to be writable memory, but other stack passed arguments
    // are not.
    const bool IsImmutable = !Flags.isByVal();

    int FI = MFI.CreateFixedObject(Size, Offset, IsImmutable);
    MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
    auto AddrReg = MIRBuilder.buildFrameIndex(LLT::pointer(0, 64), FI);
    return AddrReg.getReg(0);
  }

  LLT getStackValueStoreType(const DataLayout &DL, const CCValAssign &VA,
                             ISD::ArgFlagsTy Flags) const override {
    // For pointers, we just need to fixup the integer types reported in the
    // CCValAssign.
    if (Flags.isPointer())
      return CallLowering::ValueHandler::getStackValueStoreType(DL, VA, Flags);
    return getStackValueStoreTypeHack(VA);
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        const CCValAssign &VA) override {
    markPhysRegUsed(PhysReg);
    IncomingValueHandler::assignValueToReg(ValVReg, PhysReg, VA);
  }

  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
                            const MachinePointerInfo &MPO,
                            const CCValAssign &VA) override {
    MachineFunction &MF = MIRBuilder.getMF();

    LLT ValTy(VA.getValVT());
    LLT LocTy(VA.getLocVT());

    // Fixup the types for the DAG compatibility hack.
    if (VA.getValVT() == MVT::i8 || VA.getValVT() == MVT::i16)
      std::swap(ValTy, LocTy);
    else {
      // The calling code knows if this is a pointer or not, we're only touching
      // the LocTy for the i8/i16 hack.
      assert(LocTy.getSizeInBits() == MemTy.getSizeInBits());
      LocTy = MemTy;
    }

    auto MMO = MF.getMachineMemOperand(
        MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, LocTy,
        inferAlignFromPtrInfo(MF, MPO));

    switch (VA.getLocInfo()) {
    case CCValAssign::LocInfo::ZExt:
      MIRBuilder.buildLoadInstr(TargetOpcode::G_ZEXTLOAD, ValVReg, Addr, *MMO);
      return;
    case CCValAssign::LocInfo::SExt:
      MIRBuilder.buildLoadInstr(TargetOpcode::G_SEXTLOAD, ValVReg, Addr, *MMO);
      return;
    default:
      MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
      return;
    }
  }

  /// How the physical register gets marked varies between formal
  /// parameters (it's a basic-block live-in), and a call instruction
  /// (it's an implicit-def of the BL).
  virtual void markPhysRegUsed(MCRegister PhysReg) = 0;
};
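
// The G_ZEXTLOAD/G_SEXTLOAD cases above let a narrow stack slot (such as the
// i8 slot produced by the small-type hack) fill a wider virtual register with
// a single extending load instead of a load-then-extend pair.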
struct FormalArgHandler : public IncomingArgHandler {
  FormalArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
      : IncomingArgHandler(MIRBuilder, MRI) {}

  void markPhysRegUsed(MCRegister PhysReg) override {
    MIRBuilder.getMRI()->addLiveIn(PhysReg);
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }
};
struct CallReturnHandler : public IncomingArgHandler {
  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                    MachineInstrBuilder MIB)
      : IncomingArgHandler(MIRBuilder, MRI), MIB(MIB) {}

  void markPhysRegUsed(MCRegister PhysReg) override {
    MIB.addDef(PhysReg, RegState::Implicit);
  }

  MachineInstrBuilder MIB;
};
/// A special return arg handler for "returned" attribute arg calls.
struct ReturnedArgCallReturnHandler : public CallReturnHandler {
  ReturnedArgCallReturnHandler(MachineIRBuilder &MIRBuilder,
                               MachineRegisterInfo &MRI,
                               MachineInstrBuilder MIB)
      : CallReturnHandler(MIRBuilder, MRI, MIB) {}

  void markPhysRegUsed(MCRegister PhysReg) override {}
};
struct OutgoingArgHandler : public CallLowering::OutgoingValueHandler {
  OutgoingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                     MachineInstrBuilder MIB, bool IsTailCall = false,
                     int FPDiff = 0)
      : OutgoingValueHandler(MIRBuilder, MRI), MIB(MIB), IsTailCall(IsTailCall),
        FPDiff(FPDiff),
        Subtarget(MIRBuilder.getMF().getSubtarget<AArch64Subtarget>()) {}

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO,
                           ISD::ArgFlagsTy Flags) override {
    MachineFunction &MF = MIRBuilder.getMF();
    LLT p0 = LLT::pointer(0, 64);
    LLT s64 = LLT::scalar(64);

    if (IsTailCall) {
      assert(!Flags.isByVal() && "byval unhandled with tail calls");

      Offset += FPDiff;
      int FI = MF.getFrameInfo().CreateFixedObject(Size, Offset, true);
      auto FIReg = MIRBuilder.buildFrameIndex(p0, FI);
      MPO = MachinePointerInfo::getFixedStack(MF, FI);
      return FIReg.getReg(0);
    }

    if (!SPReg)
      SPReg = MIRBuilder.buildCopy(p0, Register(AArch64::SP)).getReg(0);

    auto OffsetReg = MIRBuilder.buildConstant(s64, Offset);

    auto AddrReg = MIRBuilder.buildPtrAdd(p0, SPReg, OffsetReg);

    MPO = MachinePointerInfo::getStack(MF, Offset);
    return AddrReg.getReg(0);
  }

  /// We need to fixup the reported store size for certain value types because
  /// we invert the interpretation of ValVT and LocVT in certain cases. This is
  /// for compatability with the DAG call lowering implementation, which we're
  /// currently building on top of.
  LLT getStackValueStoreType(const DataLayout &DL, const CCValAssign &VA,
                             ISD::ArgFlagsTy Flags) const override {
    if (Flags.isPointer())
      return CallLowering::ValueHandler::getStackValueStoreType(DL, VA, Flags);
    return getStackValueStoreTypeHack(VA);
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        const CCValAssign &VA) override {
    MIB.addUse(PhysReg, RegState::Implicit);
    Register ExtReg = extendRegister(ValVReg, VA);
    MIRBuilder.buildCopy(PhysReg, ExtReg);
  }

  void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
                            const MachinePointerInfo &MPO,
                            const CCValAssign &VA) override {
    MachineFunction &MF = MIRBuilder.getMF();
    auto MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, MemTy,
                                       inferAlignFromPtrInfo(MF, MPO));
    MIRBuilder.buildStore(ValVReg, Addr, *MMO);
  }

  void assignValueToAddress(const CallLowering::ArgInfo &Arg, unsigned RegIndex,
                            Register Addr, LLT MemTy,
                            const MachinePointerInfo &MPO,
                            const CCValAssign &VA) override {
    unsigned MaxSize = MemTy.getSizeInBytes() * 8;
    // For varargs, we always want to extend them to 8 bytes, in which case
    // we disable setting a max.
    if (!Arg.IsFixed)
      MaxSize = 0;

    Register ValVReg = Arg.Regs[RegIndex];
    if (VA.getLocInfo() != CCValAssign::LocInfo::FPExt) {
      MVT LocVT = VA.getLocVT();
      MVT ValVT = VA.getValVT();

      if (VA.getValVT() == MVT::i8 || VA.getValVT() == MVT::i16) {
        std::swap(ValVT, LocVT);
        MemTy = LLT(VA.getValVT());
      }

      ValVReg = extendRegister(ValVReg, VA, MaxSize);
    } else {
      // The store does not cover the full allocated stack slot.
      MemTy = LLT(VA.getValVT());
    }

    assignValueToAddress(ValVReg, Addr, MemTy, MPO, VA);
  }

  MachineInstrBuilder MIB;

  bool IsTailCall;

  /// For tail calls, the byte offset of the call's argument area from the
  /// callee's. Unused elsewhere.
  int FPDiff;

  // Cache the SP register vreg if we need it more than once in this call site.
  Register SPReg;

  const AArch64Subtarget &Subtarget;
};
} // namespace
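
// In OutgoingArgHandler::getStackAddress above, tail calls bias Offset by
// FPDiff so stores land in fixed frame-index objects inside the caller's own
// incoming-argument area, while normal calls address the slot off SP.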
static bool doesCalleeRestoreStack(CallingConv::ID CallConv, bool TailCallOpt) {
  return (CallConv == CallingConv::Fast && TailCallOpt) ||
         CallConv == CallingConv::Tail || CallConv == CallingConv::SwiftTail;
}
bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                      const Value *Val,
                                      ArrayRef<Register> VRegs,
                                      FunctionLoweringInfo &FLI,
                                      Register SwiftErrorVReg) const {
  auto MIB = MIRBuilder.buildInstrNoInsert(AArch64::RET_ReallyLR);
  assert(((Val && !VRegs.empty()) || (!Val && VRegs.empty())) &&
         "Return value without a vreg");

  bool Success = true;
  if (!FLI.CanLowerReturn) {
    insertSRetStores(MIRBuilder, Val->getType(), VRegs, FLI.DemoteRegister);
  } else if (!VRegs.empty()) {
    MachineFunction &MF = MIRBuilder.getMF();
    const Function &F = MF.getFunction();
    const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();

    MachineRegisterInfo &MRI = MF.getRegInfo();
    const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
    CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(F.getCallingConv());
    auto &DL = F.getParent()->getDataLayout();
    LLVMContext &Ctx = Val->getType()->getContext();

    SmallVector<EVT, 4> SplitEVTs;
    ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs);
    assert(VRegs.size() == SplitEVTs.size() &&
           "For each split Type there should be exactly one VReg.");

    SmallVector<ArgInfo, 8> SplitArgs;
    CallingConv::ID CC = F.getCallingConv();

    for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
      Register CurVReg = VRegs[i];
      ArgInfo CurArgInfo = ArgInfo{CurVReg, SplitEVTs[i].getTypeForEVT(Ctx), 0};
      setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);

      // i1 is a special case because SDAG i1 true is naturally zero extended
      // when widened using ANYEXT. We need to do it explicitly here.
      auto &Flags = CurArgInfo.Flags[0];
      if (MRI.getType(CurVReg).getSizeInBits() == 1 && !Flags.isSExt() &&
          !Flags.isZExt()) {
        CurVReg = MIRBuilder.buildZExt(LLT::scalar(8), CurVReg).getReg(0);
      } else if (TLI.getNumRegistersForCallingConv(Ctx, CC, SplitEVTs[i]) ==
                 1) {
        // Some types will need extending as specified by the CC.
        MVT NewVT = TLI.getRegisterTypeForCallingConv(Ctx, CC, SplitEVTs[i]);
        if (EVT(NewVT) != SplitEVTs[i]) {
          unsigned ExtendOp = TargetOpcode::G_ANYEXT;
          if (F.getAttributes().hasRetAttr(Attribute::SExt))
            ExtendOp = TargetOpcode::G_SEXT;
          else if (F.getAttributes().hasRetAttr(Attribute::ZExt))
            ExtendOp = TargetOpcode::G_ZEXT;

          LLT NewLLT(NewVT);
          LLT OldLLT(MVT::getVT(CurArgInfo.Ty));
          CurArgInfo.Ty = EVT(NewVT).getTypeForEVT(Ctx);
          // Instead of an extend, we might have a vector type which needs
          // padding with more elements, e.g. <2 x half> -> <4 x half>.
          if (NewVT.isVector()) {
            if (OldLLT.isVector()) {
              if (NewLLT.getNumElements() > OldLLT.getNumElements()) {
                CurVReg =
                    MIRBuilder.buildPadVectorWithUndefElements(NewLLT, CurVReg)
                        .getReg(0);
              } else {
                // Just do a vector extend.
                CurVReg = MIRBuilder.buildInstr(ExtendOp, {NewLLT}, {CurVReg})
                              .getReg(0);
              }
            } else if (NewLLT.getNumElements() >= 2 &&
                       NewLLT.getNumElements() <= 8) {
              // We need to pad a <1 x S> type to <2/4/8 x S>. Since we don't
              // have <1 x S> vector types in GISel we use a build_vector
              // instead of a vector merge/concat.
              CurVReg =
                  MIRBuilder.buildPadVectorWithUndefElements(NewLLT, CurVReg)
                      .getReg(0);
            } else {
              LLVM_DEBUG(dbgs() << "Could not handle ret ty\n");
              return false;
            }
          } else {
            // If the split EVT was a <1 x T> vector, and NewVT is T, then we
            // don't have to do anything since we don't distinguish between the
            // two.
            if (NewLLT != MRI.getType(CurVReg)) {
              // A scalar extend.
              CurVReg = MIRBuilder.buildInstr(ExtendOp, {NewLLT}, {CurVReg})
                            .getReg(0);
            }
          }
        }
      }
      if (CurVReg != CurArgInfo.Regs[0]) {
        CurArgInfo.Regs[0] = CurVReg;
        // Reset the arg flags after modifying CurVReg.
        setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
      }
      splitToValueTypes(CurArgInfo, SplitArgs, DL, CC);
    }

    AArch64OutgoingValueAssigner Assigner(AssignFn, AssignFn, Subtarget,
                                          /*IsReturn*/ true);
    OutgoingArgHandler Handler(MIRBuilder, MRI, MIB);
    Success = determineAndHandleAssignments(Handler, Assigner, SplitArgs,
                                            MIRBuilder, CC, F.isVarArg());
  }

  if (SwiftErrorVReg) {
    MIB.addUse(AArch64::X21, RegState::Implicit);
    MIRBuilder.buildCopy(AArch64::X21, SwiftErrorVReg);
  }

  MIRBuilder.insertInstr(MIB);
  return Success;
}
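
// Example of the i1 path above: a function returning i1 without zeroext or
// signext gets an explicit G_ZEXT to s8 before assignment, since GlobalISel,
// unlike SDAG's ANYEXT widening, does not zero extend i1 true implicitly.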
bool AArch64CallLowering::canLowerReturn(MachineFunction &MF,
                                         CallingConv::ID CallConv,
                                         SmallVectorImpl<BaseArgInfo> &Outs,
                                         bool IsVarArg) const {
  SmallVector<CCValAssign, 16> ArgLocs;
  const auto &TLI = *getTLI<AArch64TargetLowering>();
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs,
                 MF.getFunction().getContext());

  return checkReturn(CCInfo, Outs, TLI.CCAssignFnForReturn(CallConv));
}
/// Helper function to compute forwarded registers for musttail calls. Computes
/// the forwarded registers, sets MBB liveness, and emits COPY instructions that
/// can be used to save + restore registers later.
static void handleMustTailForwardedRegisters(MachineIRBuilder &MIRBuilder,
                                             CCAssignFn *AssignFn) {
  MachineBasicBlock &MBB = MIRBuilder.getMBB();
  MachineFunction &MF = MIRBuilder.getMF();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  if (!MFI.hasMustTailInVarArgFunc())
    return;

  AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
  const Function &F = MF.getFunction();
  assert(F.isVarArg() && "Expected F to be vararg?");

  // Compute the set of forwarded registers. The rest are scratch.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), /*IsVarArg=*/true, MF, ArgLocs,
                 F.getContext());
  SmallVector<MVT, 2> RegParmTypes;
  RegParmTypes.push_back(MVT::i64);
  RegParmTypes.push_back(MVT::f128);

  // Later on, we can use this vector to restore the registers if necessary.
  SmallVectorImpl<ForwardedRegister> &Forwards =
      FuncInfo->getForwardedMustTailRegParms();
  CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, AssignFn);

  // Conservatively forward X8, since it might be used for an aggregate
  // return.
  if (!CCInfo.isAllocated(AArch64::X8)) {
    Register X8VReg = MF.addLiveIn(AArch64::X8, &AArch64::GPR64RegClass);
    Forwards.push_back(ForwardedRegister(X8VReg, AArch64::X8, MVT::i64));
  }

  // Add the forwards to the MachineBasicBlock and MachineFunction.
  for (const auto &F : Forwards) {
    MBB.addLiveIn(F.PReg);
    MIRBuilder.buildCopy(Register(F.VReg), Register(F.PReg));
  }
}
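
// X8 is forwarded even when no formal argument uses it because a musttail
// callee may still expect the aggregate-return (sret) pointer register to be
// live, as the conservative check above notes.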
bool AArch64CallLowering::fallBackToDAGISel(const MachineFunction &MF) const {
  auto &F = MF.getFunction();
  if (F.getReturnType()->isScalableTy() ||
      llvm::any_of(F.args(), [](const Argument &A) {
        return A.getType()->isScalableTy();
      }))
    return true;
  const auto &ST = MF.getSubtarget<AArch64Subtarget>();
  if (!ST.hasNEON() || !ST.hasFPARMv8()) {
    LLVM_DEBUG(dbgs() << "Falling back to SDAG because we don't support no-NEON\n");
    return true;
  }

  SMEAttrs Attrs(F);
  if (Attrs.hasZAState() || Attrs.hasStreamingInterfaceOrBody() ||
      Attrs.hasStreamingCompatibleInterface())
    return true;

  return false;
}
void AArch64CallLowering::saveVarArgRegisters(
    MachineIRBuilder &MIRBuilder, CallLowering::IncomingValueHandler &Handler,
    CCState &CCInfo) const {
  auto GPRArgRegs = AArch64::getGPRArgRegs();
  auto FPRArgRegs = AArch64::getFPRArgRegs();

  MachineFunction &MF = MIRBuilder.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
  auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  bool IsWin64CC =
      Subtarget.isCallingConvWin64(CCInfo.getCallingConv());
  const LLT p0 = LLT::pointer(0, 64);
  const LLT s64 = LLT::scalar(64);

  unsigned FirstVariadicGPR = CCInfo.getFirstUnallocated(GPRArgRegs);
  unsigned NumVariadicGPRArgRegs = GPRArgRegs.size() - FirstVariadicGPR + 1;

  unsigned GPRSaveSize = 8 * (GPRArgRegs.size() - FirstVariadicGPR);
  int GPRIdx = 0;
  if (GPRSaveSize != 0) {
    if (IsWin64CC) {
      GPRIdx = MFI.CreateFixedObject(GPRSaveSize,
                                     -static_cast<int>(GPRSaveSize), false);
      if (GPRSaveSize & 15)
        // The extra size here, if triggered, will always be 8.
        MFI.CreateFixedObject(16 - (GPRSaveSize & 15),
                              -static_cast<int>(alignTo(GPRSaveSize, 16)),
                              false);
    } else
      GPRIdx = MFI.CreateStackObject(GPRSaveSize, Align(8), false);

    auto FIN = MIRBuilder.buildFrameIndex(p0, GPRIdx);
    auto Offset =
        MIRBuilder.buildConstant(MRI.createGenericVirtualRegister(s64), 8);

    for (unsigned i = FirstVariadicGPR; i < GPRArgRegs.size(); ++i) {
      Register Val = MRI.createGenericVirtualRegister(s64);
      Handler.assignValueToReg(
          Val, GPRArgRegs[i],
          CCValAssign::getReg(i + MF.getFunction().getNumOperands(), MVT::i64,
                              GPRArgRegs[i], MVT::i64, CCValAssign::Full));
      auto MPO = IsWin64CC ? MachinePointerInfo::getFixedStack(
                                 MF, GPRIdx, (i - FirstVariadicGPR) * 8)
                           : MachinePointerInfo::getStack(MF, i * 8);
      MIRBuilder.buildStore(Val, FIN, MPO, inferAlignFromPtrInfo(MF, MPO));

      FIN = MIRBuilder.buildPtrAdd(MRI.createGenericVirtualRegister(p0),
                                   FIN.getReg(0), Offset);
    }
  }
  FuncInfo->setVarArgsGPRIndex(GPRIdx);
  FuncInfo->setVarArgsGPRSize(GPRSaveSize);

  if (Subtarget.hasFPARMv8() && !IsWin64CC) {
    unsigned FirstVariadicFPR = CCInfo.getFirstUnallocated(FPRArgRegs);

    unsigned FPRSaveSize = 16 * (FPRArgRegs.size() - FirstVariadicFPR);
    int FPRIdx = 0;
    if (FPRSaveSize != 0) {
      FPRIdx = MFI.CreateStackObject(FPRSaveSize, Align(16), false);

      auto FIN = MIRBuilder.buildFrameIndex(p0, FPRIdx);
      auto Offset =
          MIRBuilder.buildConstant(MRI.createGenericVirtualRegister(s64), 16);

      for (unsigned i = FirstVariadicFPR; i < FPRArgRegs.size(); ++i) {
        Register Val = MRI.createGenericVirtualRegister(LLT::scalar(128));
        Handler.assignValueToReg(
            Val, FPRArgRegs[i],
            CCValAssign::getReg(
                i + MF.getFunction().getNumOperands() + NumVariadicGPRArgRegs,
                MVT::f128, FPRArgRegs[i], MVT::f128, CCValAssign::Full));

        auto MPO = MachinePointerInfo::getStack(MF, i * 16);
        MIRBuilder.buildStore(Val, FIN, MPO, inferAlignFromPtrInfo(MF, MPO));

        FIN = MIRBuilder.buildPtrAdd(MRI.createGenericVirtualRegister(p0),
                                     FIN.getReg(0), Offset);
      }
    }
    FuncInfo->setVarArgsFPRIndex(FPRIdx);
    FuncInfo->setVarArgsFPRSize(FPRSaveSize);
  }
}
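
// Layout note: under a Win64 calling convention the GPR save area is a fixed
// object at a negative offset so va_start can locate it relative to the
// frame, while for AAPCS varargs both save areas are plain stack objects.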
bool AArch64CallLowering::lowerFormalArguments(
    MachineIRBuilder &MIRBuilder, const Function &F,
    ArrayRef<ArrayRef<Register>> VRegs, FunctionLoweringInfo &FLI) const {
  MachineFunction &MF = MIRBuilder.getMF();
  MachineBasicBlock &MBB = MIRBuilder.getMBB();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  auto &DL = F.getParent()->getDataLayout();
  auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  // TODO: Support Arm64EC
  bool IsWin64 = Subtarget.isCallingConvWin64(F.getCallingConv()) &&
                 !Subtarget.isWindowsArm64EC();

  SmallVector<ArgInfo, 8> SplitArgs;
  SmallVector<std::pair<Register, Register>> BoolArgs;

  // Insert the hidden sret parameter if the return value won't fit in the
  // return registers.
  if (!FLI.CanLowerReturn)
    insertSRetIncomingArgument(F, SplitArgs, FLI.DemoteRegister, MRI, DL);

  unsigned i = 0;
  for (auto &Arg : F.args()) {
    if (DL.getTypeStoreSize(Arg.getType()).isZero())
      continue;

    ArgInfo OrigArg{VRegs[i], Arg, i};
    setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, F);

    // i1 arguments are zero-extended to i8 by the caller. Emit a
    // hint to reflect this.
    if (OrigArg.Ty->isIntegerTy(1)) {
      assert(OrigArg.Regs.size() == 1 &&
             MRI.getType(OrigArg.Regs[0]).getSizeInBits() == 1 &&
             "Unexpected registers used for i1 arg");

      auto &Flags = OrigArg.Flags[0];
      if (!Flags.isZExt() && !Flags.isSExt()) {
        // Lower i1 argument as i8, and insert AssertZExt + Trunc later.
        Register OrigReg = OrigArg.Regs[0];
        Register WideReg = MRI.createGenericVirtualRegister(LLT::scalar(8));
        OrigArg.Regs[0] = WideReg;
        BoolArgs.push_back({OrigReg, WideReg});
      }
    }

    if (Arg.hasAttribute(Attribute::SwiftAsync))
      MF.getInfo<AArch64FunctionInfo>()->setHasSwiftAsyncContext(true);

    splitToValueTypes(OrigArg, SplitArgs, DL, F.getCallingConv());
    ++i;
  }

  if (!MBB.empty())
    MIRBuilder.setInstr(*MBB.begin());

  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
  CCAssignFn *AssignFn =
      TLI.CCAssignFnForCall(F.getCallingConv(), IsWin64 && F.isVarArg());

  AArch64IncomingValueAssigner Assigner(AssignFn, AssignFn);
  FormalArgHandler Handler(MIRBuilder, MRI);
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());
  if (!determineAssignments(Assigner, SplitArgs, CCInfo) ||
      !handleAssignments(Handler, SplitArgs, CCInfo, ArgLocs, MIRBuilder))
    return false;

  if (!BoolArgs.empty()) {
    for (auto &KV : BoolArgs) {
      Register OrigReg = KV.first;
      Register WideReg = KV.second;
      LLT WideTy = MRI.getType(WideReg);
      assert(MRI.getType(OrigReg).getScalarSizeInBits() == 1 &&
             "Unexpected bit size of a bool arg");
      MIRBuilder.buildTrunc(
          OrigReg, MIRBuilder.buildAssertZExt(WideTy, WideReg, 1).getReg(0));
    }
  }

  AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
  uint64_t StackSize = Assigner.StackSize;
  if (F.isVarArg()) {
    if ((!Subtarget.isTargetDarwin() && !Subtarget.isWindowsArm64EC()) || IsWin64) {
      // The AAPCS variadic function ABI is identical to the non-variadic
      // one. As a result there may be more arguments in registers and we should
      // save them for future reference.
      // Win64 variadic functions also pass arguments in registers, but all
      // float arguments are passed in integer registers.
      saveVarArgRegisters(MIRBuilder, Handler, CCInfo);
    } else if (Subtarget.isWindowsArm64EC()) {
      return false;
    }

    // We currently pass all varargs at 8-byte alignment, or 4 in ILP32.
    StackSize = alignTo(Assigner.StackSize, Subtarget.isTargetILP32() ? 4 : 8);

    auto &MFI = MIRBuilder.getMF().getFrameInfo();
    FuncInfo->setVarArgsStackIndex(MFI.CreateFixedObject(4, StackSize, true));
  }

  if (doesCalleeRestoreStack(F.getCallingConv(),
                             MF.getTarget().Options.GuaranteedTailCallOpt)) {
    // We have a non-standard ABI, so why not make full use of the stack that
    // we're going to pop? It must be aligned to 16 B in any case.
    StackSize = alignTo(StackSize, 16);

    // If we're expected to restore the stack (e.g. fastcc), then we'll be
    // adding a multiple of 16.
    FuncInfo->setArgumentStackToRestore(StackSize);

    // Our own callers will guarantee that the space is free by giving an
    // aligned value to CALLSEQ_START.
  }

  // When we tail call, we need to check if the callee's arguments
  // will fit on the caller's stack. So, whenever we lower formal arguments,
  // we should keep track of this information, since we might lower a tail call
  // in this function later.
  FuncInfo->setBytesInStackArgArea(StackSize);

  if (Subtarget.hasCustomCallingConv())
    Subtarget.getRegisterInfo()->UpdateCustomCalleeSavedRegs(MF);

  handleMustTailForwardedRegisters(MIRBuilder, AssignFn);

  // Move back to the end of the basic block.
  MIRBuilder.setMBB(MBB);

  return true;
}
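
// setBytesInStackArgArea above records the size of this function's incoming
// argument area, which areCalleeOutgoingArgsTailCallable later compares
// against a prospective tail callee's outgoing stack size.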
/// Return true if the calling convention is one that we can guarantee TCO for.
static bool canGuaranteeTCO(CallingConv::ID CC, bool GuaranteeTailCalls) {
  return (CC == CallingConv::Fast && GuaranteeTailCalls) ||
         CC == CallingConv::Tail || CC == CallingConv::SwiftTail;
}
768 static bool mayTailCallThisCC(CallingConv::ID CC
) {
771 case CallingConv::PreserveMost
:
772 case CallingConv::PreserveAll
:
773 case CallingConv::Swift
:
774 case CallingConv::SwiftTail
:
775 case CallingConv::Tail
:
776 case CallingConv::Fast
:
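
// canGuaranteeTCO covers conventions where tail calls are an ABI guarantee
// (fastcc under -tailcallopt, tailcc, swifttailcc); mayTailCallThisCC is the
// broader set for which a sibling call is merely permitted.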
/// Returns a pair containing the fixed CCAssignFn and the vararg CCAssignFn for
/// CC.
static std::pair<CCAssignFn *, CCAssignFn *>
getAssignFnsForCC(CallingConv::ID CC, const AArch64TargetLowering &TLI) {
  return {TLI.CCAssignFnForCall(CC, false), TLI.CCAssignFnForCall(CC, true)};
}
bool AArch64CallLowering::doCallerAndCalleePassArgsTheSameWay(
    CallLoweringInfo &Info, MachineFunction &MF,
    SmallVectorImpl<ArgInfo> &InArgs) const {
  const Function &CallerF = MF.getFunction();
  CallingConv::ID CalleeCC = Info.CallConv;
  CallingConv::ID CallerCC = CallerF.getCallingConv();

  // If the calling conventions match, then everything must be the same.
  if (CalleeCC == CallerCC)
    return true;

  // Check if the caller and callee will handle arguments in the same way.
  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
  CCAssignFn *CalleeAssignFnFixed;
  CCAssignFn *CalleeAssignFnVarArg;
  std::tie(CalleeAssignFnFixed, CalleeAssignFnVarArg) =
      getAssignFnsForCC(CalleeCC, TLI);

  CCAssignFn *CallerAssignFnFixed;
  CCAssignFn *CallerAssignFnVarArg;
  std::tie(CallerAssignFnFixed, CallerAssignFnVarArg) =
      getAssignFnsForCC(CallerCC, TLI);

  AArch64IncomingValueAssigner CalleeAssigner(CalleeAssignFnFixed,
                                              CalleeAssignFnVarArg);
  AArch64IncomingValueAssigner CallerAssigner(CallerAssignFnFixed,
                                              CallerAssignFnVarArg);

  if (!resultsCompatible(Info, MF, InArgs, CalleeAssigner, CallerAssigner))
    return false;

  // Make sure that the caller and callee preserve all of the same registers.
  auto TRI = MF.getSubtarget<AArch64Subtarget>().getRegisterInfo();
  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
  const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
  if (MF.getSubtarget<AArch64Subtarget>().hasCustomCallingConv()) {
    TRI->UpdateCustomCallPreservedMask(MF, &CallerPreserved);
    TRI->UpdateCustomCallPreservedMask(MF, &CalleePreserved);
  }

  return TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved);
}
bool AArch64CallLowering::areCalleeOutgoingArgsTailCallable(
    CallLoweringInfo &Info, MachineFunction &MF,
    SmallVectorImpl<ArgInfo> &OrigOutArgs) const {
  // If there are no outgoing arguments, then we are done.
  if (OrigOutArgs.empty())
    return true;

  const Function &CallerF = MF.getFunction();
  LLVMContext &Ctx = CallerF.getContext();
  CallingConv::ID CalleeCC = Info.CallConv;
  CallingConv::ID CallerCC = CallerF.getCallingConv();
  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();

  CCAssignFn *AssignFnFixed;
  CCAssignFn *AssignFnVarArg;
  std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI);

  // We have outgoing arguments. Make sure that we can tail call with them.
  SmallVector<CCValAssign, 16> OutLocs;
  CCState OutInfo(CalleeCC, false, MF, OutLocs, Ctx);

  AArch64OutgoingValueAssigner CalleeAssigner(AssignFnFixed, AssignFnVarArg,
                                              Subtarget, /*IsReturn*/ false);
  // determineAssignments() may modify argument flags, so make a copy.
  SmallVector<ArgInfo, 8> OutArgs;
  append_range(OutArgs, OrigOutArgs);
  if (!determineAssignments(CalleeAssigner, OutArgs, OutInfo)) {
    LLVM_DEBUG(dbgs() << "... Could not analyze call operands.\n");
    return false;
  }

  // Make sure that they can fit on the caller's stack.
  const AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
  if (OutInfo.getStackSize() > FuncInfo->getBytesInStackArgArea()) {
    LLVM_DEBUG(dbgs() << "... Cannot fit call operands on caller's stack.\n");
    return false;
  }

  // Verify that the parameters in callee-saved registers match.
  // TODO: Port this over to CallLowering as general code once swiftself is
  // supported.
  auto TRI = MF.getSubtarget<AArch64Subtarget>().getRegisterInfo();
  const uint32_t *CallerPreservedMask = TRI->getCallPreservedMask(MF, CallerCC);
  MachineRegisterInfo &MRI = MF.getRegInfo();

  if (Info.IsVarArg) {
    // Be conservative and disallow variadic memory operands to match SDAG's
    // behaviour.
    // FIXME: If the caller's calling convention is C, then we can
    // potentially use its argument area. However, for cases like fastcc,
    // we can't do anything.
    for (unsigned i = 0; i < OutLocs.size(); ++i) {
      auto &ArgLoc = OutLocs[i];
      if (ArgLoc.isRegLoc())
        continue;

      LLVM_DEBUG(
          dbgs()
          << "... Cannot tail call vararg function with stack arguments\n");
      return false;
    }
  }

  return parametersInCSRMatch(MRI, CallerPreservedMask, OutLocs, OutArgs);
}
bool AArch64CallLowering::isEligibleForTailCallOptimization(
    MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info,
    SmallVectorImpl<ArgInfo> &InArgs,
    SmallVectorImpl<ArgInfo> &OutArgs) const {

  // Must pass all target-independent checks in order to tail call optimize.
  if (!Info.IsTailCall)
    return false;

  CallingConv::ID CalleeCC = Info.CallConv;
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &CallerF = MF.getFunction();

  LLVM_DEBUG(dbgs() << "Attempting to lower call as tail call\n");

  if (Info.SwiftErrorVReg) {
    // TODO: We should handle this.
    // Note that this is also handled by the check for no outgoing arguments.
    // Proactively disabling this though, because the swifterror handling in
    // lowerCall inserts a COPY *after* the location of the call.
    LLVM_DEBUG(dbgs() << "... Cannot handle tail calls with swifterror yet.\n");
    return false;
  }

  if (!mayTailCallThisCC(CalleeCC)) {
    LLVM_DEBUG(dbgs() << "... Calling convention cannot be tail called.\n");
    return false;
  }

  // Byval parameters hand the function a pointer directly into the stack area
  // we want to reuse during a tail call. Working around this *is* possible (see
  // X86).
  //
  // FIXME: In AArch64ISelLowering, this isn't worked around. Can/should we try
  // it?
  //
  // On Windows, "inreg" attributes signify non-aggregate indirect returns.
  // In this case, it is necessary to save/restore X0 in the callee. Tail
  // call opt interferes with this. So we disable tail call opt when the
  // caller has an argument with "inreg" attribute.
  //
  // FIXME: Check whether the callee also has an "inreg" argument.
  //
  // When the caller has a swifterror argument, we don't want to tail call
  // because we would have to move into the swifterror register before the
  // tail call.
  if (any_of(CallerF.args(), [](const Argument &A) {
        return A.hasByValAttr() || A.hasInRegAttr() || A.hasSwiftErrorAttr();
      })) {
    LLVM_DEBUG(dbgs() << "... Cannot tail call from callers with byval, "
                         "inreg, or swifterror arguments\n");
    return false;
  }

  // Externally-defined functions with weak linkage should not be
  // tail-called on AArch64 when the OS does not support dynamic
  // pre-emption of symbols, as the AAELF spec requires normal calls
  // to undefined weak functions to be replaced with a NOP or jump to the
  // next instruction. The behaviour of branch instructions in this
  // situation (as used for tail calls) is implementation-defined, so we
  // cannot rely on the linker replacing the tail call with a return.
  if (Info.Callee.isGlobal()) {
    const GlobalValue *GV = Info.Callee.getGlobal();
    const Triple &TT = MF.getTarget().getTargetTriple();
    if (GV->hasExternalWeakLinkage() &&
        (!TT.isOSWindows() || TT.isOSBinFormatELF() ||
         TT.isOSBinFormatMachO())) {
      LLVM_DEBUG(dbgs() << "... Cannot tail call externally-defined function "
                           "with weak linkage for this OS.\n");
      return false;
    }
  }

  // If we have -tailcallopt, then we're done.
  if (canGuaranteeTCO(CalleeCC, MF.getTarget().Options.GuaranteedTailCallOpt))
    return CalleeCC == CallerF.getCallingConv();

  // We don't have -tailcallopt, so we're allowed to change the ABI (sibcall).
  // Try to find cases where we can do that.
  //
  // I want anyone implementing a new calling convention to think long and hard
  // about this assert.
  assert((!Info.IsVarArg || CalleeCC == CallingConv::C) &&
         "Unexpected variadic calling convention");

  // Verify that the incoming and outgoing arguments from the callee are
  // safe to tail call.
  if (!doCallerAndCalleePassArgsTheSameWay(Info, MF, InArgs)) {
    LLVM_DEBUG(
        dbgs()
        << "... Caller and callee have incompatible calling conventions.\n");
    return false;
  }

  if (!areCalleeOutgoingArgsTailCallable(Info, MF, OutArgs))
    return false;

  LLVM_DEBUG(
      dbgs() << "... Call is eligible for tail call optimization.\n");
  return true;
}
static unsigned getCallOpcode(const MachineFunction &CallerF, bool IsIndirect,
                              bool IsTailCall) {
  if (!IsTailCall)
    return IsIndirect ? getBLRCallOpcode(CallerF) : (unsigned)AArch64::BL;

  if (!IsIndirect)
    return AArch64::TCRETURNdi;

  // When BTI is enabled, we need to use TCRETURNriBTI to make sure that we use
  // x16 or x17.
  if (CallerF.getInfo<AArch64FunctionInfo>()->branchTargetEnforcement())
    return AArch64::TCRETURNriBTI;

  return AArch64::TCRETURNri;
}
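
// The TCRETURNri* pseudos take the target in a register; the BTI variant
// constrains it to x16/x17 (the rtcGPR64 class referenced in lowerTailCall
// below) so the indirect branch stays BTI-compatible.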
static const uint32_t *
getMaskForArgs(SmallVectorImpl<AArch64CallLowering::ArgInfo> &OutArgs,
               AArch64CallLowering::CallLoweringInfo &Info,
               const AArch64RegisterInfo &TRI, MachineFunction &MF) {
  const uint32_t *Mask;
  if (!OutArgs.empty() && OutArgs[0].Flags[0].isReturned()) {
    // For 'this' returns, use the X0-preserving mask if applicable
    Mask = TRI.getThisReturnPreservedMask(MF, Info.CallConv);
    if (!Mask) {
      OutArgs[0].Flags[0].setReturned(false);
      Mask = TRI.getCallPreservedMask(MF, Info.CallConv);
    }
  } else {
    Mask = TRI.getCallPreservedMask(MF, Info.CallConv);
  }
  return Mask;
}
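
// Clearing the returned flag when no this-return mask exists stops later code
// from assuming X0 survives the call.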
bool AArch64CallLowering::lowerTailCall(
    MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info,
    SmallVectorImpl<ArgInfo> &OutArgs) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
  AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();

  // True when we're tail calling, but without -tailcallopt.
  bool IsSibCall = !MF.getTarget().Options.GuaranteedTailCallOpt &&
                   Info.CallConv != CallingConv::Tail &&
                   Info.CallConv != CallingConv::SwiftTail;

  // TODO: Right now, regbankselect doesn't know how to handle the rtcGPR64
  // register class. Until we can do that, we should fall back here.
  if (MF.getInfo<AArch64FunctionInfo>()->branchTargetEnforcement()) {
    LLVM_DEBUG(
        dbgs() << "Cannot lower indirect tail calls with BTI enabled yet.\n");
    return false;
  }

  // Find out which ABI gets to decide where things go.
  CallingConv::ID CalleeCC = Info.CallConv;
  CCAssignFn *AssignFnFixed;
  CCAssignFn *AssignFnVarArg;
  std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI);

  MachineInstrBuilder CallSeqStart;
  if (!IsSibCall)
    CallSeqStart = MIRBuilder.buildInstr(AArch64::ADJCALLSTACKDOWN);

  unsigned Opc = getCallOpcode(MF, Info.Callee.isReg(), true);
  auto MIB = MIRBuilder.buildInstrNoInsert(Opc);
  MIB.add(Info.Callee);

  // Byte offset for the tail call. When we are sibcalling, this will always
  // be 0.
  MIB.addImm(0);

  // Tell the call which registers are clobbered.
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  auto TRI = Subtarget.getRegisterInfo();
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CalleeCC);
  if (Subtarget.hasCustomCallingConv())
    TRI->UpdateCustomCallPreservedMask(MF, &Mask);
  MIB.addRegMask(Mask);

  if (Info.CFIType)
    MIB->setCFIType(MF, Info.CFIType->getZExtValue());

  if (TRI->isAnyArgRegReserved(MF))
    TRI->emitReservedArgRegCallError(MF);

  // FPDiff is the byte offset of the call's argument area from the callee's.
  // Stores to callee stack arguments will be placed in FixedStackSlots offset
  // by this amount for a tail call. In a sibling call it must be 0 because the
  // caller will deallocate the entire stack and the callee still expects its
  // arguments to begin at SP+0.
  int FPDiff = 0;

  // This will be 0 for sibcalls, potentially nonzero for tail calls produced
  // by -tailcallopt. For sibcalls, the memory operands for the call are
  // already available in the caller's incoming argument space.
  unsigned NumBytes = 0;
  if (!IsSibCall) {
    // We aren't sibcalling, so we need to compute FPDiff. We need to do this
    // before handling assignments, because FPDiff must be known for memory
    // arguments.
    unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea();
    SmallVector<CCValAssign, 16> OutLocs;
    CCState OutInfo(CalleeCC, false, MF, OutLocs, F.getContext());

    AArch64OutgoingValueAssigner CalleeAssigner(AssignFnFixed, AssignFnVarArg,
                                                Subtarget, /*IsReturn*/ false);
    if (!determineAssignments(CalleeAssigner, OutArgs, OutInfo))
      return false;

    // The callee will pop the argument stack as a tail call. Thus, we must
    // keep it 16-byte aligned.
    NumBytes = alignTo(OutInfo.getStackSize(), 16);

    // FPDiff will be negative if this tail call requires more space than we
    // would automatically have in our incoming argument space. Positive if we
    // actually shrink the stack.
    FPDiff = NumReusableBytes - NumBytes;

    // Update the required reserved area if this is the tail call requiring the
    // most argument stack space.
    if (FPDiff < 0 && FuncInfo->getTailCallReservedStack() < (unsigned)-FPDiff)
      FuncInfo->setTailCallReservedStack(-FPDiff);

    // The stack pointer must be 16-byte aligned at all times it's used for a
    // memory operation, which in practice means at *all* times and in
    // particular across call boundaries. Therefore our own arguments started at
    // a 16-byte aligned SP and the delta applied for the tail call should
    // satisfy the same constraint.
    assert(FPDiff % 16 == 0 && "unaligned stack on tail call");
  }

  const auto &Forwards = FuncInfo->getForwardedMustTailRegParms();

  AArch64OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg,
                                        Subtarget, /*IsReturn*/ false);

  // Do the actual argument marshalling.
  OutgoingArgHandler Handler(MIRBuilder, MRI, MIB,
                             /*IsTailCall*/ true, FPDiff);
  if (!determineAndHandleAssignments(Handler, Assigner, OutArgs, MIRBuilder,
                                     CalleeCC, Info.IsVarArg))
    return false;

  Mask = getMaskForArgs(OutArgs, Info, *TRI, MF);

  if (Info.IsVarArg && Info.IsMustTailCall) {
    // Now we know what's being passed to the function. Add uses to the call for
    // the forwarded registers that we *aren't* passing as parameters. This will
    // preserve the copies we build earlier.
    for (const auto &F : Forwards) {
      Register ForwardedReg = F.PReg;
      // If the register is already passed, or aliases a register which is
      // already being passed, then skip it.
      if (any_of(MIB->uses(), [&ForwardedReg, &TRI](const MachineOperand &Use) {
            if (!Use.isReg())
              return false;
            return TRI->regsOverlap(Use.getReg(), ForwardedReg);
          }))
        continue;

      // We aren't passing it already, so we should add it to the call.
      MIRBuilder.buildCopy(ForwardedReg, Register(F.VReg));
      MIB.addReg(ForwardedReg, RegState::Implicit);
    }
  }

  // If we have -tailcallopt, we need to adjust the stack. We'll do the call
  // sequence start and end here.
  if (!IsSibCall) {
    MIB->getOperand(1).setImm(FPDiff);
    CallSeqStart.addImm(0).addImm(0);
    // End the call sequence *before* emitting the call. Normally, we would
    // tidy the frame up after the call. However, here, we've laid out the
    // parameters so that when SP is reset, they will be in the correct
    // location.
    MIRBuilder.buildInstr(AArch64::ADJCALLSTACKUP).addImm(0).addImm(0);
  }

  // Now we can add the actual call instruction to the correct basic block.
  MIRBuilder.insertInstr(MIB);

  // If Callee is a reg, since it is used by a target specific instruction,
  // it must have a register class matching the constraint of that instruction.
  if (MIB->getOperand(0).isReg())
    constrainOperandRegClass(MF, *TRI, MRI, *MF.getSubtarget().getInstrInfo(),
                             *MF.getSubtarget().getRegBankInfo(), *MIB,
                             MIB->getDesc(), MIB->getOperand(0), 0);

  MF.getFrameInfo().setHasTailCall();
  Info.LoweredTailCall = true;
  return true;
}
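
// Note the ordering in lowerTailCall: for -tailcallopt tail calls the
// ADJCALLSTACKDOWN/ADJCALLSTACKUP pair is emitted *before* the tail-call
// instruction, since no code runs after the branch to tidy the frame.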
bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                    CallLoweringInfo &Info) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  auto &DL = F.getParent()->getDataLayout();
  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
  const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();

  // Arm64EC has extra requirements for varargs calls; bail out for now.
  if (Info.IsVarArg && Subtarget.isWindowsArm64EC())
    return false;

  SmallVector<ArgInfo, 8> OutArgs;
  for (auto &OrigArg : Info.OrigArgs) {
    splitToValueTypes(OrigArg, OutArgs, DL, Info.CallConv);
    // AAPCS requires that we zero-extend i1 to 8 bits by the caller.
    auto &Flags = OrigArg.Flags[0];
    if (OrigArg.Ty->isIntegerTy(1) && !Flags.isSExt() && !Flags.isZExt()) {
      ArgInfo &OutArg = OutArgs.back();
      assert(OutArg.Regs.size() == 1 &&
             MRI.getType(OutArg.Regs[0]).getSizeInBits() == 1 &&
             "Unexpected registers used for i1 arg");

      // We cannot use a ZExt ArgInfo flag here, because it will
      // zero-extend the argument to i32 instead of just i8.
      OutArg.Regs[0] =
          MIRBuilder.buildZExt(LLT::scalar(8), OutArg.Regs[0]).getReg(0);
      LLVMContext &Ctx = MF.getFunction().getContext();
      OutArg.Ty = Type::getInt8Ty(Ctx);
    }
  }

  SmallVector<ArgInfo, 8> InArgs;
  if (!Info.OrigRet.Ty->isVoidTy())
    splitToValueTypes(Info.OrigRet, InArgs, DL, Info.CallConv);

  // If we can lower as a tail call, do that instead.
  bool CanTailCallOpt =
      isEligibleForTailCallOptimization(MIRBuilder, Info, InArgs, OutArgs);

  // We must emit a tail call if we have musttail.
  if (Info.IsMustTailCall && !CanTailCallOpt) {
    // There are types of incoming/outgoing arguments we can't handle yet, so
    // it doesn't make sense to actually die here like in ISelLowering. Instead,
    // fall back to SelectionDAG and let it try to handle this.
    LLVM_DEBUG(dbgs() << "Failed to lower musttail call as tail call\n");
    return false;
  }

  Info.IsTailCall = CanTailCallOpt;
  if (CanTailCallOpt)
    return lowerTailCall(MIRBuilder, Info, OutArgs);

  // Find out which ABI gets to decide where things go.
  CCAssignFn *AssignFnFixed;
  CCAssignFn *AssignFnVarArg;
  std::tie(AssignFnFixed, AssignFnVarArg) =
      getAssignFnsForCC(Info.CallConv, TLI);

  MachineInstrBuilder CallSeqStart;
  CallSeqStart = MIRBuilder.buildInstr(AArch64::ADJCALLSTACKDOWN);

  // Create a temporarily-floating call instruction so we can add the implicit
  // uses of arg registers.

  unsigned Opc = 0;
  // Calls with operand bundle "clang.arc.attachedcall" are special. They should
  // be expanded to the call, directly followed by a special marker sequence and
  // a call to an ObjC library function.
  if (Info.CB && objcarc::hasAttachedCallOpBundle(Info.CB))
    Opc = AArch64::BLR_RVMARKER;
  // A call to a returns twice function like setjmp must be followed by a bti
  // instruction.
  else if (Info.CB && Info.CB->hasFnAttr(Attribute::ReturnsTwice) &&
           !Subtarget.noBTIAtReturnTwice() &&
           MF.getInfo<AArch64FunctionInfo>()->branchTargetEnforcement())
    Opc = AArch64::BLR_BTI;
  else
    Opc = getCallOpcode(MF, Info.Callee.isReg(), false);

  auto MIB = MIRBuilder.buildInstrNoInsert(Opc);
  unsigned CalleeOpNo = 0;

  if (Opc == AArch64::BLR_RVMARKER) {
    // Add a target global address for the retainRV/claimRV runtime function
    // just before the call target.
    Function *ARCFn = *objcarc::getAttachedARCFunction(Info.CB);
    MIB.addGlobalAddress(ARCFn);
    ++CalleeOpNo;
  } else if (Info.CFIType) {
    MIB->setCFIType(MF, Info.CFIType->getZExtValue());
  }

  MIB.add(Info.Callee);

  // Tell the call which registers are clobbered.
  const uint32_t *Mask;
  const auto *TRI = Subtarget.getRegisterInfo();

  AArch64OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg,
                                        Subtarget, /*IsReturn*/ false);
  // Do the actual argument marshalling.
  OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, /*IsReturn*/ false);
  if (!determineAndHandleAssignments(Handler, Assigner, OutArgs, MIRBuilder,
                                     Info.CallConv, Info.IsVarArg))
    return false;

  Mask = getMaskForArgs(OutArgs, Info, *TRI, MF);

  if (MF.getSubtarget<AArch64Subtarget>().hasCustomCallingConv())
    TRI->UpdateCustomCallPreservedMask(MF, &Mask);
  MIB.addRegMask(Mask);

  if (TRI->isAnyArgRegReserved(MF))
    TRI->emitReservedArgRegCallError(MF);

  // Now we can add the actual call instruction to the correct basic block.
  MIRBuilder.insertInstr(MIB);

  uint64_t CalleePopBytes =
      doesCalleeRestoreStack(Info.CallConv,
                             MF.getTarget().Options.GuaranteedTailCallOpt)
          ? alignTo(Assigner.StackSize, 16)
          : 0;

  CallSeqStart.addImm(Assigner.StackSize).addImm(0);
  MIRBuilder.buildInstr(AArch64::ADJCALLSTACKUP)
      .addImm(Assigner.StackSize)
      .addImm(CalleePopBytes);

  // If Callee is a reg, since it is used by a target specific
  // instruction, it must have a register class matching the
  // constraint of that instruction.
  if (MIB->getOperand(CalleeOpNo).isReg())
    constrainOperandRegClass(MF, *TRI, MRI, *Subtarget.getInstrInfo(),
                             *Subtarget.getRegBankInfo(), *MIB, MIB->getDesc(),
                             MIB->getOperand(CalleeOpNo), CalleeOpNo);

  // Finally we can copy the returned value back into its virtual-register. In
  // symmetry with the arguments, the physical register must be an
  // implicit-define of the call instruction.
  if (Info.CanLowerReturn && !Info.OrigRet.Ty->isVoidTy()) {
    CCAssignFn *RetAssignFn = TLI.CCAssignFnForReturn(Info.CallConv);
    CallReturnHandler Handler(MIRBuilder, MRI, MIB);
    bool UsingReturnedArg =
        !OutArgs.empty() && OutArgs[0].Flags[0].isReturned();

    AArch64OutgoingValueAssigner Assigner(RetAssignFn, RetAssignFn, Subtarget,
                                          /*IsReturn*/ false);
    ReturnedArgCallReturnHandler ReturnedArgHandler(MIRBuilder, MRI, MIB);
    if (!determineAndHandleAssignments(
            UsingReturnedArg ? ReturnedArgHandler : Handler, Assigner, InArgs,
            MIRBuilder, Info.CallConv, Info.IsVarArg,
            UsingReturnedArg ? ArrayRef(OutArgs[0].Regs) : std::nullopt))
      return false;
  }

  if (Info.SwiftErrorVReg) {
    MIB.addDef(AArch64::X21, RegState::Implicit);
    MIRBuilder.buildCopy(Info.SwiftErrorVReg, Register(AArch64::X21));
  }

  if (!Info.CanLowerReturn) {
    insertSRetLoads(MIRBuilder, Info.OrigRet.Ty, Info.OrigRet.Regs,
                    Info.DemoteRegister, Info.DemoteStackIndex);
  }
  return true;
}
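
// For BLR_RVMARKER calls above, the retainRV/claimRV runtime function is
// added as an extra leading operand (CalleeOpNo is bumped past it), keeping
// the marker sequence and ObjC runtime call attached to the BLR.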
bool AArch64CallLowering::isTypeIsValidForThisReturn(EVT Ty) const {
  return Ty.getSizeInBits() == 64;
}