//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Builtin calls as LLVM code.
//
//===----------------------------------------------------------------------===//
#include "TargetInfo.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGObjCRuntime.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/Basic/TargetBuiltins.h"
#include "llvm/Intrinsics.h"
#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;
using namespace llvm;
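// Helper that emits a call to the llvm.memory.barrier intrinsic. The five i1
// flags select which orderings the barrier enforces: load-load, load-store,
// store-load, store-store, and whether the barrier also applies to device
// (uncached) memory.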
static void EmitMemoryBarrier(CodeGenFunction &CGF,
                              bool LoadLoad, bool LoadStore,
                              bool StoreLoad, bool StoreStore,
                              bool Device) {
  Value *True = CGF.Builder.getTrue();
  Value *False = CGF.Builder.getFalse();
  Value *C[5] = { LoadLoad ? True : False,
                  LoadStore ? True : False,
                  StoreLoad ? True : False,
                  StoreStore ? True : False,
                  Device ? True : False };
  CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(Intrinsic::memory_barrier),
                         C, C + 5);
}
/// Emit the conversions required to turn the given value into an
/// integer of the given size.
static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
                        QualType T, const llvm::IntegerType *IntType) {
  V = CGF.EmitToMemory(V, T);

  if (V->getType()->isPointerTy())
    return CGF.Builder.CreatePtrToInt(V, IntType);

  assert(V->getType() == IntType);
  return V;
}
static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
                          QualType T, const llvm::Type *ResultType) {
  V = CGF.EmitFromMemory(V, T);

  if (ResultType->isPointerTy())
    return CGF.Builder.CreateIntToPtr(V, ResultType);

  assert(V->getType() == ResultType);
  return V;
}
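// A minimal sketch of the sequence EmitCallWithBarrier produces for an atomic
// builtin (the intrinsic name mangling here is assumed from this LLVM era):
//
//   call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)
//   %old = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %ptr, i32 %val)
//   call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 true)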
// The atomic builtins are also full memory barriers. This is a utility for
// wrapping a call to the builtins with memory barriers.
static Value *EmitCallWithBarrier(CodeGenFunction &CGF, Value *Fn,
                                  Value **ArgBegin, Value **ArgEnd) {
  // FIXME: We need a target hook for whether this applies to device memory or
  // not.
  bool Device = true;

  // Create barriers both before and after the call.
  EmitMemoryBarrier(CGF, true, true, true, true, Device);
  Value *Result = CGF.Builder.CreateCall(Fn, ArgBegin, ArgEnd);
  EmitMemoryBarrier(CGF, true, true, true, true, Device);
  return Result;
}
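// For example, __sync_fetch_and_add(p, v) is routed through EmitBinaryAtomic
// below with Intrinsic::atomic_load_add: the pointer operand is bitcast to a
// pointer-to-iN matching the in-memory size of the operand type, and the value
// operand is converted with EmitToInt before the barrier-wrapped call.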
/// Utility to insert an atomic instruction based on Intrinsic::ID
/// and the expression node.
static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
                               Intrinsic::ID Id, const CallExpr *E) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(T,
                                  E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace =
    cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();

  const llvm::IntegerType *IntType =
    llvm::IntegerType::get(CGF.getLLVMContext(),
                           CGF.getContext().getTypeSize(T));
  const llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

  const llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
  llvm::Value *AtomF = CGF.CGM.getIntrinsic(Id, IntrinsicTypes, 2);

  llvm::Value *Args[2];
  Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  const llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);

  llvm::Value *Result = EmitCallWithBarrier(CGF, AtomF, Args, Args + 2);
  Result = EmitFromInt(CGF, Result, T, ValueType);
  return RValue::get(Result);
}
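// The "post" variant below recovers the op-and-fetch result: the intrinsic
// returns the old value, so e.g. __sync_add_and_fetch re-applies the add to
// the returned value to produce the new one.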
/// Utility to insert an atomic instruction based on Intrinsic::ID and
/// the expression node, where the return value is the result of the
/// operation.
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
                                   Intrinsic::ID Id, const CallExpr *E,
                                   Instruction::BinaryOps Op) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(T,
                                  E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace =
    cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();

  const llvm::IntegerType *IntType =
    llvm::IntegerType::get(CGF.getLLVMContext(),
                           CGF.getContext().getTypeSize(T));
  const llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

  const llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
  llvm::Value *AtomF = CGF.CGM.getIntrinsic(Id, IntrinsicTypes, 2);

  llvm::Value *Args[2];
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  const llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);
  Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);

  llvm::Value *Result = EmitCallWithBarrier(CGF, AtomF, Args, Args + 2);
  Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
  Result = EmitFromInt(CGF, Result, T, ValueType);
  return RValue::get(Result);
}
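// EmitFAbs below emits a libcall rather than an intrinsic; for a double
// argument the result is, roughly, "%abs = call double @fabs(double %v)".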
/// EmitFAbs - Emit a call to fabs/fabsf/fabsl, depending on the type of ValTy,
/// which must be a scalar floating point type.
static Value *EmitFAbs(CodeGenFunction &CGF, Value *V, QualType ValTy) {
  const BuiltinType *ValTyP = ValTy->getAs<BuiltinType>();
  assert(ValTyP && "isn't scalar fp type!");

  const char *FnName;
  switch (ValTyP->getKind()) {
  default: assert(0 && "Isn't a scalar fp type!");
  case BuiltinType::Float:      FnName = "fabsf"; break;
  case BuiltinType::Double:     FnName = "fabs"; break;
  case BuiltinType::LongDouble: FnName = "fabsl"; break;
  }

  // The prototype is something that takes and returns whatever V's type is.
  llvm::FunctionType *FT = llvm::FunctionType::get(V->getType(), V->getType(),
                                                   false);
  llvm::Value *Fn = CGF.CGM.CreateRuntimeFunction(FT, FnName);
  return CGF.Builder.CreateCall(Fn, V, "abs");
}
RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
                                        unsigned BuiltinID, const CallExpr *E) {
  // See if we can constant fold this builtin.  If so, don't emit it at all.
  Expr::EvalResult Result;
  if (E->Evaluate(Result, CGM.getContext()) &&
      !Result.hasSideEffects()) {
    if (Result.Val.isInt())
      return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
                                                Result.Val.getInt()));
    if (Result.Val.isFloat())
      return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
                                               Result.Val.getFloat()));
  }
  switch (BuiltinID) {
  default: break;  // Handle intrinsics and libm functions below.
  case Builtin::BI__builtin___CFStringMakeConstantString:
  case Builtin::BI__builtin___NSStringMakeConstantString:
    return RValue::get(CGM.EmitConstantExpr(E, E->getType(), 0));
  case Builtin::BI__builtin_stdarg_start:
  case Builtin::BI__builtin_va_start:
  case Builtin::BI__builtin_va_end: {
    Value *ArgValue = EmitVAListRef(E->getArg(0));
    const llvm::Type *DestType = Int8PtrTy;
    if (ArgValue->getType() != DestType)
      ArgValue = Builder.CreateBitCast(ArgValue, DestType,
                                       ArgValue->getName().data());

    Intrinsic::ID inst = (BuiltinID == Builtin::BI__builtin_va_end) ?
      Intrinsic::vaend : Intrinsic::vastart;
    return RValue::get(Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue));
  }
  case Builtin::BI__builtin_va_copy: {
    Value *DstPtr = EmitVAListRef(E->getArg(0));
    Value *SrcPtr = EmitVAListRef(E->getArg(1));

    const llvm::Type *Type = Int8PtrTy;

    DstPtr = Builder.CreateBitCast(DstPtr, Type);
    SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
    return RValue::get(Builder.CreateCall2(CGM.getIntrinsic(Intrinsic::vacopy),
                                           DstPtr, SrcPtr));
  }
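  // The __builtin_abs lowering below is branchless: it negates the argument,
  // compares the original against zero, and selects between the two values.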
  case Builtin::BI__builtin_abs: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    Value *NegOp = Builder.CreateNeg(ArgValue, "neg");
    Value *CmpResult =
      Builder.CreateICmpSGE(ArgValue,
                            llvm::Constant::getNullValue(ArgValue->getType()),
                            "abscond");
    Value *Result =
      Builder.CreateSelect(CmpResult, ArgValue, NegOp, "abs");

    return RValue::get(Result);
  }
  case Builtin::BI__builtin_ctz:
  case Builtin::BI__builtin_ctzl:
  case Builtin::BI__builtin_ctzll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    const llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::cttz, &ArgType, 1);

    const llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_clz:
  case Builtin::BI__builtin_clzl:
  case Builtin::BI__builtin_clzll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    const llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctlz, &ArgType, 1);

    const llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_ffs:
  case Builtin::BI__builtin_ffsl:
  case Builtin::BI__builtin_ffsll: {
    // ffs(x) -> x ? cttz(x) + 1 : 0
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    const llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::cttz, &ArgType, 1);

    const llvm::Type *ResultType = ConvertType(E->getType());
    Value *Tmp = Builder.CreateAdd(Builder.CreateCall(F, ArgValue, "tmp"),
                                   llvm::ConstantInt::get(ArgType, 1), "tmp");
    Value *Zero = llvm::Constant::getNullValue(ArgType);
    Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
    Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_parity:
  case Builtin::BI__builtin_parityl:
  case Builtin::BI__builtin_parityll: {
    // parity(x) -> ctpop(x) & 1
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    const llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctpop, &ArgType, 1);

    const llvm::Type *ResultType = ConvertType(E->getType());
    Value *Tmp = Builder.CreateCall(F, ArgValue, "tmp");
    Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1),
                                      "tmp");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_popcount:
  case Builtin::BI__builtin_popcountl:
  case Builtin::BI__builtin_popcountll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    const llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctpop, &ArgType, 1);

    const llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_expect: {
    // FIXME: pass expect through to LLVM
    Value *ArgValue = EmitScalarExpr(E->getArg(0));
    if (E->getArg(1)->HasSideEffects(getContext()))
      (void)EmitScalarExpr(E->getArg(1));
    return RValue::get(ArgValue);
  }
  case Builtin::BI__builtin_bswap32:
  case Builtin::BI__builtin_bswap64: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));
    const llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::bswap, &ArgType, 1);
    return RValue::get(Builder.CreateCall(F, ArgValue, "tmp"));
  }
  case Builtin::BI__builtin_object_size: {
    // We pass this builtin onto the optimizer so that it can
    // figure out the object size in more complex cases.
    const llvm::Type *ResType[] = {
      ConvertType(E->getType())
    };

    // LLVM only supports 0 and 2, make sure that we pass along that
    // as a boolean.
    Value *Ty = EmitScalarExpr(E->getArg(1));
    ConstantInt *CI = dyn_cast<ConstantInt>(Ty);
    assert(CI);
    uint64_t val = CI->getZExtValue();
    CI = ConstantInt::get(Builder.getInt1Ty(), (val & 0x2) >> 1);

    Value *F = CGM.getIntrinsic(Intrinsic::objectsize, ResType, 1);
    return RValue::get(Builder.CreateCall2(F,
                                           EmitScalarExpr(E->getArg(0)),
                                           CI));
  }
  case Builtin::BI__builtin_prefetch: {
    Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
    // FIXME: Technically these constants should be of type 'int', yes?
    RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
      llvm::ConstantInt::get(Int32Ty, 0);
    Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
      llvm::ConstantInt::get(Int32Ty, 3);
    Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
    Value *F = CGM.getIntrinsic(Intrinsic::prefetch, 0, 0);
    return RValue::get(Builder.CreateCall4(F, Address, RW, Locality, Data));
  }
  case Builtin::BI__builtin_trap: {
    Value *F = CGM.getIntrinsic(Intrinsic::trap, 0, 0);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__builtin_unreachable: {
    if (CatchUndefined)
      EmitBranch(getTrapBB());
    else
      Builder.CreateUnreachable();

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("unreachable.cont"));

    return RValue::get(0);
  }
  case Builtin::BI__builtin_powi:
  case Builtin::BI__builtin_powif:
  case Builtin::BI__builtin_powil: {
    Value *Base = EmitScalarExpr(E->getArg(0));
    Value *Exponent = EmitScalarExpr(E->getArg(1));
    const llvm::Type *ArgType = Base->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::powi, &ArgType, 1);
    return RValue::get(Builder.CreateCall2(F, Base, Exponent, "tmp"));
  }
  case Builtin::BI__builtin_isgreater:
  case Builtin::BI__builtin_isgreaterequal:
  case Builtin::BI__builtin_isless:
  case Builtin::BI__builtin_islessequal:
  case Builtin::BI__builtin_islessgreater:
  case Builtin::BI__builtin_isunordered: {
    // Ordered comparisons: we know the arguments to these are matching scalar
    // floating point values.
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));

    switch (BuiltinID) {
    default: assert(0 && "Unknown ordered comparison");
    case Builtin::BI__builtin_isgreater:
      LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isgreaterequal:
      LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isless:
      LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_islessequal:
      LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_islessgreater:
      LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isunordered:
      LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
      break;
    }
    // ZExt bool to int type.
    return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType()),
                                          "tmp"));
  }
  case Builtin::BI__builtin_isnan: {
    Value *V = EmitScalarExpr(E->getArg(0));
    V = Builder.CreateFCmpUNO(V, V, "cmp");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()), "tmp"));
  }
  case Builtin::BI__builtin_isinf: {
    // isinf(x) --> fabs(x) == infinity
    Value *V = EmitScalarExpr(E->getArg(0));
    V = EmitFAbs(*this, V, E->getArg(0)->getType());

    V = Builder.CreateFCmpOEQ(V, ConstantFP::getInfinity(V->getType()),"isinf");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()), "tmp"));
  }
  // TODO: BI__builtin_isinf_sign
  //   isinf_sign(x) -> isinf(x) ? (signbit(x) ? -1 : 1) : 0
  case Builtin::BI__builtin_isnormal: {
    // isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min
    Value *V = EmitScalarExpr(E->getArg(0));
    Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");

    Value *Abs = EmitFAbs(*this, V, E->getArg(0)->getType());
    Value *IsLessThanInf =
      Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()),"isinf");
    APFloat Smallest = APFloat::getSmallestNormalized(
                   getContext().getFloatTypeSemantics(E->getArg(0)->getType()));
    Value *IsNormal =
      Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest),
                            "isnormal");
    V = Builder.CreateAnd(Eq, IsLessThanInf, "and");
    V = Builder.CreateAnd(V, IsNormal, "and");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }
  case Builtin::BI__builtin_isfinite: {
    // isfinite(x) --> x == x && fabs(x) != infinity
    Value *V = EmitScalarExpr(E->getArg(0));
    Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");

    Value *Abs = EmitFAbs(*this, V, E->getArg(0)->getType());
    Value *IsNotInf =
      Builder.CreateFCmpUNE(Abs, ConstantFP::getInfinity(V->getType()),"isinf");

    V = Builder.CreateAnd(Eq, IsNotInf, "and");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }
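  // fpclassify builds a chain of blocks: test for zero, then NaN, then
  // infinity, then normal vs. subnormal, each test branching to a shared
  // "fpclassify_end" block whose PHI picks up the literal for FP_ZERO,
  // FP_NAN, FP_INFINITE, FP_NORMAL, or FP_SUBNORMAL (arguments 4, 0, 1, 2,
  // and 3 respectively; the value being classified is argument 5).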
  case Builtin::BI__builtin_fpclassify: {
    Value *V = EmitScalarExpr(E->getArg(5));
    const llvm::Type *Ty = ConvertType(E->getArg(5)->getType());

    BasicBlock *Begin = Builder.GetInsertBlock();
    BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
    Builder.SetInsertPoint(End);
    PHINode *Result =
      Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
                        "fpclassify_result");

    // if (V==0) return FP_ZERO
    Builder.SetInsertPoint(Begin);
    Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
                                          "iszero");
    Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
    BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
    Builder.CreateCondBr(IsZero, End, NotZero);
    Result->addIncoming(ZeroLiteral, Begin);

    // if (V != V) return FP_NAN
    Builder.SetInsertPoint(NotZero);
    Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
    Value *NanLiteral = EmitScalarExpr(E->getArg(0));
    BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
    Builder.CreateCondBr(IsNan, End, NotNan);
    Result->addIncoming(NanLiteral, NotZero);

    // if (fabs(V) == infinity) return FP_INFINITY
    Builder.SetInsertPoint(NotNan);
    Value *VAbs = EmitFAbs(*this, V, E->getArg(5)->getType());
    Value *IsInf =
      Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
                            "isinf");
    Value *InfLiteral = EmitScalarExpr(E->getArg(1));
    BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
    Builder.CreateCondBr(IsInf, End, NotInf);
    Result->addIncoming(InfLiteral, NotNan);

    // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
    Builder.SetInsertPoint(NotInf);
    APFloat Smallest = APFloat::getSmallestNormalized(
                   getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
    Value *IsNormal =
      Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
                            "isnormal");
    Value *NormalResult =
      Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
                           EmitScalarExpr(E->getArg(3)));
    Builder.CreateBr(End);
    Result->addIncoming(NormalResult, NotInf);

    Builder.SetInsertPoint(End);
    return RValue::get(Result);
  }
  case Builtin::BIalloca:
  case Builtin::BI__builtin_alloca: {
    Value *Size = EmitScalarExpr(E->getArg(0));
    return RValue::get(Builder.CreateAlloca(Builder.getInt8Ty(), Size, "tmp"));
  }
  case Builtin::BIbzero:
  case Builtin::BI__builtin_bzero: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *SizeVal = EmitScalarExpr(E->getArg(1));
    Builder.CreateMemSet(Address, Builder.getInt8(0), SizeVal, 1, false);
    return RValue::get(Address);
  }
  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memcpy: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *SrcAddr = EmitScalarExpr(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    Builder.CreateMemCpy(Address, SrcAddr, SizeVal, 1, false);
    return RValue::get(Address);
  }
  case Builtin::BI__builtin___memcpy_chk: {
    // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
    if (!E->getArg(2)->isEvaluatable(CGM.getContext()) ||
        !E->getArg(3)->isEvaluatable(CGM.getContext()))
      break;
    llvm::APSInt Size = E->getArg(2)->EvaluateAsInt(CGM.getContext());
    llvm::APSInt DstSize = E->getArg(3)->EvaluateAsInt(CGM.getContext());
    if (Size.ugt(DstSize))
      break;
    Value *Dest = EmitScalarExpr(E->getArg(0));
    Value *Src = EmitScalarExpr(E->getArg(1));
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    Builder.CreateMemCpy(Dest, Src, SizeVal, 1, false);
    return RValue::get(Dest);
  }
  case Builtin::BI__builtin_objc_memmove_collectable: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *SrcAddr = EmitScalarExpr(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
                                                  Address, SrcAddr, SizeVal);
    return RValue::get(Address);
  }
  case Builtin::BI__builtin___memmove_chk: {
    // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
    if (!E->getArg(2)->isEvaluatable(CGM.getContext()) ||
        !E->getArg(3)->isEvaluatable(CGM.getContext()))
      break;
    llvm::APSInt Size = E->getArg(2)->EvaluateAsInt(CGM.getContext());
    llvm::APSInt DstSize = E->getArg(3)->EvaluateAsInt(CGM.getContext());
    if (Size.ugt(DstSize))
      break;
    Value *Dest = EmitScalarExpr(E->getArg(0));
    Value *Src = EmitScalarExpr(E->getArg(1));
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    Builder.CreateMemMove(Dest, Src, SizeVal, 1, false);
    return RValue::get(Dest);
  }
  case Builtin::BImemmove:
  case Builtin::BI__builtin_memmove: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *SrcAddr = EmitScalarExpr(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    Builder.CreateMemMove(Address, SrcAddr, SizeVal, 1, false);
    return RValue::get(Address);
  }
  case Builtin::BImemset:
  case Builtin::BI__builtin_memset: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
                                         Builder.getInt8Ty());
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    Builder.CreateMemSet(Address, ByteVal, SizeVal, 1, false);
    return RValue::get(Address);
  }
  case Builtin::BI__builtin___memset_chk: {
    // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
    if (!E->getArg(2)->isEvaluatable(CGM.getContext()) ||
        !E->getArg(3)->isEvaluatable(CGM.getContext()))
      break;
    llvm::APSInt Size = E->getArg(2)->EvaluateAsInt(CGM.getContext());
    llvm::APSInt DstSize = E->getArg(3)->EvaluateAsInt(CGM.getContext());
    if (Size.ugt(DstSize))
      break;
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
                                         Builder.getInt8Ty());
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    Builder.CreateMemSet(Address, ByteVal, SizeVal, 1, false);

    return RValue::get(Address);
  }
  case Builtin::BI__builtin_dwarf_cfa: {
    // The offset in bytes from the first argument to the CFA.
    //
    // Why on earth is this in the frontend?  Is there any reason at
    // all that the backend can't reasonably determine this while
    // lowering llvm.eh.dwarf.cfa()?
    //
    // TODO: If there's a satisfactory reason, add a target hook for
    // this instead of hard-coding 0, which is correct for most targets.
    int32_t Offset = 0;

    Value *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa, 0, 0);
    return RValue::get(Builder.CreateCall(F,
                                      llvm::ConstantInt::get(Int32Ty, Offset)));
  }
  case Builtin::BI__builtin_return_address: {
    Value *Depth = EmitScalarExpr(E->getArg(0));
    Depth = Builder.CreateIntCast(Depth, Int32Ty, false, "tmp");
    Value *F = CGM.getIntrinsic(Intrinsic::returnaddress, 0, 0);
    return RValue::get(Builder.CreateCall(F, Depth));
  }
  case Builtin::BI__builtin_frame_address: {
    Value *Depth = EmitScalarExpr(E->getArg(0));
    Depth = Builder.CreateIntCast(Depth, Int32Ty, false, "tmp");
    Value *F = CGM.getIntrinsic(Intrinsic::frameaddress, 0, 0);
    return RValue::get(Builder.CreateCall(F, Depth));
  }
  case Builtin::BI__builtin_extract_return_addr: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *Result = getTargetHooks().decodeReturnAddress(*this, Address);
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_frob_return_addr: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *Result = getTargetHooks().encodeReturnAddress(*this, Address);
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_dwarf_sp_column: {
    const llvm::IntegerType *Ty
      = cast<llvm::IntegerType>(ConvertType(E->getType()));
    int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
    if (Column == -1) {
      CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
      return RValue::get(llvm::UndefValue::get(Ty));
    }
    return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
  }
  case Builtin::BI__builtin_init_dwarf_reg_size_table: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
      CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
    return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
  }
  case Builtin::BI__builtin_eh_return: {
    Value *Int = EmitScalarExpr(E->getArg(0));
    Value *Ptr = EmitScalarExpr(E->getArg(1));

    const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
    assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
           "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
    Value *F = CGM.getIntrinsic(IntTy->getBitWidth() == 32
                                ? Intrinsic::eh_return_i32
                                : Intrinsic::eh_return_i64,
                                0, 0);
    Builder.CreateCall2(F, Int, Ptr);
    Builder.CreateUnreachable();

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("builtin_eh_return.cont"));

    return RValue::get(0);
  }
  case Builtin::BI__builtin_unwind_init: {
    Value *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init, 0, 0);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__builtin_extend_pointer: {
    // Extends a pointer to the size of an _Unwind_Word, which is
    // uint64_t on all platforms.  Generally this gets poked into a
    // register and eventually used as an address, so if the
    // addressing registers are wider than pointers and the platform
    // doesn't implicitly ignore high-order bits when doing
    // addressing, we need to make sure we zext / sext based on
    // the platform's expectations.
    //
    // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html

    // Cast the pointer to intptr_t.
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");

    // If that's 64 bits, we're done.
    if (IntPtrTy->getBitWidth() == 64)
      return RValue::get(Result);

    // Otherwise, ask the codegen data what to do.
    if (getTargetHooks().extendPointerWithSExt())
      return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
    return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
  }
  case Builtin::BI__builtin_setjmp: {
    // Buffer is a void**.
    Value *Buf = EmitScalarExpr(E->getArg(0));

    // Store the frame pointer to the setjmp buffer.
    Value *FrameAddr =
      Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress),
                         ConstantInt::get(Int32Ty, 0));
    Builder.CreateStore(FrameAddr, Buf);

    // Store the stack pointer to the setjmp buffer.
    Value *StackAddr =
      Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave));
    Value *StackSaveSlot =
      Builder.CreateGEP(Buf, ConstantInt::get(Int32Ty, 2));
    Builder.CreateStore(StackAddr, StackSaveSlot);

    // Call LLVM's EH setjmp, which is lightweight.
    Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
    Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
    return RValue::get(Builder.CreateCall(F, Buf));
  }
  case Builtin::BI__builtin_longjmp: {
    Value *Buf = EmitScalarExpr(E->getArg(0));
    Buf = Builder.CreateBitCast(Buf, Int8PtrTy);

    // Call LLVM's EH longjmp, which is lightweight.
    Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);

    // longjmp doesn't return; mark this as unreachable.
    Builder.CreateUnreachable();

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("longjmp.cont"));

    return RValue::get(0);
  }
  case Builtin::BI__sync_fetch_and_add:
  case Builtin::BI__sync_fetch_and_sub:
  case Builtin::BI__sync_fetch_and_or:
  case Builtin::BI__sync_fetch_and_and:
  case Builtin::BI__sync_fetch_and_xor:
  case Builtin::BI__sync_add_and_fetch:
  case Builtin::BI__sync_sub_and_fetch:
  case Builtin::BI__sync_and_and_fetch:
  case Builtin::BI__sync_or_and_fetch:
  case Builtin::BI__sync_xor_and_fetch:
  case Builtin::BI__sync_val_compare_and_swap:
  case Builtin::BI__sync_bool_compare_and_swap:
  case Builtin::BI__sync_lock_test_and_set:
  case Builtin::BI__sync_lock_release:
  case Builtin::BI__sync_swap:
    assert(0 && "Shouldn't make it through sema");
  case Builtin::BI__sync_fetch_and_add_1:
  case Builtin::BI__sync_fetch_and_add_2:
  case Builtin::BI__sync_fetch_and_add_4:
  case Builtin::BI__sync_fetch_and_add_8:
  case Builtin::BI__sync_fetch_and_add_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_add, E);
  case Builtin::BI__sync_fetch_and_sub_1:
  case Builtin::BI__sync_fetch_and_sub_2:
  case Builtin::BI__sync_fetch_and_sub_4:
  case Builtin::BI__sync_fetch_and_sub_8:
  case Builtin::BI__sync_fetch_and_sub_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_sub, E);
  case Builtin::BI__sync_fetch_and_or_1:
  case Builtin::BI__sync_fetch_and_or_2:
  case Builtin::BI__sync_fetch_and_or_4:
  case Builtin::BI__sync_fetch_and_or_8:
  case Builtin::BI__sync_fetch_and_or_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_or, E);
  case Builtin::BI__sync_fetch_and_and_1:
  case Builtin::BI__sync_fetch_and_and_2:
  case Builtin::BI__sync_fetch_and_and_4:
  case Builtin::BI__sync_fetch_and_and_8:
  case Builtin::BI__sync_fetch_and_and_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_and, E);
  case Builtin::BI__sync_fetch_and_xor_1:
  case Builtin::BI__sync_fetch_and_xor_2:
  case Builtin::BI__sync_fetch_and_xor_4:
  case Builtin::BI__sync_fetch_and_xor_8:
  case Builtin::BI__sync_fetch_and_xor_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_xor, E);

  // Clang extensions: not overloaded yet.
  case Builtin::BI__sync_fetch_and_min:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_min, E);
  case Builtin::BI__sync_fetch_and_max:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_max, E);
  case Builtin::BI__sync_fetch_and_umin:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_umin, E);
  case Builtin::BI__sync_fetch_and_umax:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_umax, E);
  case Builtin::BI__sync_add_and_fetch_1:
  case Builtin::BI__sync_add_and_fetch_2:
  case Builtin::BI__sync_add_and_fetch_4:
  case Builtin::BI__sync_add_and_fetch_8:
  case Builtin::BI__sync_add_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_add, E,
                                llvm::Instruction::Add);
  case Builtin::BI__sync_sub_and_fetch_1:
  case Builtin::BI__sync_sub_and_fetch_2:
  case Builtin::BI__sync_sub_and_fetch_4:
  case Builtin::BI__sync_sub_and_fetch_8:
  case Builtin::BI__sync_sub_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_sub, E,
                                llvm::Instruction::Sub);
  case Builtin::BI__sync_and_and_fetch_1:
  case Builtin::BI__sync_and_and_fetch_2:
  case Builtin::BI__sync_and_and_fetch_4:
  case Builtin::BI__sync_and_and_fetch_8:
  case Builtin::BI__sync_and_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_and, E,
                                llvm::Instruction::And);
  case Builtin::BI__sync_or_and_fetch_1:
  case Builtin::BI__sync_or_and_fetch_2:
  case Builtin::BI__sync_or_and_fetch_4:
  case Builtin::BI__sync_or_and_fetch_8:
  case Builtin::BI__sync_or_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_or, E,
                                llvm::Instruction::Or);
  case Builtin::BI__sync_xor_and_fetch_1:
  case Builtin::BI__sync_xor_and_fetch_2:
  case Builtin::BI__sync_xor_and_fetch_4:
  case Builtin::BI__sync_xor_and_fetch_8:
  case Builtin::BI__sync_xor_and_fetch_16:
    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_xor, E,
                                llvm::Instruction::Xor);
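  // __sync_val_compare_and_swap returns the value that was in memory before
  // the exchange. A rough sketch of the emitted core for the i32 case
  // (barriers omitted; the intrinsic name mangling is assumed from this era):
  //   %old = call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* %p, i32 %cmp,
  //                                                   i32 %new)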
  case Builtin::BI__sync_val_compare_and_swap_1:
  case Builtin::BI__sync_val_compare_and_swap_2:
  case Builtin::BI__sync_val_compare_and_swap_4:
  case Builtin::BI__sync_val_compare_and_swap_8:
  case Builtin::BI__sync_val_compare_and_swap_16: {
    QualType T = E->getType();
    llvm::Value *DestPtr = EmitScalarExpr(E->getArg(0));
    unsigned AddrSpace =
      cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();

    const llvm::IntegerType *IntType =
      llvm::IntegerType::get(getLLVMContext(),
                             getContext().getTypeSize(T));
    const llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
    const llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
    Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap,
                                    IntrinsicTypes, 2);

    Value *Args[3];
    Args[0] = Builder.CreateBitCast(DestPtr, IntPtrType);
    Args[1] = EmitScalarExpr(E->getArg(1));
    const llvm::Type *ValueType = Args[1]->getType();
    Args[1] = EmitToInt(*this, Args[1], T, IntType);
    Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);

    Value *Result = EmitCallWithBarrier(*this, AtomF, Args, Args + 3);
    Result = EmitFromInt(*this, Result, T, ValueType);
    return RValue::get(Result);
  }
  case Builtin::BI__sync_bool_compare_and_swap_1:
  case Builtin::BI__sync_bool_compare_and_swap_2:
  case Builtin::BI__sync_bool_compare_and_swap_4:
  case Builtin::BI__sync_bool_compare_and_swap_8:
  case Builtin::BI__sync_bool_compare_and_swap_16: {
    QualType T = E->getArg(1)->getType();
    llvm::Value *DestPtr = EmitScalarExpr(E->getArg(0));
    unsigned AddrSpace =
      cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();

    const llvm::IntegerType *IntType =
      llvm::IntegerType::get(getLLVMContext(),
                             getContext().getTypeSize(T));
    const llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
    const llvm::Type *IntrinsicTypes[2] = { IntType, IntPtrType };
    Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap,
                                    IntrinsicTypes, 2);

    Value *Args[3];
    Args[0] = Builder.CreateBitCast(DestPtr, IntPtrType);
    Args[1] = EmitToInt(*this, EmitScalarExpr(E->getArg(1)), T, IntType);
    Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);

    Value *OldVal = Args[1];
    Value *PrevVal = EmitCallWithBarrier(*this, AtomF, Args, Args + 3);
    Value *Result = Builder.CreateICmpEQ(PrevVal, OldVal);
    // zext bool to int.
    Result = Builder.CreateZExt(Result, ConvertType(E->getType()));
    return RValue::get(Result);
  }
  case Builtin::BI__sync_swap_1:
  case Builtin::BI__sync_swap_2:
  case Builtin::BI__sync_swap_4:
  case Builtin::BI__sync_swap_8:
  case Builtin::BI__sync_swap_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_swap, E);

  case Builtin::BI__sync_lock_test_and_set_1:
  case Builtin::BI__sync_lock_test_and_set_2:
  case Builtin::BI__sync_lock_test_and_set_4:
  case Builtin::BI__sync_lock_test_and_set_8:
  case Builtin::BI__sync_lock_test_and_set_16:
    return EmitBinaryAtomic(*this, Intrinsic::atomic_swap, E);
  case Builtin::BI__sync_lock_release_1:
  case Builtin::BI__sync_lock_release_2:
  case Builtin::BI__sync_lock_release_4:
  case Builtin::BI__sync_lock_release_8:
  case Builtin::BI__sync_lock_release_16: {
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    const llvm::Type *ElTy =
      cast<llvm::PointerType>(Ptr->getType())->getElementType();
    llvm::StoreInst *Store =
      Builder.CreateStore(llvm::Constant::getNullValue(ElTy), Ptr);
    Store->setVolatile(true);
    return RValue::get(0);
  }
  case Builtin::BI__sync_synchronize: {
    // We assume like gcc appears to, that this only applies to cached memory.
    EmitMemoryBarrier(*this, true, true, true, true, false);
    return RValue::get(0);
  }
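  // __builtin_llvm_memory_barrier exposes the intrinsic directly: its five
  // arguments map one-to-one onto the load-load, load-store, store-load,
  // store-store, and device flags described above.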
  case Builtin::BI__builtin_llvm_memory_barrier: {
    Value *C[5] = {
      EmitScalarExpr(E->getArg(0)),
      EmitScalarExpr(E->getArg(1)),
      EmitScalarExpr(E->getArg(2)),
      EmitScalarExpr(E->getArg(3)),
      EmitScalarExpr(E->getArg(4))
    };
    Builder.CreateCall(CGM.getIntrinsic(Intrinsic::memory_barrier), C, C + 5);
    return RValue::get(0);
  }
    // Library functions with special handling.
  case Builtin::BIsqrt:
  case Builtin::BIsqrtf:
  case Builtin::BIsqrtl: {
    // TODO: there is currently no set of optimizer flags
    // sufficient for us to rewrite sqrt to @llvm.sqrt.
    // -fmath-errno=0 is not good enough; we need finiteness.
    // We could probably precondition the call with an ult
    // against 0, but is that worth the complexity?
    break;
  }

  case Builtin::BIpow:
  case Builtin::BIpowf:
  case Builtin::BIpowl: {
    // Rewrite pow to intrinsic if allowed.
    if (!FD->hasAttr<ConstAttr>())
      break;
    Value *Base = EmitScalarExpr(E->getArg(0));
    Value *Exponent = EmitScalarExpr(E->getArg(1));
    const llvm::Type *ArgType = Base->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::pow, &ArgType, 1);
    return RValue::get(Builder.CreateCall2(F, Base, Exponent, "tmp"));
  }
  case Builtin::BI__builtin_signbit:
  case Builtin::BI__builtin_signbitf:
  case Builtin::BI__builtin_signbitl: {
    LLVMContext &C = CGM.getLLVMContext();

    Value *Arg = EmitScalarExpr(E->getArg(0));
    const llvm::Type *ArgTy = Arg->getType();
    if (ArgTy->isPPC_FP128Ty())
      break; // FIXME: I'm not sure what the right implementation is here.
    int ArgWidth = ArgTy->getPrimitiveSizeInBits();
    const llvm::Type *ArgIntTy = llvm::IntegerType::get(C, ArgWidth);
    Value *BCArg = Builder.CreateBitCast(Arg, ArgIntTy);
    Value *ZeroCmp = llvm::Constant::getNullValue(ArgIntTy);
    Value *Result = Builder.CreateICmpSLT(BCArg, ZeroCmp);
    return RValue::get(Builder.CreateZExt(Result, ConvertType(E->getType())));
  }
  }
  // If this is an alias for a libm function (e.g. __builtin_sin) turn it into
  // that function.
  if (getContext().BuiltinInfo.isLibFunction(BuiltinID) ||
      getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
    return EmitCall(E->getCallee()->getType(),
                    CGM.getBuiltinLibFunction(FD, BuiltinID),
                    ReturnValueSlot(), E->arg_begin(), E->arg_end(), FD);
  // See if we have a target specific intrinsic.
  const char *Name = getContext().BuiltinInfo.GetName(BuiltinID);
  Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
  if (const char *Prefix =
      llvm::Triple::getArchTypePrefix(Target.getTriple().getArch()))
    IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix, Name);

  if (IntrinsicID != Intrinsic::not_intrinsic) {
    SmallVector<Value*, 16> Args;
    // Find out if any arguments are required to be integer constant
    // expressions.
    unsigned ICEArguments = 0;
    ASTContext::GetBuiltinTypeError Error;
    getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
    assert(Error == ASTContext::GE_None && "Should not codegen an error");
    Function *F = CGM.getIntrinsic(IntrinsicID);
    const llvm::FunctionType *FTy = F->getFunctionType();
    for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
      Value *ArgValue;
      // If this is a normal argument, just emit it as a scalar.
      if ((ICEArguments & (1 << i)) == 0) {
        ArgValue = EmitScalarExpr(E->getArg(i));
      } else {
        // If this is required to be a constant, constant fold it so that we
        // know that the generated intrinsic gets a ConstantInt.
        llvm::APSInt Result;
        bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result,getContext());
        assert(IsConst && "Constant arg isn't actually constant?");
        (void)IsConst;
        ArgValue = llvm::ConstantInt::get(getLLVMContext(), Result);
      }

      // If the intrinsic arg type is different from the builtin arg type
      // we need to do a bit cast.
      const llvm::Type *PTy = FTy->getParamType(i);
      if (PTy != ArgValue->getType()) {
        assert(PTy->canLosslesslyBitCastTo(FTy->getParamType(i)) &&
               "Must be able to losslessly bit cast to param");
        ArgValue = Builder.CreateBitCast(ArgValue, PTy);
      }

      Args.push_back(ArgValue);
    }
    Value *V = Builder.CreateCall(F, Args.data(), Args.data() + Args.size());
    QualType BuiltinRetType = E->getType();

    const llvm::Type *RetTy = llvm::Type::getVoidTy(getLLVMContext());
    if (!BuiltinRetType->isVoidType()) RetTy = ConvertType(BuiltinRetType);

    if (RetTy != V->getType()) {
      assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
             "Must be able to losslessly bit cast result type");
      V = Builder.CreateBitCast(V, RetTy);
    }

    return RValue::get(V);
  }
  // See if we have a target specific builtin that needs to be lowered.
  if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E))
    return RValue::get(V);

  ErrorUnsupported(E, "builtin function");

  // Unknown builtin, for now just dump it out and return undef.
  if (hasAggregateLLVMType(E->getType()))
    return RValue::getAggregate(CreateMemTemp(E->getType()));
  return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
}
Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
                                              const CallExpr *E) {
  switch (Target.getTriple().getArch()) {
  case llvm::Triple::arm:
  case llvm::Triple::thumb:
    return EmitARMBuiltinExpr(BuiltinID, E);
  case llvm::Triple::x86:
  case llvm::Triple::x86_64:
    return EmitX86BuiltinExpr(BuiltinID, E);
  case llvm::Triple::ppc:
  case llvm::Triple::ppc64:
    return EmitPPCBuiltinExpr(BuiltinID, E);
  default:
    return 0;
  }
}
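// GetNeonType decodes the low bits of the NEON type-code operand that the
// overloaded "_v" builtins carry as their last argument: codes 0 and 5 select
// an i8 vector, 1 (and, as reconstructed here, 6 and 7) an i16 vector, 2 an
// i32 vector, 3 an i64 vector, and 4 a float vector; the quad flag doubles
// the lane count.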
static const llvm::VectorType *GetNeonType(LLVMContext &C, unsigned type,
                                           bool q) {
  switch (type) {
    default: break;
    case 0:
    case 5: return llvm::VectorType::get(llvm::Type::getInt8Ty(C), 8 << (int)q);
    case 6:
    case 7:
    case 1: return llvm::VectorType::get(llvm::Type::getInt16Ty(C),4 << (int)q);
    case 2: return llvm::VectorType::get(llvm::Type::getInt32Ty(C),2 << (int)q);
    case 3: return llvm::VectorType::get(llvm::Type::getInt64Ty(C),1 << (int)q);
    case 4: return llvm::VectorType::get(llvm::Type::getFloatTy(C),2 << (int)q);
  }
  return 0;
}
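// EmitNeonSplat broadcasts a single lane: a shufflevector whose mask is the
// constant C in every element replicates lane C of V across the vector.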
Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
  unsigned nElts = cast<llvm::VectorType>(V->getType())->getNumElements();
  SmallVector<Constant*, 16> Indices(nElts, C);
  Value *SV = llvm::ConstantVector::get(Indices);
  return Builder.CreateShuffleVector(V, V, SV, "lane");
}
Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
                                     const char *name,
                                     unsigned shift, bool rightshift) {
  unsigned j = 0;
  for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
       ai != ae; ++ai, ++j)
    if (shift > 0 && shift == j)
      Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift);
    else
      Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);

  return Builder.CreateCall(F, Ops.begin(), Ops.end(), name);
}
Value *CodeGenFunction::EmitNeonShiftVector(Value *V, const llvm::Type *Ty,
                                            bool neg) {
  ConstantInt *CI = cast<ConstantInt>(V);
  int SV = CI->getSExtValue();

  const llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
  llvm::Constant *C = ConstantInt::get(VTy->getElementType(), neg ? -SV : SV);
  SmallVector<llvm::Constant*, 16> CV(VTy->getNumElements(), C);
  return llvm::ConstantVector::get(CV);
}
/// GetPointeeAlignment - Given an expression with a pointer type, find the
/// alignment of the type referenced by the pointer.  Skip over implicit
/// casts.
static Value *GetPointeeAlignment(CodeGenFunction &CGF, const Expr *Addr) {
  unsigned Align = 1;
  // Check if the type is a pointer.  The implicit cast operand might not be.
  while (Addr->getType()->isPointerType()) {
    QualType PtTy = Addr->getType()->getPointeeType();
    unsigned NewA = CGF.getContext().getTypeAlignInChars(PtTy).getQuantity();
    if (NewA > Align)
      Align = NewA;

    // If the address is an implicit cast, repeat with the cast operand.
    if (const ImplicitCastExpr *CastAddr = dyn_cast<ImplicitCastExpr>(Addr)) {
      Addr = CastAddr->getSubExpr();
      continue;
    }
    break;
  }
  return llvm::ConstantInt::get(CGF.Int32Ty, Align);
}
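// The ARM entry point handles a few non-NEON builtins specially
// (__clear_cache, ldrexd, strexd, vcvtr), then decodes the trailing NEON
// type-code argument and dispatches the overloaded "_v" builtins.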
Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E) {
  if (BuiltinID == ARM::BI__clear_cache) {
    const FunctionDecl *FD = E->getDirectCallee();
    // Oddly people write this call without args on occasion and gcc accepts
    // it - it's also marked as varargs in the description file.
    llvm::SmallVector<Value*, 2> Ops;
    for (unsigned i = 0; i < E->getNumArgs(); i++)
      Ops.push_back(EmitScalarExpr(E->getArg(i)));
    const llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
    const llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
    llvm::StringRef Name = FD->getName();
    return Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
                              Ops.begin(), Ops.end());
  }
  if (BuiltinID == ARM::BI__builtin_arm_ldrexd) {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_ldrexd);

    Value *LdPtr = EmitScalarExpr(E->getArg(0));
    Value *Val = Builder.CreateCall(F, LdPtr, "ldrexd");

    Value *Val0 = Builder.CreateExtractValue(Val, 1);
    Value *Val1 = Builder.CreateExtractValue(Val, 0);
    Val0 = Builder.CreateZExt(Val0, Int64Ty);
    Val1 = Builder.CreateZExt(Val1, Int64Ty);

    Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32);
    Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
    return Builder.CreateOr(Val, Val1);
  }
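  // strexd goes the other way: the i64 value is spilled to a temporary,
  // reloaded as a pair of i32 halves, and passed to the intrinsic together
  // with the destination pointer.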
  if (BuiltinID == ARM::BI__builtin_arm_strexd) {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_strexd);
    llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, NULL);

    Value *One = llvm::ConstantInt::get(Int32Ty, 1);
    Value *Tmp = Builder.CreateAlloca(Int64Ty, One, "tmp");
    Value *Val = EmitScalarExpr(E->getArg(0));
    Builder.CreateStore(Val, Tmp);

    Value *LdPtr = Builder.CreateBitCast(Tmp,llvm::PointerType::getUnqual(STy));
    Val = Builder.CreateLoad(LdPtr);

    Value *Arg0 = Builder.CreateExtractValue(Val, 0);
    Value *Arg1 = Builder.CreateExtractValue(Val, 1);
    Value *StPtr = EmitScalarExpr(E->getArg(1));
    return Builder.CreateCall3(F, Arg0, Arg1, StPtr, "strexd");
  }
  llvm::SmallVector<Value*, 4> Ops;
  for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++)
    Ops.push_back(EmitScalarExpr(E->getArg(i)));

  llvm::APSInt Result;
  const Expr *Arg = E->getArg(E->getNumArgs()-1);
  if (!Arg->isIntegerConstantExpr(Result, getContext()))
    return 0;
  if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f ||
      BuiltinID == ARM::BI__builtin_arm_vcvtr_d) {
    // Determine the overloaded type of this builtin.
    const llvm::Type *Ty;
    if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f)
      Ty = llvm::Type::getFloatTy(getLLVMContext());
    else
      Ty = llvm::Type::getDoubleTy(getLLVMContext());

    // Determine whether this is an unsigned conversion or not.
    bool usgn = Result.getZExtValue() == 1;
    unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr;

    // Call the appropriate intrinsic.
    Function *F = CGM.getIntrinsic(Int, &Ty, 1);
    return Builder.CreateCall(F, Ops.begin(), Ops.end(), "vcvtr");
  }
  // Determine the type of this overloaded NEON intrinsic.
  unsigned type = Result.getZExtValue();
  bool usgn = type & 0x08;
  bool quad = type & 0x10;
  bool poly = (type & 0x7) == 5 || (type & 0x7) == 6;
  (void)poly;  // Only used in assert()s.
  bool rightShift = false;

  const llvm::VectorType *VTy = GetNeonType(getLLVMContext(), type & 0x7, quad);
  const llvm::Type *Ty = VTy;
  if (!Ty)
    return 0;

  unsigned Int;
  switch (BuiltinID) {
  default: break;
  case ARM::BI__builtin_neon_vabd_v:
  case ARM::BI__builtin_neon_vabdq_v:
    Int = usgn ? Intrinsic::arm_neon_vabdu : Intrinsic::arm_neon_vabds;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vabd");
  case ARM::BI__builtin_neon_vabs_v:
  case ARM::BI__builtin_neon_vabsq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vabs, &Ty, 1),
                        Ops, "vabs");
  case ARM::BI__builtin_neon_vaddhn_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vaddhn, &Ty, 1),
                        Ops, "vaddhn");
  case ARM::BI__builtin_neon_vcale_v:
    std::swap(Ops[0], Ops[1]);
  case ARM::BI__builtin_neon_vcage_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacged);
    return EmitNeonCall(F, Ops, "vcage");
  }
  case ARM::BI__builtin_neon_vcaleq_v:
    std::swap(Ops[0], Ops[1]);
  case ARM::BI__builtin_neon_vcageq_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgeq);
    return EmitNeonCall(F, Ops, "vcage");
  }
  case ARM::BI__builtin_neon_vcalt_v:
    std::swap(Ops[0], Ops[1]);
  case ARM::BI__builtin_neon_vcagt_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtd);
    return EmitNeonCall(F, Ops, "vcagt");
  }
  case ARM::BI__builtin_neon_vcaltq_v:
    std::swap(Ops[0], Ops[1]);
  case ARM::BI__builtin_neon_vcagtq_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtq);
    return EmitNeonCall(F, Ops, "vcagt");
  }
  case ARM::BI__builtin_neon_vcls_v:
  case ARM::BI__builtin_neon_vclsq_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcls, &Ty, 1);
    return EmitNeonCall(F, Ops, "vcls");
  }
  case ARM::BI__builtin_neon_vclz_v:
  case ARM::BI__builtin_neon_vclzq_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vclz, &Ty, 1);
    return EmitNeonCall(F, Ops, "vclz");
  }
  case ARM::BI__builtin_neon_vcnt_v:
  case ARM::BI__builtin_neon_vcntq_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcnt, &Ty, 1);
    return EmitNeonCall(F, Ops, "vcnt");
  }
  case ARM::BI__builtin_neon_vcvt_f16_v: {
    assert((type & 0x7) == 7 && !quad && "unexpected vcvt_f16_v builtin");
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcvtfp2hf);
    return EmitNeonCall(F, Ops, "vcvt");
  }
  case ARM::BI__builtin_neon_vcvt_f32_f16: {
    assert((type & 0x7) == 7 && !quad && "unexpected vcvt_f32_f16 builtin");
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcvthf2fp);
    return EmitNeonCall(F, Ops, "vcvt");
  }
  case ARM::BI__builtin_neon_vcvt_f32_v:
  case ARM::BI__builtin_neon_vcvtq_f32_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ty = GetNeonType(getLLVMContext(), 4, quad);
    return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
                : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
  }
  case ARM::BI__builtin_neon_vcvt_s32_v:
  case ARM::BI__builtin_neon_vcvt_u32_v:
  case ARM::BI__builtin_neon_vcvtq_s32_v:
  case ARM::BI__builtin_neon_vcvtq_u32_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(getLLVMContext(), 4, quad));
    return usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
                : Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
  }
  case ARM::BI__builtin_neon_vcvt_n_f32_v:
  case ARM::BI__builtin_neon_vcvtq_n_f32_v: {
    const llvm::Type *Tys[2] = { GetNeonType(getLLVMContext(), 4, quad), Ty };
    Int = usgn ? Intrinsic::arm_neon_vcvtfxu2fp : Intrinsic::arm_neon_vcvtfxs2fp;
    Function *F = CGM.getIntrinsic(Int, Tys, 2);
    return EmitNeonCall(F, Ops, "vcvt_n");
  }
  case ARM::BI__builtin_neon_vcvt_n_s32_v:
  case ARM::BI__builtin_neon_vcvt_n_u32_v:
  case ARM::BI__builtin_neon_vcvtq_n_s32_v:
  case ARM::BI__builtin_neon_vcvtq_n_u32_v: {
    const llvm::Type *Tys[2] = { Ty, GetNeonType(getLLVMContext(), 4, quad) };
    Int = usgn ? Intrinsic::arm_neon_vcvtfp2fxu : Intrinsic::arm_neon_vcvtfp2fxs;
    Function *F = CGM.getIntrinsic(Int, Tys, 2);
    return EmitNeonCall(F, Ops, "vcvt_n");
  }
  case ARM::BI__builtin_neon_vext_v:
  case ARM::BI__builtin_neon_vextq_v: {
    int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
    SmallVector<Constant*, 16> Indices;
    for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
      Indices.push_back(ConstantInt::get(Int32Ty, i+CV));

    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Value *SV = llvm::ConstantVector::get(Indices);
    return Builder.CreateShuffleVector(Ops[0], Ops[1], SV, "vext");
  }
  case ARM::BI__builtin_neon_vget_lane_i8:
  case ARM::BI__builtin_neon_vget_lane_i16:
  case ARM::BI__builtin_neon_vget_lane_i32:
  case ARM::BI__builtin_neon_vget_lane_i64:
  case ARM::BI__builtin_neon_vget_lane_f32:
  case ARM::BI__builtin_neon_vgetq_lane_i8:
  case ARM::BI__builtin_neon_vgetq_lane_i16:
  case ARM::BI__builtin_neon_vgetq_lane_i32:
  case ARM::BI__builtin_neon_vgetq_lane_i64:
  case ARM::BI__builtin_neon_vgetq_lane_f32:
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vget_lane");
  case ARM::BI__builtin_neon_vhadd_v:
  case ARM::BI__builtin_neon_vhaddq_v:
    Int = usgn ? Intrinsic::arm_neon_vhaddu : Intrinsic::arm_neon_vhadds;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vhadd");
  case ARM::BI__builtin_neon_vhsub_v:
  case ARM::BI__builtin_neon_vhsubq_v:
    Int = usgn ? Intrinsic::arm_neon_vhsubu : Intrinsic::arm_neon_vhsubs;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vhsub");
  case ARM::BI__builtin_neon_vld1_v:
  case ARM::BI__builtin_neon_vld1q_v:
    Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vld1, &Ty, 1),
                        Ops, "vld1");
  case ARM::BI__builtin_neon_vld1_lane_v:
  case ARM::BI__builtin_neon_vld1q_lane_v:
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ty = llvm::PointerType::getUnqual(VTy->getElementType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[0] = Builder.CreateLoad(Ops[0]);
    return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane");
  case ARM::BI__builtin_neon_vld1_dup_v:
  case ARM::BI__builtin_neon_vld1q_dup_v: {
    Value *V = UndefValue::get(Ty);
    Ty = llvm::PointerType::getUnqual(VTy->getElementType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[0] = Builder.CreateLoad(Ops[0]);
    llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
    Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI);
    return EmitNeonSplat(Ops[0], CI);
  }
  case ARM::BI__builtin_neon_vld2_v:
  case ARM::BI__builtin_neon_vld2q_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2, &Ty, 1);
    Value *Align = GetPointeeAlignment(*this, E->getArg(1));
    Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld2");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vld3_v:
  case ARM::BI__builtin_neon_vld3q_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3, &Ty, 1);
    Value *Align = GetPointeeAlignment(*this, E->getArg(1));
    Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld3");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vld4_v:
  case ARM::BI__builtin_neon_vld4q_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4, &Ty, 1);
    Value *Align = GetPointeeAlignment(*this, E->getArg(1));
    Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld4");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vld2_lane_v:
  case ARM::BI__builtin_neon_vld2q_lane_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2lane, &Ty, 1);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
    Ops.push_back(GetPointeeAlignment(*this, E->getArg(1)));
    Ops[1] = Builder.CreateCall(F, Ops.begin() + 1, Ops.end(), "vld2_lane");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vld3_lane_v:
  case ARM::BI__builtin_neon_vld3q_lane_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3lane, &Ty, 1);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
    Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
    Ops.push_back(GetPointeeAlignment(*this, E->getArg(1)));
    Ops[1] = Builder.CreateCall(F, Ops.begin() + 1, Ops.end(), "vld3_lane");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vld4_lane_v:
  case ARM::BI__builtin_neon_vld4q_lane_v: {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4lane, &Ty, 1);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
    Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
    Ops[5] = Builder.CreateBitCast(Ops[5], Ty);
    Ops.push_back(GetPointeeAlignment(*this, E->getArg(1)));
    Ops[1] = Builder.CreateCall(F, Ops.begin() + 1, Ops.end(), "vld4_lane");
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
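  // vldN_dup: load one element per vector and replicate it to every lane.
  // This is emitted as a lane load (vldNlane into undef vectors) followed by
  // a per-vector splat, except for 64-bit elements where a plain vldN is
  // already equivalent.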
  case ARM::BI__builtin_neon_vld2_dup_v:
  case ARM::BI__builtin_neon_vld3_dup_v:
  case ARM::BI__builtin_neon_vld4_dup_v: {
    // Handle 64-bit elements as a special-case.  There is no "dup" needed.
    if (VTy->getElementType()->getPrimitiveSizeInBits() == 64) {
      switch (BuiltinID) {
      case ARM::BI__builtin_neon_vld2_dup_v:
        Int = Intrinsic::arm_neon_vld2;
        break;
      case ARM::BI__builtin_neon_vld3_dup_v:
        Int = Intrinsic::arm_neon_vld3;
        break;
      case ARM::BI__builtin_neon_vld4_dup_v:
        Int = Intrinsic::arm_neon_vld4;
        break;
      default: assert(0 && "unknown vld_dup intrinsic?");
      }
      Function *F = CGM.getIntrinsic(Int, &Ty, 1);
      Value *Align = GetPointeeAlignment(*this, E->getArg(1));
      Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld_dup");
      Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
      Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
      return Builder.CreateStore(Ops[1], Ops[0]);
    }
    switch (BuiltinID) {
    case ARM::BI__builtin_neon_vld2_dup_v:
      Int = Intrinsic::arm_neon_vld2lane;
      break;
    case ARM::BI__builtin_neon_vld3_dup_v:
      Int = Intrinsic::arm_neon_vld3lane;
      break;
    case ARM::BI__builtin_neon_vld4_dup_v:
      Int = Intrinsic::arm_neon_vld4lane;
      break;
    default: assert(0 && "unknown vld_dup intrinsic?");
    }
    Function *F = CGM.getIntrinsic(Int, &Ty, 1);
    const llvm::StructType *STy = cast<llvm::StructType>(F->getReturnType());

    SmallVector<Value*, 6> Args;
    Args.push_back(Ops[1]);
    Args.append(STy->getNumElements(), UndefValue::get(Ty));

    llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
    Args.push_back(CI);
    Args.push_back(GetPointeeAlignment(*this, E->getArg(1)));

    Ops[1] = Builder.CreateCall(F, Args.begin(), Args.end(), "vld_dup");
    // splat lane 0 to all elts in each vector of the result.
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      Value *Val = Builder.CreateExtractValue(Ops[1], i);
      Value *Elt = Builder.CreateBitCast(Val, Ty);
      Elt = EmitNeonSplat(Elt, CI);
      Elt = Builder.CreateBitCast(Elt, Val->getType());
      Ops[1] = Builder.CreateInsertValue(Ops[1], Elt, i);
    }
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case ARM::BI__builtin_neon_vmax_v:
  case ARM::BI__builtin_neon_vmaxq_v:
    Int = usgn ? Intrinsic::arm_neon_vmaxu : Intrinsic::arm_neon_vmaxs;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmax");
  case ARM::BI__builtin_neon_vmin_v:
  case ARM::BI__builtin_neon_vminq_v:
    Int = usgn ? Intrinsic::arm_neon_vminu : Intrinsic::arm_neon_vmins;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmin");
  case ARM::BI__builtin_neon_vmovl_v: {
    const llvm::Type *DTy = llvm::VectorType::getTruncatedElementVectorType(VTy);
    Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
    if (usgn)
      return Builder.CreateZExt(Ops[0], Ty, "vmovl");
    return Builder.CreateSExt(Ops[0], Ty, "vmovl");
  }
  case ARM::BI__builtin_neon_vmovn_v: {
    const llvm::Type *QTy = llvm::VectorType::getExtendedElementVectorType(VTy);
    Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
    return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
  }
  case ARM::BI__builtin_neon_vmul_v:
  case ARM::BI__builtin_neon_vmulq_v:
    assert(poly && "vmul builtin only supported for polynomial types");
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vmulp, &Ty, 1),
                        Ops, "vmul");
  case ARM::BI__builtin_neon_vmull_v:
    Int = usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
    Int = poly ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vmull");
  case ARM::BI__builtin_neon_vpadal_v:
  case ARM::BI__builtin_neon_vpadalq_v: {
    Int = usgn ? Intrinsic::arm_neon_vpadalu : Intrinsic::arm_neon_vpadals;
    // The source operand type has twice as many elements of half the size.
    unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
    const llvm::Type *EltTy =
      llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
    const llvm::Type *NarrowTy =
      llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
    const llvm::Type *Tys[2] = { Ty, NarrowTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys, 2), Ops, "vpadal");
  }
  case ARM::BI__builtin_neon_vpadd_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vpadd, &Ty, 1),
                        Ops, "vpadd");
  case ARM::BI__builtin_neon_vpaddl_v:
  case ARM::BI__builtin_neon_vpaddlq_v: {
    Int = usgn ? Intrinsic::arm_neon_vpaddlu : Intrinsic::arm_neon_vpaddls;
    // The source operand type has twice as many elements of half the size.
    unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
    const llvm::Type *EltTy =
      llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
    const llvm::Type *NarrowTy =
      llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
    const llvm::Type *Tys[2] = { Ty, NarrowTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys, 2), Ops, "vpaddl");
  }
  case ARM::BI__builtin_neon_vpmax_v:
    Int = usgn ? Intrinsic::arm_neon_vpmaxu : Intrinsic::arm_neon_vpmaxs;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vpmax");
  case ARM::BI__builtin_neon_vpmin_v:
    Int = usgn ? Intrinsic::arm_neon_vpminu : Intrinsic::arm_neon_vpmins;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vpmin");
  case ARM::BI__builtin_neon_vqabs_v:
  case ARM::BI__builtin_neon_vqabsq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqabs, &Ty, 1),
                        Ops, "vqabs");
  case ARM::BI__builtin_neon_vqadd_v:
  case ARM::BI__builtin_neon_vqaddq_v:
    Int = usgn ? Intrinsic::arm_neon_vqaddu : Intrinsic::arm_neon_vqadds;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqadd");
  case ARM::BI__builtin_neon_vqdmlal_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmlal, &Ty, 1),
                        Ops, "vqdmlal");
  case ARM::BI__builtin_neon_vqdmlsl_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmlsl, &Ty, 1),
                        Ops, "vqdmlsl");
  case ARM::BI__builtin_neon_vqdmulh_v:
  case ARM::BI__builtin_neon_vqdmulhq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmulh, &Ty, 1),
                        Ops, "vqdmulh");
  case ARM::BI__builtin_neon_vqdmull_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmull, &Ty, 1),
                        Ops, "vqdmull");
  case ARM::BI__builtin_neon_vqmovn_v:
    Int = usgn ? Intrinsic::arm_neon_vqmovnu : Intrinsic::arm_neon_vqmovns;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqmovn");
  case ARM::BI__builtin_neon_vqmovun_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqmovnsu, &Ty, 1),
                        Ops, "vqmovun");
  case ARM::BI__builtin_neon_vqneg_v:
  case ARM::BI__builtin_neon_vqnegq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqneg, &Ty, 1),
                        Ops, "vqneg");
  case ARM::BI__builtin_neon_vqrdmulh_v:
  case ARM::BI__builtin_neon_vqrdmulhq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrdmulh, &Ty, 1),
                        Ops, "vqrdmulh");
  case ARM::BI__builtin_neon_vqrshl_v:
  case ARM::BI__builtin_neon_vqrshlq_v:
    Int = usgn ? Intrinsic::arm_neon_vqrshiftu : Intrinsic::arm_neon_vqrshifts;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqrshl");
  case ARM::BI__builtin_neon_vqrshrn_n_v:
    Int = usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqrshrn_n",
                        1, true);
  case ARM::BI__builtin_neon_vqrshrun_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, &Ty, 1),
                        Ops, "vqrshrun_n", 1, true);
  case ARM::BI__builtin_neon_vqshl_v:
  case ARM::BI__builtin_neon_vqshlq_v:
    Int = usgn ? Intrinsic::arm_neon_vqshiftu : Intrinsic::arm_neon_vqshifts;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqshl");
  case ARM::BI__builtin_neon_vqshl_n_v:
  case ARM::BI__builtin_neon_vqshlq_n_v:
    Int = usgn ? Intrinsic::arm_neon_vqshiftu : Intrinsic::arm_neon_vqshifts;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqshl_n",
                        1, false);
  case ARM::BI__builtin_neon_vqshlu_n_v:
  case ARM::BI__builtin_neon_vqshluq_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftsu, &Ty, 1),
                        Ops, "vqshlu", 1, false);
  case ARM::BI__builtin_neon_vqshrn_n_v:
    Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqshrn_n",
                        1, true);
  case ARM::BI__builtin_neon_vqshrun_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, &Ty, 1),
                        Ops, "vqshrun_n", 1, true);
  case ARM::BI__builtin_neon_vqsub_v:
  case ARM::BI__builtin_neon_vqsubq_v:
    Int = usgn ? Intrinsic::arm_neon_vqsubu : Intrinsic::arm_neon_vqsubs;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vqsub");
  case ARM::BI__builtin_neon_vraddhn_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vraddhn, &Ty, 1),
                        Ops, "vraddhn");
  case ARM::BI__builtin_neon_vrecpe_v:
  case ARM::BI__builtin_neon_vrecpeq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, &Ty, 1),
                        Ops, "vrecpe");
  case ARM::BI__builtin_neon_vrecps_v:
  case ARM::BI__builtin_neon_vrecpsq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecps, &Ty, 1),
                        Ops, "vrecps");
  case ARM::BI__builtin_neon_vrhadd_v:
  case ARM::BI__builtin_neon_vrhaddq_v:
    Int = usgn ? Intrinsic::arm_neon_vrhaddu : Intrinsic::arm_neon_vrhadds;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vrhadd");
  case ARM::BI__builtin_neon_vrshl_v:
  case ARM::BI__builtin_neon_vrshlq_v:
    Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vrshl");
  case ARM::BI__builtin_neon_vrshrn_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, &Ty, 1),
                        Ops, "vrshrn_n", 1, true);
  case ARM::BI__builtin_neon_vrshr_n_v:
  case ARM::BI__builtin_neon_vrshrq_n_v:
    Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vrshr_n", 1, true);
  case ARM::BI__builtin_neon_vrsqrte_v:
  case ARM::BI__builtin_neon_vrsqrteq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsqrte, &Ty, 1),
                        Ops, "vrsqrte");
  case ARM::BI__builtin_neon_vrsqrts_v:
  case ARM::BI__builtin_neon_vrsqrtsq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsqrts, &Ty, 1),
                        Ops, "vrsqrts");
  case ARM::BI__builtin_neon_vrsra_n_v:
  case ARM::BI__builtin_neon_vrsraq_n_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
    Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
    Ops[1] = Builder.CreateCall2(CGM.getIntrinsic(Int, &Ty, 1), Ops[1], Ops[2]);
    return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
  case ARM::BI__builtin_neon_vrsubhn_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsubhn, &Ty, 1),
                        Ops, "vrsubhn");
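  // Like vget_lane, the vset_lane builtins lower directly to an
  // insertelement with the lane number as index.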
  case ARM::BI__builtin_neon_vset_lane_i8:
  case ARM::BI__builtin_neon_vset_lane_i16:
  case ARM::BI__builtin_neon_vset_lane_i32:
  case ARM::BI__builtin_neon_vset_lane_i64:
  case ARM::BI__builtin_neon_vset_lane_f32:
  case ARM::BI__builtin_neon_vsetq_lane_i8:
  case ARM::BI__builtin_neon_vsetq_lane_i16:
  case ARM::BI__builtin_neon_vsetq_lane_i32:
  case ARM::BI__builtin_neon_vsetq_lane_i64:
  case ARM::BI__builtin_neon_vsetq_lane_f32:
    Ops.push_back(EmitScalarExpr(E->getArg(2)));
    return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
  case ARM::BI__builtin_neon_vshl_v:
  case ARM::BI__builtin_neon_vshlq_v:
    Int = usgn ? Intrinsic::arm_neon_vshiftu : Intrinsic::arm_neon_vshifts;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vshl");
  case ARM::BI__builtin_neon_vshll_n_v:
    Int = usgn ? Intrinsic::arm_neon_vshiftlu : Intrinsic::arm_neon_vshiftls;
    return EmitNeonCall(CGM.getIntrinsic(Int, &Ty, 1), Ops, "vshll", 1);
  case ARM::BI__builtin_neon_vshl_n_v:
  case ARM::BI__builtin_neon_vshlq_n_v:
    Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
    return Builder.CreateShl(Builder.CreateBitCast(Ops[0], Ty), Ops[1],
                             "vshl_n");
  case ARM::BI__builtin_neon_vshrn_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftn, &Ty, 1),
                        Ops, "vshrn_n", 1, true);
  case ARM::BI__builtin_neon_vshr_n_v:
  case ARM::BI__builtin_neon_vshrq_n_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
    if (usgn)
      return Builder.CreateLShr(Ops[0], Ops[1], "vshr_n");
    else
      return Builder.CreateAShr(Ops[0], Ops[1], "vshr_n");
  case ARM::BI__builtin_neon_vsri_n_v:
  case ARM::BI__builtin_neon_vsriq_n_v:
    rightShift = true;
  case ARM::BI__builtin_neon_vsli_n_v:
  case ARM::BI__builtin_neon_vsliq_n_v:
    Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, &Ty, 1),
                        Ops, "vsli_n");
  case ARM::BI__builtin_neon_vsra_n_v:
  case ARM::BI__builtin_neon_vsraq_n_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = EmitNeonShiftVector(Ops[2], Ty, false);
    if (usgn)
      Ops[1] = Builder.CreateLShr(Ops[1], Ops[2], "vsra_n");
    else
      Ops[1] = Builder.CreateAShr(Ops[1], Ops[2], "vsra_n");
    return Builder.CreateAdd(Ops[0], Ops[1]);
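  // The store builtins return void, so the calls below get an empty
  // instruction name; as with the loads, the pointee alignment is passed as
  // a trailing operand.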
  case ARM::BI__builtin_neon_vst1_v:
  case ARM::BI__builtin_neon_vst1q_v:
    Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1, &Ty, 1),
                        Ops, "");
  case ARM::BI__builtin_neon_vst1_lane_v:
  case ARM::BI__builtin_neon_vst1q_lane_v:
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
    Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
    return Builder.CreateStore(Ops[1], Builder.CreateBitCast(Ops[0], Ty));
  case ARM::BI__builtin_neon_vst2_v:
  case ARM::BI__builtin_neon_vst2q_v:
    Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2, &Ty, 1),
                        Ops, "");
  case ARM::BI__builtin_neon_vst2_lane_v:
  case ARM::BI__builtin_neon_vst2q_lane_v:
    Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2lane, &Ty, 1),
                        Ops, "");
  case ARM::BI__builtin_neon_vst3_v:
  case ARM::BI__builtin_neon_vst3q_v:
    Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3, &Ty, 1),
                        Ops, "");
  case ARM::BI__builtin_neon_vst3_lane_v:
  case ARM::BI__builtin_neon_vst3q_lane_v:
    Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3lane, &Ty, 1),
                        Ops, "");
  case ARM::BI__builtin_neon_vst4_v:
  case ARM::BI__builtin_neon_vst4q_v:
    Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4, &Ty, 1),
                        Ops, "");
  case ARM::BI__builtin_neon_vst4_lane_v:
  case ARM::BI__builtin_neon_vst4q_lane_v:
    Ops.push_back(GetPointeeAlignment(*this, E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4lane, &Ty, 1),
                        Ops, "");
  case ARM::BI__builtin_neon_vsubhn_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vsubhn, &Ty, 1),
                        Ops, "vsubhn");
  case ARM::BI__builtin_neon_vtbl1_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
                        Ops, "vtbl1");
  case ARM::BI__builtin_neon_vtbl2_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2),
                        Ops, "vtbl2");
  case ARM::BI__builtin_neon_vtbl3_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3),
                        Ops, "vtbl3");
  case ARM::BI__builtin_neon_vtbl4_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4),
                        Ops, "vtbl4");
  case ARM::BI__builtin_neon_vtbx1_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1),
                        Ops, "vtbx1");
  case ARM::BI__builtin_neon_vtbx2_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2),
                        Ops, "vtbx2");
  case ARM::BI__builtin_neon_vtbx3_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3),
                        Ops, "vtbx3");
  case ARM::BI__builtin_neon_vtbx4_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4),
                        Ops, "vtbx4");
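  // vtst (test bits) has no intrinsic: AND the operands, compare the result
  // against zero, and sign-extend the i1 lanes back to an all-ones/all-zeros
  // mask of the original element width.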
  case ARM::BI__builtin_neon_vtst_v:
  case ARM::BI__builtin_neon_vtstq_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
    Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
                                ConstantAggregateZero::get(Ty));
    return Builder.CreateSExt(Ops[0], Ty, "vtst");
  }
  case ARM::BI__builtin_neon_vtrn_v:
  case ARM::BI__builtin_neon_vtrnq_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Value *SV = 0;

    for (unsigned vi = 0; vi != 2; ++vi) {
      SmallVector<Constant*, 16> Indices;
      for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
        Indices.push_back(ConstantInt::get(Int32Ty, i+vi));
        Indices.push_back(ConstantInt::get(Int32Ty, i+e+vi));
      }
      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
      SV = llvm::ConstantVector::get(Indices);
      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vtrn");
      SV = Builder.CreateStore(SV, Addr);
    }
    return SV;
  }
  case ARM::BI__builtin_neon_vuzp_v:
  case ARM::BI__builtin_neon_vuzpq_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Value *SV = 0;

    for (unsigned vi = 0; vi != 2; ++vi) {
      SmallVector<Constant*, 16> Indices;
      for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
        Indices.push_back(ConstantInt::get(Int32Ty, 2*i+vi));

      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
      SV = llvm::ConstantVector::get(Indices);
      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vuzp");
      SV = Builder.CreateStore(SV, Addr);
    }
    return SV;
  }
  case ARM::BI__builtin_neon_vzip_v:
  case ARM::BI__builtin_neon_vzipq_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Value *SV = 0;

    for (unsigned vi = 0; vi != 2; ++vi) {
      SmallVector<Constant*, 16> Indices;
      for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
        Indices.push_back(ConstantInt::get(Int32Ty, (i + vi*e) >> 1));
        Indices.push_back(ConstantInt::get(Int32Ty, ((i + vi*e) >> 1)+e));
      }
      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
      SV = llvm::ConstantVector::get(Indices);
      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vzip");
      SV = Builder.CreateStore(SV, Addr);
    }
    return SV;
  }
  }
}
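/// BuildVector - Build an LLVM vector from the given operands: a single
/// ConstantVector when every operand is a Constant, otherwise a chain of
/// insertelement instructions starting from undef.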
llvm::Value *CodeGenFunction::
BuildVector(const llvm::SmallVectorImpl<llvm::Value*> &Ops) {
  assert((Ops.size() & (Ops.size() - 1)) == 0 &&
         "Not a power-of-two sized vector!");
  bool AllConstants = true;
  for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i)
    AllConstants &= isa<Constant>(Ops[i]);

  // If this is a constant vector, create a ConstantVector.
  if (AllConstants) {
    std::vector<llvm::Constant*> CstOps;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      CstOps.push_back(cast<Constant>(Ops[i]));
    return llvm::ConstantVector::get(CstOps);
  }

  // Otherwise, insertelement the values to build the vector.
  Value *Result =
    llvm::UndefValue::get(llvm::VectorType::get(Ops[0]->getType(), Ops.size()));

  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    Result = Builder.CreateInsertElement(Result, Ops[i],
      llvm::ConstantInt::get(llvm::Type::getInt32Ty(getLLVMContext()), i));

  return Result;
}
Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E) {
  llvm::SmallVector<Value*, 4> Ops;

  // Find out if any arguments are required to be integer constant expressions.
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
  assert(Error == ASTContext::GE_None && "Should not codegen an error");

  for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
    // If this is a normal argument, just emit it as a scalar.
    if ((ICEArguments & (1 << i)) == 0) {
      Ops.push_back(EmitScalarExpr(E->getArg(i)));
      continue;
    }

    // If this is required to be a constant, constant fold it so that we know
    // that the generated intrinsic gets a ConstantInt.
    llvm::APSInt Result;
    bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
    assert(IsConst && "Constant arg isn't actually constant?"); (void)IsConst;
    Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
  }

  switch (BuiltinID) {
  default: return 0;
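  // Vector shifts by an immediate count: the scalar count is zero-extended
  // to i64 and placed in lane 0 of a vector, since the underlying SSE2
  // shift intrinsics take the count as a vector operand.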
  case X86::BI__builtin_ia32_pslldi128:
  case X86::BI__builtin_ia32_psllqi128:
  case X86::BI__builtin_ia32_psllwi128:
  case X86::BI__builtin_ia32_psradi128:
  case X86::BI__builtin_ia32_psrawi128:
  case X86::BI__builtin_ia32_psrldi128:
  case X86::BI__builtin_ia32_psrlqi128:
  case X86::BI__builtin_ia32_psrlwi128: {
    Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty, "zext");
    const llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 2);
    llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
    Ops[1] = Builder.CreateInsertElement(llvm::UndefValue::get(Ty),
                                         Ops[1], Zero, "insert");
    Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType(), "bitcast");
    const char *name = 0;
    Intrinsic::ID ID = Intrinsic::not_intrinsic;

    switch (BuiltinID) {
    default: assert(0 && "Unsupported shift intrinsic!");
    case X86::BI__builtin_ia32_pslldi128:
      name = "pslldi";
      ID = Intrinsic::x86_sse2_psll_d;
      break;
    case X86::BI__builtin_ia32_psllqi128:
      name = "psllqi";
      ID = Intrinsic::x86_sse2_psll_q;
      break;
    case X86::BI__builtin_ia32_psllwi128:
      name = "psllwi";
      ID = Intrinsic::x86_sse2_psll_w;
      break;
    case X86::BI__builtin_ia32_psradi128:
      name = "psradi";
      ID = Intrinsic::x86_sse2_psra_d;
      break;
    case X86::BI__builtin_ia32_psrawi128:
      name = "psrawi";
      ID = Intrinsic::x86_sse2_psra_w;
      break;
    case X86::BI__builtin_ia32_psrldi128:
      name = "psrldi";
      ID = Intrinsic::x86_sse2_psrl_d;
      break;
    case X86::BI__builtin_ia32_psrlqi128:
      name = "psrlqi";
      ID = Intrinsic::x86_sse2_psrl_q;
      break;
    case X86::BI__builtin_ia32_psrlwi128:
      name = "psrlwi";
      ID = Intrinsic::x86_sse2_psrl_w;
      break;
    }
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
  }
  case X86::BI__builtin_ia32_vec_init_v8qi:
  case X86::BI__builtin_ia32_vec_init_v4hi:
  case X86::BI__builtin_ia32_vec_init_v2si:
    return Builder.CreateBitCast(BuildVector(Ops),
                                 llvm::Type::getX86_MMXTy(getLLVMContext()));
  case X86::BI__builtin_ia32_vec_ext_v2si:
    return Builder.CreateExtractElement(Ops[0],
                                  llvm::ConstantInt::get(Ops[1]->getType(), 0));
  case X86::BI__builtin_ia32_pslldi:
  case X86::BI__builtin_ia32_psllqi:
  case X86::BI__builtin_ia32_psllwi:
  case X86::BI__builtin_ia32_psradi:
  case X86::BI__builtin_ia32_psrawi:
  case X86::BI__builtin_ia32_psrldi:
  case X86::BI__builtin_ia32_psrlqi:
  case X86::BI__builtin_ia32_psrlwi: {
    Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty, "zext");
    const llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 1);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty, "bitcast");
    const char *name = 0;
    Intrinsic::ID ID = Intrinsic::not_intrinsic;

    switch (BuiltinID) {
    default: assert(0 && "Unsupported shift intrinsic!");
    case X86::BI__builtin_ia32_pslldi:
      name = "pslldi";
      ID = Intrinsic::x86_mmx_psll_d;
      break;
    case X86::BI__builtin_ia32_psllqi:
      name = "psllqi";
      ID = Intrinsic::x86_mmx_psll_q;
      break;
    case X86::BI__builtin_ia32_psllwi:
      name = "psllwi";
      ID = Intrinsic::x86_mmx_psll_w;
      break;
    case X86::BI__builtin_ia32_psradi:
      name = "psradi";
      ID = Intrinsic::x86_mmx_psra_d;
      break;
    case X86::BI__builtin_ia32_psrawi:
      name = "psrawi";
      ID = Intrinsic::x86_mmx_psra_w;
      break;
    case X86::BI__builtin_ia32_psrldi:
      name = "psrldi";
      ID = Intrinsic::x86_mmx_psrl_d;
      break;
    case X86::BI__builtin_ia32_psrlqi:
      name = "psrlqi";
      ID = Intrinsic::x86_mmx_psrl_q;
      break;
    case X86::BI__builtin_ia32_psrlwi:
      name = "psrlwi";
      ID = Intrinsic::x86_mmx_psrl_w;
      break;
    }
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
  }
  case X86::BI__builtin_ia32_cmpps: {
    llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse_cmp_ps);
    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpps");
  }
  case X86::BI__builtin_ia32_cmpss: {
    llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse_cmp_ss);
    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpss");
  }
  case X86::BI__builtin_ia32_ldmxcsr: {
    const llvm::Type *PtrTy = Int8PtrTy;
    Value *One = llvm::ConstantInt::get(Int32Ty, 1);
    Value *Tmp = Builder.CreateAlloca(Int32Ty, One, "tmp");
    Builder.CreateStore(Ops[0], Tmp);
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
                              Builder.CreateBitCast(Tmp, PtrTy));
  }
  case X86::BI__builtin_ia32_stmxcsr: {
    const llvm::Type *PtrTy = Int8PtrTy;
    Value *One = llvm::ConstantInt::get(Int32Ty, 1);
    Value *Tmp = Builder.CreateAlloca(Int32Ty, One, "tmp");
    One = Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
                             Builder.CreateBitCast(Tmp, PtrTy));
    return Builder.CreateLoad(Tmp, "stmxcsr");
  }
  case X86::BI__builtin_ia32_cmppd: {
    llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_cmp_pd);
    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmppd");
  }
  case X86::BI__builtin_ia32_cmpsd: {
    llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_cmp_sd);
    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpsd");
  }
  case X86::BI__builtin_ia32_storehps:
  case X86::BI__builtin_ia32_storelps: {
    llvm::Type *PtrTy = llvm::PointerType::getUnqual(Int64Ty);
    llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);

    // cast val v2i64
    Ops[1] = Builder.CreateBitCast(Ops[1], VecTy, "cast");

    // extract (0, 1)
    unsigned Index = BuiltinID == X86::BI__builtin_ia32_storelps ? 0 : 1;
    llvm::Value *Idx = llvm::ConstantInt::get(Int32Ty, Index);
    Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "extract");

    // cast pointer to i64 & store
    Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy);
    return Builder.CreateStore(Ops[1], Ops[0]);
  }
  case X86::BI__builtin_ia32_palignr: {
    unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();

    // If palignr is shifting the pair of input vectors less than 9 bytes,
    // emit a shuffle instruction.
    if (shiftVal <= 8) {
      llvm::SmallVector<llvm::Constant*, 8> Indices;
      for (unsigned i = 0; i != 8; ++i)
        Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i));

      Value* SV = llvm::ConstantVector::get(Indices);
      return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
    }

    // If palignr is shifting the pair of input vectors more than 8 but less
    // than 16 bytes, emit a logical right shift of the destination.
    if (shiftVal < 16) {
      // MMX has these as 1 x i64 vectors for some odd optimization reasons.
      const llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 1);

      Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
      Ops[1] = llvm::ConstantInt::get(VecTy, (shiftVal-8) * 8);

      // shift the destination right by (shiftVal-8) bytes, expressed in bits
      llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_mmx_psrl_q);
      return Builder.CreateCall(F, &Ops[0], &Ops[0] + 2, "palignr");
    }

    // If palignr is shifting the pair of vectors 16 bytes or more, emit zero.
    return llvm::Constant::getNullValue(ConvertType(E->getType()));
  }
  case X86::BI__builtin_ia32_palignr128: {
    unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();

    // If palignr is shifting the pair of input vectors less than 17 bytes,
    // emit a shuffle instruction.
    if (shiftVal <= 16) {
      llvm::SmallVector<llvm::Constant*, 16> Indices;
      for (unsigned i = 0; i != 16; ++i)
        Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i));

      Value* SV = llvm::ConstantVector::get(Indices);
      return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
    }

    // If palignr is shifting the pair of input vectors more than 16 but less
    // than 32 bytes, emit a logical right shift of the destination.
    if (shiftVal < 32) {
      const llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);

      Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
      Ops[1] = llvm::ConstantInt::get(Int32Ty, (shiftVal-16) * 8);

      // shift the destination right by (shiftVal-16) bytes, expressed in bits
      llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_psrl_dq);
      return Builder.CreateCall(F, &Ops[0], &Ops[0] + 2, "palignr");
    }

    // If palignr is shifting the pair of vectors 32 bytes or more, emit zero.
    return llvm::Constant::getNullValue(ConvertType(E->getType()));
  }
  case X86::BI__builtin_ia32_movntps:
  case X86::BI__builtin_ia32_movntpd:
  case X86::BI__builtin_ia32_movntdq:
  case X86::BI__builtin_ia32_movnti: {
    llvm::MDNode *Node = llvm::MDNode::get(getLLVMContext(),
                                           Builder.getInt32(1));

    // Convert the type of the pointer to a pointer to the stored type.
    Value *BC = Builder.CreateBitCast(Ops[0],
                                llvm::PointerType::getUnqual(Ops[1]->getType()),
                                      "cast");
    StoreInst *SI = Builder.CreateStore(Ops[1], BC);
    SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
    SI->setAlignment(16);
    return SI;
  }
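  // Each 3DNow! builtin maps one-to-one onto an LLVM intrinsic; the switch
  // below only selects the intrinsic ID and the instruction name.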
  case X86::BI__builtin_ia32_pavgusb:
  case X86::BI__builtin_ia32_pf2id:
  case X86::BI__builtin_ia32_pfacc:
  case X86::BI__builtin_ia32_pfadd:
  case X86::BI__builtin_ia32_pfcmpeq:
  case X86::BI__builtin_ia32_pfcmpge:
  case X86::BI__builtin_ia32_pfcmpgt:
  case X86::BI__builtin_ia32_pfmax:
  case X86::BI__builtin_ia32_pfmin:
  case X86::BI__builtin_ia32_pfmul:
  case X86::BI__builtin_ia32_pfrcp:
  case X86::BI__builtin_ia32_pfrcpit1:
  case X86::BI__builtin_ia32_pfrcpit2:
  case X86::BI__builtin_ia32_pfrsqrt:
  case X86::BI__builtin_ia32_pfrsqit1:
  case X86::BI__builtin_ia32_pfrsqrtit1:
  case X86::BI__builtin_ia32_pfsub:
  case X86::BI__builtin_ia32_pfsubr:
  case X86::BI__builtin_ia32_pi2fd:
  case X86::BI__builtin_ia32_pmulhrw:
  case X86::BI__builtin_ia32_pf2iw:
  case X86::BI__builtin_ia32_pfnacc:
  case X86::BI__builtin_ia32_pfpnacc:
  case X86::BI__builtin_ia32_pi2fw:
  case X86::BI__builtin_ia32_pswapdsf:
  case X86::BI__builtin_ia32_pswapdsi: {
    const char *name = 0;
    Intrinsic::ID ID = Intrinsic::not_intrinsic;
    switch (BuiltinID) {
    case X86::BI__builtin_ia32_pavgusb:
      name = "pavgusb";
      ID = Intrinsic::x86_3dnow_pavgusb;
      break;
    case X86::BI__builtin_ia32_pf2id:
      name = "pf2id";
      ID = Intrinsic::x86_3dnow_pf2id;
      break;
    case X86::BI__builtin_ia32_pfacc:
      name = "pfacc";
      ID = Intrinsic::x86_3dnow_pfacc;
      break;
    case X86::BI__builtin_ia32_pfadd:
      name = "pfadd";
      ID = Intrinsic::x86_3dnow_pfadd;
      break;
    case X86::BI__builtin_ia32_pfcmpeq:
      name = "pfcmpeq";
      ID = Intrinsic::x86_3dnow_pfcmpeq;
      break;
    case X86::BI__builtin_ia32_pfcmpge:
      name = "pfcmpge";
      ID = Intrinsic::x86_3dnow_pfcmpge;
      break;
    case X86::BI__builtin_ia32_pfcmpgt:
      name = "pfcmpgt";
      ID = Intrinsic::x86_3dnow_pfcmpgt;
      break;
    case X86::BI__builtin_ia32_pfmax:
      name = "pfmax";
      ID = Intrinsic::x86_3dnow_pfmax;
      break;
    case X86::BI__builtin_ia32_pfmin:
      name = "pfmin";
      ID = Intrinsic::x86_3dnow_pfmin;
      break;
    case X86::BI__builtin_ia32_pfmul:
      name = "pfmul";
      ID = Intrinsic::x86_3dnow_pfmul;
      break;
    case X86::BI__builtin_ia32_pfrcp:
      name = "pfrcp";
      ID = Intrinsic::x86_3dnow_pfrcp;
      break;
    case X86::BI__builtin_ia32_pfrcpit1:
      name = "pfrcpit1";
      ID = Intrinsic::x86_3dnow_pfrcpit1;
      break;
    case X86::BI__builtin_ia32_pfrcpit2:
      name = "pfrcpit2";
      ID = Intrinsic::x86_3dnow_pfrcpit2;
      break;
    case X86::BI__builtin_ia32_pfrsqrt:
      name = "pfrsqrt";
      ID = Intrinsic::x86_3dnow_pfrsqrt;
      break;
    case X86::BI__builtin_ia32_pfrsqit1:
    case X86::BI__builtin_ia32_pfrsqrtit1:
      name = "pfrsqit1";
      ID = Intrinsic::x86_3dnow_pfrsqit1;
      break;
    case X86::BI__builtin_ia32_pfsub:
      name = "pfsub";
      ID = Intrinsic::x86_3dnow_pfsub;
      break;
    case X86::BI__builtin_ia32_pfsubr:
      name = "pfsubr";
      ID = Intrinsic::x86_3dnow_pfsubr;
      break;
    case X86::BI__builtin_ia32_pi2fd:
      name = "pi2fd";
      ID = Intrinsic::x86_3dnow_pi2fd;
      break;
    case X86::BI__builtin_ia32_pmulhrw:
      name = "pmulhrw";
      ID = Intrinsic::x86_3dnow_pmulhrw;
      break;
    case X86::BI__builtin_ia32_pf2iw:
      name = "pf2iw";
      ID = Intrinsic::x86_3dnowa_pf2iw;
      break;
    case X86::BI__builtin_ia32_pfnacc:
      name = "pfnacc";
      ID = Intrinsic::x86_3dnowa_pfnacc;
      break;
    case X86::BI__builtin_ia32_pfpnacc:
      name = "pfpnacc";
      ID = Intrinsic::x86_3dnowa_pfpnacc;
      break;
    case X86::BI__builtin_ia32_pi2fw:
      name = "pi2fw";
      ID = Intrinsic::x86_3dnowa_pi2fw;
      break;
    case X86::BI__builtin_ia32_pswapdsf:
    case X86::BI__builtin_ia32_pswapdsi:
      name = "pswapd";
      ID = Intrinsic::x86_3dnowa_pswapd;
      break;
    }
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
  }
  }
}
Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E) {
  llvm::SmallVector<Value*, 4> Ops;

  for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
    Ops.push_back(EmitScalarExpr(E->getArg(i)));

  Intrinsic::ID ID = Intrinsic::not_intrinsic;

  switch (BuiltinID) {
  default: return 0;

  // vec_ld, vec_lvsl, vec_lvsr
  case PPC::BI__builtin_altivec_lvx:
  case PPC::BI__builtin_altivec_lvxl:
  case PPC::BI__builtin_altivec_lvebx:
  case PPC::BI__builtin_altivec_lvehx:
  case PPC::BI__builtin_altivec_lvewx:
  case PPC::BI__builtin_altivec_lvsl:
  case PPC::BI__builtin_altivec_lvsr:
  {
    Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);

    Ops[0] = Builder.CreateGEP(Ops[1], Ops[0], "tmp");
    Ops.pop_back();

    switch (BuiltinID) {
    default: assert(0 && "Unsupported ld/lvsl/lvsr intrinsic!");
    case PPC::BI__builtin_altivec_lvx:
      ID = Intrinsic::ppc_altivec_lvx;
      break;
    case PPC::BI__builtin_altivec_lvxl:
      ID = Intrinsic::ppc_altivec_lvxl;
      break;
    case PPC::BI__builtin_altivec_lvebx:
      ID = Intrinsic::ppc_altivec_lvebx;
      break;
    case PPC::BI__builtin_altivec_lvehx:
      ID = Intrinsic::ppc_altivec_lvehx;
      break;
    case PPC::BI__builtin_altivec_lvewx:
      ID = Intrinsic::ppc_altivec_lvewx;
      break;
    case PPC::BI__builtin_altivec_lvsl:
      ID = Intrinsic::ppc_altivec_lvsl;
      break;
    case PPC::BI__builtin_altivec_lvsr:
      ID = Intrinsic::ppc_altivec_lvsr;
      break;
    }
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "");
  }

  // vec_st, vec_ste, vec_stl
  case PPC::BI__builtin_altivec_stvx:
  case PPC::BI__builtin_altivec_stvxl:
  case PPC::BI__builtin_altivec_stvebx:
  case PPC::BI__builtin_altivec_stvehx:
  case PPC::BI__builtin_altivec_stvewx:
  {
    Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
    Ops[1] = Builder.CreateGEP(Ops[2], Ops[1], "tmp");
    Ops.pop_back();

    switch (BuiltinID) {
    default: assert(0 && "Unsupported st intrinsic!");
    case PPC::BI__builtin_altivec_stvx:
      ID = Intrinsic::ppc_altivec_stvx;
      break;
    case PPC::BI__builtin_altivec_stvxl:
      ID = Intrinsic::ppc_altivec_stvxl;
      break;
    case PPC::BI__builtin_altivec_stvebx:
      ID = Intrinsic::ppc_altivec_stvebx;
      break;
    case PPC::BI__builtin_altivec_stvehx:
      ID = Intrinsic::ppc_altivec_stvehx;
      break;
    case PPC::BI__builtin_altivec_stvewx:
      ID = Intrinsic::ppc_altivec_stvewx;
      break;
    }
    llvm::Function *F = CGM.getIntrinsic(ID);
    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "");