//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalAlias.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Operator.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include <algorithm>
using namespace llvm;

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//
/// isKnownNonNull - Return true if we know that the specified value is never
/// null.
static bool isKnownNonNull(const Value *V) {
  // Alloca never returns null, malloc might.
  if (isa<AllocaInst>(V)) return true;

  // A byval argument is never null.
  if (const Argument *A = dyn_cast<Argument>(V))
    return A->hasByValAttr();

  // Global values are not null unless extern weak.
  if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
    return !GV->hasExternalWeakLinkage();
  return false;
}
/// isNonEscapingLocalObject - Return true if the pointer is to a function-local
/// object that never escapes from the function.
static bool isNonEscapingLocalObject(const Value *V) {
  // If this is a local allocation, check to see if it escapes.
  if (isa<AllocaInst>(V) || isNoAliasCall(V))
    // Set StoreCaptures to True so that we can assume in our callers that the
    // pointer is not the result of a load instruction. Currently
    // PointerMayBeCaptured doesn't have any special analysis for the
    // StoreCaptures=false case; if it did, our callers could be refined to be
    // more precise.
    return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  // If this is an argument that corresponds to a byval or noalias argument,
  // then it has not escaped before entering the function. Check if it escapes
  // inside the function.
  if (const Argument *A = dyn_cast<Argument>(V))
    if (A->hasByValAttr() || A->hasNoAliasAttr()) {
      // Don't bother analyzing arguments already known not to escape.
      if (A->hasNoCaptureAttr())
        return true;
      return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);
    }

  return false;
}
/// isEscapeSource - Return true if the pointer is one which would have
/// been considered an escape by isNonEscapingLocalObject.
static bool isEscapeSource(const Value *V) {
  if (isa<CallInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V))
    return true;

  // The load case works because isNonEscapingLocalObject considers all
  // stores to be escapes (it passes true for the StoreCaptures argument
  // to PointerMayBeCaptured).
  if (isa<LoadInst>(V))
    return true;

  return false;
}
/// getObjectSize - Return the size of the object specified by V, or
/// UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const TargetData &TD) {
  const Type *AccessTy;
  if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
    if (!GV->hasDefinitiveInitializer())
      return AliasAnalysis::UnknownSize;
    AccessTy = GV->getType()->getElementType();
  } else if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    if (!AI->isArrayAllocation())
      AccessTy = AI->getType()->getElementType();
    else
      return AliasAnalysis::UnknownSize;
  } else if (const CallInst* CI = extractMallocCall(V)) {
    if (!isArrayMalloc(V, &TD))
      // The size is the argument to the malloc call.
      if (const ConstantInt* C = dyn_cast<ConstantInt>(CI->getArgOperand(0)))
        return C->getZExtValue();
    return AliasAnalysis::UnknownSize;
  } else if (const Argument *A = dyn_cast<Argument>(V)) {
    if (A->hasByValAttr())
      AccessTy = cast<PointerType>(A->getType())->getElementType();
    else
      return AliasAnalysis::UnknownSize;
  } else {
    return AliasAnalysis::UnknownSize;
  }

  if (AccessTy->isSized())
    return TD.getTypeAllocSize(AccessTy);
  return AliasAnalysis::UnknownSize;
}
/// isObjectSmallerThan - Return true if we can prove that the object specified
/// by V is smaller than Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                const TargetData &TD) {
  uint64_t ObjectSize = getObjectSize(V, TD);
  return ObjectSize != AliasAnalysis::UnknownSize && ObjectSize < Size;
}
/// isObjectSize - Return true if we can prove that the object specified
/// by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size,
                         const TargetData &TD) {
  uint64_t ObjectSize = getObjectSize(V, TD);
  return ObjectSize != AliasAnalysis::UnknownSize && ObjectSize == Size;
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//
namespace {
  enum ExtensionKind {
    EK_NotExtended,
    EK_SignExt,
    EK_ZeroExt
  };

  struct VariableGEPIndex {
    const Value *V;
    ExtensionKind Extension;
    int64_t Scale;
  };
}
/// GetLinearExpression - Analyze the specified value as a linear expression:
/// "A*V + B", where A and B are constant integers. Return the scale and offset
/// values as APInts and return V as a Value*, and return whether we looked
/// through any sign or zero extends. The incoming Value is known to have
/// IntegerType and it may already be sign or zero extended.
///
/// Note that this looks through extends, so the high bits may not be
/// represented in the result.
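///
/// For example, "shl (add i32 %x, 4), 1" decomposes to %x with Scale = 2 and
/// Offset = 8, since (x + 4) << 1 == 2*x + 8.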
static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
                                  ExtensionKind &Extension,
                                  const TargetData &TD, unsigned Depth) {
  assert(V->getType()->isIntegerTy() && "Not an integer value");

  // Limit our recursion depth.
  if (Depth == 6) {
    Scale = 1;
    Offset = 0;
    return V;
  }

  if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      switch (BOp->getOpcode()) {
      default: break;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
        // analyze it.
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), &TD))
          break;
        // FALL THROUGH.
      case Instruction::Add:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
                                TD, Depth+1);
        Offset += RHSC->getValue();
        return V;
      case Instruction::Mul:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
                                TD, Depth+1);
        Offset *= RHSC->getValue();
        Scale *= RHSC->getValue();
        return V;
      case Instruction::Shl:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
                                TD, Depth+1);
        Offset <<= RHSC->getValue().getLimitedValue();
        Scale <<= RHSC->getValue().getLimitedValue();
        return V;
      }
    }
  }

  // Since GEP indices are sign extended anyway, we don't care about the high
  // bits of a sign or zero extended value - just scales and offsets. The
  // extensions have to be consistent though.
  if ((isa<SExtInst>(V) && Extension != EK_ZeroExt) ||
      (isa<ZExtInst>(V) && Extension != EK_SignExt)) {
    Value *CastOp = cast<CastInst>(V)->getOperand(0);
    unsigned OldWidth = Scale.getBitWidth();
    unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
    Scale = Scale.trunc(SmallWidth);
    Offset = Offset.trunc(SmallWidth);
    Extension = isa<SExtInst>(V) ? EK_SignExt : EK_ZeroExt;

    Value *Result = GetLinearExpression(CastOp, Scale, Offset, Extension,
                                        TD, Depth+1);
    Scale = Scale.zext(OldWidth);
    Offset = Offset.zext(OldWidth);

    return Result;
  }

  Scale = 1;
  Offset = 0;
  return V;
}
/// DecomposeGEPExpression - If V is a symbolic pointer expression, decompose it
/// into a base pointer with a constant offset and a number of scaled symbolic
/// offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale in
/// the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As such,
/// the gep cannot necessarily be reconstructed from its decomposed form.
///
/// When TargetData is around, this function is capable of analyzing everything
/// that GetUnderlyingObject can look through. When not, it just looks
/// through pointer casts.
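///
/// For example, "getelementptr [10 x i32]* %A, i32 0, i32 %i" decomposes to
/// base %A, BaseOffs = 0, and a single variable index {%i, sign-extended,
/// Scale = 4}, assuming a 4-byte i32 and a 64-bit pointer size.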
static const Value *
DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
                       SmallVectorImpl<VariableGEPIndex> &VarIndices,
                       const TargetData *TD) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = 6;

  BaseOffs = 0;
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (Op == 0) {
      // The only non-operator case we can handle are GlobalAliases.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->mayBeOverridden()) {
          V = GA->getAliasee();
          continue;
        }
      }
      return V;
    }

    if (Op->getOpcode() == Instruction::BitCast) {
      V = Op->getOperand(0);
      continue;
    }

    if (const Instruction *I = dyn_cast<Instruction>(V))
      // TODO: Get a DominatorTree and use it here.
      if (const Value *Simplified =
            SimplifyInstruction(const_cast<Instruction *>(I), TD)) {
        V = Simplified;
        continue;
      }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (GEPOp == 0)
      return V;

    // Don't attempt to analyze GEPs over unsized objects.
    if (!cast<PointerType>(GEPOp->getOperand(0)->getType())
          ->getElementType()->isSized())
      return V;

    // If we are lacking TargetData information, we can't compute the offsets
    // of elements computed by GEPs. However, we can handle bitcast equivalent
    // GEPs.
    if (TD == 0) {
      if (!GEPOp->hasAllZeroIndices())
        return V;
      V = GEPOp->getOperand(0);
      continue;
    }

    // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    for (User::const_op_iterator I = GEPOp->op_begin()+1,
         E = GEPOp->op_end(); I != E; ++I) {
      Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (const StructType *STy = dyn_cast<StructType>(*GTI++)) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0) continue;

        BaseOffs += TD->getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero()) continue;
        BaseOffs += TD->getTypeAllocSize(*GTI)*CIdx->getSExtValue();
        continue;
      }

      uint64_t Scale = TD->getTypeAllocSize(*GTI);
      ExtensionKind Extension = EK_NotExtended;

      // If the integer type is smaller than the pointer size, it is implicitly
      // sign extended to pointer size.
      unsigned Width = cast<IntegerType>(Index->getType())->getBitWidth();
      if (TD->getPointerSizeInBits() > Width)
        Extension = EK_SignExt;

      // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
      APInt IndexScale(Width, 0), IndexOffset(Width, 0);
      Index = GetLinearExpression(Index, IndexScale, IndexOffset, Extension,
                                  *TD, 0);

      // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
      // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
      BaseOffs += IndexOffset.getSExtValue()*Scale;
      Scale *= IndexScale.getSExtValue();

      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = VarIndices.size(); i != e; ++i) {
        if (VarIndices[i].V == Index &&
            VarIndices[i].Extension == Extension) {
          Scale += VarIndices[i].Scale;
          VarIndices.erase(VarIndices.begin()+i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // pointer size.
      if (unsigned ShiftBits = 64-TD->getPointerSizeInBits()) {
        Scale <<= ShiftBits;
        Scale = (int64_t)Scale >> ShiftBits;
      }

      if (Scale) {
        VariableGEPIndex Entry = {Index, Extension, Scale};
        VarIndices.push_back(Entry);
      }
    }

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  return V;
}
/// GetIndexDifference - Dest and Src are the variable indices from two
/// decomposed GetElementPtr instructions GEP1 and GEP2 which have common base
/// pointers. Subtract the GEP2 indices from GEP1 to find the symbolic
/// difference between the two pointers.
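///
/// For example, if Dest holds {x, Scale = 20} and Src holds {x, Scale = 14},
/// Dest is left with {x, Scale = 6}; entries that cancel exactly are erased,
/// and entries present only in Src are appended with a negated scale.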
static void GetIndexDifference(SmallVectorImpl<VariableGEPIndex> &Dest,
                               const SmallVectorImpl<VariableGEPIndex> &Src) {
  if (Src.empty()) return;

  for (unsigned i = 0, e = Src.size(); i != e; ++i) {
    const Value *V = Src[i].V;
    ExtensionKind Extension = Src[i].Extension;
    int64_t Scale = Src[i].Scale;

    // Find V in Dest. This is N^2, but pointer indices almost never have more
    // than a few variable indexes.
    for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
      if (Dest[j].V != V || Dest[j].Extension != Extension) continue;

      // If we found it, subtract off Scale V's from the entry in Dest. If it
      // goes to zero, remove the entry.
      if (Dest[j].Scale != Scale)
        Dest[j].Scale -= Scale;
      else
        Dest.erase(Dest.begin()+j);
      Scale = 0;
      break;
    }

    // If we didn't consume this entry, add it to the end of the Dest list.
    if (Scale) {
      VariableGEPIndex Entry = { V, Extension, -Scale };
      Dest.push_back(Entry);
    }
  }
}
//===----------------------------------------------------------------------===//
// BasicAliasAnalysis Pass
//===----------------------------------------------------------------------===//
#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V))
    return inst->getParent()->getParent();

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return NULL;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {

  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif
namespace {
  /// BasicAliasAnalysis - This is the primary alias analysis implementation.
  struct BasicAliasAnalysis : public ImmutablePass, public AliasAnalysis {
    static char ID; // Class identification, replacement for typeinfo
    BasicAliasAnalysis() : ImmutablePass(ID) {
      initializeBasicAliasAnalysisPass(*PassRegistry::getPassRegistry());
    }

    virtual void initializePass() {
      InitializeAliasAnalysis(this);
    }

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<AliasAnalysis>();
    }

    virtual AliasResult alias(const Location &LocA,
                              const Location &LocB) {
      assert(Visited.empty() && "Visited must be cleared after use!");
      assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
             "BasicAliasAnalysis doesn't support interprocedural queries.");
      AliasResult Alias = aliasCheck(LocA.Ptr, LocA.Size, LocA.TBAATag,
                                     LocB.Ptr, LocB.Size, LocB.TBAATag);
      Visited.clear();
      return Alias;
    }

    virtual ModRefResult getModRefInfo(ImmutableCallSite CS,
                                       const Location &Loc);

    virtual ModRefResult getModRefInfo(ImmutableCallSite CS1,
                                       ImmutableCallSite CS2) {
      // The AliasAnalysis base class has some smarts, let's use them.
      return AliasAnalysis::getModRefInfo(CS1, CS2);
    }

    /// pointsToConstantMemory - Chase pointers until we find a (constant
    /// global) or not.
    virtual bool pointsToConstantMemory(const Location &Loc, bool OrLocal);

    /// getModRefBehavior - Return the behavior when calling the given
    /// call site.
    virtual ModRefBehavior getModRefBehavior(ImmutableCallSite CS);

    /// getModRefBehavior - Return the behavior when calling the given function.
    /// For use when the call site is not known.
    virtual ModRefBehavior getModRefBehavior(const Function *F);

    /// getAdjustedAnalysisPointer - This method is used when a pass implements
    /// an analysis interface through multiple inheritance. If needed, it
    /// should override this to adjust the this pointer as needed for the
    /// specified pass info.
    virtual void *getAdjustedAnalysisPointer(const void *ID) {
      if (ID == &AliasAnalysis::ID)
        return (AliasAnalysis*)this;
      return this;
    }

  private:
    // Visited - Track instructions visited by aliasPHI(), aliasSelect(), and
    // aliasGEP().
    SmallPtrSet<const Value*, 16> Visited;

    // aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP
    // instruction against another.
    AliasResult aliasGEP(const GEPOperator *V1, uint64_t V1Size,
                         const Value *V2, uint64_t V2Size,
                         const MDNode *V2TBAAInfo,
                         const Value *UnderlyingV1, const Value *UnderlyingV2);

    // aliasPHI - Provide a bunch of ad-hoc rules to disambiguate a PHI
    // instruction against another.
    AliasResult aliasPHI(const PHINode *PN, uint64_t PNSize,
                         const MDNode *PNTBAAInfo,
                         const Value *V2, uint64_t V2Size,
                         const MDNode *V2TBAAInfo);

    /// aliasSelect - Disambiguate a Select instruction against another value.
    AliasResult aliasSelect(const SelectInst *SI, uint64_t SISize,
                            const MDNode *SITBAAInfo,
                            const Value *V2, uint64_t V2Size,
                            const MDNode *V2TBAAInfo);

    AliasResult aliasCheck(const Value *V1, uint64_t V1Size,
                           const MDNode *V1TBAATag,
                           const Value *V2, uint64_t V2Size,
                           const MDNode *V2TBAATag);
  };
} // End of anonymous namespace
// Register this pass...
char BasicAliasAnalysis::ID = 0;
INITIALIZE_AG_PASS(BasicAliasAnalysis, AliasAnalysis, "basicaa",
                   "Basic Alias Analysis (stateless AA impl)",
                   false, true, false)

ImmutablePass *llvm::createBasicAliasAnalysisPass() {
  return new BasicAliasAnalysis();
}
/// pointsToConstantMemory - Returns whether the given pointer value
/// points to memory that is local to the function, with global constants being
/// considered local to all functions.
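///
/// For example, with OrLocal set, a select or phi whose operands are all
/// allocas or constant globals still satisfies the query, since each
/// underlying object is checked through a small worklist.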
bool
BasicAliasAnalysis::pointsToConstantMemory(const Location &Loc, bool OrLocal) {
  assert(Visited.empty() && "Visited must be cleared after use!");

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  do {
    const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), TD);
    if (!Visited.insert(V)) {
      Visited.clear();
      return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
    }

    // An alloca instruction defines local memory.
    if (OrLocal && isa<AllocaInst>(V))
      continue;

    // A global constant counts as local memory for our purposes.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others. GV may even be a declaration, not a definition.
      if (!GV->isConstant()) {
        Visited.clear();
        return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
      }
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup) {
        Visited.clear();
        return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
      }
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        Worklist.push_back(PN->getIncomingValue(i));
      continue;
    }

    // Otherwise be conservative.
    Visited.clear();
    return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);

  } while (!Worklist.empty() && --MaxLookup);

  Visited.clear();
  return Worklist.empty();
}
/// getModRefBehavior - Return the behavior when calling the given call site.
AliasAnalysis::ModRefBehavior
BasicAliasAnalysis::getModRefBehavior(ImmutableCallSite CS) {
  if (CS.doesNotAccessMemory())
    // Can't do better than this.
    return DoesNotAccessMemory;

  ModRefBehavior Min = UnknownModRefBehavior;

  // If the callsite knows it only reads memory, don't return worse
  // than that.
  if (CS.onlyReadsMemory())
    Min = OnlyReadsMemory;

  // The AliasAnalysis base class has some smarts, let's use them.
  return ModRefBehavior(AliasAnalysis::getModRefBehavior(CS) & Min);
}
/// getModRefBehavior - Return the behavior when calling the given function.
/// For use when the call site is not known.
AliasAnalysis::ModRefBehavior
BasicAliasAnalysis::getModRefBehavior(const Function *F) {
  // If the function declares it doesn't access memory, we can't do better.
  if (F->doesNotAccessMemory())
    return DoesNotAccessMemory;

  // For intrinsics, we can check the table.
  if (unsigned iid = F->getIntrinsicID()) {
#define GET_INTRINSIC_MODREF_BEHAVIOR
#include "llvm/Intrinsics.gen"
#undef GET_INTRINSIC_MODREF_BEHAVIOR
  }

  ModRefBehavior Min = UnknownModRefBehavior;

  // If the function declares it only reads memory, go with that.
  if (F->onlyReadsMemory())
    Min = OnlyReadsMemory;

  // Otherwise be conservative.
  return ModRefBehavior(AliasAnalysis::getModRefBehavior(F) & Min);
}
/// getModRefInfo - Check to see if the specified callsite can clobber the
/// specified memory object. Since we only look at local properties of this
/// function, we really can't say much about this query. We do, however, use
/// simple "address taken" analysis on local objects.
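///
/// For example, if Loc.Ptr is a non-escaping alloca and the call only sees it
/// through nocapture pointer arguments that provably do not alias Loc, the
/// call is reported as NoModRef for that location.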
AliasAnalysis::ModRefResult
BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
                                  const Location &Loc) {
  assert(notDifferentParent(CS.getInstruction(), Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = GetUnderlyingObject(Loc.Ptr, TD);

  // If this is a tail call and Loc.Ptr points to a stack location, we know that
  // the tail call cannot access or modify the local stack.
  // We cannot exclude byval arguments here; these belong to the caller of
  // the current function not to the current function, and a tail callee
  // may reference them.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
      if (CI->isTailCall())
        return NoModRef;

  // If the pointer is to a locally allocated object that does not escape,
  // then the call can not mod/ref the pointer unless the call takes the pointer
  // as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && CS.getInstruction() != Object &&
      isNonEscapingLocalObject(Object)) {
    bool PassedAsArg = false;
    unsigned ArgNo = 0;
    for (ImmutableCallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
         CI != CE; ++CI, ++ArgNo) {
      // Only look at the no-capture pointer arguments.
      if (!(*CI)->getType()->isPointerTy() ||
          !CS.paramHasAttr(ArgNo+1, Attribute::NoCapture))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking. If not, we have to
      // assume that the call could touch the pointer, even though it doesn't
      // escape.
      if (!isNoAlias(Location(cast<Value>(CI)), Loc)) {
        PassedAsArg = true;
        break;
      }
    }

    if (!PassedAsArg)
      return NoModRef;
  }

  ModRefResult Min = ModRef;

  // Finally, handle specific knowledge of intrinsics.
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
  if (II != 0)
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::memcpy:
    case Intrinsic::memmove: {
      uint64_t Len = UnknownSize;
      if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2)))
        Len = LenCI->getZExtValue();
      Value *Dest = II->getArgOperand(0);
      Value *Src = II->getArgOperand(1);
      // If it can't overlap the source or dest, then it doesn't modref the loc.
      if (isNoAlias(Location(Dest, Len), Loc)) {
        if (isNoAlias(Location(Src, Len), Loc))
          return NoModRef;
        // If it can't overlap the dest, then worst case it reads the loc.
        Min = Ref;
      } else if (isNoAlias(Location(Src, Len), Loc)) {
        // If it can't overlap the source, then worst case it mutates the loc.
        Min = Mod;
      }
      break;
    }
    case Intrinsic::memset:
      // Since memset is 'accesses arguments' only, the AliasAnalysis base class
      // will handle it for the variable length case.
      if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2))) {
        uint64_t Len = LenCI->getZExtValue();
        Value *Dest = II->getArgOperand(0);
        if (isNoAlias(Location(Dest, Len), Loc))
          return NoModRef;
      }
      // We know that memset doesn't load anything.
      Min = Mod;
      break;
    case Intrinsic::atomic_cmp_swap:
    case Intrinsic::atomic_swap:
    case Intrinsic::atomic_load_add:
    case Intrinsic::atomic_load_sub:
    case Intrinsic::atomic_load_and:
    case Intrinsic::atomic_load_nand:
    case Intrinsic::atomic_load_or:
    case Intrinsic::atomic_load_xor:
    case Intrinsic::atomic_load_max:
    case Intrinsic::atomic_load_min:
    case Intrinsic::atomic_load_umax:
    case Intrinsic::atomic_load_umin:
      if (TD) {
        Value *Op1 = II->getArgOperand(0);
        uint64_t Op1Size = TD->getTypeStoreSize(Op1->getType());
        MDNode *Tag = II->getMetadata(LLVMContext::MD_tbaa);
        if (isNoAlias(Location(Op1, Op1Size, Tag), Loc))
          return NoModRef;
      }
      break;
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_start: {
      uint64_t PtrSize =
        cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
      if (isNoAlias(Location(II->getArgOperand(1),
                             PtrSize,
                             II->getMetadata(LLVMContext::MD_tbaa)),
                    Loc))
        return NoModRef;
      break;
    }
    case Intrinsic::invariant_end: {
      uint64_t PtrSize =
        cast<ConstantInt>(II->getArgOperand(1))->getZExtValue();
      if (isNoAlias(Location(II->getArgOperand(2),
                             PtrSize,
                             II->getMetadata(LLVMContext::MD_tbaa)),
                    Loc))
        return NoModRef;
      break;
    }
    case Intrinsic::arm_neon_vld1: {
      // LLVM's vld1 and vst1 intrinsics currently only support a single
      // vector register.
      uint64_t Size =
        TD ? TD->getTypeStoreSize(II->getType()) : UnknownSize;
      if (isNoAlias(Location(II->getArgOperand(0), Size,
                             II->getMetadata(LLVMContext::MD_tbaa)),
                    Loc))
        return NoModRef;
      break;
    }
    case Intrinsic::arm_neon_vst1: {
      uint64_t Size =
        TD ? TD->getTypeStoreSize(II->getArgOperand(1)->getType()) : UnknownSize;
      if (isNoAlias(Location(II->getArgOperand(0), Size,
                             II->getMetadata(LLVMContext::MD_tbaa)),
                    Loc))
        return NoModRef;
      break;
    }
    }

  // The AliasAnalysis base class has some smarts, let's use them.
  return ModRefResult(AliasAnalysis::getModRefInfo(CS, Loc) & Min);
}
/// aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP instruction
/// against another pointer. We know that V1 is a GEP, but we don't know
/// anything about V2. UnderlyingV1 is GetUnderlyingObject(GEP1, TD),
/// UnderlyingV2 is the same for V2.
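///
/// For example, two GEPs "&A[1]" and "&A[2]" over 4-byte elements share a base
/// and decompose to constant offsets that differ by 4, so 4-byte accesses
/// through them cannot overlap and the result is NoAlias.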
AliasAnalysis::AliasResult
BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
                             const Value *V2, uint64_t V2Size,
                             const MDNode *V2TBAAInfo,
                             const Value *UnderlyingV1,
                             const Value *UnderlyingV2) {
  // If this GEP has been visited before, we're on a use-def cycle.
  // Such cycles are only valid when PHI nodes are involved or in unreachable
  // code. The visitPHI function catches cycles containing PHIs, but there
  // could still be a cycle without PHIs in unreachable code.
  if (!Visited.insert(GEP1))
    return MayAlias;

  int64_t GEP1BaseOffset;
  SmallVector<VariableGEPIndex, 4> GEP1VariableIndices;

  // If we have two gep instructions with must-alias'ing base pointers, figure
  // out if the indexes to the GEP tell us anything about the derived pointer.
  if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
    // Do the base pointers alias?
    AliasResult BaseAlias = aliasCheck(UnderlyingV1, UnknownSize, 0,
                                       UnderlyingV2, UnknownSize, 0);

    // If we get a No or May, then return it immediately, no amount of analysis
    // will improve this situation.
    if (BaseAlias != MustAlias) return BaseAlias;

    // Otherwise, we have a MustAlias. Since the base pointers alias each other
    // exactly, see if the computed offset from the common pointer tells us
    // about the relation of the resulting pointer.
    const Value *GEP1BasePtr =
      DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD);

    int64_t GEP2BaseOffset;
    SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
    const Value *GEP2BasePtr =
      DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices, TD);

    // If DecomposeGEPExpression isn't able to look all the way through the
    // addressing operation, we must not have TD and this is too complex for us
    // to handle without it.
    if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
      assert(TD == 0 &&
             "DecomposeGEPExpression and GetUnderlyingObject disagree!");
      return MayAlias;
    }

    // Subtract the GEP2 pointer from the GEP1 pointer to find out their
    // symbolic difference.
    GEP1BaseOffset -= GEP2BaseOffset;
    GetIndexDifference(GEP1VariableIndices, GEP2VariableIndices);

  } else {
    // Check to see if these two pointers are related by the getelementptr
    // instruction. If one pointer is a GEP with a non-zero index of the other
    // pointer, we know they cannot alias.

    // If both accesses are unknown size, we can't do anything useful here.
    if (V1Size == UnknownSize && V2Size == UnknownSize)
      return MayAlias;

    AliasResult R = aliasCheck(UnderlyingV1, UnknownSize, 0,
                               V2, V2Size, V2TBAAInfo);
    if (R != MustAlias)
      // If V2 may alias GEP base pointer, conservatively returns MayAlias.
      // If V2 is known not to alias GEP base pointer, then the two values
      // cannot alias per GEP semantics: "A pointer value formed from a
      // getelementptr instruction is associated with the addresses associated
      // with the first operand of the getelementptr".
      return R;

    const Value *GEP1BasePtr =
      DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD);

    // If DecomposeGEPExpression isn't able to look all the way through the
    // addressing operation, we must not have TD and this is too complex for us
    // to handle without it.
    if (GEP1BasePtr != UnderlyingV1) {
      assert(TD == 0 &&
             "DecomposeGEPExpression and GetUnderlyingObject disagree!");
      return MayAlias;
    }
  }

  // In the two GEP Case, if there is no difference in the offsets of the
  // computed pointers, the resultant pointers are a must alias. This
  // happens when we have two lexically identical GEP's (for example).
  //
  // In the other case, if we have getelementptr <ptr>, 0, 0, 0, 0, ... and V2
  // must aliases the GEP, the end result is a must alias also.
  if (GEP1BaseOffset == 0 && GEP1VariableIndices.empty())
    return MustAlias;

  // If there is a difference between the pointers, but the difference is
  // less than the size of the associated memory object, then we know
  // that the objects are partially overlapping.
  if (GEP1BaseOffset != 0 && GEP1VariableIndices.empty()) {
    if (GEP1BaseOffset >= 0 ?
        (V2Size != UnknownSize && (uint64_t)GEP1BaseOffset < V2Size) :
        (V1Size != UnknownSize && -(uint64_t)GEP1BaseOffset < V1Size &&
         GEP1BaseOffset != INT64_MIN))
      return PartialAlias;
  }

  // If we have a known constant offset, see if this offset is larger than the
  // access size being queried. If so, and if no variable indices can remove
  // pieces of this constant, then we know we have a no-alias. For example,
  //   &A[100] != &A.
  //
  // In order to handle cases like &A[100][i] where i is an out of range
  // subscript, we have to ignore all constant offset pieces that are a multiple
  // of a scaled index. Do this by removing constant offsets that are a
  // multiple of any of our variable indices. This allows us to transform
  // things like &A[i][1] because i has a stride of (e.g.) 8 bytes but the 1
  // provides an offset of 4 bytes (assuming a <= 4 byte access).
  for (unsigned i = 0, e = GEP1VariableIndices.size();
       i != e && GEP1BaseOffset; ++i)
    if (int64_t RemovedOffset = GEP1BaseOffset/GEP1VariableIndices[i].Scale)
      GEP1BaseOffset -= RemovedOffset*GEP1VariableIndices[i].Scale;

  // If our known offset is bigger than the access size, we know we don't have
  // an alias.
  if (GEP1BaseOffset) {
    if (GEP1BaseOffset >= 0 ?
        (V2Size != UnknownSize && (uint64_t)GEP1BaseOffset >= V2Size) :
        (V1Size != UnknownSize && -(uint64_t)GEP1BaseOffset >= V1Size &&
         GEP1BaseOffset != INT64_MIN))
      return NoAlias;
  }

  return MayAlias;
}
/// aliasSelect - Provide a bunch of ad-hoc rules to disambiguate a Select
/// instruction against another.
AliasAnalysis::AliasResult
BasicAliasAnalysis::aliasSelect(const SelectInst *SI, uint64_t SISize,
                                const MDNode *SITBAAInfo,
                                const Value *V2, uint64_t V2Size,
                                const MDNode *V2TBAAInfo) {
  // If this select has been visited before, we're on a use-def cycle.
  // Such cycles are only valid when PHI nodes are involved or in unreachable
  // code. The visitPHI function catches cycles containing PHIs, but there
  // could still be a cycle without PHIs in unreachable code.
  if (!Visited.insert(SI))
    return MayAlias;

  // If the values are Selects with the same condition, we can do a more precise
  // check: just check for aliases between the values on corresponding arms.
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (SI->getCondition() == SI2->getCondition()) {
      AliasResult Alias =
        aliasCheck(SI->getTrueValue(), SISize, SITBAAInfo,
                   SI2->getTrueValue(), V2Size, V2TBAAInfo);
      if (Alias == MayAlias)
        return MayAlias;
      AliasResult ThisAlias =
        aliasCheck(SI->getFalseValue(), SISize, SITBAAInfo,
                   SI2->getFalseValue(), V2Size, V2TBAAInfo);
      if (ThisAlias != Alias)
        return MayAlias;
      return Alias;
    }

  // If both arms of the Select node NoAlias or MustAlias V2, then returns
  // NoAlias / MustAlias. Otherwise, returns MayAlias.
  AliasResult Alias =
    aliasCheck(V2, V2Size, V2TBAAInfo, SI->getTrueValue(), SISize, SITBAAInfo);
  if (Alias == MayAlias)
    return MayAlias;

  // If V2 is visited, the recursive case will have been caught in the
  // above aliasCheck call, so these subsequent calls to aliasCheck
  // don't need to assume that V2 is being visited recursively.
  Visited.erase(V2);

  AliasResult ThisAlias =
    aliasCheck(V2, V2Size, V2TBAAInfo, SI->getFalseValue(), SISize, SITBAAInfo);
  if (ThisAlias != Alias)
    return MayAlias;
  return Alias;
}
// aliasPHI - Provide a bunch of ad-hoc rules to disambiguate a PHI instruction
// against another.
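//
// For two PHIs in the same block the incoming values are compared edge by
// edge; otherwise each distinct incoming value of PN is checked against V2
// and all of the results must agree.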
AliasAnalysis::AliasResult
BasicAliasAnalysis::aliasPHI(const PHINode *PN, uint64_t PNSize,
                             const MDNode *PNTBAAInfo,
                             const Value *V2, uint64_t V2Size,
                             const MDNode *V2TBAAInfo) {
  // The PHI node has already been visited, avoid recursion any further.
  if (!Visited.insert(PN))
    return MayAlias;

  // If the values are PHIs in the same block, we can do a more precise
  // as well as efficient check: just check for aliases between the values
  // on corresponding edges.
  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
    if (PN2->getParent() == PN->getParent()) {
      AliasResult Alias =
        aliasCheck(PN->getIncomingValue(0), PNSize, PNTBAAInfo,
                   PN2->getIncomingValueForBlock(PN->getIncomingBlock(0)),
                   V2Size, V2TBAAInfo);
      if (Alias == MayAlias)
        return MayAlias;
      for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i) {
        AliasResult ThisAlias =
          aliasCheck(PN->getIncomingValue(i), PNSize, PNTBAAInfo,
                     PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)),
                     V2Size, V2TBAAInfo);
        if (ThisAlias != Alias)
          return MayAlias;
      }
      return Alias;
    }

  SmallPtrSet<Value*, 4> UniqueSrc;
  SmallVector<Value*, 4> V1Srcs;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *PV1 = PN->getIncomingValue(i);
    if (isa<PHINode>(PV1))
      // If any of the source itself is a PHI, return MayAlias conservatively
      // to avoid compile time explosion. The worst possible case is if both
      // sides are PHI nodes. In which case, this is O(m x n) time where 'm'
      // and 'n' are the number of PHI sources.
      return MayAlias;
    if (UniqueSrc.insert(PV1))
      V1Srcs.push_back(PV1);
  }

  AliasResult Alias = aliasCheck(V2, V2Size, V2TBAAInfo,
                                 V1Srcs[0], PNSize, PNTBAAInfo);
  // Early exit if the check of the first PHI source against V2 is MayAlias.
  // Other results are not possible.
  if (Alias == MayAlias)
    return MayAlias;

  // If all sources of the PHI node NoAlias or MustAlias V2, then returns
  // NoAlias / MustAlias. Otherwise, returns MayAlias.
  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
    Value *V = V1Srcs[i];

    // If V2 is visited, the recursive case will have been caught in the
    // above aliasCheck call, so these subsequent calls to aliasCheck
    // don't need to assume that V2 is being visited recursively.
    Visited.erase(V2);

    AliasResult ThisAlias = aliasCheck(V2, V2Size, V2TBAAInfo,
                                       V, PNSize, PNTBAAInfo);
    if (ThisAlias != Alias || ThisAlias == MayAlias)
      return MayAlias;
  }

  return Alias;
}
// aliasCheck - Provide a bunch of ad-hoc rules to disambiguate in common cases,
// such as array references.
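//
// For example, two distinct allocas are identified objects and are reported
// NoAlias immediately, while GEP, PHI and Select operands are dispatched to
// aliasGEP, aliasPHI and aliasSelect respectively.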
AliasAnalysis::AliasResult
BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
                               const MDNode *V1TBAAInfo,
                               const Value *V2, uint64_t V2Size,
                               const MDNode *V2TBAAInfo) {
  // If either of the memory references is empty, it doesn't matter what the
  // pointer values are.
  if (V1Size == 0 || V2Size == 0)
    return NoAlias;

  // Strip off any casts if they exist.
  V1 = V1->stripPointerCasts();
  V2 = V2->stripPointerCasts();

  // Are we checking for alias of the same value?
  if (V1 == V2) return MustAlias;

  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
    return NoAlias;  // Scalars cannot alias each other

  // Figure out what objects these things are pointing to if we can.
  const Value *O1 = GetUnderlyingObject(V1, TD);
  const Value *O2 = GetUnderlyingObject(V2, TD);

  // Null values in the default address space don't point to any object, so they
  // don't alias any other pointer.
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
    if (CPN->getType()->getAddressSpace() == 0)
      return NoAlias;
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
    if (CPN->getType()->getAddressSpace() == 0)
      return NoAlias;

  if (O1 != O2) {
    // If V1/V2 point to two different objects we know that we have no alias.
    if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
      return NoAlias;

    // Constant pointers can't alias with non-const isIdentifiedObject objects.
    if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
        (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
      return NoAlias;

    // Arguments can't alias with local allocations or noalias calls
    // in the same function.
    if (((isa<Argument>(O1) && (isa<AllocaInst>(O2) || isNoAliasCall(O2))) ||
         (isa<Argument>(O2) && (isa<AllocaInst>(O1) || isNoAliasCall(O1)))))
      return NoAlias;

    // Most objects can't alias null.
    if ((isa<ConstantPointerNull>(O2) && isKnownNonNull(O1)) ||
        (isa<ConstantPointerNull>(O1) && isKnownNonNull(O2)))
      return NoAlias;

    // If one pointer is the result of a call/invoke or load and the other is a
    // non-escaping local object within the same function, then we know the
    // object couldn't escape to a point where the call could return it.
    //
    // Note that if the pointers are in different functions, there are a
    // variety of complications. A call with a nocapture argument may still
    // temporarily store the nocapture argument's value in a temporary memory
    // location if that memory location doesn't escape. Or it may pass a
    // nocapture value to other functions as long as they don't capture it.
    if (isEscapeSource(O1) && isNonEscapingLocalObject(O2))
      return NoAlias;
    if (isEscapeSource(O2) && isNonEscapingLocalObject(O1))
      return NoAlias;
  }

  // If the size of one access is larger than the entire object on the other
  // side, then we know such behavior is undefined and can assume no alias.
  if (TD)
    if ((V1Size != UnknownSize && isObjectSmallerThan(O2, V1Size, *TD)) ||
        (V2Size != UnknownSize && isObjectSmallerThan(O1, V2Size, *TD)))
      return NoAlias;

  // FIXME: This isn't aggressively handling alias(GEP, PHI) for example: if the
  // GEP can't simplify, we don't even look at the PHI cases.
  if (!isa<GEPOperator>(V1) && isa<GEPOperator>(V2)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
    std::swap(O1, O2);
  }
  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
    AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, V2TBAAInfo, O1, O2);
    if (Result != MayAlias) return Result;
  }

  if (isa<PHINode>(V2) && !isa<PHINode>(V1)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
  }
  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
    AliasResult Result = aliasPHI(PN, V1Size, V1TBAAInfo,
                                  V2, V2Size, V2TBAAInfo);
    if (Result != MayAlias) return Result;
  }

  if (isa<SelectInst>(V2) && !isa<SelectInst>(V1)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
  }
  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
    AliasResult Result = aliasSelect(S1, V1Size, V1TBAAInfo,
                                     V2, V2Size, V2TBAAInfo);
    if (Result != MayAlias) return Result;
  }

  // If both pointers are pointing into the same object and one of the
  // accesses is to the entire object, then the accesses must
  // overlap in some way.
  if (TD && O1 == O2)
    if ((V1Size != UnknownSize && isObjectSize(O1, V1Size, *TD)) ||
        (V2Size != UnknownSize && isObjectSize(O2, V2Size, *TD)))
      return PartialAlias;

  return AliasAnalysis::alias(Location(V1, V1Size, V1TBAAInfo),
                              Location(V2, V2Size, V2TBAAInfo));
}