//===-- Lint.cpp - Check for common errors in LLVM IR --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass statically checks for common and easily-identified constructs
// which produce undefined or likely unintended behavior in LLVM IR.
//
// It is not a guarantee of correctness, in two ways. First, it isn't
// comprehensive. There are checks which could be done statically which are
// not yet implemented. Some of these are indicated by TODO comments, but
// those aren't comprehensive either. Second, many conditions cannot be
// checked statically. This pass does no dynamic instrumentation, so it
// can't check for all possible problems.
//
// Another limitation is that it assumes all code will be executed. A store
// through a null pointer in a basic block which is never reached is harmless,
// but this pass will warn about it anyway. This is the main reason why most
// of these checks live here instead of in the Verifier pass.
//
// Optimization passes may make conditions that this pass checks for more or
// less obvious. If an optimization pass appears to be introducing a warning,
// it may be that the optimization pass is merely exposing an existing
// condition in the code.
//
// This code may be run before instcombine. In many cases, instcombine checks
// for the same kinds of things and turns instructions with undefined behavior
// into unreachable (or equivalent). Because of this, this pass makes some
// effort to look through bitcasts and so on.
//
//===----------------------------------------------------------------------===//
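
// This pass is registered with the legacy pass manager as "lint" (see the
// INITIALIZE_PASS macros below); it can also be invoked programmatically via
// the lintFunction() and lintModule() helpers defined at the end of this file.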
36 #include "llvm/Analysis/Lint.h"
37 #include "llvm/ADT/APInt.h"
38 #include "llvm/ADT/ArrayRef.h"
39 #include "llvm/ADT/SmallPtrSet.h"
40 #include "llvm/ADT/Twine.h"
41 #include "llvm/Analysis/AliasAnalysis.h"
42 #include "llvm/Analysis/AssumptionCache.h"
43 #include "llvm/Analysis/ConstantFolding.h"
44 #include "llvm/Analysis/InstructionSimplify.h"
45 #include "llvm/Analysis/Loads.h"
46 #include "llvm/Analysis/MemoryLocation.h"
47 #include "llvm/Analysis/Passes.h"
48 #include "llvm/Analysis/TargetLibraryInfo.h"
49 #include "llvm/Analysis/ValueTracking.h"
50 #include "llvm/IR/Argument.h"
51 #include "llvm/IR/BasicBlock.h"
52 #include "llvm/IR/CallSite.h"
53 #include "llvm/IR/Constant.h"
54 #include "llvm/IR/Constants.h"
55 #include "llvm/IR/DataLayout.h"
56 #include "llvm/IR/DerivedTypes.h"
57 #include "llvm/IR/Dominators.h"
58 #include "llvm/IR/Function.h"
59 #include "llvm/IR/GlobalVariable.h"
60 #include "llvm/IR/InstVisitor.h"
61 #include "llvm/IR/InstrTypes.h"
62 #include "llvm/IR/Instruction.h"
63 #include "llvm/IR/Instructions.h"
64 #include "llvm/IR/IntrinsicInst.h"
65 #include "llvm/IR/LegacyPassManager.h"
66 #include "llvm/IR/Module.h"
67 #include "llvm/IR/Type.h"
68 #include "llvm/IR/Value.h"
69 #include "llvm/Pass.h"
70 #include "llvm/Support/Casting.h"
71 #include "llvm/Support/Debug.h"
72 #include "llvm/Support/KnownBits.h"
73 #include "llvm/Support/MathExtras.h"
74 #include "llvm/Support/raw_ostream.h"
75 #include <cassert>
76 #include <cstdint>
77 #include <iterator>
78 #include <string>
80 using namespace llvm;

namespace {
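  // Flags describing how an instruction may touch a pointer operand. They are
  // combined as a bitmask (e.g. MemRef::Read | MemRef::Write) and passed to
  // visitMemoryReference() below.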
  namespace MemRef {
    static const unsigned Read     = 1;
    static const unsigned Write    = 2;
    static const unsigned Callee   = 4;
    static const unsigned Branchee = 8;
  } // end namespace MemRef

  class Lint : public FunctionPass, public InstVisitor<Lint> {
    friend class InstVisitor<Lint>;

    void visitFunction(Function &F);

    void visitCallSite(CallSite CS);
    void visitMemoryReference(Instruction &I, Value *Ptr,
                              uint64_t Size, unsigned Align,
                              Type *Ty, unsigned Flags);
    void visitEHBeginCatch(IntrinsicInst *II);
    void visitEHEndCatch(IntrinsicInst *II);

    void visitCallInst(CallInst &I);
    void visitInvokeInst(InvokeInst &I);
    void visitReturnInst(ReturnInst &I);
    void visitLoadInst(LoadInst &I);
    void visitStoreInst(StoreInst &I);
    void visitXor(BinaryOperator &I);
    void visitSub(BinaryOperator &I);
    void visitLShr(BinaryOperator &I);
    void visitAShr(BinaryOperator &I);
    void visitShl(BinaryOperator &I);
    void visitSDiv(BinaryOperator &I);
    void visitUDiv(BinaryOperator &I);
    void visitSRem(BinaryOperator &I);
    void visitURem(BinaryOperator &I);
    void visitAllocaInst(AllocaInst &I);
    void visitVAArgInst(VAArgInst &I);
    void visitIndirectBrInst(IndirectBrInst &I);
    void visitExtractElementInst(ExtractElementInst &I);
    void visitInsertElementInst(InsertElementInst &I);
    void visitUnreachableInst(UnreachableInst &I);

    Value *findValue(Value *V, bool OffsetOk) const;
    Value *findValueImpl(Value *V, bool OffsetOk,
                         SmallPtrSetImpl<Value *> &Visited) const;

  public:
    Module *Mod;
    const DataLayout *DL;
    AliasAnalysis *AA;
    AssumptionCache *AC;
    DominatorTree *DT;
    TargetLibraryInfo *TLI;

    std::string Messages;
    raw_string_ostream MessagesStr;

    static char ID; // Pass identification, replacement for typeid
    Lint() : FunctionPass(ID), MessagesStr(Messages) {
      initializeLintPass(*PassRegistry::getPassRegistry());
    }

    bool runOnFunction(Function &F) override;

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.setPreservesAll();
      AU.addRequired<AAResultsWrapperPass>();
      AU.addRequired<AssumptionCacheTracker>();
      AU.addRequired<TargetLibraryInfoWrapperPass>();
      AU.addRequired<DominatorTreeWrapperPass>();
    }
    void print(raw_ostream &O, const Module *M) const override {}
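
    // Print each value to the message stream: instructions are printed in
    // full, other values are printed as operands.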
    void WriteValues(ArrayRef<const Value *> Vs) {
      for (const Value *V : Vs) {
        if (!V)
          continue;
        if (isa<Instruction>(V)) {
          MessagesStr << *V << '\n';
        } else {
          V->printAsOperand(MessagesStr, true, Mod);
          MessagesStr << '\n';
        }
      }
    }

    /// A check failed, so print out the condition and the message.
    ///
    /// This provides a nice place to put a breakpoint if you want to see why
    /// something is not correct.
    void CheckFailed(const Twine &Message) { MessagesStr << Message << '\n'; }

    /// A check failed (with values to print).
    ///
    /// This calls the Message-only version so that the above is easier to set
    /// a breakpoint on.
    template <typename T1, typename... Ts>
    void CheckFailed(const Twine &Message, const T1 &V1, const Ts &...Vs) {
      CheckFailed(Message);
      WriteValues({V1, Vs...});
    }
  };
} // end anonymous namespace

char Lint::ID = 0;
INITIALIZE_PASS_BEGIN(Lint, "lint", "Statically lint-checks LLVM IR",
                      false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(Lint, "lint", "Statically lint-checks LLVM IR",
                    false, true)

// Assert - We know that cond should be true, if not print an error message.
#define Assert(C, ...) \
    do { if (!(C)) { CheckFailed(__VA_ARGS__); return; } } while (false)
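
// Note that when an Assert fails, CheckFailed() records the message and the
// macro returns from the current visit method, so any remaining checks for
// that instruction are skipped.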

// Lint::run - This is the main Analysis entry point for a
// function.
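// It prints any messages collected while visiting F and always returns
// false: the pass reports problems but never modifies the IR.
//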
bool Lint::runOnFunction(Function &F) {
  Mod = F.getParent();
  DL = &F.getParent()->getDataLayout();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  visit(F);
  dbgs() << MessagesStr.str();
  Messages.clear();
  return false;
}

void Lint::visitFunction(Function &F) {
  // This isn't undefined behavior, it's just a little unusual, and it's a
  // fairly common mistake to neglect to name a function.
  Assert(F.hasName() || F.hasLocalLinkage(),
         "Unusual: Unnamed function with non-local linkage", &F);

  // TODO: Check for irreducible control flow.
}

void Lint::visitCallSite(CallSite CS) {
  Instruction &I = *CS.getInstruction();
  Value *Callee = CS.getCalledValue();

  visitMemoryReference(I, Callee, MemoryLocation::UnknownSize, 0, nullptr,
                       MemRef::Callee);

  if (Function *F = dyn_cast<Function>(findValue(Callee,
                                                 /*OffsetOk=*/false))) {
    Assert(CS.getCallingConv() == F->getCallingConv(),
           "Undefined behavior: Caller and callee calling convention differ",
           &I);

    FunctionType *FT = F->getFunctionType();
    unsigned NumActualArgs = CS.arg_size();

    Assert(FT->isVarArg() ? FT->getNumParams() <= NumActualArgs
                          : FT->getNumParams() == NumActualArgs,
           "Undefined behavior: Call argument count mismatches callee "
           "argument count",
           &I);

    Assert(FT->getReturnType() == I.getType(),
           "Undefined behavior: Call return type mismatches "
           "callee return type",
           &I);

    // Check argument types (in case the callee was cast) and attributes.
    // TODO: Verify that caller and callee attributes are compatible.
    Function::arg_iterator PI = F->arg_begin(), PE = F->arg_end();
    CallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end();
    for (; AI != AE; ++AI) {
      Value *Actual = *AI;
      if (PI != PE) {
        Argument *Formal = &*PI++;
        Assert(Formal->getType() == Actual->getType(),
               "Undefined behavior: Call argument type mismatches "
               "callee parameter type",
               &I);

        // Check that noalias arguments don't alias other arguments. This is
        // not fully precise because we don't know the sizes of the
        // dereferenced memory regions.
        if (Formal->hasNoAliasAttr() && Actual->getType()->isPointerTy()) {
          AttributeList PAL = CS.getAttributes();
          unsigned ArgNo = 0;
          for (CallSite::arg_iterator BI = CS.arg_begin(); BI != AE;
               ++BI, ++ArgNo) {
            // Skip ByVal arguments since they will be memcpy'd to the callee's
            // stack so we're not really passing the pointer anyway.
            if (PAL.hasParamAttribute(ArgNo, Attribute::ByVal))
              continue;
            // If both arguments are readonly, they have no dependence.
            if (Formal->onlyReadsMemory() && CS.onlyReadsMemory(ArgNo))
              continue;
            if (AI != BI && (*BI)->getType()->isPointerTy()) {
              AliasResult Result = AA->alias(*AI, *BI);
              Assert(Result != MustAlias && Result != PartialAlias,
                     "Unusual: noalias argument aliases another argument", &I);
            }
          }
        }

        // Check that an sret argument points to valid memory.
        if (Formal->hasStructRetAttr() && Actual->getType()->isPointerTy()) {
          Type *Ty =
              cast<PointerType>(Formal->getType())->getElementType();
          visitMemoryReference(I, Actual, DL->getTypeStoreSize(Ty),
                               DL->getABITypeAlignment(Ty), Ty,
                               MemRef::Read | MemRef::Write);
        }
      }
    }
  }

  if (CS.isCall()) {
    const CallInst *CI = cast<CallInst>(CS.getInstruction());
    if (CI->isTailCall()) {
      const AttributeList &PAL = CI->getAttributes();
      unsigned ArgNo = 0;
      for (Value *Arg : CS.args()) {
        // Skip ByVal arguments since they will be memcpy'd to the callee's
        // stack anyway.
        if (PAL.hasParamAttribute(ArgNo++, Attribute::ByVal))
          continue;
        Value *Obj = findValue(Arg, /*OffsetOk=*/true);
        Assert(!isa<AllocaInst>(Obj),
               "Undefined behavior: Call with \"tail\" keyword references "
               "alloca",
               &I);
      }
    }
  }

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I))
    switch (II->getIntrinsicID()) {
    default: break;

    // TODO: Check more intrinsics

    case Intrinsic::memcpy: {
      MemCpyInst *MCI = cast<MemCpyInst>(&I);
      // TODO: If the size is known, use it.
      visitMemoryReference(I, MCI->getDest(), MemoryLocation::UnknownSize,
                           MCI->getDestAlignment(), nullptr, MemRef::Write);
      visitMemoryReference(I, MCI->getSource(), MemoryLocation::UnknownSize,
                           MCI->getSourceAlignment(), nullptr, MemRef::Read);

      // Check that the memcpy arguments don't overlap. The AliasAnalysis API
      // isn't expressive enough for what we really want to do. Known partial
      // overlap is not distinguished from the case where nothing is known.
      auto Size = LocationSize::unknown();
      if (const ConstantInt *Len =
              dyn_cast<ConstantInt>(findValue(MCI->getLength(),
                                              /*OffsetOk=*/false)))
        if (Len->getValue().isIntN(32))
          Size = LocationSize::precise(Len->getValue().getZExtValue());
      Assert(AA->alias(MCI->getSource(), Size, MCI->getDest(), Size) !=
                 MustAlias,
             "Undefined behavior: memcpy source and destination overlap", &I);
      break;
    }
    case Intrinsic::memmove: {
      MemMoveInst *MMI = cast<MemMoveInst>(&I);
      // TODO: If the size is known, use it.
      visitMemoryReference(I, MMI->getDest(), MemoryLocation::UnknownSize,
                           MMI->getDestAlignment(), nullptr, MemRef::Write);
      visitMemoryReference(I, MMI->getSource(), MemoryLocation::UnknownSize,
                           MMI->getSourceAlignment(), nullptr, MemRef::Read);
      break;
    }
    case Intrinsic::memset: {
      MemSetInst *MSI = cast<MemSetInst>(&I);
      // TODO: If the size is known, use it.
      visitMemoryReference(I, MSI->getDest(), MemoryLocation::UnknownSize,
                           MSI->getDestAlignment(), nullptr, MemRef::Write);
      break;
    }

    case Intrinsic::vastart:
      Assert(I.getParent()->getParent()->isVarArg(),
             "Undefined behavior: va_start called in a non-varargs function",
             &I);

      visitMemoryReference(I, CS.getArgument(0), MemoryLocation::UnknownSize, 0,
                           nullptr, MemRef::Read | MemRef::Write);
      break;
    case Intrinsic::vacopy:
      visitMemoryReference(I, CS.getArgument(0), MemoryLocation::UnknownSize, 0,
                           nullptr, MemRef::Write);
      visitMemoryReference(I, CS.getArgument(1), MemoryLocation::UnknownSize, 0,
                           nullptr, MemRef::Read);
      break;
    case Intrinsic::vaend:
      visitMemoryReference(I, CS.getArgument(0), MemoryLocation::UnknownSize, 0,
                           nullptr, MemRef::Read | MemRef::Write);
      break;

    case Intrinsic::stackrestore:
      // Stackrestore doesn't read or write memory, but it sets the
      // stack pointer, which the compiler may read from or write to
      // at any time, so check it for both readability and writeability.
      visitMemoryReference(I, CS.getArgument(0), MemoryLocation::UnknownSize, 0,
                           nullptr, MemRef::Read | MemRef::Write);
      break;
    }
}

void Lint::visitCallInst(CallInst &I) {
  return visitCallSite(&I);
}

void Lint::visitInvokeInst(InvokeInst &I) {
  return visitCallSite(&I);
}

void Lint::visitReturnInst(ReturnInst &I) {
  Function *F = I.getParent()->getParent();
  Assert(!F->doesNotReturn(),
         "Unusual: Return statement in function with noreturn attribute", &I);

  if (Value *V = I.getReturnValue()) {
    Value *Obj = findValue(V, /*OffsetOk=*/true);
    Assert(!isa<AllocaInst>(Obj), "Unusual: Returning alloca value", &I);
  }
}

// TODO: Check that the reference is in bounds.
// TODO: Check readnone/readonly function attributes.
void Lint::visitMemoryReference(Instruction &I,
                                Value *Ptr, uint64_t Size, unsigned Align,
                                Type *Ty, unsigned Flags) {
  // If no memory is being referenced, it doesn't matter if the pointer
  // is valid.
  if (Size == 0)
    return;

  Value *UnderlyingObject = findValue(Ptr, /*OffsetOk=*/true);
  Assert(!isa<ConstantPointerNull>(UnderlyingObject),
         "Undefined behavior: Null pointer dereference", &I);
  Assert(!isa<UndefValue>(UnderlyingObject),
         "Undefined behavior: Undef pointer dereference", &I);
  Assert(!isa<ConstantInt>(UnderlyingObject) ||
             !cast<ConstantInt>(UnderlyingObject)->isMinusOne(),
         "Unusual: All-ones pointer dereference", &I);
  Assert(!isa<ConstantInt>(UnderlyingObject) ||
             !cast<ConstantInt>(UnderlyingObject)->isOne(),
         "Unusual: Address one pointer dereference", &I);

  if (Flags & MemRef::Write) {
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(UnderlyingObject))
      Assert(!GV->isConstant(), "Undefined behavior: Write to read-only memory",
             &I);
    Assert(!isa<Function>(UnderlyingObject) &&
               !isa<BlockAddress>(UnderlyingObject),
           "Undefined behavior: Write to text section", &I);
  }

  if (Flags & MemRef::Read) {
    Assert(!isa<Function>(UnderlyingObject), "Unusual: Load from function body",
           &I);
    Assert(!isa<BlockAddress>(UnderlyingObject),
           "Undefined behavior: Load from block address", &I);
  }

  if (Flags & MemRef::Callee) {
    Assert(!isa<BlockAddress>(UnderlyingObject),
           "Undefined behavior: Call to block address", &I);
  }

  if (Flags & MemRef::Branchee) {
    Assert(!isa<Constant>(UnderlyingObject) ||
               isa<BlockAddress>(UnderlyingObject),
           "Undefined behavior: Branch to non-blockaddress", &I);
  }

  // Check for buffer overflows and misalignment.
  // Only handles memory references that read/write something simple like an
  // alloca instruction or a global variable.
  int64_t Offset = 0;
  if (Value *Base = GetPointerBaseWithConstantOffset(Ptr, Offset, *DL)) {
    // OK, so the access is to a constant offset from Ptr. Check that Ptr is
    // something we can handle and if so extract the size of this base object
    // along with its alignment.
    uint64_t BaseSize = MemoryLocation::UnknownSize;
    unsigned BaseAlign = 0;

    if (AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
      Type *ATy = AI->getAllocatedType();
      if (!AI->isArrayAllocation() && ATy->isSized())
        BaseSize = DL->getTypeAllocSize(ATy);
      BaseAlign = AI->getAlignment();
      if (BaseAlign == 0 && ATy->isSized())
        BaseAlign = DL->getABITypeAlignment(ATy);
    } else if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Base)) {
      // If the global may be defined differently in another compilation unit
      // then don't warn about funky memory accesses.
      if (GV->hasDefinitiveInitializer()) {
        Type *GTy = GV->getValueType();
        if (GTy->isSized())
          BaseSize = DL->getTypeAllocSize(GTy);
        BaseAlign = GV->getAlignment();
        if (BaseAlign == 0 && GTy->isSized())
          BaseAlign = DL->getABITypeAlignment(GTy);
      }
    }

    // Accesses from before the start or after the end of the object are not
    // defined.
    Assert(Size == MemoryLocation::UnknownSize ||
               BaseSize == MemoryLocation::UnknownSize ||
               (Offset >= 0 && Offset + Size <= BaseSize),
           "Undefined behavior: Buffer overflow", &I);

    // Accesses that say that the memory is more aligned than it is are not
    // defined.
    if (Align == 0 && Ty && Ty->isSized())
      Align = DL->getABITypeAlignment(Ty);
    Assert(!BaseAlign || Align <= MinAlign(BaseAlign, Offset),
           "Undefined behavior: Memory reference address is misaligned", &I);
  }
}

void Lint::visitLoadInst(LoadInst &I) {
  visitMemoryReference(I, I.getPointerOperand(),
                       DL->getTypeStoreSize(I.getType()), I.getAlignment(),
                       I.getType(), MemRef::Read);
}

void Lint::visitStoreInst(StoreInst &I) {
  visitMemoryReference(I, I.getPointerOperand(),
                       DL->getTypeStoreSize(I.getOperand(0)->getType()),
                       I.getAlignment(),
                       I.getOperand(0)->getType(), MemRef::Write);
}

void Lint::visitXor(BinaryOperator &I) {
  Assert(!isa<UndefValue>(I.getOperand(0)) || !isa<UndefValue>(I.getOperand(1)),
         "Undefined result: xor(undef, undef)", &I);
}

void Lint::visitSub(BinaryOperator &I) {
  Assert(!isa<UndefValue>(I.getOperand(0)) || !isa<UndefValue>(I.getOperand(1)),
         "Undefined result: sub(undef, undef)", &I);
}

void Lint::visitLShr(BinaryOperator &I) {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(findValue(I.getOperand(1),
                                                        /*OffsetOk=*/false)))
    Assert(CI->getValue().ult(cast<IntegerType>(I.getType())->getBitWidth()),
           "Undefined result: Shift count out of range", &I);
}

void Lint::visitAShr(BinaryOperator &I) {
  if (ConstantInt *CI =
          dyn_cast<ConstantInt>(findValue(I.getOperand(1), /*OffsetOk=*/false)))
    Assert(CI->getValue().ult(cast<IntegerType>(I.getType())->getBitWidth()),
           "Undefined result: Shift count out of range", &I);
}

void Lint::visitShl(BinaryOperator &I) {
  if (ConstantInt *CI =
          dyn_cast<ConstantInt>(findValue(I.getOperand(1), /*OffsetOk=*/false)))
    Assert(CI->getValue().ult(cast<IntegerType>(I.getType())->getBitWidth()),
           "Undefined result: Shift count out of range", &I);
}
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT,
                   AssumptionCache *AC) {
  // Assume undef could be zero.
  if (isa<UndefValue>(V))
    return true;

  VectorType *VecTy = dyn_cast<VectorType>(V->getType());
  if (!VecTy) {
    KnownBits Known = computeKnownBits(V, DL, 0, AC, dyn_cast<Instruction>(V), DT);
    return Known.isZero();
  }

  // Per-component check doesn't work with zeroinitializer
  Constant *C = dyn_cast<Constant>(V);
  if (!C)
    return false;

  if (C->isZeroValue())
    return true;

  // For a vector, KnownZero will only be true if all values are zero, so check
  // this per component
  for (unsigned I = 0, N = VecTy->getNumElements(); I != N; ++I) {
    Constant *Elem = C->getAggregateElement(I);
    if (isa<UndefValue>(Elem))
      return true;

    KnownBits Known = computeKnownBits(Elem, DL);
    if (Known.isZero())
      return true;
  }

  return false;
}

void Lint::visitSDiv(BinaryOperator &I) {
  Assert(!isZero(I.getOperand(1), I.getModule()->getDataLayout(), DT, AC),
         "Undefined behavior: Division by zero", &I);
}

void Lint::visitUDiv(BinaryOperator &I) {
  Assert(!isZero(I.getOperand(1), I.getModule()->getDataLayout(), DT, AC),
         "Undefined behavior: Division by zero", &I);
}

void Lint::visitSRem(BinaryOperator &I) {
  Assert(!isZero(I.getOperand(1), I.getModule()->getDataLayout(), DT, AC),
         "Undefined behavior: Division by zero", &I);
}

void Lint::visitURem(BinaryOperator &I) {
  Assert(!isZero(I.getOperand(1), I.getModule()->getDataLayout(), DT, AC),
         "Undefined behavior: Division by zero", &I);
}

void Lint::visitAllocaInst(AllocaInst &I) {
  if (isa<ConstantInt>(I.getArraySize()))
    // This isn't undefined behavior, it's just an obvious pessimization.
    Assert(&I.getParent()->getParent()->getEntryBlock() == I.getParent(),
           "Pessimization: Static alloca outside of entry block", &I);

  // TODO: Check for an unusual size (MSB set?)
}

void Lint::visitVAArgInst(VAArgInst &I) {
  visitMemoryReference(I, I.getOperand(0), MemoryLocation::UnknownSize, 0,
                       nullptr, MemRef::Read | MemRef::Write);
}

void Lint::visitIndirectBrInst(IndirectBrInst &I) {
  visitMemoryReference(I, I.getAddress(), MemoryLocation::UnknownSize, 0,
                       nullptr, MemRef::Branchee);

  Assert(I.getNumDestinations() != 0,
         "Undefined behavior: indirectbr with no destinations", &I);
}

void Lint::visitExtractElementInst(ExtractElementInst &I) {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(findValue(I.getIndexOperand(),
                                                        /*OffsetOk=*/false)))
    Assert(CI->getValue().ult(I.getVectorOperandType()->getNumElements()),
           "Undefined result: extractelement index out of range", &I);
}

void Lint::visitInsertElementInst(InsertElementInst &I) {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(findValue(I.getOperand(2),
                                                        /*OffsetOk=*/false)))
    Assert(CI->getValue().ult(I.getType()->getNumElements()),
           "Undefined result: insertelement index out of range", &I);
}

void Lint::visitUnreachableInst(UnreachableInst &I) {
  // This isn't undefined behavior, it's merely suspicious.
  Assert(&I == &I.getParent()->front() ||
             std::prev(I.getIterator())->mayHaveSideEffects(),
         "Unusual: unreachable immediately preceded by instruction without "
         "side effects",
         &I);
}

/// findValue - Look through bitcasts and simple memory reference patterns
/// to identify an equivalent, but more informative, value. If OffsetOk
/// is true, look through getelementptrs with non-zero offsets too.
///
/// Most analysis passes don't require this logic, because instcombine
/// will simplify most of these kinds of things away. But it's a goal of
/// this Lint pass to be useful even on non-optimized IR.
Value *Lint::findValue(Value *V, bool OffsetOk) const {
  SmallPtrSet<Value *, 4> Visited;
  return findValueImpl(V, OffsetOk, Visited);
}

/// findValueImpl - Implementation helper for findValue.
Value *Lint::findValueImpl(Value *V, bool OffsetOk,
                           SmallPtrSetImpl<Value *> &Visited) const {
  // Detect self-referential values.
  if (!Visited.insert(V).second)
    return UndefValue::get(V->getType());

  // TODO: Look through sext or zext cast, when the result is known to
  // be interpreted as signed or unsigned, respectively.
  // TODO: Look through eliminable cast pairs.
  // TODO: Look through calls with unique return values.
  // TODO: Look through vector insert/extract/shuffle.
  V = OffsetOk ? GetUnderlyingObject(V, *DL) : V->stripPointerCasts();
  if (LoadInst *L = dyn_cast<LoadInst>(V)) {
    BasicBlock::iterator BBI = L->getIterator();
    BasicBlock *BB = L->getParent();
    SmallPtrSet<BasicBlock *, 4> VisitedBlocks;
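    // Scan backwards through this block, then through chains of unique
    // predecessors, looking for an earlier store or load that already
    // provides the loaded value.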
    for (;;) {
      if (!VisitedBlocks.insert(BB).second)
        break;
      if (Value *U =
              FindAvailableLoadedValue(L, BB, BBI, DefMaxInstsToScan, AA))
        return findValueImpl(U, OffsetOk, Visited);
      if (BBI != BB->begin()) break;
      BB = BB->getUniquePredecessor();
      if (!BB) break;
      BBI = BB->end();
    }
  } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
    if (Value *W = PN->hasConstantValue())
      if (W != V)
        return findValueImpl(W, OffsetOk, Visited);
  } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
    if (CI->isNoopCast(*DL))
      return findValueImpl(CI->getOperand(0), OffsetOk, Visited);
  } else if (ExtractValueInst *Ex = dyn_cast<ExtractValueInst>(V)) {
    if (Value *W = FindInsertedValue(Ex->getAggregateOperand(),
                                     Ex->getIndices()))
      if (W != V)
        return findValueImpl(W, OffsetOk, Visited);
  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    // Same as above, but for ConstantExpr instead of Instruction.
    if (Instruction::isCast(CE->getOpcode())) {
      if (CastInst::isNoopCast(Instruction::CastOps(CE->getOpcode()),
                               CE->getOperand(0)->getType(), CE->getType(),
                               *DL))
        return findValueImpl(CE->getOperand(0), OffsetOk, Visited);
    } else if (CE->getOpcode() == Instruction::ExtractValue) {
      ArrayRef<unsigned> Indices = CE->getIndices();
      if (Value *W = FindInsertedValue(CE->getOperand(0), Indices))
        if (W != V)
          return findValueImpl(W, OffsetOk, Visited);
    }
  }

  // As a last resort, try SimplifyInstruction or constant folding.
  if (Instruction *Inst = dyn_cast<Instruction>(V)) {
    if (Value *W = SimplifyInstruction(Inst, {*DL, TLI, DT, AC}))
      return findValueImpl(W, OffsetOk, Visited);
  } else if (auto *C = dyn_cast<Constant>(V)) {
    if (Value *W = ConstantFoldConstant(C, *DL, TLI))
      if (W && W != V)
        return findValueImpl(W, OffsetOk, Visited);
  }

  return V;
}

//===----------------------------------------------------------------------===//
//  Implement the public interfaces to this file...
//===----------------------------------------------------------------------===//

FunctionPass *llvm::createLintPass() {
  return new Lint();
}

/// lintFunction - Check a function for errors, printing messages on stderr.
///
void llvm::lintFunction(const Function &f) {
  Function &F = const_cast<Function&>(f);
  assert(!F.isDeclaration() && "Cannot lint external functions");

  legacy::FunctionPassManager FPM(F.getParent());
  Lint *V = new Lint();
  FPM.add(V);
  FPM.run(F);
}

/// lintModule - Check a module for errors, printing messages on stderr.
///
void llvm::lintModule(const Module &M) {
  legacy::PassManager PM;
  Lint *V = new Lint();
  PM.add(V);
  PM.run(const_cast<Module&>(M));
}