[clang.git] / lib / CodeGen / CodeGenFunction.h
blob 120eab484d22a878eba9c99624d5480d4cb0c6a1
1 //===-- CodeGenFunction.h - Per-Function state for LLVM CodeGen -*- C++ -*-===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This is the internal per-function state used for llvm translation.
12 //===----------------------------------------------------------------------===//
14 #ifndef CLANG_CODEGEN_CODEGENFUNCTION_H
15 #define CLANG_CODEGEN_CODEGENFUNCTION_H
17 #include "clang/AST/Type.h"
18 #include "clang/AST/ExprCXX.h"
19 #include "clang/AST/ExprObjC.h"
20 #include "clang/AST/CharUnits.h"
21 #include "clang/Basic/ABI.h"
22 #include "clang/Basic/TargetInfo.h"
23 #include "llvm/ADT/DenseMap.h"
24 #include "llvm/ADT/SmallVector.h"
25 #include "llvm/Support/ValueHandle.h"
26 #include "CodeGenModule.h"
27 #include "CGBuilder.h"
28 #include "CGCall.h"
29 #include "CGValue.h"
31 namespace llvm {
32 class BasicBlock;
33 class LLVMContext;
34 class MDNode;
35 class Module;
36 class SwitchInst;
37 class Twine;
38 class Value;
39 class CallSite;
42 namespace clang {
43 class APValue;
44 class ASTContext;
45 class CXXDestructorDecl;
46 class CXXTryStmt;
47 class Decl;
48 class LabelDecl;
49 class EnumConstantDecl;
50 class FunctionDecl;
51 class FunctionProtoType;
52 class LabelStmt;
53 class ObjCContainerDecl;
54 class ObjCInterfaceDecl;
55 class ObjCIvarDecl;
56 class ObjCMethodDecl;
57 class ObjCImplementationDecl;
58 class ObjCPropertyImplDecl;
59 class TargetInfo;
60 class TargetCodeGenInfo;
61 class VarDecl;
62 class ObjCForCollectionStmt;
63 class ObjCAtTryStmt;
64 class ObjCAtThrowStmt;
65 class ObjCAtSynchronizedStmt;
67 namespace CodeGen {
68 class CodeGenTypes;
69 class CGDebugInfo;
70 class CGFunctionInfo;
71 class CGRecordLayout;
72 class CGBlockInfo;
73 class CGCXXABI;
74 class BlockFlags;
75 class BlockFieldFlags;
77 /// A branch fixup. These are required when emitting a goto to a
78 /// label which hasn't been emitted yet. The goto is optimistically
79 /// emitted as a branch to the basic block for the label, and (if it
80 /// occurs in a scope with non-trivial cleanups) a fixup is added to
81 /// the innermost cleanup. When a (normal) cleanup is popped, any
82 /// unresolved fixups in that scope are threaded through the cleanup.
83 struct BranchFixup {
84 /// The block containing the terminator which needs to be modified
85 /// into a switch if this fixup is resolved into the current scope.
86 /// If null, InitialBranch points directly to the destination.
87 llvm::BasicBlock *OptimisticBranchBlock;
89 /// The ultimate destination of the branch.
90 ///
91 /// This can be set to null to indicate that this fixup was
92 /// successfully resolved.
93 llvm::BasicBlock *Destination;
95 /// The destination index value.
96 unsigned DestinationIndex;
98 /// The initial branch of the fixup.
99 llvm::BranchInst *InitialBranch;
102 template <class T> struct InvariantValue {
103 typedef T type;
104 typedef T saved_type;
105 static bool needsSaving(type value) { return false; }
106 static saved_type save(CodeGenFunction &CGF, type value) { return value; }
107 static type restore(CodeGenFunction &CGF, saved_type value) { return value; }
110 /// A metaprogramming class for ensuring that a value will dominate an
111 /// arbitrary position in a function.
112 template <class T> struct DominatingValue : InvariantValue<T> {};
114 template <class T, bool mightBeInstruction =
115 llvm::is_base_of<llvm::Value, T>::value &&
116 !llvm::is_base_of<llvm::Constant, T>::value &&
117 !llvm::is_base_of<llvm::BasicBlock, T>::value>
118 struct DominatingPointer;
119 template <class T> struct DominatingPointer<T,false> : InvariantValue<T*> {};
120 // template <class T> struct DominatingPointer<T,true> at end of file
122 template <class T> struct DominatingValue<T*> : DominatingPointer<T> {};
124 enum CleanupKind {
125 EHCleanup = 0x1,
126 NormalCleanup = 0x2,
127 NormalAndEHCleanup = EHCleanup | NormalCleanup,
129 InactiveCleanup = 0x4,
130 InactiveEHCleanup = EHCleanup | InactiveCleanup,
131 InactiveNormalCleanup = NormalCleanup | InactiveCleanup,
132 InactiveNormalAndEHCleanup = NormalAndEHCleanup | InactiveCleanup
135 /// A stack of scopes which respond to exceptions, including cleanups
136 /// and catch blocks.
137 class EHScopeStack {
138 public:
139 /// A saved depth on the scope stack. This is necessary because
140 /// pushing scopes onto the stack invalidates iterators.
141 class stable_iterator {
142 friend class EHScopeStack;
144 /// Offset from StartOfData to EndOfBuffer.
145 ptrdiff_t Size;
147 stable_iterator(ptrdiff_t Size) : Size(Size) {}
149 public:
150 static stable_iterator invalid() { return stable_iterator(-1); }
151 stable_iterator() : Size(-1) {}
153 bool isValid() const { return Size >= 0; }
155 /// Returns true if this scope encloses I.
156 /// Returns false if I is invalid.
157 /// This scope must be valid.
158 bool encloses(stable_iterator I) const { return Size <= I.Size; }
160 /// Returns true if this scope strictly encloses I: that is,
161 /// if it encloses I and is not I.
162 /// Returns false if I is invalid.
163 /// This scope must be valid.
164 bool strictlyEncloses(stable_iterator I) const { return Size < I.Size; }
166 friend bool operator==(stable_iterator A, stable_iterator B) {
167 return A.Size == B.Size;
169 friend bool operator!=(stable_iterator A, stable_iterator B) {
170 return A.Size != B.Size;
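// Illustrative sketch (an assumption, not from this header): a depth captured
// with stable_begin() before pushing cleanups stays valid and encloses any
// depth captured afterwards.
//
//   EHScopeStack::stable_iterator Outer = EHStack.stable_begin();
//   EHStack.pushCleanup<SomeCleanup>(NormalCleanup);  // SomeCleanup is hypothetical
//   assert(Outer.encloses(EHStack.stable_begin()));
//   assert(EHScopeStack::stable_end().encloses(Outer));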
174 /// Information for lazily generating a cleanup. Subclasses must be
175 /// POD-like: cleanups will not be destructed, and they will be
176 /// allocated on the cleanup stack and freely copied and moved
177 /// around.
179 /// Cleanup implementations should generally be declared in an
180 /// anonymous namespace.
181 class Cleanup {
182 public:
183 // Anchor the construction vtable. We use the destructor because
184 // gcc gives an obnoxious warning if there are virtual methods
185 // with an accessible non-virtual destructor. Unfortunately,
186 // declaring this destructor makes it non-trivial, but there
187 // doesn't seem to be any other way around this warning.
189 // This destructor will never be called.
190 virtual ~Cleanup();
192 /// Emit the cleanup. For normal cleanups, this is run in the
193 /// same EH context as when the cleanup was pushed, i.e. the
194 /// immediately-enclosing context of the cleanup scope. For
195 /// EH cleanups, this is run in a terminate context.
197 /// \param IsForEHCleanup true if this is for an EH cleanup, false
198 /// if for a normal cleanup.
199 virtual void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) = 0;
202 /// UnconditionalCleanupN stores its N parameters and just passes
203 /// them to the real cleanup function.
204 template <class T, class A0>
205 class UnconditionalCleanup1 : public Cleanup {
206 A0 a0;
207 public:
208 UnconditionalCleanup1(A0 a0) : a0(a0) {}
209 void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
210 T::Emit(CGF, IsForEHCleanup, a0);
214 template <class T, class A0, class A1>
215 class UnconditionalCleanup2 : public Cleanup {
216 A0 a0; A1 a1;
217 public:
218 UnconditionalCleanup2(A0 a0, A1 a1) : a0(a0), a1(a1) {}
219 void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
220 T::Emit(CGF, IsForEHCleanup, a0, a1);
224 /// ConditionalCleanupN stores the saved form of its N parameters,
225 /// then restores them and performs the cleanup.
226 template <class T, class A0>
227 class ConditionalCleanup1 : public Cleanup {
228 typedef typename DominatingValue<A0>::saved_type A0_saved;
229 A0_saved a0_saved;
231 void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
232 A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
233 T::Emit(CGF, IsForEHCleanup, a0);
236 public:
237 ConditionalCleanup1(A0_saved a0)
238 : a0_saved(a0) {}
241 template <class T, class A0, class A1>
242 class ConditionalCleanup2 : public Cleanup {
243 typedef typename DominatingValue<A0>::saved_type A0_saved;
244 typedef typename DominatingValue<A1>::saved_type A1_saved;
245 A0_saved a0_saved;
246 A1_saved a1_saved;
248 void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
249 A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
250 A1 a1 = DominatingValue<A1>::restore(CGF, a1_saved);
251 T::Emit(CGF, IsForEHCleanup, a0, a1);
254 public:
255 ConditionalCleanup2(A0_saved a0, A1_saved a1)
256 : a0_saved(a0), a1_saved(a1) {}
259 private:
260 // The implementation for this class is in CGException.h and
261 // CGException.cpp; the definition is here because it's used as a
262 // member of CodeGenFunction.
264 /// The start of the scope-stack buffer, i.e. the allocated pointer
265 /// for the buffer. All of these pointers are either simultaneously
266 /// null or simultaneously valid.
267 char *StartOfBuffer;
269 /// The end of the buffer.
270 char *EndOfBuffer;
272 /// The first valid entry in the buffer.
273 char *StartOfData;
275 /// The innermost normal cleanup on the stack.
276 stable_iterator InnermostNormalCleanup;
278 /// The innermost EH cleanup on the stack.
279 stable_iterator InnermostEHCleanup;
281 /// The number of catches on the stack.
282 unsigned CatchDepth;
284 /// The current EH destination index. Reset to FirstEHDestIndex
285 /// whenever the last EH cleanup is popped.
286 unsigned NextEHDestIndex;
287 enum { FirstEHDestIndex = 1 };
289 /// The current set of branch fixups. A branch fixup is a jump to
290 /// an as-yet unemitted label, i.e. a label for which we don't yet
291 /// know the EH stack depth. Whenever we pop a cleanup, we have
292 /// to thread all the current branch fixups through it.
294 /// Fixups are recorded as the Use of the respective branch or
295 /// switch statement. The use points to the final destination.
296 /// When popping out of a cleanup, these uses are threaded through
297 /// the cleanup and adjusted to point to the new cleanup.
299 /// Note that branches are allowed to jump into protected scopes
300 /// in certain situations; e.g. the following code is legal:
301 /// struct A { ~A(); }; // trivial ctor, non-trivial dtor
302 /// goto foo;
303 /// A a;
304 /// foo:
305 /// bar();
306 llvm::SmallVector<BranchFixup, 8> BranchFixups;
308 char *allocate(size_t Size);
310 void *pushCleanup(CleanupKind K, size_t DataSize);
312 public:
313 EHScopeStack() : StartOfBuffer(0), EndOfBuffer(0), StartOfData(0),
314 InnermostNormalCleanup(stable_end()),
315 InnermostEHCleanup(stable_end()),
316 CatchDepth(0), NextEHDestIndex(FirstEHDestIndex) {}
317 ~EHScopeStack() { delete[] StartOfBuffer; }
319 // Variadic templates would make this not terrible.
321 /// Push a lazily-created cleanup on the stack.
322 template <class T>
323 void pushCleanup(CleanupKind Kind) {
324 void *Buffer = pushCleanup(Kind, sizeof(T));
325 Cleanup *Obj = new(Buffer) T();
326 (void) Obj;
329 /// Push a lazily-created cleanup on the stack.
330 template <class T, class A0>
331 void pushCleanup(CleanupKind Kind, A0 a0) {
332 void *Buffer = pushCleanup(Kind, sizeof(T));
333 Cleanup *Obj = new(Buffer) T(a0);
334 (void) Obj;
337 /// Push a lazily-created cleanup on the stack.
338 template <class T, class A0, class A1>
339 void pushCleanup(CleanupKind Kind, A0 a0, A1 a1) {
340 void *Buffer = pushCleanup(Kind, sizeof(T));
341 Cleanup *Obj = new(Buffer) T(a0, a1);
342 (void) Obj;
345 /// Push a lazily-created cleanup on the stack.
346 template <class T, class A0, class A1, class A2>
347 void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2) {
348 void *Buffer = pushCleanup(Kind, sizeof(T));
349 Cleanup *Obj = new(Buffer) T(a0, a1, a2);
350 (void) Obj;
353 /// Push a lazily-created cleanup on the stack.
354 template <class T, class A0, class A1, class A2, class A3>
355 void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2, A3 a3) {
356 void *Buffer = pushCleanup(Kind, sizeof(T));
357 Cleanup *Obj = new(Buffer) T(a0, a1, a2, a3);
358 (void) Obj;
361 /// Push a lazily-created cleanup on the stack.
362 template <class T, class A0, class A1, class A2, class A3, class A4>
363 void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2, A3 a3, A4 a4) {
364 void *Buffer = pushCleanup(Kind, sizeof(T));
365 Cleanup *Obj = new(Buffer) T(a0, a1, a2, a3, a4);
366 (void) Obj;
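// Illustrative sketch of the intended pattern (an assumption based on the
// comments above, not code from this header): a POD-like cleanup is declared
// in an anonymous namespace and pushed with its constructor arguments.
//
//   namespace {
//     struct CallObjectFree : EHScopeStack::Cleanup {   // hypothetical cleanup
//       llvm::Value *Ptr;
//       CallObjectFree(llvm::Value *Ptr) : Ptr(Ptr) {}
//       void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
//         // ...emit the IR that releases Ptr via CGF.Builder...
//       }
//     };
//   }
//
//   // While emitting the enclosing scope:
//   CGF.EHStack.pushCleanup<CallObjectFree>(NormalAndEHCleanup, Ptr);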
369 // Feel free to add more variants of the following:
371 /// Push a cleanup with non-constant storage requirements on the
372 /// stack. The cleanup type must provide an additional static method:
373 /// static size_t getExtraSize(size_t);
374 /// The argument to this method will be the value N, which will also
375 /// be passed as the first argument to the constructor.
377 /// The data stored in the extra storage must obey the same
378 /// restrictions as normal cleanup member data.
380 /// The pointer returned from this method is valid until the cleanup
381 /// stack is modified.
382 template <class T, class A0, class A1, class A2>
383 T *pushCleanupWithExtra(CleanupKind Kind, size_t N, A0 a0, A1 a1, A2 a2) {
384 void *Buffer = pushCleanup(Kind, sizeof(T) + T::getExtraSize(N));
385 return new (Buffer) T(N, a0, a1, a2);
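// A minimal sketch of a cleanup with trailing variable-size storage (the
// cleanup type, its members, and the a0/a1/a2 arguments are hypothetical):
//
//   struct DestroyElements : EHScopeStack::Cleanup {
//     unsigned NumElts;
//     DestroyElements(unsigned N, A0 a0, A1 a1, A2 a2) : NumElts(N) { /*...*/ }
//     static size_t getExtraSize(size_t N) { return N * sizeof(llvm::Value*); }
//     llvm::Value **getElements() {       // the extra storage follows the object
//       return reinterpret_cast<llvm::Value**>(this + 1);
//     }
//     void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) { /* walk getElements() */ }
//   };
//
//   DestroyElements *C =
//     CGF.EHStack.pushCleanupWithExtra<DestroyElements>(NormalCleanup, N, a0, a1, a2);
//   // ...fill in C->getElements()[0..N) before the cleanup stack is modified again...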
388 /// Pops a cleanup scope off the stack. This should only be called
389 /// by CodeGenFunction::PopCleanupBlock.
390 void popCleanup();
392 /// Push a set of catch handlers on the stack. The catch is
393 /// uninitialized and will need to have the given number of handlers
394 /// set on it.
395 class EHCatchScope *pushCatch(unsigned NumHandlers);
397 /// Pops a catch scope off the stack.
398 void popCatch();
400 /// Push an exceptions filter on the stack.
401 class EHFilterScope *pushFilter(unsigned NumFilters);
403 /// Pops an exceptions filter off the stack.
404 void popFilter();
406 /// Push a terminate handler on the stack.
407 void pushTerminate();
409 /// Pops a terminate handler off the stack.
410 void popTerminate();
412 /// Determines whether the exception-scopes stack is empty.
413 bool empty() const { return StartOfData == EndOfBuffer; }
415 bool requiresLandingPad() const {
416 return (CatchDepth || hasEHCleanups());
419 /// Determines whether there are any normal cleanups on the stack.
420 bool hasNormalCleanups() const {
421 return InnermostNormalCleanup != stable_end();
424 /// Returns the innermost normal cleanup on the stack, or
425 /// stable_end() if there are no normal cleanups.
426 stable_iterator getInnermostNormalCleanup() const {
427 return InnermostNormalCleanup;
429 stable_iterator getInnermostActiveNormalCleanup() const; // CGException.h
431 /// Determines whether there are any EH cleanups on the stack.
432 bool hasEHCleanups() const {
433 return InnermostEHCleanup != stable_end();
436 /// Returns the innermost EH cleanup on the stack, or stable_end()
437 /// if there are no EH cleanups.
438 stable_iterator getInnermostEHCleanup() const {
439 return InnermostEHCleanup;
441 stable_iterator getInnermostActiveEHCleanup() const; // CGException.h
443 /// An unstable reference to a scope-stack depth. Invalidated by
444 /// pushes but not pops.
445 class iterator;
447 /// Returns an iterator pointing to the innermost EH scope.
448 iterator begin() const;
450 /// Returns an iterator pointing to the outermost EH scope.
451 iterator end() const;
453 /// Create a stable reference to the top of the EH stack. The
454 /// returned reference is valid until that scope is popped off the
455 /// stack.
456 stable_iterator stable_begin() const {
457 return stable_iterator(EndOfBuffer - StartOfData);
460 /// Create a stable reference to the bottom of the EH stack.
461 static stable_iterator stable_end() {
462 return stable_iterator(0);
465 /// Translates an iterator into a stable_iterator.
466 stable_iterator stabilize(iterator it) const;
468 /// Finds the nearest cleanup enclosing the given iterator.
469 /// Returns stable_iterator::invalid() if there are no such cleanups.
470 stable_iterator getEnclosingEHCleanup(iterator it) const;
472 /// Turn a stable reference to a scope depth into an unstable pointer
473 /// to the EH stack.
474 iterator find(stable_iterator save) const;
476 /// Removes the cleanup pointed to by the given stable_iterator.
477 void removeCleanup(stable_iterator save);
479 /// Add a branch fixup to the current cleanup scope.
480 BranchFixup &addBranchFixup() {
481 assert(hasNormalCleanups() && "adding fixup in scope without cleanups");
482 BranchFixups.push_back(BranchFixup());
483 return BranchFixups.back();
486 unsigned getNumBranchFixups() const { return BranchFixups.size(); }
487 BranchFixup &getBranchFixup(unsigned I) {
488 assert(I < getNumBranchFixups());
489 return BranchFixups[I];
492 /// Pops lazily-removed fixups from the end of the list. This
493 /// should only be called by procedures which have just popped a
494 /// cleanup or resolved one or more fixups.
495 void popNullFixups();
497 /// Clears the branch-fixups list. This should only be called by
498 /// ResolveAllBranchFixups.
499 void clearFixups() { BranchFixups.clear(); }
501 /// Gets the next EH destination index.
502 unsigned getNextEHDestIndex() { return NextEHDestIndex++; }
505 /// CodeGenFunction - This class organizes the per-function state that is used
506 /// while generating LLVM code.
507 class CodeGenFunction : public CodeGenTypeCache {
508 CodeGenFunction(const CodeGenFunction&); // DO NOT IMPLEMENT
509 void operator=(const CodeGenFunction&); // DO NOT IMPLEMENT
511 friend class CGCXXABI;
512 public:
513 /// A jump destination is an abstract label, branching to which may
514 /// require a jump out through normal cleanups.
515 struct JumpDest {
516 JumpDest() : Block(0), ScopeDepth(), Index(0) {}
517 JumpDest(llvm::BasicBlock *Block,
518 EHScopeStack::stable_iterator Depth,
519 unsigned Index)
520 : Block(Block), ScopeDepth(Depth), Index(Index) {}
522 bool isValid() const { return Block != 0; }
523 llvm::BasicBlock *getBlock() const { return Block; }
524 EHScopeStack::stable_iterator getScopeDepth() const { return ScopeDepth; }
525 unsigned getDestIndex() const { return Index; }
527 private:
528 llvm::BasicBlock *Block;
529 EHScopeStack::stable_iterator ScopeDepth;
530 unsigned Index;
533 /// An unwind destination is an abstract label, branching to which
534 /// may require a jump out through EH cleanups.
535 struct UnwindDest {
536 UnwindDest() : Block(0), ScopeDepth(), Index(0) {}
537 UnwindDest(llvm::BasicBlock *Block,
538 EHScopeStack::stable_iterator Depth,
539 unsigned Index)
540 : Block(Block), ScopeDepth(Depth), Index(Index) {}
542 bool isValid() const { return Block != 0; }
543 llvm::BasicBlock *getBlock() const { return Block; }
544 EHScopeStack::stable_iterator getScopeDepth() const { return ScopeDepth; }
545 unsigned getDestIndex() const { return Index; }
547 private:
548 llvm::BasicBlock *Block;
549 EHScopeStack::stable_iterator ScopeDepth;
550 unsigned Index;
553 CodeGenModule &CGM; // Per-module state.
554 const TargetInfo &Target;
556 typedef std::pair<llvm::Value *, llvm::Value *> ComplexPairTy;
557 CGBuilderTy Builder;
559 /// CurFuncDecl - Holds the Decl for the current function or ObjC method.
560 /// This excludes BlockDecls.
561 const Decl *CurFuncDecl;
562 /// CurCodeDecl - This is the inner-most code context, which includes blocks.
563 const Decl *CurCodeDecl;
564 const CGFunctionInfo *CurFnInfo;
565 QualType FnRetTy;
566 llvm::Function *CurFn;
568 /// CurGD - The GlobalDecl for the current function being compiled.
569 GlobalDecl CurGD;
571 /// ReturnBlock - Unified return block.
572 JumpDest ReturnBlock;
574 /// ReturnValue - The temporary alloca to hold the return value. This is null
575 /// iff the function has no return value.
576 llvm::Value *ReturnValue;
578 /// RethrowBlock - Unified rethrow block.
579 UnwindDest RethrowBlock;
581 /// AllocaInsertPt - This is an instruction in the entry block before which
582 /// we prefer to insert allocas.
583 llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
585 bool Exceptions;
586 bool CatchUndefined;
588 const CodeGen::CGBlockInfo *BlockInfo;
589 llvm::Value *BlockPointer;
591 /// \brief A mapping from NRVO variables to the flags used to indicate
592 /// when the NRVO has been applied to this variable.
593 llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags;
595 EHScopeStack EHStack;
597 /// i32s containing the indexes of the cleanup destinations.
598 llvm::AllocaInst *NormalCleanupDest;
599 llvm::AllocaInst *EHCleanupDest;
601 unsigned NextCleanupDestIndex;
603 /// The exception slot. All landing pads write the current
604 /// exception pointer into this alloca.
605 llvm::Value *ExceptionSlot;
607 /// Emits a landing pad for the current EH stack.
608 llvm::BasicBlock *EmitLandingPad();
610 llvm::BasicBlock *getInvokeDestImpl();
612 /// Set up the last cleanup that was pushed as a conditional
613 /// full-expression cleanup.
614 void initFullExprCleanup();
616 template <class T>
617 typename DominatingValue<T>::saved_type saveValueInCond(T value) {
618 return DominatingValue<T>::save(*this, value);
621 public:
622 /// ObjCEHValueStack - Stack of Objective-C exception values, used for
623 /// rethrows.
624 llvm::SmallVector<llvm::Value*, 8> ObjCEHValueStack;
626 // A struct holding information about a finally block's IR
627 // generation. For now, doesn't actually hold anything.
628 struct FinallyInfo {
631 FinallyInfo EnterFinallyBlock(const Stmt *Stmt,
632 llvm::Constant *BeginCatchFn,
633 llvm::Constant *EndCatchFn,
634 llvm::Constant *RethrowFn);
635 void ExitFinallyBlock(FinallyInfo &FinallyInfo);
637 /// pushFullExprCleanup - Push a cleanup to be run at the end of the
638 /// current full-expression. Safe against the possibility that
639 /// we're currently inside a conditionally-evaluated expression.
640 template <class T, class A0>
641 void pushFullExprCleanup(CleanupKind kind, A0 a0) {
642 // If we're not in a conditional branch, or if none of the
643 // arguments requires saving, then use the unconditional cleanup.
644 if (!isInConditionalBranch()) {
645 typedef EHScopeStack::UnconditionalCleanup1<T, A0> CleanupType;
646 return EHStack.pushCleanup<CleanupType>(kind, a0);
649 typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
651 typedef EHScopeStack::ConditionalCleanup1<T, A0> CleanupType;
652 EHStack.pushCleanup<CleanupType>(kind, a0_saved);
653 initFullExprCleanup();
656 /// pushFullExprCleanup - Push a cleanup to be run at the end of the
657 /// current full-expression. Safe against the possibility that
658 /// we're currently inside a conditionally-evaluated expression.
659 template <class T, class A0, class A1>
660 void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1) {
661 // If we're not in a conditional branch, or if none of the
662 // arguments requires saving, then use the unconditional cleanup.
663 if (!isInConditionalBranch()) {
664 typedef EHScopeStack::UnconditionalCleanup2<T, A0, A1> CleanupType;
665 return EHStack.pushCleanup<CleanupType>(kind, a0, a1);
668 typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
669 typename DominatingValue<A1>::saved_type a1_saved = saveValueInCond(a1);
671 typedef EHScopeStack::ConditionalCleanup2<T, A0, A1> CleanupType;
672 EHStack.pushCleanup<CleanupType>(kind, a0_saved, a1_saved);
673 initFullExprCleanup();
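// Illustrative sketch (an assumption): pushing a temporary-destruction cleanup
// from inside a possibly-conditional subexpression of a full-expression; the
// cleanup type here is hypothetical.
//
//   pushFullExprCleanup<DestroyTemporary>(NormalAndEHCleanup, Dtor, Addr);
//
// Outside a conditional branch this is just pushCleanup; inside one, the
// arguments are saved via DominatingValue<>::save and initFullExprCleanup
// arranges for the cleanup to fire only on paths that actually evaluated it.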
676 /// PushDestructorCleanup - Push a cleanup to call the
677 /// complete-object destructor of an object of the given type at the
678 /// given address. Does nothing if T is not a C++ class type with a
679 /// non-trivial destructor.
680 void PushDestructorCleanup(QualType T, llvm::Value *Addr);
682 /// PushDestructorCleanup - Push a cleanup to call the
683 /// complete-object variant of the given destructor on the object at
684 /// the given address.
685 void PushDestructorCleanup(const CXXDestructorDecl *Dtor,
686 llvm::Value *Addr);
688 /// PopCleanupBlock - Will pop the cleanup entry on the stack and
689 /// process all branch fixups.
690 void PopCleanupBlock(bool FallThroughIsBranchThrough = false);
692 /// DeactivateCleanupBlock - Deactivates the given cleanup block.
693 /// The block cannot be reactivated. Pops it if it's the top of the
694 /// stack.
695 void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup);
697 /// ActivateCleanupBlock - Activates an initially-inactive cleanup.
698 /// Cannot be used to resurrect a deactivated cleanup.
699 void ActivateCleanupBlock(EHScopeStack::stable_iterator Cleanup);
701 /// \brief Enters a new scope for capturing cleanups, all of which
702 /// will be executed once the scope is exited.
703 class RunCleanupsScope {
704 CodeGenFunction& CGF;
705 EHScopeStack::stable_iterator CleanupStackDepth;
706 bool OldDidCallStackSave;
707 bool PerformCleanup;
709 RunCleanupsScope(const RunCleanupsScope &); // DO NOT IMPLEMENT
710 RunCleanupsScope &operator=(const RunCleanupsScope &); // DO NOT IMPLEMENT
712 public:
713 /// \brief Enter a new cleanup scope.
714 explicit RunCleanupsScope(CodeGenFunction &CGF)
715 : CGF(CGF), PerformCleanup(true)
717 CleanupStackDepth = CGF.EHStack.stable_begin();
718 OldDidCallStackSave = CGF.DidCallStackSave;
719 CGF.DidCallStackSave = false;
722 /// \brief Exit this cleanup scope, emitting any accumulated
723 /// cleanups.
724 ~RunCleanupsScope() {
725 if (PerformCleanup) {
726 CGF.DidCallStackSave = OldDidCallStackSave;
727 CGF.PopCleanupBlocks(CleanupStackDepth);
731 /// \brief Determine whether this scope requires any cleanups.
732 bool requiresCleanups() const {
733 return CGF.EHStack.stable_begin() != CleanupStackDepth;
736 /// \brief Force the emission of cleanups now, instead of waiting
737 /// until this object is destroyed.
738 void ForceCleanup() {
739 assert(PerformCleanup && "Already forced cleanup");
740 CGF.DidCallStackSave = OldDidCallStackSave;
741 CGF.PopCleanupBlocks(CleanupStackDepth);
742 PerformCleanup = false;
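// Typical usage (a sketch; the statement being emitted is arbitrary):
//
//   {
//     RunCleanupsScope Scope(*this);
//     EmitStmt(S.getBody());        // may push cleanups onto EHStack
//   }                               // ~RunCleanupsScope emits and pops them
//
// Call Scope.ForceCleanup() instead when the cleanups must be emitted at a
// specific point before the scope object is destroyed.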
747 /// PopCleanupBlocks - Takes the old cleanup stack size and emits
748 /// the cleanup blocks that have been added.
749 void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize);
751 void ResolveBranchFixups(llvm::BasicBlock *Target);
753 /// The given basic block lies in the current EH scope, but may be a
754 /// target of a potentially scope-crossing jump; get a stable handle
755 /// to which we can perform this jump later.
756 JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target) {
757 return JumpDest(Target,
758 EHStack.getInnermostNormalCleanup(),
759 NextCleanupDestIndex++);
762 /// The given basic block lies in the current EH scope, but may be a
763 /// target of a potentially scope-crossing jump; get a stable handle
764 /// to which we can perform this jump later.
765 JumpDest getJumpDestInCurrentScope(llvm::StringRef Name = llvm::StringRef()) {
766 return getJumpDestInCurrentScope(createBasicBlock(Name));
769 /// EmitBranchThroughCleanup - Emit a branch from the current insert
770 /// block through the normal cleanup handling code (if any) and then
771 /// on to \arg Dest.
772 void EmitBranchThroughCleanup(JumpDest Dest);
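// Sketch of the usual loop-exit pattern (assumed, based on the comments above):
//
//   JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
//   // ...while emitting the body, a 'break' becomes:
//   EmitBranchThroughCleanup(LoopExit);   // threads through normal cleanups
//   // ...after the loop:
//   EmitBlock(LoopExit.getBlock(), true);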
774 /// EmitBranchThroughEHCleanup - Emit a branch from the current
775 /// insert block through the EH cleanup handling code (if any) and
776 /// then on to \arg Dest.
777 void EmitBranchThroughEHCleanup(UnwindDest Dest);
779 /// getRethrowDest - Returns the unified outermost-scope rethrow
780 /// destination.
781 UnwindDest getRethrowDest();
783 /// An object to manage conditionally-evaluated expressions.
784 class ConditionalEvaluation {
785 llvm::BasicBlock *StartBB;
787 public:
788 ConditionalEvaluation(CodeGenFunction &CGF)
789 : StartBB(CGF.Builder.GetInsertBlock()) {}
791 void begin(CodeGenFunction &CGF) {
792 assert(CGF.OutermostConditional != this);
793 if (!CGF.OutermostConditional)
794 CGF.OutermostConditional = this;
797 void end(CodeGenFunction &CGF) {
798 assert(CGF.OutermostConditional != 0);
799 if (CGF.OutermostConditional == this)
800 CGF.OutermostConditional = 0;
803 /// Returns a block which will be executed prior to each
804 /// evaluation of the conditional code.
805 llvm::BasicBlock *getStartingBlock() const {
806 return StartBB;
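// Sketch of how the two arms of a conditional are bracketed (assumed usage):
//
//   ConditionalEvaluation eval(CGF);
//   // ...emit the branch on the condition to lhsBlock/rhsBlock...
//   eval.begin(CGF);
//   CGF.EmitBlock(lhsBlock);
//   // emit the true arm; cleanups pushed here become conditional
//   eval.end(CGF);
//   // ...repeat begin()/end() around the false arm...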
810 /// isInConditionalBranch - Return true if we're currently emitting
811 /// one branch or the other of a conditional expression.
812 bool isInConditionalBranch() const { return OutermostConditional != 0; }
814 /// An RAII object to record that we're evaluating a statement
815 /// expression.
816 class StmtExprEvaluation {
817 CodeGenFunction &CGF;
819 /// We have to save the outermost conditional: cleanups in a
820 /// statement expression aren't conditional just because the
821 /// StmtExpr is.
822 ConditionalEvaluation *SavedOutermostConditional;
824 public:
825 StmtExprEvaluation(CodeGenFunction &CGF)
826 : CGF(CGF), SavedOutermostConditional(CGF.OutermostConditional) {
827 CGF.OutermostConditional = 0;
830 ~StmtExprEvaluation() {
831 CGF.OutermostConditional = SavedOutermostConditional;
832 CGF.EnsureInsertPoint();
836 /// An object which temporarily prevents a value from being
837 /// destroyed by aggressive peephole optimizations that assume that
838 /// all uses of a value have been realized in the IR.
839 class PeepholeProtection {
840 llvm::Instruction *Inst;
841 friend class CodeGenFunction;
843 public:
844 PeepholeProtection() : Inst(0) {}
847 /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
848 class OpaqueValueMapping {
849 CodeGenFunction &CGF;
850 const OpaqueValueExpr *OpaqueValue;
851 bool BoundLValue;
852 CodeGenFunction::PeepholeProtection Protection;
854 public:
855 static bool shouldBindAsLValue(const Expr *expr) {
856 return expr->isGLValue() || expr->getType()->isRecordType();
859 /// Build the opaque value mapping for the given conditional
860 /// operator if it's the GNU ?: extension. This is a common
861 /// enough pattern that the convenience operator is really
862 /// helpful.
864 OpaqueValueMapping(CodeGenFunction &CGF,
865 const AbstractConditionalOperator *op) : CGF(CGF) {
866 if (isa<ConditionalOperator>(op)) {
867 OpaqueValue = 0;
868 BoundLValue = false;
869 return;
872 const BinaryConditionalOperator *e = cast<BinaryConditionalOperator>(op);
873 init(e->getOpaqueValue(), e->getCommon());
876 OpaqueValueMapping(CodeGenFunction &CGF,
877 const OpaqueValueExpr *opaqueValue,
878 LValue lvalue)
879 : CGF(CGF), OpaqueValue(opaqueValue), BoundLValue(true) {
880 assert(opaqueValue && "no opaque value expression!");
881 assert(shouldBindAsLValue(opaqueValue));
882 initLValue(lvalue);
885 OpaqueValueMapping(CodeGenFunction &CGF,
886 const OpaqueValueExpr *opaqueValue,
887 RValue rvalue)
888 : CGF(CGF), OpaqueValue(opaqueValue), BoundLValue(false) {
889 assert(opaqueValue && "no opaque value expression!");
890 assert(!shouldBindAsLValue(opaqueValue));
891 initRValue(rvalue);
894 void pop() {
895 assert(OpaqueValue && "mapping already popped!");
896 popImpl();
897 OpaqueValue = 0;
900 ~OpaqueValueMapping() {
901 if (OpaqueValue) popImpl();
904 private:
905 void popImpl() {
906 if (BoundLValue)
907 CGF.OpaqueLValues.erase(OpaqueValue);
908 else {
909 CGF.OpaqueRValues.erase(OpaqueValue);
910 CGF.unprotectFromPeepholes(Protection);
914 void init(const OpaqueValueExpr *ov, const Expr *e) {
915 OpaqueValue = ov;
916 BoundLValue = shouldBindAsLValue(ov);
917 assert(BoundLValue == shouldBindAsLValue(e)
918 && "inconsistent expression value kinds!");
919 if (BoundLValue)
920 initLValue(CGF.EmitLValue(e));
921 else
922 initRValue(CGF.EmitAnyExpr(e));
925 void initLValue(const LValue &lv) {
926 CGF.OpaqueLValues.insert(std::make_pair(OpaqueValue, lv));
929 void initRValue(const RValue &rv) {
930 // Work around an extremely aggressive peephole optimization in
931 // EmitScalarConversion which assumes that all other uses of a
932 // value are extant.
933 Protection = CGF.protectFromPeepholes(rv);
934 CGF.OpaqueRValues.insert(std::make_pair(OpaqueValue, rv));
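// Sketch of binding an opaque value while emitting the GNU "x ?: y"
// extension (assumed usage; 'E' is the AbstractConditionalOperator):
//
//   CodeGenFunction::OpaqueValueMapping binding(CGF, E);
//   // The common subexpression is emitted once; any OpaqueValueExpr uses in
//   // the result arms are resolved through the mapping until 'binding' is
//   // destroyed or pop() is called.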
938 /// getByRefValueLLVMField - Given a declaration, returns the LLVM field
939 /// number that holds the value.
940 unsigned getByRefValueLLVMField(const ValueDecl *VD) const;
942 /// BuildBlockByrefAddress - Computes address location of the
943 /// variable which is declared as __block.
944 llvm::Value *BuildBlockByrefAddress(llvm::Value *BaseAddr,
945 const VarDecl *V);
946 private:
947 CGDebugInfo *DebugInfo;
949 /// IndirectBranch - The first time an indirect goto is seen we create a block
950 /// with an indirect branch. Every time we see the address of a label taken,
951 /// we add the label to the indirect goto. Every subsequent indirect goto is
952 /// codegen'd as a jump to the IndirectBranch's basic block.
953 llvm::IndirectBrInst *IndirectBranch;
955 /// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C
956 /// decls.
957 typedef llvm::DenseMap<const Decl*, llvm::Value*> DeclMapTy;
958 DeclMapTy LocalDeclMap;
960 /// LabelMap - This keeps track of the LLVM basic block for each C label.
961 llvm::DenseMap<const LabelDecl*, JumpDest> LabelMap;
963 // BreakContinueStack - This keeps track of where break and continue
964 // statements should jump to.
965 struct BreakContinue {
966 BreakContinue(JumpDest Break, JumpDest Continue)
967 : BreakBlock(Break), ContinueBlock(Continue) {}
969 JumpDest BreakBlock;
970 JumpDest ContinueBlock;
972 llvm::SmallVector<BreakContinue, 8> BreakContinueStack;
974 /// SwitchInsn - This is the nearest enclosing switch instruction. It is null if the
975 /// current context is not in a switch.
976 llvm::SwitchInst *SwitchInsn;
978 /// CaseRangeBlock - This block holds the condition check for the last case
979 /// statement range in the current switch instruction.
980 llvm::BasicBlock *CaseRangeBlock;
982 /// OpaqueLValues - Keeps track of the current set of opaque value
983 /// expressions.
984 llvm::DenseMap<const OpaqueValueExpr *, LValue> OpaqueLValues;
985 llvm::DenseMap<const OpaqueValueExpr *, RValue> OpaqueRValues;
987 // VLASizeMap - This keeps track of the associated size for each VLA type.
988 // We track this by the size expression rather than the type itself because
989 // in certain situations, like a const qualifier applied to a VLA typedef,
990 // multiple VLA types can share the same size expression.
991 // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
992 // enter/leave scopes.
993 llvm::DenseMap<const Expr*, llvm::Value*> VLASizeMap;
995 /// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid
996 /// calling llvm.stacksave for multiple VLAs in the same scope.
997 bool DidCallStackSave;
999 /// A block containing a single 'unreachable' instruction. Created
1000 /// lazily by getUnreachableBlock().
1001 llvm::BasicBlock *UnreachableBlock;
1003 /// CXXThisDecl - When generating code for a C++ member function,
1004 /// this will hold the implicit 'this' declaration.
1005 ImplicitParamDecl *CXXThisDecl;
1006 llvm::Value *CXXThisValue;
1008 /// CXXVTTDecl - When generating code for a base object constructor or
1009 /// base object destructor with virtual bases, this will hold the implicit
1010 /// VTT parameter.
1011 ImplicitParamDecl *CXXVTTDecl;
1012 llvm::Value *CXXVTTValue;
1014 /// OutermostConditional - Points to the outermost active
1015 /// conditional control. This is used so that we know if a
1016 /// temporary should be destroyed conditionally.
1017 ConditionalEvaluation *OutermostConditional;
1020 /// ByrefValueInfoMap - For each __block variable, contains a pair of the LLVM
1021 /// type as well as the field number that contains the actual data.
1022 llvm::DenseMap<const ValueDecl *, std::pair<const llvm::Type *,
1023 unsigned> > ByRefValueInfo;
1025 llvm::BasicBlock *TerminateLandingPad;
1026 llvm::BasicBlock *TerminateHandler;
1027 llvm::BasicBlock *TrapBB;
1029 public:
1030 CodeGenFunction(CodeGenModule &cgm);
1032 CodeGenTypes &getTypes() const { return CGM.getTypes(); }
1033 ASTContext &getContext() const;
1034 CGDebugInfo *getDebugInfo() { return DebugInfo; }
1036 const LangOptions &getLangOptions() const { return CGM.getLangOptions(); }
1038 /// Returns a pointer to the function's exception object slot, which
1039 /// is assigned in every landing pad.
1040 llvm::Value *getExceptionSlot();
1042 llvm::Value *getNormalCleanupDestSlot();
1043 llvm::Value *getEHCleanupDestSlot();
1045 llvm::BasicBlock *getUnreachableBlock() {
1046 if (!UnreachableBlock) {
1047 UnreachableBlock = createBasicBlock("unreachable");
1048 new llvm::UnreachableInst(getLLVMContext(), UnreachableBlock);
1050 return UnreachableBlock;
1053 llvm::BasicBlock *getInvokeDest() {
1054 if (!EHStack.requiresLandingPad()) return 0;
1055 return getInvokeDestImpl();
1058 llvm::LLVMContext &getLLVMContext() { return CGM.getLLVMContext(); }
1060 //===--------------------------------------------------------------------===//
1061 // Objective-C
1062 //===--------------------------------------------------------------------===//
1064 void GenerateObjCMethod(const ObjCMethodDecl *OMD);
1066 void StartObjCMethod(const ObjCMethodDecl *MD,
1067 const ObjCContainerDecl *CD);
1069 /// GenerateObjCGetter - Synthesize an Objective-C property getter function.
1070 void GenerateObjCGetter(ObjCImplementationDecl *IMP,
1071 const ObjCPropertyImplDecl *PID);
1072 void GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
1073 ObjCMethodDecl *MD, bool ctor);
1075 /// GenerateObjCSetter - Synthesize an Objective-C property setter function
1076 /// for the given property.
1077 void GenerateObjCSetter(ObjCImplementationDecl *IMP,
1078 const ObjCPropertyImplDecl *PID);
1079 bool IndirectObjCSetterArg(const CGFunctionInfo &FI);
1080 bool IvarTypeWithAggrGCObjects(QualType Ty);
1082 //===--------------------------------------------------------------------===//
1083 // Block Bits
1084 //===--------------------------------------------------------------------===//
1086 llvm::Value *EmitBlockLiteral(const BlockExpr *);
1087 llvm::Constant *BuildDescriptorBlockDecl(const BlockExpr *,
1088 const CGBlockInfo &Info,
1089 const llvm::StructType *,
1090 llvm::Constant *BlockVarLayout);
1092 llvm::Function *GenerateBlockFunction(GlobalDecl GD,
1093 const CGBlockInfo &Info,
1094 const Decl *OuterFuncDecl,
1095 const DeclMapTy &ldm);
1097 llvm::Constant *GenerateCopyHelperFunction(const CGBlockInfo &blockInfo);
1098 llvm::Constant *GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo);
1100 llvm::Constant *GeneratebyrefCopyHelperFunction(const llvm::Type *,
1101 BlockFieldFlags flags,
1102 const VarDecl *BD);
1103 llvm::Constant *GeneratebyrefDestroyHelperFunction(const llvm::Type *T,
1104 BlockFieldFlags flags,
1105 const VarDecl *BD);
1107 void BuildBlockRelease(llvm::Value *DeclPtr, BlockFieldFlags flags);
1109 llvm::Value *LoadBlockStruct() {
1110 assert(BlockPointer && "no block pointer set!");
1111 return BlockPointer;
1114 void AllocateBlockCXXThisPointer(const CXXThisExpr *E);
1115 void AllocateBlockDecl(const BlockDeclRefExpr *E);
1116 llvm::Value *GetAddrOfBlockDecl(const BlockDeclRefExpr *E) {
1117 return GetAddrOfBlockDecl(E->getDecl(), E->isByRef());
1119 llvm::Value *GetAddrOfBlockDecl(const VarDecl *var, bool ByRef);
1120 const llvm::Type *BuildByRefType(const VarDecl *var);
1122 void GenerateCode(GlobalDecl GD, llvm::Function *Fn);
1123 void StartFunction(GlobalDecl GD, QualType RetTy,
1124 llvm::Function *Fn,
1125 const FunctionArgList &Args,
1126 SourceLocation StartLoc);
1128 void EmitConstructorBody(FunctionArgList &Args);
1129 void EmitDestructorBody(FunctionArgList &Args);
1130 void EmitFunctionBody(FunctionArgList &Args);
1132 /// EmitReturnBlock - Emit the unified return block, trying to avoid its
1133 /// emission when possible.
1134 void EmitReturnBlock();
1136 /// FinishFunction - Complete IR generation of the current function. It is
1137 /// legal to call this function even if there is no current insertion point.
1138 void FinishFunction(SourceLocation EndLoc=SourceLocation());
1140 /// GenerateThunk - Generate a thunk for the given method.
1141 void GenerateThunk(llvm::Function *Fn, GlobalDecl GD, const ThunkInfo &Thunk);
1143 void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type,
1144 FunctionArgList &Args);
1146 /// InitializeVTablePointer - Initialize the vtable pointer of the given
1147 /// subobject.
1149 void InitializeVTablePointer(BaseSubobject Base,
1150 const CXXRecordDecl *NearestVBase,
1151 uint64_t OffsetFromNearestVBase,
1152 llvm::Constant *VTable,
1153 const CXXRecordDecl *VTableClass);
1155 typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
1156 void InitializeVTablePointers(BaseSubobject Base,
1157 const CXXRecordDecl *NearestVBase,
1158 uint64_t OffsetFromNearestVBase,
1159 bool BaseIsNonVirtualPrimaryBase,
1160 llvm::Constant *VTable,
1161 const CXXRecordDecl *VTableClass,
1162 VisitedVirtualBasesSetTy& VBases);
1164 void InitializeVTablePointers(const CXXRecordDecl *ClassDecl);
1166 /// GetVTablePtr - Return the Value of the vtable pointer member pointed
1167 /// to by This.
1168 llvm::Value *GetVTablePtr(llvm::Value *This, const llvm::Type *Ty);
1170 /// EnterDtorCleanups - Enter the cleanups necessary to complete the
1171 /// given phase of destruction for a destructor. The end result
1172 /// should call destructors on members and base classes in reverse
1173 /// order of their construction.
1174 void EnterDtorCleanups(const CXXDestructorDecl *Dtor, CXXDtorType Type);
1176 /// ShouldInstrumentFunction - Return true if the current function should be
1177 /// instrumented with __cyg_profile_func_* calls
1178 bool ShouldInstrumentFunction();
1180 /// EmitFunctionInstrumentation - Emit LLVM code to call the specified
1181 /// instrumentation function with the current function and the call site, if
1182 /// function instrumentation is enabled.
1183 void EmitFunctionInstrumentation(const char *Fn);
1185 /// EmitMCountInstrumentation - Emit call to .mcount.
1186 void EmitMCountInstrumentation();
1188 /// EmitFunctionProlog - Emit the target specific LLVM code to load the
1189 /// arguments for the given function. This is also responsible for naming the
1190 /// LLVM function arguments.
1191 void EmitFunctionProlog(const CGFunctionInfo &FI,
1192 llvm::Function *Fn,
1193 const FunctionArgList &Args);
1195 /// EmitFunctionEpilog - Emit the target specific LLVM code to return the
1196 /// given temporary.
1197 void EmitFunctionEpilog(const CGFunctionInfo &FI);
1199 /// EmitStartEHSpec - Emit the start of the exception spec.
1200 void EmitStartEHSpec(const Decl *D);
1202 /// EmitEndEHSpec - Emit the end of the exception spec.
1203 void EmitEndEHSpec(const Decl *D);
1205 /// getTerminateLandingPad - Return a landing pad that just calls terminate.
1206 llvm::BasicBlock *getTerminateLandingPad();
1208 /// getTerminateHandler - Return a handler (not a landing pad, just
1209 /// a catch handler) that just calls terminate. This is used when
1210 /// a terminate scope encloses a try.
1211 llvm::BasicBlock *getTerminateHandler();
1213 const llvm::Type *ConvertTypeForMem(QualType T);
1214 const llvm::Type *ConvertType(QualType T);
1215 const llvm::Type *ConvertType(const TypeDecl *T) {
1216 return ConvertType(getContext().getTypeDeclType(T));
1219 /// LoadObjCSelf - Load the value of self. This function is only valid while
1220 /// generating code for an Objective-C method.
1221 llvm::Value *LoadObjCSelf();
1223 /// TypeOfSelfObject - Return type of object that this self represents.
1224 QualType TypeOfSelfObject();
1226 /// hasAggregateLLVMType - Return true if the specified AST type will map into
1227 /// an aggregate LLVM type or is void.
1228 static bool hasAggregateLLVMType(QualType T);
1230 /// createBasicBlock - Create an LLVM basic block.
1231 llvm::BasicBlock *createBasicBlock(llvm::StringRef name = "",
1232 llvm::Function *parent = 0,
1233 llvm::BasicBlock *before = 0) {
1234 #ifdef NDEBUG
1235 return llvm::BasicBlock::Create(getLLVMContext(), "", parent, before);
1236 #else
1237 return llvm::BasicBlock::Create(getLLVMContext(), name, parent, before);
1238 #endif
1241 /// getJumpDestForLabel - Return the JumpDest that the specified label
1242 /// maps to.
1243 JumpDest getJumpDestForLabel(const LabelDecl *S);
1245 /// SimplifyForwardingBlocks - If the given basic block is only a branch to
1246 /// another basic block, simplify it. This assumes that no other code could
1247 /// potentially reference the basic block.
1248 void SimplifyForwardingBlocks(llvm::BasicBlock *BB);
1250 /// EmitBlock - Emit the given block \arg BB and set it as the insert point,
1251 /// adding a fall-through branch from the current insert block if
1252 /// necessary. It is legal to call this function even if there is no current
1253 /// insertion point.
1255 /// IsFinished - If true, indicates that the caller has finished emitting
1256 /// branches to the given block and does not expect to emit code into it. This
1257 /// means the block can be ignored if it is unreachable.
1258 void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false);
1260 /// EmitBranch - Emit a branch to the specified basic block from the current
1261 /// insert block, taking care to avoid creation of branches from dummy
1262 /// blocks. It is legal to call this function even if there is no current
1263 /// insertion point.
1265 /// This function clears the current insertion point. The caller should follow
1266 /// calls to this function with calls to Emit*Block prior to generating new
1267 /// code.
1268 void EmitBranch(llvm::BasicBlock *Block);
1270 /// HaveInsertPoint - True if an insertion point is defined. If not, this
1271 /// indicates that the current code being emitted is unreachable.
1272 bool HaveInsertPoint() const {
1273 return Builder.GetInsertBlock() != 0;
1276 /// EnsureInsertPoint - Ensure that an insertion point is defined so that
1277 /// emitted IR has a place to go. Note that by definition, if this function
1278 /// creates a block then that block is unreachable; callers may do better to
1279 /// detect when no insertion point is defined and simply skip IR generation.
1280 void EnsureInsertPoint() {
1281 if (!HaveInsertPoint())
1282 EmitBlock(createBasicBlock());
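// Sketch of the usual block-emission pattern (assumed; mirrors how statements
// such as 'if' are lowered):
//
//   llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
//   llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
//   // ...emit the conditional branch to ThenBlock/ContBlock...
//   EmitBlock(ThenBlock);
//   EmitStmt(S.getThen());
//   EmitBranch(ContBlock);          // clears the insertion point
//   EmitBlock(ContBlock, true);     // IsFinished: no more branches expected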
1285 /// ErrorUnsupported - Print out an error that codegen doesn't support the
1286 /// specified stmt yet.
1287 void ErrorUnsupported(const Stmt *S, const char *Type,
1288 bool OmitOnError=false);
1290 //===--------------------------------------------------------------------===//
1291 // Helpers
1292 //===--------------------------------------------------------------------===//
1294 LValue MakeAddrLValue(llvm::Value *V, QualType T, unsigned Alignment = 0) {
1295 return LValue::MakeAddr(V, T, Alignment, getContext(),
1296 CGM.getTBAAInfo(T));
1299 /// CreateTempAlloca - This creates an alloca and inserts it into the entry
1300 /// block. The caller is responsible for setting an appropriate alignment on
1301 /// the alloca.
1302 llvm::AllocaInst *CreateTempAlloca(const llvm::Type *Ty,
1303 const llvm::Twine &Name = "tmp");
1305 /// InitTempAlloca - Provide an initial value for the given alloca.
1306 void InitTempAlloca(llvm::AllocaInst *Alloca, llvm::Value *Value);
1308 /// CreateIRTemp - Create a temporary IR object of the given type, with
1309 /// appropriate alignment. This routine should only be used when a temporary
1310 /// value needs to be stored into an alloca (for example, to avoid explicit
1311 /// PHI construction), but the type is the IR type, not the type appropriate
1312 /// for storing in memory.
1313 llvm::AllocaInst *CreateIRTemp(QualType T, const llvm::Twine &Name = "tmp");
1315 /// CreateMemTemp - Create a temporary memory object of the given type, with
1316 /// appropriate alignment.
1317 llvm::AllocaInst *CreateMemTemp(QualType T, const llvm::Twine &Name = "tmp");
1319 /// CreateAggTemp - Create a temporary memory object for the given
1320 /// aggregate type.
1321 AggValueSlot CreateAggTemp(QualType T, const llvm::Twine &Name = "tmp") {
1322 return AggValueSlot::forAddr(CreateMemTemp(T, Name), false, false);
1325 /// Emit a cast to void* in the appropriate address space.
1326 llvm::Value *EmitCastToVoidPtr(llvm::Value *value);
1328 /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
1329 /// expression and compare the result against zero, returning an Int1Ty value.
1330 llvm::Value *EvaluateExprAsBool(const Expr *E);
1332 /// EmitIgnoredExpr - Emit an expression in a context which ignores the result.
1333 void EmitIgnoredExpr(const Expr *E);
1335 /// EmitAnyExpr - Emit code to compute the specified expression which can have
1336 /// any type. The result is returned as an RValue struct. If this is an
1337 /// aggregate expression, the AggSlot argument indicates where
1338 /// the result should be returned.
1340 /// \param IgnoreResult - True if the resulting value isn't used.
1341 RValue EmitAnyExpr(const Expr *E,
1342 AggValueSlot AggSlot = AggValueSlot::ignored(),
1343 bool IgnoreResult = false);
1345 // EmitVAListRef - Emit a "reference" to a va_list; this is either the address
1346 // or the value of the expression, depending on how va_list is defined.
1347 llvm::Value *EmitVAListRef(const Expr *E);
1349 /// EmitAnyExprToTemp - Similarly to EmitAnyExpr(); however, the result will
1350 /// always be accessible even if no aggregate location is provided.
1351 RValue EmitAnyExprToTemp(const Expr *E);
1353 /// EmitAnyExprToMem - Emits the code necessary to evaluate an
1354 /// arbitrary expression into the given memory location.
1355 void EmitAnyExprToMem(const Expr *E, llvm::Value *Location,
1356 bool IsLocationVolatile,
1357 bool IsInitializer);
1359 /// EmitAggregateCopy - Emit an aggregate copy.
1361 /// \param isVolatile - True iff either the source or the destination is
1362 /// volatile.
1363 void EmitAggregateCopy(llvm::Value *DestPtr, llvm::Value *SrcPtr,
1364 QualType EltTy, bool isVolatile=false);
1366 /// StartBlock - Start a new block named N. If the insert block is a dummy
1367 /// block, reuse it.
1368 void StartBlock(const char *N);
1370 /// GetAddrOfStaticLocalVar - Return the address of a static local variable.
1371 llvm::Constant *GetAddrOfStaticLocalVar(const VarDecl *BVD) {
1372 return cast<llvm::Constant>(GetAddrOfLocalVar(BVD));
1375 /// GetAddrOfLocalVar - Return the address of a local variable.
1376 llvm::Value *GetAddrOfLocalVar(const VarDecl *VD) {
1377 llvm::Value *Res = LocalDeclMap[VD];
1378 assert(Res && "Invalid argument to GetAddrOfLocalVar(), no decl!");
1379 return Res;
1382 /// getOpaqueLValueMapping - Given an opaque value expression (which
1383 /// must be mapped to an l-value), return its mapping.
1384 const LValue &getOpaqueLValueMapping(const OpaqueValueExpr *e) {
1385 assert(OpaqueValueMapping::shouldBindAsLValue(e));
1387 llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
1388 it = OpaqueLValues.find(e);
1389 assert(it != OpaqueLValues.end() && "no mapping for opaque value!");
1390 return it->second;
1393 /// getOpaqueRValueMapping - Given an opaque value expression (which
1394 /// must be mapped to an r-value), return its mapping.
1395 const RValue &getOpaqueRValueMapping(const OpaqueValueExpr *e) {
1396 assert(!OpaqueValueMapping::shouldBindAsLValue(e));
1398 llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
1399 it = OpaqueRValues.find(e);
1400 assert(it != OpaqueRValues.end() && "no mapping for opaque value!");
1401 return it->second;
1404 /// getAccessedFieldNo - Given an encoded value and a result number, return
1405 /// the input field number being accessed.
1406 static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts);
1408 llvm::BlockAddress *GetAddrOfLabel(const LabelDecl *L);
1409 llvm::BasicBlock *GetIndirectGotoBlock();
1411 /// EmitNullInitialization - Generate code to set a value of the given type to
1412 /// null. If the type contains data member pointers, they will be initialized
1413 /// to -1 in accordance with the Itanium C++ ABI.
1414 void EmitNullInitialization(llvm::Value *DestPtr, QualType Ty);
1416 // EmitVAArg - Generate code to get an argument from the passed in pointer
1417 // and update it accordingly. The return value is a pointer to the argument.
1418 // FIXME: We should be able to get rid of this method and use the va_arg
1419 // instruction in LLVM instead once it works well enough.
1420 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty);
1422 /// EmitVLASize - Generate code for any VLA size expressions that might occur
1423 /// in a variably modified type. If Ty is a VLA, will return the value that
1424 /// corresponds to the size in bytes of the VLA type. Will return 0 otherwise.
1426 /// This function can be called with a null (unreachable) insert point.
1427 llvm::Value *EmitVLASize(QualType Ty);
1429 // GetVLASize - Returns an LLVM value that corresponds to the size in bytes
1430 // of a variable length array type.
1431 llvm::Value *GetVLASize(const VariableArrayType *);
1433 /// LoadCXXThis - Load the value of 'this'. This function is only valid while
1434 /// generating code for a C++ member function.
1435 llvm::Value *LoadCXXThis() {
1436 assert(CXXThisValue && "no 'this' value for this function");
1437 return CXXThisValue;
1440 /// LoadCXXVTT - Load the VTT parameter of base constructors/destructors that have
1441 /// virtual bases.
1442 llvm::Value *LoadCXXVTT() {
1443 assert(CXXVTTValue && "no VTT value for this function");
1444 return CXXVTTValue;
1447 /// GetAddressOfDirectBaseInCompleteClass - Convert the given pointer to a
1448 /// complete class to the given direct base.
1449 llvm::Value *
1450 GetAddressOfDirectBaseInCompleteClass(llvm::Value *Value,
1451 const CXXRecordDecl *Derived,
1452 const CXXRecordDecl *Base,
1453 bool BaseIsVirtual);
1455 /// GetAddressOfBaseClass - This function will add the necessary delta to the
1456 /// load of 'this' and returns the address of the base class.
1457 llvm::Value *GetAddressOfBaseClass(llvm::Value *Value,
1458 const CXXRecordDecl *Derived,
1459 CastExpr::path_const_iterator PathBegin,
1460 CastExpr::path_const_iterator PathEnd,
1461 bool NullCheckValue);
1463 llvm::Value *GetAddressOfDerivedClass(llvm::Value *Value,
1464 const CXXRecordDecl *Derived,
1465 CastExpr::path_const_iterator PathBegin,
1466 CastExpr::path_const_iterator PathEnd,
1467 bool NullCheckValue);
1469 llvm::Value *GetVirtualBaseClassOffset(llvm::Value *This,
1470 const CXXRecordDecl *ClassDecl,
1471 const CXXRecordDecl *BaseClassDecl);
1473 void EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
1474 CXXCtorType CtorType,
1475 const FunctionArgList &Args);
1476 void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
1477 bool ForVirtualBase, llvm::Value *This,
1478 CallExpr::const_arg_iterator ArgBeg,
1479 CallExpr::const_arg_iterator ArgEnd);
1481 void EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
1482 llvm::Value *This, llvm::Value *Src,
1483 CallExpr::const_arg_iterator ArgBeg,
1484 CallExpr::const_arg_iterator ArgEnd);
1486 void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
1487 const ConstantArrayType *ArrayTy,
1488 llvm::Value *ArrayPtr,
1489 CallExpr::const_arg_iterator ArgBeg,
1490 CallExpr::const_arg_iterator ArgEnd,
1491 bool ZeroInitialization = false);
1493 void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
1494 llvm::Value *NumElements,
1495 llvm::Value *ArrayPtr,
1496 CallExpr::const_arg_iterator ArgBeg,
1497 CallExpr::const_arg_iterator ArgEnd,
1498 bool ZeroInitialization = false);
1500 void EmitCXXAggrDestructorCall(const CXXDestructorDecl *D,
1501 const ArrayType *Array,
1502 llvm::Value *This);
1504 void EmitCXXAggrDestructorCall(const CXXDestructorDecl *D,
1505 llvm::Value *NumElements,
1506 llvm::Value *This);
1508 llvm::Function *GenerateCXXAggrDestructorHelper(const CXXDestructorDecl *D,
1509 const ArrayType *Array,
1510 llvm::Value *This);
1512 void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
1513 bool ForVirtualBase, llvm::Value *This);
1515 void EmitNewArrayInitializer(const CXXNewExpr *E, llvm::Value *NewPtr,
1516 llvm::Value *NumElements);
1518 void EmitCXXTemporary(const CXXTemporary *Temporary, llvm::Value *Ptr);
1520 llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
1521 void EmitCXXDeleteExpr(const CXXDeleteExpr *E);
1523 void EmitDeleteCall(const FunctionDecl *DeleteFD, llvm::Value *Ptr,
1524 QualType DeleteTy);
1526 llvm::Value* EmitCXXTypeidExpr(const CXXTypeidExpr *E);
1527 llvm::Value *EmitDynamicCast(llvm::Value *V, const CXXDynamicCastExpr *DCE);
1529 void EmitCheck(llvm::Value *, unsigned Size);
1531 llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
1532 bool isInc, bool isPre);
1533 ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
1534 bool isInc, bool isPre);
1535 //===--------------------------------------------------------------------===//
1536 // Declaration Emission
1537 //===--------------------------------------------------------------------===//
1539 /// EmitDecl - Emit a declaration.
1541 /// This function can be called with a null (unreachable) insert point.
1542 void EmitDecl(const Decl &D);
1544 /// EmitVarDecl - Emit a local variable declaration.
1546 /// This function can be called with a null (unreachable) insert point.
1547 void EmitVarDecl(const VarDecl &D);
1549 typedef void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D,
1550 llvm::Value *Address);
1552 /// EmitAutoVarDecl - Emit an auto variable declaration.
1554 /// This function can be called with a null (unreachable) insert point.
1555 void EmitAutoVarDecl(const VarDecl &D, SpecialInitFn *SpecialInit = 0);
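// Sketch (illustrative): EmitDecl/EmitVarDecl route locals here, so the
// equivalent hand-written dispatch would look roughly like
//
//   if (const VarDecl *VD = dyn_cast<VarDecl>(&D))
//     if (VD->hasLocalStorage())
//       EmitAutoVarDecl(*VD);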
1557 void EmitStaticVarDecl(const VarDecl &D,
1558 llvm::GlobalValue::LinkageTypes Linkage);
1560 /// EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
1561 void EmitParmDecl(const VarDecl &D, llvm::Value *Arg);
1563 /// protectFromPeepholes - Protect a value that we're intending to
1564 /// store to the side, but which will probably be used later, from
1565 /// aggressive peepholing optimizations that might delete it.
1567 /// Pass the result to unprotectFromPeepholes to declare that
1568 /// protection is no longer required.
1570 /// There's no particular reason why this shouldn't apply to
1571 /// l-values; it's just that no existing peepholes work on pointers.
1572 PeepholeProtection protectFromPeepholes(RValue rvalue);
1573 void unprotectFromPeepholes(PeepholeProtection protection);
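// Pairing sketch (illustrative): a caller that sets an rvalue 'rv' aside while
// emitting unrelated code might guard it as follows:
//
//   PeepholeProtection protection = protectFromPeepholes(rv);
//   // ... emit intervening code that might otherwise peephole the value ...
//   unprotectFromPeepholes(protection);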
1575 //===--------------------------------------------------------------------===//
1576 // Statement Emission
1577 //===--------------------------------------------------------------------===//
1579 /// EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
1580 void EmitStopPoint(const Stmt *S);
1582 /// EmitStmt - Emit the code for the statement \arg S. It is legal to call
1583 /// this function even if there is no current insertion point.
1585 /// This function may clear the current insertion point; callers should use
1586 /// EnsureInsertPoint if they wish to subsequently generate code without first
1587 /// calling EmitBlock, EmitBranch, or EmitStmt.
1588 void EmitStmt(const Stmt *S);
1590 /// EmitSimpleStmt - Try to emit a "simple" statement which does not
1591 /// necessarily require an insertion point or debug information; typically
1592 /// because the statement amounts to a jump or a container of other
1593 /// statements.
1595 /// \return True if the statement was handled.
1596 bool EmitSimpleStmt(const Stmt *S);
1598 RValue EmitCompoundStmt(const CompoundStmt &S, bool GetLast = false,
1599 AggValueSlot AVS = AggValueSlot::ignored());
1601 /// EmitLabel - Emit the block for the given label. It is legal to call this
1602 /// function even if there is no current insertion point.
1603 void EmitLabel(const LabelDecl *D); // helper for EmitLabelStmt.
1605 void EmitLabelStmt(const LabelStmt &S);
1606 void EmitGotoStmt(const GotoStmt &S);
1607 void EmitIndirectGotoStmt(const IndirectGotoStmt &S);
1608 void EmitIfStmt(const IfStmt &S);
1609 void EmitWhileStmt(const WhileStmt &S);
1610 void EmitDoStmt(const DoStmt &S);
1611 void EmitForStmt(const ForStmt &S);
1612 void EmitReturnStmt(const ReturnStmt &S);
1613 void EmitDeclStmt(const DeclStmt &S);
1614 void EmitBreakStmt(const BreakStmt &S);
1615 void EmitContinueStmt(const ContinueStmt &S);
1616 void EmitSwitchStmt(const SwitchStmt &S);
1617 void EmitDefaultStmt(const DefaultStmt &S);
1618 void EmitCaseStmt(const CaseStmt &S);
1619 void EmitCaseStmtRange(const CaseStmt &S);
1620 void EmitAsmStmt(const AsmStmt &S);
1622 void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S);
1623 void EmitObjCAtTryStmt(const ObjCAtTryStmt &S);
1624 void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S);
1625 void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S);
1627 llvm::Constant *getUnwindResumeOrRethrowFn();
1628 void EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
1629 void ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
1631 void EmitCXXTryStmt(const CXXTryStmt &S);
1633 //===--------------------------------------------------------------------===//
1634 // LValue Expression Emission
1635 //===--------------------------------------------------------------------===//
1637 /// GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
1638 RValue GetUndefRValue(QualType Ty);
1640 /// EmitUnsupportedRValue - Emit a dummy r-value using the type of E
1641 /// and issue an ErrorUnsupported style diagnostic (using the
1642 /// provided Name).
1643 RValue EmitUnsupportedRValue(const Expr *E,
1644 const char *Name);
1646 /// EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue
1647 /// an ErrorUnsupported style diagnostic (using the provided Name).
1648 LValue EmitUnsupportedLValue(const Expr *E,
1649 const char *Name);
1651 /// EmitLValue - Emit code to compute a designator that specifies the location
1652 /// of the expression.
1654 /// This can return one of two things: a simple address or a bitfield
1655 /// reference. In either case, the LLVM Value* in the LValue structure is
1656 /// guaranteed to be an LLVM pointer type.
1658 /// If this returns a bitfield reference, nothing about the pointee type of
1659 /// the LLVM value is known: For example, it may not be a pointer to an
1660 /// integer.
1662 /// If this returns a normal address, and if the lvalue's C type is fixed
1663 /// size, this method guarantees that the returned pointer type will point to
1664 /// an LLVM type of the same size as the lvalue's type. If the lvalue has a
1665 /// variable length type, this is not possible.
1667 LValue EmitLValue(const Expr *E);
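// Typical pattern (sketch; 'E' is an assumed expression): compute the
// location first, then load it as an rvalue:
//
//   LValue LV = EmitLValue(E);
//   RValue RV = EmitLoadOfLValue(LV, E->getType());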
1669 /// EmitCheckedLValue - Same as EmitLValue but additionally we generate
1670 /// checking code to guard against undefined behavior. This is only
1671 /// suitable when we know that the address will be used to access the
1672 /// object.
1673 LValue EmitCheckedLValue(const Expr *E);
1675 /// EmitToMemory - Change a scalar value from its value
1676 /// representation to its in-memory representation.
1677 llvm::Value *EmitToMemory(llvm::Value *Value, QualType Ty);
1679 /// EmitFromMemory - Change a scalar value from its memory
1680 /// representation to its value representation.
1681 llvm::Value *EmitFromMemory(llvm::Value *Value, QualType Ty);
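// Example of the distinction (sketch): a C++ 'bool' is commonly an i1 as an
// LLVM value but an i8 in memory, so a round trip looks like
//
//   llvm::Value *Mem = EmitToMemory(BoolVal, getContext().BoolTy);  // i1 -> i8
//   llvm::Value *Val = EmitFromMemory(Mem, getContext().BoolTy);    // i8 -> i1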
1683 /// EmitLoadOfScalar - Load a scalar value from an address, taking
1684 /// care to appropriately convert from the memory representation to
1685 /// the LLVM value representation.
1686 llvm::Value *EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
1687 unsigned Alignment, QualType Ty,
1688 llvm::MDNode *TBAAInfo = 0);
1690 /// EmitStoreOfScalar - Store a scalar value to an address, taking
1691 /// care to appropriately convert from the memory representation to
1692 /// the LLVM value representation.
1693 void EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
1694 bool Volatile, unsigned Alignment, QualType Ty,
1695 llvm::MDNode *TBAAInfo = 0);
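// Sketch (illustrative; 'Addr' is an assumed alloca matching 'Ty', 'V' a
// scalar value, and the alignment of 4 is a placeholder): routing through
// these helpers keeps the memory/value conversion above consistent:
//
//   EmitStoreOfScalar(V, Addr, /*Volatile=*/false, /*Alignment=*/4, Ty);
//   llvm::Value *Reloaded =
//       EmitLoadOfScalar(Addr, /*Volatile=*/false, /*Alignment=*/4, Ty);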
1697 /// EmitLoadOfLValue - Given an expression that represents a value lvalue,
1698 /// this method emits the address of the lvalue, then loads the result as an
1699 /// rvalue, returning the rvalue.
1700 RValue EmitLoadOfLValue(LValue V, QualType LVType);
1701 RValue EmitLoadOfExtVectorElementLValue(LValue V, QualType LVType);
1702 RValue EmitLoadOfBitfieldLValue(LValue LV, QualType ExprType);
1703 RValue EmitLoadOfPropertyRefLValue(LValue LV,
1704 ReturnValueSlot Return = ReturnValueSlot());
1706 /// EmitStoreThroughLValue - Store the specified rvalue into the specified
1707 /// lvalue, where both are guaranteed to have the same type, and that type
1708 /// is 'Ty'.
1709 void EmitStoreThroughLValue(RValue Src, LValue Dst, QualType Ty);
1710 void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst,
1711 QualType Ty);
1712 void EmitStoreThroughPropertyRefLValue(RValue Src, LValue Dst);
1714 /// EmitStoreThroughBitfieldLValue - Store Src into Dst with the same constraints as
1715 /// EmitStoreThroughLValue.
1717 /// \param Result [out] - If non-null, this will be set to a Value* for the
1718 /// bit-field contents after the store, appropriate for use as the result of
1719 /// an assignment to the bit-field.
1720 void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, QualType Ty,
1721 llvm::Value **Result=0);
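// Sketch of the out parameter (illustrative): when the stored value is also
// the result of the enclosing assignment, a caller can recover it like so:
//
//   llvm::Value *Result = 0;
//   EmitStoreThroughBitfieldLValue(Src, Dst, Ty, &Result);
//   // 'Result' now holds the bit-field contents after the store.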
1723 /// Emit an l-value for an assignment (simple or compound) of complex type.
1724 LValue EmitComplexAssignmentLValue(const BinaryOperator *E);
1725 LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E);
1727 // Note: only available for agg return types
1728 LValue EmitBinaryOperatorLValue(const BinaryOperator *E);
1729 LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E);
1730 // Note: only available for agg return types
1731 LValue EmitCallExprLValue(const CallExpr *E);
1732 // Note: only available for agg return types
1733 LValue EmitVAArgExprLValue(const VAArgExpr *E);
1734 LValue EmitDeclRefLValue(const DeclRefExpr *E);
1735 LValue EmitStringLiteralLValue(const StringLiteral *E);
1736 LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E);
1737 LValue EmitPredefinedLValue(const PredefinedExpr *E);
1738 LValue EmitUnaryOpLValue(const UnaryOperator *E);
1739 LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E);
1740 LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E);
1741 LValue EmitMemberExpr(const MemberExpr *E);
1742 LValue EmitObjCIsaExpr(const ObjCIsaExpr *E);
1743 LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E);
1744 LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E);
1745 LValue EmitCastLValue(const CastExpr *E);
1746 LValue EmitNullInitializationLValue(const CXXScalarValueInitExpr *E);
1747 LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e);
1749 llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
1750 const ObjCIvarDecl *Ivar);
1751 LValue EmitLValueForAnonRecordField(llvm::Value* Base,
1752 const IndirectFieldDecl* Field,
1753 unsigned CVRQualifiers);
1754 LValue EmitLValueForField(llvm::Value* Base, const FieldDecl* Field,
1755 unsigned CVRQualifiers);
1757 /// EmitLValueForFieldInitialization - Like EmitLValueForField, except that
1758 /// if the Field is a reference, this will return the address of the reference
1759 /// and not the address of the value stored in the reference.
1760 LValue EmitLValueForFieldInitialization(llvm::Value* Base,
1761 const FieldDecl* Field,
1762 unsigned CVRQualifiers);
1764 LValue EmitLValueForIvar(QualType ObjectTy,
1765 llvm::Value* Base, const ObjCIvarDecl *Ivar,
1766 unsigned CVRQualifiers);
1768 LValue EmitLValueForBitfield(llvm::Value* Base, const FieldDecl* Field,
1769 unsigned CVRQualifiers);
1771 LValue EmitBlockDeclRefLValue(const BlockDeclRefExpr *E);
1773 LValue EmitCXXConstructLValue(const CXXConstructExpr *E);
1774 LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E);
1775 LValue EmitExprWithCleanupsLValue(const ExprWithCleanups *E);
1776 LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E);
1778 LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E);
1779 LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E);
1780 LValue EmitObjCPropertyRefLValue(const ObjCPropertyRefExpr *E);
1781 LValue EmitStmtExprLValue(const StmtExpr *E);
1782 LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E);
1783 LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E);
1784 void EmitDeclRefExprDbgValue(const DeclRefExpr *E, llvm::Constant *Init);
1786 //===--------------------------------------------------------------------===//
1787 // Scalar Expression Emission
1788 //===--------------------------------------------------------------------===//
1790 /// EmitCall - Generate a call of the given function, expecting the given
1791 /// result type, and using the given argument list which specifies both the
1792 /// LLVM arguments and the types they were derived from.
1794 /// \param TargetDecl - If given, the decl of the function in a direct call;
1795 /// used to set attributes on the call (noreturn, etc.).
1796 RValue EmitCall(const CGFunctionInfo &FnInfo,
1797 llvm::Value *Callee,
1798 ReturnValueSlot ReturnValue,
1799 const CallArgList &Args,
1800 const Decl *TargetDecl = 0,
1801 llvm::Instruction **callOrInvoke = 0);
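// Call sketch (illustrative; 'FnInfo', 'Fn' and 'Args' are assumed to have
// been built elsewhere, e.g. via EmitCallArgs and CodeGenTypes):
//
//   RValue Result = EmitCall(FnInfo, Fn, ReturnValueSlot(), Args,
//                            /*TargetDecl=*/0);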
1803 RValue EmitCall(QualType FnType, llvm::Value *Callee,
1804 ReturnValueSlot ReturnValue,
1805 CallExpr::const_arg_iterator ArgBeg,
1806 CallExpr::const_arg_iterator ArgEnd,
1807 const Decl *TargetDecl = 0);
1808 RValue EmitCallExpr(const CallExpr *E,
1809 ReturnValueSlot ReturnValue = ReturnValueSlot());
1811 llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
1812 llvm::Value * const *ArgBegin,
1813 llvm::Value * const *ArgEnd,
1814 const llvm::Twine &Name = "");
1816 llvm::Value *BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *This,
1817 const llvm::Type *Ty);
1818 llvm::Value *BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type,
1819 llvm::Value *This, const llvm::Type *Ty);
1820 llvm::Value *BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
1821 NestedNameSpecifier *Qual,
1822 const llvm::Type *Ty);
1824 llvm::Value *BuildAppleKextVirtualDestructorCall(const CXXDestructorDecl *DD,
1825 CXXDtorType Type,
1826 const CXXRecordDecl *RD);
1828 RValue EmitCXXMemberCall(const CXXMethodDecl *MD,
1829 llvm::Value *Callee,
1830 ReturnValueSlot ReturnValue,
1831 llvm::Value *This,
1832 llvm::Value *VTT,
1833 CallExpr::const_arg_iterator ArgBeg,
1834 CallExpr::const_arg_iterator ArgEnd);
1835 RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E,
1836 ReturnValueSlot ReturnValue);
1837 RValue EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
1838 ReturnValueSlot ReturnValue);
1840 RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
1841 const CXXMethodDecl *MD,
1842 ReturnValueSlot ReturnValue);
1845 RValue EmitBuiltinExpr(const FunctionDecl *FD,
1846 unsigned BuiltinID, const CallExpr *E);
1848 RValue EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue);
1850 /// EmitTargetBuiltinExpr - Emit the given builtin call. Returns 0 if the call
1851 /// is unhandled by the current target.
1852 llvm::Value *EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
1854 llvm::Value *EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
1855 llvm::Value *EmitNeonCall(llvm::Function *F,
1856 llvm::SmallVectorImpl<llvm::Value*> &O,
1857 const char *name,
1858 unsigned shift = 0, bool rightshift = false);
1859 llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx);
1860 llvm::Value *EmitNeonShiftVector(llvm::Value *V, const llvm::Type *Ty,
1861 bool negateForRightShift);
1863 llvm::Value *BuildVector(const llvm::SmallVectorImpl<llvm::Value*> &Ops);
1864 llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
1865 llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
1867 llvm::Value *EmitObjCProtocolExpr(const ObjCProtocolExpr *E);
1868 llvm::Value *EmitObjCStringLiteral(const ObjCStringLiteral *E);
1869 llvm::Value *EmitObjCSelectorExpr(const ObjCSelectorExpr *E);
1870 RValue EmitObjCMessageExpr(const ObjCMessageExpr *E,
1871 ReturnValueSlot Return = ReturnValueSlot());
1873 /// EmitReferenceBindingToExpr - Emits a reference binding to the passed in
1874 /// expression. Will emit a temporary variable if E is not an LValue.
1875 RValue EmitReferenceBindingToExpr(const Expr* E,
1876 const NamedDecl *InitializedDecl);
1878 //===--------------------------------------------------------------------===//
1879 // Expression Emission
1880 //===--------------------------------------------------------------------===//
1882 // Expressions are broken into three classes: scalar, complex, aggregate.
1884 /// EmitScalarExpr - Emit the computation of the specified expression of LLVM
1885 /// scalar type, returning the result.
1886 llvm::Value *EmitScalarExpr(const Expr *E , bool IgnoreResultAssign = false);
1888 /// EmitScalarConversion - Emit a conversion from the specified type to the
1889 /// specified destination type, both of which are LLVM scalar types.
1890 llvm::Value *EmitScalarConversion(llvm::Value *Src, QualType SrcTy,
1891 QualType DstTy);
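// Sketch (illustrative): a scalar int-to-double conversion of a value 'V'
// whose source type is 'SrcTy' would be emitted as
//
//   llvm::Value *Conv = EmitScalarConversion(V, SrcTy, getContext().DoubleTy);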
1893 /// EmitComplexToScalarConversion - Emit a conversion from the specified
1894 /// complex type to the specified destination type, where the destination type
1895 /// is an LLVM scalar type.
1896 llvm::Value *EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy,
1897 QualType DstTy);
1900 /// EmitAggExpr - Emit the computation of the specified expression
1901 /// of aggregate type. The result is computed into the given slot,
1902 /// which may be null to indicate that the value is not needed.
1903 void EmitAggExpr(const Expr *E, AggValueSlot AS, bool IgnoreResult = false);
1905 /// EmitAggExprToLValue - Emit the computation of the specified expression of
1906 /// aggregate type into a temporary LValue.
1907 LValue EmitAggExprToLValue(const Expr *E);
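// Sketch (illustrative; 'E' is an assumed expression of aggregate type): the
// convenience wrapper materializes the aggregate into a fresh temporary and
// hands back its address:
//
//   LValue Tmp = EmitAggExprToLValue(E);
//   llvm::Value *TmpAddr = Tmp.getAddress();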
1909 /// EmitGCMemmoveCollectable - Emit special API for structs with object
1910 /// pointers.
1911 void EmitGCMemmoveCollectable(llvm::Value *DestPtr, llvm::Value *SrcPtr,
1912 QualType Ty);
1914 /// EmitComplexExpr - Emit the computation of the specified expression of
1915 /// complex type, returning the result.
1916 ComplexPairTy EmitComplexExpr(const Expr *E,
1917 bool IgnoreReal = false,
1918 bool IgnoreImag = false);
1920 /// EmitComplexExprIntoAddr - Emit the computation of the specified expression
1921 /// of complex type, storing into the specified Value*.
1922 void EmitComplexExprIntoAddr(const Expr *E, llvm::Value *DestAddr,
1923 bool DestIsVolatile);
1925 /// StoreComplexToAddr - Store a complex number into the specified address.
1926 void StoreComplexToAddr(ComplexPairTy V, llvm::Value *DestAddr,
1927 bool DestIsVolatile);
1928 /// LoadComplexFromAddr - Load a complex number from the specified address.
1929 ComplexPairTy LoadComplexFromAddr(llvm::Value *SrcAddr, bool SrcIsVolatile);
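// Sketch (illustrative): copying a _Complex value between two assumed
// addresses can be phrased with the pair of helpers above:
//
//   ComplexPairTy Val = LoadComplexFromAddr(SrcAddr, /*SrcIsVolatile=*/false);
//   StoreComplexToAddr(Val, DestAddr, /*DestIsVolatile=*/false);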
1931 /// CreateStaticVarDecl - Create a zero-initialized LLVM global for
1932 /// a static local variable.
1933 llvm::GlobalVariable *CreateStaticVarDecl(const VarDecl &D,
1934 const char *Separator,
1935 llvm::GlobalValue::LinkageTypes Linkage);
1937 /// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
1938 /// global variable that has already been created for it. If the initializer
1939 /// has a different type than GV does, this may free GV and return a different
1940 /// one. Otherwise it just returns GV.
1941 llvm::GlobalVariable *
1942 AddInitializerToStaticVarDecl(const VarDecl &D,
1943 llvm::GlobalVariable *GV);
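// Static-local sketch (illustrative; the "." separator is an assumption): the
// global is created first, and the initializer is attached once it is known:
//
//   llvm::GlobalVariable *GV =
//       CreateStaticVarDecl(D, ".", llvm::GlobalValue::InternalLinkage);
//   GV = AddInitializerToStaticVarDecl(D, GV);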
1946 /// EmitCXXGlobalVarDeclInit - Create the initializer for a C++
1947 /// variable with global storage.
1948 void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::Constant *DeclPtr);
1950 /// EmitCXXGlobalDtorRegistration - Emits a call to register the global ptr
1951 /// with the C++ runtime so that its destructor will be called at exit.
1952 void EmitCXXGlobalDtorRegistration(llvm::Constant *DtorFn,
1953 llvm::Constant *DeclPtr);
1955 /// Emit code in this function to perform a guarded variable
1956 /// initialization. Guarded initializations are used when it's not
1957 /// possible to prove that an initialization will be done exactly
1958 /// once, e.g. with a static local variable or a static data member
1959 /// of a class template.
1960 void EmitCXXGuardedInit(const VarDecl &D, llvm::GlobalVariable *DeclPtr);
1962 /// GenerateCXXGlobalInitFunc - Generates code for initializing global
1963 /// variables.
1964 void GenerateCXXGlobalInitFunc(llvm::Function *Fn,
1965 llvm::Constant **Decls,
1966 unsigned NumDecls);
1968 /// GenerateCXXGlobalDtorFunc - Generates code for destroying global
1969 /// variables.
1970 void GenerateCXXGlobalDtorFunc(llvm::Function *Fn,
1971 const std::vector<std::pair<llvm::WeakVH,
1972 llvm::Constant*> > &DtorsAndObjects);
1974 void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn, const VarDecl *D,
1975 llvm::GlobalVariable *Addr);
1977 void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest);
1979 void EmitSynthesizedCXXCopyCtor(llvm::Value *Dest, llvm::Value *Src,
1980 const Expr *Exp);
1982 RValue EmitExprWithCleanups(const ExprWithCleanups *E,
1983 AggValueSlot Slot =AggValueSlot::ignored());
1985 void EmitCXXThrowExpr(const CXXThrowExpr *E);
1987 //===--------------------------------------------------------------------===//
1988 // Internal Helpers
1989 //===--------------------------------------------------------------------===//
1991 /// ContainsLabel - Return true if the statement contains a label in it. If
1992 /// this statement is not executed normally and it contains no label, then
1993 /// we can just remove the code.
1994 static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts = false);
1996 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
1997 /// to a constant, or if it does but contains a label, return 0. If it
1998 /// constant folds to 'true' and does not contain a label, return 1; if it
1999 /// constant folds to 'false' and does not contain a label, return -1.
2000 int ConstantFoldsToSimpleInteger(const Expr *Cond);
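// Sketch (illustrative): if-statement emission can use the folded value to
// skip a dead arm, provided the skipped arm contains no label:
//
//   if (int Cond = ConstantFoldsToSimpleInteger(S.getCond()))
//     if (!ContainsLabel(Cond == 1 ? S.getElse() : S.getThen()))
//       /* emit only the live arm */;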
2002 /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an
2003 /// if statement) to the specified blocks. Based on the condition, this might
2004 /// try to simplify the codegen of the conditional based on the branch.
2005 void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock,
2006 llvm::BasicBlock *FalseBlock);
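// Sketch (illustrative): the non-constant path of an if-statement is emitted
// roughly as
//
//   llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
//   llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
//   EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ContBlock);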
2008 /// getTrapBB - Create a basic block that will call the trap intrinsic. We'll
2009 /// generate a branch around the created basic block as necessary.
2010 llvm::BasicBlock *getTrapBB();
2012 /// EmitCallArg - Emit a single call argument.
2013 RValue EmitCallArg(const Expr *E, QualType ArgType);
2015 /// EmitDelegateCallArg - We are performing a delegate call; that
2016 /// is, the current function is delegating to another one. Produce
2017 /// an r-value suitable for passing the given parameter.
2018 RValue EmitDelegateCallArg(const VarDecl *Param);
2020 private:
2021 void EmitReturnOfRValue(RValue RV, QualType Ty);
2023 /// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty
2024 /// from function arguments into \arg Dst. See ABIArgInfo::Expand.
2026 /// \param AI - The first function argument of the expansion.
2027 /// \return The argument following the last expanded function
2028 /// argument.
2029 llvm::Function::arg_iterator
2030 ExpandTypeFromArgs(QualType Ty, LValue Dst,
2031 llvm::Function::arg_iterator AI);
2033 /// ExpandTypeToArgs - Expand an RValue \arg Src, with the LLVM type for \arg
2034 /// Ty, into individual arguments on the provided vector \arg Args. See
2035 /// ABIArgInfo::Expand.
2036 void ExpandTypeToArgs(QualType Ty, RValue Src,
2037 llvm::SmallVector<llvm::Value*, 16> &Args);
2039 llvm::Value* EmitAsmInput(const AsmStmt &S,
2040 const TargetInfo::ConstraintInfo &Info,
2041 const Expr *InputExpr, std::string &ConstraintStr);
2043 llvm::Value* EmitAsmInputLValue(const AsmStmt &S,
2044 const TargetInfo::ConstraintInfo &Info,
2045 LValue InputValue, QualType InputType,
2046 std::string &ConstraintStr);
2048 /// EmitCallArgs - Emit call arguments for a function.
2049 /// The CallArgTypeInfo parameter is used for iterating over the known
2050 /// argument types of the function being called.
2051 template<typename T>
2052 void EmitCallArgs(CallArgList& Args, const T* CallArgTypeInfo,
2053 CallExpr::const_arg_iterator ArgBeg,
2054 CallExpr::const_arg_iterator ArgEnd) {
2055 CallExpr::const_arg_iterator Arg = ArgBeg;
2057 // First, use the argument types that the type info knows about
2058 if (CallArgTypeInfo) {
2059 for (typename T::arg_type_iterator I = CallArgTypeInfo->arg_type_begin(),
2060 E = CallArgTypeInfo->arg_type_end(); I != E; ++I, ++Arg) {
2061 assert(Arg != ArgEnd && "Running over edge of argument list!");
2062 QualType ArgType = *I;
2063 #ifndef NDEBUG
2064 QualType ActualArgType = Arg->getType();
2065 if (ArgType->isPointerType() && ActualArgType->isPointerType()) {
2066 QualType ActualBaseType =
2067 ActualArgType->getAs<PointerType>()->getPointeeType();
2068 QualType ArgBaseType =
2069 ArgType->getAs<PointerType>()->getPointeeType();
2070 if (ArgBaseType->isVariableArrayType()) {
2071 if (const VariableArrayType *VAT =
2072 getContext().getAsVariableArrayType(ActualBaseType)) {
2073 if (!VAT->getSizeExpr())
2074 ActualArgType = ArgType;
2075 }
2076 }
2077 }
2078 assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
2079 getTypePtr() ==
2080 getContext().getCanonicalType(ActualArgType).getTypePtr() &&
2081 "type mismatch in call argument!");
2082 #endif
2083 Args.push_back(std::make_pair(EmitCallArg(*Arg, ArgType),
2084 ArgType));
2085 }
2087 // Either we've emitted all the call args, or we have a call to a
2088 // variadic function.
2089 assert((Arg == ArgEnd || CallArgTypeInfo->isVariadic()) &&
2090 "Extra arguments in non-variadic function!");
2094 // If we still have any arguments, emit them using the type of the argument.
2095 for (; Arg != ArgEnd; ++Arg) {
2096 QualType ArgType = Arg->getType();
2097 Args.push_back(std::make_pair(EmitCallArg(*Arg, ArgType),
2098 ArgType));
2099 }
2100 }
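// Usage sketch (illustrative; 'FnType' is an assumed QualType of the callee
// and 'CE' an assumed CallExpr): passing the prototype lets the known
// parameter types drive the argument conversions:
//
//   CallArgList Args;
//   EmitCallArgs(Args, FnType->getAs<FunctionProtoType>(),
//                CE->arg_begin(), CE->arg_end());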
2102 const TargetCodeGenInfo &getTargetHooks() const {
2103 return CGM.getTargetCodeGenInfo();
2104 }
2106 void EmitDeclMetadata();
2107 };
2109 /// Helper class with most of the code for saving a value for a
2110 /// conditional expression cleanup.
2111 struct DominatingLLVMValue {
2112 typedef llvm::PointerIntPair<llvm::Value*, 1, bool> saved_type;
2114 /// Answer whether the given value needs extra work to be saved.
2115 static bool needsSaving(llvm::Value *value) {
2116 // If it's not an instruction, we don't need to save.
2117 if (!isa<llvm::Instruction>(value)) return false;
2119 // If it's an instruction in the entry block, we don't need to save.
2120 llvm::BasicBlock *block = cast<llvm::Instruction>(value)->getParent();
2121 return (block != &block->getParent()->getEntryBlock());
2122 }
2124 /// Try to save the given value.
2125 static saved_type save(CodeGenFunction &CGF, llvm::Value *value) {
2126 if (!needsSaving(value)) return saved_type(value, false);
2128 // Otherwise we need an alloca.
2129 llvm::Value *alloca =
2130 CGF.CreateTempAlloca(value->getType(), "cond-cleanup.save");
2131 CGF.Builder.CreateStore(value, alloca);
2133 return saved_type(alloca, true);
2134 }
2136 static llvm::Value *restore(CodeGenFunction &CGF, saved_type value) {
2137 if (!value.getInt()) return value.getPointer();
2138 return CGF.Builder.CreateLoad(value.getPointer());
2139 }
2140 };
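// Sketch (illustrative): conditional cleanups stash a value with save() while
// the insertion point may not dominate the eventual cleanup, then rebuild it
// with restore() when the cleanup is emitted:
//
//   DominatingLLVMValue::saved_type Saved = DominatingLLVMValue::save(CGF, V);
//   // ... later, at the cleanup's emission point ...
//   llvm::Value *Reloaded = DominatingLLVMValue::restore(CGF, Saved);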
2142 /// A partial specialization of DominatingValue for llvm::Values that
2143 /// might be llvm::Instructions.
2144 template <class T> struct DominatingPointer<T,true> : DominatingLLVMValue {
2145 typedef T *type;
2146 static type restore(CodeGenFunction &CGF, saved_type value) {
2147 return static_cast<T*>(DominatingLLVMValue::restore(CGF, value));
2148 }
2149 };
2151 /// A specialization of DominatingValue for RValue.
2152 template <> struct DominatingValue<RValue> {
2153 typedef RValue type;
2154 class saved_type {
2155 enum Kind { ScalarLiteral, ScalarAddress, AggregateLiteral,
2156 AggregateAddress, ComplexAddress };
2158 llvm::Value *Value;
2159 Kind K;
2160 saved_type(llvm::Value *v, Kind k) : Value(v), K(k) {}
2162 public:
2163 static bool needsSaving(RValue value);
2164 static saved_type save(CodeGenFunction &CGF, RValue value);
2165 RValue restore(CodeGenFunction &CGF);
2167 // implementations in CGExprCXX.cpp
2168 };
2170 static bool needsSaving(type value) {
2171 return saved_type::needsSaving(value);
2172 }
2173 static saved_type save(CodeGenFunction &CGF, type value) {
2174 return saved_type::save(CGF, value);
2175 }
2176 static type restore(CodeGenFunction &CGF, saved_type value) {
2177 return value.restore(CGF);
2178 }
2179 };
2181 } // end namespace CodeGen
2182 } // end namespace clang
2184 #endif