[clang.git] / lib / CodeGen / CGDecl.cpp
//===--- CGDecl.cpp - Emit LLVM Code for declarations ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Decl nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGBlocks.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Intrinsics.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Type.h"

using namespace clang;
using namespace CodeGen;
void CodeGenFunction::EmitDecl(const Decl &D) {
  switch (D.getKind()) {
  case Decl::TranslationUnit:
  case Decl::Namespace:
  case Decl::UnresolvedUsingTypename:
  case Decl::ClassTemplateSpecialization:
  case Decl::ClassTemplatePartialSpecialization:
  case Decl::TemplateTypeParm:
  case Decl::UnresolvedUsingValue:
  case Decl::NonTypeTemplateParm:
  case Decl::CXXMethod:
  case Decl::CXXConstructor:
  case Decl::CXXDestructor:
  case Decl::CXXConversion:
  case Decl::Field:
  case Decl::IndirectField:
  case Decl::ObjCIvar:
  case Decl::ObjCAtDefsField:
  case Decl::ParmVar:
  case Decl::ImplicitParam:
  case Decl::ClassTemplate:
  case Decl::FunctionTemplate:
  case Decl::TemplateTemplateParm:
  case Decl::ObjCMethod:
  case Decl::ObjCCategory:
  case Decl::ObjCProtocol:
  case Decl::ObjCInterface:
  case Decl::ObjCCategoryImpl:
  case Decl::ObjCImplementation:
  case Decl::ObjCProperty:
  case Decl::ObjCCompatibleAlias:
  case Decl::AccessSpec:
  case Decl::LinkageSpec:
  case Decl::ObjCPropertyImpl:
  case Decl::ObjCClass:
  case Decl::ObjCForwardProtocol:
  case Decl::FileScopeAsm:
  case Decl::Friend:
  case Decl::FriendTemplate:
  case Decl::Block:
    assert(0 && "Declaration should not be in declstmts!");
  case Decl::Function:       // void X();
  case Decl::Record:         // struct/union/class X;
  case Decl::Enum:           // enum X;
  case Decl::EnumConstant:   // enum ? { X = ? }
  case Decl::CXXRecord:      // struct/union/class X; [C++]
  case Decl::Using:          // using X; [C++]
  case Decl::UsingShadow:
  case Decl::UsingDirective: // using namespace X; [C++]
  case Decl::NamespaceAlias:
  case Decl::StaticAssert:   // static_assert(X, ""); [C++0x]
  case Decl::Label:          // __label__ x;
    // None of these decls require codegen support.
    return;

  case Decl::Var: {
    const VarDecl &VD = cast<VarDecl>(D);
    assert(VD.isLocalVarDecl() &&
           "Should not see file-scope variables inside a function!");
    return EmitVarDecl(VD);
  }

  case Decl::Typedef: {   // typedef int X;
    const TypedefDecl &TD = cast<TypedefDecl>(D);
    QualType Ty = TD.getUnderlyingType();

    if (Ty->isVariablyModifiedType())
      EmitVLASize(Ty);
  }
  }
}
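// A local label declaration reaching EmitDecl above as a Decl::Label emits
// nothing by itself; only the goto/label statements that use it produce IR.
// A minimal sketch (hypothetical user code, GNU __label__ extension):
//
//   void f(int n) {
//     __label__ retry;        // no codegen needed for the declaration
//   retry:
//     if (n-- > 0) goto retry;
//   }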
/// EmitVarDecl - This method handles emission of any variable declaration
/// inside a function, including static vars etc.
void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
  switch (D.getStorageClass()) {
  case SC_None:
  case SC_Auto:
  case SC_Register:
    return EmitAutoVarDecl(D);
  case SC_Static: {
    llvm::GlobalValue::LinkageTypes Linkage =
      llvm::GlobalValue::InternalLinkage;

    // If the function definition has some sort of weak linkage, its
    // static variables should also be weak so that they get properly
    // uniqued. We can't do this in C, though, because there's no
    // standard way to agree on which variables are the same (i.e.
    // there's no mangling).
    if (getContext().getLangOptions().CPlusPlus)
      if (llvm::GlobalValue::isWeakForLinker(CurFn->getLinkage()))
        Linkage = CurFn->getLinkage();

    return EmitStaticVarDecl(D, Linkage);
  }
  case SC_Extern:
  case SC_PrivateExtern:
    // Don't emit it now, allow it to be emitted lazily on its first use.
    return;
  }

  assert(0 && "Unknown storage class");
}
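// Sketch of the weak-linkage case handled above (hypothetical C++ source):
// the static local in an inline function inherits the function's
// weak-for-linker linkage so that every translation unit ends up sharing
// one object, whereas in C the static simply stays internal.
//
//   inline int counter() {
//     static int n = 0;   // linkage follows counter()'s linkage
//     return ++n;
//   }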
static std::string GetStaticDeclName(CodeGenFunction &CGF, const VarDecl &D,
                                     const char *Separator) {
  CodeGenModule &CGM = CGF.CGM;
  if (CGF.getContext().getLangOptions().CPlusPlus) {
    llvm::StringRef Name = CGM.getMangledName(&D);
    return Name.str();
  }

  std::string ContextName;
  if (!CGF.CurFuncDecl) {
    // Better be in a block declared in global scope.
    const NamedDecl *ND = cast<NamedDecl>(&D);
    const DeclContext *DC = ND->getDeclContext();
    if (const BlockDecl *BD = dyn_cast<BlockDecl>(DC)) {
      MangleBuffer Name;
      CGM.getBlockMangledName(GlobalDecl(), Name, BD);
      ContextName = Name.getString();
    }
    else
      assert(0 && "Unknown context for block static var decl");
  } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CGF.CurFuncDecl)) {
    llvm::StringRef Name = CGM.getMangledName(FD);
    ContextName = Name.str();
  } else if (isa<ObjCMethodDecl>(CGF.CurFuncDecl))
    ContextName = CGF.CurFn->getName();
  else
    assert(0 && "Unknown context for static var decl");

  return ContextName + Separator + D.getNameAsString();
}
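// Roughly, in C the result is "<context name><Separator><variable name>";
// with the "." separator that EmitStaticVarDecl passes below, a hypothetical
//
//   void foo(void) { static int x; }
//
// would name its global something like "foo.x". In C++ the variable's own
// mangled name is used directly instead.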
llvm::GlobalVariable *
CodeGenFunction::CreateStaticVarDecl(const VarDecl &D,
                                     const char *Separator,
                                     llvm::GlobalValue::LinkageTypes Linkage) {
  QualType Ty = D.getType();
  assert(Ty->isConstantSizeType() && "VLAs can't be static");

  std::string Name = GetStaticDeclName(*this, D, Separator);

  const llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(Ty);
  llvm::GlobalVariable *GV =
    new llvm::GlobalVariable(CGM.getModule(), LTy,
                             Ty.isConstant(getContext()), Linkage,
                             CGM.EmitNullConstant(D.getType()), Name, 0,
                             D.isThreadSpecified(), Ty.getAddressSpace());
  GV->setAlignment(getContext().getDeclAlign(&D).getQuantity());
  if (Linkage != llvm::GlobalValue::InternalLinkage)
    GV->setVisibility(CurFn->getVisibility());
  return GV;
}
/// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
/// global variable that has already been created for it. If the initializer
/// has a different type than GV does, this may free GV and return a different
/// one. Otherwise it just returns GV.
llvm::GlobalVariable *
CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
                                               llvm::GlobalVariable *GV) {
  llvm::Constant *Init = CGM.EmitConstantExpr(D.getInit(), D.getType(), this);

  // If constant emission failed, then this should be a C++ static
  // initializer.
  if (!Init) {
    if (!getContext().getLangOptions().CPlusPlus)
      CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
    else if (Builder.GetInsertBlock()) {
      // Since we have a static initializer, this global variable can't
      // be constant.
      GV->setConstant(false);

      EmitCXXGuardedInit(D, GV);
    }
    return GV;
  }

  // The initializer may differ in type from the global. Rewrite
  // the global to match the initializer. (We have to do this
  // because some types, like unions, can't be completely represented
  // in the LLVM type system.)
  if (GV->getType()->getElementType() != Init->getType()) {
    llvm::GlobalVariable *OldGV = GV;

    GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(),
                                  OldGV->isConstant(),
                                  OldGV->getLinkage(), Init, "",
                                  /*InsertBefore*/ OldGV,
                                  D.isThreadSpecified(),
                                  D.getType().getAddressSpace());
    GV->setVisibility(OldGV->getVisibility());

    // Steal the name of the old global
    GV->takeName(OldGV);

    // Replace all uses of the old global with the new global
    llvm::Constant *NewPtrForOldDecl =
      llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
    OldGV->replaceAllUsesWith(NewPtrForOldDecl);

    // Erase the old global, since it is no longer used.
    OldGV->eraseFromParent();
  }

  GV->setInitializer(Init);
  return GV;
}
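// A concrete case of the type-rewrite above (hypothetical source): a union's
// converted LLVM type is based on one representation, but the emitted
// constant may be laid out via another member, so the global is recreated
// with the initializer's type and RAUW'd over the old one.
//
//   union U { void *p; unsigned char buf[4]; };
//   void f(void) { static union U u = { .buf = {1, 2, 3, 4} }; }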
void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
                                      llvm::GlobalValue::LinkageTypes Linkage) {
  llvm::Value *&DMEntry = LocalDeclMap[&D];
  assert(DMEntry == 0 && "Decl already exists in localdeclmap!");

  llvm::GlobalVariable *GV = CreateStaticVarDecl(D, ".", Linkage);

  // Store into LocalDeclMap before generating initializer to handle
  // circular references.
  DMEntry = GV;

  // We can't have a VLA here, but we can have a pointer to a VLA,
  // even though that doesn't really make any sense.
  // Make sure to evaluate VLA bounds now so that we have them for later.
  if (D.getType()->isVariablyModifiedType())
    EmitVLASize(D.getType());

  // Local static block variables must be treated as globals as they may be
  // referenced in their RHS initializer block-literal expression.
  CGM.setStaticLocalDeclAddress(&D, GV);

  // If this value has an initializer, emit it.
  if (D.getInit())
    GV = AddInitializerToStaticVarDecl(D, GV);

  GV->setAlignment(getContext().getDeclAlign(&D).getQuantity());

  // FIXME: Merge attribute handling.
  if (const AnnotateAttr *AA = D.getAttr<AnnotateAttr>()) {
    SourceManager &SM = CGM.getContext().getSourceManager();
    llvm::Constant *Ann =
      CGM.EmitAnnotateAttr(GV, AA,
                           SM.getInstantiationLineNumber(D.getLocation()));
    CGM.AddAnnotation(Ann);
  }

  if (const SectionAttr *SA = D.getAttr<SectionAttr>())
    GV->setSection(SA->getName());

  if (D.hasAttr<UsedAttr>())
    CGM.AddUsedGlobal(GV);

  // We may have to cast the constant because of the initializer
  // mismatch above.
  //
  // FIXME: It is really dangerous to store this in the map; if anyone
  // RAUW's the GV uses of this constant will be invalid.
  const llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(D.getType());
  const llvm::Type *LPtrTy = LTy->getPointerTo(D.getType().getAddressSpace());
  DMEntry = llvm::ConstantExpr::getBitCast(GV, LPtrTy);

  // Emit global variable debug descriptor for static vars.
  CGDebugInfo *DI = getDebugInfo();
  if (DI) {
    DI->setLocation(D.getLocation());
    DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(GV), &D);
  }
}
unsigned CodeGenFunction::getByRefValueLLVMField(const ValueDecl *VD) const {
  assert(ByRefValueInfo.count(VD) && "Did not find value!");

  return ByRefValueInfo.find(VD)->second.second;
}
llvm::Value *CodeGenFunction::BuildBlockByrefAddress(llvm::Value *BaseAddr,
                                                     const VarDecl *V) {
  llvm::Value *Loc = Builder.CreateStructGEP(BaseAddr, 1, "forwarding");
  Loc = Builder.CreateLoad(Loc);
  Loc = Builder.CreateStructGEP(Loc, getByRefValueLLVMField(V),
                                V->getNameAsString());
  return Loc;
}
/// BuildByRefType - This routine changes a __block variable declared as T x
/// into:
///
///      struct {
///        void *__isa;
///        void *__forwarding;
///        int32_t __flags;
///        int32_t __size;
///        void *__copy_helper;       // only if needed
///        void *__destroy_helper;    // only if needed
///        char padding[X];           // only if needed
///        T x;
///      } x
///
const llvm::Type *CodeGenFunction::BuildByRefType(const VarDecl *D) {
  std::pair<const llvm::Type *, unsigned> &Info = ByRefValueInfo[D];
  if (Info.first)
    return Info.first;

  QualType Ty = D->getType();

  std::vector<const llvm::Type *> Types;

  llvm::PATypeHolder ByRefTypeHolder = llvm::OpaqueType::get(getLLVMContext());

  // void *__isa;
  Types.push_back(Int8PtrTy);

  // void *__forwarding;
  Types.push_back(llvm::PointerType::getUnqual(ByRefTypeHolder));

  // int32_t __flags;
  Types.push_back(Int32Ty);

  // int32_t __size;
  Types.push_back(Int32Ty);

  bool HasCopyAndDispose = getContext().BlockRequiresCopying(Ty);
  if (HasCopyAndDispose) {
    /// void *__copy_helper;
    Types.push_back(Int8PtrTy);

    /// void *__destroy_helper;
    Types.push_back(Int8PtrTy);
  }

  bool Packed = false;
  CharUnits Align = getContext().getDeclAlign(D);
  if (Align > getContext().toCharUnitsFromBits(Target.getPointerAlign(0))) {
    // We have to insert padding.

    // The struct above has 2 32-bit integers.
    unsigned CurrentOffsetInBytes = 4 * 2;

    // And either 2 or 4 pointers.
    CurrentOffsetInBytes += (HasCopyAndDispose ? 4 : 2) *
      CGM.getTargetData().getTypeAllocSize(Int8PtrTy);

    // Align the offset.
    unsigned AlignedOffsetInBytes =
      llvm::RoundUpToAlignment(CurrentOffsetInBytes, Align.getQuantity());

    unsigned NumPaddingBytes = AlignedOffsetInBytes - CurrentOffsetInBytes;
    if (NumPaddingBytes > 0) {
      const llvm::Type *Ty = llvm::Type::getInt8Ty(getLLVMContext());
      // FIXME: We need a sema error for alignment larger than the minimum of
      // the maximal stack alignment and the alignment of malloc on the system.
      if (NumPaddingBytes > 1)
        Ty = llvm::ArrayType::get(Ty, NumPaddingBytes);

      Types.push_back(Ty);

      // We want a packed struct.
      Packed = true;
    }
  }

  // T x;
  Types.push_back(ConvertTypeForMem(Ty));

  const llvm::Type *T = llvm::StructType::get(getLLVMContext(), Types, Packed);

  cast<llvm::OpaqueType>(ByRefTypeHolder.get())->refineAbstractTypeTo(T);
  CGM.getModule().addTypeName("struct.__block_byref_" + D->getNameAsString(),
                              ByRefTypeHolder.get());

  Info.first = ByRefTypeHolder.get();

  Info.second = Types.size() - 1;

  return Info.first;
}
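// As a concrete sketch of the layout documented above (hypothetical source,
// assuming no copy/dispose helpers and no padding are required), a variable
//
//   __block int x = 10;
//
// gets a byref record along the lines of
//
//   struct __block_byref_x {
//     void *__isa;
//     struct __block_byref_x *__forwarding;
//     int32_t __flags;
//     int32_t __size;
//     int x;
//   };
//
// and accesses go through __forwarding (see BuildBlockByrefAddress) so they
// keep working after the variable is moved to the heap.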
namespace {
struct CallArrayDtor : EHScopeStack::Cleanup {
  CallArrayDtor(const CXXDestructorDecl *Dtor,
                const ConstantArrayType *Type,
                llvm::Value *Loc)
    : Dtor(Dtor), Type(Type), Loc(Loc) {}

  const CXXDestructorDecl *Dtor;
  const ConstantArrayType *Type;
  llvm::Value *Loc;

  void Emit(CodeGenFunction &CGF, bool IsForEH) {
    QualType BaseElementTy = CGF.getContext().getBaseElementType(Type);
    const llvm::Type *BasePtr = CGF.ConvertType(BaseElementTy);
    BasePtr = llvm::PointerType::getUnqual(BasePtr);
    llvm::Value *BaseAddrPtr = CGF.Builder.CreateBitCast(Loc, BasePtr);
    CGF.EmitCXXAggrDestructorCall(Dtor, Type, BaseAddrPtr);
  }
};

struct CallVarDtor : EHScopeStack::Cleanup {
  CallVarDtor(const CXXDestructorDecl *Dtor,
              llvm::Value *NRVOFlag,
              llvm::Value *Loc)
    : Dtor(Dtor), NRVOFlag(NRVOFlag), Loc(Loc) {}

  const CXXDestructorDecl *Dtor;
  llvm::Value *NRVOFlag;
  llvm::Value *Loc;

  void Emit(CodeGenFunction &CGF, bool IsForEH) {
    // Along the exceptions path we always execute the dtor.
    bool NRVO = !IsForEH && NRVOFlag;

    llvm::BasicBlock *SkipDtorBB = 0;
    if (NRVO) {
      // If we exited via NRVO, we skip the destructor call.
      llvm::BasicBlock *RunDtorBB = CGF.createBasicBlock("nrvo.unused");
      SkipDtorBB = CGF.createBasicBlock("nrvo.skipdtor");
      llvm::Value *DidNRVO = CGF.Builder.CreateLoad(NRVOFlag, "nrvo.val");
      CGF.Builder.CreateCondBr(DidNRVO, SkipDtorBB, RunDtorBB);
      CGF.EmitBlock(RunDtorBB);
    }

    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                              /*ForVirtualBase=*/false, Loc);

    if (NRVO) CGF.EmitBlock(SkipDtorBB);
  }
};
}
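// The NRVO flag guards cases like the following (a hypothetical sketch):
//
//   struct Widget { ~Widget(); };
//   Widget make() {
//     Widget w;    // may be constructed directly in the return slot
//     return w;    // if NRVO fired, w's end-of-scope destructor must be
//   }              // skipped, since the caller now owns the object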
namespace {
struct CallStackRestore : EHScopeStack::Cleanup {
  llvm::Value *Stack;
  CallStackRestore(llvm::Value *Stack) : Stack(Stack) {}
  void Emit(CodeGenFunction &CGF, bool IsForEH) {
    llvm::Value *V = CGF.Builder.CreateLoad(Stack, "tmp");
    llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
    CGF.Builder.CreateCall(F, V);
  }
};

struct CallCleanupFunction : EHScopeStack::Cleanup {
  llvm::Constant *CleanupFn;
  const CGFunctionInfo &FnInfo;
  llvm::Value *Addr;
  const VarDecl &Var;

  CallCleanupFunction(llvm::Constant *CleanupFn, const CGFunctionInfo *Info,
                      llvm::Value *Addr, const VarDecl *Var)
    : CleanupFn(CleanupFn), FnInfo(*Info), Addr(Addr), Var(*Var) {}

  void Emit(CodeGenFunction &CGF, bool IsForEH) {
    // In some cases, the type of the function argument will be different from
    // the type of the pointer. An example of this is
    //   void f(void* arg);
    //   __attribute__((cleanup(f))) void *g;
    //
    // To fix this we insert a bitcast here.
    QualType ArgTy = FnInfo.arg_begin()->type;
    llvm::Value *Arg =
      CGF.Builder.CreateBitCast(Addr, CGF.ConvertType(ArgTy));

    CallArgList Args;
    Args.push_back(std::make_pair(RValue::get(Arg),
                        CGF.getContext().getPointerType(Var.getType())));
    CGF.EmitCall(FnInfo, CleanupFn, ReturnValueSlot(), Args);
  }
};

struct CallBlockRelease : EHScopeStack::Cleanup {
  llvm::Value *Addr;
  CallBlockRelease(llvm::Value *Addr) : Addr(Addr) {}

  void Emit(CodeGenFunction &CGF, bool IsForEH) {
    llvm::Value *V = CGF.Builder.CreateStructGEP(Addr, 1, "forwarding");
    V = CGF.Builder.CreateLoad(V);
    CGF.BuildBlockRelease(V, BLOCK_FIELD_IS_BYREF);
  }
};
}
/// canEmitInitWithFewStoresAfterMemset - Decide whether we can emit the
/// non-zero parts of the specified initializer with equal or fewer than
/// NumStores scalar stores.
static bool canEmitInitWithFewStoresAfterMemset(llvm::Constant *Init,
                                                unsigned &NumStores) {
  // Zero and Undef never require any extra stores.
  if (isa<llvm::ConstantAggregateZero>(Init) ||
      isa<llvm::ConstantPointerNull>(Init) ||
      isa<llvm::UndefValue>(Init))
    return true;
  if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
      isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
      isa<llvm::ConstantExpr>(Init))
    return Init->isNullValue() || NumStores--;

  // See if we can emit each element.
  if (isa<llvm::ConstantArray>(Init) || isa<llvm::ConstantStruct>(Init)) {
    for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
      llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
      if (!canEmitInitWithFewStoresAfterMemset(Elt, NumStores))
        return false;
    }
    return true;
  }

  // Anything else is hard and scary.
  return false;
}
/// emitStoresForInitAfterMemset - For inits that
/// canEmitInitWithFewStoresAfterMemset returned true for, emit the scalar
/// stores that would be required.
static void emitStoresForInitAfterMemset(llvm::Constant *Init, llvm::Value *Loc,
                                         CGBuilderTy &Builder) {
  // Zero doesn't require any stores.
  if (isa<llvm::ConstantAggregateZero>(Init) ||
      isa<llvm::ConstantPointerNull>(Init) ||
      isa<llvm::UndefValue>(Init))
    return;

  if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
      isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
      isa<llvm::ConstantExpr>(Init)) {
    if (!Init->isNullValue())
      Builder.CreateStore(Init, Loc);
    return;
  }

  assert((isa<llvm::ConstantStruct>(Init) || isa<llvm::ConstantArray>(Init)) &&
         "Unknown value type!");

  for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
    llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
    if (Elt->isNullValue()) continue;

    // Otherwise, get a pointer to the element and emit it.
    emitStoresForInitAfterMemset(Elt, Builder.CreateConstGEP2_32(Loc, 0, i),
                                 Builder);
  }
}
/// shouldUseMemSetPlusStoresToInitialize - Decide whether we should use memset
/// plus some stores to initialize a local variable instead of using a memcpy
/// from a constant global. It is beneficial to use memset if the global is all
/// zeros, or mostly zeros and large.
static bool shouldUseMemSetPlusStoresToInitialize(llvm::Constant *Init,
                                                  uint64_t GlobalSize) {
  // If a global is all zeros, always use a memset.
  if (isa<llvm::ConstantAggregateZero>(Init)) return true;

  // If a non-zero global is <= 32 bytes, always use a memcpy. If it is large,
  // do it if it will require 6 or fewer scalar stores.
  // TODO: Should the budget depend on the size? Avoiding a large global
  // warrants plopping in more stores.
  unsigned StoreBudget = 6;
  uint64_t SizeLimit = 32;

  return GlobalSize > SizeLimit &&
    canEmitInitWithFewStoresAfterMemset(Init, StoreBudget);
}
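// For intuition, hypothetical locals (exact IR depends on target and size):
//
//   int a[64] = {0};           // all zeros: a single memset
//   int b[64] = {1, 2, 3};     // mostly zeros and large: memset + 3 stores
//   int c[4]  = {1, 2, 3, 4};  // small (<= 32 bytes): memcpy from a
//                              // constant global instead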
/// EmitAutoVarDecl - Emit code and set up an entry in LocalDeclMap for a
/// variable declaration with auto, register, or no storage class specifier.
/// These turn into simple stack objects, or GlobalValues depending on target.
void CodeGenFunction::EmitAutoVarDecl(const VarDecl &D,
                                      SpecialInitFn *SpecialInit) {
  QualType Ty = D.getType();
  unsigned Alignment = getContext().getDeclAlign(&D).getQuantity();
  bool isByRef = D.hasAttr<BlocksAttr>();
  bool needsDispose = false;
  CharUnits Align = CharUnits::Zero();
  bool IsSimpleConstantInitializer = false;

  bool NRVO = false;
  llvm::Value *NRVOFlag = 0;
  llvm::Value *DeclPtr;
  if (Ty->isConstantSizeType()) {
    if (!Target.useGlobalsForAutomaticVariables()) {
      NRVO = getContext().getLangOptions().ElideConstructors &&
        D.isNRVOVariable();
      // If this value is an array or struct, is POD, and the initializer is
      // a statically determinable constant, try to optimize it (unless the
      // NRVO is already optimizing this).
      if (!NRVO && D.getInit() && !isByRef &&
          (Ty->isArrayType() || Ty->isRecordType()) &&
          Ty->isPODType() &&
          D.getInit()->isConstantInitializer(getContext(), false)) {
        // If this variable is marked 'const', emit the value as a global.
        if (CGM.getCodeGenOpts().MergeAllConstants &&
            Ty.isConstant(getContext())) {
          EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);
          return;
        }

        IsSimpleConstantInitializer = true;
      }

      // A normal fixed sized variable becomes an alloca in the entry block,
      // unless it's an NRVO variable.
      const llvm::Type *LTy = ConvertTypeForMem(Ty);

      if (NRVO) {
        // The named return value optimization: allocate this variable in the
        // return slot, so that we can elide the copy when returning this
        // variable (C++0x [class.copy]p34).
        DeclPtr = ReturnValue;

        if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
          if (!cast<CXXRecordDecl>(RecordTy->getDecl())->hasTrivialDestructor()) {
            // Create a flag that is used to indicate when the NRVO was applied
            // to this variable. Set it to zero to indicate that NRVO was not
            // applied.
            llvm::Value *Zero = Builder.getFalse();
            NRVOFlag = CreateTempAlloca(Zero->getType(), "nrvo");
            EnsureInsertPoint();
            Builder.CreateStore(Zero, NRVOFlag);

            // Record the NRVO flag for this variable.
            NRVOFlags[&D] = NRVOFlag;
          }
        }
      } else {
        if (isByRef)
          LTy = BuildByRefType(&D);

        llvm::AllocaInst *Alloc = CreateTempAlloca(LTy);
        Alloc->setName(D.getNameAsString());

        Align = getContext().getDeclAlign(&D);
        if (isByRef)
          Align = std::max(Align,
              getContext().toCharUnitsFromBits(Target.getPointerAlign(0)));
        Alloc->setAlignment(Align.getQuantity());
        DeclPtr = Alloc;
      }
    } else {
      // Targets that don't support recursion emit locals as globals.
      const char *Class =
        D.getStorageClass() == SC_Register ? ".reg." : ".auto.";
      DeclPtr = CreateStaticVarDecl(D, Class,
                                    llvm::GlobalValue::InternalLinkage);
    }

    // FIXME: Can this happen?
    if (Ty->isVariablyModifiedType())
      EmitVLASize(Ty);
  } else {
    EnsureInsertPoint();

    if (!DidCallStackSave) {
      // Save the stack.
      llvm::Value *Stack = CreateTempAlloca(Int8PtrTy, "saved_stack");

      llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
      llvm::Value *V = Builder.CreateCall(F);

      Builder.CreateStore(V, Stack);

      DidCallStackSave = true;

      // Push a cleanup block and restore the stack there.
      // FIXME: in general circumstances, this should be an EH cleanup.
      EHStack.pushCleanup<CallStackRestore>(NormalCleanup, Stack);
    }

    // Get the element type.
    const llvm::Type *LElemTy = ConvertTypeForMem(Ty);
    const llvm::Type *LElemPtrTy = LElemTy->getPointerTo(Ty.getAddressSpace());

    llvm::Value *VLASize = EmitVLASize(Ty);

    // Allocate memory for the array.
    llvm::AllocaInst *VLA =
      Builder.CreateAlloca(llvm::Type::getInt8Ty(getLLVMContext()), VLASize,
                           "vla");
    VLA->setAlignment(getContext().getDeclAlign(&D).getQuantity());

    DeclPtr = Builder.CreateBitCast(VLA, LElemPtrTy, "tmp");
  }

  llvm::Value *&DMEntry = LocalDeclMap[&D];
  assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
  DMEntry = DeclPtr;

  // Emit debug info for local var declaration.
  if (CGDebugInfo *DI = getDebugInfo()) {
    assert(HaveInsertPoint() && "Unexpected unreachable point!");

    DI->setLocation(D.getLocation());
    if (Target.useGlobalsForAutomaticVariables()) {
      DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(DeclPtr), &D);
    } else
      DI->EmitDeclareOfAutoVariable(&D, DeclPtr, Builder);
  }
  // If this local has an initializer, emit it now.
  const Expr *Init = D.getInit();

  // If we are at an unreachable point, we don't need to emit the initializer
  // unless it contains a label.
  if (!HaveInsertPoint()) {
    if (!ContainsLabel(Init))
      Init = 0;
    else
      EnsureInsertPoint();
  }

  if (isByRef) {
    EnsureInsertPoint();
    llvm::Value *isa_field = Builder.CreateStructGEP(DeclPtr, 0);
    llvm::Value *forwarding_field = Builder.CreateStructGEP(DeclPtr, 1);
    llvm::Value *flags_field = Builder.CreateStructGEP(DeclPtr, 2);
    llvm::Value *size_field = Builder.CreateStructGEP(DeclPtr, 3);
    llvm::Value *V;

    BlockFieldFlags fieldFlags;
    bool fieldNeedsCopyDispose = false;

    needsDispose = true;

    if (Ty->isBlockPointerType()) {
      fieldFlags |= BLOCK_FIELD_IS_BLOCK;
      fieldNeedsCopyDispose = true;
    } else if (getContext().isObjCNSObjectType(Ty) ||
               Ty->isObjCObjectPointerType()) {
      fieldFlags |= BLOCK_FIELD_IS_OBJECT;
      fieldNeedsCopyDispose = true;
    } else if (getLangOptions().CPlusPlus) {
      if (getContext().getBlockVarCopyInits(&D))
        fieldNeedsCopyDispose = true;
      else if (const CXXRecordDecl *record = D.getType()->getAsCXXRecordDecl())
        fieldNeedsCopyDispose = !record->hasTrivialDestructor();
    }

    // FIXME: Someone double check this.
    if (Ty.isObjCGCWeak())
      fieldFlags |= BLOCK_FIELD_IS_WEAK;

    int isa = 0;
    if (fieldFlags & BLOCK_FIELD_IS_WEAK)
      isa = 1;
    V = Builder.CreateIntToPtr(Builder.getInt32(isa), Int8PtrTy, "isa");
    Builder.CreateStore(V, isa_field);

    Builder.CreateStore(DeclPtr, forwarding_field);

    Builder.CreateStore(Builder.getInt32(fieldFlags.getBitMask()), flags_field);

    const llvm::Type *V1;
    V1 = cast<llvm::PointerType>(DeclPtr->getType())->getElementType();
    V = Builder.getInt32(CGM.GetTargetTypeStoreSize(V1).getQuantity());
    Builder.CreateStore(V, size_field);

    if (fieldNeedsCopyDispose) {
      llvm::Value *copy_helper = Builder.CreateStructGEP(DeclPtr, 4);
      Builder.CreateStore(CGM.BuildbyrefCopyHelper(DeclPtr->getType(),
                                                   fieldFlags,
                                                   Align.getQuantity(), &D),
                          copy_helper);

      llvm::Value *destroy_helper = Builder.CreateStructGEP(DeclPtr, 5);
      Builder.CreateStore(CGM.BuildbyrefDestroyHelper(DeclPtr->getType(),
                                                      fieldFlags,
                                                      Align.getQuantity(), &D),
                          destroy_helper);
    }
  }
  if (SpecialInit) {
    SpecialInit(*this, D, DeclPtr);
  } else if (Init) {
    llvm::Value *Loc = DeclPtr;

    bool isVolatile = getContext().getCanonicalType(Ty).isVolatileQualified();

    // If the initializer was a simple constant initializer, we can optimize it
    // in various ways.
    if (IsSimpleConstantInitializer) {
      llvm::Constant *Init = CGM.EmitConstantExpr(D.getInit(), Ty, this);
      assert(Init != 0 && "Wasn't a simple constant init?");

      llvm::Value *SizeVal =
        llvm::ConstantInt::get(IntPtrTy,
                               getContext().getTypeSizeInChars(Ty).getQuantity());

      const llvm::Type *BP = Int8PtrTy;
      if (Loc->getType() != BP)
        Loc = Builder.CreateBitCast(Loc, BP, "tmp");

      // If the initializer is all or mostly zeros, codegen with memset then do
      // a few stores afterward.
      if (shouldUseMemSetPlusStoresToInitialize(Init,
            CGM.getTargetData().getTypeAllocSize(Init->getType()))) {
        Builder.CreateMemSet(Loc, Builder.getInt8(0), SizeVal,
                             Align.getQuantity(), false);
        if (!Init->isNullValue()) {
          Loc = Builder.CreateBitCast(Loc, Init->getType()->getPointerTo());
          emitStoresForInitAfterMemset(Init, Loc, Builder);
        }
      } else {
        // Otherwise, create a temporary global with the initializer then
        // memcpy from the global to the alloca.
        std::string Name = GetStaticDeclName(*this, D, ".");
        llvm::GlobalVariable *GV =
          new llvm::GlobalVariable(CGM.getModule(), Init->getType(), true,
                                   llvm::GlobalValue::InternalLinkage,
                                   Init, Name, 0, false, 0);
        GV->setAlignment(Align.getQuantity());

        llvm::Value *SrcPtr = GV;
        if (SrcPtr->getType() != BP)
          SrcPtr = Builder.CreateBitCast(SrcPtr, BP, "tmp");

        Builder.CreateMemCpy(Loc, SrcPtr, SizeVal, Align.getQuantity(), false);
      }
    } else if (Ty->isReferenceType()) {
      RValue RV = EmitReferenceBindingToExpr(Init, &D);
      if (isByRef)
        Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D),
                                      D.getNameAsString());
      EmitStoreOfScalar(RV.getScalarVal(), Loc, false, Alignment, Ty);
    } else if (!hasAggregateLLVMType(Init->getType())) {
      llvm::Value *V = EmitScalarExpr(Init);
      if (isByRef) {
        // When the RHS has side effects, we must go through the 'forwarding'
        // field to get to the address of the __block variable descriptor.
        if (Init->HasSideEffects(getContext()))
          Loc = BuildBlockByrefAddress(DeclPtr, &D);
        else
          Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D),
                                        D.getNameAsString());
      }
      EmitStoreOfScalar(V, Loc, isVolatile, Alignment, Ty);
    } else if (Init->getType()->isAnyComplexType()) {
      if (isByRef)
        Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D),
                                      D.getNameAsString());
      EmitComplexExprIntoAddr(Init, Loc, isVolatile);
    } else {
      if (isByRef)
        Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D),
                                      D.getNameAsString());
      EmitAggExpr(Init, AggValueSlot::forAddr(Loc, isVolatile, true, false));
    }
  }
  // Handle CXX destruction of variables.
  QualType DtorTy(Ty);
  while (const ArrayType *Array = getContext().getAsArrayType(DtorTy))
    DtorTy = getContext().getBaseElementType(Array);
  if (const RecordType *RT = DtorTy->getAs<RecordType>())
    if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
      if (!ClassDecl->hasTrivialDestructor()) {
        // Note: We suppress the destructor call when the corresponding NRVO
        // flag has been set.
        llvm::Value *Loc = DeclPtr;
        if (isByRef)
          Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D),
                                        D.getNameAsString());

        const CXXDestructorDecl *D = ClassDecl->getDestructor();
        assert(D && "EmitLocalBlockVarDecl - destructor is null");

        if (const ConstantArrayType *Array =
              getContext().getAsConstantArrayType(Ty)) {
          EHStack.pushCleanup<CallArrayDtor>(NormalAndEHCleanup,
                                             D, Array, Loc);
        } else {
          EHStack.pushCleanup<CallVarDtor>(NormalAndEHCleanup,
                                           D, NRVOFlag, Loc);
        }
      }
    }

  // Handle the cleanup attribute.
  if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
    const FunctionDecl *FD = CA->getFunctionDecl();

    llvm::Constant *F = CGM.GetAddrOfFunction(FD);
    assert(F && "Could not find function!");

    const CGFunctionInfo &Info = CGM.getTypes().getFunctionInfo(FD);
    EHStack.pushCleanup<CallCleanupFunction>(NormalAndEHCleanup,
                                             F, &Info, DeclPtr, &D);
  }

  // If this is a block variable, clean it up.
  if (needsDispose && CGM.getLangOptions().getGCMode() != LangOptions::GCOnly)
    EHStack.pushCleanup<CallBlockRelease>(NormalAndEHCleanup, DeclPtr);
}
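// The cleanup-attribute path above corresponds to user code along these
// lines (hypothetical names): the cleanup function receives the address of
// the variable, which is why CallCleanupFunction passes DeclPtr.
//
//   static void close_file(FILE **fp) { if (*fp) fclose(*fp); }
//
//   void use(const char *path) {
//     __attribute__((cleanup(close_file))) FILE *f = fopen(path, "r");
//     /* ... */
//   }   // close_file(&f) was pushed as a cleanup and runs on scope exit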
/// Emit an alloca (or GlobalValue depending on target)
/// for the specified parameter and set up LocalDeclMap.
void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg) {
  // FIXME: Why isn't ImplicitParamDecl a ParmVarDecl?
  assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
         "Invalid argument to EmitParmDecl");
  QualType Ty = D.getType();

  llvm::Value *DeclPtr;
  // If this is an aggregate or variable sized value, reuse the input pointer.
  if (!Ty->isConstantSizeType() ||
      CodeGenFunction::hasAggregateLLVMType(Ty)) {
    DeclPtr = Arg;
  } else {
    // Otherwise, create a temporary to hold the value.
    DeclPtr = CreateMemTemp(Ty, D.getName() + ".addr");

    // Store the initial value into the alloca.
    EmitStoreOfScalar(Arg, DeclPtr, Ty.isVolatileQualified(),
                      getContext().getDeclAlign(&D).getQuantity(), Ty,
                      CGM.getTBAAInfo(Ty));
  }
  Arg->setName(D.getName());

  llvm::Value *&DMEntry = LocalDeclMap[&D];
  assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
  DMEntry = DeclPtr;

  // Emit debug info for param declaration.
  if (CGDebugInfo *DI = getDebugInfo()) {
    DI->setLocation(D.getLocation());
    DI->EmitDeclareOfArgVariable(&D, DeclPtr, Builder);
  }
}