//===--- CodeGenTypes.cpp - Type translation for LLVM CodeGen -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the code that handles AST -> LLVM type lowering.
//
//===----------------------------------------------------------------------===//

#include "CodeGenTypes.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "llvm/ADT/SmallSet.h"  // llvm::SmallSet, used by RecordOrganizer below
#include "llvm/DerivedTypes.h"
#include "llvm/Module.h"
#include "llvm/Target/TargetData.h"

#include "CGCall.h"

using namespace clang;
using namespace CodeGen;

namespace {
  /// RecordOrganizer - This helper class, used by CGRecordLayout, lays out
  /// structs and unions. It manages transient information used during layout.
  /// FIXME: Handle field alignments. Handle packed structs.
  class RecordOrganizer {
  public:
    explicit RecordOrganizer(CodeGenTypes &Types, const RecordDecl& Record) :
      CGT(Types), RD(Record), STy(NULL) {}

    /// layoutStructFields - Do the actual work and lay out all fields. Create
    /// corresponding llvm struct type. This should be invoked only after
    /// all fields are added.
    void layoutStructFields(const ASTRecordLayout &RL);

    /// layoutUnionFields - Do the actual work and lay out all fields. Create
    /// corresponding llvm struct type. This should be invoked only after
    /// all fields are added.
    void layoutUnionFields(const ASTRecordLayout &RL);

    /// getLLVMType - Return the associated llvm struct type. This may be NULL
    /// if the fields have not been laid out.
    llvm::Type *getLLVMType() const {
      return STy;
    }

    llvm::SmallSet<unsigned, 8> &getPaddingFields() {
      return PaddingFields;
    }

  private:
    CodeGenTypes &CGT;
    const RecordDecl& RD;
    llvm::Type *STy;
    llvm::SmallSet<unsigned, 8> PaddingFields;
  };
}

CodeGenTypes::CodeGenTypes(ASTContext &Ctx, llvm::Module& M,
                           const llvm::TargetData &TD)
  : Context(Ctx), Target(Ctx.Target), TheModule(M), TheTargetData(TD),
    TheABIInfo(0) {
}

CodeGenTypes::~CodeGenTypes() {
  for (llvm::DenseMap<const Type *, CGRecordLayout *>::iterator
         I = CGRecordLayouts.begin(), E = CGRecordLayouts.end();
       I != E; ++I)
    delete I->second;
  CGRecordLayouts.clear();
}

/// ConvertType - Convert the specified type to its LLVM form.
const llvm::Type *CodeGenTypes::ConvertType(QualType T) {
  llvm::PATypeHolder Result = ConvertTypeRecursive(T);

  // Any pointers that were converted deferred evaluation of their pointee
  // type, creating an opaque type instead. This is in order to avoid problems
  // with circular types. Loop through all these deferred pointees, if any,
  // and resolve them now.
  while (!PointersToResolve.empty()) {
    std::pair<QualType, llvm::OpaqueType*> P = PointersToResolve.back();
    PointersToResolve.pop_back();
    // We can handle bare pointers here because we know that the only pointers
    // to the Opaque type are P.second and from other types. Refining the
    // opaque type away will invalidate P.second, but we don't mind :).
    const llvm::Type *NT = ConvertTypeForMemRecursive(P.first);
    P.second->refineAbstractTypeTo(NT);
  }

  return Result;
}

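// Editor's example (illustrative, not from the original source): a circular
// type such as
//   struct S { struct S *Next; };
// exercises the loop above: converting the 'Next' field produces a pointer
// to an opaque placeholder, which is later refined to the completed struct
// type once the record itself has been converted.
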
const llvm::Type *CodeGenTypes::ConvertTypeRecursive(QualType T) {
  T = Context.getCanonicalType(T);

  // See if the type is already cached.
  llvm::DenseMap<Type *, llvm::PATypeHolder>::iterator
    I = TypeCache.find(T.getTypePtr());
  // If the type is found in the map and it is not a definition of an opaque
  // placeholder type, use it. Otherwise, convert type T.
  if (I != TypeCache.end())
    return I->second.get();

  const llvm::Type *ResultType = ConvertNewType(T);
  TypeCache.insert(std::make_pair(T.getTypePtr(),
                                  llvm::PATypeHolder(ResultType)));
  return ResultType;
}

const llvm::Type *CodeGenTypes::ConvertTypeForMemRecursive(QualType T) {
  const llvm::Type *ResultType = ConvertTypeRecursive(T);
  if (ResultType == llvm::Type::Int1Ty)
    return llvm::IntegerType::get((unsigned)Context.getTypeSize(T));
  return ResultType;
}

/// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from
/// ConvertType in that it is used to convert to the memory representation for
/// a type. For example, the scalar representation for _Bool is i1, but the
/// memory representation is usually i8 or i32, depending on the target.
const llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T) {
  const llvm::Type *R = ConvertType(T);

  // If this is a non-bool type, don't map it.
  if (R != llvm::Type::Int1Ty)
    return R;

  // Otherwise, return an integer of the target-specified size.
  return llvm::IntegerType::get((unsigned)Context.getTypeSize(T));
}

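// Editor's example (illustrative, not from the original source): on common
// targets where _Bool occupies one byte,
//   ConvertType(_Bool)       -> i1   (scalar values, e.g. branch conditions)
//   ConvertTypeForMem(_Bool) -> i8   (loads, stores, and aggregate fields)
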
// Code to verify a given function type is complete, i.e. the return type
// and all of the argument types are complete.
static const TagType *VerifyFuncTypeComplete(const Type* T) {
  const FunctionType *FT = cast<FunctionType>(T);
  if (const TagType* TT = FT->getResultType()->getAsTagType())
    if (!TT->getDecl()->isDefinition())
      return TT;
  if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(T))
    for (unsigned i = 0; i < FPT->getNumArgs(); i++)
      if (const TagType* TT = FPT->getArgType(i)->getAsTagType())
        if (!TT->getDecl()->isDefinition())
          return TT;
  return 0;
}

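// Editor's example (illustrative, not from the original source): given
//   struct S;
//   struct S getS(void);
// VerifyFuncTypeComplete on getS's function type returns the TagType for S
// until a definition of S is seen; for a fully complete type such as
// 'int f(int)' it returns null.
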
/// UpdateCompletedType - When we find the full definition for a TagDecl,
/// replace the 'opaque' type we previously made for it if applicable.
void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
  const Type *Key =
    Context.getTagDeclType(const_cast<TagDecl*>(TD)).getTypePtr();
  llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator TDTI =
    TagDeclTypes.find(Key);
  if (TDTI == TagDeclTypes.end()) return;

  // Remember the opaque LLVM type for this tagdecl.
  llvm::PATypeHolder OpaqueHolder = TDTI->second;
  assert(isa<llvm::OpaqueType>(OpaqueHolder.get()) &&
         "Updating compilation of an already non-opaque type?");

  // Remove it from TagDeclTypes so that it will be regenerated.
  TagDeclTypes.erase(TDTI);

  // Generate the new type.
  const llvm::Type *NT = ConvertTagDeclType(TD);

  // Refine the old opaque type to its new definition.
  cast<llvm::OpaqueType>(OpaqueHolder.get())->refineAbstractTypeTo(NT);

  // Since we just completed a tag type, check to see if any function types
  // were completed along with the tag type.
  // FIXME: This is very inefficient; if we track which function types depend
  // on which tag types, though, it should be reasonably efficient.
  // Note: erase() invalidates DenseMap iterators, so restart the scan after
  // each erase rather than advancing a stale iterator.
  llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator i =
    FunctionTypes.begin();
  while (i != FunctionTypes.end()) {
    if (const TagType* TT = VerifyFuncTypeComplete(i->first)) {
      // This function type still depends on an incomplete tag type; make sure
      // that tag type has an associated opaque type.
      ConvertTagDeclType(TT->getDecl());
      ++i;
    } else {
      // This function no longer depends on an incomplete tag type; create the
      // function type, and refine the opaque type to the new function type.
      llvm::PATypeHolder OpaqueHolder = i->second;
      const llvm::Type *NFT = ConvertNewType(QualType(i->first, 0));
      cast<llvm::OpaqueType>(OpaqueHolder.get())->refineAbstractTypeTo(NFT);
      FunctionTypes.erase(i);
      i = FunctionTypes.begin();
    }
  }
}

static const llvm::Type* getTypeForFormat(const llvm::fltSemantics &format) {
  if (&format == &llvm::APFloat::IEEEsingle)
    return llvm::Type::FloatTy;
  if (&format == &llvm::APFloat::IEEEdouble)
    return llvm::Type::DoubleTy;
  if (&format == &llvm::APFloat::IEEEquad)
    return llvm::Type::FP128Ty;
  if (&format == &llvm::APFloat::PPCDoubleDouble)
    return llvm::Type::PPC_FP128Ty;
  if (&format == &llvm::APFloat::x87DoubleExtended)
    return llvm::Type::X86_FP80Ty;
  assert(0 && "Unknown float format!");
  return 0;
}

const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
  const clang::Type &Ty = *Context.getCanonicalType(T);

  switch (Ty.getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.def"
    assert(false && "Non-canonical or dependent types aren't possible.");
    break;

  case Type::Builtin: {
    switch (cast<BuiltinType>(Ty).getKind()) {
    default: assert(0 && "Unknown builtin type!");
    case BuiltinType::Void:
    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
      // The LLVM void type may only be used as a function result; for any
      // other use of 'void' (e.g. the pointee type of void*), just map to
      // the same type as char.
      return llvm::IntegerType::get(8);

    case BuiltinType::Bool:
      // Note that we always return bool as i1 for use as a scalar type.
      return llvm::Type::Int1Ty;

    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::SChar:
    case BuiltinType::UChar:
    case BuiltinType::Short:
    case BuiltinType::UShort:
    case BuiltinType::Int:
    case BuiltinType::UInt:
    case BuiltinType::Long:
    case BuiltinType::ULong:
    case BuiltinType::LongLong:
    case BuiltinType::ULongLong:
    case BuiltinType::WChar:
    case BuiltinType::Char16:
    case BuiltinType::Char32:
      return llvm::IntegerType::get(
          static_cast<unsigned>(Context.getTypeSize(T)));

    case BuiltinType::Float:
    case BuiltinType::Double:
    case BuiltinType::LongDouble:
      return getTypeForFormat(Context.getFloatTypeSemantics(T));

    case BuiltinType::UInt128:
    case BuiltinType::Int128:
      return llvm::IntegerType::get(128);
    }
    break;
  }
  case Type::FixedWidthInt:
    return llvm::IntegerType::get(cast<FixedWidthIntType>(T)->getWidth());
  case Type::Complex: {
    const llvm::Type *EltTy =
      ConvertTypeRecursive(cast<ComplexType>(Ty).getElementType());
    return llvm::StructType::get(EltTy, EltTy, NULL);
  }
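  // Editor's example (illustrative, not from the original source):
  //   _Complex float  ->  { float, float }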
  case Type::LValueReference:
  case Type::RValueReference: {
    const ReferenceType &RTy = cast<ReferenceType>(Ty);
    QualType ETy = RTy.getPointeeType();
    llvm::OpaqueType *PointeeType = llvm::OpaqueType::get();
    PointersToResolve.push_back(std::make_pair(ETy, PointeeType));
    return llvm::PointerType::get(PointeeType, ETy.getAddressSpace());
  }
  case Type::Pointer: {
    const PointerType &PTy = cast<PointerType>(Ty);
    QualType ETy = PTy.getPointeeType();
    llvm::OpaqueType *PointeeType = llvm::OpaqueType::get();
    PointersToResolve.push_back(std::make_pair(ETy, PointeeType));
    return llvm::PointerType::get(PointeeType, ETy.getAddressSpace());
  }

  case Type::VariableArray: {
    const VariableArrayType &A = cast<VariableArrayType>(Ty);
    assert(A.getIndexTypeQualifier() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // VLAs resolve to the innermost element type; this matches
    // the return of alloca, and there isn't any obviously better choice.
    return ConvertTypeForMemRecursive(A.getElementType());
  }
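  // Editor's example (illustrative, not from the original source): for
  //   int A[n];
  // the lowered type is just i32; the dynamic element count is handled
  // separately when the VLA's storage is allocated.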
  case Type::IncompleteArray: {
    const IncompleteArrayType &A = cast<IncompleteArrayType>(Ty);
    assert(A.getIndexTypeQualifier() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // int X[] -> [0 x int]
    return llvm::ArrayType::get(ConvertTypeForMemRecursive(A.getElementType()),
                                0);
  }
  case Type::ConstantArray: {
    const ConstantArrayType &A = cast<ConstantArrayType>(Ty);
    const llvm::Type *EltTy = ConvertTypeForMemRecursive(A.getElementType());
    return llvm::ArrayType::get(EltTy, A.getSize().getZExtValue());
  }
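  // Editor's example (illustrative, not from the original source):
  //   int X[4]  ->  [4 x i32]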
  case Type::ExtVector:
  case Type::Vector: {
    const VectorType &VT = cast<VectorType>(Ty);
    return llvm::VectorType::get(ConvertTypeRecursive(VT.getElementType()),
                                 VT.getNumElements());
  }
  case Type::FunctionNoProto:
  case Type::FunctionProto: {
    // First, check whether we can build the full function type.
    if (const TagType* TT = VerifyFuncTypeComplete(&Ty)) {
      // This function's type depends on an incomplete tag type; make sure
      // we have an opaque type corresponding to the tag type.
      ConvertTagDeclType(TT->getDecl());
      // Create an opaque type for this function type, save it, and return it.
      llvm::Type *ResultType = llvm::OpaqueType::get();
      FunctionTypes.insert(std::make_pair(&Ty, ResultType));
      return ResultType;
    }
    // The function type can be built; call the appropriate routines to
    // build it.
    if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(&Ty))
      return GetFunctionType(getFunctionInfo(FPT), FPT->isVariadic());

    const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(&Ty);
    return GetFunctionType(getFunctionInfo(FNPT), true);
  }

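  // Editor's example (illustrative, not from the original source; actual
  // results depend on the target's ABI lowering in GetFunctionType):
  //   int f(int)  ->  i32 (i32)
  //   int g()     ->  i32 (...)   (a K&R declaration is treated as variadic)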
  case Type::ExtQual:
    return
      ConvertTypeRecursive(QualType(cast<ExtQualType>(Ty).getBaseType(), 0));

  case Type::ObjCInterface: {
    // Objective-C interfaces are always opaque (outside of the
    // runtime, which can do whatever it likes); we never refine
    // these.
    const llvm::Type *&T = InterfaceTypes[cast<ObjCInterfaceType>(&Ty)];
    if (!T)
      T = llvm::OpaqueType::get();
    return T;
  }

  case Type::ObjCObjectPointer: {
    // Protocol qualifications do not influence the LLVM type; we just return
    // a pointer to the underlying interface type. We don't need to worry
    // about recursive conversion.
    const llvm::Type *T =
      ConvertTypeRecursive(cast<ObjCObjectPointerType>(Ty).getPointeeType());
    return llvm::PointerType::getUnqual(T);
  }

  case Type::Record:
  case Type::Enum: {
    const TagDecl *TD = cast<TagType>(Ty).getDecl();
    const llvm::Type *Res = ConvertTagDeclType(TD);

    std::string TypeName(TD->getKindName());
    TypeName += '.';

    // Name the codegen type after the typedef name
    // if there is no tag type name available.
    if (TD->getIdentifier())
      TypeName += TD->getNameAsString();
    else if (const TypedefType *TdT = dyn_cast<TypedefType>(T))
      TypeName += TdT->getDecl()->getNameAsString();
    else
      TypeName += "anon";

    TheModule.addTypeName(TypeName, Res);
    return Res;
  }

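  // Editor's example (illustrative, not from the original source), showing
  // the intended module type names:
  //   struct Foo { ... };             ->  "struct.Foo"
  //   typedef struct { ... } Bar;     ->  "struct.Bar"
  //   an unnamed, untypedef'd struct  ->  "struct.anon"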
  case Type::BlockPointer: {
    const QualType FTy = cast<BlockPointerType>(Ty).getPointeeType();
    llvm::OpaqueType *PointeeType = llvm::OpaqueType::get();
    PointersToResolve.push_back(std::make_pair(FTy, PointeeType));
    return llvm::PointerType::get(PointeeType, FTy.getAddressSpace());
  }

  case Type::MemberPointer: {
    // FIXME: This is ABI dependent. We use the Itanium C++ ABI.
    // http://www.codesourcery.com/public/cxx-abi/abi.html#member-pointers
    // If we ever want to support other ABIs this needs to be abstracted.

    QualType ETy = cast<MemberPointerType>(Ty).getPointeeType();
    if (ETy->isFunctionType()) {
      return llvm::StructType::get(ConvertType(Context.getPointerDiffType()),
                                   ConvertType(Context.getPointerDiffType()),
                                   NULL);
    } else
      return ConvertType(Context.getPointerDiffType());
  }
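  // Editor's example (illustrative, not from the original source), assuming
  // the Itanium ABI on a target where ptrdiff_t is 64 bits wide:
  //   void (T::*)()  ->  { i64, i64 }  (function ptr or vtable offset, plus
  //                                     the 'this' adjustment)
  //   int T::*       ->  i64           (byte offset of the member)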
  case Type::TemplateSpecialization:
    assert(false && "Dependent types can't get here");
  }

  // FIXME: implement.
  return llvm::OpaqueType::get();
}

/// ConvertTagDeclType - Lay out a tagged decl type like struct or union or
/// enum.
const llvm::Type *CodeGenTypes::ConvertTagDeclType(const TagDecl *TD) {
  // TagDecls are not necessarily unique; instead, use the (clang) type
  // connected to the decl.
  const Type *Key =
    Context.getTagDeclType(const_cast<TagDecl*>(TD)).getTypePtr();
  llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator TDTI =
    TagDeclTypes.find(Key);

  // If we've already compiled this tag type, use the previous definition.
  if (TDTI != TagDeclTypes.end())
    return TDTI->second;

  // If this is still a forward declaration, just define an opaque type to
  // use for this tagged decl.
  if (!TD->isDefinition()) {
    llvm::Type *ResultType = llvm::OpaqueType::get();
    TagDeclTypes.insert(std::make_pair(Key, ResultType));
    return ResultType;
  }

  // Okay, this is a definition of a type. Compile the implementation now.

  if (TD->isEnum()) {
    // Don't bother storing enums in TagDeclTypes.
    return ConvertTypeRecursive(cast<EnumDecl>(TD)->getIntegerType());
  }

  // This decl could well be recursive. Insert an opaque placeholder for it
  // now, which recursive uses will pick up; the placeholder is refined to
  // the actual type once the fields have been laid out.
  llvm::PATypeHolder ResultHolder = llvm::OpaqueType::get();
  TagDeclTypes.insert(std::make_pair(Key, ResultHolder));

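  // Editor's example (illustrative, not from the original source): for a
  // self-referential record such as
  //   struct Node { struct Node *Next; int Val; };
  // converting the 'Next' field re-enters ConvertTagDeclType, finds the
  // placeholder just inserted above, and uses it; the placeholder is then
  // refined to the real struct type below.
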
  const llvm::Type *ResultType;
  const RecordDecl *RD = cast<const RecordDecl>(TD);

  // There isn't any extra information for empty structures/unions.
  if (RD->field_empty()) {
    ResultType = llvm::StructType::get(std::vector<const llvm::Type*>());
  } else {
    // Layout fields.
    RecordOrganizer RO(*this, *RD);

    if (TD->isStruct() || TD->isClass())
      RO.layoutStructFields(Context.getASTRecordLayout(RD));
    else {
      assert(TD->isUnion() && "unknown tag decl kind!");
      RO.layoutUnionFields(Context.getASTRecordLayout(RD));
    }

    // Get llvm::StructType.
    const Type *Key =
      Context.getTagDeclType(const_cast<TagDecl*>(TD)).getTypePtr();
    CGRecordLayouts[Key] = new CGRecordLayout(RO.getLLVMType(),
                                              RO.getPaddingFields());
    ResultType = RO.getLLVMType();
  }

  // Refine our Opaque type to ResultType. This can invalidate ResultType, so
  // make sure to read the result out of the holder.
  cast<llvm::OpaqueType>(ResultHolder.get())
    ->refineAbstractTypeTo(ResultType);

  return ResultHolder.get();
}

/// getLLVMFieldNo - Return llvm::StructType element number
/// that corresponds to the field FD.
unsigned CodeGenTypes::getLLVMFieldNo(const FieldDecl *FD) {
  llvm::DenseMap<const FieldDecl*, unsigned>::iterator I = FieldInfo.find(FD);
  assert(I != FieldInfo.end() && "Unable to find field info");
  return I->second;
}

/// addFieldInfo - Assign field number to field FD.
void CodeGenTypes::addFieldInfo(const FieldDecl *FD, unsigned No) {
  FieldInfo[FD] = No;
}

/// getBitFieldInfo - Return the BitFieldInfo that corresponds to the field FD.
CodeGenTypes::BitFieldInfo CodeGenTypes::getBitFieldInfo(const FieldDecl *FD) {
  llvm::DenseMap<const FieldDecl *, BitFieldInfo>::iterator
    I = BitFields.find(FD);
  assert(I != BitFields.end() && "Unable to find bitfield info");
  return I->second;
}

/// addBitFieldInfo - Assign a start bit and a size to field FD.
void CodeGenTypes::addBitFieldInfo(const FieldDecl *FD, unsigned Begin,
                                   unsigned Size) {
  BitFields.insert(std::make_pair(FD, BitFieldInfo(Begin, Size)));
}

/// getCGRecordLayout - Return record layout info for the given tag decl.
const CGRecordLayout *
CodeGenTypes::getCGRecordLayout(const TagDecl *TD) const {
  const Type *Key =
    Context.getTagDeclType(const_cast<TagDecl*>(TD)).getTypePtr();
  llvm::DenseMap<const Type*, CGRecordLayout *>::const_iterator I =
    CGRecordLayouts.find(Key);
  assert(I != CGRecordLayouts.end() &&
         "Unable to find record layout information for type");
  return I->second;
}

/// layoutStructFields - Do the actual work and lay out all fields. Create
/// corresponding llvm struct type.
/// Note that this doesn't actually try to do struct layout; it depends on
/// the layout built by the AST. (We have to do struct layout to do Sema,
/// and there's no point to duplicating the work.)
void RecordOrganizer::layoutStructFields(const ASTRecordLayout &RL) {
  // FIXME: This code currently always generates packed structures.
  // Unpacked structures are more readable, and sometimes more efficient!
  // (But note that any changes here are likely to impact CGExprConstant,
  // which makes some messy assumptions.)
  uint64_t llvmSize = 0;
  // FIXME: Make this a SmallVector
  std::vector<const llvm::Type*> LLVMFields;

  unsigned curField = 0;
  for (RecordDecl::field_iterator Field = RD.field_begin(),
                                  FieldEnd = RD.field_end();
       Field != FieldEnd; ++Field) {
    uint64_t offset = RL.getFieldOffset(curField);
    const llvm::Type *Ty = CGT.ConvertTypeForMemRecursive(Field->getType());
    uint64_t size = CGT.getTargetData().getTypeAllocSizeInBits(Ty);

    if (Field->isBitField()) {
      uint64_t BitFieldSize =
        Field->getBitWidth()->EvaluateAsInt(CGT.getContext()).getZExtValue();

      // Bitfield field info is different from other field info;
      // it actually ignores the underlying LLVM struct because
      // there isn't any convenient mapping.
      CGT.addFieldInfo(*Field, offset / size);
      CGT.addBitFieldInfo(*Field, offset % size, BitFieldSize);
    } else {
      // Put the element into the struct. This would be simpler
      // if we didn't bother, but it seems a bit too strange to
      // allocate all structs as i8 arrays.
      while (llvmSize < offset) {
        LLVMFields.push_back(llvm::Type::Int8Ty);
        llvmSize += 8;
      }

      llvmSize += size;
      CGT.addFieldInfo(*Field, LLVMFields.size());
      LLVMFields.push_back(Ty);
    }
    ++curField;
  }

  while (llvmSize < RL.getSize()) {
    LLVMFields.push_back(llvm::Type::Int8Ty);
    llvmSize += 8;
  }

  STy = llvm::StructType::get(LLVMFields, true);
  assert(CGT.getTargetData().getTypeAllocSizeInBits(STy) == RL.getSize());
}

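// Editor's example (illustrative, not from the original source), assuming a
// typical target where int is 32 bits and 4-byte aligned:
//   struct S { char c; int i; };
// lowers to the packed struct <{ i8, i8, i8, i8, i32 }>, where the three i8
// fields after 'c' are the explicit padding inserted by the loops above.
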
/// layoutUnionFields - Do the actual work and lay out all fields. Create
/// corresponding llvm struct type. This should be invoked only after
/// all fields are added.
void RecordOrganizer::layoutUnionFields(const ASTRecordLayout &RL) {
  unsigned curField = 0;
  for (RecordDecl::field_iterator Field = RD.field_begin(),
                                  FieldEnd = RD.field_end();
       Field != FieldEnd; ++Field) {
    // The offset should usually be zero, but bitfields could be strange.
    uint64_t offset = RL.getFieldOffset(curField);
    CGT.ConvertTypeRecursive(Field->getType());

    if (Field->isBitField()) {
      Expr *BitWidth = Field->getBitWidth();
      uint64_t BitFieldSize =
        BitWidth->EvaluateAsInt(CGT.getContext()).getZExtValue();

      CGT.addFieldInfo(*Field, 0);
      CGT.addBitFieldInfo(*Field, offset, BitFieldSize);
    } else {
      CGT.addFieldInfo(*Field, 0);
    }
    ++curField;
  }

  // This looks stupid, but it is correct in the sense that
  // it works no matter how complicated the sizes and alignments
  // of the union elements are. The natural alignment
  // of the result doesn't matter because anyone allocating
  // structures should be aligning them appropriately anyway.
  // FIXME: We can be a bit more intuitive in a lot of cases.
  // FIXME: Make this a struct type to work around PR2399; the
  // C backend doesn't like structs using array types.
  std::vector<const llvm::Type*> LLVMFields;
  LLVMFields.push_back(llvm::ArrayType::get(llvm::Type::Int8Ty,
                                            RL.getSize() / 8));
  STy = llvm::StructType::get(LLVMFields, true);
  assert(CGT.getTargetData().getTypeAllocSizeInBits(STy) == RL.getSize());
}
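
// Editor's example (illustrative, not from the original source): on a target
// where double is 8 bytes,
//   union U { int i; double d; };
// occupies 8 bytes and lowers to <{ [8 x i8] }>; every member is assigned
// LLVM field index 0.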