clang API Documentation
//===--- CodeGenTypes.cpp - Type translation for LLVM CodeGen -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the code that handles AST -> LLVM type lowering.
//
//===----------------------------------------------------------------------===//

#include "CodeGenTypes.h"
#include "CGCXXABI.h"
#include "CGCall.h"
#include "CGOpenCLRuntime.h"
#include "CGRecordLayout.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Module.h"
using namespace clang;
using namespace CodeGen;

CodeGenTypes::CodeGenTypes(CodeGenModule &cgm)
  : CGM(cgm), Context(cgm.getContext()), TheModule(cgm.getModule()),
    TheDataLayout(cgm.getDataLayout()),
    Target(cgm.getTarget()), TheCXXABI(cgm.getCXXABI()),
    TheABIInfo(cgm.getTargetCodeGenInfo().getABIInfo()) {
  SkippedLayout = false;
}

CodeGenTypes::~CodeGenTypes() {
  llvm::DeleteContainerSeconds(CGRecordLayouts);

  for (llvm::FoldingSet<CGFunctionInfo>::iterator
       I = FunctionInfos.begin(), E = FunctionInfos.end(); I != E; )
    delete &*I++;
}

void CodeGenTypes::addRecordTypeName(const RecordDecl *RD,
                                     llvm::StructType *Ty,
                                     StringRef suffix) {
  SmallString<256> TypeName;
  llvm::raw_svector_ostream OS(TypeName);
  OS << RD->getKindName() << '.';

  // Name the codegen type after the typedef name
  // if there is no tag type name available.
  if (RD->getIdentifier()) {
    // FIXME: We should not have to check for a null decl context here.
    // Right now we do it because the implicit Obj-C decls don't have one.
    if (RD->getDeclContext())
      RD->printQualifiedName(OS);
    else
      RD->printName(OS);
  } else if (const TypedefNameDecl *TDD = RD->getTypedefNameForAnonDecl()) {
    // FIXME: We should not have to check for a null decl context here.
    // Right now we do it because the implicit Obj-C decls don't have one.
    if (TDD->getDeclContext())
      TDD->printQualifiedName(OS);
    else
      TDD->printName(OS);
  } else
    OS << "anon";

  if (!suffix.empty())
    OS << suffix;

  Ty->setName(OS.str());
}

/// ConvertTypeForMem - Convert type T into an llvm::Type. This differs from
/// ConvertType in that it is used to convert to the memory representation for
/// a type. For example, the scalar representation for _Bool is i1, but the
/// memory representation is usually i8 or i32, depending on the target.
llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T) {
  llvm::Type *R = ConvertType(T);

  // If this is a non-bool type, don't map it.
  if (!R->isIntegerTy(1))
    return R;

  // Otherwise, return an integer of the target-specified size.
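  // (Illustrative note: with a typical data layout where _Bool occupies
  // 8 bits, ConvertType gives i1 for 'bool' but this path returns i8; the
  // exact width comes from Context.getTypeSize and is target-dependent.)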
  return llvm::IntegerType::get(getLLVMContext(),
                                (unsigned)Context.getTypeSize(T));
}

/// isRecordLayoutComplete - Return true if the specified type is already
/// completely laid out.
bool CodeGenTypes::isRecordLayoutComplete(const Type *Ty) const {
  llvm::DenseMap<const Type*, llvm::StructType *>::const_iterator I =
    RecordDeclTypes.find(Ty);
  return I != RecordDeclTypes.end() && !I->second->isOpaque();
}

static bool
isSafeToConvert(QualType T, CodeGenTypes &CGT,
                llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked);

/// isSafeToConvert - Return true if it is safe to convert the specified record
/// decl to IR and lay it out, false if doing so would cause us to get into a
/// recursive compilation mess.
static bool
isSafeToConvert(const RecordDecl *RD, CodeGenTypes &CGT,
                llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked) {
  // If we have already checked this type (maybe the same type is used by-value
  // multiple times in multiple structure fields), don't check again.
  if (!AlreadyChecked.insert(RD)) return true;

  const Type *Key = CGT.getContext().getTagDeclType(RD).getTypePtr();

  // If this type is already laid out, converting it is a noop.
  if (CGT.isRecordLayoutComplete(Key)) return true;

  // If this type is currently being laid out, we can't recursively compile it.
  if (CGT.isRecordBeingLaidOut(Key))
    return false;

  // If this type would require laying out bases that are currently being laid
  // out, don't do it. This includes virtual base classes which get laid out
  // when a class is translated, even though they aren't embedded by-value into
  // the class.
  if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CRD->bases())
      if (!isSafeToConvert(I.getType()->getAs<RecordType>()->getDecl(),
                           CGT, AlreadyChecked))
        return false;
  }

  // If this type would require laying out members that are currently being
  // laid out, don't do it.
  for (const auto *I : RD->fields())
    if (!isSafeToConvert(I->getType(), CGT, AlreadyChecked))
      return false;

  // If there are no problems, let's do it.
  return true;
}

/// isSafeToConvert - Return true if it is safe to convert this field type,
/// which requires the structure elements contained by-value to all be
/// recursively safe to convert.
static bool
isSafeToConvert(QualType T, CodeGenTypes &CGT,
                llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked) {
  T = T.getCanonicalType();

  // If this is a record, check it.
  if (const RecordType *RT = dyn_cast<RecordType>(T))
    return isSafeToConvert(RT->getDecl(), CGT, AlreadyChecked);

  // If this is an array, check the elements, which are embedded inline.
  if (const ArrayType *AT = dyn_cast<ArrayType>(T))
    return isSafeToConvert(AT->getElementType(), CGT, AlreadyChecked);

  // Otherwise, there is no concern about transforming this. We only care about
  // things that are contained by-value in a structure that can have another
  // structure as a member.
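  // (For instance, a 'struct S *' field is a PointerType and falls through to
  // here, so it is always safe to convert even while S itself is still being
  // laid out.)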
  return true;
}

/// isSafeToConvert - Return true if it is safe to convert the specified record
/// decl to IR and lay it out, false if doing so would cause us to get into a
/// recursive compilation mess.
static bool isSafeToConvert(const RecordDecl *RD, CodeGenTypes &CGT) {
  // If no structs are being laid out, we can certainly do this one.
  if (CGT.noRecordsBeingLaidOut()) return true;

  llvm::SmallPtrSet<const RecordDecl*, 16> AlreadyChecked;
  return isSafeToConvert(RD, CGT, AlreadyChecked);
}

/// isFuncParamTypeConvertible - Return true if the specified type in a
/// function parameter or result position can be converted to an IR type at
/// this point. This boils down to whether the type is complete, and whether
/// we've temporarily deferred expanding it because we're in a recursive
/// context.
bool CodeGenTypes::isFuncParamTypeConvertible(QualType Ty) {
  // Some ABIs cannot represent member pointers in IR unless certain
  // conditions have been met.
  if (const auto *MPT = Ty->getAs<MemberPointerType>())
    return getCXXABI().isMemberPointerConvertible(MPT);

  // If this isn't a tagged type, we can convert it!
  const TagType *TT = Ty->getAs<TagType>();
  if (!TT) return true;

  // Incomplete types cannot be converted.
  if (TT->isIncompleteType())
    return false;

  // If this is an enum, then it is always safe to convert.
  const RecordType *RT = dyn_cast<RecordType>(TT);
  if (!RT) return true;

  // Otherwise, we have to be careful. If it is a struct that we're in the
  // process of expanding, then we can't convert the function type. That's ok
  // though because we must be in a pointer context under the struct, so we can
  // just convert it to a dummy type.
  //
  // We decide this by checking whether ConvertRecordDeclType returns us an
  // opaque type for a struct that we know is defined.
  return isSafeToConvert(RT->getDecl(), *this);
}

/// Verify that a given function type is complete, i.e. that the return type
/// and all of the parameter types are complete. Also check to see if we are in
/// a RS_StructPointer context, and if so whether any struct types have been
/// pended. If so, we don't want to ask the ABI lowering code to handle a type
/// that cannot be converted to an IR type.
bool CodeGenTypes::isFuncTypeConvertible(const FunctionType *FT) {
  if (!isFuncParamTypeConvertible(FT->getReturnType()))
    return false;

  if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
    for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++)
      if (!isFuncParamTypeConvertible(FPT->getParamType(i)))
        return false;

  return true;
}

/// UpdateCompletedType - When we find the full definition for a TagDecl,
/// replace the 'opaque' type we previously made for it if applicable.
void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
  // If this is an enum being completed, then we flush all non-struct types
  // from the cache. This allows function types and other things that may be
  // derived from the enum to be recomputed.
  if (const EnumDecl *ED = dyn_cast<EnumDecl>(TD)) {
    // Only flush the cache if we've actually already converted this type.
    if (TypeCache.count(ED->getTypeForDecl())) {
      // Okay, we formed some types based on this. We speculated that the enum
      // would be lowered to i32, so we only need to flush the cache if this
      // didn't happen.
      if (!ConvertType(ED->getIntegerType())->isIntegerTy(32))
        TypeCache.clear();
    }
    // If necessary, provide the full definition of a type only used with a
    // declaration so far.
    if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
      DI->completeType(ED);
    return;
  }

  // If we completed a RecordDecl that we previously used and converted to an
  // anonymous type, then go ahead and complete it now.
  const RecordDecl *RD = cast<RecordDecl>(TD);
  if (RD->isDependentType()) return;

  // Only complete it if we converted it already. If we haven't converted it
  // yet, we'll just do it lazily.
  if (RecordDeclTypes.count(Context.getTagDeclType(RD).getTypePtr()))
    ConvertRecordDeclType(RD);

  // If necessary, provide the full definition of a type only used with a
  // declaration so far.
  if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
    DI->completeType(RD);
}

static llvm::Type *getTypeForFormat(llvm::LLVMContext &VMContext,
                                    const llvm::fltSemantics &format,
                                    bool UseNativeHalf = false) {
  if (&format == &llvm::APFloat::IEEEhalf) {
    if (UseNativeHalf)
      return llvm::Type::getHalfTy(VMContext);
    else
      return llvm::Type::getInt16Ty(VMContext);
  }
  if (&format == &llvm::APFloat::IEEEsingle)
    return llvm::Type::getFloatTy(VMContext);
  if (&format == &llvm::APFloat::IEEEdouble)
    return llvm::Type::getDoubleTy(VMContext);
  if (&format == &llvm::APFloat::IEEEquad)
    return llvm::Type::getFP128Ty(VMContext);
  if (&format == &llvm::APFloat::PPCDoubleDouble)
    return llvm::Type::getPPC_FP128Ty(VMContext);
  if (&format == &llvm::APFloat::x87DoubleExtended)
    return llvm::Type::getX86_FP80Ty(VMContext);
  llvm_unreachable("Unknown float format!");
}

/// ConvertType - Convert the specified type to its LLVM form.
llvm::Type *CodeGenTypes::ConvertType(QualType T) {
  T = Context.getCanonicalType(T);

  const Type *Ty = T.getTypePtr();

  // RecordTypes are cached and processed specially.
  if (const RecordType *RT = dyn_cast<RecordType>(Ty))
    return ConvertRecordDeclType(RT->getDecl());

  // See if type is already cached.
  llvm::DenseMap<const Type *, llvm::Type *>::iterator TCI = TypeCache.find(Ty);
  // If type is found in map then use it. Otherwise, convert type T.
  if (TCI != TypeCache.end())
    return TCI->second;

  // If we don't have it in the cache, convert it now.
  llvm::Type *ResultType = nullptr;
  switch (Ty->getTypeClass()) {
  case Type::Record: // Handled above.
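  // The macro expansions below emit a case label for every non-canonical and
  // dependent type class in TypeNodes.def, keeping this switch exhaustive
  // without a default.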
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.def"
    llvm_unreachable("Non-canonical or dependent types aren't possible.");

  case Type::Builtin: {
    switch (cast<BuiltinType>(Ty)->getKind()) {
    case BuiltinType::Void:
    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
      // LLVM void type can only be used as the result of a function call. Just
      // map to the same as char.
      ResultType = llvm::Type::getInt8Ty(getLLVMContext());
      break;

    case BuiltinType::Bool:
      // Note that we always return bool as i1 for use as a scalar type.
      ResultType = llvm::Type::getInt1Ty(getLLVMContext());
      break;

    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::SChar:
    case BuiltinType::UChar:
    case BuiltinType::Short:
    case BuiltinType::UShort:
    case BuiltinType::Int:
    case BuiltinType::UInt:
    case BuiltinType::Long:
    case BuiltinType::ULong:
    case BuiltinType::LongLong:
    case BuiltinType::ULongLong:
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
    case BuiltinType::Char16:
    case BuiltinType::Char32:
      ResultType = llvm::IntegerType::get(getLLVMContext(),
                                 static_cast<unsigned>(Context.getTypeSize(T)));
      break;

    case BuiltinType::Half:
      // Half FP can either be storage-only (lowered to i16) or native.
      ResultType =
        getTypeForFormat(getLLVMContext(), Context.getFloatTypeSemantics(T),
                         Context.getLangOpts().NativeHalfType ||
                         Context.getLangOpts().HalfArgsAndReturns);
      break;
    case BuiltinType::Float:
    case BuiltinType::Double:
    case BuiltinType::LongDouble:
      ResultType = getTypeForFormat(getLLVMContext(),
                                    Context.getFloatTypeSemantics(T),
                                    /* UseNativeHalf = */ false);
      break;

    case BuiltinType::NullPtr:
      // Model std::nullptr_t as i8*.
      ResultType = llvm::Type::getInt8PtrTy(getLLVMContext());
      break;

    case BuiltinType::UInt128:
    case BuiltinType::Int128:
      ResultType = llvm::IntegerType::get(getLLVMContext(), 128);
      break;

    case BuiltinType::OCLImage1d:
    case BuiltinType::OCLImage1dArray:
    case BuiltinType::OCLImage1dBuffer:
    case BuiltinType::OCLImage2d:
    case BuiltinType::OCLImage2dArray:
    case BuiltinType::OCLImage3d:
    case BuiltinType::OCLSampler:
    case BuiltinType::OCLEvent:
      ResultType = CGM.getOpenCLRuntime().convertOpenCLSpecificType(Ty);
      break;

    case BuiltinType::Dependent:
#define BUILTIN_TYPE(Id, SingletonId)
#define PLACEHOLDER_TYPE(Id, SingletonId) \
    case BuiltinType::Id:
#include "clang/AST/BuiltinTypes.def"
      llvm_unreachable("Unexpected placeholder builtin type!");
    }
    break;
  }
  case Type::Auto:
    llvm_unreachable("Unexpected undeduced auto type!");
  case Type::Complex: {
    llvm::Type *EltTy = ConvertType(cast<ComplexType>(Ty)->getElementType());
    ResultType = llvm::StructType::get(EltTy, EltTy, NULL);
    break;
  }
  case Type::LValueReference:
  case Type::RValueReference: {
    const ReferenceType *RTy =
      cast<ReferenceType>(Ty);
    QualType ETy = RTy->getPointeeType();
    llvm::Type *PointeeType = ConvertTypeForMem(ETy);
    unsigned AS = Context.getTargetAddressSpace(ETy);
    ResultType = llvm::PointerType::get(PointeeType, AS);
    break;
  }
  case Type::Pointer: {
    const PointerType *PTy = cast<PointerType>(Ty);
    QualType ETy = PTy->getPointeeType();
    llvm::Type *PointeeType = ConvertTypeForMem(ETy);
    if (PointeeType->isVoidTy())
      PointeeType = llvm::Type::getInt8Ty(getLLVMContext());
    unsigned AS = Context.getTargetAddressSpace(ETy);
    ResultType = llvm::PointerType::get(PointeeType, AS);
    break;
  }

  case Type::VariableArray: {
    const VariableArrayType *A = cast<VariableArrayType>(Ty);
    assert(A->getIndexTypeCVRQualifiers() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // VLAs resolve to the innermost element type; this matches
    // the return of alloca, and there isn't any obviously better choice.
    ResultType = ConvertTypeForMem(A->getElementType());
    break;
  }
  case Type::IncompleteArray: {
    const IncompleteArrayType *A = cast<IncompleteArrayType>(Ty);
    assert(A->getIndexTypeCVRQualifiers() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // int X[] -> [0 x int], unless the element type is not sized. If it is
    // unsized (e.g. an incomplete struct) just use [0 x i8].
    ResultType = ConvertTypeForMem(A->getElementType());
    if (!ResultType->isSized()) {
      SkippedLayout = true;
      ResultType = llvm::Type::getInt8Ty(getLLVMContext());
    }
    ResultType = llvm::ArrayType::get(ResultType, 0);
    break;
  }
  case Type::ConstantArray: {
    const ConstantArrayType *A = cast<ConstantArrayType>(Ty);
    llvm::Type *EltTy = ConvertTypeForMem(A->getElementType());

    // Lower arrays of undefined struct type to arrays of i8 just to have a
    // concrete type.
    if (!EltTy->isSized()) {
      SkippedLayout = true;
      EltTy = llvm::Type::getInt8Ty(getLLVMContext());
    }

    ResultType = llvm::ArrayType::get(EltTy, A->getSize().getZExtValue());
    break;
  }
  case Type::ExtVector:
  case Type::Vector: {
    const VectorType *VT = cast<VectorType>(Ty);
    ResultType = llvm::VectorType::get(ConvertType(VT->getElementType()),
                                       VT->getNumElements());
    break;
  }
  case Type::FunctionNoProto:
  case Type::FunctionProto: {
    const FunctionType *FT = cast<FunctionType>(Ty);
    // First, check whether we can build the full function type. If the
    // function type depends on an incomplete type (e.g. a struct or enum), we
    // cannot lower the function type.
    if (!isFuncTypeConvertible(FT)) {
      // This function's type depends on an incomplete tag type.

      // Force conversion of all the relevant record types, to make sure
      // we re-convert the FunctionType when appropriate.
      if (const RecordType *RT = FT->getReturnType()->getAs<RecordType>())
        ConvertRecordDeclType(RT->getDecl());
      if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
        for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++)
          if (const RecordType *RT = FPT->getParamType(i)->getAs<RecordType>())
            ConvertRecordDeclType(RT->getDecl());

      // Return a placeholder type.
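      // (An empty literal struct serves as the dummy; setting SkippedLayout
      // below ensures the type cache is flushed once the blocking records are
      // completed, so this FunctionType gets re-converted.)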
      ResultType = llvm::StructType::get(getLLVMContext());

      SkippedLayout = true;
      break;
    }

    // While we're converting the parameter types for a function, we don't want
    // to recursively convert any pointed-to structs. Converting directly-used
    // structs is ok though.
    if (!RecordsBeingLaidOut.insert(Ty)) {
      ResultType = llvm::StructType::get(getLLVMContext());

      SkippedLayout = true;
      break;
    }

    // The function type can be built; call the appropriate routines to
    // build it.
    const CGFunctionInfo *FI;
    if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) {
      FI = &arrangeFreeFunctionType(
                CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0)));
    } else {
      const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(FT);
      FI = &arrangeFreeFunctionType(
                CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0)));
    }

    // If there is something higher level prodding our CGFunctionInfo, then
    // don't recurse into it again.
    if (FunctionsBeingProcessed.count(FI)) {
      ResultType = llvm::StructType::get(getLLVMContext());
      SkippedLayout = true;
    } else {
      // Otherwise, we're good to go; go ahead and convert it.
      ResultType = GetFunctionType(*FI);
    }

    RecordsBeingLaidOut.erase(Ty);

    if (SkippedLayout)
      TypeCache.clear();

    if (RecordsBeingLaidOut.empty())
      while (!DeferredRecords.empty())
        ConvertRecordDeclType(DeferredRecords.pop_back_val());
    break;
  }

  case Type::ObjCObject:
    ResultType = ConvertType(cast<ObjCObjectType>(Ty)->getBaseType());
    break;

  case Type::ObjCInterface: {
    // Objective-C interfaces are always opaque (outside of the
    // runtime, which can do whatever it likes); we never refine
    // these.
    llvm::Type *&T = InterfaceTypes[cast<ObjCInterfaceType>(Ty)];
    if (!T)
      T = llvm::StructType::create(getLLVMContext());
    ResultType = T;
    break;
  }

  case Type::ObjCObjectPointer: {
    // Protocol qualifications do not influence the LLVM type; we just return a
    // pointer to the underlying interface type. We don't need to worry about
    // recursive conversion.
    llvm::Type *T =
      ConvertTypeForMem(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
    ResultType = T->getPointerTo();
    break;
  }

  case Type::Enum: {
    const EnumDecl *ED = cast<EnumType>(Ty)->getDecl();
    if (ED->isCompleteDefinition() || ED->isFixed())
      return ConvertType(ED->getIntegerType());
    // Return a placeholder 'i32' type. This can be changed later when the
    // type is defined (see UpdateCompletedType), but is likely to be the
    // "right" answer.
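    // (For example, a forward-declared 'enum E' used only behind a pointer
    // lowers to i32 here; if E's eventual definition has a different
    // underlying integer type, UpdateCompletedType flushes the type cache.)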
    ResultType = llvm::Type::getInt32Ty(getLLVMContext());
    break;
  }

  case Type::BlockPointer: {
    const QualType FTy = cast<BlockPointerType>(Ty)->getPointeeType();
    llvm::Type *PointeeType = ConvertTypeForMem(FTy);
    unsigned AS = Context.getTargetAddressSpace(FTy);
    ResultType = llvm::PointerType::get(PointeeType, AS);
    break;
  }

  case Type::MemberPointer: {
    if (!getCXXABI().isMemberPointerConvertible(cast<MemberPointerType>(Ty)))
      return llvm::StructType::create(getLLVMContext());
    ResultType =
      getCXXABI().ConvertMemberPointerType(cast<MemberPointerType>(Ty));
    break;
  }

  case Type::Atomic: {
    QualType valueType = cast<AtomicType>(Ty)->getValueType();
    ResultType = ConvertTypeForMem(valueType);

    // Pad out to the inflated size if necessary.
    uint64_t valueSize = Context.getTypeSize(valueType);
    uint64_t atomicSize = Context.getTypeSize(Ty);
    if (valueSize != atomicSize) {
      assert(valueSize < atomicSize);
      llvm::Type *elts[] = {
        ResultType,
        llvm::ArrayType::get(CGM.Int8Ty, (atomicSize - valueSize) / 8)
      };
      ResultType = llvm::StructType::get(getLLVMContext(),
                                         llvm::makeArrayRef(elts));
    }
    break;
  }
  }

  assert(ResultType && "Didn't convert a type?");

  TypeCache[Ty] = ResultType;
  return ResultType;
}

bool CodeGenModule::isPaddedAtomicType(QualType type) {
  return isPaddedAtomicType(type->castAs<AtomicType>());
}

bool CodeGenModule::isPaddedAtomicType(const AtomicType *type) {
  return Context.getTypeSize(type) != Context.getTypeSize(type->getValueType());
}

/// ConvertRecordDeclType - Lay out a tagged decl type like struct or union.
llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) {
  // TagDecls are not necessarily unique; instead, use the (clang)
  // type connected to the decl.
  const Type *Key = Context.getTagDeclType(RD).getTypePtr();

  llvm::StructType *&Entry = RecordDeclTypes[Key];

  // If we don't have a StructType at all yet, create the forward declaration.
  if (!Entry) {
    Entry = llvm::StructType::create(getLLVMContext());
    addRecordTypeName(RD, Entry, "");
  }
  llvm::StructType *Ty = Entry;

  // If this is still a forward declaration, or the LLVM type is already
  // complete, there's nothing more to do.
  RD = RD->getDefinition();
  if (!RD || !RD->isCompleteDefinition() || !Ty->isOpaque())
    return Ty;

  // If converting this type would cause us to infinitely loop, don't do it!
  if (!isSafeToConvert(RD, *this)) {
    DeferredRecords.push_back(RD);
    return Ty;
  }

  // Okay, this is a definition of a type. Compile the implementation now.
  bool InsertResult = RecordsBeingLaidOut.insert(Key); (void)InsertResult;
  assert(InsertResult && "Recursively compiling a struct?");

  // Force conversion of non-virtual base classes recursively.
  if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CRD->bases()) {
      if (I.isVirtual()) continue;

      ConvertRecordDeclType(I.getType()->getAs<RecordType>()->getDecl());
    }
  }

  // Lay out the fields.
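  // (ComputeRecordLayout fills in the body of the opaque StructType Ty and
  // builds the CGRecordLayout mapping each FieldDecl to its IR field index
  // and bit-field info; see CGRecordLayoutBuilder.cpp.)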
  CGRecordLayout *Layout = ComputeRecordLayout(RD, Ty);
  CGRecordLayouts[Key] = Layout;

  // We're done laying out this struct.
  bool EraseResult = RecordsBeingLaidOut.erase(Key); (void)EraseResult;
  assert(EraseResult && "struct not in RecordsBeingLaidOut set?");

  // If this struct blocked a FunctionType conversion, then recompute whatever
  // was derived from that.
  // FIXME: This is hugely overconservative.
  if (SkippedLayout)
    TypeCache.clear();

  // If we're done converting the outer-most record, then convert any deferred
  // structs as well.
  if (RecordsBeingLaidOut.empty())
    while (!DeferredRecords.empty())
      ConvertRecordDeclType(DeferredRecords.pop_back_val());

  return Ty;
}

/// getCGRecordLayout - Return record layout info for the given record decl.
const CGRecordLayout &
CodeGenTypes::getCGRecordLayout(const RecordDecl *RD) {
  const Type *Key = Context.getTagDeclType(RD).getTypePtr();

  const CGRecordLayout *Layout = CGRecordLayouts.lookup(Key);
  if (!Layout) {
    // Compute the type information.
    ConvertRecordDeclType(RD);

    // Now try again.
    Layout = CGRecordLayouts.lookup(Key);
  }

  assert(Layout && "Unable to find record layout information for type");
  return *Layout;
}

bool CodeGenTypes::isZeroInitializable(QualType T) {
  // No need to check for member pointers when not compiling C++.
  if (!Context.getLangOpts().CPlusPlus)
    return true;

  T = Context.getBaseElementType(T);

  // Records are non-zero-initializable if they contain any
  // non-zero-initializable subobjects.
  if (const RecordType *RT = T->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    return isZeroInitializable(RD);
  }

  // We have to ask the ABI about member pointers.
  if (const MemberPointerType *MPT = T->getAs<MemberPointerType>())
    return getCXXABI().isZeroInitializable(MPT);

  // Everything else is okay.
  return true;
}

bool CodeGenTypes::isZeroInitializable(const CXXRecordDecl *RD) {
  return getCGRecordLayout(RD).isZeroInitializable();
}
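// Illustrative usage (a sketch, not part of the original file): client code in
// clang's CodeGen layer typically reaches this translation through the
// CodeGenModule. Assuming 'CGM' is a CodeGenModule and 'Ctx' its ASTContext:
//
//   CodeGenTypes &Types = CGM.getTypes();
//   llvm::Type *Scalar = Types.ConvertType(Ctx.BoolTy);        // i1
//   llvm::Type *InMem  = Types.ConvertTypeForMem(Ctx.BoolTy);  // usually i8
//
// Accessor names other than ConvertType/ConvertTypeForMem are assumptions and
// may differ across clang versions.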