//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGCXXABI.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_X86_64Win64: return llvm::CallingConv::X86_64_Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: Add support for __pascal to LLVM.
  case CC_X86Pascal: return llvm::CallingConv::C;
  // TODO: Add support for __vectorcall to LLVM.
  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}
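// Illustrative sketch (not part of the original file): the mapping above is
// what connects a source-level calling-convention attribute to the LLVM IR
// calling convention. For example, for a declaration such as
//
//   void __attribute__((stdcall)) f(int);
//
// the FunctionType's ExtInfo carries CC_X86StdCall, and
// ClangCallConvToLLVMCallConv turns that into llvm::CallingConv::X86_StdCall,
// which is later attached to the llvm::Function and its call sites.
// Conventions LLVM has no equivalent for (e.g. CC_X86Pascal above)
// deliberately fall back to the default C convention.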
/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
                                 false, None, FTNP->getExtInfo(),
                                 RequiredArgs(0));
}

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool IsInstanceMethod,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP) {
  RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
  // FIXME: Kill copy.
  for (unsigned i = 0, e = FTP->getNumParams(); i != e; ++i)
    prefix.push_back(FTP->getParamType(i));
  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
  return CGT.arrangeLLVMFunctionInfo(resultType, IsInstanceMethod, prefix,
                                     FTP->getExtInfo(), required);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeLLVMFunctionInfo(*this, false, argTypes, FTP);
}

static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<VectorCallAttr>())
    return CC_X86VectorCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<PnaclCallAttr>())
    return CC_PnaclCall;

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  if (D->hasAttr<MSABIAttr>())
    return IsWindows ? CC_C : CC_X86_64Win64;

  if (D->hasAttr<SysVABIAttr>())
    return IsWindows ? CC_X86_64SysV : CC_C;

  return CC_C;
}

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (A null RD means we don't have any meaningful "this" argument type,
/// so fall back to a generic pointer type.)
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  if (RD)
    argTypes.push_back(GetThisType(Context, RD));
  else
    argTypes.push_back(Context.VoidPtrTy);

  return ::arrangeLLVMFunctionInfo(
      *this, true, argTypes,
      FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}
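// Illustrative sketch (not part of the original file): for a member function
// such as
//
//   struct S { int get(int) const; };
//
// arrangeCXXMethodType prepends the implicit 'this' parameter, so the
// arranged signature behaves like the free function
//
//   int get(S *this, int);
//
// ignoring the method's const qualification, per GetThisType above. When the
// record is not known, a plain 'void*' stands in for 'this'.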
/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function.  The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQual<FunctionProtoType> prototype = GetFormalType(MD);

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
    return arrangeCXXMethodType(ThisType, prototype.getTypePtr());
  }

  return arrangeFreeFunctionType(prototype);
}

const CGFunctionInfo &
CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
                                            StructorType Type) {

  SmallVector<CanQualType, 16> argTypes;
  argTypes.push_back(GetThisType(Context, MD->getParent()));

  GlobalDecl GD;
  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
    GD = GlobalDecl(CD, toCXXCtorType(Type));
  } else {
    auto *DD = dyn_cast<CXXDestructorDecl>(MD);
    GD = GlobalDecl(DD, toCXXDtorType(Type));
  }

  CanQual<FunctionProtoType> FTP = GetFormalType(MD);

  // Add the formal parameters.
  for (unsigned i = 0, e = FTP->getNumParams(); i != e; ++i)
    argTypes.push_back(FTP->getParamType(i));

  TheCXXABI.buildStructorSignature(MD, Type, argTypes);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTypes.size()) : RequiredArgs::All);

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  CanQualType resultType = TheCXXABI.HasThisReturn(GD)
                               ? argTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;
  return arrangeLLVMFunctionInfo(resultType, true, argTypes, extInfo, required);
}

/// Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
                                        const CXXConstructorDecl *D,
                                        CXXCtorType CtorKind,
                                        unsigned ExtraArgs) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> ArgTypes;
  for (const auto &Arg : args)
    ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  CanQual<FunctionProtoType> FPT = GetFormalType(D);
  RequiredArgs Required = RequiredArgs::forPrototypePlus(FPT, 1 + ExtraArgs);
  GlobalDecl GD(D, CtorKind);
  CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
                               ? ArgTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;

  FunctionType::ExtInfo Info = FPT->getExtInfo();
  return arrangeLLVMFunctionInfo(ResultType, true, ArgTypes, Info, Required);
}
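// Illustrative note (not part of the original file): the result-type ternary
// above is where ABI differences in constructor/destructor return values are
// decided. Under ABIs like ARM's, where HasThisReturn is true, a constructor
// is arranged as if it returned its 'this' pointer
// (resultType == argTypes.front()); under the Itanium x86-64 ABI it simply
// returns void. Source code never sees this difference; it only affects the
// lowered LLVM signature.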
/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (isa<FunctionNoProtoType>(FTy)) {
    CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
    return arrangeLLVMFunctionInfo(noProto->getReturnType(), false, None,
                                   noProto->getExtInfo(), RequiredArgs::All);
  }

  assert(isa<FunctionProtoType>(FTy));
  return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>());
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type.  The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (const auto *I : MD->params()) {
    argTys.push_back(Context.getCanonicalParamType(I->getType()));
  }

  FunctionType::ExtInfo einfo;
  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(GetReturnType(MD->getReturnType()), false,
                                 argTys, einfo, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType()));

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType()));

  return arrangeFunctionDeclaration(FD);
}
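// Illustrative sketch (not part of the original file): an Objective-C method
//
//   - (int)addValue:(int)x;
//
// is arranged roughly like a C function taking the two implicit message-send
// parameters first:
//
//   int addValue(id self, SEL _cmd, int x);
//
// which matches the receiver and selector slots pushed above before the
// formal parameters.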
/// Arrange a thunk that takes 'this' as the first parameter followed by
/// varargs.  Return a void pointer, regardless of the actual return type.
/// The body of the thunk will end in a musttail call to a function of the
/// correct type, and the caller will bitcast the function to the correct
/// prototype.
const CGFunctionInfo &
CodeGenTypes::arrangeMSMemberPointerThunk(const CXXMethodDecl *MD) {
  assert(MD->isVirtual() && "only virtual memptrs have thunks");
  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
  CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) };
  return arrangeLLVMFunctionInfo(Context.VoidTy, false, ArgTys,
                                 FTP->getExtInfo(), RequiredArgs(1));
}

/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            CodeGenModule &CGM,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs) {
  assert(args.size() >= numExtraRequiredArgs);

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);

  // If we don't have a prototype at all, but we're supposed to
  // explicitly use the variadic convention for unprototyped calls,
  // treat all of the arguments as required but preserve the nominal
  // possibility of variadics.
  } else if (CGM.getTargetCodeGenInfo()
                 .isNoProtoCallVariadic(args,
                                        cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  return CGT.arrangeFreeFunctionCall(fnType->getReturnType(), args,
                                     fnType->getExtInfo(), required);
}

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments.  The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 0);
}

/// A block function call is essentially a free-function call with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1);
}

const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(QualType resultType,
                                      const CallArgList &args,
                                      FunctionType::ExtInfo info,
                                      RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
  return arrangeLLVMFunctionInfo(GetReturnType(resultType), false, argTypes,
                                 info, required);
}
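// Illustrative sketch (not part of the original file): RequiredArgs is how
// the variadic/fixed split survives into the ABI layer. For a call like
//
//   printf("%d %d\n", a, b);   // int printf(const char *, ...);
//
// arrangeFreeFunctionLikeCall sees a variadic prototype with one fixed
// parameter, so required == RequiredArgs(1): the format string is required
// and the two ints are lowered under the variadic rules. RequiredArgs::All,
// by contrast, marks every argument as a fixed, prototyped parameter.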
/// Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *FPT,
                                   RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  FunctionType::ExtInfo info = FPT->getExtInfo();
  return arrangeLLVMFunctionInfo(GetReturnType(FPT->getReturnType()), true,
                                 argTypes, info, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeFreeFunctionDeclaration(
    QualType resultType, const FunctionArgList &args,
    const FunctionType::ExtInfo &info, bool isVariadic) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (auto Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg->getType()));

  RequiredArgs required =
      (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
  return arrangeLLVMFunctionInfo(GetReturnType(resultType), false, argTypes,
                                 info, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(getContext().VoidTy, false, None,
                                 FunctionType::ExtInfo(), RequiredArgs::All);
}
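// Illustrative note (not part of the original file): every arrange* entry
// point above funnels into arrangeLLVMFunctionInfo below, which memoizes its
// results in a FoldingSet. Two requests with the same result type, argument
// types, ExtInfo, and required-argument count therefore return a reference to
// the same CGFunctionInfo node, e.g.:
//
//   const CGFunctionInfo &A = CGT.arrangeNullaryFunction();
//   const CGFunctionInfo &B = CGT.arrangeNullaryFunction();
//   assert(&A == &B);  // same canonical node, ABI info computed once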
/// Arrange the argument and result information for an abstract value
/// of a given function type.  This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
                                      bool IsInstanceMethod,
                                      ArrayRef<CanQualType> argTypes,
                                      FunctionType::ExtInfo info,
                                      RequiredArgs required) {
#ifndef NDEBUG
  for (ArrayRef<CanQualType>::const_iterator
         I = argTypes.begin(), E = argTypes.end(); I != E; ++I)
    assert(I->isCanonicalAsParam());
#endif

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Look up or create a unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, IsInstanceMethod, info, required, resultType,
                          argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  // Construct the function info.  We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, IsInstanceMethod, info, resultType, argTypes,
                              required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI); (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  getABIInfo().computeInfo(*FI);

  // Loop over all of the computed argument and return value info.  If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (auto &I : FI->arguments())
    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
      I.info.setCoerceToType(ConvertType(I.type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       bool IsInstanceMethod,
                                       const FunctionType::ExtInfo &info,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  void *buffer = operator new(sizeof(CGFunctionInfo) +
                              sizeof(ArgInfo) * (argTypes.size() + 1));
  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = IsInstanceMethod;
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->ArgStruct = nullptr;
  FI->NumArgs = argTypes.size();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  return FI;
}

/***/
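// Illustrative note (not part of the original file): CGFunctionInfo::create
// uses the classic trailing-array trick. One allocation holds the
// CGFunctionInfo header plus (argTypes.size() + 1) ArgInfo slots; slot 0 of
// getArgsBuffer() is the return type and slots 1..N are the parameters. For
// a two-argument function the memory layout is roughly:
//
//   [ CGFunctionInfo | ArgInfo(ret) | ArgInfo(arg0) | ArgInfo(arg1) ]
//
// which is why nodes must be created through this factory rather than a
// plain 'new CGFunctionInfo'.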
namespace {
// ABIArgInfo::Expand implementation.

// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
struct TypeExpansion {
  enum TypeExpansionKind {
    // Elements of constant arrays are expanded recursively.
    TEK_ConstantArray,
    // Record fields are expanded recursively (but if record is a union, only
    // the field with the largest size is expanded).
    TEK_Record,
    // For complex types, real and imaginary parts are expanded recursively.
    TEK_Complex,
    // All other types are not expandable.
    TEK_None
  };

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}
};

struct ConstantArrayExpansion : TypeExpansion {
  QualType EltTy;
  uint64_t NumElts;

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;
  }
};

struct RecordExpansion : TypeExpansion {
  SmallVector<const CXXBaseSpecifier *, 1> Bases;

  SmallVector<const FieldDecl *, 1> Fields;

  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
                  SmallVector<const FieldDecl *, 1> &&Fields)
      : TypeExpansion(TEK_Record), Bases(Bases), Fields(Fields) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;
  }
};

struct ComplexExpansion : TypeExpansion {
  QualType EltTy;

  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Complex;
  }
};

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
  }
};
} // namespace
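// Illustrative sketch (not part of the original file): getTypeExpansion below
// classifies a QualType into one of these nodes. Roughly:
//
//   int[4]                  -> ConstantArrayExpansion(int, 4)
//   struct {int a; float b;} -> RecordExpansion({}, {a, b})
//   _Complex double         -> ComplexExpansion(double)
//   int                     -> NoExpansion
//
// The classof hooks make the hierarchy usable with LLVM's isa/dyn_cast
// machinery, which is how the recursive walkers below dispatch on the kind.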
static std::unique_ptr<TypeExpansion>
getTypeExpansion(QualType Ty, const ASTContext &Context) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    return llvm::make_unique<ConstantArrayExpansion>(
        AT->getElementType(), AT->getSize().getZExtValue());
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    SmallVector<const CXXBaseSpecifier *, 1> Bases;
    SmallVector<const FieldDecl *, 1> Fields;
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can occur here only in degenerate cases: after flattening, all
      // the fields are the same.  Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        // Skip zero-length bit-fields.
        if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        Fields.push_back(LargestFD);
    } else {
      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        for (const CXXBaseSpecifier &BS : CXXRD->bases())
          Bases.push_back(&BS);
      }

      for (const auto *FD : RD->fields()) {
        // Skip zero-length bit-fields.
        if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);
      }
    }
    return llvm::make_unique<RecordExpansion>(std::move(Bases),
                                              std::move(Fields));
  }
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    return llvm::make_unique<ComplexExpansion>(CT->getElementType());
  }
  return llvm::make_unique<NoExpansion>();
}

static int getExpansionSize(QualType Ty, const ASTContext &Context) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
  }
  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    int Res = 0;
    for (auto BS : RExp->Bases)
      Res += getExpansionSize(BS->getType(), Context);
    for (auto FD : RExp->Fields)
      Res += getExpansionSize(FD->getType(), Context);
    return Res;
  }
  if (isa<ComplexExpansion>(Exp.get()))
    return 2;
  assert(isa<NoExpansion>(Exp.get()));
  return 1;
}

void
CodeGenTypes::getExpandedTypes(QualType Ty,
                               SmallVectorImpl<llvm::Type *>::iterator &TI) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      getExpandedTypes(CAExp->EltTy, TI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)
      getExpandedTypes(BS->getType(), TI);
    for (auto FD : RExp->Fields)
      getExpandedTypes(FD->getType(), TI);
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Type *EltTy = ConvertType(CExp->EltTy);
    *TI++ = EltTy;
    *TI++ = EltTy;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    *TI++ = ConvertType(Ty);
  }
}
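// Illustrative worked example (not part of the original file): for
//
//   struct P { int a; double b; _Complex float c; };
//
// getExpansionSize returns 1 + 1 + 2 == 4, and getExpandedTypes writes the
// flattened IR parameter list {i32, double, float, float}: one slot per
// scalar leaf, with the complex member contributing its real and imaginary
// halves.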
void CodeGenFunction::ExpandTypeFromArgs(
    QualType Ty, LValue LV, SmallVectorImpl<llvm::Argument *>::iterator &AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      llvm::Value *EltAddr = Builder.CreateConstGEP2_32(LV.getAddress(), 0, i);
      LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
      ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    llvm::Value *This = LV.getAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      llvm::Value *Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      LValue SubLV = MakeAddrLValue(Base, BS->getType());

      // Recurse onto bases.
      ExpandTypeFromArgs(BS->getType(), SubLV, AI);
    }
    for (auto FD : RExp->Fields) {
      // FIXME: What are the right qualifiers here?
      LValue SubLV = EmitLValueForField(LV, FD);
      ExpandTypeFromArgs(FD->getType(), SubLV, AI);
    }
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Value *RealAddr = Builder.CreateStructGEP(LV.getAddress(), 0, "real");
    EmitStoreThroughLValue(RValue::get(*AI++),
                           MakeAddrLValue(RealAddr, CExp->EltTy));
    llvm::Value *ImagAddr = Builder.CreateStructGEP(LV.getAddress(), 1, "imag");
    EmitStoreThroughLValue(RValue::get(*AI++),
                           MakeAddrLValue(ImagAddr, CExp->EltTy));
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    EmitStoreThroughLValue(RValue::get(*AI++), LV);
  }
}

void CodeGenFunction::ExpandTypeToArgs(
    QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy,
    SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    llvm::Value *Addr = RV.getAggregateAddr();
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, i);
      RValue EltRV =
          convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation());
      ExpandTypeToArgs(CAExp->EltTy, EltRV, IRFuncTy, IRCallArgs, IRCallArgPos);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    llvm::Value *This = RV.getAggregateAddr();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      llvm::Value *Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      RValue BaseRV = RValue::getAggregate(Base);

      // Recurse onto bases.
      ExpandTypeToArgs(BS->getType(), BaseRV, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }

    LValue LV = MakeAddrLValue(This, Ty);
    for (auto FD : RExp->Fields) {
      RValue FldRV = EmitRValueForField(LV, FD, SourceLocation());
      ExpandTypeToArgs(FD->getType(), FldRV, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    ComplexPairTy CV = RV.getComplexVal();
    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
  }
}
/// EnterStructPointerForCoercedAccess - Given a struct pointer from which we
/// are accessing some number of bytes, try to gep into the struct to get at
/// its inner goodness.  Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static llvm::Value *
EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it.  The
  // comparison must be made on the store size and not the alloca size.  Using
  // the alloca size may overstate the size of the load.
  uint64_t FirstEltSize =
      CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy =
      cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where
/// both are either integers or pointers.  This does a truncation of the value
/// if it is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits.  No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}
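// Illustrative worked example (not part of the original file): truncating the
// i64 value 0x1122334455667788 to i32 "as if through memory" yields different
// bits per target, and the code above reproduces exactly that:
//
//   little-endian: the low bytes come first in memory, so the result is the
//                  plain truncation 0x55667788;
//   big-endian:    the high bytes come first, so the value is shifted right
//                  by 32 first and the result is 0x11223344.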
/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                      llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy =
      cast<llvm::PointerType>(SrcPtr->getType())->getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(SrcPtr);

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
    SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits.  However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Casted =
        CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    return Load;
  }

  // Otherwise do coercion through memory.  This is stupid, but
  // simple.
  llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
  llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
  llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
  llvm::Value *SrcCasted = CGF.Builder.CreateBitCast(SrcPtr, I8PtrTy);
  // FIXME: Use better alignment.
  CGF.Builder.CreateMemCpy(Casted, SrcCasted,
                           llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
                           1, false);
  return CGF.Builder.CreateLoad(Tmp);
}
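// Illustrative note (not part of the original file): CreateCoercedLoad picks
// between three strategies. For the common coercion of loading a
// struct {i32, i32} as a single i64, source and destination sizes match, so
// it simply bitcasts the struct pointer to i64* and loads. Int/pointer
// mismatches go through CoerceIntOrPtrToIntOrPtr instead, and only when the
// source is strictly smaller than the destination does it fall back to a
// temporary alloca plus memcpy, leaving the extra destination bits undefined.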
// Function to store a first-class aggregate into memory.  We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          llvm::Value *DestPtr, bool DestIsVolatile,
                          bool LowAlignment) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy =
          dyn_cast<llvm::StructType>(Val->getType())) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(DestPtr, 0, i);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      llvm::StoreInst *SI = CGF.Builder.CreateStore(Elt, EltPtr,
                                                    DestIsVolatile);
      if (LowAlignment)
        SI->setAlignment(1);
    }
  } else {
    llvm::StoreInst *SI = CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile);
    if (LowAlignment)
      SI->setAlignment(1);
  }
}
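// Illustrative note (not part of the original file): given a first-class
// aggregate value of type {i32, float}, BuildAggStore emits
//
//   extractvalue + store i32   (element 0)
//   extractvalue + store float (element 1)
//
// rather than one aggregate store, because fast-isel handles a sequence of
// scalar stores better than a single store of a struct value.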
/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy =
      cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
    DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    llvm::Value *Casted =
        CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    // FIXME: Use better alignment / avoid requiring aligned store.
    BuildAggStore(CGF, Src, Casted, DstIsVolatile, true);
  } else {
    // Otherwise do coercion through memory.  This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits.  However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
    CGF.Builder.CreateStore(Src, Tmp);
    llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
    llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
    llvm::Value *DstCasted = CGF.Builder.CreateBitCast(DstPtr, I8PtrTy);
    // FIXME: Use better alignment.
    CGF.Builder.CreateMemCpy(DstCasted, Casted,
                             llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
                             1, false);
  }
}

namespace {

/// Encapsulates information about the way function arguments from
/// CGFunctionInfo should be passed to the actual LLVM IR function.
class ClangToLLVMArgMapping {
  static const unsigned InvalidIndex = ~0U;
  unsigned InallocaArgNo;
  unsigned SRetArgNo;
  unsigned TotalIRArgs;

  /// Arguments of the LLVM IR function corresponding to a single Clang
  /// argument.
  struct IRArgs {
    unsigned PaddingArgIndex;
    // Argument is expanded to IR arguments at positions
    // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
    unsigned FirstArgIndex;
    unsigned NumberOfArgs;

    IRArgs()
        : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
          NumberOfArgs(0) {}
  };

  SmallVector<IRArgs, 8> ArgInfo;

public:
  ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
                        bool OnlyRequiredArgs = false)
      : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
        ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
    construct(Context, FI, OnlyRequiredArgs);
  }

  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
  unsigned getInallocaArgNo() const {
    assert(hasInallocaArg());
    return InallocaArgNo;
  }

  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
  unsigned getSRetArgNo() const {
    assert(hasSRetArg());
    return SRetArgNo;
  }

  unsigned totalIRArgs() const { return TotalIRArgs; }

  bool hasPaddingArg(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
  }
  unsigned getPaddingArgNo(unsigned ArgNo) const {
    assert(hasPaddingArg(ArgNo));
    return ArgInfo[ArgNo].PaddingArgIndex;
  }

  /// Returns the index of the first IR argument corresponding to ArgNo,
  /// and the number of such arguments.
  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
                          ArgInfo[ArgNo].NumberOfArgs);
  }

private:
  void construct(const ASTContext &Context, const CGFunctionInfo &FI,
                 bool OnlyRequiredArgs);
};
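// Illustrative worked example (not part of the original file): for a function
// returning a large struct indirectly and taking (struct P p, int x), where P
// is expanded to two scalars, the mapping would be
//
//   IR arg 0: sret pointer        (SRetArgNo == 0)
//   IR args 1-2: expansion of p   (FirstArgIndex == 1, NumberOfArgs == 2)
//   IR arg 3: x                   (FirstArgIndex == 3, NumberOfArgs == 1)
//
// so totalIRArgs() == 4 even though the Clang-level signature has only two
// parameters.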
void ClangToLLVMArgMapping::construct(const ASTContext &Context,
                                      const CGFunctionInfo &FI,
                                      bool OnlyRequiredArgs) {
  unsigned IRArgNo = 0;
  bool SwapThisWithSRet = false;
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  if (RetAI.getKind() == ABIArgInfo::Indirect) {
    SwapThisWithSRet = RetAI.isSRetAfterThis();
    SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
  }

  unsigned ArgNo = 0;
  unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
       ++I, ++ArgNo) {
    assert(I != FI.arg_end());
    QualType ArgType = I->type;
    const ABIArgInfo &AI = I->info;
    // Collect data about IR arguments corresponding to Clang argument ArgNo.
    auto &IRArgs = ArgInfo[ArgNo];

    if (AI.getPaddingType())
      IRArgs.PaddingArgIndex = IRArgNo++;

    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // FIXME: handle sseregparm someday...
      llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
      if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
        IRArgs.NumberOfArgs = STy->getNumElements();
      } else {
        IRArgs.NumberOfArgs = 1;
      }
      break;
    }
    case ABIArgInfo::Indirect:
      IRArgs.NumberOfArgs = 1;
      break;
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      // ignore and inalloca don't have matching LLVM parameters.
      IRArgs.NumberOfArgs = 0;
      break;
    case ABIArgInfo::Expand: {
      IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
      break;
    }
    }

    if (IRArgs.NumberOfArgs > 0) {
      IRArgs.FirstArgIndex = IRArgNo;
      IRArgNo += IRArgs.NumberOfArgs;
    }

    // Skip over the sret parameter when it comes second.  We already
    // handled it above.
    if (IRArgNo == 1 && SwapThisWithSRet)
      IRArgNo++;
  }
  assert(ArgNo == ArgInfo.size());

  if (FI.usesInAlloca())
    InallocaArgNo = IRArgNo++;

  TotalIRArgs = IRArgNo;
}
} // namespace

/***/

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
  return ReturnTypeUsesSRet(FI) &&
         getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
    case BuiltinType::Double:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
    }
  }

  return false;
}

bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
    if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::LongDouble)
        return getTarget().useObjCFP2RetForComplexLongDouble();
    }
  }

  return false;
}

llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  return GetFunctionType(FI);
}
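// Illustrative note (not part of the original file): ReturnTypeUsesFPRet and
// ReturnTypeUsesFP2Ret exist for Objective-C message sends. On targets such
// as i386, a method returning float or double must be sent through
// objc_msgSend_fpret (the result comes back on the x87 floating-point
// stack), and on some targets a method returning _Complex long double uses
// the objc_msgSend_fp2ret variant; these predicates let the caller pick the
// right messenger function.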
llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {

  bool Inserted = FunctionsBeingProcessed.insert(&FI); (void)Inserted;
  assert(Inserted && "Recursively being processed?");

  llvm::Type *resultType = nullptr;
  const ABIArgInfo &retAI = FI.getReturnInfo();
  switch (retAI.getKind()) {
  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    resultType = retAI.getCoerceToType();
    break;

  case ABIArgInfo::InAlloca:
    if (retAI.getInAllocaSRet()) {
      // sret things on win32 aren't void, they return the sret pointer.
      QualType ret = FI.getReturnType();
      llvm::Type *ty = ConvertType(ret);
      unsigned addressSpace = Context.getTargetAddressSpace(ret);
      resultType = llvm::PointerType::get(ty, addressSpace);
    } else {
      resultType = llvm::Type::getVoidTy(getLLVMContext());
    }
    break;

  case ABIArgInfo::Indirect: {
    assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
    resultType = llvm::Type::getVoidTy(getLLVMContext());
    break;
  }

  case ABIArgInfo::Ignore:
    resultType = llvm::Type::getVoidTy(getLLVMContext());
    break;
  }

  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
  SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());

  // Add type for sret argument.
  if (IRFunctionArgs.hasSRetArg()) {
    QualType Ret = FI.getReturnType();
    llvm::Type *Ty = ConvertType(Ret);
    unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
    ArgTypes[IRFunctionArgs.getSRetArgNo()] =
        llvm::PointerType::get(Ty, AddressSpace);
  }

  // Add type for inalloca argument.
  if (IRFunctionArgs.hasInallocaArg()) {
    auto ArgStruct = FI.getArgStruct();
    assert(ArgStruct);
    ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
  }

  // Add in all of the required arguments.
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
                                     ie = it + FI.getNumRequiredArgs();
  for (; it != ie; ++it, ++ArgNo) {
    const ABIArgInfo &ArgInfo = it->info;

    // Insert a padding type to ensure proper alignment.
    if (IRFunctionArgs.hasPaddingArg(ArgNo))
      ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
          ArgInfo.getPaddingType();

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      assert(NumIRArgs == 0);
      break;

    case ABIArgInfo::Indirect: {
      assert(NumIRArgs == 1);
      // indirect arguments are always on the stack, which is addr space #0.
      llvm::Type *LTy = ConvertTypeForMem(it->type);
      ArgTypes[FirstIRArg] = LTy->getPointerTo();
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
      llvm::Type *argType = ArgInfo.getCoerceToType();
      llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
      if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
        assert(NumIRArgs == st->getNumElements());
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          ArgTypes[FirstIRArg + i] = st->getElementType(i);
      } else {
        assert(NumIRArgs == 1);
        ArgTypes[FirstIRArg] = argType;
      }
      break;
    }

    case ABIArgInfo::Expand:
      auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
      getExpandedTypes(it->type, ArgTypesIter);
      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
      break;
    }
  }

  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
  assert(Erased && "Not in set?");

  return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
}

llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  if (!isFuncTypeConvertible(FPT))
    return llvm::StructType::get(getLLVMContext());

  const CGFunctionInfo *Info;
  if (isa<CXXDestructorDecl>(MD))
    Info =
        &arrangeCXXStructorDeclaration(MD, getFromDtorType(GD.getDtorType()));
  else
    Info = &arrangeCXXMethodDeclaration(MD);
  return GetFunctionType(*Info);
}
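// Illustrative sketch (not part of the original file): the
// Direct-with-flattening case above is why a C function such as
//
//   struct S { int a; double b; };
//   void f(struct S s);
//
// can lower on some targets (x86-64 SysV, for instance) to an IR signature
// like
//
//   define void @f(i64 %s.coerce0, double %s.coerce1)
//
// instead of passing a first-class {i64, double} aggregate: each element of
// the coercion struct becomes its own scalar IR parameter.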
void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
                                           const Decl *TargetDecl,
                                           AttributeListType &PAL,
                                           unsigned &CallingConv,
                                           bool AttrOnCallSite) {
  llvm::AttrBuilder FuncAttrs;
  llvm::AttrBuilder RetAttrs;

  CallingConv = FI.getEffectiveCallingConvention();

  if (FI.isNoReturn())
    FuncAttrs.addAttribute(llvm::Attribute::NoReturn);

  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
    if (TargetDecl->hasAttr<NoDuplicateAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);

    if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
      const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
      if (FPT && FPT->isNothrow(getContext()))
        FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
      // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
      // These attributes are not inherited by overloads.
      const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
      if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
        FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
    }

    // 'const' and 'pure' attribute functions are also nounwind.
    if (TargetDecl->hasAttr<ConstAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    } else if (TargetDecl->hasAttr<PureAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    }
    if (TargetDecl->hasAttr<MallocAttr>())
      RetAttrs.addAttribute(llvm::Attribute::NoAlias);
    if (TargetDecl->hasAttr<ReturnsNonNullAttr>())
      RetAttrs.addAttribute(llvm::Attribute::NonNull);
  }

  if (CodeGenOpts.OptimizeSize)
    FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
  if (CodeGenOpts.OptimizeSize == 2)
    FuncAttrs.addAttribute(llvm::Attribute::MinSize);
  if (CodeGenOpts.DisableRedZone)
    FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
  if (CodeGenOpts.EnableSegmentedStacks &&
      !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
    FuncAttrs.addAttribute("split-stack");

  if (AttrOnCallSite) {
    // Attributes that should go on the call site only.
    if (!CodeGenOpts.SimplifyLibCalls)
      FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
  } else {
    // Attributes that should go on the function, but not the call site.
    if (!CodeGenOpts.DisableFPElim) {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
    } else if (CodeGenOpts.OmitLeafFramePointer) {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
      FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
    } else {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
      FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
    }

    FuncAttrs.addAttribute("less-precise-fpmad",
                           llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
    FuncAttrs.addAttribute("no-infs-fp-math",
                           llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
    FuncAttrs.addAttribute("no-nans-fp-math",
                           llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
    FuncAttrs.addAttribute("unsafe-fp-math",
                           llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
    FuncAttrs.addAttribute("use-soft-float",
                           llvm::toStringRef(CodeGenOpts.SoftFloat));
    FuncAttrs.addAttribute("stack-protector-buffer-size",
                           llvm::utostr(CodeGenOpts.SSPBufferSize));

    if (!CodeGenOpts.StackRealignment)
      FuncAttrs.addAttribute("no-realign-stack");
  }

  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);

  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
    if (RetTy->hasSignedIntegerRepresentation())
      RetAttrs.addAttribute(llvm::Attribute::SExt);
    else if (RetTy->hasUnsignedIntegerRepresentation())
      RetAttrs.addAttribute(llvm::Attribute::ZExt);
    // FALL THROUGH
  case ABIArgInfo::Direct:
    if (RetAI.getInReg())
      RetAttrs.addAttribute(llvm::Attribute::InReg);
    break;
  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::InAlloca:
  case ABIArgInfo::Indirect: {
    // inalloca and sret disable readnone and readonly
    FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
        .removeAttribute(llvm::Attribute::ReadNone);
    break;
  }

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }
01460 01461 if (const auto *RefTy = RetTy->getAs<ReferenceType>()) { 01462 QualType PTy = RefTy->getPointeeType(); 01463 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) 01464 RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy) 01465 .getQuantity()); 01466 else if (getContext().getTargetAddressSpace(PTy) == 0) 01467 RetAttrs.addAttribute(llvm::Attribute::NonNull); 01468 } 01469 01470 // Attach return attributes. 01471 if (RetAttrs.hasAttributes()) { 01472 PAL.push_back(llvm::AttributeSet::get( 01473 getLLVMContext(), llvm::AttributeSet::ReturnIndex, RetAttrs)); 01474 } 01475 01476 // Attach attributes to sret. 01477 if (IRFunctionArgs.hasSRetArg()) { 01478 llvm::AttrBuilder SRETAttrs; 01479 SRETAttrs.addAttribute(llvm::Attribute::StructRet); 01480 if (RetAI.getInReg()) 01481 SRETAttrs.addAttribute(llvm::Attribute::InReg); 01482 PAL.push_back(llvm::AttributeSet::get( 01483 getLLVMContext(), IRFunctionArgs.getSRetArgNo() + 1, SRETAttrs)); 01484 } 01485 01486 // Attach attributes to inalloca argument. 01487 if (IRFunctionArgs.hasInallocaArg()) { 01488 llvm::AttrBuilder Attrs; 01489 Attrs.addAttribute(llvm::Attribute::InAlloca); 01490 PAL.push_back(llvm::AttributeSet::get( 01491 getLLVMContext(), IRFunctionArgs.getInallocaArgNo() + 1, Attrs)); 01492 } 01493 01494 01495 unsigned ArgNo = 0; 01496 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(), 01497 E = FI.arg_end(); 01498 I != E; ++I, ++ArgNo) { 01499 QualType ParamType = I->type; 01500 const ABIArgInfo &AI = I->info; 01501 llvm::AttrBuilder Attrs; 01502 01503 // Add attribute for padding argument, if necessary. 01504 if (IRFunctionArgs.hasPaddingArg(ArgNo)) { 01505 if (AI.getPaddingInReg()) 01506 PAL.push_back(llvm::AttributeSet::get( 01507 getLLVMContext(), IRFunctionArgs.getPaddingArgNo(ArgNo) + 1, 01508 llvm::Attribute::InReg)); 01509 } 01510 01511 // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we 01512 // have the corresponding parameter variable. It doesn't make 01513 // sense to do it here because parameters are so messed up. 01514 switch (AI.getKind()) { 01515 case ABIArgInfo::Extend: 01516 if (ParamType->isSignedIntegerOrEnumerationType()) 01517 Attrs.addAttribute(llvm::Attribute::SExt); 01518 else if (ParamType->isUnsignedIntegerOrEnumerationType()) 01519 Attrs.addAttribute(llvm::Attribute::ZExt); 01520 // FALL THROUGH 01521 case ABIArgInfo::Direct: 01522 if (AI.getInReg()) 01523 Attrs.addAttribute(llvm::Attribute::InReg); 01524 break; 01525 01526 case ABIArgInfo::Indirect: 01527 if (AI.getInReg()) 01528 Attrs.addAttribute(llvm::Attribute::InReg); 01529 01530 if (AI.getIndirectByVal()) 01531 Attrs.addAttribute(llvm::Attribute::ByVal); 01532 01533 Attrs.addAlignmentAttr(AI.getIndirectAlign()); 01534 01535 // byval disables readnone and readonly. 01536 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) 01537 .removeAttribute(llvm::Attribute::ReadNone); 01538 break; 01539 01540 case ABIArgInfo::Ignore: 01541 case ABIArgInfo::Expand: 01542 continue; 01543 01544 case ABIArgInfo::InAlloca: 01545 // inalloca disables readnone and readonly. 
01546 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) 01547 .removeAttribute(llvm::Attribute::ReadNone); 01548 continue; 01549 } 01550 01551 if (const auto *RefTy = ParamType->getAs<ReferenceType>()) { 01552 QualType PTy = RefTy->getPointeeType(); 01553 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) 01554 Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy) 01555 .getQuantity()); 01556 else if (getContext().getTargetAddressSpace(PTy) == 0) 01557 Attrs.addAttribute(llvm::Attribute::NonNull); 01558 } 01559 01560 if (Attrs.hasAttributes()) { 01561 unsigned FirstIRArg, NumIRArgs; 01562 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 01563 for (unsigned i = 0; i < NumIRArgs; i++) 01564 PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), 01565 FirstIRArg + i + 1, Attrs)); 01566 } 01567 } 01568 assert(ArgNo == FI.arg_size()); 01569 01570 if (FuncAttrs.hasAttributes()) 01571 PAL.push_back(llvm:: 01572 AttributeSet::get(getLLVMContext(), 01573 llvm::AttributeSet::FunctionIndex, 01574 FuncAttrs)); 01575 } 01576 01577 /// An argument came in as a promoted argument; demote it back to its 01578 /// declared type. 01579 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF, 01580 const VarDecl *var, 01581 llvm::Value *value) { 01582 llvm::Type *varType = CGF.ConvertType(var->getType()); 01583 01584 // This can happen with promotions that actually don't change the 01585 // underlying type, like the enum promotions. 01586 if (value->getType() == varType) return value; 01587 01588 assert((varType->isIntegerTy() || varType->isFloatingPointTy()) 01589 && "unexpected promotion type"); 01590 01591 if (isa<llvm::IntegerType>(varType)) 01592 return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote"); 01593 01594 return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote"); 01595 } 01596 01597 /// Returns the attribute (either parameter attribute, or function 01598 /// attribute), which declares argument ArgNo to be non-null. 01599 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD, 01600 QualType ArgType, unsigned ArgNo) { 01601 // FIXME: __attribute__((nonnull)) can also be applied to: 01602 // - references to pointers, where the pointee is known to be 01603 // nonnull (apparently a Clang extension) 01604 // - transparent unions containing pointers 01605 // In the former case, LLVM IR cannot represent the constraint. In 01606 // the latter case, we have no guarantee that the transparent union 01607 // is in fact passed as a pointer. 01608 if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType()) 01609 return nullptr; 01610 // First, check attribute on parameter itself. 01611 if (PVD) { 01612 if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>()) 01613 return ParmNNAttr; 01614 } 01615 // Check function attributes. 01616 if (!FD) 01617 return nullptr; 01618 for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) { 01619 if (NNAttr->isNonNull(ArgNo)) 01620 return NNAttr; 01621 } 01622 return nullptr; 01623 } 01624 01625 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI, 01626 llvm::Function *Fn, 01627 const FunctionArgList &Args) { 01628 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) 01629 // Naked functions don't have prologues. 01630 return; 01631 01632 // If this is an implicit-return-zero function, go ahead and 01633 // initialize the return value. TODO: it might be nice to have 01634 // a more general mechanism for this that didn't require synthesized 01635 // return statements. 
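// [Editorial note: the canonical implicit-return-zero case is main, e.g.
//
//   int main() { }
//
// which C99 and C++ require to behave as if it ended in 'return 0;'. Sema
// marks such functions with hasImplicitReturnZero(), and the store of the
// null value below supplies that zero.]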
01636 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
01637 if (FD->hasImplicitReturnZero()) {
01638 QualType RetTy = FD->getReturnType().getUnqualifiedType();
01639 llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
01640 llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
01641 Builder.CreateStore(Zero, ReturnValue);
01642 }
01643 }
01644
01645 // FIXME: We no longer need the types from FunctionArgList; lift up and
01646 // simplify.
01647
01648 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
01649 // Flattened function arguments.
01650 SmallVector<llvm::Argument *, 16> FnArgs;
01651 FnArgs.reserve(IRFunctionArgs.totalIRArgs());
01652 for (auto &Arg : Fn->args()) {
01653 FnArgs.push_back(&Arg);
01654 }
01655 assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
01656
01657 // If we're using inalloca, all the memory arguments are GEPs off of the last
01658 // parameter, which is a pointer to the complete memory area.
01659 llvm::Value *ArgStruct = nullptr;
01660 if (IRFunctionArgs.hasInallocaArg()) {
01661 ArgStruct = FnArgs[IRFunctionArgs.getInallocaArgNo()];
01662 assert(ArgStruct->getType() == FI.getArgStruct()->getPointerTo());
01663 }
01664
01665 // Name the struct return parameter.
01666 if (IRFunctionArgs.hasSRetArg()) {
01667 auto AI = FnArgs[IRFunctionArgs.getSRetArgNo()];
01668 AI->setName("agg.result");
01669 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), AI->getArgNo() + 1,
01670 llvm::Attribute::NoAlias));
01671 }
01672
01673 // Track whether we received the parameter as a pointer (indirect, byval, or
01674 // inalloca). If we already have a pointer, EmitParmDecl doesn't need to copy
01675 // it into a local alloca for us.
01676 enum ValOrPointer { HaveValue = 0, HavePointer = 1 };
01677 typedef llvm::PointerIntPair<llvm::Value *, 1> ValueAndIsPtr;
01678 SmallVector<ValueAndIsPtr, 16> ArgVals;
01679 ArgVals.reserve(Args.size());
01680
01681 // Create a pointer value for every parameter declaration. This usually
01682 // entails copying one or more LLVM IR arguments into an alloca. Don't push
01683 // any cleanups or do anything that might unwind. We do that separately, so
01684 // we can push the cleanups in the correct order for the ABI.
01685 assert(FI.arg_size() == Args.size() &&
01686 "Mismatch between function signature & arguments.");
01687 unsigned ArgNo = 0;
01688 CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
01689 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
01690 i != e; ++i, ++info_it, ++ArgNo) {
01691 const VarDecl *Arg = *i;
01692 QualType Ty = info_it->type;
01693 const ABIArgInfo &ArgI = info_it->info;
01694
01695 bool isPromoted =
01696 isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
01697
01698 unsigned FirstIRArg, NumIRArgs;
01699 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
01700
01701 switch (ArgI.getKind()) {
01702 case ABIArgInfo::InAlloca: {
01703 assert(NumIRArgs == 0);
01704 llvm::Value *V = Builder.CreateStructGEP(
01705 ArgStruct, ArgI.getInAllocaFieldIndex(), Arg->getName());
01706 ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
01707 break;
01708 }
01709
01710 case ABIArgInfo::Indirect: {
01711 assert(NumIRArgs == 1);
01712 llvm::Value *V = FnArgs[FirstIRArg];
01713
01714 if (!hasScalarEvaluationKind(Ty)) {
01715 // Aggregates and complex variables are accessed by reference.
01716 // All we need to do is realign the value, if requested.
01717 if (ArgI.getIndirectRealign()) {
01718 llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");
01719
01720 // Copy from the incoming argument pointer to the temporary with the
01721 // appropriate alignment.
01722 //
01723 // FIXME: We should have a common utility for generating an aggregate
01724 // copy.
01725 llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
01726 CharUnits Size = getContext().getTypeSizeInChars(Ty);
01727 llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
01728 llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
01729 Builder.CreateMemCpy(Dst,
01730 Src,
01731 llvm::ConstantInt::get(IntPtrTy,
01732 Size.getQuantity()),
01733 ArgI.getIndirectAlign(),
01734 false);
01735 V = AlignedTemp;
01736 }
01737 ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
01738 } else {
01739 // Load scalar value from indirect argument.
01740 CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
01741 V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty,
01742 Arg->getLocStart());
01743
01744 if (isPromoted)
01745 V = emitArgumentDemotion(*this, Arg, V);
01746 ArgVals.push_back(ValueAndIsPtr(V, HaveValue));
01747 }
01748 break;
01749 }
01750
01751 case ABIArgInfo::Extend:
01752 case ABIArgInfo::Direct: {
01753
01754 // If we have the trivial case, handle it with no muss and fuss.
01755 if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
01756 ArgI.getCoerceToType() == ConvertType(Ty) &&
01757 ArgI.getDirectOffset() == 0) {
01758 assert(NumIRArgs == 1);
01759 auto AI = FnArgs[FirstIRArg];
01760 llvm::Value *V = AI;
01761
01762 if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
01763 if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
01764 PVD->getFunctionScopeIndex()))
01765 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
01766 AI->getArgNo() + 1,
01767 llvm::Attribute::NonNull));
01768
01769 QualType OTy = PVD->getOriginalType();
01770 if (const auto *ArrTy =
01771 getContext().getAsConstantArrayType(OTy)) {
01772 // A C99 array parameter declaration with the static keyword also
01773 // indicates dereferenceability, and if the size is constant we can
01774 // use the dereferenceable attribute (which requires the size in
01775 // bytes).
01776 if (ArrTy->getSizeModifier() == ArrayType::Static) {
01777 QualType ETy = ArrTy->getElementType();
01778 uint64_t ArrSize = ArrTy->getSize().getZExtValue();
01779 if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
01780 ArrSize) {
01781 llvm::AttrBuilder Attrs;
01782 Attrs.addDereferenceableAttr(
01783 getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
01784 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
01785 AI->getArgNo() + 1, Attrs));
01786 } else if (getContext().getTargetAddressSpace(ETy) == 0) {
01787 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
01788 AI->getArgNo() + 1,
01789 llvm::Attribute::NonNull));
01790 }
01791 }
01792 } else if (const auto *ArrTy =
01793 getContext().getAsVariableArrayType(OTy)) {
01794 // For C99 VLAs with the static keyword, we don't know the size so
01795 // we can't use the dereferenceable attribute, but in addrspace(0)
01796 // we know that it must be nonnull.
01797 if (ArrTy->getSizeModifier() == VariableArrayType::Static && 01798 !getContext().getTargetAddressSpace(ArrTy->getElementType())) 01799 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), 01800 AI->getArgNo() + 1, 01801 llvm::Attribute::NonNull)); 01802 } 01803 01804 const auto *AVAttr = PVD->getAttr<AlignValueAttr>(); 01805 if (!AVAttr) 01806 if (const auto *TOTy = dyn_cast<TypedefType>(OTy)) 01807 AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>(); 01808 if (AVAttr) { 01809 llvm::Value *AlignmentValue = 01810 EmitScalarExpr(AVAttr->getAlignment()); 01811 llvm::ConstantInt *AlignmentCI = 01812 cast<llvm::ConstantInt>(AlignmentValue); 01813 unsigned Alignment = 01814 std::min((unsigned) AlignmentCI->getZExtValue(), 01815 +llvm::Value::MaximumAlignment); 01816 01817 llvm::AttrBuilder Attrs; 01818 Attrs.addAlignmentAttr(Alignment); 01819 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), 01820 AI->getArgNo() + 1, Attrs)); 01821 } 01822 } 01823 01824 if (Arg->getType().isRestrictQualified()) 01825 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), 01826 AI->getArgNo() + 1, 01827 llvm::Attribute::NoAlias)); 01828 01829 // Ensure the argument is the correct type. 01830 if (V->getType() != ArgI.getCoerceToType()) 01831 V = Builder.CreateBitCast(V, ArgI.getCoerceToType()); 01832 01833 if (isPromoted) 01834 V = emitArgumentDemotion(*this, Arg, V); 01835 01836 if (const CXXMethodDecl *MD = 01837 dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl)) { 01838 if (MD->isVirtual() && Arg == CXXABIThisDecl) 01839 V = CGM.getCXXABI(). 01840 adjustThisParameterInVirtualFunctionPrologue(*this, CurGD, V); 01841 } 01842 01843 // Because of merging of function types from multiple decls it is 01844 // possible for the type of an argument to not match the corresponding 01845 // type in the function type. Since we are codegening the callee 01846 // in here, add a cast to the argument type. 01847 llvm::Type *LTy = ConvertType(Arg->getType()); 01848 if (V->getType() != LTy) 01849 V = Builder.CreateBitCast(V, LTy); 01850 01851 ArgVals.push_back(ValueAndIsPtr(V, HaveValue)); 01852 break; 01853 } 01854 01855 llvm::AllocaInst *Alloca = CreateMemTemp(Ty, Arg->getName()); 01856 01857 // The alignment we need to use is the max of the requested alignment for 01858 // the argument plus the alignment required by our access code below. 01859 unsigned AlignmentToUse = 01860 CGM.getDataLayout().getABITypeAlignment(ArgI.getCoerceToType()); 01861 AlignmentToUse = std::max(AlignmentToUse, 01862 (unsigned)getContext().getDeclAlign(Arg).getQuantity()); 01863 01864 Alloca->setAlignment(AlignmentToUse); 01865 llvm::Value *V = Alloca; 01866 llvm::Value *Ptr = V; // Pointer to store into. 01867 01868 // If the value is offset in memory, apply the offset now. 01869 if (unsigned Offs = ArgI.getDirectOffset()) { 01870 Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy()); 01871 Ptr = Builder.CreateConstGEP1_32(Ptr, Offs); 01872 Ptr = Builder.CreateBitCast(Ptr, 01873 llvm::PointerType::getUnqual(ArgI.getCoerceToType())); 01874 } 01875 01876 // Fast-isel and the optimizer generally like scalar values better than 01877 // FCAs, so we flatten them if this is safe to do for this argument. 
01878 llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType()); 01879 if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy && 01880 STy->getNumElements() > 1) { 01881 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy); 01882 llvm::Type *DstTy = 01883 cast<llvm::PointerType>(Ptr->getType())->getElementType(); 01884 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy); 01885 01886 if (SrcSize <= DstSize) { 01887 Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy)); 01888 01889 assert(STy->getNumElements() == NumIRArgs); 01890 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 01891 auto AI = FnArgs[FirstIRArg + i]; 01892 AI->setName(Arg->getName() + ".coerce" + Twine(i)); 01893 llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i); 01894 Builder.CreateStore(AI, EltPtr); 01895 } 01896 } else { 01897 llvm::AllocaInst *TempAlloca = 01898 CreateTempAlloca(ArgI.getCoerceToType(), "coerce"); 01899 TempAlloca->setAlignment(AlignmentToUse); 01900 llvm::Value *TempV = TempAlloca; 01901 01902 assert(STy->getNumElements() == NumIRArgs); 01903 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 01904 auto AI = FnArgs[FirstIRArg + i]; 01905 AI->setName(Arg->getName() + ".coerce" + Twine(i)); 01906 llvm::Value *EltPtr = Builder.CreateConstGEP2_32(TempV, 0, i); 01907 Builder.CreateStore(AI, EltPtr); 01908 } 01909 01910 Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse); 01911 } 01912 } else { 01913 // Simple case, just do a coerced store of the argument into the alloca. 01914 assert(NumIRArgs == 1); 01915 auto AI = FnArgs[FirstIRArg]; 01916 AI->setName(Arg->getName() + ".coerce"); 01917 CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this); 01918 } 01919 01920 01921 // Match to what EmitParmDecl is expecting for this type. 01922 if (CodeGenFunction::hasScalarEvaluationKind(Ty)) { 01923 V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty, Arg->getLocStart()); 01924 if (isPromoted) 01925 V = emitArgumentDemotion(*this, Arg, V); 01926 ArgVals.push_back(ValueAndIsPtr(V, HaveValue)); 01927 } else { 01928 ArgVals.push_back(ValueAndIsPtr(V, HavePointer)); 01929 } 01930 break; 01931 } 01932 01933 case ABIArgInfo::Expand: { 01934 // If this structure was expanded into multiple arguments then 01935 // we need to create a temporary and reconstruct it from the 01936 // arguments. 01937 llvm::AllocaInst *Alloca = CreateMemTemp(Ty); 01938 CharUnits Align = getContext().getDeclAlign(Arg); 01939 Alloca->setAlignment(Align.getQuantity()); 01940 LValue LV = MakeAddrLValue(Alloca, Ty, Align); 01941 ArgVals.push_back(ValueAndIsPtr(Alloca, HavePointer)); 01942 01943 auto FnArgIter = FnArgs.begin() + FirstIRArg; 01944 ExpandTypeFromArgs(Ty, LV, FnArgIter); 01945 assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs); 01946 for (unsigned i = 0, e = NumIRArgs; i != e; ++i) { 01947 auto AI = FnArgs[FirstIRArg + i]; 01948 AI->setName(Arg->getName() + "." + Twine(i)); 01949 } 01950 break; 01951 } 01952 01953 case ABIArgInfo::Ignore: 01954 assert(NumIRArgs == 0); 01955 // Initialize the local variable appropriately. 
01956 if (!hasScalarEvaluationKind(Ty)) {
01957 ArgVals.push_back(ValueAndIsPtr(CreateMemTemp(Ty), HavePointer));
01958 } else {
01959 llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
01960 ArgVals.push_back(ValueAndIsPtr(U, HaveValue));
01961 }
01962 break;
01963 }
01964 }
01965
01966 if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
01967 for (int I = Args.size() - 1; I >= 0; --I)
01968 EmitParmDecl(*Args[I], ArgVals[I].getPointer(), ArgVals[I].getInt(),
01969 I + 1);
01970 } else {
01971 for (unsigned I = 0, E = Args.size(); I != E; ++I)
01972 EmitParmDecl(*Args[I], ArgVals[I].getPointer(), ArgVals[I].getInt(),
01973 I + 1);
01974 }
01975 }
01976
01977 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
01978 while (insn->use_empty()) {
01979 llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
01980 if (!bitcast) return;
01981
01982 // This is "safe" because we would have used a ConstantExpr otherwise.
01983 insn = cast<llvm::Instruction>(bitcast->getOperand(0));
01984 bitcast->eraseFromParent();
01985 }
01986 }
01987
01988 /// Try to emit a fused autorelease of a return result.
01989 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
01990 llvm::Value *result) {
01991 // We must be immediately following the cast.
01992 llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
01993 if (BB->empty()) return nullptr;
01994 if (&BB->back() != result) return nullptr;
01995
01996 llvm::Type *resultType = result->getType();
01997
01998 // result is in a BasicBlock and is therefore an Instruction.
01999 llvm::Instruction *generator = cast<llvm::Instruction>(result);
02000
02001 SmallVector<llvm::Instruction*,4> insnsToKill;
02002
02003 // Look for:
02004 // %generator = bitcast %type1* %generator2 to %type2*
02005 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
02006 // We would have emitted this as a constant if the operand weren't
02007 // an Instruction.
02008 generator = cast<llvm::Instruction>(bitcast->getOperand(0));
02009
02010 // Require the generator to be immediately followed by the cast.
02011 if (generator->getNextNode() != bitcast)
02012 return nullptr;
02013
02014 insnsToKill.push_back(bitcast);
02015 }
02016
02017 // Look for:
02018 // %generator = call i8* @objc_retain(i8* %originalResult)
02019 // or
02020 // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
02021 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
02022 if (!call) return nullptr;
02023
02024 bool doRetainAutorelease;
02025
02026 if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) {
02027 doRetainAutorelease = true;
02028 } else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints()
02029 .objc_retainAutoreleasedReturnValue) {
02030 doRetainAutorelease = false;
02031
02032 // If we emitted an assembly marker for this call (and the
02033 // ARCEntrypoints field should have been set if so), go looking
02034 // for that call. If we can't find it, we can't do this
02035 // optimization. But it should always be the immediately previous
02036 // instruction, unless we needed bitcasts around the call.
02037 if (CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker) { 02038 llvm::Instruction *prev = call->getPrevNode(); 02039 assert(prev); 02040 if (isa<llvm::BitCastInst>(prev)) { 02041 prev = prev->getPrevNode(); 02042 assert(prev); 02043 } 02044 assert(isa<llvm::CallInst>(prev)); 02045 assert(cast<llvm::CallInst>(prev)->getCalledValue() == 02046 CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker); 02047 insnsToKill.push_back(prev); 02048 } 02049 } else { 02050 return nullptr; 02051 } 02052 02053 result = call->getArgOperand(0); 02054 insnsToKill.push_back(call); 02055 02056 // Keep killing bitcasts, for sanity. Note that we no longer care 02057 // about precise ordering as long as there's exactly one use. 02058 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) { 02059 if (!bitcast->hasOneUse()) break; 02060 insnsToKill.push_back(bitcast); 02061 result = bitcast->getOperand(0); 02062 } 02063 02064 // Delete all the unnecessary instructions, from latest to earliest. 02065 for (SmallVectorImpl<llvm::Instruction*>::iterator 02066 i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i) 02067 (*i)->eraseFromParent(); 02068 02069 // Do the fused retain/autorelease if we were asked to. 02070 if (doRetainAutorelease) 02071 result = CGF.EmitARCRetainAutoreleaseReturnValue(result); 02072 02073 // Cast back to the result type. 02074 return CGF.Builder.CreateBitCast(result, resultType); 02075 } 02076 02077 /// If this is a +1 of the value of an immutable 'self', remove it. 02078 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF, 02079 llvm::Value *result) { 02080 // This is only applicable to a method with an immutable 'self'. 02081 const ObjCMethodDecl *method = 02082 dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl); 02083 if (!method) return nullptr; 02084 const VarDecl *self = method->getSelfDecl(); 02085 if (!self->getType().isConstQualified()) return nullptr; 02086 02087 // Look for a retain call. 02088 llvm::CallInst *retainCall = 02089 dyn_cast<llvm::CallInst>(result->stripPointerCasts()); 02090 if (!retainCall || 02091 retainCall->getCalledValue() != CGF.CGM.getARCEntrypoints().objc_retain) 02092 return nullptr; 02093 02094 // Look for an ordinary load of 'self'. 02095 llvm::Value *retainedValue = retainCall->getArgOperand(0); 02096 llvm::LoadInst *load = 02097 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts()); 02098 if (!load || load->isAtomic() || load->isVolatile() || 02099 load->getPointerOperand() != CGF.GetAddrOfLocalVar(self)) 02100 return nullptr; 02101 02102 // Okay! Burn it all down. This relies for correctness on the 02103 // assumption that the retain is emitted as part of the return and 02104 // that thereafter everything is used "linearly". 02105 llvm::Type *resultType = result->getType(); 02106 eraseUnusedBitCasts(cast<llvm::Instruction>(result)); 02107 assert(retainCall->use_empty()); 02108 retainCall->eraseFromParent(); 02109 eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue)); 02110 02111 return CGF.Builder.CreateBitCast(load, resultType); 02112 } 02113 02114 /// Emit an ARC autorelease of the result of a function. 02115 /// 02116 /// \return the value to actually return from the function 02117 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF, 02118 llvm::Value *result) { 02119 // If we're returning 'self', kill the initial retain. 
02120 // This is a heuristic attempt to "encourage correctness" in the really
02121 // unfortunate case where we have a return of self during a dealloc and we
02122 // desperately need to avoid the possible autorelease.
02123 if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
02124 return self;
02125
02126 // At -O0, try to emit a fused retain/autorelease.
02127 if (CGF.shouldUseFusedARCCalls())
02128 if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
02129 return fused;
02130
02131 return CGF.EmitARCAutoreleaseReturnValue(result);
02132 }
02133
02134 /// Heuristically search for a dominating store to the return-value slot.
02135 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
02136 // If there are multiple uses of the return-value slot, just check
02137 // for something immediately preceding the IP. Sometimes this can
02138 // happen with how we generate implicit-returns; it can also happen
02139 // with noreturn cleanups.
02140 if (!CGF.ReturnValue->hasOneUse()) {
02141 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
02142 if (IP->empty()) return nullptr;
02143 llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(&IP->back());
02144 if (!store) return nullptr;
02145 if (store->getPointerOperand() != CGF.ReturnValue) return nullptr;
02146 assert(!store->isAtomic() && !store->isVolatile()); // see below
02147 return store;
02148 }
02149
02150 llvm::StoreInst *store =
02151 dyn_cast<llvm::StoreInst>(CGF.ReturnValue->user_back());
02152 if (!store) return nullptr;
02153
02154 // These aren't actually possible for non-coerced returns, and we
02155 // only care about non-coerced returns on this code path.
02156 assert(!store->isAtomic() && !store->isVolatile());
02157
02158 // Now do a quick-and-dirty dominance check: just walk up the
02159 // single-predecessor chain from the current insertion point.
02160 llvm::BasicBlock *StoreBB = store->getParent();
02161 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
02162 while (IP != StoreBB) {
02163 if (!(IP = IP->getSinglePredecessor()))
02164 return nullptr;
02165 }
02166
02167 // Okay, the store's basic block dominates the insertion point; we
02168 // can do our thing.
02169 return store;
02170 }
02171
02172 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
02173 bool EmitRetDbgLoc,
02174 SourceLocation EndLoc) {
02175 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
02176 // Naked functions don't have epilogues.
02177 Builder.CreateUnreachable();
02178 return;
02179 }
02180
02181 // Functions with no result always return void.
02182 if (!ReturnValue) {
02183 Builder.CreateRetVoid();
02184 return;
02185 }
02186
02187 llvm::DebugLoc RetDbgLoc;
02188 llvm::Value *RV = nullptr;
02189 QualType RetTy = FI.getReturnType();
02190 const ABIArgInfo &RetAI = FI.getReturnInfo();
02191
02192 switch (RetAI.getKind()) {
02193 case ABIArgInfo::InAlloca:
02194 // Aggregates get evaluated directly into the destination. Sometimes we
02195 // need to return the sret value in a register, though.
02196 assert(hasAggregateEvaluationKind(RetTy));
02197 if (RetAI.getInAllocaSRet()) {
02198 llvm::Function::arg_iterator EI = CurFn->arg_end();
02199 --EI;
02200 llvm::Value *ArgStruct = EI;
02201 llvm::Value *SRet =
02202 Builder.CreateStructGEP(ArgStruct, RetAI.getInAllocaFieldIndex());
02203 RV = Builder.CreateLoad(SRet, "sret");
02204 }
02205 break;
02206
02207 case ABIArgInfo::Indirect: {
02208 auto AI = CurFn->arg_begin();
02209 if (RetAI.isSRetAfterThis())
02210 ++AI;
02211 switch (getEvaluationKind(RetTy)) {
02212 case TEK_Complex: {
02213 ComplexPairTy RT =
02214 EmitLoadOfComplex(MakeNaturalAlignAddrLValue(ReturnValue, RetTy),
02215 EndLoc);
02216 EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(AI, RetTy),
02217 /*isInit*/ true);
02218 break;
02219 }
02220 case TEK_Aggregate:
02221 // Do nothing; aggregates get evaluated directly into the destination.
02222 break;
02223 case TEK_Scalar:
02224 EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
02225 MakeNaturalAlignAddrLValue(AI, RetTy),
02226 /*isInit*/ true);
02227 break;
02228 }
02229 break;
02230 }
02231
02232 case ABIArgInfo::Extend:
02233 case ABIArgInfo::Direct:
02234 if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
02235 RetAI.getDirectOffset() == 0) {
02236 // The internal return value temp will always have
02237 // pointer-to-return-type type; just do a load.
02238
02239 // If there is a dominating store to ReturnValue, we can elide
02240 // the load, zap the store, and usually zap the alloca.
02241 if (llvm::StoreInst *SI = findDominatingStoreToReturnValue(*this)) {
02242 // Reuse the debug location from the store unless there is
02243 // cleanup code to be emitted between the store and return
02244 // instruction.
02245 if (EmitRetDbgLoc && !AutoreleaseResult)
02246 RetDbgLoc = SI->getDebugLoc();
02247 // Get the stored value and nuke the now-dead store.
02248 RV = SI->getValueOperand();
02249 SI->eraseFromParent();
02250
02251 // If that was the only use of the return value, nuke it as well now.
02252 if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
02253 cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
02254 ReturnValue = nullptr;
02255 }
02256
02257 // Otherwise, we have to do a simple load.
02258 } else {
02259 RV = Builder.CreateLoad(ReturnValue);
02260 }
02261 } else {
02262 llvm::Value *V = ReturnValue;
02263 // If the value is offset in memory, apply the offset now.
02264 if (unsigned Offs = RetAI.getDirectOffset()) {
02265 V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
02266 V = Builder.CreateConstGEP1_32(V, Offs);
02267 V = Builder.CreateBitCast(V,
02268 llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
02269 }
02270
02271 RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
02272 }
02273
02274 // In ARC, end functions that return a retainable type with a call
02275 // to objc_autoreleaseReturnValue.
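// [Editorial sketch: e.g. for an ARC method like
//
//   - (id)foo { return self.bar; }
//
// the epilogue conceptually ends in
//
//   %1 = call i8* @objc_autoreleaseReturnValue(i8* %0)
//   ret i8* %1
//
// possibly fused with a preceding retain; see emitAutoreleaseOfResult above.]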
02276 if (AutoreleaseResult) { 02277 assert(getLangOpts().ObjCAutoRefCount && 02278 !FI.isReturnsRetained() && 02279 RetTy->isObjCRetainableType()); 02280 RV = emitAutoreleaseOfResult(*this, RV); 02281 } 02282 02283 break; 02284 02285 case ABIArgInfo::Ignore: 02286 break; 02287 02288 case ABIArgInfo::Expand: 02289 llvm_unreachable("Invalid ABI kind for return argument"); 02290 } 02291 02292 llvm::Instruction *Ret; 02293 if (RV) { 02294 if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute)) { 02295 if (auto RetNNAttr = CurGD.getDecl()->getAttr<ReturnsNonNullAttr>()) { 02296 SanitizerScope SanScope(this); 02297 llvm::Value *Cond = Builder.CreateICmpNE( 02298 RV, llvm::Constant::getNullValue(RV->getType())); 02299 llvm::Constant *StaticData[] = { 02300 EmitCheckSourceLocation(EndLoc), 02301 EmitCheckSourceLocation(RetNNAttr->getLocation()), 02302 }; 02303 EmitCheck(std::make_pair(Cond, SanitizerKind::ReturnsNonnullAttribute), 02304 "nonnull_return", StaticData, None); 02305 } 02306 } 02307 Ret = Builder.CreateRet(RV); 02308 } else { 02309 Ret = Builder.CreateRetVoid(); 02310 } 02311 02312 if (!RetDbgLoc.isUnknown()) 02313 Ret->setDebugLoc(RetDbgLoc); 02314 } 02315 02316 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) { 02317 const CXXRecordDecl *RD = type->getAsCXXRecordDecl(); 02318 return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory; 02319 } 02320 02321 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty) { 02322 // FIXME: Generate IR in one pass, rather than going back and fixing up these 02323 // placeholders. 02324 llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty); 02325 llvm::Value *Placeholder = 02326 llvm::UndefValue::get(IRTy->getPointerTo()->getPointerTo()); 02327 Placeholder = CGF.Builder.CreateLoad(Placeholder); 02328 return AggValueSlot::forAddr(Placeholder, CharUnits::Zero(), 02329 Ty.getQualifiers(), 02330 AggValueSlot::IsNotDestructed, 02331 AggValueSlot::DoesNotNeedGCBarriers, 02332 AggValueSlot::IsNotAliased); 02333 } 02334 02335 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args, 02336 const VarDecl *param, 02337 SourceLocation loc) { 02338 // StartFunction converted the ABI-lowered parameter(s) into a 02339 // local alloca. We need to turn that into an r-value suitable 02340 // for EmitCall. 02341 llvm::Value *local = GetAddrOfLocalVar(param); 02342 02343 QualType type = param->getType(); 02344 02345 // For the most part, we just need to load the alloca, except: 02346 // 1) aggregate r-values are actually pointers to temporaries, and 02347 // 2) references to non-scalars are pointers directly to the aggregate. 02348 // I don't know why references to scalars are different here. 02349 if (const ReferenceType *ref = type->getAs<ReferenceType>()) { 02350 if (!hasScalarEvaluationKind(ref->getPointeeType())) 02351 return args.add(RValue::getAggregate(local), type); 02352 02353 // Locals which are references to scalars are represented 02354 // with allocas holding the pointer. 
02355 return args.add(RValue::get(Builder.CreateLoad(local)), type); 02356 } 02357 02358 assert(!isInAllocaArgument(CGM.getCXXABI(), type) && 02359 "cannot emit delegate call arguments for inalloca arguments!"); 02360 02361 args.add(convertTempToRValue(local, type, loc), type); 02362 } 02363 02364 static bool isProvablyNull(llvm::Value *addr) { 02365 return isa<llvm::ConstantPointerNull>(addr); 02366 } 02367 02368 static bool isProvablyNonNull(llvm::Value *addr) { 02369 return isa<llvm::AllocaInst>(addr); 02370 } 02371 02372 /// Emit the actual writing-back of a writeback. 02373 static void emitWriteback(CodeGenFunction &CGF, 02374 const CallArgList::Writeback &writeback) { 02375 const LValue &srcLV = writeback.Source; 02376 llvm::Value *srcAddr = srcLV.getAddress(); 02377 assert(!isProvablyNull(srcAddr) && 02378 "shouldn't have writeback for provably null argument"); 02379 02380 llvm::BasicBlock *contBB = nullptr; 02381 02382 // If the argument wasn't provably non-null, we need to null check 02383 // before doing the store. 02384 bool provablyNonNull = isProvablyNonNull(srcAddr); 02385 if (!provablyNonNull) { 02386 llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback"); 02387 contBB = CGF.createBasicBlock("icr.done"); 02388 02389 llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull"); 02390 CGF.Builder.CreateCondBr(isNull, contBB, writebackBB); 02391 CGF.EmitBlock(writebackBB); 02392 } 02393 02394 // Load the value to writeback. 02395 llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary); 02396 02397 // Cast it back, in case we're writing an id to a Foo* or something. 02398 value = CGF.Builder.CreateBitCast(value, 02399 cast<llvm::PointerType>(srcAddr->getType())->getElementType(), 02400 "icr.writeback-cast"); 02401 02402 // Perform the writeback. 02403 02404 // If we have a "to use" value, it's something we need to emit a use 02405 // of. This has to be carefully threaded in: if it's done after the 02406 // release it's potentially undefined behavior (and the optimizer 02407 // will ignore it), and if it happens before the retain then the 02408 // optimizer could move the release there. 02409 if (writeback.ToUse) { 02410 assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong); 02411 02412 // Retain the new value. No need to block-copy here: the block's 02413 // being passed up the stack. 02414 value = CGF.EmitARCRetainNonBlock(value); 02415 02416 // Emit the intrinsic use here. 02417 CGF.EmitARCIntrinsicUse(writeback.ToUse); 02418 02419 // Load the old value (primitively). 02420 llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation()); 02421 02422 // Put the new value in place (primitively). 02423 CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false); 02424 02425 // Release the old value. 02426 CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime()); 02427 02428 // Otherwise, we can just do a normal lvalue store. 02429 } else { 02430 CGF.EmitStoreThroughLValue(RValue::get(value), srcLV); 02431 } 02432 02433 // Jump to the continuation block. 
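// [Editorial sketch of the control flow built above when the source address
// was not provably non-null:
//
//   ...             br i1 %icr.isnull, label %icr.done, label %icr.writeback
//   icr.writeback:  ; store the temporary back through the source l-value
//   icr.done:       ; continuation, emitted by the EmitBlock below
// ]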
02434 if (!provablyNonNull)
02435 CGF.EmitBlock(contBB);
02436 }
02437
02438 static void emitWritebacks(CodeGenFunction &CGF,
02439 const CallArgList &args) {
02440 for (const auto &I : args.writebacks())
02441 emitWriteback(CGF, I);
02442 }
02443
02444 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
02445 const CallArgList &CallArgs) {
02446 assert(CGF.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee());
02447 ArrayRef<CallArgList::CallArgCleanup> Cleanups =
02448 CallArgs.getCleanupsToDeactivate();
02449 // Iterate in reverse to increase the likelihood of popping the cleanup.
02450 for (ArrayRef<CallArgList::CallArgCleanup>::reverse_iterator
02451 I = Cleanups.rbegin(), E = Cleanups.rend(); I != E; ++I) {
02452 CGF.DeactivateCleanupBlock(I->Cleanup, I->IsActiveIP);
02453 I->IsActiveIP->eraseFromParent();
02454 }
02455 }
02456
02457 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
02458 if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
02459 if (uop->getOpcode() == UO_AddrOf)
02460 return uop->getSubExpr();
02461 return nullptr;
02462 }
02463
02464 /// Emit an argument that's being passed call-by-writeback. That is,
02465 /// we are passing the address of a temporary that is written back after the call.
02466 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
02467 const ObjCIndirectCopyRestoreExpr *CRE) {
02468 LValue srcLV;
02469
02470 // Make an optimistic effort to emit the address as an l-value.
02471 // This can fail if the argument expression is more complicated.
02472 if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
02473 srcLV = CGF.EmitLValue(lvExpr);
02474
02475 // Otherwise, just emit it as a scalar.
02476 } else {
02477 llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());
02478
02479 QualType srcAddrType =
02480 CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
02481 srcLV = CGF.MakeNaturalAlignAddrLValue(srcAddr, srcAddrType);
02482 }
02483 llvm::Value *srcAddr = srcLV.getAddress();
02484
02485 // The dest and src types don't necessarily match in LLVM terms
02486 // because of the crazy ObjC compatibility rules.
02487
02488 llvm::PointerType *destType =
02489 cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
02490
02491 // If the address is a constant null, just pass the appropriate null.
02492 if (isProvablyNull(srcAddr)) {
02493 args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
02494 CRE->getType());
02495 return;
02496 }
02497
02498 // Create the temporary.
02499 llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
02500 "icr.temp");
02501 // Loading an l-value can introduce a cleanup if the l-value is __weak,
02502 // and that cleanup will be conditional if we can't prove that the l-value
02503 // isn't null, so we need to register a dominating point so that the cleanups
02504 // system will make valid IR.
02505 CodeGenFunction::ConditionalEvaluation condEval(CGF);
02506
02507 // Zero-initialize it if we're not doing a copy-initialization.
02508 bool shouldCopy = CRE->shouldCopy();
02509 if (!shouldCopy) {
02510 llvm::Value *null =
02511 llvm::ConstantPointerNull::get(
02512 cast<llvm::PointerType>(destType->getElementType()));
02513 CGF.Builder.CreateStore(null, temp);
02514 }
02515
02516 llvm::BasicBlock *contBB = nullptr;
02517 llvm::BasicBlock *originBB = nullptr;
02518
02519 // If the address is *not* known to be non-null, we need to switch.
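// [Editorial note: the typical client of this machinery is an ObjC
// out-parameter, e.g.
//
//   NSError *err;
//   [obj doThingAndReturnError:&err];
//
// where the frontend wraps '&err' in an ObjCIndirectCopyRestoreExpr: a
// fresh temporary is passed instead, optionally copy-initialized from
// 'err', and written back into 'err' after the call.]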
02520 llvm::Value *finalArgument; 02521 02522 bool provablyNonNull = isProvablyNonNull(srcAddr); 02523 if (provablyNonNull) { 02524 finalArgument = temp; 02525 } else { 02526 llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull"); 02527 02528 finalArgument = CGF.Builder.CreateSelect(isNull, 02529 llvm::ConstantPointerNull::get(destType), 02530 temp, "icr.argument"); 02531 02532 // If we need to copy, then the load has to be conditional, which 02533 // means we need control flow. 02534 if (shouldCopy) { 02535 originBB = CGF.Builder.GetInsertBlock(); 02536 contBB = CGF.createBasicBlock("icr.cont"); 02537 llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy"); 02538 CGF.Builder.CreateCondBr(isNull, contBB, copyBB); 02539 CGF.EmitBlock(copyBB); 02540 condEval.begin(CGF); 02541 } 02542 } 02543 02544 llvm::Value *valueToUse = nullptr; 02545 02546 // Perform a copy if necessary. 02547 if (shouldCopy) { 02548 RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation()); 02549 assert(srcRV.isScalar()); 02550 02551 llvm::Value *src = srcRV.getScalarVal(); 02552 src = CGF.Builder.CreateBitCast(src, destType->getElementType(), 02553 "icr.cast"); 02554 02555 // Use an ordinary store, not a store-to-lvalue. 02556 CGF.Builder.CreateStore(src, temp); 02557 02558 // If optimization is enabled, and the value was held in a 02559 // __strong variable, we need to tell the optimizer that this 02560 // value has to stay alive until we're doing the store back. 02561 // This is because the temporary is effectively unretained, 02562 // and so otherwise we can violate the high-level semantics. 02563 if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 && 02564 srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) { 02565 valueToUse = src; 02566 } 02567 } 02568 02569 // Finish the control flow if we needed it. 02570 if (shouldCopy && !provablyNonNull) { 02571 llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock(); 02572 CGF.EmitBlock(contBB); 02573 02574 // Make a phi for the value to intrinsically use. 02575 if (valueToUse) { 02576 llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2, 02577 "icr.to-use"); 02578 phiToUse->addIncoming(valueToUse, copyBB); 02579 phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()), 02580 originBB); 02581 valueToUse = phiToUse; 02582 } 02583 02584 condEval.end(CGF); 02585 } 02586 02587 args.addWriteback(srcLV, temp, valueToUse); 02588 args.add(RValue::get(finalArgument), CRE->getType()); 02589 } 02590 02591 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) { 02592 assert(!StackBase && !StackCleanup.isValid()); 02593 02594 // Save the stack. 02595 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave); 02596 StackBase = CGF.Builder.CreateCall(F, "inalloca.save"); 02597 02598 // Control gets really tied up in landing pads, so we have to spill the 02599 // stacksave to an alloca to avoid violating SSA form. 02600 // TODO: This is dead if we never emit the cleanup. We should create the 02601 // alloca and store lazily on the first cleanup emission. 
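// [Editorial sketch of the resulting IR, using the names from this code:
//
//   %inalloca.save  = call i8* @llvm.stacksave()
//   %inalloca.spmem = alloca i8*
//   store i8* %inalloca.save, i8** %inalloca.spmem
//
// with a matching @llvm.stackrestore emitted by the cleanup, or issued
// directly on the non-exceptional path in freeArgumentMemory below.]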
02602 StackBaseMem = CGF.CreateTempAlloca(CGF.Int8PtrTy, "inalloca.spmem"); 02603 CGF.Builder.CreateStore(StackBase, StackBaseMem); 02604 CGF.pushStackRestore(EHCleanup, StackBaseMem); 02605 StackCleanup = CGF.EHStack.getInnermostEHScope(); 02606 assert(StackCleanup.isValid()); 02607 } 02608 02609 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const { 02610 if (StackBase) { 02611 CGF.DeactivateCleanupBlock(StackCleanup, StackBase); 02612 llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore); 02613 // We could load StackBase from StackBaseMem, but in the non-exceptional 02614 // case we can skip it. 02615 CGF.Builder.CreateCall(F, StackBase); 02616 } 02617 } 02618 02619 static void emitNonNullArgCheck(CodeGenFunction &CGF, RValue RV, 02620 QualType ArgType, SourceLocation ArgLoc, 02621 const FunctionDecl *FD, unsigned ParmNum) { 02622 if (!CGF.SanOpts.has(SanitizerKind::NonnullAttribute) || !FD) 02623 return; 02624 auto PVD = ParmNum < FD->getNumParams() ? FD->getParamDecl(ParmNum) : nullptr; 02625 unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum; 02626 auto NNAttr = getNonNullAttr(FD, PVD, ArgType, ArgNo); 02627 if (!NNAttr) 02628 return; 02629 CodeGenFunction::SanitizerScope SanScope(&CGF); 02630 assert(RV.isScalar()); 02631 llvm::Value *V = RV.getScalarVal(); 02632 llvm::Value *Cond = 02633 CGF.Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType())); 02634 llvm::Constant *StaticData[] = { 02635 CGF.EmitCheckSourceLocation(ArgLoc), 02636 CGF.EmitCheckSourceLocation(NNAttr->getLocation()), 02637 llvm::ConstantInt::get(CGF.Int32Ty, ArgNo + 1), 02638 }; 02639 CGF.EmitCheck(std::make_pair(Cond, SanitizerKind::NonnullAttribute), 02640 "nonnull_arg", StaticData, None); 02641 } 02642 02643 void CodeGenFunction::EmitCallArgs(CallArgList &Args, 02644 ArrayRef<QualType> ArgTypes, 02645 CallExpr::const_arg_iterator ArgBeg, 02646 CallExpr::const_arg_iterator ArgEnd, 02647 const FunctionDecl *CalleeDecl, 02648 unsigned ParamsToSkip, 02649 bool ForceColumnInfo) { 02650 CGDebugInfo *DI = getDebugInfo(); 02651 SourceLocation CallLoc; 02652 if (DI) CallLoc = DI->getLocation(); 02653 02654 // We *have* to evaluate arguments from right to left in the MS C++ ABI, 02655 // because arguments are destroyed left to right in the callee. 02656 if (CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) { 02657 // Insert a stack save if we're going to need any inalloca args. 02658 bool HasInAllocaArgs = false; 02659 for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end(); 02660 I != E && !HasInAllocaArgs; ++I) 02661 HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I); 02662 if (HasInAllocaArgs) { 02663 assert(getTarget().getTriple().getArch() == llvm::Triple::x86); 02664 Args.allocateArgumentMemory(*this); 02665 } 02666 02667 // Evaluate each argument. 02668 size_t CallArgsStart = Args.size(); 02669 for (int I = ArgTypes.size() - 1; I >= 0; --I) { 02670 CallExpr::const_arg_iterator Arg = ArgBeg + I; 02671 EmitCallArg(Args, *Arg, ArgTypes[I]); 02672 emitNonNullArgCheck(*this, Args.back().RV, ArgTypes[I], Arg->getExprLoc(), 02673 CalleeDecl, ParamsToSkip + I); 02674 // Restore the debug location. 02675 if (DI) DI->EmitLocation(Builder, CallLoc, ForceColumnInfo); 02676 } 02677 02678 // Un-reverse the arguments we just evaluated so they match up with the LLVM 02679 // IR function. 
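// [Editorial note: e.g. for a call f(a, b, c) under the Microsoft C++ ABI,
// the argument side effects are emitted in the order c, b, a; the
// std::reverse below restores the a, b, c order the LLVM call expects.]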
02680 std::reverse(Args.begin() + CallArgsStart, Args.end()); 02681 return; 02682 } 02683 02684 for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) { 02685 CallExpr::const_arg_iterator Arg = ArgBeg + I; 02686 assert(Arg != ArgEnd); 02687 EmitCallArg(Args, *Arg, ArgTypes[I]); 02688 emitNonNullArgCheck(*this, Args.back().RV, ArgTypes[I], Arg->getExprLoc(), 02689 CalleeDecl, ParamsToSkip + I); 02690 // Restore the debug location. 02691 if (DI) DI->EmitLocation(Builder, CallLoc, ForceColumnInfo); 02692 } 02693 } 02694 02695 namespace { 02696 02697 struct DestroyUnpassedArg : EHScopeStack::Cleanup { 02698 DestroyUnpassedArg(llvm::Value *Addr, QualType Ty) 02699 : Addr(Addr), Ty(Ty) {} 02700 02701 llvm::Value *Addr; 02702 QualType Ty; 02703 02704 void Emit(CodeGenFunction &CGF, Flags flags) override { 02705 const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor(); 02706 assert(!Dtor->isTrivial()); 02707 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false, 02708 /*Delegating=*/false, Addr); 02709 } 02710 }; 02711 02712 } 02713 02714 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E, 02715 QualType type) { 02716 if (const ObjCIndirectCopyRestoreExpr *CRE 02717 = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) { 02718 assert(getLangOpts().ObjCAutoRefCount); 02719 assert(getContext().hasSameType(E->getType(), type)); 02720 return emitWritebackArg(*this, args, CRE); 02721 } 02722 02723 assert(type->isReferenceType() == E->isGLValue() && 02724 "reference binding to unmaterialized r-value!"); 02725 02726 if (E->isGLValue()) { 02727 assert(E->getObjectKind() == OK_Ordinary); 02728 return args.add(EmitReferenceBindingToExpr(E), type); 02729 } 02730 02731 bool HasAggregateEvalKind = hasAggregateEvaluationKind(type); 02732 02733 // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee. 02734 // However, we still have to push an EH-only cleanup in case we unwind before 02735 // we make it to the call. 02736 if (HasAggregateEvalKind && 02737 CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) { 02738 // If we're using inalloca, use the argument memory. Otherwise, use a 02739 // temporary. 02740 AggValueSlot Slot; 02741 if (args.isUsingInAlloca()) 02742 Slot = createPlaceholderSlot(*this, type); 02743 else 02744 Slot = CreateAggTemp(type, "agg.tmp"); 02745 02746 const CXXRecordDecl *RD = type->getAsCXXRecordDecl(); 02747 bool DestroyedInCallee = 02748 RD && RD->hasNonTrivialDestructor() && 02749 CGM.getCXXABI().getRecordArgABI(RD) != CGCXXABI::RAA_Default; 02750 if (DestroyedInCallee) 02751 Slot.setExternallyDestructed(); 02752 02753 EmitAggExpr(E, Slot); 02754 RValue RV = Slot.asRValue(); 02755 args.add(RV, type); 02756 02757 if (DestroyedInCallee) { 02758 // Create a no-op GEP between the placeholder and the cleanup so we can 02759 // RAUW it successfully. It also serves as a marker of the first 02760 // instruction where the cleanup is active. 02761 pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddr(), type); 02762 // This unreachable is a temporary marker which will be removed later. 
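// [Editorial note: this is the MSVC convention where, for
//
//   struct S { ~S(); };
//   void f(S s);
//
// the callee destroys 's'. The caller therefore only needs the EH-only
// cleanup pushed above, covering the window between constructing the
// argument and the call actually being made.]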
02763 llvm::Instruction *IsActive = Builder.CreateUnreachable(); 02764 args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive); 02765 } 02766 return; 02767 } 02768 02769 if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) && 02770 cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) { 02771 LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr()); 02772 assert(L.isSimple()); 02773 if (L.getAlignment() >= getContext().getTypeAlignInChars(type)) { 02774 args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true); 02775 } else { 02776 // We can't represent a misaligned lvalue in the CallArgList, so copy 02777 // to an aligned temporary now. 02778 llvm::Value *tmp = CreateMemTemp(type); 02779 EmitAggregateCopy(tmp, L.getAddress(), type, L.isVolatile(), 02780 L.getAlignment()); 02781 args.add(RValue::getAggregate(tmp), type); 02782 } 02783 return; 02784 } 02785 02786 args.add(EmitAnyExprToTemp(E), type); 02787 } 02788 02789 QualType CodeGenFunction::getVarArgType(const Expr *Arg) { 02790 // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC 02791 // implicitly widens null pointer constants that are arguments to varargs 02792 // functions to pointer-sized ints. 02793 if (!getTarget().getTriple().isOSWindows()) 02794 return Arg->getType(); 02795 02796 if (Arg->getType()->isIntegerType() && 02797 getContext().getTypeSize(Arg->getType()) < 02798 getContext().getTargetInfo().getPointerWidth(0) && 02799 Arg->isNullPointerConstant(getContext(), 02800 Expr::NPC_ValueDependentIsNotNull)) { 02801 return getContext().getIntPtrType(); 02802 } 02803 02804 return Arg->getType(); 02805 } 02806 02807 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 02808 // optimizer it can aggressively ignore unwind edges. 02809 void 02810 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) { 02811 if (CGM.getCodeGenOpts().OptimizationLevel != 0 && 02812 !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions) 02813 Inst->setMetadata("clang.arc.no_objc_arc_exceptions", 02814 CGM.getNoObjCARCExceptionsMetadata()); 02815 } 02816 02817 /// Emits a call to the given no-arguments nounwind runtime function. 02818 llvm::CallInst * 02819 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee, 02820 const llvm::Twine &name) { 02821 return EmitNounwindRuntimeCall(callee, None, name); 02822 } 02823 02824 /// Emits a call to the given nounwind runtime function. 02825 llvm::CallInst * 02826 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee, 02827 ArrayRef<llvm::Value*> args, 02828 const llvm::Twine &name) { 02829 llvm::CallInst *call = EmitRuntimeCall(callee, args, name); 02830 call->setDoesNotThrow(); 02831 return call; 02832 } 02833 02834 /// Emits a simple call (never an invoke) to the given no-arguments 02835 /// runtime function. 02836 llvm::CallInst * 02837 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee, 02838 const llvm::Twine &name) { 02839 return EmitRuntimeCall(callee, None, name); 02840 } 02841 02842 /// Emits a simple call (never an invoke) to the given runtime 02843 /// function. 02844 llvm::CallInst * 02845 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee, 02846 ArrayRef<llvm::Value*> args, 02847 const llvm::Twine &name) { 02848 llvm::CallInst *call = Builder.CreateCall(callee, args, name); 02849 call->setCallingConv(getRuntimeCC()); 02850 return call; 02851 } 02852 02853 /// Emits a call or invoke to the given noreturn runtime function. 
02854 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee, 02855 ArrayRef<llvm::Value*> args) { 02856 if (getInvokeDest()) { 02857 llvm::InvokeInst *invoke = 02858 Builder.CreateInvoke(callee, 02859 getUnreachableBlock(), 02860 getInvokeDest(), 02861 args); 02862 invoke->setDoesNotReturn(); 02863 invoke->setCallingConv(getRuntimeCC()); 02864 } else { 02865 llvm::CallInst *call = Builder.CreateCall(callee, args); 02866 call->setDoesNotReturn(); 02867 call->setCallingConv(getRuntimeCC()); 02868 Builder.CreateUnreachable(); 02869 } 02870 PGO.setCurrentRegionUnreachable(); 02871 } 02872 02873 /// Emits a call or invoke instruction to the given nullary runtime 02874 /// function. 02875 llvm::CallSite 02876 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee, 02877 const Twine &name) { 02878 return EmitRuntimeCallOrInvoke(callee, None, name); 02879 } 02880 02881 /// Emits a call or invoke instruction to the given runtime function. 02882 llvm::CallSite 02883 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee, 02884 ArrayRef<llvm::Value*> args, 02885 const Twine &name) { 02886 llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name); 02887 callSite.setCallingConv(getRuntimeCC()); 02888 return callSite; 02889 } 02890 02891 llvm::CallSite 02892 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee, 02893 const Twine &Name) { 02894 return EmitCallOrInvoke(Callee, None, Name); 02895 } 02896 02897 /// Emits a call or invoke instruction to the given function, depending 02898 /// on the current state of the EH stack. 02899 llvm::CallSite 02900 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee, 02901 ArrayRef<llvm::Value *> Args, 02902 const Twine &Name) { 02903 llvm::BasicBlock *InvokeDest = getInvokeDest(); 02904 02905 llvm::Instruction *Inst; 02906 if (!InvokeDest) 02907 Inst = Builder.CreateCall(Callee, Args, Name); 02908 else { 02909 llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont"); 02910 Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name); 02911 EmitBlock(ContBB); 02912 } 02913 02914 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 02915 // optimizer it can aggressively ignore unwind edges. 02916 if (CGM.getLangOpts().ObjCAutoRefCount) 02917 AddObjCARCExceptionMetadata(Inst); 02918 02919 return Inst; 02920 } 02921 02922 /// \brief Store a non-aggregate value to an address to initialize it. For 02923 /// initialization, a non-atomic store will be used. 02924 static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src, 02925 LValue Dst) { 02926 if (Src.isScalar()) 02927 CGF.EmitStoreOfScalar(Src.getScalarVal(), Dst, /*init=*/true); 02928 else 02929 CGF.EmitStoreOfComplex(Src.getComplexVal(), Dst, /*init=*/true); 02930 } 02931 02932 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old, 02933 llvm::Value *New) { 02934 DeferredReplacements.push_back(std::make_pair(Old, New)); 02935 } 02936 02937 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, 02938 llvm::Value *Callee, 02939 ReturnValueSlot ReturnValue, 02940 const CallArgList &CallArgs, 02941 const Decl *TargetDecl, 02942 llvm::Instruction **callOrInvoke) { 02943 // FIXME: We no longer need the types from CallArgs; lift up and simplify. 02944 02945 // Handle struct-return functions by passing a pointer to the 02946 // location that we would like to return into. 
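// [Editorial sketch: i.e. a function returning a large aggregate, such as
//
//   struct Big { char buf[64]; };
//   struct Big g(void);
//
// is typically lowered to take a hidden out-parameter,
//
//   declare void @g(%struct.Big* sret)
//
// and SRetPtr below supplies that slot.]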
/// \brief Store a non-aggregate value to an address to initialize it.  For
/// initialization, a non-atomic store will be used.
static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src,
                                        LValue Dst) {
  if (Src.isScalar())
    CGF.EmitStoreOfScalar(Src.getScalarVal(), Dst, /*init=*/true);
  else
    CGF.EmitStoreOfComplex(Src.getComplexVal(), Dst, /*init=*/true);
}

void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
                                                  llvm::Value *New) {
  DeferredReplacements.push_back(std::make_pair(Old, New));
}

RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 ReturnValueSlot ReturnValue,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl,
                                 llvm::Instruction **callOrInvoke) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();

  llvm::FunctionType *IRFuncTy =
    cast<llvm::FunctionType>(
        cast<llvm::PointerType>(Callee->getType())->getElementType());

  // If we're using inalloca, insert the allocation after the stack save.
  // FIXME: Do this earlier rather than hacking it in here!
  llvm::Value *ArgMemory = nullptr;
  if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
    llvm::Instruction *IP = CallArgs.getStackBase();
    llvm::AllocaInst *AI;
    if (IP) {
      IP = IP->getNextNode();
      AI = new llvm::AllocaInst(ArgStruct, "argmem", IP);
    } else {
      AI = CreateTempAlloca(ArgStruct, "argmem");
    }
    AI->setUsedWithInAlloca(true);
    assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
    ArgMemory = AI;
  }

  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
  SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());

  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result, unless one is given to us.
  llvm::Value *SRetPtr = nullptr;
  if (RetAI.isIndirect() || RetAI.isInAlloca()) {
    SRetPtr = ReturnValue.getValue();
    if (!SRetPtr)
      SRetPtr = CreateMemTemp(RetTy);
    if (IRFunctionArgs.hasSRetArg()) {
      IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr;
    } else {
      llvm::Value *Addr =
          Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex());
      Builder.CreateStore(SRetPtr, Addr);
    }
  }
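  // Schematically (a sketch; %struct.S and @g are placeholder names), an
  // indirect return materializes as a leading sret pointer argument in the
  // IR rather than as an SSA result:
  //
  // \code
  //   %tmp = alloca %struct.S
  //   call void @g(%struct.S* sret %tmp)
  // \endcode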
  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it, ++ArgNo) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->RV;

    CharUnits TypeAlign = getContext().getTypeAlignInChars(I->Ty);

    // Insert a padding argument to ensure proper alignment.
    if (IRFunctionArgs.hasPaddingArg(ArgNo))
      IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
          llvm::UndefValue::get(ArgInfo.getPaddingType());

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::InAlloca: {
      assert(NumIRArgs == 0);
      assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
      if (RV.isAggregate()) {
        // Replace the placeholder with the appropriate argument slot GEP.
        llvm::Instruction *Placeholder =
            cast<llvm::Instruction>(RV.getAggregateAddr());
        CGBuilderTy::InsertPoint IP = Builder.saveIP();
        Builder.SetInsertPoint(Placeholder);
        llvm::Value *Addr = Builder.CreateStructGEP(
            ArgMemory, ArgInfo.getInAllocaFieldIndex());
        Builder.restoreIP(IP);
        deferPlaceholderReplacement(Placeholder, Addr);
      } else {
        // Store the RValue into the argument struct.
        llvm::Value *Addr =
            Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
        unsigned AS = Addr->getType()->getPointerAddressSpace();
        llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
        // There are some cases where a trivial bitcast is not avoidable.  The
        // definition of a type later in a translation unit may change its type
        // from {}* to (%struct.foo*)*.
        if (Addr->getType() != MemType)
          Addr = Builder.CreateBitCast(Addr, MemType);
        LValue argLV = MakeAddrLValue(Addr, I->Ty, TypeAlign);
        EmitInitStoreOfNonAggregate(*this, RV, argLV);
      }
      break;
    }

    case ABIArgInfo::Indirect: {
      assert(NumIRArgs == 1);
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
        if (ArgInfo.getIndirectAlign() > AI->getAlignment())
          AI->setAlignment(ArgInfo.getIndirectAlign());
        IRCallArgs[FirstIRArg] = AI;

        LValue argLV = MakeAddrLValue(AI, I->Ty, TypeAlign);
        EmitInitStoreOfNonAggregate(*this, RV, argLV);
      } else {
        // We want to avoid creating an unnecessary temporary+copy here;
        // however, we need one in three cases:
        // 1. If the argument is not byval, and we are required to copy the
        //    source.  (This case doesn't occur on any common architecture.)
        // 2. If the argument is byval, RV is not sufficiently aligned, and
        //    we cannot force it to be sufficiently aligned.
        // 3. If the argument is byval, but RV is located in an address space
        //    different than that of the argument (0).
        llvm::Value *Addr = RV.getAggregateAddr();
        unsigned Align = ArgInfo.getIndirectAlign();
        const llvm::DataLayout *TD = &CGM.getDataLayout();
        const unsigned RVAddrSpace = Addr->getType()->getPointerAddressSpace();
        const unsigned ArgAddrSpace =
            (FirstIRArg < IRFuncTy->getNumParams()
                 ? IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace()
                 : 0);
        if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
            (ArgInfo.getIndirectByVal() && TypeAlign.getQuantity() < Align &&
             llvm::getOrEnforceKnownAlignment(Addr, Align, TD) < Align) ||
            (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) {
          // Create an aligned temporary, and copy to it.
          llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
          if (Align > AI->getAlignment())
            AI->setAlignment(Align);
          IRCallArgs[FirstIRArg] = AI;
          EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
        } else {
          // Skip the extra memcpy call.
          IRCallArgs[FirstIRArg] = Addr;
        }
      }
      break;
    }

    case ABIArgInfo::Ignore:
      assert(NumIRArgs == 0);
      break;
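    // For the Extend/Direct handling below, coercion means the Clang-level
    // type and the ABI-level IR type differ.  A common instance (a sketch;
    // the exact coerced type is target-dependent): on x86-64, a struct of
    // two ints may be coerced to a single i64 loaded from the struct's
    // memory and passed in one register.
    //
    // \code
    //   struct S { int a, b; };  // Clang type
    //   void f(struct S s);      // IR (schematic): define void @f(i64)
    // \endcode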
    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
          ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
          ArgInfo.getDirectOffset() == 0) {
        assert(NumIRArgs == 1);
        llvm::Value *V;
        if (RV.isScalar())
          V = RV.getScalarVal();
        else
          V = Builder.CreateLoad(RV.getAggregateAddr());

        // We might have to widen integers, but we should never truncate.
        if (ArgInfo.getCoerceToType() != V->getType() &&
            V->getType()->isIntegerTy())
          V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());

        // If the argument doesn't match, perform a bitcast to coerce it.  This
        // can happen due to trivial type mismatches.
        if (FirstIRArg < IRFuncTy->getNumParams() &&
            V->getType() != IRFuncTy->getParamType(FirstIRArg))
          V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
        IRCallArgs[FirstIRArg] = V;
        break;
      }

      // FIXME: Avoid the conversion through memory if possible.
      llvm::Value *SrcPtr;
      if (RV.isScalar() || RV.isComplex()) {
        SrcPtr = CreateMemTemp(I->Ty, "coerce");
        LValue SrcLV = MakeAddrLValue(SrcPtr, I->Ty, TypeAlign);
        EmitInitStoreOfNonAggregate(*this, RV, SrcLV);
      } else
        SrcPtr = RV.getAggregateAddr();

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgInfo.getDirectOffset()) {
        SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
        SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
        SrcPtr = Builder.CreateBitCast(SrcPtr,
                     llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
      }

      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
      llvm::StructType *STy =
          dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
      if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
        llvm::Type *SrcTy =
            cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);

        // If the source type is smaller than the destination type of the
        // coerce-to logic, copy the source value into a temp alloca the size
        // of the destination type to allow loading all of it.  The bits past
        // the source value are left undef.
        if (SrcSize < DstSize) {
          llvm::AllocaInst *TempAlloca
            = CreateTempAlloca(STy, SrcPtr->getName() + ".coerce");
          Builder.CreateMemCpy(TempAlloca, SrcPtr, SrcSize, 0);
          SrcPtr = TempAlloca;
        } else {
          SrcPtr = Builder.CreateBitCast(SrcPtr,
                                         llvm::PointerType::getUnqual(STy));
        }

        assert(NumIRArgs == STy->getNumElements());
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
          llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
          // We don't know what we're loading from.
          LI->setAlignment(1);
          IRCallArgs[FirstIRArg + i] = LI;
        }
      } else {
        // In the simple case, just pass the coerced loaded value.
        assert(NumIRArgs == 1);
        IRCallArgs[FirstIRArg] =
            CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(), *this);
      }

      break;
    }

    case ABIArgInfo::Expand:
      unsigned IRArgPos = FirstIRArg;
      ExpandTypeToArgs(I->Ty, RV, IRFuncTy, IRCallArgs, IRArgPos);
      assert(IRArgPos == FirstIRArg + NumIRArgs);
      break;
    }
  }
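  // The Expand case above turns one Clang-level argument into several IR
  // arguments, one per field, recursively.  For example (a sketch; whether a
  // given type is expanded is ABI- and target-dependent):
  //
  // \code
  //   struct P { float x; float y; };
  //   void f(struct P p);   // IR (schematic): define void @f(float, float)
  // \endcode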
  if (ArgMemory) {
    llvm::Value *Arg = ArgMemory;
    if (CallInfo.isVariadic()) {
      // When passing non-POD arguments by value to variadic functions, we will
      // end up with a variadic prototype and an inalloca call site.  In such
      // cases, we can't do any parameter mismatch checks.  Give up and bitcast
      // the callee.
      unsigned CalleeAS =
          cast<llvm::PointerType>(Callee->getType())->getAddressSpace();
      Callee = Builder.CreateBitCast(
          Callee, getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS));
    } else {
      llvm::Type *LastParamTy =
          IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
      if (Arg->getType() != LastParamTy) {
#ifndef NDEBUG
        // Assert that these structs have equivalent element types.
        llvm::StructType *FullTy = CallInfo.getArgStruct();
        llvm::StructType *DeclaredTy = cast<llvm::StructType>(
            cast<llvm::PointerType>(LastParamTy)->getElementType());
        assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
        for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
                                                 DE = DeclaredTy->element_end(),
                                                 FI = FullTy->element_begin();
             DI != DE; ++DI, ++FI)
          assert(*DI == *FI);
#endif
        Arg = Builder.CreateBitCast(Arg, LastParamTy);
      }
    }
    assert(IRFunctionArgs.hasInallocaArg());
    IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
  }

  if (!CallArgs.getCleanupsToDeactivate().empty())
    deactivateArgCleanupsBeforeCall(*this, CallArgs);

  // If the callee is a bitcast of a function to a varargs pointer to function
  // type, check to see if we can remove the bitcast.  This handles some cases
  // with unprototyped functions.
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
      llvm::PointerType *CurPT = cast<llvm::PointerType>(Callee->getType());
      llvm::FunctionType *CurFT =
          cast<llvm::FunctionType>(CurPT->getElementType());
      llvm::FunctionType *ActualFT = CalleeF->getFunctionType();

      if (CE->getOpcode() == llvm::Instruction::BitCast &&
          ActualFT->getReturnType() == CurFT->getReturnType() &&
          ActualFT->getNumParams() == CurFT->getNumParams() &&
          ActualFT->getNumParams() == IRCallArgs.size() &&
          (CurFT->isVarArg() || !ActualFT->isVarArg())) {
        bool ArgsMatch = true;
        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
            ArgsMatch = false;
            break;
          }

        // Strip the cast if we can get away with it.  This is a nice cleanup,
        // but also allows us to inline the function at -O0 if it is marked
        // always_inline.
        if (ArgsMatch)
          Callee = CalleeF;
      }
    }
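  // The unprototyped-function case above arises in C code such as (a sketch):
  //
  // \code
  //   void f();                   // no prototype: callee used via bitcast
  //   void g(void) { f(1, 2); }   // call site sees only the old declaration
  //   void f(int a, int b) {}     // later definition fixes the IR type
  // \endcode
  //
  // When the argument types line up as checked above, the bitcast is
  // stripped and the call site refers to @f directly.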
  assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
    // An inalloca argument can have a different type.
    if (IRFunctionArgs.hasInallocaArg() &&
        i == IRFunctionArgs.getInallocaArgNo())
      continue;
    if (i < IRFuncTy->getNumParams())
      assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
  }

  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList,
                             CallingConv, true);
  llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(),
                                                     AttributeList);

  llvm::BasicBlock *InvokeDest = nullptr;
  if (!Attrs.hasAttribute(llvm::AttributeSet::FunctionIndex,
                          llvm::Attribute::NoUnwind))
    InvokeDest = getInvokeDest();

  llvm::CallSite CS;
  if (!InvokeDest) {
    CS = Builder.CreateCall(Callee, IRCallArgs);
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, IRCallArgs);
    EmitBlock(Cont);
  }
  if (callOrInvoke)
    *callOrInvoke = CS.getInstruction();

  if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
      !CS.hasFnAttr(llvm::Attribute::NoInline))
    Attrs =
        Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
                           llvm::Attribute::AlwaysInline);

  CS.setAttributes(Attrs);
  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(CS.getInstruction());

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters
    // generally are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
    CI->setName("call");

  // Emit any writebacks immediately.  Arguably this should happen
  // after any return-value munging.
  if (CallArgs.hasWritebacks())
    emitWritebacks(*this, CallArgs);

  // The stack cleanup for inalloca arguments has to run out of the normal
  // lexical order, so deactivate it and run it manually here.
  CallArgs.freeArgumentMemory(*this);
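  // For a noreturn callee the preceding block is therefore terminated on the
  // spot; schematically (a sketch, with a placeholder callee):
  //
  // \code
  //   call void @abort() noreturn
  //   unreachable
  // \endcode
  //
  // and codegen continues in a dummy block whose contents are trivially
  // discardable.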
  RValue Ret = [&] {
    switch (RetAI.getKind()) {
    case ABIArgInfo::InAlloca:
    case ABIArgInfo::Indirect:
      return convertTempToRValue(SRetPtr, RetTy, SourceLocation());

    case ABIArgInfo::Ignore:
      // If we are ignoring an argument that had a result, make sure to
      // construct the appropriate return value for our caller.
      return GetUndefRValue(RetTy);

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      llvm::Type *RetIRTy = ConvertType(RetTy);
      if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
        switch (getEvaluationKind(RetTy)) {
        case TEK_Complex: {
          llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
          llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
          return RValue::getComplex(std::make_pair(Real, Imag));
        }
        case TEK_Aggregate: {
          llvm::Value *DestPtr = ReturnValue.getValue();
          bool DestIsVolatile = ReturnValue.isVolatile();

          if (!DestPtr) {
            DestPtr = CreateMemTemp(RetTy, "agg.tmp");
            DestIsVolatile = false;
          }
          BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
          return RValue::getAggregate(DestPtr);
        }
        case TEK_Scalar: {
          // If the value doesn't match, perform a bitcast to coerce it.  This
          // can happen due to trivial type mismatches.
          llvm::Value *V = CI;
          if (V->getType() != RetIRTy)
            V = Builder.CreateBitCast(V, RetIRTy);
          return RValue::get(V);
        }
        }
        llvm_unreachable("bad evaluation kind");
      }

      llvm::Value *DestPtr = ReturnValue.getValue();
      bool DestIsVolatile = ReturnValue.isVolatile();

      if (!DestPtr) {
        DestPtr = CreateMemTemp(RetTy, "coerce");
        DestIsVolatile = false;
      }

      // If the value is offset in memory, apply the offset now.
      llvm::Value *StorePtr = DestPtr;
      if (unsigned Offs = RetAI.getDirectOffset()) {
        StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
        StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
        StorePtr = Builder.CreateBitCast(StorePtr,
                       llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
      }
      CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);

      return convertTempToRValue(DestPtr, RetTy, SourceLocation());
    }

    case ABIArgInfo::Expand:
      llvm_unreachable("Invalid ABI kind for return argument");
    }

    llvm_unreachable("Unhandled ABIArgInfo::Kind");
  }();

  if (Ret.isScalar() && TargetDecl) {
    if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
      llvm::Value *OffsetValue = nullptr;
      if (const auto *Offset = AA->getOffset())
        OffsetValue = EmitScalarExpr(Offset);

      llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
      llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
      EmitAlignmentAssumption(Ret.getScalarVal(), AlignmentCI->getZExtValue(),
                              OffsetValue);
    }
  }

  return Ret;
}

/* VarArg handling */

llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
  return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
}
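// EmitVAArg is reached when lowering a C va_arg expression; the target's
// ABIInfo decides how the va_list is stepped and where the value lives.
// For example (a sketch):
//
// \code
//   int first(int n, ...) {
//     __builtin_va_list ap;
//     __builtin_va_start(ap, n);
//     int v = __builtin_va_arg(ap, int);  // lowered through EmitVAArg
//     __builtin_va_end(ap);
//     return v;
//   }
// \endcode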