clang API Documentation
00001 //===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===// 00002 // 00003 // The LLVM Compiler Infrastructure 00004 // 00005 // This file is distributed under the University of Illinois Open Source 00006 // License. See LICENSE.TXT for details. 00007 // 00008 //===----------------------------------------------------------------------===// 00009 // 00010 // This coordinates the per-function state used while generating code. 00011 // 00012 //===----------------------------------------------------------------------===// 00013 00014 #include "CodeGenFunction.h" 00015 #include "CGCUDARuntime.h" 00016 #include "CGCXXABI.h" 00017 #include "CGDebugInfo.h" 00018 #include "CGOpenMPRuntime.h" 00019 #include "CodeGenModule.h" 00020 #include "CodeGenPGO.h" 00021 #include "TargetInfo.h" 00022 #include "clang/AST/ASTContext.h" 00023 #include "clang/AST/Decl.h" 00024 #include "clang/AST/DeclCXX.h" 00025 #include "clang/AST/StmtCXX.h" 00026 #include "clang/Basic/TargetInfo.h" 00027 #include "clang/CodeGen/CGFunctionInfo.h" 00028 #include "clang/Frontend/CodeGenOptions.h" 00029 #include "llvm/IR/DataLayout.h" 00030 #include "llvm/IR/Intrinsics.h" 00031 #include "llvm/IR/MDBuilder.h" 00032 #include "llvm/IR/Operator.h" 00033 using namespace clang; 00034 using namespace CodeGen; 00035 00036 CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext) 00037 : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()), 00038 Builder(cgm.getModule().getContext(), llvm::ConstantFolder(), 00039 CGBuilderInserterTy(this)), 00040 CurFn(nullptr), CapturedStmtInfo(nullptr), 00041 SanOpts(CGM.getLangOpts().Sanitize), IsSanitizerScope(false), 00042 CurFuncIsThunk(false), AutoreleaseResult(false), SawAsmBlock(false), 00043 BlockInfo(nullptr), BlockPointer(nullptr), 00044 LambdaThisCaptureField(nullptr), NormalCleanupDest(nullptr), 00045 NextCleanupDestIndex(1), FirstBlockInfo(nullptr), EHResumeBlock(nullptr), 00046 ExceptionSlot(nullptr), EHSelectorSlot(nullptr), 00047 DebugInfo(CGM.getModuleDebugInfo()), DisableDebugInfo(false), 00048 DidCallStackSave(false), IndirectBranch(nullptr), PGO(cgm), 00049 SwitchInsn(nullptr), SwitchWeights(nullptr), CaseRangeBlock(nullptr), 00050 UnreachableBlock(nullptr), NumReturnExprs(0), NumSimpleReturnExprs(0), 00051 CXXABIThisDecl(nullptr), CXXABIThisValue(nullptr), CXXThisValue(nullptr), 00052 CXXDefaultInitExprThis(nullptr), CXXStructorImplicitParamDecl(nullptr), 00053 CXXStructorImplicitParamValue(nullptr), OutermostConditional(nullptr), 00054 CurLexicalScope(nullptr), TerminateLandingPad(nullptr), 00055 TerminateHandler(nullptr), TrapBB(nullptr) { 00056 if (!suppressNewContext) 00057 CGM.getCXXABI().getMangleContext().startNewFunction(); 00058 00059 llvm::FastMathFlags FMF; 00060 if (CGM.getLangOpts().FastMath) 00061 FMF.setUnsafeAlgebra(); 00062 if (CGM.getLangOpts().FiniteMathOnly) { 00063 FMF.setNoNaNs(); 00064 FMF.setNoInfs(); 00065 } 00066 Builder.SetFastMathFlags(FMF); 00067 } 00068 00069 CodeGenFunction::~CodeGenFunction() { 00070 assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup"); 00071 00072 // If there are any unclaimed block infos, go ahead and destroy them 00073 // now. This can happen if IR-gen gets clever and skips evaluating 00074 // something. 00075 if (FirstBlockInfo) 00076 destroyBlockInfos(FirstBlockInfo); 00077 00078 if (getLangOpts().OpenMP) { 00079 CGM.getOpenMPRuntime().FunctionFinished(*this); 00080 } 00081 } 00082 00083 LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) { 00084 CharUnits Alignment; 00085 if (CGM.getCXXABI().isTypeInfoCalculable(T)) { 00086 Alignment = getContext().getTypeAlignInChars(T); 00087 unsigned MaxAlign = getContext().getLangOpts().MaxTypeAlign; 00088 if (MaxAlign && Alignment.getQuantity() > MaxAlign && 00089 !getContext().isAlignmentRequired(T)) 00090 Alignment = CharUnits::fromQuantity(MaxAlign); 00091 } 00092 return LValue::MakeAddr(V, T, Alignment, getContext(), CGM.getTBAAInfo(T)); 00093 } 00094 00095 llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) { 00096 return CGM.getTypes().ConvertTypeForMem(T); 00097 } 00098 00099 llvm::Type *CodeGenFunction::ConvertType(QualType T) { 00100 return CGM.getTypes().ConvertType(T); 00101 } 00102 00103 TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) { 00104 type = type.getCanonicalType(); 00105 while (true) { 00106 switch (type->getTypeClass()) { 00107 #define TYPE(name, parent) 00108 #define ABSTRACT_TYPE(name, parent) 00109 #define NON_CANONICAL_TYPE(name, parent) case Type::name: 00110 #define DEPENDENT_TYPE(name, parent) case Type::name: 00111 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name: 00112 #include "clang/AST/TypeNodes.def" 00113 llvm_unreachable("non-canonical or dependent type in IR-generation"); 00114 00115 case Type::Auto: 00116 llvm_unreachable("undeduced auto type in IR-generation"); 00117 00118 // Various scalar types. 00119 case Type::Builtin: 00120 case Type::Pointer: 00121 case Type::BlockPointer: 00122 case Type::LValueReference: 00123 case Type::RValueReference: 00124 case Type::MemberPointer: 00125 case Type::Vector: 00126 case Type::ExtVector: 00127 case Type::FunctionProto: 00128 case Type::FunctionNoProto: 00129 case Type::Enum: 00130 case Type::ObjCObjectPointer: 00131 return TEK_Scalar; 00132 00133 // Complexes. 00134 case Type::Complex: 00135 return TEK_Complex; 00136 00137 // Arrays, records, and Objective-C objects. 00138 case Type::ConstantArray: 00139 case Type::IncompleteArray: 00140 case Type::VariableArray: 00141 case Type::Record: 00142 case Type::ObjCObject: 00143 case Type::ObjCInterface: 00144 return TEK_Aggregate; 00145 00146 // We operate on atomic values according to their underlying type. 00147 case Type::Atomic: 00148 type = cast<AtomicType>(type)->getValueType(); 00149 continue; 00150 } 00151 llvm_unreachable("unknown type kind!"); 00152 } 00153 } 00154 00155 void CodeGenFunction::EmitReturnBlock() { 00156 // For cleanliness, we try to avoid emitting the return block for 00157 // simple cases. 00158 llvm::BasicBlock *CurBB = Builder.GetInsertBlock(); 00159 00160 if (CurBB) { 00161 assert(!CurBB->getTerminator() && "Unexpected terminated block."); 00162 00163 // We have a valid insert point, reuse it if it is empty or there are no 00164 // explicit jumps to the return block. 00165 if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) { 00166 ReturnBlock.getBlock()->replaceAllUsesWith(CurBB); 00167 delete ReturnBlock.getBlock(); 00168 } else 00169 EmitBlock(ReturnBlock.getBlock()); 00170 return; 00171 } 00172 00173 // Otherwise, if the return block is the target of a single direct 00174 // branch then we can just put the code in that block instead. This 00175 // cleans up functions which started with a unified return block. 00176 if (ReturnBlock.getBlock()->hasOneUse()) { 00177 llvm::BranchInst *BI = 00178 dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin()); 00179 if (BI && BI->isUnconditional() && 00180 BI->getSuccessor(0) == ReturnBlock.getBlock()) { 00181 // Reset insertion point, including debug location, and delete the 00182 // branch. This is really subtle and only works because the next change 00183 // in location will hit the caching in CGDebugInfo::EmitLocation and not 00184 // override this. 00185 Builder.SetCurrentDebugLocation(BI->getDebugLoc()); 00186 Builder.SetInsertPoint(BI->getParent()); 00187 BI->eraseFromParent(); 00188 delete ReturnBlock.getBlock(); 00189 return; 00190 } 00191 } 00192 00193 // FIXME: We are at an unreachable point, there is no reason to emit the block 00194 // unless it has uses. However, we still need a place to put the debug 00195 // region.end for now. 00196 00197 EmitBlock(ReturnBlock.getBlock()); 00198 } 00199 00200 static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) { 00201 if (!BB) return; 00202 if (!BB->use_empty()) 00203 return CGF.CurFn->getBasicBlockList().push_back(BB); 00204 delete BB; 00205 } 00206 00207 void CodeGenFunction::FinishFunction(SourceLocation EndLoc) { 00208 assert(BreakContinueStack.empty() && 00209 "mismatched push/pop in break/continue stack!"); 00210 00211 bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0 00212 && NumSimpleReturnExprs == NumReturnExprs 00213 && ReturnBlock.getBlock()->use_empty(); 00214 // Usually the return expression is evaluated before the cleanup 00215 // code. If the function contains only a simple return statement, 00216 // such as a constant, the location before the cleanup code becomes 00217 // the last useful breakpoint in the function, because the simple 00218 // return expression will be evaluated after the cleanup code. To be 00219 // safe, set the debug location for cleanup code to the location of 00220 // the return statement. Otherwise the cleanup code should be at the 00221 // end of the function's lexical scope. 00222 // 00223 // If there are multiple branches to the return block, the branch 00224 // instructions will get the location of the return statements and 00225 // all will be fine. 00226 if (CGDebugInfo *DI = getDebugInfo()) { 00227 if (OnlySimpleReturnStmts) 00228 DI->EmitLocation(Builder, LastStopPoint); 00229 else 00230 DI->EmitLocation(Builder, EndLoc); 00231 } 00232 00233 // Pop any cleanups that might have been associated with the 00234 // parameters. Do this in whatever block we're currently in; it's 00235 // important to do this before we enter the return block or return 00236 // edges will be *really* confused. 00237 bool EmitRetDbgLoc = true; 00238 if (EHStack.stable_begin() != PrologueCleanupDepth) { 00239 PopCleanupBlocks(PrologueCleanupDepth); 00240 00241 // Make sure the line table doesn't jump back into the body for 00242 // the ret after it's been at EndLoc. 00243 EmitRetDbgLoc = false; 00244 00245 if (CGDebugInfo *DI = getDebugInfo()) 00246 if (OnlySimpleReturnStmts) 00247 DI->EmitLocation(Builder, EndLoc); 00248 } 00249 00250 // Emit function epilog (to return). 00251 EmitReturnBlock(); 00252 00253 if (ShouldInstrumentFunction()) 00254 EmitFunctionInstrumentation("__cyg_profile_func_exit"); 00255 00256 // Emit debug descriptor for function end. 00257 if (CGDebugInfo *DI = getDebugInfo()) { 00258 DI->EmitFunctionEnd(Builder); 00259 } 00260 00261 EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc); 00262 EmitEndEHSpec(CurCodeDecl); 00263 00264 assert(EHStack.empty() && 00265 "did not remove all scopes from cleanup stack!"); 00266 00267 // If someone did an indirect goto, emit the indirect goto block at the end of 00268 // the function. 00269 if (IndirectBranch) { 00270 EmitBlock(IndirectBranch->getParent()); 00271 Builder.ClearInsertionPoint(); 00272 } 00273 00274 // Remove the AllocaInsertPt instruction, which is just a convenience for us. 00275 llvm::Instruction *Ptr = AllocaInsertPt; 00276 AllocaInsertPt = nullptr; 00277 Ptr->eraseFromParent(); 00278 00279 // If someone took the address of a label but never did an indirect goto, we 00280 // made a zero entry PHI node, which is illegal, zap it now. 00281 if (IndirectBranch) { 00282 llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress()); 00283 if (PN->getNumIncomingValues() == 0) { 00284 PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType())); 00285 PN->eraseFromParent(); 00286 } 00287 } 00288 00289 EmitIfUsed(*this, EHResumeBlock); 00290 EmitIfUsed(*this, TerminateLandingPad); 00291 EmitIfUsed(*this, TerminateHandler); 00292 EmitIfUsed(*this, UnreachableBlock); 00293 00294 if (CGM.getCodeGenOpts().EmitDeclMetadata) 00295 EmitDeclMetadata(); 00296 00297 for (SmallVectorImpl<std::pair<llvm::Instruction *, llvm::Value *> >::iterator 00298 I = DeferredReplacements.begin(), 00299 E = DeferredReplacements.end(); 00300 I != E; ++I) { 00301 I->first->replaceAllUsesWith(I->second); 00302 I->first->eraseFromParent(); 00303 } 00304 } 00305 00306 /// ShouldInstrumentFunction - Return true if the current function should be 00307 /// instrumented with __cyg_profile_func_* calls 00308 bool CodeGenFunction::ShouldInstrumentFunction() { 00309 if (!CGM.getCodeGenOpts().InstrumentFunctions) 00310 return false; 00311 if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>()) 00312 return false; 00313 return true; 00314 } 00315 00316 /// EmitFunctionInstrumentation - Emit LLVM code to call the specified 00317 /// instrumentation function with the current function and the call site, if 00318 /// function instrumentation is enabled. 00319 void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) { 00320 // void __cyg_profile_func_{enter,exit} (void *this_fn, void *call_site); 00321 llvm::PointerType *PointerTy = Int8PtrTy; 00322 llvm::Type *ProfileFuncArgs[] = { PointerTy, PointerTy }; 00323 llvm::FunctionType *FunctionTy = 00324 llvm::FunctionType::get(VoidTy, ProfileFuncArgs, false); 00325 00326 llvm::Constant *F = CGM.CreateRuntimeFunction(FunctionTy, Fn); 00327 llvm::CallInst *CallSite = Builder.CreateCall( 00328 CGM.getIntrinsic(llvm::Intrinsic::returnaddress), 00329 llvm::ConstantInt::get(Int32Ty, 0), 00330 "callsite"); 00331 00332 llvm::Value *args[] = { 00333 llvm::ConstantExpr::getBitCast(CurFn, PointerTy), 00334 CallSite 00335 }; 00336 00337 EmitNounwindRuntimeCall(F, args); 00338 } 00339 00340 void CodeGenFunction::EmitMCountInstrumentation() { 00341 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false); 00342 00343 llvm::Constant *MCountFn = 00344 CGM.CreateRuntimeFunction(FTy, getTarget().getMCountName()); 00345 EmitNounwindRuntimeCall(MCountFn); 00346 } 00347 00348 // OpenCL v1.2 s5.6.4.6 allows the compiler to store kernel argument 00349 // information in the program executable. The argument information stored 00350 // includes the argument name, its type, the address and access qualifiers used. 00351 static void GenOpenCLArgMetadata(const FunctionDecl *FD, llvm::Function *Fn, 00352 CodeGenModule &CGM,llvm::LLVMContext &Context, 00353 SmallVector <llvm::Value*, 5> &kernelMDArgs, 00354 CGBuilderTy& Builder, ASTContext &ASTCtx) { 00355 // Create MDNodes that represent the kernel arg metadata. 00356 // Each MDNode is a list in the form of "key", N number of values which is 00357 // the same number of values as their are kernel arguments. 00358 00359 const PrintingPolicy &Policy = ASTCtx.getPrintingPolicy(); 00360 00361 // MDNode for the kernel argument address space qualifiers. 00362 SmallVector<llvm::Value*, 8> addressQuals; 00363 addressQuals.push_back(llvm::MDString::get(Context, "kernel_arg_addr_space")); 00364 00365 // MDNode for the kernel argument access qualifiers (images only). 00366 SmallVector<llvm::Value*, 8> accessQuals; 00367 accessQuals.push_back(llvm::MDString::get(Context, "kernel_arg_access_qual")); 00368 00369 // MDNode for the kernel argument type names. 00370 SmallVector<llvm::Value*, 8> argTypeNames; 00371 argTypeNames.push_back(llvm::MDString::get(Context, "kernel_arg_type")); 00372 00373 // MDNode for the kernel argument base type names. 00374 SmallVector<llvm::Value*, 8> argBaseTypeNames; 00375 argBaseTypeNames.push_back( 00376 llvm::MDString::get(Context, "kernel_arg_base_type")); 00377 00378 // MDNode for the kernel argument type qualifiers. 00379 SmallVector<llvm::Value*, 8> argTypeQuals; 00380 argTypeQuals.push_back(llvm::MDString::get(Context, "kernel_arg_type_qual")); 00381 00382 // MDNode for the kernel argument names. 00383 SmallVector<llvm::Value*, 8> argNames; 00384 argNames.push_back(llvm::MDString::get(Context, "kernel_arg_name")); 00385 00386 for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) { 00387 const ParmVarDecl *parm = FD->getParamDecl(i); 00388 QualType ty = parm->getType(); 00389 std::string typeQuals; 00390 00391 if (ty->isPointerType()) { 00392 QualType pointeeTy = ty->getPointeeType(); 00393 00394 // Get address qualifier. 00395 addressQuals.push_back(Builder.getInt32(ASTCtx.getTargetAddressSpace( 00396 pointeeTy.getAddressSpace()))); 00397 00398 // Get argument type name. 00399 std::string typeName = 00400 pointeeTy.getUnqualifiedType().getAsString(Policy) + "*"; 00401 00402 // Turn "unsigned type" to "utype" 00403 std::string::size_type pos = typeName.find("unsigned"); 00404 if (pointeeTy.isCanonical() && pos != std::string::npos) 00405 typeName.erase(pos+1, 8); 00406 00407 argTypeNames.push_back(llvm::MDString::get(Context, typeName)); 00408 00409 std::string baseTypeName = 00410 pointeeTy.getUnqualifiedType().getCanonicalType().getAsString( 00411 Policy) + 00412 "*"; 00413 00414 // Turn "unsigned type" to "utype" 00415 pos = baseTypeName.find("unsigned"); 00416 if (pos != std::string::npos) 00417 baseTypeName.erase(pos+1, 8); 00418 00419 argBaseTypeNames.push_back(llvm::MDString::get(Context, baseTypeName)); 00420 00421 // Get argument type qualifiers: 00422 if (ty.isRestrictQualified()) 00423 typeQuals = "restrict"; 00424 if (pointeeTy.isConstQualified() || 00425 (pointeeTy.getAddressSpace() == LangAS::opencl_constant)) 00426 typeQuals += typeQuals.empty() ? "const" : " const"; 00427 if (pointeeTy.isVolatileQualified()) 00428 typeQuals += typeQuals.empty() ? "volatile" : " volatile"; 00429 } else { 00430 uint32_t AddrSpc = 0; 00431 if (ty->isImageType()) 00432 AddrSpc = 00433 CGM.getContext().getTargetAddressSpace(LangAS::opencl_global); 00434 00435 addressQuals.push_back(Builder.getInt32(AddrSpc)); 00436 00437 // Get argument type name. 00438 std::string typeName = ty.getUnqualifiedType().getAsString(Policy); 00439 00440 // Turn "unsigned type" to "utype" 00441 std::string::size_type pos = typeName.find("unsigned"); 00442 if (ty.isCanonical() && pos != std::string::npos) 00443 typeName.erase(pos+1, 8); 00444 00445 argTypeNames.push_back(llvm::MDString::get(Context, typeName)); 00446 00447 std::string baseTypeName = 00448 ty.getUnqualifiedType().getCanonicalType().getAsString(Policy); 00449 00450 // Turn "unsigned type" to "utype" 00451 pos = baseTypeName.find("unsigned"); 00452 if (pos != std::string::npos) 00453 baseTypeName.erase(pos+1, 8); 00454 00455 argBaseTypeNames.push_back(llvm::MDString::get(Context, baseTypeName)); 00456 00457 // Get argument type qualifiers: 00458 if (ty.isConstQualified()) 00459 typeQuals = "const"; 00460 if (ty.isVolatileQualified()) 00461 typeQuals += typeQuals.empty() ? "volatile" : " volatile"; 00462 } 00463 00464 argTypeQuals.push_back(llvm::MDString::get(Context, typeQuals)); 00465 00466 // Get image access qualifier: 00467 if (ty->isImageType()) { 00468 const OpenCLImageAccessAttr *A = parm->getAttr<OpenCLImageAccessAttr>(); 00469 if (A && A->isWriteOnly()) 00470 accessQuals.push_back(llvm::MDString::get(Context, "write_only")); 00471 else 00472 accessQuals.push_back(llvm::MDString::get(Context, "read_only")); 00473 // FIXME: what about read_write? 00474 } else 00475 accessQuals.push_back(llvm::MDString::get(Context, "none")); 00476 00477 // Get argument name. 00478 argNames.push_back(llvm::MDString::get(Context, parm->getName())); 00479 } 00480 00481 kernelMDArgs.push_back(llvm::MDNode::get(Context, addressQuals)); 00482 kernelMDArgs.push_back(llvm::MDNode::get(Context, accessQuals)); 00483 kernelMDArgs.push_back(llvm::MDNode::get(Context, argTypeNames)); 00484 kernelMDArgs.push_back(llvm::MDNode::get(Context, argBaseTypeNames)); 00485 kernelMDArgs.push_back(llvm::MDNode::get(Context, argTypeQuals)); 00486 kernelMDArgs.push_back(llvm::MDNode::get(Context, argNames)); 00487 } 00488 00489 void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD, 00490 llvm::Function *Fn) 00491 { 00492 if (!FD->hasAttr<OpenCLKernelAttr>()) 00493 return; 00494 00495 llvm::LLVMContext &Context = getLLVMContext(); 00496 00497 SmallVector <llvm::Value*, 5> kernelMDArgs; 00498 kernelMDArgs.push_back(Fn); 00499 00500 if (CGM.getCodeGenOpts().EmitOpenCLArgMetadata) 00501 GenOpenCLArgMetadata(FD, Fn, CGM, Context, kernelMDArgs, 00502 Builder, getContext()); 00503 00504 if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) { 00505 QualType hintQTy = A->getTypeHint(); 00506 const ExtVectorType *hintEltQTy = hintQTy->getAs<ExtVectorType>(); 00507 bool isSignedInteger = 00508 hintQTy->isSignedIntegerType() || 00509 (hintEltQTy && hintEltQTy->getElementType()->isSignedIntegerType()); 00510 llvm::Value *attrMDArgs[] = { 00511 llvm::MDString::get(Context, "vec_type_hint"), 00512 llvm::UndefValue::get(CGM.getTypes().ConvertType(A->getTypeHint())), 00513 llvm::ConstantInt::get( 00514 llvm::IntegerType::get(Context, 32), 00515 llvm::APInt(32, (uint64_t)(isSignedInteger ? 1 : 0))) 00516 }; 00517 kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs)); 00518 } 00519 00520 if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) { 00521 llvm::Value *attrMDArgs[] = { 00522 llvm::MDString::get(Context, "work_group_size_hint"), 00523 Builder.getInt32(A->getXDim()), 00524 Builder.getInt32(A->getYDim()), 00525 Builder.getInt32(A->getZDim()) 00526 }; 00527 kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs)); 00528 } 00529 00530 if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) { 00531 llvm::Value *attrMDArgs[] = { 00532 llvm::MDString::get(Context, "reqd_work_group_size"), 00533 Builder.getInt32(A->getXDim()), 00534 Builder.getInt32(A->getYDim()), 00535 Builder.getInt32(A->getZDim()) 00536 }; 00537 kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs)); 00538 } 00539 00540 llvm::MDNode *kernelMDNode = llvm::MDNode::get(Context, kernelMDArgs); 00541 llvm::NamedMDNode *OpenCLKernelMetadata = 00542 CGM.getModule().getOrInsertNamedMetadata("opencl.kernels"); 00543 OpenCLKernelMetadata->addOperand(kernelMDNode); 00544 } 00545 00546 /// Determine whether the function F ends with a return stmt. 00547 static bool endsWithReturn(const Decl* F) { 00548 const Stmt *Body = nullptr; 00549 if (auto *FD = dyn_cast_or_null<FunctionDecl>(F)) 00550 Body = FD->getBody(); 00551 else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F)) 00552 Body = OMD->getBody(); 00553 00554 if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) { 00555 auto LastStmt = CS->body_rbegin(); 00556 if (LastStmt != CS->body_rend()) 00557 return isa<ReturnStmt>(*LastStmt); 00558 } 00559 return false; 00560 } 00561 00562 void CodeGenFunction::StartFunction(GlobalDecl GD, 00563 QualType RetTy, 00564 llvm::Function *Fn, 00565 const CGFunctionInfo &FnInfo, 00566 const FunctionArgList &Args, 00567 SourceLocation Loc, 00568 SourceLocation StartLoc) { 00569 assert(!CurFn && 00570 "Do not use a CodeGenFunction object for more than one function"); 00571 00572 const Decl *D = GD.getDecl(); 00573 00574 DidCallStackSave = false; 00575 CurCodeDecl = D; 00576 CurFuncDecl = (D ? D->getNonClosureContext() : nullptr); 00577 FnRetTy = RetTy; 00578 CurFn = Fn; 00579 CurFnInfo = &FnInfo; 00580 assert(CurFn->isDeclaration() && "Function already has body?"); 00581 00582 if (CGM.isInSanitizerBlacklist(Fn, Loc)) 00583 SanOpts.clear(); 00584 00585 // Pass inline keyword to optimizer if it appears explicitly on any 00586 // declaration. Also, in the case of -fno-inline attach NoInline 00587 // attribute to all function that are not marked AlwaysInline. 00588 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) { 00589 if (!CGM.getCodeGenOpts().NoInline) { 00590 for (auto RI : FD->redecls()) 00591 if (RI->isInlineSpecified()) { 00592 Fn->addFnAttr(llvm::Attribute::InlineHint); 00593 break; 00594 } 00595 } else if (!FD->hasAttr<AlwaysInlineAttr>()) 00596 Fn->addFnAttr(llvm::Attribute::NoInline); 00597 } 00598 00599 if (getLangOpts().OpenCL) { 00600 // Add metadata for a kernel function. 00601 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) 00602 EmitOpenCLKernelMetadata(FD, Fn); 00603 } 00604 00605 // If we are checking function types, emit a function type signature as 00606 // prefix data. 00607 if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function)) { 00608 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) { 00609 if (llvm::Constant *PrefixSig = 00610 CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) { 00611 llvm::Constant *FTRTTIConst = 00612 CGM.GetAddrOfRTTIDescriptor(FD->getType(), /*ForEH=*/true); 00613 llvm::Constant *PrefixStructElems[] = { PrefixSig, FTRTTIConst }; 00614 llvm::Constant *PrefixStructConst = 00615 llvm::ConstantStruct::getAnon(PrefixStructElems, /*Packed=*/true); 00616 Fn->setPrefixData(PrefixStructConst); 00617 } 00618 } 00619 } 00620 00621 llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn); 00622 00623 // Create a marker to make it easy to insert allocas into the entryblock 00624 // later. Don't create this with the builder, because we don't want it 00625 // folded. 00626 llvm::Value *Undef = llvm::UndefValue::get(Int32Ty); 00627 AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "", EntryBB); 00628 if (Builder.isNamePreserving()) 00629 AllocaInsertPt->setName("allocapt"); 00630 00631 ReturnBlock = getJumpDestInCurrentScope("return"); 00632 00633 Builder.SetInsertPoint(EntryBB); 00634 00635 // Emit subprogram debug descriptor. 00636 if (CGDebugInfo *DI = getDebugInfo()) { 00637 SmallVector<QualType, 16> ArgTypes; 00638 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end(); 00639 i != e; ++i) { 00640 ArgTypes.push_back((*i)->getType()); 00641 } 00642 00643 QualType FnType = 00644 getContext().getFunctionType(RetTy, ArgTypes, 00645 FunctionProtoType::ExtProtoInfo()); 00646 DI->EmitFunctionStart(GD, Loc, StartLoc, FnType, CurFn, Builder); 00647 } 00648 00649 if (ShouldInstrumentFunction()) 00650 EmitFunctionInstrumentation("__cyg_profile_func_enter"); 00651 00652 if (CGM.getCodeGenOpts().InstrumentForProfiling) 00653 EmitMCountInstrumentation(); 00654 00655 if (RetTy->isVoidType()) { 00656 // Void type; nothing to return. 00657 ReturnValue = nullptr; 00658 00659 // Count the implicit return. 00660 if (!endsWithReturn(D)) 00661 ++NumReturnExprs; 00662 } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect && 00663 !hasScalarEvaluationKind(CurFnInfo->getReturnType())) { 00664 // Indirect aggregate return; emit returned value directly into sret slot. 00665 // This reduces code size, and affects correctness in C++. 00666 auto AI = CurFn->arg_begin(); 00667 if (CurFnInfo->getReturnInfo().isSRetAfterThis()) 00668 ++AI; 00669 ReturnValue = AI; 00670 } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca && 00671 !hasScalarEvaluationKind(CurFnInfo->getReturnType())) { 00672 // Load the sret pointer from the argument struct and return into that. 00673 unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex(); 00674 llvm::Function::arg_iterator EI = CurFn->arg_end(); 00675 --EI; 00676 llvm::Value *Addr = Builder.CreateStructGEP(EI, Idx); 00677 ReturnValue = Builder.CreateLoad(Addr, "agg.result"); 00678 } else { 00679 ReturnValue = CreateIRTemp(RetTy, "retval"); 00680 00681 // Tell the epilog emitter to autorelease the result. We do this 00682 // now so that various specialized functions can suppress it 00683 // during their IR-generation. 00684 if (getLangOpts().ObjCAutoRefCount && 00685 !CurFnInfo->isReturnsRetained() && 00686 RetTy->isObjCRetainableType()) 00687 AutoreleaseResult = true; 00688 } 00689 00690 EmitStartEHSpec(CurCodeDecl); 00691 00692 PrologueCleanupDepth = EHStack.stable_begin(); 00693 EmitFunctionProlog(*CurFnInfo, CurFn, Args); 00694 00695 if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) { 00696 CGM.getCXXABI().EmitInstanceFunctionProlog(*this); 00697 const CXXMethodDecl *MD = cast<CXXMethodDecl>(D); 00698 if (MD->getParent()->isLambda() && 00699 MD->getOverloadedOperator() == OO_Call) { 00700 // We're in a lambda; figure out the captures. 00701 MD->getParent()->getCaptureFields(LambdaCaptureFields, 00702 LambdaThisCaptureField); 00703 if (LambdaThisCaptureField) { 00704 // If this lambda captures this, load it. 00705 LValue ThisLValue = EmitLValueForLambdaField(LambdaThisCaptureField); 00706 CXXThisValue = EmitLoadOfLValue(ThisLValue, 00707 SourceLocation()).getScalarVal(); 00708 } 00709 for (auto *FD : MD->getParent()->fields()) { 00710 if (FD->hasCapturedVLAType()) { 00711 auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD), 00712 SourceLocation()).getScalarVal(); 00713 auto VAT = FD->getCapturedVLAType(); 00714 VLASizeMap[VAT->getSizeExpr()] = ExprArg; 00715 } 00716 } 00717 } else { 00718 // Not in a lambda; just use 'this' from the method. 00719 // FIXME: Should we generate a new load for each use of 'this'? The 00720 // fast register allocator would be happier... 00721 CXXThisValue = CXXABIThisValue; 00722 } 00723 } 00724 00725 // If any of the arguments have a variably modified type, make sure to 00726 // emit the type size. 00727 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end(); 00728 i != e; ++i) { 00729 const VarDecl *VD = *i; 00730 00731 // Dig out the type as written from ParmVarDecls; it's unclear whether 00732 // the standard (C99 6.9.1p10) requires this, but we're following the 00733 // precedent set by gcc. 00734 QualType Ty; 00735 if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD)) 00736 Ty = PVD->getOriginalType(); 00737 else 00738 Ty = VD->getType(); 00739 00740 if (Ty->isVariablyModifiedType()) 00741 EmitVariablyModifiedType(Ty); 00742 } 00743 // Emit a location at the end of the prologue. 00744 if (CGDebugInfo *DI = getDebugInfo()) 00745 DI->EmitLocation(Builder, StartLoc); 00746 } 00747 00748 void CodeGenFunction::EmitFunctionBody(FunctionArgList &Args, 00749 const Stmt *Body) { 00750 RegionCounter Cnt = getPGORegionCounter(Body); 00751 Cnt.beginRegion(Builder); 00752 if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body)) 00753 EmitCompoundStmtWithoutScope(*S); 00754 else 00755 EmitStmt(Body); 00756 } 00757 00758 /// When instrumenting to collect profile data, the counts for some blocks 00759 /// such as switch cases need to not include the fall-through counts, so 00760 /// emit a branch around the instrumentation code. When not instrumenting, 00761 /// this just calls EmitBlock(). 00762 void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB, 00763 RegionCounter &Cnt) { 00764 llvm::BasicBlock *SkipCountBB = nullptr; 00765 if (HaveInsertPoint() && CGM.getCodeGenOpts().ProfileInstrGenerate) { 00766 // When instrumenting for profiling, the fallthrough to certain 00767 // statements needs to skip over the instrumentation code so that we 00768 // get an accurate count. 00769 SkipCountBB = createBasicBlock("skipcount"); 00770 EmitBranch(SkipCountBB); 00771 } 00772 EmitBlock(BB); 00773 Cnt.beginRegion(Builder, /*AddIncomingFallThrough=*/true); 00774 if (SkipCountBB) 00775 EmitBlock(SkipCountBB); 00776 } 00777 00778 /// Tries to mark the given function nounwind based on the 00779 /// non-existence of any throwing calls within it. We believe this is 00780 /// lightweight enough to do at -O0. 00781 static void TryMarkNoThrow(llvm::Function *F) { 00782 // LLVM treats 'nounwind' on a function as part of the type, so we 00783 // can't do this on functions that can be overwritten. 00784 if (F->mayBeOverridden()) return; 00785 00786 for (llvm::Function::iterator FI = F->begin(), FE = F->end(); FI != FE; ++FI) 00787 for (llvm::BasicBlock::iterator 00788 BI = FI->begin(), BE = FI->end(); BI != BE; ++BI) 00789 if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(&*BI)) { 00790 if (!Call->doesNotThrow()) 00791 return; 00792 } else if (isa<llvm::ResumeInst>(&*BI)) { 00793 return; 00794 } 00795 F->setDoesNotThrow(); 00796 } 00797 00798 static void EmitSizedDeallocationFunction(CodeGenFunction &CGF, 00799 const FunctionDecl *UnsizedDealloc) { 00800 // This is a weak discardable definition of the sized deallocation function. 00801 CGF.CurFn->setLinkage(llvm::Function::LinkOnceAnyLinkage); 00802 00803 // Call the unsized deallocation function and forward the first argument 00804 // unchanged. 00805 llvm::Constant *Unsized = CGF.CGM.GetAddrOfFunction(UnsizedDealloc); 00806 CGF.Builder.CreateCall(Unsized, &*CGF.CurFn->arg_begin()); 00807 } 00808 00809 void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn, 00810 const CGFunctionInfo &FnInfo) { 00811 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl()); 00812 00813 // Check if we should generate debug info for this function. 00814 if (FD->hasAttr<NoDebugAttr>()) 00815 DebugInfo = nullptr; // disable debug info indefinitely for this function 00816 00817 FunctionArgList Args; 00818 QualType ResTy = FD->getReturnType(); 00819 00820 CurGD = GD; 00821 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD); 00822 if (MD && MD->isInstance()) { 00823 if (CGM.getCXXABI().HasThisReturn(GD)) 00824 ResTy = MD->getThisType(getContext()); 00825 else if (CGM.getCXXABI().hasMostDerivedReturn(GD)) 00826 ResTy = CGM.getContext().VoidPtrTy; 00827 CGM.getCXXABI().buildThisParam(*this, Args); 00828 } 00829 00830 Args.append(FD->param_begin(), FD->param_end()); 00831 00832 if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD))) 00833 CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args); 00834 00835 SourceRange BodyRange; 00836 if (Stmt *Body = FD->getBody()) BodyRange = Body->getSourceRange(); 00837 CurEHLocation = BodyRange.getEnd(); 00838 00839 // Use the location of the start of the function to determine where 00840 // the function definition is located. By default use the location 00841 // of the declaration as the location for the subprogram. A function 00842 // may lack a declaration in the source code if it is created by code 00843 // gen. (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk). 00844 SourceLocation Loc = FD->getLocation(); 00845 00846 // If this is a function specialization then use the pattern body 00847 // as the location for the function. 00848 if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern()) 00849 if (SpecDecl->hasBody(SpecDecl)) 00850 Loc = SpecDecl->getLocation(); 00851 00852 // Emit the standard function prologue. 00853 StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin()); 00854 00855 // Generate the body of the function. 00856 PGO.checkGlobalDecl(GD); 00857 PGO.assignRegionCounters(GD.getDecl(), CurFn); 00858 if (isa<CXXDestructorDecl>(FD)) 00859 EmitDestructorBody(Args); 00860 else if (isa<CXXConstructorDecl>(FD)) 00861 EmitConstructorBody(Args); 00862 else if (getLangOpts().CUDA && 00863 !CGM.getCodeGenOpts().CUDAIsDevice && 00864 FD->hasAttr<CUDAGlobalAttr>()) 00865 CGM.getCUDARuntime().EmitDeviceStubBody(*this, Args); 00866 else if (isa<CXXConversionDecl>(FD) && 00867 cast<CXXConversionDecl>(FD)->isLambdaToBlockPointerConversion()) { 00868 // The lambda conversion to block pointer is special; the semantics can't be 00869 // expressed in the AST, so IRGen needs to special-case it. 00870 EmitLambdaToBlockPointerBody(Args); 00871 } else if (isa<CXXMethodDecl>(FD) && 00872 cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) { 00873 // The lambda static invoker function is special, because it forwards or 00874 // clones the body of the function call operator (but is actually static). 00875 EmitLambdaStaticInvokeFunction(cast<CXXMethodDecl>(FD)); 00876 } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) && 00877 (cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() || 00878 cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) { 00879 // Implicit copy-assignment gets the same special treatment as implicit 00880 // copy-constructors. 00881 emitImplicitAssignmentOperatorBody(Args); 00882 } else if (Stmt *Body = FD->getBody()) { 00883 EmitFunctionBody(Args, Body); 00884 } else if (FunctionDecl *UnsizedDealloc = 00885 FD->getCorrespondingUnsizedGlobalDeallocationFunction()) { 00886 // Global sized deallocation functions get an implicit weak definition if 00887 // they don't have an explicit definition. 00888 EmitSizedDeallocationFunction(*this, UnsizedDealloc); 00889 } else 00890 llvm_unreachable("no definition for emitted function"); 00891 00892 // C++11 [stmt.return]p2: 00893 // Flowing off the end of a function [...] results in undefined behavior in 00894 // a value-returning function. 00895 // C11 6.9.1p12: 00896 // If the '}' that terminates a function is reached, and the value of the 00897 // function call is used by the caller, the behavior is undefined. 00898 if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock && 00899 !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) { 00900 if (SanOpts.has(SanitizerKind::Return)) { 00901 SanitizerScope SanScope(this); 00902 llvm::Value *IsFalse = Builder.getFalse(); 00903 EmitCheck(std::make_pair(IsFalse, SanitizerKind::Return), 00904 "missing_return", EmitCheckSourceLocation(FD->getLocation()), 00905 None); 00906 } else if (CGM.getCodeGenOpts().OptimizationLevel == 0) 00907 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::trap)); 00908 Builder.CreateUnreachable(); 00909 Builder.ClearInsertionPoint(); 00910 } 00911 00912 // Emit the standard function epilogue. 00913 FinishFunction(BodyRange.getEnd()); 00914 00915 // If we haven't marked the function nothrow through other means, do 00916 // a quick pass now to see if we can. 00917 if (!CurFn->doesNotThrow()) 00918 TryMarkNoThrow(CurFn); 00919 00920 PGO.emitInstrumentationData(); 00921 PGO.destroyRegionCounters(); 00922 } 00923 00924 /// ContainsLabel - Return true if the statement contains a label in it. If 00925 /// this statement is not executed normally, it not containing a label means 00926 /// that we can just remove the code. 00927 bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) { 00928 // Null statement, not a label! 00929 if (!S) return false; 00930 00931 // If this is a label, we have to emit the code, consider something like: 00932 // if (0) { ... foo: bar(); } goto foo; 00933 // 00934 // TODO: If anyone cared, we could track __label__'s, since we know that you 00935 // can't jump to one from outside their declared region. 00936 if (isa<LabelStmt>(S)) 00937 return true; 00938 00939 // If this is a case/default statement, and we haven't seen a switch, we have 00940 // to emit the code. 00941 if (isa<SwitchCase>(S) && !IgnoreCaseStmts) 00942 return true; 00943 00944 // If this is a switch statement, we want to ignore cases below it. 00945 if (isa<SwitchStmt>(S)) 00946 IgnoreCaseStmts = true; 00947 00948 // Scan subexpressions for verboten labels. 00949 for (Stmt::const_child_range I = S->children(); I; ++I) 00950 if (ContainsLabel(*I, IgnoreCaseStmts)) 00951 return true; 00952 00953 return false; 00954 } 00955 00956 /// containsBreak - Return true if the statement contains a break out of it. 00957 /// If the statement (recursively) contains a switch or loop with a break 00958 /// inside of it, this is fine. 00959 bool CodeGenFunction::containsBreak(const Stmt *S) { 00960 // Null statement, not a label! 00961 if (!S) return false; 00962 00963 // If this is a switch or loop that defines its own break scope, then we can 00964 // include it and anything inside of it. 00965 if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) || 00966 isa<ForStmt>(S)) 00967 return false; 00968 00969 if (isa<BreakStmt>(S)) 00970 return true; 00971 00972 // Scan subexpressions for verboten breaks. 00973 for (Stmt::const_child_range I = S->children(); I; ++I) 00974 if (containsBreak(*I)) 00975 return true; 00976 00977 return false; 00978 } 00979 00980 00981 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold 00982 /// to a constant, or if it does but contains a label, return false. If it 00983 /// constant folds return true and set the boolean result in Result. 00984 bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond, 00985 bool &ResultBool) { 00986 llvm::APSInt ResultInt; 00987 if (!ConstantFoldsToSimpleInteger(Cond, ResultInt)) 00988 return false; 00989 00990 ResultBool = ResultInt.getBoolValue(); 00991 return true; 00992 } 00993 00994 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold 00995 /// to a constant, or if it does but contains a label, return false. If it 00996 /// constant folds return true and set the folded value. 00997 bool CodeGenFunction:: 00998 ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &ResultInt) { 00999 // FIXME: Rename and handle conversion of other evaluatable things 01000 // to bool. 01001 llvm::APSInt Int; 01002 if (!Cond->EvaluateAsInt(Int, getContext())) 01003 return false; // Not foldable, not integer or not fully evaluatable. 01004 01005 if (CodeGenFunction::ContainsLabel(Cond)) 01006 return false; // Contains a label. 01007 01008 ResultInt = Int; 01009 return true; 01010 } 01011 01012 01013 01014 /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if 01015 /// statement) to the specified blocks. Based on the condition, this might try 01016 /// to simplify the codegen of the conditional based on the branch. 01017 /// 01018 void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond, 01019 llvm::BasicBlock *TrueBlock, 01020 llvm::BasicBlock *FalseBlock, 01021 uint64_t TrueCount) { 01022 Cond = Cond->IgnoreParens(); 01023 01024 if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) { 01025 01026 // Handle X && Y in a condition. 01027 if (CondBOp->getOpcode() == BO_LAnd) { 01028 RegionCounter Cnt = getPGORegionCounter(CondBOp); 01029 01030 // If we have "1 && X", simplify the code. "0 && X" would have constant 01031 // folded if the case was simple enough. 01032 bool ConstantBool = false; 01033 if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) && 01034 ConstantBool) { 01035 // br(1 && X) -> br(X). 01036 Cnt.beginRegion(Builder); 01037 return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, 01038 TrueCount); 01039 } 01040 01041 // If we have "X && 1", simplify the code to use an uncond branch. 01042 // "X && 0" would have been constant folded to 0. 01043 if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) && 01044 ConstantBool) { 01045 // br(X && 1) -> br(X). 01046 return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock, 01047 TrueCount); 01048 } 01049 01050 // Emit the LHS as a conditional. If the LHS conditional is false, we 01051 // want to jump to the FalseBlock. 01052 llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true"); 01053 // The counter tells us how often we evaluate RHS, and all of TrueCount 01054 // can be propagated to that branch. 01055 uint64_t RHSCount = Cnt.getCount(); 01056 01057 ConditionalEvaluation eval(*this); 01058 EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount); 01059 EmitBlock(LHSTrue); 01060 01061 // Any temporaries created here are conditional. 01062 Cnt.beginRegion(Builder); 01063 eval.begin(*this); 01064 EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, TrueCount); 01065 eval.end(*this); 01066 01067 return; 01068 } 01069 01070 if (CondBOp->getOpcode() == BO_LOr) { 01071 RegionCounter Cnt = getPGORegionCounter(CondBOp); 01072 01073 // If we have "0 || X", simplify the code. "1 || X" would have constant 01074 // folded if the case was simple enough. 01075 bool ConstantBool = false; 01076 if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) && 01077 !ConstantBool) { 01078 // br(0 || X) -> br(X). 01079 Cnt.beginRegion(Builder); 01080 return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, 01081 TrueCount); 01082 } 01083 01084 // If we have "X || 0", simplify the code to use an uncond branch. 01085 // "X || 1" would have been constant folded to 1. 01086 if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) && 01087 !ConstantBool) { 01088 // br(X || 0) -> br(X). 01089 return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock, 01090 TrueCount); 01091 } 01092 01093 // Emit the LHS as a conditional. If the LHS conditional is true, we 01094 // want to jump to the TrueBlock. 01095 llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false"); 01096 // We have the count for entry to the RHS and for the whole expression 01097 // being true, so we can divy up True count between the short circuit and 01098 // the RHS. 01099 uint64_t LHSCount = Cnt.getParentCount() - Cnt.getCount(); 01100 uint64_t RHSCount = TrueCount - LHSCount; 01101 01102 ConditionalEvaluation eval(*this); 01103 EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount); 01104 EmitBlock(LHSFalse); 01105 01106 // Any temporaries created here are conditional. 01107 Cnt.beginRegion(Builder); 01108 eval.begin(*this); 01109 EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, RHSCount); 01110 01111 eval.end(*this); 01112 01113 return; 01114 } 01115 } 01116 01117 if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) { 01118 // br(!x, t, f) -> br(x, f, t) 01119 if (CondUOp->getOpcode() == UO_LNot) { 01120 // Negate the count. 01121 uint64_t FalseCount = PGO.getCurrentRegionCount() - TrueCount; 01122 // Negate the condition and swap the destination blocks. 01123 return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock, 01124 FalseCount); 01125 } 01126 } 01127 01128 if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) { 01129 // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f)) 01130 llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true"); 01131 llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false"); 01132 01133 RegionCounter Cnt = getPGORegionCounter(CondOp); 01134 ConditionalEvaluation cond(*this); 01135 EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock, Cnt.getCount()); 01136 01137 // When computing PGO branch weights, we only know the overall count for 01138 // the true block. This code is essentially doing tail duplication of the 01139 // naive code-gen, introducing new edges for which counts are not 01140 // available. Divide the counts proportionally between the LHS and RHS of 01141 // the conditional operator. 01142 uint64_t LHSScaledTrueCount = 0; 01143 if (TrueCount) { 01144 double LHSRatio = Cnt.getCount() / (double) Cnt.getParentCount(); 01145 LHSScaledTrueCount = TrueCount * LHSRatio; 01146 } 01147 01148 cond.begin(*this); 01149 EmitBlock(LHSBlock); 01150 Cnt.beginRegion(Builder); 01151 EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock, 01152 LHSScaledTrueCount); 01153 cond.end(*this); 01154 01155 cond.begin(*this); 01156 EmitBlock(RHSBlock); 01157 EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock, 01158 TrueCount - LHSScaledTrueCount); 01159 cond.end(*this); 01160 01161 return; 01162 } 01163 01164 if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) { 01165 // Conditional operator handling can give us a throw expression as a 01166 // condition for a case like: 01167 // br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f) 01168 // Fold this to: 01169 // br(c, throw x, br(y, t, f)) 01170 EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false); 01171 return; 01172 } 01173 01174 // Create branch weights based on the number of times we get here and the 01175 // number of times the condition should be true. 01176 uint64_t CurrentCount = std::max(PGO.getCurrentRegionCount(), TrueCount); 01177 llvm::MDNode *Weights = PGO.createBranchWeights(TrueCount, 01178 CurrentCount - TrueCount); 01179 01180 // Emit the code with the fully general case. 01181 llvm::Value *CondV = EvaluateExprAsBool(Cond); 01182 Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights); 01183 } 01184 01185 /// ErrorUnsupported - Print out an error that codegen doesn't support the 01186 /// specified stmt yet. 01187 void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) { 01188 CGM.ErrorUnsupported(S, Type); 01189 } 01190 01191 /// emitNonZeroVLAInit - Emit the "zero" initialization of a 01192 /// variable-length array whose elements have a non-zero bit-pattern. 01193 /// 01194 /// \param baseType the inner-most element type of the array 01195 /// \param src - a char* pointing to the bit-pattern for a single 01196 /// base element of the array 01197 /// \param sizeInChars - the total size of the VLA, in chars 01198 static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType, 01199 llvm::Value *dest, llvm::Value *src, 01200 llvm::Value *sizeInChars) { 01201 std::pair<CharUnits,CharUnits> baseSizeAndAlign 01202 = CGF.getContext().getTypeInfoInChars(baseType); 01203 01204 CGBuilderTy &Builder = CGF.Builder; 01205 01206 llvm::Value *baseSizeInChars 01207 = llvm::ConstantInt::get(CGF.IntPtrTy, baseSizeAndAlign.first.getQuantity()); 01208 01209 llvm::Type *i8p = Builder.getInt8PtrTy(); 01210 01211 llvm::Value *begin = Builder.CreateBitCast(dest, i8p, "vla.begin"); 01212 llvm::Value *end = Builder.CreateInBoundsGEP(dest, sizeInChars, "vla.end"); 01213 01214 llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock(); 01215 llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop"); 01216 llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont"); 01217 01218 // Make a loop over the VLA. C99 guarantees that the VLA element 01219 // count must be nonzero. 01220 CGF.EmitBlock(loopBB); 01221 01222 llvm::PHINode *cur = Builder.CreatePHI(i8p, 2, "vla.cur"); 01223 cur->addIncoming(begin, originBB); 01224 01225 // memcpy the individual element bit-pattern. 01226 Builder.CreateMemCpy(cur, src, baseSizeInChars, 01227 baseSizeAndAlign.second.getQuantity(), 01228 /*volatile*/ false); 01229 01230 // Go to the next element. 01231 llvm::Value *next = Builder.CreateConstInBoundsGEP1_32(cur, 1, "vla.next"); 01232 01233 // Leave if that's the end of the VLA. 01234 llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone"); 01235 Builder.CreateCondBr(done, contBB, loopBB); 01236 cur->addIncoming(next, loopBB); 01237 01238 CGF.EmitBlock(contBB); 01239 } 01240 01241 void 01242 CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) { 01243 // Ignore empty classes in C++. 01244 if (getLangOpts().CPlusPlus) { 01245 if (const RecordType *RT = Ty->getAs<RecordType>()) { 01246 if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty()) 01247 return; 01248 } 01249 } 01250 01251 // Cast the dest ptr to the appropriate i8 pointer type. 01252 unsigned DestAS = 01253 cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace(); 01254 llvm::Type *BP = Builder.getInt8PtrTy(DestAS); 01255 if (DestPtr->getType() != BP) 01256 DestPtr = Builder.CreateBitCast(DestPtr, BP); 01257 01258 // Get size and alignment info for this aggregate. 01259 std::pair<CharUnits, CharUnits> TypeInfo = 01260 getContext().getTypeInfoInChars(Ty); 01261 CharUnits Size = TypeInfo.first; 01262 CharUnits Align = TypeInfo.second; 01263 01264 llvm::Value *SizeVal; 01265 const VariableArrayType *vla; 01266 01267 // Don't bother emitting a zero-byte memset. 01268 if (Size.isZero()) { 01269 // But note that getTypeInfo returns 0 for a VLA. 01270 if (const VariableArrayType *vlaType = 01271 dyn_cast_or_null<VariableArrayType>( 01272 getContext().getAsArrayType(Ty))) { 01273 QualType eltType; 01274 llvm::Value *numElts; 01275 std::tie(numElts, eltType) = getVLASize(vlaType); 01276 01277 SizeVal = numElts; 01278 CharUnits eltSize = getContext().getTypeSizeInChars(eltType); 01279 if (!eltSize.isOne()) 01280 SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize)); 01281 vla = vlaType; 01282 } else { 01283 return; 01284 } 01285 } else { 01286 SizeVal = CGM.getSize(Size); 01287 vla = nullptr; 01288 } 01289 01290 // If the type contains a pointer to data member we can't memset it to zero. 01291 // Instead, create a null constant and copy it to the destination. 01292 // TODO: there are other patterns besides zero that we can usefully memset, 01293 // like -1, which happens to be the pattern used by member-pointers. 01294 if (!CGM.getTypes().isZeroInitializable(Ty)) { 01295 // For a VLA, emit a single element, then splat that over the VLA. 01296 if (vla) Ty = getContext().getBaseElementType(vla); 01297 01298 llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty); 01299 01300 llvm::GlobalVariable *NullVariable = 01301 new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(), 01302 /*isConstant=*/true, 01303 llvm::GlobalVariable::PrivateLinkage, 01304 NullConstant, Twine()); 01305 llvm::Value *SrcPtr = 01306 Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy()); 01307 01308 if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal); 01309 01310 // Get and call the appropriate llvm.memcpy overload. 01311 Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align.getQuantity(), false); 01312 return; 01313 } 01314 01315 // Otherwise, just memset the whole thing to zero. This is legal 01316 // because in LLVM, all default initializers (other than the ones we just 01317 // handled above) are guaranteed to have a bit pattern of all zeros. 01318 Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, 01319 Align.getQuantity(), false); 01320 } 01321 01322 llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) { 01323 // Make sure that there is a block for the indirect goto. 01324 if (!IndirectBranch) 01325 GetIndirectGotoBlock(); 01326 01327 llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock(); 01328 01329 // Make sure the indirect branch includes all of the address-taken blocks. 01330 IndirectBranch->addDestination(BB); 01331 return llvm::BlockAddress::get(CurFn, BB); 01332 } 01333 01334 llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() { 01335 // If we already made the indirect branch for indirect goto, return its block. 01336 if (IndirectBranch) return IndirectBranch->getParent(); 01337 01338 CGBuilderTy TmpBuilder(createBasicBlock("indirectgoto")); 01339 01340 // Create the PHI node that indirect gotos will add entries to. 01341 llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0, 01342 "indirect.goto.dest"); 01343 01344 // Create the indirect branch instruction. 01345 IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal); 01346 return IndirectBranch->getParent(); 01347 } 01348 01349 /// Computes the length of an array in elements, as well as the base 01350 /// element type and a properly-typed first element pointer. 01351 llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType, 01352 QualType &baseType, 01353 llvm::Value *&addr) { 01354 const ArrayType *arrayType = origArrayType; 01355 01356 // If it's a VLA, we have to load the stored size. Note that 01357 // this is the size of the VLA in bytes, not its size in elements. 01358 llvm::Value *numVLAElements = nullptr; 01359 if (isa<VariableArrayType>(arrayType)) { 01360 numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).first; 01361 01362 // Walk into all VLAs. This doesn't require changes to addr, 01363 // which has type T* where T is the first non-VLA element type. 01364 do { 01365 QualType elementType = arrayType->getElementType(); 01366 arrayType = getContext().getAsArrayType(elementType); 01367 01368 // If we only have VLA components, 'addr' requires no adjustment. 01369 if (!arrayType) { 01370 baseType = elementType; 01371 return numVLAElements; 01372 } 01373 } while (isa<VariableArrayType>(arrayType)); 01374 01375 // We get out here only if we find a constant array type 01376 // inside the VLA. 01377 } 01378 01379 // We have some number of constant-length arrays, so addr should 01380 // have LLVM type [M x [N x [...]]]*. Build a GEP that walks 01381 // down to the first element of addr. 01382 SmallVector<llvm::Value*, 8> gepIndices; 01383 01384 // GEP down to the array type. 01385 llvm::ConstantInt *zero = Builder.getInt32(0); 01386 gepIndices.push_back(zero); 01387 01388 uint64_t countFromCLAs = 1; 01389 QualType eltType; 01390 01391 llvm::ArrayType *llvmArrayType = 01392 dyn_cast<llvm::ArrayType>( 01393 cast<llvm::PointerType>(addr->getType())->getElementType()); 01394 while (llvmArrayType) { 01395 assert(isa<ConstantArrayType>(arrayType)); 01396 assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue() 01397 == llvmArrayType->getNumElements()); 01398 01399 gepIndices.push_back(zero); 01400 countFromCLAs *= llvmArrayType->getNumElements(); 01401 eltType = arrayType->getElementType(); 01402 01403 llvmArrayType = 01404 dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType()); 01405 arrayType = getContext().getAsArrayType(arrayType->getElementType()); 01406 assert((!llvmArrayType || arrayType) && 01407 "LLVM and Clang types are out-of-synch"); 01408 } 01409 01410 if (arrayType) { 01411 // From this point onwards, the Clang array type has been emitted 01412 // as some other type (probably a packed struct). Compute the array 01413 // size, and just emit the 'begin' expression as a bitcast. 01414 while (arrayType) { 01415 countFromCLAs *= 01416 cast<ConstantArrayType>(arrayType)->getSize().getZExtValue(); 01417 eltType = arrayType->getElementType(); 01418 arrayType = getContext().getAsArrayType(eltType); 01419 } 01420 01421 unsigned AddressSpace = addr->getType()->getPointerAddressSpace(); 01422 llvm::Type *BaseType = ConvertType(eltType)->getPointerTo(AddressSpace); 01423 addr = Builder.CreateBitCast(addr, BaseType, "array.begin"); 01424 } else { 01425 // Create the actual GEP. 01426 addr = Builder.CreateInBoundsGEP(addr, gepIndices, "array.begin"); 01427 } 01428 01429 baseType = eltType; 01430 01431 llvm::Value *numElements 01432 = llvm::ConstantInt::get(SizeTy, countFromCLAs); 01433 01434 // If we had any VLA dimensions, factor them in. 01435 if (numVLAElements) 01436 numElements = Builder.CreateNUWMul(numVLAElements, numElements); 01437 01438 return numElements; 01439 } 01440 01441 std::pair<llvm::Value*, QualType> 01442 CodeGenFunction::getVLASize(QualType type) { 01443 const VariableArrayType *vla = getContext().getAsVariableArrayType(type); 01444 assert(vla && "type was not a variable array type!"); 01445 return getVLASize(vla); 01446 } 01447 01448 std::pair<llvm::Value*, QualType> 01449 CodeGenFunction::getVLASize(const VariableArrayType *type) { 01450 // The number of elements so far; always size_t. 01451 llvm::Value *numElements = nullptr; 01452 01453 QualType elementType; 01454 do { 01455 elementType = type->getElementType(); 01456 llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()]; 01457 assert(vlaSize && "no size for VLA!"); 01458 assert(vlaSize->getType() == SizeTy); 01459 01460 if (!numElements) { 01461 numElements = vlaSize; 01462 } else { 01463 // It's undefined behavior if this wraps around, so mark it that way. 01464 // FIXME: Teach -fsanitize=undefined to trap this. 01465 numElements = Builder.CreateNUWMul(numElements, vlaSize); 01466 } 01467 } while ((type = getContext().getAsVariableArrayType(elementType))); 01468 01469 return std::pair<llvm::Value*,QualType>(numElements, elementType); 01470 } 01471 01472 void CodeGenFunction::EmitVariablyModifiedType(QualType type) { 01473 assert(type->isVariablyModifiedType() && 01474 "Must pass variably modified type to EmitVLASizes!"); 01475 01476 EnsureInsertPoint(); 01477 01478 // We're going to walk down into the type and look for VLA 01479 // expressions. 01480 do { 01481 assert(type->isVariablyModifiedType()); 01482 01483 const Type *ty = type.getTypePtr(); 01484 switch (ty->getTypeClass()) { 01485 01486 #define TYPE(Class, Base) 01487 #define ABSTRACT_TYPE(Class, Base) 01488 #define NON_CANONICAL_TYPE(Class, Base) 01489 #define DEPENDENT_TYPE(Class, Base) case Type::Class: 01490 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) 01491 #include "clang/AST/TypeNodes.def" 01492 llvm_unreachable("unexpected dependent type!"); 01493 01494 // These types are never variably-modified. 01495 case Type::Builtin: 01496 case Type::Complex: 01497 case Type::Vector: 01498 case Type::ExtVector: 01499 case Type::Record: 01500 case Type::Enum: 01501 case Type::Elaborated: 01502 case Type::TemplateSpecialization: 01503 case Type::ObjCObject: 01504 case Type::ObjCInterface: 01505 case Type::ObjCObjectPointer: 01506 llvm_unreachable("type class is never variably-modified!"); 01507 01508 case Type::Adjusted: 01509 type = cast<AdjustedType>(ty)->getAdjustedType(); 01510 break; 01511 01512 case Type::Decayed: 01513 type = cast<DecayedType>(ty)->getPointeeType(); 01514 break; 01515 01516 case Type::Pointer: 01517 type = cast<PointerType>(ty)->getPointeeType(); 01518 break; 01519 01520 case Type::BlockPointer: 01521 type = cast<BlockPointerType>(ty)->getPointeeType(); 01522 break; 01523 01524 case Type::LValueReference: 01525 case Type::RValueReference: 01526 type = cast<ReferenceType>(ty)->getPointeeType(); 01527 break; 01528 01529 case Type::MemberPointer: 01530 type = cast<MemberPointerType>(ty)->getPointeeType(); 01531 break; 01532 01533 case Type::ConstantArray: 01534 case Type::IncompleteArray: 01535 // Losing element qualification here is fine. 01536 type = cast<ArrayType>(ty)->getElementType(); 01537 break; 01538 01539 case Type::VariableArray: { 01540 // Losing element qualification here is fine. 01541 const VariableArrayType *vat = cast<VariableArrayType>(ty); 01542 01543 // Unknown size indication requires no size computation. 01544 // Otherwise, evaluate and record it. 01545 if (const Expr *size = vat->getSizeExpr()) { 01546 // It's possible that we might have emitted this already, 01547 // e.g. with a typedef and a pointer to it. 01548 llvm::Value *&entry = VLASizeMap[size]; 01549 if (!entry) { 01550 llvm::Value *Size = EmitScalarExpr(size); 01551 01552 // C11 6.7.6.2p5: 01553 // If the size is an expression that is not an integer constant 01554 // expression [...] each time it is evaluated it shall have a value 01555 // greater than zero. 01556 if (SanOpts.has(SanitizerKind::VLABound) && 01557 size->getType()->isSignedIntegerType()) { 01558 SanitizerScope SanScope(this); 01559 llvm::Value *Zero = llvm::Constant::getNullValue(Size->getType()); 01560 llvm::Constant *StaticArgs[] = { 01561 EmitCheckSourceLocation(size->getLocStart()), 01562 EmitCheckTypeDescriptor(size->getType()) 01563 }; 01564 EmitCheck(std::make_pair(Builder.CreateICmpSGT(Size, Zero), 01565 SanitizerKind::VLABound), 01566 "vla_bound_not_positive", StaticArgs, Size); 01567 } 01568 01569 // Always zexting here would be wrong if it weren't 01570 // undefined behavior to have a negative bound. 01571 entry = Builder.CreateIntCast(Size, SizeTy, /*signed*/ false); 01572 } 01573 } 01574 type = vat->getElementType(); 01575 break; 01576 } 01577 01578 case Type::FunctionProto: 01579 case Type::FunctionNoProto: 01580 type = cast<FunctionType>(ty)->getReturnType(); 01581 break; 01582 01583 case Type::Paren: 01584 case Type::TypeOf: 01585 case Type::UnaryTransform: 01586 case Type::Attributed: 01587 case Type::SubstTemplateTypeParm: 01588 case Type::PackExpansion: 01589 // Keep walking after single level desugaring. 01590 type = type.getSingleStepDesugaredType(getContext()); 01591 break; 01592 01593 case Type::Typedef: 01594 case Type::Decltype: 01595 case Type::Auto: 01596 // Stop walking: nothing to do. 01597 return; 01598 01599 case Type::TypeOfExpr: 01600 // Stop walking: emit typeof expression. 01601 EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr()); 01602 return; 01603 01604 case Type::Atomic: 01605 type = cast<AtomicType>(ty)->getValueType(); 01606 break; 01607 } 01608 } while (type->isVariablyModifiedType()); 01609 } 01610 01611 llvm::Value* CodeGenFunction::EmitVAListRef(const Expr* E) { 01612 if (getContext().getBuiltinVaListType()->isArrayType()) 01613 return EmitScalarExpr(E); 01614 return EmitLValue(E).getAddress(); 01615 } 01616 01617 void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E, 01618 llvm::Constant *Init) { 01619 assert (Init && "Invalid DeclRefExpr initializer!"); 01620 if (CGDebugInfo *Dbg = getDebugInfo()) 01621 if (CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo) 01622 Dbg->EmitGlobalVariable(E->getDecl(), Init); 01623 } 01624 01625 CodeGenFunction::PeepholeProtection 01626 CodeGenFunction::protectFromPeepholes(RValue rvalue) { 01627 // At the moment, the only aggressive peephole we do in IR gen 01628 // is trunc(zext) folding, but if we add more, we can easily 01629 // extend this protection. 01630 01631 if (!rvalue.isScalar()) return PeepholeProtection(); 01632 llvm::Value *value = rvalue.getScalarVal(); 01633 if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection(); 01634 01635 // Just make an extra bitcast. 01636 assert(HaveInsertPoint()); 01637 llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "", 01638 Builder.GetInsertBlock()); 01639 01640 PeepholeProtection protection; 01641 protection.Inst = inst; 01642 return protection; 01643 } 01644 01645 void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) { 01646 if (!protection.Inst) return; 01647 01648 // In theory, we could try to duplicate the peepholes now, but whatever. 01649 protection.Inst->eraseFromParent(); 01650 } 01651 01652 llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Value *AnnotationFn, 01653 llvm::Value *AnnotatedVal, 01654 StringRef AnnotationStr, 01655 SourceLocation Location) { 01656 llvm::Value *Args[4] = { 01657 AnnotatedVal, 01658 Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr), Int8PtrTy), 01659 Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location), Int8PtrTy), 01660 CGM.EmitAnnotationLineNo(Location) 01661 }; 01662 return Builder.CreateCall(AnnotationFn, Args); 01663 } 01664 01665 void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) { 01666 assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute"); 01667 // FIXME We create a new bitcast for every annotation because that's what 01668 // llvm-gcc was doing. 01669 for (const auto *I : D->specific_attrs<AnnotateAttr>()) 01670 EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation), 01671 Builder.CreateBitCast(V, CGM.Int8PtrTy, V->getName()), 01672 I->getAnnotation(), D->getLocation()); 01673 } 01674 01675 llvm::Value *CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D, 01676 llvm::Value *V) { 01677 assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute"); 01678 llvm::Type *VTy = V->getType(); 01679 llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation, 01680 CGM.Int8PtrTy); 01681 01682 for (const auto *I : D->specific_attrs<AnnotateAttr>()) { 01683 // FIXME Always emit the cast inst so we can differentiate between 01684 // annotation on the first field of a struct and annotation on the struct 01685 // itself. 01686 if (VTy != CGM.Int8PtrTy) 01687 V = Builder.Insert(new llvm::BitCastInst(V, CGM.Int8PtrTy)); 01688 V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation()); 01689 V = Builder.CreateBitCast(V, VTy); 01690 } 01691 01692 return V; 01693 } 01694 01695 CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { } 01696 01697 CodeGenFunction::SanitizerScope::SanitizerScope(CodeGenFunction *CGF) 01698 : CGF(CGF) { 01699 assert(!CGF->IsSanitizerScope); 01700 CGF->IsSanitizerScope = true; 01701 } 01702 01703 CodeGenFunction::SanitizerScope::~SanitizerScope() { 01704 CGF->IsSanitizerScope = false; 01705 } 01706 01707 void CodeGenFunction::InsertHelper(llvm::Instruction *I, 01708 const llvm::Twine &Name, 01709 llvm::BasicBlock *BB, 01710 llvm::BasicBlock::iterator InsertPt) const { 01711 LoopStack.InsertHelper(I); 01712 if (IsSanitizerScope) 01713 CGM.getSanitizerMetadata()->disableSanitizerForInstruction(I); 01714 } 01715 01716 template <bool PreserveNames> 01717 void CGBuilderInserter<PreserveNames>::InsertHelper( 01718 llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB, 01719 llvm::BasicBlock::iterator InsertPt) const { 01720 llvm::IRBuilderDefaultInserter<PreserveNames>::InsertHelper(I, Name, BB, 01721 InsertPt); 01722 if (CGF) 01723 CGF->InsertHelper(I, Name, BB, InsertPt); 01724 } 01725 01726 #ifdef NDEBUG 01727 #define PreserveNames false 01728 #else 01729 #define PreserveNames true 01730 #endif 01731 template void CGBuilderInserter<PreserveNames>::InsertHelper( 01732 llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB, 01733 llvm::BasicBlock::iterator InsertPt) const; 01734 #undef PreserveNames