clang API Documentation
00001 //===--- CGClass.cpp - Emit LLVM Code for C++ classes ---------------------===// 00002 // 00003 // The LLVM Compiler Infrastructure 00004 // 00005 // This file is distributed under the University of Illinois Open Source 00006 // License. See LICENSE.TXT for details. 00007 // 00008 //===----------------------------------------------------------------------===// 00009 // 00010 // This contains code dealing with C++ code generation of classes 00011 // 00012 //===----------------------------------------------------------------------===// 00013 00014 #include "CGBlocks.h" 00015 #include "CGCXXABI.h" 00016 #include "CGDebugInfo.h" 00017 #include "CGRecordLayout.h" 00018 #include "CodeGenFunction.h" 00019 #include "clang/AST/CXXInheritance.h" 00020 #include "clang/AST/DeclTemplate.h" 00021 #include "clang/AST/EvaluatedExprVisitor.h" 00022 #include "clang/AST/RecordLayout.h" 00023 #include "clang/AST/StmtCXX.h" 00024 #include "clang/Basic/TargetBuiltins.h" 00025 #include "clang/CodeGen/CGFunctionInfo.h" 00026 #include "clang/Frontend/CodeGenOptions.h" 00027 00028 using namespace clang; 00029 using namespace CodeGen; 00030 00031 static CharUnits 00032 ComputeNonVirtualBaseClassOffset(ASTContext &Context, 00033 const CXXRecordDecl *DerivedClass, 00034 CastExpr::path_const_iterator Start, 00035 CastExpr::path_const_iterator End) { 00036 CharUnits Offset = CharUnits::Zero(); 00037 00038 const CXXRecordDecl *RD = DerivedClass; 00039 00040 for (CastExpr::path_const_iterator I = Start; I != End; ++I) { 00041 const CXXBaseSpecifier *Base = *I; 00042 assert(!Base->isVirtual() && "Should not see virtual bases here!"); 00043 00044 // Get the layout. 00045 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); 00046 00047 const CXXRecordDecl *BaseDecl = 00048 cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl()); 00049 00050 // Add the offset. 00051 Offset += Layout.getBaseClassOffset(BaseDecl); 00052 00053 RD = BaseDecl; 00054 } 00055 00056 return Offset; 00057 } 00058 00059 llvm::Constant * 00060 CodeGenModule::GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl, 00061 CastExpr::path_const_iterator PathBegin, 00062 CastExpr::path_const_iterator PathEnd) { 00063 assert(PathBegin != PathEnd && "Base path should not be empty!"); 00064 00065 CharUnits Offset = 00066 ComputeNonVirtualBaseClassOffset(getContext(), ClassDecl, 00067 PathBegin, PathEnd); 00068 if (Offset.isZero()) 00069 return nullptr; 00070 00071 llvm::Type *PtrDiffTy = 00072 Types.ConvertType(getContext().getPointerDiffType()); 00073 00074 return llvm::ConstantInt::get(PtrDiffTy, Offset.getQuantity()); 00075 } 00076 00077 /// Gets the address of a direct base class within a complete object. 00078 /// This should only be used for (1) non-virtual bases or (2) virtual bases 00079 /// when the type is known to be complete (e.g. in complete destructors). 00080 /// 00081 /// The object pointed to by 'This' is assumed to be non-null. 00082 llvm::Value * 00083 CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(llvm::Value *This, 00084 const CXXRecordDecl *Derived, 00085 const CXXRecordDecl *Base, 00086 bool BaseIsVirtual) { 00087 // 'this' must be a pointer (in some address space) to Derived. 00088 assert(This->getType()->isPointerTy() && 00089 cast<llvm::PointerType>(This->getType())->getElementType() 00090 == ConvertType(Derived)); 00091 00092 // Compute the offset of the virtual base. 00093 CharUnits Offset; 00094 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(Derived); 00095 if (BaseIsVirtual) 00096 Offset = Layout.getVBaseClassOffset(Base); 00097 else 00098 Offset = Layout.getBaseClassOffset(Base); 00099 00100 // Shift and cast down to the base type. 00101 // TODO: for complete types, this should be possible with a GEP. 00102 llvm::Value *V = This; 00103 if (Offset.isPositive()) { 00104 V = Builder.CreateBitCast(V, Int8PtrTy); 00105 V = Builder.CreateConstInBoundsGEP1_64(V, Offset.getQuantity()); 00106 } 00107 V = Builder.CreateBitCast(V, ConvertType(Base)->getPointerTo()); 00108 00109 return V; 00110 } 00111 00112 static llvm::Value * 00113 ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, llvm::Value *ptr, 00114 CharUnits nonVirtualOffset, 00115 llvm::Value *virtualOffset) { 00116 // Assert that we have something to do. 00117 assert(!nonVirtualOffset.isZero() || virtualOffset != nullptr); 00118 00119 // Compute the offset from the static and dynamic components. 00120 llvm::Value *baseOffset; 00121 if (!nonVirtualOffset.isZero()) { 00122 baseOffset = llvm::ConstantInt::get(CGF.PtrDiffTy, 00123 nonVirtualOffset.getQuantity()); 00124 if (virtualOffset) { 00125 baseOffset = CGF.Builder.CreateAdd(virtualOffset, baseOffset); 00126 } 00127 } else { 00128 baseOffset = virtualOffset; 00129 } 00130 00131 // Apply the base offset. 00132 ptr = CGF.Builder.CreateBitCast(ptr, CGF.Int8PtrTy); 00133 ptr = CGF.Builder.CreateInBoundsGEP(ptr, baseOffset, "add.ptr"); 00134 return ptr; 00135 } 00136 00137 llvm::Value *CodeGenFunction::GetAddressOfBaseClass( 00138 llvm::Value *Value, const CXXRecordDecl *Derived, 00139 CastExpr::path_const_iterator PathBegin, 00140 CastExpr::path_const_iterator PathEnd, bool NullCheckValue, 00141 SourceLocation Loc) { 00142 assert(PathBegin != PathEnd && "Base path should not be empty!"); 00143 00144 CastExpr::path_const_iterator Start = PathBegin; 00145 const CXXRecordDecl *VBase = nullptr; 00146 00147 // Sema has done some convenient canonicalization here: if the 00148 // access path involved any virtual steps, the conversion path will 00149 // *start* with a step down to the correct virtual base subobject, 00150 // and hence will not require any further steps. 00151 if ((*Start)->isVirtual()) { 00152 VBase = 00153 cast<CXXRecordDecl>((*Start)->getType()->getAs<RecordType>()->getDecl()); 00154 ++Start; 00155 } 00156 00157 // Compute the static offset of the ultimate destination within its 00158 // allocating subobject (the virtual base, if there is one, or else 00159 // the "complete" object that we see). 00160 CharUnits NonVirtualOffset = 00161 ComputeNonVirtualBaseClassOffset(getContext(), VBase ? VBase : Derived, 00162 Start, PathEnd); 00163 00164 // If there's a virtual step, we can sometimes "devirtualize" it. 00165 // For now, that's limited to when the derived type is final. 00166 // TODO: "devirtualize" this for accesses to known-complete objects. 00167 if (VBase && Derived->hasAttr<FinalAttr>()) { 00168 const ASTRecordLayout &layout = getContext().getASTRecordLayout(Derived); 00169 CharUnits vBaseOffset = layout.getVBaseClassOffset(VBase); 00170 NonVirtualOffset += vBaseOffset; 00171 VBase = nullptr; // we no longer have a virtual step 00172 } 00173 00174 // Get the base pointer type. 00175 llvm::Type *BasePtrTy = 00176 ConvertType((PathEnd[-1])->getType())->getPointerTo(); 00177 00178 QualType DerivedTy = getContext().getRecordType(Derived); 00179 CharUnits DerivedAlign = getContext().getTypeAlignInChars(DerivedTy); 00180 00181 // If the static offset is zero and we don't have a virtual step, 00182 // just do a bitcast; null checks are unnecessary. 00183 if (NonVirtualOffset.isZero() && !VBase) { 00184 if (sanitizePerformTypeCheck()) { 00185 EmitTypeCheck(TCK_Upcast, Loc, Value, DerivedTy, DerivedAlign, 00186 !NullCheckValue); 00187 } 00188 return Builder.CreateBitCast(Value, BasePtrTy); 00189 } 00190 00191 llvm::BasicBlock *origBB = nullptr; 00192 llvm::BasicBlock *endBB = nullptr; 00193 00194 // Skip over the offset (and the vtable load) if we're supposed to 00195 // null-check the pointer. 00196 if (NullCheckValue) { 00197 origBB = Builder.GetInsertBlock(); 00198 llvm::BasicBlock *notNullBB = createBasicBlock("cast.notnull"); 00199 endBB = createBasicBlock("cast.end"); 00200 00201 llvm::Value *isNull = Builder.CreateIsNull(Value); 00202 Builder.CreateCondBr(isNull, endBB, notNullBB); 00203 EmitBlock(notNullBB); 00204 } 00205 00206 if (sanitizePerformTypeCheck()) { 00207 EmitTypeCheck(VBase ? TCK_UpcastToVirtualBase : TCK_Upcast, Loc, Value, 00208 DerivedTy, DerivedAlign, true); 00209 } 00210 00211 // Compute the virtual offset. 00212 llvm::Value *VirtualOffset = nullptr; 00213 if (VBase) { 00214 VirtualOffset = 00215 CGM.getCXXABI().GetVirtualBaseClassOffset(*this, Value, Derived, VBase); 00216 } 00217 00218 // Apply both offsets. 00219 Value = ApplyNonVirtualAndVirtualOffset(*this, Value, 00220 NonVirtualOffset, 00221 VirtualOffset); 00222 00223 // Cast to the destination type. 00224 Value = Builder.CreateBitCast(Value, BasePtrTy); 00225 00226 // Build a phi if we needed a null check. 00227 if (NullCheckValue) { 00228 llvm::BasicBlock *notNullBB = Builder.GetInsertBlock(); 00229 Builder.CreateBr(endBB); 00230 EmitBlock(endBB); 00231 00232 llvm::PHINode *PHI = Builder.CreatePHI(BasePtrTy, 2, "cast.result"); 00233 PHI->addIncoming(Value, notNullBB); 00234 PHI->addIncoming(llvm::Constant::getNullValue(BasePtrTy), origBB); 00235 Value = PHI; 00236 } 00237 00238 return Value; 00239 } 00240 00241 llvm::Value * 00242 CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value, 00243 const CXXRecordDecl *Derived, 00244 CastExpr::path_const_iterator PathBegin, 00245 CastExpr::path_const_iterator PathEnd, 00246 bool NullCheckValue) { 00247 assert(PathBegin != PathEnd && "Base path should not be empty!"); 00248 00249 QualType DerivedTy = 00250 getContext().getCanonicalType(getContext().getTagDeclType(Derived)); 00251 llvm::Type *DerivedPtrTy = ConvertType(DerivedTy)->getPointerTo(); 00252 00253 llvm::Value *NonVirtualOffset = 00254 CGM.GetNonVirtualBaseClassOffset(Derived, PathBegin, PathEnd); 00255 00256 if (!NonVirtualOffset) { 00257 // No offset, we can just cast back. 00258 return Builder.CreateBitCast(Value, DerivedPtrTy); 00259 } 00260 00261 llvm::BasicBlock *CastNull = nullptr; 00262 llvm::BasicBlock *CastNotNull = nullptr; 00263 llvm::BasicBlock *CastEnd = nullptr; 00264 00265 if (NullCheckValue) { 00266 CastNull = createBasicBlock("cast.null"); 00267 CastNotNull = createBasicBlock("cast.notnull"); 00268 CastEnd = createBasicBlock("cast.end"); 00269 00270 llvm::Value *IsNull = Builder.CreateIsNull(Value); 00271 Builder.CreateCondBr(IsNull, CastNull, CastNotNull); 00272 EmitBlock(CastNotNull); 00273 } 00274 00275 // Apply the offset. 00276 Value = Builder.CreateBitCast(Value, Int8PtrTy); 00277 Value = Builder.CreateGEP(Value, Builder.CreateNeg(NonVirtualOffset), 00278 "sub.ptr"); 00279 00280 // Just cast. 00281 Value = Builder.CreateBitCast(Value, DerivedPtrTy); 00282 00283 if (NullCheckValue) { 00284 Builder.CreateBr(CastEnd); 00285 EmitBlock(CastNull); 00286 Builder.CreateBr(CastEnd); 00287 EmitBlock(CastEnd); 00288 00289 llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2); 00290 PHI->addIncoming(Value, CastNotNull); 00291 PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), 00292 CastNull); 00293 Value = PHI; 00294 } 00295 00296 return Value; 00297 } 00298 00299 llvm::Value *CodeGenFunction::GetVTTParameter(GlobalDecl GD, 00300 bool ForVirtualBase, 00301 bool Delegating) { 00302 if (!CGM.getCXXABI().NeedsVTTParameter(GD)) { 00303 // This constructor/destructor does not need a VTT parameter. 00304 return nullptr; 00305 } 00306 00307 const CXXRecordDecl *RD = cast<CXXMethodDecl>(CurCodeDecl)->getParent(); 00308 const CXXRecordDecl *Base = cast<CXXMethodDecl>(GD.getDecl())->getParent(); 00309 00310 llvm::Value *VTT; 00311 00312 uint64_t SubVTTIndex; 00313 00314 if (Delegating) { 00315 // If this is a delegating constructor call, just load the VTT. 00316 return LoadCXXVTT(); 00317 } else if (RD == Base) { 00318 // If the record matches the base, this is the complete ctor/dtor 00319 // variant calling the base variant in a class with virtual bases. 00320 assert(!CGM.getCXXABI().NeedsVTTParameter(CurGD) && 00321 "doing no-op VTT offset in base dtor/ctor?"); 00322 assert(!ForVirtualBase && "Can't have same class as virtual base!"); 00323 SubVTTIndex = 0; 00324 } else { 00325 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 00326 CharUnits BaseOffset = ForVirtualBase ? 00327 Layout.getVBaseClassOffset(Base) : 00328 Layout.getBaseClassOffset(Base); 00329 00330 SubVTTIndex = 00331 CGM.getVTables().getSubVTTIndex(RD, BaseSubobject(Base, BaseOffset)); 00332 assert(SubVTTIndex != 0 && "Sub-VTT index must be greater than zero!"); 00333 } 00334 00335 if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) { 00336 // A VTT parameter was passed to the constructor, use it. 00337 VTT = LoadCXXVTT(); 00338 VTT = Builder.CreateConstInBoundsGEP1_64(VTT, SubVTTIndex); 00339 } else { 00340 // We're the complete constructor, so get the VTT by name. 00341 VTT = CGM.getVTables().GetAddrOfVTT(RD); 00342 VTT = Builder.CreateConstInBoundsGEP2_64(VTT, 0, SubVTTIndex); 00343 } 00344 00345 return VTT; 00346 } 00347 00348 namespace { 00349 /// Call the destructor for a direct base class. 00350 struct CallBaseDtor : EHScopeStack::Cleanup { 00351 const CXXRecordDecl *BaseClass; 00352 bool BaseIsVirtual; 00353 CallBaseDtor(const CXXRecordDecl *Base, bool BaseIsVirtual) 00354 : BaseClass(Base), BaseIsVirtual(BaseIsVirtual) {} 00355 00356 void Emit(CodeGenFunction &CGF, Flags flags) override { 00357 const CXXRecordDecl *DerivedClass = 00358 cast<CXXMethodDecl>(CGF.CurCodeDecl)->getParent(); 00359 00360 const CXXDestructorDecl *D = BaseClass->getDestructor(); 00361 llvm::Value *Addr = 00362 CGF.GetAddressOfDirectBaseInCompleteClass(CGF.LoadCXXThis(), 00363 DerivedClass, BaseClass, 00364 BaseIsVirtual); 00365 CGF.EmitCXXDestructorCall(D, Dtor_Base, BaseIsVirtual, 00366 /*Delegating=*/false, Addr); 00367 } 00368 }; 00369 00370 /// A visitor which checks whether an initializer uses 'this' in a 00371 /// way which requires the vtable to be properly set. 00372 struct DynamicThisUseChecker : EvaluatedExprVisitor<DynamicThisUseChecker> { 00373 typedef EvaluatedExprVisitor<DynamicThisUseChecker> super; 00374 00375 bool UsesThis; 00376 00377 DynamicThisUseChecker(ASTContext &C) : super(C), UsesThis(false) {} 00378 00379 // Black-list all explicit and implicit references to 'this'. 00380 // 00381 // Do we need to worry about external references to 'this' derived 00382 // from arbitrary code? If so, then anything which runs arbitrary 00383 // external code might potentially access the vtable. 00384 void VisitCXXThisExpr(CXXThisExpr *E) { UsesThis = true; } 00385 }; 00386 } 00387 00388 static bool BaseInitializerUsesThis(ASTContext &C, const Expr *Init) { 00389 DynamicThisUseChecker Checker(C); 00390 Checker.Visit(const_cast<Expr*>(Init)); 00391 return Checker.UsesThis; 00392 } 00393 00394 static void EmitBaseInitializer(CodeGenFunction &CGF, 00395 const CXXRecordDecl *ClassDecl, 00396 CXXCtorInitializer *BaseInit, 00397 CXXCtorType CtorType) { 00398 assert(BaseInit->isBaseInitializer() && 00399 "Must have base initializer!"); 00400 00401 llvm::Value *ThisPtr = CGF.LoadCXXThis(); 00402 00403 const Type *BaseType = BaseInit->getBaseClass(); 00404 CXXRecordDecl *BaseClassDecl = 00405 cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl()); 00406 00407 bool isBaseVirtual = BaseInit->isBaseVirtual(); 00408 00409 // The base constructor doesn't construct virtual bases. 00410 if (CtorType == Ctor_Base && isBaseVirtual) 00411 return; 00412 00413 // If the initializer for the base (other than the constructor 00414 // itself) accesses 'this' in any way, we need to initialize the 00415 // vtables. 00416 if (BaseInitializerUsesThis(CGF.getContext(), BaseInit->getInit())) 00417 CGF.InitializeVTablePointers(ClassDecl); 00418 00419 // We can pretend to be a complete class because it only matters for 00420 // virtual bases, and we only do virtual bases for complete ctors. 00421 llvm::Value *V = 00422 CGF.GetAddressOfDirectBaseInCompleteClass(ThisPtr, ClassDecl, 00423 BaseClassDecl, 00424 isBaseVirtual); 00425 CharUnits Alignment = CGF.getContext().getTypeAlignInChars(BaseType); 00426 AggValueSlot AggSlot = 00427 AggValueSlot::forAddr(V, Alignment, Qualifiers(), 00428 AggValueSlot::IsDestructed, 00429 AggValueSlot::DoesNotNeedGCBarriers, 00430 AggValueSlot::IsNotAliased); 00431 00432 CGF.EmitAggExpr(BaseInit->getInit(), AggSlot); 00433 00434 if (CGF.CGM.getLangOpts().Exceptions && 00435 !BaseClassDecl->hasTrivialDestructor()) 00436 CGF.EHStack.pushCleanup<CallBaseDtor>(EHCleanup, BaseClassDecl, 00437 isBaseVirtual); 00438 } 00439 00440 static void EmitAggMemberInitializer(CodeGenFunction &CGF, 00441 LValue LHS, 00442 Expr *Init, 00443 llvm::Value *ArrayIndexVar, 00444 QualType T, 00445 ArrayRef<VarDecl *> ArrayIndexes, 00446 unsigned Index) { 00447 if (Index == ArrayIndexes.size()) { 00448 LValue LV = LHS; 00449 00450 if (ArrayIndexVar) { 00451 // If we have an array index variable, load it and use it as an offset. 00452 // Then, increment the value. 00453 llvm::Value *Dest = LHS.getAddress(); 00454 llvm::Value *ArrayIndex = CGF.Builder.CreateLoad(ArrayIndexVar); 00455 Dest = CGF.Builder.CreateInBoundsGEP(Dest, ArrayIndex, "destaddress"); 00456 llvm::Value *Next = llvm::ConstantInt::get(ArrayIndex->getType(), 1); 00457 Next = CGF.Builder.CreateAdd(ArrayIndex, Next, "inc"); 00458 CGF.Builder.CreateStore(Next, ArrayIndexVar); 00459 00460 // Update the LValue. 00461 LV.setAddress(Dest); 00462 CharUnits Align = CGF.getContext().getTypeAlignInChars(T); 00463 LV.setAlignment(std::min(Align, LV.getAlignment())); 00464 } 00465 00466 switch (CGF.getEvaluationKind(T)) { 00467 case TEK_Scalar: 00468 CGF.EmitScalarInit(Init, /*decl*/ nullptr, LV, false); 00469 break; 00470 case TEK_Complex: 00471 CGF.EmitComplexExprIntoLValue(Init, LV, /*isInit*/ true); 00472 break; 00473 case TEK_Aggregate: { 00474 AggValueSlot Slot = 00475 AggValueSlot::forLValue(LV, 00476 AggValueSlot::IsDestructed, 00477 AggValueSlot::DoesNotNeedGCBarriers, 00478 AggValueSlot::IsNotAliased); 00479 00480 CGF.EmitAggExpr(Init, Slot); 00481 break; 00482 } 00483 } 00484 00485 return; 00486 } 00487 00488 const ConstantArrayType *Array = CGF.getContext().getAsConstantArrayType(T); 00489 assert(Array && "Array initialization without the array type?"); 00490 llvm::Value *IndexVar 00491 = CGF.GetAddrOfLocalVar(ArrayIndexes[Index]); 00492 assert(IndexVar && "Array index variable not loaded"); 00493 00494 // Initialize this index variable to zero. 00495 llvm::Value* Zero 00496 = llvm::Constant::getNullValue( 00497 CGF.ConvertType(CGF.getContext().getSizeType())); 00498 CGF.Builder.CreateStore(Zero, IndexVar); 00499 00500 // Start the loop with a block that tests the condition. 00501 llvm::BasicBlock *CondBlock = CGF.createBasicBlock("for.cond"); 00502 llvm::BasicBlock *AfterFor = CGF.createBasicBlock("for.end"); 00503 00504 CGF.EmitBlock(CondBlock); 00505 00506 llvm::BasicBlock *ForBody = CGF.createBasicBlock("for.body"); 00507 // Generate: if (loop-index < number-of-elements) fall to the loop body, 00508 // otherwise, go to the block after the for-loop. 00509 uint64_t NumElements = Array->getSize().getZExtValue(); 00510 llvm::Value *Counter = CGF.Builder.CreateLoad(IndexVar); 00511 llvm::Value *NumElementsPtr = 00512 llvm::ConstantInt::get(Counter->getType(), NumElements); 00513 llvm::Value *IsLess = CGF.Builder.CreateICmpULT(Counter, NumElementsPtr, 00514 "isless"); 00515 00516 // If the condition is true, execute the body. 00517 CGF.Builder.CreateCondBr(IsLess, ForBody, AfterFor); 00518 00519 CGF.EmitBlock(ForBody); 00520 llvm::BasicBlock *ContinueBlock = CGF.createBasicBlock("for.inc"); 00521 00522 // Inside the loop body recurse to emit the inner loop or, eventually, the 00523 // constructor call. 00524 EmitAggMemberInitializer(CGF, LHS, Init, ArrayIndexVar, 00525 Array->getElementType(), ArrayIndexes, Index + 1); 00526 00527 CGF.EmitBlock(ContinueBlock); 00528 00529 // Emit the increment of the loop counter. 00530 llvm::Value *NextVal = llvm::ConstantInt::get(Counter->getType(), 1); 00531 Counter = CGF.Builder.CreateLoad(IndexVar); 00532 NextVal = CGF.Builder.CreateAdd(Counter, NextVal, "inc"); 00533 CGF.Builder.CreateStore(NextVal, IndexVar); 00534 00535 // Finally, branch back up to the condition for the next iteration. 00536 CGF.EmitBranch(CondBlock); 00537 00538 // Emit the fall-through block. 00539 CGF.EmitBlock(AfterFor, true); 00540 } 00541 00542 static void EmitMemberInitializer(CodeGenFunction &CGF, 00543 const CXXRecordDecl *ClassDecl, 00544 CXXCtorInitializer *MemberInit, 00545 const CXXConstructorDecl *Constructor, 00546 FunctionArgList &Args) { 00547 assert(MemberInit->isAnyMemberInitializer() && 00548 "Must have member initializer!"); 00549 assert(MemberInit->getInit() && "Must have initializer!"); 00550 00551 // non-static data member initializers. 00552 FieldDecl *Field = MemberInit->getAnyMember(); 00553 QualType FieldType = Field->getType(); 00554 00555 llvm::Value *ThisPtr = CGF.LoadCXXThis(); 00556 QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl); 00557 LValue LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy); 00558 00559 if (MemberInit->isIndirectMemberInitializer()) { 00560 // If we are initializing an anonymous union field, drill down to 00561 // the field. 00562 IndirectFieldDecl *IndirectField = MemberInit->getIndirectMember(); 00563 for (const auto *I : IndirectField->chain()) 00564 LHS = CGF.EmitLValueForFieldInitialization(LHS, cast<FieldDecl>(I)); 00565 FieldType = MemberInit->getIndirectMember()->getAnonField()->getType(); 00566 } else { 00567 LHS = CGF.EmitLValueForFieldInitialization(LHS, Field); 00568 } 00569 00570 // Special case: if we are in a copy or move constructor, and we are copying 00571 // an array of PODs or classes with trivial copy constructors, ignore the 00572 // AST and perform the copy we know is equivalent. 00573 // FIXME: This is hacky at best... if we had a bit more explicit information 00574 // in the AST, we could generalize it more easily. 00575 const ConstantArrayType *Array 00576 = CGF.getContext().getAsConstantArrayType(FieldType); 00577 if (Array && Constructor->isDefaulted() && 00578 Constructor->isCopyOrMoveConstructor()) { 00579 QualType BaseElementTy = CGF.getContext().getBaseElementType(Array); 00580 CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit()); 00581 if (BaseElementTy.isPODType(CGF.getContext()) || 00582 (CE && CE->getConstructor()->isTrivial())) { 00583 unsigned SrcArgIndex = 00584 CGF.CGM.getCXXABI().getSrcArgforCopyCtor(Constructor, Args); 00585 llvm::Value *SrcPtr 00586 = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(Args[SrcArgIndex])); 00587 LValue ThisRHSLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy); 00588 LValue Src = CGF.EmitLValueForFieldInitialization(ThisRHSLV, Field); 00589 00590 // Copy the aggregate. 00591 CGF.EmitAggregateCopy(LHS.getAddress(), Src.getAddress(), FieldType, 00592 LHS.isVolatileQualified()); 00593 return; 00594 } 00595 } 00596 00597 ArrayRef<VarDecl *> ArrayIndexes; 00598 if (MemberInit->getNumArrayIndices()) 00599 ArrayIndexes = MemberInit->getArrayIndexes(); 00600 CGF.EmitInitializerForField(Field, LHS, MemberInit->getInit(), ArrayIndexes); 00601 } 00602 00603 void CodeGenFunction::EmitInitializerForField(FieldDecl *Field, 00604 LValue LHS, Expr *Init, 00605 ArrayRef<VarDecl *> ArrayIndexes) { 00606 QualType FieldType = Field->getType(); 00607 switch (getEvaluationKind(FieldType)) { 00608 case TEK_Scalar: 00609 if (LHS.isSimple()) { 00610 EmitExprAsInit(Init, Field, LHS, false); 00611 } else { 00612 RValue RHS = RValue::get(EmitScalarExpr(Init)); 00613 EmitStoreThroughLValue(RHS, LHS); 00614 } 00615 break; 00616 case TEK_Complex: 00617 EmitComplexExprIntoLValue(Init, LHS, /*isInit*/ true); 00618 break; 00619 case TEK_Aggregate: { 00620 llvm::Value *ArrayIndexVar = nullptr; 00621 if (ArrayIndexes.size()) { 00622 llvm::Type *SizeTy = ConvertType(getContext().getSizeType()); 00623 00624 // The LHS is a pointer to the first object we'll be constructing, as 00625 // a flat array. 00626 QualType BaseElementTy = getContext().getBaseElementType(FieldType); 00627 llvm::Type *BasePtr = ConvertType(BaseElementTy); 00628 BasePtr = llvm::PointerType::getUnqual(BasePtr); 00629 llvm::Value *BaseAddrPtr = Builder.CreateBitCast(LHS.getAddress(), 00630 BasePtr); 00631 LHS = MakeAddrLValue(BaseAddrPtr, BaseElementTy); 00632 00633 // Create an array index that will be used to walk over all of the 00634 // objects we're constructing. 00635 ArrayIndexVar = CreateTempAlloca(SizeTy, "object.index"); 00636 llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy); 00637 Builder.CreateStore(Zero, ArrayIndexVar); 00638 00639 00640 // Emit the block variables for the array indices, if any. 00641 for (unsigned I = 0, N = ArrayIndexes.size(); I != N; ++I) 00642 EmitAutoVarDecl(*ArrayIndexes[I]); 00643 } 00644 00645 EmitAggMemberInitializer(*this, LHS, Init, ArrayIndexVar, FieldType, 00646 ArrayIndexes, 0); 00647 } 00648 } 00649 00650 // Ensure that we destroy this object if an exception is thrown 00651 // later in the constructor. 00652 QualType::DestructionKind dtorKind = FieldType.isDestructedType(); 00653 if (needsEHCleanup(dtorKind)) 00654 pushEHDestroy(dtorKind, LHS.getAddress(), FieldType); 00655 } 00656 00657 /// Checks whether the given constructor is a valid subject for the 00658 /// complete-to-base constructor delegation optimization, i.e. 00659 /// emitting the complete constructor as a simple call to the base 00660 /// constructor. 00661 static bool IsConstructorDelegationValid(const CXXConstructorDecl *Ctor) { 00662 00663 // Currently we disable the optimization for classes with virtual 00664 // bases because (1) the addresses of parameter variables need to be 00665 // consistent across all initializers but (2) the delegate function 00666 // call necessarily creates a second copy of the parameter variable. 00667 // 00668 // The limiting example (purely theoretical AFAIK): 00669 // struct A { A(int &c) { c++; } }; 00670 // struct B : virtual A { 00671 // B(int count) : A(count) { printf("%d\n", count); } 00672 // }; 00673 // ...although even this example could in principle be emitted as a 00674 // delegation since the address of the parameter doesn't escape. 00675 if (Ctor->getParent()->getNumVBases()) { 00676 // TODO: white-list trivial vbase initializers. This case wouldn't 00677 // be subject to the restrictions below. 00678 00679 // TODO: white-list cases where: 00680 // - there are no non-reference parameters to the constructor 00681 // - the initializers don't access any non-reference parameters 00682 // - the initializers don't take the address of non-reference 00683 // parameters 00684 // - etc. 00685 // If we ever add any of the above cases, remember that: 00686 // - function-try-blocks will always blacklist this optimization 00687 // - we need to perform the constructor prologue and cleanup in 00688 // EmitConstructorBody. 00689 00690 return false; 00691 } 00692 00693 // We also disable the optimization for variadic functions because 00694 // it's impossible to "re-pass" varargs. 00695 if (Ctor->getType()->getAs<FunctionProtoType>()->isVariadic()) 00696 return false; 00697 00698 // FIXME: Decide if we can do a delegation of a delegating constructor. 00699 if (Ctor->isDelegatingConstructor()) 00700 return false; 00701 00702 return true; 00703 } 00704 00705 // Emit code in ctor (Prologue==true) or dtor (Prologue==false) 00706 // to poison the extra field paddings inserted under 00707 // -fsanitize-address-field-padding=1|2. 00708 void CodeGenFunction::EmitAsanPrologueOrEpilogue(bool Prologue) { 00709 ASTContext &Context = getContext(); 00710 const CXXRecordDecl *ClassDecl = 00711 Prologue ? cast<CXXConstructorDecl>(CurGD.getDecl())->getParent() 00712 : cast<CXXDestructorDecl>(CurGD.getDecl())->getParent(); 00713 if (!ClassDecl->mayInsertExtraPadding()) return; 00714 00715 struct SizeAndOffset { 00716 uint64_t Size; 00717 uint64_t Offset; 00718 }; 00719 00720 unsigned PtrSize = CGM.getDataLayout().getPointerSizeInBits(); 00721 const ASTRecordLayout &Info = Context.getASTRecordLayout(ClassDecl); 00722 00723 // Populate sizes and offsets of fields. 00724 SmallVector<SizeAndOffset, 16> SSV(Info.getFieldCount()); 00725 for (unsigned i = 0, e = Info.getFieldCount(); i != e; ++i) 00726 SSV[i].Offset = 00727 Context.toCharUnitsFromBits(Info.getFieldOffset(i)).getQuantity(); 00728 00729 size_t NumFields = 0; 00730 for (const auto *Field : ClassDecl->fields()) { 00731 const FieldDecl *D = Field; 00732 std::pair<CharUnits, CharUnits> FieldInfo = 00733 Context.getTypeInfoInChars(D->getType()); 00734 CharUnits FieldSize = FieldInfo.first; 00735 assert(NumFields < SSV.size()); 00736 SSV[NumFields].Size = D->isBitField() ? 0 : FieldSize.getQuantity(); 00737 NumFields++; 00738 } 00739 assert(NumFields == SSV.size()); 00740 if (SSV.size() <= 1) return; 00741 00742 // We will insert calls to __asan_* run-time functions. 00743 // LLVM AddressSanitizer pass may decide to inline them later. 00744 llvm::Type *Args[2] = {IntPtrTy, IntPtrTy}; 00745 llvm::FunctionType *FTy = 00746 llvm::FunctionType::get(CGM.VoidTy, Args, false); 00747 llvm::Constant *F = CGM.CreateRuntimeFunction( 00748 FTy, Prologue ? "__asan_poison_intra_object_redzone" 00749 : "__asan_unpoison_intra_object_redzone"); 00750 00751 llvm::Value *ThisPtr = LoadCXXThis(); 00752 ThisPtr = Builder.CreatePtrToInt(ThisPtr, IntPtrTy); 00753 uint64_t TypeSize = Info.getNonVirtualSize().getQuantity(); 00754 // For each field check if it has sufficient padding, 00755 // if so (un)poison it with a call. 00756 for (size_t i = 0; i < SSV.size(); i++) { 00757 uint64_t AsanAlignment = 8; 00758 uint64_t NextField = i == SSV.size() - 1 ? TypeSize : SSV[i + 1].Offset; 00759 uint64_t PoisonSize = NextField - SSV[i].Offset - SSV[i].Size; 00760 uint64_t EndOffset = SSV[i].Offset + SSV[i].Size; 00761 if (PoisonSize < AsanAlignment || !SSV[i].Size || 00762 (NextField % AsanAlignment) != 0) 00763 continue; 00764 Builder.CreateCall2( 00765 F, Builder.CreateAdd(ThisPtr, Builder.getIntN(PtrSize, EndOffset)), 00766 Builder.getIntN(PtrSize, PoisonSize)); 00767 } 00768 } 00769 00770 /// EmitConstructorBody - Emits the body of the current constructor. 00771 void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) { 00772 EmitAsanPrologueOrEpilogue(true); 00773 const CXXConstructorDecl *Ctor = cast<CXXConstructorDecl>(CurGD.getDecl()); 00774 CXXCtorType CtorType = CurGD.getCtorType(); 00775 00776 assert((CGM.getTarget().getCXXABI().hasConstructorVariants() || 00777 CtorType == Ctor_Complete) && 00778 "can only generate complete ctor for this ABI"); 00779 00780 // Before we go any further, try the complete->base constructor 00781 // delegation optimization. 00782 if (CtorType == Ctor_Complete && IsConstructorDelegationValid(Ctor) && 00783 CGM.getTarget().getCXXABI().hasConstructorVariants()) { 00784 if (CGDebugInfo *DI = getDebugInfo()) 00785 DI->EmitLocation(Builder, Ctor->getLocEnd()); 00786 EmitDelegateCXXConstructorCall(Ctor, Ctor_Base, Args, Ctor->getLocEnd()); 00787 return; 00788 } 00789 00790 const FunctionDecl *Definition = 0; 00791 Stmt *Body = Ctor->getBody(Definition); 00792 assert(Definition == Ctor && "emitting wrong constructor body"); 00793 00794 // Enter the function-try-block before the constructor prologue if 00795 // applicable. 00796 bool IsTryBody = (Body && isa<CXXTryStmt>(Body)); 00797 if (IsTryBody) 00798 EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true); 00799 00800 RegionCounter Cnt = getPGORegionCounter(Body); 00801 Cnt.beginRegion(Builder); 00802 00803 RunCleanupsScope RunCleanups(*this); 00804 00805 // TODO: in restricted cases, we can emit the vbase initializers of 00806 // a complete ctor and then delegate to the base ctor. 00807 00808 // Emit the constructor prologue, i.e. the base and member 00809 // initializers. 00810 EmitCtorPrologue(Ctor, CtorType, Args); 00811 00812 // Emit the body of the statement. 00813 if (IsTryBody) 00814 EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock()); 00815 else if (Body) 00816 EmitStmt(Body); 00817 00818 // Emit any cleanup blocks associated with the member or base 00819 // initializers, which includes (along the exceptional path) the 00820 // destructors for those members and bases that were fully 00821 // constructed. 00822 RunCleanups.ForceCleanup(); 00823 00824 if (IsTryBody) 00825 ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true); 00826 } 00827 00828 namespace { 00829 /// RAII object to indicate that codegen is copying the value representation 00830 /// instead of the object representation. Useful when copying a struct or 00831 /// class which has uninitialized members and we're only performing 00832 /// lvalue-to-rvalue conversion on the object but not its members. 00833 class CopyingValueRepresentation { 00834 public: 00835 explicit CopyingValueRepresentation(CodeGenFunction &CGF) 00836 : CGF(CGF), OldSanOpts(CGF.SanOpts) { 00837 CGF.SanOpts.set(SanitizerKind::Bool, false); 00838 CGF.SanOpts.set(SanitizerKind::Enum, false); 00839 } 00840 ~CopyingValueRepresentation() { 00841 CGF.SanOpts = OldSanOpts; 00842 } 00843 private: 00844 CodeGenFunction &CGF; 00845 SanitizerSet OldSanOpts; 00846 }; 00847 } 00848 00849 namespace { 00850 class FieldMemcpyizer { 00851 public: 00852 FieldMemcpyizer(CodeGenFunction &CGF, const CXXRecordDecl *ClassDecl, 00853 const VarDecl *SrcRec) 00854 : CGF(CGF), ClassDecl(ClassDecl), SrcRec(SrcRec), 00855 RecLayout(CGF.getContext().getASTRecordLayout(ClassDecl)), 00856 FirstField(nullptr), LastField(nullptr), FirstFieldOffset(0), 00857 LastFieldOffset(0), LastAddedFieldIndex(0) {} 00858 00859 bool isMemcpyableField(FieldDecl *F) const { 00860 // Never memcpy fields when we are adding poisoned paddings. 00861 if (CGF.getContext().getLangOpts().SanitizeAddressFieldPadding) 00862 return false; 00863 Qualifiers Qual = F->getType().getQualifiers(); 00864 if (Qual.hasVolatile() || Qual.hasObjCLifetime()) 00865 return false; 00866 return true; 00867 } 00868 00869 void addMemcpyableField(FieldDecl *F) { 00870 if (!FirstField) 00871 addInitialField(F); 00872 else 00873 addNextField(F); 00874 } 00875 00876 CharUnits getMemcpySize(uint64_t FirstByteOffset) const { 00877 unsigned LastFieldSize = 00878 LastField->isBitField() ? 00879 LastField->getBitWidthValue(CGF.getContext()) : 00880 CGF.getContext().getTypeSize(LastField->getType()); 00881 uint64_t MemcpySizeBits = 00882 LastFieldOffset + LastFieldSize - FirstByteOffset + 00883 CGF.getContext().getCharWidth() - 1; 00884 CharUnits MemcpySize = 00885 CGF.getContext().toCharUnitsFromBits(MemcpySizeBits); 00886 return MemcpySize; 00887 } 00888 00889 void emitMemcpy() { 00890 // Give the subclass a chance to bail out if it feels the memcpy isn't 00891 // worth it (e.g. Hasn't aggregated enough data). 00892 if (!FirstField) { 00893 return; 00894 } 00895 00896 CharUnits Alignment; 00897 00898 uint64_t FirstByteOffset; 00899 if (FirstField->isBitField()) { 00900 const CGRecordLayout &RL = 00901 CGF.getTypes().getCGRecordLayout(FirstField->getParent()); 00902 const CGBitFieldInfo &BFInfo = RL.getBitFieldInfo(FirstField); 00903 Alignment = CharUnits::fromQuantity(BFInfo.StorageAlignment); 00904 // FirstFieldOffset is not appropriate for bitfields, 00905 // it won't tell us what the storage offset should be and thus might not 00906 // be properly aligned. 00907 // 00908 // Instead calculate the storage offset using the offset of the field in 00909 // the struct type. 00910 const llvm::DataLayout &DL = CGF.CGM.getDataLayout(); 00911 FirstByteOffset = 00912 DL.getStructLayout(RL.getLLVMType()) 00913 ->getElementOffsetInBits(RL.getLLVMFieldNo(FirstField)); 00914 } else { 00915 Alignment = CGF.getContext().getDeclAlign(FirstField); 00916 FirstByteOffset = FirstFieldOffset; 00917 } 00918 00919 assert((CGF.getContext().toCharUnitsFromBits(FirstByteOffset) % 00920 Alignment) == 0 && "Bad field alignment."); 00921 00922 CharUnits MemcpySize = getMemcpySize(FirstByteOffset); 00923 QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl); 00924 llvm::Value *ThisPtr = CGF.LoadCXXThis(); 00925 LValue DestLV = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy); 00926 LValue Dest = CGF.EmitLValueForFieldInitialization(DestLV, FirstField); 00927 llvm::Value *SrcPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(SrcRec)); 00928 LValue SrcLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy); 00929 LValue Src = CGF.EmitLValueForFieldInitialization(SrcLV, FirstField); 00930 00931 emitMemcpyIR(Dest.isBitField() ? Dest.getBitFieldAddr() : Dest.getAddress(), 00932 Src.isBitField() ? Src.getBitFieldAddr() : Src.getAddress(), 00933 MemcpySize, Alignment); 00934 reset(); 00935 } 00936 00937 void reset() { 00938 FirstField = nullptr; 00939 } 00940 00941 protected: 00942 CodeGenFunction &CGF; 00943 const CXXRecordDecl *ClassDecl; 00944 00945 private: 00946 00947 void emitMemcpyIR(llvm::Value *DestPtr, llvm::Value *SrcPtr, 00948 CharUnits Size, CharUnits Alignment) { 00949 llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType()); 00950 llvm::Type *DBP = 00951 llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), DPT->getAddressSpace()); 00952 DestPtr = CGF.Builder.CreateBitCast(DestPtr, DBP); 00953 00954 llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType()); 00955 llvm::Type *SBP = 00956 llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), SPT->getAddressSpace()); 00957 SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, SBP); 00958 00959 CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, Size.getQuantity(), 00960 Alignment.getQuantity()); 00961 } 00962 00963 void addInitialField(FieldDecl *F) { 00964 FirstField = F; 00965 LastField = F; 00966 FirstFieldOffset = RecLayout.getFieldOffset(F->getFieldIndex()); 00967 LastFieldOffset = FirstFieldOffset; 00968 LastAddedFieldIndex = F->getFieldIndex(); 00969 return; 00970 } 00971 00972 void addNextField(FieldDecl *F) { 00973 // For the most part, the following invariant will hold: 00974 // F->getFieldIndex() == LastAddedFieldIndex + 1 00975 // The one exception is that Sema won't add a copy-initializer for an 00976 // unnamed bitfield, which will show up here as a gap in the sequence. 00977 assert(F->getFieldIndex() >= LastAddedFieldIndex + 1 && 00978 "Cannot aggregate fields out of order."); 00979 LastAddedFieldIndex = F->getFieldIndex(); 00980 00981 // The 'first' and 'last' fields are chosen by offset, rather than field 00982 // index. This allows the code to support bitfields, as well as regular 00983 // fields. 00984 uint64_t FOffset = RecLayout.getFieldOffset(F->getFieldIndex()); 00985 if (FOffset < FirstFieldOffset) { 00986 FirstField = F; 00987 FirstFieldOffset = FOffset; 00988 } else if (FOffset > LastFieldOffset) { 00989 LastField = F; 00990 LastFieldOffset = FOffset; 00991 } 00992 } 00993 00994 const VarDecl *SrcRec; 00995 const ASTRecordLayout &RecLayout; 00996 FieldDecl *FirstField; 00997 FieldDecl *LastField; 00998 uint64_t FirstFieldOffset, LastFieldOffset; 00999 unsigned LastAddedFieldIndex; 01000 }; 01001 01002 class ConstructorMemcpyizer : public FieldMemcpyizer { 01003 private: 01004 01005 /// Get source argument for copy constructor. Returns null if not a copy 01006 /// constructor. 01007 static const VarDecl *getTrivialCopySource(CodeGenFunction &CGF, 01008 const CXXConstructorDecl *CD, 01009 FunctionArgList &Args) { 01010 if (CD->isCopyOrMoveConstructor() && CD->isDefaulted()) 01011 return Args[CGF.CGM.getCXXABI().getSrcArgforCopyCtor(CD, Args)]; 01012 return nullptr; 01013 } 01014 01015 // Returns true if a CXXCtorInitializer represents a member initialization 01016 // that can be rolled into a memcpy. 01017 bool isMemberInitMemcpyable(CXXCtorInitializer *MemberInit) const { 01018 if (!MemcpyableCtor) 01019 return false; 01020 FieldDecl *Field = MemberInit->getMember(); 01021 assert(Field && "No field for member init."); 01022 QualType FieldType = Field->getType(); 01023 CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit()); 01024 01025 // Bail out on non-POD, not-trivially-constructable members. 01026 if (!(CE && CE->getConstructor()->isTrivial()) && 01027 !(FieldType.isTriviallyCopyableType(CGF.getContext()) || 01028 FieldType->isReferenceType())) 01029 return false; 01030 01031 // Bail out on volatile fields. 01032 if (!isMemcpyableField(Field)) 01033 return false; 01034 01035 // Otherwise we're good. 01036 return true; 01037 } 01038 01039 public: 01040 ConstructorMemcpyizer(CodeGenFunction &CGF, const CXXConstructorDecl *CD, 01041 FunctionArgList &Args) 01042 : FieldMemcpyizer(CGF, CD->getParent(), getTrivialCopySource(CGF, CD, Args)), 01043 ConstructorDecl(CD), 01044 MemcpyableCtor(CD->isDefaulted() && 01045 CD->isCopyOrMoveConstructor() && 01046 CGF.getLangOpts().getGC() == LangOptions::NonGC), 01047 Args(Args) { } 01048 01049 void addMemberInitializer(CXXCtorInitializer *MemberInit) { 01050 if (isMemberInitMemcpyable(MemberInit)) { 01051 AggregatedInits.push_back(MemberInit); 01052 addMemcpyableField(MemberInit->getMember()); 01053 } else { 01054 emitAggregatedInits(); 01055 EmitMemberInitializer(CGF, ConstructorDecl->getParent(), MemberInit, 01056 ConstructorDecl, Args); 01057 } 01058 } 01059 01060 void emitAggregatedInits() { 01061 if (AggregatedInits.size() <= 1) { 01062 // This memcpy is too small to be worthwhile. Fall back on default 01063 // codegen. 01064 if (!AggregatedInits.empty()) { 01065 CopyingValueRepresentation CVR(CGF); 01066 EmitMemberInitializer(CGF, ConstructorDecl->getParent(), 01067 AggregatedInits[0], ConstructorDecl, Args); 01068 } 01069 reset(); 01070 return; 01071 } 01072 01073 pushEHDestructors(); 01074 emitMemcpy(); 01075 AggregatedInits.clear(); 01076 } 01077 01078 void pushEHDestructors() { 01079 llvm::Value *ThisPtr = CGF.LoadCXXThis(); 01080 QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl); 01081 LValue LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy); 01082 01083 for (unsigned i = 0; i < AggregatedInits.size(); ++i) { 01084 QualType FieldType = AggregatedInits[i]->getMember()->getType(); 01085 QualType::DestructionKind dtorKind = FieldType.isDestructedType(); 01086 if (CGF.needsEHCleanup(dtorKind)) 01087 CGF.pushEHDestroy(dtorKind, LHS.getAddress(), FieldType); 01088 } 01089 } 01090 01091 void finish() { 01092 emitAggregatedInits(); 01093 } 01094 01095 private: 01096 const CXXConstructorDecl *ConstructorDecl; 01097 bool MemcpyableCtor; 01098 FunctionArgList &Args; 01099 SmallVector<CXXCtorInitializer*, 16> AggregatedInits; 01100 }; 01101 01102 class AssignmentMemcpyizer : public FieldMemcpyizer { 01103 private: 01104 01105 // Returns the memcpyable field copied by the given statement, if one 01106 // exists. Otherwise returns null. 01107 FieldDecl *getMemcpyableField(Stmt *S) { 01108 if (!AssignmentsMemcpyable) 01109 return nullptr; 01110 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(S)) { 01111 // Recognise trivial assignments. 01112 if (BO->getOpcode() != BO_Assign) 01113 return nullptr; 01114 MemberExpr *ME = dyn_cast<MemberExpr>(BO->getLHS()); 01115 if (!ME) 01116 return nullptr; 01117 FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl()); 01118 if (!Field || !isMemcpyableField(Field)) 01119 return nullptr; 01120 Stmt *RHS = BO->getRHS(); 01121 if (ImplicitCastExpr *EC = dyn_cast<ImplicitCastExpr>(RHS)) 01122 RHS = EC->getSubExpr(); 01123 if (!RHS) 01124 return nullptr; 01125 MemberExpr *ME2 = dyn_cast<MemberExpr>(RHS); 01126 if (dyn_cast<FieldDecl>(ME2->getMemberDecl()) != Field) 01127 return nullptr; 01128 return Field; 01129 } else if (CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(S)) { 01130 CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MCE->getCalleeDecl()); 01131 if (!(MD && (MD->isCopyAssignmentOperator() || 01132 MD->isMoveAssignmentOperator()) && 01133 MD->isTrivial())) 01134 return nullptr; 01135 MemberExpr *IOA = dyn_cast<MemberExpr>(MCE->getImplicitObjectArgument()); 01136 if (!IOA) 01137 return nullptr; 01138 FieldDecl *Field = dyn_cast<FieldDecl>(IOA->getMemberDecl()); 01139 if (!Field || !isMemcpyableField(Field)) 01140 return nullptr; 01141 MemberExpr *Arg0 = dyn_cast<MemberExpr>(MCE->getArg(0)); 01142 if (!Arg0 || Field != dyn_cast<FieldDecl>(Arg0->getMemberDecl())) 01143 return nullptr; 01144 return Field; 01145 } else if (CallExpr *CE = dyn_cast<CallExpr>(S)) { 01146 FunctionDecl *FD = dyn_cast<FunctionDecl>(CE->getCalleeDecl()); 01147 if (!FD || FD->getBuiltinID() != Builtin::BI__builtin_memcpy) 01148 return nullptr; 01149 Expr *DstPtr = CE->getArg(0); 01150 if (ImplicitCastExpr *DC = dyn_cast<ImplicitCastExpr>(DstPtr)) 01151 DstPtr = DC->getSubExpr(); 01152 UnaryOperator *DUO = dyn_cast<UnaryOperator>(DstPtr); 01153 if (!DUO || DUO->getOpcode() != UO_AddrOf) 01154 return nullptr; 01155 MemberExpr *ME = dyn_cast<MemberExpr>(DUO->getSubExpr()); 01156 if (!ME) 01157 return nullptr; 01158 FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl()); 01159 if (!Field || !isMemcpyableField(Field)) 01160 return nullptr; 01161 Expr *SrcPtr = CE->getArg(1); 01162 if (ImplicitCastExpr *SC = dyn_cast<ImplicitCastExpr>(SrcPtr)) 01163 SrcPtr = SC->getSubExpr(); 01164 UnaryOperator *SUO = dyn_cast<UnaryOperator>(SrcPtr); 01165 if (!SUO || SUO->getOpcode() != UO_AddrOf) 01166 return nullptr; 01167 MemberExpr *ME2 = dyn_cast<MemberExpr>(SUO->getSubExpr()); 01168 if (!ME2 || Field != dyn_cast<FieldDecl>(ME2->getMemberDecl())) 01169 return nullptr; 01170 return Field; 01171 } 01172 01173 return nullptr; 01174 } 01175 01176 bool AssignmentsMemcpyable; 01177 SmallVector<Stmt*, 16> AggregatedStmts; 01178 01179 public: 01180 01181 AssignmentMemcpyizer(CodeGenFunction &CGF, const CXXMethodDecl *AD, 01182 FunctionArgList &Args) 01183 : FieldMemcpyizer(CGF, AD->getParent(), Args[Args.size() - 1]), 01184 AssignmentsMemcpyable(CGF.getLangOpts().getGC() == LangOptions::NonGC) { 01185 assert(Args.size() == 2); 01186 } 01187 01188 void emitAssignment(Stmt *S) { 01189 FieldDecl *F = getMemcpyableField(S); 01190 if (F) { 01191 addMemcpyableField(F); 01192 AggregatedStmts.push_back(S); 01193 } else { 01194 emitAggregatedStmts(); 01195 CGF.EmitStmt(S); 01196 } 01197 } 01198 01199 void emitAggregatedStmts() { 01200 if (AggregatedStmts.size() <= 1) { 01201 if (!AggregatedStmts.empty()) { 01202 CopyingValueRepresentation CVR(CGF); 01203 CGF.EmitStmt(AggregatedStmts[0]); 01204 } 01205 reset(); 01206 } 01207 01208 emitMemcpy(); 01209 AggregatedStmts.clear(); 01210 } 01211 01212 void finish() { 01213 emitAggregatedStmts(); 01214 } 01215 }; 01216 01217 } 01218 01219 /// EmitCtorPrologue - This routine generates necessary code to initialize 01220 /// base classes and non-static data members belonging to this constructor. 01221 void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD, 01222 CXXCtorType CtorType, 01223 FunctionArgList &Args) { 01224 if (CD->isDelegatingConstructor()) 01225 return EmitDelegatingCXXConstructorCall(CD, Args); 01226 01227 const CXXRecordDecl *ClassDecl = CD->getParent(); 01228 01229 CXXConstructorDecl::init_const_iterator B = CD->init_begin(), 01230 E = CD->init_end(); 01231 01232 llvm::BasicBlock *BaseCtorContinueBB = nullptr; 01233 if (ClassDecl->getNumVBases() && 01234 !CGM.getTarget().getCXXABI().hasConstructorVariants()) { 01235 // The ABIs that don't have constructor variants need to put a branch 01236 // before the virtual base initialization code. 01237 BaseCtorContinueBB = 01238 CGM.getCXXABI().EmitCtorCompleteObjectHandler(*this, ClassDecl); 01239 assert(BaseCtorContinueBB); 01240 } 01241 01242 // Virtual base initializers first. 01243 for (; B != E && (*B)->isBaseInitializer() && (*B)->isBaseVirtual(); B++) { 01244 EmitBaseInitializer(*this, ClassDecl, *B, CtorType); 01245 } 01246 01247 if (BaseCtorContinueBB) { 01248 // Complete object handler should continue to the remaining initializers. 01249 Builder.CreateBr(BaseCtorContinueBB); 01250 EmitBlock(BaseCtorContinueBB); 01251 } 01252 01253 // Then, non-virtual base initializers. 01254 for (; B != E && (*B)->isBaseInitializer(); B++) { 01255 assert(!(*B)->isBaseVirtual()); 01256 EmitBaseInitializer(*this, ClassDecl, *B, CtorType); 01257 } 01258 01259 InitializeVTablePointers(ClassDecl); 01260 01261 // And finally, initialize class members. 01262 FieldConstructionScope FCS(*this, CXXThisValue); 01263 ConstructorMemcpyizer CM(*this, CD, Args); 01264 for (; B != E; B++) { 01265 CXXCtorInitializer *Member = (*B); 01266 assert(!Member->isBaseInitializer()); 01267 assert(Member->isAnyMemberInitializer() && 01268 "Delegating initializer on non-delegating constructor"); 01269 CM.addMemberInitializer(Member); 01270 } 01271 CM.finish(); 01272 } 01273 01274 static bool 01275 FieldHasTrivialDestructorBody(ASTContext &Context, const FieldDecl *Field); 01276 01277 static bool 01278 HasTrivialDestructorBody(ASTContext &Context, 01279 const CXXRecordDecl *BaseClassDecl, 01280 const CXXRecordDecl *MostDerivedClassDecl) 01281 { 01282 // If the destructor is trivial we don't have to check anything else. 01283 if (BaseClassDecl->hasTrivialDestructor()) 01284 return true; 01285 01286 if (!BaseClassDecl->getDestructor()->hasTrivialBody()) 01287 return false; 01288 01289 // Check fields. 01290 for (const auto *Field : BaseClassDecl->fields()) 01291 if (!FieldHasTrivialDestructorBody(Context, Field)) 01292 return false; 01293 01294 // Check non-virtual bases. 01295 for (const auto &I : BaseClassDecl->bases()) { 01296 if (I.isVirtual()) 01297 continue; 01298 01299 const CXXRecordDecl *NonVirtualBase = 01300 cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl()); 01301 if (!HasTrivialDestructorBody(Context, NonVirtualBase, 01302 MostDerivedClassDecl)) 01303 return false; 01304 } 01305 01306 if (BaseClassDecl == MostDerivedClassDecl) { 01307 // Check virtual bases. 01308 for (const auto &I : BaseClassDecl->vbases()) { 01309 const CXXRecordDecl *VirtualBase = 01310 cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl()); 01311 if (!HasTrivialDestructorBody(Context, VirtualBase, 01312 MostDerivedClassDecl)) 01313 return false; 01314 } 01315 } 01316 01317 return true; 01318 } 01319 01320 static bool 01321 FieldHasTrivialDestructorBody(ASTContext &Context, 01322 const FieldDecl *Field) 01323 { 01324 QualType FieldBaseElementType = Context.getBaseElementType(Field->getType()); 01325 01326 const RecordType *RT = FieldBaseElementType->getAs<RecordType>(); 01327 if (!RT) 01328 return true; 01329 01330 CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl()); 01331 return HasTrivialDestructorBody(Context, FieldClassDecl, FieldClassDecl); 01332 } 01333 01334 /// CanSkipVTablePointerInitialization - Check whether we need to initialize 01335 /// any vtable pointers before calling this destructor. 01336 static bool CanSkipVTablePointerInitialization(ASTContext &Context, 01337 const CXXDestructorDecl *Dtor) { 01338 if (!Dtor->hasTrivialBody()) 01339 return false; 01340 01341 // Check the fields. 01342 const CXXRecordDecl *ClassDecl = Dtor->getParent(); 01343 for (const auto *Field : ClassDecl->fields()) 01344 if (!FieldHasTrivialDestructorBody(Context, Field)) 01345 return false; 01346 01347 return true; 01348 } 01349 01350 /// EmitDestructorBody - Emits the body of the current destructor. 01351 void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) { 01352 const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CurGD.getDecl()); 01353 CXXDtorType DtorType = CurGD.getDtorType(); 01354 01355 // The call to operator delete in a deleting destructor happens 01356 // outside of the function-try-block, which means it's always 01357 // possible to delegate the destructor body to the complete 01358 // destructor. Do so. 01359 if (DtorType == Dtor_Deleting) { 01360 EnterDtorCleanups(Dtor, Dtor_Deleting); 01361 EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false, 01362 /*Delegating=*/false, LoadCXXThis()); 01363 PopCleanupBlock(); 01364 return; 01365 } 01366 01367 Stmt *Body = Dtor->getBody(); 01368 01369 // If the body is a function-try-block, enter the try before 01370 // anything else. 01371 bool isTryBody = (Body && isa<CXXTryStmt>(Body)); 01372 if (isTryBody) 01373 EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true); 01374 EmitAsanPrologueOrEpilogue(false); 01375 01376 // Enter the epilogue cleanups. 01377 RunCleanupsScope DtorEpilogue(*this); 01378 01379 // If this is the complete variant, just invoke the base variant; 01380 // the epilogue will destruct the virtual bases. But we can't do 01381 // this optimization if the body is a function-try-block, because 01382 // we'd introduce *two* handler blocks. In the Microsoft ABI, we 01383 // always delegate because we might not have a definition in this TU. 01384 switch (DtorType) { 01385 case Dtor_Comdat: 01386 llvm_unreachable("not expecting a COMDAT"); 01387 01388 case Dtor_Deleting: llvm_unreachable("already handled deleting case"); 01389 01390 case Dtor_Complete: 01391 assert((Body || getTarget().getCXXABI().isMicrosoft()) && 01392 "can't emit a dtor without a body for non-Microsoft ABIs"); 01393 01394 // Enter the cleanup scopes for virtual bases. 01395 EnterDtorCleanups(Dtor, Dtor_Complete); 01396 01397 if (!isTryBody) { 01398 EmitCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false, 01399 /*Delegating=*/false, LoadCXXThis()); 01400 break; 01401 } 01402 // Fallthrough: act like we're in the base variant. 01403 01404 case Dtor_Base: 01405 assert(Body); 01406 01407 RegionCounter Cnt = getPGORegionCounter(Body); 01408 Cnt.beginRegion(Builder); 01409 01410 // Enter the cleanup scopes for fields and non-virtual bases. 01411 EnterDtorCleanups(Dtor, Dtor_Base); 01412 01413 // Initialize the vtable pointers before entering the body. 01414 if (!CanSkipVTablePointerInitialization(getContext(), Dtor)) 01415 InitializeVTablePointers(Dtor->getParent()); 01416 01417 if (isTryBody) 01418 EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock()); 01419 else if (Body) 01420 EmitStmt(Body); 01421 else { 01422 assert(Dtor->isImplicit() && "bodyless dtor not implicit"); 01423 // nothing to do besides what's in the epilogue 01424 } 01425 // -fapple-kext must inline any call to this dtor into 01426 // the caller's body. 01427 if (getLangOpts().AppleKext) 01428 CurFn->addFnAttr(llvm::Attribute::AlwaysInline); 01429 break; 01430 } 01431 01432 // Jump out through the epilogue cleanups. 01433 DtorEpilogue.ForceCleanup(); 01434 01435 // Exit the try if applicable. 01436 if (isTryBody) 01437 ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true); 01438 } 01439 01440 void CodeGenFunction::emitImplicitAssignmentOperatorBody(FunctionArgList &Args) { 01441 const CXXMethodDecl *AssignOp = cast<CXXMethodDecl>(CurGD.getDecl()); 01442 const Stmt *RootS = AssignOp->getBody(); 01443 assert(isa<CompoundStmt>(RootS) && 01444 "Body of an implicit assignment operator should be compound stmt."); 01445 const CompoundStmt *RootCS = cast<CompoundStmt>(RootS); 01446 01447 LexicalScope Scope(*this, RootCS->getSourceRange()); 01448 01449 AssignmentMemcpyizer AM(*this, AssignOp, Args); 01450 for (auto *I : RootCS->body()) 01451 AM.emitAssignment(I); 01452 AM.finish(); 01453 } 01454 01455 namespace { 01456 /// Call the operator delete associated with the current destructor. 01457 struct CallDtorDelete : EHScopeStack::Cleanup { 01458 CallDtorDelete() {} 01459 01460 void Emit(CodeGenFunction &CGF, Flags flags) override { 01461 const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl); 01462 const CXXRecordDecl *ClassDecl = Dtor->getParent(); 01463 CGF.EmitDeleteCall(Dtor->getOperatorDelete(), CGF.LoadCXXThis(), 01464 CGF.getContext().getTagDeclType(ClassDecl)); 01465 } 01466 }; 01467 01468 struct CallDtorDeleteConditional : EHScopeStack::Cleanup { 01469 llvm::Value *ShouldDeleteCondition; 01470 public: 01471 CallDtorDeleteConditional(llvm::Value *ShouldDeleteCondition) 01472 : ShouldDeleteCondition(ShouldDeleteCondition) { 01473 assert(ShouldDeleteCondition != nullptr); 01474 } 01475 01476 void Emit(CodeGenFunction &CGF, Flags flags) override { 01477 llvm::BasicBlock *callDeleteBB = CGF.createBasicBlock("dtor.call_delete"); 01478 llvm::BasicBlock *continueBB = CGF.createBasicBlock("dtor.continue"); 01479 llvm::Value *ShouldCallDelete 01480 = CGF.Builder.CreateIsNull(ShouldDeleteCondition); 01481 CGF.Builder.CreateCondBr(ShouldCallDelete, continueBB, callDeleteBB); 01482 01483 CGF.EmitBlock(callDeleteBB); 01484 const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl); 01485 const CXXRecordDecl *ClassDecl = Dtor->getParent(); 01486 CGF.EmitDeleteCall(Dtor->getOperatorDelete(), CGF.LoadCXXThis(), 01487 CGF.getContext().getTagDeclType(ClassDecl)); 01488 CGF.Builder.CreateBr(continueBB); 01489 01490 CGF.EmitBlock(continueBB); 01491 } 01492 }; 01493 01494 class DestroyField : public EHScopeStack::Cleanup { 01495 const FieldDecl *field; 01496 CodeGenFunction::Destroyer *destroyer; 01497 bool useEHCleanupForArray; 01498 01499 public: 01500 DestroyField(const FieldDecl *field, CodeGenFunction::Destroyer *destroyer, 01501 bool useEHCleanupForArray) 01502 : field(field), destroyer(destroyer), 01503 useEHCleanupForArray(useEHCleanupForArray) {} 01504 01505 void Emit(CodeGenFunction &CGF, Flags flags) override { 01506 // Find the address of the field. 01507 llvm::Value *thisValue = CGF.LoadCXXThis(); 01508 QualType RecordTy = CGF.getContext().getTagDeclType(field->getParent()); 01509 LValue ThisLV = CGF.MakeAddrLValue(thisValue, RecordTy); 01510 LValue LV = CGF.EmitLValueForField(ThisLV, field); 01511 assert(LV.isSimple()); 01512 01513 CGF.emitDestroy(LV.getAddress(), field->getType(), destroyer, 01514 flags.isForNormalCleanup() && useEHCleanupForArray); 01515 } 01516 }; 01517 } 01518 01519 /// \brief Emit all code that comes at the end of class's 01520 /// destructor. This is to call destructors on members and base classes 01521 /// in reverse order of their construction. 01522 void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD, 01523 CXXDtorType DtorType) { 01524 assert((!DD->isTrivial() || DD->hasAttr<DLLExportAttr>()) && 01525 "Should not emit dtor epilogue for non-exported trivial dtor!"); 01526 01527 // The deleting-destructor phase just needs to call the appropriate 01528 // operator delete that Sema picked up. 01529 if (DtorType == Dtor_Deleting) { 01530 assert(DD->getOperatorDelete() && 01531 "operator delete missing - EnterDtorCleanups"); 01532 if (CXXStructorImplicitParamValue) { 01533 // If there is an implicit param to the deleting dtor, it's a boolean 01534 // telling whether we should call delete at the end of the dtor. 01535 EHStack.pushCleanup<CallDtorDeleteConditional>( 01536 NormalAndEHCleanup, CXXStructorImplicitParamValue); 01537 } else { 01538 EHStack.pushCleanup<CallDtorDelete>(NormalAndEHCleanup); 01539 } 01540 return; 01541 } 01542 01543 const CXXRecordDecl *ClassDecl = DD->getParent(); 01544 01545 // Unions have no bases and do not call field destructors. 01546 if (ClassDecl->isUnion()) 01547 return; 01548 01549 // The complete-destructor phase just destructs all the virtual bases. 01550 if (DtorType == Dtor_Complete) { 01551 01552 // We push them in the forward order so that they'll be popped in 01553 // the reverse order. 01554 for (const auto &Base : ClassDecl->vbases()) { 01555 CXXRecordDecl *BaseClassDecl 01556 = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl()); 01557 01558 // Ignore trivial destructors. 01559 if (BaseClassDecl->hasTrivialDestructor()) 01560 continue; 01561 01562 EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup, 01563 BaseClassDecl, 01564 /*BaseIsVirtual*/ true); 01565 } 01566 01567 return; 01568 } 01569 01570 assert(DtorType == Dtor_Base); 01571 01572 // Destroy non-virtual bases. 01573 for (const auto &Base : ClassDecl->bases()) { 01574 // Ignore virtual bases. 01575 if (Base.isVirtual()) 01576 continue; 01577 01578 CXXRecordDecl *BaseClassDecl = Base.getType()->getAsCXXRecordDecl(); 01579 01580 // Ignore trivial destructors. 01581 if (BaseClassDecl->hasTrivialDestructor()) 01582 continue; 01583 01584 EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup, 01585 BaseClassDecl, 01586 /*BaseIsVirtual*/ false); 01587 } 01588 01589 // Destroy direct fields. 01590 for (const auto *Field : ClassDecl->fields()) { 01591 QualType type = Field->getType(); 01592 QualType::DestructionKind dtorKind = type.isDestructedType(); 01593 if (!dtorKind) continue; 01594 01595 // Anonymous union members do not have their destructors called. 01596 const RecordType *RT = type->getAsUnionType(); 01597 if (RT && RT->getDecl()->isAnonymousStructOrUnion()) continue; 01598 01599 CleanupKind cleanupKind = getCleanupKind(dtorKind); 01600 EHStack.pushCleanup<DestroyField>(cleanupKind, Field, 01601 getDestroyer(dtorKind), 01602 cleanupKind & EHCleanup); 01603 } 01604 } 01605 01606 /// EmitCXXAggrConstructorCall - Emit a loop to call a particular 01607 /// constructor for each of several members of an array. 01608 /// 01609 /// \param ctor the constructor to call for each element 01610 /// \param arrayType the type of the array to initialize 01611 /// \param arrayBegin an arrayType* 01612 /// \param zeroInitialize true if each element should be 01613 /// zero-initialized before it is constructed 01614 void CodeGenFunction::EmitCXXAggrConstructorCall( 01615 const CXXConstructorDecl *ctor, const ConstantArrayType *arrayType, 01616 llvm::Value *arrayBegin, const CXXConstructExpr *E, bool zeroInitialize) { 01617 QualType elementType; 01618 llvm::Value *numElements = 01619 emitArrayLength(arrayType, elementType, arrayBegin); 01620 01621 EmitCXXAggrConstructorCall(ctor, numElements, arrayBegin, E, zeroInitialize); 01622 } 01623 01624 /// EmitCXXAggrConstructorCall - Emit a loop to call a particular 01625 /// constructor for each of several members of an array. 01626 /// 01627 /// \param ctor the constructor to call for each element 01628 /// \param numElements the number of elements in the array; 01629 /// may be zero 01630 /// \param arrayBegin a T*, where T is the type constructed by ctor 01631 /// \param zeroInitialize true if each element should be 01632 /// zero-initialized before it is constructed 01633 void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor, 01634 llvm::Value *numElements, 01635 llvm::Value *arrayBegin, 01636 const CXXConstructExpr *E, 01637 bool zeroInitialize) { 01638 01639 // It's legal for numElements to be zero. This can happen both 01640 // dynamically, because x can be zero in 'new A[x]', and statically, 01641 // because of GCC extensions that permit zero-length arrays. There 01642 // are probably legitimate places where we could assume that this 01643 // doesn't happen, but it's not clear that it's worth it. 01644 llvm::BranchInst *zeroCheckBranch = nullptr; 01645 01646 // Optimize for a constant count. 01647 llvm::ConstantInt *constantCount 01648 = dyn_cast<llvm::ConstantInt>(numElements); 01649 if (constantCount) { 01650 // Just skip out if the constant count is zero. 01651 if (constantCount->isZero()) return; 01652 01653 // Otherwise, emit the check. 01654 } else { 01655 llvm::BasicBlock *loopBB = createBasicBlock("new.ctorloop"); 01656 llvm::Value *iszero = Builder.CreateIsNull(numElements, "isempty"); 01657 zeroCheckBranch = Builder.CreateCondBr(iszero, loopBB, loopBB); 01658 EmitBlock(loopBB); 01659 } 01660 01661 // Find the end of the array. 01662 llvm::Value *arrayEnd = Builder.CreateInBoundsGEP(arrayBegin, numElements, 01663 "arrayctor.end"); 01664 01665 // Enter the loop, setting up a phi for the current location to initialize. 01666 llvm::BasicBlock *entryBB = Builder.GetInsertBlock(); 01667 llvm::BasicBlock *loopBB = createBasicBlock("arrayctor.loop"); 01668 EmitBlock(loopBB); 01669 llvm::PHINode *cur = Builder.CreatePHI(arrayBegin->getType(), 2, 01670 "arrayctor.cur"); 01671 cur->addIncoming(arrayBegin, entryBB); 01672 01673 // Inside the loop body, emit the constructor call on the array element. 01674 01675 QualType type = getContext().getTypeDeclType(ctor->getParent()); 01676 01677 // Zero initialize the storage, if requested. 01678 if (zeroInitialize) 01679 EmitNullInitialization(cur, type); 01680 01681 // C++ [class.temporary]p4: 01682 // There are two contexts in which temporaries are destroyed at a different 01683 // point than the end of the full-expression. The first context is when a 01684 // default constructor is called to initialize an element of an array. 01685 // If the constructor has one or more default arguments, the destruction of 01686 // every temporary created in a default argument expression is sequenced 01687 // before the construction of the next array element, if any. 01688 01689 { 01690 RunCleanupsScope Scope(*this); 01691 01692 // Evaluate the constructor and its arguments in a regular 01693 // partial-destroy cleanup. 01694 if (getLangOpts().Exceptions && 01695 !ctor->getParent()->hasTrivialDestructor()) { 01696 Destroyer *destroyer = destroyCXXObject; 01697 pushRegularPartialArrayCleanup(arrayBegin, cur, type, *destroyer); 01698 } 01699 01700 EmitCXXConstructorCall(ctor, Ctor_Complete, /*ForVirtualBase=*/false, 01701 /*Delegating=*/false, cur, E); 01702 } 01703 01704 // Go to the next element. 01705 llvm::Value *next = 01706 Builder.CreateInBoundsGEP(cur, llvm::ConstantInt::get(SizeTy, 1), 01707 "arrayctor.next"); 01708 cur->addIncoming(next, Builder.GetInsertBlock()); 01709 01710 // Check whether that's the end of the loop. 01711 llvm::Value *done = Builder.CreateICmpEQ(next, arrayEnd, "arrayctor.done"); 01712 llvm::BasicBlock *contBB = createBasicBlock("arrayctor.cont"); 01713 Builder.CreateCondBr(done, contBB, loopBB); 01714 01715 // Patch the earlier check to skip over the loop. 01716 if (zeroCheckBranch) zeroCheckBranch->setSuccessor(0, contBB); 01717 01718 EmitBlock(contBB); 01719 } 01720 01721 void CodeGenFunction::destroyCXXObject(CodeGenFunction &CGF, 01722 llvm::Value *addr, 01723 QualType type) { 01724 const RecordType *rtype = type->castAs<RecordType>(); 01725 const CXXRecordDecl *record = cast<CXXRecordDecl>(rtype->getDecl()); 01726 const CXXDestructorDecl *dtor = record->getDestructor(); 01727 assert(!dtor->isTrivial()); 01728 CGF.EmitCXXDestructorCall(dtor, Dtor_Complete, /*for vbase*/ false, 01729 /*Delegating=*/false, addr); 01730 } 01731 01732 void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D, 01733 CXXCtorType Type, 01734 bool ForVirtualBase, 01735 bool Delegating, llvm::Value *This, 01736 const CXXConstructExpr *E) { 01737 // If this is a trivial constructor, just emit what's needed. 01738 if (D->isTrivial()) { 01739 if (E->getNumArgs() == 0) { 01740 // Trivial default constructor, no codegen required. 01741 assert(D->isDefaultConstructor() && 01742 "trivial 0-arg ctor not a default ctor"); 01743 return; 01744 } 01745 01746 assert(E->getNumArgs() == 1 && "unexpected argcount for trivial ctor"); 01747 assert(D->isCopyOrMoveConstructor() && 01748 "trivial 1-arg ctor not a copy/move ctor"); 01749 01750 const Expr *Arg = E->getArg(0); 01751 QualType Ty = Arg->getType(); 01752 llvm::Value *Src = EmitLValue(Arg).getAddress(); 01753 EmitAggregateCopy(This, Src, Ty); 01754 return; 01755 } 01756 01757 // C++11 [class.mfct.non-static]p2: 01758 // If a non-static member function of a class X is called for an object that 01759 // is not of type X, or of a type derived from X, the behavior is undefined. 01760 // FIXME: Provide a source location here. 01761 EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall, SourceLocation(), This, 01762 getContext().getRecordType(D->getParent())); 01763 01764 CallArgList Args; 01765 01766 // Push the this ptr. 01767 Args.add(RValue::get(This), D->getThisType(getContext())); 01768 01769 // Add the rest of the user-supplied arguments. 01770 const FunctionProtoType *FPT = D->getType()->castAs<FunctionProtoType>(); 01771 EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end(), E->getConstructor()); 01772 01773 // Insert any ABI-specific implicit constructor arguments. 01774 unsigned ExtraArgs = CGM.getCXXABI().addImplicitConstructorArgs( 01775 *this, D, Type, ForVirtualBase, Delegating, Args); 01776 01777 // Emit the call. 01778 llvm::Value *Callee = CGM.getAddrOfCXXStructor(D, getFromCtorType(Type)); 01779 const CGFunctionInfo &Info = 01780 CGM.getTypes().arrangeCXXConstructorCall(Args, D, Type, ExtraArgs); 01781 EmitCall(Info, Callee, ReturnValueSlot(), Args, D); 01782 } 01783 01784 void 01785 CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D, 01786 llvm::Value *This, llvm::Value *Src, 01787 const CXXConstructExpr *E) { 01788 if (D->isTrivial()) { 01789 assert(E->getNumArgs() == 1 && "unexpected argcount for trivial ctor"); 01790 assert(D->isCopyOrMoveConstructor() && 01791 "trivial 1-arg ctor not a copy/move ctor"); 01792 EmitAggregateCopy(This, Src, E->arg_begin()->getType()); 01793 return; 01794 } 01795 llvm::Value *Callee = CGM.getAddrOfCXXStructor(D, StructorType::Complete); 01796 assert(D->isInstance() && 01797 "Trying to emit a member call expr on a static method!"); 01798 01799 const FunctionProtoType *FPT = D->getType()->castAs<FunctionProtoType>(); 01800 01801 CallArgList Args; 01802 01803 // Push the this ptr. 01804 Args.add(RValue::get(This), D->getThisType(getContext())); 01805 01806 // Push the src ptr. 01807 QualType QT = *(FPT->param_type_begin()); 01808 llvm::Type *t = CGM.getTypes().ConvertType(QT); 01809 Src = Builder.CreateBitCast(Src, t); 01810 Args.add(RValue::get(Src), QT); 01811 01812 // Skip over first argument (Src). 01813 EmitCallArgs(Args, FPT, E->arg_begin() + 1, E->arg_end(), E->getConstructor(), 01814 /*ParamsToSkip*/ 1); 01815 01816 EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, RequiredArgs::All), 01817 Callee, ReturnValueSlot(), Args, D); 01818 } 01819 01820 void 01821 CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor, 01822 CXXCtorType CtorType, 01823 const FunctionArgList &Args, 01824 SourceLocation Loc) { 01825 CallArgList DelegateArgs; 01826 01827 FunctionArgList::const_iterator I = Args.begin(), E = Args.end(); 01828 assert(I != E && "no parameters to constructor"); 01829 01830 // this 01831 DelegateArgs.add(RValue::get(LoadCXXThis()), (*I)->getType()); 01832 ++I; 01833 01834 // vtt 01835 if (llvm::Value *VTT = GetVTTParameter(GlobalDecl(Ctor, CtorType), 01836 /*ForVirtualBase=*/false, 01837 /*Delegating=*/true)) { 01838 QualType VoidPP = getContext().getPointerType(getContext().VoidPtrTy); 01839 DelegateArgs.add(RValue::get(VTT), VoidPP); 01840 01841 if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) { 01842 assert(I != E && "cannot skip vtt parameter, already done with args"); 01843 assert((*I)->getType() == VoidPP && "skipping parameter not of vtt type"); 01844 ++I; 01845 } 01846 } 01847 01848 // Explicit arguments. 01849 for (; I != E; ++I) { 01850 const VarDecl *param = *I; 01851 // FIXME: per-argument source location 01852 EmitDelegateCallArg(DelegateArgs, param, Loc); 01853 } 01854 01855 llvm::Value *Callee = 01856 CGM.getAddrOfCXXStructor(Ctor, getFromCtorType(CtorType)); 01857 EmitCall(CGM.getTypes() 01858 .arrangeCXXStructorDeclaration(Ctor, getFromCtorType(CtorType)), 01859 Callee, ReturnValueSlot(), DelegateArgs, Ctor); 01860 } 01861 01862 namespace { 01863 struct CallDelegatingCtorDtor : EHScopeStack::Cleanup { 01864 const CXXDestructorDecl *Dtor; 01865 llvm::Value *Addr; 01866 CXXDtorType Type; 01867 01868 CallDelegatingCtorDtor(const CXXDestructorDecl *D, llvm::Value *Addr, 01869 CXXDtorType Type) 01870 : Dtor(D), Addr(Addr), Type(Type) {} 01871 01872 void Emit(CodeGenFunction &CGF, Flags flags) override { 01873 CGF.EmitCXXDestructorCall(Dtor, Type, /*ForVirtualBase=*/false, 01874 /*Delegating=*/true, Addr); 01875 } 01876 }; 01877 } 01878 01879 void 01880 CodeGenFunction::EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor, 01881 const FunctionArgList &Args) { 01882 assert(Ctor->isDelegatingConstructor()); 01883 01884 llvm::Value *ThisPtr = LoadCXXThis(); 01885 01886 QualType Ty = getContext().getTagDeclType(Ctor->getParent()); 01887 CharUnits Alignment = getContext().getTypeAlignInChars(Ty); 01888 AggValueSlot AggSlot = 01889 AggValueSlot::forAddr(ThisPtr, Alignment, Qualifiers(), 01890 AggValueSlot::IsDestructed, 01891 AggValueSlot::DoesNotNeedGCBarriers, 01892 AggValueSlot::IsNotAliased); 01893 01894 EmitAggExpr(Ctor->init_begin()[0]->getInit(), AggSlot); 01895 01896 const CXXRecordDecl *ClassDecl = Ctor->getParent(); 01897 if (CGM.getLangOpts().Exceptions && !ClassDecl->hasTrivialDestructor()) { 01898 CXXDtorType Type = 01899 CurGD.getCtorType() == Ctor_Complete ? Dtor_Complete : Dtor_Base; 01900 01901 EHStack.pushCleanup<CallDelegatingCtorDtor>(EHCleanup, 01902 ClassDecl->getDestructor(), 01903 ThisPtr, Type); 01904 } 01905 } 01906 01907 void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD, 01908 CXXDtorType Type, 01909 bool ForVirtualBase, 01910 bool Delegating, 01911 llvm::Value *This) { 01912 CGM.getCXXABI().EmitDestructorCall(*this, DD, Type, ForVirtualBase, 01913 Delegating, This); 01914 } 01915 01916 namespace { 01917 struct CallLocalDtor : EHScopeStack::Cleanup { 01918 const CXXDestructorDecl *Dtor; 01919 llvm::Value *Addr; 01920 01921 CallLocalDtor(const CXXDestructorDecl *D, llvm::Value *Addr) 01922 : Dtor(D), Addr(Addr) {} 01923 01924 void Emit(CodeGenFunction &CGF, Flags flags) override { 01925 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, 01926 /*ForVirtualBase=*/false, 01927 /*Delegating=*/false, Addr); 01928 } 01929 }; 01930 } 01931 01932 void CodeGenFunction::PushDestructorCleanup(const CXXDestructorDecl *D, 01933 llvm::Value *Addr) { 01934 EHStack.pushCleanup<CallLocalDtor>(NormalAndEHCleanup, D, Addr); 01935 } 01936 01937 void CodeGenFunction::PushDestructorCleanup(QualType T, llvm::Value *Addr) { 01938 CXXRecordDecl *ClassDecl = T->getAsCXXRecordDecl(); 01939 if (!ClassDecl) return; 01940 if (ClassDecl->hasTrivialDestructor()) return; 01941 01942 const CXXDestructorDecl *D = ClassDecl->getDestructor(); 01943 assert(D && D->isUsed() && "destructor not marked as used!"); 01944 PushDestructorCleanup(D, Addr); 01945 } 01946 01947 void 01948 CodeGenFunction::InitializeVTablePointer(BaseSubobject Base, 01949 const CXXRecordDecl *NearestVBase, 01950 CharUnits OffsetFromNearestVBase, 01951 const CXXRecordDecl *VTableClass) { 01952 // Compute the address point. 01953 bool NeedsVirtualOffset; 01954 llvm::Value *VTableAddressPoint = 01955 CGM.getCXXABI().getVTableAddressPointInStructor( 01956 *this, VTableClass, Base, NearestVBase, NeedsVirtualOffset); 01957 if (!VTableAddressPoint) 01958 return; 01959 01960 // Compute where to store the address point. 01961 llvm::Value *VirtualOffset = nullptr; 01962 CharUnits NonVirtualOffset = CharUnits::Zero(); 01963 01964 if (NeedsVirtualOffset) { 01965 // We need to use the virtual base offset offset because the virtual base 01966 // might have a different offset in the most derived class. 01967 VirtualOffset = CGM.getCXXABI().GetVirtualBaseClassOffset(*this, 01968 LoadCXXThis(), 01969 VTableClass, 01970 NearestVBase); 01971 NonVirtualOffset = OffsetFromNearestVBase; 01972 } else { 01973 // We can just use the base offset in the complete class. 01974 NonVirtualOffset = Base.getBaseOffset(); 01975 } 01976 01977 // Apply the offsets. 01978 llvm::Value *VTableField = LoadCXXThis(); 01979 01980 if (!NonVirtualOffset.isZero() || VirtualOffset) 01981 VTableField = ApplyNonVirtualAndVirtualOffset(*this, VTableField, 01982 NonVirtualOffset, 01983 VirtualOffset); 01984 01985 // Finally, store the address point. 01986 llvm::Type *AddressPointPtrTy = 01987 VTableAddressPoint->getType()->getPointerTo(); 01988 VTableField = Builder.CreateBitCast(VTableField, AddressPointPtrTy); 01989 llvm::StoreInst *Store = Builder.CreateStore(VTableAddressPoint, VTableField); 01990 CGM.DecorateInstruction(Store, CGM.getTBAAInfoForVTablePtr()); 01991 } 01992 01993 void 01994 CodeGenFunction::InitializeVTablePointers(BaseSubobject Base, 01995 const CXXRecordDecl *NearestVBase, 01996 CharUnits OffsetFromNearestVBase, 01997 bool BaseIsNonVirtualPrimaryBase, 01998 const CXXRecordDecl *VTableClass, 01999 VisitedVirtualBasesSetTy& VBases) { 02000 // If this base is a non-virtual primary base the address point has already 02001 // been set. 02002 if (!BaseIsNonVirtualPrimaryBase) { 02003 // Initialize the vtable pointer for this base. 02004 InitializeVTablePointer(Base, NearestVBase, OffsetFromNearestVBase, 02005 VTableClass); 02006 } 02007 02008 const CXXRecordDecl *RD = Base.getBase(); 02009 02010 // Traverse bases. 02011 for (const auto &I : RD->bases()) { 02012 CXXRecordDecl *BaseDecl 02013 = cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl()); 02014 02015 // Ignore classes without a vtable. 02016 if (!BaseDecl->isDynamicClass()) 02017 continue; 02018 02019 CharUnits BaseOffset; 02020 CharUnits BaseOffsetFromNearestVBase; 02021 bool BaseDeclIsNonVirtualPrimaryBase; 02022 02023 if (I.isVirtual()) { 02024 // Check if we've visited this virtual base before. 02025 if (!VBases.insert(BaseDecl)) 02026 continue; 02027 02028 const ASTRecordLayout &Layout = 02029 getContext().getASTRecordLayout(VTableClass); 02030 02031 BaseOffset = Layout.getVBaseClassOffset(BaseDecl); 02032 BaseOffsetFromNearestVBase = CharUnits::Zero(); 02033 BaseDeclIsNonVirtualPrimaryBase = false; 02034 } else { 02035 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); 02036 02037 BaseOffset = Base.getBaseOffset() + Layout.getBaseClassOffset(BaseDecl); 02038 BaseOffsetFromNearestVBase = 02039 OffsetFromNearestVBase + Layout.getBaseClassOffset(BaseDecl); 02040 BaseDeclIsNonVirtualPrimaryBase = Layout.getPrimaryBase() == BaseDecl; 02041 } 02042 02043 InitializeVTablePointers(BaseSubobject(BaseDecl, BaseOffset), 02044 I.isVirtual() ? BaseDecl : NearestVBase, 02045 BaseOffsetFromNearestVBase, 02046 BaseDeclIsNonVirtualPrimaryBase, 02047 VTableClass, VBases); 02048 } 02049 } 02050 02051 void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) { 02052 // Ignore classes without a vtable. 02053 if (!RD->isDynamicClass()) 02054 return; 02055 02056 // Initialize the vtable pointers for this class and all of its bases. 02057 VisitedVirtualBasesSetTy VBases; 02058 InitializeVTablePointers(BaseSubobject(RD, CharUnits::Zero()), 02059 /*NearestVBase=*/nullptr, 02060 /*OffsetFromNearestVBase=*/CharUnits::Zero(), 02061 /*BaseIsNonVirtualPrimaryBase=*/false, RD, VBases); 02062 02063 if (RD->getNumVBases()) 02064 CGM.getCXXABI().initializeHiddenVirtualInheritanceMembers(*this, RD); 02065 } 02066 02067 llvm::Value *CodeGenFunction::GetVTablePtr(llvm::Value *This, 02068 llvm::Type *Ty) { 02069 llvm::Value *VTablePtrSrc = Builder.CreateBitCast(This, Ty->getPointerTo()); 02070 llvm::Instruction *VTable = Builder.CreateLoad(VTablePtrSrc, "vtable"); 02071 CGM.DecorateInstruction(VTable, CGM.getTBAAInfoForVTablePtr()); 02072 return VTable; 02073 } 02074 02075 02076 // FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do 02077 // quite what we want. 02078 static const Expr *skipNoOpCastsAndParens(const Expr *E) { 02079 while (true) { 02080 if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) { 02081 E = PE->getSubExpr(); 02082 continue; 02083 } 02084 02085 if (const CastExpr *CE = dyn_cast<CastExpr>(E)) { 02086 if (CE->getCastKind() == CK_NoOp) { 02087 E = CE->getSubExpr(); 02088 continue; 02089 } 02090 } 02091 if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { 02092 if (UO->getOpcode() == UO_Extension) { 02093 E = UO->getSubExpr(); 02094 continue; 02095 } 02096 } 02097 return E; 02098 } 02099 } 02100 02101 bool 02102 CodeGenFunction::CanDevirtualizeMemberFunctionCall(const Expr *Base, 02103 const CXXMethodDecl *MD) { 02104 // When building with -fapple-kext, all calls must go through the vtable since 02105 // the kernel linker can do runtime patching of vtables. 02106 if (getLangOpts().AppleKext) 02107 return false; 02108 02109 // If the most derived class is marked final, we know that no subclass can 02110 // override this member function and so we can devirtualize it. For example: 02111 // 02112 // struct A { virtual void f(); } 02113 // struct B final : A { }; 02114 // 02115 // void f(B *b) { 02116 // b->f(); 02117 // } 02118 // 02119 const CXXRecordDecl *MostDerivedClassDecl = Base->getBestDynamicClassType(); 02120 if (MostDerivedClassDecl->hasAttr<FinalAttr>()) 02121 return true; 02122 02123 // If the member function is marked 'final', we know that it can't be 02124 // overridden and can therefore devirtualize it. 02125 if (MD->hasAttr<FinalAttr>()) 02126 return true; 02127 02128 // Similarly, if the class itself is marked 'final' it can't be overridden 02129 // and we can therefore devirtualize the member function call. 02130 if (MD->getParent()->hasAttr<FinalAttr>()) 02131 return true; 02132 02133 Base = skipNoOpCastsAndParens(Base); 02134 if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) { 02135 if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) { 02136 // This is a record decl. We know the type and can devirtualize it. 02137 return VD->getType()->isRecordType(); 02138 } 02139 02140 return false; 02141 } 02142 02143 // We can devirtualize calls on an object accessed by a class member access 02144 // expression, since by C++11 [basic.life]p6 we know that it can't refer to 02145 // a derived class object constructed in the same location. 02146 if (const MemberExpr *ME = dyn_cast<MemberExpr>(Base)) 02147 if (const ValueDecl *VD = dyn_cast<ValueDecl>(ME->getMemberDecl())) 02148 return VD->getType()->isRecordType(); 02149 02150 // We can always devirtualize calls on temporary object expressions. 02151 if (isa<CXXConstructExpr>(Base)) 02152 return true; 02153 02154 // And calls on bound temporaries. 02155 if (isa<CXXBindTemporaryExpr>(Base)) 02156 return true; 02157 02158 // Check if this is a call expr that returns a record type. 02159 if (const CallExpr *CE = dyn_cast<CallExpr>(Base)) 02160 return CE->getCallReturnType()->isRecordType(); 02161 02162 // We can't devirtualize the call. 02163 return false; 02164 } 02165 02166 llvm::Value * 02167 CodeGenFunction::EmitCXXOperatorMemberCallee(const CXXOperatorCallExpr *E, 02168 const CXXMethodDecl *MD, 02169 llvm::Value *This) { 02170 llvm::FunctionType *fnType = 02171 CGM.getTypes().GetFunctionType( 02172 CGM.getTypes().arrangeCXXMethodDeclaration(MD)); 02173 02174 if (MD->isVirtual() && !CanDevirtualizeMemberFunctionCall(E->getArg(0), MD)) 02175 return CGM.getCXXABI().getVirtualFunctionPointer(*this, MD, This, fnType); 02176 02177 return CGM.GetAddrOfFunction(MD, fnType); 02178 } 02179 02180 void CodeGenFunction::EmitForwardingCallToLambda( 02181 const CXXMethodDecl *callOperator, 02182 CallArgList &callArgs) { 02183 // Get the address of the call operator. 02184 const CGFunctionInfo &calleeFnInfo = 02185 CGM.getTypes().arrangeCXXMethodDeclaration(callOperator); 02186 llvm::Value *callee = 02187 CGM.GetAddrOfFunction(GlobalDecl(callOperator), 02188 CGM.getTypes().GetFunctionType(calleeFnInfo)); 02189 02190 // Prepare the return slot. 02191 const FunctionProtoType *FPT = 02192 callOperator->getType()->castAs<FunctionProtoType>(); 02193 QualType resultType = FPT->getReturnType(); 02194 ReturnValueSlot returnSlot; 02195 if (!resultType->isVoidType() && 02196 calleeFnInfo.getReturnInfo().getKind() == ABIArgInfo::Indirect && 02197 !hasScalarEvaluationKind(calleeFnInfo.getReturnType())) 02198 returnSlot = ReturnValueSlot(ReturnValue, resultType.isVolatileQualified()); 02199 02200 // We don't need to separately arrange the call arguments because 02201 // the call can't be variadic anyway --- it's impossible to forward 02202 // variadic arguments. 02203 02204 // Now emit our call. 02205 RValue RV = EmitCall(calleeFnInfo, callee, returnSlot, 02206 callArgs, callOperator); 02207 02208 // If necessary, copy the returned value into the slot. 02209 if (!resultType->isVoidType() && returnSlot.isNull()) 02210 EmitReturnOfRValue(RV, resultType); 02211 else 02212 EmitBranchThroughCleanup(ReturnBlock); 02213 } 02214 02215 void CodeGenFunction::EmitLambdaBlockInvokeBody() { 02216 const BlockDecl *BD = BlockInfo->getBlockDecl(); 02217 const VarDecl *variable = BD->capture_begin()->getVariable(); 02218 const CXXRecordDecl *Lambda = variable->getType()->getAsCXXRecordDecl(); 02219 02220 // Start building arguments for forwarding call 02221 CallArgList CallArgs; 02222 02223 QualType ThisType = getContext().getPointerType(getContext().getRecordType(Lambda)); 02224 llvm::Value *ThisPtr = GetAddrOfBlockDecl(variable, false); 02225 CallArgs.add(RValue::get(ThisPtr), ThisType); 02226 02227 // Add the rest of the parameters. 02228 for (auto param : BD->params()) 02229 EmitDelegateCallArg(CallArgs, param, param->getLocStart()); 02230 02231 assert(!Lambda->isGenericLambda() && 02232 "generic lambda interconversion to block not implemented"); 02233 EmitForwardingCallToLambda(Lambda->getLambdaCallOperator(), CallArgs); 02234 } 02235 02236 void CodeGenFunction::EmitLambdaToBlockPointerBody(FunctionArgList &Args) { 02237 if (cast<CXXMethodDecl>(CurCodeDecl)->isVariadic()) { 02238 // FIXME: Making this work correctly is nasty because it requires either 02239 // cloning the body of the call operator or making the call operator forward. 02240 CGM.ErrorUnsupported(CurCodeDecl, "lambda conversion to variadic function"); 02241 return; 02242 } 02243 02244 EmitFunctionBody(Args, cast<FunctionDecl>(CurGD.getDecl())->getBody()); 02245 } 02246 02247 void CodeGenFunction::EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) { 02248 const CXXRecordDecl *Lambda = MD->getParent(); 02249 02250 // Start building arguments for forwarding call 02251 CallArgList CallArgs; 02252 02253 QualType ThisType = getContext().getPointerType(getContext().getRecordType(Lambda)); 02254 llvm::Value *ThisPtr = llvm::UndefValue::get(getTypes().ConvertType(ThisType)); 02255 CallArgs.add(RValue::get(ThisPtr), ThisType); 02256 02257 // Add the rest of the parameters. 02258 for (auto Param : MD->params()) 02259 EmitDelegateCallArg(CallArgs, Param, Param->getLocStart()); 02260 02261 const CXXMethodDecl *CallOp = Lambda->getLambdaCallOperator(); 02262 // For a generic lambda, find the corresponding call operator specialization 02263 // to which the call to the static-invoker shall be forwarded. 02264 if (Lambda->isGenericLambda()) { 02265 assert(MD->isFunctionTemplateSpecialization()); 02266 const TemplateArgumentList *TAL = MD->getTemplateSpecializationArgs(); 02267 FunctionTemplateDecl *CallOpTemplate = CallOp->getDescribedFunctionTemplate(); 02268 void *InsertPos = nullptr; 02269 FunctionDecl *CorrespondingCallOpSpecialization = 02270 CallOpTemplate->findSpecialization(TAL->asArray(), InsertPos); 02271 assert(CorrespondingCallOpSpecialization); 02272 CallOp = cast<CXXMethodDecl>(CorrespondingCallOpSpecialization); 02273 } 02274 EmitForwardingCallToLambda(CallOp, CallArgs); 02275 } 02276 02277 void CodeGenFunction::EmitLambdaStaticInvokeFunction(const CXXMethodDecl *MD) { 02278 if (MD->isVariadic()) { 02279 // FIXME: Making this work correctly is nasty because it requires either 02280 // cloning the body of the call operator or making the call operator forward. 02281 CGM.ErrorUnsupported(MD, "lambda conversion to variadic function"); 02282 return; 02283 } 02284 02285 EmitLambdaDelegatingInvokeBody(MD); 02286 }