//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Aggregate Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGObjCRuntime.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                        Aggregate Expression Emitter
//===----------------------------------------------------------------------===//

namespace {
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  AggValueSlot Dest;

  /// We want to use 'dest' as the return slot except under two
  /// conditions:
  ///   - The destination slot requires garbage collection, so we
  ///     need to use the GC API.
  ///   - The destination slot is potentially aliased.
  bool shouldUseDestForReturnSlot() const {
    return !(Dest.requiresGCollection() || Dest.isPotentiallyAliased());
  }

  ReturnValueSlot getReturnValueSlot() const {
    if (!shouldUseDestForReturnSlot())
      return ReturnValueSlot();

    return ReturnValueSlot(Dest.getAddr(), Dest.isVolatile());
  }

  AggValueSlot EnsureSlot(QualType T) {
    if (!Dest.isIgnored()) return Dest;
    return CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }
  void EnsureDest(QualType T) {
    if (!Dest.isIgnored()) return;
    Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }

public:
  AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest)
    : CGF(cgf), Builder(CGF.Builder), Dest(Dest) {
  }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  /// EmitAggLoadOfLValue - Given an expression with aggregate type that
  /// represents an lvalue, this method emits the address of the lvalue,
  /// then loads the result into DestPtr.
  void EmitAggLoadOfLValue(const Expr *E);
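
  // For illustration (hypothetical example): the return-slot protocol above
  // matters for code like
  //
  //   struct S { int a[4]; };
  //   extern S g;
  //   void f() { g = makeS(); }
  //
  // The assignment destination 'g' is marked potentially aliased, so
  // getReturnValueSlot() refuses to hand it to the call; the result is
  // returned into a temporary and EmitMoveFromReturnSlot copies it over.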
  /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
  void EmitFinalDestCopy(QualType type, const LValue &src);
  void EmitFinalDestCopy(QualType type, RValue src,
                         CharUnits srcAlignment = CharUnits::Zero());
  void EmitCopy(QualType type, const AggValueSlot &dest,
                const AggValueSlot &src);

  void EmitMoveFromReturnSlot(const Expr *E, RValue Src);

  void EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
                     QualType elementType, InitListExpr *E);

  AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
    if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
      return AggValueSlot::NeedsGCBarriers;
    return AggValueSlot::DoesNotNeedGCBarriers;
  }

  bool TypeRequiresGCollection(QualType T);

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  void VisitStmt(Stmt *S) {
    CGF.ErrorUnsupported(S, "aggregate expression");
  }
  void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
  void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    Visit(GE->getResultExpr());
  }
  void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }

  // l-values.
  void VisitDeclRefExpr(DeclRefExpr *E) {
    // For aggregates, we should always be able to emit the variable
    // as an l-value unless it's a reference.  This is due to the fact
    // that we can't actually ever see a normal l2r conversion on an
    // aggregate in C++, and in C there's no language standard
    // actively preventing us from listing variables in the captures
    // list of a block.
    if (E->getDecl()->getType()->isReferenceType()) {
      if (CodeGenFunction::ConstantEmission result
            = CGF.tryEmitAsConstant(E)) {
        EmitFinalDestCopy(E->getType(), result.getReferenceLValue(CGF, E));
        return;
      }
    }

    EmitAggLoadOfLValue(E);
  }

  void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
  void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
  void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
  void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
  void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitPredefinedExpr(const PredefinedExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  // Operators.
  void VisitCastExpr(CastExpr *E);
  void VisitCallExpr(const CallExpr *E);
  void VisitStmtExpr(const StmtExpr *E);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
  void VisitBinAssign(const BinaryOperator *E);
  void VisitBinComma(const BinaryOperator *E);

  void VisitObjCMessageExpr(ObjCMessageExpr *E);
  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
  void VisitChooseExpr(const ChooseExpr *CE);
  void VisitInitListExpr(InitListExpr *E);
  void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
  void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    Visit(DAE->getExpr());
  }
  void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF);
    Visit(DIE->getExpr());
  }
  void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
  void VisitCXXConstructExpr(const CXXConstructExpr *E);
  void VisitLambdaExpr(LambdaExpr *E);
  void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E);
  void VisitExprWithCleanups(ExprWithCleanups *E);
  void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
  void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
  void VisitOpaqueValueExpr(OpaqueValueExpr *E);

  void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    if (E->isGLValue()) {
      LValue LV = CGF.EmitPseudoObjectLValue(E);
      return EmitFinalDestCopy(E->getType(), LV);
    }

    CGF.EmitPseudoObjectRValue(E, EnsureSlot(E->getType()));
  }

  void VisitVAArgExpr(VAArgExpr *E);

  void EmitInitializationToLValue(Expr *E, LValue Address);
  void EmitNullInitializationToLValue(LValue Address);
  //  case Expr::ChooseExprClass:
  void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
  void VisitAtomicExpr(AtomicExpr *E) {
    CGF.EmitAtomicExpr(E, EnsureSlot(E->getType()).getAddr());
  }
};
}  // end anonymous namespace.

//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//

/// EmitAggLoadOfLValue - Given an expression with aggregate type that
/// represents an lvalue, this method emits the address of the lvalue,
/// then loads the result into DestPtr.
void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
  LValue LV = CGF.EmitLValue(E);

  // If the type of the l-value is atomic, then do an atomic load.
  if (LV.getType()->isAtomicType()) {
    CGF.EmitAtomicLoad(LV, E->getExprLoc(), Dest);
    return;
  }

  EmitFinalDestCopy(E->getType(), LV);
}

/// \brief True if the given aggregate type requires special GC API calls.
bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
  // Only record types have members that might require garbage collection.
  const RecordType *RecordTy = T->getAs<RecordType>();
  if (!RecordTy) return false;

  // Don't mess with non-trivial C++ types.
  RecordDecl *Record = RecordTy->getDecl();
  if (isa<CXXRecordDecl>(Record) &&
      (cast<CXXRecordDecl>(Record)->hasNonTrivialCopyConstructor() ||
       !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
    return false;

  // Check whether the type has an object member.
  return Record->hasObjectMember();
}

/// \brief Perform the final move to DestPtr if for some reason
/// getReturnValueSlot() didn't use it directly.
///
/// The idea is that you do something like this:
///   RValue Result = EmitSomething(..., getReturnValueSlot());
///   EmitMoveFromReturnSlot(E, Result);
///
/// If nothing interferes, this will cause the result to be emitted
/// directly into the return value slot.  Otherwise, a final move
/// will be performed.
void AggExprEmitter::EmitMoveFromReturnSlot(const Expr *E, RValue src) {
  if (shouldUseDestForReturnSlot()) {
    // Logically, Dest.getAddr() should equal Src.getAggregateAddr().
    // The possibility of undef rvalues complicates that a lot,
    // though, so we can't really assert.
    return;
  }

  // Otherwise, copy from there to the destination.
  assert(Dest.getAddr() != src.getAggregateAddr());
  std::pair<CharUnits, CharUnits> typeInfo =
    CGF.getContext().getTypeInfoInChars(E->getType());
  EmitFinalDestCopy(E->getType(), src, typeInfo.second);
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src,
                                       CharUnits srcAlign) {
  assert(src.isAggregate() && "value must be aggregate value!");
  LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddr(), type, srcAlign);
  EmitFinalDestCopy(type, srcLV);
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src) {
  // If Dest is ignored, then we're evaluating an aggregate expression
  // in a context that doesn't care about the result.  Note that loads
  // from volatile l-values force the existence of a non-ignored
  // destination.
  if (Dest.isIgnored())
    return;

  AggValueSlot srcAgg =
    AggValueSlot::forLValue(src, AggValueSlot::IsDestructed,
                            needsGC(type), AggValueSlot::IsAliased);
  EmitCopy(type, Dest, srcAgg);
}

/// Perform a copy from the source into the destination.
///
/// \param type - the type of the aggregate being copied; qualifiers are
///   ignored
void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
                              const AggValueSlot &src) {
  if (dest.requiresGCollection()) {
    CharUnits sz = CGF.getContext().getTypeSizeInChars(type);
    llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
    CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
                                                      dest.getAddr(),
                                                      src.getAddr(),
                                                      size);
    return;
  }

  // If the result of the assignment is used, copy the LHS there also.
  // It's volatile if either side is.  Use the minimum alignment of
  // the two sides.
  CGF.EmitAggregateCopy(dest.getAddr(), src.getAddr(), type,
                        dest.isVolatile() || src.isVolatile(),
                        std::min(dest.getAlignment(), src.getAlignment()));
}

/// \brief Emit the initializer for a std::initializer_list initialized with a
/// real initializer list.
void
AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
  // Emit an array containing the elements.  The array is externally destructed
  // if the std::initializer_list object is.
  ASTContext &Ctx = CGF.getContext();
  LValue Array = CGF.EmitLValue(E->getSubExpr());
  assert(Array.isSimple() && "initializer_list array not a simple lvalue");
  llvm::Value *ArrayPtr = Array.getAddress();

  const ConstantArrayType *ArrayType =
      Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
  assert(ArrayType && "std::initializer_list constructed from non-array");

  // FIXME: Perform the checks on the field types in SemaInit.
  RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
  RecordDecl::field_iterator Field = Record->field_begin();
  if (Field == Record->field_end()) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  // Start pointer.
  if (!Field->getType()->isPointerType() ||
      !Ctx.hasSameType(Field->getType()->getPointeeType(),
                       ArrayType->getElementType())) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  AggValueSlot Dest = EnsureSlot(E->getType());
  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddr(), E->getType(),
                                     Dest.getAlignment());
  LValue Start = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
  llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
  llvm::Value *IdxStart[] = { Zero, Zero };
  llvm::Value *ArrayStart =
      Builder.CreateInBoundsGEP(ArrayPtr, IdxStart, "arraystart");
  CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start);
  ++Field;

  if (Field == Record->field_end()) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  llvm::Value *Size = Builder.getInt(ArrayType->getSize());
  LValue EndOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
  if (Field->getType()->isPointerType() &&
      Ctx.hasSameType(Field->getType()->getPointeeType(),
                      ArrayType->getElementType())) {
    // End pointer.
    llvm::Value *IdxEnd[] = { Zero, Size };
    llvm::Value *ArrayEnd =
        Builder.CreateInBoundsGEP(ArrayPtr, IdxEnd, "arrayend");
    CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
  } else if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
    // Length.
    CGF.EmitStoreThroughLValue(RValue::get(Size), EndOrLength);
  } else {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }
}
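
// For illustration (hypothetical example): given
//
//   std::initializer_list<int> il = { 1, 2, 3 };
//
// the routine above first emits the backing array {1, 2, 3}, then fills in
// the two fields of the initializer_list object: the start pointer, and
// either an end pointer or a length, whichever representation the library's
// definition of std::initializer_list uses.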
/// \brief Determine if E is a trivial array filler, that is, one that is
/// equivalent to zero-initialization.
static bool isTrivialFiller(Expr *E) {
  if (!E)
    return true;

  if (isa<ImplicitValueInitExpr>(E))
    return true;

  if (auto *ILE = dyn_cast<InitListExpr>(E)) {
    if (ILE->getNumInits())
      return false;
    return isTrivialFiller(ILE->getArrayFiller());
  }

  if (auto *Cons = dyn_cast_or_null<CXXConstructExpr>(E))
    return Cons->getConstructor()->isDefaultConstructor() &&
           Cons->getConstructor()->isTrivial();

  // FIXME: Are there other cases where we can avoid emitting an initializer?
  return false;
}

/// \brief Emit initialization of an array from an initializer list.
void AggExprEmitter::EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
                                   QualType elementType, InitListExpr *E) {
  uint64_t NumInitElements = E->getNumInits();

  uint64_t NumArrayElements = AType->getNumElements();
  assert(NumInitElements <= NumArrayElements);

  // DestPtr is an array*.  Construct an elementType* by drilling
  // down a level.
  llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
  llvm::Value *indices[] = { zero, zero };
  llvm::Value *begin =
    Builder.CreateInBoundsGEP(DestPtr, indices, "arrayinit.begin");

  // Exception safety requires us to destroy all the
  // already-constructed members if an initializer throws.
  // For that, we'll need an EH cleanup.
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  llvm::AllocaInst *endOfInit = nullptr;
  EHScopeStack::stable_iterator cleanup;
  llvm::Instruction *cleanupDominator = nullptr;
  if (CGF.needsEHCleanup(dtorKind)) {
    // In principle we could tell the cleanup where we are more
    // directly, but the control flow can get so varied here that it
    // would actually be quite complex.  Therefore we go through an
    // alloca.
    endOfInit = CGF.CreateTempAlloca(begin->getType(),
                                     "arrayinit.endOfInit");
    cleanupDominator = Builder.CreateStore(begin, endOfInit);
    CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
                                         CGF.getDestroyer(dtorKind));
    cleanup = CGF.EHStack.stable_begin();

  // Otherwise, remember that we didn't need a cleanup.
  } else {
    dtorKind = QualType::DK_none;
  }

  llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);

  // The 'current element to initialize'.  The invariants on this
  // variable are complicated.  Essentially, after each iteration of
  // the loop, it points to the last initialized element, except
  // that it points to the beginning of the array before any
  // elements have been initialized.
  llvm::Value *element = begin;

  // Emit the explicit initializers.
  for (uint64_t i = 0; i != NumInitElements; ++i) {
    // Advance to the next element.
    if (i > 0) {
      element = Builder.CreateInBoundsGEP(element, one, "arrayinit.element");

      // Tell the cleanup that it needs to destroy up to this
      // element.  TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (endOfInit) Builder.CreateStore(element, endOfInit);
    }

    LValue elementLV = CGF.MakeAddrLValue(element, elementType);
    EmitInitializationToLValue(E->getInit(i), elementLV);
  }

  // Check whether there's a non-trivial array-fill expression.
  Expr *filler = E->getArrayFiller();
  bool hasTrivialFiller = isTrivialFiller(filler);

  // Any remaining elements need to be zero-initialized, possibly
  // using the filler expression.  We can skip this if we're
  // emitting to zeroed memory.
  if (NumInitElements != NumArrayElements &&
      !(Dest.isZeroed() && hasTrivialFiller &&
        CGF.getTypes().isZeroInitializable(elementType))) {

    // Use an actual loop.  This is basically
    //   do { *array++ = filler; } while (array != end);

    // Advance to the start of the rest of the array.
    if (NumInitElements) {
      element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start");
      if (endOfInit) Builder.CreateStore(element, endOfInit);
    }

    // Compute the end of the array.
    llvm::Value *end = Builder.CreateInBoundsGEP(begin,
                      llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements),
                                                 "arrayinit.end");

    llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
    llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

    // Jump into the body.
    CGF.EmitBlock(bodyBB);
    llvm::PHINode *currentElement =
      Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
    currentElement->addIncoming(element, entryBB);

    // Emit the actual filler expression.
    LValue elementLV = CGF.MakeAddrLValue(currentElement, elementType);
    if (filler)
      EmitInitializationToLValue(filler, elementLV);
    else
      EmitNullInitializationToLValue(elementLV);

    // Move on to the next element.
    llvm::Value *nextElement =
      Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next");

    // Tell the EH cleanup that we finished with the last element.
    if (endOfInit) Builder.CreateStore(nextElement, endOfInit);

    // Leave the loop if we're done.
    llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
                                             "arrayinit.done");
    llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
    Builder.CreateCondBr(done, endBB, bodyBB);
    currentElement->addIncoming(nextElement, Builder.GetInsertBlock());

    CGF.EmitBlock(endBB);
  }

  // Leave the partial-array cleanup if we entered one.
  if (dtorKind) CGF.DeactivateCleanupBlock(cleanup, cleanupDominator);
}

//===----------------------------------------------------------------------===//
//                            Visitor Methods
//===----------------------------------------------------------------------===//

void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
  Visit(E->GetTemporaryExpr());
}

void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
  EmitFinalDestCopy(e->getType(), CGF.getOpaqueLValueMapping(e));
}

void
AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
  if (Dest.isPotentiallyAliased() &&
      E->getType().isPODType(CGF.getContext())) {
    // For a POD type, just emit a load of the lvalue + a copy, because our
    // compound literal might alias the destination.
    EmitAggLoadOfLValue(E);
    return;
  }

  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitAggExpr(E->getInitializer(), Slot);
}
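
// For illustration (hypothetical example): the aliasing case guarded
// against above is
//
//   struct Pt { int x, y; } p;
//   p = (struct Pt){ p.y, p.x };   // literal may alias its destination
//
// Emitting the literal directly into 'p' would clobber 'p.y' before it is
// read, so a POD compound literal is emitted in its own location and copied.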
/// Attempt to look through various unimportant expressions to find a
/// cast of the given kind.
static Expr *findPeephole(Expr *op, CastKind kind) {
  while (true) {
    op = op->IgnoreParens();
    if (CastExpr *castE = dyn_cast<CastExpr>(op)) {
      if (castE->getCastKind() == kind)
        return castE->getSubExpr();
      if (castE->getCastKind() == CK_NoOp)
        continue;
    }
    return nullptr;
  }
}

void AggExprEmitter::VisitCastExpr(CastExpr *E) {
  switch (E->getCastKind()) {
  case CK_Dynamic: {
    // FIXME: Can this actually happen? We have no test coverage for it.
    assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
    LValue LV = CGF.EmitCheckedLValue(E->getSubExpr(),
                                      CodeGenFunction::TCK_Load);
    // FIXME: Do we also need to handle property references here?
    if (LV.isSimple())
      CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
    else
      CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");

    if (!Dest.isIgnored())
      CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
    break;
  }

  case CK_ToUnion: {
    if (Dest.isIgnored()) break;

    // GCC union extension
    QualType Ty = E->getSubExpr()->getType();
    QualType PtrTy = CGF.getContext().getPointerType(Ty);
    llvm::Value *CastPtr = Builder.CreateBitCast(Dest.getAddr(),
                                                 CGF.ConvertType(PtrTy));
    EmitInitializationToLValue(E->getSubExpr(),
                               CGF.MakeAddrLValue(CastPtr, Ty));
    break;
  }

  case CK_DerivedToBase:
  case CK_BaseToDerived:
  case CK_UncheckedDerivedToBase: {
    llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
                     "should have been unpacked before we got here");
  }

  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic: {
    bool isToAtomic = (E->getCastKind() == CK_NonAtomicToAtomic);

    // Determine the atomic and value types.
    QualType atomicType = E->getSubExpr()->getType();
    QualType valueType = E->getType();
    if (isToAtomic) std::swap(atomicType, valueType);

    assert(atomicType->isAtomicType());
    assert(CGF.getContext().hasSameUnqualifiedType(valueType,
                          atomicType->castAs<AtomicType>()->getValueType()));

    // Just recurse normally if we're ignoring the result or the
    // atomic type doesn't change representation.
    if (Dest.isIgnored() || !CGF.CGM.isPaddedAtomicType(atomicType)) {
      return Visit(E->getSubExpr());
    }

    CastKind peepholeTarget =
      (isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);

    // These two cases are reverses of each other; try to peephole them.
    if (Expr *op = findPeephole(E->getSubExpr(), peepholeTarget)) {
      assert(CGF.getContext().hasSameUnqualifiedType(op->getType(),
                                                     E->getType()) &&
             "peephole significantly changed types?");
      return Visit(op);
    }

    // If we're converting an r-value of non-atomic type to an r-value
    // of atomic type, just emit directly into the relevant sub-object.
    if (isToAtomic) {
      AggValueSlot valueDest = Dest;
      if (!valueDest.isIgnored() && CGF.CGM.isPaddedAtomicType(atomicType)) {
        // Zero-initialize.  (Strictly speaking, we only need to initialize
        // the padding at the end, but this is simpler.)
        if (!Dest.isZeroed())
          CGF.EmitNullInitialization(Dest.getAddr(), atomicType);

        // Build a GEP to refer to the subobject.
        llvm::Value *valueAddr =
            CGF.Builder.CreateStructGEP(valueDest.getAddr(), 0);
        valueDest = AggValueSlot::forAddr(valueAddr,
                                          valueDest.getAlignment(),
                                          valueDest.getQualifiers(),
                                          valueDest.isExternallyDestructed(),
                                          valueDest.requiresGCollection(),
                                          valueDest.isPotentiallyAliased(),
                                          AggValueSlot::IsZeroed);
      }

      CGF.EmitAggExpr(E->getSubExpr(), valueDest);
      return;
    }

    // Otherwise, we're converting an atomic type to a non-atomic type.
    // Make an atomic temporary, emit into that, and then copy the value out.
    AggValueSlot atomicSlot =
      CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
    CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);

    llvm::Value *valueAddr =
      Builder.CreateStructGEP(atomicSlot.getAddr(), 0);
    RValue rvalue = RValue::getAggregate(valueAddr, atomicSlot.isVolatile());
    return EmitFinalDestCopy(valueType, rvalue);
  }

  case CK_LValueToRValue:
    // If we're loading from a volatile type, force the destination
    // into existence.
    if (E->getSubExpr()->getType().isVolatileQualified()) {
      EnsureDest(E->getType());
      return Visit(E->getSubExpr());
    }

    // fallthrough

  case CK_NoOp:
  case CK_UserDefinedConversion:
  case CK_ConstructorConversion:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
                                                   E->getType()) &&
           "Implicit cast types must be compatible");
    Visit(E->getSubExpr());
    break;

  case CK_LValueBitCast:
    llvm_unreachable("should not be emitting lvalue bitcast as rvalue");

  case CK_Dependent:
  case CK_BitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToPointer:
  case CK_NullToMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ReinterpretMemberPointer:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_ToVoid:
  case CK_VectorSplat:
  case CK_IntegralCast:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_BuiltinFnToFnPtr:
  case CK_ZeroToOCLEvent:
  case CK_AddressSpaceConversion:
    llvm_unreachable("cast kind invalid for aggregate types");
  }
}

void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
  if (E->getCallReturnType()->isReferenceType()) {
    EmitAggLoadOfLValue(E);
    return;
  }

  RValue RV = CGF.EmitCallExpr(E, getReturnValueSlot());
  EmitMoveFromReturnSlot(E, RV);
}

void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
  RValue RV = CGF.EmitObjCMessageExpr(E, getReturnValueSlot());
  EmitMoveFromReturnSlot(E, RV);
}

void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  Visit(E->getRHS());
}

void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
  CodeGenFunction::StmtExprEvaluation eval(CGF);
  CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
}

void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
  if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
    VisitPointerToDataMemberBinaryOperator(E);
  else
    CGF.ErrorUnsupported(E, "aggregate binary expression");
}

void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
                                                    const BinaryOperator *E) {
  LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
  EmitFinalDestCopy(E->getType(), LV);
}

/// Is the value of the given expression possibly a reference to or
/// into a __block variable?
static bool isBlockVarRef(const Expr *E) {
  // Make sure we look through parens.
  E = E->IgnoreParens();

  // Check for a direct reference to a __block variable.
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
    const VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
    return (var && var->hasAttr<BlocksAttr>());
  }

  // More complicated stuff.

  // Binary operators.
  if (const BinaryOperator *op = dyn_cast<BinaryOperator>(E)) {
    // For an assignment or pointer-to-member operation, just care
    // about the LHS.
    if (op->isAssignmentOp() || op->isPtrMemOp())
      return isBlockVarRef(op->getLHS());

    // For a comma, just care about the RHS.
    if (op->getOpcode() == BO_Comma)
      return isBlockVarRef(op->getRHS());

    // FIXME: pointer arithmetic?
    return false;

  // Check both sides of a conditional operator.
  } else if (const AbstractConditionalOperator *op
               = dyn_cast<AbstractConditionalOperator>(E)) {
    return isBlockVarRef(op->getTrueExpr())
        || isBlockVarRef(op->getFalseExpr());

  // OVEs are required to support BinaryConditionalOperators.
  } else if (const OpaqueValueExpr *op
               = dyn_cast<OpaqueValueExpr>(E)) {
    if (const Expr *src = op->getSourceExpr())
      return isBlockVarRef(src);

  // Casts are necessary to get things like (*(int*)&var) = foo().
  // We don't really care about the kind of cast here, except
  // we don't want to look through l2r casts, because it's okay
  // to get the *value* in a __block variable.
  } else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) {
    if (cast->getCastKind() == CK_LValueToRValue)
      return false;
    return isBlockVarRef(cast->getSubExpr());

  // Handle unary operators.  Again, just aggressively look through
  // it, ignoring the operation.
  } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {
    return isBlockVarRef(uop->getSubExpr());

  // Look into the base of a field access.
  } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
    return isBlockVarRef(mem->getBase());

  // Look into the base of a subscript.
  } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) {
    return isBlockVarRef(sub->getBase());
  }

  return false;
}

void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  // For an assignment to work, the value on the right has
  // to be compatible with the value on the left.
  assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
                                                 E->getRHS()->getType())
         && "Invalid assignment");

  // If the LHS might be a __block variable, and the RHS can
  // potentially cause a block copy, we need to evaluate the RHS first
  // so that the assignment goes the right place.
  // This is pretty semantically fragile.
  if (isBlockVarRef(E->getLHS()) &&
      E->getRHS()->HasSideEffects(CGF.getContext())) {
    // Ensure that we have a destination, and evaluate the RHS into that.
    EnsureDest(E->getRHS()->getType());
    Visit(E->getRHS());

    // Now emit the LHS and copy into it.
    LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

    // That copy is an atomic copy if the LHS is atomic.
    if (LHS.getType()->isAtomicType()) {
      CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
      return;
    }

    EmitCopy(E->getLHS()->getType(),
             AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
                                     needsGC(E->getLHS()->getType()),
                                     AggValueSlot::IsAliased),
             Dest);
    return;
  }

  LValue LHS = CGF.EmitLValue(E->getLHS());

  // If we have an atomic type, evaluate into the destination and then
  // do an atomic copy.
  if (LHS.getType()->isAtomicType()) {
    EnsureDest(E->getRHS()->getType());
    Visit(E->getRHS());
    CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
    return;
  }

  // Codegen the RHS so that it stores directly into the LHS.
  AggValueSlot LHSSlot =
    AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
                            needsGC(E->getLHS()->getType()),
                            AggValueSlot::IsAliased);
  // A non-volatile aggregate destination might have a volatile member.
  if (!LHSSlot.isVolatile() &&
      CGF.hasVolatileMember(E->getLHS()->getType()))
    LHSSlot.setVolatile(true);

  CGF.EmitAggExpr(E->getRHS(), LHSSlot);

  // Copy into the destination if the assignment isn't ignored.
  EmitFinalDestCopy(E->getType(), LHS);
}
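
// For illustration (hypothetical example): the __block case above arises in
// code like
//
//   __block Agg a;
//   a = makeAggAndCopyBlock();   // RHS may copy a block capturing 'a'
//
// Copying such a block moves 'a' to the heap, so an LHS address computed
// before the RHS would point at the stale stack copy; evaluating the RHS
// first keeps the store aimed at the right object.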

void AggExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  RegionCounter Cnt = CGF.getPGORegionCounter(E);
  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock, Cnt.getCount());

  // Save whether the destination's lifetime is externally managed.
  bool isExternallyDestructed = Dest.isExternallyDestructed();

  eval.begin(CGF);
  CGF.EmitBlock(LHSBlock);
  Cnt.beginRegion(Builder);
  Visit(E->getTrueExpr());
  eval.end(CGF);

  assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
  CGF.Builder.CreateBr(ContBlock);

  // If the result of an agg expression is unused, then the emission
  // of the LHS might need to create a destination slot.  That's fine
  // with us, and we can safely emit the RHS into the same slot, but
  // we shouldn't claim that it's already being destructed.
  Dest.setExternallyDestructed(isExternallyDestructed);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  Visit(E->getFalseExpr());
  eval.end(CGF);

  CGF.EmitBlock(ContBlock);
}

void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
  Visit(CE->getChosenSubExpr());
}

void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
  llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());

  if (!ArgPtr) {
    // If EmitVAArg fails, we fall back to the LLVM instruction.
    llvm::Value *Val =
      Builder.CreateVAArg(ArgValue, CGF.ConvertType(VE->getType()));
    if (!Dest.isIgnored())
      Builder.CreateStore(Val, Dest.getAddr());
    return;
  }

  EmitFinalDestCopy(VE->getType(), CGF.MakeAddrLValue(ArgPtr, VE->getType()));
}

void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
  // Ensure that we have a slot, but if we already do, remember
  // whether it was externally destructed.
  bool wasExternallyDestructed = Dest.isExternallyDestructed();
  EnsureDest(E->getType());

  // We're going to push a destructor if there isn't already one.
  Dest.setExternallyDestructed();

  Visit(E->getSubExpr());

  // Push that destructor we promised.
  if (!wasExternallyDestructed)
    CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddr());
}

void
AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitCXXConstructExpr(E, Slot);
}

void
AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitLambdaExpr(E, Slot);
}

void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
  CGF.enterFullExpression(E);
  CodeGenFunction::RunCleanupsScope cleanups(CGF);
  Visit(E->getSubExpr());
}

void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T));
}

void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T));
}
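
// For illustration (hypothetical example): once a slot is known zeroed
// (e.g. after the memset emitted by CheckAggExprForMemSetUse below), an
// initializer like
//
//   struct S { int a, b, c, d; } s = { 0, 0, 0, f() };
//
// needs only the single store for f(); the leading zeros are recognized by
// isSimpleZero and skipped.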

/// isSimpleZero - If emitting this value will obviously just cause a store of
/// zero to memory, return true.  This can return false if uncertain, so it
/// just handles simple cases.
static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
  E = E->IgnoreParens();

  // 0
  if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
    return IL->getValue() == 0;
  // +0.0
  if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
    return FL->getValue().isPosZero();
  // int()
  if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
      CGF.getTypes().isZeroInitializable(E->getType()))
    return true;
  // (int*)0 - Null pointer expressions.
  if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
    return ICE->getCastKind() == CK_NullToPointer;
  // '\0'
  if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
    return CL->getValue() == 0;

  // Otherwise, hard case: conservatively return false.
  return false;
}


void
AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
  QualType type = LV.getType();
  // FIXME: Ignore result?
  // FIXME: Are initializers affected by volatile?
  if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
    // Storing "i32 0" to a zero'd memory location is a noop.
    return;
  } else if (isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) {
    return EmitNullInitializationToLValue(LV);
  } else if (type->isReferenceType()) {
    RValue RV = CGF.EmitReferenceBindingToExpr(E);
    return CGF.EmitStoreThroughLValue(RV, LV);
  }

  switch (CGF.getEvaluationKind(type)) {
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(E, LV, /*isInit*/ true);
    return;
  case TEK_Aggregate:
    CGF.EmitAggExpr(E, AggValueSlot::forLValue(LV,
                                               AggValueSlot::IsDestructed,
                                      AggValueSlot::DoesNotNeedGCBarriers,
                                               AggValueSlot::IsNotAliased,
                                               Dest.isZeroed()));
    return;
  case TEK_Scalar:
    if (LV.isSimple()) {
      CGF.EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false);
    } else {
      CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
    }
    return;
  }
  llvm_unreachable("bad evaluation kind");
}

void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
  QualType type = lv.getType();

  // If the destination slot is already zeroed out before the aggregate is
  // copied into it, we don't have to emit any zeros here.
  if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
    return;

  if (CGF.hasScalarEvaluationKind(type)) {
    // For non-aggregates, we can store the appropriate null constant.
    llvm::Value *null = CGF.CGM.EmitNullConstant(type);
    // Note that the following is not equivalent to
    // EmitStoreThroughBitfieldLValue for ARC types.
    if (lv.isBitField()) {
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(null), lv);
    } else {
      assert(lv.isSimple());
      CGF.EmitStoreOfScalar(null, lv, /* isInitialization */ true);
    }
  } else {
    // There's a potential optimization opportunity in combining
    // memsets; that would be easy for arrays, but relatively
    // difficult for structures with the current code.
    CGF.EmitNullInitialization(lv.getAddress(), lv.getType());
  }
}

void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
#if 0
  // FIXME: Assess perf here?  Figure out what cases are worth optimizing here
  // (Length of globals? Chunks of zeroed-out space?).
  //
  // If we can, prefer a copy from a global; this is a lot less code for long
  // globals, and it's easier for the current optimizers to analyze.
  if (llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
    llvm::GlobalVariable* GV =
    new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
                             llvm::GlobalValue::InternalLinkage, C, "");
    EmitFinalDestCopy(E->getType(), CGF.MakeAddrLValue(GV, E->getType()));
    return;
  }
#endif
  if (E->hadArrayRangeDesignator())
    CGF.ErrorUnsupported(E, "GNU array range designator extension");

  AggValueSlot Dest = EnsureSlot(E->getType());

  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddr(), E->getType(),
                                     Dest.getAlignment());

  // Handle initialization of an array.
  if (E->getType()->isArrayType()) {
    if (E->isStringLiteralInit())
      return Visit(E->getInit(0));

    QualType elementType =
        CGF.getContext().getAsArrayType(E->getType())->getElementType();

    llvm::PointerType *APType =
      cast<llvm::PointerType>(Dest.getAddr()->getType());
    llvm::ArrayType *AType =
      cast<llvm::ArrayType>(APType->getElementType());

    EmitArrayInit(Dest.getAddr(), AType, elementType, E);
    return;
  }

  if (E->getType()->isAtomicType()) {
    // An _Atomic(T) object can be list-initialized from an expression
    // of the same type.
    assert(E->getNumInits() == 1 &&
           CGF.getContext().hasSameUnqualifiedType(E->getInit(0)->getType(),
                                                   E->getType()) &&
           "unexpected list initialization for atomic object");
    return Visit(E->getInit(0));
  }

  assert(E->getType()->isRecordType() && "Only support structs/unions here!");

  // Do struct initialization; this code just sets each individual member
  // to the appropriate value.  This makes bitfield support automatic;
  // the disadvantage is that the generated code is more difficult for
  // the optimizer, especially with bitfields.
  unsigned NumInitElements = E->getNumInits();
  RecordDecl *record = E->getType()->castAs<RecordType>()->getDecl();

  // Prepare a 'this' for CXXDefaultInitExprs.
  CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddr());

  if (record->isUnion()) {
    // Only initialize one field of a union.  The field itself is
    // specified by the initializer list.
    if (!E->getInitializedFieldInUnion()) {
      // Empty union; we have nothing to do.

#ifndef NDEBUG
      // Make sure that it's really an empty union and not a failure of
      // semantic analysis.
      for (const auto *Field : record->fields())
        assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
#endif
      return;
    }

    // FIXME: volatility
    FieldDecl *Field = E->getInitializedFieldInUnion();

    LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestLV, Field);
    if (NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(E->getInit(0), FieldLoc);
    } else {
      // Default-initialize to null.
      EmitNullInitializationToLValue(FieldLoc);
    }

    return;
  }

  // We'll need to enter cleanup scopes in case any of the member
  // initializers throw an exception.
  SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
  llvm::Instruction *cleanupDominator = nullptr;

  // Here we iterate over the fields; this makes it simpler to both
  // default-initialize fields and skip over unnamed fields.
  unsigned curInitIndex = 0;
  for (const auto *field : record->fields()) {
    // We're done once we hit the flexible array member.
    if (field->getType()->isIncompleteArrayType())
      break;

    // Always skip anonymous bitfields.
    if (field->isUnnamedBitfield())
      continue;

    // We're done if we reach the end of the explicit initializers, we
    // have a zeroed object, and the rest of the fields are
    // zero-initializable.
    if (curInitIndex == NumInitElements && Dest.isZeroed() &&
        CGF.getTypes().isZeroInitializable(E->getType()))
      break;


    LValue LV = CGF.EmitLValueForFieldInitialization(DestLV, field);
    // We never generate write-barriers for initialized fields.
    LV.setNonGC(true);

    if (curInitIndex < NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(E->getInit(curInitIndex++), LV);
    } else {
      // We're out of initializers; default-initialize to null.
      EmitNullInitializationToLValue(LV);
    }

    // Push a destructor if necessary.
    // FIXME: if we have an array of structures, all explicitly
    // initialized, we can end up pushing a linear number of cleanups.
    bool pushedCleanup = false;
    if (QualType::DestructionKind dtorKind
          = field->getType().isDestructedType()) {
      assert(LV.isSimple());
      if (CGF.needsEHCleanup(dtorKind)) {
        if (!cleanupDominator)
          cleanupDominator = CGF.Builder.CreateUnreachable(); // placeholder

        CGF.pushDestroy(EHCleanup, LV.getAddress(), field->getType(),
                        CGF.getDestroyer(dtorKind), false);
        cleanups.push_back(CGF.EHStack.stable_begin());
        pushedCleanup = true;
      }
    }

    // If the GEP didn't get used because of a dead zero init or something
    // else, clean it up for -O0 builds and general tidiness.
    if (!pushedCleanup && LV.isSimple())
      if (llvm::GetElementPtrInst *GEP =
            dyn_cast<llvm::GetElementPtrInst>(LV.getAddress()))
        if (GEP->use_empty())
          GEP->eraseFromParent();
  }

  // Deactivate all the partial cleanups in reverse order, which
  // generally means popping them.
  for (unsigned i = cleanups.size(); i != 0; --i)
    CGF.DeactivateCleanupBlock(cleanups[i-1], cleanupDominator);

  // Destroy the placeholder if we made one.
  if (cleanupDominator)
    cleanupDominator->eraseFromParent();
}

//===----------------------------------------------------------------------===//
//                        Entry Points into this File
//===----------------------------------------------------------------------===//

/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
/// non-zero bytes that will be stored when outputting the initializer for the
/// specified initializer expression.
static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
  E = E->IgnoreParens();

  // 0 and 0.0 won't require any non-zero stores!
  if (isSimpleZero(E, CGF)) return CharUnits::Zero();

  // If this is an initlist expr, sum up the sizes of the (present)
  // elements.  If this is something weird, assume the whole thing is non-zero.
  const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
  if (!ILE || !CGF.getTypes().isZeroInitializable(ILE->getType()))
    return CGF.getContext().getTypeSizeInChars(E->getType());

  // InitListExprs for structs have to be handled carefully.  If there are
  // reference members, we need to consider the size of the reference, not the
  // referencee.  InitListExprs for unions and arrays can't have references.
  if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
    if (!RT->isUnionType()) {
      RecordDecl *SD = E->getType()->getAs<RecordType>()->getDecl();
      CharUnits NumNonZeroBytes = CharUnits::Zero();

      unsigned ILEElement = 0;
      for (const auto *Field : SD->fields()) {
        // We're done once we hit the flexible array member or run out of
        // InitListExpr elements.
        if (Field->getType()->isIncompleteArrayType() ||
            ILEElement == ILE->getNumInits())
          break;
        if (Field->isUnnamedBitfield())
          continue;

        const Expr *E = ILE->getInit(ILEElement++);

        // Reference values are always non-null and have the width of a
        // pointer.
        if (Field->getType()->isReferenceType())
          NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
              CGF.getTarget().getPointerWidth(0));
        else
          NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
      }

      return NumNonZeroBytes;
    }
  }


  CharUnits NumNonZeroBytes = CharUnits::Zero();
  for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
    NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
  return NumNonZeroBytes;
}

/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
/// zeros in it, emit a memset and avoid storing the individual zeros.
///
static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
                                     CodeGenFunction &CGF) {
  // If the slot is already known to be zeroed, nothing to do.  Don't mess with
  // volatile stores.
  if (Slot.isZeroed() || Slot.isVolatile() || Slot.getAddr() == nullptr)
    return;

  // C++ objects with a user-declared constructor don't need zeroing.
  if (CGF.getLangOpts().CPlusPlus)
    if (const RecordType *RT = CGF.getContext()
                   .getBaseElementType(E->getType())->getAs<RecordType>()) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
      if (RD->hasUserDeclaredConstructor())
        return;
    }

  // If the type is 16 bytes or smaller, prefer individual stores over memset.
  std::pair<CharUnits, CharUnits> TypeInfo =
    CGF.getContext().getTypeInfoInChars(E->getType());
  if (TypeInfo.first <= CharUnits::fromQuantity(16))
    return;

  // Check to see if over 3/4 of the initializer are known to be zero.  If so,
  // we prefer to emit memset + individual stores for the rest.
  CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
  if (NumNonZeroBytes*4 > TypeInfo.first)
    return;

  // Okay, it seems like a good idea to use an initial memset; emit the call.
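  // For illustration (hypothetical example): for
  //   struct { int v[100]; } big = { { 1 } };
  // nearly every byte is zero, so one memset of the whole object plus a
  // single store beats a hundred individual stores (assuming a 4-byte int,
  // that is one 400-byte memset).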
  llvm::Constant *SizeVal = CGF.Builder.getInt64(TypeInfo.first.getQuantity());
  CharUnits Align = TypeInfo.second;

  llvm::Value *Loc = Slot.getAddr();

  Loc = CGF.Builder.CreateBitCast(Loc, CGF.Int8PtrTy);
  CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal,
                           Align.getQuantity(), false);

  // Tell the AggExprEmitter that the slot is known zero.
  Slot.setZeroed();
}




/// EmitAggExpr - Emit the computation of the specified expression of aggregate
/// type.  The result is computed into DestPtr.  Note that if DestPtr is null,
/// the value of the aggregate expression is not needed.  If VolatileDest is
/// true, DestPtr cannot be 0.
void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) {
  assert(E && hasAggregateEvaluationKind(E->getType()) &&
         "Invalid aggregate expression to emit");
  assert((Slot.getAddr() != nullptr || Slot.isIgnored()) &&
         "slot has bits but no address");

  // Optimize the slot if possible.
  CheckAggExprForMemSetUse(Slot, E, *this);

  AggExprEmitter(*this, Slot).Visit(const_cast<Expr*>(E));
}

LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
  assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!");
  llvm::Value *Temp = CreateMemTemp(E->getType());
  LValue LV = MakeAddrLValue(Temp, E->getType());
  EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed,
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsNotAliased));
  return LV;
}

void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
                                        llvm::Value *SrcPtr, QualType Ty,
                                        bool isVolatile,
                                        CharUnits alignment,
                                        bool isAssignment) {
  assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");

  if (getLangOpts().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
      assert((Record->hasTrivialCopyConstructor() ||
              Record->hasTrivialCopyAssignment() ||
              Record->hasTrivialMoveConstructor() ||
              Record->hasTrivialMoveAssignment()) &&
             "Trying to aggregate-copy a type without a trivial copy/move "
             "constructor or assignment operator");
      // Ignore empty classes in C++.
      if (Record->isEmpty())
        return;
    }
  }

  // Aggregate assignment turns into llvm.memcpy.  This is almost valid per
  // C99 6.5.16.1p3, which states "If the value being stored in an object is
  // read from another object that overlaps in any way the storage of the
  // first object, then the overlap shall be exact and the two objects shall
  // have qualified or unqualified versions of a compatible type."
  //
  // memcpy is not defined if the source and destination pointers are exactly
  // equal, but other compilers do this optimization, and almost every memcpy
  // implementation handles this case safely.  If there is a libc that does not
  // safely handle this, we can add a target hook.

  // Get data size and alignment info for this aggregate.  If this is an
  // assignment, don't copy the tail padding; otherwise copying it is fine.
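  // For illustration (hypothetical example): for
  //   struct S { int i; char c; };   // typically size 8, data size 5
  // an assignment copies only the 5 data-size bytes, leaving the
  // destination's tail padding untouched, since the caller may have placed
  // other data there (e.g. when copying a base subobject).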
  std::pair<CharUnits, CharUnits> TypeInfo;
  if (isAssignment)
    TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty);
  else
    TypeInfo = getContext().getTypeInfoInChars(Ty);

  if (alignment.isZero())
    alignment = TypeInfo.second;

  // FIXME: Handle variable sized types.

  // FIXME: If we have a volatile struct, the optimizer can remove what might
  // appear to be `extra' memory ops:
  //
  // volatile struct { int i; } a, b;
  //
  // int main() {
  //   a = b;
  //   a = b;
  // }
  //
  // we need to use a different call here.  We use isVolatile to indicate when
  // either the source or the destination is volatile.

  llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
  llvm::Type *DBP =
    llvm::Type::getInt8PtrTy(getLLVMContext(), DPT->getAddressSpace());
  DestPtr = Builder.CreateBitCast(DestPtr, DBP);

  llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
  llvm::Type *SBP =
    llvm::Type::getInt8PtrTy(getLLVMContext(), SPT->getAddressSpace());
  SrcPtr = Builder.CreateBitCast(SrcPtr, SBP);

  // Don't do any of the memmove_collectable tests if GC isn't set.
  if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
    // fall through
  } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
    RecordDecl *Record = RecordTy->getDecl();
    if (Record->hasObjectMember()) {
      CharUnits size = TypeInfo.first;
      llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
      llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
      CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                    SizeVal);
      return;
    }
  } else if (Ty->isArrayType()) {
    QualType BaseType = getContext().getBaseElementType(Ty);
    if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
      if (RecordTy->getDecl()->hasObjectMember()) {
        CharUnits size = TypeInfo.first;
        llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
        llvm::Value *SizeVal =
          llvm::ConstantInt::get(SizeTy, size.getQuantity());
        CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                      SizeVal);
        return;
      }
    }
  }

  // Determine the metadata to describe the position of any padding in this
  // memcpy, as well as the TBAA tags for the members of the struct, in case
  // the optimizer wishes to expand it into scalar memory operations.
  llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty);

  Builder.CreateMemCpy(DestPtr, SrcPtr,
                       llvm::ConstantInt::get(IntPtrTy,
                                              TypeInfo.first.getQuantity()),
                       alignment.getQuantity(), isVolatile,
                       /*TBAATag=*/nullptr, TBAAStructTag);
}
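
// For illustration (hypothetical example): a trivial aggregate copy such as
//
//   struct Big { int v[64]; } x, y;
//   void copy() { x = y; }
//
// reaches EmitAggregateCopy and lowers to a single llvm.memcpy of
// sizeof(Big) bytes, annotated with TBAA-struct metadata as above.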